Date: 2009-05-04
From: Steven Rostedt <srostedt@redhat.com>
Subject: [PATCH 3/6] ring-buffer: convert cpu buffer entries to local_t

The entries counter in the cpu buffer is not atomic. It can be updated by
interrupts or by readers running on another CPU.

But making entries an "atomic_t" would cost a full atomic operation on
every update, which can hurt performance. Instead we convert it to a
local_t, which increments the counter with a local CPU atomic operation
(if the arch supports it).

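For illustration, here is a minimal sketch of the local_t pattern (not
from this patch; the struct and function names are hypothetical): the
counter is only ever written by code running on the owning CPU, so a
cheaper local atomic suffices:

#include <asm/local.h>

/* Hypothetical per-cpu statistics block; writers always run on the
 * CPU that owns it (irq or preempt-disabled context). */
struct my_cpu_stats {
	local_t		events;
};

static void record_event(struct my_cpu_stats *stats)
{
	/* Atomic against interrupts on this CPU, but avoids the cost
	 * of a cross-CPU atomic (e.g. no LOCK prefix on x86). */
	local_inc(&stats->events);
}

static long read_events(struct my_cpu_stats *stats)
{
	/* Readers on other CPUs may see a slightly stale value. */
	return local_read(&stats->events);
}
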
Instead of fighting with readers and overwrites that decrement the
counter, I added a "read" counter. Every time a reader consumes an
entry, the read counter is incremented.

We already have an overrun counter, and with that, the entries counter,
and the read counter, we can calculate the total number of entries
currently in the buffer with:

  (entries - overrun) - read

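For example (made-up numbers): if 1000 events have been committed
(entries = 1000), 200 of those were overwritten before anyone read them
(overrun = 200), and 300 have since been consumed by readers
(read = 300), the buffer currently holds (1000 - 200) - 300 = 500
entries.
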
As long as the total number of entries in the ring buffer is less than
what a word can hold, this will work. But since the entries counter was
previously an unsigned long, this is no different from what we had
before.

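To see why counter wrap-around is harmless here, a small userspace
sketch (assuming unsigned long counters that never drift more than
2^BITS_PER_LONG counts apart):

#include <limits.h>
#include <stdio.h>

int main(void)
{
	/* The writer's counter has wrapped past ULONG_MAX;
	 * the reader's counter has not. */
	unsigned long entries = 5;
	unsigned long read = ULONG_MAX - 2;

	/* Unsigned subtraction is modulo 2^BITS_PER_LONG, so the
	 * true distance, 8, is recovered despite the wrap. */
	printf("%lu\n", entries - read);
	return 0;
}
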
Thanks to Andrew Morton for pointing out in the first version that
atomic_t does not replace unsigned long. I switched to local_t (which
is built on atomic_long_t) even though it is signed. A negative count
is most likely a bug.

[ Impact: keep accurate count of cpu buffer entries ]

Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
 kernel/trace/ring_buffer.c |   20 +++++++++++---------
 1 file changed, 11 insertions(+), 9 deletions(-)

diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index dc8b2ab..f2d56e9 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -405,7 +405,8 @@ struct ring_buffer_per_cpu {
 	unsigned long			nmi_dropped;
 	unsigned long			commit_overrun;
 	unsigned long			overrun;
-	unsigned long			entries;
+	unsigned long			read;
+	local_t				entries;
 	u64				write_stamp;
 	u64				read_stamp;
 	atomic_t			record_disabled;
@@ -997,7 +998,6 @@ static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
 		if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 			continue;
 		cpu_buffer->overrun++;
-		cpu_buffer->entries--;
 	}
 }
 
@@ -1588,7 +1588,7 @@ EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);
 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
 		      struct ring_buffer_event *event)
 {
-	cpu_buffer->entries++;
+	local_inc(&cpu_buffer->entries);
 
 	/* Only process further if we own the commit */
 	if (!rb_is_commit(cpu_buffer, event))
@@ -1722,7 +1722,7 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
 	 * The commit is still visible by the reader, so we
 	 * must increment entries.
 	 */
-	cpu_buffer->entries++;
+	local_inc(&cpu_buffer->entries);
  out:
 	/*
 	 * If a write came in and pushed the tail page
@@ -1902,7 +1902,8 @@ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
 		return 0;
 
 	cpu_buffer = buffer->buffers[cpu];
-	ret = cpu_buffer->entries;
+	ret = (local_read(&cpu_buffer->entries) - cpu_buffer->overrun)
+		- cpu_buffer->read;
 
 	return ret;
 }
@@ -1985,7 +1986,7 @@ unsigned long ring_buffer_entries(struct ring_buffer *buffer)
 	/* if you care about this being correct, lock the buffer */
 	for_each_buffer_cpu(buffer, cpu) {
 		cpu_buffer = buffer->buffers[cpu];
-		entries += cpu_buffer->entries;
+		entries += local_read(&cpu_buffer->entries);
 	}
 
 	return entries;
@@ -2225,7 +2226,7 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
 
 	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX
 	    || rb_discarded_event(event))
-		cpu_buffer->entries--;
+		cpu_buffer->read++;
 
 	rb_update_read_stamp(cpu_buffer, event);
 
@@ -2642,7 +2643,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 	cpu_buffer->nmi_dropped = 0;
 	cpu_buffer->commit_overrun = 0;
 	cpu_buffer->overrun = 0;
-	cpu_buffer->entries = 0;
+	cpu_buffer->read = 0;
+	local_set(&cpu_buffer->entries, 0);
 
 	cpu_buffer->write_stamp = 0;
 	cpu_buffer->read_stamp = 0;
@@ -2813,7 +2815,7 @@ static void rb_remove_entries(struct ring_buffer_per_cpu *cpu_buffer,
 		/* Only count data entries */
 		if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
 			continue;
-		cpu_buffer->entries--;
+		cpu_buffer->read++;
 	}
 	__raw_spin_unlock(&cpu_buffer->lock);
 }
-- 
1.6.2.4