    Subject: [tip:perfcounters/core] perf_counter: inheritable sample counters
    Commit-ID:  2023b359214bbc5bad31571cf50d7fb83b535c0a
    Gitweb: http://git.kernel.org/tip/2023b359214bbc5bad31571cf50d7fb83b535c0a
    Author: Peter Zijlstra <a.p.zijlstra@chello.nl>
    AuthorDate: Tue, 5 May 2009 17:50:26 +0200
    Committer: Ingo Molnar <mingo@elte.hu>
    CommitDate: Tue, 5 May 2009 20:18:33 +0200

    perf_counter: inheritable sample counters

    Redirect the output to the parent counter and put in some sanity checks.

    [ Impact: new perfcounter feature - inherited sampling counters ]

    Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
    Cc: Paul Mackerras <paulus@samba.org>
    Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
    LKML-Reference: <20090505155437.331556171@chello.nl>
    Signed-off-by: Ingo Molnar <mingo@elte.hu>


---
 kernel/perf_counter.c |   32 ++++++++++++++++++++++++++++++--
 1 files changed, 30 insertions(+), 2 deletions(-)

diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index c881afe..60e55f0 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -738,10 +738,18 @@ static void perf_counter_enable(struct perf_counter *counter)
 	spin_unlock_irq(&ctx->lock);
 }
 
-static void perf_counter_refresh(struct perf_counter *counter, int refresh)
+static int perf_counter_refresh(struct perf_counter *counter, int refresh)
 {
+	/*
+	 * not supported on inherited counters
+	 */
+	if (counter->hw_event.inherit)
+		return -EINVAL;
+
 	atomic_add(refresh, &counter->event_limit);
 	perf_counter_enable(counter);
+
+	return 0;
 }
 
 /*
@@ -1307,7 +1315,7 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		perf_counter_disable_family(counter);
 		break;
 	case PERF_COUNTER_IOC_REFRESH:
-		perf_counter_refresh(counter, arg);
+		err = perf_counter_refresh(counter, arg);
 		break;
 	case PERF_COUNTER_IOC_RESET:
 		perf_counter_reset(counter);
@@ -1814,6 +1822,12 @@ static int perf_output_begin(struct perf_output_handle *handle,
 	struct perf_mmap_data *data;
 	unsigned int offset, head;
 
+	/*
+	 * For inherited counters we send all the output towards the parent.
+	 */
+	if (counter->parent)
+		counter = counter->parent;
+
 	rcu_read_lock();
 	data = rcu_dereference(counter->data);
 	if (!data)
@@ -1995,6 +2009,9 @@ static void perf_counter_output(struct perf_counter *counter,
 	if (record_type & PERF_RECORD_ADDR)
 		perf_output_put(&handle, addr);
 
+	/*
+	 * XXX PERF_RECORD_GROUP vs inherited counters seems difficult.
+	 */
 	if (record_type & PERF_RECORD_GROUP) {
 		struct perf_counter *leader, *sub;
 		u64 nr = counter->nr_siblings;
@@ -2281,6 +2298,11 @@ int perf_counter_overflow(struct perf_counter *counter,
 	int events = atomic_read(&counter->event_limit);
 	int ret = 0;
 
+	/*
+	 * XXX event_limit might not quite work as expected on inherited
+	 * counters
+	 */
+
 	counter->pending_kill = POLL_IN;
 	if (events && atomic_dec_and_test(&counter->event_limit)) {
 		ret = 1;
@@ -2801,6 +2823,12 @@ perf_counter_alloc(struct perf_counter_hw_event *hw_event,
 
 	pmu = NULL;
 
+	/*
+	 * we currently do not support PERF_RECORD_GROUP on inherited counters
+	 */
+	if (hw_event->inherit && (hw_event->record_type & PERF_RECORD_GROUP))
+		goto done;
+
 	if (perf_event_raw(hw_event)) {
 		pmu = hw_perf_counter_init(counter);
 		goto done;
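
For anyone wanting to exercise the feature, the rough userspace sketch below
shows what an inherited sampling counter looks like with this patch applied:
the parent opens a sampling counter with hw_event.inherit set, maps its ring
buffer and forks, and overflow samples generated in the child are redirected
into the parent counter's buffer. Only hw_event.inherit, hw_event.record_type,
PERF_RECORD_GROUP and PERF_COUNTER_IOC_REFRESH are taken from the patch
itself; the other field names, constants and the arch-specific syscall number
are assumptions about the perf_counter ABI of this period and may need
adjusting against the actual headers of this tree.

#include <linux/perf_counter.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	struct perf_counter_hw_event hw_event;
	void *ring;
	int fd;

	memset(&hw_event, 0, sizeof(hw_event));
	hw_event.config      = PERF_COUNT_CPU_CYCLES;	/* assumed event id      */
	hw_event.irq_period  = 100000;			/* assumed period field  */
	hw_event.record_type = PERF_RECORD_IP | PERF_RECORD_TID;
	hw_event.inherit     = 1;			/* children inherit it   */
	/*
	 * Adding PERF_RECORD_GROUP to record_type together with inherit
	 * now makes counter creation fail, per the check added to
	 * perf_counter_alloc().
	 */

	/* pid 0 = current task, cpu -1 = any cpu, no group leader, no flags */
	fd = syscall(__NR_perf_counter_open, &hw_event, 0, -1, -1, 0);
	if (fd < 0)
		return 1;

	/*
	 * One header page plus one data page; with this patch the child's
	 * samples land here as well, because perf_output_begin() redirects
	 * output to the parent counter.
	 */
	ring = mmap(NULL, 2 * sysconf(_SC_PAGESIZE), PROT_READ, MAP_SHARED,
		    fd, 0);
	if (ring == MAP_FAILED)
		return 1;

	if (fork() == 0) {
		/* child: do some work under the inherited counter */
		_exit(0);
	}
	wait(NULL);

	/*
	 * One of the new sanity checks: REFRESH is rejected with -EINVAL
	 * on an inherited counter.
	 */
	(void)ioctl(fd, PERF_COUNTER_IOC_REFRESH, 1);

	return 0;
}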
