Subject: [PATCH 2/5] perf, x86: Don't assume there can be only 4 PEBS events
    From: Andi Kleen <ak@linux.intel.com>

On Sandy Bridge, in non-HT mode, there are 8 counters available. Since every
counter can write a PEBS record, assuming a maximum of 4 is incorrect. Use
the reported counter number instead, capped by an upper limit so the static
arrays stay bounded.

Also make the warning messages a bit more informative.

    Signed-off-by: Andi Kleen <ak@linux.intel.com>
    ---
    arch/x86/kernel/cpu/perf_event.h | 3 ++-
    arch/x86/kernel/cpu/perf_event_intel.c | 2 ++
    arch/x86/kernel/cpu/perf_event_intel_ds.c | 8 ++++----
    3 files changed, 8 insertions(+), 5 deletions(-)
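
[Not part of the patch, just an illustration for reviewers: the reported
counter count comes from CPUID leaf 0xA during intel_pmu_init(), and the
min_t() in the hunk below clamps it so the PEBS status-bit loops never index
past the MAX_PEBS_EVENTS-sized bookkeeping. The standalone helper here
sketches that clamp; max_pebs_event_limit() is a made-up name, not a
function in the tree.]

/* Illustration only: clamp the reported counter count to the size of
 * the static PEBS arrays, same effect as the min_t() used below. */
#define MAX_PEBS_EVENTS	8

static inline unsigned int max_pebs_event_limit(unsigned int num_counters)
{
	/* never index past the static arrays, even if CPUID reports more */
	return num_counters < MAX_PEBS_EVENTS ? num_counters : MAX_PEBS_EVENTS;
}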

    diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
    index cdddcef..43cfed2 100644
    --- a/arch/x86/kernel/cpu/perf_event.h
    +++ b/arch/x86/kernel/cpu/perf_event.h
@@ -57,7 +57,7 @@ struct amd_nb {
 };
 
 /* The maximal number of PEBS events: */
-#define MAX_PEBS_EVENTS		4
+#define MAX_PEBS_EVENTS		8
 
 /*
  * A debug store configuration.
@@ -365,6 +365,7 @@ struct x86_pmu {
 	int		pebs_record_size;
 	void		(*drain_pebs)(struct pt_regs *regs);
 	struct event_constraint *pebs_constraints;
+	int		max_pebs_events;
 
 	/*
 	 * Intel LBR
    diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
    index 2e40391..71b8de5 100644
    --- a/arch/x86/kernel/cpu/perf_event_intel.c
    +++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1732,6 +1732,8 @@ __init int intel_pmu_init(void)
 	x86_pmu.events_maskl		= ebx.full;
 	x86_pmu.events_mask_len		= eax.split.mask_length;
 
+	x86_pmu.max_pebs_events		= min_t(unsigned, MAX_PEBS_EVENTS, x86_pmu.num_counters);
+
 	/*
 	 * Quirk: v2 perfmon does not report fixed-purpose events, so
 	 * assume at least 3 events:
    diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
    index 5a3edc2..0042942 100644
    --- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
    +++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -627,7 +627,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
 	 * Should not happen, we program the threshold at 1 and do not
 	 * set a reset value.
 	 */
-	WARN_ON_ONCE(n > 1);
+	WARN_ONCE(n > 1, "bad leftover pebs %d\n", n);
 	at += n - 1;
 
 	__intel_pmu_pebs_event(event, iregs, at);
@@ -658,10 +658,10 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
 	 * Should not happen, we program the threshold at 1 and do not
 	 * set a reset value.
 	 */
-	WARN_ON_ONCE(n > MAX_PEBS_EVENTS);
+	WARN_ONCE(n > x86_pmu.max_pebs_events, "Unexpected number of pebs records %d\n", n);
 
 	for ( ; at < top; at++) {
-		for_each_set_bit(bit, (unsigned long *)&at->status, MAX_PEBS_EVENTS) {
+		for_each_set_bit(bit, (unsigned long *)&at->status, x86_pmu.max_pebs_events) {
 			event = cpuc->events[bit];
 			if (!test_bit(bit, cpuc->active_mask))
 				continue;
@@ -677,7 +677,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
 			break;
 		}
 
-		if (!event || bit >= MAX_PEBS_EVENTS)
+		if (!event || bit >= x86_pmu.max_pebs_events)
 			continue;
 
 		__intel_pmu_pebs_event(event, iregs, at);
    --
    1.7.7.6

