    Subject: Re: [PATCH v2 -tip] perf: x86, add SandyBridge support
    On Tue, 2011-03-01 at 15:43 +0800, Stephane Eranian wrote:
    > On Mon, Feb 28, 2011 at 10:15 AM, Peter Zijlstra <a.p.zijlstra@chello.nl> wrote:
    > > On Mon, 2011-02-28 at 15:22 +0800, Lin Ming wrote:
    > >> This patch adds basic SandyBridge support, including hardware cache
    > >> events and PEBS events support.
    > >>
    > >> LLC-* hardware cache events don't work for now; they depend on the
    > >> offcore patches.
    > >
    > > What's the status of those? Stephane reported some problems, last I
    > > remember.
    > >
    > I tried the trick I mentioned and it seems to work.
    >
    > Something like below with hwc->extra_alloc.
    > Could probably find a better name for that field.

    Stephane,

    I'll integrate the changes below into the offcore patches, OK?

    diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
    index f152930..ac1d100 100644
    --- a/arch/x86/kernel/cpu/perf_event_intel.c
    +++ b/arch/x86/kernel/cpu/perf_event_intel.c
    @@ -906,7 +906,7 @@ intel_percore_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
     	int free_slot;
     	int found;

    -	if (!x86_pmu.percore_constraints)
    +	if (!x86_pmu.percore_constraints || hwc->extra_alloc)
     		return NULL;

     	for (c = x86_pmu.percore_constraints; c->cmask; c++) {
    @@ -931,6 +931,7 @@ intel_percore_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
     				if (hwc->extra_config == era->extra_config) {
     					era->ref++;
     					cpuc->percore_used = 1;
    +					hwc->extra_alloc = 1;
     					c = NULL;
     				}
     				/* else conflict */
    @@ -945,6 +946,7 @@ intel_percore_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
     			era->extra_reg = hwc->extra_reg;
     			era->extra_config = hwc->extra_config;
     			cpuc->percore_used = 1;
    +			hwc->extra_alloc = 1;
     			c = NULL;
     		}
     		raw_spin_unlock(&pc->lock);
    @@ -998,6 +1000,7 @@ static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
     			    era->extra_config == hwc->extra_config &&
     			    era->extra_reg == er->msr) {
     				era->ref--;
    +				hwc->extra_alloc = 0;
     				break;
     			}
     		}
    diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
    index f531ce3..dbbf33a 100644
    --- a/include/linux/perf_event.h
    +++ b/include/linux/perf_event.h
    @@ -546,6 +546,7 @@ struct hw_perf_event {
     			int		last_cpu;
     			unsigned int	extra_reg;
     			u64		extra_config;
    +			int		extra_alloc;
     		};
     		struct { /* software */
     			struct hrtimer	hrtimer;
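
    For anyone following the archive, here is a stand-alone toy model of the
    per-core extra-register refcounting that the hunks above modify. It is an
    illustration only, not kernel code: the struct and function names merely
    mirror the kernel ones, and 0x1a6 is just an example MSR number. The rule
    is the same as above -- an identical (reg, config) pair shares a slot and
    bumps its refcount, while a different config on a busy register is a
    conflict.

    /* Toy model of er_account sharing -- illustration only, not kernel code. */
    #include <stdio.h>

    #define MAX_EXTRA_REGS 2

    struct er_account {
    	int			ref;		/* reference count */
    	unsigned int		extra_reg;	/* extra MSR number */
    	unsigned long long	extra_config;	/* value programmed into it */
    };

    static struct er_account regs[MAX_EXTRA_REGS];

    /* Returns 0 when the register is allocated or shared, -1 on conflict. */
    static int get_extra_reg(unsigned int reg, unsigned long long config)
    {
    	int i, free_slot = -1;

    	for (i = 0; i < MAX_EXTRA_REGS; i++) {
    		if (regs[i].ref > 0 && regs[i].extra_reg == reg) {
    			if (regs[i].extra_config == config) {
    				regs[i].ref++;		/* share the slot */
    				return 0;
    			}
    			return -1;			/* config conflict */
    		}
    		if (regs[i].ref == 0 && free_slot == -1)
    			free_slot = i;
    	}
    	if (free_slot == -1)
    		return -1;				/* no free slot */
    	regs[free_slot].ref = 1;
    	regs[free_slot].extra_reg = reg;
    	regs[free_slot].extra_config = config;
    	return 0;
    }

    static void put_extra_reg(unsigned int reg, unsigned long long config)
    {
    	int i;

    	for (i = 0; i < MAX_EXTRA_REGS; i++) {
    		if (regs[i].ref > 0 && regs[i].extra_reg == reg &&
    		    regs[i].extra_config == config) {
    			regs[i].ref--;
    			return;
    		}
    	}
    }

    int main(void)
    {
    	printf("%d\n", get_extra_reg(0x1a6, 0xff));	/* 0: allocated  */
    	printf("%d\n", get_extra_reg(0x1a6, 0xff));	/* 0: shared     */
    	printf("%d\n", get_extra_reg(0x1a6, 0x02));	/* -1: conflict  */
    	put_extra_reg(0x1a6, 0xff);
    	put_extra_reg(0x1a6, 0xff);
    	printf("%d\n", get_extra_reg(0x1a6, 0x02));	/* 0: free again */
    	return 0;
    }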

    >
    > static struct event_constraint *
    > intel_percore_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
    > {
    > 	struct hw_perf_event *hwc = &event->hw;
    > 	unsigned int e = hwc->config & ARCH_PERFMON_EVENTSEL_EVENT;
    > 	struct event_constraint *c;
    > 	struct intel_percore *pc;
    > 	struct er_account *era;
    > 	int i;
    > 	int free_slot;
    > 	int found;
    >
    > 	if (!x86_pmu.percore_constraints)
    > 		return NULL;
    >
    > 	if (hwc->extra_alloc)
    > 		return NULL;
    >
    > 	for (c = x86_pmu.percore_constraints; c->cmask; c++) {
    > 		if (e != c->code)
    > 			continue;
    >
    > 		/*
    > 		 * Allocate resource per core.
    > 		 */
    > 		c = NULL;
    > 		pc = cpuc->per_core;
    > 		if (!pc)
    > 			break;
    > 		c = &emptyconstraint;
    > 		raw_spin_lock(&pc->lock);
    > 		free_slot = -1;
    > 		found = 0;
    > 		for (i = 0; i < MAX_EXTRA_REGS; i++) {
    > 			era = &pc->regs[i];
    > 			if (era->ref > 0 && hwc->extra_reg == era->extra_reg) {
    > 				/* Allow sharing same config */
    > 				if (hwc->extra_config == era->extra_config) {
    > 					era->ref++;
    > 					cpuc->percore_used = 1;
    > 					hwc->extra_alloc = 1;
    > 					c = NULL;
    > 				}
    > 				/* else conflict */
    > 				found = 1;
    > 				break;
    > 			} else if (era->ref == 0 && free_slot == -1)
    > 				free_slot = i;
    > 		}
    > 		if (!found && free_slot != -1) {
    > 			era = &pc->regs[free_slot];
    > 			era->ref = 1;
    > 			era->extra_reg = hwc->extra_reg;
    > 			era->extra_config = hwc->extra_config;
    > 			cpuc->percore_used = 1;
    > 			hwc->extra_alloc = 1;
    > 			c = NULL;
    > 		}
    > 		raw_spin_unlock(&pc->lock);
    > 		return c;
    > 	}
    >
    > 	return NULL;
    > }
    >
    > static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
    > 					struct perf_event *event)
    > {
    > 	struct extra_reg *er;
    > 	struct intel_percore *pc;
    > 	struct er_account *era;
    > 	struct hw_perf_event *hwc = &event->hw;
    > 	int i, allref;
    >
    > 	if (!cpuc->percore_used)
    > 		return;
    >
    > 	for (er = x86_pmu.extra_regs; er->msr; er++) {
    > 		if (er->event != (hwc->config & er->config_mask))
    > 			continue;
    >
    > 		pc = cpuc->per_core;
    > 		raw_spin_lock(&pc->lock);
    > 		for (i = 0; i < MAX_EXTRA_REGS; i++) {
    > 			era = &pc->regs[i];
    > 			if (era->ref > 0 &&
    > 			    era->extra_config == hwc->extra_config &&
    > 			    era->extra_reg == er->msr) {
    > 				era->ref--;
    > 				hwc->extra_alloc = 0;
    > 				break;
    > 			}
    > 		}
    > 		allref = 0;
    > 		for (i = 0; i < MAX_EXTRA_REGS; i++)
    > 			allref += pc->regs[i].ref;
    > 		if (allref == 0)
    > 			cpuc->percore_used = 0;
    > 		raw_spin_unlock(&pc->lock);
    > 		break;
    > 	}
    > }
    >
    > >
    > >>  #define INTEL_EVENT_CONSTRAINT(c, n)	\
    > >>  	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)
    > >> +#define INTEL_EVENT_CONSTRAINT2(c, n)	\
    > >> +	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
    > >
    > > That's a particularly bad name; how about something like
    > > INTEL_UEVENT_CONSTRAINT or some such?
    > >
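
    For the archive: whatever the macro ends up being called, the functional
    point is that it constrains on event code plus umask instead of the event
    code alone, so constraints that differ only in umask stay distinct. A tiny
    stand-alone illustration, with the mask values as I read them in
    asm/perf_event.h and 0x01cd/0x02cd as arbitrary example raw encodings:

    #include <stdio.h>

    #define ARCH_PERFMON_EVENTSEL_EVENT	0x000000FFULL	/* event code, bits 0-7  */
    #define ARCH_PERFMON_EVENTSEL_UMASK	0x0000FF00ULL	/* unit mask,  bits 8-15 */
    #define INTEL_ARCH_EVENT_MASK	\
    	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

    int main(void)
    {
    	unsigned long long a = 0x01cd;	/* event 0xcd, umask 0x01 */
    	unsigned long long b = 0x02cd;	/* event 0xcd, umask 0x02 */

    	/* Event-code-only mask (INTEL_EVENT_CONSTRAINT): both collapse to one. */
    	printf("%d\n", (a & ARCH_PERFMON_EVENTSEL_EVENT) ==
    		       (b & ARCH_PERFMON_EVENTSEL_EVENT));	/* 1 */

    	/* Event + umask mask (the new macro): they stay distinct. */
    	printf("%d\n", (a & INTEL_ARCH_EVENT_MASK) ==
    		       (b & INTEL_ARCH_EVENT_MASK));		/* 0 */

    	return 0;
    }
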
    > >> @@ -702,7 +738,13 @@ static void intel_ds_init(void)
    > >>  		printk(KERN_CONT "PEBS fmt1%c, ", pebs_type);
    > >>  		x86_pmu.pebs_record_size = sizeof(struct pebs_record_nhm);
    > >>  		x86_pmu.drain_pebs = intel_pmu_drain_pebs_nhm;
    > >> -		x86_pmu.pebs_constraints = intel_nehalem_pebs_events;
    > >> +		switch (boot_cpu_data.x86_model) {
    > >> +		case 42: /* SandyBridge */
    > >> +			x86_pmu.pebs_constraints = intel_snb_pebs_events;
    > >> +			break;
    > >> +		default:
    > >> +			x86_pmu.pebs_constraints = intel_nehalem_pebs_events;
    > >> +		}
    > >>  		break;
    > >>
    > >>  	default:
    > >
    > > We already have this massive model switch right after this function;
    > > might as well move the PEBS constraint assignment there.
    > >
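
    For the archive, one way to read that suggestion: drop the inner model
    switch in intel_ds_init() and set pebs_constraints from the big
    boot_cpu_data.x86_model switch that runs right after it. A rough sketch
    only, against the quoted v2 patch (intel_snb_pebs_events is the table
    introduced there):

    	case 42: /* SandyBridge */
    		...
    		x86_pmu.pebs_constraints = intel_snb_pebs_events;
    		break;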



