Subject: Re: [PATCH v2 2/6] perf, x86: Add Intel Nehalem/Westmere uncore pmu
    On Fri, 2011-07-15 at 14:34 +0000, Lin Ming wrote:
    > Add Intel Nehalem/Westmere uncore pmu support.
    > And also the generic data structure to support uncore pmu.
    >
    > Uncore pmu interrupt does not work, so hrtimer is used to pull counters.

    s/pull/poll/

    > diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
    > new file mode 100644
    > index 0000000..79a501e
    > --- /dev/null
    > +++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
    > @@ -0,0 +1,450 @@
    > +#include "perf_event_intel_uncore.h"
    > +
    > +static DEFINE_PER_CPU(struct cpu_uncore_events, cpu_uncore_events);
    > +static DEFINE_RAW_SPINLOCK(intel_uncore_lock);
    > +
    > +static bool uncore_pmu_initialized;
    > +static struct intel_uncore_pmu intel_uncore_pmu __read_mostly;
    > +
    > +/*
    > + * Uncore pmu interrupt does not work.
    > + * Use hrtimer to pull the counter every 10 seconds.
    > + */
    > +#define UNCORE_PMU_HRTIMER_INTERVAL (10000000000ULL)
    Use 10 * NSEC_PER_SEC here.
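    Something like this (NSEC_PER_SEC comes from <linux/time.h>; purely
    illustrative, untested):

	#include <linux/time.h>		/* NSEC_PER_SEC */

	/*
	 * Uncore PMU interrupt does not work, so poll the counters
	 * from an hrtimer every 10 seconds.
	 */
	#define UNCORE_PMU_HRTIMER_INTERVAL	(10ULL * NSEC_PER_SEC)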

    > +static int uncore_pmu_event_init(struct perf_event *event)
    > +{
    > + struct hw_perf_event *hwc = &event->hw;
    > +
    > + if (!uncore_pmu_initialized)
    > + return -ENOENT;
    > +
    > + if (event->attr.type != uncore_pmu.type)
    > + return -ENOENT;
    > +
    > + /*
    > + * Uncore PMU does measure at all privilege level all the time.
    > + * So it doesn't make sense to specify any exclude bits.
    > + */
    > + if (event->attr.exclude_user || event->attr.exclude_kernel
    > + || event->attr.exclude_hv || event->attr.exclude_idle)
    > + return -ENOENT;

    -EINVAL; the PMU exists and is the right one, we just don't support
    this configuration.
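
    That is, keep -ENOENT for the type checks above but reject the
    unsupported attribute bits with -EINVAL; a sketch of just this hunk:

	/*
	 * The uncore PMU counts at all privilege levels all the time,
	 * so the exclude bits cannot be honoured; reject them, but with
	 * -EINVAL since this is the right PMU for the event.
	 */
	if (event->attr.exclude_user || event->attr.exclude_kernel ||
	    event->attr.exclude_hv || event->attr.exclude_idle)
		return -EINVAL;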

    > + /* Sampling not supported yet */
    > + if (hwc->sample_period)
    > + return -EINVAL;
    > +
    > + return 0;
    > +}

    > +static int uncore_pmu_add(struct perf_event *event, int flags)
    > +{
    > + struct cpu_uncore_events *cpuc = &__get_cpu_var(cpu_uncore_events);
    > + struct intel_uncore *uncore = cpuc->intel_uncore;
    > + int ret = 1;
    > + int i;
    > +
    > + raw_spin_lock(&uncore->lock);
    > +
    > + if (event->attr.config == UNCORE_FIXED_EVENT) {
    > + i = X86_PMC_IDX_FIXED;
    > + goto fixed_event;

    Can the GP counters also count that event? If so, what happens if I
    start 2 of them?
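
    If they can, something along these lines would avoid failing the
    second event while GP counters are still free (completely untested
    sketch; it assumes UNCORE_FIXED_EVENT really is countable on the GP
    counters and that 'out' unlocks and returns the error):

	int idx = -1, j;

	/* prefer the fixed counter for the fixed event */
	if (event->attr.config == UNCORE_FIXED_EVENT &&
	    !uncore->events[X86_PMC_IDX_FIXED])
		idx = X86_PMC_IDX_FIXED;

	/* otherwise fall back to any free general-purpose counter */
	if (idx < 0) {
		for (j = 0; j < X86_PMC_IDX_FIXED; j++) {
			if (!uncore->events[j]) {
				idx = j;
				break;
			}
		}
	}

	if (idx < 0)
		goto out;	/* no free counter */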

    > + }
    > +
    > + for (i = 0; i < X86_PMC_IDX_FIXED; i++) {
    > +fixed_event:
    > + if (!uncore->events[i]) {
    > + uncore->events[i] = event;
    > + uncore->n_events++;
    > + event->hw.idx = i;
    > + __set_bit(i, uncore->active_mask);
    > +
    > + intel_uncore_pmu.hw_config(event);
    > +
    > + if (flags & PERF_EF_START)
    > + uncore_pmu_start(event, flags);
    > + ret = 0;
    > + break;
    > + }
    > + }
    > +
    > + if (uncore->n_events == 1) {
    > + uncore_pmu_start_hrtimer(uncore);
    > + intel_uncore_pmu.enable_all();
    > + }
    > +
    > + raw_spin_unlock(&uncore->lock);
    > +
    > + return ret;
    > +}

    uncore is fully symmetric and doesn't have any constraints other than
    the fixed counter?

    I guess we can start with this; there is still the issue of mapping
    the events to a single active CPU in the node, but we can do that a
    little later.
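
    As a purely illustrative sketch of that mapping (not part of this
    patch, and a real version would also need to migrate events on CPU
    hotplug): pick one online CPU per node to own that node's uncore
    events, e.g.:

	#include <linux/cpumask.h>
	#include <linux/topology.h>

	/*
	 * Hypothetical helper: the CPU that owns a node's uncore events.
	 * Returns nr_cpu_ids if the node has no online CPUs.
	 */
	static int uncore_event_cpu(int node)
	{
		return cpumask_first_and(cpumask_of_node(node), cpu_online_mask);
	}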

