    Subject: Re: [PATCH v2 5/7] perf, x86: drain PEBS buffer during context switch
    On Tue, Jul 15, 2014 at 04:58:57PM +0800, Yan, Zheng wrote:
    > +void intel_pmu_drain_pebs_buffer(void)
    > +{
    > +	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
    > +	struct debug_store *ds = cpuc->ds;
    > +	struct pt_regs regs;
    > +
    > +	if (!x86_pmu.pebs_active)
    > +		return;
    > +	if (ds->pebs_index <= ds->pebs_buffer_base)
    > +		return;

    Both implementations of drain_pebs() already do that.

    > +	x86_pmu.drain_pebs(&regs);
    > +}
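
    So unless I'm missing something, the wrapper could shrink to something
    like this (just a sketch; it relies on drain_pebs() doing the
    empty-buffer check itself):

    void intel_pmu_drain_pebs_buffer(void)
    {
    	struct pt_regs regs;

    	/* Nothing to do when PEBS isn't active on this CPU. */
    	if (!x86_pmu.pebs_active)
    		return;

    	/* Both drain_pebs() implementations bail out on an empty buffer. */
    	x86_pmu.drain_pebs(&regs);
    }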

    > @@ -759,8 +787,19 @@ void intel_pmu_pebs_disable(struct perf_event *event)
    > {
    >  	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
    >  	struct hw_perf_event *hwc = &event->hw;
    > +	struct debug_store *ds = cpuc->ds;
    > +	bool multi_pebs = false;
    > +
    > +	if (ds->pebs_interrupt_threshold >
    > +	    ds->pebs_buffer_base + x86_pmu.pebs_record_size)
    > +		multi_pebs = true;
    >
    >  	cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
    > +	if (cpuc->pebs_sched_cb_enabled &&
    > +	    !(cpuc->pebs_enabled & ((1ULL << MAX_PEBS_EVENTS) - 1))) {

    You seem fond of that expression, maybe make it an inline somewhere to
    avoid all this repetition.
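
    Something like the below would do; pebs_is_enabled() is just a name I
    made up here, pick whatever fits:

    static inline bool pebs_is_enabled(struct cpu_hw_events *cpuc)
    {
    	/* Any of the MAX_PEBS_EVENTS counter bits still set? */
    	return cpuc->pebs_enabled & ((1ULL << MAX_PEBS_EVENTS) - 1);
    }

    and the test above then reads:
    if (cpuc->pebs_sched_cb_enabled && !pebs_is_enabled(cpuc)).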

    > +		perf_sched_cb_disable(event->ctx->pmu);
    > +		cpuc->pebs_sched_cb_enabled = false;
    > +	}
    >
    >  	if (event->hw.constraint->flags & PERF_X86_EVENT_PEBS_LDLAT)
    >  		cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32));
    > @@ -772,6 +811,9 @@ void intel_pmu_pebs_disable(struct perf_event *event)
    >
    >  	hwc->config |= ARCH_PERFMON_EVENTSEL_INT;
    >  	hwc->autoreload = false;
    > +
    > +	if (multi_pebs)
    > +		intel_pmu_drain_pebs_buffer();
    >  }

    Is that condition worth the effort, seeing how you already need to load
    the DS state to compute multi_pebs in the first place?
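
    I.e. drop multi_pebs and the ds load entirely and drain unconditionally
    at the end, something like (sketch only):

    	hwc->config |= ARCH_PERFMON_EVENTSEL_INT;
    	hwc->autoreload = false;

    	/*
    	 * Unconditional flush; intel_pmu_drain_pebs_buffer() returns
    	 * early when there is nothing buffered anyway.
    	 */
    	intel_pmu_drain_pebs_buffer();
    }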

    > diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
    > index 430f1ad..a3df61d 100644
    > --- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
    > +++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
    > @@ -199,8 +199,6 @@ void intel_pmu_lbr_enable(struct perf_event *event)
    > {
    >  	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
    >
    > -	if (!x86_pmu.lbr_nr)
    > -		return;
    >  	/*
    >  	 * Reset the LBR stack if we changed task context to
    >  	 * avoid data leaks.

    More random hunks?