Subject: [PATCH 3.11 88/93] perf: Fix race in removing an event
    3.11.10.12 -stable review patch.  If anyone has any objections, please let me know.

    ------------------

    From: Peter Zijlstra <peterz@infradead.org>

    commit 46ce0fe97a6be7532ce6126bb26ce89fed81528c upstream.

    When removing a (sibling) event we do:

    raw_spin_lock_irq(&ctx->lock);
    perf_group_detach(event);
    raw_spin_unlock_irq(&ctx->lock);

    <hole>

    perf_remove_from_context(event);
    raw_spin_lock_irq(&ctx->lock);
    ...
    raw_spin_unlock_irq(&ctx->lock);

    Now, assuming the event is a sibling, it will be 'unreachable' for
    things like ctx_sched_out() because that iterates the
    groups->siblings, and we just unhooked the sibling.

    So, if during <hole> we get ctx_sched_out(), it will miss the event
    and not call event_sched_out() on it, leaving it programmed on the
    PMU.

    The subsequent perf_remove_from_context() call will find the ctx is
    inactive and only call list_del_event() to remove the event from all
    other lists.

    Hereafter we can proceed to free the event; while still programmed!
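Spelled out as an interleaving, the problematic schedule is roughly:

    close()                             another CPU
    -------                             -----------
    raw_spin_lock_irq(&ctx->lock);
    perf_group_detach(event);
    raw_spin_unlock_irq(&ctx->lock);
                                        ctx_sched_out(ctx, ...);
                                          /* iterates the group lists,
                                             misses the unhooked sibling,
                                             so no event_sched_out() */
    perf_remove_from_context(event);
      /* ctx now inactive; only
         list_del_event() runs */
    free_event(event);                  /* event still programmed! */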

    Close this hole by moving perf_group_detach() inside the same
    ctx->lock region(s) perf_remove_from_context() has.

    The condition on inherited events only in __perf_event_exit_task() is
    likely complete crap because non-inherited events are part of groups
    too and we're tearing down just the same. But leave that for another
    patch.
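In sketch form, after this patch the sched-out, group detach and list
removal all happen under a single ctx->lock critical section (see
__perf_remove_from_context() in the diff below), so a concurrent
ctx_sched_out() can no longer observe a detached-but-still-programmed
sibling:

    raw_spin_lock(&ctx->lock);
    event_sched_out(event, cpuctx, ctx);
    if (re->detach_group)
            perf_group_detach(event);
    list_del_event(event, ctx);
    ...
    raw_spin_unlock(&ctx->lock);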

    Most-likely-Fixes: e03a9a55b4e ("perf: Change close() semantics for group events")
    Reported-by: Vince Weaver <vincent.weaver@maine.edu>
    Tested-by: Vince Weaver <vincent.weaver@maine.edu>
    Much-staring-at-traces-by: Vince Weaver <vincent.weaver@maine.edu>
    Much-staring-at-traces-by: Thomas Gleixner <tglx@linutronix.de>
    Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
    Cc: Linus Torvalds <torvalds@linux-foundation.org>
    Signed-off-by: Peter Zijlstra <peterz@infradead.org>
    Link: http://lkml.kernel.org/r/20140505093124.GN17778@laptop.programming.kicks-ass.net
    Signed-off-by: Ingo Molnar <mingo@kernel.org>
    [ luis: backported to 3.11: adjusted context ]
    Signed-off-by: Luis Henriques <luis.henriques@canonical.com>
    ---
    kernel/events/core.c | 47 ++++++++++++++++++++++++++---------------------
    1 file changed, 26 insertions(+), 21 deletions(-)

    diff --git a/kernel/events/core.c b/kernel/events/core.c
    index a59cdc594cda..0c1023c3cfbd 100644
    --- a/kernel/events/core.c
    +++ b/kernel/events/core.c
@@ -1423,6 +1423,11 @@ group_sched_out(struct perf_event *group_event,
 		cpuctx->exclusive = 0;
 }
 
+struct remove_event {
+	struct perf_event *event;
+	bool detach_group;
+};
+
 /*
  * Cross CPU call to remove a performance event
  *
@@ -1431,12 +1436,15 @@ group_sched_out(struct perf_event *group_event,
  */
 static int __perf_remove_from_context(void *info)
 {
-	struct perf_event *event = info;
+	struct remove_event *re = info;
+	struct perf_event *event = re->event;
 	struct perf_event_context *ctx = event->ctx;
 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
 
 	raw_spin_lock(&ctx->lock);
 	event_sched_out(event, cpuctx, ctx);
+	if (re->detach_group)
+		perf_group_detach(event);
 	list_del_event(event, ctx);
 	if (!ctx->nr_events && cpuctx->task_ctx == ctx) {
 		ctx->is_active = 0;
@@ -1461,10 +1469,14 @@ static int __perf_remove_from_context(void *info)
  * When called from perf_event_exit_task, it's OK because the
  * context has been detached from its task.
  */
-static void perf_remove_from_context(struct perf_event *event)
+static void perf_remove_from_context(struct perf_event *event, bool detach_group)
 {
 	struct perf_event_context *ctx = event->ctx;
 	struct task_struct *task = ctx->task;
+	struct remove_event re = {
+		.event = event,
+		.detach_group = detach_group,
+	};
 
 	lockdep_assert_held(&ctx->mutex);
 
@@ -1473,12 +1485,12 @@ static void perf_remove_from_context(struct perf_event *event)
 		 * Per cpu events are removed via an smp call and
 		 * the removal is always successful.
 		 */
-		cpu_function_call(event->cpu, __perf_remove_from_context, event);
+		cpu_function_call(event->cpu, __perf_remove_from_context, &re);
 		return;
 	}
 
 retry:
-	if (!task_function_call(task, __perf_remove_from_context, event))
+	if (!task_function_call(task, __perf_remove_from_context, &re))
 		return;
 
 	raw_spin_lock_irq(&ctx->lock);
@@ -1495,6 +1507,8 @@ retry:
 	 * Since the task isn't running, its safe to remove the event, us
 	 * holding the ctx->lock ensures the task won't get scheduled in.
 	 */
+	if (detach_group)
+		perf_group_detach(event);
 	list_del_event(event, ctx);
 	raw_spin_unlock_irq(&ctx->lock);
 }
@@ -3204,10 +3218,7 @@ int perf_event_release_kernel(struct perf_event *event)
 	 * to trigger the AB-BA case.
 	 */
 	mutex_lock_nested(&ctx->mutex, SINGLE_DEPTH_NESTING);
-	raw_spin_lock_irq(&ctx->lock);
-	perf_group_detach(event);
-	raw_spin_unlock_irq(&ctx->lock);
-	perf_remove_from_context(event);
+	perf_remove_from_context(event, true);
 	mutex_unlock(&ctx->mutex);
 
 	free_event(event);
@@ -6974,7 +6985,7 @@ SYSCALL_DEFINE5(perf_event_open,
 		struct perf_event_context *gctx = group_leader->ctx;
 
 		mutex_lock(&gctx->mutex);
-		perf_remove_from_context(group_leader);
+		perf_remove_from_context(group_leader, false);
 
 		/*
 		 * Removing from the context ends up with disabled
@@ -6984,7 +6995,7 @@ SYSCALL_DEFINE5(perf_event_open,
 		perf_event__state_init(group_leader);
 		list_for_each_entry(sibling, &group_leader->sibling_list,
 				    group_entry) {
-			perf_remove_from_context(sibling);
+			perf_remove_from_context(sibling, false);
 			perf_event__state_init(sibling);
 			put_ctx(gctx);
 		}
@@ -7114,7 +7125,7 @@ void perf_pmu_migrate_context(struct pmu *pmu, int src_cpu, int dst_cpu)
 	mutex_lock(&src_ctx->mutex);
 	list_for_each_entry_safe(event, tmp, &src_ctx->event_list,
 				 event_entry) {
-		perf_remove_from_context(event);
+		perf_remove_from_context(event, false);
 		put_ctx(src_ctx);
 		list_add(&event->event_entry, &events);
 	}
@@ -7174,13 +7185,7 @@ __perf_event_exit_task(struct perf_event *child_event,
 			 struct perf_event_context *child_ctx,
 			 struct task_struct *child)
 {
-	if (child_event->parent) {
-		raw_spin_lock_irq(&child_ctx->lock);
-		perf_group_detach(child_event);
-		raw_spin_unlock_irq(&child_ctx->lock);
-	}
-
-	perf_remove_from_context(child_event);
+	perf_remove_from_context(child_event, !!child_event->parent);
 
 	/*
 	 * It can happen that the parent exits first, and has events
@@ -7665,14 +7670,14 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
 
 static void __perf_event_exit_context(void *__info)
 {
+	struct remove_event re = { .detach_group = false };
 	struct perf_event_context *ctx = __info;
-	struct perf_event *event;
 
 	perf_pmu_rotate_stop(ctx->pmu);
 
 	rcu_read_lock();
-	list_for_each_entry_rcu(event, &ctx->event_list, event_entry)
-		__perf_remove_from_context(event);
+	list_for_each_entry_rcu(re.event, &ctx->event_list, event_entry)
+		__perf_remove_from_context(&re);
 	rcu_read_unlock();
 }

    --
    1.9.1

