From: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Subject: [PATCH v2] perf: Synchronously cleanup child events
Date: 19 Jan 2016
The orphan cleanup workqueue doesn't always catch orphans: the work is
only scheduled from the sched-in/sched-out paths, so events that never
schedule after they are orphaned are never caught. IOW, the event leak
is still very real. The workqueue approach also doesn't work for kernel
counters.

Also, there seems to be no reason not to carry out this cleanup
synchronously during the parent event's destruction.

This patch replaces the workqueue approach with a simple cleanup round
in the parent event's destruction path. To avoid racing with clone, we
still check in the inheritance path that the parent event has an owner
(see the sketch below).
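
For illustration, the clone-side guard being relied on looks roughly
like this (a sketch only, not part of this diff; the exact placement
inside inherit_event() is assumed):

	/*
	 * Sketch: an ownerless parent is already going through
	 * perf_event_release_kernel(), so don't attach a new
	 * child event to it.
	 */
	if (is_orphaned_event(parent_event))
		return NULL;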

Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
---
 include/linux/perf_event.h |   3 -
 kernel/events/core.c       | 142 ++++++++++++++++++++-------------------
 2 files changed, 63 insertions(+), 82 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 6612732d8f..cd9c1ace29 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -634,9 +634,6 @@ struct perf_event_context {
int nr_cgroups; /* cgroup evts */
void *task_ctx_data; /* pmu specific data */
struct rcu_head rcu_head;
-
- struct delayed_work orphans_remove;
- bool orphans_remove_sched;
};

/*
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 630f53acce..33083ed5a6 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -49,8 +49,6 @@

#include <asm/irq_regs.h>

-static struct workqueue_struct *perf_wq;
-
typedef int (*remote_function_f)(void *);

struct remote_function_call {
@@ -1652,40 +1650,9 @@ out:
*/
static bool is_orphaned_event(struct perf_event *event)
{
- return event && !is_kernel_event(event) && !event->owner;
-}
-
-/*
- * Event has a parent but parent's task finished and it's
- * alive only because of children holding refference.
- */
-static bool is_orphaned_child(struct perf_event *event)
-{
- return is_orphaned_event(event->parent);
+ return event && !event->owner;
}

-static void orphans_remove_work(struct work_struct *work);
-
-static void schedule_orphans_remove(struct perf_event_context *ctx)
-{
- if (!ctx->task || ctx->orphans_remove_sched || !perf_wq)
- return;
-
- if (queue_delayed_work(perf_wq, &ctx->orphans_remove, 1)) {
- get_ctx(ctx);
- ctx->orphans_remove_sched = true;
- }
-}
-
-static int __init perf_workqueue_init(void)
-{
- perf_wq = create_singlethread_workqueue("perf");
- WARN(!perf_wq, "failed to create perf workqueue\n");
- return perf_wq ? 0 : -1;
-}
-
-core_initcall(perf_workqueue_init);
-
static inline int pmu_filter_match(struct perf_event *event)
{
struct pmu *pmu = event->pmu;
@@ -1746,9 +1713,6 @@ event_sched_out(struct perf_event *event,
if (event->attr.exclusive || !cpuctx->active_oncpu)
cpuctx->exclusive = 0;

- if (is_orphaned_child(event))
- schedule_orphans_remove(ctx);
-
perf_pmu_enable(event->pmu);
}

@@ -1991,9 +1955,6 @@ event_sched_in(struct perf_event *event,
if (event->attr.exclusive)
cpuctx->exclusive = 1;

- if (is_orphaned_child(event))
- schedule_orphans_remove(ctx);
-
out:
perf_pmu_enable(event->pmu);

@@ -3370,7 +3331,6 @@ static void __perf_event_init_context(struct perf_event_context *ctx)
INIT_LIST_HEAD(&ctx->flexible_groups);
INIT_LIST_HEAD(&ctx->event_list);
atomic_set(&ctx->refcount, 1);
- INIT_DELAYED_WORK(&ctx->orphans_remove, orphans_remove_work);
}

static struct perf_event_context *
@@ -3786,13 +3746,10 @@ static void perf_remove_from_owner(struct perf_event *event)
}
}

-static void put_event(struct perf_event *event)
+static void __put_event(struct perf_event *event)
{
struct perf_event_context *ctx;

- if (!atomic_long_dec_and_test(&event->refcount))
- return;
-
if (!is_kernel_event(event))
perf_remove_from_owner(event);

@@ -3816,56 +3773,83 @@ static void put_event(struct perf_event *event)
_free_event(event);
}

-int perf_event_release_kernel(struct perf_event *event)
+static void put_event(struct perf_event *event)
{
- put_event(event);
- return 0;
+ if (atomic_long_dec_and_test(&event->refcount))
+ __put_event(event);
}
-EXPORT_SYMBOL_GPL(perf_event_release_kernel);

-/*
- * Called when the last reference to the file is gone.
- */
-static int perf_release(struct inode *inode, struct file *file)
+static bool put_event_last(struct perf_event *event)
{
- put_event(file->private_data);
- return 0;
+ if (atomic_long_cmpxchg(&event->refcount, 1, 0) == 1) {
+ __put_event(event);
+ return true;
+ }
+
+ return false;
}

-/*
- * Remove all orphanes events from the context.
- */
-static void orphans_remove_work(struct work_struct *work)
+int perf_event_release_kernel(struct perf_event *event)
{
- struct perf_event_context *ctx;
- struct perf_event *event, *tmp;
+ struct perf_event *child, *tmp;
+ LIST_HEAD(child_list);

- ctx = container_of(work, struct perf_event_context,
- orphans_remove.work);
+ if (!is_kernel_event(event))
+ perf_remove_from_owner(event);

- mutex_lock(&ctx->mutex);
- list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) {
- struct perf_event *parent_event = event->parent;
+ event->owner = NULL;

- if (!is_orphaned_child(event))
- continue;
+retry:
+ /*
+ * event::child_mutex nests inside ctx::mutex, so move the children
+ * to a safe place first to avoid lock inversion
+ */
+ mutex_lock(&event->child_mutex);
+ list_splice_init(&event->child_list, &child_list);
+ mutex_unlock(&event->child_mutex);

- perf_remove_from_context(event, true);
+ list_for_each_entry_safe(child, tmp, &child_list, child_list) {
+ struct perf_event_context *ctx;

- mutex_lock(&parent_event->child_mutex);
- list_del_init(&event->child_list);
- mutex_unlock(&parent_event->child_mutex);
+ /*
+ * This is somewhat similar to perf_free_event(),
+ * except that these events are alive and need a
+ * proper perf_remove_from_context().
+ */
+ ctx = perf_event_ctx_lock(child);
+ perf_remove_from_context(child, true);
+ perf_event_ctx_unlock(child, ctx);
+
+ list_del(&child->child_list);

- free_event(event);
- put_event(parent_event);
+ /* Children will have exactly one reference */
+ free_event(child);
+
+ /*
+ * This matches the refcount bump in inherit_event();
+ * this can't be the last reference.
+ */
+ put_event(event);
}

- raw_spin_lock_irq(&ctx->lock);
- ctx->orphans_remove_sched = false;
- raw_spin_unlock_irq(&ctx->lock);
- mutex_unlock(&ctx->mutex);
+ /*
+ * If this is the last reference, we're done here, otherwise
+ * we must have raced with inherit_event(), in which case, repeat
+ */
+ if (!put_event_last(event))
+ goto retry;

- put_ctx(ctx);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(perf_event_release_kernel);
+
+/*
+ * Called when the last reference to the file is gone.
+ */
+static int perf_release(struct inode *inode, struct file *file)
+{
+ perf_event_release_kernel(file->private_data);
+ return 0;
}

u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
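
To make the refcount scheme above concrete, here is a small,
self-contained userspace model of the release loop. All names here
(struct object, put_ref, put_ref_last, release) are hypothetical, and
C11 atomics stand in for the kernel's atomic_long_t; it is a sketch of
the pattern, not kernel code:

	#include <stdatomic.h>
	#include <stdbool.h>
	#include <stdlib.h>

	struct object {
		atomic_long refcount;	/* models event->refcount */
	};

	/* Drop one reference; free on the last one (models put_event()). */
	static void put_ref(struct object *obj)
	{
		if (atomic_fetch_sub(&obj->refcount, 1) == 1)
			free(obj);
	}

	/*
	 * Succeed only when we hold the very last reference, i.e. the
	 * count transitions 1 -> 0 (models put_event_last()).
	 */
	static bool put_ref_last(struct object *obj)
	{
		long expected = 1;

		if (atomic_compare_exchange_strong(&obj->refcount,
						   &expected, 0)) {
			free(obj);
			return true;
		}

		return false;	/* a concurrent user still holds a reference */
	}

	/*
	 * Models perf_event_release_kernel(): detach and free the children,
	 * calling put_ref() on the parent once per child, until taking the
	 * last reference succeeds.  A failed put_ref_last() means a
	 * concurrent clone took a new reference; its child will show up on
	 * the list and be reaped on the next iteration.
	 */
	static void release(struct object *obj)
	{
		do {
			/* splice child list, put_ref(obj) per child */
		} while (!put_ref_last(obj));
	}

The point of the cmpxchg over a plain dec-and-test is that a failed
attempt leaves the count intact, so the releasing thread can safely
loop and reap whatever a concurrent clone has added in the meantime.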
--
2.7.0.rc3
