Subject: [PATCH 2/7] perf: Generalize task_function_call()ers
A number of places in perf core (perf_event_enable, perf_event_disable,
perf_install_in_context, perf_remove_from_context) perform the same
tapdance around task_function_call() to modify an event and/or its
context on its target cpu. The sequence boils down to re-trying
task_function_call() until it either succeeds or is no longer necessary
because the event's context has gone inactive, in which case the action
can be performed on the caller's cpu provided the context lock is still
held. The complication all callers have to deal with is that they must
release the context lock before calling task_function_call(), which
gives the context in question an opportunity to get scheduled out; they
then have to re-acquire the lock, check whether that has happened and
re-try the cross-call.
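
Condensed, the dance each of these call sites repeats looks roughly
like the sketch below, where __do_something and info stand in for the
site-specific callback and argument (the removed hunks in the diff show
the real instances):

	struct task_struct *task = ctx->task;

retry:
	if (!task_function_call(task, __do_something, info))
		return;

	raw_spin_lock_irq(&ctx->lock);
	if (ctx->is_active) {
		raw_spin_unlock_irq(&ctx->lock);
		/* ctx->task may have been changed by a concurrent sched out */
		task = ctx->task;
		goto retry;
	}

	/* context is inactive and ctx->lock is held: act on this cpu */
	__do_something(info);
	raw_spin_unlock_irq(&ctx->lock);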

This patch creates a more generic helper that either executes a callback
on the target cpu or returns with ctx::lock held, so that the caller can
perform its action on the now-inactive context locally. It also converts
the existing users of this sequence to the new helper.
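
With the helper in place, a converted call site boils down to the
following sketch (again, __do_something and info are placeholders
mirroring the conversions in the diff):

	if (!remote_call_or_ctx_lock(event, __do_something, info,
				     PERF_EVENT_STATE_NONE))
		return;

	/*
	 * Non-zero return: ctx::lock is held, interrupts are disabled and
	 * the context is inactive, so it is safe to act on this cpu.
	 */
	__do_something(info);
	raw_spin_unlock_irq(&ctx->lock);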

Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
---
 include/linux/perf_event.h |   1 +
 kernel/events/core.c       | 190 +++++++++++++++------------------------------
 2 files changed, 64 insertions(+), 127 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index f9828a48f1..55290d609e 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -402,6 +402,7 @@ enum perf_event_active_state {
PERF_EVENT_STATE_OFF = -1,
PERF_EVENT_STATE_INACTIVE = 0,
PERF_EVENT_STATE_ACTIVE = 1,
+ PERF_EVENT_STATE_NONE = 2,
};

struct file;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5854fcf7f0..0d3296f600 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -126,6 +126,59 @@ static int cpu_function_call(int cpu, remote_function_f func, void *info)
return data.ret;
}

+static int
+remote_call_or_ctx_lock(struct perf_event *event, remote_function_f func,
+ void *info, enum perf_event_active_state retry_state)
+{
+ struct perf_event_context *ctx = event->ctx;
+ struct task_struct *task = ctx->task;
+ unsigned int retry = 1;
+
+ if (!task) {
+ /*
+ * Per cpu events are removed via an smp call. The removal can
+ * fail if the CPU is currently offline, but in that case we
+ * already called __perf_remove_from_context from
+ * perf_event_exit_cpu.
+ */
+ cpu_function_call(event->cpu, func, info);
+ return 0;
+ }
+
+ raw_spin_lock_irq(&ctx->lock);
+ do {
+ /*
+ * Reload the task pointer, it might have been changed by
+ * a concurrent perf_event_context_sched_out().
+ */
+ task = ctx->task;
+
+ /*
+ * If the context is inactive, we don't need a cross call to
+ * fiddle with the event so long as the ctx::lock is held.
+ */
+ if (!ctx->is_active)
+ break;
+
+ raw_spin_unlock_irq(&ctx->lock);
+
+ if (!task_function_call(task, func, info))
+ return 0;
+
+ raw_spin_lock_irq(&ctx->lock);
+
+ if (retry_state <= PERF_EVENT_STATE_ACTIVE)
+ retry = event->state == retry_state;
+ } while (retry);
+
+ /*
+ * No luck: leaving with ctx::lock held and interrupts disabled
+ * so that the caller can safely fiddle with event or context.
+ */
+
+ return -ENXIO;
+}
+
#define EVENT_OWNER_KERNEL ((void *) -1)

static bool is_kernel_event(struct perf_event *event)
@@ -1673,7 +1726,6 @@ static int __perf_remove_from_context(void *info)
static void perf_remove_from_context(struct perf_event *event, bool detach_group)
{
struct perf_event_context *ctx = event->ctx;
- struct task_struct *task = ctx->task;
struct remove_event re = {
.event = event,
.detach_group = detach_group,
@@ -1681,36 +1733,10 @@ static void perf_remove_from_context(struct perf_event *event, bool detach_group

lockdep_assert_held(&ctx->mutex);

- if (!task) {
- /*
- * Per cpu events are removed via an smp call. The removal can
- * fail if the CPU is currently offline, but in that case we
- * already called __perf_remove_from_context from
- * perf_event_exit_cpu.
- */
- cpu_function_call(event->cpu, __perf_remove_from_context, &re);
- return;
- }
-
-retry:
- if (!task_function_call(task, __perf_remove_from_context, &re))
+ if (!remote_call_or_ctx_lock(event, __perf_remove_from_context, &re,
+ PERF_EVENT_STATE_NONE))
return;

- raw_spin_lock_irq(&ctx->lock);
- /*
- * If we failed to find a running task, but find the context active now
- * that we've acquired the ctx->lock, retry.
- */
- if (ctx->is_active) {
- raw_spin_unlock_irq(&ctx->lock);
- /*
- * Reload the task pointer, it might have been changed by
- * a concurrent perf_event_context_sched_out().
- */
- task = ctx->task;
- goto retry;
- }
-
/*
* Since the task isn't running, its safe to remove the event, us
* holding the ctx->lock ensures the task won't get scheduled in.
@@ -1778,34 +1804,10 @@ int __perf_event_disable(void *info)
static void _perf_event_disable(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
- struct task_struct *task = ctx->task;
-
- if (!task) {
- /*
- * Disable the event on the cpu that it's on
- */
- cpu_function_call(event->cpu, __perf_event_disable, event);
- return;
- }

-retry:
- if (!task_function_call(task, __perf_event_disable, event))
+ if (!remote_call_or_ctx_lock(event, __perf_event_disable, event, PERF_EVENT_STATE_ACTIVE))
return;

- raw_spin_lock_irq(&ctx->lock);
- /*
- * If the event is still active, we need to retry the cross-call.
- */
- if (event->state == PERF_EVENT_STATE_ACTIVE) {
- raw_spin_unlock_irq(&ctx->lock);
- /*
- * Reload the task pointer, it might have been changed by
- * a concurrent perf_event_context_sched_out().
- */
- task = ctx->task;
- goto retry;
- }
-
/*
* Since we have the lock this context can't be scheduled
* in, so we can change the state safely.
@@ -2143,42 +2145,16 @@ perf_install_in_context(struct perf_event_context *ctx,
struct perf_event *event,
int cpu)
{
- struct task_struct *task = ctx->task;
-
lockdep_assert_held(&ctx->mutex);

event->ctx = ctx;
if (event->cpu != -1)
event->cpu = cpu;

- if (!task) {
- /*
- * Per cpu events are installed via an smp call and
- * the install is always successful.
- */
- cpu_function_call(cpu, __perf_install_in_context, event);
- return;
- }
-
-retry:
- if (!task_function_call(task, __perf_install_in_context, event))
+ if (!remote_call_or_ctx_lock(event, __perf_install_in_context, event,
+ PERF_EVENT_STATE_NONE))
return;

- raw_spin_lock_irq(&ctx->lock);
- /*
- * If we failed to find a running task, but find the context active now
- * that we've acquired the ctx->lock, retry.
- */
- if (ctx->is_active) {
- raw_spin_unlock_irq(&ctx->lock);
- /*
- * Reload the task pointer, it might have been changed by
- * a concurrent perf_event_context_sched_out().
- */
- task = ctx->task;
- goto retry;
- }
-
/*
* Since the task isn't running, its safe to add the event, us holding
* the ctx->lock ensures the task won't get scheduled in.
@@ -2299,15 +2275,6 @@ unlock:
static void _perf_event_enable(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
- struct task_struct *task = ctx->task;
-
- if (!task) {
- /*
- * Enable the event on the cpu that it's on
- */
- cpu_function_call(event->cpu, __perf_event_enable, event);
- return;
- }

raw_spin_lock_irq(&ctx->lock);
if (event->state >= PERF_EVENT_STATE_INACTIVE)
@@ -2322,32 +2289,13 @@ static void _perf_event_enable(struct perf_event *event)
*/
if (event->state == PERF_EVENT_STATE_ERROR)
event->state = PERF_EVENT_STATE_OFF;
-
-retry:
- if (!ctx->is_active) {
- __perf_event_mark_enabled(event);
- goto out;
- }
-
raw_spin_unlock_irq(&ctx->lock);

- if (!task_function_call(task, __perf_event_enable, event))
+ if (!remote_call_or_ctx_lock(event, __perf_event_enable, event, PERF_EVENT_STATE_OFF))
return;

- raw_spin_lock_irq(&ctx->lock);
-
- /*
- * If the context is active and the event is still off,
- * we need to retry the cross-call.
- */
- if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
- /*
- * task could have been flipped by a concurrent
- * perf_event_context_sched_out()
- */
- task = ctx->task;
- goto retry;
- }
+ if (!ctx->is_active)
+ __perf_event_mark_enabled(event);

out:
raw_spin_unlock_irq(&ctx->lock);
@@ -4209,22 +4157,10 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
task = ctx->task;
pe.value = value;

- if (!task) {
- cpu_function_call(event->cpu, __perf_event_period, &pe);
- return 0;
- }
-
-retry:
- if (!task_function_call(task, __perf_event_period, &pe))
+ if (!remote_call_or_ctx_lock(event, __perf_event_period, &pe,
+ PERF_EVENT_STATE_NONE))
return 0;

- raw_spin_lock_irq(&ctx->lock);
- if (ctx->is_active) {
- raw_spin_unlock_irq(&ctx->lock);
- task = ctx->task;
- goto retry;
- }
-
__perf_event_period(&pe);
raw_spin_unlock_irq(&ctx->lock);

--
2.6.2

