From: Tejun Heo <tj@kernel.org>
Subject: [PATCH 09/12] perf: factor out perf_event_switch_clones()
Date: 2010-05-04
Factor out perf_event_switch_clones() from
perf_event_task_sched_out(). This eases future changes and
introduces no functional difference.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
---
kernel/perf_event.c | 62 +++++++++++++++++++++++++++++---------------------
1 file changed, 36 insertions(+), 26 deletions(-)
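
For reviewers skimming the diff: the factored-out helper carries the
existing clone-context optimization. When the outgoing and incoming
tasks have perf contexts cloned from the same parent, the two contexts
are swapped between the tasks instead of being scheduled out and back
in, so the events keep counting across the switch. Below is a minimal
sketch of that control flow (illustrative only, with a hypothetical
name; the RCU protection, ctx->lock/next_ctx->lock nesting, and
perf_event_sync_stat() bookkeeping of the real code are elided):

/*
 * Illustrative sketch, not the kernel code: swap cloned contexts
 * between the switching tasks when they are equivalent.
 */
static bool switch_clones_sketch(struct perf_event_context *ctx,
				 struct task_struct *task,
				 struct task_struct *next)
{
	struct perf_event_context *next_ctx = next->perf_event_ctxp;

	/* Only contexts cloned from the same parent are equivalent. */
	if (!next_ctx || !context_equiv(ctx, next_ctx))
		return false;	/* caller falls back to a full sched-out */

	/* Hand each task the other's context; events keep running. */
	task->perf_event_ctxp = next_ctx;
	next->perf_event_ctxp = ctx;
	ctx->task = next;
	next_ctx->task = task;
	return true;
}

Returning bool lets perf_event_task_sched_out() fall back to the full
ctx_sched_out() path when the contexts are not equivalent clones.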

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 295699f..3f3e328 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1158,30 +1158,14 @@ void perf_event_task_migrate(struct task_struct *task, int new_cpu)
 	perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 1, NULL, 0);
 }
 
-/*
- * Called from scheduler to remove the events of the current task,
- * with interrupts disabled.
- *
- * We stop each event and update the event value in event->count.
- *
- * This does not protect us against NMI, but disable()
- * sets the disabled bit in the control field of event _before_
- * accessing the event control register. If a NMI hits, then it will
- * not restart the event.
- */
-void perf_event_task_sched_out(struct rq *rq, struct task_struct *task,
-			       struct task_struct *next)
+static bool perf_event_switch_clones(struct perf_cpu_context *cpuctx,
+				     struct perf_event_context *ctx,
+				     struct task_struct *task,
+				     struct task_struct *next)
 {
-	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
-	struct perf_event_context *ctx = task->perf_event_ctxp;
 	struct perf_event_context *next_ctx;
 	struct perf_event_context *parent;
-	int do_switch = 1;
-
-	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
-
-	if (likely(!ctx || !cpuctx->task_ctx))
-		return;
+	bool switched = false;
 
 	rcu_read_lock();
 	parent = rcu_dereference(ctx->parent_ctx);
@@ -1208,7 +1192,7 @@ void perf_event_task_sched_out(struct rq *rq, struct task_struct *task,
 			next->perf_event_ctxp = ctx;
 			ctx->task = next;
 			next_ctx->task = task;
-			do_switch = 0;
+			switched = true;
 
 			perf_event_sync_stat(ctx, next_ctx);
 		}
@@ -1217,10 +1201,36 @@ void perf_event_task_sched_out(struct rq *rq, struct task_struct *task,
 	}
 	rcu_read_unlock();
 
-	if (do_switch) {
-		ctx_sched_out(ctx, cpuctx, EVENT_ALL);
-		cpuctx->task_ctx = NULL;
-	}
+	return switched;
+}
+
+/*
+ * Called from scheduler to remove the events of the current task,
+ * with interrupts disabled.
+ *
+ * We stop each event and update the event value in event->count.
+ *
+ * This does not protect us against NMI, but disable()
+ * sets the disabled bit in the control field of event _before_
+ * accessing the event control register. If a NMI hits, then it will
+ * not restart the event.
+ */
+void perf_event_task_sched_out(struct rq *rq, struct task_struct *task,
+			       struct task_struct *next)
+{
+	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+	struct perf_event_context *ctx = task->perf_event_ctxp;
+
+	perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
+
+	if (likely(!ctx || !cpuctx->task_ctx))
+		return;
+
+	if (perf_event_switch_clones(cpuctx, ctx, task, next))
+		return;
+
+	ctx_sched_out(ctx, cpuctx, EVENT_ALL);
+	cpuctx->task_ctx = NULL;
 }
 
 static void task_ctx_sched_out(struct perf_event_context *ctx,
--
1.6.4.2

