From: Frederic Weisbecker <fweisbec@gmail.com>
Subject: [RFC PATCH 3/4] perf/core: Split up pinned and non-pinned processing
Date: 2009-11-08
Split up the pinned and non-pinned event processing into two helpers
so that the two classes can be handled separately and with more
flexibility.
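
Not part of the patch, just an illustration for readers outside the
kernel tree: below is a minimal userspace sketch of the two-pass
scheduling policy being factored out here, using toy types and a fake
two-counter PMU (all names hypothetical, not the kernel's). Pinned
groups are tried first and moved to an error state if they cannot get
on the hardware; flexible ("volatile") groups simply stop being added
once the PMU is full and stay inactive.

/* Toy model of the pinned/volatile scheduling split.
 * Hypothetical types, not kernel code. */
#include <stdio.h>

enum state { STATE_OFF, STATE_INACTIVE, STATE_ACTIVE, STATE_ERROR };

struct event {
	const char *name;
	enum state state;
	int counters_needed;
};

static int pmu_free = 2;	/* pretend the PMU has two counters */

static int group_sched_in(struct event *e)
{
	if (e->counters_needed > pmu_free)
		return -1;
	pmu_free -= e->counters_needed;
	e->state = STATE_ACTIVE;
	return 0;
}

static void sched_in_pinned(struct event *evs, int n)
{
	for (int i = 0; i < n; i++) {
		if (evs[i].state == STATE_OFF)
			continue;
		/* A pinned group must be on the PMU whenever its
		 * context is active, so failure is a hard error. */
		if (group_sched_in(&evs[i]))
			evs[i].state = STATE_ERROR;
	}
}

static void sched_in_volatile(struct event *evs, int n)
{
	int can_add_hw = 1;

	for (int i = 0; i < n; i++) {
		if (evs[i].state == STATE_OFF || !can_add_hw)
			continue;
		/* PMU full: stop trying, leave the rest inactive
		 * until a later rotation. */
		if (group_sched_in(&evs[i]))
			can_add_hw = 0;
	}
}

int main(void)
{
	struct event pinned[] = {
		{ "cycles(pinned)", STATE_INACTIVE, 1 },
	};
	struct event flexible[] = {
		{ "cache-misses", STATE_INACTIVE, 1 },
		{ "branches",     STATE_INACTIVE, 1 },
	};

	sched_in_pinned(pinned, 1);	/* pinned groups get first pick */
	sched_in_volatile(flexible, 2);	/* flexible groups fill what is left */

	printf("%s -> state %d\n", flexible[1].name, flexible[1].state);
	return 0;
}

With the passes split into separate helpers, each one can be invoked
on its own rather than only as a single combined sweep, which is the
flexibility the changelog refers to.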

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
---
kernel/perf_event.c | 51 +++++++++++++++++++++++++++++++++++----------------
1 files changed, 35 insertions(+), 16 deletions(-)

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 0432c1c..50f2997 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1246,25 +1246,11 @@ static void perf_event_cpu_sched_out(struct perf_cpu_context *cpuctx)
}

static void
-__perf_event_sched_in(struct perf_event_context *ctx,
- struct perf_cpu_context *cpuctx, int cpu)
+__perf_event_sched_in_pinned(struct perf_event_context *ctx,
+ struct perf_cpu_context *cpuctx, int cpu)
{
struct perf_event *event;
- int can_add_hw = 1;
-
- spin_lock(&ctx->lock);
- ctx->is_active = 1;
- if (likely(!ctx->nr_events))
- goto out;
-
- ctx->timestamp = perf_clock();
-
- perf_disable();

- /*
- * First go through the list and put on any pinned groups
- * in order to give them the best chance of going on.
- */
list_for_each_entry(event, &ctx->pinned_grp_list, group_entry) {
if (event->state <= PERF_EVENT_STATE_OFF)
continue;
@@ -1283,6 +1269,14 @@ __perf_event_sched_in(struct perf_event_context *ctx,
event->state = PERF_EVENT_STATE_ERROR;
}
}
+}
+
+static void
+__perf_event_sched_in_volatile(struct perf_event_context *ctx,
+ struct perf_cpu_context *cpuctx, int cpu)
+{
+ int can_add_hw = 1;
+ struct perf_event *event;

list_for_each_entry(event, &ctx->volatile_grp_list, group_entry) {
/*
@@ -1303,6 +1297,31 @@ __perf_event_sched_in(struct perf_event_context *ctx,
if (group_sched_in(event, cpuctx, ctx, cpu))
can_add_hw = 0;
}
+}
+
+static void
+__perf_event_sched_in(struct perf_event_context *ctx,
+ struct perf_cpu_context *cpuctx, int cpu)
+{
+ spin_lock(&ctx->lock);
+ ctx->is_active = 1;
+ if (likely(!ctx->nr_events))
+ goto out;
+
+ ctx->timestamp = perf_clock();
+
+ perf_disable();
+
+ /*
+ * First go through the list and put on any pinned groups
+ * in order to give them the best chance of going on.
+ */
+ __perf_event_sched_in_pinned(ctx, cpuctx, cpu);
+
+ /* Then handle the non-pinned groups */
+ __perf_event_sched_in_volatile(ctx, cpuctx, cpu);
+
+
perf_enable();
out:
spin_unlock(&ctx->lock);
--
1.6.2.3

