Subject: [tip:perf/core] perf, x86: Consolidate some code repetition
Commit-ID:  2b0b5c6fe9b383f3cf35a0a6371c9d577bd523ff
Gitweb: http://git.kernel.org/tip/2b0b5c6fe9b383f3cf35a0a6371c9d577bd523ff
Author: Peter Zijlstra <peterz@infradead.org>
AuthorDate: Thu, 8 Apr 2010 23:03:20 +0200
Committer: Ingo Molnar <mingo@elte.hu>
CommitDate: Fri, 7 May 2010 11:31:02 +0200

perf, x86: Consolidate some code repetition

Remove some duplicated logic: the Core2 and Nehalem PEBS drain routines
open-code the same per-record sample handling, so factor it out into a
common __intel_pmu_pebs_event() helper.
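
For illustration only, a minimal, self-contained userspace C sketch of the
same consolidation pattern: the per-record handling that both drain paths
repeated is hoisted into one helper that takes the interrupt-time registers
plus a record. All types, names and values below are simplified stand-ins,
not the kernel's.

#include <stdio.h>

/* Simplified stand-ins for the kernel structures; not the real definitions. */
struct pebs_record { unsigned long ip, bp, sp; };
struct fake_regs   { unsigned long ip, bp, sp, flags; };

#define FAKE_EFLAGS_EXACT	0x1UL

/*
 * Shared per-record logic, analogous in shape to __intel_pmu_pebs_event():
 * start from the interrupt-time registers, patch in the fields the record
 * carries, then hand the sample on (here: just print it).
 */
static void pebs_event(const struct fake_regs *iregs,
		       const struct pebs_record *rec)
{
	struct fake_regs regs = *iregs;

	regs.ip = rec->ip;
	regs.bp = rec->bp;
	regs.sp = rec->sp;
	regs.flags |= FAKE_EFLAGS_EXACT;

	printf("sample ip=%#lx sp=%#lx\n", regs.ip, regs.sp);
}

/* Core2-style drain: a single record for a single counter. */
static void drain_core(const struct fake_regs *iregs)
{
	struct pebs_record rec = { .ip = 0x1000, .bp = 0x2000, .sp = 0x3000 };

	pebs_event(iregs, &rec);		/* previously open-coded here */
}

/* Nehalem-style drain: walk a buffer of records, same helper per record. */
static void drain_nhm(const struct fake_regs *iregs)
{
	struct pebs_record buf[2] = {
		{ .ip = 0x1100, .bp = 0x2100, .sp = 0x3100 },
		{ .ip = 0x1200, .bp = 0x2200, .sp = 0x3200 },
	};

	for (int i = 0; i < 2; i++)
		pebs_event(iregs, &buf[i]);	/* previously duplicated here */
}

int main(void)
{
	struct fake_regs iregs = { .ip = 0xdead, .sp = 0xbeef };

	drain_core(&iregs);
	drain_nhm(&iregs);
	return 0;
}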

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
arch/x86/kernel/cpu/perf_event_intel_ds.c | 97 +++++++++++++----------------
1 files changed, 44 insertions(+), 53 deletions(-)

diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 080b9b0..35056f7 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -452,14 +452,54 @@ static int intel_pmu_pebs_fixup_ip(struct pt_regs *regs)

 static int intel_pmu_save_and_restart(struct perf_event *event);
 
+static void __intel_pmu_pebs_event(struct perf_event *event,
+				   struct pt_regs *iregs, void *__pebs)
+{
+	/*
+	 * We cast to pebs_record_core since that is a subset of
+	 * both formats and we don't use the other fields in this
+	 * routine.
+	 */
+	struct pebs_record_core *pebs = __pebs;
+	struct perf_sample_data data;
+	struct pt_regs regs;
+
+	if (!intel_pmu_save_and_restart(event))
+		return;
+
+	perf_sample_data_init(&data, 0);
+	data.period = event->hw.last_period;
+
+	/*
+	 * We use the interrupt regs as a base because the PEBS record
+	 * does not contain a full regs set, specifically it seems to
+	 * lack segment descriptors, which get used by things like
+	 * user_mode().
+	 *
+	 * In the simple case fix up only the IP and BP,SP regs, for
+	 * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
+	 * A possible PERF_SAMPLE_REGS will have to transfer all regs.
+	 */
+	regs = *iregs;
+	regs.ip = pebs->ip;
+	regs.bp = pebs->bp;
+	regs.sp = pebs->sp;
+
+	if (intel_pmu_pebs_fixup_ip(&regs))
+		regs.flags |= PERF_EFLAGS_EXACT;
+	else
+		regs.flags &= ~PERF_EFLAGS_EXACT;
+
+	if (perf_event_overflow(event, 1, &data, &regs))
+		x86_pmu_stop(event);
+}
+
 static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct debug_store *ds = cpuc->ds;
 	struct perf_event *event = cpuc->events[0]; /* PMC0 only */
 	struct pebs_record_core *at, *top;
-	struct perf_sample_data data;
-	struct pt_regs regs;
 	int n;
 
 	if (!ds || !x86_pmu.pebs)
@@ -485,9 +525,6 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
 	if (n <= 0)
 		return;
 
-	if (!intel_pmu_save_and_restart(event))
-		return;
-
 	/*
 	 * Should not happen, we program the threshold at 1 and do not
 	 * set a reset value.
@@ -495,31 +532,7 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
 	WARN_ON_ONCE(n > 1);
 	at += n - 1;
 
-	perf_sample_data_init(&data, 0);
-	data.period = event->hw.last_period;
-
-	/*
-	 * We use the interrupt regs as a base because the PEBS record
-	 * does not contain a full regs set, specifically it seems to
-	 * lack segment descriptors, which get used by things like
-	 * user_mode().
-	 *
-	 * In the simple case fix up only the IP and BP,SP regs, for
-	 * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
-	 * A possible PERF_SAMPLE_REGS will have to transfer all regs.
-	 */
-	regs = *iregs;
-	regs.ip = at->ip;
-	regs.bp = at->bp;
-	regs.sp = at->sp;
-
-	if (intel_pmu_pebs_fixup_ip(&regs))
-		regs.flags |= PERF_EFLAGS_EXACT;
-	else
-		regs.flags &= ~PERF_EFLAGS_EXACT;
-
-	if (perf_event_overflow(event, 1, &data, &regs))
-		x86_pmu_stop(event);
+	__intel_pmu_pebs_event(event, iregs, at);
 }
 
 static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
@@ -527,9 +540,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct debug_store *ds = cpuc->ds;
 	struct pebs_record_nhm *at, *top;
-	struct perf_sample_data data;
 	struct perf_event *event = NULL;
-	struct pt_regs regs;
 	u64 status = 0;
 	int bit, n;

@@ -571,27 +582,7 @@ static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
 		if (!event || bit >= MAX_PEBS_EVENTS)
 			continue;
 
-		if (!intel_pmu_save_and_restart(event))
-			continue;
-
-		perf_sample_data_init(&data, 0);
-		data.period = event->hw.last_period;
-
-		/*
-		 * See the comment in intel_pmu_drain_pebs_core()
-		 */
-		regs = *iregs;
-		regs.ip = at->ip;
-		regs.bp = at->bp;
-		regs.sp = at->sp;
-
-		if (intel_pmu_pebs_fixup_ip(&regs))
-			regs.flags |= PERF_EFLAGS_EXACT;
-		else
-			regs.flags &= ~PERF_EFLAGS_EXACT;
-
-		if (perf_event_overflow(event, 1, &data, &regs))
-			x86_pmu_stop(event);
+		__intel_pmu_pebs_event(event, iregs, at);
 	}
 }

