From: Frederic Weisbecker <fweisbec@gmail.com>
Subject: [PATCH 3/4] perf: Support for irq exclusion
Date: Fri, 21 May 2010
Provide exclude_softirq and exclude_hardirq support in perf
event attributes. This supplies the final pieces needed to
restrict profiling or tracing to any desired context
granularity.
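
For example, once the series is applied, a counter can be restricted
to pure task context from userspace by setting both new bits. A
minimal sketch (assuming the new fields are visible in the installed
linux/perf_event.h; a software event is used since the exclusion is
enforced on the overflow path, and error handling is kept short):

	/*
	 * Sketch only: count cpu-clock in task context, excluding
	 * both hardirq and softirq, assuming this series is applied.
	 */
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/types.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>

	static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
				    int cpu, int group_fd, unsigned long flags)
	{
		return syscall(__NR_perf_event_open, attr, pid, cpu,
			       group_fd, flags);
	}

	int main(void)
	{
		struct perf_event_attr attr;
		long long count;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size            = sizeof(attr);
		attr.type            = PERF_TYPE_SOFTWARE;
		attr.config          = PERF_COUNT_SW_CPU_CLOCK;
		attr.exclude_hardirq = 1;	/* new: don't count hardirq */
		attr.exclude_softirq = 1;	/* new: don't count softirq */

		fd = perf_event_open(&attr, 0 /* self */, -1 /* any cpu */, -1, 0);
		if (fd < 0) {
			perror("perf_event_open");
			return 1;
		}

		/* ... workload to measure ... */

		if (read(fd, &count, sizeof(count)) == sizeof(count))
			printf("task-only cpu-clock: %lld\n", count);
		close(fd);
		return 0;
	}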

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Paul Mackerras <paulus@samba.org>
---
 include/linux/perf_event.h |    8 +++++---
 kernel/perf_event.c        |   42 +++++++++++++++++++++++++++++++-----------
 2 files changed, 36 insertions(+), 14 deletions(-)

diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index d939fc7..ca55ec5 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -213,10 +213,12 @@ struct perf_event_attr {
 				 *
 				 * See also PERF_RECORD_MISC_EXACT_IP
 				 */
-				precise_ip      :  2, /* skid constraint */
-				exclude_task    :  1, /* don't count task context */
+				precise_ip      :  2, /* skid constraint */
+				exclude_task    :  1, /* don't count task context */
+				exclude_softirq :  1, /* don't count softirq */
+				exclude_hardirq :  1, /* don't count hardirq */
 
-				__reserved_1    : 46;
+				__reserved_1    : 44;
 
 	union {
 		__u32		wakeup_events;	  /* wakeup every n events */
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index ab96411..03c17b2 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -3837,16 +3837,9 @@ static void perf_log_throttle(struct perf_event *event, int enable)
 	perf_output_end(&handle);
 }
 
-/*
- * Generic event overflow handling, sampling.
- */
-
-static int __perf_event_overflow(struct perf_event *event, int nmi,
-				 int throttle, struct perf_sample_data *data,
-				 struct pt_regs *regs)
+static int perf_event_unthrottle(struct perf_event *event,
+				 struct hw_perf_event *hwc, int throttle)
 {
-	int events = atomic_read(&event->event_limit);
-	struct hw_perf_event *hwc = &event->hw;
 	int ret = 0;
 
 	throttle = (throttle && event->pmu->unthrottle != NULL);
@@ -3872,6 +3865,23 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
 		}
 	}
 
+	return ret;
+}
+
+/*
+ * Generic event overflow handling, sampling.
+ */
+
+static int __perf_event_overflow(struct perf_event *event, int nmi,
+				 int throttle, struct perf_sample_data *data,
+				 struct pt_regs *regs)
+{
+	int events = atomic_read(&event->event_limit);
+	struct hw_perf_event *hwc = &event->hw;
+	int ret;
+
+	ret = perf_event_unthrottle(event, &event->hw, throttle);
+
 	if (event->attr.freq) {
 		u64 now = perf_clock();
 		s64 delta = now - hwc->freq_time_stamp;
@@ -3910,9 +3920,19 @@ static int __perf_event_overflow(struct perf_event *event, int nmi,
 static bool
 perf_exclude_event(struct perf_event *event, int offset)
 {
-	if (!in_interrupt_offset(preempt_count() - offset)) {
+	int preempt_offset = preempt_count() - offset;
+
+	if (!in_interrupt_offset(preempt_offset)) {
 		if (event->attr.exclude_task)
 			return true;
+	} else {
+		if (in_irq_offset(preempt_offset)) {
+			if (event->attr.exclude_hardirq)
+				return true;
+		} else if (in_softirq_offset(preempt_offset)) {
+			if (event->attr.exclude_softirq)
+				return true;
+		}
 	}
 
 	return false;
@@ -3932,7 +3952,7 @@ int perf_event_overflow(struct perf_event *event, int nmi,
 	 * context.
 	 */
 	if (perf_exclude_event(event, offset))
-		return 0;
+		return perf_event_unthrottle(event, &event->hw, 1);
 
 	return __perf_event_overflow(event, nmi, 1, data, regs);
 }
--
1.6.2.3
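
For reference, perf_exclude_event() relies on the in_*_offset()
helpers introduced earlier in this series. A minimal sketch of their
assumed semantics, mirroring the standard in_irq()/in_interrupt()
preempt_count masks applied to a caller-adjusted count (an
assumption, not the series' exact code):

	/*
	 * Sketch only: assumed definitions for the *_offset() helpers
	 * used in perf_exclude_event() above. The masks come from
	 * linux/hardirq.h.
	 */
	static inline int in_irq_offset(int count)
	{
		return count & HARDIRQ_MASK;	/* serving a hardirq */
	}

	static inline int in_softirq_offset(int count)
	{
		return count & SOFTIRQ_OFFSET;	/* serving a softirq */
	}

	static inline int in_interrupt_offset(int count)
	{
		return count & (HARDIRQ_MASK | SOFTIRQ_OFFSET | NMI_MASK);
	}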

