Date: 2012-12-17
From: Alexander Gordeev <agordeev@redhat.com>
Subject: [PATCH RFC -tip 3/6] perf/x86/AMD PMU: IRQ-bound performance events
Signed-off-by: Alexander Gordeev <agordeev@redhat.com>
---
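Notes (kept below the '---' so they stay out of the commit message): this
patch wires the AMD PMU up to the disable_irq/enable_irq x86_pmu callbacks.
x86_pmu_disable_all() is rewritten on top of a new __x86_pmu_disable_event()
helper, and the two new per-IRQ routines walk the counters, skipping any
counter that is not set in cpuc->actirq_mask or whose event is bound to a
different interrupt. The actirq_mask bitmap and the event->irq field come
from the earlier patches in this series.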
 arch/x86/kernel/cpu/perf_event.c     |   38 ++++++++++++++++++++++++++++-----
 arch/x86/kernel/cpu/perf_event.h     |   14 ++++++++++++
 arch/x86/kernel/cpu/perf_event_amd.c |    4 +-
 3 files changed, 48 insertions(+), 8 deletions(-)
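
For illustration, a minimal sketch of how the new callbacks might be driven
around an interrupt handler. The wrapper below is hypothetical and not part
of this patch (the real call sites belong to the generic part of the
series); it only shows the intended pairing of the two callbacks:

/*
 * Hypothetical wrapper, for illustration only: counters bound to @irq
 * are enabled while the handler runs and disabled again on exit.
 */
static void sketch_handle_counted_irq(int irq)
{
	x86_pmu.enable_irq(irq);	/* switch on counters bound to @irq */

	/* ... the device's real handler would run here ... */

	x86_pmu.disable_irq(irq);	/* and switch them off again */
}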

diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 8ab32d2..aa69997 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -496,15 +496,23 @@ void x86_pmu_disable_all(void)
 	int idx;
 
 	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
-		u64 val;
-
 		if (!test_bit(idx, cpuc->active_mask))
 			continue;
-		rdmsrl(x86_pmu_config_addr(idx), val);
-		if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
+		__x86_pmu_disable_event(idx, ARCH_PERFMON_EVENTSEL_ENABLE);
+	}
+}
+
+void x86_pmu_disable_irq(int irq)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	int idx;
+
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+		if (!test_bit(idx, cpuc->actirq_mask))
+			continue;
+		if (cpuc->events[idx]->irq != irq)
 			continue;
-		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
-		wrmsrl(x86_pmu_config_addr(idx), val);
+		__x86_pmu_disable_event(idx, ARCH_PERFMON_EVENTSEL_ENABLE);
 	}
 }

@@ -549,6 +557,24 @@ void x86_pmu_enable_irq_nop_int(int irq)
 {
 }
 
+void x86_pmu_enable_irq(int irq)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	int idx;
+
+	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
+		struct perf_event *event = cpuc->events[idx];
+
+		if (!test_bit(idx, cpuc->actirq_mask))
+			continue;
+		if (event->irq != irq)
+			continue;
+
+		__x86_pmu_enable_event(&event->hw,
+				       ARCH_PERFMON_EVENTSEL_ENABLE);
+	}
+}
+
 static struct pmu pmu;
 
 static inline int is_x86_event(struct perf_event *event)
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index ab56c05..e7d47a0 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -479,6 +479,19 @@ int x86_pmu_hw_config(struct perf_event *event);
 
 void x86_pmu_disable_all(void);
 
+void x86_pmu_disable_irq(int irq);
+
+static inline void __x86_pmu_disable_event(int idx, u64 enable_mask)
+{
+	u64 val;
+
+	rdmsrl(x86_pmu_config_addr(idx), val);
+	if (val & enable_mask) {
+		val &= ~enable_mask;
+		wrmsrl(x86_pmu_config_addr(idx), val);
+	}
+}
+
 static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
 					  u64 enable_mask)
 {
@@ -491,6 +504,7 @@ static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
 
 void x86_pmu_enable_all(int added);
 
+void x86_pmu_enable_irq(int irq);
 void x86_pmu_enable_irq_nop_int(int irq);
 
 int perf_assign_events(struct event_constraint **constraints, int n,
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index d42845f..2754880 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -581,8 +581,8 @@ static __initconst const struct x86_pmu amd_pmu = {
 	.handle_irq		= x86_pmu_handle_irq,
 	.disable_all		= x86_pmu_disable_all,
 	.enable_all		= x86_pmu_enable_all,
-	.disable_irq		= x86_pmu_enable_irq_nop_int,
-	.enable_irq		= x86_pmu_enable_irq_nop_int,
+	.disable_irq		= x86_pmu_disable_irq,
+	.enable_irq		= x86_pmu_enable_irq,
 	.enable			= x86_pmu_enable_event,
 	.disable		= x86_pmu_disable_event,
 	.hw_config		= amd_pmu_hw_config,
--
1.7.7.6

--
Regards,
Alexander Gordeev
agordeev@redhat.com

