Subject: [PATCH 08/49] perf/x86: Hybrid PMU support for event constraints
Date: 8 Feb 2021
From: Kan Liang <kan.liang@linux.intel.com>

The events are different among hybrid PMUs: each type of core supports
its own set of events. Each hybrid PMU should therefore use its own
event and PEBS constraint tables rather than the global x86_pmu copies.
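
To illustrate the resulting lookup pattern, here is a minimal
user-space sketch (the types, tables, and values below are simplified
stand-ins for illustration only, not the kernel's actual definitions):

#include <stdio.h>

struct event_constraint {
	unsigned long code;	/* event code the constraint matches */
	unsigned long idxmsk;	/* bitmask of counters the event may use */
};

/* Each hybrid PMU (e.g. big core vs. atom core) owns its own table. */
struct hybrid_pmu {
	const char *name;
	const struct event_constraint *event_constraints;
};

/* A zero event code terminates a table. */
static const struct event_constraint core_constraints[] = {
	{ 0x003c, 0x0f },	/* may run on counters 0-3 */
	{ 0, 0 },
};
static const struct event_constraint atom_constraints[] = {
	{ 0x003c, 0x03 },	/* same event, only counters 0-1 */
	{ 0, 0 },
};

/* Walk the table owned by the event's PMU rather than a global one. */
static const struct event_constraint *
get_constraint(const struct hybrid_pmu *pmu, unsigned long code)
{
	const struct event_constraint *c;

	for (c = pmu->event_constraints; c->code; c++) {
		if (c->code == code)
			return c;
	}
	return NULL;
}

int main(void)
{
	const struct hybrid_pmu pmus[] = {
		{ "cpu_core", core_constraints },
		{ "cpu_atom", atom_constraints },
	};
	int i;

	for (i = 0; i < 2; i++) {
		const struct event_constraint *c =
			get_constraint(&pmus[i], 0x003c);

		printf("%s: event 0x3c -> counter mask 0x%lx\n",
		       pmus[i].name, c ? c->idxmsk : 0UL);
	}
	return 0;
}

In the patch itself, x86_get_event_constraints() and
intel_pebs_constraints() achieve the same effect through the
X86_HYBRID_READ_FROM_CPUC()/X86_HYBRID_READ_FROM_EVENT() helpers, which
fall back to the global x86_pmu copy on non-hybrid systems.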

Reviewed-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
---
 arch/x86/events/core.c       | 3 ++-
 arch/x86/events/intel/core.c | 5 +++--
 arch/x86/events/intel/ds.c   | 5 +++--
 arch/x86/events/perf_event.h | 5 +++++
 4 files changed, 13 insertions(+), 5 deletions(-)

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 27c87a7..2160142 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1514,6 +1514,7 @@ void perf_event_print_debug(void)
 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
 	int num_counters = X86_HYBRID_READ_FROM_CPUC(num_counters, cpuc);
 	int num_counters_fixed = X86_HYBRID_READ_FROM_CPUC(num_counters_fixed, cpuc);
+	struct event_constraint *pebs_constraints = X86_HYBRID_READ_FROM_CPUC(pebs_constraints, cpuc);
 	unsigned long flags;
 	int idx;
 
@@ -1533,7 +1534,7 @@ void perf_event_print_debug(void)
 		pr_info("CPU#%d: status:     %016llx\n", cpu, status);
 		pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
 		pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
-		if (x86_pmu.pebs_constraints) {
+		if (pebs_constraints) {
 			rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
 			pr_info("CPU#%d: pebs:       %016llx\n", cpu, pebs);
 		}
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 9baa6b6..9acfa82 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3136,10 +3136,11 @@ struct event_constraint *
 x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
 			  struct perf_event *event)
 {
+	struct event_constraint *event_constraints = X86_HYBRID_READ_FROM_CPUC(event_constraints, cpuc);
 	struct event_constraint *c;
 
-	if (x86_pmu.event_constraints) {
-		for_each_event_constraint(c, x86_pmu.event_constraints) {
+	if (event_constraints) {
+		for_each_event_constraint(c, event_constraints) {
 			if (constraint_match(c, event->hw.config)) {
 				event->hw.flags |= c->flags;
 				return c;
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index a528966..ba651d9 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -959,13 +959,14 @@ struct event_constraint intel_spr_pebs_event_constraints[] = {
 
 struct event_constraint *intel_pebs_constraints(struct perf_event *event)
 {
+	struct event_constraint *pebs_constraints = X86_HYBRID_READ_FROM_EVENT(pebs_constraints, event);
 	struct event_constraint *c;
 
 	if (!event->attr.precise_ip)
 		return NULL;
 
-	if (x86_pmu.pebs_constraints) {
-		for_each_event_constraint(c, x86_pmu.pebs_constraints) {
+	if (pebs_constraints) {
+		for_each_event_constraint(c, pebs_constraints) {
 			if (constraint_match(c, event->hw.config)) {
 				event->hw.flags |= c->flags;
 				return c;
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 00fcd92..7a5d036 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -661,6 +661,8 @@ struct x86_hybrid_pmu {
 					[PERF_COUNT_HW_CACHE_MAX]
 					[PERF_COUNT_HW_CACHE_OP_MAX]
 					[PERF_COUNT_HW_CACHE_RESULT_MAX];
+	struct event_constraint		*event_constraints;
+	struct event_constraint		*pebs_constraints;
 };
 
 #define IS_X86_HYBRID cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)
@@ -673,6 +675,9 @@ struct x86_hybrid_pmu {
 #define X86_HYBRID_READ_FROM_CPUC(_name, _cpuc) \
 	(_cpuc && HAS_VALID_HYBRID_PMU_IN_CPUC(_cpuc) ? x86_pmu.hybrid_pmu[(_cpuc)->hybrid_pmu_idx]._name : x86_pmu._name)
 
+#define X86_HYBRID_READ_FROM_EVENT(_name, _event) \
+	(IS_X86_HYBRID ? ((struct x86_hybrid_pmu *)(_event->pmu))->_name : x86_pmu._name)
+
 /*
  * struct x86_pmu - generic x86 pmu
  */
--
2.7.4