Subject: [PATCH V2 12/25] perf/x86/intel: Factor out intel_pmu_check_event_constraints
Date: Wed, 10 Mar 2021 08:37:48 -0800
From: Kan Liang <kan.liang@linux.intel.com>
Each hybrid PMU has to check and update its own event constraints before
registration, so factor the logic out of intel_pmu_init() into a new helper,
intel_pmu_check_event_constraints().

The intel_pmu_check_event_constraints() helper will be reused later, when
registering a dedicated hybrid PMU.
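For illustration only (not part of this patch), a rough sketch of how a
dedicated hybrid PMU registration path could reuse the helper, calling it
once per hybrid PMU with that PMU's own counter configuration. The
x86_hybrid_pmu structure and its fields below are assumptions made for the
example and are not defined in this patch:

	/* Hypothetical caller, one invocation per hybrid PMU. */
	static void example_check_hybrid_constraints(void)
	{
		struct x86_hybrid_pmu *pmu;	/* assumed type */
		int i;

		for (i = 0; i < x86_pmu.num_hybrid_pmus; i++) {
			pmu = &x86_pmu.hybrid_pmu[i];

			/* Validate constraints against this PMU's counters only. */
			intel_pmu_check_event_constraints(pmu->event_constraints,
							  pmu->num_counters,
							  pmu->num_counters_fixed,
							  pmu->intel_ctrl);
		}
	}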
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
---
 arch/x86/events/intel/core.c | 82 +++++++++++++++++++++++++-------------------
 1 file changed, 47 insertions(+), 35 deletions(-)
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 2da6ae2..6df8edd7 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4235,6 +4235,49 @@ static void intel_pmu_check_num_counters(int *num_counters,
 	*intel_ctrl |= fixed_mask << INTEL_PMC_IDX_FIXED;
 }
 
+static void intel_pmu_check_event_constraints(struct event_constraint *event_constraints,
+					      int num_counters,
+					      int num_counters_fixed,
+					      u64 intel_ctrl)
+{
+	struct event_constraint *c;
+
+	if (!event_constraints)
+		return;
+
+	/*
+	 * event on fixed counter2 (REF_CYCLES) only works on this
+	 * counter, so do not extend mask to generic counters
+	 */
+	for_each_event_constraint(c, event_constraints) {
+		/*
+		 * Don't extend the topdown slots and metrics
+		 * events to the generic counters.
+		 */
+		if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) {
+			/*
+			 * Disable topdown slots and metrics events,
+			 * if slots event is not in CPUID.
+			 */
+			if (!(INTEL_PMC_MSK_FIXED_SLOTS & intel_ctrl))
+				c->idxmsk64 = 0;
+			c->weight = hweight64(c->idxmsk64);
+			continue;
+		}
+
+		if (c->cmask == FIXED_EVENT_FLAGS) {
+			/* Disabled fixed counters which are not in CPUID */
+			c->idxmsk64 &= intel_ctrl;
+
+			if (c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES)
+				c->idxmsk64 |= (1ULL << num_counters) - 1;
+		}
+		c->idxmsk64 &=
+			~(~0ULL << (INTEL_PMC_IDX_FIXED + num_counters_fixed));
+		c->weight = hweight64(c->idxmsk64);
+	}
+}
+
 static void intel_pmu_cpu_starting(int cpu)
 {
 	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
@@ -5097,7 +5140,6 @@ __init int intel_pmu_init(void)
 	union cpuid10_edx edx;
 	union cpuid10_eax eax;
 	union cpuid10_ebx ebx;
-	struct event_constraint *c;
 	unsigned int fixed_mask;
 	struct extra_reg *er;
 	bool pmem = false;
@@ -5735,40 +5777,10 @@ __init int intel_pmu_init(void)
 	if (x86_pmu.intel_cap.anythread_deprecated)
 		x86_pmu.format_attrs = intel_arch_formats_attr;
 
-	if (x86_pmu.event_constraints) {
-		/*
-		 * event on fixed counter2 (REF_CYCLES) only works on this
-		 * counter, so do not extend mask to generic counters
-		 */
-		for_each_event_constraint(c, x86_pmu.event_constraints) {
-			/*
-			 * Don't extend the topdown slots and metrics
-			 * events to the generic counters.
-			 */
-			if (c->idxmsk64 & INTEL_PMC_MSK_TOPDOWN) {
-				/*
-				 * Disable topdown slots and metrics events,
-				 * if slots event is not in CPUID.
-				 */
-				if (!(INTEL_PMC_MSK_FIXED_SLOTS & x86_pmu.intel_ctrl))
-					c->idxmsk64 = 0;
-				c->weight = hweight64(c->idxmsk64);
-				continue;
-			}
-
-			if (c->cmask == FIXED_EVENT_FLAGS) {
-				/* Disabled fixed counters which are not in CPUID */
-				c->idxmsk64 &= x86_pmu.intel_ctrl;
-
-				if (c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES)
-					c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
-			}
-			c->idxmsk64 &=
-				~(~0ULL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
-			c->weight = hweight64(c->idxmsk64);
-		}
-	}
-
+	intel_pmu_check_event_constraints(x86_pmu.event_constraints,
+					  x86_pmu.num_counters,
+					  x86_pmu.num_counters_fixed,
+					  x86_pmu.intel_ctrl);
 	/*
 	 * Access LBR MSR may cause #GP under certain circumstances.
 	 * E.g. KVM doesn't support LBR MSR
-- 
2.7.4
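As a side note on the mask arithmetic in the helper, here is a standalone
user-space sketch (example values only, not kernel code) of how the generic
and fixed counter bits are combined for a fixed-counter constraint; it
assumes INTEL_PMC_IDX_FIXED is 32, as in the kernel headers:

	#include <stdio.h>
	#include <stdint.h>

	#define INTEL_PMC_IDX_FIXED 32	/* fixed counters start at bit 32 */

	int main(void)
	{
		int num_counters = 4, num_counters_fixed = 3;	/* example values */
		/* pretend the constraint allows fixed counter 0 only */
		uint64_t idxmsk64 = 1ULL << (INTEL_PMC_IDX_FIXED + 0);

		/* extend a non-REF_CYCLES fixed-counter constraint to the generic counters */
		idxmsk64 |= (1ULL << num_counters) - 1;
		/* clear any fixed-counter bits beyond what CPUID enumerates */
		idxmsk64 &= ~(~0ULL << (INTEL_PMC_IDX_FIXED + num_counters_fixed));

		printf("idxmsk64 = %#llx\n", (unsigned long long)idxmsk64);
		return 0;
	}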