Date: Mon, 29 Aug 2022 12:10:05 +0200
From: Peter Zijlstra <>
Subject: [PATCH v2 6/9] perf/x86/intel: Remove x86_pmu::update_topdown_event
Now that it is all internal to the intel driver, remove
x86_pmu::update_topdown_event.

This assumes that is_topdown_count(event) can only be true when the
hardware has topdown support and the update function has therefore been
set, which is what allows the call sites to drop their NULL-pointer
checks.
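[Illustration only, not part of the patch.] The conversion follows the
kernel's static_call pattern: the call gets a compile-time default
target and can be retargeted once at init time, so call sites never
have to test a function pointer for NULL. A minimal sketch of that
pattern, with hypothetical names (my_handler, generic_handler,
fast_handler) and assuming a kernel build providing
<linux/static_call.h>:

#include <linux/init.h>
#include <linux/static_call.h>

/* Fallback target: always safe to call. In the patch,
 * x86_perf_event_update plays this role. */
static int generic_handler(int arg)
{
	return arg;
}

/* Specialised target, installed only when the hardware supports it;
 * compare icl_update_topdown_event / adl_update_topdown_event. */
static int fast_handler(int arg)
{
	return 2 * arg;
}

/* Define the call with generic_handler as the default target. Because
 * the call always has a valid target, callers need no NULL check. */
DEFINE_STATIC_CALL(my_handler, generic_handler);

static int do_work(int arg)
{
	/* On x86 this is patched into a direct call to the current
	 * target: no indirect branch, no conditional. */
	return static_call(my_handler)(arg);
}

static int __init my_driver_init(void)
{
	bool have_fast_hw = true;	/* stand-in for a CPU model check */

	if (have_fast_hw)
		static_call_update(my_handler, &fast_handler);
	return 0;
}

With x86_perf_event_update as the default target, the removed
x86_pmu.update_topdown_event tests become redundant: hardware that can
reach the topdown paths has installed a real handler by then, and
everything else falls through to the generic update.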
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 arch/x86/events/intel/core.c | 22 ++++++++++++----------
 arch/x86/events/perf_event.h |  1 -
 2 files changed, 12 insertions(+), 11 deletions(-)
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -2672,6 +2672,7 @@ static u64 adl_update_topdown_event(stru
 
 	return icl_update_topdown_event(event);
 }
+DEFINE_STATIC_CALL(intel_pmu_update_topdown_event, x86_perf_event_update);
 
 static void intel_pmu_read_topdown_event(struct perf_event *event)
 {
@@ -2683,7 +2684,7 @@ static void intel_pmu_read_topdown_event
 		return;
 
 	perf_pmu_disable(event->pmu);
-	x86_pmu.update_topdown_event(event);
+	static_call(intel_pmu_update_topdown_event)(event);
 	perf_pmu_enable(event->pmu);
 }
 
@@ -2691,7 +2692,7 @@ static void intel_pmu_read_event(struct
 {
 	if (event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD)
 		intel_pmu_auto_reload_read(event);
-	else if (is_topdown_count(event) && x86_pmu.update_topdown_event)
+	else if (is_topdown_count(event))
 		intel_pmu_read_topdown_event(event);
 	else
 		x86_perf_event_update(event);
@@ -2820,9 +2821,8 @@ static int intel_pmu_set_period(struct p
 
 static u64 intel_pmu_update(struct perf_event *event)
 {
-	if (unlikely(is_topdown_count(event)) &&
-	    x86_pmu.update_topdown_event)
-		return x86_pmu.update_topdown_event(event);
+	if (unlikely(is_topdown_count(event)))
+		return static_call(intel_pmu_update_topdown_event)(event);
 
 	return x86_perf_event_update(event);
 }
@@ -2950,8 +2950,7 @@ static int handle_pmi_common(struct pt_r
 	 */
 	if (__test_and_clear_bit(GLOBAL_STATUS_PERF_METRICS_OVF_BIT, (unsigned long *)&status)) {
 		handled++;
-		if (x86_pmu.update_topdown_event)
-			x86_pmu.update_topdown_event(NULL);
+		static_call(intel_pmu_update_topdown_event)(NULL);
 	}
 
 	/*
@@ -6191,7 +6190,8 @@ __init int intel_pmu_init(void)
 		x86_pmu.lbr_pt_coexist = true;
 		intel_pmu_pebs_data_source_skl(pmem);
 		x86_pmu.num_topdown_events = 4;
-		x86_pmu.update_topdown_event = icl_update_topdown_event;
+		static_call_update(intel_pmu_update_topdown_event,
+				   &icl_update_topdown_event);
 		static_call_update(intel_pmu_set_topdown_event_period,
 				   &icl_set_topdown_event_period);
 		pr_cont("Icelake events, ");
@@ -6229,7 +6229,8 @@ __init int intel_pmu_init(void)
 		x86_pmu.lbr_pt_coexist = true;
 		intel_pmu_pebs_data_source_skl(pmem);
 		x86_pmu.num_topdown_events = 8;
-		x86_pmu.update_topdown_event = icl_update_topdown_event;
+		static_call_update(intel_pmu_update_topdown_event,
+				   &icl_update_topdown_event);
 		static_call_update(intel_pmu_set_topdown_event_period,
 				   &icl_set_topdown_event_period);
 		pr_cont("Sapphire Rapids events, ");
@@ -6266,7 +6267,8 @@ __init int intel_pmu_init(void)
 		intel_pmu_pebs_data_source_adl();
 		x86_pmu.pebs_latency_data = adl_latency_data_small;
 		x86_pmu.num_topdown_events = 8;
-		x86_pmu.update_topdown_event = adl_update_topdown_event;
+		static_call_update(intel_pmu_update_topdown_event,
+				   &adl_update_topdown_event);
 		static_call_update(intel_pmu_set_topdown_event_period,
 				   &adl_set_topdown_event_period);
 
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -889,7 +889,6 @@ struct x86_pmu {
 	 * Intel perf metrics
 	 */
 	int		num_topdown_events;
-	u64		(*update_topdown_event)(struct perf_event *event);
 
 	/*
 	 * perf task context (i.e. struct perf_event_context::task_ctx_data)