From: Vikas Shivappa <vikas.shivappa@linux.intel.com>
Subject: [PATCH 2/3] perf/x86/mbm: Fix mbm counting for RMID reuse
This patch fixes MBM counting when multiple perf instances monitor the
same PID.

MBM cannot count in the usual perf way of continuously adding the delta
between the current h/w counter and the previous count to event->count,
because of a few h/w constraints:
(1) The MBM h/w counters overflow.
(2) h/w RMIDs are limited, so RMIDs are recycled and an event may end up
counting with different RMIDs over its lifetime.
(3) We do not want to read the counters at every sched_in and sched_out,
because the MSR reads involve quite a bit of overhead.

This patch nevertheless counts in a way similar to the usual perf
approach, and mainly handles (1) and (3).
update_sample() takes care of overflow in the hardware counters and
provides an abstraction by returning the total bytes counted as if no
overflow had occurred. We use this abstraction to count as below:

init:
event->prev_count = update_sample(rmid) //returns current total_bytes

count: // MBM right now uses count instead of read
cur_count = update_sample(rmid)
event->count += cur_count - event->prev_count
event->prev_count = cur_count
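
For illustration only (not part of the patch), the same scheme can be
sketched in standalone C. Here mbm_total_bytes() is a hypothetical
stand-in for update_sample()'s overflow-compensated return value, and
the 24-bit counter width is an assumption of the sketch, not taken from
this patch:

#include <stdint.h>

#define MBM_CNTR_WIDTH	24	/* assumed h/w counter width */
#define MBM_CNTR_MAX	((UINT64_C(1) << MBM_CNTR_WIDTH) - 1)

struct mbm_rmid_state {
	uint64_t prev_msr;	/* last raw h/w counter value read */
	uint64_t total_bytes;	/* running total, as if no overflow */
};

struct mbm_event {
	uint64_t prev_count;	/* total_bytes at init / last count */
	uint64_t count;		/* value reported to userspace */
};

/* Overflow-compensated total, standing in for update_sample(). */
static uint64_t mbm_total_bytes(struct mbm_rmid_state *s, uint64_t raw_msr)
{
	uint64_t delta = (raw_msr - s->prev_msr) & MBM_CNTR_MAX;

	s->prev_msr = raw_msr;
	s->total_bytes += delta;
	return s->total_bytes;
}

/* init: snapshot the current total so earlier traffic is not charged. */
static void mbm_event_init(struct mbm_event *e, struct mbm_rmid_state *s,
			   uint64_t raw_msr)
{
	e->prev_count = mbm_total_bytes(s, raw_msr);
	e->count = 0;
}

/* count: add only the bytes observed since the previous read. */
static void mbm_event_count(struct mbm_event *e, struct mbm_rmid_state *s,
			    uint64_t raw_msr)
{
	uint64_t cur = mbm_total_bytes(s, raw_msr);

	e->count += cur - e->prev_count;
	e->prev_count = cur;
}

The point is that event->count only ever accumulates deltas of the
overflow-compensated total, so neither a counter wrap nor the moment at
which counting starts shows up in the reported value.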

Signed-off-by: Vikas Shivappa <vikas.shivappa@linux.intel.com>
---
arch/x86/events/intel/cqm.c | 66 ++++++++++++++++++++++++++++++++++++++++++---
include/linux/perf_event.h | 1 +
2 files changed, 63 insertions(+), 4 deletions(-)

diff --git a/arch/x86/events/intel/cqm.c b/arch/x86/events/intel/cqm.c
index 5f2104a..a98d841 100644
--- a/arch/x86/events/intel/cqm.c
+++ b/arch/x86/events/intel/cqm.c
@@ -479,6 +479,14 @@ static void cqm_mask_call(struct rmid_read *rr)
 		on_each_cpu_mask(&cqm_cpumask, __intel_cqm_event_count, rr, 1);
 }
 
+static void update_mbm_count(u64 val, struct perf_event *event)
+{
+	u64 diff = val - local64_read(&event->hw.cqm_prev_count);
+
+	local64_add(diff, &event->count);
+	local64_set(&event->hw.cqm_prev_count, val);
+}
+
 /*
  * Exchange the RMID of a group of events.
  */
@@ -1005,6 +1013,52 @@ static void init_mbm_sample(u32 rmid, u32 evt_type)
 	on_each_cpu_mask(&cqm_cpumask, __intel_mbm_event_init, &rr, 1);
 }
 
+static inline bool first_event_ingroup(struct perf_event *group,
+				       struct perf_event *event)
+{
+	struct list_head *head = &group->hw.cqm_group_entry;
+	u32 evt_type = event->attr.config;
+
+	if (evt_type == group->attr.config)
+		return false;
+	list_for_each_entry(event, head, hw.cqm_group_entry) {
+		if (evt_type == event->attr.config)
+			return false;
+	}
+
+	return true;
+}
+
+/*
+ * mbm_setup_event - Does mbm specific count initialization
+ * when multiple events share RMID.
+ *
+ * If this is the first mbm event then the event prev_count is 0 bytes,
+ * else the current bytes of the RMID is the prev_count.
+ */
+static inline void mbm_setup_event(u32 rmid, struct perf_event *group,
+				   struct perf_event *event)
+{
+	u32 evt_type = event->attr.config;
+	struct rmid_read rr;
+	u64 val;
+
+	if (first_event_ingroup(group, event)) {
+		init_mbm_sample(rmid, evt_type);
+	} else {
+		rr = __init_rr(rmid, evt_type, 0);
+		cqm_mask_call(&rr);
+		val = atomic64_read(&rr.value);
+		local64_set(&event->hw.cqm_prev_count, val);
+	}
+}
+
+static inline void mbm_setup_event_init(struct perf_event *event)
+{
+	event->hw.is_group_event = false;
+	local64_set(&event->hw.cqm_prev_count, 0UL);
+}
+
 /*
  * Find a group and setup RMID.
  *
@@ -1017,7 +1071,7 @@ static void intel_cqm_setup_event(struct perf_event *event,
 	bool conflict = false;
 	u32 rmid;
 
-	event->hw.is_group_event = false;
+	mbm_setup_event_init(event);
 	list_for_each_entry(iter, &cache_groups, hw.cqm_groups_entry) {
 		rmid = iter->hw.cqm_rmid;
 
@@ -1026,7 +1080,7 @@ static void intel_cqm_setup_event(struct perf_event *event,
 		event->hw.cqm_rmid = rmid;
 		*group = iter;
 		if (is_mbm_event(event->attr.config) && __rmid_valid(rmid))
-			init_mbm_sample(rmid, event->attr.config);
+			mbm_setup_event(rmid, iter, event);
 		return;
 	}
 
@@ -1244,8 +1298,12 @@ static u64 intel_cqm_event_count(struct perf_event *event)
 	cqm_mask_call(&rr);
 
 	raw_spin_lock_irqsave(&cache_lock, flags);
-	if (event->hw.cqm_rmid == rr.rmid)
-		local64_set(&event->count, atomic64_read(&rr.value));
+	if (event->hw.cqm_rmid == rr.rmid) {
+		if (is_mbm_event(event->attr.config))
+			update_mbm_count(atomic64_read(&rr.value), event);
+		else
+			local64_set(&event->count, atomic64_read(&rr.value));
+	}
 	raw_spin_unlock_irqrestore(&cache_lock, flags);
 out:
 	return __perf_event_count(event);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index f291275..9298a89 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -122,6 +122,7 @@ struct hw_perf_event {
 			int			cqm_state;
 			u32			cqm_rmid;
 			int			is_group_event;
+			local64_t		cqm_prev_count;
 			struct list_head	cqm_events_entry;
 			struct list_head	cqm_groups_entry;
 			struct list_head	cqm_group_entry;
--
1.9.1