Subject: [PATCH v2 5/6] arm64: perf: Add cap_user_time_short
From: Peter Zijlstra <peterz@infradead.org>

This completes the ARM64 cap_user_time support.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
arch/arm64/kernel/perf_event.c | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)

diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 76f6afd28b48..1e0f15305f67 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -1174,6 +1174,7 @@ void arch_perf_update_userpage(struct perf_event *event,

userpg->cap_user_time = 0;
userpg->cap_user_time_zero = 0;
+ userpg->cap_user_time_short = 0;

do {
rd = sched_clock_read_begin(&seq);
@@ -1184,13 +1185,13 @@ void arch_perf_update_userpage(struct perf_event *event,
userpg->time_mult = rd->mult;
userpg->time_shift = rd->shift;
userpg->time_zero = rd->epoch_ns;
+ userpg->time_cycles = rd->epoch_cyc;
+ userpg->time_mask = rd->sched_clock_mask;

/*
- * This isn't strictly correct, the ARM64 counter can be
- * 'short' and then we get funnies when it wraps. The correct
- * thing would be to extend the perf ABI with a cycle and mask
- * value, but because wrapping on ARM64 is very rare in
- * practise this 'works'.
+ * Subtract the cycle base, such that software that
+ * doesn't know about cap_user_time_short still 'works'
+ * assuming no wraps.
*/
quot = rd->epoch_cyc >> rd->shift;
rem = rd->epoch_cyc & (((u64)1 << rd->shift) - 1);
@@ -1218,4 +1219,5 @@ void arch_perf_update_userpage(struct perf_event *event,
*/
userpg->cap_user_time = 1;
userpg->cap_user_time_zero = 1;
+ userpg->cap_user_time_short = 1;
}
--
2.17.1
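
For context (not part of the patch): below is a minimal userspace sketch of how the new
time_cycles/time_mask fields might be folded into the documented mult/shift conversion
from the perf mmap page. The function name is illustrative only; it assumes the
perf_event_mmap_page has been mmap()ed from a perf fd, that cap_user_time_zero is set,
and that the caller brackets the reads with the usual pc->lock seqcount retry loop.

	#include <stdint.h>
	#include <linux/perf_event.h>

	/*
	 * Convert a raw counter value into nanoseconds using the fields
	 * published in the perf mmap page. Illustrative sketch only.
	 */
	static uint64_t counter_to_ns(const struct perf_event_mmap_page *pc,
				      uint64_t cyc)
	{
		uint64_t quot, rem;

		/*
		 * If the counter is narrower than 64 bits, re-extend it
		 * around the last published cycle base, as advertised by
		 * cap_user_time_short.
		 */
		if (pc->cap_user_time_short)
			cyc = pc->time_cycles +
			      ((cyc - pc->time_cycles) & pc->time_mask);

		/* Standard mult/shift conversion for cap_user_time_zero. */
		quot = cyc >> pc->time_shift;
		rem  = cyc & (((uint64_t)1 << pc->time_shift) - 1);

		return pc->time_zero + quot * pc->time_mult +
		       ((rem * pc->time_mult) >> pc->time_shift);
	}

Software that does not know about cap_user_time_short simply skips the re-extension
step and still gets correct results as long as the counter has not wrapped since
time_cycles was published, which is what the subtraction of the cycle base in the
patch preserves.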