From: Andy Lutomirski <luto@kernel.org>
Subject: [PATCH v2 14/17] x86/tsc: Move rdtsc_barrier() and rename it to barrier_before_rdtsc()
Date: Fri, 12 Jun 2015
rdtsc_barrier() (i.e. MFENCE or LFENCE, depending on the vendor) is
supported by the docs as a barrier immediately before RDTSC. There
is no empirical evidence that any barrier is needed after RDTSC,
nor is there any reason to believe that the type of fence to put
after RDTSC (if any) would be the same as the fence used before it.

Rename the function accordingly. While we're at it, move it so it
lives with the rest of the RDTSC inlines.

There are some callers that look odd now. That's fine -- they are
odd. A subsequent patch will fix them up.
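
For illustration, the pattern the callers below follow is the
following (a sketch using a hypothetical helper, not code added by
this patch):

	static __always_inline u64 ordered_tsc_read(void)
	{
		u64 cycles;

		/* Fence first: RDTSC itself is not ordered vs. loads. */
		barrier_before_rdtsc();
		cycles = rdtsc_unordered();

		/* Nothing after the read: no fence there is known to help. */
		return cycles;
	}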

Signed-off-by: Andy Lutomirski <luto@kernel.org>
---
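
Note (below the fold, so git-am will drop it): for readers not
familiar with the alternatives mechanism, the alternative_2() in the
new barrier_before_rdtsc() patches the instruction stream at boot.
A simplified sketch of its effect (the real mechanism rewrites the
code in place rather than branching at runtime):

	if (boot_cpu_has(X86_FEATURE_MFENCE_RDTSC))
		asm volatile("mfence" ::: "memory");	/* AMD */
	else if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
		asm volatile("lfence" ::: "memory");	/* Intel */
	/* CPUs with neither flag set get no fence at all. */

In practice at most one of the two feature flags is set on a given
CPU.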
 arch/x86/entry/vdso/vclock_gettime.c |  2 +-
 arch/x86/include/asm/barrier.h       | 11 -----------
 arch/x86/include/asm/msr.h           | 21 +++++++++++++++++++++
 arch/x86/include/asm/pvclock.h       |  6 +++---
 arch/x86/kernel/trace_clock.c        |  2 +-
 arch/x86/kernel/tsc_sync.c           |  8 ++++----
 arch/x86/kvm/x86.c                   |  2 +-
 arch/x86/lib/delay.c                 |  6 +++---
 arch/x86/um/asm/barrier.h            |  4 +---
 9 files changed, 35 insertions(+), 27 deletions(-)

diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index f9a0429875a7..0f91cc7373df 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -185,7 +185,7 @@ notrace static cycle_t vread_tsc(void)
* as to whether rdtsc can be reordered with later loads,
* but no one has ever seen it happen.
*/
- rdtsc_barrier();
+ barrier_before_rdtsc();
ret = (cycle_t)rdtsc_unordered();

last = gtod->cycle_last;
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index e51a8f803f55..818cb8788225 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -91,15 +91,4 @@ do { \
#define smp_mb__before_atomic() barrier()
#define smp_mb__after_atomic() barrier()

-/*
- * Stop RDTSC speculation. This is needed when you need to use RDTSC
- * (or get_cycles or vread that possibly accesses the TSC) in a defined
- * code region.
- */
-static __always_inline void rdtsc_barrier(void)
-{
- alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
- "lfence", X86_FEATURE_LFENCE_RDTSC);
-}
-
#endif /* _ASM_X86_BARRIER_H */
diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index e04f36f65c95..a47fb11af5f5 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -127,6 +127,27 @@ static __always_inline unsigned long long rdtsc_unordered(void)
return EAX_EDX_VAL(val, low, high);
}

+/**
+ * barrier_before_rdtsc() - memory barrier before rdtsc_unordered
+ *
+ * The RDTSC instruction is not ordered relative to memory access. The
+ * Intel SDM and the AMD APM are both vague on this point, but
+ * empirically an RDTSC instruction can be speculatively executed before
+ * prior loads. An RDTSC immediately after an appropriate barrier
+ * appears to be ordered as a normal load, that is, it provides the same
+ * ordering guarantees as reading from a global memory location that
+ * some other imaginary CPU is updating continuously with a time stamp.
+ *
+ * For applications that require monotonicity across multiple CPUs or
+ * that need RDTSC to time code in a defined region, this barrier can be
+ * used before rdtsc_unordered().
+ */
+static __always_inline void barrier_before_rdtsc(void)
+{
+ alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
+ "lfence", X86_FEATURE_LFENCE_RDTSC);
+}
+
static inline unsigned long long native_read_pmc(int counter)
{
DECLARE_ARGS(val, low, high);
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index cfd4e89c3acf..4216986b5535 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -79,14 +79,14 @@ unsigned __pvclock_read_cycles(const struct pvclock_vcpu_time_info *src,
/* Note: emulated platforms which do not advertise SSE2 support
* result in kvmclock not using the necessary RDTSC barriers.
* Without barriers, it is possible that RDTSC instruction reads from
- * the time stamp counter outside rdtsc_barrier protected section
+ * the time stamp counter outside barrier_before_rdtsc protected section
* below, resulting in violation of monotonicity.
*/
- rdtsc_barrier();
+ barrier_before_rdtsc();
offset = pvclock_get_nsec_offset(src);
ret = src->system_time + offset;
ret_flags = src->flags;
- rdtsc_barrier();
+ barrier_before_rdtsc();

*cycles = ret;
*flags = ret_flags;
diff --git a/arch/x86/kernel/trace_clock.c b/arch/x86/kernel/trace_clock.c
index c0ab0bed02ae..60ddc0501beb 100644
--- a/arch/x86/kernel/trace_clock.c
+++ b/arch/x86/kernel/trace_clock.c
@@ -14,7 +14,7 @@ u64 notrace trace_clock_x86_tsc(void)
{
u64 ret;

- rdtsc_barrier();
+ barrier_before_rdtsc();
ret = rdtsc_unordered();

return ret;
diff --git a/arch/x86/kernel/tsc_sync.c b/arch/x86/kernel/tsc_sync.c
index dd8d0791dfb5..cf36e9befb39 100644
--- a/arch/x86/kernel/tsc_sync.c
+++ b/arch/x86/kernel/tsc_sync.c
@@ -46,9 +46,9 @@ static void check_tsc_warp(unsigned int timeout)
cycles_t start, now, prev, end;
int i;

- rdtsc_barrier();
+ barrier_before_rdtsc();
start = get_cycles();
- rdtsc_barrier();
+ barrier_before_rdtsc();
/*
* The measurement runs for 'timeout' msecs:
*/
@@ -63,9 +63,9 @@ static void check_tsc_warp(unsigned int timeout)
*/
arch_spin_lock(&sync_lock);
prev = last_tsc;
- rdtsc_barrier();
+ barrier_before_rdtsc();
now = get_cycles();
- rdtsc_barrier();
+ barrier_before_rdtsc();
last_tsc = now;
arch_spin_unlock(&sync_lock);

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a9a3f31311e1..dbd2e5b4346a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1429,7 +1429,7 @@ static cycle_t read_tsc(void)
* as to whether rdtsc can be reordered with later loads,
* but no one has ever seen it happen.
*/
- rdtsc_barrier();
+ barrier_before_rdtsc();
ret = (cycle_t)rdtsc_unordered();

last = pvclock_gtod_data.clock.cycle_last;
diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
index a524708fa165..d49b98a192ae 100644
--- a/arch/x86/lib/delay.c
+++ b/arch/x86/lib/delay.c
@@ -54,10 +54,10 @@ static void delay_tsc(unsigned long __loops)

preempt_disable();
cpu = smp_processor_id();
- rdtsc_barrier();
+ barrier_before_rdtsc();
bclock = rdtsc_unordered();
for (;;) {
- rdtsc_barrier();
+ barrier_before_rdtsc();
now = rdtsc_unordered();
if ((now - bclock) >= loops)
break;
@@ -79,7 +79,7 @@ static void delay_tsc(unsigned long __loops)
if (unlikely(cpu != smp_processor_id())) {
loops -= (now - bclock);
cpu = smp_processor_id();
- rdtsc_barrier();
+ barrier_before_rdtsc();
bclock = rdtsc_unordered();
}
}
diff --git a/arch/x86/um/asm/barrier.h b/arch/x86/um/asm/barrier.h
index b9531d343134..f168d818d551 100644
--- a/arch/x86/um/asm/barrier.h
+++ b/arch/x86/um/asm/barrier.h
@@ -49,10 +49,8 @@
* Stop RDTSC speculation. This is needed when you need to use RDTSC
* (or get_cycles or vread that possibly accesses the TSC) in a defined
* code region.
- *
- * (Could use an alternative three way for this if there was one.)
*/
-static inline void rdtsc_barrier(void)
+static inline void barrier_before_rdtsc(void)
{
alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
"lfence", X86_FEATURE_LFENCE_RDTSC);
--
2.4.2

