    Subject: [tip:x86/asm] x86/asm/tsc: Add rdtsc_ordered() and use it in trivial call sites
    Commit-ID:  03b9730b769fc4d87e40f6104f4c5b2e43889f19
    Gitweb: http://git.kernel.org/tip/03b9730b769fc4d87e40f6104f4c5b2e43889f19
    Author: Andy Lutomirski <luto@kernel.org>
    AuthorDate: Thu, 25 Jun 2015 18:44:08 +0200
    Committer: Ingo Molnar <mingo@kernel.org>
    CommitDate: Mon, 6 Jul 2015 15:23:29 +0200

    x86/asm/tsc: Add rdtsc_ordered() and use it in trivial call sites

    rdtsc_barrier(); rdtsc() is an unnecessary mouthful and requires
    more thought than should be necessary. Add an rdtsc_ordered()
    helper and replace the trivial call sites with it.

    This should not change generated code. The duplication of the
    fence asm is temporary.
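
    For illustration, the conversion at each trivial call site is
    mechanical; a sketch of the before/after pattern (the variable
    name is illustrative, not taken from any one file):

	/* Before: a CPU-dependent barrier, then the unordered read */
	rdtsc_barrier();
	cycles = rdtsc();

	/* After: one helper that performs both steps */
	cycles = rdtsc_ordered();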

    Signed-off-by: Andy Lutomirski <luto@kernel.org>
    Signed-off-by: Borislav Petkov <bp@suse.de>
    Cc: Andy Lutomirski <luto@amacapital.net>
    Cc: Borislav Petkov <bp@alien8.de>
    Cc: Brian Gerst <brgerst@gmail.com>
    Cc: Denys Vlasenko <dvlasenk@redhat.com>
    Cc: H. Peter Anvin <hpa@zytor.com>
    Cc: Huang Rui <ray.huang@amd.com>
    Cc: John Stultz <john.stultz@linaro.org>
    Cc: Len Brown <lenb@kernel.org>
    Cc: Linus Torvalds <torvalds@linux-foundation.org>
    Cc: Peter Zijlstra <peterz@infradead.org>
    Cc: Ralf Baechle <ralf@linux-mips.org>
    Cc: Thomas Gleixner <tglx@linutronix.de>
    Cc: kvm ML <kvm@vger.kernel.org>
    Link: http://lkml.kernel.org/r/dddbf98a2af53312e9aa73a5a2b1622fe5d6f52b.1434501121.git.luto@kernel.org
    Signed-off-by: Ingo Molnar <mingo@kernel.org>
    ---
    arch/x86/entry/vdso/vclock_gettime.c | 16 ++--------------
    arch/x86/include/asm/msr.h           | 26 ++++++++++++++++++++++++++
    arch/x86/kernel/trace_clock.c        |  7 +------
    arch/x86/kvm/x86.c                   | 16 ++--------------
    arch/x86/lib/delay.c                 |  9 +++------
    5 files changed, 34 insertions(+), 40 deletions(-)

    diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
    index 0340d93..ca94fa6 100644
    --- a/arch/x86/entry/vdso/vclock_gettime.c
    +++ b/arch/x86/entry/vdso/vclock_gettime.c
    @@ -175,20 +175,8 @@ static notrace cycle_t vread_pvclock(int *mode)

    notrace static cycle_t vread_tsc(void)
    {
    -	cycle_t ret;
    -	u64 last;
    -
    -	/*
    -	 * Empirically, a fence (of type that depends on the CPU)
    -	 * before rdtsc is enough to ensure that rdtsc is ordered
    -	 * with respect to loads. The various CPU manuals are unclear
    -	 * as to whether rdtsc can be reordered with later loads,
    -	 * but no one has ever seen it happen.
    -	 */
    -	rdtsc_barrier();
    -	ret = (cycle_t)rdtsc();
    -
    -	last = gtod->cycle_last;
    +	cycle_t ret = (cycle_t)rdtsc_ordered();
    +	u64 last = gtod->cycle_last;

    	if (likely(ret >= last))
    		return ret;
    diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
    index ff0c120..02bdd6c 100644
    --- a/arch/x86/include/asm/msr.h
    +++ b/arch/x86/include/asm/msr.h
    @@ -127,6 +127,32 @@ static __always_inline unsigned long long rdtsc(void)
    	return EAX_EDX_VAL(val, low, high);
    }

    +/**
    + * rdtsc_ordered() - read the current TSC in program order
    + *
    + * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
    + * It is ordered like a load to a global in-memory counter. It should
    + * be impossible to observe non-monotonic rdtsc_unordered() behavior
    + * across multiple CPUs as long as the TSC is synced.
    + */
    +static __always_inline unsigned long long rdtsc_ordered(void)
    +{
    +	/*
    +	 * The RDTSC instruction is not ordered relative to memory
    +	 * access. The Intel SDM and the AMD APM are both vague on this
    +	 * point, but empirically an RDTSC instruction can be
    +	 * speculatively executed before prior loads. An RDTSC
    +	 * immediately after an appropriate barrier appears to be
    +	 * ordered as a normal load, that is, it provides the same
    +	 * ordering guarantees as reading from a global memory location
    +	 * that some other imaginary CPU is updating continuously with a
    +	 * time stamp.
    +	 */
    +	alternative_2("", "mfence", X86_FEATURE_MFENCE_RDTSC,
    +		      "lfence", X86_FEATURE_LFENCE_RDTSC);
    +	return rdtsc();
    +}
    +
    static inline unsigned long long native_read_pmc(int counter)
    {
    	DECLARE_ARGS(val, low, high);
    diff --git a/arch/x86/kernel/trace_clock.c b/arch/x86/kernel/trace_clock.c
    index 67efb8c..80bb24d 100644
    --- a/arch/x86/kernel/trace_clock.c
    +++ b/arch/x86/kernel/trace_clock.c
    @@ -12,10 +12,5 @@
     */
    u64 notrace trace_clock_x86_tsc(void)
    {
    -	u64 ret;
    -
    -	rdtsc_barrier();
    -	ret = rdtsc();
    -
    -	return ret;
    +	return rdtsc_ordered();
    }
    diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
    index dfa9713..8d73ec8 100644
    --- a/arch/x86/kvm/x86.c
    +++ b/arch/x86/kvm/x86.c
    @@ -1444,20 +1444,8 @@ EXPORT_SYMBOL_GPL(kvm_write_tsc);

    static cycle_t read_tsc(void)
    {
    -	cycle_t ret;
    -	u64 last;
    -
    -	/*
    -	 * Empirically, a fence (of type that depends on the CPU)
    -	 * before rdtsc is enough to ensure that rdtsc is ordered
    -	 * with respect to loads. The various CPU manuals are unclear
    -	 * as to whether rdtsc can be reordered with later loads,
    -	 * but no one has ever seen it happen.
    -	 */
    -	rdtsc_barrier();
    -	ret = (cycle_t)rdtsc();
    -
    -	last = pvclock_gtod_data.clock.cycle_last;
    +	cycle_t ret = (cycle_t)rdtsc_ordered();
    +	u64 last = pvclock_gtod_data.clock.cycle_last;

    	if (likely(ret >= last))
    		return ret;
    diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
    index f24bc59..4453d52 100644
    --- a/arch/x86/lib/delay.c
    +++ b/arch/x86/lib/delay.c
    @@ -54,11 +54,9 @@ static void delay_tsc(unsigned long __loops)

    	preempt_disable();
    	cpu = smp_processor_id();
    -	rdtsc_barrier();
    -	bclock = rdtsc();
    +	bclock = rdtsc_ordered();
    	for (;;) {
    -		rdtsc_barrier();
    -		now = rdtsc();
    +		now = rdtsc_ordered();
    		if ((now - bclock) >= loops)
    			break;

    @@ -79,8 +77,7 @@ static void delay_tsc(unsigned long __loops)
    		if (unlikely(cpu != smp_processor_id())) {
    			loops -= (now - bclock);
    			cpu = smp_processor_id();
    -			rdtsc_barrier();
    -			bclock = rdtsc();
    +			bclock = rdtsc_ordered();
    		}
    	}
    	preempt_enable();
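
    A minimal sketch of how a caller might lean on the new ordering
    guarantee; measure_cycles() and do_work() are hypothetical and not
    part of this patch:

	/* Count TSC cycles spent in a hypothetical workload. */
	static u64 measure_cycles(void)
	{
		u64 start, end;

		start = rdtsc_ordered();	/* fence + RDTSC, ordered like a load */
		do_work();			/* hypothetical workload */
		end = rdtsc_ordered();

		return end - start;
	}

    Each read behaves like a load from a global, continuously updated
    counter (the alternative_2 above patches in either MFENCE or LFENCE
    at boot, depending on CPU feature flags), so on a machine with
    synchronized TSCs the difference cannot go negative.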
