    From: Andy Lutomirski <luto@kernel.org>
    Subject: [PATCH v2 15/17] x86: Add rdtsc_ordered() and use it in trivial call sites
    Date: 12 Jun 2015
    Calling barrier_before_rdtsc(); rdtsc_unordered() at every call site is an
    unnecessary mouthful and requires more thought than should be necessary.
    Add an rdtsc_ordered() helper and replace the trivial call sites with it.

    This should not change generated code.

    Signed-off-by: Andy Lutomirski <luto@kernel.org>
    ---
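
    For reference, a rough user-space sketch of what rdtsc_ordered() boils down
    to, assuming an SSE2-capable CPU where LFENCE is enough to keep RDTSC from
    executing ahead of earlier loads (the in-kernel barrier_before_rdtsc()
    selects the fence type at runtime via alternatives):

    	#include <stdint.h>
    	#include <x86intrin.h>		/* _mm_lfence(), __rdtsc() */

    	/* Fence first, then read the 64-bit time-stamp counter. */
    	static inline uint64_t ordered_tsc_sketch(void)
    	{
    		_mm_lfence();
    		return __rdtsc();
    	}
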
    arch/x86/entry/vdso/vclock_gettime.c | 16 ++--------------
    arch/x86/include/asm/msr.h | 14 ++++++++++++++
    arch/x86/kernel/trace_clock.c | 7 +------
    arch/x86/kvm/x86.c | 16 ++--------------
    arch/x86/lib/delay.c | 9 +++------
    5 files changed, 22 insertions(+), 40 deletions(-)
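
    Likewise, the delay_tsc() hunks below boil down to the following rough
    sketch (a user-space analogue with hypothetical names; the real code also
    disables preemption and restarts the measurement if the task migrates to
    another CPU):

    	#include <stdint.h>
    	#include <x86intrin.h>		/* _mm_lfence(), _mm_pause(), __rdtsc() */

    	/* Same sketch as above: fence, then read the TSC. */
    	static inline uint64_t ordered_tsc_sketch(void)
    	{
    		_mm_lfence();
    		return __rdtsc();
    	}

    	/* Spin until at least 'loops' TSC ticks have elapsed. */
    	static void delay_tsc_sketch(uint64_t loops)
    	{
    		uint64_t bclock = ordered_tsc_sketch();

    		while (ordered_tsc_sketch() - bclock < loops)
    			_mm_pause();	/* PAUSE (rep; nop) while spinning */
    	}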

    diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
    index 0f91cc7373df..ca94fa649251 100644
    --- a/arch/x86/entry/vdso/vclock_gettime.c
    +++ b/arch/x86/entry/vdso/vclock_gettime.c
    @@ -175,20 +175,8 @@ static notrace cycle_t vread_pvclock(int *mode)

    notrace static cycle_t vread_tsc(void)
    {
    - cycle_t ret;
    - u64 last;
    -
    - /*
    - * Empirically, a fence (of type that depends on the CPU)
    - * before rdtsc is enough to ensure that rdtsc is ordered
    - * with respect to loads. The various CPU manuals are unclear
    - * as to whether rdtsc can be reordered with later loads,
    - * but no one has ever seen it happen.
    - */
    - barrier_before_rdtsc();
    - ret = (cycle_t)rdtsc_unordered();
    -
    - last = gtod->cycle_last;
    + cycle_t ret = (cycle_t)rdtsc_ordered();
    + u64 last = gtod->cycle_last;

    if (likely(ret >= last))
    return ret;
    diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
    index a47fb11af5f5..22d69d2d1f0d 100644
    --- a/arch/x86/include/asm/msr.h
    +++ b/arch/x86/include/asm/msr.h
    @@ -148,6 +148,20 @@ static __always_inline void barrier_before_rdtsc(void)
    "lfence", X86_FEATURE_LFENCE_RDTSC);
    }

    +/**
    + * rdtsc_ordered() - read the current TSC in program order
    + *
    + * rdtsc_ordered() returns the result of RDTSC as a 64-bit integer.
    + * It is ordered like a load to a global in-memory counter. It should
    + * be impossible to observe non-monotonic rdtsc_ordered() behavior
    + * across multiple CPUs as long as the TSC is synced.
    + */
    +static __always_inline unsigned long long rdtsc_ordered(void)
    +{
    + barrier_before_rdtsc();
    + return rdtsc_unordered();
    +}
    +
    static inline unsigned long long native_read_pmc(int counter)
    {
    DECLARE_ARGS(val, low, high);
    diff --git a/arch/x86/kernel/trace_clock.c b/arch/x86/kernel/trace_clock.c
    index 60ddc0501beb..80bb24d9b880 100644
    --- a/arch/x86/kernel/trace_clock.c
    +++ b/arch/x86/kernel/trace_clock.c
    @@ -12,10 +12,5 @@
    */
    u64 notrace trace_clock_x86_tsc(void)
    {
    - u64 ret;
    -
    - barrier_before_rdtsc();
    - ret = rdtsc_unordered();
    -
    - return ret;
    + return rdtsc_ordered();
    }
    diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
    index dbd2e5b4346a..ba69c06d8150 100644
    --- a/arch/x86/kvm/x86.c
    +++ b/arch/x86/kvm/x86.c
    @@ -1419,20 +1419,8 @@ EXPORT_SYMBOL_GPL(kvm_write_tsc);

    static cycle_t read_tsc(void)
    {
    - cycle_t ret;
    - u64 last;
    -
    - /*
    - * Empirically, a fence (of type that depends on the CPU)
    - * before rdtsc is enough to ensure that rdtsc is ordered
    - * with respect to loads. The various CPU manuals are unclear
    - * as to whether rdtsc can be reordered with later loads,
    - * but no one has ever seen it happen.
    - */
    - barrier_before_rdtsc();
    - ret = (cycle_t)rdtsc_unordered();
    -
    - last = pvclock_gtod_data.clock.cycle_last;
    + cycle_t ret = (cycle_t)rdtsc_ordered();
    + u64 last = pvclock_gtod_data.clock.cycle_last;

    if (likely(ret >= last))
    return ret;
    diff --git a/arch/x86/lib/delay.c b/arch/x86/lib/delay.c
    index d49b98a192ae..82bd6aaafe72 100644
    --- a/arch/x86/lib/delay.c
    +++ b/arch/x86/lib/delay.c
    @@ -54,11 +54,9 @@ static void delay_tsc(unsigned long __loops)

    preempt_disable();
    cpu = smp_processor_id();
    - barrier_before_rdtsc();
    - bclock = rdtsc_unordered();
    + bclock = rdtsc_ordered();
    for (;;) {
    - barrier_before_rdtsc();
    - now = rdtsc_unordered();
    + now = rdtsc_ordered();
    if ((now - bclock) >= loops)
    break;

    @@ -79,8 +77,7 @@ static void delay_tsc(unsigned long __loops)
    if (unlikely(cpu != smp_processor_id())) {
    loops -= (now - bclock);
    cpu = smp_processor_id();
    - barrier_before_rdtsc();
    - bclock = rdtsc_unordered();
    + bclock = rdtsc_ordered();
    }
    }
    preempt_enable();
    --
    2.4.2

