From: Andy Lutomirski <luto@kernel.org>
Subject: [PATCH 01/19] x86/tsc: Inline native_read_tsc() and remove __native_read_tsc()
Date: 25 Jun 2015

In

  cdc7957d1954 ("x86: move native_read_tsc() offline")

native_read_tsc() was moved out of line, presumably for some
now-obsolete vDSO-related reason. Undo it.

The entire rdtsc/shl/or instruction sequence is only 11 bytes, and calls
via rdtscl() and similar helpers were already inlined.
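
(As an aside, not part of the patch: below is a minimal userspace sketch of
what the inlined TSC read boils down to. On x86_64 the compiler emits
roughly "rdtsc; shl $32,%rdx; or %rdx,%rax" (plus a zero-extend) at each
call site, i.e. the ~11-byte sequence mentioned above. The name
read_tsc_sketch() is made up for illustration; the kernel's real helper is
native_read_tsc() in <asm/msr.h>.)

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the inlined native_read_tsc(): RDTSC
 * returns the 64-bit counter split across EDX:EAX. */
static inline uint64_t read_tsc_sketch(void)
{
        uint32_t lo, hi;

        asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
        return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
        printf("tsc = %llu\n", (unsigned long long)read_tsc_sketch());
        return 0;
}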

Signed-off-by: Andy Lutomirski <luto@kernel.org>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Huang Rui <ray.huang@amd.com>
Cc: John Stultz <john.stultz@linaro.org>
Cc: Len Brown <lenb@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: kvm ML <kvm@vger.kernel.org>
Cc: x86-ml <x86@kernel.org>
Link: http://lkml.kernel.org/r/d05ffe2aaf8468ca475ebc00efad7b2fa174af19.1434501121.git.luto@kernel.org
Signed-off-by: Borislav Petkov <bp@suse.de>
---
 arch/x86/entry/vdso/vclock_gettime.c  | 2 +-
 arch/x86/include/asm/msr.h            | 8 +++-----
 arch/x86/include/asm/pvclock.h        | 2 +-
 arch/x86/include/asm/stackprotector.h | 2 +-
 arch/x86/include/asm/tsc.h            | 2 +-
 arch/x86/kernel/apb_timer.c           | 4 ++--
 arch/x86/kernel/tsc.c                 | 6 ------
 7 files changed, 9 insertions(+), 17 deletions(-)

diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index 9793322751e0..972b488ac16a 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -186,7 +186,7 @@ notrace static cycle_t vread_tsc(void)
* but no one has ever seen it happen.
*/
rdtsc_barrier();
- ret = (cycle_t)__native_read_tsc();
+ ret = (cycle_t)native_read_tsc();

last = gtod->cycle_last;

diff --git a/arch/x86/include/asm/msr.h b/arch/x86/include/asm/msr.h
index e6a707eb5081..88711470af7f 100644
--- a/arch/x86/include/asm/msr.h
+++ b/arch/x86/include/asm/msr.h
@@ -106,12 +106,10 @@ notrace static inline int native_write_msr_safe(unsigned int msr,
return err;
}

-extern unsigned long long native_read_tsc(void);
-
extern int rdmsr_safe_regs(u32 regs[8]);
extern int wrmsr_safe_regs(u32 regs[8]);

-static __always_inline unsigned long long __native_read_tsc(void)
+static __always_inline unsigned long long native_read_tsc(void)
{
DECLARE_ARGS(val, low, high);

@@ -181,10 +179,10 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
}

#define rdtscl(low) \
- ((low) = (u32)__native_read_tsc())
+ ((low) = (u32)native_read_tsc())

#define rdtscll(val) \
- ((val) = __native_read_tsc())
+ ((val) = native_read_tsc())

#define rdpmc(counter, low, high) \
do { \
diff --git a/arch/x86/include/asm/pvclock.h b/arch/x86/include/asm/pvclock.h
index d6b078e9fa28..71bd485c2986 100644
--- a/arch/x86/include/asm/pvclock.h
+++ b/arch/x86/include/asm/pvclock.h
@@ -62,7 +62,7 @@ static inline u64 pvclock_scale_delta(u64 delta, u32 mul_frac, int shift)
static __always_inline
u64 pvclock_get_nsec_offset(const struct pvclock_vcpu_time_info *src)
{
- u64 delta = __native_read_tsc() - src->tsc_timestamp;
+ u64 delta = native_read_tsc() - src->tsc_timestamp;
return pvclock_scale_delta(delta, src->tsc_to_system_mul,
src->tsc_shift);
}
diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
index c2e00bb2a136..bc5fa2af112e 100644
--- a/arch/x86/include/asm/stackprotector.h
+++ b/arch/x86/include/asm/stackprotector.h
@@ -72,7 +72,7 @@ static __always_inline void boot_init_stack_canary(void)
* on during the bootup the random pool has true entropy too.
*/
get_random_bytes(&canary, sizeof(canary));
- tsc = __native_read_tsc();
+ tsc = native_read_tsc();
canary += tsc + (tsc << 32UL);

current->stack_canary = canary;
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index 94605c0e9cee..fd11128faf25 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -42,7 +42,7 @@ static __always_inline cycles_t vget_cycles(void)
if (!cpu_has_tsc)
return 0;
#endif
- return (cycles_t)__native_read_tsc();
+ return (cycles_t)native_read_tsc();
}

extern void tsc_init(void);
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index ede92c3364d3..9fe111cc50f8 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -390,13 +390,13 @@ unsigned long apbt_quick_calibrate(void)
old = dw_apb_clocksource_read(clocksource_apbt);
old += loop;

- t1 = __native_read_tsc();
+ t1 = native_read_tsc();

do {
new = dw_apb_clocksource_read(clocksource_apbt);
} while (new < old);

- t2 = __native_read_tsc();
+ t2 = native_read_tsc();

shift = 5;
if (unlikely(loop >> shift == 0)) {
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 505449700e0c..e7710cd7ba00 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -308,12 +308,6 @@ unsigned long long
sched_clock(void) __attribute__((alias("native_sched_clock")));
#endif

-unsigned long long native_read_tsc(void)
-{
- return __native_read_tsc();
-}
-EXPORT_SYMBOL(native_read_tsc);
-
int check_tsc_unstable(void)
{
return tsc_unstable;
--
2.3.5

