From: Pavel Tatashin <pasha.tatashin@oracle.com>
Subject: [PATCH v6 4/4] x86/tsc: use tsc early
Date: 2017-08-30
tsc_early_init():
Determines the offset, shift, and multiplier for the early clock based on
the TSC frequency. Notifies the sched clock code, by calling
sched_clock_early_init(), that the early clock is available.
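
As a rough illustration (not part of the patch) of what the mult/shift pair
does: for a hypothetical 2,500,000 kHz (2.5 GHz) TSC, the conversion
ns ~= cycles * 10^6 / khz can be approximated as (cycles * mul) >> shift.
The shift below is assumed for the sketch, and the kernel's
mul_u64_u32_shr() uses a wider intermediate product than this plain 64-bit
multiply:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t khz = 2500000;		/* hypothetical TSC frequency */
	uint32_t shift = 22;		/* assumed shift for this sketch */
	/* ns ~= cycles * 10^6 / khz, approximated as (cycles * mul) >> shift */
	uint32_t mul = (uint32_t)(((uint64_t)1000000 << shift) / khz);
	uint64_t cycles = 5000000000ULL;	/* 2 seconds worth at 2.5 GHz */
	uint64_t ns = (cycles * mul) >> shift;	/* ~2,000,000,000 ns */

	printf("mul=%u shift=%u ns=%llu\n", mul, shift,
	       (unsigned long long)ns);
	return 0;
}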

tsc_early_fini():
Implements the teardown part of the early tsc feature: prints a message
about the offset, which can be useful for finding out how much time was
spent in POST and the boot manager (if the TSC starts from 0 during boot),
and calls sched_clock_early_fini() to tell the sched clock code that the
early clock can no longer be used.
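
For reference (not part of the patch), the printed offset is just a
nanosecond value split into seconds and a nine-digit remainder, much like
the do_div() in tsc_early_fini(). A minimal userspace sketch with a
made-up offset:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t offset_ns = 12345678901ULL;	/* hypothetical ns offset */
	unsigned long long secs = offset_ns / 1000000000ULL;
	unsigned long long rem  = offset_ns % 1000000000ULL;

	/* prints: offset [12.345678901s] */
	printf("offset [%llu.%09llus]\n", secs, rem);
	return 0;
}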

sched_clock_early():
TSC-based implementation of the weak function defined in the sched clock
code.
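
To illustrate the weak/strong override this relies on (a sketch reusing the
function name with made-up bodies, not the kernel sources): the generic code
provides a __weak default, and the architecture supplies a strong definition
that wins at link time. Build the two files below with
"gcc generic.c arch_x86.c -o demo"; linking only generic.c keeps the weak
fallback.

/* generic.c: weak fallback, used when no architecture override is linked in */
#include <stdio.h>
#include <stdint.h>

uint64_t __attribute__((weak)) sched_clock_early(void)
{
	return 0;			/* no early clock available */
}

int main(void)
{
	printf("early clock: %llu ns\n",
	       (unsigned long long)sched_clock_early());
	return 0;
}

/* arch_x86.c: strong definition overrides the weak one at link time */
#include <stdint.h>

uint64_t sched_clock_early(void)
{
	return 123456789;		/* stand-in for the rdtsc()-based conversion */
}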

Call tsc_early_init() to initialize the early boot time stamp functionality
on the supported x86 platforms, and call tsc_early_fini() to finish this
feature after the permanent TSC has been initialized.
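
As shown in the diff below, the init call sits in the early calibration
path in setup.c and the fini call follows tsc_init() in time.c. A compact
userspace sketch of the same lifecycle (x86-only, with illustrative names
and a made-up 2.5 GHz frequency, not the kernel code):

#include <stdio.h>
#include <stdint.h>
#include <x86intrin.h>			/* __rdtsc() */

static uint32_t mul;
static const uint32_t shift = 22;	/* assumed shift for this sketch */
static uint64_t offset;			/* negated ns value at init time */

static uint64_t cyc2ns(uint64_t cyc)
{
	/* 128-bit intermediate, in the spirit of mul_u64_u32_shr() */
	return (uint64_t)(((__uint128_t)cyc * mul) >> shift);
}

static uint64_t clock_early(void)	/* plays the role of sched_clock_early() */
{
	return cyc2ns(__rdtsc()) + offset;
}

static void clock_early_init(uint64_t khz)	/* cf. tsc_early_init() */
{
	mul = (uint32_t)(((uint64_t)1000000 << shift) / khz);
	offset = -cyc2ns(__rdtsc());	/* clock starts near 0 from here on */
}

static void clock_early_fini(void)	/* cf. tsc_early_fini() */
{
	uint64_t t = -offset;		/* ns the TSC had counted before init */

	printf("early clock offset [%llu.%09llus]\n",
	       (unsigned long long)(t / 1000000000ULL),
	       (unsigned long long)(t % 1000000000ULL));
}

int main(void)
{
	clock_early_init(2500000);	/* pretend calibration found 2.5 GHz */
	printf("t1 = %llu ns\n", (unsigned long long)clock_early());
	printf("t2 = %llu ns\n", (unsigned long long)clock_early());
	clock_early_fini();
	return 0;
}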

Signed-off-by: Pavel Tatashin <pasha.tatashin@oracle.com>
---
arch/x86/include/asm/tsc.h | 4 ++++
arch/x86/kernel/setup.c | 10 ++++++++--
arch/x86/kernel/time.c | 1 +
arch/x86/kernel/tsc.c | 47 ++++++++++++++++++++++++++++++++++++++++++++++
4 files changed, 60 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h
index f5e6f1c417df..6dc9618b24e3 100644
--- a/arch/x86/include/asm/tsc.h
+++ b/arch/x86/include/asm/tsc.h
@@ -50,11 +50,15 @@ extern bool tsc_store_and_check_tsc_adjust(bool bootcpu);
extern void tsc_verify_tsc_adjust(bool resume);
extern void check_tsc_sync_source(int cpu);
extern void check_tsc_sync_target(void);
+void tsc_early_init(unsigned int khz);
+void tsc_early_fini(void);
#else
static inline bool tsc_store_and_check_tsc_adjust(bool bootcpu) { return false; }
static inline void tsc_verify_tsc_adjust(bool resume) { }
static inline void check_tsc_sync_source(int cpu) { }
static inline void check_tsc_sync_target(void) { }
+static inline void tsc_early_init(unsigned int khz) { }
+static inline void tsc_early_fini(void) { }
#endif

extern int notsc_setup(char *);
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 3486d0498800..413434d98a23 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -812,7 +812,11 @@ dump_kernel_offset(struct notifier_block *self, unsigned long v, void *p)
return 0;
}

-static void __init simple_udelay_calibration(void)
+/*
+ * Initialize early tsc to show early boot timestamps, and also loops_per_jiffy
+ * for udelay
+ */
+static void __init early_clock_calibration(void)
{
unsigned int tsc_khz, cpu_khz;
unsigned long lpj;
@@ -827,6 +831,8 @@ static void __init simple_udelay_calibration(void)
if (!tsc_khz)
return;

+ tsc_early_init(tsc_khz);
+
lpj = tsc_khz * 1000;
do_div(lpj, HZ);
loops_per_jiffy = lpj;
@@ -1039,7 +1045,7 @@ void __init setup_arch(char **cmdline_p)
*/
init_hypervisor_platform();

- simple_udelay_calibration();
+ early_clock_calibration();

x86_init.resources.probe_roms();

diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index fbad8bf2fa24..44411d769b53 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -86,6 +86,7 @@ static __init void x86_late_time_init(void)
{
x86_init.timers.timer_init();
tsc_init();
+ tsc_early_fini();
}

/*
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 796d96bb0821..bd44c2dd4235 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1263,6 +1263,53 @@ static int __init init_tsc_clocksource(void)
*/
device_initcall(init_tsc_clocksource);

+#ifdef CONFIG_X86_TSC
+
+static struct cyc2ns_data cyc2ns_early;
+static bool sched_clock_early_enabled;
+
+u64 sched_clock_early(void)
+{
+ u64 ns;
+
+ if (!sched_clock_early_enabled)
+ return 0;
+ ns = mul_u64_u32_shr(rdtsc(), cyc2ns_early.cyc2ns_mul,
+ cyc2ns_early.cyc2ns_shift);
+ return ns + cyc2ns_early.cyc2ns_offset;
+}
+
+/*
+ * Initialize clock for early time stamps
+ */
+void __init tsc_early_init(unsigned int khz)
+{
+ sched_clock_early_enabled = true;
+ clocks_calc_mult_shift(&cyc2ns_early.cyc2ns_mul,
+ &cyc2ns_early.cyc2ns_shift,
+ khz, NSEC_PER_MSEC, 0);
+ cyc2ns_early.cyc2ns_offset = -sched_clock_early();
+ sched_clock_early_init();
+}
+
+void __init tsc_early_fini(void)
+{
+ unsigned long long t;
+ unsigned long r;
+
+ /* We did not have early sched clock if multiplier is 0 */
+ if (cyc2ns_early.cyc2ns_mul == 0)
+ return;
+
+ t = -cyc2ns_early.cyc2ns_offset;
+ r = do_div(t, NSEC_PER_SEC);
+
+ sched_clock_early_fini();
+ pr_info("sched clock early is finished, offset [%lld.%09lds]\n", t, r);
+ sched_clock_early_enabled = false;
+}
+#endif /* CONFIG_X86_TSC */
+
void __init tsc_init(void)
{
u64 lpj, cyc;
--
2.14.1