Date: 2005-11-21
From: John Stultz <johnstul@us.ibm.com>
Subject: [PATCH 7/13] Time: i386 Conversion - part 3: Rework TSC Support
All,
The conversion of i386 to use the generic timeofday subsystem has been
split into 6 parts. This patch, the third of six, reworks some of the
code in the new tsc.c file, adding new interfaces and the hooks to use
them appropriately.

It applies on top of my timeofday-arch-i386-part2 patch. This patch is
part of the timeofday-arch-i386 patchset, so without the following parts
it is not expected to compile.
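
For reference, here is a rough userspace sketch (not part of the patch;
the names and the scale factor of 10 are purely illustrative) of the
two fixed-point conversions the patch below leans on: cycles =>
nanoseconds via a pre-computed multiply-and-shift for sched_clock(),
and nanoseconds => cycles as tsc_c3_compensate() does it:

#include <stdio.h>

#define CYC2NS_SCALE_FACTOR 10  /* 2^10: illustrative scale */

static unsigned long cyc2ns_scale;

/* scale = (10^6 << 10) / khz, so that
 * (cycles * scale) >> 10 == cycles * 10^6 / khz == ns
 */
static void set_cyc2ns_scale(unsigned long khz)
{
        cyc2ns_scale = (1000000UL << CYC2NS_SCALE_FACTOR) / khz;
}

/* cycles -> ns: one multiply and a shift, no divide in the fast path */
static unsigned long long cycles_2_ns(unsigned long long cyc)
{
        return (cyc * cyc2ns_scale) >> CYC2NS_SCALE_FACTOR;
}

/* ns -> cycles: tsc_khz is cycles per msec, so
 * cycles = ns * tsc_khz / 10^6
 */
static unsigned long long ns_2_cycles(unsigned long ns, unsigned long tsc_khz)
{
        return ((unsigned long long)ns * tsc_khz) / 1000000;
}

int main(void)
{
        unsigned long khz = 2000000UL;  /* pretend we're a 2GHz box */

        set_cyc2ns_scale(khz);
        printf("2000 cycles -> %llu ns\n", cycles_2_ns(2000));      /* ~1000 */
        printf("1000 ns -> %llu cycles\n", ns_2_cycles(1000, khz)); /* ~2000 */
        return 0;
}

The divide only happens in set_cyc2ns_scale(), which runs rarely (boot
and cpufreq transitions), so the sched_clock() path stays divide-free.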

thanks
-john

Signed-off-by: John Stultz <johnstul@us.ibm.com>

linux-2.6.15-rc1-mm2_timeofday-arch-i386-part3_B11.patch
============================================
diff -ruN tod-mm_1/arch/i386/kernel/setup.c tod-mm_2/arch/i386/kernel/setup.c
--- tod-mm_1/arch/i386/kernel/setup.c 2005-11-21 16:39:09.000000000 -0800
+++ tod-mm_2/arch/i386/kernel/setup.c 2005-11-21 16:51:24.000000000 -0800
@@ -1628,6 +1628,7 @@
conswitchp = &dummy_con;
#endif
#endif
+ tsc_init();
}

#include "setup_arch_post.h"
diff -ruN tod-mm_1/arch/i386/kernel/tsc.c tod-mm_2/arch/i386/kernel/tsc.c
--- tod-mm_1/arch/i386/kernel/tsc.c 2005-11-21 16:51:16.000000000 -0800
+++ tod-mm_2/arch/i386/kernel/tsc.c 2005-11-21 16:51:24.000000000 -0800
@@ -5,11 +5,18 @@
*/

#include <linux/init.h>
-#include <linux/timex.h>
#include <linux/cpufreq.h>
+#include <linux/jiffies.h>
+#include <asm/tsc.h>
#include <asm/io.h>
#include "mach_timer.h"

+/* On some systems the TSC frequency does not
+ * change with the cpu frequency. So we need
+ * an extra value to store the TSC freq
+ */
+unsigned int tsc_khz;
+
int tsc_disable __initdata = 0;
#ifndef CONFIG_X86_TSC
/* disable flag for tsc. Takes effect by clearing the TSC cpu flag
@@ -32,15 +39,46 @@

int read_current_timer(unsigned long *timer_val)
{
- if (cur_timer->read_timer) {
- *timer_val = cur_timer->read_timer();
+ if (!tsc_disable && cpu_khz) {
+ rdtscl(*timer_val);
return 0;
}
return -1;
}

+/* Code to mark and check if the TSC is unstable
+ * due to cpufreq or due to unsynced TSCs
+ */
+static int tsc_unstable;
+static inline int check_tsc_unstable(void)
+{
+ return tsc_unstable;
+}
+
+void mark_tsc_unstable(void)
+{
+ tsc_unstable = 1;
+}
+
+/* Code to compensate for C3 stalls */
+static u64 tsc_c3_offset;
+void tsc_c3_compensate(unsigned long nsecs)
+{
+ /* this could definitely be optimized */
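+ /* (tsc_khz is cycles per msec, so cycles = nsecs * tsc_khz / 10^6) */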
+ u64 cycles = ((u64)nsecs * tsc_khz);
+ do_div(cycles, 1000000);
+ tsc_c3_offset += cycles;
+}
+
+EXPORT_SYMBOL_GPL(tsc_c3_compensate);

-/* convert from cycles(64bits) => nanoseconds (64bits)
+static inline u64 tsc_read_c3_time(void)
+{
+ return tsc_c3_offset;
+}
+
+/* Accelerators for sched_clock()
+ * convert from cycles(64bits) => nanoseconds (64bits)
* basic equation:
* ns = cycles / (freq / ns_per_sec)
* ns = cycles * (ns_per_sec / freq)
@@ -85,76 +123,54 @@
* synchronized across all CPUs.
*/
#ifndef CONFIG_NUMA
- if (!use_tsc)
+ if (!cpu_khz || check_tsc_unstable())
#endif
/* no locking but a rare wrong value is not a big deal */
- return jiffies_64 * (1000000000 / HZ);
+ return (jiffies_64 - INITIAL_JIFFIES) * (1000000000 / HZ);

/* Read the Time Stamp Counter */
rdtscll(this_offset);
+ this_offset += tsc_read_c3_time();

/* return the value in ns */
return cycles_2_ns(this_offset);
}

-/* ------ Calibrate the TSC -------
- * Return 2^32 * (1 / (TSC clocks per usec)) for do_fast_gettimeoffset().
- * Too much 64-bit arithmetic here to do this cleanly in C, and for
- * accuracy's sake we want to keep the overhead on the CTC speaker (channel 2)
- * output busy loop as low as possible. We avoid reading the CTC registers
- * directly because of the awkward 8-bit access mechanism of the 82C54
- * device.
- */
-
-#define CALIBRATE_TIME (5 * 1000020/HZ)

-unsigned long calibrate_tsc(void)
+static unsigned long calculate_cpu_khz(void)
{
- mach_prepare_counter();
-
- {
- unsigned long startlow, starthigh;
- unsigned long endlow, endhigh;
- unsigned long count;
-
- rdtsc(startlow,starthigh);
+ unsigned long long start, end;
+ unsigned long count;
+ u64 delta64;
+ int i;
+ /* run 3 times to ensure the cache is warm */
+ for(i=0; i<3; i++) {
+ mach_prepare_counter();
+ rdtscll(start);
mach_countup(&count);
- rdtsc(endlow,endhigh);
-
-
- /* Error: ECTCNEVERSET */
- if (count <= 1)
- goto bad_ctc;
-
- /* 64-bit subtract - gcc just messes up with long longs */
- __asm__("subl %2,%0\n\t"
- "sbbl %3,%1"
- :"=a" (endlow), "=d" (endhigh)
- :"g" (startlow), "g" (starthigh),
- "0" (endlow), "1" (endhigh));
-
- /* Error: ECPUTOOFAST */
- if (endhigh)
- goto bad_ctc;
-
- /* Error: ECPUTOOSLOW */
- if (endlow <= CALIBRATE_TIME)
- goto bad_ctc;
-
- __asm__("divl %2"
- :"=a" (endlow), "=d" (endhigh)
- :"r" (endlow), "0" (0), "1" (CALIBRATE_TIME));
-
- return endlow;
+ rdtscll(end);
}
-
- /*
+ /* Error: ECTCNEVERSET
* The CTC wasn't reliable: we got a hit on the very first read,
* or the CPU was so fast/slow that the quotient wouldn't fit in
* 32 bits..
*/
-bad_ctc:
- return 0;
+ if (count <= 1)
+ return 0;
+
+ delta64 = end - start;
+
+ /* cpu freq too fast */
+ if(delta64 > (1ULL<<32))
+ return 0;
+ /* cpu freq too slow */
+ if (delta64 <= CALIBRATE_TIME_MSEC)
+ return 0;
+
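+ /* cycles elapsed / msecs elapsed = cycles per msec = freq in kHz */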
+ delta64 += CALIBRATE_TIME_MSEC/2; /* round for do_div */
+ do_div(delta64,CALIBRATE_TIME_MSEC);
+
+ return (unsigned long)delta64;
}

int recalibrate_cpu_khz(void)
@@ -163,11 +179,11 @@
unsigned long cpu_khz_old = cpu_khz;

if (cpu_has_tsc) {
- init_cpu_khz();
+ cpu_khz = calculate_cpu_khz();
+ tsc_khz = cpu_khz;
cpu_data[0].loops_per_jiffy =
- cpufreq_scale(cpu_data[0].loops_per_jiffy,
- cpu_khz_old,
- cpu_khz);
+ cpufreq_scale(cpu_data[0].loops_per_jiffy,
+ cpu_khz_old, cpu_khz);
return 0;
} else
return -ENODEV;
@@ -178,25 +194,22 @@
EXPORT_SYMBOL(recalibrate_cpu_khz);


-/* calculate cpu_khz */
-void init_cpu_khz(void)
+void tsc_init(void)
{
- if (cpu_has_tsc) {
- unsigned long tsc_quotient = calibrate_tsc();
- if (tsc_quotient) {
- /* report CPU clock rate in Hz.
- * The formula is (10^6 * 2^32) / (2^32 * 1 / (clocks/us)) =
- * clock/second. Our precision is about 100 ppm.
- */
- { unsigned long eax=0, edx=1000;
- __asm__("divl %2"
- :"=a" (cpu_khz), "=d" (edx)
- :"r" (tsc_quotient),
- "0" (eax), "1" (edx));
- printk("Detected %lu.%03lu MHz processor.\n", cpu_khz / 1000, cpu_khz % 1000);
- }
- }
- }
+ if(!cpu_has_tsc || tsc_disable)
+ return;
+
+ cpu_khz = calculate_cpu_khz();
+ tsc_khz = cpu_khz;
+
+ if (!cpu_khz)
+ return;
+
+ printk("Detected %lu.%03lu MHz processor.\n",
+ (unsigned long)cpu_khz / 1000,
+ (unsigned long)cpu_khz % 1000);
+
+ set_cyc2ns_scale(cpu_khz);
}


@@ -216,15 +229,15 @@
cpufreq_delayed_issched = 0;
}

-/* if we notice lost ticks, schedule a call to cpufreq_get() as it tries
+/* if we notice cpufreq oddness, schedule a call to cpufreq_get() as it tries
* to verify the CPU frequency the timing core thinks the CPU is running
* at is still correct.
*/
-void cpufreq_delayed_get(void)
+static inline void cpufreq_delayed_get(void)
{
if (cpufreq_init && !cpufreq_delayed_issched) {
cpufreq_delayed_issched = 1;
- printk(KERN_DEBUG "Losing some ticks... checking if CPU frequency changed.\n");
+ printk(KERN_DEBUG "Checking if CPU frequency changed.\n");
schedule_work(&cpufreq_delayed_get_work);
}
}
@@ -237,13 +250,11 @@
static unsigned long loops_per_jiffy_ref = 0;

#ifndef CONFIG_SMP
-static unsigned long fast_gettimeoffset_ref = 0;
static unsigned long cpu_khz_ref = 0;
#endif

-static int
-time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
- void *data)
+static int time_cpufreq_notifier(struct notifier_block *nb,
+ unsigned long val, void *data)
{
struct cpufreq_freqs *freq = data;

@@ -253,7 +264,6 @@
ref_freq = freq->old;
loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy;
#ifndef CONFIG_SMP
- fast_gettimeoffset_ref = fast_gettimeoffset_quotient;
cpu_khz_ref = cpu_khz;
#endif
}
@@ -263,16 +273,20 @@
(val == CPUFREQ_RESUMECHANGE)) {
if (!(freq->flags & CPUFREQ_CONST_LOOPS))
cpu_data[freq->cpu].loops_per_jiffy = cpufreq_scale(loops_per_jiffy_ref, ref_freq, freq->new);
+
+ if (cpu_khz) {
#ifndef CONFIG_SMP
- if (cpu_khz)
cpu_khz = cpufreq_scale(cpu_khz_ref, ref_freq, freq->new);
- if (use_tsc) {
+#endif
if (!(freq->flags & CPUFREQ_CONST_LOOPS)) {
- fast_gettimeoffset_quotient = cpufreq_scale(fast_gettimeoffset_ref, freq->new, ref_freq);
+ tsc_khz = cpu_khz;
set_cyc2ns_scale(cpu_khz);
+ /* TSC based sched_clock turns
+ * to junk w/ cpufreq
+ */
+ mark_tsc_unstable();
}
}
-#endif
}

if (val != CPUFREQ_RESUMECHANGE)
@@ -294,10 +308,9 @@
CPUFREQ_TRANSITION_NOTIFIER);
if (!ret)
cpufreq_init = 1;
+
return ret;
}
core_initcall(cpufreq_tsc);

-#else /* CONFIG_CPU_FREQ */
-void cpufreq_delayed_get(void) { return; }
#endif
diff -ruN tod-mm_1/drivers/acpi/processor_idle.c tod-mm_2/drivers/acpi/processor_idle.c
--- tod-mm_1/drivers/acpi/processor_idle.c 2005-11-21 16:39:09.000000000 -0800
+++ tod-mm_2/drivers/acpi/processor_idle.c 2005-11-21 16:51:24.000000000 -0800
@@ -268,6 +268,7 @@
}

static atomic_t c3_cpu_count;
+extern void tsc_c3_compensate(unsigned long nsecs);

static void acpi_processor_idle(void)
{
@@ -451,6 +452,11 @@
ACPI_MTX_DO_NOT_LOCK);
}

+#ifdef CONFIG_GENERIC_TIME
+ /* compensate for TSC pause */
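+ /* (t2-t1) is in ACPI PM timer ticks (3.579545MHz), */
+ /* and 286070 >> 10 ~= 279.365 nsecs per tick */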
+ tsc_c3_compensate((u32)(((u64)((t2-t1)&0xFFFFFF)*286070)>>10));
+#endif
+
/* Re-enable interrupts */
local_irq_enable();
/* Compute time (ticks) that we were actually asleep */
diff -ruN tod-mm_1/include/asm-i386/mach-default/mach_timer.h tod-mm_2/include/asm-i386/mach-default/mach_timer.h
--- tod-mm_1/include/asm-i386/mach-default/mach_timer.h 2005-11-21 16:39:14.000000000 -0800
+++ tod-mm_2/include/asm-i386/mach-default/mach_timer.h 2005-11-21 16:51:24.000000000 -0800
@@ -15,7 +15,9 @@
#ifndef _MACH_TIMER_H
#define _MACH_TIMER_H

-#define CALIBRATE_LATCH (5 * LATCH)
+#define CALIBRATE_TIME_MSEC 30 /* 30 msecs */
+#define CALIBRATE_LATCH \
+ ((CLOCK_TICK_RATE * CALIBRATE_TIME_MSEC + 1000/2)/1000)
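+/* i.e. the number of PIT ticks (at CLOCK_TICK_RATE Hz) in
+ * CALIBRATE_TIME_MSEC msecs, rounded to the nearest tick */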

static inline void mach_prepare_counter(void)
{
diff -ruN tod-mm_1/include/asm-i386/mach-summit/mach_mpparse.h tod-mm_2/include/asm-i386/mach-summit/mach_mpparse.h
--- tod-mm_1/include/asm-i386/mach-summit/mach_mpparse.h 2005-11-21 16:39:14.000000000 -0800
+++ tod-mm_2/include/asm-i386/mach-summit/mach_mpparse.h 2005-11-21 16:51:24.000000000 -0800
@@ -2,6 +2,7 @@
#define __ASM_MACH_MPPARSE_H

#include <mach_apic.h>
+#include <asm/tsc.h>

extern int use_cyclone;

@@ -29,6 +30,7 @@
(!strncmp(productid, "VIGIL SMP", 9)
|| !strncmp(productid, "EXA", 3)
|| !strncmp(productid, "RUTHLESS SMP", 12))){
+ mark_tsc_unstable();
use_cyclone = 1; /*enable cyclone-timer*/
setup_summit();
return 1;
@@ -42,6 +44,7 @@
if (!strncmp(oem_id, "IBM", 3) &&
(!strncmp(oem_table_id, "SERVIGIL", 8)
|| !strncmp(oem_table_id, "EXA", 3))){
+ mark_tsc_unstable();
use_cyclone = 1; /*enable cyclone-timer*/
setup_summit();
return 1;
diff -ruN tod-mm_1/include/asm-i386/tsc.h tod-mm_2/include/asm-i386/tsc.h
--- tod-mm_1/include/asm-i386/tsc.h 2005-11-21 16:51:16.000000000 -0800
+++ tod-mm_2/include/asm-i386/tsc.h 2005-11-21 16:51:24.000000000 -0800
@@ -41,4 +41,8 @@
}

extern unsigned int cpu_khz;
+extern unsigned int tsc_khz;
+extern void tsc_init(void);
+void tsc_c3_compensate(unsigned long nsecs);
+extern void mark_tsc_unstable(void);
#endif
-