Date:    Tue, 15 Feb 2000
Subject: [PATCH] precise time accounting for 2.3.45
This patch implements improved time accounting for the 2.3.45
kernel. It fixes the problem of short time slices being accounted
to the wrong process.
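
To illustrate the idea, here is a rough sketch of the accounting
logic (the helper is hypothetical, for illustration only; the patch
below inlines this logic into update_times() and schedule()).
Instead of charging a whole tick to whichever task happens to be
running when the timer interrupt fires, the TSC is sampled at every
tick and at every context switch, and a tick is charged only after
a task has really consumed cycles_per_jiffie cycles:

	/* Hypothetical helper, not code from the patch. */
	static void account_cycles(struct task_struct *p)
	{
		/* Read the CPU cycle counter (TSC). */
		cycles_t t = get_cycles();

		/* Accumulate the cycles consumed since the last sample. */
		p->cycles += t - last_cycles;

		/* Charge one tick per cycles_per_jiffie cycles consumed. */
		while (p->cycles >= cycles_per_jiffie) {
			p->cycles -= cycles_per_jiffie;
			update_process_times(1, 0);
		}
		last_cycles = t;
	}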

Based on the feedback I got on my previous patch, I have prepared a
much simpler (and more readable :)) version.

Context-switch latency (as measured by lmbench) of the patched
kernel is around 10% higher on my machine than without the patch
(depending on the number of processes). For desktop users it's not
worth it, but for sysadmins of multi-user servers and for people
who need to know _exactly_ what load a process generates (e.g. for
load balancing in HPC clusters), correct accounting is quite
important. (That's why there is an entry in the kernel
configuration...)
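
To give a feel for the granularity, with illustrative numbers (not
measurements from the patch): cycles_per_jiffie is set to
cpu_hz / HZ, so on a 500 MHz CPU with HZ=100 that is
500000000 / 100 = 5000000 cycles, and a task is charged one tick
for every five million cycles it really executes, no matter how
finely its run time is sliced. The configuration entry only shows
up when the CPU has a TSC and SMP is disabled (see the config.in
hunk below).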

I'd like to thank everyone for their comments. If there is still
something in the patch that can be improved or fixed, please drop
me a note. Thanks.

Jan


--- linux-2.3.45/kernel/timer.c Tue Feb 15 10:19:07 2000
+++ linux/kernel/timer.c Tue Feb 15 10:28:02 2000
@@ -71,6 +71,11 @@
unsigned long prof_len = 0;
unsigned long prof_shift = 0;

+#ifdef CONFIG_PRECISE_ACCT
+unsigned long cycles_per_jiffie = 0;
+cycles_t last_cycles = 0;
+#endif
+
/*
* Event timer code
*/
@@ -559,7 +564,7 @@
do_it_prof(p, ticks);
}

-static void update_process_times(unsigned long ticks, unsigned long system)
+void update_process_times(unsigned long ticks, unsigned long system)
{
/*
* SMP does this on a per-CPU basis elsewhere
@@ -635,6 +640,11 @@
static inline void update_times(void)
{
unsigned long ticks;
+#ifdef CONFIG_PRECISE_ACCT
+ cycles_t t;
+ struct task_struct *p = current;
+ static unsigned int last_swtch_num = 0;
+#endif

/*
* update_times() is run from the raw timer_bh handler so we
@@ -652,6 +662,25 @@

calc_load(ticks);
update_wall_time(ticks);
+
+#ifdef CONFIG_PRECISE_ACCT
+ t = get_cycles();
+
+ if (kstat.context_swtch != last_swtch_num) {
+ p->cycles += t - last_cycles;
+ while (p->cycles >= cycles_per_jiffie) {
+ p->cycles -= cycles_per_jiffie;
+ update_process_times(1, 0);
+ }
+
+ last_cycles = t;
+ last_swtch_num = kstat.context_swtch;
+
+ write_unlock_irq(&xtime_lock);
+ return;
+ }
+ last_cycles = t;
+#endif
write_unlock_irq(&xtime_lock);

update_process_times(ticks, system);
--- linux-2.3.45/kernel/sched.c Tue Feb 15 10:19:07 2000
+++ linux/kernel/sched.c Tue Feb 15 10:28:02 2000
@@ -39,6 +39,12 @@

unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */

+#ifdef CONFIG_PRECISE_ACCT
+extern void update_process_times(unsigned long ticks, unsigned long system);
+extern unsigned long cycles_per_jiffie;
+extern cycles_t last_cycles;
+#endif
+
extern void mem_use(void);

/*
@@ -589,6 +595,21 @@
}
}

+#ifdef CONFIG_PRECISE_ACCT
+ {
+ cycles_t t;
+
+ t = get_cycles();
+
+ prev->cycles += t - last_cycles;
+ while (prev->cycles >= cycles_per_jiffie) {
+ prev->cycles -= cycles_per_jiffie;
+ update_process_times(1, 0);
+ }
+ last_cycles = t;
+ }
+#endif
+
/*
* This just switches the register state and the
* stack.
@@ -1170,6 +1191,10 @@

for(nr = 0; nr < PIDHASH_SZ; nr++)
pidhash[nr] = NULL;
+
+#ifdef CONFIG_PRECISE_ACCT
+ last_cycles = get_cycles();
+#endif

init_bh(TIMER_BH, timer_bh);
init_bh(TQUEUE_BH, tqueue_bh);
--- linux-2.3.45/include/linux/sched.h Tue Feb 15 10:19:06 2000
+++ linux/include/linux/sched.h Tue Feb 15 10:28:02 2000
@@ -356,6 +356,9 @@
u32 self_exec_id;
/* Protection of fields allocatio/deallocation */
struct semaphore exit_sem;
+
+/* TSC cycles consumed but not yet charged as a full tick */
+ cycles_t cycles;
};

/*
@@ -426,6 +429,7 @@
/* signals */ SPIN_LOCK_UNLOCKED, &init_signals, {{0}}, {{0}}, NULL, &init_task.sigqueue, 0, 0, \
/* exec cts */ 0,0, \
/* exit_sem */ __MUTEX_INITIALIZER(name.exit_sem), \
+/* cycles */ 0, \
}

#ifndef INIT_TASK_SIZE
--- linux-2.3.45/arch/i386/kernel/time.c Thu Jan 20 18:51:42 2000
+++ linux/arch/i386/kernel/time.c Tue Feb 15 10:28:02 2000
@@ -66,6 +66,10 @@

unsigned long cpu_hz; /* Detected as we calibrate the TSC */

+#ifdef CONFIG_PRECISE_ACCT
+extern unsigned long cycles_per_jiffie;
+#endif
+
/* Number of usecs that the last interrupt was delayed */
static int delay_at_last_interrupt;

@@ -685,5 +689,9 @@
setup_irq(CO_IRQ_TIMER, &irq0);
#else
setup_irq(0, &irq0);
+#endif
+
+#ifdef CONFIG_PRECISE_ACCT
+ cycles_per_jiffie = cpu_hz / HZ;
#endif
}
--- linux-2.3.45/kernel/fork.c Tue Feb 15 10:19:44 2000
+++ linux/kernel/fork.c Tue Feb 15 10:28:02 2000
@@ -668,6 +668,7 @@
p->tty_old_pgrp = 0;
p->times.tms_utime = p->times.tms_stime = 0;
p->times.tms_cutime = p->times.tms_cstime = 0;
+ p->cycles = 0;
#ifdef __SMP__
{
int i;
--- linux-2.3.45/arch/i386/config.in Tue Feb 15 10:18:14 2000
+++ linux/arch/i386/config.in Tue Feb 15 10:28:02 2000
@@ -125,6 +125,9 @@

bool 'System V IPC' CONFIG_SYSVIPC
bool 'BSD Process Accounting' CONFIG_BSD_PROCESS_ACCT
+if [ "$CONFIG_X86_TSC" = "y" -a "$CONFIG_SMP" = "n" ]; then
+ bool 'Precise accounting' CONFIG_PRECISE_ACCT
+fi
bool 'Sysctl support' CONFIG_SYSCTL
if [ "$CONFIG_PROC_FS" = "y" ]; then
choice 'Kernel core (/proc/kcore) format' \
