From: Darrick J. Wong <djwong@us.ibm.com>
Subject: [PATCH 3/6] x86 chargeback accounting patch
Date: 2008-11-25
Provides scaled utime and stime CPU usage statistics to facilitate chargeback
accounting for CPU use on the x86 architecture. This lets taskstats users see
not only the wall-clock time that a process used, but also how much CPU
capacity was consumed to run it, similar to how the SPURR and PURR registers
are used in arch/powerpc/ to provide scaled utime and stime.
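The scaling leans on two helpers that this patch uses but does not add,
delta_perf() and scale_with_perf(); they come from earlier in the series
(delta_perf() is visible as context in the system.h hunk below). A minimal
sketch of the arithmetic scale_with_perf() implies, assuming it simply
multiplies a cputime by the APERF/MPERF ratio (div64_u64() from
<linux/math64.h>):

/*
 * Sketch only -- not part of this patch.  APERF advances with actual
 * cycles and MPERF with reference (nominal-frequency) cycles, so
 * aperf_delta/mperf_delta is the fraction of nominal capacity the
 * CPU delivered over the interval.
 */
static inline cputime_t scale_with_perf(cputime_t ct, u64 aperf_delta,
					u64 mperf_delta)
{
	if (!mperf_delta)	/* no reference cycles: leave unscaled */
		return ct;
	return (cputime_t)div64_u64((u64)ct * aperf_delta, mperf_delta);
}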

Signed-off-by: Darrick J. Wong <djwong@us.ibm.com>
---
 arch/x86/Kconfig              |   12 ++++++++++
 arch/x86/include/asm/system.h |    4 +++
 arch/x86/kernel/time.c        |   48 +++++++++++++++++++++++++++++++++++++++
 3 files changed, 64 insertions(+), 0 deletions(-)

diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index ac22bb7..44e32b1 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -206,6 +206,18 @@ menu "Processor type and features"
 
 source "kernel/time/Kconfig"
 
+config VIRT_CPU_ACCOUNTING
+	bool "Deterministic task and CPU time accounting"
+	default n
+	help
+	  Select this option to enable more accurate task and CPU time
+	  accounting.  This is done by reading a CPU counter on each
+	  kernel entry and exit and on transitions within the kernel
+	  between system, softirq and hardirq state, so there is a
+	  small performance impact.
+
+	  If in doubt, say N here.
+
 config SMP
 	bool "Symmetric multi-processing support"
 	---help---
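The "kernel entry and exit" transitions the help text mentions are not hooked
in this patch; on CONFIG_VIRT_CPU_ACCOUNTING architectures the generic code in
include/linux/hardirq.h calls account_system_vtime() around hardirq
processing. Abridged from a 2.6.28-era tree, for context:

#if defined(CONFIG_VIRT_CPU_ACCOUNTING)
extern void account_system_vtime(struct task_struct *tsk);
#else
# define account_system_vtime(tsk)	do { } while (0)
#endif

#define __irq_enter()					\
	do {						\
		account_system_vtime(current);		\
		add_preempt_count(HARDIRQ_OFFSET);	\
		trace_hardirq_enter();			\
	} while (0)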
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index 787f5c2..3fabf5d 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -422,6 +422,10 @@ static inline void rdtsc_barrier(void)
 	alternative(ASM_NOP3, "lfence", X86_FEATURE_LFENCE_RDTSC);
 }
 
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+void account_system_vtime(struct task_struct *tsk);
+#endif
+
 #define U64_MAX	(u64)(~((u64)0))
 
 static inline u64 delta_perf(u64 now, u64 *old)
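delta_perf() above is likewise from an earlier patch in the series; judging
from the adjacent U64_MAX definition, a plausible sketch is a read-and-update
delta that tolerates one counter wraparound:

/* Sketch only -- the real delta_perf() is added earlier in the series. */
static inline u64 delta_perf(u64 now, u64 *old)
{
	u64 delta;

	if (now >= *old)
		delta = now - *old;
	else				/* counter wrapped since last read */
		delta = U64_MAX - *old + now + 1;
	*old = now;
	return delta;
}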
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
index 41ff323..727aa7b 100644
--- a/arch/x86/kernel/time.c
+++ b/arch/x86/kernel/time.c
@@ -2,6 +2,10 @@
 #include <linux/module.h>
 #include <linux/hardirq.h>
 
+/* Buffer to store old values of aperf/mperf */
+DEFINE_PER_CPU(u64, vcpu_acct_old_aperf);
+DEFINE_PER_CPU(u64, vcpu_acct_old_mperf);
+
 void get_intel_aperf_mperf_registers(u64 *aperf, u64 *mperf)
 {
 	union {
@@ -101,3 +105,47 @@ static inline int is_intel_cpu_with_aperf(void)
 	return has_aperf;
 }
 EXPORT_SYMBOL_GPL(is_intel_cpu_with_aperf);
+
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+void account_system_vtime(struct task_struct *tsk)
+{
+	/* scaling requires an Intel CPU with APERF/MPERF */
+	if (!is_intel_cpu_with_aperf())
+		return;
+
+	/* snapshot aperf/mperf at this accounting boundary */
+	get_intel_aperf_mperf_registers(
+		&per_cpu(vcpu_acct_old_aperf, smp_processor_id()),
+		&per_cpu(vcpu_acct_old_mperf, smp_processor_id()));
+}
+EXPORT_SYMBOL_GPL(account_system_vtime);
+
+static inline cputime_t cputime_to_scaled(const cputime_t ct)
+{
+	u64 a, m, ad, md;
+
+	/* without aperf/mperf, scaled time equals real time */
+	if (!is_intel_cpu_with_aperf())
+		return ct;
+
+	/* delta of aperf/mperf since account_system_vtime() above */
+	get_intel_aperf_mperf_registers(&a, &m);
+	ad = delta_perf(a, &per_cpu(vcpu_acct_old_aperf, smp_processor_id()));
+	md = delta_perf(m, &per_cpu(vcpu_acct_old_mperf, smp_processor_id()));
+
+	return scale_with_perf(ct, ad, md);
+}
+
+void account_process_tick(struct task_struct *p, int user_tick)
+{
+	cputime_t one_jiffy = jiffies_to_cputime(1);
+
+	if (user_tick) {
+		account_user_time(p, one_jiffy);
+		account_user_time_scaled(p, cputime_to_scaled(one_jiffy));
+	} else {
+		account_system_time(p, HARDIRQ_OFFSET, one_jiffy);
+		account_system_time_scaled(p, cputime_to_scaled(one_jiffy));
+	}
+}
+#endif
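
The per-tick effect, with illustrative numbers (not from the patch): at
HZ=1000 a tick is 1 ms; if the CPU spent that tick running at half of its
reference frequency, APERF advances half as far as MPERF, so the scaled
charge is half the wall-clock charge -- the same relationship PURR/SPURR
expose on powerpc. A standalone arithmetic check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t tick_ns = 1000000;	/* one jiffy at HZ=1000 */
	uint64_t ad = 1000000;		/* APERF delta: actual cycles */
	uint64_t md = 2000000;		/* MPERF delta: reference cycles */

	/* prints "scaled tick = 500000 ns": half speed, half the charge */
	printf("scaled tick = %llu ns\n",
	       (unsigned long long)(tick_ns * ad / md));
	return 0;
}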

