Subject: Re: [RFC patch] spindep: add cross cache lines checking

On Mon, 2012-03-05 at 11:24 +0800, Alex Shi wrote:
> Oops.
> Sorry, the patch is not tested well! will update it later.

Corrected version:
==========
From 28745c1970a61a1420d388660cd9dcc619cd38ba Mon Sep 17 00:00:00 2001
From: Alex Shi <alex.shi@intel.com>
Date: Mon, 5 Mar 2012 13:03:35 +0800
Subject: [PATCH] lockdep: add cross cache lines checking

Modern x86 CPUs do not lock the whole memory bus when executing 'lock'
prefixed instructions, unless the destination operand crosses two
cache lines. When that happens, the resulting bus lock is a disaster
for system performance.

In practice, as long as the lock is not embedded in a 'packed'
structure, gcc places it safely aligned on x86. Still, adding this
check under CONFIG_DEBUG_LOCK_ALLOC seems harmless.
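
As an illustration (not part of this patch), here is a minimal
user-space sketch of the same test, assuming 64-byte cache lines and a
made-up packed structure that forces a 4-byte lock across a line
boundary:

#include <stdio.h>
#include <stddef.h>

#define L1_CACHE_BYTES		64	/* assumed line size */
#define L1_CACHE_SIZE_MASK	(~(unsigned long)(L1_CACHE_BYTES - 1))

/* Same test as the patch: do the first and last byte of *p fall
 * into different cache lines? */
#define is_cross_lines(p)					\
	(((unsigned long)(p) & L1_CACHE_SIZE_MASK) !=		\
	 (((unsigned long)(p) + sizeof(*(p)) - 1) & L1_CACHE_SIZE_MASK))

/* Hypothetical packed layout: the 4-byte lock starts at offset 62,
 * so it straddles the first 64-byte boundary of a line-aligned object. */
struct bad_layout {
	char pad[62];
	unsigned int lock;
} __attribute__((packed, aligned(64)));

int main(void)
{
	static struct bad_layout b;

	printf("lock offset: %zu\n", offsetof(struct bad_layout, lock));
	printf("crosses cache lines: %d\n", is_cross_lines(&b.lock));
	return 0;
}

On a 64-bit build this prints offset 62 and reports a crossing; drop
the 'packed' attribute and natural alignment pushes the lock to offset
64, entirely inside one line.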

Inspired-by: Andi Kleen <andi.kleen@intel.com>
Signed-off-by: Alex Shi <alex.shi@intel.com>
---
 arch/x86/include/asm/cache.h |    2 +
 include/asm-generic/cache.h  |    2 +
 lib/spinlock_debug.c         |   76 ++++++++++++++++++++++-------------------
 3 files changed, 45 insertions(+), 35 deletions(-)

diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
index 48f99f1..63c2316 100644
--- a/arch/x86/include/asm/cache.h
+++ b/arch/x86/include/asm/cache.h
@@ -7,6 +7,8 @@
 #define L1_CACHE_SHIFT	(CONFIG_X86_L1_CACHE_SHIFT)
 #define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
 
+#define L1_CACHE_SIZE_MASK	(~(L1_CACHE_BYTES - 1))
+
 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
index 1bfcfe5..6f8eb29 100644
--- a/include/asm-generic/cache.h
+++ b/include/asm-generic/cache.h
@@ -9,4 +9,6 @@
 #define L1_CACHE_SHIFT		5
 #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
 
+#define L1_CACHE_SIZE_MASK	(~(L1_CACHE_BYTES - 1))
+
 #endif /* __ASM_GENERIC_CACHE_H */
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 5f3eacd..938a145 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -13,41 +13,9 @@
 #include <linux/delay.h>
 #include <linux/module.h>
 
-void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
-			  struct lock_class_key *key)
-{
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	/*
-	 * Make sure we are not reinitializing a held lock:
-	 */
-	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
-	lockdep_init_map(&lock->dep_map, name, key, 0);
-#endif
-	lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
-	lock->magic = SPINLOCK_MAGIC;
-	lock->owner = SPINLOCK_OWNER_INIT;
-	lock->owner_cpu = -1;
-}
-
-EXPORT_SYMBOL(__raw_spin_lock_init);
-
-void __rwlock_init(rwlock_t *lock, const char *name,
-		   struct lock_class_key *key)
-{
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-	/*
-	 * Make sure we are not reinitializing a held lock:
-	 */
-	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
-	lockdep_init_map(&lock->dep_map, name, key, 0);
-#endif
-	lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
-	lock->magic = RWLOCK_MAGIC;
-	lock->owner = SPINLOCK_OWNER_INIT;
-	lock->owner_cpu = -1;
-}
-
-EXPORT_SYMBOL(__rwlock_init);
+#define is_cross_lines(p)					\
+	(((unsigned long)(p) & L1_CACHE_SIZE_MASK) !=		\
+	 (((unsigned long)(p) + sizeof(*p) - 1) & L1_CACHE_SIZE_MASK))
 
 static void spin_dump(raw_spinlock_t *lock, const char *msg)
 {
@@ -296,3 +264,41 @@ void do_raw_write_unlock(rwlock_t *lock)
 	debug_write_unlock(lock);
 	arch_write_unlock(&lock->raw_lock);
 }
+
+void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
+			  struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held lock:
+	 */
+	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+	lockdep_init_map(&lock->dep_map, name, key, 0);
+	SPIN_BUG_ON(is_cross_lines(&lock->raw_lock), lock,
+			"!!! the lock crosses cache lines !!!");
+#endif
+	lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
+	lock->magic = SPINLOCK_MAGIC;
+	lock->owner = SPINLOCK_OWNER_INIT;
+	lock->owner_cpu = -1;
+}
+EXPORT_SYMBOL(__raw_spin_lock_init);
+
+void __rwlock_init(rwlock_t *lock, const char *name,
+		   struct lock_class_key *key)
+{
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+	/*
+	 * Make sure we are not reinitializing a held lock:
+	 */
+	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
+	lockdep_init_map(&lock->dep_map, name, key, 0);
+	RWLOCK_BUG_ON(is_cross_lines(&lock->raw_lock), lock,
+			"!!! the lock crosses cache lines !!!");
+#endif
+	lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
+	lock->magic = RWLOCK_MAGIC;
+	lock->owner = SPINLOCK_OWNER_INIT;
+	lock->owner_cpu = -1;
+}
+EXPORT_SYMBOL(__rwlock_init);
--
1.6.3.3



