Subject: Re: [RFC patch] spin_lock: add cross cache lines checking
From: Alex Shi <alex.shi@intel.com>
Date: Mon, 05 Mar 2012
Oops.
Sorry, the patch was not tested well! I will update it later.

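For reference, the check itself just compares which cache line the
first byte and the last byte of an object land in. Below is a minimal
standalone sketch of that arithmetic, plus how a 'packed' structure
can leave a lock at a crossing offset. It assumes 64-byte cache lines,
and crosses_line() / struct bad are made-up names for illustration
only; the patch derives the real mask from L1_CACHE_SHIFT:

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

/* Assumed 64-byte lines for this sketch only. */
#define LINE_BYTES	64UL
#define LINE_MASK	(~(LINE_BYTES - 1UL))

/* Same arithmetic as the patch: compare the cache line of the
 * object's first byte with the cache line of its last byte. */
static int crosses_line(uintptr_t addr, size_t size)
{
	return (addr & LINE_MASK) != ((addr + size - 1) & LINE_MASK);
}

/* 'packed' drops the alignment padding gcc would normally insert,
 * so the 4-byte lock field can start at any odd offset. */
struct bad {
	char pad[63];
	uint32_t lock;
} __attribute__((packed));

int main(void)
{
	printf("%d\n", crosses_line(62, 4));	/* 1: bytes 62..65 span two lines */
	printf("%d\n", crosses_line(64, 4));	/* 0: fits in one line */
	printf("%zu\n", offsetof(struct bad, lock));	/* 63: misaligned */
	return 0;
}

With 64-byte lines, a 4-byte lock at offset 63 occupies bytes 63..66
of the struct, spanning two lines; that is exactly the case the new
SPIN_BUG_ON would catch at init time.
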
On Mon, 2012-03-05 at 11:20 +0800, Alex Shi wrote:
> Modern x86 CPUs no longer lock the whole memory bus when executing
> 'lock'-prefixed instructions, unless the destination operand crosses
> two cache lines. When it does cross, the resulting bus lock is a
> disaster for system performance.
>
> In practice, as long as the lock is not placed in a 'packed'
> structure, gcc aligns it safely on x86. But adding this check under
> CONFIG_DEBUG_LOCK_ALLOC seems harmless.
>
> btw, the SPIN_BUG_ON macro is also reworked a little to address a
> style complaint.
>
> Inspired-by: Andi Kleen <andi.kleen@intel.com>
> Signed-off-by: Alex Shi <alex.shi@intel.com>
> ---
> diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
> index 48f99f1..79d146e 100644
> --- a/arch/x86/include/asm/cache.h
> +++ b/arch/x86/include/asm/cache.h
> @@ -7,6 +7,8 @@
>  #define L1_CACHE_SHIFT	(CONFIG_X86_L1_CACHE_SHIFT)
>  #define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
>  
> +#define L1_CACHE_SIZE_MASK	(~(L1_CACHE_BYTES - 1UL))
> +
>  #define __read_mostly __attribute__((__section__(".data..read_mostly")))
>  
>  #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
> diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
> index 1bfcfe5..244e528 100644
> --- a/include/asm-generic/cache.h
> +++ b/include/asm-generic/cache.h
> @@ -9,4 +9,6 @@
>  #define L1_CACHE_SHIFT		5
>  #define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
>  
> +#define L1_CACHE_SIZE_MASK	(~(L1_CACHE_BYTES - 1UL))
> +
>  #endif /* __ASM_GENERIC_CACHE_H */
> diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
> index 5f3eacd..554dcda 100644
> --- a/lib/spinlock_debug.c
> +++ b/lib/spinlock_debug.c
> @@ -13,6 +13,16 @@
>  #include <linux/delay.h>
>  #include <linux/module.h>
>  
> +static void spin_bug(raw_spinlock_t *lock, const char *msg);
> +
> +#define SPIN_BUG_ON(cond, lock, msg)				\
> +	do { if (unlikely(cond)) spin_bug(lock, msg); } while (0)
> +
> +/* True when the object's bytes straddle a cache line boundary. */
> +#define is_cross_lines(p)					\
> +	(((unsigned long)(p) & L1_CACHE_SIZE_MASK) !=		\
> +	 (((unsigned long)(p) + sizeof(*(p)) - 1) & L1_CACHE_SIZE_MASK))
> +
>  void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
>  			  struct lock_class_key *key)
>  {
> @@ -22,6 +32,8 @@ void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
>  	 */
>  	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
>  	lockdep_init_map(&lock->dep_map, name, key, 0);
> +	SPIN_BUG_ON(is_cross_lines(&lock->raw_lock), lock,
> +			"!!! the lock crosses cache lines !!!");
>  #endif
>  	lock->raw_lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
>  	lock->magic = SPINLOCK_MAGIC;
> @@ -40,6 +52,8 @@ void __rwlock_init(rwlock_t *lock, const char *name,
>  	 */
>  	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
>  	lockdep_init_map(&lock->dep_map, name, key, 0);
> +	SPIN_BUG_ON(is_cross_lines(&lock->raw_lock), lock,
> +			"!!! the lock crosses cache lines !!!");
>  #endif
>  	lock->raw_lock = (arch_rwlock_t) __ARCH_RW_LOCK_UNLOCKED;
>  	lock->magic = RWLOCK_MAGIC;
> @@ -75,8 +89,6 @@ static void spin_bug(raw_spinlock_t *lock, const char *msg)
>  	spin_dump(lock, msg);
>  }
>  
> -#define SPIN_BUG_ON(cond, lock, msg) if (unlikely(cond)) spin_bug(lock, msg)
> -
>  static inline void
>  debug_spin_lock_before(raw_spinlock_t *lock)
>  {
>
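A note on the SPIN_BUG_ON rework: the old bare-if form silently
mis-binds a following 'else' (the dangling-else problem), and a plain
braced block fails to compile once a ';' follows it inside if/else.
That is why kernel style wraps statement-like macros in
do { ... } while (0), as the updated macro above does. A standalone
illustration (BAD_CHECK/GOOD_CHECK are made-up names):

#include <stdio.h>

/* Braced-block form: 'BAD_CHECK(x);' inside if/else expands to
 * '{ ... } ;', and the stray ';' ends the if, orphaning the else. */
#define BAD_CHECK(cond)		{ if (cond) puts("bug"); }

/* do/while(0) form: behaves as one statement and swallows the
 * trailing ';', so if/else around it parses as expected. */
#define GOOD_CHECK(cond)	do { if (cond) puts("bug"); } while (0)

int main(void)
{
	int broken = 1;

	if (broken)
		GOOD_CHECK(broken);	/* BAD_CHECK here would not compile */
	else
		puts("ok");

	return 0;
}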