Subject: [PATCH] mutex: Introduce arch_mutex_cpu_relax()
From: Gerald Schaefer <gerald.schaefer@de.ibm.com>

The spinning mutex implementation uses cpu_relax() in busy loops as a
compiler barrier. Depending on the architecture, cpu_relax() may do more
than is needed in these specific mutex spin loops. On System z we also give
up the time slice of the virtual cpu in cpu_relax(), which prevents
effective spinning on the mutex.

This patch replaces cpu_relax() in the spinning mutex code with
arch_mutex_cpu_relax(), which can be defined by each architecture that
selects HAVE_ARCH_MUTEX_CPU_RELAX. The default is still cpu_relax(), so
this patch should not affect architectures other than System z for now.
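
Put differently, an architecture opts in by selecting the new Kconfig symbol
and supplying its own arch_mutex_cpu_relax() in <asm/mutex.h>; that is what
the s390 hunks below do, shown here for a placeholder <arch>:

	# arch/<arch>/Kconfig, inside the architecture's config entry
	select HAVE_ARCH_MUTEX_CPU_RELAX

	/* arch/<arch>/include/asm/mutex.h */
	#define arch_mutex_cpu_relax() barrier()

All other architectures keep the default from include/linux/mutex.h, which
maps arch_mutex_cpu_relax() to cpu_relax().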

Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
---
 arch/Kconfig                  |    3 +++
 arch/s390/Kconfig             |    1 +
 arch/s390/include/asm/mutex.h |    2 ++
 include/linux/mutex.h         |    4 ++++
 kernel/mutex.c                |    2 +-
 kernel/sched.c                |    3 ++-
 6 files changed, 13 insertions(+), 2 deletions(-)

--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -175,4 +175,7 @@ config HAVE_PERF_EVENTS_NMI
 config HAVE_ARCH_JUMP_LABEL
 	bool
 
+config HAVE_ARCH_MUTEX_CPU_RELAX
+	bool
+
 source "kernel/gcov/Kconfig"
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -99,6 +99,7 @@ config S390
 	select HAVE_KERNEL_LZMA
 	select HAVE_KERNEL_LZO
 	select HAVE_GET_USER_PAGES_FAST
+	select HAVE_ARCH_MUTEX_CPU_RELAX
 	select ARCH_INLINE_SPIN_TRYLOCK
 	select ARCH_INLINE_SPIN_TRYLOCK_BH
 	select ARCH_INLINE_SPIN_LOCK
--- a/arch/s390/include/asm/mutex.h
+++ b/arch/s390/include/asm/mutex.h
@@ -7,3 +7,5 @@
  */
 
 #include <asm-generic/mutex-dec.h>
+
+#define arch_mutex_cpu_relax() barrier()
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -160,4 +160,8 @@ extern int mutex_trylock(struct mutex *l
 extern void mutex_unlock(struct mutex *lock);
 extern int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock);
 
+#ifndef CONFIG_HAVE_ARCH_MUTEX_CPU_RELAX
+#define arch_mutex_cpu_relax() cpu_relax()
+#endif
+
 #endif
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -199,7 +199,7 @@ __mutex_lock_common(struct mutex *lock,
 		 * memory barriers as we'll eventually observe the right
 		 * values at the cost of a few extra spins.
 		 */
-		cpu_relax();
+		arch_mutex_cpu_relax();
 	}
 #endif
 	spin_lock_mutex(&lock->wait_lock, flags);
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -75,6 +75,7 @@
 
 #include <asm/tlb.h>
 #include <asm/irq_regs.h>
+#include <asm/mutex.h>
 
 #include "sched_cpupri.h"
 #include "workqueue_sched.h"
@@ -4029,7 +4030,7 @@ int mutex_spin_on_owner(struct mutex *lo
 		if (task_thread_info(rq->curr) != owner || need_resched())
 			return 0;
 
-		cpu_relax();
+		arch_mutex_cpu_relax();
 	}
 
 	return 1;


