From: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Date: Fri, 16 Jul 2010
Subject: [PATCH RFC 05/12] x86/ticketlock: make __ticket_spin_lock common
Aside from the width of the xadd instruction, the NR_CPUS < 256 and
NR_CPUS >= 256 versions of __ticket_spin_lock are identical. So factor
the xadd out into a common __ticket_spin_claim() and share the rest of
the code.
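
For illustration only, here is a rough userspace sketch of the shape the
common code takes. The names (ticketlock, ticket_claim, ticket_lock) and
the struct layout are made up for the example, GCC/Clang __atomic
builtins stand in for the kernel's LOCK-prefixed xadd asm, and a
little-endian target (as on x86) is assumed so that tail is the high
byte of the packed word:

#include <stdint.h>

/* Illustrative NR_CPUS < 256 layout; the kernel's struct __raw_tickets
 * and arch_spinlock_t differ in detail. */
struct raw_tickets { uint8_t head, tail; };

struct ticketlock {
	union {
		struct raw_tickets tickets;
		uint16_t head_tail;	/* head = low byte, tail = high byte */
	};
};

/* Stand-in for the locked xadd: atomically add 1 to tail (the high
 * byte of the packed word) and return the pre-increment head/tail
 * pair. */
static struct raw_tickets ticket_claim(struct ticketlock *lock)
{
	union { struct raw_tickets t; uint16_t u; } old;

	old.u = __atomic_fetch_add(&lock->head_tail, 1u << 8,
				   __ATOMIC_ACQUIRE);
	return old.t;
}

static void ticket_lock(struct ticketlock *lock)
{
	struct raw_tickets inc = ticket_claim(lock);

	while (inc.head != inc.tail)	/* spin until our ticket comes up */
		inc.head = __atomic_load_n(&lock->tickets.head,
					   __ATOMIC_ACQUIRE);
}

The sizeof(lock->tickets.head) == sizeof(u8) test in the patch plays
the same role as choosing the add width here: it is a compile-time
constant, so the compiler discards the untaken branch and each
configuration still boils down to a single locked xadd of the right
width.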

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
---
arch/x86/include/asm/spinlock.h | 42 ++++++++++++++++++--------------------
1 files changed, 20 insertions(+), 22 deletions(-)

diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 7586d7a..4f9fa24 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -69,13 +69,27 @@ static __always_inline void __ticket_unlock_release(struct arch_spinlock *lock)
* save some instructions and make the code more elegant. There really isn't
* much between them in performance though, especially as locks are out of line.
*/
-#if (NR_CPUS < 256)
-static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
+static __always_inline struct __raw_tickets __ticket_spin_claim(struct arch_spinlock *lock)
{
- register struct __raw_tickets inc = { .tail = 1 };
+ register struct __raw_tickets tickets = { .tail = 1 };
+
+ if (sizeof(lock->tickets.head) == sizeof(u8))
+ asm volatile (LOCK_PREFIX "xaddw %w0, %1\n"
+ : "+r" (tickets), "+m" (lock->tickets)
+ : : "memory", "cc");
+ else
+ asm volatile (LOCK_PREFIX "xaddl %0, %1\n"
+ : "+r" (tickets), "+m" (lock->tickets)
+ : : "memory", "cc");

- asm volatile (LOCK_PREFIX "xaddw %w0, %1\n"
- : "+r" (inc), "+m" (lock->tickets) : : "memory", "cc");
+ return tickets;
+}
+
+static __always_inline void __ticket_spin_lock(struct arch_spinlock *lock)
+{
+ register struct __raw_tickets inc;
+
+ inc = __ticket_spin_claim(lock);

for (;;) {
if (inc.head == inc.tail)
@@ -86,6 +100,7 @@ static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
barrier(); /* make sure nothing creeps before the lock is taken */
}

+#if (NR_CPUS < 256)
static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
unsigned int tmp, new;
@@ -105,23 +120,6 @@ static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
return tmp;
}
#else
-static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
-{
- register struct __raw_tickets inc = { .tickets.tail = 1 };
-
- asm volatile(LOCK_PREFIX "xaddl %0, %1\n\t"
- : "+r" (inc), "+m" (lock->tickets)
- : : "memory", "cc");
-
- for (;;) {
- if (inc.head == inc.tail)
- return;
- cpu_relax();
- inc.head = ACCESS_ONCE(lock->tickets.head);
- }
- barrier(); /* make sure nothing creeps before the lock is taken */
-}
-
static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
{
unsigned tmp;
--
1.7.1.1