From: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Subject: [PATCH 5/6] x86/ticketlock: make __ticket_spin_lock common
Date: 24 Jan 2011

Aside from the particular form of the xadd instruction, the two
versions of __ticket_spin_lock (8-bit tickets when NR_CPUS < 256,
16-bit tickets otherwise) are identical. So factor out the xadd and
use common code for the rest.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
---
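As a rough illustration of the claim/spin split this patch introduces,
below is a minimal user-space sketch using C11 atomics in place of the
kernel's locked xadd. The names (struct ticketlock, ticket_claim,
ticket_lock) and the two separate counters are illustrative
assumptions, not kernel code:

#include <stdatomic.h>

/* Minimal user-space analogue of the claim/spin split.  The kernel
 * packs head and tail into one word and claims with a locked xadd;
 * two plain atomic counters are enough to show the structure. */
struct ticketlock {
	atomic_uint next;	/* like tickets.tail: next ticket to hand out */
	atomic_uint owner;	/* like tickets.head: ticket now being served */
};

/* The "claim" step: atomically take a ticket (the factored-out xadd). */
static unsigned ticket_claim(struct ticketlock *lk)
{
	return atomic_fetch_add_explicit(&lk->next, 1, memory_order_relaxed);
}

/* The common spin loop, identical whatever width the ticket has. */
static void ticket_lock(struct ticketlock *lk)
{
	unsigned me = ticket_claim(lk);

	while (atomic_load_explicit(&lk->owner, memory_order_acquire) != me)
		;	/* the kernel would cpu_relax() here */
}

static void ticket_unlock(struct ticketlock *lk)
{
	atomic_fetch_add_explicit(&lk->owner, 1, memory_order_release);
}

As in the patch, ticket_claim() isolates the one width-specific step,
and the spin loop that follows is the same no matter how the ticket
was claimed.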
arch/x86/include/asm/spinlock.h |   42 ++++++++++++++++++++----------------------
1 files changed, 20 insertions(+), 22 deletions(-)

diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 1b81809..f722f96 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -67,13 +67,27 @@ static __always_inline void __ticket_unlock_release(struct arch_spinlock *lock)
  * save some instructions and make the code more elegant. There really isn't
  * much between them in performance though, especially as locks are out of line.
  */
-#if (NR_CPUS < 256)
-static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
+static __always_inline struct __raw_tickets __ticket_spin_claim(struct arch_spinlock *lock)
 {
-	register struct __raw_tickets inc = { .tail = 1 };
+	register struct __raw_tickets tickets = { .tail = 1 };
+
+	if (sizeof(lock->tickets.head) == sizeof(u8))
+		asm volatile (LOCK_PREFIX "xaddw %w0, %1\n"
+			      : "+r" (tickets), "+m" (lock->tickets)
+			      : : "memory", "cc");
+	else
+		asm volatile (LOCK_PREFIX "xaddl %0, %1\n"
+			      : "+r" (tickets), "+m" (lock->tickets)
+			      : : "memory", "cc");
 
-	asm volatile (LOCK_PREFIX "xaddw %w0, %1\n"
-		      : "+r" (inc), "+m" (lock->tickets) : : "memory", "cc");
+	return tickets;
+}
+
+static __always_inline void __ticket_spin_lock(struct arch_spinlock *lock)
+{
+	register struct __raw_tickets inc;
+
+	inc = __ticket_spin_claim(lock);
 
 	for (;;) {
 		if (inc.head == inc.tail)
@@ -84,6 +98,7 @@ static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 out:	barrier();		/* make sure nothing creeps before the lock is taken */
 }
 
+#if (NR_CPUS < 256)
 static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned int tmp, new;
@@ -103,23 +118,6 @@ static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 	return tmp;
 }
 #else
-static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
-{
-	register struct __raw_tickets inc = { .tail = 1 };
-
-	asm volatile(LOCK_PREFIX "xaddl %0, %1\n\t"
-		     : "+r" (inc), "+m" (lock->tickets)
-		     : : "memory", "cc");
-
-	for (;;) {
-		if (inc.head == inc.tail)
-			goto out;
-		cpu_relax();
-		inc.head = ACCESS_ONCE(lock->tickets.head);
-	}
-out:	barrier();		/* make sure nothing creeps before the lock is taken */
-}
-
 static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
 	unsigned tmp;
--
1.7.3.4
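One detail worth spelling out: sizeof(lock->tickets.head) is a
compile-time constant, so the if/else in __ticket_spin_claim() is
resolved by the compiler and only one xadd is emitted; no runtime test
survives, same as the old #if (NR_CPUS < 256) split. A standalone
sketch of that constant-folding pattern, with an illustrative stand-in
type (not the kernel's):

#include <stdint.h>

typedef uint8_t ticket_head_t;	/* stand-in for the kernel's u8-or-u16 head */

static inline int ticket_width_bits(void)
{
	/* The condition is a compile-time constant, so the dead branch
	 * is discarded entirely at compile time. */
	if (sizeof(ticket_head_t) == sizeof(uint8_t))
		return 8;	/* corresponds to the xaddw path */
	else
		return 16;	/* corresponds to the xaddl path */
}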

