    Subject: [PATCH 3/8] x86/ticketlock: Use C for __ticket_spin_unlock
    From: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>

    If we don't need to use a locked inc for unlock, implement the unlock in C
    instead of inline asm.

    Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
    ---
    arch/x86/include/asm/spinlock.h | 33 ++++++++++++++++++---------------
    1 files changed, 18 insertions(+), 15 deletions(-)
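
    For context, here is a minimal standalone sketch of the idea behind this
    patch (not the kernel's code): releasing a ticket lock only has to bump the
    "head" ("now serving") counter, and on x86 a plain increment bracketed by
    compiler barriers is enough, with the locked inc kept only for the
    PPro-errata/OOSTORE case. The toy_ticketlock/toy_lock/toy_unlock/
    compiler_barrier names are illustrative, and the sketch assumes userspace
    C built with GCC/Clang __atomic builtins on x86.

    /*
     * Illustrative only -- NOT the kernel's implementation.
     */
    #include <stdint.h>

    struct toy_ticketlock {
            union {
                    uint16_t slock;                 /* whole lock word */
                    struct {
                            uint8_t head;           /* "now serving" */
                            uint8_t tail;           /* next ticket to hand out */
                    } tickets;
            };
    };

    /* compiler-only barrier, same idea as the kernel's barrier() */
    #define compiler_barrier()      asm volatile("" ::: "memory")

    static inline void toy_lock(struct toy_ticketlock *lock)
    {
            /* grab a ticket: atomic fetch-and-increment of tail */
            uint8_t me = __atomic_fetch_add(&lock->tickets.tail, 1,
                                            __ATOMIC_ACQUIRE);

            /* spin until our ticket is the one being served */
            while (__atomic_load_n(&lock->tickets.head, __ATOMIC_ACQUIRE) != me)
                    asm volatile("pause");
    }

    static inline void toy_unlock(struct toy_ticketlock *lock)
    {
            compiler_barrier();     /* keep the critical section above the store */
            lock->tickets.head++;   /* plain increment, as in the !PPro case */
            compiler_barrier();     /* keep later accesses out of the region */
    }

    The two barriers do different jobs: the first keeps critical-section
    accesses from being compiled to after the head store, the second keeps
    later accesses from being hoisted into the critical section. The hardware
    side is covered by x86's store ordering, which is why no LOCK prefix is
    needed outside the PPro/OOSTORE case. (A strictly conforming C11 version
    would use an __ATOMIC_RELEASE store instead of the plain increment; the
    plain store mirrors what the kernel relies on here.)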

    diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
    index f48a6e3..704b0c3 100644
    --- a/arch/x86/include/asm/spinlock.h
    +++ b/arch/x86/include/asm/spinlock.h
    @@ -33,9 +33,21 @@
      * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
      * (PPro errata 66, 92)
      */
    -# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
    +static __always_inline void __ticket_unlock_release(struct arch_spinlock *lock)
    +{
    +        if (sizeof(lock->tickets.head) == sizeof(u8))
    +                asm (LOCK_PREFIX "incb %0"
    +                     : "+m" (lock->tickets.head) : : "memory");
    +        else
    +                asm (LOCK_PREFIX "incw %0"
    +                     : "+m" (lock->tickets.head) : : "memory");
    +
    +}
     #else
    -# define UNLOCK_LOCK_PREFIX
    +static __always_inline void __ticket_unlock_release(struct arch_spinlock *lock)
    +{
    +        lock->tickets.head++;
    +}
     #endif
     
     /*
    @@ -93,14 +105,6 @@ static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
     
             return tmp;
     }
    -
    -static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
    -{
    -        asm volatile(UNLOCK_LOCK_PREFIX "incb %0"
    -                     : "+m" (lock->slock)
    -                     :
    -                     : "memory", "cc");
    -}
     #else
     static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
     {
    @@ -144,15 +148,14 @@ static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
     
             return tmp;
     }
    +#endif
     
     static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
     {
    -        asm volatile(UNLOCK_LOCK_PREFIX "incw %0"
    -                     : "+m" (lock->slock)
    -                     :
    -                     : "memory", "cc");
    +        barrier();              /* prevent reordering out of locked region */
    +        __ticket_unlock_release(lock);
    +        barrier();              /* prevent reordering into locked region */
     }
    -#endif
     
     static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
     {
    --
    1.7.5.4

