From: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Date: 2012-03-21
Subject: [PATCH RFC V6 3/11] x86/ticketlock: collapse a layer of functions

Now that the paravirtualization layer doesn't exist at the spinlock
level any more, we can collapse the __ticket_ functions into the arch_
functions.
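
[Editor's note: the shape of the change, for reference. Each arch_* entry
point was a one-line wrapper around the matching __ticket_* implementation,
so renaming the implementations lets the wrappers go away. A minimal
before/after sketch of the pattern, with the lock bodies elided:

    /* Before: the implementation plus a trivial pass-through wrapper. */
    static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
    {
            /* ... ticket acquisition ... */
    }

    static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
    {
            __ticket_spin_lock(lock);
    }

    /* After: the same body, renamed to be the arch_ entry point. */
    static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
    {
            /* ... ticket acquisition ... */
    }
]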

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Signed-off-by: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
---
 arch/x86/include/asm/spinlock.h |   35 +++++------------------------------
 1 file changed, 5 insertions(+), 30 deletions(-)

diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 7e66b85..f6442f4 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -79,7 +79,7 @@ static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
  * in the high part, because a wide xadd increment of the low part would carry
  * up and contaminate the high part.
  */
-static __always_inline void __ticket_spin_lock(struct arch_spinlock *lock)
+static __always_inline void arch_spin_lock(struct arch_spinlock *lock)
 {
 	register struct __raw_tickets inc = { .tail = 1 };

@@ -99,7 +99,7 @@ static __always_inline void __ticket_spin_lock(struct arch_spinlock *lock)
 out:	barrier();	/* make sure nothing creeps before the lock is taken */
 }

-static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	arch_spinlock_t old, new;

@@ -113,7 +113,7 @@ static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 	return cmpxchg(&lock->head_tail, old.head_tail, new.head_tail) == old.head_tail;
 }

-static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__ticket_t next = lock->tickets.head + 1;

@@ -121,46 +121,21 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 	__ticket_unlock_kick(lock, next);
 }

-static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
 	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

 	return !!(tmp.tail ^ tmp.head);
 }

-static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
 	struct __raw_tickets tmp = ACCESS_ONCE(lock->tickets);

 	return ((tmp.tail - tmp.head) & TICKET_MASK) > 1;
 }
-
-static inline int arch_spin_is_locked(arch_spinlock_t *lock)
-{
-	return __ticket_spin_is_locked(lock);
-}
-
-static inline int arch_spin_is_contended(arch_spinlock_t *lock)
-{
-	return __ticket_spin_is_contended(lock);
-}
 #define arch_spin_is_contended	arch_spin_is_contended

-static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
-{
-	__ticket_spin_lock(lock);
-}
-
-static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
-{
-	return __ticket_spin_trylock(lock);
-}
-
-static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
-{
-	__ticket_spin_unlock(lock);
-}
-
 static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
 						  unsigned long flags)
 {
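
[Editor's note: a word on the ticket arithmetic the patch carries over. The
lock is free when head == tail, held when they differ (hence the tail ^ head
test), and contended when more than one ticket separates them. Below is a
self-contained model of the two checks; is_locked() and is_contended() are
hypothetical stand-ins for arch_spin_is_locked()/arch_spin_is_contended(),
and 16-bit tickets are assumed for illustration (the kernel's __ticket_t
width actually depends on NR_CPUS):

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for the kernel's struct __raw_tickets. */
    struct raw_tickets {
            uint16_t head;  /* ticket currently being served */
            uint16_t tail;  /* next ticket to be handed out */
    };

    static int is_locked(struct raw_tickets t)
    {
            return !!(t.tail ^ t.head);     /* nonzero iff someone holds a ticket */
    }

    static int is_contended(struct raw_tickets t)
    {
            /* more than one ticket outstanding: a holder plus at least one waiter;
             * the uint16_t cast truncates like the kernel's & TICKET_MASK */
            return (uint16_t)(t.tail - t.head) > 1;
    }

    int main(void)
    {
            struct raw_tickets free_lck = { .head = 3, .tail = 3 };
            struct raw_tickets held     = { .head = 3, .tail = 4 };
            struct raw_tickets busy     = { .head = 3, .tail = 6 };

            printf("free: locked=%d contended=%d\n", is_locked(free_lck), is_contended(free_lck));
            printf("held: locked=%d contended=%d\n", is_locked(held), is_contended(held));
            printf("busy: locked=%d contended=%d\n", is_locked(busy), is_contended(busy));
            return 0;
    }

This prints locked=0/contended=0 for the free lock, locked=1/contended=0 for
a lock with a single holder, and locked=1/contended=1 once waiters queue up.]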

