Subject: [PATCH 11/13] x86/ticketlock: only do kick after doing unlock
From: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>

We must release the lock before checking whether it has entered the
slowpath; otherwise there is a potential race in which a waiter puts
the lock into the slowpath after the unlocker has checked the slowpath
flag but before it has actually unlocked, and so is never kicked even
though the lock is free.

Signed-off-by: Srivatsa Vaddagiri <vatsa@linux.vnet.ibm.com>
Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
---
 arch/x86/include/asm/spinlock.h      |    7 +++----
 arch/x86/kernel/paravirt-spinlocks.c |   23 ++++++-----------------
 2 files changed, 9 insertions(+), 21 deletions(-)
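
To make the race concrete, here is a minimal userspace sketch of the
two unlock orderings. It is not the kernel code: all names below are
hypothetical stand-ins, the lock is packed into a single 32-bit word
(head ticket in the low half, tail in the high half), and C11 atomics
replace the kernel's primitives.

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Tickets advance by 2, so the low bit of the tail ticket is free to
 * serve as the "a waiter is blocking in the slowpath" flag. */
#define TICKET_LOCK_INC	2
#define SLOWPATH_FLAG	((uint32_t)1 << 16)	/* low bit of the tail */

static _Atomic uint32_t lock;	/* tail in bits 16-31, head in bits 0-15 */

static void kick(uint16_t head)
{
	/* stand-in for __ticket_unlock_kick(): wake the waiter whose
	 * ticket matches the new head */
	printf("kick waiter holding ticket %u\n", (unsigned)head);
}

static void unlock_buggy(void)
{
	/* Old ordering: check the flag, then release.  A waiter that
	 * sets the flag and blocks between the check and the release
	 * below is never kicked, even though the lock is now free. */
	if (!(atomic_load(&lock) & SLOWPATH_FLAG)) {
		atomic_fetch_add(&lock, TICKET_LOCK_INC);  /* head += 2 */
		return;
	}
	/* ... slowpath unlock and kick ... */
}

static void unlock_fixed(void)
{
	/* New ordering: release first, then check.  Any waiter that set
	 * the flag before this point is guaranteed to be observed. */
	atomic_fetch_add(&lock, TICKET_LOCK_INC);	/* head += 2 */
	if (atomic_load(&lock) & SLOWPATH_FLAG)
		kick((uint16_t)atomic_load(&lock));	/* simplified slowpath */
}

int main(void)
{
	/* take a ticket (tail += 2); head == old tail, so we own the lock */
	atomic_fetch_add(&lock, (uint32_t)TICKET_LOCK_INC << 16);
	unlock_fixed();		/* head catches up; flag clear, no kick */

	atomic_fetch_add(&lock, (uint32_t)TICKET_LOCK_INC << 16);
	unlock_buggy();		/* same result here, but with the racy window */
	return 0;
}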

diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
index 365d787..bf6dff4 100644
--- a/arch/x86/include/asm/spinlock.h
+++ b/arch/x86/include/asm/spinlock.h
@@ -43,7 +43,7 @@
 
 /* Only defined when CONFIG_PARAVIRT_SPINLOCKS defined, but may as
  * well leave the prototype always visible. */
-extern void __ticket_unlock_release_slowpath(struct arch_spinlock *lock);
+extern void __ticket_unlock_slowpath(struct arch_spinlock *lock);
 
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
 
@@ -154,10 +154,9 @@ static __always_inline void __ticket_unlock_release(arch_spinlock_t *lock)
 static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 
+	__ticket_unlock_release(lock);
 	if (unlikely(__ticket_in_slowpath(lock)))
-		__ticket_unlock_release_slowpath(lock);
-	else
-		__ticket_unlock_release(lock);
+		__ticket_unlock_slowpath(lock);
 }
 
 static inline int arch_spin_is_locked(arch_spinlock_t *lock)
diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
index 71b8557..674718f 100644
--- a/arch/x86/kernel/paravirt-spinlocks.c
+++ b/arch/x86/kernel/paravirt-spinlocks.c
@@ -22,32 +22,21 @@ EXPORT_SYMBOL(pv_lock_ops);
  * bits. However, we need to be careful about this because someone
  * may just be entering as we leave, and enter the slowpath.
  */
-void __ticket_unlock_release_slowpath(struct arch_spinlock *lock)
+void __ticket_unlock_slowpath(struct arch_spinlock *lock)
 {
 	struct arch_spinlock old, new;
 
 	BUILD_BUG_ON(((__ticket_t)NR_CPUS) != NR_CPUS);
 
 	old = ACCESS_ONCE(*lock);
-
 	new = old;
-	new.tickets.head += TICKET_LOCK_INC;
 
 	/* Clear the slowpath flag */
 	new.tickets.tail &= ~TICKET_SLOWPATH_FLAG;
+	if (new.tickets.head == new.tickets.tail)
+		cmpxchg(&lock->head_tail, old.head_tail, new.head_tail);
 
-	/*
-	 * If there's currently people waiting or someone snuck in
-	 * since we read the lock above, then do a normal unlock and
-	 * kick.  If we managed to unlock with no queued waiters, then
-	 * we can clear the slowpath flag.
-	 */
-	if (new.tickets.head != new.tickets.tail ||
-	    cmpxchg(&lock->head_tail,
-		    old.head_tail, new.head_tail) != old.head_tail) {
-		/* still people waiting */
-		__ticket_unlock_release(lock);
-		__ticket_unlock_kick(lock, new.tickets.head);
-	}
+	/* Wake up an appropriate waiter */
+	__ticket_unlock_kick(lock, new.tickets.head);
 }
-EXPORT_SYMBOL(__ticket_unlock_release_slowpath);
+EXPORT_SYMBOL(__ticket_unlock_slowpath);
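
For reference, the reworked __ticket_unlock_slowpath() maps onto the
same hypothetical model sketched after the diffstat above (it reuses
that sketch's lock word, flag macro and kick(); again an illustration,
not the kernel's types), replacing the simplified kick in
unlock_fixed(). The flag is cleared only when the head has caught up
with the flag-less tail, and a failed cmpxchg is harmless: it just
means a new waiter arrived, whom the unconditional kick covers.

static void unlock_slowpath_model(void)
{
	uint32_t old = atomic_load(&lock);
	uint32_t new = old & ~SLOWPATH_FLAG;	/* clear the slowpath flag */

	/* Nobody left waiting once the flag is masked off, i.e.
	 * head == tail?  Then try to clear the flag in memory.  If the
	 * cmpxchg fails, a waiter snuck in; leaving the flag set is
	 * safe because the kick below wakes the new owner anyway. */
	if ((uint16_t)new == (uint16_t)(new >> 16))
		atomic_compare_exchange_strong(&lock, &old, new);

	/* The lock was already released on the fastpath, so always wake
	 * whoever now holds the head ticket. */
	kick((uint16_t)new);
}
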
-- 
1.7.6

