From: Waiman Long <Waiman.Long@hp.com>
Date: Mon, 6 Apr 2015
Subject: [PATCH v15 04/15] qspinlock: Extract out code snippets for the next patch
This is a preparatory patch that extracts the following two code
snippets ahead of the next performance-optimization patch (a
user-space sketch of both helpers follows this list):

1) the logic for exchanging the new and previous tail code words,
   into a new xchg_tail() function;
2) the logic for clearing the pending bit and setting the locked bit,
   into a new clear_pending_set_locked() function.
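
As a concrete illustration of the transitions above, here is a minimal
user-space model of the two helpers using C11 atomics. The model_*
names and Q_* constants are hypothetical, for illustration only; the
layout they assume (locked byte in bits 0-7, pending bit 8, tail code
word in bits 9-31) follows the generic qspinlock_types.h definitions,
while the kernel versions in the diff below operate on atomic_t:

#include <stdatomic.h>
#include <stdint.h>

/* Assumed layout: bits 0-7 locked byte, bit 8 pending, bits 9-31 tail. */
#define Q_LOCKED_MASK		0x000000ffU
#define Q_PENDING_MASK		0x00000100U
#define Q_LOCKED_PENDING_MASK	(Q_LOCKED_MASK | Q_PENDING_MASK)
#define Q_TAIL_MASK		(~Q_LOCKED_PENDING_MASK)
#define Q_LOCKED_VAL		1U
#define Q_PENDING_VAL		(1U << 8)

struct model_qspinlock {
	_Atomic uint32_t val;
};

/* *,1,0 -> *,0,1: one atomic add clears pending and sets locked. */
static inline void model_clear_pending_set_locked(struct model_qspinlock *lock)
{
	atomic_fetch_add(&lock->val, (uint32_t)(Q_LOCKED_VAL - Q_PENDING_VAL));
}

/*
 * p,*,* -> n,*,*: install the new tail code word while preserving the
 * locked and pending bits; the caller tests (old & Q_TAIL_MASK) to see
 * whether a predecessor was already queued.
 */
static inline uint32_t model_xchg_tail(struct model_qspinlock *lock, uint32_t tail)
{
	uint32_t old = atomic_load(&lock->val);

	for (;;) {
		uint32_t new = (old & Q_LOCKED_PENDING_MASK) | tail;

		/* On failure, 'old' is refreshed with the current value. */
		if (atomic_compare_exchange_weak(&lock->val, &old, new))
			return old;
	}
}

For example, starting from a word holding only the pending bit
(0x00000100, i.e. *,1,0), the first helper leaves 0x00000001 (*,0,1).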

This patch also simplifies the trylock operation before queuing by
calling queue_spin_trylock() directly.
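
For reference, queue_spin_trylock() is assumed here to take its generic
asm-generic/qspinlock.h form: a cmpxchg of the whole lock word from 0
(no tail, no pending, unlocked) to _Q_LOCKED_VAL, roughly:

/* Sketch of the generic trylock; assumed form, not part of this patch. */
static __always_inline int queue_spin_trylock(struct qspinlock *lock)
{
	if (!atomic_read(&lock->val) &&
	    (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) == 0))
		return 1;
	return 0;
}

so the extra pre-queue attempt costs one read plus at most one cmpxchg,
instead of the open-coded trylock-or-xchg loop it replaces.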

Signed-off-by: Waiman Long <Waiman.Long@hp.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 include/asm-generic/qspinlock_types.h |    2 +
 kernel/locking/qspinlock.c            |   79 ++++++++++++++++++++-------------
 2 files changed, 50 insertions(+), 31 deletions(-)

diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h
index 9c3f5c2..ef36613 100644
--- a/include/asm-generic/qspinlock_types.h
+++ b/include/asm-generic/qspinlock_types.h
@@ -58,6 +58,8 @@ typedef struct qspinlock {
 #define _Q_TAIL_CPU_BITS	(32 - _Q_TAIL_CPU_OFFSET)
 #define _Q_TAIL_CPU_MASK	_Q_SET_MASK(TAIL_CPU)
 
+#define _Q_TAIL_MASK		(_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)
+
 #define _Q_LOCKED_VAL		(1U << _Q_LOCKED_OFFSET)
 #define _Q_PENDING_VAL		(1U << _Q_PENDING_OFFSET)
 
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 0351f78..11f6ad9 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -97,6 +97,42 @@ static inline struct mcs_spinlock *decode_tail(u32 tail)
 #define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)
 
 /**
+ * clear_pending_set_locked - take ownership and clear the pending bit.
+ * @lock: Pointer to queue spinlock structure
+ *
+ * *,1,0 -> *,0,1
+ */
+static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
+{
+	atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
+}
+
+/**
+ * xchg_tail - Put in the new queue tail code word & retrieve previous one
+ * @lock : Pointer to queue spinlock structure
+ * @tail : The new queue tail code word
+ * Return: The previous queue tail code word
+ *
+ * xchg(lock, tail)
+ *
+ * p,*,* -> n,*,* ; prev = xchg(lock, node)
+ */
+static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
+{
+	u32 old, new, val = atomic_read(&lock->val);
+
+	for (;;) {
+		new = (val & _Q_LOCKED_PENDING_MASK) | tail;
+		old = atomic_cmpxchg(&lock->val, val, new);
+		if (old == val)
+			break;
+
+		val = old;
+	}
+	return old;
+}
+
+/**
  * queue_spin_lock_slowpath - acquire the queue spinlock
  * @lock: Pointer to queue spinlock structure
  * @val: Current value of the queue spinlock 32-bit word
@@ -178,15 +214,7 @@ void queue_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	 *
 	 * *,1,0 -> *,0,1
 	 */
-	for (;;) {
-		new = (val & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
-
-		old = atomic_cmpxchg(&lock->val, val, new);
-		if (old == val)
-			break;
-
-		val = old;
-	}
+	clear_pending_set_locked(lock);
 	return;
 
 	/*
@@ -203,37 +231,26 @@ queue:
 	node->next = NULL;
 
 	/*
-	 * We have already touched the queueing cacheline; don't bother with
-	 * pending stuff.
-	 *
-	 * trylock || xchg(lock, node)
-	 *
-	 * 0,0,0 -> 0,0,1 ; no tail, not locked -> no tail, locked.
-	 * p,y,x -> n,y,x ; tail was p -> tail is n; preserving locked.
+	 * We touched a (possibly) cold cacheline in the per-cpu queue node;
+	 * attempt the trylock once more in the hope someone let go while we
+	 * weren't watching.
 	 */
-	for (;;) {
-		new = _Q_LOCKED_VAL;
-		if (val)
-			new = tail | (val & _Q_LOCKED_PENDING_MASK);
-
-		old = atomic_cmpxchg(&lock->val, val, new);
-		if (old == val)
-			break;
-
-		val = old;
-	}
+	if (queue_spin_trylock(lock))
+		goto release;
 
 	/*
-	 * we won the trylock; forget about queueing.
+	 * We have already touched the queueing cacheline; don't bother with
+	 * pending stuff.
+	 *
+	 * p,*,* -> n,*,*
 	 */
-	if (new == _Q_LOCKED_VAL)
-		goto release;
+	old = xchg_tail(lock, tail);
 
 	/*
 	 * if there was a previous node; link it and wait until reaching the
 	 * head of the waitqueue.
 	 */
-	if (old & ~_Q_LOCKED_PENDING_MASK) {
+	if (old & _Q_TAIL_MASK) {
 		prev = decode_tail(old);
 		WRITE_ONCE(prev->next, node);
 
--
1.7.1

