From: Waiman Long <Waiman.Long@hpe.com>
Subject: [PATCH v6 6/6] locking/pvqspinlock: Queue node adaptive spinning
Date: 11 Sep 2015
In an overcommitted guest where some vCPUs have to be halted to make
forward progress in other areas, it is highly likely that a vCPU later
in the spinlock queue will be spinning while the ones earlier in the
queue have already been halted. The spinning of the later vCPUs is then
just a waste of precious CPU cycles because they are not going to
get the lock any time soon; the earlier ones have to be woken up and
take their turn first.

This patch implements an adaptive spinning mechanism (sketched below)
under which a queued vCPU will call pv_wait() only if both of the
following conditions are true:

1) the vCPU has not been halted before; and
2) the vCPU owning the previous queue node is not running.
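
Purely as an illustration (not part of the patch), the decision made in
the spinning loop boils down to a predicate like the one below. The name
should_wait_early() is hypothetical; PV_PREV_CHECK_MASK, SPIN_THRESHOLD,
struct pv_node, vcpu_running and READ_ONCE() are the identifiers actually
used by the code in this patch, and waitcnt/loop are the counters kept by
pv_wait_node():

/*
 * Hypothetical helper, equivalent in spirit to pv_wait_early() plus the
 * !waitcnt check in pv_wait_node() below.  waitcnt counts how many times
 * this vCPU has already been halted; loop is the remaining spin budget,
 * so the previous node's state is only sampled once every
 * PV_PREV_CHECK_MASK + 1 iterations to limit extra cacheline traffic.
 */
static inline bool
should_wait_early(struct pv_node *prev, int waitcnt, int loop)
{
	if (waitcnt)				/* has been halted before */
		return false;
	if (loop & PV_PREV_CHECK_MASK)		/* not at a sampling point */
		return false;
	return READ_ONCE(prev->state) != vcpu_running;
}

When the predicate is true, the vCPU gives up the rest of its
SPIN_THRESHOLD spin iterations and goes to pv_wait() right away.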

Linux kernel builds were run in a KVM guest on an 8-socket, 4
cores/socket Westmere-EX system and on a 4-socket, 8 cores/socket
Haswell-EX system. Both systems were configured to have 32 physical
CPUs. The kernel build times before and after the patch were:

                        Westmere                Haswell
  Patch           32 vCPUs    48 vCPUs    32 vCPUs    48 vCPUs
  -----           --------    --------    --------    --------
  Before patch     3m02.3s     5m00.2s     1m43.7s     3m03.5s
  After patch      3m03.0s     4m37.5s     1m43.0s     2m47.2s

For 32 vCPUs, this patch doesn't cause any noticeable change in
performance. For 48 vCPUs (over-committed), there is about an 8%
performance improvement.
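
For reference, the 48-vCPU numbers work out to (300.2s - 277.5s)/300.2s,
or about 7.6%, on Westmere and (183.5s - 167.2s)/183.5s, or about 8.9%,
on Haswell, hence the roughly 8% figure.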

    Signed-off-by: Waiman Long <Waiman.Long@hpe.com>
    ---
    kernel/locking/qspinlock.c | 5 ++-
    kernel/locking/qspinlock_paravirt.h | 52 +++++++++++++++++++++++++++++++++-
    2 files changed, 53 insertions(+), 4 deletions(-)

diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
index 1be1aab..319e823 100644
--- a/kernel/locking/qspinlock.c
+++ b/kernel/locking/qspinlock.c
@@ -247,7 +247,8 @@ static __always_inline void set_locked(struct qspinlock *lock)
  */
 
 static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
-static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_wait_node(struct mcs_spinlock *node,
+					   struct mcs_spinlock *prev) { }
 static __always_inline bool __pv_wait_head_and_lock(struct qspinlock *lock,
 						     struct mcs_spinlock *node,
 						     u32 tail)
@@ -398,7 +399,7 @@ queue:
 		prev = decode_tail(old);
 		WRITE_ONCE(prev->next, node);
 
-		pv_wait_node(node);
+		pv_wait_node(node, prev);
 		arch_mcs_spin_lock_contended(&node->locked);
 	}

diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index 9fd49a2..57ed38b 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -23,6 +23,22 @@
 #define _Q_SLOW_VAL	(3U << _Q_LOCKED_OFFSET)
 
 /*
+ * Queue Node Adaptive Spinning
+ *
+ * A queue node vCPU will stop spinning if the following conditions are true:
+ * 1) vCPU in the previous node is not running
+ * 2) current vCPU has not been halted before
+ *
+ * The one lock stealing attempt allowed at slowpath entry mitigates the
+ * slight slowdown for a non-overcommitted guest with this aggressive
+ * wait-early mechanism.
+ *
+ * The status of the previous node will be checked at a fixed interval
+ * controlled by PV_PREV_CHECK_MASK.
+ */
+#define PV_PREV_CHECK_MASK	0xff
+
+/*
  * Queue node uses: vcpu_running & vcpu_halted.
  * Queue head uses: vcpu_running & vcpu_hashed.
  */
@@ -71,6 +87,7 @@ enum pv_qlock_stat {
 	pvstat_wait_head,
 	pvstat_wait_node,
 	pvstat_wait_again,
+	pvstat_wait_early,
 	pvstat_kick_wait,
 	pvstat_kick_unlock,
 	pvstat_spurious,
@@ -90,6 +107,7 @@ static const char * const stat_fsnames[pvstat_num] = {
 	[pvstat_wait_head]   = "wait_head_count",
 	[pvstat_wait_node]   = "wait_node_count",
 	[pvstat_wait_again]  = "wait_again_count",
+	[pvstat_wait_early]  = "wait_early_count",
 	[pvstat_kick_wait]   = "kick_wait_count",
 	[pvstat_kick_unlock] = "kick_unlock_count",
 	[pvstat_spurious]    = "spurious_wakeup",
@@ -328,6 +346,20 @@ static struct pv_node *pv_unhash(struct qspinlock *lock)
 }
 
 /*
+ * Return true if it is time to check the previous node which is not
+ * in a running state.
+ */
+static inline bool
+pv_wait_early(struct pv_node *node, struct pv_node *prev, int loop)
+{
+
+	if ((loop & PV_PREV_CHECK_MASK) != 0)
+		return false;
+
+	return READ_ONCE(prev->state) != vcpu_running;
+}
+
+/*
  * Initialize the PV part of the mcs_spinlock node.
  */
 static void pv_init_node(struct mcs_spinlock *node)
@@ -345,16 +377,25 @@ static void pv_init_node(struct mcs_spinlock *node)
  * pv_kick_node() is used to set _Q_SLOW_VAL and fill in hash table on its
  * behalf.
  */
-static void pv_wait_node(struct mcs_spinlock *node)
+static void pv_wait_node(struct mcs_spinlock *node, struct mcs_spinlock *prev)
 {
 	struct pv_node *pn = (struct pv_node *)node;
+	struct pv_node *pp = (struct pv_node *)prev;
 	int waitcnt = 0;
 	int loop;
+	bool wait_early;
 
 	for (;; waitcnt++) {
-		for (loop = SPIN_THRESHOLD; loop; loop--) {
+		for (wait_early = false, loop = SPIN_THRESHOLD; loop; loop--) {
 			if (READ_ONCE(node->locked))
 				return;
+			/*
+			 * Wait early only if it has not been halted before.
+			 */
+			if (!waitcnt && pv_wait_early(pn, pp, loop)) {
+				wait_early = true;
+				break;
+			}
 			cpu_relax();
 		}

@@ -457,6 +498,12 @@ static int pv_wait_head_and_lock(struct qspinlock *lock,
 		lp = (struct qspinlock **)1;
 
 	for (;; waitcnt++) {
+		/*
+		 * Set correct vCPU state to be used by queue node wait-early
+		 * mechanism.
+		 */
+		WRITE_ONCE(pn->state, vcpu_running);
+
 		for (loop = SPIN_THRESHOLD; loop; loop--) {
 			/*
 			 * Try to acquire the lock when it is free.
@@ -492,6 +539,7 @@ static int pv_wait_head_and_lock(struct qspinlock *lock,
 				goto gotlock;
 			}
 		}
+		WRITE_ONCE(pn->state, vcpu_halted);
 		pvstat_inc(pvstat_wait_head);
 		if (waitcnt)
 			pvstat_inc(pvstat_wait_again);
-- 
1.7.1

