    Subject: [PATCH v3 08/12] locking/ww_mutex: Wake at most one waiter for back off when acquiring the lock
    From: Nicolai Hähnle <Nicolai.Haehnle@amd.com>

    The wait list is sorted by stamp order, and the only waiting task that may
    have to back off is the first waiter with a context.

    The regular slow path does not have to wake any other tasks at all, since
    all other waiters that would have to back off were either woken up when
    the acquiring task added itself to the wait list, or detected the
    condition before adding themselves.
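
    As an aside, not part of the patch: "backing off" is the caller-side half
    of the wait/die scheme. Below is a rough sketch of the usual ww_mutex
    locking pattern, along the lines of
    Documentation/locking/ww-mutex-design.txt; the class and function names
    (demo_ww_class, demo_lock_pair) are made up for illustration.

    #include <linux/ww_mutex.h>

    static DEFINE_WW_CLASS(demo_ww_class);	/* hypothetical lock class */

    /* Acquire two ww_mutexes of the same class without risking deadlock. */
    static void demo_lock_pair(struct ww_mutex *a, struct ww_mutex *b)
    {
    	struct ww_acquire_ctx ctx;

    	ww_acquire_init(&ctx, &demo_ww_class);

    	if (ww_mutex_lock(a, &ctx))	/* cannot fail: no locks held yet */
    		return;
    retry:
    	if (ww_mutex_lock(b, &ctx) == -EDEADLK) {
    		/*
    		 * We lost the stamp comparison: release what we hold so the
    		 * older transaction can make progress, wait for the
    		 * contended lock, then retry the remaining one.
    		 */
    		ww_mutex_unlock(a);
    		ww_mutex_lock_slow(b, &ctx);
    		swap(a, b);
    		goto retry;
    	}
    	ww_acquire_done(&ctx);

    	/* ... use the objects protected by a and b, then ... */

    	ww_mutex_unlock(a);
    	ww_mutex_unlock(b);
    	ww_acquire_fini(&ctx);
    }

    A waiter woken for back off here is one whose ww_mutex_lock() call returns
    -EDEADLK once it runs again, i.e. one that takes the back-off branch above.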

    Median timings from a contention-heavy GPU workload:

    Without this series:
    real 0m59.900s
    user 0m7.516s
    sys 2m16.076s

    With changes up to and including this patch:
    real 0m52.946s
    user 0m7.272s
    sys 1m55.964s

    Cc: Peter Zijlstra <peterz@infradead.org>
    Cc: Ingo Molnar <mingo@redhat.com>
    Cc: Maarten Lankhorst <dev@mblankhorst.nl>
    Cc: Daniel Vetter <daniel@ffwll.ch>
    Cc: Chris Wilson <chris@chris-wilson.co.uk>
    Cc: dri-devel@lists.freedesktop.org
    Signed-off-by: Nicolai Hähnle <Nicolai.Haehnle@amd.com>
    ---
    kernel/locking/mutex.c | 58 +++++++++++++++++++++++++++++++++-----------------
    1 file changed, 39 insertions(+), 19 deletions(-)

    diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
    index ee4d152..c3f70dd 100644
    --- a/kernel/locking/mutex.c
    +++ b/kernel/locking/mutex.c
    @@ -285,6 +285,35 @@ __ww_ctx_stamp_after(struct ww_acquire_ctx *a, struct ww_acquire_ctx *b)
     }
     
     /*
    + * Wake up any waiters that may have to back off when the lock is held by the
    + * given context.
    + *
    + * Due to the invariants on the wait list, this can only affect the first
    + * waiter with a context.
    + *
    + * Must be called with wait_lock held. The current task must not be on the
    + * wait list.
    + */
    +static void __sched
    +__ww_mutex_wakeup_for_backoff(struct mutex *lock, struct ww_acquire_ctx *ww_ctx)
    +{
    +	struct mutex_waiter *cur;
    +
    +	list_for_each_entry(cur, &lock->wait_list, list) {
    +		if (!cur->ww_ctx)
    +			continue;
    +
    +		if (cur->ww_ctx->acquired > 0 &&
    +		    __ww_ctx_stamp_after(cur->ww_ctx, ww_ctx)) {
    +			debug_mutex_wake_waiter(lock, cur);
    +			wake_up_process(cur->task);
    +		}
    +
    +		break;
    +	}
    +}
    +
    +/*
      * After acquiring lock with fastpath or when we lost out in contested
      * slowpath, set ctx and wake up any waiters so they can recheck.
      */
    @@ -293,7 +322,6 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock,
     			      struct ww_acquire_ctx *ctx)
     {
     	unsigned long flags;
    -	struct mutex_waiter *cur;
     
     	ww_mutex_lock_acquired(lock, ctx);
     
    @@ -319,16 +347,15 @@ ww_mutex_set_context_fastpath(struct ww_mutex *lock,
     	 * so they can see the new lock->ctx.
     	 */
     	spin_lock_mutex(&lock->base.wait_lock, flags);
    -	list_for_each_entry(cur, &lock->base.wait_list, list) {
    -		debug_mutex_wake_waiter(&lock->base, cur);
    -		wake_up_process(cur->task);
    -	}
    +	__ww_mutex_wakeup_for_backoff(&lock->base, ctx);
     	spin_unlock_mutex(&lock->base.wait_lock, flags);
     }
     
     /*
    - * After acquiring lock in the slowpath set ctx and wake up any
    - * waiters so they can recheck.
    + * After acquiring lock in the slowpath set ctx.
    + *
    + * Unlike for the fast path, the caller ensures that waiters are woken up where
    + * necessary.
      *
      * Callers must hold the mutex wait_lock.
      */
    @@ -336,19 +363,8 @@ static __always_inline void
     ww_mutex_set_context_slowpath(struct ww_mutex *lock,
     			      struct ww_acquire_ctx *ctx)
     {
    -	struct mutex_waiter *cur;
    -
     	ww_mutex_lock_acquired(lock, ctx);
     	lock->ctx = ctx;
    -
    -	/*
    -	 * Give any possible sleeping processes the chance to wake up,
    -	 * so they can recheck if they have to back off.
    -	 */
    -	list_for_each_entry(cur, &lock->base.wait_list, list) {
    -		debug_mutex_wake_waiter(&lock->base, cur);
    -		wake_up_process(cur->task);
    -	}
     }
     
     #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
    @@ -726,8 +742,12 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
     	/*
     	 * After waiting to acquire the wait_lock, try again.
     	 */
    -	if (__mutex_trylock(lock, false))
    +	if (__mutex_trylock(lock, false)) {
    +		if (use_ww_ctx && ww_ctx)
    +			__ww_mutex_wakeup_for_backoff(lock, ww_ctx);
    +
     		goto skip_wait;
    +	}
     
     	debug_mutex_lock_common(lock, &waiter);
     	debug_mutex_add_waiter(lock, &waiter, task);
    --
    2.7.4