Subject: [PATCH v5 1/3] locking/mutex: Add waiter parameter to mutex_optimistic_spin()
This patch adds a new waiter parameter to the mutex_optimistic_spin()
function to prepare it for use by a waiter-spinner that does not need to
go into the OSQ, since there can be only one waiter-spinner and it is the
head of the waiting queue.
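
Not part of the patch itself, just for illustration: the heart of the change
is mutex_try_to_acquire() storing -1 instead of 0 when called by a
waiter-spinner, per the comment added below. A rough user-space model of
that cmpxchg, using C11 atomics and made-up names purely as a sketch, would
be:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct mock_mutex {
        /* 1: unlocked, 0: locked, -1: locked and waiters may be queued */
        atomic_int count;
};

static bool mock_try_to_acquire(struct mock_mutex *lock, int waiter)
{
        int unlocked = 1;

        /* Same shape as the kernel's cmpxchg: 1 -> (waiter ? -1 : 0) */
        return atomic_compare_exchange_strong(&lock->count, &unlocked,
                                              waiter ? -1 : 0);
}

int main(void)
{
        struct mock_mutex m = { .count = 1 };

        /* Regular (OSQ) spinner: a successful acquire takes count 1 -> 0 */
        printf("regular spinner: %d, count = %d\n",
               mock_try_to_acquire(&m, 0), atomic_load(&m.count));

        atomic_store(&m.count, 1);      /* "unlock" for the second example */

        /* Waiter-spinner: a successful acquire takes count 1 -> -1 instead */
        printf("waiter-spinner:  %d, count = %d\n",
               mock_try_to_acquire(&m, 1), atomic_load(&m.count));

        return 0;
}

Built with gcc -std=c11, this prints count = 0 after the regular-spinner
acquire and count = -1 after the waiter-spinner acquire, which is the
convention the diff below relies on.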

Signed-off-by: Waiman Long <Waiman.Long@hp.com>
---
kernel/locking/mutex.c | 62 ++++++++++++++++++++++++++++++++---------------
1 files changed, 42 insertions(+), 20 deletions(-)

diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index a70b90d..3bcbbd1 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -273,11 +273,16 @@ static inline int mutex_can_spin_on_owner(struct mutex *lock)

/*
* Atomically try to take the lock when it is available
+ *
+ * For waiter-spinner, the count needs to be set to -1 first which will be
+ * cleared to 0 later on if the list becomes empty. For regular spinner,
+ * the count will be set to 0 as the woken waiter will set it to -1,
+ * if necessary.
*/
-static inline bool mutex_try_to_acquire(struct mutex *lock)
+static inline bool mutex_try_to_acquire(struct mutex *lock, int waiter)
{
return !mutex_is_locked(lock) &&
- (atomic_cmpxchg_acquire(&lock->count, 1, 0) == 1);
+ (atomic_cmpxchg_acquire(&lock->count, 1, waiter ? -1 : 0) == 1);
}

/*
@@ -302,22 +307,37 @@ static inline bool mutex_try_to_acquire(struct mutex *lock)
*
* Returns true when the lock was taken, otherwise false, indicating
* that we need to jump to the slowpath and sleep.
+ *
+ * The waiter flag is set to true if the spinner is a waiter in the wait
+ * queue. The waiter-spinner will spin on the lock directly and concurrently
+ * with the spinner at the head of the OSQ, if present.
*/
static bool mutex_optimistic_spin(struct mutex *lock,
- struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
+ struct ww_acquire_ctx *ww_ctx,
+ const bool use_ww_ctx, int waiter)
{
struct task_struct *task = current;
+ bool acquired = false;

- if (!mutex_can_spin_on_owner(lock))
- goto done;
+ if (!waiter) {
+ /*
+ * The purpose of the mutex_can_spin_on_owner() function is
+ * to eliminate the overhead of osq_lock() and osq_unlock()
+ * in case spinning isn't possible. As a waiter-spinner
+ * is not going to take OSQ lock anyway, there is no need
+ * to call mutex_can_spin_on_owner().
+ */
+ if (!mutex_can_spin_on_owner(lock))
+ goto done;

- /*
- * In order to avoid a stampede of mutex spinners trying to
- * acquire the mutex all at once, the spinners need to take a
- * MCS (queued) lock first before spinning on the owner field.
- */
- if (!osq_lock(&lock->osq))
- goto done;
+ /*
+ * In order to avoid a stampede of mutex spinners trying to
+ * acquire the mutex all at once, the spinners need to take a
+ * MCS (queued) lock first before spinning on the owner field.
+ */
+ if (!osq_lock(&lock->osq))
+ goto done;
+ }

while (true) {
struct task_struct *owner;
@@ -347,7 +367,7 @@ static bool mutex_optimistic_spin(struct mutex *lock,
break;

/* Try to acquire the mutex if it is unlocked. */
- if (mutex_try_to_acquire(lock)) {
+ if (mutex_try_to_acquire(lock, waiter)) {
lock_acquired(&lock->dep_map, ip);

if (use_ww_ctx) {
@@ -358,8 +378,8 @@ static bool mutex_optimistic_spin(struct mutex *lock,
}

mutex_set_owner(lock);
- osq_unlock(&lock->osq);
- return true;
+ acquired = true;
+ break;
}

/*
@@ -380,14 +400,15 @@ static bool mutex_optimistic_spin(struct mutex *lock,
cpu_relax_lowlatency();
}

- osq_unlock(&lock->osq);
+ if (!waiter)
+ osq_unlock(&lock->osq);
done:
/*
* If we fell out of the spin path because of need_resched(),
* reschedule now, before we try-lock the mutex. This avoids getting
* scheduled out right after we obtained the mutex.
*/
- if (need_resched()) {
+ if (!acquired && need_resched()) {
/*
* We _should_ have TASK_RUNNING here, but just in case
* we do not, make it so, otherwise we might get stuck.
@@ -396,11 +417,12 @@ done:
schedule_preempt_disabled();
}

- return false;
+ return acquired;
}
#else
static bool mutex_optimistic_spin(struct mutex *lock,
- struct ww_acquire_ctx *ww_ctx, const bool use_ww_ctx)
+ struct ww_acquire_ctx *ww_ctx,
+ const bool use_ww_ctx, int waiter)
{
return false;
}
@@ -520,7 +542,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
preempt_disable();
mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

- if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx)) {
+ if (mutex_optimistic_spin(lock, ww_ctx, use_ww_ctx, false)) {
/* got the lock, yay! */
preempt_enable();
return 0;
--
1.7.1
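
The comment added to mutex_optimistic_spin() above describes the waiter flag
as marking a spinner that already sits at the head of the wait queue and
therefore spins without taking the OSQ. The actual waiter-spinner call site
arrives in a later patch of this series, so the following is only a sketch
of the expected call pattern, written as a stand-alone mock with invented
names rather than kernel code:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct mock_mutex { int count; };

/* Stand-in for mutex_optimistic_spin(); it only reports its arguments. */
static bool mock_optimistic_spin(struct mock_mutex *lock, void *ww_ctx,
                                 bool use_ww_ctx, int waiter)
{
        (void)lock; (void)ww_ctx; (void)use_ww_ctx;
        printf("spinning, waiter = %d\n", waiter);
        return false;           /* pretend spinning did not get the lock */
}

/* Mid path: a task not yet on the wait list spins through the OSQ. */
static void mock_lock_common(struct mock_mutex *lock)
{
        if (mock_optimistic_spin(lock, NULL, false, 0))
                return;         /* got the lock, fast exit */
        /* otherwise fall through and join the wait list */
}

/*
 * Slow path: only the waiter at the head of the wait list spins, and it
 * passes waiter = 1 so the OSQ is bypassed entirely.
 */
static void mock_waiter_spin(struct mock_mutex *lock)
{
        if (mock_optimistic_spin(lock, NULL, false, 1))
                return;         /* acquired while spinning */
        /* otherwise go (back) to sleep */
}

int main(void)
{
        struct mock_mutex m = { .count = 1 };

        mock_lock_common(&m);
        mock_waiter_spin(&m);
        return 0;
}

The only point of interest in the mock is the last argument: 0 from the
existing call site in __mutex_lock_common(), 1 from the future waiter path.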