Date: 19 Dec 2008
From: Thomas Gleixner <tglx@linutronix.de>
Subject: [patch 4/7] rtmutex: unify state manipulation
The manipulation of the waiter task state is open-coded all over the
place, each copy with slightly different details. Use one set of helper
functions to reduce the duplicated code and make the handling
consistent for all instances.
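
As a side note for readers following the series: the idiom being
factored out is the xchg() based save/set/restore dance modeled below.
This is a userspace sketch, not kernel code - cur_state and the TASK_*
values are local stand-ins, atomic_exchange() stands in for the
kernel's xchg(), and set_blocked_state()/restore_state() merely mirror
the rt_set_current_blocked_state()/rt_restore_current_state() helpers
this patch introduces.

#include <stdatomic.h>
#include <stdio.h>

#define TASK_RUNNING		0UL
#define TASK_INTERRUPTIBLE	1UL
#define TASK_UNINTERRUPTIBLE	2UL

static _Atomic unsigned long cur_state = TASK_RUNNING;

/* Mirror of rt_set_current_blocked_state(): move to the blocked
 * state, but if a wakeup already marked the task RUNNING, fold that
 * into the saved state so it cannot be lost on restore. */
static unsigned long set_blocked_state(unsigned long saved_state)
{
	unsigned long state;

	state = atomic_exchange(&cur_state, TASK_UNINTERRUPTIBLE);
	if (state == TASK_RUNNING)
		saved_state = TASK_RUNNING;
	return saved_state;
}

/* Mirror of rt_restore_current_state(): put the original state back,
 * again preserving a wakeup that raced with the restore. */
static void restore_state(unsigned long saved_state)
{
	unsigned long state = atomic_exchange(&cur_state, saved_state);

	if (state == TASK_RUNNING)
		atomic_store(&cur_state, TASK_RUNNING);
}

int main(void)
{
	unsigned long saved = atomic_load(&cur_state);

	saved = set_blocked_state(saved);
	/* ... spin or wait for the lock owner here ... */
	restore_state(saved);

	printf("state after restore: %lu\n",
	       (unsigned long)atomic_load(&cur_state));
	return 0;
}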

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/rtmutex.c | 95 +++++++++++++++++++++++++++----------------------------
1 file changed, 48 insertions(+), 47 deletions(-)

Index: linux-2.6.24/kernel/rtmutex.c
===================================================================
--- linux-2.6.24.orig/kernel/rtmutex.c
+++ linux-2.6.24/kernel/rtmutex.c
@@ -765,13 +765,6 @@ rt_spin_lock_fastunlock(struct rt_mutex
slowfn(lock);
}

-static inline void
-update_current(unsigned long new_state, unsigned long *saved_state)
-{
- unsigned long state = xchg(&current->state, new_state);
- if (unlikely(state == TASK_RUNNING))
- *saved_state = TASK_RUNNING;
-}

#ifdef CONFIG_SMP
static int adaptive_wait(struct rt_mutex_waiter *waiter,
@@ -803,6 +796,34 @@ static int adaptive_wait(struct rt_mutex
#endif

/*
+ * The state setting needs to preserve the original state and to take
+ * care of non-rtmutex wakeups.
+ */
+static inline unsigned long
+rt_set_current_blocked_state(unsigned long saved_state)
+{
+ unsigned long state;
+
+ state = xchg(&current->state, TASK_UNINTERRUPTIBLE);
+ /*
+ * Take care of non-rtmutex wakeups. rtmutex wakeups
+ * set the state to TASK_RUNNING_MUTEX.
+ */
+ if (state == TASK_RUNNING)
+ saved_state = TASK_RUNNING;
+
+ return saved_state;
+}
+
+static inline void rt_restore_current_state(unsigned long saved_state)
+{
+ unsigned long state = xchg(&current->state, saved_state);
+
+ if (state == TASK_RUNNING)
+ current->state = TASK_RUNNING;
+}
+
+/*
* Slow path lock function spin_lock style: this variant is very
* careful not to miss any non-lock wakeups.
*
@@ -816,7 +837,7 @@ static void fastcall noinline __sched
rt_spin_lock_slowlock(struct rt_mutex *lock)
{
struct rt_mutex_waiter waiter;
- unsigned long saved_state, state, flags;
+ unsigned long saved_state, flags;
struct task_struct *orig_owner;
int missed = 0;

@@ -836,7 +857,9 @@ rt_spin_lock_slowlock(struct rt_mutex *l
* of the lock sleep/wakeup mechanism. When we get a real
* wakeup the task->state is TASK_RUNNING and we change
* saved_state accordingly. If we did not get a real wakeup
- * then we return with the saved state.
+ * then we return with the saved state. We need to be careful
+ * about an original state of TASK_INTERRUPTIBLE as well, as we
+ * could otherwise miss a wakeup_interruptible().
*/
saved_state = current->state;

@@ -880,7 +903,8 @@ rt_spin_lock_slowlock(struct rt_mutex *l

if (adaptive_wait(&waiter, orig_owner)) {
put_task_struct(orig_owner);
- update_current(TASK_UNINTERRUPTIBLE, &saved_state);
+
+ saved_state = rt_set_current_blocked_state(saved_state);
/*
- * The xchg() in update_current() is an implicit
+ * The xchg() in rt_set_current_blocked_state() is an implicit
* barrier which we rely upon to ensure current->state
@@ -896,9 +920,7 @@ rt_spin_lock_slowlock(struct rt_mutex *l
current->lock_depth = saved_lock_depth;
}

- state = xchg(&current->state, saved_state);
- if (unlikely(state == TASK_RUNNING))
- current->state = TASK_RUNNING;
+ rt_restore_current_state(saved_state);

/*
* Extremely rare case, if we got woken up by a non-mutex wakeup,
@@ -1333,7 +1355,7 @@ rt_read_slowlock(struct rw_mutex *rwm, i
struct rt_mutex_waiter waiter;
struct rt_mutex *mutex = &rwm->mutex;
int saved_lock_depth = -1;
- unsigned long saved_state = -1, state, flags;
+ unsigned long saved_state, flags;

spin_lock_irqsave(&mutex->wait_lock, flags);
init_rw_lists(rwm);
@@ -1357,13 +1379,13 @@ rt_read_slowlock(struct rw_mutex *rwm, i
*/
if (unlikely(current->lock_depth >= 0))
saved_lock_depth = rt_release_bkl(mutex, flags);
- set_current_state(TASK_UNINTERRUPTIBLE);
} else {
/* Spin lock must preserve BKL */
- saved_state = xchg(&current->state, TASK_UNINTERRUPTIBLE);
saved_lock_depth = current->lock_depth;
}

+ saved_state = rt_set_current_blocked_state(current->state);
+
for (;;) {
unsigned long saved_flags;

@@ -1398,23 +1420,12 @@ rt_read_slowlock(struct rw_mutex *rwm, i
spin_lock_irqsave(&mutex->wait_lock, flags);

current->flags |= saved_flags;
- if (mtx)
- set_current_state(TASK_UNINTERRUPTIBLE);
- else {
+ if (!mtx)
current->lock_depth = saved_lock_depth;
- state = xchg(&current->state, TASK_UNINTERRUPTIBLE);
- if (unlikely(state == TASK_RUNNING))
- saved_state = TASK_RUNNING;
- }
+ saved_state = rt_set_current_blocked_state(saved_state);
}

- if (mtx)
- set_current_state(TASK_RUNNING);
- else {
- state = xchg(&current->state, saved_state);
- if (unlikely(state == TASK_RUNNING))
- current->state = TASK_RUNNING;
- }
+ rt_restore_current_state(!mtx ? saved_state : TASK_RUNNING);

if (unlikely(waiter.task))
remove_waiter(mutex, &waiter, flags);
@@ -1490,7 +1501,7 @@ rt_write_slowlock(struct rw_mutex *rwm,
struct rt_mutex *mutex = &rwm->mutex;
struct rt_mutex_waiter waiter;
int saved_lock_depth = -1;
- unsigned long flags, saved_state = -1, state;
+ unsigned long flags, saved_state;

debug_rt_mutex_init_waiter(&waiter);
waiter.task = NULL;
@@ -1514,13 +1525,13 @@ rt_write_slowlock(struct rw_mutex *rwm,
*/
if (unlikely(current->lock_depth >= 0))
saved_lock_depth = rt_release_bkl(mutex, flags);
- set_current_state(TASK_UNINTERRUPTIBLE);
} else {
/* Spin locks must preserve the BKL */
saved_lock_depth = current->lock_depth;
- saved_state = xchg(&current->state, TASK_UNINTERRUPTIBLE);
}

+ saved_state = rt_set_current_blocked_state(current->state);
+
for (;;) {
unsigned long saved_flags;

@@ -1555,24 +1566,14 @@ rt_write_slowlock(struct rw_mutex *rwm,
spin_lock_irqsave(&mutex->wait_lock, flags);

current->flags |= saved_flags;
- if (mtx)
- set_current_state(TASK_UNINTERRUPTIBLE);
- else {
+ if (!mtx)
current->lock_depth = saved_lock_depth;
- state = xchg(&current->state, TASK_UNINTERRUPTIBLE);
- if (unlikely(state == TASK_RUNNING))
- saved_state = TASK_RUNNING;
- }
- }

- if (mtx)
- set_current_state(TASK_RUNNING);
- else {
- state = xchg(&current->state, saved_state);
- if (unlikely(state == TASK_RUNNING))
- current->state = TASK_RUNNING;
+ saved_state = rt_set_current_blocked_state(saved_state);
}

+ rt_restore_current_state(!mtx ? saved_state : TASK_RUNNING);
+
if (unlikely(waiter.task))
remove_waiter(mutex, &waiter, flags);

--
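
A note on the restore calls in the rwlock slow paths: the mutex style
callers (mtx) always leave in TASK_RUNNING, while the spinlock style
callers get their entry state back, hence the
rt_restore_current_state(!mtx ? saved_state : TASK_RUNNING) form. The
sketch below shows why the xchg() return value has to be checked: a
wakeup that slips in before the task blocks would otherwise be thrown
away on restore. It is userspace and illustrative only (build with
-pthread); the waker thread plays the role of a non-rtmutex wakeup
such as wakeup_interruptible(), and all names are local stand-ins.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define TASK_RUNNING		0UL
#define TASK_INTERRUPTIBLE	1UL
#define TASK_UNINTERRUPTIBLE	2UL

static _Atomic unsigned long cur_state = TASK_INTERRUPTIBLE;

/* Plays the role of a non-rtmutex wakeup: unconditionally mark the
 * task runnable, as wakeup_interruptible() would. */
static void *waker(void *arg)
{
	(void)arg;
	atomic_store(&cur_state, TASK_RUNNING);
	return NULL;
}

int main(void)
{
	pthread_t t;
	unsigned long saved = atomic_load(&cur_state);
	unsigned long state;

	pthread_create(&t, NULL, waker, NULL);
	pthread_join(t, NULL);	/* the wakeup lands before we block */

	/* rt_set_current_blocked_state() equivalent: the wakeup above
	 * is detected via the xchg() return value and folded into the
	 * saved state instead of being thrown away. */
	state = atomic_exchange(&cur_state, TASK_UNINTERRUPTIBLE);
	if (state == TASK_RUNNING)
		saved = TASK_RUNNING;

	/* rt_restore_current_state() equivalent: the task comes back
	 * RUNNING, not INTERRUPTIBLE, so the wakeup is not lost. */
	state = atomic_exchange(&cur_state, saved);
	if (state == TASK_RUNNING)
		atomic_store(&cur_state, TASK_RUNNING);

	printf("restored state: %lu (0 == TASK_RUNNING)\n",
	       (unsigned long)atomic_load(&cur_state));
	return 0;
}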


