    Subject: Re: [RFC][PATCH 5/8] rtmutex: Clean up

    Hi,

    On 07/06/16 21:56, Peter Zijlstra wrote:
    > Previous patches changed the meaning of the return value of
    > rt_mutex_slowunlock(); update comments and code to reflect this.
    >
    > Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
    > ---
    > kernel/futex.c                  | 12 ++++++------
    > kernel/locking/rtmutex.c        | 20 +++++++++-----------
    > kernel/locking/rtmutex_common.h |  2 +-
    > 3 files changed, 16 insertions(+), 18 deletions(-)
    >
    > --- a/kernel/futex.c
    > +++ b/kernel/futex.c
    > @@ -1261,7 +1261,7 @@ static int wake_futex_pi(u32 __user *uad
    >  	struct futex_pi_state *pi_state = this->pi_state;
    >  	u32 uninitialized_var(curval), newval;
    >  	WAKE_Q(wake_q);
    > -	bool deboost;
    > +	bool postunlock;
    >  	int ret = 0;
    >
    >  	if (!pi_state)
    > @@ -1327,17 +1327,17 @@ static int wake_futex_pi(u32 __user *uad
    >
    >  	raw_spin_unlock_irq(&pi_state->pi_mutex.wait_lock);
    >
    > -	deboost = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
    > +	postunlock = rt_mutex_futex_unlock(&pi_state->pi_mutex, &wake_q);
    >
    >  	/*
    >  	 * First unlock HB so the waiter does not spin on it once he got woken
    > -	 * up. Second wake up the waiter before the priority is adjusted. If we
    > -	 * deboost first (and lose our higher priority), then the task might get
    > -	 * scheduled away before the wake up can take place.
    > +	 * up. Then wakeup the waiter by calling rt_mutex_postunlock(). Priority
    > +	 * is already adjusted and preemption is disabled to avoid inversion.
    >  	 */
    >  	spin_unlock(&hb->lock);
    >
    > -	rt_mutex_postunlock(&wake_q, deboost);
    > +	if (postunlock)
    > +		rt_mutex_postunlock(&wake_q);

    I'm most probably missing something, but don't we still need to call
    wake_up_q() even when postunlock is false? IIUC, before this change we
    were always calling rt_mutex_postunlock(); only the preempt_enable()
    inside it was conditional on deboost.
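
    For illustration, at the call site above I would have expected something
    along these lines (an untested sketch only, to show what I mean; the
    explicit wake_up_q() in the else branch is my addition, not part of this
    patch):

    	spin_unlock(&hb->lock);

    	if (postunlock) {
    		/* wake_up_q() + preempt_enable() */
    		rt_mutex_postunlock(&wake_q);
    	} else {
    		/* no boosting to undo, but don't we still owe the wakeup? */
    		wake_up_q(&wake_q);
    	}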

    Best,

    - Juri

    >
    >  	return 0;
    >  }
    > --- a/kernel/locking/rtmutex.c
    > +++ b/kernel/locking/rtmutex.c
    > @@ -1254,7 +1254,8 @@ static inline int rt_mutex_slowtrylock(s
    >
    >  /*
    >   * Slow path to release a rt-mutex.
    > - * Return whether the current task needs to undo a potential priority boosting.
    > + *
    > + * Return whether the current task needs to call rt_mutex_postunlock().
    >   */
    >  static bool __sched rt_mutex_slowunlock(struct rt_mutex *lock,
    >  					struct wake_q_head *wake_q)
    > @@ -1327,7 +1328,7 @@ static bool __sched rt_mutex_slowunlock(
    >
    >  	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);
    >
    > -	/* check PI boosting */
    > +	/* call rt_mutex_postunlock() */
    >  	return true;
    >  }
    >
    > @@ -1378,15 +1379,14 @@ rt_mutex_fasttrylock(struct rt_mutex *lo
    >  }
    >
    >  /*
    > - * Undo pi boosting (if necessary) and wake top waiter.
    > + * Performs the wakeup of the top-waiter and re-enables preemption.
    >   */
    > -void rt_mutex_postunlock(struct wake_q_head *wake_q, bool deboost)
    > +void rt_mutex_postunlock(struct wake_q_head *wake_q)
    >  {
    >  	wake_up_q(wake_q);
    >
    >  	/* Pairs with preempt_disable() in rt_mutex_slowunlock() */
    > -	if (deboost)
    > -		preempt_enable();
    > +	preempt_enable();
    >  }
    >
    >  /**
    > @@ -1489,9 +1489,8 @@ void __sched rt_mutex_unlock(struct rt_m
    >  		rt_mutex_deadlock_account_unlock(current);
    >
    >  	} else {
    > -		bool deboost = rt_mutex_slowunlock(lock, &wake_q);
    > -
    > -		rt_mutex_postunlock(&wake_q, deboost);
    > +		if (rt_mutex_slowunlock(lock, &wake_q))
    > +			rt_mutex_postunlock(&wake_q);
    >  	}
    >  }
    >  EXPORT_SYMBOL_GPL(rt_mutex_unlock);
    > @@ -1500,8 +1499,7 @@ EXPORT_SYMBOL_GPL(rt_mutex_unlock);
    >   * rt_mutex_futex_unlock - Futex variant of rt_mutex_unlock
    >   * @lock: the rt_mutex to be unlocked
    >   *
    > - * Returns: true/false indicating whether priority adjustment is
    > - * required or not.
    > + * Returns: true/false indicating whether we should call rt_mutex_postunlock().
    >   */
    >  bool __sched rt_mutex_futex_unlock(struct rt_mutex *lock,
    >  				   struct wake_q_head *wqh)
    > --- a/kernel/locking/rtmutex_common.h
    > +++ b/kernel/locking/rtmutex_common.h
    > @@ -111,7 +111,7 @@ extern int rt_mutex_finish_proxy_lock(st
    >  extern int rt_mutex_timed_futex_lock(struct rt_mutex *l, struct hrtimer_sleeper *to);
    >  extern bool rt_mutex_futex_unlock(struct rt_mutex *lock,
    >  				  struct wake_q_head *wqh);
    > -extern void rt_mutex_postunlock(struct wake_q_head *wake_q, bool deboost);
    > +extern void rt_mutex_postunlock(struct wake_q_head *wake_q);
    >  extern void rt_mutex_adjust_prio(struct task_struct *task);
    >
    >  #ifdef CONFIG_DEBUG_RT_MUTEXES
    >
    >
