From: Thomas Gleixner <>
Subject: [patch V5 23/72] locking/rtmutex: Prepare RT rt_mutex_wake_q for RT locks
Date: Sun, 15 Aug 2021 23:28:11 +0200 (CEST)
From: Thomas Gleixner <tglx@linutronix.de>
Add a rtlock_task pointer to rt_mutex_wake_q, which allows handling the RT specific wakeup for spin/rwlock waiters. The pointer consumes only 4/8 bytes on the stack, so it is provided unconditionally to avoid #ifdeffery all over the place.
This cannot use a wake_q, because a task can have concurrent wakeups which would make it miss either the lock wakeup or the regular wakeup, depending on which gets queued first, unless task_struct gains a separate wake_q_node for this. That would be overkill because there can only be a single task which gets woken up in the spin/rw_lock unlock path.
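As an illustration of the wake_q_node constraint (a sketch only, not part of this patch): wake_q_add() claims the single per-task wake_q_node with a cmpxchg and silently skips the add when the node is already queued, so queueing the same task for both a regular wakeup and a rtlock wakeup would lose whichever comes second. Roughly:

	/* Hypothetical sketch, not code from this series */
	static void sketch_lost_wakeup(struct task_struct *tsk)
	{
		DEFINE_WAKE_Q(regular_q);	/* e.g. a futex wakeup path      */
		DEFINE_WAKE_Q(rtlock_q);	/* a rtlock unlock path          */

		wake_q_add(&regular_q, tsk);	/* claims tsk->wake_q            */
		wake_q_add(&rtlock_q, tsk);	/* skipped: node already in use, */
						/* so this wakeup is lost        */
	}

On top of that, wake_up_q() wakes with TASK_NORMAL, while a spin/rwlock waiter has to be woken with TASK_RTLOCK_WAIT, which is what rt_mutex_wake_up_q() does below via wake_up_state().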
No functional change for non-RT enabled kernels.
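For context, a simplified sketch (not part of this patch) of how the unlock slow path is expected to use this pair. The wiring of mark_wakeup_next_waiter() to rt_wake_q_head happens in other patches of the series, and the lock type name follows the later code, so details may differ:

	/*
	 * Illustrative only; assumes mark_wakeup_next_waiter() already takes
	 * a struct rt_wake_q_head pointer and disables preemption.
	 */
	static void example_slowunlock(struct rt_mutex_base *lock)
	{
		DEFINE_RT_WAKE_Q(wqh);
		unsigned long flags;

		raw_spin_lock_irqsave(&lock->wait_lock, flags);
		/*
		 * Queue the top waiter: sleeping lock waiters go into
		 * wqh.head, spin/rwlock waiters into wqh.rtlock_task.
		 */
		mark_wakeup_next_waiter(&wqh, lock);
		raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

		/*
		 * Issue both wakeup types and drop the preemption disable
		 * taken in mark_wakeup_next_waiter().
		 */
		rt_mutex_wake_up_q(&wqh);
	}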
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
V3: Switch back to the working version (Mike)
V2: Make it symmetric (PeterZ)
---
 include/linux/sched/wake_q.h    |    1 -
 kernel/locking/rtmutex.c        |   18 ++++++++++++++++--
 kernel/locking/rtmutex_common.h |    5 ++++-
 3 files changed, 20 insertions(+), 4 deletions(-)
---
--- a/include/linux/sched/wake_q.h
+++ b/include/linux/sched/wake_q.h
@@ -62,5 +62,4 @@ static inline bool wake_q_empty(struct w
 extern void wake_q_add(struct wake_q_head *head, struct task_struct *task);
 extern void wake_q_add_safe(struct wake_q_head *head, struct task_struct *task);
 extern void wake_up_q(struct wake_q_head *head);
-
 #endif /* _LINUX_SCHED_WAKE_Q_H */
--- a/kernel/locking/rtmutex.c
+++ b/kernel/locking/rtmutex.c
@@ -351,12 +351,26 @@ static __always_inline void rt_mutex_adj
 static __always_inline void rt_mutex_wake_q_add(struct rt_wake_q_head *wqh,
 						struct rt_mutex_waiter *w)
 {
-	wake_q_add(&wqh->head, w->task);
+	if (IS_ENABLED(CONFIG_PREEMPT_RT) && w->wake_state != TASK_NORMAL) {
+		if (IS_ENABLED(CONFIG_PROVE_LOCKING))
+			WARN_ON_ONCE(wqh->rtlock_task);
+		get_task_struct(w->task);
+		wqh->rtlock_task = w->task;
+	} else {
+		wake_q_add(&wqh->head, w->task);
+	}
 }
 
 static __always_inline void rt_mutex_wake_up_q(struct rt_wake_q_head *wqh)
 {
-	wake_up_q(&wqh->head);
+	if (IS_ENABLED(CONFIG_PREEMPT_RT) && wqh->rtlock_task) {
+		wake_up_state(wqh->rtlock_task, TASK_RTLOCK_WAIT);
+		put_task_struct(wqh->rtlock_task);
+		wqh->rtlock_task = NULL;
+	}
+
+	if (!wake_q_empty(&wqh->head))
+		wake_up_q(&wqh->head);
 
 	/* Pairs with preempt_disable() in mark_wakeup_next_waiter() */
 	preempt_enable();
--- a/kernel/locking/rtmutex_common.h
+++ b/kernel/locking/rtmutex_common.h
@@ -42,15 +42,18 @@ struct rt_mutex_waiter {
 /**
  * rt_wake_q_head - Wrapper around regular wake_q_head to support
  *		    "sleeping" spinlocks on RT
- * @head:	The regular wake_q_head for sleeping lock variants
+ * @head:		The regular wake_q_head for sleeping lock variants
+ * @rtlock_task:	Task pointer for RT lock (spin/rwlock) wakeups
  */
 struct rt_wake_q_head {
 	struct wake_q_head	head;
+	struct task_struct	*rtlock_task;
 };
 
 #define DEFINE_RT_WAKE_Q(name)						\
 	struct rt_wake_q_head name = {					\
 		.head		= WAKE_Q_HEAD_INITIALIZER(name.head),	\
+		.rtlock_task	= NULL,					\
 	}
 
 /*