    Date:    2021-08-27
    From:    Boqun Feng
    Subject: Re: [patch V5 32/72] locking/rtmutex: Provide the spin/rwlock core lock function
    On Sun, Aug 15, 2021 at 11:28:25PM +0200, Thomas Gleixner wrote:
    > From: Thomas Gleixner <tglx@linutronix.de>
    >
    > A simplified version of the rtmutex slowlock function which neither handles
    > signals nor timeouts and is careful about preserving the state of the
    > blocked task across the lock operation.
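
    To make the state-preservation point concrete: on RT kernels
    spinlock_t is substituted by this rtmutex-based sleeping lock, so a
    contended spin_lock() can block while the task is already in a
    sleeping state set by an outer wait loop. A sketch (some_lock is
    hypothetical, not code from the patch):

	set_current_state(TASK_UNINTERRUPTIBLE);  /* outer wait state */

	spin_lock(&some_lock);	/* contended: blocks in the slow path */
	/* critical section */
	spin_unlock(&some_lock);

	/*
	 * Must still observe TASK_UNINTERRUPTIBLE here.  Without the
	 * save/restore pair in the slow path, the lock wait would have
	 * left the task in TASK_RUNNING and this schedule() would not
	 * sleep.
	 */
	schedule();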
    >
    > Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
    > ---
    > kernel/locking/rtmutex.c        | 60 ++++++++++++++++++++++++++++++++++++++++
    > kernel/locking/rtmutex_common.h |  2 +-
    > 2 files changed, 61 insertions(+), 1 deletion(-)
    > ---
    > --- a/kernel/locking/rtmutex.c
    > +++ b/kernel/locking/rtmutex.c
    > @@ -1416,3 +1416,63 @@ static __always_inline int __rt_mutex_lo
    > 	return rt_mutex_slowlock(lock, state);
    > }
    > #endif /* RT_MUTEX_BUILD_MUTEX */
    > +
    > +#ifdef RT_MUTEX_BUILD_SPINLOCKS
    > +/*
    > + * Functions required for spin/rw_lock substitution on RT kernels
    > + */
    > +
    > +/**
    > + * rtlock_slowlock_locked - Slow path lock acquisition for RT locks
    > + * @lock: The underlying rt mutex
    > + */
    > +static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
    > +{
    > +	struct rt_mutex_waiter waiter;
    > +
    > +	lockdep_assert_held(&lock->wait_lock);
    > +
    > +	if (try_to_take_rt_mutex(lock, current, NULL))
    > +		return;
    > +
    > +	rt_mutex_init_rtlock_waiter(&waiter);
    > +
    > +	/* Save current state and set state to TASK_RTLOCK_WAIT */
    > +	current_save_and_set_rtlock_wait_state();
    > +
    > +	task_blocks_on_rt_mutex(lock, &waiter, current, RT_MUTEX_MIN_CHAINWALK);
    > +
    > +	for (;;) {
    > +		/* Try to acquire the lock again. */
    > +		if (try_to_take_rt_mutex(lock, current, &waiter))
    > +			break;
    > +
    > +		raw_spin_unlock_irq(&lock->wait_lock);
    > +
    > +		schedule_rtlock();
    > +
    > +		raw_spin_lock_irq(&lock->wait_lock);
    > +		set_current_state(TASK_RTLOCK_WAIT);
    > +	}
    > +
    > +	/* Restore the task state */
    > +	current_restore_rtlock_saved_state();
    > +
    > +	/*
    > +	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We
    > +	 * might have to fix that up:
    > +	 */
    > +	fixup_rt_mutex_waiters(lock);
    > +	debug_rt_mutex_free_waiter(&waiter);
    > +}
    > +
    > +static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock)
    > +{
    > +	unsigned long flags;
    > +
    > +	raw_spin_lock_irqsave(&lock->wait_lock, flags);
    > +	rtlock_slowlock_locked(lock);
    > +	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

    Just out of curiosity, could we use raw_spin_{un,}lock_irq() here
    instead of *_irq{save,restore}()? Because rtlock_slowlock() might
    sleep, it cannot be called with interrupts already disabled, so
    there should be no flags worth saving.
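
    I.e., a sketch of the variant I mean (assuming the slow path is
    never entered with interrupts already disabled):

	static __always_inline void __sched rtlock_slowlock(struct rt_mutex_base *lock)
	{
		raw_spin_lock_irq(&lock->wait_lock);
		rtlock_slowlock_locked(lock);
		raw_spin_unlock_irq(&lock->wait_lock);
	}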

    Regards,
    Boqun

    > +}
    > +
    > +#endif /* RT_MUTEX_BUILD_SPINLOCKS */
    > --- a/kernel/locking/rtmutex_common.h
    > +++ b/kernel/locking/rtmutex_common.h
    > @@ -181,7 +181,7 @@ static inline void rt_mutex_init_waiter(
    > 	waiter->task = NULL;
    > }
    >
    > -static inline void rtlock_init_rtmutex_waiter(struct rt_mutex_waiter *waiter)
    > +static inline void rt_mutex_init_rtlock_waiter(struct rt_mutex_waiter *waiter)
    > {
    > 	rt_mutex_init_waiter(waiter);
    > 	waiter->wake_state = TASK_RTLOCK_WAIT;
    >
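
    For context, the caller side elsewhere in this series pairs the
    slow path above with a cmpxchg fast path, roughly like this
    (paraphrased from the spinlock substitution patches, not part of
    this patch):

	static __always_inline void rtlock_lock(struct rt_mutex_base *rtm)
	{
		if (unlikely(!rt_mutex_cmpxchg_acquire(rtm, NULL, current)))
			rtlock_slowlock(rtm);
	}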
