    Date: Tue, 2 Feb 2021
    Subject: [PATCH v2 08/28] sched: Add cond_resched_rwlock
    From: Ben Gardon <bgardon@google.com>
    Safely rescheduling while holding a spin lock is essential for keeping
    long-running kernel operations running smoothly. Add the same facility
    for rwlocks: cond_resched_rwlock_read() and cond_resched_rwlock_write().
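
    For context, a hypothetical read-side caller might look like the
    sketch below (illustrative only, not part of this patch; my_lock,
    my_list, struct my_entry, and sum_entries() are made-up names). A
    nonzero return from cond_resched_rwlock_read() means the lock was
    dropped and re-taken, so pointers obtained under the lock must be
    treated as stale:

    #include <linux/list.h>
    #include <linux/sched.h>
    #include <linux/spinlock.h>

    static DEFINE_RWLOCK(my_lock);
    static LIST_HEAD(my_list);

    struct my_entry {
    	struct list_head node;
    	unsigned long data;
    };

    /* Sum a long, rwlock-protected list without hogging the CPU. */
    static unsigned long sum_entries(void)
    {
    	struct my_entry *e;
    	unsigned long sum = 0;

    	read_lock(&my_lock);
    	list_for_each_entry(e, &my_list, node) {
    		sum += e->data;
    		/*
    		 * Drop my_lock if a reschedule is due or the lock is
    		 * contended. If it was dropped, the iterator may point
    		 * at a freed entry, so bail out rather than continue.
    		 */
    		if (cond_resched_rwlock_read(&my_lock))
    			break;
    	}
    	read_unlock(&my_lock);
    	return sum;
    }

    The write-side helper, cond_resched_rwlock_write(), is used the same
    way under write_lock()/write_unlock().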

    CC: Ingo Molnar <mingo@redhat.com>
    CC: Will Deacon <will@kernel.org>
    Acked-by: Peter Zijlstra <peterz@infradead.org>
    Acked-by: Davidlohr Bueso <dbueso@suse.de>
    Acked-by: Waiman Long <longman@redhat.com>
    Acked-by: Paolo Bonzini <pbonzini@redhat.com>
    Signed-off-by: Ben Gardon <bgardon@google.com>
    ---
    include/linux/sched.h | 12 ++++++++++++
    kernel/sched/core.c   | 40 ++++++++++++++++++++++++++++++++++++++++
    2 files changed, 52 insertions(+)

    diff --git a/include/linux/sched.h b/include/linux/sched.h
    index 5d1378e5a040..3052d16da3cf 100644
    --- a/include/linux/sched.h
    +++ b/include/linux/sched.h
    @@ -1883,12 +1883,24 @@ static inline int _cond_resched(void) { return 0; }
     })

     extern int __cond_resched_lock(spinlock_t *lock);
    +extern int __cond_resched_rwlock_read(rwlock_t *lock);
    +extern int __cond_resched_rwlock_write(rwlock_t *lock);

     #define cond_resched_lock(lock) ({				\
     	___might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);\
     	__cond_resched_lock(lock);				\
     })

    +#define cond_resched_rwlock_read(lock) ({			\
    +	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
    +	__cond_resched_rwlock_read(lock);			\
    +})
    +
    +#define cond_resched_rwlock_write(lock) ({			\
    +	__might_sleep(__FILE__, __LINE__, PREEMPT_LOCK_OFFSET);	\
    +	__cond_resched_rwlock_write(lock);			\
    +})
    +
     static inline void cond_resched_rcu(void)
     {
     #if defined(CONFIG_DEBUG_ATOMIC_SLEEP) || !defined(CONFIG_PREEMPT_RCU)
    diff --git a/kernel/sched/core.c b/kernel/sched/core.c
    index ff74fca39ed2..efed1bf202d1 100644
    --- a/kernel/sched/core.c
    +++ b/kernel/sched/core.c
    @@ -6709,6 +6709,46 @@ int __cond_resched_lock(spinlock_t *lock)
     }
     EXPORT_SYMBOL(__cond_resched_lock);

    +int __cond_resched_rwlock_read(rwlock_t *lock)
    +{
    +	int resched = should_resched(PREEMPT_LOCK_OFFSET);
    +	int ret = 0;
    +
    +	lockdep_assert_held_read(lock);
    +
    +	if (rwlock_needbreak(lock) || resched) {
    +		read_unlock(lock);
    +		if (resched)
    +			preempt_schedule_common();
    +		else
    +			cpu_relax();
    +		ret = 1;
    +		read_lock(lock);
    +	}
    +	return ret;
    +}
    +EXPORT_SYMBOL(__cond_resched_rwlock_read);
    +
    +int __cond_resched_rwlock_write(rwlock_t *lock)
    +{
    +	int resched = should_resched(PREEMPT_LOCK_OFFSET);
    +	int ret = 0;
    +
    +	lockdep_assert_held_write(lock);
    +
    +	if (rwlock_needbreak(lock) || resched) {
    +		write_unlock(lock);
    +		if (resched)
    +			preempt_schedule_common();
    +		else
    +			cpu_relax();
    +		ret = 1;
    +		write_lock(lock);
    +	}
    +	return ret;
    +}
    +EXPORT_SYMBOL(__cond_resched_rwlock_write);
    +
     /**
      * yield - yield the current processor to other threads.
      *
    --
    2.30.0.365.g02bc693789-goog