Subject: [tip: locking/core] locking/local_lock: Add PREEMPT_RT support
    The following commit has been merged into the locking/core branch of tip:

    Commit-ID: 026659b9774e4c586baeb457557fcfc4e0ad144b
    Gitweb: https://git.kernel.org/tip/026659b9774e4c586baeb457557fcfc4e0ad144b
    Author: Thomas Gleixner <tglx@linutronix.de>
    AuthorDate: Sun, 15 Aug 2021 23:29:28 +02:00
    Committer: Ingo Molnar <mingo@kernel.org>
    CommitterDate: Tue, 17 Aug 2021 19:08:49 +02:00

    locking/local_lock: Add PREEMPT_RT support

    On PREEMPT_RT enabled kernels local_lock maps to a per CPU 'sleeping'
    spinlock which protects the critical section while staying preemptible. CPU
    locality is established by disabling migration.

    Provide the necessary types and macros to substitute the non-RT variant.
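    For illustration only, a minimal usage sketch (a hypothetical per-CPU
    event counter, not part of this patch). The calling convention is
    identical on both configurations; only the underlying implementation
    differs as described above:

    /*
     * Hypothetical example: per-CPU event counter protected by a
     * local_lock. On !PREEMPT_RT local_lock() disables preemption; on
     * PREEMPT_RT it acquires the per-CPU sleeping spinlock with
     * migration disabled.
     */
    #include <linux/local_lock.h>
    #include <linux/percpu.h>

    struct event_stats {
    	local_lock_t	lock;
    	unsigned long	count;
    };

    static DEFINE_PER_CPU(struct event_stats, event_stats) = {
    	.lock	= INIT_LOCAL_LOCK(lock),
    };

    static void count_event(void)
    {
    	local_lock(&event_stats.lock);
    	this_cpu_inc(event_stats.count);
    	local_unlock(&event_stats.lock);
    }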

    Co-developed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
    Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
    Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
    Signed-off-by: Ingo Molnar <mingo@kernel.org>
    Link: https://lore.kernel.org/r/20210815211306.023630962@linutronix.de
    ---
    include/linux/local_lock_internal.h | 44 ++++++++++++++++++++++++++++-
    1 file changed, 44 insertions(+)

    diff --git a/include/linux/local_lock_internal.h b/include/linux/local_lock_internal.h
    index 3f02b81..975e33b 100644
    --- a/include/linux/local_lock_internal.h
    +++ b/include/linux/local_lock_internal.h
    @@ -6,6 +6,8 @@
     #include <linux/percpu-defs.h>
     #include <linux/lockdep.h>
     
    +#ifndef CONFIG_PREEMPT_RT
    +
     typedef struct {
     #ifdef CONFIG_DEBUG_LOCK_ALLOC
     	struct lockdep_map	dep_map;
    @@ -95,3 +97,45 @@ do {								\
     	local_lock_release(this_cpu_ptr(lock));			\
     	local_irq_restore(flags);				\
     } while (0)
    +
    +#else /* !CONFIG_PREEMPT_RT */
    +
    +/*
    + * On PREEMPT_RT local_lock maps to a per CPU spinlock, which protects the
    + * critical section while staying preemptible.
    + */
    +typedef spinlock_t local_lock_t;
    +
    +#define INIT_LOCAL_LOCK(lockname)	__LOCAL_SPIN_LOCK_UNLOCKED((lockname))
    +
    +#define __local_lock_init(l)					\
    +	do {							\
    +		local_spin_lock_init((l));			\
    +	} while (0)
    +
    +#define __local_lock(__lock)					\
    +	do {							\
    +		migrate_disable();				\
    +		spin_lock(this_cpu_ptr((__lock)));		\
    +	} while (0)
    +
    +#define __local_lock_irq(lock)			__local_lock(lock)
    +
    +#define __local_lock_irqsave(lock, flags)			\
    +	do {							\
    +		typecheck(unsigned long, flags);		\
    +		flags = 0;					\
    +		__local_lock(lock);				\
    +	} while (0)
    +
    +#define __local_unlock(__lock)					\
    +	do {							\
    +		spin_unlock(this_cpu_ptr((__lock)));		\
    +		migrate_enable();				\
    +	} while (0)
    +
    +#define __local_unlock_irq(lock)		__local_unlock(lock)
    +
    +#define __local_unlock_irqrestore(lock, flags)	__local_unlock(lock)
    +
    +#endif /* CONFIG_PREEMPT_RT */
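
    A note on the _irqsave variant above: on PREEMPT_RT interrupts are not
    actually disabled, so 'flags' is only typechecked and zeroed, which
    keeps existing callers compiling unchanged. A caller sketch (again
    hypothetical, reusing the event_stats counter from the example above)
    that works on both configurations:

    static void count_event_irqsave(void)
    {
    	unsigned long flags;

    	/*
    	 * !PREEMPT_RT: IRQs are disabled across the critical section.
    	 * PREEMPT_RT: sleeping per-CPU spinlock, IRQs stay enabled and
    	 * flags is 0.
    	 */
    	local_lock_irqsave(&event_stats.lock, flags);
    	this_cpu_inc(event_stats.count);
    	local_unlock_irqrestore(&event_stats.lock, flags);
    }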