From: Thomas Gleixner <tglx@linutronix.de>
Subject: [patch V5 71/72] locking/spinlock/rt: Prepare for RT local_lock
Date: 15 Aug 2021

Add the static and runtime initializer mechanics to support the RT variant
of local_lock, which requires the lock type in the lockdep map to be set
to LD_LOCK_PERCPU.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
V5: New patch
---
 include/linux/spinlock_rt.h        |   24 ++++++++++++++++--------
 include/linux/spinlock_types.h     |    6 ++++++
 include/linux/spinlock_types_raw.h |    8 ++++++++
 kernel/locking/spinlock_rt.c       |    7 +++++--
 4 files changed, 35 insertions(+), 10 deletions(-)

--- a/include/linux/spinlock_rt.h
+++ b/include/linux/spinlock_rt.h
@@ -8,20 +8,28 @@
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 extern void __rt_spin_lock_init(spinlock_t *lock, const char *name,
-				struct lock_class_key *key);
+				struct lock_class_key *key, bool percpu);
 #else
 static inline void __rt_spin_lock_init(spinlock_t *lock, const char *name,
-				       struct lock_class_key *key)
+				       struct lock_class_key *key, bool percpu)
 {
 }
 #endif
 
-#define spin_lock_init(slock)				\
-do {							\
-	static struct lock_class_key __key;		\
-							\
-	rt_mutex_base_init(&(slock)->lock);		\
-	__rt_spin_lock_init(slock, #slock, &__key);	\
+#define spin_lock_init(slock)					\
+do {								\
+	static struct lock_class_key __key;			\
+								\
+	rt_mutex_base_init(&(slock)->lock);			\
+	__rt_spin_lock_init(slock, #slock, &__key, false);	\
+} while (0)
+
+#define local_spin_lock_init(slock)				\
+do {								\
+	static struct lock_class_key __key;			\
+								\
+	rt_mutex_base_init(&(slock)->lock);			\
+	__rt_spin_lock_init(slock, #slock, &__key, true);	\
 } while (0)
 
 extern void rt_spin_lock(spinlock_t *lock);
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -60,6 +60,12 @@ typedef struct spinlock {
 		SPIN_DEP_MAP_INIT(name)				\
 	}
 
+#define __LOCAL_SPIN_LOCK_UNLOCKED(name)			\
+	{							\
+		.lock = __RT_MUTEX_BASE_INITIALIZER(name.lock),	\
+		LOCAL_SPIN_DEP_MAP_INIT(name)			\
+	}
+
 #define DEFINE_SPINLOCK(name)					\
 	spinlock_t name = __SPIN_LOCK_UNLOCKED(name)
 
--- a/include/linux/spinlock_types_raw.h
+++ b/include/linux/spinlock_types_raw.h
@@ -37,9 +37,17 @@ typedef struct raw_spinlock {
 		.name = #lockname,			\
 		.wait_type_inner = LD_WAIT_CONFIG,	\
 	}
+
+# define LOCAL_SPIN_DEP_MAP_INIT(lockname)		\
+	.dep_map = {					\
+		.name = #lockname,			\
+		.wait_type_inner = LD_WAIT_CONFIG,	\
+		.lock_type = LD_LOCK_PERCPU,		\
+	}
 #else
 # define RAW_SPIN_DEP_MAP_INIT(lockname)
 # define SPIN_DEP_MAP_INIT(lockname)
+# define LOCAL_SPIN_DEP_MAP_INIT(lockname)
 #endif
 
 #ifdef CONFIG_DEBUG_SPINLOCK
--- a/kernel/locking/spinlock_rt.c
+++ b/kernel/locking/spinlock_rt.c
@@ -120,10 +120,13 @@ EXPORT_SYMBOL(rt_spin_trylock_bh);
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 void __rt_spin_lock_init(spinlock_t *lock, const char *name,
-			 struct lock_class_key *key)
+			 struct lock_class_key *key, bool percpu)
 {
+	u8 type = percpu ? LD_LOCK_PERCPU : LD_LOCK_NORMAL;
+
 	debug_check_no_locks_freed((void *)lock, sizeof(*lock));
-	lockdep_init_map_wait(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG);
+	lockdep_init_map_type(&lock->dep_map, name, key, 0, LD_WAIT_CONFIG,
+			      LD_WAIT_INV, type);
 }
 EXPORT_SYMBOL(__rt_spin_lock_init);
 #endif
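
For context, here is a rough sketch (illustration only, not part of this patch)
of how the RT side of local_lock in the follow-up patch would be expected to
consume these initializers. The names local_lock_t, INIT_LOCAL_LOCK() and
__local_lock_init() below are assumptions about that follow-up, not anything
introduced here:

#ifdef CONFIG_PREEMPT_RT

/* On PREEMPT_RT a local_lock is a per-CPU spinlock_t, i.e. rtmutex based. */
typedef spinlock_t local_lock_t;

/*
 * Static initializer: __LOCAL_SPIN_LOCK_UNLOCKED() pulls in
 * LOCAL_SPIN_DEP_MAP_INIT() and thereby LD_LOCK_PERCPU.
 */
#define INIT_LOCAL_LOCK(lockname)	__LOCAL_SPIN_LOCK_UNLOCKED((lockname))

/*
 * Runtime initializer: local_spin_lock_init() hands percpu=true down to
 * __rt_spin_lock_init(), which sets the lockdep map type accordingly.
 */
#define __local_lock_init(l)				\
do {							\
	local_spin_lock_init((l));			\
} while (0)

#endif /* CONFIG_PREEMPT_RT */

With the lock type set to LD_LOCK_PERCPU, lockdep can distinguish these
per-CPU locks from regular spinlocks when validating lock ordering.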