From: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Subject: [PATCH tip/core/rcu 72/88] rcu: Move rnp->lock wrappers for SRCU use
Date: 25 May 2017
    This commit moves the now-generic rnp->lock wrapper macros from
    kernel/rcu/tree.h to kernel/rcu/rcu.h, thus allowing SRCU to use them.

    Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
    ---
    kernel/rcu/rcu.h | 53 +++++++++++++++++++++++++++++++++++++++++++++++++++++
    kernel/rcu/tree.h | 53 -----------------------------------------------------
    2 files changed, 53 insertions(+), 53 deletions(-)

    diff --git a/kernel/rcu/rcu.h b/kernel/rcu/rcu.h
    index 6a1e85bd2eac..2a75beb883c8 100644
    --- a/kernel/rcu/rcu.h
    +++ b/kernel/rcu/rcu.h
    @@ -303,6 +303,59 @@ static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
    cpu <= rnp->grphi; \
    cpu = cpumask_next((cpu), cpu_possible_mask))

    +/*
    + * Wrappers for the rcu_node::lock acquire and release.
    + *
    + * Because the rcu_nodes form a tree, the tree traversal locking will observe
    + * different lock values, this in turn means that an UNLOCK of one level
    + * followed by a LOCK of another level does not imply a full memory barrier;
    + * and most importantly transitivity is lost.
    + *
    + * In order to restore full ordering between tree levels, augment the regular
    + * lock acquire functions with smp_mb__after_unlock_lock().
    + *
    + * As ->lock of struct rcu_node is a __private field, therefore one should use
    + * these wrappers rather than directly call raw_spin_{lock,unlock}* on ->lock.
    + */
    +#define raw_spin_lock_rcu_node(p) \
    +do { \
    + raw_spin_lock(&ACCESS_PRIVATE(p, lock)); \
    + smp_mb__after_unlock_lock(); \
    +} while (0)
    +
    +#define raw_spin_unlock_rcu_node(p) raw_spin_unlock(&ACCESS_PRIVATE(p, lock))
    +
    +#define raw_spin_lock_irq_rcu_node(p) \
    +do { \
    + raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \
    + smp_mb__after_unlock_lock(); \
    +} while (0)
    +
    +#define raw_spin_unlock_irq_rcu_node(p) \
    + raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock))
    +
    +#define raw_spin_lock_irqsave_rcu_node(rnp, flags) \
    +do { \
    + typecheck(unsigned long, flags); \
    + raw_spin_lock_irqsave(&ACCESS_PRIVATE(rnp, lock), flags); \
    + smp_mb__after_unlock_lock(); \
    +} while (0)
    +
    +#define raw_spin_unlock_irqrestore_rcu_node(rnp, flags) \
    +do { \
    + typecheck(unsigned long, flags); \
    + raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(rnp, lock), flags); \
    +} while (0)
    +
    +#define raw_spin_trylock_rcu_node(p) \
    +({ \
    + bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock)); \
    + \
    + if (___locked) \
    + smp_mb__after_unlock_lock(); \
    + ___locked; \
    +})
    +
    #endif /* #if defined(SRCU) || !defined(TINY_RCU) */

    #ifdef CONFIG_TINY_RCU
    diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
    index a7f63f1074b4..baa0bac8da2a 100644
    --- a/kernel/rcu/tree.h
    +++ b/kernel/rcu/tree.h
    @@ -565,56 +565,3 @@ static inline void rcu_nocb_q_lengths(struct rcu_data *rdp, long *ql, long *qll)
    #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
    }
    #endif /* #ifdef CONFIG_RCU_TRACE */
    -
    -/*
    - * Wrappers for the rcu_node::lock acquire and release.
    - *
    - * Because the rcu_nodes form a tree, the tree traversal locking will observe
    - * different lock values, this in turn means that an UNLOCK of one level
    - * followed by a LOCK of another level does not imply a full memory barrier;
    - * and most importantly transitivity is lost.
    - *
    - * In order to restore full ordering between tree levels, augment the regular
    - * lock acquire functions with smp_mb__after_unlock_lock().
    - *
    - * As ->lock of struct rcu_node is a __private field, therefore one should use
    - * these wrappers rather than directly call raw_spin_{lock,unlock}* on ->lock.
    - */
    -#define raw_spin_lock_rcu_node(p) \
    -do { \
    - raw_spin_lock(&ACCESS_PRIVATE(p, lock)); \
    - smp_mb__after_unlock_lock(); \
    -} while (0)
    -
    -#define raw_spin_unlock_rcu_node(p) raw_spin_unlock(&ACCESS_PRIVATE(p, lock))
    -
    -#define raw_spin_lock_irq_rcu_node(p) \
    -do { \
    - raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \
    - smp_mb__after_unlock_lock(); \
    -} while (0)
    -
    -#define raw_spin_unlock_irq_rcu_node(p) \
    - raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock))
    -
    -#define raw_spin_lock_irqsave_rcu_node(rnp, flags) \
    -do { \
    - typecheck(unsigned long, flags); \
    - raw_spin_lock_irqsave(&ACCESS_PRIVATE(rnp, lock), flags); \
    - smp_mb__after_unlock_lock(); \
    -} while (0)
    -
    -#define raw_spin_unlock_irqrestore_rcu_node(rnp, flags) \
    -do { \
    - typecheck(unsigned long, flags); \
    - raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(rnp, lock), flags); \
    -} while (0)
    -
    -#define raw_spin_trylock_rcu_node(p) \
    -({ \
    - bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock)); \
    - \
    - if (___locked) \
    - smp_mb__after_unlock_lock(); \
    - ___locked; \
    -})
    --
    2.5.2
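
As a usage sketch (not part of the patch; the function name and body are
hypothetical), a caller in kernel/rcu/ acquires and releases an rcu_node's
__private ->lock only through these wrappers, so that the extra
smp_mb__after_unlock_lock() on the acquire side restores full ordering
between critical sections taken at different levels of the rcu_node tree:

    static void example_report_quiescent_state(struct rcu_node *rnp)
    {
    	unsigned long flags;

    	/*
    	 * Acquire rnp->lock; the wrapper adds smp_mb__after_unlock_lock()
    	 * so that this critical section is fully ordered after the
    	 * previous holder's critical section, even if that holder was
    	 * running on a different rcu_node.
    	 */
    	raw_spin_lock_irqsave_rcu_node(rnp, flags);

    	/* ... update state protected by rnp->lock ... */

    	/* Plain release; no extra barrier is needed on the unlock side. */
    	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
    }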