    Subject: [PATCH tip/core/rcu 18/19] rcu: Remove rcu_state_p pointer to default rcu_state structure
    The rcu_state_p pointer references the default rcu_state structure,
    that is, the one that call_rcu() uses, as opposed to call_rcu_bh()
    and sometimes call_rcu_sched(). But there is now only one rcu_state
    structure, so that one structure is by definition the default, which
    means that the rcu_state_p pointer no longer serves any useful purpose.
    This commit therefore removes it.
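
    As a minimal sketch of the pattern being applied (hypothetical foo_state
    names, not the actual RCU code): once a structure has exactly one
    instance, a "default instance" pointer is pure indirection, and callers
    can simply take the structure's address directly.

    struct foo_state {
    	unsigned long gp_seq;
    };

    static struct foo_state foo_state;	/* the one and only instance */

    #if 0	/* before: extra level of indirection through foo_state_p */
    static struct foo_state *const foo_state_p = &foo_state;

    unsigned long foo_get_gp_seq(void)
    {
    	return foo_state_p->gp_seq;
    }
    #else	/* after: reference the sole instance directly */
    unsigned long foo_get_gp_seq(void)
    {
    	return foo_state.gp_seq;
    }
    #endif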

    Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
    ---
    kernel/rcu/tree.c | 27 ++++++++++++---------------
    kernel/rcu/tree_exp.h | 2 +-
    kernel/rcu/tree_plugin.h | 16 ++++++++--------
    3 files changed, 21 insertions(+), 24 deletions(-)

    diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
    index 1dd8086ee90d..a3bcf08ad596 100644
    --- a/kernel/rcu/tree.c
    +++ b/kernel/rcu/tree.c
    @@ -85,7 +85,6 @@ struct rcu_state rcu_state = {
    .ofl_lock = __SPIN_LOCK_UNLOCKED(rcu_state.ofl_lock),
    };

    -static struct rcu_state *const rcu_state_p = &rcu_state;
    static struct rcu_data __percpu *const rcu_data_p = &rcu_data;
    LIST_HEAD(rcu_struct_flavors);

    @@ -491,7 +490,7 @@ static int rcu_pending(void);
    */
    unsigned long rcu_get_gp_seq(void)
    {
    - return READ_ONCE(rcu_state_p->gp_seq);
    + return READ_ONCE(rcu_state.gp_seq);
    }
    EXPORT_SYMBOL_GPL(rcu_get_gp_seq);

    @@ -510,7 +509,7 @@ EXPORT_SYMBOL_GPL(rcu_sched_get_gp_seq);
    */
    unsigned long rcu_bh_get_gp_seq(void)
    {
    - return READ_ONCE(rcu_state_p->gp_seq);
    + return READ_ONCE(rcu_state.gp_seq);
    }
    EXPORT_SYMBOL_GPL(rcu_bh_get_gp_seq);

    @@ -522,7 +521,7 @@ EXPORT_SYMBOL_GPL(rcu_bh_get_gp_seq);
    */
    unsigned long rcu_exp_batches_completed(void)
    {
    - return rcu_state_p->expedited_sequence;
    + return rcu_state.expedited_sequence;
    }
    EXPORT_SYMBOL_GPL(rcu_exp_batches_completed);

    @@ -541,7 +540,7 @@ EXPORT_SYMBOL_GPL(rcu_exp_batches_completed_sched);
    */
    void rcu_force_quiescent_state(void)
    {
    - force_quiescent_state(rcu_state_p);
    + force_quiescent_state(&rcu_state);
    }
    EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);

    @@ -550,7 +549,7 @@ EXPORT_SYMBOL_GPL(rcu_force_quiescent_state);
    */
    void rcu_bh_force_quiescent_state(void)
    {
    - force_quiescent_state(rcu_state_p);
    + force_quiescent_state(&rcu_state);
    }
    EXPORT_SYMBOL_GPL(rcu_bh_force_quiescent_state);

    @@ -611,7 +610,7 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
    case RCU_FLAVOR:
    case RCU_BH_FLAVOR:
    case RCU_SCHED_FLAVOR:
    - rsp = rcu_state_p;
    + rsp = &rcu_state;
    break;
    default:
    break;
    @@ -2291,7 +2290,6 @@ rcu_report_unblock_qs_rnp(struct rcu_state *rsp,

    raw_lockdep_assert_held_rcu_node(rnp);
    if (WARN_ON_ONCE(!IS_ENABLED(CONFIG_PREEMPT)) ||
    - WARN_ON_ONCE(rsp != rcu_state_p) ||
    WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp)) ||
    rnp->qsmask != 0) {
    raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
    @@ -2603,7 +2601,6 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *rsp))
    raw_spin_lock_irqsave_rcu_node(rnp, flags);
    if (rnp->qsmask == 0) {
    if (!IS_ENABLED(CONFIG_PREEMPT) ||
    - rsp != rcu_state_p ||
    rcu_preempt_blocked_readers_cgp(rnp)) {
    /*
    * No point in scanning bits because they
    @@ -2972,7 +2969,7 @@ __call_rcu(struct rcu_head *head, rcu_callback_t func,
    */
    void call_rcu(struct rcu_head *head, rcu_callback_t func)
    {
    - __call_rcu(head, func, rcu_state_p, -1, 0);
    + __call_rcu(head, func, &rcu_state, -1, 0);
    }
    EXPORT_SYMBOL_GPL(call_rcu);

    @@ -2999,7 +2996,7 @@ EXPORT_SYMBOL_GPL(call_rcu_sched);
    void kfree_call_rcu(struct rcu_head *head,
    rcu_callback_t func)
    {
    - __call_rcu(head, func, rcu_state_p, -1, 1);
    + __call_rcu(head, func, &rcu_state, -1, 1);
    }
    EXPORT_SYMBOL_GPL(kfree_call_rcu);

    @@ -3028,7 +3025,7 @@ unsigned long get_state_synchronize_rcu(void)
    * before the load from ->gp_seq.
    */
    smp_mb(); /* ^^^ */
    - return rcu_seq_snap(&rcu_state_p->gp_seq);
    + return rcu_seq_snap(&rcu_state.gp_seq);
    }
    EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);

    @@ -3048,7 +3045,7 @@ EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);
    */
    void cond_synchronize_rcu(unsigned long oldstate)
    {
    - if (!rcu_seq_done(&rcu_state_p->gp_seq, oldstate))
    + if (!rcu_seq_done(&rcu_state.gp_seq, oldstate))
    synchronize_rcu();
    else
    smp_mb(); /* Ensure GP ends before subsequent accesses. */
    @@ -3307,7 +3304,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
    */
    void rcu_barrier_bh(void)
    {
    - _rcu_barrier(rcu_state_p);
    + _rcu_barrier(&rcu_state);
    }
    EXPORT_SYMBOL_GPL(rcu_barrier_bh);

    @@ -3321,7 +3318,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier_bh);
    */
    void rcu_barrier(void)
    {
    - _rcu_barrier(rcu_state_p);
    + _rcu_barrier(&rcu_state);
    }
    EXPORT_SYMBOL_GPL(rcu_barrier);

    diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
    index 3a8a582d9958..298a6904bbcd 100644
    --- a/kernel/rcu/tree_exp.h
    +++ b/kernel/rcu/tree_exp.h
    @@ -756,7 +756,7 @@ static void sync_sched_exp_online_cleanup(int cpu)
    */
    void synchronize_rcu_expedited(void)
    {
    - struct rcu_state *rsp = rcu_state_p;
    + struct rcu_state *rsp = &rcu_state;

    RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
    lock_is_held(&rcu_lock_map) ||
    diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
    index b7a99a6e64b6..329d5802d899 100644
    --- a/kernel/rcu/tree_plugin.h
    +++ b/kernel/rcu/tree_plugin.h
    @@ -381,7 +381,7 @@ void rcu_note_context_switch(bool preempt)
    */
    rcu_qs();
    if (rdp->deferred_qs)
    - rcu_report_exp_rdp(rcu_state_p, rdp);
    + rcu_report_exp_rdp(&rcu_state, rdp);
    trace_rcu_utilization(TPS("End context switch"));
    barrier(); /* Avoid RCU read-side critical sections leaking up. */
    }
    @@ -509,7 +509,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
    * blocked-tasks list below.
    */
    if (rdp->deferred_qs) {
    - rcu_report_exp_rdp(rcu_state_p, rdp);
    + rcu_report_exp_rdp(&rcu_state, rdp);
    if (!t->rcu_read_unlock_special.s) {
    local_irq_restore(flags);
    return;
    @@ -566,7 +566,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
    rnp->grplo,
    rnp->grphi,
    !!rnp->gp_tasks);
    - rcu_report_unblock_qs_rnp(rcu_state_p, rnp, flags);
    + rcu_report_unblock_qs_rnp(&rcu_state, rnp, flags);
    } else {
    raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
    }
    @@ -580,7 +580,7 @@ rcu_preempt_deferred_qs_irqrestore(struct task_struct *t, unsigned long flags)
    * then we need to report up the rcu_node hierarchy.
    */
    if (!empty_exp && empty_exp_now)
    - rcu_report_exp_rnp(rcu_state_p, rnp, true);
    + rcu_report_exp_rnp(&rcu_state, rnp, true);
    } else {
    local_irq_restore(flags);
    }
    @@ -1300,7 +1300,7 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
    struct sched_param sp;
    struct task_struct *t;

    - if (rcu_state_p != rsp)
    + if (&rcu_state != rsp)
    return 0;

    if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0)
    @@ -1431,8 +1431,8 @@ static void __init rcu_spawn_boost_kthreads(void)
    for_each_possible_cpu(cpu)
    per_cpu(rcu_cpu_has_work, cpu) = 0;
    BUG_ON(smpboot_register_percpu_thread(&rcu_cpu_thread_spec));
    - rcu_for_each_leaf_node(rcu_state_p, rnp)
    - (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
    + rcu_for_each_leaf_node(&rcu_state, rnp)
    + (void)rcu_spawn_one_boost_kthread(&rcu_state, rnp);
    }

    static void rcu_prepare_kthreads(int cpu)
    @@ -1442,7 +1442,7 @@ static void rcu_prepare_kthreads(int cpu)

    /* Fire up the incoming CPU's kthread and leaf rcu_node kthread. */
    if (rcu_scheduler_fully_active)
    - (void)rcu_spawn_one_boost_kthread(rcu_state_p, rnp);
    + (void)rcu_spawn_one_boost_kthread(&rcu_state, rnp);
    }

    #else /* #ifdef CONFIG_RCU_BOOST */
    --
    2.17.1