    From: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
    Subject: [PATCH v3 tip/core/rcu 4/6] rcu: Abstract extended quiescent state determination
    Date: 24 Jan 2017
    This commit is the fourth step towards full abstraction of all accesses
    to the ->dynticks counter, implementing previously open-coded checks and
    comparisons in new rcu_dynticks_in_eqs() and rcu_dynticks_in_eqs_since()
    functions. This abstraction will ease changes to the ->dynticks counter
    operation.

    Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
    Reviewed-by: Josh Triplett <josh@joshtriplett.org>
    ---
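
    For readers skimming the diff below, the encoding that the new helpers abstract is straightforward: the ->dynticks counter is odd while a CPU is non-idle (RCU is watching it) and even while the CPU is in an extended quiescent state (EQS), so rcu_dynticks_in_eqs() reduces to a parity test and rcu_dynticks_in_eqs_since() to a snapshot comparison. The following is a minimal stand-alone C model of that encoding; the names and the plain int counter are illustrative only and are not the kernel implementation.

    /* Stand-alone model of the EQS predicates added below; illustrative only. */
    #include <stdbool.h>
    #include <stdio.h>

    static int dynticks = 1;			/* odd: CPU is non-idle, so RCU is watching */

    static int dynticks_snap(void)		/* models rcu_dynticks_snap() */
    {
    	return dynticks;
    }

    static bool dynticks_in_eqs(int snap)	/* models rcu_dynticks_in_eqs() */
    {
    	return !(snap & 0x1);		/* even counter value => EQS */
    }

    /* models rcu_dynticks_in_eqs_since(): the counter only moves by passing
     * through an EQS, so any change since the snapshot implies one. */
    static bool dynticks_in_eqs_since(int snap)
    {
    	return snap != dynticks_snap();
    }

    int main(void)
    {
    	int snap = dynticks_snap();

    	printf("in EQS now? %d\n", dynticks_in_eqs(snap));	/* 0: counter is odd */
    	dynticks++;	/* CPU enters idle: counter becomes even (EQS) */
    	dynticks++;	/* CPU leaves idle: counter is odd again */
    	printf("EQS since snapshot? %d\n", dynticks_in_eqs_since(snap));	/* 1 */
    	return 0;
    }
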
    include/linux/rcutiny.h  |  6 ++++++
    kernel/rcu/tree.c        | 52 +++++++++++++++++++++++++++++++++++-------------
    kernel/rcu/tree.h        |  2 ++
    kernel/rcu/tree_exp.h    |  6 +++---
    kernel/rcu/tree_plugin.h |  2 +-
    kernel/rcu/tree_trace.c  |  2 +-
    6 files changed, 51 insertions(+), 19 deletions(-)

    diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
    index ac81e4063b40..4f9b2fa2173d 100644
    --- a/include/linux/rcutiny.h
    +++ b/include/linux/rcutiny.h
    @@ -27,6 +27,12 @@

    #include <linux/cache.h>

    +struct rcu_dynticks;
    +static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
    +{
    +	return 0;
    +}
    +
    static inline unsigned long get_state_synchronize_rcu(void)
    {
    	return 0;
    diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
    index 3169d5a21b55..8b970319c75b 100644
    --- a/kernel/rcu/tree.c
    +++ b/kernel/rcu/tree.c
    @@ -337,10 +337,22 @@ static void rcu_dynticks_eqs_online(void)
    }

    /*
    + * Is the current CPU in an extended quiescent state?
    + *
    + * No ordering, as we are sampling CPU-local information.
    + */
    +bool rcu_dynticks_curr_cpu_in_eqs(void)
    +{
    +	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
    +
    +	return !(atomic_read(&rdtp->dynticks) & 0x1);
    +}
    +
    +/*
    * Snapshot the ->dynticks counter with full ordering so as to allow
    * stable comparison of this counter with past and future snapshots.
    */
    -static int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
    +int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
    {
    	int snap = atomic_add_return(0, &rdtp->dynticks);

    @@ -348,6 +360,25 @@ static int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
    }

    /*
    + * Return true if the snapshot returned from rcu_dynticks_snap()
    + * indicates that RCU is in an extended quiescent state.
    + */
    +static bool rcu_dynticks_in_eqs(int snap)
    +{
    +	return !(snap & 0x1);
    +}
    +
    +/*
    + * Return true if the CPU corresponding to the specified rcu_dynticks
    + * structure has spent some time in an extended quiescent state since
    + * rcu_dynticks_snap() returned the specified snapshot.
    + */
    +static bool rcu_dynticks_in_eqs_since(struct rcu_dynticks *rdtp, int snap)
    +{
    +	return snap != rcu_dynticks_snap(rdtp);
    +}
    +
    +/*
    * Do a double-increment of the ->dynticks counter to emulate a
    * momentary idle-CPU quiescent state.
    */
    @@ -1045,7 +1076,7 @@ void rcu_nmi_enter(void)
    * to be in the outermost NMI handler that interrupted an RCU-idle
    * period (observation due to Andy Lutomirski).
    */
    -	if (!(atomic_read(&rdtp->dynticks) & 0x1)) {
    +	if (rcu_dynticks_curr_cpu_in_eqs()) {
    		rcu_dynticks_eqs_exit();
    		incby = 1;
    	}
    @@ -1071,7 +1102,7 @@ void rcu_nmi_exit(void)
    * to us!)
    */
    	WARN_ON_ONCE(rdtp->dynticks_nmi_nesting <= 0);
    -	WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
    +	WARN_ON_ONCE(rcu_dynticks_curr_cpu_in_eqs());

    /*
    * If the nesting level is not 1, the CPU wasn't RCU-idle, so
    @@ -1097,9 +1128,7 @@ void rcu_nmi_exit(void)
    */
    bool notrace __rcu_is_watching(void)
    {
    -	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
    -
    -	return atomic_read(&rdtp->dynticks) & 0x1;
    +	return !rcu_dynticks_curr_cpu_in_eqs();
    }

    /**
    @@ -1184,7 +1213,7 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
    {
    	rdp->dynticks_snap = rcu_dynticks_snap(rdp->dynticks);
    	rcu_sysidle_check_cpu(rdp, isidle, maxj);
    -	if ((rdp->dynticks_snap & 0x1) == 0) {
    +	if (rcu_dynticks_in_eqs(rdp->dynticks_snap)) {
    		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
    		if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4,
    				 rdp->mynode->gpnum))
    @@ -1203,12 +1232,7 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
    static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
    				    bool *isidle, unsigned long *maxj)
    {
    -	unsigned int curr;
    	int *rcrmp;
    -	unsigned int snap;
    -
    -	curr = (unsigned int)rcu_dynticks_snap(rdp->dynticks);
    -	snap = (unsigned int)rdp->dynticks_snap;

    /*
    * If the CPU passed through or entered a dynticks idle phase with
    @@ -1218,7 +1242,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
    * read-side critical section that started before the beginning
    * of the current RCU grace period.
    */
    -	if ((curr & 0x1) == 0 || UINT_CMP_GE(curr, snap + 2)) {
    +	if (rcu_dynticks_in_eqs_since(rdp->dynticks, rdp->dynticks_snap)) {
    		trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
    		rdp->dynticks_fqs++;
    		return 1;
    @@ -3807,7 +3831,7 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
    	rdp->grpmask = leaf_node_cpu_bit(rdp->mynode, cpu);
    	rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
    	WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
    -	WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
    +	WARN_ON_ONCE(rcu_dynticks_in_eqs(rcu_dynticks_snap(rdp->dynticks)));
    	rdp->cpu = cpu;
    	rdp->rsp = rsp;
    	rcu_boot_init_nocb_percpu_data(rdp);
    diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
    index fe98dd24adf8..3b953dcf6afc 100644
    --- a/kernel/rcu/tree.h
    +++ b/kernel/rcu/tree.h
    @@ -595,6 +595,8 @@ extern struct rcu_state rcu_bh_state;
    extern struct rcu_state rcu_preempt_state;
    #endif /* #ifdef CONFIG_PREEMPT_RCU */

    +int rcu_dynticks_snap(struct rcu_dynticks *rdtp);
    +
    #ifdef CONFIG_RCU_BOOST
    DECLARE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
    DECLARE_PER_CPU(int, rcu_cpu_kthread_cpu);
    diff --git a/kernel/rcu/tree_exp.h b/kernel/rcu/tree_exp.h
    index 011f626b2fd8..e155a465cf84 100644
    --- a/kernel/rcu/tree_exp.h
    +++ b/kernel/rcu/tree_exp.h
    @@ -360,7 +360,7 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
    			rdp->exp_dynticks_snap =
    				rcu_dynticks_snap(rdp->dynticks);
    			if (raw_smp_processor_id() == cpu ||
    -			    !(rdp->exp_dynticks_snap & 0x1) ||
    +			    rcu_dynticks_in_eqs(rdp->exp_dynticks_snap) ||
    			    !(rnp->qsmaskinitnext & rdp->grpmask))
    				mask_ofl_test |= rdp->grpmask;
    		}
    @@ -383,8 +383,8 @@ static void sync_rcu_exp_select_cpus(struct rcu_state *rsp,
    			if (!(mask_ofl_ipi & mask))
    				continue;
    retry_ipi:
    -			if (rcu_dynticks_snap(rdp->dynticks) !=
    -			    rdp->exp_dynticks_snap) {
    +			if (rcu_dynticks_in_eqs_since(rdp->dynticks,
    +						      rdp->exp_dynticks_snap)) {
    				mask_ofl_test |= mask;
    				continue;
    			}
    diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
    index 56583e764ebf..652209589adf 100644
    --- a/kernel/rcu/tree_plugin.h
    +++ b/kernel/rcu/tree_plugin.h
    @@ -1643,7 +1643,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
    "o."[!!(rdp->grpmask & rdp->mynode->qsmaskinit)],
    "N."[!!(rdp->grpmask & rdp->mynode->qsmaskinitnext)],
    	       ticks_value, ticks_title,
    -	       atomic_read(&rdtp->dynticks) & 0xfff,
    +	       rcu_dynticks_snap(rdtp) & 0xfff,
    	       rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
    	       rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
    	       READ_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
    diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
    index b1f28972872c..b833cd0a29e8 100644
    --- a/kernel/rcu/tree_trace.c
    +++ b/kernel/rcu/tree_trace.c
    @@ -124,7 +124,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
    		   rdp->rcu_qs_ctr_snap == per_cpu(rcu_qs_ctr, rdp->cpu),
    		   rdp->core_needs_qs);
    	seq_printf(m, " dt=%d/%llx/%d df=%lu",
    -		   atomic_read(&rdp->dynticks->dynticks),
    +		   rcu_dynticks_snap(rdp->dynticks),
    		   rdp->dynticks->dynticks_nesting,
    		   rdp->dynticks->dynticks_nmi_nesting,
    		   rdp->dynticks_fqs);
    --
    2.5.2
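
    A further note on the dyntick_save_progress_counter() and rcu_implicit_dynticks_qs() hunks above: the grace-period kthread records a per-CPU snapshot on its first scan and, on a later scan, credits a CPU with a quiescent state if that snapshot already showed an EQS or if the counter has moved since. The sketch below models that two-pass flow in stand-alone C; the array, the CPU count, and the hard-coded counter values are invented for illustration and are not kernel code.

    /* Illustrative two-pass scan modeled on the hunks above; not kernel code. */
    #include <stdbool.h>
    #include <stdio.h>

    #define NR_CPUS 4

    static int dynticks[NR_CPUS] = { 1, 2, 1, 1 };	/* odd = non-idle, even = EQS */
    static int snap[NR_CPUS];				/* snapshots from the first pass */

    static bool in_eqs(int s)		{ return !(s & 0x1); }
    static bool in_eqs_since(int cpu)	{ return snap[cpu] != dynticks[cpu]; }

    int main(void)
    {
    	int cpu;

    	/* First pass: record a snapshot per CPU (dyntick_save_progress_counter()). */
    	for (cpu = 0; cpu < NR_CPUS; cpu++)
    		snap[cpu] = dynticks[cpu];

    	dynticks[2] += 2;	/* CPU 2 passes through idle before the second pass */

    	/* Second pass: which CPUs need no further prodding (rcu_implicit_dynticks_qs())? */
    	for (cpu = 0; cpu < NR_CPUS; cpu++)
    		printf("cpu %d: quiescent=%d\n", cpu,
    		       in_eqs(snap[cpu]) || in_eqs_since(cpu));
    	return 0;
    }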