 
From: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
Subject: [PATCH v2 tip/core/rcu 09/39] rcu: Pull rcu_qs_ctr into rcu_dynticks structure
    The rcu_qs_ctr variable is yet another isolated per-CPU variable,
    so this commit pulls it into the pre-existing rcu_dynticks per-CPU
    structure.
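
    As a point of reference, here is a minimal sketch of the
    access-pattern change (names taken from the diff below; the
    surrounding kernel code is elided, so this is illustrative
    rather than a literal excerpt):

	/* Before: an isolated per-CPU counter with its own declaration. */
	DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr);

	this_cpu_inc(rcu_qs_ctr);

	/*
	 * After: the counter is a field in the pre-existing per-CPU
	 * rcu_dynticks structure, so it is declared and accessed
	 * alongside the CPU's other dyntick-idle state.
	 */
	struct rcu_dynticks {
		/* ... existing fields ... */
		unsigned long rcu_qs_ctr; /* Light universal quiescent state ctr. */
	};

	this_cpu_inc(rcu_dynticks.rcu_qs_ctr);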

    Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
    ---
    .../RCU/Design/Data-Structures/Data-Structures.html | 12 ++++++++++--
    kernel/rcu/tree.c                                   | 15 ++++++---------
    kernel/rcu/tree.h                                   |  3 ++-
    kernel/rcu/tree_trace.c                             |  4 +---
    4 files changed, 19 insertions(+), 15 deletions(-)

    diff --git a/Documentation/RCU/Design/Data-Structures/Data-Structures.html b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
    index bf7f266e8888..3d0311657533 100644
    --- a/Documentation/RCU/Design/Data-Structures/Data-Structures.html
    +++ b/Documentation/RCU/Design/Data-Structures/Data-Structures.html
    @@ -1105,6 +1105,7 @@ Its fields are as follows:
    2 int dynticks_nmi_nesting;
    3 atomic_t dynticks;
    4 int rcu_sched_qs_mask;
    + 5 unsigned long rcu_qs_ctr;
    </pre>

    <p>The <tt>-&gt;dynticks_nesting</tt> field counts the
    @@ -1123,12 +1124,19 @@ CPU's transitions to and from dyntick-idle mode, so that this counter
    has an even value when the CPU is in dyntick-idle mode and an odd
    value otherwise.

    -</p><p>Finally, the <tt>-&gt;rcu_sched_qs_mask</tt> field is used
    +</p><p>The <tt>-&gt;rcu_sched_qs_mask</tt> field is used
    to record the fact that the RCU core code would really like to
    -see a quiescent state from the corresponding CPU.
    +see a quiescent state from the corresponding CPU, so much so that
    +it is willing to call for heavy-weight dyntick-counter operations.
    This flag is checked by RCU's context-switch and <tt>cond_resched()</tt>
    code, which provide a momentary idle sojourn in response.

    +</p><p>Finally, the <tt>-&gt;rcu_qs_ctr</tt> field is used to record
    +quiescent states from <tt>cond_resched()</tt>.
    +Because <tt>cond_resched()</tt> can execute quite frequently, this
    +must be quite lightweight, as in a non-atomic increment of this
    +per-CPU field.
    +
    <table>
    <tr><th>&nbsp;</th></tr>
    <tr><th align="left">Quick Quiz:</th></tr>
    diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
    index 0e8d91f36bb6..fbee1d729c4b 100644
    --- a/kernel/rcu/tree.c
    +++ b/kernel/rcu/tree.c
    @@ -436,9 +436,6 @@ bool rcu_eqs_special_set(int cpu)
    return true;
    }

    -DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr);
    -EXPORT_PER_CPU_SYMBOL_GPL(rcu_qs_ctr);
    -
    /*
    * Let the RCU core know that this CPU has gone through the scheduler,
    * which is a quiescent state. This is called when the need for a
    @@ -542,7 +539,7 @@ void rcu_all_qs(void)
    rcu_sched_qs();
    preempt_enable();
    }
    - this_cpu_inc(rcu_qs_ctr);
    + this_cpu_inc(rcu_dynticks.rcu_qs_ctr);
    barrier(); /* Avoid RCU read-side critical sections leaking up. */
    }
    EXPORT_SYMBOL_GPL(rcu_all_qs);
    @@ -1315,7 +1312,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
    */
    rnp = rdp->mynode;
    if (time_after(jiffies, rdp->rsp->gp_start + jtsq) &&
    - READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_qs_ctr, rdp->cpu) &&
    + READ_ONCE(rdp->rcu_qs_ctr_snap) != per_cpu(rcu_dynticks.rcu_qs_ctr, rdp->cpu) &&
    READ_ONCE(rdp->gpnum) == rnp->gpnum && !rdp->gpwrap) {
    trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("rqc"));
    return 1;
    @@ -2024,7 +2021,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
    trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpustart"));
    need_gp = !!(rnp->qsmask & rdp->grpmask);
    rdp->cpu_no_qs.b.norm = need_gp;
    - rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
    + rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
    rdp->core_needs_qs = need_gp;
    zero_cpu_stall_ticks(rdp);
    WRITE_ONCE(rdp->gpwrap, false);
    @@ -2622,7 +2619,7 @@ rcu_report_qs_rdp(int cpu, struct rcu_state *rsp, struct rcu_data *rdp)
    * within the current grace period.
    */
    rdp->cpu_no_qs.b.norm = true; /* need qs for new gp. */
    - rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
    + rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_dynticks.rcu_qs_ctr);
    raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
    return;
    }
    @@ -3620,7 +3617,7 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
    /* Is the RCU core waiting for a quiescent state from this CPU? */
    if (rcu_scheduler_fully_active &&
    rdp->core_needs_qs && rdp->cpu_no_qs.b.norm &&
    - rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_qs_ctr)) {
    + rdp->rcu_qs_ctr_snap == __this_cpu_read(rcu_dynticks.rcu_qs_ctr)) {
    rdp->n_rp_core_needs_qs++;
    } else if (rdp->core_needs_qs && !rdp->cpu_no_qs.b.norm) {
    rdp->n_rp_report_qs++;
    @@ -3933,7 +3930,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
    rdp->gpnum = rnp->completed; /* Make CPU later note any new GP. */
    rdp->completed = rnp->completed;
    rdp->cpu_no_qs.b.norm = true;
    - rdp->rcu_qs_ctr_snap = per_cpu(rcu_qs_ctr, cpu);
    + rdp->rcu_qs_ctr_snap = per_cpu(rcu_dynticks.rcu_qs_ctr, cpu);
    rdp->core_needs_qs = false;
    trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
    raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
    diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
    index e298281984dc..76e4467bc765 100644
    --- a/kernel/rcu/tree.h
    +++ b/kernel/rcu/tree.h
    @@ -113,7 +113,8 @@ struct rcu_dynticks {
    /* Process level is worth LLONG_MAX/2. */
    int dynticks_nmi_nesting; /* Track NMI nesting level. */
    atomic_t dynticks; /* Even value for idle, else odd. */
    - int rcu_sched_qs_mask; /* GP old, need quiescent state. */
    + int rcu_sched_qs_mask; /* GP old, need heavy quiescent state. */
    + unsigned long rcu_qs_ctr; /* Light universal quiescent state ctr. */
    #ifdef CONFIG_NO_HZ_FULL_SYSIDLE
    long long dynticks_idle_nesting;
    /* irq/process nesting level from idle. */
    diff --git a/kernel/rcu/tree_trace.c b/kernel/rcu/tree_trace.c
    index 8751a748499a..65b43be38e68 100644
    --- a/kernel/rcu/tree_trace.c
    +++ b/kernel/rcu/tree_trace.c
    @@ -45,8 +45,6 @@
    #define RCU_TREE_NONCORE
    #include "tree.h"

    -DECLARE_PER_CPU_SHARED_ALIGNED(unsigned long, rcu_qs_ctr);
    -
    static int r_open(struct inode *inode, struct file *file,
    const struct seq_operations *op)
    {
    @@ -121,7 +119,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
    cpu_is_offline(rdp->cpu) ? '!' : ' ',
    ulong2long(rdp->completed), ulong2long(rdp->gpnum),
    rdp->cpu_no_qs.b.norm,
    - rdp->rcu_qs_ctr_snap == per_cpu(rcu_qs_ctr, rdp->cpu),
    + rdp->rcu_qs_ctr_snap == per_cpu(rdp->dynticks->rcu_qs_ctr, rdp->cpu),
    rdp->core_needs_qs);
    seq_printf(m, " dt=%d/%llx/%d df=%lu",
    rcu_dynticks_snap(rdp->dynticks),
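
    For readers skimming the hunks above, a condensed sketch of how the
    relocated counter is used (distilled from the tree.c changes in this
    patch; locking and the time_after() aging check are elided, so again
    illustrative rather than literal):

	/*
	 * Report side, reached from cond_resched() via rcu_all_qs():
	 * a plain non-atomic per-CPU increment, cheap enough to run
	 * on every cond_resched() invocation.
	 */
	this_cpu_inc(rcu_dynticks.rcu_qs_ctr);

	/*
	 * Detect side, in rcu_implicit_dynticks_qs(): compare the
	 * snapshot taken at grace-period start against the current
	 * value; any difference means the CPU passed through a
	 * quiescent state since the snapshot was taken.
	 */
	if (READ_ONCE(rdp->rcu_qs_ctr_snap) !=
	    per_cpu(rcu_dynticks.rcu_qs_ctr, rdp->cpu))
		return 1; /* Quiescent state observed. */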
    --
    2.5.2