    From: Josh Triplett <josh@joshtriplett.org>
    Date: Sat, 21 Jan 2017
    Subject: Re: [PATCH v2 tip/core/rcu 3/6] rcu: Abstract dynticks extended quiescent state enter/exit operations
    On Tue, Jan 17, 2017 at 06:45:25PM -0800, Paul E. McKenney wrote:
    > This commit is the third step towards full abstraction of all accesses
    > to the ->dynticks counter, implementing the previously open-coded atomic
    > add of 1 and entry checks in a new rcu_dynticks_eqs_enter() function, and
    > the same but with exit checks in a new rcu_dynticks_eqs_exit() function.
    > This abstraction will ease changes to the ->dynticks counter operation.
    >
    > Note that this commit gets rid of the smp_mb__before_atomic() and the
    > smp_mb__after_atomic() calls that were previously present. The reason
    > that this is OK from a memory-ordering perspective is that the atomic
    > operation is now atomic_add_return(), which, as a value-returning atomic,
    > guarantees full ordering.
    >
    > Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>

    A couple of issues with RCU_TRACE usage noted below. With those fixed:
    Reviewed-by: Josh Triplett <josh@joshtriplett.org>
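
    For anyone wondering why the explicit barrier pair can go away: a
    value-returning atomic like atomic_inc_return() implies full ordering
    by itself, whereas a plain atomic_inc() is unordered and needed the
    smp_mb__before_atomic()/smp_mb__after_atomic() bracket. A minimal
    userspace sketch of the before/after patterns, with C11 atomics
    standing in for the kernel's atomic_t API (model code only, not from
    the patch):

	#include <stdatomic.h>
	#include <stdio.h>

	static atomic_int dynticks;

	/* Old pattern: unordered increment bracketed by two full barriers. */
	static void eqs_enter_old(void)
	{
		atomic_thread_fence(memory_order_seq_cst);	/* smp_mb__before_atomic() */
		atomic_fetch_add_explicit(&dynticks, 1, memory_order_relaxed);
		atomic_thread_fence(memory_order_seq_cst);	/* smp_mb__after_atomic() */
	}

	/* New pattern: one value-returning increment, fully ordered by itself. */
	static int eqs_enter_new(void)
	{
		return atomic_fetch_add(&dynticks, 1) + 1;	/* seq_cst by default */
	}

	int main(void)
	{
		eqs_enter_old();
		printf("counter after both increments: %d\n", eqs_enter_new());
		return 0;
	}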

    > kernel/rcu/tree.c | 88 +++++++++++++++++++++++++++++++++++++++----------------
    > 1 file changed, 62 insertions(+), 26 deletions(-)
    >
    > diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
    > index 805d55ee0b2a..70b01e1983e6 100644
    > --- a/kernel/rcu/tree.c
    > +++ b/kernel/rcu/tree.c
    > @@ -282,6 +282,61 @@ static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
    >  };
    >
    >  /*
    > + * Record entry into an extended quiescent state. This is only to be
    > + * called when not already in an extended quiescent state.
    > + */
    > +static void rcu_dynticks_eqs_enter(void)
    > +{
    > +	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
    > +	int special;
    > +
    > +	/*
    > +	 * CPUs seeing atomic_inc_return() must see prior RCU read-side
    > +	 * critical sections, and we also must force ordering with the
    > +	 * next idle sojourn.
    > +	 */
    > +	special = atomic_inc_return(&rdtp->dynticks);
    > +	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && special & 0x1);
    > +}
    > +
    > +/*
    > + * Record exit from an extended quiescent state. This is only to be
    > + * called from an extended quiescent state.
    > + */
    > +static void rcu_dynticks_eqs_exit(void)
    > +{
    > +	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
    > +	int special;
    > +
    > +	/*
    > +	 * CPUs seeing atomic_inc_return() must see prior idle sojourns,
    > +	 * and we also must force ordering with the next RCU read-side
    > +	 * critical section.
    > +	 */
    > +	special = atomic_inc_return(&rdtp->dynticks);
    > +	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && !(special & 0x1));
    > +}
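
    The two WARN_ON_ONCE() checks above encode the ->dynticks parity
    convention: even means the CPU is in an extended quiescent state, odd
    means it is not, and each enter or exit flips the parity. A tiny
    userspace model of just that invariant, with assert() standing in for
    WARN_ON_ONCE() (model code, not kernel code):

	#include <assert.h>
	#include <stdatomic.h>

	static atomic_int dynticks;	/* even: in EQS (idle), odd: not in EQS */

	static void model_eqs_enter(void)
	{
		int special = atomic_fetch_add(&dynticks, 1) + 1;
		assert(!(special & 0x1));	/* entering EQS must land on even */
	}

	static void model_eqs_exit(void)
	{
		int special = atomic_fetch_add(&dynticks, 1) + 1;
		assert(special & 0x1);		/* leaving EQS must land on odd */
	}

	int main(void)
	{
		model_eqs_exit();	/* 0 -> 1: CPU now non-idle */
		model_eqs_enter();	/* 1 -> 2: CPU idle again */
		return 0;
	}
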
    > +
    > +/*
    > + * Reset the current CPU's ->dynticks counter to indicate that the
    > + * newly onlined CPU is no longer in an extended quiescent state.
    > + * This will either leave the counter unchanged, or increment it
    > + * to the next non-quiescent value.
    > + *
    > + * The non-atomic test/increment sequence works because the upper bits
    > + * of the ->dynticks counter are manipulated only by the corresponding CPU,
    > + * or when the corresponding CPU is offline.
    > + */
    > +static void rcu_dynticks_eqs_online(void)
    > +{
    > +	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
    > +
    > +	if (atomic_read(&rdtp->dynticks) & 0x1)
    > +		return;
    > +	atomic_add(0x1, &rdtp->dynticks);
    > +}
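
    Worth spelling out the comment's claim: the read and the add above are
    two separate operations rather than one atomic read-modify-write, but
    nothing else may flip this CPU's counter parity while the CPU is being
    onlined, so the pair cannot race. A userspace sketch of the same logic
    (the model_eqs_online() name is hypothetical, not a kernel function):

	#include <stdatomic.h>

	static atomic_int dynticks;	/* even: in EQS, odd: not in EQS */

	static void model_eqs_online(void)
	{
		if (atomic_load(&dynticks) & 0x1)
			return;			/* already odd: nothing to do */
		atomic_fetch_add(&dynticks, 1);	/* force parity to odd (non-idle) */
	}

	int main(void)
	{
		model_eqs_online();	/* 0 -> 1 */
		model_eqs_online();	/* already odd: unchanged */
		return 0;
	}
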
    > +
    > +/*
    >   * Snapshot the ->dynticks counter with full ordering so as to allow
    >   * stable comparison of this counter with past and future snapshots.
    >   */
    > @@ -693,7 +748,7 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
    >  {
    >  	struct rcu_state *rsp;
    >  	struct rcu_data *rdp;
    > -	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
    > +	RCU_TRACE(struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks));

    RCU_TRACE doesn't act like a statement, and doesn't take a semicolon; it
    just acts like a shorthand for an #ifdef. The semicolon belongs inside
    the macro parentheses, since it goes with the declaration. Otherwise,
    this compiles to the empty statement ';' without tracing. Among other
    things, that would lead to surprises for anyone who added a subsequent
    declaration, because ';' followed by a declaration will produce a
    -Wdeclaration-after-statement warning.
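
    Concretely, RCU_TRACE is defined in kernel/rcu/rcu.h along these lines:

	#ifdef CONFIG_RCU_TRACE
	#define RCU_TRACE(stmt) stmt
	#else
	#define RCU_TRACE(stmt)
	#endif

    so the declaration wants to read:

	RCU_TRACE(struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);)

    which expands to nothing at all with CONFIG_RCU_TRACE=n, rather than
    to a stray ';'.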

    >  	trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
    >  	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
    > @@ -712,12 +767,7 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
    >  		do_nocb_deferred_wakeup(rdp);
    >  	}
    >  	rcu_prepare_for_idle();
    > -	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
    > -	smp_mb__before_atomic(); /* See above. */
    > -	atomic_inc(&rdtp->dynticks);
    > -	smp_mb__after_atomic(); /* Force ordering with next sojourn. */
    > -	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
    > -		     atomic_read(&rdtp->dynticks) & 0x1);
    > +	rcu_dynticks_eqs_enter();
    >  	rcu_dynticks_task_enter();
    >
    >  	/*
    > @@ -846,15 +896,10 @@ void rcu_irq_exit_irqson(void)
    >   */
    >  static void rcu_eqs_exit_common(long long oldval, int user)
    >  {
    > -	struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
    > +	RCU_TRACE(struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks));

    As above.

    >  	rcu_dynticks_task_exit();
    > -	smp_mb__before_atomic(); /* Force ordering w/previous sojourn. */
    > -	atomic_inc(&rdtp->dynticks);
    > -	/* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
    > -	smp_mb__after_atomic(); /* See above. */
    > -	WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
    > -		     !(atomic_read(&rdtp->dynticks) & 0x1));
    > +	rcu_dynticks_eqs_exit();
    >  	rcu_cleanup_after_idle();
    >  	trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
    >  	if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
    > @@ -1001,11 +1046,7 @@ void rcu_nmi_enter(void)
    >  	 * period (observation due to Andy Lutomirski).
    >  	 */
    >  	if (!(atomic_read(&rdtp->dynticks) & 0x1)) {
    > -		smp_mb__before_atomic(); /* Force delay from prior write. */
    > -		atomic_inc(&rdtp->dynticks);
    > -		/* atomic_inc() before later RCU read-side crit sects */
    > -		smp_mb__after_atomic(); /* See above. */
    > -		WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
    > +		rcu_dynticks_eqs_exit();
    >  		incby = 1;
    >  	}
    >  	rdtp->dynticks_nmi_nesting += incby;
    > @@ -1043,11 +1084,7 @@ void rcu_nmi_exit(void)
    >
    >  	/* This NMI interrupted an RCU-idle CPU, restore RCU-idleness. */
    >  	rdtp->dynticks_nmi_nesting = 0;
    > -	/* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
    > -	smp_mb__before_atomic(); /* See above. */
    > -	atomic_inc(&rdtp->dynticks);
    > -	smp_mb__after_atomic(); /* Force delay to next write. */
    > -	WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
    > +	rcu_dynticks_eqs_enter();
    >  }
    >
    >  /**
    > @@ -3800,8 +3837,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
    >  	init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
    >  	rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
    >  	rcu_sysidle_init_percpu_data(rdp->dynticks);
    > -	atomic_set(&rdp->dynticks->dynticks,
    > -		   (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
    > +	rcu_dynticks_eqs_online();
    >  	raw_spin_unlock_rcu_node(rnp); /* irqs remain disabled. */
    >
    >  	/*
    > --
    > 2.5.2
    >
