    Subject: [tip:smp/hotplug] rcu: Convert rcutree to hotplug state machine
    Commit-ID:  4df8374254ea9294dfe4b8c447a1b7eddc543dbf
    Gitweb: http://git.kernel.org/tip/4df8374254ea9294dfe4b8c447a1b7eddc543dbf
    Author: Thomas Gleixner <tglx@linutronix.de>
    AuthorDate: Wed, 13 Jul 2016 17:17:03 +0000
    Committer: Ingo Molnar <mingo@kernel.org>
    CommitDate: Fri, 15 Jul 2016 10:41:44 +0200

    rcu: Convert rcutree to hotplug state machine

    Straightforward conversion to the state machine, though the question
    arises whether all of these state transitions are really needed for it
    to work.

    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
    Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
    Reviewed-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
    Cc: Linus Torvalds <torvalds@linux-foundation.org>
    Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
    Cc: Peter Zijlstra <peterz@infradead.org>
    Cc: rt@linutronix.de
    Link: http://lkml.kernel.org/r/20160713153337.982013161@linutronix.de
    Signed-off-by: Ingo Molnar <mingo@kernel.org>
    ---
    include/linux/cpuhotplug.h |   3 ++
    include/linux/rcutiny.h    |   7 +++
    include/linux/rcutree.h    |   7 +++
    kernel/cpu.c               |  14 ++++++
    kernel/rcu/tree.c          | 105 ++++++++++++++++++++++-----------------------
    5 files changed, 83 insertions(+), 53 deletions(-)
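
    The callbacks added below are wired into fixed slots of the
    cpuhp_bp_states[] / cpuhp_ap_states[] tables, which is the pattern used
    for core subsystems. A subsystem without a reserved slot would register
    against the same state machine dynamically. The following is only a
    rough sketch of that model and is not part of this patch; the example_*
    names are made up, while cpuhp_setup_state(), cpuhp_remove_state() and
    CPUHP_AP_ONLINE_DYN are the existing interfaces:

    #include <linux/cpu.h>
    #include <linux/cpuhotplug.h>
    #include <linux/module.h>
    #include <linux/printk.h>

    /* Runs once @cpu has come up past the dynamically allocated state. */
    static int example_online_cpu(unsigned int cpu)
    {
            pr_info("example: CPU %u online\n", cpu);
            return 0;       /* non-zero would roll the bringup back */
    }

    /* Runs when @cpu is taken down past that state. */
    static int example_offline_cpu(unsigned int cpu)
    {
            pr_info("example: CPU %u going down\n", cpu);
            return 0;
    }

    static enum cpuhp_state example_state;

    static int __init example_init(void)
    {
            int ret;

            /*
             * For CPUHP_AP_ONLINE_DYN the core picks a free state number
             * and returns it; the startup callback is also invoked for
             * every CPU that is already online at registration time.
             */
            ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "example:online",
                                    example_online_cpu, example_offline_cpu);
            if (ret < 0)
                    return ret;
            example_state = ret;
            return 0;
    }

    static void __exit example_exit(void)
    {
            cpuhp_remove_state(example_state);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");

    Either way the model is the same as in the patch: a startup handler run
    as the CPU passes the state on the way up and a teardown handler run on
    the way down, replacing the multiplexed notifier switch removed from
    kernel/rcu/tree.c.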

    diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
    index 544b556..201a2e2 100644
    --- a/include/linux/cpuhotplug.h
    +++ b/include/linux/cpuhotplug.h
    @@ -20,11 +20,13 @@ enum cpuhp_state {
    CPUHP_X2APIC_PREPARE,
    CPUHP_SMPCFD_PREPARE,
    CPUHP_TIMERS_DEAD,
    + CPUHP_RCUTREE_PREP,
    CPUHP_NOTIFY_PREPARE,
    CPUHP_BRINGUP_CPU,
    CPUHP_AP_IDLE_DEAD,
    CPUHP_AP_OFFLINE,
    CPUHP_AP_SCHED_STARTING,
    + CPUHP_AP_RCUTREE_DYING,
    CPUHP_AP_IRQ_GIC_STARTING,
    CPUHP_AP_IRQ_GICV3_STARTING,
    CPUHP_AP_IRQ_HIP04_STARTING,
    @@ -80,6 +82,7 @@ enum cpuhp_state {
    CPUHP_AP_PERF_ARM_CCI_ONLINE,
    CPUHP_AP_PERF_ARM_CCN_ONLINE,
    CPUHP_AP_WORKQUEUE_ONLINE,
    + CPUHP_AP_RCUTREE_ONLINE,
    CPUHP_AP_NOTIFY_ONLINE,
    CPUHP_AP_ONLINE_DYN,
    CPUHP_AP_ONLINE_DYN_END = CPUHP_AP_ONLINE_DYN + 30,
    diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
    index 93aea75..ac81e40 100644
    --- a/include/linux/rcutiny.h
    +++ b/include/linux/rcutiny.h
    @@ -243,4 +243,11 @@ static inline void rcu_all_qs(void)
    barrier(); /* Avoid RCU read-side critical sections leaking across. */
    }

    +/* RCUtree hotplug events */
    +#define rcutree_prepare_cpu NULL
    +#define rcutree_online_cpu NULL
    +#define rcutree_offline_cpu NULL
    +#define rcutree_dead_cpu NULL
    +#define rcutree_dying_cpu NULL
    +
    #endif /* __LINUX_RCUTINY_H */
    diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
    index 5043cb8..63a4e4c 100644
    --- a/include/linux/rcutree.h
    +++ b/include/linux/rcutree.h
    @@ -111,4 +111,11 @@ bool rcu_is_watching(void);

    void rcu_all_qs(void);

    +/* RCUtree hotplug events */
    +int rcutree_prepare_cpu(unsigned int cpu);
    +int rcutree_online_cpu(unsigned int cpu);
    +int rcutree_offline_cpu(unsigned int cpu);
    +int rcutree_dead_cpu(unsigned int cpu);
    +int rcutree_dying_cpu(unsigned int cpu);
    +
    #endif /* __LINUX_RCUTREE_H */
    diff --git a/kernel/cpu.c b/kernel/cpu.c
    index 008e2fd..f24f459 100644
    --- a/kernel/cpu.c
    +++ b/kernel/cpu.c
    @@ -1205,6 +1205,11 @@ static struct cpuhp_step cpuhp_bp_states[] = {
    .startup = NULL,
    .teardown = timers_dead_cpu,
    },
    + [CPUHP_RCUTREE_PREP] = {
    + .name = "RCU-tree prepare",
    + .startup = rcutree_prepare_cpu,
    + .teardown = rcutree_dead_cpu,
    + },
    /*
    * Preparatory and dead notifiers. Will be replaced once the notifiers
    * are converted to states.
    @@ -1263,6 +1268,10 @@ static struct cpuhp_step cpuhp_ap_states[] = {
    .startup = sched_cpu_starting,
    .teardown = sched_cpu_dying,
    },
    + [CPUHP_AP_RCUTREE_DYING] = {
    + .startup = NULL,
    + .teardown = rcutree_dying_cpu,
    + },
    /*
    * Low level startup/teardown notifiers. Run with interrupts
    * disabled. Will be removed once the notifiers are converted to
    @@ -1296,6 +1305,11 @@ static struct cpuhp_step cpuhp_ap_states[] = {
    .startup = workqueue_online_cpu,
    .teardown = workqueue_offline_cpu,
    },
    + [CPUHP_AP_RCUTREE_ONLINE] = {
    + .name = "RCU-tree online",
    + .startup = rcutree_online_cpu,
    + .teardown = rcutree_offline_cpu,
    + },

    /*
    * Online/down_prepare notifiers. Will be removed once the notifiers
    diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
    index f433959..5d80925 100644
    --- a/kernel/rcu/tree.c
    +++ b/kernel/rcu/tree.c
    @@ -1073,11 +1073,11 @@ EXPORT_SYMBOL_GPL(rcu_is_watching);
    * offline to continue to use RCU for one jiffy after marking itself
    * offline in the cpu_online_mask. This leniency is necessary given the
    * non-atomic nature of the online and offline processing, for example,
    - * the fact that a CPU enters the scheduler after completing the CPU_DYING
    - * notifiers.
    + * the fact that a CPU enters the scheduler after completing the teardown
    + * of the CPU.
    *
    - * This is also why RCU internally marks CPUs online during the
    - * CPU_UP_PREPARE phase and offline during the CPU_DEAD phase.
    + * This is also why RCU internally marks CPUs online during the
    + * preparation phase and offline after the CPU has been taken down.
    *
    * Disable checking if in an NMI handler because we cannot safely report
    * errors from NMI handlers anyway.
    @@ -3806,12 +3806,58 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
    raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
    }

    -static void rcu_prepare_cpu(int cpu)
    +int rcutree_prepare_cpu(unsigned int cpu)
    {
    struct rcu_state *rsp;

    for_each_rcu_flavor(rsp)
    rcu_init_percpu_data(cpu, rsp);
    +
    + rcu_prepare_kthreads(cpu);
    + rcu_spawn_all_nocb_kthreads(cpu);
    +
    + return 0;
    +}
    +
    +static void rcutree_affinity_setting(unsigned int cpu, int outgoing)
    +{
    + struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
    +
    + rcu_boost_kthread_setaffinity(rdp->mynode, outgoing);
    +}
    +
    +int rcutree_online_cpu(unsigned int cpu)
    +{
    + sync_sched_exp_online_cleanup(cpu);
    + rcutree_affinity_setting(cpu, -1);
    + return 0;
    +}
    +
    +int rcutree_offline_cpu(unsigned int cpu)
    +{
    + rcutree_affinity_setting(cpu, cpu);
    + return 0;
    +}
    +
    +
    +int rcutree_dying_cpu(unsigned int cpu)
    +{
    + struct rcu_state *rsp;
    +
    + for_each_rcu_flavor(rsp)
    + rcu_cleanup_dying_cpu(rsp);
    + return 0;
    +}
    +
    +int rcutree_dead_cpu(unsigned int cpu)
    +{
    + struct rcu_state *rsp;
    +
    + for_each_rcu_flavor(rsp) {
    + rcu_cleanup_dead_cpu(cpu, rsp);
    + do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
    + }
    + return 0;
    }

    #ifdef CONFIG_HOTPLUG_CPU
    @@ -3851,52 +3897,6 @@ void rcu_report_dead(unsigned int cpu)
    }
    #endif

    -/*
    - * Handle CPU online/offline notification events.
    - */
    -int rcu_cpu_notify(struct notifier_block *self,
    - unsigned long action, void *hcpu)
    -{
    - long cpu = (long)hcpu;
    - struct rcu_data *rdp = per_cpu_ptr(rcu_state_p->rda, cpu);
    - struct rcu_node *rnp = rdp->mynode;
    - struct rcu_state *rsp;
    -
    - switch (action) {
    - case CPU_UP_PREPARE:
    - case CPU_UP_PREPARE_FROZEN:
    - rcu_prepare_cpu(cpu);
    - rcu_prepare_kthreads(cpu);
    - rcu_spawn_all_nocb_kthreads(cpu);
    - break;
    - case CPU_ONLINE:
    - case CPU_DOWN_FAILED:
    - sync_sched_exp_online_cleanup(cpu);
    - rcu_boost_kthread_setaffinity(rnp, -1);
    - break;
    - case CPU_DOWN_PREPARE:
    - rcu_boost_kthread_setaffinity(rnp, cpu);
    - break;
    - case CPU_DYING:
    - case CPU_DYING_FROZEN:
    - for_each_rcu_flavor(rsp)
    - rcu_cleanup_dying_cpu(rsp);
    - break;
    - case CPU_DEAD:
    - case CPU_DEAD_FROZEN:
    - case CPU_UP_CANCELED:
    - case CPU_UP_CANCELED_FROZEN:
    - for_each_rcu_flavor(rsp) {
    - rcu_cleanup_dead_cpu(cpu, rsp);
    - do_nocb_deferred_wakeup(per_cpu_ptr(rsp->rda, cpu));
    - }
    - break;
    - default:
    - break;
    - }
    - return NOTIFY_OK;
    -}
    -
    static int rcu_pm_notify(struct notifier_block *self,
    unsigned long action, void *hcpu)
    {
    @@ -4208,10 +4208,9 @@ void __init rcu_init(void)
    * this is called early in boot, before either interrupts
    * or the scheduler are operational.
    */
    - cpu_notifier(rcu_cpu_notify, 0);
    pm_notifier(rcu_pm_notify, 0);
    for_each_online_cpu(cpu)
    - rcu_cpu_notify(NULL, CPU_UP_PREPARE, (void *)(long)cpu);
    + rcutree_prepare_cpu(cpu);
    }

    #include "tree_exp.h"