    Subject: [tip:smp/hotplug] cpu/hotplug: Convert hotplug locking to percpu rwsem
    Commit-ID:  fc8dffd379ca5620664336eb895a426b42847558
    Gitweb: http://git.kernel.org/tip/fc8dffd379ca5620664336eb895a426b42847558
    Author: Thomas Gleixner <tglx@linutronix.de>
    AuthorDate: Wed, 24 May 2017 10:15:40 +0200
    Committer: Thomas Gleixner <tglx@linutronix.de>
    CommitDate: Fri, 26 May 2017 10:10:46 +0200

    cpu/hotplug: Convert hotplug locking to percpu rwsem

    There are no more (known) nested calls to get_online_cpus() and all
    observed lock ordering problems have been addressed.

    Replace the magic nested 'rwsem' hackery with a percpu-rwsem.
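
    For illustration, the read side after this conversion is a plain
    percpu-rwsem reader. A minimal caller-side sketch (walk_online_cpus()
    and its callback are hypothetical, not part of this patch):

        #include <linux/cpu.h>
        #include <linux/cpumask.h>

        /*
         * Run fn() on each online CPU while holding the hotplug lock
         * for reading, so CPUs cannot come or go during the walk.
         */
        static void walk_online_cpus(void (*fn)(unsigned int cpu))
        {
                unsigned int cpu;

                cpus_read_lock();
                for_each_online_cpu(cpu)
                        fn(cpu);
                cpus_read_unlock();
        }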

    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
    Tested-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
    Acked-by: Ingo Molnar <mingo@kernel.org>
    Cc: Peter Zijlstra <peterz@infradead.org>
    Cc: Sebastian Siewior <bigeasy@linutronix.de>
    Cc: Steven Rostedt <rostedt@goodmis.org>
    Link: http://lkml.kernel.org/r/20170524081549.447014063@linutronix.de

    ---
    include/linux/cpu.h |   2 +-
    kernel/cpu.c        | 107 +++++---------------------------------------------
    2 files changed, 14 insertions(+), 95 deletions(-)

    diff --git a/include/linux/cpu.h b/include/linux/cpu.h
    index af4d660..ca73bc1 100644
    --- a/include/linux/cpu.h
    +++ b/include/linux/cpu.h
    @@ -103,7 +103,7 @@ extern void cpus_write_lock(void);
     extern void cpus_write_unlock(void);
     extern void cpus_read_lock(void);
     extern void cpus_read_unlock(void);
    -static inline void lockdep_assert_cpus_held(void) { }
    +extern void lockdep_assert_cpus_held(void);
     extern void cpu_hotplug_disable(void);
     extern void cpu_hotplug_enable(void);
     void clear_tasks_mm_cpumask(int cpu);
    diff --git a/kernel/cpu.c b/kernel/cpu.c
    index 142d889..6683621 100644
    --- a/kernel/cpu.c
    +++ b/kernel/cpu.c
    @@ -27,6 +27,7 @@
     #include <linux/smpboot.h>
     #include <linux/relay.h>
     #include <linux/slab.h>
    +#include <linux/percpu-rwsem.h>
     
     #include <trace/events/power.h>
     #define CREATE_TRACE_POINTS
    @@ -196,121 +197,41 @@ void cpu_maps_update_done(void)
     	mutex_unlock(&cpu_add_remove_lock);
     }
     
    -/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
    +/*
    + * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
      * Should always be manipulated under cpu_add_remove_lock
      */
     static int cpu_hotplug_disabled;
     
     #ifdef CONFIG_HOTPLUG_CPU
     
    -static struct {
    -	struct task_struct *active_writer;
    -	/* wait queue to wake up the active_writer */
    -	wait_queue_head_t wq;
    -	/* verifies that no writer will get active while readers are active */
    -	struct mutex lock;
    -	/*
    -	 * Also blocks the new readers during
    -	 * an ongoing cpu hotplug operation.
    -	 */
    -	atomic_t refcount;
    -
    -#ifdef CONFIG_DEBUG_LOCK_ALLOC
    -	struct lockdep_map dep_map;
    -#endif
    -} cpu_hotplug = {
    -	.active_writer = NULL,
    -	.wq = __WAIT_QUEUE_HEAD_INITIALIZER(cpu_hotplug.wq),
    -	.lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
    -#ifdef CONFIG_DEBUG_LOCK_ALLOC
    -	.dep_map = STATIC_LOCKDEP_MAP_INIT("cpu_hotplug.dep_map", &cpu_hotplug.dep_map),
    -#endif
    -};
    -
    -/* Lockdep annotations for get/put_online_cpus() and cpu_hotplug_begin/end() */
    -#define cpuhp_lock_acquire_read() lock_map_acquire_read(&cpu_hotplug.dep_map)
    -#define cpuhp_lock_acquire_tryread() \
    -				  lock_map_acquire_tryread(&cpu_hotplug.dep_map)
    -#define cpuhp_lock_acquire()      lock_map_acquire(&cpu_hotplug.dep_map)
    -#define cpuhp_lock_release()      lock_map_release(&cpu_hotplug.dep_map)
    -
    +DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
     
     void cpus_read_lock(void)
     {
    -	might_sleep();
    -	if (cpu_hotplug.active_writer == current)
    -		return;
    -	cpuhp_lock_acquire_read();
    -	mutex_lock(&cpu_hotplug.lock);
    -	atomic_inc(&cpu_hotplug.refcount);
    -	mutex_unlock(&cpu_hotplug.lock);
    +	percpu_down_read(&cpu_hotplug_lock);
     }
     EXPORT_SYMBOL_GPL(cpus_read_lock);
     
     void cpus_read_unlock(void)
     {
    -	int refcount;
    -
    -	if (cpu_hotplug.active_writer == current)
    -		return;
    -
    -	refcount = atomic_dec_return(&cpu_hotplug.refcount);
    -	if (WARN_ON(refcount < 0)) /* try to fix things up */
    -		atomic_inc(&cpu_hotplug.refcount);
    -
    -	if (refcount <= 0 && waitqueue_active(&cpu_hotplug.wq))
    -		wake_up(&cpu_hotplug.wq);
    -
    -	cpuhp_lock_release();
    -
    +	percpu_up_read(&cpu_hotplug_lock);
     }
     EXPORT_SYMBOL_GPL(cpus_read_unlock);
     
    -/*
    - * This ensures that the hotplug operation can begin only when the
    - * refcount goes to zero.
    - *
    - * Note that during a cpu-hotplug operation, the new readers, if any,
    - * will be blocked by the cpu_hotplug.lock
    - *
    - * Since cpu_hotplug_begin() is always called after invoking
    - * cpu_maps_update_begin(), we can be sure that only one writer is active.
    - *
    - * Note that theoretically, there is a possibility of a livelock:
    - * - Refcount goes to zero, last reader wakes up the sleeping
    - *   writer.
    - * - Last reader unlocks the cpu_hotplug.lock.
    - * - A new reader arrives at this moment, bumps up the refcount.
    - * - The writer acquires the cpu_hotplug.lock finds the refcount
    - *   non zero and goes to sleep again.
    - *
    - * However, this is very difficult to achieve in practice since
    - * get_online_cpus() not an api which is called all that often.
    - *
    - */
     void cpus_write_lock(void)
     {
    -	DEFINE_WAIT(wait);
    -
    -	cpu_hotplug.active_writer = current;
    -	cpuhp_lock_acquire();
    -
    -	for (;;) {
    -		mutex_lock(&cpu_hotplug.lock);
    -		prepare_to_wait(&cpu_hotplug.wq, &wait, TASK_UNINTERRUPTIBLE);
    -		if (likely(!atomic_read(&cpu_hotplug.refcount)))
    -			break;
    -		mutex_unlock(&cpu_hotplug.lock);
    -		schedule();
    -	}
    -	finish_wait(&cpu_hotplug.wq, &wait);
    +	percpu_down_write(&cpu_hotplug_lock);
     }
     
     void cpus_write_unlock(void)
     {
    -	cpu_hotplug.active_writer = NULL;
    -	mutex_unlock(&cpu_hotplug.lock);
    -	cpuhp_lock_release();
    +	percpu_up_write(&cpu_hotplug_lock);
    +}
    +
    +void lockdep_assert_cpus_held(void)
    +{
    +	percpu_rwsem_assert_held(&cpu_hotplug_lock);
     }
     
     /*
    @@ -344,8 +265,6 @@ void cpu_hotplug_enable(void)
     EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
     #endif /* CONFIG_HOTPLUG_CPU */
     
    -/* Notifier wrappers for transitioning to state machine */
    -
     static int bringup_wait_for_ap(unsigned int cpu)
     {
     	struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
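
    With lockdep_assert_cpus_held() backed by the percpu-rwsem, code that
    requires hotplug protection can assert its calling context instead of
    taking the lock again; this is why the header change above turns the
    inline stub into a real declaration. A hedged sketch (install_widget()
    is hypothetical, not from this patch):

        #include <linux/cpu.h>

        /* Caller must hold cpu_hotplug_lock, read or write side. */
        static void install_widget(unsigned int cpu)
        {
                /*
                 * With lockdep enabled this warns unless the caller holds
                 * cpu_hotplug_lock via cpus_read_lock() or cpus_write_lock().
                 */
                lockdep_assert_cpus_held();

                /* ... per-cpu setup that must not race with CPU hotplug ... */
        }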