Subject: [PATCH -tip V3 7/8] workqueue: reorganize workqueue_offline_cpu() unbind_workers()
    From: Lai Jiangshan <laijs@linux.alibaba.com>

Just move code around; there is no functional change. Only the
wq_pool_attach_mutex-protected region becomes a little larger.

This prepares for a later patch that puts wq_online_cpumask under
the protection of wq_pool_attach_mutex.
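
The core of the reorganization is a lock-scope inversion: the per-pool
loop and the wq_pool_attach_mutex acquisition move out of
unbind_workers() into workqueue_offline_cpu(), and the callee now
states its new contract with lockdep_assert_held(). For illustration
only (this is a standalone userspace sketch, not kernel code;
attach_mutex and unbind_pool are made-up names), the same
caller-locks pattern with pthreads:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t attach_mutex = PTHREAD_MUTEX_INITIALIZER;

struct pool { int id; };

/*
 * After the reorganization: the callee handles one pool and expects
 * the caller to hold attach_mutex.  On a default (non-recursive)
 * pthread mutex, a successful trylock means nobody held the lock,
 * which serves here as a crude stand-in for lockdep_assert_held().
 */
static void unbind_pool(struct pool *p)
{
	if (pthread_mutex_trylock(&attach_mutex) == 0) {
		pthread_mutex_unlock(&attach_mutex);
		fprintf(stderr, "bug: caller must hold attach_mutex\n");
		return;
	}
	printf("unbinding pool %d\n", p->id);
}

int main(void)
{
	struct pool pools[] = { { 0 }, { 1 } };
	int i;

	/*
	 * The caller now owns both the iteration and the locking,
	 * mirroring the new loop in workqueue_offline_cpu().
	 */
	for (i = 0; i < 2; i++) {
		pthread_mutex_lock(&attach_mutex);
		unbind_pool(&pools[i]);
		pthread_mutex_unlock(&attach_mutex);
	}
	return 0;
}

Build with gcc -pthread; the trylock check is only meaningful here
because the sketch is single-threaded.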

    Acked-by: Tejun Heo <tj@kernel.org>
    Signed-off-by: Lai Jiangshan <laijs@linux.alibaba.com>
    ---
    kernel/workqueue.c | 90 +++++++++++++++++++++++-----------------------
    1 file changed, 45 insertions(+), 45 deletions(-)

    diff --git a/kernel/workqueue.c b/kernel/workqueue.c
    index 94545e6feda5..dd32398edf55 100644
    --- a/kernel/workqueue.c
    +++ b/kernel/workqueue.c
@@ -4896,61 +4896,57 @@ void wq_worker_comm(char *buf, size_t size, struct task_struct *task)
  * cpu comes back online.
  */

-static void unbind_workers(int cpu)
+static void unbind_workers(struct worker_pool *pool)
 {
-	struct worker_pool *pool;
 	struct worker *worker;

-	for_each_cpu_worker_pool(pool, cpu) {
-		mutex_lock(&wq_pool_attach_mutex);
-		raw_spin_lock_irq(&pool->lock);
+	lockdep_assert_held(&wq_pool_attach_mutex);

-		/*
-		 * We've blocked all attach/detach operations. Make all workers
-		 * unbound and set DISASSOCIATED. Before this, all workers
-		 * except for the ones which are still executing works from
-		 * before the last CPU down must be on the cpu. After
-		 * this, they may become diasporas.
-		 */
-		for_each_pool_worker(worker, pool)
-			worker->flags |= WORKER_UNBOUND;
+	raw_spin_lock_irq(&pool->lock);

-		pool->flags |= POOL_DISASSOCIATED;
+	/*
+	 * We've blocked all attach/detach operations. Make all workers
+	 * unbound and set DISASSOCIATED. Before this, all workers
+	 * except for the ones which are still executing works from
+	 * before the last CPU down must be on the cpu. After
+	 * this, they may become diasporas.
+	 */
+	for_each_pool_worker(worker, pool)
+		worker->flags |= WORKER_UNBOUND;

-		raw_spin_unlock_irq(&pool->lock);
+	pool->flags |= POOL_DISASSOCIATED;

-		for_each_pool_worker(worker, pool)
-			WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);
+	raw_spin_unlock_irq(&pool->lock);

-		mutex_unlock(&wq_pool_attach_mutex);
+	for_each_pool_worker(worker, pool)
+		WARN_ON_ONCE(set_cpus_allowed_ptr(worker->task, cpu_possible_mask) < 0);

-		/*
-		 * Call schedule() so that we cross rq->lock and thus can
-		 * guarantee sched callbacks see the %WORKER_UNBOUND flag.
-		 * This is necessary as scheduler callbacks may be invoked
-		 * from other cpus.
-		 */
-		schedule();
+	/*
+	 * Call schedule() so that we cross rq->lock and thus can
+	 * guarantee sched callbacks see the %WORKER_UNBOUND flag.
+	 * This is necessary as scheduler callbacks may be invoked
+	 * from other cpus.
+	 */
+	schedule();

-		/*
-		 * Sched callbacks are disabled now. Zap nr_running.
-		 * After this, nr_running stays zero and need_more_worker()
-		 * and keep_working() are always true as long as the
-		 * worklist is not empty. This pool now behaves as an
-		 * unbound (in terms of concurrency management) pool which
-		 * are served by workers tied to the pool.
-		 */
-		atomic_set(&pool->nr_running, 0);
+	/*
+	 * Sched callbacks are disabled now. Zap nr_running.
+	 * After this, nr_running stays zero and need_more_worker()
+	 * and keep_working() are always true as long as the
+	 * worklist is not empty. This pool now behaves as an
+	 * unbound (in terms of concurrency management) pool which
+	 * are served by workers tied to the pool.
+	 */
+	atomic_set(&pool->nr_running, 0);

-		/*
-		 * With concurrency management just turned off, a busy
-		 * worker blocking could lead to lengthy stalls. Kick off
-		 * unbound chain execution of currently pending work items.
-		 */
-		raw_spin_lock_irq(&pool->lock);
-		wake_up_worker(pool);
-		raw_spin_unlock_irq(&pool->lock);
-	}
+	/*
+	 * With concurrency management just turned off, a busy
+	 * worker blocking could lead to lengthy stalls. Kick off
+	 * unbound chain execution of currently pending work items.
+	 */
+	raw_spin_lock_irq(&pool->lock);
+	wake_up_worker(pool);
+	raw_spin_unlock_irq(&pool->lock);
 }

 /**
@@ -5122,7 +5118,11 @@ int workqueue_offline_cpu(unsigned int cpu)
 	if (WARN_ON(cpu != smp_processor_id()))
 		return -1;

-	unbind_workers(cpu);
+	for_each_cpu_worker_pool(pool, cpu) {
+		mutex_lock(&wq_pool_attach_mutex);
+		unbind_workers(pool);
+		mutex_unlock(&wq_pool_attach_mutex);
+	}

 	mutex_lock(&wq_pool_mutex);
 	cpumask_clear_cpu(cpu, wq_online_cpumask);
    --
    2.19.1.6.gb485710b