From: Tejun Heo <tj@kernel.org>
Subject: [PATCH 19/31] workqueue: drop "std" from cpu_std_worker_pools and for_each_std_worker_pool()
Date: 2013-03-01
All per-cpu pools are standard, so there's no need to use both "cpu"
and "std", and for_each_std_worker_pool() is confusing in that it can
only be used for per-cpu pools.

* s/cpu_std_worker_pools/cpu_worker_pools/

* s/for_each_std_worker_pool()/for_each_cpu_worker_pool()/
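
A minimal usage sketch of the renamed iterator (illustrative only, not
part of the patch; it mirrors the init_workqueues() hunk below, where
each possible CPU's standard pools, one normal and one highpri, are
walked in turn):

	struct worker_pool *pool;
	int cpu;

	/* visit every per-cpu worker pool on every possible CPU */
	for_each_possible_cpu(cpu)
		for_each_cpu_worker_pool(pool, cpu)
			pool->cpu = cpu;	/* as done during early init */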

Signed-off-by: Tejun Heo <tj@kernel.org>
---
kernel/workqueue.c | 22 +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index f7f627c..95a3dcc 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -252,9 +252,9 @@ EXPORT_SYMBOL_GPL(system_freezable_wq);
lockdep_is_held(&workqueue_lock), \
"sched RCU or workqueue lock should be held")

-#define for_each_std_worker_pool(pool, cpu) \
- for ((pool) = &per_cpu(cpu_std_worker_pools, cpu)[0]; \
- (pool) < &per_cpu(cpu_std_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
+#define for_each_cpu_worker_pool(pool, cpu) \
+ for ((pool) = &per_cpu(cpu_worker_pools, cpu)[0]; \
+ (pool) < &per_cpu(cpu_worker_pools, cpu)[NR_STD_WORKER_POOLS]; \
(pool)++)

#define for_each_busy_worker(worker, i, pos, pool) \
@@ -416,7 +416,7 @@ static bool workqueue_freezing; /* W: have wqs started freezing? */
* POOL_DISASSOCIATED set, and their workers have WORKER_UNBOUND set.
*/
static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
- cpu_std_worker_pools);
+ cpu_worker_pools);

/*
* idr of all pools. Modifications are protected by workqueue_lock. Read
@@ -3335,7 +3335,7 @@ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
struct pool_workqueue *pwq =
per_cpu_ptr(wq->cpu_pwqs, cpu);
struct worker_pool *cpu_pools =
- per_cpu(cpu_std_worker_pools, cpu);
+ per_cpu(cpu_worker_pools, cpu);

pwq->pool = &cpu_pools[highpri];
list_add_tail_rcu(&pwq->pwqs_node, &wq->pwqs);
@@ -3688,7 +3688,7 @@ static void wq_unbind_fn(struct work_struct *work)
struct hlist_node *pos;
int i;

- for_each_std_worker_pool(pool, cpu) {
+ for_each_cpu_worker_pool(pool, cpu) {
WARN_ON_ONCE(cpu != smp_processor_id());

mutex_lock(&pool->assoc_mutex);
@@ -3731,7 +3731,7 @@ static void wq_unbind_fn(struct work_struct *work)
* unbound chain execution of pending work items if other workers
* didn't already.
*/
- for_each_std_worker_pool(pool, cpu)
+ for_each_cpu_worker_pool(pool, cpu)
atomic_set(&pool->nr_running, 0);
}

@@ -3748,7 +3748,7 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,

switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
- for_each_std_worker_pool(pool, cpu) {
+ for_each_cpu_worker_pool(pool, cpu) {
struct worker *worker;

if (pool->nr_workers)
@@ -3766,7 +3766,7 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,

case CPU_DOWN_FAILED:
case CPU_ONLINE:
- for_each_std_worker_pool(pool, cpu) {
+ for_each_cpu_worker_pool(pool, cpu) {
mutex_lock(&pool->assoc_mutex);
spin_lock_irq(&pool->lock);

@@ -4006,7 +4006,7 @@ static int __init init_workqueues(void)
struct worker_pool *pool;

i = 0;
- for_each_std_worker_pool(pool, cpu) {
+ for_each_cpu_worker_pool(pool, cpu) {
BUG_ON(init_worker_pool(pool));
pool->cpu = cpu;
cpumask_copy(pool->attrs->cpumask, cpumask_of(cpu));
@@ -4021,7 +4021,7 @@ static int __init init_workqueues(void)
for_each_online_cpu(cpu) {
struct worker_pool *pool;

- for_each_std_worker_pool(pool, cpu) {
+ for_each_cpu_worker_pool(pool, cpu) {
struct worker *worker;

pool->flags &= ~POOL_DISASSOCIATED;
--
1.8.1.2

