From: Tejun Heo <tj@kernel.org>
Subject: [PATCH 2/3] stop_machine: reorganize stop_cpus() implementation
Date: Tue, 14 Jun 2011
Split __stop_cpus() into stop_cpus_queue() and stop_cpus_locked().
The former handles only the queueing part; the latter queues the works
via stop_cpus_queue() and then waits for completion, making it
functionally equivalent to __stop_cpus().

This reorganization is in preparation for future improvements to
stop_machine() and doesn't introduce any behavior change.
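For reference, a condensed sketch of the resulting structure (bodies
trimmed down to the essentials; the diff below has the real code):

	static void stop_cpus_queue(const struct cpumask *cpumask,
				    cpu_stop_fn_t fn, void *arg,
				    struct cpu_stop_done *done)
	{
		/* fill the per-cpu stop works and queue them; no waiting here */
	}

	static int stop_cpus_locked(const struct cpumask *cpumask,
				    cpu_stop_fn_t fn, void *arg)
	{
		struct cpu_stop_done done;

		cpu_stop_init_done(&done, cpumask_weight(cpumask));
		stop_cpus_queue(cpumask, fn, arg, &done);	/* queue only */
		wait_for_completion(&done.completion);		/* then wait */
		return done.executed ? done.ret : -ENOENT;
	}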

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Suresh Siddha <suresh.b.siddha@intel.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
---
kernel/stop_machine.c | 20 ++++++++++++++------
1 files changed, 14 insertions(+), 6 deletions(-)

diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 3d3f47d..198973f 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -136,10 +136,11 @@ void stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
 static DEFINE_MUTEX(stop_cpus_mutex);
 static DEFINE_PER_CPU(struct cpu_stop_work, stop_cpus_work);
 
-int __stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
+static void stop_cpus_queue(const struct cpumask *cpumask,
+			    cpu_stop_fn_t fn, void *arg,
+			    struct cpu_stop_done *done)
 {
 	struct cpu_stop_work *work;
-	struct cpu_stop_done done;
 	unsigned int cpu;
 
 	/* initialize works and done */
@@ -147,9 +148,8 @@ int __stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
 		work = &per_cpu(stop_cpus_work, cpu);
 		work->fn = fn;
 		work->arg = arg;
-		work->done = &done;
+		work->done = done;
 	}
-	cpu_stop_init_done(&done, cpumask_weight(cpumask));
 
 	/*
 	 * Disable preemption while queueing to avoid getting
@@ -161,7 +161,15 @@ int __stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
 		cpu_stop_queue_work(&per_cpu(cpu_stopper, cpu),
 				    &per_cpu(stop_cpus_work, cpu));
 	preempt_enable();
+}
 
+static int stop_cpus_locked(const struct cpumask *cpumask,
+			    cpu_stop_fn_t fn, void *arg)
+{
+	struct cpu_stop_done done;
+
+	cpu_stop_init_done(&done, cpumask_weight(cpumask));
+	stop_cpus_queue(cpumask, fn, arg, &done);
 	wait_for_completion(&done.completion);
 	return done.executed ? done.ret : -ENOENT;
 }
@@ -200,7 +208,7 @@ int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
 
 	/* static works are used, process one request at a time */
 	mutex_lock(&stop_cpus_mutex);
-	ret = __stop_cpus(cpumask, fn, arg);
+	ret = stop_cpus_locked(cpumask, fn, arg);
 	mutex_unlock(&stop_cpus_mutex);
 	return ret;
 }
@@ -230,7 +238,7 @@ int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg)
 	/* static works are used, process one request at a time */
 	if (!mutex_trylock(&stop_cpus_mutex))
 		return -EAGAIN;
-	ret = __stop_cpus(cpumask, fn, arg);
+	ret = stop_cpus_locked(cpumask, fn, arg);
 	mutex_unlock(&stop_cpus_mutex);
 	return ret;
 }
--
1.7.5.2

