Subject: [PATCH 4/4] generic-smp: clean up some of the csd->flags fiddling
Break out the WAIT and LOCK bit operations into functions and provide some
extra comments.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
---
kernel/smp.c | 100 +++++++++++++++++++++++++++++++++++------------------------
1 file changed, 60 insertions(+), 40 deletions(-)
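
As a quick illustration of the WAIT half of the scheme (the csd_wait_prepare() /
csd_wait() / csd_complete() trio added below), here is a minimal user-space
sketch; it is not kernel code. C11 atomics and a pthread stand in for smp_wmb(),
cpu_relax() and the remote CPU, and apart from the struct, flag and helper names
taken from the patch, everything else (the thread harness, say_hello(), the flag
value, the strings) is invented for the example.

/* Illustrative user-space analogue of the synchronous (CSD_FLAG_WAIT) path. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define CSD_FLAG_WAIT	0x01	/* value illustrative */

struct call_single_data {
	atomic_uint flags;
	void (*func)(void *info);
	void *info;
};

static void csd_wait_prepare(struct call_single_data *data)
{
	atomic_fetch_or_explicit(&data->flags, CSD_FLAG_WAIT, memory_order_relaxed);
}

/* Release ordering stands in for the kernel's smp_wmb() before the clear. */
static void csd_complete(struct call_single_data *data)
{
	atomic_fetch_and_explicit(&data->flags, ~CSD_FLAG_WAIT, memory_order_release);
}

static void csd_wait(struct call_single_data *data)
{
	while (atomic_load_explicit(&data->flags, memory_order_acquire) & CSD_FLAG_WAIT)
		;	/* cpu_relax() in the kernel */
}

/* The "remote cpu": run the requested function, then let the waiter go. */
static void *ipi_target(void *arg)
{
	struct call_single_data *data = arg;

	data->func(data->info);
	csd_complete(data);
	return NULL;
}

static void say_hello(void *info)
{
	printf("remote call: %s\n", (const char *)info);
}

int main(void)
{
	/* On-stack csd, as in the wait case of smp_call_function_single(). */
	struct call_single_data csd = { .func = say_hello, .info = "hello" };
	pthread_t remote;

	csd_wait_prepare(&csd);
	pthread_create(&remote, NULL, ipi_target, &csd);
	csd_wait(&csd);		/* csd may go out of scope only after this */
	pthread_join(&remote, NULL);
	return 0;
}

The release/acquire ordering above is a conservative stand-in; the patch itself
only issues smp_wmb() on the completing side and relies on the plain spin loop
in csd_wait() on the other. The sketch builds with a plain cc -pthread.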

Index: linux-2.6/kernel/smp.c
===================================================================
--- linux-2.6.orig/kernel/smp.c
+++ linux-2.6/kernel/smp.c
@@ -100,14 +100,50 @@ static int __cpuinit init_call_single_da
}
early_initcall(init_call_single_data);

-static void csd_flag_wait(struct call_single_data *data)
+/*
+ * csd_wait/csd_complete are used for synchronous ipi calls
+ */
+static void csd_wait_prepare(struct call_single_data *data)
+{
+ data->flags |= CSD_FLAG_WAIT;
+}
+
+static void csd_complete(struct call_single_data *data)
+{
+ /*
+ * Serialize stores to data with the flag clear and wakeup.
+ */
+ smp_wmb();
+ data->flags &= ~CSD_FLAG_WAIT;
+}
+
+static void csd_wait(struct call_single_data *data)
+{
+ while (data->flags & CSD_FLAG_WAIT)
+ cpu_relax();
+}
+
+/*
+ * csd_lock/csd_unlock are used to serialize access to per-cpu csd resources
+ *
+ * For non-synchronous ipi calls the csd can still be in use by the previous
+ * function call. For multi-cpu calls it's even more interesting as we'll have
+ * to ensure no other cpu is observing our csd.
+ */
+static void csd_lock(struct call_single_data *data)
{
- /* Wait for response */
- do {
- if (!(data->flags & CSD_FLAG_WAIT))
- break;
+ while (data->flags & CSD_FLAG_LOCK)
cpu_relax();
- } while (1);
+ data->flags = CSD_FLAG_LOCK;
+}
+
+static void csd_unlock(struct call_single_data *data)
+{
+ /*
+ * Serialize stores to data with the flags clear.
+ */
+ smp_wmb();
+ data->flags &= ~CSD_FLAG_LOCK;
}

/*
@@ -134,7 +170,7 @@ static void generic_exec_single(int cpu,
arch_send_call_function_single_ipi(cpu);

if (wait)
- csd_flag_wait(data);
+ csd_wait(data);
}

/*
@@ -172,18 +208,17 @@ void generic_smp_call_function_interrupt
spin_lock(&call_function.lock);
list_add(&data->free_list, &call_function.free_list);
list_del_rcu(&data->csd.list);
+ /*
+ * When the global queue is empty, it's guaranteed that no cpu
+ * is still observing any entry on the free_list, therefore
+ * we can go ahead and unlock them.
+ */
if (!--call_function.counter)
list_splice_init(&call_function.free_list, &free_list);
spin_unlock(&call_function.lock);

- if (data->csd.flags & CSD_FLAG_WAIT) {
- /*
- * serialize stores to data with the flag clear
- * and wakeup
- */
- smp_wmb();
- data->csd.flags &= ~CSD_FLAG_WAIT;
- }
+ if (data->csd.flags & CSD_FLAG_WAIT)
+ csd_complete(&data->csd);

while (!list_empty(&free_list)) {
struct call_function_data *free;
@@ -192,12 +227,7 @@ void generic_smp_call_function_interrupt
struct call_function_data, free_list);

list_del(&data->free_list);
- /*
- * serialize stores to data with the flags
- * clear
- */
- smp_wmb();
- free->csd.flags &= ~CSD_FLAG_LOCK;
+ csd_unlock(&free->csd);
}
}

@@ -242,13 +272,10 @@ void generic_smp_call_function_single_in

data->func(data->info);

- if (data_flags & CSD_FLAG_WAIT) {
- smp_wmb();
- data->flags &= ~CSD_FLAG_WAIT;
- } else if (data_flags & CSD_FLAG_LOCK) {
- smp_wmb();
- data->flags &= ~CSD_FLAG_LOCK;
- }
+ if (data_flags & CSD_FLAG_WAIT)
+ csd_complete(data);
+ else if (data_flags & CSD_FLAG_LOCK)
+ csd_unlock(data);
}
/*
* See comment on outer loop
@@ -308,12 +335,10 @@ int smp_call_function_single(int cpu, vo
* data before a new caller will use it.
*/
data = &per_cpu(csd_data, me);
- while (data->flags & CSD_FLAG_LOCK)
- cpu_relax();
- data->flags = CSD_FLAG_LOCK;
+ csd_lock(data);
} else {
data = &d;
- data->flags = CSD_FLAG_WAIT;
+ csd_wait_prepare(data);
}

data->func = func;
@@ -398,16 +423,11 @@ void smp_call_function_many(const struct
}

data = &per_cpu(cfd_data, me);
- /*
- * We need to wait for all previous users to go away.
- */
- while (data->csd.flags & CSD_FLAG_LOCK)
- cpu_relax();
- data->csd.flags = CSD_FLAG_LOCK;
+ csd_lock(&data->csd);

spin_lock_init(&data->lock);
if (wait)
- data->csd.flags |= CSD_FLAG_WAIT;
+ csd_wait_prepare(&data->csd);
data->csd.func = func;
data->csd.info = info;
cpumask_and(data->cpumask, mask, cpu_online_mask);
@@ -429,7 +449,7 @@ void smp_call_function_many(const struct

/* optionally wait for the CPUs to complete */
if (wait)
- csd_flag_wait(&data->csd);
+ csd_wait(&data->csd);
}
EXPORT_SYMBOL(smp_call_function_many);

--
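
The LOCK half, the csd_lock() / csd_unlock() pair that lets a per-cpu csd be
reused once the previous, possibly asynchronous, call is done with it, can be
sketched in the same user-space style as the example above the diff. Again,
only the struct, flag and helper names come from the patch; the rest
(print_msg(), the flag value, the two-message loop) is invented for the
illustration.

/* Illustrative user-space analogue of the CSD_FLAG_LOCK reuse discipline. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define CSD_FLAG_LOCK	0x02	/* value illustrative */

struct call_single_data {
	atomic_uint flags;
	void (*func)(void *info);
	void *info;
};

/* Stand-in for the per-cpu csd_data; only its "owner" (main) ever locks it. */
static struct call_single_data csd_data;

static void csd_lock(struct call_single_data *data)
{
	/* Wait until the previous user has dropped the csd, then claim it. */
	while (atomic_load_explicit(&data->flags, memory_order_acquire) & CSD_FLAG_LOCK)
		;	/* cpu_relax() in the kernel */
	atomic_store_explicit(&data->flags, CSD_FLAG_LOCK, memory_order_relaxed);
}

/* Release ordering stands in for the kernel's smp_wmb() before the clear. */
static void csd_unlock(struct call_single_data *data)
{
	atomic_fetch_and_explicit(&data->flags, ~CSD_FLAG_LOCK, memory_order_release);
}

/* The "remote cpu": run the function, then drop the csd for the next caller. */
static void *ipi_target(void *arg)
{
	struct call_single_data *data = arg;

	data->func(data->info);
	csd_unlock(data);	/* asynchronous call: nobody waits on completion */
	return NULL;
}

static void print_msg(void *info)
{
	printf("async call: %s\n", (char *)info);
}

int main(void)
{
	char *msg[2] = { "first", "second" };
	pthread_t remote[2];

	/* Two back-to-back asynchronous calls reusing the same csd. */
	for (int i = 0; i < 2; i++) {
		csd_lock(&csd_data);	/* spins until the previous call unlocks */
		csd_data.func = print_msg;
		csd_data.info = msg[i];
		pthread_create(&remote[i], NULL, ipi_target, &csd_data);
	}
	for (int i = 0; i < 2; i++)
		pthread_join(&remote[i], NULL);
	return 0;
}

In the kernel only the owning cpu ever claims its own per-cpu csd, which is why
csd_lock() can get away with a plain test-then-set; having a single claiming
thread (main) preserves that property in the sketch.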


