Date: 16 Feb 2009
From: Peter Zijlstra <a.p.zijlstra@chello.nl>
Subject: [PATCH 2/4] generic-smp: remove kmalloc usage
Now that there is no strict need for kmalloc anymore, and nobody seems to
rely on it for the queueing behaviour, remove it.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
---
kernel/smp.c | 66 ++++++++++++++++-------------------------------------------
1 file changed, 19 insertions(+), 47 deletions(-)

Index: linux-2.6/kernel/smp.c
===================================================================
--- linux-2.6.orig/kernel/smp.c
+++ linux-2.6/kernel/smp.c
@@ -32,18 +32,14 @@ static struct {

enum {
CSD_FLAG_WAIT = 0x01,
- CSD_FLAG_ALLOC = 0x02,
- CSD_FLAG_LOCK = 0x04,
+ CSD_FLAG_LOCK = 0x02,
};

struct call_function_data {
struct call_single_data csd;
spinlock_t lock;
unsigned int refs;
- union {
- struct rcu_head rcu_head;
- struct list_head free_list;
- };
+ struct list_head free_list;
struct cpumask cpumask;
};

@@ -103,15 +99,6 @@ static void generic_exec_single(int cpu,
csd_flag_wait(data);
}

-static void rcu_free_call_data(struct rcu_head *head)
-{
- struct call_function_data *data;
-
- data = container_of(head, struct call_function_data, rcu_head);
-
- kfree(data);
-}
-
/*
* Invoked by arch to handle an IPI for call function. Must be called with
* interrupts disabled.
@@ -159,8 +146,6 @@ void generic_smp_call_function_interrupt
smp_wmb();
data->csd.flags &= ~CSD_FLAG_WAIT;
}
- if (data->csd.flags & CSD_FLAG_ALLOC)
- call_rcu(&data->rcu_head, rcu_free_call_data);

while (!list_empty(&free_list)) {
struct call_function_data *free;
@@ -225,8 +210,7 @@ void generic_smp_call_function_single_in
} else if (data_flags & CSD_FLAG_LOCK) {
smp_wmb();
data->flags &= ~CSD_FLAG_LOCK;
- } else if (data_flags & CSD_FLAG_ALLOC)
- kfree(data);
+ }
}
/*
* See comment on outer loop
@@ -271,13 +255,11 @@ int smp_call_function_single(int cpu, vo
/*
* We are calling a function on a single CPU
* and we are not going to wait for it to finish.
- * We first try to allocate the data, but if we
- * fail, we fall back to use a per cpu data to pass
- * the information to that CPU. Since all callers
- * of this code will use the same data, we must
- * synchronize the callers to prevent a new caller
- * from corrupting the data before the callee
- * can access it.
+ * We use a per cpu data to pass the information to
+ * that CPU. Since all callers of this code will
+ * use the same data, we must synchronize the
+ * callers to prevent a new caller from corrupting
+ * the data before the callee can access it.
*
* The CSD_FLAG_LOCK is used to let us know when
* the IPI handler is done with the data.
@@ -287,15 +269,10 @@ int smp_call_function_single(int cpu, vo
* will make sure the callee is done with the
* data before a new caller will use it.
*/
- data = kmalloc(sizeof(*data), GFP_ATOMIC);
- if (data)
- data->flags = CSD_FLAG_ALLOC;
- else {
- data = &per_cpu(csd_data, me);
- while (data->flags & CSD_FLAG_LOCK)
- cpu_relax();
- data->flags = CSD_FLAG_LOCK;
- }
+ data = &per_cpu(csd_data, me);
+ while (data->flags & CSD_FLAG_LOCK)
+ cpu_relax();
+ data->flags = CSD_FLAG_LOCK;
} else {
data = &d;
data->flags = CSD_FLAG_WAIT;
@@ -384,18 +361,13 @@ void smp_call_function_many(const struct
return;
}

- data = kmalloc(sizeof(*data), GFP_ATOMIC);
- if (data)
- data->csd.flags = CSD_FLAG_ALLOC;
- else {
- data = &per_cpu(cfd_data, me);
- /*
- * We need to wait for all previous users to go away.
- */
- while (data->csd.flags & CSD_FLAG_LOCK)
- cpu_relax();
- data->csd.flags = CSD_FLAG_LOCK;
- }
+ data = &per_cpu(cfd_data, me);
+ /*
+ * We need to wait for all previous users to go away.
+ */
+ while (data->csd.flags & CSD_FLAG_LOCK)
+ cpu_relax();
+ data->csd.flags = CSD_FLAG_LOCK;

spin_lock_init(&data->lock);
if (wait)
--
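
For readers skimming the archive: with the kmalloc fallback gone, the only
synchronization left in the no-wait paths is the CSD_FLAG_LOCK handoff on the
per-cpu slot. The caller spins until the previous user's LOCK bit has been
cleared, claims the slot, fills it in and kicks the remote CPU; the IPI
handler clears the bit again once it has consumed the data. Below is a
minimal userspace sketch of that pattern using C11 atomics and pthreads.
The names (struct slot, queue_call, fake_ipi_handler) and the ipi_pending
variable standing in for the call queue plus the real IPI are invented for
illustration only; this is not the kernel code itself.

/*
 * Minimal userspace sketch (not kernel code) of the CSD_FLAG_LOCK
 * handoff used by the no-wait case: the caller spins until the
 * previous user of the per-cpu slot is done, claims it, fills it in
 * and "sends the IPI"; the handler clears the LOCK bit when finished.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define FLAG_LOCK 0x02

struct slot {                           /* stands in for call_single_data */
    atomic_uint flags;
    void (*func)(void *);
    void *info;
};

static struct slot csd_slot;            /* stands in for per_cpu(csd_data) */
static atomic_int ipi_pending;          /* stands in for the queue + IPI   */

static void say_hello(void *info)
{
    printf("callee ran with arg %ld\n", (long)info);
}

/* Consumer side, like generic_smp_call_function_single_interrupt(). */
static void *fake_ipi_handler(void *arg)
{
    struct slot *s = arg;

    /* Wait for the "IPI" that tells us the slot has been published. */
    while (!atomic_load_explicit(&ipi_pending, memory_order_acquire))
        ;

    s->func(s->info);

    /*
     * Done with the slot; the release store plays the role of the
     * smp_wmb() + "flags &= ~CSD_FLAG_LOCK" pair in the patch and
     * lets the next caller reuse the per-cpu data.
     */
    atomic_fetch_and_explicit(&s->flags, ~FLAG_LOCK, memory_order_release);
    return NULL;
}

/* Producer side, like the no-wait path of smp_call_function_single(). */
static void queue_call(struct slot *s, void (*func)(void *), void *info)
{
    /* Spin until the previous user of the slot is finished with it. */
    while (atomic_load_explicit(&s->flags, memory_order_acquire) & FLAG_LOCK)
        ;
    atomic_store_explicit(&s->flags, FLAG_LOCK, memory_order_relaxed);

    s->func = func;
    s->info = info;

    /* Publish the filled-in slot and "send the IPI". */
    atomic_store_explicit(&ipi_pending, 1, memory_order_release);
}

int main(void)
{
    pthread_t handler;

    pthread_create(&handler, NULL, fake_ipi_handler, &csd_slot);
    queue_call(&csd_slot, say_hello, (void *)42L);
    pthread_join(handler, NULL);
    return 0;
}

Built with "cc -pthread", the sketch prints the callee's message once the
handler observes the fake IPI. In the kernel, the per-CPU call queue plus
the real IPI take the place of ipi_pending; the LOCK bit only gates reuse
of the per-cpu data between successive callers.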


