From: Rusty Russell
Subject: [PATCH] CPU possible optimization
Date: 20 Sep 2002

[ This Works For Me(tm), but changing the boot order is always fraught
with danger... Should save some space as other things get converted
to use the per-cpu infrastructure. ]

Name: per-cpu only for possible CPUs
Author: Rusty Russell
Status: Tested on 2.5.34 2-way i386

D: This allocates per-cpu areas only for those CPUs which may actually
D: exist, before each one comes online.
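
For anyone who hasn't poked at the generic per-cpu code: the linker collects
all per-cpu variables into one section, each CPU gets its own copy of that
section, and __per_cpu_offset[cpu] records how far that copy sits from the
original. Below is a rough userspace model of that scheme, not kernel code --
all the names (fake_per_cpu_start, per_cpu_ptr, ...) are invented for
illustration, and the pointer arithmetic leans on the flat address model the
kernel already assumes.

/*
 * Userspace sketch of the __GENERIC_PER_CPU offset scheme.  One copy of
 * the "per-cpu section" is made for each CPU; a CPU reaches its copy by
 * adding its recorded offset to a variable's address in the original
 * section.  Names here are made up, not the kernel's symbols.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NR_CPUS 4

/* Stand-in for the .data.percpu section the linker emits. */
static char fake_per_cpu_start[64];

/* Stand-in for __per_cpu_offset[]; signed to keep the model simple. */
static long per_cpu_offset[NR_CPUS];

/* A "per-cpu variable" living inside the fake section. */
static int *counter = (int *)fake_per_cpu_start;

/* Roughly what a per-cpu access boils down to. */
#define per_cpu_ptr(ptr, cpu) \
	((typeof(ptr))((char *)(ptr) + per_cpu_offset[(cpu)]))

int main(void)
{
	unsigned long size = sizeof(fake_per_cpu_start);
	int cpu;

	/* Like setup_per_cpu_area(): one copy per possible CPU. */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		char *ptr = malloc(size);
		if (!ptr)
			return 1;
		memcpy(ptr, fake_per_cpu_start, size);
		per_cpu_offset[cpu] = ptr - fake_per_cpu_start;
	}

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		*per_cpu_ptr(counter, cpu) = cpu * 10;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %d: counter = %d\n",
		       cpu, *per_cpu_ptr(counter, cpu));

	return 0;
}

The patch below keeps that scheme but moves the copy/offset step from "all
NR_CPUS slots in one bootmem chunk" to "one area per possible CPU":
alloc_bootmem() for the boot CPU (which comes up very early) and
kmalloc(GFP_ATOMIC) for the rest.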

diff -urNp --exclude TAGS -X /home/rusty/current-dontdiff --minimal linux-2.5.34/init/main.c working-2.5.34-percpu_possible/init/main.c
--- linux-2.5.34/init/main.c Tue Sep 10 09:11:21 2002
+++ working-2.5.34-percpu_possible/init/main.c Thu Sep 12 15:01:31 2002
@@ -309,32 +309,36 @@ static void __init smp_init(void)
#define smp_init() do { } while (0)
#endif

-static inline void setup_per_cpu_areas(void) { }
+static inline void setup_per_cpu_area(unsigned int cpu) { }
static inline void smp_prepare_cpus(unsigned int maxcpus) { }

#else

#ifdef __GENERIC_PER_CPU
+/* Created by linker magic */
+extern char __per_cpu_start[], __per_cpu_end[];
+
unsigned long __per_cpu_offset[NR_CPUS];

-static void __init setup_per_cpu_areas(void)
+/* Sets up per-cpu area for boot CPU. */
+static void __init setup_per_cpu_area(unsigned int cpu)
{
- unsigned long size, i;
+ unsigned long size;
char *ptr;
- /* Created by linker magic */
- extern char __per_cpu_start[], __per_cpu_end[];

/* Copy section for each CPU (we discard the original) */
size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
if (!size)
return;

- ptr = alloc_bootmem(size * NR_CPUS);
+ /* First CPU happens really early... */
+ if (cpu == smp_processor_id())
+ ptr = alloc_bootmem(size);
+ else
+ ptr = kmalloc(size, GFP_ATOMIC);

- for (i = 0; i < NR_CPUS; i++, ptr += size) {
- __per_cpu_offset[i] = ptr - __per_cpu_start;
- memcpy(ptr, __per_cpu_start, size);
- }
+ __per_cpu_offset[cpu] = ptr - __per_cpu_start;
+ memcpy(ptr, __per_cpu_start, size);
}
#endif /* !__GENERIC_PER_CPU */

@@ -343,7 +347,16 @@ static void __init smp_init(void)
{
unsigned int i;

- /* FIXME: This should be done in userspace --RR */
+ for (i = 0; i < NR_CPUS; i++) {
+ if (cpu_possible(i)) {
+ if (i != smp_processor_id())
+ setup_per_cpu_area(i);
+ } else {
+ /* Force a NULL deref on use */
+ __per_cpu_offset[i] = (char *)0 - __per_cpu_start;
+ }
+ }
+
for (i = 0; i < NR_CPUS; i++) {
if (num_online_cpus() >= max_cpus)
break;
@@ -395,7 +408,7 @@ asmlinkage void __init start_kernel(void
lock_kernel();
printk(linux_banner);
setup_arch(&command_line);
- setup_per_cpu_areas();
+ setup_per_cpu_area(smp_processor_id());
printk("Kernel command line: %s\n", saved_command_line);
parse_options(command_line);
trap_init();
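
A note on the "(char *)0 - __per_cpu_start" line: for a CPU that can never
come up we allocate nothing; the stored offset instead cancels the section
base, so a per-cpu access through that CPU collapses to the variable's
(small) offset within the section and oopses on the spot rather than
scribbling over someone else's data. A throwaway sketch of the arithmetic,
again with invented names and the same flat-address assumption (strictly
this is undefined C, but it mirrors what the patch stores):

#include <stdio.h>

static char section_start[64];                 /* stands in for __per_cpu_start */
static int *var = (int *)(section_start + 8);  /* a "per-cpu" variable */

int main(void)
{
	/* What the patch stores for a CPU that can never exist. */
	long off = (char *)0 - section_start;

	/* What a per-cpu access through that CPU would then compute... */
	int *p = (int *)((char *)var + off);

	/* ...which is just var's offset inside the section, typically 0x8. */
	printf("bogus per-cpu pointer: %p\n", (void *)p);

	/* *p = 1;   <- would fault immediately, which is the point. */
	return 0;
}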

--
Anyone who quotes me in their sig is an idiot. -- Rusty Russell.