Subject: Re: [PATCH 2.5.58] new NUMA scheduler: fix
Thanks for your patience with the problems in yesterday's patches. I'm
resending the patches in the same form, with the following changes:

- moved NODE_BALANCE_{RATE,MIN,MAX} to topology.h
- removed the divide in the find_busiest_node() loop (thanks, Martin)
- removed the modulo (%) in the cross-node balancing trigger
- re-added node_nr_running_init() stub, nr_running_init() and comments
from Christoph
- removed the constant factor 4 in find_busiest_node(). The
find_busiest_queue() routine takes care of the case where the
busiest node is running only a few processes (at most one per CPU)
and returns busiest=NULL.

I hope we can start tuning the parameters now. In the basic NUMA
scheduler part these are:
NODE_THRESHOLD : minimum percentage of node overload needed to
trigger cross-node balancing
NODE_BALANCE_RATE : arch specific; cross-node balancing is attempted
after this many intra-node load balance calls
A small userspace illustration of the threshold check follows below.

In the extended NUMA scheduler the fixed NODE_BALANCE_RATE is replaced
by a variable rate set to:
NODE_BALANCE_MIN : if the node's own load is less than avg_load/2
NODE_BALANCE_MAX : otherwise (load at least avg_load/2)
Together with the reduced number of steals across nodes this might help
us achieve equal load among the nodes. I'm not aware of any simple
benchmark that can demonstrate this... A minimal sketch of the rate
selection follows below.
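The sketch is again plain userspace C, just to show the arithmetic
(NODE_BALANCE_MIN/MAX are the asm-generic defaults from the second
patch; the helper name and the example loads are made up):

/* toy-rate.c -- gcc -O2 -o toy-rate toy-rate.c */
#include <stdio.h>

#define NODE_BALANCE_MIN 10	/* lightly loaded node: cross-node balance often */
#define NODE_BALANCE_MAX 50	/* otherwise: keep cross-node balancing rare */

/* Nodes below half the average load get the fast rate, the rest the
 * slow one, mirroring the switch added to find_busiest_node(). */
static int balance_rate(int this_load, int avg_load)
{
	return (this_load < avg_load / 2) ? NODE_BALANCE_MIN : NODE_BALANCE_MAX;
}

int main(void)
{
	int avg_load = 8;
	int loads[] = { 0, 2, 3, 4, 8, 12 };
	unsigned int i;

	for (i = 0; i < sizeof(loads) / sizeof(loads[0]); i++)
		printf("this_load=%2d avg_load=%d -> rate %d\n",
		       loads[i], avg_load, balance_rate(loads[i], avg_load));
	return 0;
}

In the patch itself the chosen rate is kept per node in internode_lb[]
and updated from find_busiest_node(), so load_balance() only compares
this_rq->nr_balanced against it.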

Regards,
Erich
diff -urNp 2.5.58/fs/exec.c 2.5.58-ms-ilb-nb/fs/exec.c
--- 2.5.58/fs/exec.c 2003-01-14 06:58:33.000000000 +0100
+++ 2.5.58-ms-ilb-nb/fs/exec.c 2003-01-15 11:47:32.000000000 +0100
@@ -1031,6 +1031,8 @@ int do_execve(char * filename, char ** a
int retval;
int i;

+ sched_balance_exec();
+
file = open_exec(filename);

retval = PTR_ERR(file);
diff -urNp 2.5.58/include/asm-generic/topology.h 2.5.58-ms-ilb-nb/include/asm-generic/topology.h
--- 2.5.58/include/asm-generic/topology.h 2003-01-14 06:58:06.000000000 +0100
+++ 2.5.58-ms-ilb-nb/include/asm-generic/topology.h 2003-01-15 15:24:45.000000000 +0100
@@ -48,4 +48,9 @@
#define __node_to_memblk(node) (0)
#endif

+/* Cross-node load balancing interval. */
+#ifndef NODE_BALANCE_RATE
+#define NODE_BALANCE_RATE 10
+#endif
+
#endif /* _ASM_GENERIC_TOPOLOGY_H */
diff -urNp 2.5.58/include/asm-i386/topology.h 2.5.58-ms-ilb-nb/include/asm-i386/topology.h
--- 2.5.58/include/asm-i386/topology.h 2003-01-14 06:58:20.000000000 +0100
+++ 2.5.58-ms-ilb-nb/include/asm-i386/topology.h 2003-01-15 15:25:11.000000000 +0100
@@ -61,6 +61,9 @@ static inline int __node_to_first_cpu(in
/* Returns the number of the first MemBlk on Node 'node' */
#define __node_to_memblk(node) (node)

+/* Cross-node load balancing interval. */
+#define NODE_BALANCE_RATE 100
+
#else /* !CONFIG_NUMA */
/*
* Other i386 platforms should define their own version of the
diff -urNp 2.5.58/include/asm-ia64/topology.h 2.5.58-ms-ilb-nb/include/asm-ia64/topology.h
--- 2.5.58/include/asm-ia64/topology.h 2003-01-14 06:58:03.000000000 +0100
+++ 2.5.58-ms-ilb-nb/include/asm-ia64/topology.h 2003-01-15 15:25:28.000000000 +0100
@@ -60,4 +60,7 @@
*/
#define __node_to_memblk(node) (node)

+/* Cross-node load balancing interval. */
+#define NODE_BALANCE_RATE 10
+
#endif /* _ASM_IA64_TOPOLOGY_H */
diff -urNp 2.5.58/include/asm-ppc64/topology.h 2.5.58-ms-ilb-nb/include/asm-ppc64/topology.h
--- 2.5.58/include/asm-ppc64/topology.h 2003-01-14 06:59:29.000000000 +0100
+++ 2.5.58-ms-ilb-nb/include/asm-ppc64/topology.h 2003-01-15 15:24:15.000000000 +0100
@@ -46,6 +46,9 @@ static inline unsigned long __node_to_cp
return mask;
}

+/* Cross-node load balancing interval. */
+#define NODE_BALANCE_RATE 10
+
#else /* !CONFIG_NUMA */

#define __cpu_to_node(cpu) (0)
diff -urNp 2.5.58/include/linux/sched.h 2.5.58-ms-ilb-nb/include/linux/sched.h
--- 2.5.58/include/linux/sched.h 2003-01-14 06:58:06.000000000 +0100
+++ 2.5.58-ms-ilb-nb/include/linux/sched.h 2003-01-15 11:47:32.000000000 +0100
@@ -160,7 +160,6 @@ extern void update_one_process(struct ta
extern void scheduler_tick(int user_tick, int system);
extern unsigned long cache_decay_ticks;

-
#define MAX_SCHEDULE_TIMEOUT LONG_MAX
extern signed long FASTCALL(schedule_timeout(signed long timeout));
asmlinkage void schedule(void);
@@ -444,6 +443,14 @@ extern void set_cpus_allowed(task_t *p,
# define set_cpus_allowed(p, new_mask) do { } while (0)
#endif

+#ifdef CONFIG_NUMA
+extern void sched_balance_exec(void);
+extern void node_nr_running_init(void);
+#else
+#define sched_balance_exec() {}
+#define node_nr_running_init() {}
+#endif
+
extern void set_user_nice(task_t *p, long nice);
extern int task_prio(task_t *p);
extern int task_nice(task_t *p);
diff -urNp 2.5.58/init/main.c 2.5.58-ms-ilb-nb/init/main.c
--- 2.5.58/init/main.c 2003-01-14 06:58:20.000000000 +0100
+++ 2.5.58-ms-ilb-nb/init/main.c 2003-01-15 11:47:32.000000000 +0100
@@ -495,6 +495,7 @@ static void do_pre_smp_initcalls(void)

migration_init();
#endif
+ node_nr_running_init();
spawn_ksoftirqd();
}

diff -urNp 2.5.58/kernel/sched.c 2.5.58-ms-ilb-nb/kernel/sched.c
--- 2.5.58/kernel/sched.c 2003-01-14 06:59:11.000000000 +0100
+++ 2.5.58-ms-ilb-nb/kernel/sched.c 2003-01-15 15:26:29.000000000 +0100
@@ -67,6 +67,7 @@
#define INTERACTIVE_DELTA 2
#define MAX_SLEEP_AVG (2*HZ)
#define STARVATION_LIMIT (2*HZ)
+#define NODE_THRESHOLD 125

/*
* If a task is 'interactive' then we reinsert it in the active
@@ -153,7 +154,10 @@ struct runqueue {
task_t *curr, *idle;
prio_array_t *active, *expired, arrays[2];
int prev_nr_running[NR_CPUS];
-
+#ifdef CONFIG_NUMA
+ atomic_t *node_nr_running;
+ unsigned int nr_balanced;
+#endif
task_t *migration_thread;
struct list_head migration_queue;

@@ -178,6 +182,44 @@ static struct runqueue runqueues[NR_CPUS
#endif

/*
+ * Keep track of running tasks.
+ */
+#if CONFIG_NUMA
+/* XXX(hch): this should go into a struct sched_node_data */
+static atomic_t node_nr_running[MAX_NUMNODES] ____cacheline_maxaligned_in_smp =
+ {[0 ...MAX_NUMNODES-1] = ATOMIC_INIT(0)};
+
+static inline void nr_running_init(struct runqueue *rq)
+{
+ rq->node_nr_running = &node_nr_running[0];
+}
+
+static inline void nr_running_inc(runqueue_t *rq)
+{
+ atomic_inc(rq->node_nr_running);
+ rq->nr_running++;
+}
+
+static inline void nr_running_dec(runqueue_t *rq)
+{
+ atomic_dec(rq->node_nr_running);
+ rq->nr_running--;
+}
+
+__init void node_nr_running_init(void)
+{
+ int i;
+
+ for (i = 0; i < NR_CPUS; i++)
+ cpu_rq(i)->node_nr_running = &node_nr_running[__cpu_to_node(i)];
+}
+#else
+# define nr_running_init(rq) do { } while (0)
+# define nr_running_inc(rq) do { (rq)->nr_running++; } while (0)
+# define nr_running_dec(rq) do { (rq)->nr_running--; } while (0)
+#endif /* CONFIG_NUMA */
+
+/*
* task_rq_lock - lock the runqueue a given task resides on and disable
* interrupts. Note the ordering: we can safely lookup the task_rq without
* explicitly disabling preemption.
@@ -294,7 +336,7 @@ static inline void activate_task(task_t
p->prio = effective_prio(p);
}
enqueue_task(p, array);
- rq->nr_running++;
+ nr_running_inc(rq);
}

/*
@@ -302,7 +344,7 @@ static inline void activate_task(task_t
*/
static inline void deactivate_task(struct task_struct *p, runqueue_t *rq)
{
- rq->nr_running--;
+ nr_running_dec(rq);
if (p->state == TASK_UNINTERRUPTIBLE)
rq->nr_uninterruptible++;
dequeue_task(p, p->array);
@@ -624,6 +666,91 @@ static inline void double_rq_unlock(runq
spin_unlock(&rq2->lock);
}

+#if CONFIG_NUMA
+/*
+ * If dest_cpu is allowed for this process, migrate the task to it.
+ * This is accomplished by forcing the cpus_allowed mask to only
+ * allow dest_cpu, which moves the task onto dest_cpu. Then
+ * the cpu_allowed mask is restored.
+ */
+static void sched_migrate_task(task_t *p, int dest_cpu)
+{
+ unsigned long old_mask;
+
+ old_mask = p->cpus_allowed;
+ if (!(old_mask & (1UL << dest_cpu)))
+ return;
+ /* force the process onto the specified CPU */
+ set_cpus_allowed(p, 1UL << dest_cpu);
+
+ /* restore the cpus allowed mask */
+ set_cpus_allowed(p, old_mask);
+}
+
+/*
+ * Find the least loaded CPU. Slightly favor the current CPU by
+ * setting its runqueue length as the minimum to start.
+ */
+static int sched_best_cpu(struct task_struct *p)
+{
+ int i, minload, load, best_cpu, node = 0;
+ unsigned long cpumask;
+
+ best_cpu = task_cpu(p);
+ if (cpu_rq(best_cpu)->nr_running <= 2)
+ return best_cpu;
+
+ minload = 10000000;
+ for (i = 0; i < numnodes; i++) {
+ load = atomic_read(&node_nr_running[i]);
+ if (load < minload) {
+ minload = load;
+ node = i;
+ }
+ }
+
+ minload = 10000000;
+ cpumask = __node_to_cpu_mask(node);
+ for (i = 0; i < NR_CPUS; ++i) {
+ if (!(cpumask & (1UL << i)))
+ continue;
+ if (cpu_rq(i)->nr_running < minload) {
+ best_cpu = i;
+ minload = cpu_rq(i)->nr_running;
+ }
+ }
+ return best_cpu;
+}
+
+void sched_balance_exec(void)
+{
+ int new_cpu;
+
+ if (numnodes > 1) {
+ new_cpu = sched_best_cpu(current);
+ if (new_cpu != smp_processor_id())
+ sched_migrate_task(current, new_cpu);
+ }
+}
+
+static int find_busiest_node(int this_node)
+{
+ int i, node = -1, load, this_load, maxload;
+
+ this_load = maxload = atomic_read(&node_nr_running[this_node]);
+ for (i = 0; i < numnodes; i++) {
+ if (i == this_node)
+ continue;
+ load = atomic_read(&node_nr_running[i]);
+ if (load > maxload && (100*load > NODE_THRESHOLD*this_load)) {
+ maxload = load;
+ node = i;
+ }
+ }
+ return node;
+}
+#endif /* CONFIG_NUMA */
+
#if CONFIG_SMP

/*
@@ -652,9 +779,9 @@ static inline unsigned int double_lock_b
}

/*
- * find_busiest_queue - find the busiest runqueue.
+ * find_busiest_queue - find the busiest runqueue among the cpus in cpumask
*/
-static inline runqueue_t *find_busiest_queue(runqueue_t *this_rq, int this_cpu, int idle, int *imbalance)
+static inline runqueue_t *find_busiest_queue(runqueue_t *this_rq, int this_cpu, int idle, int *imbalance, unsigned long cpumask)
{
int nr_running, load, max_load, i;
runqueue_t *busiest, *rq_src;
@@ -689,7 +816,7 @@ static inline runqueue_t *find_busiest_q
busiest = NULL;
max_load = 1;
for (i = 0; i < NR_CPUS; i++) {
- if (!cpu_online(i))
+ if (!cpu_online(i) || !((1UL << i) & cpumask))
continue;

rq_src = cpu_rq(i);
@@ -736,9 +863,9 @@ out:
static inline void pull_task(runqueue_t *src_rq, prio_array_t *src_array, task_t *p, runqueue_t *this_rq, int this_cpu)
{
dequeue_task(p, src_array);
- src_rq->nr_running--;
+ nr_running_dec(src_rq);
set_task_cpu(p, this_cpu);
- this_rq->nr_running++;
+ nr_running_inc(this_rq);
enqueue_task(p, this_rq->active);
/*
* Note that idle threads have a prio of MAX_PRIO, for this test
@@ -758,13 +885,30 @@ static inline void pull_task(runqueue_t
*/
static void load_balance(runqueue_t *this_rq, int idle)
{
- int imbalance, idx, this_cpu = smp_processor_id();
+ int imbalance, idx, this_cpu, this_node;
+ unsigned long cpumask;
runqueue_t *busiest;
prio_array_t *array;
struct list_head *head, *curr;
task_t *tmp;

- busiest = find_busiest_queue(this_rq, this_cpu, idle, &imbalance);
+ this_cpu = smp_processor_id();
+ this_node = __cpu_to_node(this_cpu);
+ cpumask = __node_to_cpu_mask(this_node);
+
+#if CONFIG_NUMA
+ /*
+ * Avoid rebalancing between nodes too often.
+ */
+ if (++(this_rq->nr_balanced) == NODE_BALANCE_RATE) {
+ int node = find_busiest_node(this_node);
+ this_rq->nr_balanced = 0;
+ if (node >= 0)
+ cpumask = __node_to_cpu_mask(node) | (1UL << this_cpu);
+ }
+#endif
+ busiest = find_busiest_queue(this_rq, this_cpu, idle, &imbalance,
+ cpumask);
if (!busiest)
goto out;

@@ -2231,6 +2375,7 @@ void __init sched_init(void)
spin_lock_init(&rq->lock);
INIT_LIST_HEAD(&rq->migration_queue);
atomic_set(&rq->nr_iowait, 0);
+ nr_running_init(rq);

for (j = 0; j < 2; j++) {
array = rq->arrays + j;
diff -urNp 2.5.58-ms-ilb-nb/include/asm-generic/topology.h 2.5.58-ms-ilb-nb-sm-var/include/asm-generic/topology.h
--- 2.5.58-ms-ilb-nb/include/asm-generic/topology.h 2003-01-15 15:24:45.000000000 +0100
+++ 2.5.58-ms-ilb-nb-sm-var/include/asm-generic/topology.h 2003-01-15 15:32:39.000000000 +0100
@@ -49,8 +49,11 @@
#endif

/* Cross-node load balancing interval. */
-#ifndef NODE_BALANCE_RATE
-#define NODE_BALANCE_RATE 10
+#ifndef NODE_BALANCE_MIN
+#define NODE_BALANCE_MIN 10
+#endif
+#ifndef NODE_BALANCE_MAX
+#define NODE_BALANCE_MAX 50
#endif

#endif /* _ASM_GENERIC_TOPOLOGY_H */
diff -urNp 2.5.58-ms-ilb-nb/include/asm-i386/topology.h 2.5.58-ms-ilb-nb-sm-var/include/asm-i386/topology.h
--- 2.5.58-ms-ilb-nb/include/asm-i386/topology.h 2003-01-15 15:25:11.000000000 +0100
+++ 2.5.58-ms-ilb-nb-sm-var/include/asm-i386/topology.h 2003-01-15 15:33:50.000000000 +0100
@@ -62,7 +62,8 @@ static inline int __node_to_first_cpu(in
#define __node_to_memblk(node) (node)

/* Cross-node load balancing interval. */
-#define NODE_BALANCE_RATE 100
+#define NODE_BALANCE_MIN 10
+#define NODE_BALANCE_MAX 250

#else /* !CONFIG_NUMA */
/*
diff -urNp 2.5.58-ms-ilb-nb/include/asm-ia64/topology.h 2.5.58-ms-ilb-nb-sm-var/include/asm-ia64/topology.h
--- 2.5.58-ms-ilb-nb/include/asm-ia64/topology.h 2003-01-15 15:25:28.000000000 +0100
+++ 2.5.58-ms-ilb-nb-sm-var/include/asm-ia64/topology.h 2003-01-15 15:34:11.000000000 +0100
@@ -61,6 +61,7 @@
#define __node_to_memblk(node) (node)

/* Cross-node load balancing interval. */
-#define NODE_BALANCE_RATE 10
+#define NODE_BALANCE_MIN 10
+#define NODE_BALANCE_MAX 50

#endif /* _ASM_IA64_TOPOLOGY_H */
diff -urNp 2.5.58-ms-ilb-nb/include/asm-ppc64/topology.h 2.5.58-ms-ilb-nb-sm-var/include/asm-ppc64/topology.h
--- 2.5.58-ms-ilb-nb/include/asm-ppc64/topology.h 2003-01-15 15:24:15.000000000 +0100
+++ 2.5.58-ms-ilb-nb-sm-var/include/asm-ppc64/topology.h 2003-01-15 15:34:36.000000000 +0100
@@ -47,7 +47,8 @@ static inline unsigned long __node_to_cp
}

/* Cross-node load balancing interval. */
-#define NODE_BALANCE_RATE 10
+#define NODE_BALANCE_MIN 10
+#define NODE_BALANCE_MAX 50

#else /* !CONFIG_NUMA */

diff -urNp 2.5.58-ms-ilb-nb/kernel/sched.c 2.5.58-ms-ilb-nb-sm-var/kernel/sched.c
--- 2.5.58-ms-ilb-nb/kernel/sched.c 2003-01-15 15:26:29.000000000 +0100
+++ 2.5.58-ms-ilb-nb-sm-var/kernel/sched.c 2003-01-15 15:31:35.000000000 +0100
@@ -157,6 +157,7 @@ struct runqueue {
#ifdef CONFIG_NUMA
atomic_t *node_nr_running;
unsigned int nr_balanced;
+ int prev_node_load[MAX_NUMNODES];
#endif
task_t *migration_thread;
struct list_head migration_queue;
@@ -188,6 +189,8 @@ static struct runqueue runqueues[NR_CPUS
/* XXX(hch): this should go into a struct sched_node_data */
static atomic_t node_nr_running[MAX_NUMNODES] ____cacheline_maxaligned_in_smp =
{[0 ...MAX_NUMNODES-1] = ATOMIC_INIT(0)};
+static int internode_lb[MAX_NUMNODES] ____cacheline_maxaligned_in_smp =
+ {[0 ...MAX_NUMNODES-1] = NODE_BALANCE_MAX};

static inline void nr_running_init(struct runqueue *rq)
{
@@ -733,22 +736,53 @@ void sched_balance_exec(void)
}
}

+/*
+ * Find the busiest node. All previous node loads contribute with a
+ * geometrically deccaying weight to the load measure:
+ * load_{t} = load_{t-1}/2 + nr_node_running_{t}
+ * This way sudden load peaks are flattened out a bit.
+ */
static int find_busiest_node(int this_node)
{
int i, node = -1, load, this_load, maxload;
+ int avg_load;

- this_load = maxload = atomic_read(&node_nr_running[this_node]);
+ this_load = maxload = (this_rq()->prev_node_load[this_node] >> 1)
+ + atomic_read(&node_nr_running[this_node]);
+ this_rq()->prev_node_load[this_node] = this_load;
+ avg_load = this_load;
for (i = 0; i < numnodes; i++) {
if (i == this_node)
continue;
- load = atomic_read(&node_nr_running[i]);
+ load = (this_rq()->prev_node_load[i] >> 1)
+ + atomic_read(&node_nr_running[i]);
+ avg_load += load;
+ this_rq()->prev_node_load[i] = load;
if (load > maxload && (100*load > NODE_THRESHOLD*this_load)) {
maxload = load;
node = i;
}
}
+ avg_load = avg_load / (numnodes ? numnodes : 1);
+ if (this_load < (avg_load / 2)) {
+ if (internode_lb[this_node] == NODE_BALANCE_MAX)
+ internode_lb[this_node] = NODE_BALANCE_MIN;
+ } else
+ if (internode_lb[this_node] == NODE_BALANCE_MIN)
+ internode_lb[this_node] = NODE_BALANCE_MAX;
+ if (this_load >= avg_load)
+ node = -1;
return node;
}
+
+static inline int remote_steal_factor(runqueue_t *rq)
+{
+ int cpu = __cpu_to_node(task_cpu(rq->curr));
+
+ return (cpu == __cpu_to_node(smp_processor_id())) ? 1 : 2;
+}
+#else
+# define remote_steal_factor(rq) 1
#endif /* CONFIG_NUMA */

#if CONFIG_SMP
@@ -900,7 +934,7 @@ static void load_balance(runqueue_t *thi
/*
* Avoid rebalancing between nodes too often.
*/
- if (++(this_rq->nr_balanced) == NODE_BALANCE_RATE) {
+ if (++(this_rq->nr_balanced) == internode_lb[this_node]) {
int node = find_busiest_node(this_node);
this_rq->nr_balanced = 0;
if (node >= 0)
@@ -965,7 +999,7 @@ skip_queue:
goto skip_bitmap;
}
pull_task(busiest, array, tmp, this_rq, this_cpu);
- if (!idle && --imbalance) {
+ if (!idle && ((--imbalance)/remote_steal_factor(busiest))) {
if (curr != head)
goto skip_queue;
idx++;