Subject: [RFC PATCH] sched: add load balance cpumask pointer into lb_env
From: Michael Wang <wangyun@linux.vnet.ibm.com>

With this patch, lb_env carries a pointer to the load-balance cpumask,
so we no longer need to pass cpus down through the balancing functions.
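
In short, the cpumask moves into the load-balance environment and the
call sites drop the extra argument. A minimal sketch of the resulting
usage, distilled from the diff below (not a separate change):

	struct lb_env env = {
		/* ... existing fields ... */
		.cpus = cpus,	/* CPUs under consideration for balancing */
	};

	/* before: group = find_busiest_group(&env, cpus, balance); */
	group = find_busiest_group(&env, balance);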

Signed-off-by: Michael Wang <wangyun@linux.vnet.ibm.com>
---
kernel/sched/fair.c | 34 ++++++++++++++++------------------
1 files changed, 16 insertions(+), 18 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 77f1ed7..5e6a1ef 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3102,14 +3102,16 @@ struct lb_env {
 
 	struct list_head *tasks;
 
+	/* The set of CPUs under consideration for load-balancing */
+	struct cpumask *cpus;
+
 	unsigned int flags;
 	unsigned int loop;
 	unsigned int loop_break;
 	unsigned int loop_max;
 
 	struct rq * (*find_busiest_queue)(struct lb_env *,
-					  struct sched_group *,
-					  const struct cpumask *);
+					  struct sched_group *);
 };
 
 /*
@@ -3847,8 +3849,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
  */
 static inline void update_sg_lb_stats(struct lb_env *env,
 			struct sched_group *group, int load_idx,
-			int local_group, const struct cpumask *cpus,
-			int *balance, struct sg_lb_stats *sgs)
+			int local_group, int *balance, struct sg_lb_stats *sgs)
 {
 	unsigned long nr_running, max_nr_running, min_nr_running;
 	unsigned long load, max_cpu_load, min_cpu_load;
@@ -3865,7 +3866,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 	max_nr_running = 0;
 	min_nr_running = ~0UL;
 
-	for_each_cpu_and(i, sched_group_cpus(group), cpus) {
+	for_each_cpu_and(i, sched_group_cpus(group), env->cpus) {
 		struct rq *rq = cpu_rq(i);
 
 		nr_running = rq->nr_running;
@@ -3995,9 +3996,8 @@ static bool update_sd_pick_busiest(struct lb_env *env,
  * @balance: Should we balance.
  * @sds: variable to hold the statistics for this sched_domain.
  */
-static inline void update_sd_lb_stats(struct lb_env *env,
-				      const struct cpumask *cpus,
-				      int *balance, struct sd_lb_stats *sds)
+static inline void update_sd_lb_stats(struct lb_env *env,
+					int *balance, struct sd_lb_stats *sds)
 {
 	struct sched_domain *child = env->sd->child;
 	struct sched_group *sg = env->sd->groups;
@@ -4014,8 +4014,7 @@ static inline void update_sd_lb_stats(struct lb_env *env,
 
 		local_group = cpumask_test_cpu(env->dst_cpu, sched_group_cpus(sg));
 		memset(&sgs, 0, sizeof(sgs));
-		update_sg_lb_stats(env, sg, load_idx, local_group,
-				   cpus, balance, &sgs);
+		update_sg_lb_stats(env, sg, load_idx, local_group, balance, &sgs);
 
 		if (local_group && !(*balance))
 			return;
@@ -4253,7 +4252,6 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
  * to restore balance.
  *
  * @env: The load balancing environment.
- * @cpus: The set of CPUs under consideration for load-balancing.
  * @balance: Pointer to a variable indicating if this_cpu
  *	is the appropriate cpu to perform load balancing at this_level.
 *
@@ -4263,7 +4261,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
  * put to idle by rebalancing its tasks onto our group.
  */
 static struct sched_group *
-find_busiest_group(struct lb_env *env, const struct cpumask *cpus, int *balance)
+find_busiest_group(struct lb_env *env, int *balance)
 {
 	struct sd_lb_stats sds;
 
@@ -4273,7 +4271,7 @@ find_busiest_group(struct lb_env *env, const struct cpumask *cpus, int *balance)
 	 * Compute the various statistics relavent for load balancing at
 	 * this level.
 	 */
-	update_sd_lb_stats(env, cpus, balance, &sds);
+	update_sd_lb_stats(env, balance, &sds);
 
 	/*
 	 * this_cpu is not the appropriate cpu to perform load balancing at
@@ -4356,8 +4354,7 @@ ret:
  * find_busiest_queue - find the busiest runqueue among the cpus in group.
  */
 static struct rq *find_busiest_queue(struct lb_env *env,
-				     struct sched_group *group,
-				     const struct cpumask *cpus)
+				     struct sched_group *group)
 {
 	struct rq *busiest = NULL, *rq;
 	unsigned long max_load = 0;
@@ -4372,7 +4369,7 @@ static struct rq *find_busiest_queue(struct lb_env *env,
 		if (!capacity)
 			capacity = fix_small_capacity(env->sd, group);
 
-		if (!cpumask_test_cpu(i, cpus))
+		if (!cpumask_test_cpu(i, env->cpus))
 			continue;
 
 		rq = cpu_rq(i);
@@ -4456,6 +4453,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 		.dst_grpmask = sched_group_cpus(sd->groups),
 		.idle = idle,
 		.loop_break = sched_nr_migrate_break,
+		.cpus = cpus,
 		.find_busiest_queue = find_busiest_queue,
 	};
 
@@ -4465,7 +4463,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 	schedstat_inc(sd, lb_count[idle]);
 
 redo:
-	group = find_busiest_group(&env, cpus, balance);
+	group = find_busiest_group(&env, balance);
 
 	if (*balance == 0)
 		goto out_balanced;
@@ -4475,7 +4473,7 @@ redo:
 		goto out_balanced;
 	}
 
-	busiest = env.find_busiest_queue(&env, group, cpus);
+	busiest = env.find_busiest_queue(&env, group);
 	if (!busiest) {
 		schedstat_inc(sd, lb_nobusyq[idle]);
 		goto out_balanced;
--
1.7.4.1

