Subject: Re: [PATCH v3 04/10] sched/fair: rework load_balance
On 19/09/2019 09:33, Vincent Guittot wrote:


[...]

> @@ -8042,14 +8104,24 @@ static inline void update_sg_lb_stats(struct lb_env *env,
>  		}
>  	}
>
> -	/* Adjust by relative CPU capacity of the group */
> +	/* Check if dst cpu is idle and preferred to this group */
> +	if (env->sd->flags & SD_ASYM_PACKING &&
> +	    env->idle != CPU_NOT_IDLE &&
> +	    sgs->sum_h_nr_running &&
> +	    sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu)) {
> +		sgs->group_asym_packing = 1;
> +	}
> +

Since the asym_packing check is done per-sg rather than per-CPU (as the
misfit_task one is), can't you do it directly in group_classify() (like for
overloaded) and so get rid of struct sg_lb_stats::group_asym_packing?

Something like this (only compile tested); a quick recap of what
sched_asym_prefer() checks is appended after the diff:

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index d0c3aa1dc290..b2848d6f8a2a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7692,7 +7692,6 @@ struct sg_lb_stats {
 	unsigned int idle_cpus;
 	unsigned int group_weight;
 	enum group_type group_type;
-	unsigned int group_asym_packing; /* Tasks should be moved to preferred CPU */
 	unsigned long group_misfit_task_load; /* A CPU has a task too big for its capacity */
 #ifdef CONFIG_NUMA_BALANCING
 	unsigned int nr_numa_running;
@@ -7952,6 +7951,20 @@ group_is_overloaded(struct lb_env *env, struct sg_lb_stats *sgs)
 	return false;
 }
 
+static inline bool
+group_has_asym_packing(struct lb_env *env, struct sched_group *sg,
+		       struct sg_lb_stats *sgs)
+{
+	if (env->sd->flags & SD_ASYM_PACKING &&
+	    env->idle != CPU_NOT_IDLE &&
+	    sgs->sum_h_nr_running &&
+	    sched_asym_prefer(env->dst_cpu, sg->asym_prefer_cpu)) {
+		return true;
+	}
+
+	return false;
+}
+
 /*
  * group_smaller_min_cpu_capacity: Returns true if sched_group sg has smaller
  * per-CPU capacity than sched_group ref.
@@ -7983,7 +7996,7 @@ group_type group_classify(struct lb_env *env,
 	if (sg_imbalanced(group))
 		return group_imbalanced;
 
-	if (sgs->group_asym_packing)
+	if (group_has_asym_packing(env, group, sgs))
 		return group_asym_packing;
 
 	if (sgs->group_misfit_task_load)
@@ -8076,14 +8089,6 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 		}
 	}
 
-	/* Check if dst cpu is idle and preferred to this group */
-	if (env->sd->flags & SD_ASYM_PACKING &&
-	    env->idle != CPU_NOT_IDLE &&
-	    sgs->sum_h_nr_running &&
-	    sched_asym_prefer(env->dst_cpu, group->asym_prefer_cpu)) {
-		sgs->group_asym_packing = 1;
-	}
-
 	sgs->group_capacity = group->sgc->capacity;

[...]
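
FWIW, sched_asym_prefer() only compares the arch-defined CPU priorities, so
the whole condition depends on env and already-accumulated per-group state,
which is why evaluating it at classification time seems fine. Rough recap of
the two helpers involved, from memory rather than copied from the exact tree,
so treat as approximate:

	/* kernel/sched/sched.h (approximate) */
	static inline bool sched_asym_prefer(int a, int b)
	{
		return arch_asym_cpu_priority(a) > arch_asym_cpu_priority(b);
	}

	/*
	 * kernel/sched/fair.c: weak default, lower CPU number == higher
	 * priority; overridden by e.g. x86 ITMT.
	 */
	int __weak arch_asym_cpu_priority(int cpu)
	{
		return -cpu;
	}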