From: Yuyang Du <>
Subject: [PATCH v3 08/12] sched/fair: Remove SCHED_LOAD_SHIFT and SCHED_LOAD_SCALE
Date: Wed, 4 May 2016 04:02:49 +0800
After the sched metrics cleanup, these two definitions, which only cause ambiguity, are no longer needed. Use NICE_0_LOAD_SHIFT and NICE_0_LOAD instead; the names state clearly what they are.
Suggested-by: Ben Segall <bsegall@google.com>
Signed-off-by: Yuyang Du <yuyang.du@intel.com>
---
 kernel/sched/fair.c  |  4 ++--
 kernel/sched/sched.h | 22 +++++++++++-----------
 2 files changed, 13 insertions(+), 13 deletions(-)
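Not part of the patch: a minimal user-space sketch that sanity-checks the calibration invariant spelled out in the new sched.h comment. It mirrors the two branches of the #if in sched.h, assuming SCHED_FIXEDPOINT_SHIFT == 10 and a nice-0 weight of 1024 (sched_prio_to_weight[20]); the HIGH_RES_LOAD macro is only a stand-in here for the BITS_PER_LONG > 32 case.

/* nice0.c - checks scale_load(nice-0 weight) == NICE_0_LOAD outside the kernel */
#include <assert.h>
#include <stdio.h>

#define SCHED_FIXEDPOINT_SHIFT	10

#ifdef HIGH_RES_LOAD	/* models the (currently disabled) BITS_PER_LONG > 32 branch */
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
# define scale_load_down(w)	((w) >> SCHED_FIXEDPOINT_SHIFT)
#else			/* models the low-resolution branch */
# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
# define scale_load(w)		(w)
# define scale_load_down(w)	(w)
#endif

#define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)

int main(void)
{
	long nice_0_weight = 1024;	/* sched_prio_to_weight[20] */

	/* User-visible weight scaled up must equal the internal nice-0 load. */
	assert(scale_load(nice_0_weight) == NICE_0_LOAD);

	/* Scaling the internal load back down recovers the user-visible weight. */
	assert(scale_load_down(NICE_0_LOAD) == nice_0_weight);

	printf("NICE_0_LOAD = %ld, nice-0 weight = %ld\n",
	       NICE_0_LOAD, nice_0_weight);
	return 0;
}

Build with "gcc -o nice0 nice0.c" for the low-resolution branch, or "gcc -DHIGH_RES_LOAD -o nice0 nice0.c" for the high-resolution one; both asserts hold either way.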
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 1a61137..017a26a 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -721,7 +721,7 @@ void post_init_entity_util_avg(struct sched_entity *se)
 {
 	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 	struct sched_avg *sa = &se->avg;
-	long cap = (long)(scale_load_down(SCHED_LOAD_SCALE) - cfs_rq->avg.util_avg) / 2;
+	long cap = (long)(SCHED_CAPACITY_SCALE - cfs_rq->avg.util_avg) / 2;
 
 	if (cap > 0) {
 		if (cfs_rq->avg.util_avg != 0) {
@@ -7019,7 +7019,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
 	if (busiest->group_type == group_overloaded &&
 	    local->group_type == group_overloaded) {
 		load_above_capacity = busiest->sum_nr_running *
-					SCHED_LOAD_SCALE;
+					scale_load_down(NICE_0_LOAD);
 		if (load_above_capacity > busiest->group_capacity)
 			load_above_capacity -= busiest->group_capacity;
 		else
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 996a137..1a3be6f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -54,25 +54,25 @@ static inline void cpu_load_update_active(struct rq *this_rq) { }
  * increased costs.
  */
 #if 0 /* BITS_PER_LONG > 32 -- currently broken: it increases power usage under light load */
-# define SCHED_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
+# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT + SCHED_FIXEDPOINT_SHIFT)
 # define scale_load(w)		((w) << SCHED_FIXEDPOINT_SHIFT)
 # define scale_load_down(w)	((w) >> SCHED_FIXEDPOINT_SHIFT)
 #else
-# define SCHED_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
+# define NICE_0_LOAD_SHIFT	(SCHED_FIXEDPOINT_SHIFT)
 # define scale_load(w)		(w)
 # define scale_load_down(w)	(w)
 #endif
 
-#define SCHED_LOAD_SCALE	(1L << SCHED_LOAD_SHIFT)
-
 /*
- * NICE_0's weight (visible to user) and its load (invisible to user) have
- * independent ranges, but they should be well calibrated. We use scale_load()
- * and scale_load_down(w) to convert between them, the following must be true:
- * scale_load(sched_prio_to_weight[20]) == NICE_0_LOAD
+ * Task weight (visible to user) and its load (invisible to user) have
+ * independent resolution, but they should be well calibrated. We use
+ * scale_load() and scale_load_down(w) to convert between them. The
+ * following must be true:
+ *
+ * scale_load(sched_prio_to_weight[USER_PRIO(NICE_TO_PRIO(0))]) == NICE_0_LOAD
+ *
  */
-#define NICE_0_LOAD		SCHED_LOAD_SCALE
-#define NICE_0_SHIFT		SCHED_LOAD_SHIFT
+#define NICE_0_LOAD		(1L << NICE_0_LOAD_SHIFT)
 
 /*
  * Single value that decides SCHED_DEADLINE internal math precision.
@@ -861,7 +861,7 @@ DECLARE_PER_CPU(struct sched_domain *, sd_asym);
 struct sched_group_capacity {
 	atomic_t ref;
 	/*
-	 * CPU capacity of this group, SCHED_LOAD_SCALE being max capacity
+	 * CPU capacity of this group, SCHED_CAPACITY_SCALE being max capacity
 	 * for a single CPU.
 	 */
 	unsigned int capacity;
-- 
1.7.9.5