From:	Vincent Guittot <>
Date:	Tue, 18 Feb 2020 16:28:55 +0100
Subject:	Re: [PATCH v2 4/5] sched/pelt: Add a new runnable average signal
On Tue, 18 Feb 2020 at 15:54, Valentin Schneider
<valentin.schneider@arm.com> wrote:
>
> On 2/14/20 3:27 PM, Vincent Guittot wrote:
> > @@ -532,8 +535,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
> >  			cfs_rq->removed.load_avg);
> >  	SEQ_printf(m, " .%-30s: %ld\n", "removed.util_avg",
> >  			cfs_rq->removed.util_avg);
> > -	SEQ_printf(m, " .%-30s: %ld\n", "removed.runnable_sum",
> > -			cfs_rq->removed.runnable_sum);
>
> Shouldn't that have been part of patch 3?
No, this was used to propagate load_avg, so its removal belongs in this
patch, where the propagation is reworked.
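For context, before this patch the only consumer of removed.runnable_sum
was the load_avg propagation path in update_cfs_rq_load_avg(). Roughly
(a sketch from memory of the pre-patch code, trimmed to the runnable_sum
part; local declarations elided):

	raw_spin_lock(&cfs_rq->removed.lock);
	swap(cfs_rq->removed.runnable_sum, removed_runnable_sum);
	cfs_rq->removed.nr = 0;
	raw_spin_unlock(&cfs_rq->removed.lock);

	/* removed.runnable_sum only fed load_avg propagation down the tree */
	add_tg_cfs_propagate(cfs_rq, -(long)removed_runnable_sum);

i.e. it was tied to load propagation, not to the runnable_load_avg that
patch 3 removed.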
>
> > +	SEQ_printf(m, " .%-30s: %ld\n", "removed.runnable_avg",
> > +			cfs_rq->removed.runnable_avg);
> >  #ifdef CONFIG_FAIR_GROUP_SCHED
> >  	SEQ_printf(m, " .%-30s: %lu\n", "tg_load_avg_contrib",
> >  			cfs_rq->tg_load_avg_contrib);
> > @@ -3278,6 +3280,32 @@ update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq
> >  	cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * LOAD_AVG_MAX;
> >  }
> >
> > +static inline void
> > +update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
> > +{
> > +	long delta = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg;
> > +
> > +	/* Nothing to update */
> > +	if (!delta)
> > +		return;
> > +
> > +	/*
> > +	 * The relation between sum and avg is:
> > +	 *
> > +	 *   LOAD_AVG_MAX - 1024 + sa->period_contrib
> > +	 *
> > +	 * however, the PELT windows are not aligned between grq and gse.
> > +	 */
> > +
> > +	/* Set new sched_entity's runnable */
> > +	se->avg.runnable_avg = gcfs_rq->avg.runnable_avg;
> > +	se->avg.runnable_sum = se->avg.runnable_avg * LOAD_AVG_MAX;
> > +
> > +	/* Update parent cfs_rq runnable */
> > +	add_positive(&cfs_rq->avg.runnable_avg, delta);
> > +	cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * LOAD_AVG_MAX;
> > +}
> > +
>
> Humph, that's an exact copy of update_tg_cfs_util(). FWIW the following
> eldritch horror compiles...
>
> ---
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 99249a2484b4..be796532a2d3 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -3254,57 +3254,34 @@ void set_task_rq_fair(struct sched_entity *se,
>   *
>   */
>
> -static inline void
> -update_tg_cfs_util(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
> -{
> -	long delta = gcfs_rq->avg.util_avg - se->avg.util_avg;
> -
> -	/* Nothing to update */
> -	if (!delta)
> -		return;
> -
> -	/*
> -	 * The relation between sum and avg is:
> -	 *
> -	 *   LOAD_AVG_MAX - 1024 + sa->period_contrib
> -	 *
> -	 * however, the PELT windows are not aligned between grq and gse.
> -	 */
> -
> -	/* Set new sched_entity's utilization */
> -	se->avg.util_avg = gcfs_rq->avg.util_avg;
> -	se->avg.util_sum = se->avg.util_avg * LOAD_AVG_MAX;
> -
> -	/* Update parent cfs_rq utilization */
> -	add_positive(&cfs_rq->avg.util_avg, delta);
> -	cfs_rq->avg.util_sum = cfs_rq->avg.util_avg * LOAD_AVG_MAX;
> -}
> -
> -static inline void
> -update_tg_cfs_runnable(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
> -{
> -	long delta = gcfs_rq->avg.runnable_avg - se->avg.runnable_avg;
> -
> -	/* Nothing to update */
> -	if (!delta)
> -		return;
> -
> -	/*
> -	 * The relation between sum and avg is:
> -	 *
> -	 *   LOAD_AVG_MAX - 1024 + sa->period_contrib
> -	 *
> -	 * however, the PELT windows are not aligned between grq and gse.
> -	 */
> -
> -	/* Set new sched_entity's runnable */
> -	se->avg.runnable_avg = gcfs_rq->avg.runnable_avg;
> -	se->avg.runnable_sum = se->avg.runnable_avg * LOAD_AVG_MAX;
> +#define DECLARE_UPDATE_TG_CFS_SIGNAL(signal)					\
> +static inline void								\
> +update_tg_cfs_##signal(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq) \
> +{										\
> +	long delta = gcfs_rq->avg.signal##_avg - se->avg.signal##_avg;		\
> +										\
> +	/* Nothing to update */							\
> +	if (!delta)								\
> +		return;								\
> +										\
> +	/*									\
> +	 * The relation between sum and avg is:					\
> +	 *									\
> +	 *   LOAD_AVG_MAX - 1024 + sa->period_contrib				\
> +	 *									\
> +	 * however, the PELT windows are not aligned between grq and gse.	\
> +	 */									\
> +	/* Set new sched_entity's runnable */					\
> +	se->avg.signal##_avg = gcfs_rq->avg.signal##_avg;			\
> +	se->avg.signal##_sum = se->avg.signal##_avg * LOAD_AVG_MAX;		\
> +										\
> +	/* Update parent cfs_rq signal## */					\
> +	add_positive(&cfs_rq->avg.signal##_avg, delta);				\
> +	cfs_rq->avg.signal##_sum = cfs_rq->avg.signal##_avg * LOAD_AVG_MAX;	\
> +}										\
>
> -	/* Update parent cfs_rq runnable */
> -	add_positive(&cfs_rq->avg.runnable_avg, delta);
> -	cfs_rq->avg.runnable_sum = cfs_rq->avg.runnable_avg * LOAD_AVG_MAX;
> -}
> +DECLARE_UPDATE_TG_CFS_SIGNAL(util);
> +DECLARE_UPDATE_TG_CFS_SIGNAL(runnable);
TBH, I prefer keeping the current version, which is easier to read IMO.
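Side note on the comment both copies carry: the exact relation is
sum = avg * (LOAD_AVG_MAX - 1024 + sa->period_contrib) for an up-to-date
signal, i.e. something like the hypothetical helper below (just to
illustrate, not in the patch):

	/* Hypothetical: recover the sum from the avg for one sched_avg */
	static inline u64 pelt_sum_from_avg(unsigned long avg, u32 period_contrib)
	{
		/* same divider expression as used elsewhere in fair.c */
		u32 divider = LOAD_AVG_MAX - 1024 + period_contrib;

		return (u64)avg * divider;
	}

But because the grq's and gse's PELT windows are not aligned, their
period_contrib differ, so these functions settle for the LOAD_AVG_MAX
upper bound instead of picking one of the two.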
>
>  static inline void
>  update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
> ---
>
> >  static inline void
> >  update_tg_cfs_load(struct cfs_rq *cfs_rq, struct sched_entity *se, struct cfs_rq *gcfs_rq)
> >  {
> > @@ -3358,6 +3386,7 @@ static inline int propagate_entity_load_avg(struct sched_entity *se)
> >  	add_tg_cfs_propagate(cfs_rq, gcfs_rq->prop_runnable_sum);
> >
> >  	update_tg_cfs_util(cfs_rq, se, gcfs_rq);
> > +	update_tg_cfs_runnable(cfs_rq, se, gcfs_rq);
> >  	update_tg_cfs_load(cfs_rq, se, gcfs_rq);
> >
> >  	trace_pelt_cfs_tp(cfs_rq);
> > @@ -3439,7 +3468,7 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
> >  	raw_spin_lock(&cfs_rq->removed.lock);
> >  	swap(cfs_rq->removed.util_avg, removed_util);
> >  	swap(cfs_rq->removed.load_avg, removed_load);
> > -	swap(cfs_rq->removed.runnable_sum, removed_runnable_sum);
>
> Ditto on the stray from patch 3?
Same as the 1st point above.

>
> > +	swap(cfs_rq->removed.runnable_avg, removed_runnable);
> >  	cfs_rq->removed.nr = 0;
> >  	raw_spin_unlock(&cfs_rq->removed.lock);
> >
> > @@ -3451,7 +3480,16 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
>
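For reference, the rest of that hunk (cut off above) follows the same
pattern as the load and util removal, along these lines (a sketch of the
expected shape, not the literal hunk):

	r = removed_runnable;
	sub_positive(&sa->runnable_avg, r);
	sub_positive(&sa->runnable_sum, r * divider);

with sa = &cfs_rq->avg and divider = LOAD_AVG_MAX - 1024 +
sa->period_contrib, as in the surrounding code.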