Subject: [PATCH v4 2/3] sched/fair: split the remove_entity_load_avg() into two functions
Date: Sat, 24 Oct 2015 01:16:20 +0900
From: Byungchul Park <byungchul.park@lge.com>
remove_entity_load_avg() consists of two parts: the first part updates the se's last_update_time, and the second part removes the se's load from the cfs_rq. It can become necessary to use only the first part or only the second part, for the purpose of optimization, so this patch splits the function into two.
Additionally, remove_entity_load_avg() is mostly performed with a se and that se's current cfs_rq. However, it can become necessary to perform it with a se and the se's previous cfs_rq during migration, for the purpose of optimization, so this patch adds an additional argument, cfs_rq, to remove_entity_load_avg().
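To illustrate the migration case, a caller on the migration path could capture the cfs_rq the entity is leaving and hand it to remove_entity_load_avg() explicitly, instead of re-deriving it with cfs_rq_of() after the move. The sketch below is only an illustration of that intent, not part of this patch; the function name and the prev_cfs_rq local are assumed for the example.

/*
 * Illustrative only: record the cfs_rq the entity is leaving, then
 * remove its load from that previous cfs_rq explicitly.
 */
static void example_migrate_remove(struct task_struct *p)
{
	struct cfs_rq *prev_cfs_rq = cfs_rq_of(&p->se);	/* cfs_rq before the move */

	remove_entity_load_avg(&p->se, prev_cfs_rq);

	/* Tell the new CPU we are migrated. */
	p->se.avg.last_update_time = 0;
}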
Signed-off-by: Byungchul Park <byungchul.park@lge.com>
---
 kernel/sched/fair.c | 31 +++++++++++++++++++++----------
 1 file changed, 21 insertions(+), 10 deletions(-)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e9c5668..522aa07 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2853,13 +2853,9 @@ void set_task_rq_fair(struct sched_entity *se,
 }
 #endif
 
-/*
- * Task first catches up with cfs_rq, and then subtract
- * itself from the cfs_rq (task must be off the queue now).
- */
-void remove_entity_load_avg(struct sched_entity *se)
+/* This is useful when rq lock may not be held */
+static inline void __update_entity_load_avg(struct sched_entity *se, struct cfs_rq *cfs_rq)
 {
-	struct cfs_rq *cfs_rq = cfs_rq_of(se);
 	u64 last_update_time;
 
 #ifndef CONFIG_64BIT
@@ -2875,11 +2871,25 @@ void remove_entity_load_avg(struct sched_entity *se)
 #endif
 
 	__update_load_avg(last_update_time, cpu_of(rq_of(cfs_rq)), &se->avg, 0, 0, NULL);
+}
+
+static inline void __remove_entity_load_avg(struct sched_entity *se, struct cfs_rq *cfs_rq)
+{
 	atomic_long_add(se->avg.load_avg, &cfs_rq->removed_load_avg);
 	atomic_long_add(se->avg.util_avg, &cfs_rq->removed_util_avg);
 }
 
 /*
+ * Task first catches up with cfs_rq, and then subtract
+ * itself from the cfs_rq (task must be off the queue now).
+ */
+static inline void remove_entity_load_avg(struct sched_entity *se, struct cfs_rq *cfs_rq)
+{
+	__update_entity_load_avg(se, cfs_rq);
+	__remove_entity_load_avg(se, cfs_rq);
+}
+
+/*
  * Update the rq's load with the elapsed running time before entering
  * idle. if the last scheduled task is not a CFS task, idle_enter will
  * be the only way to update the runnable statistic.
@@ -2916,7 +2926,8 @@ static inline void
 enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
 static inline void
 dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
-static inline void remove_entity_load_avg(struct sched_entity *se) {}
+static inline void
+remove_entity_load_avg(struct sched_entity *se, struct cfs_rq *cfs_rq) {}
 
 static inline void
 attach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
@@ -5063,7 +5074,7 @@ static void migrate_task_rq_fair(struct task_struct *p, int next_cpu)
 	 * will result in the wakee task is less decayed, but giving the wakee more
 	 * load sounds not bad.
 	 */
-	remove_entity_load_avg(&p->se);
+	remove_entity_load_avg(&p->se, cfs_rq_of(&p->se));
 
 	/* Tell new CPU we are migrated */
 	p->se.avg.last_update_time = 0;
@@ -5074,7 +5085,7 @@ static void migrate_task_rq_fair(struct task_struct *p, int next_cpu)
 
 static void task_dead_fair(struct task_struct *p)
 {
-	remove_entity_load_avg(&p->se);
+	remove_entity_load_avg(&p->se, cfs_rq_of(&p->se));
 }
 #endif /* CONFIG_SMP */
 
@@ -8144,7 +8155,7 @@ void free_fair_sched_group(struct task_group *tg)
 			kfree(tg->cfs_rq[i]);
 		if (tg->se) {
 			if (tg->se[i])
-				remove_entity_load_avg(tg->se[i]);
+				remove_entity_load_avg(tg->se[i], cfs_rq_of(tg->se[i]));
 			kfree(tg->se[i]);
 		}
 	}
--
1.7.9.5
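As a usage note on the split itself: the first half can now be called on its own when a caller only needs to catch the entity up to the cfs_rq clock without queueing its load for removal. A minimal sketch, assuming a hypothetical caller inside fair.c:

/* Illustrative only: sync last_update_time, but keep the load on the cfs_rq. */
static void example_sync_entity(struct sched_entity *se)
{
	struct cfs_rq *cfs_rq = cfs_rq_of(se);

	__update_entity_load_avg(se, cfs_rq);
}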