From: Rik van Riel <riel@surriel.com>
Subject: [PATCH 7/8] sched,fair: refactor enqueue/dequeue_entity
Date: 12 Jun 2019
Refactor enqueue_entity, dequeue_entity, and update_load_avg in order to
split out the work that, with a flat runqueue, still needs to happen at
every level in the cgroup hierarchy from the work that only needs to
happen once.

No functional changes.
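
In rough outline, the split looks like the sketch below (illustration
only, not part of the diff; the bodies are simplified, and since the
callers still run both helpers at every cgroup level, behaviour is
unchanged by this patch):

    /* Per-cgroup-level work, factored into its own helper: */
    static bool
    enqueue_entity_groups(struct cfs_rq *cfs_rq, struct sched_entity *se,
                          int flags)
    {
            /* PELT update; now reports whether anything changed */
            if (!update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH))
                    return false;
            update_cfs_group(se);   /* recompute group shares */
            return true;
    }

    /* Work tied to the runqueue itself stays in enqueue_entity(): */
    static void
    enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
    {
            enqueue_runnable_load_avg(cfs_rq, se);
            account_entity_enqueue(cfs_rq, se);
            /* ... vruntime placement, stats, rbtree insertion ... */
    }

The dequeue side is split the same way into dequeue_entity_groups() and
dequeue_entity().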

Signed-off-by: Rik van Riel <riel@surriel.com>
---
kernel/sched/fair.c | 65 +++++++++++++++++++++++++++++----------------
1 file changed, 42 insertions(+), 23 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 35153a89d5c5..c2baf3c8a879 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3481,17 +3481,17 @@ static void detach_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s
#define DO_ATTACH 0x4

/* Update task and its cfs_rq load average */
-static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+static inline bool update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
u64 now = cfs_rq_clock_pelt(cfs_rq);
- int decayed;
+ int decayed, updated = 0;

/*
* Track task load average for carrying it to new CPU after migrated, and
* track group sched_entity load average for task_h_load calc in migration
*/
if (se->avg.last_update_time && !(flags & SKIP_AGE_LOAD))
- __update_load_avg_se(now, cfs_rq, se);
+ updated = __update_load_avg_se(now, cfs_rq, se);

decayed = update_cfs_rq_load_avg(now, cfs_rq);
decayed |= propagate_entity_load_avg(se);
@@ -3510,6 +3510,8 @@ static inline void update_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *s

} else if (decayed && (flags & UPDATE_TG))
update_tg_load_avg(cfs_rq, 0);
+
+ return decayed | updated;
}

#ifndef CONFIG_64BIT
@@ -3851,6 +3853,24 @@ static inline void check_schedstat_required(void)
* CPU and an up-to-date min_vruntime on the destination CPU.
*/

+static bool
+enqueue_entity_groups(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+{
+ /*
+ * When enqueuing a sched_entity, we must:
+ * - Update loads to have both entity and cfs_rq synced with now.
+ * - Add its load to cfs_rq->runnable_avg
+ * - For group_entity, update its weight to reflect the new share of
+ * its group cfs_rq
+ * - Add its new weight to cfs_rq->load.weight
+ */
+ if (!update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH))
+ return false;
+
+ update_cfs_group(se);
+ return true;
+}
+
static void
enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
@@ -3875,16 +3895,6 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
if (renorm && !curr)
se->vruntime += cfs_rq->min_vruntime;

- /*
- * When enqueuing a sched_entity, we must:
- * - Update loads to have both entity and cfs_rq synced with now.
- * - Add its load to cfs_rq->runnable_avg
- * - For group_entity, update its weight to reflect the new share of
- * its group cfs_rq
- * - Add its new weight to cfs_rq->load.weight
- */
- update_load_avg(cfs_rq, se, UPDATE_TG | DO_ATTACH);
- update_cfs_group(se);
enqueue_runnable_load_avg(cfs_rq, se);
account_entity_enqueue(cfs_rq, se);

@@ -3951,14 +3961,9 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se)

static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq);

-static void
-dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+static bool
+dequeue_entity_groups(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
{
- /*
- * Update run-time statistics of the 'current'.
- */
- update_curr(cfs_rq);
-
/*
* When dequeuing a sched_entity, we must:
* - Update loads to have both entity and cfs_rq synced with now.
@@ -3967,7 +3972,21 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
* - For group entity, update its weight to reflect the new share
* of its group cfs_rq.
*/
- update_load_avg(cfs_rq, se, UPDATE_TG);
+ if (!update_load_avg(cfs_rq, se, UPDATE_TG))
+ return false;
+ update_cfs_group(se);
+
+ return true;
+}
+
+static void
+dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
+{
+ /*
+ * Update run-time statistics of the 'current'.
+ */
+ update_curr(cfs_rq);
+
dequeue_runnable_load_avg(cfs_rq, se);

update_stats_dequeue(cfs_rq, se, flags);
@@ -3991,8 +4010,6 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
/* return excess runtime on last dequeue */
return_cfs_rq_runtime(cfs_rq);

- update_cfs_group(se);
-
/*
* Now advance min_vruntime if @se was the entity holding it back,
* except when: DEQUEUE_SAVE && !DEQUEUE_MOVE, in this case we'll be
@@ -5157,6 +5174,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
if (se->on_rq)
break;
cfs_rq = cfs_rq_of(se);
+ enqueue_entity_groups(cfs_rq, se, flags);
enqueue_entity(cfs_rq, se, flags);

/*
@@ -5239,6 +5257,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)

for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
+ dequeue_entity_groups(cfs_rq, se, flags);
dequeue_entity(cfs_rq, se, flags);

/*
--
2.20.1