From: Yuyang Du <yuyang.du@intel.com>
Subject: [PATCH v10 4/7] sched: Init cfs_rq's sched_entity load average
Date: 15 Jul 2015
The runnable load and utilization averages of a cfs_rq's sched_entity
were not initialized. As is done for a task, give a new cfs_rq's
sched_entity start values that weight its load heavily during its
infancy.

Signed-off-by: Yuyang Du <yuyang.du@intel.com>
---
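For reference, the LOAD_AVG_MAX and LOAD_AVG_MAX_N constants visible in
the fair.c hunk below fall out of the PELT geometric series: each 1024us
period's contribution decays by y per period, with y chosen so that
y^32 = 1/2. A minimal user-space sketch (plain C, not kernel code) that
reproduces the numbers approximately; the kernel's fixed-point decay
tables land on 47742 rather than the real-valued limit of ~47789:

#include <math.h>
#include <stdio.h>

int main(void)
{
	const double y = pow(0.5, 1.0 / 32.0);	/* y^32 == 1/2, ~0.9786 */
	double sum = 0.0;
	int n;

	for (n = 0; n <= 345; n++)		/* LOAD_AVG_MAX_N full periods */
		sum += 1024.0 * pow(y, n);

	printf("sum after 345 periods: %.0f\n", sum);		/* ~47762 */
	printf("limit 1024/(1 - y):    %.0f\n", 1024.0 / (1.0 - y)); /* ~47789 */
	return 0;
}

Initializing load_sum to load_avg * LOAD_AVG_MAX therefore makes a new
entity look as though it had been runnable continuously for its entire
history, which is exactly the heavy-in-infancy start the changelog
describes.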
 kernel/sched/core.c  |  2 +-
 kernel/sched/fair.c  | 11 ++++++-----
 kernel/sched/sched.h |  2 +-
 3 files changed, 8 insertions(+), 7 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4dfab27..2d4c597 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2112,7 +2112,7 @@ void wake_up_new_task(struct task_struct *p)
 #endif
 
 	/* Initialize new task's runnable average */
-	init_task_runnable_average(p);
+	init_entity_runnable_average(&p->se);
 	rq = __task_rq_lock(p);
 	activate_task(rq, p, 0);
 	p->on_rq = TASK_ON_RQ_QUEUED;
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 3e9bccf..edc404c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -672,10 +672,10 @@ static unsigned long task_h_load(struct task_struct *p);
 #define LOAD_AVG_MAX 47742 /* maximum possible load avg */
 #define LOAD_AVG_MAX_N 345 /* number of full periods to produce LOAD_AVG_MAX */
 
-/* Give new task start runnable values to heavy its load in infant time */
-void init_task_runnable_average(struct task_struct *p)
+/* Give a new sched_entity initial runnable values that weight its load heavily in its infancy */
+void init_entity_runnable_average(struct sched_entity *se)
 {
-	struct sched_avg *sa = &p->se.avg;
+	struct sched_avg *sa = &se->avg;
 
 	sa->last_update_time = 0;
 	/*
@@ -684,14 +684,14 @@ void init_task_runnable_average(struct task_struct *p)
 	 * will definitely be updated (after enqueue).
 	 */
 	sa->period_contrib = 1023;
-	sa->load_avg = scale_load_down(p->se.load.weight);
+	sa->load_avg = scale_load_down(se->load.weight);
 	sa->load_sum = sa->load_avg * LOAD_AVG_MAX;
 	sa->util_avg = scale_load_down(SCHED_LOAD_SCALE);
 	sa->util_sum = LOAD_AVG_MAX;
 	/* when this task is enqueued, it will contribute to its cfs_rq's load_avg */
 }
 #else
-void init_task_runnable_average(struct task_struct *p)
+void init_entity_runnable_average(struct sched_entity *se)
 {
 }
 #endif
@@ -8065,6 +8065,7 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
 
 		init_cfs_rq(cfs_rq);
 		init_tg_cfs_entry(tg, cfs_rq, se, i, parent->se[i]);
+		init_entity_runnable_average(se);
 	}
 
 	return 1;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 3dfec8d..f2b17ea 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1293,7 +1293,7 @@ extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
 
 unsigned long to_ratio(u64 period, u64 runtime);
 
-extern void init_task_runnable_average(struct task_struct *p);
+extern void init_entity_runnable_average(struct sched_entity *se);
 
 static inline void add_nr_running(struct rq *rq, unsigned count)
 {
--
2.1.4
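
To see what the hot start buys, here is a toy simulation (again plain
user-space C under assumed parameters, not kernel code) comparing an
entity whose sum starts at the LOAD_AVG_MAX ceiling, as
init_entity_runnable_average() now arranges for group entities, against
one left at zero, with both runnable 50% of each period:

#include <math.h>
#include <stdio.h>

int main(void)
{
	const double y = pow(0.5, 1.0 / 32.0);	/* PELT decay: y^32 == 1/2 */
	const double max = 1024.0 / (1.0 - y);	/* real-valued LOAD_AVG_MAX */
	double hot = max;	/* sum initialized at the ceiling (this patch) */
	double cold = 0.0;	/* sum left uninitialized at zero */
	int period;

	for (period = 1; period <= 64; period++) {
		hot  = hot  * y + 512.0;	/* runnable 50% of the period */
		cold = cold * y + 512.0;
		if (period % 16 == 0)
			printf("period %2d: hot %4.2f  cold %4.2f\n",
			       period, hot / max, cold / max);
	}
	return 0;
}

The hot entity decays from 1.0 down toward its true 0.5 share, while the
cold one must climb up from 0 and is underweighted for dozens of
milliseconds; starting hot avoids under-accounting a newly created group
entity during that window.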

