Subject: Re: [PATCH v5 5/6] sched/fair: Track peak per-entity utilization
On Fri, Oct 14, 2016 at 02:41:11PM +0100, Morten Rasmussen wrote:
> @@ -3515,6 +3517,10 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags)
> */
> if ((flags & (DEQUEUE_SAVE | DEQUEUE_MOVE)) == DEQUEUE_SAVE)
> update_min_vruntime(cfs_rq);
> +
> + /* Save peak PELT utilization for task to help wake-up decisions */
> + if (flags & DEQUEUE_SLEEP && entity_is_task(se))
> + se->avg.util_peak = se->avg.util_avg;
> }
>
> /*

The friendly kbuild robot swiftly pointed out that this doesn't build
for !CONFIG_SMP. The replacement patch below moves this bit inside
dequeue_entity_load_avg(), which should be equivalent and doesn't break
!CONFIG_SMP.
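
For reference, the structural reason, roughly sketched (simplified from
the tree this series is based on, not a literal copy of either file):
se->avg only exists on SMP builds, while dequeue_entity() is compiled
unconditionally, so the load-tracking helpers with their empty
!CONFIG_SMP stubs are the natural home for the snapshot.

/* include/linux/sched.h: the PELT state is SMP-only */
struct sched_entity {
	/* ... */
#ifdef CONFIG_SMP
	struct sched_avg	avg;	/* util_avg, util_peak, ... */
#endif
};

/* kernel/sched/fair.c: dequeue_entity() is built for all configs, so
 * dereferencing se->avg.util_peak there breaks UP kernels, whereas
 * dequeue_entity_load_avg() compiles away together with se->avg: */
#ifdef CONFIG_SMP
static void
dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
	/* ... existing runnable load accounting ... */
	/* new home for the util_peak snapshot */
}
#else
static inline void
dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se) {}
#endif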

----8<---

From 36966c83cc3493332d92dcadb795eebc8c300558 Mon Sep 17 00:00:00 2001
From: Morten Rasmussen <morten.rasmussen@arm.com>
Date: Wed, 17 Aug 2016 15:30:43 +0100
Subject: [PATCH v5 5/6] sched/fair: Track peak per-entity utilization

When PELT (per-entity load tracking) utilization is used to place tasks
at wake-up, the decayed utilization (due to sleep) leads to
under-estimation of the task's true utilization. This can mean putting
the task on a cpu with less available capacity than it actually needs.
The issue can be mitigated by using 'peak' utilization instead of the
decayed utilization for placement decisions, e.g. at task wake-up.

The 'peak' utilization metric, util_peak, tracks util_avg while the task
is running and retains its previous value while the task is
blocked/waiting on the rq. It is updated to track util_avg again as
soon as the task is running again.
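
For illustration only (a toy model with made-up names, not part of the
patch), the intended relationship between the two fields is roughly:

struct toy_avg {
	unsigned long util_avg;		/* decays while the task sleeps */
	unsigned long util_peak;	/* frozen across the sleep */
};

static void toy_dequeue_sleep(struct toy_avg *sa)
{
	sa->util_peak = sa->util_avg;	/* snapshot taken at dequeue */
}

static unsigned long toy_util_for_placement(const struct toy_avg *sa)
{
	/* wake-up placement reads the snapshot, not decayed util_avg */
	return sa->util_peak;
}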

cc: Ingo Molnar <mingo@redhat.com>
cc: Peter Zijlstra <peterz@infradead.org>

Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
---
include/linux/sched.h | 2 +-
kernel/sched/fair.c | 23 +++++++++++++++++------
2 files changed, 18 insertions(+), 7 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index ad51978ff15e..988d7f48604e 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1294,7 +1294,7 @@ struct load_weight {
struct sched_avg {
u64 last_update_time, load_sum;
u32 util_sum, period_contrib;
- unsigned long load_avg, util_avg;
+ unsigned long load_avg, util_avg, util_peak;
};

#ifdef CONFIG_SCHEDSTATS
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a5efafda23ef..84b767399d61 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -696,6 +696,7 @@ void init_entity_runnable_average(struct sched_entity *se)
* At this point, util_avg won't be used in select_task_rq_fair anyway
*/
sa->util_avg = 0;
+ sa->util_peak = 0;
sa->util_sum = 0;
/* when this task enqueue'ed, it will contribute to its cfs_rq's load_avg */
}
@@ -747,6 +748,7 @@ void post_init_entity_util_avg(struct sched_entity *se)
} else {
sa->util_avg = cap;
}
+ sa->util_peak = sa->util_avg;
sa->util_sum = sa->util_avg * LOAD_AVG_MAX;
}

@@ -3181,6 +3183,10 @@ dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se)
max_t(long, cfs_rq->runnable_load_avg - se->avg.load_avg, 0);
cfs_rq->runnable_load_sum =
max_t(s64, cfs_rq->runnable_load_sum - se->avg.load_sum, 0);
+
+ /* Save peak PELT utilization for task to help wake-up decisions */
+ if (entity_is_task(se))
+ se->avg.util_peak = se->avg.util_avg;
}

#ifndef CONFIG_64BIT
@@ -5203,7 +5209,7 @@ static int wake_affine(struct sched_domain *sd, struct task_struct *p,
return 1;
}

-static inline int task_util(struct task_struct *p);
+static inline int task_util_peak(struct task_struct *p);
static int cpu_util_wake(int cpu, struct task_struct *p);

static unsigned long capacity_spare_wake(int cpu, struct task_struct *p)
@@ -5286,14 +5292,14 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
/*
* The cross-over point between using spare capacity or least load
* is too conservative for high utilization tasks on partially
- * utilized systems if we require spare_capacity > task_util(p),
+ * utilized systems if we require spare_capacity > task_util_peak(p),
* so we allow for some task stuffing by using
- * spare_capacity > task_util(p)/2.
+ * spare_capacity > task_util_peak(p)/2.
*/
- if (this_spare > task_util(p) / 2 &&
+ if (this_spare > task_util_peak(p) / 2 &&
imbalance*this_spare > 100*most_spare)
return NULL;
- else if (most_spare > task_util(p) / 2)
+ else if (most_spare > task_util_peak(p) / 2)
return most_spare_sg;

if (!idlest || 100*this_load < imbalance*min_load)
@@ -5628,6 +5634,11 @@ static inline int task_util(struct task_struct *p)
return p->se.avg.util_avg;
}

+static inline int task_util_peak(struct task_struct *p)
+{
+ return p->se.avg.util_peak;
+}
+
/*
* cpu_util_wake: Compute cpu utilization with any contributions from
* the waking task p removed.
@@ -5667,7 +5678,7 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
/* Bring task utilization in sync with prev_cpu */
sync_entity_load_avg(&p->se);

- return min_cap * 1024 < task_util(p) * capacity_margin;
+ return min_cap * 1024 < task_util_peak(p) * capacity_margin;
}

/*
--
2.7.4