From: Quentin Perret <quentin.perret@arm.com>
Subject: [PATCH v5 12/14] sched/fair: Select an energy-efficient CPU on task wake-up
Date: 2018-07-24
If an Energy Model (EM) is available and if the system isn't
overutilized, re-route waking tasks into an energy-aware placement
algorithm. The selection of an energy-efficient CPU for a task
is achieved by estimating the impact on system-level active energy
resulting from the placement of the task on the CPU with the highest
spare capacity in each frequency domain. This strategy spreads tasks
in a frequency domain and avoids overly aggressive task packing. The
best CPU energy-wise is then selected if it saves a large enough
amount of energy with respect to prev_cpu (at least ~6%, see the
code below).

Although it has already shown significant benefits on some existing
targets, this approach cannot scale to platforms with numerous CPUs.
This is an attempt to do something useful, as writing a fast heuristic
that performs reasonably well on a broad spectrum of architectures
isn't an easy task. As such, the scope of usability of the energy-aware
wake-up path is restricted to systems with the SD_ASYM_CPUCAPACITY flag
set, and where the EM isn't too complex.
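
As a concrete illustration of that threshold (not part of the patch,
numbers invented), the following standalone program applies the same
test as the code below; a saving of 120 units out of 1600 clears the
100-unit margin, so the task would migrate:

#include <stdio.h>

int main(void)
{
	/* Invented energy estimates, in the abstract units returned
	 * by compute_energy(). */
	unsigned long prev_energy = 1600;	/* staying on prev_cpu  */
	unsigned long best_energy = 1480;	/* best other candidate */

	/* Same test as in find_energy_efficient_cpu(): the saving
	 * must beat prev_energy >> 4, i.e. 1/16th (~6%). */
	if ((prev_energy - best_energy) > (prev_energy >> 4))
		printf("migrate: saving %lu > margin %lu\n",
		       prev_energy - best_energy, prev_energy >> 4);
	else
		printf("stay on prev_cpu\n");

	return 0;
}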

Cc: Ingo Molnar <mingo@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Quentin Perret <quentin.perret@arm.com>
---
 kernel/sched/fair.c | 124 ++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 120 insertions(+), 4 deletions(-)
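
Illustrative aside (not part of the patch): the "Skip CPUs that will
be overutilized" test in the hunk below compares cpu_cap * 1024
against util * capacity_margin. Assuming capacity_margin still holds
its usual value of 1280 (~20% headroom), the test rejects any CPU
whose utilization with the task included would exceed roughly 80% of
its capacity:

#include <stdbool.h>
#include <stdio.h>

/* Assumption: mirrors capacity_margin from kernel/sched/fair.c. */
static const unsigned long capacity_margin = 1280;

/* Same comparison as the patch: cpu_cap * 1024 < util * capacity_margin. */
static bool will_be_overutilized(unsigned long util, unsigned long cpu_cap)
{
	return cpu_cap * 1024 < util * capacity_margin;
}

int main(void)
{
	/* For a little CPU of capacity 512, the cutoff is util ~410. */
	printf("util 410 -> %d\n", will_be_overutilized(410, 512)); /* 1 */
	printf("util 400 -> %d\n", will_be_overutilized(400, 512)); /* 0 */
	return 0;
}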

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index dce2b1160cf4..c1b789b80cec 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6369,6 +6369,113 @@ static long compute_energy(struct task_struct *p, int dst_cpu,
 	return energy;
 }
 
+/*
+ * find_energy_efficient_cpu(): Find most energy-efficient target CPU for the
+ * waking task. find_energy_efficient_cpu() looks for the CPU with maximum
+ * spare capacity in each frequency domain and uses it as a potential
+ * candidate to execute the task. Then, it uses the Energy Model to figure
+ * out which of the CPU candidates is the most energy-efficient.
+ *
+ * The rationale for this heuristic is as follows. In a frequency domain,
+ * all the most energy-efficient CPU candidates (according to the Energy
+ * Model) are those for which we'll request a low frequency. When there are
+ * several CPUs for which the frequency request will be the same, we don't
+ * have enough data to break the tie between them, because the Energy Model
+ * only includes active power costs. With this model, if we assume that
+ * frequency requests follow utilization (e.g. using schedutil), the CPU with
+ * the maximum spare capacity in a frequency domain is guaranteed to be among
+ * the best candidates of the frequency domain.
+ *
+ * In practice, it could be preferable from an energy standpoint to pack
+ * small tasks on a CPU in order to let other CPUs go into deeper idle states,
+ * but that could also hurt our chances to go cluster idle, and we have no
+ * way to tell with the current Energy Model if this is actually a good
+ * idea or not. So, find_energy_efficient_cpu() basically favors
+ * cluster-packing, and spreading inside a cluster. That should at least be
+ * a good thing for latency, and this is consistent with the idea that most
+ * of the energy savings of EAS come from the asymmetry of the system, and
+ * not so much from breaking the tie between identical CPUs. That's also the
+ * reason why EAS is enabled in the topology code only for systems where
+ * SD_ASYM_CPUCAPACITY is set.
+ */
+static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu,
+				     struct freq_domain *fd)
+{
+	unsigned long prev_energy = ULONG_MAX, best_energy = ULONG_MAX;
+	int cpu, best_energy_cpu = prev_cpu;
+	struct freq_domain *head = fd;
+	unsigned long cpu_cap, util;
+	struct sched_domain *sd;
+
+	sync_entity_load_avg(&p->se);
+
+	if (!task_util_est(p))
+		return prev_cpu;
+
+	/*
+	 * Energy-aware wake-up happens on the lowest sched_domain starting
+	 * from sd_ea spanning over this_cpu and prev_cpu.
+	 */
+	sd = rcu_dereference(*this_cpu_ptr(&sd_ea));
+	while (sd && !cpumask_test_cpu(prev_cpu, sched_domain_span(sd)))
+		sd = sd->parent;
+	if (!sd)
+		return prev_cpu;
+
+	while (fd) {
+		unsigned long cur_energy, spare_cap, max_spare_cap = 0;
+		int max_spare_cap_cpu = -1;
+
+		for_each_cpu_and(cpu, freq_domain_span(fd), sched_domain_span(sd)) {
+			if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
+				continue;
+
+			/* Skip CPUs that will be overutilized. */
+			util = cpu_util_next(cpu, p, cpu);
+			cpu_cap = capacity_of(cpu);
+			if (cpu_cap * 1024 < util * capacity_margin)
+				continue;
+
+			/* Always use prev_cpu as a candidate. */
+			if (cpu == prev_cpu) {
+				prev_energy = compute_energy(p, prev_cpu, head);
+				if (prev_energy < best_energy)
+					best_energy = prev_energy;
+				continue;
+			}
+
+			/*
+			 * Find the CPU with the maximum spare capacity in
+			 * the frequency domain.
+			 */
+			spare_cap = cpu_cap - util;
+			if (spare_cap > max_spare_cap) {
+				max_spare_cap = spare_cap;
+				max_spare_cap_cpu = cpu;
+			}
+		}
+
+		/* Evaluate the energy impact of using this CPU. */
+		if (max_spare_cap_cpu >= 0) {
+			cur_energy = compute_energy(p, max_spare_cap_cpu, head);
+			if (cur_energy < best_energy) {
+				best_energy = cur_energy;
+				best_energy_cpu = max_spare_cap_cpu;
+			}
+		}
+		fd = fd->next;
+	}
+
+	/*
+	 * Pick the best CPU only if it saves at least 6% of the
+	 * energy used by prev_cpu.
+	 */
+	if ((prev_energy - best_energy) > (prev_energy >> 4))
+		return best_energy_cpu;
+
+	return prev_cpu;
+}
+
 /*
  * select_task_rq_fair: Select target runqueue for the waking task in domains
  * that have the 'sd_flag' flag set. In practice, this is SD_BALANCE_WAKE,
@@ -6385,18 +6492,26 @@ static int
 select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_flags)
 {
 	struct sched_domain *tmp, *sd = NULL;
+	struct freq_domain *fd;
 	int cpu = smp_processor_id();
 	int new_cpu = prev_cpu;
-	int want_affine = 0;
+	int want_affine = 0, want_energy = 0;
 	int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING);
 
+	rcu_read_lock();
 	if (sd_flag & SD_BALANCE_WAKE) {
 		record_wakee(p);
-		want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu)
-			      && cpumask_test_cpu(cpu, &p->cpus_allowed);
+		fd = rd_freq_domain(cpu_rq(cpu)->rd);
+		want_energy = fd && !READ_ONCE(cpu_rq(cpu)->rd->overutilized);
+		want_affine = !wake_wide(p) && !wake_cap(p, cpu, prev_cpu) &&
+			      cpumask_test_cpu(cpu, &p->cpus_allowed);
+	}
+
+	if (want_energy) {
+		new_cpu = find_energy_efficient_cpu(p, prev_cpu, fd);
+		goto unlock;
 	}
 
-	rcu_read_lock();
 	for_each_domain(cpu, tmp) {
 		if (!(tmp->flags & SD_LOAD_BALANCE))
 			break;
@@ -6431,6 +6546,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int sd_flag, int wake_f
 		if (want_affine)
 			current->recent_used_cpu = cpu;
 	}
+unlock:
 	rcu_read_unlock();
 
 	return new_cpu;
-- 
2.18.0
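
For readers without the rest of the series applied, here is a
self-contained user-space sketch of the heuristic implemented above.
Every name in it is an invented stand-in (struct fd for struct
freq_domain, the table-driven energy_if_placed() for compute_energy(),
a plain fits-in-capacity test for the capacity_margin check); only the
control flow mirrors find_energy_efficient_cpu():

#include <limits.h>
#include <stdio.h>

#define NR_CPUS 8

struct fd {				/* stand-in for struct freq_domain */
	int first_cpu, nr_cpus;
	struct fd *next;
};

/* Four little CPUs (capacity 512) and four big ones (capacity 1024). */
static const unsigned long capacity[NR_CPUS] = { 512, 512, 512, 512, 1024, 1024, 1024, 1024 };
static unsigned long util[NR_CPUS]           = { 100, 300,  50, 400,  200,  800,  600,  700 };
static const unsigned long cost[NR_CPUS]     = {   1,   1,   1,   1,    3,    3,    3,    3 };

/* Toy energy model: cost-weighted utilization, with the task on dst_cpu. */
static unsigned long energy_if_placed(int dst_cpu, unsigned long task_util)
{
	unsigned long e = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		e += cost[cpu] * (util[cpu] + (cpu == dst_cpu ? task_util : 0));
	return e;
}

static int find_energy_efficient_cpu(int prev_cpu, unsigned long task_util, struct fd *fd)
{
	unsigned long prev_energy = ULONG_MAX, best_energy = ULONG_MAX;
	int best_energy_cpu = prev_cpu;

	for (; fd; fd = fd->next) {
		unsigned long max_spare_cap = 0;
		int max_spare_cap_cpu = -1;

		for (int cpu = fd->first_cpu; cpu < fd->first_cpu + fd->nr_cpus; cpu++) {
			unsigned long new_util = util[cpu] + task_util;

			/* Simplified stand-in for the capacity_margin test. */
			if (new_util > capacity[cpu])
				continue;

			/* prev_cpu always competes, as in the patch. */
			if (cpu == prev_cpu) {
				prev_energy = energy_if_placed(cpu, task_util);
				if (prev_energy < best_energy)
					best_energy = prev_energy;
				continue;
			}

			/* Track the max-spare-capacity CPU of this domain. */
			if (capacity[cpu] - new_util > max_spare_cap) {
				max_spare_cap = capacity[cpu] - new_util;
				max_spare_cap_cpu = cpu;
			}
		}

		/* One candidate, hence one energy estimate, per domain. */
		if (max_spare_cap_cpu >= 0) {
			unsigned long e = energy_if_placed(max_spare_cap_cpu, task_util);

			if (e < best_energy) {
				best_energy = e;
				best_energy_cpu = max_spare_cap_cpu;
			}
		}
	}

	/* Migrate only for a saving above prev_energy >> 4 (~6%). */
	if ((prev_energy - best_energy) > (prev_energy >> 4))
		return best_energy_cpu;
	return prev_cpu;
}

int main(void)
{
	struct fd big = { 4, 4, NULL }, little = { 0, 4, &big };

	/*
	 * prev_cpu 5 (util 800) cannot fit a task of size 300, so
	 * prev_energy stays ULONG_MAX and the little CPU with the most
	 * spare capacity wins: this prints "target CPU: 2".
	 */
	printf("target CPU: %d\n", find_energy_efficient_cpu(5, 300, &little));
	return 0;
}

Note how the real function calls compute_energy() at most once per
frequency domain plus once for prev_cpu: that bound, rather than a
per-CPU evaluation, is what keeps the wake-up path affordable on the
small asymmetric systems the cover text restricts EAS to.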