    Subject: [RFCv3 PATCH 40/48] sched: Introduce energy awareness into update_sd_lb_stats
    From: Dietmar Eggemann <dietmar.eggemann@arm.com>
    Date: 4 Feb 2015

    Energy-aware load balancing has to work alongside the conventional
    load-based functionality. This includes the tipping-point feature,
    i.e. being able to fall back from energy-aware to conventional
    load-based balancing during an ongoing load-balancing action.

    That is why this patch introduces an additional reference to hold the
    least efficient sched group (costliest) as well as its statistics, in
    the form of an extra sg_lb_stats structure (costliest_stat).

    The function update_sd_pick_costliest is used to pick the least
    efficient sched group, in parallel with the existing
    update_sd_pick_busiest.

    cc: Ingo Molnar <mingo@redhat.com>
    cc: Peter Zijlstra <peterz@infradead.org>

    Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
    ---
    kernel/sched/fair.c | 21 +++++++++++++++++++++
    1 file changed, 21 insertions(+)

    diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
    index bfa335e..36f3c77 100644
    --- a/kernel/sched/fair.c
    +++ b/kernel/sched/fair.c
    @@ -6363,12 +6363,14 @@ struct sg_lb_stats {
    */
    struct sd_lb_stats {
    struct sched_group *busiest; /* Busiest group in this sd */
    + struct sched_group *costliest; /* Least efficient group in this sd */
    struct sched_group *local; /* Local group in this sd */
    unsigned long total_load; /* Total load of all groups in sd */
    unsigned long total_capacity; /* Total capacity of all groups in sd */
    unsigned long avg_load; /* Average load across all groups in sd */

    struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
    + struct sg_lb_stats costliest_stat;/* Statistics of the least efficient group */
    struct sg_lb_stats local_stat; /* Statistics of the local group */
    };

    @@ -6390,6 +6392,9 @@ static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
    .sum_nr_running = 0,
    .group_type = group_other,
    },
    + .costliest_stat = {
    + .group_eff = ULONG_MAX,
    + },
    };
    }

    @@ -6782,6 +6787,17 @@ static bool update_sd_pick_busiest(struct lb_env *env,
    return false;
    }

    +static noinline bool update_sd_pick_costliest(struct sd_lb_stats *sds,
    + struct sg_lb_stats *sgs)
    +{
    + struct sg_lb_stats *costliest = &sds->costliest_stat;
    +
    + if (sgs->group_eff < costliest->group_eff)
    + return true;
    +
    + return false;
    +}
    +
    #ifdef CONFIG_NUMA_BALANCING
    static inline enum fbq_type fbq_classify_group(struct sg_lb_stats *sgs)
    {
    @@ -6872,6 +6888,11 @@ static inline void update_sd_lb_stats(struct lb_env *env, struct sd_lb_stats *sd
    sds->busiest_stat = *sgs;
    }

    + if (env->use_ea && update_sd_pick_costliest(sds, sgs)) {
    + sds->costliest = sg;
    + sds->costliest_stat = *sgs;
    + }
    +
    next_group:
    /* Now, start updating sd_lb_stats */
    sds->total_load += sgs->group_load;
    --
    1.9.1
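
    For readers following along outside the series, below is a standalone
    userspace sketch of the idea: both picks run side by side over the
    sched groups, and the caller can fall back from the energy-aware
    choice to the conventional load-based one at the tipping point. Only
    update_sd_pick_costliest(), group_eff, costliest/costliest_stat and
    the use_ea gate come from this patch; the reduced busiest pick and
    the fallback policy at the end are assumptions made for illustration,
    not the series' actual find_busiest_group() logic.

    #include <limits.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct sg_lb_stats {
            unsigned long group_load;   /* conventional load metric */
            unsigned long group_eff;    /* energy efficiency, lower = worse */
    };

    struct sched_group {
            const char *name;
            struct sg_lb_stats stats;
    };

    struct sd_lb_stats {
            struct sched_group *busiest;    /* busiest group in this sd */
            struct sched_group *costliest;  /* least efficient group */
            struct sg_lb_stats busiest_stat;
            struct sg_lb_stats costliest_stat;
    };

    /* Same test as in the patch: lowest group_eff seen so far wins. */
    static bool update_sd_pick_costliest(struct sd_lb_stats *sds,
                                         struct sg_lb_stats *sgs)
    {
            return sgs->group_eff < sds->costliest_stat.group_eff;
    }

    int main(void)
    {
            struct sched_group groups[] = {
                    { "sg0", { .group_load = 300, .group_eff = 70 } },
                    { "sg1", { .group_load = 500, .group_eff = 90 } },
                    { "sg2", { .group_load = 200, .group_eff = 40 } },
            };
            struct sd_lb_stats sds = {
                    /* mirrors init_sd_lb_stats(): any group beats ULONG_MAX */
                    .costliest_stat = { .group_eff = ULONG_MAX },
            };
            bool use_ea = true;     /* stand-in for env->use_ea */
            size_t i;

            for (i = 0; i < sizeof(groups) / sizeof(groups[0]); i++) {
                    struct sg_lb_stats *sgs = &groups[i].stats;

                    /* Conventional pick, reduced to "highest load wins". */
                    if (!sds.busiest ||
                        sgs->group_load > sds.busiest_stat.group_load) {
                            sds.busiest = &groups[i];
                            sds.busiest_stat = *sgs;
                    }

                    /* Energy-aware pick runs in parallel, as in the patch. */
                    if (use_ea && update_sd_pick_costliest(&sds, sgs)) {
                            sds.costliest = &groups[i];
                            sds.costliest_stat = *sgs;
                    }
            }

            /*
             * Assumed tipping-point policy: use the energy-aware pick
             * while it is available, otherwise fall back to the
             * conventional load-based one.
             */
            struct sched_group *target = (use_ea && sds.costliest) ?
                                         sds.costliest : sds.busiest;

            printf("picked %s (busiest: %s, costliest: %s)\n",
                   target->name, sds.busiest->name,
                   sds.costliest ? sds.costliest->name : "none");
            return 0;
    }

    Compiled and run, this picks sg2 (group_eff 40); clearing use_ea,
    i.e. past the tipping point, degrades the pick to the plain busiest
    group sg1.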

