Subject: [PATCH 08/12] sched/fair: Take into account runnable_avg to classify group
Take into account the new runnable_avg signal to classify a group and to
mitigate the volatility of util_avg in the face of intensive migration.
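
The check added below compares the group's summed runnable_avg against its
capacity, scaled by the imbalance threshold. A minimal standalone sketch of
that comparison (the helper name and the example numbers are illustrative,
not part of the patch):

/*
 * Illustrative sketch only: mirrors the comparison added to
 * group_has_capacity()/group_is_overloaded() below, using plain
 * scalars instead of struct sg_lb_stats.
 */
static int runnable_exceeds_capacity(unsigned long group_capacity,
				     unsigned long group_runnable,
				     unsigned int imbalance_pct)
{
	/*
	 * Example: with group_capacity = 1024 and imbalance_pct = 117,
	 * any group_runnable above ~1198 reports the group as having
	 * no spare capacity (or as overloaded).
	 */
	return (group_capacity * imbalance_pct) < (group_runnable * 100);
}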

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
---
 kernel/sched/fair.c | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 470afbb3e303..80c237677fc8 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5460,6 +5460,11 @@ static unsigned long cpu_load_without(struct rq *rq, struct task_struct *p)
 	return load;
 }
 
+static unsigned long cpu_runnable(struct rq *rq)
+{
+	return cfs_rq_runnable_avg(&rq->cfs);
+}
+
 static unsigned long capacity_of(int cpu)
 {
 	return cpu_rq(cpu)->cpu_capacity;
@@ -7744,6 +7749,7 @@ struct sg_lb_stats {
 	unsigned long group_load; /* Total load over the CPUs of the group */
 	unsigned long group_capacity;
 	unsigned long group_util; /* Total utilization of the group */
+	unsigned long group_runnable; /* Total runnable time over the CPUs of the group */
 	unsigned int sum_nr_running; /* Nr of tasks running in the group */
 	unsigned int sum_h_nr_running; /* Nr of CFS tasks running in the group */
 	unsigned int idle_cpus;
@@ -7964,6 +7970,10 @@ group_has_capacity(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
 	if (sgs->sum_nr_running < sgs->group_weight)
 		return true;
 
+	if ((sgs->group_capacity * imbalance_pct) <
+			(sgs->group_runnable * 100))
+		return false;
+
 	if ((sgs->group_capacity * 100) >
 			(sgs->group_util * imbalance_pct))
 		return true;
@@ -7989,6 +7999,10 @@ group_is_overloaded(unsigned int imbalance_pct, struct sg_lb_stats *sgs)
 			(sgs->group_util * imbalance_pct))
 		return true;
 
+	if ((sgs->group_capacity * imbalance_pct) <
+			(sgs->group_runnable * 100))
+		return true;
+
 	return false;
 }
 
@@ -8083,6 +8097,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
 
 	sgs->group_load += cpu_load(rq);
 	sgs->group_util += cpu_util(i);
+	sgs->group_runnable += cpu_runnable(rq);
 	sgs->sum_h_nr_running += rq->cfs.h_nr_running;
 
 	nr_running = rq->nr_running;
-- 
2.16.4