Subject: [RFC][PATCH 08/22] sched: SCHED_DEADLINE avg_update accounting
From: Dario Faggioli <raistlin@linux.it>
Date: 2010-10-29

Make the core scheduler and the load balancer aware of the load
produced by -deadline tasks, by updating a per-rq moving average
(rq->dl_avg) in the same way as sched_rt does with rq->rt_avg.
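
For reference, the decay scheme being extended here halves the
accumulated runtime once per sched_avg_period(), so rq->dl_avg (like
rq->rt_avg) ends up being a geometrically decayed sum of recent
-deadline runtime. A minimal user-space sketch of that bookkeeping
follows; the period value, struct layout and helper names are
illustrative assumptions for the example, not the kernel's code:

	#include <stdio.h>
	#include <stdint.h>

	#define AVG_PERIOD_NS (500ULL * 1000 * 1000) /* assumed 0.5s period */

	struct fake_rq {
		uint64_t clock;      /* current time, ns */
		uint64_t age_stamp;  /* start of current averaging period */
		uint64_t dl_avg;     /* decayed -deadline runtime */
		uint64_t rt_avg;     /* decayed -rt runtime */
	};

	static void avg_update(struct fake_rq *rq)
	{
		/* Mirrors sched_avg_update(): halve the sums once per period. */
		while (rq->clock - rq->age_stamp > AVG_PERIOD_NS) {
			rq->age_stamp += AVG_PERIOD_NS;
			rq->dl_avg /= 2;
			rq->rt_avg /= 2;
		}
	}

	static void dl_avg_update(struct fake_rq *rq, uint64_t dl_delta)
	{
		/* What sched_dl_avg_update() does: accumulate, then decay. */
		rq->dl_avg += dl_delta;
		avg_update(rq);
	}

	int main(void)
	{
		struct fake_rq rq = { 0 };

		/* Charge 100ms of -deadline runtime, then let ~1.1s elapse. */
		rq.clock = 100000000ULL;
		dl_avg_update(&rq, 100000000ULL);
		rq.clock += 1000000000ULL;
		avg_update(&rq);

		/* Two full periods elapsed, so 100ms has decayed to 25ms. */
		printf("dl_avg = %llu ns\n", (unsigned long long)rq.dl_avg);
		return 0;
	}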

    Signed-off-by: Dario Faggioli <raistlin@linux.it>
    ---
 kernel/sched.c      |   13 ++++++++++++-
 kernel/sched_dl.c   |    2 ++
 kernel/sched_fair.c |    4 ++--
 3 files changed, 16 insertions(+), 3 deletions(-)

    diff --git a/kernel/sched.c b/kernel/sched.c
    index 79e7c1c..7f0780c 100644
    --- a/kernel/sched.c
    +++ b/kernel/sched.c
@@ -571,7 +571,7 @@ struct rq {
 
 	unsigned long avg_load_per_task;
 
-	u64 rt_avg;
+	u64 dl_avg, rt_avg;
 	u64 age_stamp;
 	u64 idle_stamp;
 	u64 avg_idle;
@@ -1346,10 +1346,17 @@ static void sched_avg_update(struct rq *rq)
 		 */
 		asm("" : "+rm" (rq->age_stamp));
 		rq->age_stamp += period;
+		rq->dl_avg /= 2;
 		rq->rt_avg /= 2;
 	}
 }
 
+static void sched_dl_avg_update(struct rq *rq, u64 dl_delta)
+{
+	rq->dl_avg += dl_delta;
+	sched_avg_update(rq);
+}
+
 static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
 {
 	rq->rt_avg += rt_delta;
@@ -1363,6 +1370,10 @@ static void resched_task(struct task_struct *p)
 	set_tsk_need_resched(p);
 }
 
+static void sched_dl_avg_update(struct rq *rq, u64 dl_delta)
+{
+}
+
 static void sched_rt_avg_update(struct rq *rq, u64 rt_delta)
 {
 }
    diff --git a/kernel/sched_dl.c b/kernel/sched_dl.c
    index 26126a6..1bb4308 100644
    --- a/kernel/sched_dl.c
    +++ b/kernel/sched_dl.c
@@ -509,6 +509,8 @@ static void update_curr_dl(struct rq *rq)
 	curr->se.exec_start = rq->clock;
 	cpuacct_charge(curr, delta_exec);
 
+	sched_dl_avg_update(rq, delta_exec);
+
 	dl_se->runtime -= delta_exec;
 	if (dl_runtime_exceeded(rq, dl_se)) {
 		__dequeue_task_dl(rq, curr, 0);
    diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
    index 54c869c..2afe280 100644
    --- a/kernel/sched_fair.c
    +++ b/kernel/sched_fair.c
@@ -2276,11 +2276,11 @@ unsigned long scale_rt_power(int cpu)
 
 	total = sched_avg_period() + (rq->clock - rq->age_stamp);
 
-	if (unlikely(total < rq->rt_avg)) {
+	if (unlikely(total < rq->dl_avg + rq->rt_avg)) {
 		/* Ensures that power won't end up being negative */
 		available = 0;
 	} else {
-		available = total - rq->rt_avg;
+		available = total - rq->dl_avg - rq->rt_avg;
 	}
 
 	if (unlikely((s64)total < SCHED_LOAD_SCALE))
    --
    1.7.2.3
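
To see what the scale_rt_power() change buys, here is an illustrative
user-space model of the patched computation: the CPU power left for
fair tasks shrinks by the fraction of the averaging window consumed
by -deadline and -rt runtime. The helper name and the numbers below
are assumptions made for the example, not kernel code:

	#include <stdio.h>
	#include <stdint.h>

	#define SCHED_LOAD_SCALE 1024ULL /* fixed-point unit, as in the kernel */

	/*
	 * Simplified model of the patched scale_rt_power(): the remaining
	 * power is available/total, expressed in SCHED_LOAD_SCALE units.
	 */
	static unsigned long scaled_power(uint64_t total, uint64_t dl_avg,
					  uint64_t rt_avg)
	{
		uint64_t available;

		if (total < dl_avg + rt_avg)
			available = 0; /* power must not go negative */
		else
			available = total - dl_avg - rt_avg;

		if (total < SCHED_LOAD_SCALE)
			total = SCHED_LOAD_SCALE;

		return (unsigned long)(available * SCHED_LOAD_SCALE / total);
	}

	int main(void)
	{
		/*
		 * Hypothetical 1s window with 250ms of -deadline and 250ms
		 * of -rt runtime: half the window is gone, so the reported
		 * power drops to ~512 out of 1024.
		 */
		printf("power = %lu\n",
		       scaled_power(1000000000ULL, 250000000ULL, 250000000ULL));
		return 0;
	}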

    --
    <<This happens because I choose it to happen!>> (Raistlin Majere)
    ----------------------------------------------------------------------
    Dario Faggioli, ReTiS Lab, Scuola Superiore Sant'Anna, Pisa (Italy)
http://blog.linux.it/raistlin / raistlin@ekiga.net / dario.faggioli@jabber.org