    Subject: [tip:sched/core] sched/rt: Keep period timer ticking when rt throttling is active
    Commit-ID:  42c62a589f1ccbf38a02cb732231f9c2fccc5ab0
    Gitweb: http://git.kernel.org/tip/42c62a589f1ccbf38a02cb732231f9c2fccc5ab0
    Author: Peter Zijlstra <peterz@infradead.org>
    AuthorDate: Tue, 18 Oct 2011 22:03:48 +0200
    Committer: Ingo Molnar <mingo@elte.hu>
    CommitDate: Thu, 1 Mar 2012 10:28:01 +0100

    sched/rt: Keep period timer ticking when rt throttling is active

    When a runqueue is throttled we cannot disable the period timer
    because that timer is the only way to undo the throttling.

    We got stale throttling entries when a rq was throttled and then the
    global sysctl was disabled, which stopped the timer.
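
    For reference, the global sysctl mentioned above is /proc/sys/kernel/sched_rt_runtime_us;
    writing -1 to it maps to RUNTIME_INF, i.e. RT bandwidth enforcement is turned off. The
    sketch below only illustrates that toggle (a hypothetical reproducer, not part of the
    patch; it assumes root and an rt_rq that is already throttled at the time of the write):

        /* toggle_rt_runtime.c - illustration only */
        #include <stdio.h>

        int main(void)
        {
                FILE *f = fopen("/proc/sys/kernel/sched_rt_runtime_us", "w");
                if (!f) {
                        perror("fopen");
                        return 1;
                }
                /*
                 * -1 == RUNTIME_INF: disable RT throttling globally.  Before
                 * this patch the write also stopped the period timer, so an
                 * rt_rq that was throttled at this moment stayed throttled
                 * (the "stale throttling entries" described above).
                 */
                fprintf(f, "-1\n");
                return fclose(f) ? 1 : 0;
        }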

    Signed-off-by: Peter Zijlstra <peterz@infradead.org>
    [ Added changelog ]
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
    Link: http://lkml.kernel.org/n/tip-nuj34q52p6ro7szapuz84i0v@git.kernel.org
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    ---
    kernel/sched/rt.c | 13 ++++++++-----
    1 files changed, 8 insertions(+), 5 deletions(-)

    diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
    index f70206c..6d1eb0b 100644
    --- a/kernel/sched/rt.c
    +++ b/kernel/sched/rt.c
    @@ -778,12 +778,9 @@ static inline int balance_runtime(struct rt_rq *rt_rq)
     
     static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
     {
    -	int i, idle = 1;
    +	int i, idle = 1, throttled = 0;
     	const struct cpumask *span;
     
    -	if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
    -		return 1;
    -
     	span = sched_rt_period_mask();
     	for_each_cpu(i, span) {
     		int enqueue = 0;
    @@ -818,12 +815,17 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
     			if (!rt_rq_throttled(rt_rq))
     				enqueue = 1;
     		}
    +		if (rt_rq->rt_throttled)
    +			throttled = 1;
     
     		if (enqueue)
     			sched_rt_rq_enqueue(rt_rq);
     		raw_spin_unlock(&rq->lock);
     	}
     
    +	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
    +		return 1;
    +
     	return idle;
     }
     
    @@ -884,7 +886,8 @@ static void update_curr_rt(struct rq *rq)
     	if (unlikely((s64)delta_exec < 0))
     		delta_exec = 0;
     
    -	schedstat_set(curr->se.statistics.exec_max, max(curr->se.statistics.exec_max, delta_exec));
    +	schedstat_set(curr->se.statistics.exec_max,
    +		      max(curr->se.statistics.exec_max, delta_exec));
     
     	curr->se.sum_exec_runtime += delta_exec;
     	account_group_exec_runtime(curr, delta_exec);
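
    The effect of the change can be summarised by the new return condition: the period
    timer is only allowed to stop when no rt_rq is throttled, even if bandwidth enforcement
    has been disabled globally. A minimal user-space model of just that condition (an
    assumed simplification, not kernel code; names mirror the diff above):

        #include <stdbool.h>
        #include <stdio.h>

        #define RUNTIME_INF (-1LL)

        /*
         * Mirrors: if (!throttled && (!rt_bandwidth_enabled() ||
         *                             rt_b->rt_runtime == RUNTIME_INF))
         *                  return 1;
         */
        static bool period_timer_may_stop(bool any_rq_throttled,
                                          bool bandwidth_enabled,
                                          long long rt_runtime)
        {
                return !any_rq_throttled &&
                       (!bandwidth_enabled || rt_runtime == RUNTIME_INF);
        }

        int main(void)
        {
                /* throttled rq + sysctl disabled: the timer must keep ticking */
                printf("%d\n", period_timer_may_stop(true, false, RUNTIME_INF));  /* 0 */
                /* nothing throttled + sysctl disabled: safe to stop the timer */
                printf("%d\n", period_timer_may_stop(false, false, RUNTIME_INF)); /* 1 */
                return 0;
        }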
