    Subject: [PATCH 3.12 044/116] sched, rt: Convert switched_{from, to}_rt() / prio_changed_rt() to balance callbacks
    From: Peter Zijlstra <peterz@infradead.org>

    3.12-stable review patch. If anyone has any objections, please let me know.

    ===============

    commit fd7a4bed183523275279c9addbf42fce550c2e90 upstream.

    Remove the direct {push,pull} balancing operations from
    switched_{from,to}_rt() / prio_changed_rt() and use the balance
    callback queue.

    Again, err on the side of too many reschedules: too few is a hard bug,
    while too many is merely annoying.

    Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
    Cc: ktkhai@parallels.com
    Cc: rostedt@goodmis.org
    Cc: juri.lelli@gmail.com
    Cc: pang.xunlei@linaro.org
    Cc: oleg@redhat.com
    Cc: wanpeng.li@linux.intel.com
    Cc: umgwanakikbuti@gmail.com
    Link: http://lkml.kernel.org/r/20150611124742.766832367@infradead.org
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
    Signed-off-by: Byungchul Park <byungchul.park@lge.com>
    Signed-off-by: Jiri Slaby <jslaby@suse.cz>
    ---
    kernel/sched/rt.c | 35 +++++++++++++++++++----------------
    1 file changed, 19 insertions(+), 16 deletions(-)
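
    [ Note for context: the balance callback queue used below was introduced
      earlier in this series by upstream commit e3fca9e7cbfb ("sched: Replace
      post_schedule with a balance callback list"). The queueing helper lives
      in kernel/sched/sched.h and looks roughly like the sketch below; this is
      paraphrased from the upstream series for illustration and is not part of
      this patch. A non-NULL head->next means the per-CPU callback head is
      already on rq->balance_callback, which is why queueing "too many" times
      is cheap:

	static inline void
	queue_balance_callback(struct rq *rq,
			       struct callback_head *head,
			       void (*func)(struct rq *rq))
	{
		lockdep_assert_held(&rq->lock);

		/* Already queued; each per-CPU head is queued at most once. */
		if (unlikely(head->next))
			return;

		head->func = (void (*)(struct callback_head *))func;
		head->next = rq->balance_callback;
		rq->balance_callback = head;
	}
    ]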

    diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
    index 85a09baa8f9f..10edf9d2a8b7 100644
    --- a/kernel/sched/rt.c
    +++ b/kernel/sched/rt.c
    @@ -315,16 +315,23 @@ static inline int has_pushable_tasks(struct rq *rq)
     	return !plist_head_empty(&rq->rt.pushable_tasks);
     }
     
    -static DEFINE_PER_CPU(struct callback_head, rt_balance_head);
    +static DEFINE_PER_CPU(struct callback_head, rt_push_head);
    +static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
     
     static void push_rt_tasks(struct rq *);
    +static void pull_rt_task(struct rq *);
     
     static inline void queue_push_tasks(struct rq *rq)
     {
     	if (!has_pushable_tasks(rq))
     		return;
     
    -	queue_balance_callback(rq, &per_cpu(rt_balance_head, rq->cpu), push_rt_tasks);
    +	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
    +}
    +
    +static inline void queue_pull_task(struct rq *rq)
    +{
    +	queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
     }
     
     static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
    @@ -1832,7 +1839,7 @@ static void switched_from_rt(struct rq *rq, struct task_struct *p)
     	if (!p->on_rq || rq->rt.rt_nr_running)
     		return;
     
    -	pull_rt_task(rq);
    +	queue_pull_task(rq);
     }
     
     void init_sched_rt_class(void)
    @@ -1853,8 +1860,6 @@ void init_sched_rt_class(void)
      */
     static void switched_to_rt(struct rq *rq, struct task_struct *p)
     {
    -	int check_resched = 1;
    -
     	/*
     	 * If we are already running, then there's nothing
     	 * that needs to be done. But if we are not running
    @@ -1864,13 +1869,12 @@ static void switched_to_rt(struct rq *rq, struct task_struct *p)
     	 */
     	if (p->on_rq && rq->curr != p) {
     #ifdef CONFIG_SMP
    -		if (rq->rt.overloaded && push_rt_task(rq) &&
    -		    /* Don't resched if we changed runqueues */
    -		    rq != task_rq(p))
    -			check_resched = 0;
    -#endif /* CONFIG_SMP */
    -		if (check_resched && p->prio < rq->curr->prio)
    +		if (rq->rt.overloaded)
    +			queue_push_tasks(rq);
    +#else
    +		if (p->prio < rq->curr->prio)
     			resched_task(rq->curr);
    +#endif /* CONFIG_SMP */
     	}
     }
     
    @@ -1891,14 +1895,13 @@ prio_changed_rt(struct rq *rq, struct task_struct *p, int oldprio)
     	 * may need to pull tasks to this runqueue.
     	 */
     	if (oldprio < p->prio)
    -		pull_rt_task(rq);
    +		queue_pull_task(rq);
    +
     	/*
     	 * If there's a higher priority task waiting to run
    -	 * then reschedule. Note, the above pull_rt_task
    -	 * can release the rq lock and p could migrate.
    -	 * Only reschedule if p is still on the same runqueue.
    +	 * then reschedule.
     	 */
    -	if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
    +	if (p->prio > rq->rt.highest_prio.curr)
     		resched_task(p);
     #else
     	/* For UP simply resched on drop of prio */
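
    [ Also for context: nothing in this patch runs the queued callbacks
      itself. Upstream they are drained by balance_callback(), invoked once
      the rq lock has been dropped (e.g. at the end of __schedule() and in
      the sched_setscheduler()/rt_mutex_setprio() paths). Roughly, again as
      an illustrative sketch paraphrased from the upstream series rather
      than code from this patch:

	/* rq->lock is NOT held, but preemption is disabled */
	static void __balance_callback(struct rq *rq)
	{
		struct callback_head *head, *next;
		void (*func)(struct rq *rq);
		unsigned long flags;

		raw_spin_lock_irqsave(&rq->lock, flags);
		head = rq->balance_callback;
		rq->balance_callback = NULL;
		while (head) {
			func = (void (*)(struct rq *))head->func;
			next = head->next;
			head->next = NULL;	/* allow this head to be requeued */
			head = next;

			func(rq);
		}
		raw_spin_unlock_irqrestore(&rq->lock, flags);
	}

      So a queued pull_rt_task()/push_rt_tasks() still runs with the rq lock
      held, but only at a point where dropping and retaking that lock is
      safe, which is what lets this patch remove the "did p migrate?"
      rechecks from the callers. ]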
    --
    2.7.2