    Subject: [PATCH 3.14 046/130] sched, rt: Convert switched_{from, to}_rt() / prio_changed_rt() to balance callbacks
    3.14-stable review patch.  If anyone has any objections, please let me know.

    ------------------

    From: Peter Zijlstra <peterz@infradead.org>

    commit fd7a4bed183523275279c9addbf42fce550c2e90 upstream.

    Remove the direct {push,pull} balancing operations from
    switched_{from,to}_rt() / prio_changed_rt() and use the balance
    callback queue.

    Again, err on the side of too many reschedules, since too few is a
    hard bug while too many is just annoying.
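
    For context: the "balance callback queue" referred to above is the
    mechanism added earlier in this series, which queues push/pull work on
    the runqueue while rq->lock is held and runs it later, from the
    balance_callback() path, at a point where it is safe for that work to
    drop and re-take rq->lock. As a rough, simplified sketch of the helper
    this patch relies on (based on kernel/sched/sched.h, not part of this
    patch), queueing a callback looks like this:

    	static inline void
    	queue_balance_callback(struct rq *rq,
    			       struct callback_head *head,
    			       void (*func)(struct rq *rq))
    	{
    		lockdep_assert_held(&rq->lock);

    		/* Nothing to do if this callback is already queued. */
    		if (unlikely(head->next))
    			return;

    		/*
    		 * Link the callback onto rq->balance_callback; it is run
    		 * later, once it is safe to drop and re-take rq->lock,
    		 * rather than directly from the sched-class callbacks.
    		 */
    		head->func = (void (*)(struct callback_head *))func;
    		head->next = rq->balance_callback;
    		rq->balance_callback = head;
    	}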

    Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
    Cc: ktkhai@parallels.com
    Cc: rostedt@goodmis.org
    Cc: juri.lelli@gmail.com
    Cc: pang.xunlei@linaro.org
    Cc: oleg@redhat.com
    Cc: wanpeng.li@linux.intel.com
    Cc: umgwanakikbuti@gmail.com
    Link: http://lkml.kernel.org/r/20150611124742.766832367@infradead.org
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
    Signed-off-by: Byungchul Park <byungchul.park@lge.com>
    Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

    ---
    kernel/sched/rt.c | 35 +++++++++++++++++++----------------
    1 file changed, 19 insertions(+), 16 deletions(-)

    --- a/kernel/sched/rt.c
    +++ b/kernel/sched/rt.c
    @@ -315,16 +315,23 @@ static inline int has_pushable_tasks(str
     	return !plist_head_empty(&rq->rt.pushable_tasks);
     }
     
    -static DEFINE_PER_CPU(struct callback_head, rt_balance_head);
    +static DEFINE_PER_CPU(struct callback_head, rt_push_head);
    +static DEFINE_PER_CPU(struct callback_head, rt_pull_head);
     
     static void push_rt_tasks(struct rq *);
    +static void pull_rt_task(struct rq *);
     
     static inline void queue_push_tasks(struct rq *rq)
     {
     	if (!has_pushable_tasks(rq))
     		return;
     
    -	queue_balance_callback(rq, &per_cpu(rt_balance_head, rq->cpu), push_rt_tasks);
    +	queue_balance_callback(rq, &per_cpu(rt_push_head, rq->cpu), push_rt_tasks);
    +}
    +
    +static inline void queue_pull_task(struct rq *rq)
    +{
    +	queue_balance_callback(rq, &per_cpu(rt_pull_head, rq->cpu), pull_rt_task);
     }
     
     static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
    @@ -1837,7 +1844,7 @@ static void switched_from_rt(struct rq *
     	if (!p->on_rq || rq->rt.rt_nr_running)
     		return;
     
    -	pull_rt_task(rq);
    +	queue_pull_task(rq);
     }
     
     void init_sched_rt_class(void)
    @@ -1858,8 +1865,6 @@ void init_sched_rt_class(void)
      */
     static void switched_to_rt(struct rq *rq, struct task_struct *p)
     {
    -	int check_resched = 1;
    -
     	/*
     	 * If we are already running, then there's nothing
     	 * that needs to be done. But if we are not running
    @@ -1869,13 +1874,12 @@ static void switched_to_rt(struct rq
     	 */
     	if (p->on_rq && rq->curr != p) {
     #ifdef CONFIG_SMP
    -		if (rq->rt.overloaded && push_rt_task(rq) &&
    -		    /* Don't resched if we changed runqueues */
    -		    rq != task_rq(p))
    -			check_resched = 0;
    -#endif /* CONFIG_SMP */
    -		if (check_resched && p->prio < rq->curr->prio)
    +		if (rq->rt.overloaded)
    +			queue_push_tasks(rq);
    +#else
    +		if (p->prio < rq->curr->prio)
     			resched_task(rq->curr);
    +#endif /* CONFIG_SMP */
     	}
     }

    @@ -1896,14 +1900,13 @@ prio_changed_rt(struct rq *rq, struct ta
     		 * may need to pull tasks to this runqueue.
     		 */
     		if (oldprio < p->prio)
    -			pull_rt_task(rq);
    +			queue_pull_task(rq);
    +
     		/*
     		 * If there's a higher priority task waiting to run
    -		 * then reschedule. Note, the above pull_rt_task
    -		 * can release the rq lock and p could migrate.
    -		 * Only reschedule if p is still on the same runqueue.
    +		 * then reschedule.
     		 */
    -		if (p->prio > rq->rt.highest_prio.curr && rq->curr == p)
    +		if (p->prio > rq->rt.highest_prio.curr)
     			resched_task(p);
     #else
     		/* For UP simply resched on drop of prio */