Subject: Re: [PATCH 1/2] sched/rt: Check to push task away when its affinity is changed
On Fri,  1 May 2015 00:33:17 +0800
Xunlei Pang <xlpang@126.com> wrote:

> From: Xunlei Pang <pang.xunlei@linaro.org>
>
> We may be left with an unnecessarily rt-overloaded rq because of
> a task's affinity, so when the affinity of any runnable rt task
> is changed, we should check whether to trigger balancing;
> otherwise it can cause unnecessary delays in real-time response.
> Unfortunately, the current RT global scheduler does nothing
> about this.
>
> For example: on a 2-cpu system, two runnable FIFO tasks of the
> same rt_priority are bound to CPU0; let's name them rt1 (running)
> and rt2 (runnable) respectively. CPU1 has no RT tasks. Then,
> someone sets the affinity of rt2 to 0x3 (i.e. CPU0 and CPU1),
> but after this rt2 still can't be scheduled until rt1 enters
> schedule(), which clearly adds avoidable response latency for rt2.
>
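Just to make that scenario concrete, a minimal user-space sketch of the
affinity change could look like this (hypothetical reproducer, not part
of the patch; task names, priorities and CPU numbers are only
illustrative):

/*
 * rt1 and rt2 are SCHED_FIFO tasks of equal priority pinned to CPU0;
 * rt1 is running, rt2 is waiting. Widening rt2's mask to 0x3
 * (CPU0|CPU1) should let the scheduler push it to the idle CPU1.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <sys/types.h>

static int widen_rt2_affinity(pid_t rt2_pid)
{
	cpu_set_t mask;

	CPU_ZERO(&mask);
	CPU_SET(0, &mask);
	CPU_SET(1, &mask);	/* new mask 0x3: CPU0 and CPU1 */

	/* Without a push here, rt2 keeps waiting behind rt1 on CPU0. */
	if (sched_setaffinity(rt2_pid, sizeof(mask), &mask)) {
		perror("sched_setaffinity");
		return -1;
	}
	return 0;
}
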
> This patch introduces a new sched_class::post_set_cpus_allowed()
> hook for RT, called after set_cpus_allowed_rt(). In this new
> function, if the task is runnable but not running, we try to
> push it away now that it has become migratable.
>
> Signed-off-by: Xunlei Pang <pang.xunlei@linaro.org>
> ---
> kernel/sched/core.c  |  3 +++
> kernel/sched/rt.c    | 17 +++++++++++++++++
> kernel/sched/sched.h |  1 +
> 3 files changed, 21 insertions(+)
>
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index d13fc13..64a1603 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -4773,6 +4773,9 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
>
> 	cpumask_copy(&p->cpus_allowed, new_mask);
> 	p->nr_cpus_allowed = cpumask_weight(new_mask);
> +
> +	if (p->sched_class->post_set_cpus_allowed)
> +		p->sched_class->post_set_cpus_allowed(p);
> }
>
> /*
> diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
> index cc49a7c..a9d33a3 100644
> --- a/kernel/sched/rt.c
> +++ b/kernel/sched/rt.c
> @@ -2281,6 +2281,22 @@ static void set_cpus_allowed_rt(struct task_struct *p,
> 	update_rt_migration(&rq->rt);
> }
>
> +static void post_set_cpus_allowed_rt(struct task_struct *p)
> +{
> +	struct rq *rq;
> +
> +	if (!task_on_rq_queued(p))
> +		return;
> +
> +	rq = task_rq(p);
> +	if (!task_running(rq, p) &&
> +	    p->nr_cpus_allowed > 1 &&
> +	    !test_tsk_need_resched(rq->curr) &&
> +	    cpupri_find(&rq->rd->cpupri, p, NULL)) {

I don't think we need the cpupri_find() call here. It's done in
push_rt_tasks() and doing it twice is just wasted effort.
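
For reference, push_rt_tasks() ends up in find_lowest_rq(), which does
the cpupri lookup anyway, so the helper could probably be trimmed to
something like this (untested sketch, keeping the rest of the patch as
posted):

static void post_set_cpus_allowed_rt(struct task_struct *p)
{
	struct rq *rq;

	if (!task_on_rq_queued(p))
		return;

	rq = task_rq(p);
	/* Push only if the task can migrate and no resched is pending. */
	if (!task_running(rq, p) &&
	    p->nr_cpus_allowed > 1 &&
	    !test_tsk_need_resched(rq->curr))
		push_rt_tasks(rq);
}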

> +		push_rt_tasks(rq);
> +	}
> +}
> +
> /* Assumes rq->lock is held */
> static void rq_online_rt(struct rq *rq)
> {
> @@ -2495,6 +2511,7 @@ const struct sched_class rt_sched_class = {
> 	.select_task_rq		= select_task_rq_rt,
>
> 	.set_cpus_allowed	= set_cpus_allowed_rt,
> +	.post_set_cpus_allowed	= post_set_cpus_allowed_rt,
> 	.rq_online		= rq_online_rt,
> 	.rq_offline		= rq_offline_rt,
> 	.post_schedule		= post_schedule_rt,
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index e0e1299..6f90645 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -1191,6 +1191,7 @@ struct sched_class {
>
> 	void (*set_cpus_allowed)(struct task_struct *p,
> 				 const struct cpumask *newmask);
> +	void (*post_set_cpus_allowed)(struct task_struct *p);
>
> 	void (*rq_online)(struct rq *rq);
> 	void (*rq_offline)(struct rq *rq);


