Subject: [patch] sched,rt: non-isolated cores lift isolcpus throttle for CONFIG_RT_GROUP_SCHED
From: Mike Galbraith <efault@gmx.de>
Date: 2012-04-03
    s/patch/hack.  Better ideas?

    When CONFIG_RT_GROUP_SCHED is enabled, isolcpus have no replenishment timer
    running, and unlike the !CONFIG_RT_GROUP_SCHED case, they are not in the
    rd->span of the cpu running replenishment. If you trigger the throttle,
    you're rewarded with a dead box. Should the user reassign the cpus to a
    domain, they become sane again, and replenishment starts/stops as usual.
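
    FWIW, the wreckage is trivial to poke with something like the below
    (hypothetical reproducer, not from a report: assumes a box booted with
    isolcpus=3 and CONFIG_RT_GROUP_SCHED=y, cpu number arbitrary, run as
    root). Pin a SCHED_FIFO hog to an isolated cpu and let it burn past
    sched_rt_runtime_us; with no replenishment timer covering the isolcpu,
    the rt_rq throttles and never unthrottles.

    /*
     * Hypothetical reproducer (sketch): hog an isolated cpu as SCHED_FIFO.
     * Assumes isolcpus=3 on the command line (cpu number is arbitrary) and
     * CONFIG_RT_GROUP_SCHED=y. Once the hog exhausts sched_rt_runtime_us,
     * the rt_rq throttles; with no replenishment timer covering isolcpus,
     * RT on that cpu is dead until domains are rebuilt.
     */
    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
            struct sched_param sp = { .sched_priority = 10 };
            cpu_set_t set;

            CPU_ZERO(&set);
            CPU_SET(3, &set);               /* the isolated cpu */

            if (sched_setaffinity(0, sizeof(set), &set)) {
                    perror("sched_setaffinity");
                    return 1;
            }
            if (sched_setscheduler(0, SCHED_FIFO, &sp)) {
                    perror("sched_setscheduler");
                    return 1;
            }
            for (;;)                        /* burn > sched_rt_runtime_us */
                    ;
    }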

    Signed-off-by: Mike Galbraith <efault@gmx.de>
    ---
     kernel/sched/core.c  | 15 ++++++++++++++-
     kernel/sched/rt.c    | 16 ++++++++++++++--
     kernel/sched/sched.h |  5 +++++
     3 files changed, 33 insertions(+), 3 deletions(-)

    --- a/kernel/sched/core.c
    +++ b/kernel/sched/core.c
    @@ -5877,6 +5877,14 @@ cpu_attach_domain(struct sched_domain *s

     	sched_domain_debug(sd, cpu);

    +#ifdef CONFIG_RT_GROUP_SCHED
    +	/* If the cpu was an isolcpu, it no longer is. */
    +	if (sd) {
    +		cpumask_clear_cpu(cpu, cpu_isolated_map);
    +		nr_isolated_cpus = cpumask_weight(cpu_isolated_map);
    +	}
    +#endif
    +
     	rq_attach_root(rq, rd);
     	tmp = rq->sd;
     	rcu_assign_pointer(rq->sd, sd);
    @@ -5886,13 +5894,18 @@ cpu_attach_domain(struct sched_domain *s
     }

     /* cpus with isolated domains */
    -static cpumask_var_t cpu_isolated_map;
    +cpumask_var_t cpu_isolated_map;
    +
    +__read_mostly int nr_isolated_cpus;

     /* Setup the mask of cpus configured for isolated domains */
     static int __init isolated_cpu_setup(char *str)
     {
     	alloc_bootmem_cpumask_var(&cpu_isolated_map);
     	cpulist_parse(str, cpu_isolated_map);
    +#ifdef CONFIG_RT_GROUP_SCHED
    +	nr_isolated_cpus = cpumask_weight(cpu_isolated_map);
    +#endif
     	return 1;
     }

    --- a/kernel/sched/rt.c
    +++ b/kernel/sched/rt.c
    @@ -778,10 +778,11 @@ static inline int balance_runtime(struct

     static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
     {
    -	int i, idle = 1, throttled = 0;
    +	int i, idle = 1, throttled = 0, isol_cpus = nr_isolated_cpus;
     	const struct cpumask *span;

     	span = sched_rt_period_mask();
    +do_isolcpus:
     	for_each_cpu(i, span) {
     		int enqueue = 0;
     		struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i);
    @@ -792,7 +793,7 @@ static int do_sched_rt_period_timer(stru
     			u64 runtime;

     			raw_spin_lock(&rt_rq->rt_runtime_lock);
    -			if (rt_rq->rt_throttled)
    +			if (rt_rq->rt_throttled && span != cpu_isolated_map)
     				balance_runtime(rt_rq);
     			runtime = rt_rq->rt_runtime;
     			rt_rq->rt_time -= min(rt_rq->rt_time, overrun*runtime);
    @@ -823,6 +824,17 @@ static int do_sched_rt_period_timer(stru
     		raw_spin_unlock(&rq->lock);
     	}

    +	/*
    +	 * Hack: unthrottle isolcpus for RT_GROUP_SCHED. No replenishment
    +	 * timer is running on isolcpus, and unlike !RT_GROUP_SCHED, they're
    +	 * not in the rd->span of the cpu running the timer.
    +	 */
    +	if (isol_cpus) {
    +		span = cpu_isolated_map;
    +		isol_cpus = 0;
    +		goto do_isolcpus;
    +	}
    +
     	if (!throttled && (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF))
     		return 1;

    --- a/kernel/sched/sched.h
    +++ b/kernel/sched/sched.h
    @@ -572,6 +572,11 @@ static inline void set_task_rq(struct ta
     #endif
     }

    +/* cpus with isolated domains */
    +extern cpumask_var_t cpu_isolated_map;
    +
    +extern __read_mostly int nr_isolated_cpus;
    +
     #else /* CONFIG_CGROUP_SCHED */

     static inline void set_task_rq(struct task_struct *p, unsigned int cpu) { }
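
    In case the goto makes eyes bleed, the flow boils down to the toy model
    below (a userspace sketch only, not the kernel code: locking, enqueueing
    and balance_runtime() are elided, and all names and numbers in it are
    made up). Pass one walks the rd->span as before; pass two re-walks the
    isolated cpus so a throttled isolcpu gets its budget back.

    /* Toy model of the two-pass replenishment loop (sketch, not kernel code). */
    #include <stdio.h>

    #define NR_CPUS         4
    #define RT_RUNTIME      950000ULL       /* a la sched_rt_runtime_us */

    struct toy_rt_rq {
            unsigned long long rt_time;     /* consumed budget */
            int throttled;
            int isolated;
    };

    /* cpu3 plays the throttled isolcpu */
    static struct toy_rt_rq rt_rq[NR_CPUS] = {
            [3] = { .rt_time = 1000000ULL, .throttled = 1, .isolated = 1 },
    };

    static void replenish(int cpu, unsigned long long overrun)
    {
            struct toy_rt_rq *rq = &rt_rq[cpu];
            unsigned long long decay = overrun * RT_RUNTIME;

            /* rt_time -= min(rt_time, overrun * runtime), as in the kernel */
            rq->rt_time -= rq->rt_time < decay ? rq->rt_time : decay;
            if (rq->throttled && rq->rt_time < RT_RUNTIME) {
                    rq->throttled = 0;
                    printf("cpu%d unthrottled\n", cpu);
            }
    }

    int main(void)
    {
            int pass = 0, i;

    do_isolcpus:
            for (i = 0; i < NR_CPUS; i++) {
                    /* pass 0: rd->span cpus; pass 1: isolcpus only */
                    if (rt_rq[i].isolated != pass)
                            continue;
                    replenish(i, 1);
            }
            if (!pass++)            /* second pass covers the isolcpus */
                    goto do_isolcpus;
            return 0;
    }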


