From: Waiman Long <longman@redhat.com>
Subject: [PATCH v7 5/5] cpuset: Make generate_sched_domains() recognize isolated_cpus
Date: 2018-04-19
The generate_sched_domains() function and the hotplug code are modified
to use the newly introduced isolated_cpus mask when generating
scheduling domains.

Signed-off-by: Waiman Long <longman@redhat.com>
---
 kernel/cgroup/cpuset.c | 35 +++++++++++++++++++++++++++++++++--
 1 file changed, 33 insertions(+), 2 deletions(-)
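
(Not part of the patch: a minimal, compilable userspace sketch of the
new generate_sched_domains() decisions, using a 64-bit word as a toy
cpumask. The toy_* names and the example masks are hypothetical; only
the mask arithmetic mirrors the kernel changes below.)

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t toy_cpumask;	/* one bit per CPU, CPUs 0-63 */

/* cpumask_subset(): true if every bit set in a is also set in b */
static bool toy_subset(toy_cpumask a, toy_cpumask b)
{
	return (a & ~b) == 0;
}

int main(void)
{
	toy_cpumask active = 0xff;	/* CPUs 0-7 online */
	toy_cpumask isolated = 0xc0;	/* CPUs 6-7 isolated */
	bool root_load_balance = true;

	/* as in cpuset_hotplug_workfn(): effective = active & ~isolated */
	toy_cpumask root_effective = active & ~isolated;

	/* the old single-domain special case survives only if nothing is isolated */
	if (root_load_balance && isolated == 0) {
		printf("single sched domain: 0x%jx\n", (uintmax_t)active);
		return 0;
	}

	/* the root still load-balances, but only over non-isolated CPUs */
	printf("root domain: 0x%jx\n", (uintmax_t)root_effective);

	/* a child cpuset covered by the root's effective mask is skipped */
	toy_cpumask child = 0x30;	/* CPUs 4-5 */
	if (root_load_balance && toy_subset(child, root_effective))
		printf("child 0x%jx folded into the root domain\n",
		       (uintmax_t)child);
	return 0;
}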

diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c
index d05c4c8..a67c77a 100644
--- a/kernel/cgroup/cpuset.c
+++ b/kernel/cgroup/cpuset.c
@@ -683,13 +683,14 @@ static int generate_sched_domains(cpumask_var_t **domains,
 	int ndoms = 0;		/* number of sched domains in result */
 	int nslot;		/* next empty doms[] struct cpumask slot */
 	struct cgroup_subsys_state *pos_css;
+	bool root_load_balance = is_sched_load_balance(&top_cpuset);
 
 	doms = NULL;
 	dattr = NULL;
 	csa = NULL;
 
 	/* Special case for the 99% of systems with one, full, sched domain */
-	if (is_sched_load_balance(&top_cpuset)) {
+	if (root_load_balance && !top_cpuset.isolation_count) {
 		ndoms = 1;
 		doms = alloc_sched_domains(ndoms);
 		if (!doms)
@@ -712,6 +713,8 @@ static int generate_sched_domains(cpumask_var_t **domains,
 	csn = 0;
 
 	rcu_read_lock();
+	if (root_load_balance)
+		csa[csn++] = &top_cpuset;
 	cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
 		if (cp == &top_cpuset)
 			continue;
@@ -722,6 +725,9 @@ static int generate_sched_domains(cpumask_var_t **domains,
 		 * parent's cpus, so just skip them, and then we call
 		 * update_domain_attr_tree() to calc relax_domain_level of
 		 * the corresponding sched domain.
+		 *
+		 * If root is load-balancing, we can skip @cp if it
+		 * is a subset of the root's effective_cpus.
 		 */
 		if (!cpumask_empty(cp->cpus_allowed) &&
 		    !(is_sched_load_balance(cp) &&
@@ -729,6 +735,10 @@ static int generate_sched_domains(cpumask_var_t **domains,
 		      housekeeping_cpumask(HK_FLAG_DOMAIN))))
 			continue;
 
+		if (root_load_balance &&
+		    cpumask_subset(cp->cpus_allowed, top_cpuset.effective_cpus))
+			continue;
+
 		if (is_sched_load_balance(cp))
 			csa[csn++] = cp;
 
@@ -820,6 +830,12 @@ static int generate_sched_domains(cpumask_var_t **domains,
 	}
 	BUG_ON(nslot != ndoms);
 
+#ifdef CONFIG_DEBUG_KERNEL
+	for (i = 0; i < ndoms; i++)
+		pr_info("rebuild_sched_domains dom %d: %*pbl\n", i,
+			cpumask_pr_args(doms[i]));
+#endif
+
 done:
 	kfree(csa);
 
@@ -860,7 +876,12 @@ static void rebuild_sched_domains_locked(void)
 	 * passing doms with offlined cpu to partition_sched_domains().
 	 * Anyways, hotplug work item will rebuild sched domains.
 	 */
-	if (!cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
+	if (!top_cpuset.isolation_count &&
+	    !cpumask_equal(top_cpuset.effective_cpus, cpu_active_mask))
+		goto out;
+
+	if (top_cpuset.isolation_count &&
+	    !cpumask_subset(top_cpuset.effective_cpus, cpu_active_mask))
 		goto out;
 
 	/* Generate domain masks and attrs */
@@ -1102,6 +1123,7 @@ static int update_isolated_cpumask(const char *buf)
 
 	top_cpuset.isolation_count = cpumask_weight(top_cpuset.isolated_cpus);
 	spin_unlock_irq(&callback_lock);
+	rebuild_sched_domains_locked();
 
 out_ok:
 	retval = 0;
@@ -2530,6 +2552,11 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
 	cpumask_copy(&new_cpus, cpu_active_mask);
 	new_mems = node_states[N_MEMORY];
 
+	/*
+	 * If isolated_cpus is populated, it is likely that the check below
+	 * will produce a false positive on cpus_updated when the cpu list
+	 * isn't changed. It is extra work, but it is better to be safe.
+	 */
 	cpus_updated = !cpumask_equal(top_cpuset.effective_cpus, &new_cpus);
 	mems_updated = !nodes_equal(top_cpuset.effective_mems, new_mems);
 
@@ -2538,6 +2565,10 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
 	spin_lock_irq(&callback_lock);
 	if (!on_dfl)
 		cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
+
+	if (top_cpuset.isolation_count)
+		cpumask_andnot(&new_cpus, &new_cpus,
+			       top_cpuset.isolated_cpus);
 	cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
 	spin_unlock_irq(&callback_lock);
 	/* we don't mess with cpumasks of tasks in top_cpuset */
--
1.8.3.1
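
(Also not part of the patch: a toy illustration of why
rebuild_sched_domains_locked() switches from cpumask_equal() to
cpumask_subset() once CPUs are isolated. top_cpuset.effective_cpus is
then cpu_active_mask minus isolated_cpus, so a strict equality test
would always defer the rebuild to the hotplug work item. The
defer_rebuild() helper and the masks are hypothetical stand-ins.)

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t toy_cpumask;	/* one bit per CPU */

/*
 * True when the rebuild should be deferred to the hotplug work item,
 * i.e. when the effective mask no longer fits the active mask.
 */
static bool defer_rebuild(toy_cpumask effective, toy_cpumask active,
			  unsigned int isolation_count)
{
	if (!isolation_count)
		return effective != active;	/* !cpumask_equal() */
	return (effective & ~active) != 0;	/* !cpumask_subset() */
}

int main(void)
{
	toy_cpumask active = 0xff;		/* CPUs 0-7 online */
	toy_cpumask isolated = 0xc0;		/* CPUs 6-7 isolated */
	toy_cpumask effective = active & ~isolated;

	/* equality test spuriously defers: effective != active */
	printf("equal-test defers:  %d\n", defer_rebuild(effective, active, 0));
	/* subset test proceeds: effective still lies within active */
	printf("subset-test defers: %d\n", defer_rebuild(effective, active, 2));
	return 0;
}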