Subject: [patch v3 4/6] sched, nohz: cleanup the find_new_ilb() using sched groups nr_busy_cpus
nr_busy_cpus in the sched_group_power indicates whether the group is
semi-idle or not. Use it to remove is_semi_idle_group() and to simplify
find_new_ilb() when looking for an optimal cpu that can do idle load
balancing.
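
As an aside (not part of the patch): below is a minimal user-space sketch of
the new test. "struct group" and group_has_idle_cpu() are hypothetical
stand-ins for the kernel's sched_group/sched_group_power pair, used only to
illustrate that a group has at least one idle cpu exactly when its busy-cpu
count is below its cpu count.

/* Illustrative sketch only; "struct group" is a stand-in, not kernel code. */
#include <stdio.h>

struct group {
        unsigned int group_weight;      /* number of cpus in the group */
        unsigned int nr_busy_cpus;      /* cpus in the group that are not idle */
};

/* Mirrors the new check in find_new_ilb(): is any cpu in the group idle? */
static int group_has_idle_cpu(const struct group *g)
{
        return g->group_weight != g->nr_busy_cpus;
}

int main(void)
{
        struct group all_busy  = { .group_weight = 4, .nr_busy_cpus = 4 };
        struct group semi_idle = { .group_weight = 4, .nr_busy_cpus = 2 };

        printf("all_busy:  %d\n", group_has_idle_cpu(&all_busy));      /* 0 */
        printf("semi_idle: %d\n", group_has_idle_cpu(&semi_idle));     /* 1 */
        return 0;
}

In the patch itself nr_busy_cpus lives in sched_group_power and is read with
atomic_read(); find_new_ilb() then picks the first cpu of such a group that
is also set in nohz.idle_cpus_mask.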

Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
---
 kernel/sched/fair.c | 48 ++++++++++++------------------------------------
 1 file changed, 12 insertions(+), 36 deletions(-)

Index: tip/kernel/sched/fair.c
===================================================================
--- tip.orig/kernel/sched/fair.c
+++ tip/kernel/sched/fair.c
@@ -4710,7 +4710,6 @@ out_unlock:
  */
 static struct {
         cpumask_var_t idle_cpus_mask;
-        cpumask_var_t grp_idle_mask;
         atomic_t nr_cpus;
         unsigned long next_balance;     /* in jiffy units */
 } nohz ____cacheline_aligned;
@@ -4751,33 +4750,6 @@ static inline struct sched_domain *lowes
                 (sd && (sd->flags & flag)); sd = sd->parent)
 
 /**
- * is_semi_idle_group - Checks if the given sched_group is semi-idle.
- * @ilb_group: group to be checked for semi-idleness
- *
- * Returns:    1 if the group is semi-idle. 0 otherwise.
- *
- * We define a sched_group to be semi idle if it has atleast one idle-CPU
- * and atleast one non-idle CPU. This helper function checks if the given
- * sched_group is semi-idle or not.
- */
-static inline int is_semi_idle_group(struct sched_group *ilb_group)
-{
-        cpumask_and(nohz.grp_idle_mask, nohz.idle_cpus_mask,
-                                        sched_group_cpus(ilb_group));
-
-        /*
-         * A sched_group is semi-idle when it has atleast one busy cpu
-         * and atleast one idle cpu.
-         */
-        if (cpumask_empty(nohz.grp_idle_mask))
-                return 0;
-
-        if (cpumask_equal(nohz.grp_idle_mask, sched_group_cpus(ilb_group)))
-                return 0;
-
-        return 1;
-}
-/**
  * find_new_ilb - Finds the optimum idle load balancer for nomination.
  * @cpu:        The cpu which is nominating a new idle_load_balancer.
  *
@@ -4792,8 +4764,8 @@ static inline int is_semi_idle_group(str
 static int find_new_ilb(int cpu)
 {
         int ilb = cpumask_first(nohz.idle_cpus_mask);
+        struct sched_group *ilbg;
         struct sched_domain *sd;
-        struct sched_group *ilb_group;
 
         /*
          * Have idle load balancer selection from semi-idle packages only
@@ -4811,23 +4783,28 @@ static int find_new_ilb(int cpu)
 
         rcu_read_lock();
         for_each_flag_domain(cpu, sd, SD_POWERSAVINGS_BALANCE) {
-                ilb_group = sd->groups;
+                ilbg = sd->groups;
 
                 do {
-                        if (is_semi_idle_group(ilb_group)) {
-                                ilb = cpumask_first(nohz.grp_idle_mask);
+                        if (ilbg->group_weight !=
+                                        atomic_read(&ilbg->sgp->nr_busy_cpus)) {
+                                ilb = cpumask_first_and(nohz.idle_cpus_mask,
+                                                        sched_group_cpus(ilbg));
                                 goto unlock;
                         }
 
-                        ilb_group = ilb_group->next;
+                        ilbg = ilbg->next;
 
-                } while (ilb_group != sd->groups);
+                } while (ilbg != sd->groups);
         }
 unlock:
         rcu_read_unlock();
 
 out_done:
-        return ilb;
+        if (ilb < nr_cpu_ids && idle_cpu(ilb))
+                return ilb;
+
+        return nr_cpu_ids;
 }
 #else /* (CONFIG_SCHED_MC || CONFIG_SCHED_SMT) */
 static inline int find_new_ilb(int call_cpu)
@@ -5565,7 +5542,6 @@ __init void init_sched_fair_class(void)
 
 #ifdef CONFIG_NO_HZ
         zalloc_cpumask_var(&nohz.idle_cpus_mask, GFP_NOWAIT);
-        alloc_cpumask_var(&nohz.grp_idle_mask, GFP_NOWAIT);
 #endif
 #endif /* SMP */
 


