    Subject: [tip:sched/balancing] sched: Create a helper function to calculate imbalance
    Commit-ID:  dbc523a3b86f9e1765b5e70e6886913b99cc5cec
    Gitweb: http://git.kernel.org/tip/dbc523a3b86f9e1765b5e70e6886913b99cc5cec
    Author: Gautham R Shenoy <ego@in.ibm.com>
    AuthorDate: Wed, 25 Mar 2009 14:44:12 +0530
    Committer: Ingo Molnar <mingo@elte.hu>
    CommitDate: Wed, 25 Mar 2009 10:30:47 +0100

    sched: Create a helper function to calculate imbalance

    Move all of the imbalance calculation out of find_busiest_group()
    into this new helper function.

    With this change, the structure of find_busiest_group() will be
    as follows (see the sketch after the list):

    - update_sched_domain_statistics.

    - check if an imbalance exists.

    - update imbalance and return busiest.
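
    For illustration, the three steps map onto code roughly as in the
    following pared-down, user-space C model (simplified types and made-up
    load numbers; update_sched_domain_statistics() and fix_small_imbalance()
    are reduced to stand-ins here, not the kernel implementations):

    #include <stdio.h>
    #include <string.h>

    #define SCHED_LOAD_SCALE 1024UL
    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Pared-down stand-ins for the kernel's structures (illustrative only). */
    struct sched_group {
            unsigned long __cpu_power;
    };

    struct sd_lb_stats {
            struct sched_group *busiest;
            struct sched_group *this;   /* "this" is a valid identifier in C */
            unsigned long max_load;
            unsigned long avg_load;
            unsigned long this_load;
            unsigned long busiest_load_per_task;
    };

    static struct sched_group group_busiest = { .__cpu_power = SCHED_LOAD_SCALE };
    static struct sched_group group_this    = { .__cpu_power = SCHED_LOAD_SCALE };

    /* Step 1: stand-in for the statistics pass; the real code walks the
     * domain's groups, here we just fill in made-up numbers. */
    static void update_sched_domain_statistics(struct sd_lb_stats *sds)
    {
            sds->busiest = &group_busiest;
            sds->this = &group_this;
            sds->max_load = 3072;                   /* busiest group's load */
            sds->avg_load = 2048;                   /* domain-wide average */
            sds->this_load = 1024;                  /* local group's load */
            sds->busiest_load_per_task = 1024;
    }

    /* Grossly simplified fix_small_imbalance(): the real helper first checks
     * whether moving one task would actually increase the total work done. */
    static void fix_small_imbalance(struct sd_lb_stats *sds, unsigned long *imbalance)
    {
            *imbalance = sds->busiest_load_per_task;
    }

    /* Mirrors the calculate_imbalance() introduced by this patch. */
    static void calculate_imbalance(struct sd_lb_stats *sds, unsigned long *imbalance)
    {
            unsigned long max_pull;

            if (sds->max_load < sds->avg_load) {
                    *imbalance = 0;
                    fix_small_imbalance(sds, imbalance);
                    return;
            }

            max_pull = MIN(sds->max_load - sds->avg_load,
                           sds->max_load - sds->busiest_load_per_task);

            *imbalance = MIN(max_pull * sds->busiest->__cpu_power,
                             (sds->avg_load - sds->this_load) *
                             sds->this->__cpu_power) / SCHED_LOAD_SCALE;

            if (*imbalance < sds->busiest_load_per_task)
                    fix_small_imbalance(sds, imbalance);
    }

    /* The three-step structure the changelog describes. */
    static struct sched_group *find_busiest_group(unsigned long *imbalance)
    {
            struct sd_lb_stats sds;

            memset(&sds, 0, sizeof(sds));

            update_sched_domain_statistics(&sds);           /* step 1 */

            if (!sds.busiest ||                             /* step 2 */
                sds.max_load <= sds.busiest_load_per_task)
                    goto out_balanced;

            calculate_imbalance(&sds, imbalance);           /* step 3 */
            return sds.busiest;

    out_balanced:
            *imbalance = 0;
            return NULL;
    }

    int main(void)
    {
            unsigned long imbalance = 0;

            if (find_busiest_group(&imbalance))
                    printf("busiest group found, imbalance = %lu\n", imbalance);
            else
                    printf("domain already balanced\n");
            return 0;
    }

    With the sample numbers above the model prints an imbalance of 1024,
    i.e. exactly one task's worth of load to move.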

    Signed-off-by: Gautham R Shenoy <ego@in.ibm.com>
    Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
    Cc: Suresh Siddha <suresh.b.siddha@intel.com>
    Cc: "Balbir Singh" <balbir@in.ibm.com>
    Cc: Nick Piggin <nickpiggin@yahoo.com.au>
    Cc: "Dhaval Giani" <dhaval@linux.vnet.ibm.com>
    Cc: Bharata B Rao <bharata@linux.vnet.ibm.com>
    Cc: "Vaidyanathan Srinivasan" <svaidy@linux.vnet.ibm.com>
    LKML-Reference: <20090325091411.13992.43293.stgit@sofia.in.ibm.com>
    Signed-off-by: Ingo Molnar <mingo@elte.hu>


    ---
    kernel/sched.c | 78 ++++++++++++++++++++++++++++++++-----------------------
    1 files changed, 45 insertions(+), 33 deletions(-)

    diff --git a/kernel/sched.c b/kernel/sched.c
    index 540147e..934f615 100644
    --- a/kernel/sched.c
    +++ b/kernel/sched.c
    @@ -3487,8 +3487,8 @@ group_next:

    /**
    * fix_small_imbalance - Calculate the minor imbalance that exists
    - * amongst the groups of a sched_domain, during
    - * load balancing.
    + * amongst the groups of a sched_domain, during
    + * load balancing.
    * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
    * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
    * @imbalance: Variable to store the imbalance.
    @@ -3549,6 +3549,47 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
    if (pwr_move > pwr_now)
    *imbalance = sds->busiest_load_per_task;
    }
    +
    +/**
    + * calculate_imbalance - Calculate the amount of imbalance present within the
    + * groups of a given sched_domain during load balance.
    + * @sds: statistics of the sched_domain whose imbalance is to be calculated.
    + * @this_cpu: Cpu for which currently load balance is being performed.
    + * @imbalance: The variable to store the imbalance.
    + */
    +static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
    + unsigned long *imbalance)
    +{
    + unsigned long max_pull;
    + /*
    + * In the presence of smp nice balancing, certain scenarios can have
    + * max load less than avg load (as we skip the groups at or below
    + * its cpu_power, while calculating max_load..)
    + */
    + if (sds->max_load < sds->avg_load) {
    + *imbalance = 0;
    + return fix_small_imbalance(sds, this_cpu, imbalance);
    + }
    +
    + /* Don't want to pull so many tasks that a group would go idle */
    + max_pull = min(sds->max_load - sds->avg_load,
    + sds->max_load - sds->busiest_load_per_task);
    +
    + /* How much load to actually move to equalise the imbalance */
    + *imbalance = min(max_pull * sds->busiest->__cpu_power,
    + (sds->avg_load - sds->this_load) * sds->this->__cpu_power)
    + / SCHED_LOAD_SCALE;
    +
    + /*
    + * if *imbalance is less than the average load per runnable task
    + * there is no guarantee that any tasks will be moved so we'll have
    + * a think about bumping its value to force at least one task to be
    + * moved
    + */
    + if (*imbalance < sds->busiest_load_per_task)
    + return fix_small_imbalance(sds, this_cpu, imbalance);
    +
    +}
    /******* find_busiest_group() helpers end here *********************/

    /*
    @@ -3562,7 +3603,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
    int *sd_idle, const struct cpumask *cpus, int *balance)
    {
    struct sd_lb_stats sds;
    - unsigned long max_pull;

    memset(&sds, 0, sizeof(sds));

    @@ -3605,36 +3645,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
    if (sds.max_load <= sds.busiest_load_per_task)
    goto out_balanced;

    - /*
    - * In the presence of smp nice balancing, certain scenarios can have
    - * max load less than avg load (as we skip the groups at or below
    - * its cpu_power, while calculating max_load..)
    - */
    - if (sds.max_load < sds.avg_load) {
    - *imbalance = 0;
    - fix_small_imbalance(&sds, this_cpu, imbalance);
    - goto ret_busiest;
    - }
    -
    - /* Don't want to pull so many tasks that a group would go idle */
    - max_pull = min(sds.max_load - sds.avg_load,
    - sds.max_load - sds.busiest_load_per_task);
    -
    - /* How much load to actually move to equalise the imbalance */
    - *imbalance = min(max_pull * sds.busiest->__cpu_power,
    - (sds.avg_load - sds.this_load) * sds.this->__cpu_power)
    - / SCHED_LOAD_SCALE;
    -
    - /*
    - * if *imbalance is less than the average load per runnable task
    - * there is no guarantee that any tasks will be moved so we'll have
    - * a think about bumping its value to force at least one task to be
    - * moved
    - */
    - if (*imbalance < sds.busiest_load_per_task)
    - fix_small_imbalance(&sds, this_cpu, imbalance);
    -
    -ret_busiest:
    + /* Looks like there is an imbalance. Compute it */
    + calculate_imbalance(&sds, this_cpu, imbalance);
    return sds.busiest;

    out_balanced:
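
    As a footnote on the max_pull capping in calculate_imbalance() above,
    this tiny stand-alone C program (made-up load figures in the kernel's
    load units) shows which of the two bounds wins in two situations:

    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    /* Same capping as the patch: pull no further than the domain average,
     * and never so much that the busiest group drops below one task's load. */
    static unsigned long max_pull(unsigned long max_load,
                                  unsigned long avg_load,
                                  unsigned long load_per_task)
    {
            return MIN(max_load - avg_load, max_load - load_per_task);
    }

    int main(void)
    {
            /* Many small tasks: the domain average is the tighter bound. */
            printf("%lu\n", max_pull(4096, 2048, 512));  /* min(2048, 3584) = 2048 */

            /* One big task dominates: keep one task's load in the group. */
            printf("%lu\n", max_pull(4096, 1024, 3072)); /* min(3072, 1024) = 1024 */

            return 0;
    }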
