Date: Mon, 19 Aug 2013
From: Peter Zijlstra <peterz@infradead.org>
Subject: [PATCH 04/10] sched, fair: Shrink sg_lb_stats and play memset games
We can shrink sg_lb_stats because rq::nr_running is an 'unsigned int'
and cpu numbers are 'int', so the corresponding fields need not be
'unsigned long'.

    Before:
    sgs: /* size: 72, cachelines: 2, members: 10 */
    sds: /* size: 184, cachelines: 3, members: 7 */

    After:
    sgs: /* size: 56, cachelines: 1, members: 10 */
    sds: /* size: 152, cachelines: 3, members: 7 */
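(The size annotations above are in pahole's output format, e.g. from
"pahole -C sg_lb_stats kernel/sched/fair.o" on a build with debug info.
To illustrate the shrink outside the kernel, here is a minimal userspace
sketch, not part of the patch, assuming an LP64 target where 'unsigned
long' is 8 bytes and 'int' is 4; the _old/_new names are illustrative:)

#include <stdio.h>

/* Mock-ups of the sg_lb_stats layout before and after the patch. */
struct sg_lb_stats_old {
	unsigned long avg_load;
	unsigned long group_load;
	unsigned long sum_nr_running;
	unsigned long sum_weighted_load;
	unsigned long load_per_task;
	unsigned long group_capacity;
	unsigned long idle_cpus;
	unsigned long group_weight;
	int group_imb;
	int group_has_capacity;
};

struct sg_lb_stats_new {
	unsigned long avg_load;
	unsigned long group_load;
	unsigned long sum_weighted_load;
	unsigned long load_per_task;
	unsigned int sum_nr_running;
	unsigned int group_capacity;
	unsigned int idle_cpus;
	unsigned int group_weight;
	int group_imb;
	int group_has_capacity;
};

int main(void)
{
	/* Prints 72 and 56 on LP64, matching the pahole numbers above. */
	printf("old: %zu\n", sizeof(struct sg_lb_stats_old));
	printf("new: %zu\n", sizeof(struct sg_lb_stats_new));
	return 0;
}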

Further, we can avoid clearing all of sds, since update_sg_lb_stats()
does a full clear/assignment of each group's sg_lb_stats; the one
exception is busiest_stat.avg_load, which update_sd_pick_busiest()
reads before it is assigned, so it must still be cleared.
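(For illustration, a minimal userspace sketch of the offsetof/memset
trick, not part of the patch; 'this_sg' stands in for the kernel's
'this' pointer and the tail of sg_lb_stats is elided. Poisoning the
struct first shows exactly which bytes the partial memset clears:)

#include <stdio.h>
#include <string.h>
#include <stddef.h>

struct sg_lb_stats {
	unsigned long avg_load;
	unsigned long group_load;
	/* remaining members elided */
};

struct sd_lb_stats {
	void *busiest;
	void *this_sg;		/* 'this' in the kernel struct */
	unsigned long total_load;
	unsigned long total_pwr;
	unsigned long avg_load;
	struct sg_lb_stats busiest_stat;
	struct sg_lb_stats this_stat;
};

int main(void)
{
	struct sd_lb_stats sds;

	memset(&sds, 0xff, sizeof(sds));	/* poison everything */
	/* Clear up to, but not including, busiest_stat.group_load. */
	memset(&sds, 0, offsetof(struct sd_lb_stats, busiest_stat.group_load));

	printf("busiest_stat.avg_load   = %lx (cleared)\n",
	       sds.busiest_stat.avg_load);
	printf("busiest_stat.group_load = %lx (skipped)\n",
	       sds.busiest_stat.group_load);
	printf("this_stat.avg_load      = %lx (skipped)\n",
	       sds.this_stat.avg_load);
	return 0;
}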

    Signed-off-by: Peter Zijlstra <peterz@infradead.org>
    ---
    kernel/sched/fair.c | 42 +++++++++++++++++++++++++++++++++++-------
    1 file changed, 35 insertions(+), 7 deletions(-)

    --- a/kernel/sched/fair.c
    +++ b/kernel/sched/fair.c
    @@ -4282,12 +4282,12 @@ static unsigned long task_h_load(struct
 struct sg_lb_stats {
 	unsigned long avg_load; /*Avg load across the CPUs of the group */
 	unsigned long group_load; /* Total load over the CPUs of the group */
-	unsigned long sum_nr_running; /* Nr tasks running in the group */
 	unsigned long sum_weighted_load; /* Weighted load of group's tasks */
 	unsigned long load_per_task;
-	unsigned long group_capacity;
-	unsigned long idle_cpus;
-	unsigned long group_weight;
+	unsigned int sum_nr_running; /* Nr tasks running in the group */
+	unsigned int group_capacity;
+	unsigned int idle_cpus;
+	unsigned int group_weight;
 	int group_imb; /* Is there an imbalance in the group ? */
 	int group_has_capacity; /* Is there extra capacity in the group? */
 };
    @@ -4303,10 +4303,38 @@ struct sd_lb_stats {
 	unsigned long total_pwr;	/* Total power of all groups in sd */
 	unsigned long avg_load;	/* Average load across all groups in sd */
 
-	struct sg_lb_stats this_stat;	/* Statistics of this group */
 	struct sg_lb_stats busiest_stat;/* Statistics of the busiest group */
+	struct sg_lb_stats this_stat;	/* Statistics of this group */
 };
 
+static inline void init_sd_lb_stats(struct sd_lb_stats *sds)
+{
+	/*
+	 * struct sd_lb_stats {
+	 *	struct sched_group *	busiest;	//     0  8
+	 *	struct sched_group *	this;		//     8  8
+	 *	long unsigned int	total_load;	//    16  8
+	 *	long unsigned int	total_pwr;	//    24  8
+	 *	long unsigned int	avg_load;	//    32  8
+	 *	struct sg_lb_stats {
+	 *		long unsigned int avg_load;	//    40  8
+	 *		long unsigned int group_load;	//    48  8
+	 *		...
+	 *	} busiest_stat;				//    40 56
+	 *	struct sg_lb_stats	this_stat;	//    96 56
+	 *
+	 *	// size: 152, cachelines: 3, members: 7
+	 *	// last cacheline: 24 bytes
+	 * };
+	 *
+	 * Skimp on the clearing to avoid duplicate work. We can avoid clearing
+	 * this_stat because update_sg_lb_stats() does a full clear/assignment.
+	 * We must however clear busiest_stat::avg_load because
+	 * update_sd_pick_busiest() reads this before assignment.
+	 */
+	memset(sds, 0, offsetof(struct sd_lb_stats, busiest_stat.group_load));
+}
+
 /**
  * get_sd_load_idx - Obtain the load index for a given sched domain.
  * @sd: The sched_domain whose load_idx is to be obtained.
    @@ -4665,7 +4693,7 @@ static inline void update_sd_lb_stats(st
 	 */
 	if (prefer_sibling && !local_group &&
 	    sds->this && sds->this_stat.group_has_capacity)
-		sgs->group_capacity = min(sgs->group_capacity, 1UL);
+		sgs->group_capacity = min(sgs->group_capacity, 1U);
 
 	/* Now, start updating sd_lb_stats */
 	sds->total_load += sgs->group_load;
    @@ -4896,7 +4924,7 @@ static struct sched_group *find_busiest_
 	struct sg_lb_stats *this, *busiest;
 	struct sd_lb_stats sds;
 
-	memset(&sds, 0, sizeof(sds));
+	init_sd_lb_stats(&sds);
 
 	/*
 	 * Compute the various statistics relavent for load balancing at


