    Subject: [tip:sched/core] sched: Make tg_shares_up() walk on-demand
    Commit-ID:  9e3081ca61147b29f52fddb4f7c6b6b82ea5eb7a
    Gitweb: http://git.kernel.org/tip/9e3081ca61147b29f52fddb4f7c6b6b82ea5eb7a
    Author: Peter Zijlstra <a.p.zijlstra@chello.nl>
    AuthorDate: Mon, 15 Nov 2010 15:47:02 -0800
    Committer: Ingo Molnar <mingo@elte.hu>
    CommitDate: Thu, 18 Nov 2010 13:27:47 +0100

    sched: Make tg_shares_up() walk on-demand

    Make tg_shares_up() use the active cgroup list. This means we cannot
    do a strict bottom-up walk of the hierarchy, but assuming it is a very
    wide tree with a small number of active groups it should be a win.

    Signed-off-by: Paul Turner <pjt@google.com>
    Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
    LKML-Reference: <20101115234937.754159484@google.com>
    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    ---
     kernel/sched.c      |   67 ---------------------------------------------------
     kernel/sched_fair.c |   58 ++++++++++++++++++++++++++++++++++++++++++++
     2 files changed, 58 insertions(+), 67 deletions(-)
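
    For illustration, here is a minimal user-space sketch of the walk described
    in the changelog above. This is not kernel code: struct group, active_leaves
    and walk_up() are hypothetical stand-ins for the kernel's task_group /
    leaf cfs_rq machinery. Rather than recursing over every node of the group
    tree, only the active leaves are visited, and each one is walked up to the
    root through its parent pointer.

    #include <stdio.h>

    struct group {
    	const char *name;
    	struct group *parent;
    };

    /* Apply an update to @g and every ancestor up to the root. */
    static void walk_up(struct group *g)
    {
    	for (; g; g = g->parent)
    		printf("  update %s\n", g->name);
    }

    int main(void)
    {
    	struct group root   = { "root",   NULL    };
    	struct group idle   = { "idle",   &root   };	/* wide, inactive subtree */
    	struct group active = { "active", &root   };
    	struct group leaf   = { "leaf",   &active };

    	/* Only leaves with runnable entities are visited ... */
    	struct group *active_leaves[] = { &leaf };

    	(void)idle;	/* ... so the idle part of the tree is never touched. */

    	for (unsigned int i = 0; i < sizeof(active_leaves) / sizeof(active_leaves[0]); i++) {
    		printf("active leaf %s:\n", active_leaves[i]->name);
    		walk_up(active_leaves[i]);
    	}
    	return 0;
    }

    On a wide tree where most groups are idle this touches only a handful of
    nodes per cpu, which is the win the changelog refers to.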

    diff --git a/kernel/sched.c b/kernel/sched.c
    index 22436dd..6268d2d 100644
    --- a/kernel/sched.c
    +++ b/kernel/sched.c
    @@ -279,13 +279,6 @@ static DEFINE_SPINLOCK(task_group_lock);

     #ifdef CONFIG_FAIR_GROUP_SCHED

    -#ifdef CONFIG_SMP
    -static int root_task_group_empty(void)
    -{
    -	return list_empty(&root_task_group.children);
    -}
    -#endif
    -
     # define INIT_TASK_GROUP_LOAD	NICE_0_LOAD

     /*
    @@ -1546,48 +1539,6 @@ static unsigned long cpu_avg_load_per_task(int cpu)

     #ifdef CONFIG_FAIR_GROUP_SCHED

    -static void update_cfs_load(struct cfs_rq *cfs_rq, int lb);
    -static void update_cfs_shares(struct cfs_rq *cfs_rq);
    -
    -/*
    - * update tg->load_weight by folding this cpu's load_avg
    - */
    -static int tg_shares_up(struct task_group *tg, void *data)
    -{
    -	long load_avg;
    -	struct cfs_rq *cfs_rq;
    -	unsigned long flags;
    -	int cpu = (long)data;
    -	struct rq *rq;
    -
    -	if (!tg->se[cpu])
    -		return 0;
    -
    -	rq = cpu_rq(cpu);
    -	cfs_rq = tg->cfs_rq[cpu];
    -
    -	raw_spin_lock_irqsave(&rq->lock, flags);
    -
    -	update_rq_clock(rq);
    -	update_cfs_load(cfs_rq, 1);
    -
    -	load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
    -	load_avg -= cfs_rq->load_contribution;
    -
    -	atomic_add(load_avg, &tg->load_weight);
    -	cfs_rq->load_contribution += load_avg;
    -
    -	/*
    -	 * We need to update shares after updating tg->load_weight in
    -	 * order to adjust the weight of groups with long running tasks.
    -	 */
    -	update_cfs_shares(cfs_rq);
    -
    -	raw_spin_unlock_irqrestore(&rq->lock, flags);
    -
    -	return 0;
    -}
    -
     /*
      * Compute the cpu's hierarchical load factor for each task group.
      * This needs to be done in a top-down fashion because the load of a child
    @@ -1611,29 +1562,11 @@ static int tg_load_down(struct task_group *tg, void *data)
     	return 0;
     }

    -static void update_shares(long cpu)
    -{
    -	if (root_task_group_empty())
    -		return;
    -
    -	/*
    -	 * XXX: replace with an on-demand list
    -	 */
    -
    -	walk_tg_tree(tg_nop, tg_shares_up, (void *)cpu);
    -}
    -
     static void update_h_load(long cpu)
     {
     	walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
     }

    -#else
    -
    -static inline void update_shares(int cpu)
    -{
    -}
    -
     #endif

     #ifdef CONFIG_PREEMPT
    diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
    index 0560e72..46ff658 100644
    --- a/kernel/sched_fair.c
    +++ b/kernel/sched_fair.c
    @@ -2004,6 +2004,60 @@ out:
     }

     #ifdef CONFIG_FAIR_GROUP_SCHED
    +/*
    + * update tg->load_weight by folding this cpu's load_avg
    + */
    +static int tg_shares_up(struct task_group *tg, int cpu)
    +{
    +	struct cfs_rq *cfs_rq;
    +	unsigned long flags;
    +	struct rq *rq;
    +	long load_avg;
    +
    +	if (!tg->se[cpu])
    +		return 0;
    +
    +	rq = cpu_rq(cpu);
    +	cfs_rq = tg->cfs_rq[cpu];
    +
    +	raw_spin_lock_irqsave(&rq->lock, flags);
    +
    +	update_rq_clock(rq);
    +	update_cfs_load(cfs_rq, 1);
    +
    +	load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
    +	load_avg -= cfs_rq->load_contribution;
    +	atomic_add(load_avg, &tg->load_weight);
    +	cfs_rq->load_contribution += load_avg;
    +
    +	/*
    +	 * We need to update shares after updating tg->load_weight in
    +	 * order to adjust the weight of groups with long running tasks.
    +	 */
    +	update_cfs_shares(cfs_rq);
    +
    +	raw_spin_unlock_irqrestore(&rq->lock, flags);
    +
    +	return 0;
    +}
    +
    +static void update_shares(int cpu)
    +{
    +	struct cfs_rq *cfs_rq;
    +	struct rq *rq = cpu_rq(cpu);
    +
    +	rcu_read_lock();
    +	for_each_leaf_cfs_rq(rq, cfs_rq) {
    +		struct task_group *tg = cfs_rq->tg;
    +
    +		do {
    +			tg_shares_up(tg, cpu);
    +			tg = tg->parent;
    +		} while (tg);
    +	}
    +	rcu_read_unlock();
    +}
    +
     static unsigned long
     load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
     		  unsigned long max_load_move,
    @@ -2051,6 +2105,10 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
     	return max_load_move - rem_load_move;
     }
     #else
    +static inline void update_shares(int cpu)
    +{
    +}
    +
     static unsigned long
     load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
     		  unsigned long max_load_move,
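
    For reference, here is a stand-alone sketch of the delta-folding arithmetic
    the new tg_shares_up() performs. Again this is not kernel code: struct
    cpu_load, fold_cpu_load() and tg_load_weight are simplified stand-ins for
    the cfs_rq fields and the atomic tg->load_weight. Each cpu adds only the
    difference between its current average load and what it has already
    contributed, so re-folding the same cpu without new load is a no-op.

    #include <stdio.h>

    struct cpu_load {
    	unsigned long long load_sum;	/* time-weighted load accumulated   */
    	unsigned long long period;	/* window over which it accumulated */
    	long long contributed;		/* what was already folded globally */
    };

    static long long tg_load_weight;	/* per-group global sum (atomic_t in the kernel) */

    static void fold_cpu_load(struct cpu_load *c)
    {
    	/* average over the window; the +1 mirrors load_period+1 and avoids /0 */
    	long long avg = (long long)(c->load_sum / (c->period + 1));
    	long long delta = avg - c->contributed;

    	tg_load_weight += delta;	/* atomic_add(delta, &tg->load_weight) */
    	c->contributed += delta;
    }

    int main(void)
    {
    	struct cpu_load cpu0 = { .load_sum = 4096, .period = 3 };
    	struct cpu_load cpu1 = { .load_sum = 1024, .period = 0 };

    	fold_cpu_load(&cpu0);	/* contributes 4096/4 = 1024 */
    	fold_cpu_load(&cpu1);	/* contributes 1024/1 = 1024 */
    	fold_cpu_load(&cpu0);	/* no new load: delta is zero */

    	printf("tg_load_weight = %lld\n", tg_load_weight);	/* 2048 */
    	return 0;
    }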
