Subject: [tip:sched/core] sched: Update tg->shares after cpu.shares write
Commit-ID:  9437178f623a19af5951808d880a8599f66ac150
Gitweb:     http://git.kernel.org/tip/9437178f623a19af5951808d880a8599f66ac150
Author:     Paul Turner <pjt@google.com>
AuthorDate: Mon, 15 Nov 2010 15:47:10 -0800
Committer:  Ingo Molnar <mingo@elte.hu>
CommitDate: Thu, 18 Nov 2010 13:27:50 +0100

sched: Update tg->shares after cpu.shares write

Formerly, sched_group_set_shares() would force a rebalance by overflowing the
domain share sums. Now that per-cpu averages are maintained, we can set the
true value directly by issuing an update_cfs_shares() at each level of the
hierarchy after a tg->shares update (a simplified model of that recomputation
is sketched after the patch).

Also initialize the group entity's se->load to 0 for consistency, since the
correct weight is now set on enqueue.

Signed-off-by: Paul Turner <pjt@google.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20101115234938.465521344@google.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
 kernel/sched.c |   42 +++++++++++-------------------------------
 1 files changed, 11 insertions(+), 31 deletions(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index e914a71..550cf3a 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7646,7 +7646,7 @@ static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 		se->cfs_rq = parent->my_q;
 
 	se->my_q = cfs_rq;
-	update_load_set(&se->load, tg->shares);
+	update_load_set(&se->load, 0);
 	se->parent = parent;
 }
 #endif
@@ -8274,37 +8274,12 @@ void sched_move_task(struct task_struct *tsk)
 #endif /* CONFIG_CGROUP_SCHED */
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static void __set_se_shares(struct sched_entity *se, unsigned long shares)
-{
-	struct cfs_rq *cfs_rq = se->cfs_rq;
-	int on_rq;
-
-	on_rq = se->on_rq;
-	if (on_rq)
-		dequeue_entity(cfs_rq, se, 0);
-
-	update_load_set(&se->load, shares);
-
-	if (on_rq)
-		enqueue_entity(cfs_rq, se, 0);
-}
-
-static void set_se_shares(struct sched_entity *se, unsigned long shares)
-{
-	struct cfs_rq *cfs_rq = se->cfs_rq;
-	struct rq *rq = cfs_rq->rq;
-	unsigned long flags;
-
-	raw_spin_lock_irqsave(&rq->lock, flags);
-	__set_se_shares(se, shares);
-	raw_spin_unlock_irqrestore(&rq->lock, flags);
-}
-
 static DEFINE_MUTEX(shares_mutex);
 
 int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 {
 	int i;
+	unsigned long flags;
 
 	/*
 	 * We can't change the weight of the root cgroup.
@@ -8323,10 +8298,15 @@ int sched_group_set_shares(struct task_group *tg, unsigned long shares)
 
 	tg->shares = shares;
 	for_each_possible_cpu(i) {
-		/*
-		 * force a rebalance
-		 */
-		set_se_shares(tg->se[i], shares);
+		struct rq *rq = cpu_rq(i);
+		struct sched_entity *se;
+
+		se = tg->se[i];
+		/* Propagate contribution to hierarchy */
+		raw_spin_lock_irqsave(&rq->lock, flags);
+		for_each_sched_entity(se)
+			update_cfs_shares(group_cfs_rq(se), 0);
+		raw_spin_unlock_irqrestore(&rq->lock, flags);
 	}
 
 done:
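
[Editor's note] As context for the patch above: the value written to a cgroup's
cpu.shares file ends up in sched_group_set_shares(), and with this change each
per-cpu entity weight is recomputed from the maintained load averages rather
than being forced through a dequeue/enqueue cycle. The standalone sketch below
models that recomputation; it is a simplification, not kernel code. The names
model_cfs_shares, tg_shares, cfs_rq_load, and total_load are hypothetical
stand-ins for tg->shares, a cfs_rq's load contribution, and the group's summed
load, and the real update_cfs_shares() applies additional rounding and bounds.

#include <stdio.h>

/*
 * Simplified model (not kernel code) of how a per-cpu entity weight
 * can be derived after a tg->shares write: each CPU receives a slice
 * of the group's shares proportional to the load that CPU contributes
 * to the group's total load.
 */
static unsigned long model_cfs_shares(unsigned long tg_shares,
				      unsigned long cfs_rq_load,
				      unsigned long total_load)
{
	unsigned long shares;

	if (!total_load)
		return tg_shares;	/* idle group: keep full weight */

	shares = tg_shares * cfs_rq_load / total_load;
	if (shares < 2)
		shares = 2;		/* the kernel enforces a minimum weight */
	return shares;
}

int main(void)
{
	/* A group with shares=1024 whose load splits 3:1 across two CPUs. */
	printf("cpu0: %lu\n", model_cfs_shares(1024, 750, 1000)); /* 768 */
	printf("cpu1: %lu\n", model_cfs_shares(1024, 250, 1000)); /* 256 */
	return 0;
}

Note also the locking shape of the new loop: the old set_se_shares() took
rq->lock to rewrite a single entity's weight, while the new code holds each
CPU's rq->lock across a for_each_sched_entity() walk, so the weight change
propagates to every ancestor level of that CPU's hierarchy under one
consistent snapshot.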
