 
    From: Nikhil Rao <ncrao@google.com>
    Subject: [PATCH v1 19/19] sched: convert atomic ops in shares update to use atomic64_t ops
    Date: 1 May 2011
    Convert uses of atomic_t to atomic64_t in the shares update
    calculations. The total task weight in a task group can overflow a
    32-bit atomic_t.

    Signed-off-by: Nikhil Rao <ncrao@google.com>
    ---
     kernel/sched.c       |    2 +-
     kernel/sched_debug.c |    4 ++--
     kernel/sched_fair.c  |    8 +++-----
     3 files changed, 6 insertions(+), 8 deletions(-)
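
    For context, a minimal userspace sketch of the overflow in question.
    It assumes the ~10-bit increase in load weight resolution introduced
    earlier in this series (a nice-0 task weighing 1024 << 10 instead of
    1024), and uses C11 atomics as stand-ins for the kernel's atomic_t
    and atomic64_t; none of the names below are kernel APIs.

    #include <limits.h>
    #include <stdatomic.h>
    #include <stdio.h>

    int main(void)
    {
    	const long long nice0 = 1024LL << 10;	/* scaled nice-0 weight: 2^20 */
    	atomic_int w32 = 0;			/* stand-in for atomic_t */
    	atomic_llong w64 = 0;			/* stand-in for atomic64_t */

    	/* A signed 32-bit total holds fewer than 2048 such tasks. */
    	printf("tasks before 32-bit overflow: %lld\n", INT_MAX / nice0);

    	/* 3000 nice-0 tasks in one task group: the 32-bit sum wraps. */
    	for (int i = 0; i < 3000; i++) {
    		atomic_fetch_add(&w32, (int)nice0);
    		atomic_fetch_add(&w64, nice0);
    	}
    	printf("atomic_t total  : %d (wrapped negative)\n", atomic_load(&w32));
    	printf("atomic64_t total: %lld\n", atomic_load(&w64));
    	return 0;
    }

    On 32-bit architectures without native 64-bit atomics, atomic64_t
    falls back to the generic spinlock-based implementation in
    lib/atomic64.c, so the wider type is slightly more expensive there
    but stays correct.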

    diff --git a/kernel/sched.c b/kernel/sched.c
    index e131225..af26b3e 100644
    --- a/kernel/sched.c
    +++ b/kernel/sched.c
    @@ -255,7 +255,7 @@ struct task_group {
     	struct cfs_rq **cfs_rq;
     	unsigned long shares;
     
    -	atomic_t load_weight;
    +	atomic64_t load_weight;
     #endif
     
     #ifdef CONFIG_RT_GROUP_SCHED
    diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
    index b809651..2d0fff9 100644
    --- a/kernel/sched_debug.c
    +++ b/kernel/sched_debug.c
    @@ -210,8 +210,8 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
     			SPLIT_NS(cfs_rq->load_period));
     	SEQ_printf(m, "  .%-30s: %lld\n", "load_contrib",
     			cfs_rq->load_contribution);
    -	SEQ_printf(m, "  .%-30s: %d\n", "load_tg",
    -			atomic_read(&cfs_rq->tg->load_weight));
    +	SEQ_printf(m, "  .%-30s: %lld\n", "load_tg",
    +			(long long)atomic64_read(&cfs_rq->tg->load_weight));
     #endif
     
     	print_cfs_group_stats(m, cpu, cfs_rq->tg);
    diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
    index 3f56410..0152410 100644
    --- a/kernel/sched_fair.c
    +++ b/kernel/sched_fair.c
    @@ -714,8 +714,7 @@ static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq,
     	load_avg -= cfs_rq->load_contribution;
     
     	if (global_update || abs(load_avg) > cfs_rq->load_contribution / 8) {
    -		/* TODO: fix atomics for 64-bit additions */
    -		atomic_add(load_avg, &tg->load_weight);
    +		atomic64_add(load_avg, &tg->load_weight);
     		cfs_rq->load_contribution += load_avg;
     	}
     }
    @@ -779,8 +778,7 @@ static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)

     	load = cfs_rq->load.weight;
     
    -	/* TODO: fixup atomics to handle u64 in 32-bit */
    -	load_weight = atomic_read(&tg->load_weight);
    +	load_weight = atomic64_read(&tg->load_weight);
     	load_weight += load;
     	load_weight -= cfs_rq->load_contribution;
     
    @@ -1409,7 +1407,7 @@ static s64 effective_load(struct task_group *tg, int cpu, s64 wl, s64 wg)
     		w = se->my_q->load.weight;
     
     		/* use this cpu's instantaneous contribution */
    -		lw = atomic_read(&tg->load_weight);
    +		lw = atomic64_read(&tg->load_weight);
     		lw -= se->my_q->load_contribution;
     		lw += w + wg;
     
    --
    1.7.3.1

