From: Nikhil Rao <>
Subject: [PATCH v1 18/19] sched: update shares distribution to use u64
Date: Sun, 1 May 2011 18:19:16 -0700
Update the shares distribution code to use u64. tg->shares is still maintained as an unsigned long, since sched entity weights cannot exceed MAX_SHARES (2^28) and therefore fit in 32 bits. This patch converts the calculations used to estimate shares to u64.
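To make the overflow concrete: on 32-bit architectures a long is 32 bits, so once a group's weight approaches MAX_SHARES the intermediate product tg->shares * load in calc_cfs_shares() no longer fits in the old long arithmetic. A minimal standalone userspace sketch of the wrap (the load value is an illustrative assumption, not a scheduler constant):

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_SHARES (1UL << 28)     /* upper bound on tg->shares */

    int main(void)
    {
            uint32_t shares = MAX_SHARES;   /* largest group weight */
            uint32_t load   = 1UL << 20;    /* illustrative cfs_rq load */

            /* 32-bit product wraps: 2^28 * 2^20 = 2^48, 2^48 mod 2^32 == 0 */
            uint32_t wrapped = shares * load;
            /* widening one operand first keeps the full 64-bit result */
            uint64_t exact   = (uint64_t)shares * load;

            printf("32-bit product: %u\n", (unsigned)wrapped);
            printf("64-bit product: %llu\n", (unsigned long long)exact);
            return 0;
    }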
Signed-off-by: Nikhil Rao <ncrao@google.com>
---
 kernel/sched.c       |    2 +-
 kernel/sched_debug.c |    6 +++---
 kernel/sched_fair.c  |   17 ++++++++++++-----
 3 files changed, 16 insertions(+), 9 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 6b9b02a..e131225 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -367,7 +367,7 @@ struct cfs_rq {
 	u64 load_period;
 	u64 load_stamp, load_last, load_unacc_exec_time;
 
-	unsigned long load_contribution;
+	u64 load_contribution;
 #endif
 #endif
 };
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index d22b666..b809651 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -204,11 +204,11 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 	SEQ_printf(m, "  .%-30s: %lld\n", "load", cfs_rq->load.weight);
 #ifdef CONFIG_FAIR_GROUP_SCHED
 #ifdef CONFIG_SMP
-	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "load_avg",
+	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", "load_avg",
 			SPLIT_NS(cfs_rq->load_avg));
-	SEQ_printf(m, "  .%-30s: %Ld.%06ld\n", "load_period",
+	SEQ_printf(m, "  .%-30s: %lld.%06ld\n", "load_period",
 			SPLIT_NS(cfs_rq->load_period));
-	SEQ_printf(m, "  .%-30s: %ld\n", "load_contrib",
+	SEQ_printf(m, "  .%-30s: %lld\n", "load_contrib",
 			cfs_rq->load_contribution);
 	SEQ_printf(m, "  .%-30s: %d\n", "load_tg",
 			atomic_read(&cfs_rq->tg->load_weight));
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index bf9bbaa..3f56410 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -708,12 +708,13 @@ static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq,
 					    int global_update)
 {
 	struct task_group *tg = cfs_rq->tg;
-	long load_avg;
+	s64 load_avg;
 
 	load_avg = div64_u64(cfs_rq->load_avg, cfs_rq->load_period+1);
 	load_avg -= cfs_rq->load_contribution;
 
 	if (global_update || abs(load_avg) > cfs_rq->load_contribution / 8) {
+		/* TODO: fix atomics for 64-bit additions */
 		atomic_add(load_avg, &tg->load_weight);
 		cfs_rq->load_contribution += load_avg;
 	}
@@ -723,7 +724,7 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 {
 	u64 period = sysctl_sched_shares_window;
 	u64 now, delta;
-	unsigned long load = cfs_rq->load.weight;
+	u64 load = cfs_rq->load.weight;
 
 	if (cfs_rq->tg == &root_task_group)
 		return;
@@ -743,8 +744,13 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 	cfs_rq->load_unacc_exec_time = 0;
 	cfs_rq->load_period += delta;
 	if (load) {
+		u64 tmp = cfs_rq->load_avg;
 		cfs_rq->load_last = now;
 		cfs_rq->load_avg += delta * load;
+
+		/* Detect overflow and set load_avg to max */
+		if (unlikely(cfs_rq->load_avg < tmp))
+			cfs_rq->load_avg = ~0ULL;
 	}
 
 	/* consider updating load contribution on each fold or truncate */
@@ -769,24 +775,25 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update)
 
 static long calc_cfs_shares(struct cfs_rq *cfs_rq, struct task_group *tg)
 {
-	long load_weight, load, shares;
+	s64 load_weight, load, shares;
 
 	load = cfs_rq->load.weight;
 
+	/* TODO: fixup atomics to handle u64 in 32-bit */
 	load_weight = atomic_read(&tg->load_weight);
 	load_weight += load;
 	load_weight -= cfs_rq->load_contribution;
 
 	shares = (tg->shares * load);
 	if (load_weight)
-		shares /= load_weight;
+		shares = div64_u64(shares, load_weight);
 
 	if (shares < MIN_SHARES)
 		shares = MIN_SHARES;
 	if (shares > tg->shares)
 		shares = tg->shares;
 
-	return shares;
+	return (long)shares;
 }
 
 static void update_entity_shares_tick(struct cfs_rq *cfs_rq)
-- 
1.7.3.1
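The overflow guard added to update_cfs_load() uses the standard unsigned-wraparound check: if the accumulator is smaller after the addition than before it, the u64 wrapped, so load_avg is clamped to ~0ULL. The same idiom in isolation, as a standalone sketch (sat_add_u64 is a name made up for illustration, not a kernel helper):

    #include <stdio.h>
    #include <stdint.h>

    /* Unsigned overflow wraps modulo 2^64, so a sum smaller than an
     * operand means the addition wrapped; clamp instead of wrapping. */
    static uint64_t sat_add_u64(uint64_t a, uint64_t b)
    {
            uint64_t sum = a + b;

            if (sum < a)            /* wrapped past 2^64 - 1 */
                    sum = ~0ULL;    /* clamp to UINT64_MAX */
            return sum;
    }

    int main(void)
    {
            uint64_t near_max = ~0ULL - 1;

            /* prints 18446744073709551615, the saturated maximum */
            printf("%llu\n", (unsigned long long)sat_add_u64(near_max, 5));
            return 0;
    }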
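Both TODOs in the patch flag the same gap: atomic_add() and atomic_read() operate on a 32-bit atomic_t, so the s64 delta is truncated on 32-bit machines. One plausible direction, sketched here in userspace C11 rather than as kernel code, is a 64-bit atomic accumulator; in-kernel this would presumably mean converting tg->load_weight to atomic64_t and using atomic64_add()/atomic64_read(), which fall back to a lock-based implementation on architectures without native 64-bit atomics:

    #include <stdio.h>
    #include <stdint.h>
    #include <stdatomic.h>

    /* Userspace stand-in for tg->load_weight as a 64-bit atomic. */
    static _Atomic int64_t load_weight;

    int main(void)
    {
            /* a delta this large would be truncated by a 32-bit atomic_t */
            int64_t load_avg = 1LL << 40;

            atomic_fetch_add(&load_weight, load_avg);
            printf("load_weight = %lld\n", (long long)atomic_load(&load_weight));
            return 0;
    }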