Date: 2013-05-06
Subject: Re: [PATCH v5 1/7] Revert "sched: Introduce temporary FAIR_GROUP_SCHED dependency for load-tracking"
On Sun, May 5, 2013 at 6:45 PM, Alex Shi <alex.shi@intel.com> wrote:
> Remove the CONFIG_FAIR_GROUP_SCHED dependency that guards the runnable
> load-tracking info, so that the runnable load variables can be used on
> any SMP build.
>
> Signed-off-by: Alex Shi <alex.shi@intel.com>
> ---
> include/linux/sched.h | 7 +------
> kernel/sched/core.c | 7 +------
> kernel/sched/fair.c | 13 ++-----------
> kernel/sched/sched.h | 9 +--------
> 4 files changed, 5 insertions(+), 31 deletions(-)
>
> diff --git a/include/linux/sched.h b/include/linux/sched.h
> index e692a02..9539597 100644
> --- a/include/linux/sched.h
> +++ b/include/linux/sched.h
> @@ -1161,12 +1161,7 @@ struct sched_entity {
> struct cfs_rq *my_q;
> #endif
>
> -/*
> - * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
> - * removed when useful for applications beyond shares distribution (e.g.
> - * load-balance).
> - */
> -#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
> +#ifdef CONFIG_SMP
> /* Per-entity load-tracking */
> struct sched_avg avg;
> #endif
> diff --git a/kernel/sched/core.c b/kernel/sched/core.c
> index 67d0465..c8db984 100644
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -1563,12 +1563,7 @@ static void __sched_fork(struct task_struct *p)
> p->se.vruntime = 0;
> INIT_LIST_HEAD(&p->se.group_node);
>
> -/*
> - * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
> - * removed when useful for applications beyond shares distribution (e.g.
> - * load-balance).
> - */
> -#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
> +#ifdef CONFIG_SMP
> p->se.avg.runnable_avg_period = 0;
> p->se.avg.runnable_avg_sum = 0;
> #endif
> diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
> index 7a33e59..9c2f726 100644
> --- a/kernel/sched/fair.c
> +++ b/kernel/sched/fair.c
> @@ -1109,8 +1109,7 @@ static inline void update_cfs_shares(struct cfs_rq *cfs_rq)
> }
> #endif /* CONFIG_FAIR_GROUP_SCHED */
>
> -/* Only depends on SMP, FAIR_GROUP_SCHED may be removed when useful in lb */
> -#if defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)
> +#ifdef CONFIG_SMP
> /*
> * We choose a half-life close to 1 scheduling period.
> * Note: The tables below are dependent on this value.
> @@ -3394,12 +3393,6 @@ unlock:
> }
>
> /*
> - * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
> - * removed when useful for applications beyond shares distribution (e.g.
> - * load-balance).
> - */
> -#ifdef CONFIG_FAIR_GROUP_SCHED
> -/*
> * Called immediately before a task is migrated to a new cpu; task_cpu(p) and
> * cfs_rq_of(p) references at time of call are still valid and identify the
> * previous cpu. However, the caller only guarantees p->pi_lock is held; no
> @@ -3422,7 +3415,6 @@ migrate_task_rq_fair(struct task_struct *p, int next_cpu)
> atomic64_add(se->avg.load_avg_contrib, &cfs_rq->removed_load);
> }
> }
> -#endif
> #endif /* CONFIG_SMP */
>
> static unsigned long
> @@ -6114,9 +6106,8 @@ const struct sched_class fair_sched_class = {
>
> #ifdef CONFIG_SMP
> .select_task_rq = select_task_rq_fair,
> -#ifdef CONFIG_FAIR_GROUP_SCHED
> .migrate_task_rq = migrate_task_rq_fair,
> -#endif
> +
> .rq_online = rq_online_fair,
> .rq_offline = rq_offline_fair,
>
> diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
> index cc03cfd..7f36024f 100644
> --- a/kernel/sched/sched.h
> +++ b/kernel/sched/sched.h
> @@ -227,12 +227,6 @@ struct cfs_rq {
> #endif
>
> #ifdef CONFIG_SMP
> -/*
> - * Load-tracking only depends on SMP, FAIR_GROUP_SCHED dependency below may be
> - * removed when useful for applications beyond shares distribution (e.g.
> - * load-balance).
> - */
> -#ifdef CONFIG_FAIR_GROUP_SCHED
> /*
> * CFS Load tracking
> * Under CFS, load is tracked on a per-entity basis and aggregated up.
> @@ -242,8 +236,7 @@ struct cfs_rq {
> u64 runnable_load_avg, blocked_load_avg;
> atomic64_t decay_counter, removed_load;
> u64 last_decay;
> -#endif /* CONFIG_FAIR_GROUP_SCHED */
> -/* These always depend on CONFIG_FAIR_GROUP_SCHED */

We should perhaps replace this with a comment noting that these fields
are only needed to aggregate the point-wise representation in the
FAIR_GROUP_SCHED case.
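
For instance (just a sketch of possible wording and placement against
the struct as it looks after this patch, not tested):

	/*
	 * These fields are only needed in the FAIR_GROUP_SCHED case, to
	 * aggregate the point-wise per-entity representation up the
	 * task-group hierarchy.
	 */
#ifdef CONFIG_FAIR_GROUP_SCHED
	u32 tg_runnable_contrib;
	u64 tg_load_contrib;
#endif /* CONFIG_FAIR_GROUP_SCHED */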

> +
> #ifdef CONFIG_FAIR_GROUP_SCHED
> u32 tg_runnable_contrib;
> u64 tg_load_contrib;
> --
> 1.7.12
>
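
For reference, the net #ifdef nesting after this revert would look
roughly like the following (a sketch assembled from the hunks above,
field lists abbreviated; not a verbatim copy of the tree):

	struct sched_entity {
		...
#ifdef CONFIG_SMP
		/* Per-entity load-tracking, now on all SMP builds */
		struct sched_avg	avg;
#endif
	};

	struct cfs_rq {
		...
#ifdef CONFIG_SMP
		/* CFS load tracking: per-entity, aggregated upward */
		u64 runnable_load_avg, blocked_load_avg;
		atomic64_t decay_counter, removed_load;
		u64 last_decay;

#ifdef CONFIG_FAIR_GROUP_SCHED
		/* Aggregation of the per-entity representation */
		u32 tg_runnable_contrib;
		u64 tg_load_contrib;
#endif
#endif /* CONFIG_SMP */
	};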

