From: Morten Rasmussen <morten.rasmussen@arm.com>
Subject: [RFC][PATCH 2/9] sched: Redirect update_cpu_power to sched/power.c
Date: 9 Jul 2013
With CONFIG_SCHED_POWER enabled, update_cpu_power() gets the
capacity-managed cpu_power from the power scheduler instead of from
arch_scale_freq_power().

    Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
    CC: Ingo Molnar <mingo@kernel.org>
    CC: Peter Zijlstra <peterz@infradead.org>
    CC: Catalin Marinas <catalin.marinas@arm.com>
    ---
 kernel/sched/fair.c  | 19 ++++++++++---------
 kernel/sched/sched.h | 24 ++++++++++++++++++++++++
 2 files changed, 34 insertions(+), 9 deletions(-)
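
The power scheduler referred to above is kernel/sched/power.c, which is
introduced later in this series; this patch only adds the hooks. As a
rough sketch of what the CONFIG_SCHED_POWER implementation of
power_sched_cpu_power() might look like (the per-cpu variable name is
illustrative only, not taken from the series):

/*
 * Illustrative sketch only: the real kernel/sched/power.c is added
 * elsewhere in this series. Assumes the power scheduler tracks a
 * per-cpu capacity; the variable name is hypothetical.
 */
static DEFINE_PER_CPU(unsigned long, power_sched_capacity) = SCHED_POWER_SCALE;

unsigned long power_sched_cpu_power(struct sched_domain *sd, int cpu)
{
	/*
	 * Return the capacity chosen by the power scheduler. A value
	 * of 1 marks the cpu as disabled for load balancing (see
	 * power_cpu_balance() in sched.h).
	 */
	return per_cpu(power_sched_capacity, cpu);
}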

    diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
    index c61a614..01f1f26 100644
    --- a/kernel/sched/fair.c
    +++ b/kernel/sched/fair.c
@@ -3214,6 +3214,10 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 					tsk_cpus_allowed(p)))
 			continue;
 
+		/* Group restricted by power scheduler (cpu_power=1) */
+		if (!power_group_balance(group))
+			continue;
+
 		local_group = cpumask_test_cpu(this_cpu,
 					       sched_group_cpus(group));
 
@@ -3258,6 +3262,11 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu)
 
 	/* Traverse only the allowed CPUs */
 	for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) {
+
+		/* Skip cpus disabled by power scheduler */
+		if (!power_cpu_balance(i))
+			continue;
+
 		load = weighted_cpuload(i);
 
 		if (load < min_load || (load == min_load && i == this_cpu)) {
@@ -4265,11 +4274,6 @@ static inline int get_sd_load_idx(struct sched_domain *sd,
 	return load_idx;
 }
 
-static unsigned long default_scale_freq_power(struct sched_domain *sd, int cpu)
-{
-	return SCHED_POWER_SCALE;
-}
-
 unsigned long __weak arch_scale_freq_power(struct sched_domain *sd, int cpu)
 {
 	return default_scale_freq_power(sd, cpu);
@@ -4336,10 +4340,7 @@ static void update_cpu_power(struct sched_domain *sd, int cpu)
 
 	sdg->sgp->power_orig = power;
 
-	if (sched_feat(ARCH_POWER))
-		power *= arch_scale_freq_power(sd, cpu);
-	else
-		power *= default_scale_freq_power(sd, cpu);
+	power *= power_sched_cpu_power(sd, cpu);
 
 	power >>= SCHED_POWER_SHIFT;
 
    diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
    index ce39224..2e62faa 100644
    --- a/kernel/sched/sched.h
    +++ b/kernel/sched/sched.h
@@ -1377,3 +1377,27 @@ static inline u64 irq_time_read(int cpu)
 }
 #endif /* CONFIG_64BIT */
 #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
+
+static inline unsigned long default_scale_freq_power(struct sched_domain *sd,
+						     int cpu)
+{
+	return SCHED_POWER_SCALE;
+}
+
+extern unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu);
+
+#ifdef CONFIG_SCHED_POWER
+extern unsigned long power_sched_cpu_power(struct sched_domain *sd, int cpu);
+#define power_cpu_balance(cpu) (cpu_rq(cpu)->cpu_power > 1)
+#define power_group_balance(group) (group->sgp->power > group->group_weight)
+#else
+static inline unsigned long power_sched_cpu_power(struct sched_domain *sd,
+						  int cpu)
+{
+	if (sched_feat(ARCH_POWER))
+		return arch_scale_freq_power(sd, cpu);
+	return default_scale_freq_power(sd, cpu);
+}
+#define power_cpu_balance(cpu) 1
+#define power_group_balance(group) 1
+#endif
    --
    1.7.9.5
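
Note for reviewers: the two sched.h macros encode the convention that
the power scheduler "parks" a cpu by forcing its cpu_power down to 1
rather than 0, so the load-balancer arithmetic never divides by zero.
power_cpu_balance() is then false exactly for parked cpus, and because
group->sgp->power is the sum of cpu_power over the group, a fully
parked group sums to its group_weight; hence the power > group_weight
test in power_group_balance(). A minimal sketch of parking under that
convention, continuing the hypothetical names from the sketch above:

/* Hypothetical helpers, not part of this patch or series. */
static void power_sched_park_cpu(int cpu)
{
	/* cpu_power of 1: power_cpu_balance(cpu) becomes false once
	 * update_cpu_power() has propagated the new value. */
	per_cpu(power_sched_capacity, cpu) = 1;
}

static void power_sched_unpark_cpu(int cpu)
{
	/* Restore full capacity; the cpu takes part in balancing again. */
	per_cpu(power_sched_capacity, cpu) = SCHED_POWER_SCALE;
}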


