From: Morten Rasmussen <>
Subject: [PATCHv4 01/12] sched: Add static_key for asymmetric cpu capacity optimizations
Date: Wed, 4 Jul 2018 11:17:39 +0100
The existing asymmetric cpu capacity code should cause minimal overhead for others. Putting it behind a static_key, as has been done for the SMT optimizations, makes it easier to extend and improve without causing harm to others moving forward.
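(Aside, not part of the commit message: the static_key pattern referred to above is the same mechanism the scheduler already uses for the SMT optimizations. A key declared default-false keeps the guarded branch patched out, so systems without asymmetric cpu capacities pay essentially nothing until the key is flipped at runtime. A minimal sketch of that pattern follows; the key and function names are invented for illustration and are not the ones added by this patch.)

#include <linux/jump_label.h>

/* Default-off key: the branch guarded by it is patched out until enabled. */
static DEFINE_STATIC_KEY_FALSE(example_feature_key);

static int example_fast_path(int arg)
{
	/* Compiles to a straight-line no-op jump while the key is disabled. */
	if (!static_branch_unlikely(&example_feature_key))
		return 0;

	/* Feature-specific work only runs once the key has been enabled. */
	return arg * 2;
}

static void example_detect_feature(bool present)
{
	if (present)
		static_branch_enable(&example_feature_key);
}

The sketch uses plain static_branch_enable(); the patch below uses the _cpuslocked variant because the key is enabled from a path that expects the hotplug lock to be held.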
cc: Ingo Molnar <mingo@redhat.com>
cc: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Morten Rasmussen <morten.rasmussen@arm.com>
---
 kernel/sched/fair.c     |  3 +++
 kernel/sched/sched.h    |  1 +
 kernel/sched/topology.c | 19 +++++++++++++++++++
 3 files changed, 23 insertions(+)
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 321cd5dcf2e8..85fb7e8ff5c8 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6583,6 +6583,9 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
 {
 	long min_cap, max_cap;
 
+	if (!static_branch_unlikely(&sched_asym_cpucapacity))
+		return 0;
+
 	min_cap = min(capacity_orig_of(prev_cpu), capacity_orig_of(cpu));
 	max_cap = cpu_rq(cpu)->rd->max_cpu_capacity;
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c7742dcc136c..35ce218f0157 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1160,6 +1160,7 @@ DECLARE_PER_CPU(int, sd_llc_id);
 DECLARE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
 DECLARE_PER_CPU(struct sched_domain *, sd_numa);
 DECLARE_PER_CPU(struct sched_domain *, sd_asym);
+extern struct static_key_false sched_asym_cpucapacity;
 
 struct sched_group_capacity {
 	atomic_t ref;
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 05a831427bc7..0cfdeff669fe 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -398,6 +398,7 @@ DEFINE_PER_CPU(int, sd_llc_id);
 DEFINE_PER_CPU(struct sched_domain_shared *, sd_llc_shared);
 DEFINE_PER_CPU(struct sched_domain *, sd_numa);
 DEFINE_PER_CPU(struct sched_domain *, sd_asym);
+DEFINE_STATIC_KEY_FALSE(sched_asym_cpucapacity);
 
 static void update_top_cache_domain(int cpu)
 {
@@ -425,6 +426,21 @@ static void update_top_cache_domain(int cpu)
 	rcu_assign_pointer(per_cpu(sd_asym, cpu), sd);
 }
 
+static void update_asym_cpucapacity(int cpu)
+{
+	int enable = false;
+
+	rcu_read_lock();
+	if (lowest_flag_domain(cpu, SD_ASYM_CPUCAPACITY))
+		enable = true;
+	rcu_read_unlock();
+
+	if (enable) {
+		/* This expects to be hotplug-safe */
+		static_branch_enable_cpuslocked(&sched_asym_cpucapacity);
+	}
+}
+
 /*
  * Attach the domain 'sd' to 'cpu' as its base domain. Callers must
  * hold the hotplug lock.
@@ -1707,6 +1723,9 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
 	}
 	rcu_read_unlock();
 
+	if (!cpumask_empty(cpu_map))
+		update_asym_cpucapacity(cpumask_first(cpu_map));
+
 	if (rq && sched_debug_enabled) {
 		pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n",
 			cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
-- 
2.7.4