Subject: [PATCH 4.9 61/72] Revert "sched/core: Optimize SCHED_SMT"

4.9-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

This reverts commit 1b568f0aabf280555125bc7cefc08321ff0ebaba.

In the 4.9 kernel tree, the reverted commit causes scheduler regressions.
The problem is fixed in newer kernels by a large number of individual
patches, which taken together are too big to bring into the stable tree.

Ingo recommended just reverting the single patch for this tree, as it's
much simpler.
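
For reference, the gating that this revert removes looks roughly like the
sketch below (condensed and illustrative only, assuming the usual kernel
static-key and cpumask helpers; the authoritative code is in the diff that
follows):

  #ifdef CONFIG_SCHED_SMT
  /* The key starts out false, i.e. "no SMT siblings present". */
  DEFINE_STATIC_KEY_FALSE(sched_smt_present);

  static void sched_init_smt(void)
  {
  	/* Enable the key once at boot if CPU0 has SMT siblings. */
  	if (cpumask_weight(cpu_smt_mask(0)) > 1)
  		static_branch_enable(&sched_smt_present);
  }
  #endif

  	/* SMT-only idle scans then bail out early on non-SMT machines: */
  	if (!static_branch_likely(&sched_smt_present))
  		return -1;

Because the test is a static jump label rather than a load and compare,
non-SMT machines skipped the SMT-only scans at essentially no cost.  With
the revert applied, update_idle_core() and the select_idle_core() /
select_idle_smt() scans are no longer gated on the key and run on non-SMT
machines as well.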

Reported-by: Ben Guthro <ben@guthro.net>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
kernel/sched/core.c | 19 -------------------
kernel/sched/fair.c | 8 +-------
kernel/sched/sched.h | 23 ++++++-----------------
3 files changed, 7 insertions(+), 43 deletions(-)

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7422,22 +7422,6 @@ int sched_cpu_dying(unsigned int cpu)
 }
 #endif
 
-#ifdef CONFIG_SCHED_SMT
-DEFINE_STATIC_KEY_FALSE(sched_smt_present);
-
-static void sched_init_smt(void)
-{
-	/*
-	 * We've enumerated all CPUs and will assume that if any CPU
-	 * has SMT siblings, CPU0 will too.
-	 */
-	if (cpumask_weight(cpu_smt_mask(0)) > 1)
-		static_branch_enable(&sched_smt_present);
-}
-#else
-static inline void sched_init_smt(void) { }
-#endif
-
 void __init sched_init_smp(void)
 {
 	cpumask_var_t non_isolated_cpus;
@@ -7467,9 +7451,6 @@ void __init sched_init_smp(void)
 
 	init_sched_rt_class();
 	init_sched_dl_class();
-
-	sched_init_smt();
-
 	sched_smp_initialized = true;
 }
 
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5376,7 +5376,7 @@ static inline bool test_idle_cores(int c
  * Since SMT siblings share all cache levels, inspecting this limited remote
  * state should be fairly cheap.
  */
-void __update_idle_core(struct rq *rq)
+void update_idle_core(struct rq *rq)
 {
 	int core = cpu_of(rq);
 	int cpu;
@@ -5408,9 +5408,6 @@ static int select_idle_core(struct task_
 	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
 	int core, cpu, wrap;
 
-	if (!static_branch_likely(&sched_smt_present))
-		return -1;
-
 	if (!test_idle_cores(target, false))
 		return -1;
 
@@ -5444,9 +5441,6 @@ static int select_idle_smt(struct task_s
 {
 	int cpu;
 
-	if (!static_branch_likely(&sched_smt_present))
-		return -1;
-
 	for_each_cpu(cpu, cpu_smt_mask(target)) {
 		if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
 			continue;
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -43,6 +43,12 @@ extern void cpu_load_update_active(struc
 static inline void cpu_load_update_active(struct rq *this_rq) { }
 #endif
 
+#ifdef CONFIG_SCHED_SMT
+extern void update_idle_core(struct rq *rq);
+#else
+static inline void update_idle_core(struct rq *rq) { }
+#endif
+
 /*
  * Helpers for converting nanosecond timing to jiffy resolution
  */
@@ -731,23 +737,6 @@ static inline int cpu_of(struct rq *rq)
 #endif
 }
 
-
-#ifdef CONFIG_SCHED_SMT
-
-extern struct static_key_false sched_smt_present;
-
-extern void __update_idle_core(struct rq *rq);
-
-static inline void update_idle_core(struct rq *rq)
-{
-	if (static_branch_unlikely(&sched_smt_present))
-		__update_idle_core(rq);
-}
-
-#else
-static inline void update_idle_core(struct rq *rq) { }
-#endif
-
 DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
 #define cpu_rq(cpu)		(&per_cpu(runqueues, (cpu)))
