Subject: [PATCH -tip 19/32] entry/idle: Enter and exit kernel protection during idle entry and exit

Add generic_idle_{enter,exit}() helper functions to enter and exit kernel
protection when entering and exiting idle, respectively.

While at it, remove a stale RCU comment.
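
To illustrate the pairing these helpers establish, here is a minimal
standalone userspace sketch (not part of this patch): the
sched_core_unsafe_{enter,exit}() bodies below are stand-in stubs for the
real scheduler hooks, and mock_idle_loop() is a hypothetical caller used
only to show where the pair brackets an idle stint.

/*
 * Minimal userspace mock of the enter/exit pairing; builds with
 * "cc -o idle-sketch idle-sketch.c". The sched_core_unsafe_*() stubs
 * only log the transition they stand in for.
 */
#include <stdio.h>

static void sched_core_unsafe_exit(void)
{
	printf("core-wide kernel protection: off (CPU going idle)\n");
}

static void sched_core_unsafe_enter(void)
{
	printf("core-wide kernel protection: on (CPU leaving idle)\n");
}

/* Mirrors the helpers this patch adds to <linux/entry-common.h>. */
static inline void generic_idle_enter(void)
{
	/* Entering idle ends the protected kernel region. */
	sched_core_unsafe_exit();
}

static inline void generic_idle_exit(void)
{
	/* Exiting idle (re)starts the protected kernel region. */
	sched_core_unsafe_enter();
}

/* Hypothetical caller: the pair brackets the time the CPU sits idle. */
static void mock_idle_loop(void)
{
	generic_idle_enter();
	/* ... the CPU would sit in its idle state here ... */
	generic_idle_exit();
}

int main(void)
{
	mock_idle_loop();
	return 0;
}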

Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
Tested-by: Julien Desfossez <jdesfossez@digitalocean.com>
Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
---
 include/linux/entry-common.h | 18 ++++++++++++++++++
 kernel/sched/idle.c          | 11 ++++++-----
 2 files changed, 24 insertions(+), 5 deletions(-)

diff --git a/include/linux/entry-common.h b/include/linux/entry-common.h
index 022e1f114157..8f34ae625f83 100644
--- a/include/linux/entry-common.h
+++ b/include/linux/entry-common.h
@@ -454,4 +454,22 @@ static inline bool entry_kernel_protected(void)
 	return IS_ENABLED(CONFIG_SCHED_CORE) && sched_core_kernel_protected()
 		&& _TIF_UNSAFE_RET != 0;
 }
+
+/**
+ * generic_idle_enter - General tasks to perform during idle entry.
+ */
+static inline void generic_idle_enter(void)
+{
+	/* Entering idle ends the protected kernel region. */
+	sched_core_unsafe_exit();
+}
+
+/**
+ * generic_idle_exit - General tasks to perform during idle exit.
+ */
+static inline void generic_idle_exit(void)
+{
+	/* Exiting idle (re)starts the protected kernel region. */
+	sched_core_unsafe_enter();
+}
 #endif
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 8bdb214eb78f..ee4f91396c31 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -8,6 +8,7 @@
  */
 #include "sched.h"
 
+#include <linux/entry-common.h>
 #include <trace/events/power.h>
 
 /* Linker adds these: start and end of __cpuidle functions */
@@ -54,6 +55,7 @@ __setup("hlt", cpu_idle_nopoll_setup);
 
 static noinline int __cpuidle cpu_idle_poll(void)
 {
+	generic_idle_enter();
 	trace_cpu_idle(0, smp_processor_id());
 	stop_critical_timings();
 	rcu_idle_enter();
@@ -66,6 +68,7 @@ static noinline int __cpuidle cpu_idle_poll(void)
 	rcu_idle_exit();
 	start_critical_timings();
 	trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
+	generic_idle_exit();
 
 	return 1;
 }
@@ -156,11 +159,7 @@ static void cpuidle_idle_call(void)
 		return;
 	}
 
-	/*
-	 * The RCU framework needs to be told that we are entering an idle
-	 * section, so no more rcu read side critical sections and one more
-	 * step to the grace period
-	 */
+	generic_idle_enter();
 
 	if (cpuidle_not_available(drv, dev)) {
 		tick_nohz_idle_stop_tick();
@@ -225,6 +224,8 @@ static void cpuidle_idle_call(void)
 	 */
 	if (WARN_ON_ONCE(irqs_disabled()))
 		local_irq_enable();
+
+	generic_idle_exit();
 }
 
 /*
--
2.29.2.299.gdc1121823c-goog