From: Frederic Weisbecker <fweisbec@gmail.com>
Subject: [PATCH 4/7] preempt: Disable preemption from preempt_schedule*() callers
Date: 2015-05-11
Let's gather the preempt operations (set PREEMPT_ACTIVE and disable
preemption) into a single operation. This prepares for removing the
preemption disablement from __schedule() so that callers can optimize
that work themselves.
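
For reference, the call sites below change shape like this (a
condensed sketch of the hunks, not a literal excerpt):

	/* before: open-coded PREEMPT_ACTIVE handling */
	__preempt_count_add(PREEMPT_ACTIVE);
	__schedule();
	__preempt_count_sub(PREEMPT_ACTIVE);
	barrier();

	/* after: one paired helper that also disables preemption */
	preempt_active_enter();
	__schedule();
	preempt_active_exit();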

Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
---
 include/linux/preempt.h | 12 ++++++++++++
 kernel/sched/core.c     | 20 ++++++--------------
 2 files changed, 18 insertions(+), 14 deletions(-)

diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 4689ef2..45da394 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -137,6 +137,18 @@ extern void preempt_count_sub(int val);
 #define preempt_count_inc() preempt_count_add(1)
 #define preempt_count_dec() preempt_count_sub(1)
 
+#define preempt_active_enter() \
+do { \
+	preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
+	barrier(); \
+} while (0)
+
+#define preempt_active_exit() \
+do { \
+	barrier(); \
+	preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
+} while (0)
+
 #ifdef CONFIG_PREEMPT_COUNT
 
 #define preempt_disable() \
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 8027cfd..182127a 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2859,15 +2859,14 @@ void __sched schedule_preempt_disabled(void)
 static void __sched notrace preempt_schedule_common(void)
 {
 	do {
-		__preempt_count_add(PREEMPT_ACTIVE);
+		preempt_active_enter();
 		__schedule();
-		__preempt_count_sub(PREEMPT_ACTIVE);
+		preempt_active_exit();
 
 		/*
 		 * Check again in case we missed a preemption opportunity
 		 * between schedule and now.
 		 */
-		barrier();
 	} while (need_resched());
 }
 
@@ -2914,7 +2913,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_context(void)
 		return;
 
 	do {
-		__preempt_count_add(PREEMPT_ACTIVE);
+		preempt_active_enter();
 		/*
 		 * Needs preempt disabled in case user_exit() is traced
 		 * and the tracer calls preempt_enable_notrace() causing
@@ -2924,8 +2923,7 @@ asmlinkage __visible void __sched notrace preempt_schedule_context(void)
 		__schedule();
 		exception_exit(prev_ctx);
 
-		__preempt_count_sub(PREEMPT_ACTIVE);
-		barrier();
+		preempt_active_exit();
 	} while (need_resched());
 }
 EXPORT_SYMBOL_GPL(preempt_schedule_context);
@@ -2949,17 +2947,11 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
 	prev_state = exception_enter();
 
 	do {
-		__preempt_count_add(PREEMPT_ACTIVE);
+		preempt_active_enter();
 		local_irq_enable();
 		__schedule();
 		local_irq_disable();
-		__preempt_count_sub(PREEMPT_ACTIVE);
-
-		/*
-		 * Check again in case we missed a preemption opportunity
-		 * between schedule and now.
-		 */
-		barrier();
+		preempt_active_exit();
 	} while (need_resched());
 
 	exception_exit(prev_state);
--
2.1.4
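
To sanity-check the enter/exit pairing outside the kernel, here is a
minimal userspace model. The constant values are illustrative
assumptions (the real ones live in the kernel headers and vary by
config), and preempt_count is a plain global here rather than a
per-task field:

#include <assert.h>
#include <stdio.h>

/* Illustrative values only; not the kernel's actual layout. */
#define PREEMPT_DISABLE_OFFSET	1
#define PREEMPT_ACTIVE		(1 << 21)

/* Per-task in the kernel; a single global suffices for the model. */
static int preempt_count;

/* Compiler barrier, as in the kernel's barrier(). */
#define barrier() __asm__ __volatile__("" ::: "memory")

static void preempt_count_add(int val) { preempt_count += val; }
static void preempt_count_sub(int val) { preempt_count -= val; }

static void preempt_active_enter(void)
{
	preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET);
	barrier();
}

static void preempt_active_exit(void)
{
	barrier();
	preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET);
}

int main(void)
{
	preempt_active_enter();
	/* Inside the section both effects hold: the PREEMPT_ACTIVE
	 * bit is set and the preempt-disable count is raised by one. */
	assert(preempt_count & PREEMPT_ACTIVE);
	assert((preempt_count & ~PREEMPT_ACTIVE) == PREEMPT_DISABLE_OFFSET);
	preempt_active_exit();
	assert(preempt_count == 0);
	printf("enter/exit balanced, preempt_count=%d\n", preempt_count);
	return 0;
}

The barrier() placement mirrors the macros in the patch: the compiler
barrier sits on the inside of the pair, so code in the protected
section cannot be reordered across the count updates.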

