From: Frederic Weisbecker <frederic@kernel.org>
Subject: [PATCH 1/5] sched: Move tick code to a separate file
Date: 18 Dec 2017
Let's debloat core.c some more.

Also, we are going to expand the tick code even further to introduce
scheduler tick offloading.

Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
Cc: Chris Metcalf <cmetcalf@mellanox.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Luiz Capitulino <lcapitulino@redhat.com>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Wanpeng Li <kernellwp@gmail.com>
Cc: Ingo Molnar <mingo@kernel.org>
---
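For reviewers, a rough sketch of how the moved code gets called (not
part of this patch; trimmed from the v4.14-era kernel/time/timer.c, so
details may differ by tree). The periodic timer interrupt ends up in
update_process_times(), which invokes scheduler_tick() once per tick
on each CPU; under NO_HZ_FULL the tick-sched code then caps how long
the tick may be stopped using scheduler_tick_max_deferment():

	void update_process_times(int user_tick)
	{
		struct task_struct *p = current;

		account_process_tick(p, user_tick);	/* cputime accounting */
		run_local_timers();			/* hrtimers + raise TIMER_SOFTIRQ */
		rcu_check_callbacks(user_tick);		/* RCU housekeeping */
		scheduler_tick();			/* now lives in kernel/sched/tick.c */
		if (IS_ENABLED(CONFIG_POSIX_TIMERS))
			run_posix_cpu_timers(p);
	}
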
kernel/sched/Makefile | 2 +-
kernel/sched/core.c | 182 --------------------------------------------------
kernel/sched/sched.h | 7 +-
kernel/sched/tick.c | 177 ++++++++++++++++++++++++++++++++++++++++++++++++
4 files changed, 184 insertions(+), 184 deletions(-)
create mode 100644 kernel/sched/tick.c

diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index e2f9d4f..dd0b01e 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -16,7 +16,7 @@ ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
 CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer
 endif
 
-obj-y += core.o loadavg.o clock.o cputime.o
+obj-y += core.o loadavg.o clock.o cputime.o tick.o
 obj-y += idle_task.o fair.o rt.o deadline.o
 obj-y += wait.o wait_bit.o swait.o completion.o idle.o
 obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o topology.o stop_task.o
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 644fa2e..06af4fa 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -233,129 +233,6 @@ void update_rq_clock(struct rq *rq)
 	update_rq_clock_task(rq, delta);
 }
 
-
-#ifdef CONFIG_SCHED_HRTICK
-/*
- * Use HR-timers to deliver accurate preemption points.
- */
-
-static void hrtick_clear(struct rq *rq)
-{
-	if (hrtimer_active(&rq->hrtick_timer))
-		hrtimer_cancel(&rq->hrtick_timer);
-}
-
-/*
- * High-resolution timer tick.
- * Runs from hardirq context with interrupts disabled.
- */
-static enum hrtimer_restart hrtick(struct hrtimer *timer)
-{
-	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
-	struct rq_flags rf;
-
-	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
-
-	rq_lock(rq, &rf);
-	update_rq_clock(rq);
-	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
-	rq_unlock(rq, &rf);
-
-	return HRTIMER_NORESTART;
-}
-
-#ifdef CONFIG_SMP
-
-static void __hrtick_restart(struct rq *rq)
-{
-	struct hrtimer *timer = &rq->hrtick_timer;
-
-	hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
-}
-
-/*
- * called from hardirq (IPI) context
- */
-static void __hrtick_start(void *arg)
-{
-	struct rq *rq = arg;
-	struct rq_flags rf;
-
-	rq_lock(rq, &rf);
-	__hrtick_restart(rq);
-	rq->hrtick_csd_pending = 0;
-	rq_unlock(rq, &rf);
-}
-
-/*
- * Called to set the hrtick timer state.
- *
- * called with rq->lock held and irqs disabled
- */
-void hrtick_start(struct rq *rq, u64 delay)
-{
-	struct hrtimer *timer = &rq->hrtick_timer;
-	ktime_t time;
-	s64 delta;
-
-	/*
-	 * Don't schedule slices shorter than 10000ns, that just
-	 * doesn't make sense and can cause timer DoS.
-	 */
-	delta = max_t(s64, delay, 10000LL);
-	time = ktime_add_ns(timer->base->get_time(), delta);
-
-	hrtimer_set_expires(timer, time);
-
-	if (rq == this_rq()) {
-		__hrtick_restart(rq);
-	} else if (!rq->hrtick_csd_pending) {
-		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
-		rq->hrtick_csd_pending = 1;
-	}
-}
-
-#else
-/*
- * Called to set the hrtick timer state.
- *
- * called with rq->lock held and irqs disabled
- */
-void hrtick_start(struct rq *rq, u64 delay)
-{
-	/*
-	 * Don't schedule slices shorter than 10000ns, that just
-	 * doesn't make sense. Rely on vruntime for fairness.
-	 */
-	delay = max_t(u64, delay, 10000LL);
-	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
-		      HRTIMER_MODE_REL_PINNED);
-}
-#endif /* CONFIG_SMP */
-
-static void init_rq_hrtick(struct rq *rq)
-{
-#ifdef CONFIG_SMP
-	rq->hrtick_csd_pending = 0;
-
-	rq->hrtick_csd.flags = 0;
-	rq->hrtick_csd.func = __hrtick_start;
-	rq->hrtick_csd.info = rq;
-#endif
-
-	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
-	rq->hrtick_timer.function = hrtick;
-}
-#else /* CONFIG_SCHED_HRTICK */
-static inline void hrtick_clear(struct rq *rq)
-{
-}
-
-static inline void init_rq_hrtick(struct rq *rq)
-{
-}
-#endif /* CONFIG_SCHED_HRTICK */
-
 /*
  * cmpxchg based fetch_or, macro so it works for different integer types
  */
@@ -3005,65 +2882,6 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 	return ns;
 }
 
-/*
- * This function gets called by the timer code, with HZ frequency.
- * We call it with interrupts disabled.
- */
-void scheduler_tick(void)
-{
-	int cpu = smp_processor_id();
-	struct rq *rq = cpu_rq(cpu);
-	struct task_struct *curr = rq->curr;
-	struct rq_flags rf;
-
-	sched_clock_tick();
-
-	rq_lock(rq, &rf);
-
-	update_rq_clock(rq);
-	curr->sched_class->task_tick(rq, curr, 0);
-	cpu_load_update_active(rq);
-	calc_global_load_tick(rq);
-
-	rq_unlock(rq, &rf);
-
-	perf_event_task_tick();
-
-#ifdef CONFIG_SMP
-	rq->idle_balance = idle_cpu(cpu);
-	trigger_load_balance(rq);
-#endif
-	rq_last_tick_reset(rq);
-}
-
-#ifdef CONFIG_NO_HZ_FULL
-/**
- * scheduler_tick_max_deferment
- *
- * Keep at least one tick per second when a single
- * active task is running because the scheduler doesn't
- * yet completely support full dynticks environment.
- *
- * This makes sure that uptime, CFS vruntime, load
- * balancing, etc... continue to move forward, even
- * with a very low granularity.
- *
- * Return: Maximum deferment in nanoseconds.
- */
-u64 scheduler_tick_max_deferment(void)
-{
-	struct rq *rq = this_rq();
-	unsigned long next, now = READ_ONCE(jiffies);
-
-	next = rq->last_sched_tick + HZ;
-
-	if (time_before_eq(next, now))
-		return 0;
-
-	return jiffies_to_nsecs(next - now);
-}
-#endif
-
 #if defined(CONFIG_PREEMPT) && (defined(CONFIG_DEBUG_PREEMPT) || \
 				defined(CONFIG_PREEMPT_TRACER))
 /*
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b19552a2..43f065e 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1676,7 +1676,9 @@ static inline int hrtick_enabled(struct rq *rq)
 	return hrtimer_is_hres_active(&rq->hrtick_timer);
 }
 
-void hrtick_start(struct rq *rq, u64 delay);
+extern void hrtick_start(struct rq *rq, u64 delay);
+extern void hrtick_clear(struct rq *rq);
+extern void init_rq_hrtick(struct rq *rq);
 
 #else
 
@@ -1685,6 +1687,9 @@ static inline int hrtick_enabled(struct rq *rq)
 	return 0;
 }
 
+static inline void hrtick_clear(struct rq *rq) { }
+static inline void init_rq_hrtick(struct rq *rq) { }
+
 #endif /* CONFIG_SCHED_HRTICK */
 
 #ifdef CONFIG_SMP
diff --git a/kernel/sched/tick.c b/kernel/sched/tick.c
new file mode 100644
index 0000000..bcc6d7d
--- /dev/null
+++ b/kernel/sched/tick.c
@@ -0,0 +1,177 @@
+#include <linux/sched.h>
+#include <linux/sched/clock.h>
+#include <linux/perf_event.h>
+#include "sched.h"
+
+/*
+ * This function gets called by the timer code, with HZ frequency.
+ * We call it with interrupts disabled.
+ */
+void scheduler_tick(void)
+{
+	int cpu = smp_processor_id();
+	struct rq *rq = cpu_rq(cpu);
+	struct task_struct *curr = rq->curr;
+	struct rq_flags rf;
+
+	sched_clock_tick();
+
+	rq_lock(rq, &rf);
+
+	update_rq_clock(rq);
+	curr->sched_class->task_tick(rq, curr, 0);
+	cpu_load_update_active(rq);
+	calc_global_load_tick(rq);
+
+	rq_unlock(rq, &rf);
+
+	perf_event_task_tick();
+
+#ifdef CONFIG_SMP
+	rq->idle_balance = idle_cpu(cpu);
+	trigger_load_balance(rq);
+#endif
+	rq_last_tick_reset(rq);
+}
+
+#ifdef CONFIG_NO_HZ_FULL
+/**
+ * scheduler_tick_max_deferment
+ *
+ * Keep at least one tick per second when a single
+ * active task is running because the scheduler doesn't
+ * yet completely support full dynticks environment.
+ *
+ * This makes sure that uptime, CFS vruntime, load
+ * balancing, etc... continue to move forward, even
+ * with a very low granularity.
+ *
+ * Return: Maximum deferment in nanoseconds.
+ */
+u64 scheduler_tick_max_deferment(void)
+{
+	struct rq *rq = this_rq();
+	unsigned long next, now = READ_ONCE(jiffies);
+
+	next = rq->last_sched_tick + HZ;
+
+	if (time_before_eq(next, now))
+		return 0;
+
+	return jiffies_to_nsecs(next - now);
+}
+#endif
+
+#ifdef CONFIG_SCHED_HRTICK
+/*
+ * Use HR-timers to deliver accurate preemption points.
+ */
+
+void hrtick_clear(struct rq *rq)
+{
+	if (hrtimer_active(&rq->hrtick_timer))
+		hrtimer_cancel(&rq->hrtick_timer);
+}
+
+/*
+ * High-resolution timer tick.
+ * Runs from hardirq context with interrupts disabled.
+ */
+static enum hrtimer_restart hrtick(struct hrtimer *timer)
+{
+	struct rq *rq = container_of(timer, struct rq, hrtick_timer);
+	struct rq_flags rf;
+
+	WARN_ON_ONCE(cpu_of(rq) != smp_processor_id());
+
+	rq_lock(rq, &rf);
+	update_rq_clock(rq);
+	rq->curr->sched_class->task_tick(rq, rq->curr, 1);
+	rq_unlock(rq, &rf);
+
+	return HRTIMER_NORESTART;
+}
+
+#ifdef CONFIG_SMP
+
+static void __hrtick_restart(struct rq *rq)
+{
+	struct hrtimer *timer = &rq->hrtick_timer;
+
+	hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
+}
+
+/*
+ * called from hardirq (IPI) context
+ */
+static void __hrtick_start(void *arg)
+{
+	struct rq *rq = arg;
+	struct rq_flags rf;
+
+	rq_lock(rq, &rf);
+	__hrtick_restart(rq);
+	rq->hrtick_csd_pending = 0;
+	rq_unlock(rq, &rf);
+}
+
+/*
+ * Called to set the hrtick timer state.
+ *
+ * called with rq->lock held and irqs disabled
+ */
+void hrtick_start(struct rq *rq, u64 delay)
+{
+	struct hrtimer *timer = &rq->hrtick_timer;
+	ktime_t time;
+	s64 delta;
+
+	/*
+	 * Don't schedule slices shorter than 10000ns, that just
+	 * doesn't make sense and can cause timer DoS.
+	 */
+	delta = max_t(s64, delay, 10000LL);
+	time = ktime_add_ns(timer->base->get_time(), delta);
+
+	hrtimer_set_expires(timer, time);
+
+	if (rq == this_rq()) {
+		__hrtick_restart(rq);
+	} else if (!rq->hrtick_csd_pending) {
+		smp_call_function_single_async(cpu_of(rq), &rq->hrtick_csd);
+		rq->hrtick_csd_pending = 1;
+	}
+}
+
+#else
+/*
+ * Called to set the hrtick timer state.
+ *
+ * called with rq->lock held and irqs disabled
+ */
+void hrtick_start(struct rq *rq, u64 delay)
+{
+	/*
+	 * Don't schedule slices shorter than 10000ns, that just
+	 * doesn't make sense. Rely on vruntime for fairness.
+	 */
+	delay = max_t(u64, delay, 10000LL);
+	hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
+		      HRTIMER_MODE_REL_PINNED);
+}
+#endif /* CONFIG_SMP */
+
+void init_rq_hrtick(struct rq *rq)
+{
+#ifdef CONFIG_SMP
+	rq->hrtick_csd_pending = 0;
+
+	rq->hrtick_csd.flags = 0;
+	rq->hrtick_csd.func = __hrtick_start;
+	rq->hrtick_csd.info = rq;
+#endif
+
+	hrtimer_init(&rq->hrtick_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	rq->hrtick_timer.function = hrtick;
+}
+#endif /* CONFIG_SCHED_HRTICK */
--
2.7.4