Subject: [patch 19/20] timer: Split out index calculation
From: Anna-Maria Gleixner <anna-maria@linutronix.de>

For further optimizations we need to separate index calculation from
queueing. No functional change.
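
After the split, the add path decomposes as follows (a condensed sketch
drawn from the diff below; bucket/bitmap handling and the nohz IPI logic
are elided):

	static void
	__internal_add_timer(struct timer_base *base, struct timer_list *timer)
	{
		/* Pure calculation: pick the wheel level/bucket for the expiry */
		unsigned int idx = calc_wheel_index(timer->expires, base->clk);

		/* Pure queueing: hash bucket, pending bitmap, idx in timer flags */
		enqueue_timer(base, timer, idx);
	}

	static void
	internal_add_timer(struct timer_base *base, struct timer_list *timer)
	{
		__internal_add_timer(base, timer);
		/* Possibly kick a remote nohz idle CPU (logic unchanged) */
		trigger_dyntick_cpu(base, timer);
	}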

Signed-off-by: Anna-Maria Gleixner <anna-maria@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
---
kernel/time/timer.c | 41 +++++++++++++++++++++++++++++++++--------
1 file changed, 33 insertions(+), 8 deletions(-)

--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -456,12 +456,9 @@ static inline unsigned calc_index(unsign
return base + (((expires + gran) >> sft) & LVL_MASK);
}

-static void
-__internal_add_timer(struct timer_base *base, struct timer_list *timer)
+static int calc_wheel_index(unsigned long expires, unsigned long clk)
{
- unsigned long expires = timer->expires;
- unsigned long delta = expires - base->clk;
- struct hlist_head *vec;
+ unsigned long delta = expires - clk;
unsigned int idx;

if (delta < LVL1_TSTART) {
@@ -475,7 +472,7 @@ static void
} else if (delta < LVL5_TSTART) {
idx = calc_index(expires, LVL4_GRAN, LVL4_SHIFT, LVL4_OFFS);
} else if ((long) delta < 0) {
- idx = base->clk & LVL_MASK;
+ idx = clk & LVL_MASK;
} else {
/*
* The long timeouts go into the last array level. They
@@ -485,6 +482,18 @@ static void
idx = calc_index(expires, LVL5_GRAN, LVL5_SHIFT, LVL5_OFFS);
}

+ return idx;
+}
+
+/*
+ * Enqueue the timer into the hash bucket, mark it pending in
+ * the bitmap and store the index in the timer flags.
+ */
+static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
+ unsigned int idx)
+{
+ struct hlist_head *vec;
+
/*
* Enqueue the timer into the array bucket, mark it pending in
* the bitmap and store the index in the timer flags.
@@ -495,10 +504,19 @@ static void
timer_set_idx(timer, idx);
}

-static void internal_add_timer(struct timer_base *base, struct timer_list *timer)
+static void
+__internal_add_timer(struct timer_base *base, struct timer_list *timer)
{
- __internal_add_timer(base, timer);
+ unsigned long expires = timer->expires;
+ unsigned int idx;
+
+ idx = calc_wheel_index(expires, base->clk);
+ enqueue_timer(base, timer, idx);
+}

+static void
+trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
+{
/*
* We might have to IPI the remote CPU if the base is idle and the
* timer is not deferrable. If the other cpu is on the way to idle
@@ -523,6 +541,13 @@ static void internal_add_timer(struct ti
wake_up_nohz_cpu(base->cpu);
}

+static void
+internal_add_timer(struct timer_base *base, struct timer_list *timer)
+{
+ __internal_add_timer(base, timer);
+ trigger_dyntick_cpu(base, timer);
+}
+
#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{
