Date: Fri, 26 Feb 2021 11:52:39 -0800
Subject: [PATCH] sched: Optimize __calc_delta.
From: Josh Don <joshdon@google.com>
From: Clement Courbet <courbet@google.com>
A significant portion of __calc_delta() time is spent in the loop that shifts a u64 right one bit at a time until its upper 32 bits are clear. Use __builtin_clz() to compute the whole shift in one step instead of iterating (see the sketch below the diffstat).
This is ~7x faster on benchmarks.
Signed-off-by: Clement Courbet <courbet@google.com>
Signed-off-by: Josh Don <joshdon@google.com>
---
 kernel/sched/fair.c | 30 ++++++++++++++++++++++--------
 1 file changed, 22 insertions(+), 8 deletions(-)
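To illustrate the transformation the diff below makes, here is a minimal user-space sketch (not kernel code; normalize_loop()/normalize_clz() are made-up names, and plain __builtin_clz() is assumed adequate because unsigned int is 32 bits on the host, the case the patch's BUILTIN_CLZ32 wrapper generalizes):

/*
 * Standalone user-space sketch. Both helpers shift a u64 right until
 * its upper 32 bits are clear and return the shift count.
 */
#include <assert.h>
#include <stdint.h>

/* Before: shift right one bit at a time until the high half is empty. */
static int normalize_loop(uint64_t *fact)
{
	int shifted = 0;

	while (*fact >> 32) {
		*fact >>= 1;
		shifted++;
	}
	return shifted;
}

/* After: compute the whole shift at once from the leading zero count. */
static int normalize_clz(uint64_t *fact)
{
	uint32_t fact_hi = (uint32_t)(*fact >> 32);
	int fs;

	if (!fact_hi)
		return 0;
	/* 32 - clz(x) == fls(x), the number of significant bits in x. */
	fs = 32 - __builtin_clz(fact_hi);
	*fact >>= fs;
	return fs;
}

int main(void)
{
	uint64_t a = 0x123456789abcdef0ULL, b = a;

	assert(normalize_loop(&a) == normalize_clz(&b));
	assert(a == b);
	return 0;
}

The two helpers agree for every input: shifting by fls(fact_hi) lands the highest set bit exactly at position 31. The clz version replaces up to 32 loop iterations with a single leading-zero count, which compilers lower to one instruction (bsr/lzcnt on x86), consistent with the ~7x figure above.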
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8a8bd7b13634..dbd1ae203f7c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -214,6 +214,16 @@ static void __update_inv_weight(struct load_weight *lw)
 	lw->inv_weight = WMULT_CONST / w;
 }
 
+/*
+ * A __builtin_clz that handles a u32 value on architectures
+ * where unsigned int is narrower than 32 bits.
+ */
+#if (__SIZEOF_INT__ * 8 < 32)
+# define BUILTIN_CLZ32(v) __builtin_clzl(v)
+#else
+# define BUILTIN_CLZ32(v) __builtin_clz(v)
+#endif
+
 /*
  * delta_exec * weight / lw.weight
  *   OR
@@ -229,27 +239,31 @@ static void __update_inv_weight(struct load_weight *lw)
 static u64 __calc_delta(u64 delta_exec, unsigned long weight, struct load_weight *lw)
 {
 	u64 fact = scale_load_down(weight);
+	u32 fact_hi = (u32)(fact >> 32);
 	int shift = WMULT_SHIFT;
+	int fs;
 
 	__update_inv_weight(lw);
 
-	if (unlikely(fact >> 32)) {
-		while (fact >> 32) {
-			fact >>= 1;
-			shift--;
-		}
+	if (unlikely(fact_hi)) {
+		fs = 32 - BUILTIN_CLZ32(fact_hi);
+		shift -= fs;
+		fact >>= fs;
 	}
 
 	fact = mul_u32_u32(fact, lw->inv_weight);
 
-	while (fact >> 32) {
-		fact >>= 1;
-		shift--;
+	fact_hi = (u32)(fact >> 32);
+	if (fact_hi) {
+		fs = 32 - BUILTIN_CLZ32(fact_hi);
+		shift -= fs;
+		fact >>= fs;
 	}
 
 	return mul_u64_u32_shr(delta_exec, fact, shift);
 }
+#undef BUILTIN_CLZ32
 
 const struct sched_class fair_sched_class;
-- 
2.30.1.766.gb4fecdf3b7-goog
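For intuition about what the normalization buys, here is a rough user-space model of the fixed-point scheme __calc_delta() implements (an illustrative sketch, not the kernel code: scale_load_down() is omitted and the kernel helpers mul_u32_u32()/mul_u64_u32_shr() are open-coded with __int128, assumed available). delta_exec * weight / lw.weight is evaluated as (delta_exec * (weight * inv_weight)) >> shift with inv_weight = 2^32 / lw.weight; each normalization step shifts fact below 2^32 and deducts the same amount from shift, so fact always fits in 32 bits when it is multiplied.

/*
 * Rough user-space model of the __calc_delta() fixed-point scheme,
 * for intuition only; mirrors kernel/sched/fair.c but is NOT the
 * kernel code. lw_weight must be non-zero.
 */
#include <stdint.h>
#include <stdio.h>

#define WMULT_CONST	(~0U)	/* as in fair.c: 2^32 - 1 */
#define WMULT_SHIFT	32

static uint64_t calc_delta_model(uint64_t delta_exec, uint64_t weight,
				 uint64_t lw_weight)
{
	uint64_t fact = weight;
	uint32_t inv_weight = WMULT_CONST / lw_weight;
	uint32_t fact_hi = (uint32_t)(fact >> 32);
	int shift = WMULT_SHIFT;
	int fs;

	/* Normalize fact to 32 bits, paying for it with a smaller shift. */
	if (fact_hi) {
		fs = 32 - __builtin_clz(fact_hi);
		shift -= fs;
		fact >>= fs;
	}

	/* mul_u32_u32() equivalent: full 64-bit product of two u32s. */
	fact = (uint64_t)(uint32_t)fact * inv_weight;

	fact_hi = (uint32_t)(fact >> 32);
	if (fact_hi) {
		fs = 32 - __builtin_clz(fact_hi);
		shift -= fs;
		fact >>= fs;
	}

	/* mul_u64_u32_shr() equivalent via 128-bit arithmetic. */
	return (uint64_t)(((unsigned __int128)delta_exec * (uint32_t)fact) >> shift);
}

int main(void)
{
	/* A weight-1024 (nice-0) entity on a runqueue of total weight 3072. */
	uint64_t approx = calc_delta_model(3000000, 1024, 3072);
	uint64_t exact = 3000000ULL * 1024 / 3072;

	printf("approx=%llu exact=%llu\n",
	       (unsigned long long)approx, (unsigned long long)exact);
	return 0;
}

This should print approx=999999 exact=1000000: the fixed-point result lands within one unit of the exact division, the precision the inv_weight trick trades for avoiding a 64-bit divide on every call.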