Subject: [PATCH v3 2/2] minmax: clamp more efficiently by avoiding extra comparison
Currently the clamp algorithm does:

	if (val > hi)
		val = hi;
	if (val < lo)
		val = lo;

But since hi >= lo by definition, a value that was clamped down to hi can
never also be below lo, so this can be made more efficient with:

	if (val > hi)
		val = hi;
	else if (val < lo)
		val = lo;

So fix up the clamp and clamp_t functions to do this, adding the same
argument checking as for min and min_t.
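
As a quick sanity check that the two formulations agree for every input
whenever lo <= hi, here is a minimal userspace sketch (the helper names
are made up for this example and are not part of the patch):

#include <assert.h>

/* Original formulation: always performs both comparisons. */
static int clamp_two_cmps(int val, int lo, int hi)
{
	if (val > hi)
		val = hi;
	if (val < lo)
		val = lo;
	return val;
}

/* New formulation: the low-bound check only runs when val <= hi. */
static int clamp_else_if(int val, int lo, int hi)
{
	if (val > hi)
		val = hi;
	else if (val < lo)
		val = lo;
	return val;
}

int main(void)
{
	for (int lo = -3; lo <= 3; ++lo)
		for (int hi = lo; hi <= 3; ++hi)
			for (int val = -5; val <= 5; ++val)
				assert(clamp_two_cmps(val, lo, hi) ==
				       clamp_else_if(val, lo, hi));
	return 0;
}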

For simple cases, code generation on x86_64 and aarch64 stays about the
same:

before:
	cmp edi, edx
	mov eax, esi
	cmova edi, edx
	cmp edi, esi
	cmovnb eax, edi
	ret
after:
	cmp edi, esi
	mov eax, edx
	cmovnb esi, edi
	cmp edi, edx
	cmovb eax, esi
	ret

before:
	cmp w0, w2
	csel w8, w0, w2, lo
	cmp w8, w1
	csel w0, w8, w1, hi
	ret
after:
	cmp w0, w1
	csel w8, w0, w1, hi
	cmp w0, w2
	csel w0, w8, w2, lo
	ret
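
(For reference, these listings can be regenerated from a standalone
sketch along the lines of the one below, compiled at -O2 with -S for
each target; clamp_demo is just a placeholder name, the x86_64 output
above is Intel syntax (e.g. gcc -O2 -S -masm=intel), and exact
instruction selection will of course vary by compiler and version.
Swapping the body for the two-if form reproduces the "before" numbers.)

/* Standalone sketch of the "after" formulation, for inspecting the
 * generated code; not part of the patch itself. */
unsigned int clamp_demo(unsigned int val, unsigned int lo, unsigned int hi)
{
	if (val > hi)
		val = hi;
	else if (val < lo)
		val = lo;
	return val;
}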

On MIPS64, however, code generation improves by removing the arithmetic
in the second branch:

before:
	sltu $3,$6,$4
	bne $3,$0,.L2
	move $2,$6

	move $2,$4
.L2:
	sltu $3,$2,$5
	bnel $3,$0,.L7
	move $2,$5

.L7:
	jr $31
	nop
after:
	sltu $3,$4,$6
	beq $3,$0,.L13
	move $2,$6

	sltu $3,$4,$5
	bne $3,$0,.L12
	move $2,$4

.L13:
	jr $31
	nop

.L12:
	jr $31
	move $2,$5

For more complex cases with surrounding code, the effects are a bit
more complicated. For example, consider this simplified version of
timestamp_truncate() from fs/inode.c on x86_64:

struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;
	unsigned int gran = sb->s_time_gran;

	t.tv_sec = clamp(t.tv_sec, sb->s_time_min, sb->s_time_max);
	if (t.tv_sec == sb->s_time_max || t.tv_sec == sb->s_time_min)
		t.tv_nsec = 0;
	return t;
}

before:
	mov r8, rdx
	mov rdx, rsi
	mov rcx, QWORD PTR [r8]
	mov rax, QWORD PTR [rcx+8]
	mov rcx, QWORD PTR [rcx+16]
	cmp rax, rdi
	mov r8, rcx
	cmovge rdi, rax
	cmp rdi, rcx
	cmovle r8, rdi
	cmp rax, r8
	je .L4
	cmp rdi, rcx
	jge .L4
	mov rax, r8
	ret
.L4:
	xor edx, edx
	mov rax, r8
	ret

after:
	mov rax, QWORD PTR [rdx]
	mov rdx, QWORD PTR [rax+8]
	mov rax, QWORD PTR [rax+16]
	cmp rax, rdi
	jg .L6
	mov r8, rax
	xor edx, edx
.L2:
	mov rax, r8
	ret
.L6:
	cmp rdx, rdi
	mov r8, rdi
	cmovge r8, rdx
	cmp rax, r8
	je .L4
	xor eax, eax
	cmp rdx, rdi
	cmovl rax, rsi
	mov rdx, rax
	mov rax, r8
	ret
.L4:
	xor edx, edx
	jmp .L2

In this case, we unfortunately gain a branch, because the compiler's
replacement axioms no longer apply as cleanly.

So, all in all, this change is a bit of a mixed bag.
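
(For anyone who wants to poke at the timestamp_truncate() example
outside the kernel tree, the following self-contained approximation
compiles on its own; the stand-in struct definitions are much smaller
than the real fs/inode.c types, the unused gran local is dropped, and a
plain ternary macro stands in for the kernel's clamp(), so register
allocation will differ slightly from the listings above.)

/* Stand-in definitions; the real kernel structs are much larger. */
struct timespec64 {
	long long tv_sec;
	long tv_nsec;
};

struct super_block {
	unsigned int s_time_gran;
	long long s_time_min;
	long long s_time_max;
};

struct inode {
	struct super_block *i_sb;
};

/* Plain ternary stand-in for the kernel's clamp(); evaluates its
 * arguments more than once, which is fine for these simple operands. */
#define clamp(val, lo, hi) \
	((val) >= (hi) ? (hi) : ((val) <= (lo) ? (lo) : (val)))

struct timespec64 timestamp_truncate(struct timespec64 t, struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	t.tv_sec = clamp(t.tv_sec, sb->s_time_min, sb->s_time_max);
	if (t.tv_sec == sb->s_time_max || t.tv_sec == sb->s_time_min)
		t.tv_nsec = 0;
	return t;
}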

Cc: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Kees Cook <keescook@chromium.org>
Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
---
include/linux/minmax.h | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/include/linux/minmax.h b/include/linux/minmax.h
index 293a66ad7830..9e7cc5319676 100644
--- a/include/linux/minmax.h
+++ b/include/linux/minmax.h
@@ -38,7 +38,7 @@
 		__cmp_once(x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y), op))
 
 #define __clamp(val, lo, hi) \
-	__cmp(__cmp(val, lo, >), hi, <)
+	((val) >= (hi) ? (hi) : ((val) <= (lo) ? (lo) : (val)))
 
 #define __clamp_once(val, lo, hi, unique_val, unique_lo, unique_hi) ({ \
 	typeof(val) unique_val = (val); \
--
2.37.3