Subject: [PATCH 3.16 106/127] sparc: bpf_jit: fix loads from negative offsets
3.16-stable review patch.  If anyone has any objections, please let me know.

------------------

From: Alexei Starovoitov <ast@plumgrid.com>

[ Upstream commit 35607b02dbef304fa5037236a3b43c1d8ab2aa52 ]

- fix BPF_LD|ABS|IND loads from negative offsets:
make sure to sign extend the lower 32 bits of the 64-bit register
before calling C helpers from JITed code; otherwise the 'int k'
argument of bpf_internal_load_pointer_neg_helper() will be
added as a large unsigned integer, causing the packet size
check to trigger and abort the program.
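
A minimal user-space C sketch of the failure mode (illustration only, not
kernel code; -0x100000 is the classic BPF SKF_NET_OFF value): if only the
low 32 bits of the offset register are valid, the C callee sees a huge
positive offset instead of a small negative one.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Negative offset as it sits in a 64-bit register with only the
	 * low 32 bits valid (zero extended): 0x00000000fff00000.
	 */
	uint64_t reg = (uint32_t)-0x100000;

	long long without_sext = (long long)reg;   /* ~4 GB positive offset */
	long long with_sext = (int32_t)reg;        /* -1048576, as intended */

	printf("zero extended: %lld\n", without_sext);
	printf("sign extended: %lld\n", with_sext);
	return 0;
}

This is the extension that the added 'sra reg, 0, reg' (SIGN_EXTEND) performs
on the %o1 offset register before the call into C.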

It's worth noting that the JITed code for 'A = A op K' affects the
upper 32 bits differently depending on whether K fits in simm13:
small constants are sign extended, whereas large constants are
stored in a temp register and zero extended. That is OK, and we
don't have to pay the penalty of sign extension for every sethi,
since all classic BPF instructions have 32-bit semantics and we only
need to set the correct upper bits when transitioning from JITed
code into C.
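
As a rough illustration (plain user-space C, not the JIT itself) of why the
differing upper halves are harmless: however the constant was materialized,
the low 32 bits that classic BPF semantics operate on are identical, and
only the transition into C needs the explicit extension.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	int32_t a = -5;	/* any negative 32-bit accumulator value */

	uint64_t via_simm13 = (uint64_t)(int64_t)a;  /* sign extended: upper bits set */
	uint64_t via_sethi  = (uint64_t)(uint32_t)a; /* zero extended: upper bits clear */

	assert(via_simm13 != via_sethi);                     /* upper 32 bits differ */
	assert((uint32_t)via_simm13 == (uint32_t)via_sethi); /* low 32 bits agree */
	return 0;
}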

- although the instructions 'A &= 0' and 'A *= 0' are odd, the JIT
compiler should not optimize them out
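
A tiny sketch (hypothetical user-space C, mirroring the intent of the
emit_alu_K change below) of why K == 0 cannot be skipped unconditionally:
it is the identity for most ALU ops, but AND and MUL must still force the
accumulator to zero.

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t A = 0xdeadbeef;

	assert((A + 0) == A);	/* K == 0 is a no-op for add/or/xor/... */
	assert((A | 0) == A);
	assert((A & 0) == 0);	/* ...but 'A &= 0' must zero A */
	assert((A * 0) == 0);	/* ...and so must 'A *= 0' */
	return 0;
}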

Signed-off-by: Alexei Starovoitov <ast@plumgrid.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
---
arch/sparc/net/bpf_jit_asm.S | 3 +++
arch/sparc/net/bpf_jit_comp.c | 2 +-
2 files changed, 4 insertions(+), 1 deletion(-)

--- a/arch/sparc/net/bpf_jit_asm.S
+++ b/arch/sparc/net/bpf_jit_asm.S
@@ -6,10 +6,12 @@
#define SAVE_SZ 176
#define SCRATCH_OFF STACK_BIAS + 128
#define BE_PTR(label) be,pn %xcc, label
+#define SIGN_EXTEND(reg) sra reg, 0, reg
#else
#define SAVE_SZ 96
#define SCRATCH_OFF 72
#define BE_PTR(label) be label
+#define SIGN_EXTEND(reg)
#endif

#define SKF_MAX_NEG_OFF (-0x200000) /* SKF_LL_OFF from filter.h */
@@ -135,6 +137,7 @@ bpf_slow_path_byte_msh:
save %sp, -SAVE_SZ, %sp; \
mov %i0, %o0; \
mov r_OFF, %o1; \
+ SIGN_EXTEND(%o1); \
call bpf_internal_load_pointer_neg_helper; \
mov (LEN), %o2; \
mov %o0, r_TMP; \
--- a/arch/sparc/net/bpf_jit_comp.c
+++ b/arch/sparc/net/bpf_jit_comp.c
@@ -184,7 +184,7 @@ do { \
*/
#define emit_alu_K(OPCODE, K) \
do { \
- if (K) { \
+ if (K || OPCODE == AND || OPCODE == MUL) { \
unsigned int _insn = OPCODE; \
_insn |= RS1(r_A) | RD(r_A); \
if (is_simm13(K)) { \


