Subject: [tip: x86/core] x86/ibt,ftrace: Make function-graph play nice
The following commit has been merged into the x86/core branch of tip:

Commit-ID: 069cfa7285137030111dc78a3fcec091371da514
Gitweb: https://git.kernel.org/tip/069cfa7285137030111dc78a3fcec091371da514
Author: Peter Zijlstra <peterz@infradead.org>
AuthorDate: Tue, 08 Mar 2022 16:30:31 +01:00
Committer: Peter Zijlstra <peterz@infradead.org>
CommitterDate: Tue, 08 Mar 2022 23:53:32 +01:00

x86/ibt,ftrace: Make function-graph play nice

The return trampoline must not use an indirect branch to return; while
that keeps the RSB balanced, it is fundamentally incompatible with IBT.
Instead, use a retpoline-like ROP gadget that defeats IBT while still
not unbalancing the RSB.
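
For illustration, this is a minimal sketch of the gadget pattern
(stripped of the objtool annotations the actual hunk below carries),
assuming %rdi already holds the original return address:

        call    1f              # push the address of the int3; add an RSB entry
        int3                    # stale RSB speculation lands here and is trapped
1:
        mov     %rdi, (%rsp)    # overwrite the pushed slot with the real target
        ret                     # pop the RSB entry and jump to *%rdi

The call/ret pair keeps the RSB balanced, and because the transfer is a
RET rather than an indirect JMP, IBT does not demand an ENDBR at the
destination.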

And since ftrace_stub is no longer a plain RET, don't use it as the
source to copy from. Since RET is a trivial instruction, poke it in
directly.
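
For reference, both opcodes are single bytes (RET_INSN_OPCODE is 0xc3
and INT3_INSN_OPCODE is 0xcc, per arch/x86/include/asm/text-patching.h),
so a plain memcpy() suffices; a minimal sketch of what the new code
writes at the end of the trampoline:

        /* Terminate the trampoline with "ret; int3" */
        unsigned const char retq[] = {
                0xc3,   /* RET_INSN_OPCODE: one-byte near return    */
                0xcc,   /* INT3_INSN_OPCODE: pad / speculation stop */
        };
        memcpy(ip, retq, RET_SIZE);     /* buffer just allocated, no
                                           fault-safe copy needed   */

The int3 is never executed; it mirrors the "ret; int3" pattern the SLS
mitigation emits, acting only as padding and a speculation stop.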

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
Link: https://lore.kernel.org/r/20220308154318.347296408@infradead.org
---
 arch/x86/kernel/ftrace.c    |  9 ++-------
 arch/x86/kernel/ftrace_64.S | 21 +++++++++++++++++----
 2 files changed, 19 insertions(+), 11 deletions(-)

diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 7cc540e..1e31c7d 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -316,12 +316,12 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
         unsigned long offset;
         unsigned long npages;
         unsigned long size;
-        unsigned long retq;
         unsigned long *ptr;
         void *trampoline;
         void *ip;
         /* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
         unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
+        unsigned const char retq[] = { RET_INSN_OPCODE, INT3_INSN_OPCODE };
         union ftrace_op_code_union op_ptr;
         int ret;

@@ -359,12 +359,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
                 goto fail;
 
         ip = trampoline + size;
-
-        /* The trampoline ends with ret(q) */
-        retq = (unsigned long)ftrace_stub;
-        ret = copy_from_kernel_nofault(ip, (void *)retq, RET_SIZE);
-        if (WARN_ON(ret < 0))
-                goto fail;
+        memcpy(ip, retq, RET_SIZE);
 
         /* No need to test direct calls on created trampolines */
         if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index 11ac028..e32b5cd 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -176,10 +176,10 @@ SYM_FUNC_END(ftrace_caller);
 SYM_FUNC_START(ftrace_epilogue)
 /*
  * This is weak to keep gas from relaxing the jumps.
- * It is also used to copy the RET for trampolines.
  */
 SYM_INNER_LABEL_ALIGN(ftrace_stub, SYM_L_WEAK)
         UNWIND_HINT_FUNC
+        ENDBR
         RET
 SYM_FUNC_END(ftrace_epilogue)

@@ -284,6 +284,7 @@ SYM_FUNC_START(__fentry__)
         jnz trace
 
 SYM_INNER_LABEL(ftrace_stub, SYM_L_GLOBAL)
+        ENDBR
         RET
 
 trace:
@@ -307,7 +308,7 @@ EXPORT_SYMBOL(__fentry__)
 
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
 SYM_FUNC_START(return_to_handler)
-        subq $24, %rsp
+        subq $16, %rsp
 
         /* Save the return values */
         movq %rax, (%rsp)
@@ -319,7 +320,19 @@ SYM_FUNC_START(return_to_handler)
         movq %rax, %rdi
         movq 8(%rsp), %rdx
         movq (%rsp), %rax
-        addq $24, %rsp
-        JMP_NOSPEC rdi
+
+        addq $16, %rsp
+        /*
+         * Jump back to the old return address. This cannot be JMP_NOSPEC rdi
+         * since IBT would demand that it contain ENDBR, which simply isn't so
+         * for return addresses. Use a retpoline here to keep the RSB balanced.
+         */
+        ANNOTATE_INTRA_FUNCTION_CALL
+        call .Ldo_rop
+        int3
+.Ldo_rop:
+        mov %rdi, (%rsp)
+        UNWIND_HINT_FUNC
+        RET
 SYM_FUNC_END(return_to_handler)
 #endif