From: yupeng0921@gmail.com
Subject: [ftrace-bpf 4/5] enable bpf filter for graph trace on x86-64
Date: 12 Nov 2017
Define FTRACE_BPF_FILTER when CONFIG_FTRACE_BPF_FILTER is enabled, and
add struct ftrace_regs. struct ftrace_regs is similar to the pt_regs
used by kprobes, but ftrace does not save the full register context,
only the caller-saved registers, so struct ftrace_regs stores just
those registers.

Signed-off-by: yupeng0921@gmail.com
---
arch/x86/include/asm/ftrace.h | 22 ++++++++++++++++++++++
arch/x86/kernel/ftrace.c | 15 +++++++++++++++
arch/x86/kernel/ftrace_64.S | 1 +
3 files changed, 38 insertions(+)
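
Not part of the patch, for reviewers: a minimal sketch of how a
graph-entry callback could consume the new ctx field. It assumes
<linux/ftrace.h> with this series applied (trace->ctx is the member
wired up below), plus the x86-64 SysV convention that the first
integer argument arrives in %rdi; the callback name is hypothetical.

/*
 * Illustration only: skip tracing calls whose first argument
 * (%rdi on x86-64) is zero, by reading the caller-saved registers
 * that ftrace preserved for us.
 */
static int example_graph_entry(struct ftrace_graph_ent *trace)
{
#ifdef FTRACE_BPF_FILTER
	if (trace->ctx && trace->ctx->rdi == 0)
		return 0;	/* do not trace this call */
#endif
	return 1;		/* trace as usual */
}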

diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 09ad885..9a5bffc 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -11,6 +11,28 @@
#endif
#define MCOUNT_INSN_SIZE 5 /* sizeof mcount call */

+#ifndef __i386__
+#ifdef CONFIG_FTRACE_BPF_FILTER
+#define FTRACE_BPF_FILTER
+#ifndef __ASSEMBLY__
+/*
+ * The order is exactly the same as
+ * arch/x86/entry/calling.h
+ */
+struct ftrace_regs {
+	unsigned long r9;
+	unsigned long r8;
+	unsigned long rax;
+	unsigned long rcx;
+	unsigned long rdx;
+	unsigned long rsi;
+	unsigned long rdi;
+};
+#endif
+#endif /* CONFIG_FTRACE_BPF_FILTER */
+
+#endif
+
#ifdef CONFIG_DYNAMIC_FTRACE
#define ARCH_SUPPORTS_FTRACE_OPS 1
#endif
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 01ebcb6..d190534 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -896,8 +896,14 @@ static void *addr_from_call(void *ptr)
return ptr + MCOUNT_INSN_SIZE + calc.offset;
}

+#ifdef FTRACE_BPF_FILTER
+void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
+			   unsigned long frame_pointer,
+			   struct ftrace_regs *ctx);
+#else
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
unsigned long frame_pointer);
+#endif

/*
* If the ops->trampoline was not allocated, then it probably
@@ -989,8 +995,14 @@ int ftrace_disable_ftrace_graph_caller(void)
* Hook the return address and push it in the stack of return addrs
* in current thread info.
*/
+#ifdef FTRACE_BPF_FILTER
+void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
+			   unsigned long frame_pointer,
+			   struct ftrace_regs *ctx)
+#else
void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,
unsigned long frame_pointer)
+#endif
{
unsigned long old;
int faulted;
@@ -1048,6 +1060,9 @@ void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent,

trace.func = self_addr;
trace.depth = current->curr_ret_stack + 1;
+#ifdef FTRACE_BPF_FILTER
+	trace.ctx = ctx;
+#endif

/* Only trace if the calling function expects to */
if (!ftrace_graph_entry(&trace)) {
diff --git a/arch/x86/kernel/ftrace_64.S b/arch/x86/kernel/ftrace_64.S
index c832291..5e51b93 100644
--- a/arch/x86/kernel/ftrace_64.S
+++ b/arch/x86/kernel/ftrace_64.S
@@ -108,6 +108,7 @@ EXPORT_SYMBOL(mcount)
movq MCOUNT_REG_SIZE-8(%rsp), %rdx
movq %rdx, RBP(%rsp)

+	leaq R9(%rsp), %rcx
/* Copy the parent address into %rsi (second parameter) */
#ifdef CC_USING_FENTRY
movq MCOUNT_REG_SIZE+8+\added(%rsp), %rsi
--
2.7.4
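
Reviewer note, not part of the patch: the new "leaq R9(%rsp), %rcx" in
ftrace_graph_caller passes the address of the register block that
save_mcount_regs spilled on the stack as the fourth C argument, so
prepare_ftrace_return() sees that block as its struct ftrace_regs *ctx.
R9(%rsp) is the lowest of the saved caller-saved slots, which is why
struct ftrace_regs lists r9 first; the layout below illustrates that
reading of the asm.

	/*
	 * Stack after save_mcount_regs, ascending addresses
	 * (illustration only):
	 *
	 *   R9(%rsp)  -> ftrace_regs.r9   <-- %rcx points here
	 *   R8(%rsp)  -> ftrace_regs.r8
	 *   RAX(%rsp) -> ftrace_regs.rax
	 *   RCX(%rsp) -> ftrace_regs.rcx
	 *   RDX(%rsp) -> ftrace_regs.rdx
	 *   RSI(%rsp) -> ftrace_regs.rsi
	 *   RDI(%rsp) -> ftrace_regs.rdi
	 */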