From: Jisheng Zhang <Jisheng.Zhang@synaptics.com>
Subject: [PATCH v7 3/3] arm64: implement KPROBES_ON_FTRACE
Date: 2019-12-25
KPROBES_ON_FTRACE avoids much of the overhead of regular kprobes: it
eliminates the need for a trap, as well as the need to emulate or
single-step instructions.
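
For context (not part of this patch): with CONFIG_DYNAMIC_FTRACE_WITH_REGS,
each traced function begins with two compiler-emitted nops that ftrace
live-patches roughly as sketched below, so the kprobe fires from the ftrace
trampoline instead of via a BRK exception. The sketch is illustrative, not
taken from this patch:

_do_fork:
	mov	x9, x30			// patched nop #1: the lr saver
	bl	ftrace_regs_caller	// patched nop #2: the adjusted rec->ip
	...				// original function body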

Tested on the Berlin arm64 platform.
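
The test below assumes the usual ftrace and kprobes options are enabled;
with this patch applied, the resulting .config should end up containing the
following fragment (KPROBES_ON_FTRACE itself is def_bool y once its
dependencies are met):

CONFIG_FUNCTION_TRACER=y
CONFIG_DYNAMIC_FTRACE=y
CONFIG_DYNAMIC_FTRACE_WITH_REGS=y
CONFIG_KPROBES=y
CONFIG_KPROBES_ON_FTRACE=y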

~ # mount -t debugfs debugfs /sys/kernel/debug/
~ # cd /sys/kernel/debug/
/sys/kernel/debug # echo 'p _do_fork' > tracing/kprobe_events

before the patch:

/sys/kernel/debug # cat kprobes/list
ffffff801009fe28 k _do_fork+0x0 [DISABLED]

after the patch:

/sys/kernel/debug # cat kprobes/list
ffffff801009ff54 k _do_fork+0x0 [DISABLED][FTRACE]
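
The same probe can also be exercised from a module. Below is a minimal
sketch, not part of this patch: the module, its names, and its messages are
hypothetical; only _do_fork and the kprobes API come from the surrounding
context.

#include <linux/module.h>
#include <linux/kprobes.h>

static int demo_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("_do_fork hit, pc=%pS\n", (void *)instruction_pointer(regs));
	return 0;	/* let the probed instruction be emulated as a nop */
}

static struct kprobe kp = {
	.symbol_name	= "_do_fork",
	.pre_handler	= demo_pre,
};

static int __init demo_init(void)
{
	int ret = register_kprobe(&kp);

	if (ret)
		return ret;
	/* arch_check_ftrace_location() sets this flag on ftrace callsites */
	pr_info("probe is%s ftrace-based\n",
		(kp.flags & KPROBE_FLAG_FTRACE) ? "" : " not");
	return 0;
}

static void __exit demo_exit(void)
{
	unregister_kprobe(&kp);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

With KPROBES_ON_FTRACE enabled, the pre-handler runs from the
kprobe_ftrace_handler() added by this patch rather than from a BRK trap.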

Signed-off-by: Jisheng Zhang <Jisheng.Zhang@synaptics.com>
---
 .../debug/kprobes-on-ftrace/arch-support.txt |  2 +-
 arch/arm64/Kconfig                           |  1 +
 arch/arm64/include/asm/ftrace.h              |  1 +
 arch/arm64/kernel/probes/Makefile            |  1 +
 arch/arm64/kernel/probes/ftrace.c            | 78 +++++++++++++++++++
 5 files changed, 82 insertions(+), 1 deletion(-)
create mode 100644 arch/arm64/kernel/probes/ftrace.c

diff --git a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
index 4fae0464ddff..f9dd9dd91e0c 100644
--- a/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
+++ b/Documentation/features/debug/kprobes-on-ftrace/arch-support.txt
@@ -9,7 +9,7 @@
     |       alpha: | TODO |
     |         arc: | TODO |
     |         arm: | TODO |
-    |       arm64: | TODO |
+    |       arm64: |  ok  |
     |         c6x: | TODO |
     |        csky: | TODO |
     |       h8300: | TODO |
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index b1b4476ddb83..92b9882889ac 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -166,6 +166,7 @@ config ARM64
 	select HAVE_STACKPROTECTOR
 	select HAVE_SYSCALL_TRACEPOINTS
 	select HAVE_KPROBES
+	select HAVE_KPROBES_ON_FTRACE
 	select HAVE_KRETPROBES
 	select HAVE_GENERIC_VDSO
 	select IOMMU_DMA if IOMMU_SUPPORT
diff --git a/arch/arm64/include/asm/ftrace.h b/arch/arm64/include/asm/ftrace.h
index 91fa4baa1a93..875aeb839654 100644
--- a/arch/arm64/include/asm/ftrace.h
+++ b/arch/arm64/include/asm/ftrace.h
@@ -20,6 +20,7 @@

 /* The BL at the callsite's adjusted rec->ip */
 #define MCOUNT_INSN_SIZE	AARCH64_INSN_SIZE
+#define FTRACE_IP_EXTENSION	MCOUNT_INSN_SIZE
 
 #define FTRACE_PLT_IDX		0
 #define FTRACE_REGS_PLT_IDX	1
diff --git a/arch/arm64/kernel/probes/Makefile b/arch/arm64/kernel/probes/Makefile
index 8e4be92e25b1..4020cfc66564 100644
--- a/arch/arm64/kernel/probes/Makefile
+++ b/arch/arm64/kernel/probes/Makefile
@@ -4,3 +4,4 @@ obj-$(CONFIG_KPROBES)		+= kprobes.o decode-insn.o	\
 				   simulate-insn.o
 obj-$(CONFIG_UPROBES)		+= uprobes.o decode-insn.o	\
 				   simulate-insn.o
+obj-$(CONFIG_KPROBES_ON_FTRACE)	+= ftrace.o
diff --git a/arch/arm64/kernel/probes/ftrace.c b/arch/arm64/kernel/probes/ftrace.c
new file mode 100644
index 000000000000..0643aa2dacdb
--- /dev/null
+++ b/arch/arm64/kernel/probes/ftrace.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Dynamic Ftrace based Kprobes Optimization
+ *
+ * Copyright (C) Hitachi Ltd., 2012
+ * Copyright (C) 2019 Jisheng Zhang <jszhang@kernel.org>
+ *		      Synaptics Incorporated
+ */
+
+#include <linux/kprobes.h>
+
+/*
+ * In the arm64 FTRACE_WITH_REGS implementation, we patch two nop
+ * instructions: the lr saver and the bl to the ftrace entry. Both are
+ * claimed by ftrace, so we should allow probing on either instruction.
+ */
+int arch_check_ftrace_location(struct kprobe *p)
+{
+	if (ftrace_location((unsigned long)p->addr))
+		p->flags |= KPROBE_FLAG_FTRACE;
+	return 0;
+}
+
+/* Ftrace callback handler for kprobes -- called with preemption disabled */
+void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+			   struct ftrace_ops *ops, struct pt_regs *regs)
+{
+	bool lr_saver = false;
+	struct kprobe *p;
+	struct kprobe_ctlblk *kcb;
+
+	/* Preemption is disabled by ftrace */
+	p = get_kprobe((kprobe_opcode_t *)ip);
+	if (!p) {
+		p = get_kprobe((kprobe_opcode_t *)(ip - MCOUNT_INSN_SIZE));
+		if (unlikely(!p) || kprobe_disabled(p))
+			return;
+		lr_saver = true;
+	}
+
+	kcb = get_kprobe_ctlblk();
+	if (kprobe_running()) {
+		kprobes_inc_nmissed_count(p);
+	} else {
+		unsigned long orig_ip = instruction_pointer(regs);
+
+		if (lr_saver)
+			ip -= MCOUNT_INSN_SIZE;
+		instruction_pointer_set(regs, ip);
+		__this_cpu_write(current_kprobe, p);
+		kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+		if (!p->pre_handler || !p->pre_handler(p, regs)) {
+			/*
+			 * Emulate single-stepping (and recover regs->pc) as
+			 * if there were a nop at the probe address.
+			 */
+			instruction_pointer_set(regs,
+				(unsigned long)p->addr + MCOUNT_INSN_SIZE);
+			if (unlikely(p->post_handler)) {
+				kcb->kprobe_status = KPROBE_HIT_SSDONE;
+				p->post_handler(p, regs, 0);
+			}
+			instruction_pointer_set(regs, orig_ip);
+		}
+		/*
+		 * If pre_handler returns !0, it changed regs->pc, so we must
+		 * skip emulating the post_handler for it.
+		 */
+		__this_cpu_write(current_kprobe, NULL);
+	}
+}
+NOKPROBE_SYMBOL(kprobe_ftrace_handler);
+
+int arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+	p->ainsn.api.insn = NULL;
+	return 0;
+}
--
2.24.1