From: Tom Zanussi <tom.zanussi@linux.intel.com>
Subject: [RFC][PATCH 06/10] tracing: Add kprobe/uprobe support for TRACE_EVENT eBPF progs
Date: Fri, 12 Feb 2016
Add a call at the end of kprobe/kretprobe/uprobe probe functions for
BPF_PROG_TYPE_TRACE_EVENT programs. These new calls are made after
the trace event fields have been stored into the trace buffer, where
they can then be accessed by the eBPF program.
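
For illustration only, here is a rough sketch of what such a program
might look like on the BPF side, assuming the struct
trace_event_context ({ call, entry }) introduced earlier in this
series is what the program receives as its context; the entry layout,
section name, and probe name below are assumptions, not part of this
patch:

/*
 * Hypothetical sketch, not part of this patch: a BPF-C program meant
 * to be loaded as BPF_PROG_TYPE_TRACE_EVENT and attached to a kprobe
 * perf event.  The context mirrors the struct trace_event_context
 * used in the hunks below ({ call, entry }); the entry layout and the
 * section name are assumptions for illustration only.
 */
struct trace_event_context {
	void *call;			/* struct trace_event_call * */
	void *entry;			/* trace buffer entry, fields already stored */
};

struct kprobe_entry {			/* assumed layout of the stored entry */
	unsigned long long common;	/* struct trace_entry header */
	unsigned long ip;		/* set by kprobe_perf_func() before this runs */
};

__attribute__((section("trace_event/kprobes/my_probe"), used))
int filter_my_probe(struct trace_event_context *ctx)
{
	struct kprobe_entry *e = ctx->entry;

	/* Returning 0 drops the event before perf_trace_buf_submit(). */
	return e->ip != 0;
}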

The existing BPF_PROG_TYPE_KPROBE calls at the beginning of each
function remain, but the program type now needs to be checked so that
each program is invoked only at the call site matching its type.
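
As a rough user-space sketch of why that check is needed: both program
types are attached to the kprobe perf event through the same
PERF_EVENT_IOC_SET_BPF ioctl, so the probe function is the first place
the two can be told apart.  BPF_PROG_TYPE_TRACE_EVENT is introduced
earlier in this series, the kprobe_event_fd from perf_event_open() is
assumed, error handling is omitted, and bpf_prog_load() /
attach_trace_event_prog() are helper names local to this example:

/* Hypothetical user-space sketch, not part of this patch. */
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <linux/bpf.h>
#include <linux/perf_event.h>

static int bpf_prog_load(enum bpf_prog_type type, const struct bpf_insn *insns,
			 int insn_cnt, const char *license)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.prog_type = type;
	attr.insns = (__u64)(unsigned long)insns;
	attr.insn_cnt = insn_cnt;
	attr.license = (__u64)(unsigned long)license;

	return syscall(__NR_bpf, BPF_PROG_LOAD, &attr, sizeof(attr));
}

int attach_trace_event_prog(int kprobe_event_fd)
{
	/* "r0 = 1; exit" -- keep every event; a real prog would inspect ctx. */
	struct bpf_insn prog[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K, .dst_reg = BPF_REG_0, .imm = 1 },
		{ .code = BPF_JMP | BPF_EXIT },
	};
	int prog_fd;

	/* BPF_PROG_TYPE_TRACE_EVENT is added earlier in this series. */
	prog_fd = bpf_prog_load(BPF_PROG_TYPE_TRACE_EVENT, prog, 2, "GPL");
	if (prog_fd < 0)
		return -1;

	/*
	 * Same attach point as BPF_PROG_TYPE_KPROBE programs, hence the
	 * prog->type checks in the probe functions below.
	 */
	return ioctl(kprobe_event_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
}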

Signed-off-by: Tom Zanussi <tom.zanussi@linux.intel.com>
---
kernel/trace/trace_kprobe.c | 20 ++++++++++++++++++--
kernel/trace/trace_uprobe.c | 11 ++++++++++-
2 files changed, 28 insertions(+), 3 deletions(-)

diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index c995644..34231f1 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -19,6 +19,8 @@

 #include <linux/module.h>
 #include <linux/uaccess.h>
+#include <linux/filter.h>
+#include <linux/bpf.h>
 
 #include "trace_probe.h"
 
@@ -1129,7 +1131,8 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
 	int size, __size, dsize;
 	int rctx;
 
-	if (prog && !trace_call_bpf(prog, regs))
+	if (prog && prog->type == BPF_PROG_TYPE_KPROBE &&
+	    !trace_call_bpf(prog, regs))
 		return;
 
 	head = this_cpu_ptr(call->perf_events);
@@ -1148,6 +1151,12 @@ kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
 	entry->ip = (unsigned long)tk->rp.kp.addr;
 	memset(&entry[1], 0, dsize);
 	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
+	if (prog && prog->type == BPF_PROG_TYPE_TRACE_EVENT) {
+		struct trace_event_context ctx = { call, entry };
+
+		if (!trace_call_bpf(prog, &ctx))
+			return;
+	}
 	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
 }
 NOKPROBE_SYMBOL(kprobe_perf_func);
@@ -1164,7 +1173,8 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
 	int size, __size, dsize;
 	int rctx;
 
-	if (prog && !trace_call_bpf(prog, regs))
+	if (prog && prog->type == BPF_PROG_TYPE_KPROBE &&
+	    !trace_call_bpf(prog, regs))
 		return;
 
 	head = this_cpu_ptr(call->perf_events);
@@ -1183,6 +1193,12 @@ kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
 	entry->func = (unsigned long)tk->rp.kp.addr;
 	entry->ret_ip = (unsigned long)ri->ret_addr;
 	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
+	if (prog && prog->type == BPF_PROG_TYPE_TRACE_EVENT) {
+		struct trace_event_context ctx = { call, entry };
+
+		if (!trace_call_bpf(prog, &ctx))
+			return;
+	}
 	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
 }
 NOKPROBE_SYMBOL(kretprobe_perf_func);
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index d2f6d0b..990bc71 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -23,6 +23,8 @@
 #include <linux/uprobes.h>
 #include <linux/namei.h>
 #include <linux/string.h>
+#include <linux/filter.h>
+#include <linux/bpf.h>
 
 #include "trace_probe.h"
 
@@ -1116,7 +1118,8 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
 	int size, esize;
 	int rctx;
 
-	if (prog && !trace_call_bpf(prog, regs))
+	if (prog && prog->type == BPF_PROG_TYPE_KPROBE &&
+	    !trace_call_bpf(prog, regs))
 		return;
 
 	esize = SIZEOF_TRACE_ENTRY(is_ret_probe(tu));
@@ -1152,6 +1155,12 @@ static void __uprobe_perf_func(struct trace_uprobe *tu,
 		memset(data + len, 0, size - esize - len);
 	}
 
+	if (prog && prog->type == BPF_PROG_TYPE_TRACE_EVENT) {
+		struct trace_event_context ctx = { call, entry };
+
+		if (!trace_call_bpf(prog, &ctx))
+			goto out;
+	}
 	perf_trace_buf_submit(entry, size, rctx, 0, 1, regs, head, NULL);
  out:
 	preempt_enable();
--
1.9.3