Subject: [PATCH 1/3] perf/ftrace: Revert ("perf/ftrace: Fix double traces of perf on ftrace:function")
Revert commit:

75e8387685f6 ("perf/ftrace: Fix double traces of perf on ftrace:function")

The reason I instantly stumbled over that patch is that it only addresses the
ftrace situation and doesn't mention the other _5_ places that use this
interface. It doesn't explain why those callers don't have the problem and,
if they don't, why their solution doesn't work for ftrace.

It doesn't work for ftrace either, but this patch is just putting more duct tape on.
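
With the revert in place, perf_tp_event() goes back to taking only the hlist
head and delivering the sample to every event hashed on it, as the
kernel/events/core.c hunk below restores. Condensed, the restored delivery
loop looks like this (a sketch of the relevant lines only, not the complete
function):

	hlist_for_each_entry_rcu(event, head, hlist_entry) {
		/* every event on the per-CPU hlist gets to see the sample */
		if (perf_tp_event_match(event, &data, regs))
			perf_swevent_event(event, count, &data, regs);
	}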

Cc: Zhou Chengming <zhouchengming1@huawei.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Steven Rostedt (VMware) <rostedt@goodmis.org>
Cc: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 include/linux/perf_event.h      |  2 +-
 include/linux/trace_events.h    |  4 ++--
 kernel/events/core.c            | 13 ++++---------
 kernel/trace/trace_event_perf.c |  4 +---
 kernel/trace/trace_kprobe.c     |  4 ++--
 kernel/trace/trace_syscalls.c   |  4 ++--
 kernel/trace/trace_uprobe.c     |  2 +-
 7 files changed, 13 insertions(+), 20 deletions(-)

--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -1169,7 +1169,7 @@ extern void perf_event_init(void);
 extern void perf_tp_event(u16 event_type, u64 count, void *record,
 			  int entry_size, struct pt_regs *regs,
 			  struct hlist_head *head, int rctx,
-			  struct task_struct *task, struct perf_event *event);
+			  struct task_struct *task);
 extern void perf_bp_event(struct perf_event *event, void *data);
 
 #ifndef perf_misc_flags
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -507,9 +507,9 @@ void perf_trace_run_bpf_submit(void *raw
 static inline void
 perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
 		      u64 count, struct pt_regs *regs, void *head,
-		      struct task_struct *task, struct perf_event *event)
+		      struct task_struct *task)
 {
-	perf_tp_event(type, count, raw_data, size, regs, head, rctx, task, event);
+	perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
 }
 #endif
 
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7873,15 +7873,16 @@ void perf_trace_run_bpf_submit(void *raw
 		}
 	}
 	perf_tp_event(call->event.type, count, raw_data, size, regs, head,
-		      rctx, task, NULL);
+		      rctx, task);
 }
 EXPORT_SYMBOL_GPL(perf_trace_run_bpf_submit);
 
 void perf_tp_event(u16 event_type, u64 count, void *record, int entry_size,
 		   struct pt_regs *regs, struct hlist_head *head, int rctx,
-		   struct task_struct *task, struct perf_event *event)
+		   struct task_struct *task)
 {
 	struct perf_sample_data data;
+	struct perf_event *event;
 
 	struct perf_raw_record raw = {
 		.frag = {
@@ -7895,15 +7896,9 @@ void perf_tp_event(u16 event_type, u64 c
 
 	perf_trace_buf_update(record, event_type);
 
-	/* Use the given event instead of the hlist */
-	if (event) {
+	hlist_for_each_entry_rcu(event, head, hlist_entry) {
 		if (perf_tp_event_match(event, &data, regs))
 			perf_swevent_event(event, count, &data, regs);
-	} else {
-		hlist_for_each_entry_rcu(event, head, hlist_entry) {
-			if (perf_tp_event_match(event, &data, regs))
-				perf_swevent_event(event, count, &data, regs);
-		}
 	}
 
 	/*
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -306,7 +306,6 @@ static void
 perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
 			  struct ftrace_ops *ops, struct pt_regs *pt_regs)
 {
-	struct perf_event *event;
 	struct ftrace_entry *entry;
 	struct hlist_head *head;
 	struct pt_regs regs;
@@ -330,9 +329,8 @@ perf_ftrace_function_call(unsigned long
 
 	entry->ip = ip;
 	entry->parent_ip = parent_ip;
-	event = container_of(ops, struct perf_event, ftrace_ops);
 	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
-			      1, &regs, head, NULL, event);
+			      1, &regs, head, NULL);
 
 #undef ENTRY_SIZE
 }
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1200,7 +1200,7 @@ kprobe_perf_func(struct trace_kprobe *tk
 	memset(&entry[1], 0, dsize);
 	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
 	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
-			      head, NULL, NULL);
+			      head, NULL);
 }
 NOKPROBE_SYMBOL(kprobe_perf_func);
 
@@ -1236,7 +1236,7 @@ kretprobe_perf_func(struct trace_kprobe
 	entry->ret_ip = (unsigned long)ri->ret_addr;
 	store_trace_args(sizeof(*entry), &tk->tp, regs, (u8 *)&entry[1], dsize);
 	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
-			      head, NULL, NULL);
+			      head, NULL);
 }
 NOKPROBE_SYMBOL(kretprobe_perf_func);
 #endif /* CONFIG_PERF_EVENTS */
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -622,7 +622,7 @@ static void perf_syscall_enter(void *ign
 
 	perf_trace_buf_submit(rec, size, rctx,
 			      sys_data->enter_event->event.type, 1, regs,
-			      head, NULL, NULL);
+			      head, NULL);
 }
 
 static int perf_sysenter_enable(struct trace_event_call *call)
@@ -716,7 +716,7 @@ static void perf_syscall_exit(void *igno
 	}
 
 	perf_trace_buf_submit(rec, size, rctx, sys_data->exit_event->event.type,
-			      1, regs, head, NULL, NULL);
+			      1, regs, head, NULL);
 }
 
 static int perf_sysexit_enable(struct trace_event_call *call)
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -1156,7 +1156,7 @@ static void __uprobe_perf_func(struct tr
 	}
 
 	perf_trace_buf_submit(entry, size, rctx, call->event.type, 1, regs,
-			      head, NULL, NULL);
+			      head, NULL);
 out:
 	preempt_enable();
 }
