Subject: Re: [for-next][PATCH 8/8] tracing/fgraph: Have wakeup and irqsoff tracers ignore graph functions too
On Tue, Dec 13, 2016 at 01:33:42AM +0900, Namhyung Kim wrote:
> From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
>
> Currently both the wakeup and irqsoff tracers do not handle set_graph_notrace
> well. The ftrace infrastructure will ignore the return paths of all
> functions, leaving them hanging without an end:
>
> # echo '*spin*' > set_graph_notrace
> # cat trace
> [...]
> _raw_spin_lock() {
>   preempt_count_add() {
>     do_raw_spin_lock() {
>       update_rq_clock();
>
> Where the '*spin*' functions should have looked like this:
>
> _raw_spin_lock() {
>   preempt_count_add();
>   do_raw_spin_lock();
> }
> update_rq_clock();
>
> Instead, have the wakeup and irqsoff tracers ignore the functions that are
> set by the set_graph_notrace like the function_graph tracer does. Move
> the logic in the function_graph tracer into a header to allow wakeup and
> irqsoff tracers to use it as well.
>
> Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
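
A side note for anyone following along: with ftrace_graph_ignore_func() moved
into trace.h, each graph-entry callback now starts with the same two checks.
A minimal sketch of that shared pattern (the callback name below is made up,
and the per-tracer prolog/bookkeeping is elided):

    static int example_graph_entry(struct ftrace_graph_ent *trace)
    {
            /* Skip functions outside the graph filter or depth limit. */
            if (ftrace_graph_ignore_func(trace))
                    return 0;

            /*
             * Functions matched by set_graph_notrace are not traced,
             * but returning 1 still gives them a ret_stack entry so
             * tracing can resume after they return.
             */
            if (ftrace_graph_notrace_addr(trace->func))
                    return 1;

            /* ... per-tracer entry handling goes here ... */
            return 1;
    }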

Acked-by: Namhyung Kim <namhyung@kernel.org>

Thanks,
Namhyung


> ---
> kernel/trace/trace.h | 11 +++++++++++
> kernel/trace/trace_functions_graph.c | 14 +++++++-------
> kernel/trace/trace_irqsoff.c | 12 ++++++++++++
> kernel/trace/trace_sched_wakeup.c | 12 ++++++++++++
> 4 files changed, 42 insertions(+), 7 deletions(-)
>
> diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
> index 37602e722336..c2234494f40c 100644
> --- a/kernel/trace/trace.h
> +++ b/kernel/trace/trace.h
> @@ -846,6 +846,17 @@ static inline int ftrace_graph_notrace_addr(unsigned long addr)
> return 0;
> }
> #endif /* CONFIG_DYNAMIC_FTRACE */
> +
> +extern unsigned int fgraph_max_depth;
> +
> +static inline bool ftrace_graph_ignore_func(struct ftrace_graph_ent *trace)
> +{
> + /* trace it when it is-nested-in or is a function enabled. */
> + return !(trace->depth || ftrace_graph_addr(trace->func)) ||
> + (trace->depth < 0) ||
> + (fgraph_max_depth && trace->depth >= fgraph_max_depth);
> +}
> +
> #else /* CONFIG_FUNCTION_GRAPH_TRACER */
> static inline enum print_line_t
> print_graph_function_flags(struct trace_iterator *iter, u32 flags)
> diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
> index 566f7327c3aa..d56123cdcc89 100644
> --- a/kernel/trace/trace_functions_graph.c
> +++ b/kernel/trace/trace_functions_graph.c
> @@ -65,7 +65,7 @@ struct fgraph_data {
>
> #define TRACE_GRAPH_INDENT 2
>
> -static unsigned int max_depth;
> +unsigned int fgraph_max_depth;
>
> static struct tracer_opt trace_opts[] = {
> /* Display overruns? (for self-debug purpose) */
> @@ -384,10 +384,10 @@ int trace_graph_entry(struct ftrace_graph_ent *trace)
> if (!ftrace_trace_task(tr))
> return 0;
>
> - /* trace it when it is-nested-in or is a function enabled. */
> - if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
> - ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
> - (max_depth && trace->depth >= max_depth))
> + if (ftrace_graph_ignore_func(trace))
> + return 0;
> +
> + if (ftrace_graph_ignore_irqs())
> return 0;
>
> /*
> @@ -1500,7 +1500,7 @@ graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
> if (ret)
> return ret;
>
> - max_depth = val;
> + fgraph_max_depth = val;
>
> *ppos += cnt;
>
> @@ -1514,7 +1514,7 @@ graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
> char buf[15]; /* More than enough to hold UINT_MAX + "\n"*/
> int n;
>
> - n = sprintf(buf, "%d\n", max_depth);
> + n = sprintf(buf, "%d\n", fgraph_max_depth);
>
> return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
> }
> diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
> index 03cdff84d026..86654d7e1afe 100644
> --- a/kernel/trace/trace_irqsoff.c
> +++ b/kernel/trace/trace_irqsoff.c
> @@ -175,6 +175,18 @@ static int irqsoff_graph_entry(struct ftrace_graph_ent *trace)
> int ret;
> int pc;
>
> + if (ftrace_graph_ignore_func(trace))
> + return 0;
> + /*
> + * Do not trace a function if it's filtered by set_graph_notrace.
> + * Make the index of ret stack negative to indicate that it should
> + * ignore further functions. But it needs its own ret stack entry
> + * to recover the original index in order to continue tracing after
> + * returning from the function.
> + */
> + if (ftrace_graph_notrace_addr(trace->func))
> + return 1;
> +
> if (!func_prolog_dec(tr, &data, &flags))
> return 0;
>
> diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
> index 1bf2324dc682..5d0bb025bb21 100644
> --- a/kernel/trace/trace_sched_wakeup.c
> +++ b/kernel/trace/trace_sched_wakeup.c
> @@ -239,6 +239,18 @@ static int wakeup_graph_entry(struct ftrace_graph_ent *trace)
> unsigned long flags;
> int pc, ret = 0;
>
> + if (ftrace_graph_ignore_func(trace))
> + return 0;
> + /*
> + * Do not trace a function if it's filtered by set_graph_notrace.
> + * Make the index of ret stack negative to indicate that it should
> + * ignore further functions. But it needs its own ret stack entry
> + * to recover the original index in order to continue tracing after
> + * returning from the function.
> + */
> + if (ftrace_graph_notrace_addr(trace->func))
> + return 1;
> +
> if (!func_prolog_preempt_disable(tr, &data, &pc))
> return 0;
>
> --
> 2.10.2
