Subject: Re: [patch V3 21/29] tracing: Use percpu stack trace buffer more intelligently
On Thu, Apr 25, 2019 at 11:45:14AM +0200, Thomas Gleixner wrote:
> @@ -2788,29 +2798,32 @@ static void __ftrace_trace_stack(struct
> */
> preempt_disable_notrace();
>
> - use_stack = __this_cpu_inc_return(ftrace_stack_reserve);
> + stackidx = __this_cpu_inc_return(ftrace_stack_reserve);
> +
> + /* This should never happen. If it does, yell once and skip */
> + if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
> + goto out;
> +
> /*
> - * We don't need any atomic variables, just a barrier.
> - * If an interrupt comes in, we don't care, because it would
> - * have exited and put the counter back to what we want.
> - * We just need a barrier to keep gcc from moving things
> - * around.
> + * The above __this_cpu_inc_return() is 'atomic' cpu local. An
> + * interrupt will either see the value pre increment or post
> + * increment. If the interrupt happens pre increment it will have
> + * restored the counter when it returns. We just need a barrier to
> + * keep gcc from moving things around.
> */
> barrier();
> - if (use_stack == 1) {
> - trace.entries = this_cpu_ptr(ftrace_stack.calls);
> - trace.max_entries = FTRACE_STACK_MAX_ENTRIES;
> -
> - if (regs)
> - save_stack_trace_regs(regs, &trace);
> - else
> - save_stack_trace(&trace);
> -
> - if (trace.nr_entries > size)
> - size = trace.nr_entries;
> - } else
> - /* From now on, use_stack is a boolean */
> - use_stack = 0;
> +
> + fstack = this_cpu_ptr(ftrace_stacks.stacks) + (stackidx - 1);

nit: it would be slightly less surprising if stackidx were 0-based (note the
bounds check can then stay ">= FTRACE_KSTACK_NESTING" as is, since the valid
0-based indices are 0 .. FTRACE_KSTACK_NESTING - 1):

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index d3f6ec7eb729..4fc93004feab 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2798,10 +2798,10 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
*/
preempt_disable_notrace();

- stackidx = __this_cpu_inc_return(ftrace_stack_reserve);
+ stackidx = __this_cpu_inc_return(ftrace_stack_reserve) - 1;

/* This should never happen. If it does, yell once and skip */
if (WARN_ON_ONCE(stackidx >= FTRACE_KSTACK_NESTING))
goto out;

/*
@@ -2813,7 +2813,7 @@ static void __ftrace_trace_stack(struct ring_buffer *buffer,
*/
barrier();

- fstack = this_cpu_ptr(ftrace_stacks.stacks) + (stackidx - 1);
+ fstack = this_cpu_ptr(ftrace_stacks.stacks) + stackidx;
trace.entries = fstack->calls;
trace.max_entries = FTRACE_KSTACK_ENTRIES;
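
For illustration, here is a minimal user-space sketch of the same 0-based
reservation pattern (all names in it, reserve_stack(), KSTACK_NESTING and so
on, are made up for the example; the kernel uses per-CPU operations and
preempt_disable_notrace() rather than the plain globals shown here):

#include <stdio.h>

#define KSTACK_NESTING	4
#define KSTACK_ENTRIES	64

struct kstack {
	unsigned long	calls[KSTACK_ENTRIES];
	unsigned int	nr_entries;
};

static struct kstack stacks[KSTACK_NESTING];
static int stack_reserve;		/* a per-CPU variable in the kernel */

static struct kstack *reserve_stack(void)
{
	/* stack_reserve++ plays the role of __this_cpu_inc_return() - 1 */
	int stackidx = stack_reserve++;

	/* valid 0-based slots are 0 .. KSTACK_NESTING - 1 */
	if (stackidx >= KSTACK_NESTING) {
		stack_reserve--;	/* undo, like the 'goto out' path */
		return NULL;
	}
	return &stacks[stackidx];
}

static void release_stack(void)
{
	stack_reserve--;		/* __this_cpu_dec() in the kernel */
}

int main(void)
{
	struct kstack *fstack = reserve_stack();

	if (fstack) {
		/* save_stack_trace() would fill fstack->calls here */
		fstack->nr_entries = 0;
		printf("reserved nesting slot %td\n", fstack - stacks);
		release_stack();
	}
	return 0;
}

The point of the 0-based index is that the array subscript and the bounds
check use the same value, so the "- 1" adjustment and the off-by-one it
invites both disappear.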
