Subject: [005/118] tracing: Remove most or all of stack tracer stack size from stack_max_size
    3.2.45-rc1 review patch.  If anyone has any objections, please let me know.

    ------------------

    From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>

    commit 4df297129f622bdc18935c856f42b9ddd18f9f28 upstream.

Currently, the depth reported in the stack tracer's stack_trace file
does not match the stack_max_size file. This is because stack_max_size
includes the overhead of the stack tracer itself while the depth does not.

The first time a max is triggered, a calculation is now performed that
figures out the overhead of the stack tracer and subtracts it from
the stack_max_size variable. The overhead is stored and is subtracted
from the reported stack size when comparing against a new max.
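
Condensed, the mechanism the patch adds to check_stack() looks like the
sketch below. The locking and the stack-search loop are elided (i and p
are the index and pointer of that elided loop); the complete change is
in the diff further down:

        static inline void check_stack(unsigned long ip, unsigned long *stack)
        {
                static int tracer_frame;        /* tracer's own stack cost, in bytes */
                int frame_size = ACCESS_ONCE(tracer_frame);
                unsigned long this_size;

                this_size = THREAD_SIZE -
                        (((unsigned long)stack) & (THREAD_SIZE - 1));
                this_size -= frame_size;        /* subtract the stored overhead */

                if (this_size <= max_stack_size)
                        return;

                /* ... take max_stack_lock, save_stack_trace(), walk the stack ... */

                /*
                 * stack_dump_trace[0] holds the traced ip; the first time it
                 * is found on the stack (i == 1), everything below that slot
                 * is the stack tracer's own usage, so measure it once and
                 * correct the just-recorded max.
                 */
                if (unlikely(!tracer_frame) && i == 1) {
                        tracer_frame = (p - stack) * sizeof(unsigned long);
                        max_stack_size -= tracer_frame;
                }
        }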

    Now the stack_max_size corresponds to the reported depth:

    # cat stack_max_size
    4640

    # cat stack_trace
        Depth    Size   Location    (48 entries)
        -----    ----   --------
  0)     4640      32   _raw_spin_lock+0x18/0x24
  1)     4608     112   ____cache_alloc+0xb7/0x22d
  2)     4496      80   kmem_cache_alloc+0x63/0x12f
  3)     4416      16   mempool_alloc_slab+0x15/0x17
    [...]
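
(To reproduce readings like these: with debugfs mounted in the usual
place, the stack tracer is switched on via sysctl and the two files
above live in the tracing directory, e.g.

    # echo 1 > /proc/sys/kernel/stack_tracer_enabled
    # cd /sys/kernel/debug/tracing
    # cat stack_max_size    # deepest stack seen so far, in bytes
    # cat stack_trace       # per-function breakdown of that maximum

The tracer can also be enabled at boot with the "stacktrace" kernel
command line parameter.)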

While testing against an older gcc on x86 that uses mcount instead
of fentry, I found that passing in ip + MCOUNT_INSN_SIZE lets the
stack trace show one more function deep which was missing before.
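
In code, this amounts to picking, in stack_trace_call(), an ip that
will actually match a return address on the stack before handing off
to check_stack() (shown in full in the final hunk of the diff):

        if (fentry)
                /* the traced function has no frame yet, so its own ip
                   is not on the stack; the parent's return address is */
                ip = parent_ip;
        else
                /* mcount reports the call address, but the stack holds
                   the return address, MCOUNT_INSN_SIZE bytes later */
                ip += MCOUNT_INSN_SIZE;

        check_stack(ip, &stack);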

    Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
    Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
    ---
    kernel/trace/trace_stack.c | 75 +++++++++++++++++++++++++++++++-------------
    1 file changed, 54 insertions(+), 21 deletions(-)

--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -17,27 +17,24 @@
 
 #define STACK_TRACE_ENTRIES 500
 
-/*
- * If fentry is used, then the function being traced will
- * jump to fentry directly before it sets up its stack frame.
- * We need to ignore that one and record the parent. Since
- * the stack frame for the traced function wasn't set up yet,
- * the stack_trace wont see the parent. That needs to be added
- * manually to stack_dump_trace[] as the first element.
- */
 #ifdef CC_USING_FENTRY
-# define add_func       1
+# define fentry         1
 #else
-# define add_func       0
+# define fentry         0
 #endif
 
 static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
         { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
 static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
 
+/*
+ * Reserve one entry for the passed in ip. This will allow
+ * us to remove most or all of the stack size overhead
+ * added by the stack tracer itself.
+ */
 static struct stack_trace max_stack_trace = {
-        .max_entries            = STACK_TRACE_ENTRIES - add_func,
-        .entries                = &stack_dump_trace[add_func],
+        .max_entries            = STACK_TRACE_ENTRIES - 1,
+        .entries                = &stack_dump_trace[1],
 };
 
 static unsigned long max_stack_size;
@@ -56,10 +53,14 @@ check_stack(unsigned long ip, unsigned l
 {
        unsigned long this_size, flags;
        unsigned long *p, *top, *start;
+       static int tracer_frame;
+       int frame_size = ACCESS_ONCE(tracer_frame);
        int i;
 
        this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
        this_size = THREAD_SIZE - this_size;
+       /* Remove the frame of the tracer */
+       this_size -= frame_size;
 
        if (this_size <= max_stack_size)
                return;
@@ -71,6 +72,10 @@ check_stack(unsigned long ip, unsigned l
        local_irq_save(flags);
        arch_spin_lock(&max_stack_lock);
 
+       /* In case another CPU set the tracer_frame on us */
+       if (unlikely(!frame_size))
+               this_size -= tracer_frame;
+
        /* a race could have already updated it */
        if (this_size <= max_stack_size)
                goto out;
@@ -83,15 +88,12 @@ check_stack(unsigned long ip, unsigned l
        save_stack_trace(&max_stack_trace);
 
        /*
-        * When fentry is used, the traced function does not get
-        * its stack frame set up, and we lose the parent.
-        * Add that one in manally. We set up save_stack_trace()
-        * to not touch the first element in this case.
+        * Add the passed in ip from the function tracer.
+        * Searching for this on the stack will skip over
+        * most of the overhead from the stack tracer itself.
         */
-       if (add_func) {
-               stack_dump_trace[0] = ip;
-               max_stack_trace.nr_entries++;
-       }
+       stack_dump_trace[0] = ip;
+       max_stack_trace.nr_entries++;
 
        /*
         * Now find where in the stack these are.
@@ -121,6 +123,18 @@ check_stack(unsigned long ip, unsigned l
                        found = 1;
                        /* Start the search from here */
                        start = p + 1;
+                       /*
+                        * We do not want to show the overhead
+                        * of the stack tracer stack in the
+                        * max stack. If we haven't figured
+                        * out what that is, then figure it out
+                        * now.
+                        */
+                       if (unlikely(!tracer_frame) && i == 1) {
+                               tracer_frame = (p - stack) *
+                                       sizeof(unsigned long);
+                               max_stack_size -= tracer_frame;
+                       }
                }
        }
 
@@ -149,7 +163,26 @@ stack_trace_call(unsigned long ip, unsig
        if (per_cpu(trace_active, cpu)++ != 0)
                goto out;
 
-       check_stack(parent_ip, &stack);
+       /*
+        * When fentry is used, the traced function does not get
+        * its stack frame set up, and we lose the parent.
+        * The ip is pretty useless because the function tracer
+        * was called before that function set up its stack frame.
+        * In this case, we use the parent ip.
+        *
+        * By adding the return address of either the parent ip
+        * or the current ip we can disregard most of the stack usage
+        * caused by the stack tracer itself.
+        *
+        * The function tracer always reports the address of where the
+        * mcount call was, but the stack will hold the return address.
+        */
+       if (fentry)
+               ip = parent_ip;
+       else
+               ip += MCOUNT_INSN_SIZE;
+
+       check_stack(ip, &stack);
 
  out:
        per_cpu(trace_active, cpu)--;

