    Date: 2017-04-07
    From: Steven Rostedt
    Subject: [PATCH 0/5 v2] tracing: Add usecase of synchronize_rcu_tasks() and stack_tracer_disable()
    Paul,

    Here's my latest. You OK with it?

    -- Steve


    Paul E. McKenney (1):
    rcu: Fix dyntick-idle tracing

    Steven Rostedt (VMware) (4):
    ftrace: Add use of synchronize_rcu_tasks() with dynamic trampolines
    tracing: Replace the per_cpu() with this_cpu() in trace_stack.c
    tracing: Add stack_tracer_disable/enable() functions
    tracing: Rename trace_active to disable_stack_tracer and inline its modification

    ----
    Changes since v1:

    . Make stack_tracer_{dis,en}able() into inlines

    . Rename trace_active to disable_stack_tracer and make global

    . Do not call preempt_disable() from stack_tracer_disable() nor
      preempt_enable() from stack_tracer_enable(); the caller must now
      keep preemption or interrupts disabled (see the usage sketch below)

    . Add a check that preemption or interrupts are disabled when
      CONFIG_DEBUG_PREEMPT is enabled
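
    For reference, a rough sketch of the intended calling convention for
    the new inlines (the function below is hypothetical, purely for
    illustration; it is not part of this series):

    #include <linux/ftrace.h>
    #include <linux/irqflags.h>

    static void example_no_stack_trace_section(void)
    {
            unsigned long flags;

            /*
             * Preemption or interrupts must already be disabled before
             * stack_tracer_disable() is called, and must stay disabled
             * until after stack_tracer_enable().
             */
            local_irq_save(flags);

            stack_tracer_disable();

            /* ... code that must not be stack traced ... */

            stack_tracer_enable();

            local_irq_restore(flags);
    }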

    include/linux/ftrace.h     | 38 ++++++++++++++++++++++++++++++++++++
    kernel/rcu/tree.c          | 48 ++++++++++++++++++++++------------------------
    kernel/trace/Kconfig       |  3 ++-
    kernel/trace/ftrace.c      | 42 +++++++++++++++++-----------------------
    kernel/trace/trace_stack.c | 27 +++++++++-----------------
    5 files changed, 90 insertions(+), 68 deletions(-)

    Diff from v1:

    diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
    index 40afee3..4bde7ff 100644
    --- a/include/linux/ftrace.h
    +++ b/include/linux/ftrace.h
    @@ -287,8 +287,40 @@ stack_trace_sysctl(struct ctl_table *table, int write,
    void __user *buffer, size_t *lenp,
    loff_t *ppos);

    -void stack_tracer_disable(void);
    -void stack_tracer_enable(void);
    +/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
    +DECLARE_PER_CPU(int, disable_stack_tracer);
    +
    +/**
    + * stack_tracer_disable - temporarily disable the stack tracer
    + *
    + * There are a few locations (namely in RCU) where stack tracing
    + * cannot be executed. This function is used to disable stack
    + * tracing during those critical sections.
    + *
    + * This function must be called with preemption or interrupts
    + * disabled and stack_tracer_enable() must be called shortly after
    + * while preemption or interrupts are still disabled.
    + */
    +static inline void stack_tracer_disable(void)
    +{
    + /* Preemption or interrupts must be disabled */
    + if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
    + WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
    + this_cpu_inc(disable_stack_tracer);
    +}
    +
    +/**
    + * stack_tracer_enable - re-enable the stack tracer
    + *
    + * After stack_tracer_disable() is called, stack_tracer_enable()
    + * must be called shortly afterward.
    + */
    +static inline void stack_tracer_enable(void)
    +{
    + if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
    + WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
    + this_cpu_dec(disable_stack_tracer);
    +}
    #else
    static inline void stack_tracer_disable(void) { }
    static inline void stack_tracer_enable(void) { }
    diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
    index 5adbb73..84fafb6 100644
    --- a/kernel/trace/trace_stack.c
    +++ b/kernel/trace/trace_stack.c
    @@ -35,40 +35,12 @@ unsigned long stack_trace_max_size;
    arch_spinlock_t stack_trace_max_lock =
    (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

    -static DEFINE_PER_CPU(int, trace_active);
    +DEFINE_PER_CPU(int, disable_stack_tracer);
    static DEFINE_MUTEX(stack_sysctl_mutex);

    int stack_tracer_enabled;
    static int last_stack_tracer_enabled;

    -/**
    - * stack_tracer_disable - temporarily disable the stack tracer
    - *
    - * There's a few locations (namely in RCU) where stack tracing
    - * can not be executed. This function is used to disable stack
    - * tracing during those critical sections.
    - *
    - * This function will disable preemption. stack_tracer_enable()
    - * must be called shortly after this is called.
    - */
    -void stack_tracer_disable(void)
    -{
    - preempt_disable_notrace();
    - this_cpu_inc(trace_active);
    -}
    -
    -/**
    - * stack_tracer_enable - re-enable the stack tracer
    - *
    - * After stack_tracer_disable() is called, stack_tracer_enable()
    - * must shortly be called afterward.
    - */
    -void stack_tracer_enable(void)
    -{
    - this_cpu_dec(trace_active);
    - preempt_enable_notrace();
    -}
    -
    void stack_trace_print(void)
    {
    long i;
    @@ -239,8 +211,8 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
    preempt_disable_notrace();

    /* no atomic needed, we only modify this variable by this cpu */
    - this_cpu_inc(trace_active);
    - if (this_cpu_read(trace_active) != 1)
    + this_cpu_inc(disable_stack_tracer);
    + if (this_cpu_read(disable_stack_tracer) != 1)
    goto out;

    ip += MCOUNT_INSN_SIZE;
    @@ -248,7 +220,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
    check_stack(ip, &stack);

    out:
    - this_cpu_dec(trace_active);
    + this_cpu_dec(disable_stack_tracer);
    /* prevent recursion in schedule */
    preempt_enable_notrace();
    }
    @@ -290,15 +262,15 @@ stack_max_size_write(struct file *filp, const char __user *ubuf,
    /*
    * In case we trace inside arch_spin_lock() or after (NMI),
    * we will cause circular lock, so we also need to increase
    - * the percpu trace_active here.
    + * the percpu disable_stack_tracer here.
    */
    - this_cpu_inc(trace_active);
    + this_cpu_inc(disable_stack_tracer);

    arch_spin_lock(&stack_trace_max_lock);
    *ptr = val;
    arch_spin_unlock(&stack_trace_max_lock);

    - this_cpu_dec(trace_active);
    + this_cpu_dec(disable_stack_tracer);
    local_irq_restore(flags);

    return count;
    @@ -334,7 +306,7 @@ static void *t_start(struct seq_file *m, loff_t *pos)
    {
    local_irq_disable();

    - this_cpu_inc(trace_active);
    + this_cpu_inc(disable_stack_tracer);

    arch_spin_lock(&stack_trace_max_lock);

    @@ -348,7 +320,7 @@ static void t_stop(struct seq_file *m, void *p)
    {
    arch_spin_unlock(&stack_trace_max_lock);

    - this_cpu_dec(trace_active);
    + this_cpu_dec(disable_stack_tracer);

    local_irq_enable();
    }
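
    For completeness, the recursion protection that stack_trace_call()
    keeps using above boils down to the per-CPU pattern sketched below
    (the names are made up for illustration only):

    #include <linux/percpu.h>
    #include <linux/preempt.h>

    static DEFINE_PER_CPU(int, example_recursion);

    static void example_callback(void)
    {
            /* Stay on this CPU so we only ever touch its counter */
            preempt_disable_notrace();

            /*
             * Only this CPU modifies its copy of the counter, so plain
             * this_cpu ops are sufficient; no atomics are needed.
             */
            this_cpu_inc(example_recursion);
            if (this_cpu_read(example_recursion) != 1)
                    goto out;       /* recursed into ourselves, bail */

            /* ... work that may recurse back into this callback ... */

    out:
            this_cpu_dec(example_recursion);
            preempt_enable_notrace();
    }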