    Subject: [PATCH 2/7] ftrace: Add enable/disable ftrace_ops control interface
    Date: 2012-01-02
    Adding a way to temporarily enable/disable ftrace_ops. The change
    follows the same model as the 'global' ftrace_ops.

    Introducing 2 global objects - the control_ops ftrace_ops and the
    ftrace_control_list - which take over all ftrace_ops registered with
    the FTRACE_OPS_FL_CONTROL flag. In addition, a new per cpu field
    called 'disabled' is added to struct ftrace_ops to hold the control
    state for each cpu.

    When an ftrace_ops with FTRACE_OPS_FL_CONTROL is registered, it
    starts out disabled on all cpus.

    The ftrace_control_list contains all the registered 'control'
    ftrace_ops. The control_ops provides the function which iterates
    ftrace_control_list and, for each entry, checks the 'disabled' flag
    on the current cpu before calling its handler.

    Adding 2 inline functions, ftrace_function_enable/ftrace_function_disable,
    which enable/disable the ftrace_ops on a given cpu.
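
    For illustration only (not part of this patch), here is a minimal
    sketch of how a caller might use the new interface; the names
    my_tracer_func, my_ops and my_tracer_init are hypothetical:

        #include <linux/ftrace.h>
        #include <linux/init.h>
        #include <linux/preempt.h>
        #include <linux/smp.h>

        /* hypothetical example - runs only on cpus where my_ops is enabled */
        static void my_tracer_func(unsigned long ip, unsigned long parent_ip)
        {
        }

        static struct ftrace_ops my_ops = {
                .func  = my_tracer_func,
                .flags = FTRACE_OPS_FL_CONTROL,
        };

        static int __init my_tracer_init(void)
        {
                /* the ops is live after this, but still disabled on every cpu */
                int ret = register_ftrace_function(&my_ops);

                if (ret)
                        return ret;

                /* both helpers must be called with preemption disabled */
                preempt_disable();
                ftrace_function_enable(&my_ops, smp_processor_id());
                preempt_enable();

                return 0;
        }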

    Signed-off-by: Jiri Olsa <jolsa@redhat.com>
    ---
    include/linux/ftrace.h |   42 +++++++++++++++++
    kernel/trace/ftrace.c  |  119 +++++++++++++++++++++++++++++++++++++++++++++---
    kernel/trace/trace.h   |    2 +
    3 files changed, 156 insertions(+), 7 deletions(-)

    diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
    index 523640f..0d43a2b 100644
    --- a/include/linux/ftrace.h
    +++ b/include/linux/ftrace.h
    @@ -35,12 +35,14 @@ enum {
            FTRACE_OPS_FL_ENABLED           = 1 << 0,
            FTRACE_OPS_FL_GLOBAL            = 1 << 1,
            FTRACE_OPS_FL_DYNAMIC           = 1 << 2,
    +       FTRACE_OPS_FL_CONTROL           = 1 << 3,
    };

    struct ftrace_ops {
            ftrace_func_t                   func;
            struct ftrace_ops               *next;
            unsigned long                   flags;
    +       void __percpu                   *disabled;
    #ifdef CONFIG_DYNAMIC_FTRACE
            struct ftrace_hash              *notrace_hash;
            struct ftrace_hash              *filter_hash;
    @@ -97,6 +99,46 @@ int register_ftrace_function(struct ftrace_ops *ops);
    int unregister_ftrace_function(struct ftrace_ops *ops);
    void clear_ftrace_function(void);

    +/**
    + * ftrace_function_enable - enable controlled ftrace_ops on given cpu
    + *
    + * This function enables tracing on the given cpu by decreasing
    + * the per cpu control variable.
    + * It must be called with preemption disabled and only on
    + * ftrace_ops registered with FTRACE_OPS_FL_CONTROL.
    + */
    +static inline void ftrace_function_enable(struct ftrace_ops *ops, int cpu)
    +{
    +       atomic_t *disabled;
    +
    +       if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL) ||
    +                        !preempt_count()))
    +               return;
    +
    +       disabled = per_cpu_ptr(ops->disabled, cpu);
    +       atomic_dec(disabled);
    +}
    +
    +/**
    + * ftrace_function_disable - disable controlled ftrace_ops on given cpu
    + *
    + * This function disables tracing on the given cpu by increasing
    + * the per cpu control variable.
    + * It must be called with preemption disabled and only on
    + * ftrace_ops registered with FTRACE_OPS_FL_CONTROL.
    + */
    +static inline void ftrace_function_disable(struct ftrace_ops *ops, int cpu)
    +{
    +       atomic_t *disabled;
    +
    +       if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL) ||
    +                        !preempt_count()))
    +               return;
    +
    +       disabled = per_cpu_ptr(ops->disabled, cpu);
    +       atomic_inc(disabled);
    +}
    +
    extern void ftrace_stub(unsigned long a0, unsigned long a1);

    #else /* !CONFIG_FUNCTION_TRACER */
    diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
    index 46e1031..7af5fb3 100644
    --- a/kernel/trace/ftrace.c
    +++ b/kernel/trace/ftrace.c
    @@ -60,6 +60,8 @@
    #define FTRACE_HASH_DEFAULT_BITS 10
    #define FTRACE_HASH_MAX_BITS 12

    +#define FL_GLOBAL_CONTROL_MASK (FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_CONTROL)
    +
    /* ftrace_enabled is a method to turn ftrace on or off */
    int ftrace_enabled __read_mostly;
    static int last_ftrace_enabled;
    @@ -87,12 +89,14 @@ static struct ftrace_ops ftrace_list_end __read_mostly = {
    };

    static struct ftrace_ops *ftrace_global_list __read_mostly = &ftrace_list_end;
    +static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
    static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
    ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
    static ftrace_func_t __ftrace_trace_function_delay __read_mostly = ftrace_stub;
    ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
    ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
    static struct ftrace_ops global_ops;
    +static struct ftrace_ops control_ops;

    static void
    ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip);
    @@ -166,6 +170,38 @@ static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
    }
    #endif

    +static void control_ops_disable_all(struct ftrace_ops *ops)
    +{
    +       int cpu;
    +
    +       for_each_possible_cpu(cpu)
    +               atomic_set(per_cpu_ptr(ops->disabled, cpu), 1);
    +}
    +
    +static int control_ops_alloc(struct ftrace_ops *ops)
    +{
    +       atomic_t *disabled;
    +
    +       disabled = alloc_percpu(atomic_t);
    +       if (!disabled)
    +               return -ENOMEM;
    +
    +       ops->disabled = disabled;
    +       control_ops_disable_all(ops);
    +       return 0;
    +}
    +
    +static void control_ops_free(struct ftrace_ops *ops)
    +{
    +       free_percpu(ops->disabled);
    +}
    +
    +static int control_ops_is_disabled(struct ftrace_ops *ops, int cpu)
    +{
    +       atomic_t *disabled = per_cpu_ptr(ops->disabled, cpu);
    +       return atomic_read(disabled);
    +}
    +
    static void update_global_ops(void)
    {
            ftrace_func_t func;
    @@ -257,6 +293,26 @@ static int remove_ftrace_ops(struct ftrace_ops **list, struct ftrace_ops *ops)
            return 0;
    }

    +static void add_ftrace_list_ops(struct ftrace_ops **list,
    +                               struct ftrace_ops *main_ops,
    +                               struct ftrace_ops *ops)
    +{
    +       int first = *list == &ftrace_list_end;
    +       add_ftrace_ops(list, ops);
    +       if (first)
    +               add_ftrace_ops(&ftrace_ops_list, main_ops);
    +}
    +
    +static int remove_ftrace_list_ops(struct ftrace_ops **list,
    +                                 struct ftrace_ops *main_ops,
    +                                 struct ftrace_ops *ops)
    +{
    +       int ret = remove_ftrace_ops(list, ops);
    +       if (!ret && *list == &ftrace_list_end)
    +               ret = remove_ftrace_ops(&ftrace_ops_list, main_ops);
    +       return ret;
    +}
    +
    static int __register_ftrace_function(struct ftrace_ops *ops)
    {
            if (ftrace_disabled)
    @@ -268,15 +324,20 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
            if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
                    return -EBUSY;

    +       /* We don't support both control and global flags set. */
    +       if ((ops->flags & FL_GLOBAL_CONTROL_MASK) == FL_GLOBAL_CONTROL_MASK)
    +               return -EINVAL;
    +
            if (!core_kernel_data((unsigned long)ops))
                    ops->flags |= FTRACE_OPS_FL_DYNAMIC;

            if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
    -               int first = ftrace_global_list == &ftrace_list_end;
    -               add_ftrace_ops(&ftrace_global_list, ops);
    +               add_ftrace_list_ops(&ftrace_global_list, &global_ops, ops);
                    ops->flags |= FTRACE_OPS_FL_ENABLED;
    -               if (first)
    -                       add_ftrace_ops(&ftrace_ops_list, &global_ops);
    +       } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
    +               if (control_ops_alloc(ops))
    +                       return -ENOMEM;
    +               add_ftrace_list_ops(&ftrace_control_list, &control_ops, ops);
            } else
                    add_ftrace_ops(&ftrace_ops_list, ops);

    @@ -300,11 +361,23 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
                    return -EINVAL;

            if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
    -               ret = remove_ftrace_ops(&ftrace_global_list, ops);
    -               if (!ret && ftrace_global_list == &ftrace_list_end)
    -                       ret = remove_ftrace_ops(&ftrace_ops_list, &global_ops);
    +               ret = remove_ftrace_list_ops(&ftrace_global_list,
    +                                            &global_ops, ops);
                    if (!ret)
                            ops->flags &= ~FTRACE_OPS_FL_ENABLED;
    +       } else if (ops->flags & FTRACE_OPS_FL_CONTROL) {
    +               ret = remove_ftrace_list_ops(&ftrace_control_list,
    +                                            &control_ops, ops);
    +               if (!ret) {
    +                       /*
    +                        * The ftrace_ops is now removed from the list,
    +                        * so there'll be no new users. We must ensure
    +                        * all current users are done before we free
    +                        * the control data.
    +                        */
    +                       synchronize_sched();
    +                       control_ops_free(ops);
    +               }
            } else
                    ret = remove_ftrace_ops(&ftrace_ops_list, ops);

    @@ -3565,6 +3638,38 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
    #endif /* CONFIG_DYNAMIC_FTRACE */

    static void
    +ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip)
    +{
    +       struct ftrace_ops *op;
    +       int cpu;
    +
    +       if (unlikely(trace_recursion_test(TRACE_CONTROL_BIT)))
    +               return;
    +
    +       /*
    +        * Some of the ops may be dynamically allocated,
    +        * they must be freed after a synchronize_sched().
    +        */
    +       preempt_disable_notrace();
    +       trace_recursion_set(TRACE_CONTROL_BIT);
    +       cpu = smp_processor_id();
    +       op = rcu_dereference_raw(ftrace_control_list);
    +       while (op != &ftrace_list_end) {
    +               if (!control_ops_is_disabled(op, cpu) &&
    +                   ftrace_ops_test(op, ip))
    +                       op->func(ip, parent_ip);
    +
    +               op = rcu_dereference_raw(op->next);
    +       }
    +       trace_recursion_clear(TRACE_CONTROL_BIT);
    +       preempt_enable_notrace();
    +}
    +
    +static struct ftrace_ops control_ops = {
    +       .func = ftrace_ops_control_func,
    +};
    +
    +static void
    ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
    {
            struct ftrace_ops *op;
    diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
    index 2c26574..41c54e3 100644
    --- a/kernel/trace/trace.h
    +++ b/kernel/trace/trace.h
    @@ -288,6 +288,8 @@ struct tracer {
    /* for function tracing recursion */
    #define TRACE_INTERNAL_BIT      (1<<11)
    #define TRACE_GLOBAL_BIT        (1<<12)
    +#define TRACE_CONTROL_BIT       (1<<13)
    +
    /*
     * Abuse of the trace_recursion.
     * As we need a way to maintain state if we are tracing the function
    --
    1.7.1

