    Date: 20 Jul 2012
    From: Steven Rostedt <srostedt@redhat.com>
    Subject: [PATCH 09/19] ftrace: Add default recursion protection for function tracing

    As more users of the function tracer utility are being added, they do
    not always add the necessary recursion protection. To protect against
    function recursion due to tracing, if the callback's ftrace_ops does
    not explicitly state that it protects against recursion (by setting
    the FTRACE_OPS_FL_RECURSION_SAFE flag), the mcount trampoline will
    call the list operation, which adds recursion protection.

    If the flag is set, then the function will be called directly with no
    extra protection.
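
    For illustration, the recursion-protected path behaves roughly like
    the sketch below. This is a simplified model, not the kernel's actual
    list function: the per-context recursion helpers are stand-ins, and
    the callback signature follows the four-argument ftrace_func_t used
    by this series.

        /*
         * Sketch of the list operation the mcount trampoline falls back
         * to when an ftrace_ops is not marked FTRACE_OPS_FL_RECURSION_SAFE.
         * test_and_set_cpu_recursion()/clear_cpu_recursion() are assumed
         * helpers standing in for the real per-context accounting.
         */
        static void ftrace_ops_list_func_sketch(unsigned long ip,
                                                unsigned long parent_ip)
        {
                struct ftrace_ops *op;

                /* Refuse to re-enter: if this context is already running
                 * a callback, tracing the callback's own function calls
                 * would recurse forever. */
                if (test_and_set_cpu_recursion())
                        return;

                preempt_disable_notrace();
                for (op = ftrace_ops_list; op != &ftrace_list_end; op = op->next) {
                        if (ftrace_ops_test(op, ip))
                                op->func(ip, parent_ip, op, NULL);
                }
                preempt_enable_notrace();

                clear_cpu_recursion();
        }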

    Note that the list operation is called if more than one function
    callback is registered, or if the arch does not support all of the
    function tracer features.
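
    As a usage sketch, a callback that has no recursion handling of its
    own can simply leave the flag clear and rely on the default; the
    callback and ops names below are hypothetical.

        /* Hypothetical callback with no recursion protection of its own. */
        static void my_trace_func(unsigned long ip, unsigned long parent_ip,
                                  struct ftrace_ops *op, struct pt_regs *regs)
        {
                /* This may itself hit traced functions; the default
                 * protection in the list operation stops the recursion. */
        }

        static struct ftrace_ops my_ops = {
                .func = my_trace_func,
                /* No FTRACE_OPS_FL_RECURSION_SAFE here, so ftrace routes
                 * this through the recursion-protected list operation. */
        };

        /* Registered the usual way: register_ftrace_function(&my_ops); */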

    Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
    ---
    include/linux/ftrace.h | 5 +++++
    kernel/trace/ftrace.c | 10 ++++++++--
    kernel/trace/trace_events.c | 1 +
    kernel/trace/trace_functions.c | 4 ++--
    kernel/trace/trace_irqsoff.c | 2 +-
    kernel/trace/trace_sched_wakeup.c | 2 +-
    kernel/trace/trace_selftest.c | 7 +++++--
    kernel/trace/trace_stack.c | 1 +
    8 files changed, 24 insertions(+), 8 deletions(-)

    diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
    index ab39990..65a14e4 100644
    --- a/include/linux/ftrace.h
    +++ b/include/linux/ftrace.h
    @@ -85,6 +85,10 @@ typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
    * passing regs to the handler.
    * Note, if this flag is set, the SAVE_REGS flag will automatically
    * get set upon registering the ftrace_ops, if the arch supports it.
    + * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
    + * that the call back has its own recursion protection. If it does
    + * not set this, then the ftrace infrastructure will add recursion
    + * protection for the caller.
    */
    enum {
    FTRACE_OPS_FL_ENABLED = 1 << 0,
    @@ -93,6 +97,7 @@ enum {
    FTRACE_OPS_FL_CONTROL = 1 << 3,
    FTRACE_OPS_FL_SAVE_REGS = 1 << 4,
    FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 5,
    + FTRACE_OPS_FL_RECURSION_SAFE = 1 << 6,
    };

    struct ftrace_ops {
    diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
    index c55f7e2..ad765b4 100644
    --- a/kernel/trace/ftrace.c
    +++ b/kernel/trace/ftrace.c
    @@ -66,6 +66,7 @@

    static struct ftrace_ops ftrace_list_end __read_mostly = {
    .func = ftrace_stub,
    + .flags = FTRACE_OPS_FL_RECURSION_SAFE,
    };

    /* ftrace_enabled is a method to turn ftrace on or off */
    @@ -221,12 +222,13 @@ static void update_ftrace_function(void)

    /*
    * If we are at the end of the list and this ops is
    - * not dynamic and the arch supports passing ops, then have the
    - * mcount trampoline call the function directly.
    + * recursion safe and not dynamic and the arch supports passing ops,
    + * then have the mcount trampoline call the function directly.
    */
    if (ftrace_ops_list == &ftrace_list_end ||
    (ftrace_ops_list->next == &ftrace_list_end &&
    !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC) &&
    + (ftrace_ops_list->flags & FTRACE_OPS_FL_RECURSION_SAFE) &&
    !FTRACE_FORCE_LIST_FUNC)) {
    /* Set the ftrace_ops that the arch callback uses */
    if (ftrace_ops_list == &global_ops)
    @@ -867,6 +869,7 @@ static void unregister_ftrace_profiler(void)
    #else
    static struct ftrace_ops ftrace_profile_ops __read_mostly = {
    .func = function_profile_call,
    + .flags = FTRACE_OPS_FL_RECURSION_SAFE,
    };

    static int register_ftrace_profiler(void)
    @@ -1049,6 +1052,7 @@ static struct ftrace_ops global_ops = {
    .func = ftrace_stub,
    .notrace_hash = EMPTY_HASH,
    .filter_hash = EMPTY_HASH,
    + .flags = FTRACE_OPS_FL_RECURSION_SAFE,
    };

    static DEFINE_MUTEX(ftrace_regex_lock);
    @@ -3967,6 +3971,7 @@ void __init ftrace_init(void)

    static struct ftrace_ops global_ops = {
    .func = ftrace_stub,
    + .flags = FTRACE_OPS_FL_RECURSION_SAFE,
    };

    static int __init ftrace_nodyn_init(void)
    @@ -4023,6 +4028,7 @@ ftrace_ops_control_func(unsigned long ip, unsigned long parent_ip,

    static struct ftrace_ops control_ops = {
    .func = ftrace_ops_control_func,
    + .flags = FTRACE_OPS_FL_RECURSION_SAFE,
    };

    static inline void
    diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
    index 8c66968..6825d83 100644
    --- a/kernel/trace/trace_events.c
    +++ b/kernel/trace/trace_events.c
    @@ -1721,6 +1721,7 @@ function_test_events_call(unsigned long ip, unsigned long parent_ip,
    static struct ftrace_ops trace_ops __initdata =
    {
    .func = function_test_events_call,
    + .flags = FTRACE_OPS_FL_RECURSION_SAFE,
    };

    static __init void event_trace_self_test_with_function(void)
    diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
    index 5675ebd..fdff65d 100644
    --- a/kernel/trace/trace_functions.c
    +++ b/kernel/trace/trace_functions.c
    @@ -153,13 +153,13 @@ function_stack_trace_call(unsigned long ip, unsigned long parent_ip,
    static struct ftrace_ops trace_ops __read_mostly =
    {
    .func = function_trace_call,
    - .flags = FTRACE_OPS_FL_GLOBAL,
    + .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
    };

    static struct ftrace_ops trace_stack_ops __read_mostly =
    {
    .func = function_stack_trace_call,
    - .flags = FTRACE_OPS_FL_GLOBAL,
    + .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
    };

    /* Our two options */
    diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
    index c7a9ba9..d98ee82 100644
    --- a/kernel/trace/trace_irqsoff.c
    +++ b/kernel/trace/trace_irqsoff.c
    @@ -154,7 +154,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip,
    static struct ftrace_ops trace_ops __read_mostly =
    {
    .func = irqsoff_tracer_call,
    - .flags = FTRACE_OPS_FL_GLOBAL,
    + .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
    };
    #endif /* CONFIG_FUNCTION_TRACER */

    diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
    index 7547e36..02170c0 100644
    --- a/kernel/trace/trace_sched_wakeup.c
    +++ b/kernel/trace/trace_sched_wakeup.c
    @@ -130,7 +130,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip,
    static struct ftrace_ops trace_ops __read_mostly =
    {
    .func = wakeup_tracer_call,
    - .flags = FTRACE_OPS_FL_GLOBAL,
    + .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
    };
    #endif /* CONFIG_FUNCTION_TRACER */

    diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
    index add37e0..1fb6da8 100644
    --- a/kernel/trace/trace_selftest.c
    +++ b/kernel/trace/trace_selftest.c
    @@ -148,19 +148,22 @@ static void trace_selftest_test_dyn_func(unsigned long ip,

    static struct ftrace_ops test_probe1 = {
    .func = trace_selftest_test_probe1_func,
    + .flags = FTRACE_OPS_FL_RECURSION_SAFE,
    };

    static struct ftrace_ops test_probe2 = {
    .func = trace_selftest_test_probe2_func,
    + .flags = FTRACE_OPS_FL_RECURSION_SAFE,
    };

    static struct ftrace_ops test_probe3 = {
    .func = trace_selftest_test_probe3_func,
    + .flags = FTRACE_OPS_FL_RECURSION_SAFE,
    };

    static struct ftrace_ops test_global = {
    - .func = trace_selftest_test_global_func,
    - .flags = FTRACE_OPS_FL_GLOBAL,
    + .func = trace_selftest_test_global_func,
    + .flags = FTRACE_OPS_FL_GLOBAL | FTRACE_OPS_FL_RECURSION_SAFE,
    };

    static void print_counts(void)
    diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
    index 2fa5328..0c1b1657 100644
    --- a/kernel/trace/trace_stack.c
    +++ b/kernel/trace/trace_stack.c
    @@ -137,6 +137,7 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip,
    static struct ftrace_ops trace_ops __read_mostly =
    {
    .func = stack_trace_call,
    + .flags = FTRACE_OPS_FL_RECURSION_SAFE,
    };

    static ssize_t
    --
    1.7.10.4
