    From: Steven Rostedt <rostedt@goodmis.org>
    Subject: Re: [PATCH] kernel/trace: Add TRACING_ALLOW_PRINTK config option
    On Sun, 28 Jun 2020 18:28:42 -0400
    Steven Rostedt <rostedt@goodmis.org> wrote:

    > You create a bpf event just like you create any other event. When a bpf
    > program that uses a bpf_trace_printk() is loaded, you can enable that
    > event from within the kernel. Yes, there are internal interfaces to
    > enable and disable events just like echoing 1 into
    > tracefs/events/system/event/enable. See trace_set_clr_event().
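
    For example, the load path of a program that calls bpf_trace_printk()
    could flip that event on with something like this (only a sketch; the
    wrapper name is made up, and the event is the bpf_trace:bpf_trace_printk
    one added in the patch below):

            /*
             * Made-up helper, not part of the patch below.  Same effect as
             * echoing 1 into tracefs/events/bpf_trace/bpf_trace_printk/enable.
             */
            static int enable_bpf_trace_printk_event(void)
            {
                    return trace_set_clr_event("bpf_trace", "bpf_trace_printk", 1);
            }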

    I just started playing with what the code would look like and have
    this. It can be optimized with per-cpu sets of buffers to remove the
    spin lock. I also didn't put in the enabling of the event, but I'm sure
    you can figure that out.
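
    Roughly, the per-cpu-buffers version could look something like this
    (just a sketch of the idea, not part of the patch below -- the struct
    and names are made up, with one buffer per nesting level so an irq or
    NMI program doesn't scribble over a task-context one):

            #define BPF_PRINTK_NESTING 4    /* task, softirq, irq, NMI */

            struct bpf_printk_buffers {
                    char buf[BPF_PRINTK_NESTING][BPF_TRACE_PRINTK_SIZE];
                    int nesting;
            };
            static DEFINE_PER_CPU(struct bpf_printk_buffers, bpf_printk_bufs);

            static void do_trace_printk(const char *fmt, ...)
            {
                    struct bpf_printk_buffers *b;
                    va_list ap;
                    char *buf;

                    preempt_disable();
                    b = this_cpu_ptr(&bpf_printk_bufs);
                    if (b->nesting >= BPF_PRINTK_NESTING)
                            goto out;

                    buf = b->buf[b->nesting++];
                    /* interrupts must see the nesting bump before we use buf */
                    barrier();

                    va_start(ap, fmt);
                    vsnprintf(buf, BPF_TRACE_PRINTK_SIZE, fmt, ap);
                    va_end(ap);

                    trace_bpf_trace_printk(buf);
                    b->nesting--;
            out:
                    preempt_enable();
            }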

    Warning, not even compile tested.

    -- Steve

    diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
    index 6575bb0a0434..aeba5ee7325a 100644
    --- a/kernel/trace/Makefile
    +++ b/kernel/trace/Makefile
    @@ -31,6 +31,8 @@ ifdef CONFIG_GCOV_PROFILE_FTRACE
    GCOV_PROFILE := y
    endif

    +CFLAGS_bpf_trace.o := -I$(src)
    +
    CFLAGS_trace_benchmark.o := -I$(src)
    CFLAGS_trace_events_filter.o := -I$(src)

    diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
    index dc05626979b8..01bedf335b2e 100644
    --- a/kernel/trace/bpf_trace.c
    +++ b/kernel/trace/bpf_trace.c
    @@ -19,6 +19,9 @@
    #include "trace_probe.h"
    #include "trace.h"

    +#define CREATE_TRACE_POINTS
    +#include "bpf_trace.h"
    +
    #define bpf_event_rcu_dereference(p) \
    rcu_dereference_protected(p, lockdep_is_held(&bpf_event_mutex))

    @@ -473,13 +476,29 @@ BPF_CALL_5(bpf_trace_printk, char *, fmt, u32, fmt_size, u64, arg1,
    fmt_cnt++;
    }

    +static DEFINE_SPINLOCK(trace_printk_lock);
    +#define BPF_TRACE_PRINTK_SIZE 1024
    +
    +static inline void do_trace_printk(const char *fmt, ...)
    +{
    + static char buf[BPF_TRACE_PRINTK_SIZE];
    + unsigned long flags;
    + va_list ap;
    +
    + spin_lock_irqsave(&trace_printk_lock, flags);
    + va_start(ap, fmt);
    + vsnprintf(buf, BPF_TRACE_PRINTK_SIZE, fmt, ap);
    + va_end(ap);
    +
    + trace_bpf_trace_printk(buf);
    + spin_unlock_irqrestore(&trace_printk_lock, flags);
    +}
    +
    /* Horrid workaround for getting va_list handling working with different
    * argument type combinations generically for 32 and 64 bit archs.
    */
    #define __BPF_TP_EMIT() __BPF_ARG3_TP()
    #define __BPF_TP(...) \
    - __trace_printk(0 /* Fake ip */, \
    - fmt, ##__VA_ARGS__)
    + do_trace_printk(fmt, ##__VA_ARGS__)

    #define __BPF_ARG1_TP(...) \
    ((mod[0] == 2 || (mod[0] == 1 && __BITS_PER_LONG == 64)) \
    diff --git a/kernel/trace/bpf_trace.h b/kernel/trace/bpf_trace.h
    new file mode 100644
    index 000000000000..09088bb92fe1
    --- /dev/null
    +++ b/kernel/trace/bpf_trace.h
    @@ -0,0 +1,31 @@
    +#undef TRACE_SYSTEM
    +#define TRACE_SYSTEM bpf_trace
    +
    +#if !defined(_TRACE_BPF_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
    +#define _TRACE_BPF_TRACE_H
    +
    +TRACE_EVENT(bpf_trace_printk,
    +
    + TP_PROTO(const char *bpf_string),
    +
    + TP_ARGS(bpf_string),
    +
    + TP_STRUCT__entry(
    + __string(bpf_string, bpf_string)
    + ),
    +
    + TP_fast_assign(
    + __assign_str(bpf_string, bpf_string);
    + ),
    +
    + TP_printk("%s", __get_str(bpf_string))
    +);
    +
    +#endif /* _TRACE_BPF_TRACE_H */
    +
    +#undef TRACE_INCLUDE_PATH
    +#define TRACE_INCLUDE_PATH .
    +#define TRACE_INCLUDE_FILE bpf_trace
    +
    +/* This part must be outside protection */
    +#include <trace/define_trace.h>