    Date: 23 Jan 2008
    From: Steven Rostedt <srostedt@redhat.com>
    Subject: [PATCH 16/20 -v5] trace generic call to schedule switch
    This patch adds hooks into the schedule switch tracing so that other
    latency tracers can hook into the schedule switches.
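
    As a rough illustration (not part of this patch), here is a minimal sketch
    of how a later latency tracer might consume the new interface; the probe
    and ops names below are made up for the example, and since the callback is
    invoked from the scheduler's switch path it must be notrace and must not
    sleep:

	#include <linux/sched.h>
	#include "tracer.h"	/* struct tracer_switch_ops, register_tracer_switch() */

	/* hypothetical consumer of the switch hook */
	static void notrace my_switch_probe(void *private,
					    struct task_struct *prev,
					    struct task_struct *next)
	{
		/* record whatever latency data this tracer cares about */
	}

	static struct tracer_switch_ops my_switch_ops = {
		.func		= my_switch_probe,
		.private	= NULL,	/* opaque cookie handed back to the probe */
	};

	static void my_tracer_start(void)
	{
		register_tracer_switch(&my_switch_ops);
	}

	static void my_tracer_stop(void)
	{
		unregister_tracer_switch(&my_switch_ops);
	}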

    Signed-off-by: Steven Rostedt <srostedt@redhat.com>
    ---
    lib/tracing/trace_sched_switch.c | 123 +++++++++++++++++++++++++++++++++------
    lib/tracing/tracer.h | 14 ++++
    2 files changed, 119 insertions(+), 18 deletions(-)
    Index: linux-mcount.git/lib/tracing/tracer.h
    ===================================================================
    --- linux-mcount.git.orig/lib/tracing/tracer.h 2008-01-23 10:27:39.000000000 -0500
    +++ linux-mcount.git/lib/tracing/tracer.h 2008-01-23 10:27:42.000000000 -0500
    @@ -112,4 +112,18 @@ static inline notrace cycle_t now(void)
     	return get_monotonic_cycles();
     }

    +#ifdef CONFIG_CONTEXT_SWITCH_TRACER
    +typedef void (*tracer_switch_func_t)(void *private,
    +                                     struct task_struct *prev,
    +                                     struct task_struct *next);
    +struct tracer_switch_ops {
    +	tracer_switch_func_t func;
    +	void *private;
    +	struct tracer_switch_ops *next;
    +};
    +
    +extern int register_tracer_switch(struct tracer_switch_ops *ops);
    +extern int unregister_tracer_switch(struct tracer_switch_ops *ops);
    +#endif /* CONFIG_CONTEXT_SWITCH_TRACER */
    +
     #endif /* _LINUX_MCOUNT_TRACER_H */
    Index: linux-mcount.git/lib/tracing/trace_sched_switch.c
    ===================================================================
    --- linux-mcount.git.orig/lib/tracing/trace_sched_switch.c 2008-01-23 10:27:39.000000000 -0500
    +++ linux-mcount.git/lib/tracing/trace_sched_switch.c 2008-01-23 10:27:42.000000000 -0500
    @@ -16,33 +16,21 @@

     static struct tracing_trace *tracer_trace;
     static int trace_enabled __read_mostly;
    +static DEFINE_SPINLOCK(sched_switch_func_lock);

    -static notrace void sched_switch_callback(const struct marker *mdata,
    -                                          void *private_data,
    -                                          const char *format, ...)
    +static void notrace sched_switch_func(void *private,
    +                                      struct task_struct *prev,
    +                                      struct task_struct *next)
     {
    -	struct tracing_trace **p = mdata->private;
    -	struct tracing_trace *tr = *p;
    +	struct tracing_trace **ptr = private;
    +	struct tracing_trace *tr = *ptr;
     	struct tracing_trace_cpu *data;
    -	struct task_struct *prev;
    -	struct task_struct *next;
     	unsigned long flags;
    -	va_list ap;
     	int cpu;

    -	if (likely(!atomic_read(&trace_record_cmdline)))
    -		return;
    -
    -	tracing_record_cmdline(current);
    -
     	if (likely(!trace_enabled))
     		return;

    -	va_start(ap, format);
    -	prev = va_arg(ap, typeof(prev));
    -	next = va_arg(ap, typeof(next));
    -	va_end(ap);
    -
     	raw_local_irq_save(flags);
     	cpu = raw_smp_processor_id();
     	data = tr->data[cpu];
    @@ -55,6 +43,105 @@ static notrace void sched_switch_callbac
     	raw_local_irq_restore(flags);
     }

    +static struct tracer_switch_ops sched_switch_ops __read_mostly =
    +{
    +	.func = sched_switch_func,
    +	.private = &tracer_trace,
    +};
    +
    +static tracer_switch_func_t tracer_switch_func __read_mostly =
    +	sched_switch_func;
    +
    +static struct tracer_switch_ops *tracer_switch_func_ops __read_mostly =
    +	&sched_switch_ops;
    +
    +static void notrace sched_switch_func_loop(void *private,
    +                                           struct task_struct *prev,
    +                                           struct task_struct *next)
    +{
    +	struct tracer_switch_ops *ops = tracer_switch_func_ops;
    +
    +	for (; ops != NULL; ops = ops->next)
    +		ops->func(ops->private, prev, next);
    +}
    +
    +notrace int register_tracer_switch(struct tracer_switch_ops *ops)
    +{
    +	unsigned long flags;
    +
    +	spin_lock_irqsave(&sched_switch_func_lock, flags);
    +	ops->next = tracer_switch_func_ops;
    +	smp_wmb();
    +	tracer_switch_func_ops = ops;
    +
    +	if (ops->next == &sched_switch_ops)
    +		tracer_switch_func = sched_switch_func_loop;
    +
    +	spin_unlock_irqrestore(&sched_switch_func_lock, flags);
    +
    +	return 0;
    +}
    +
    +notrace int unregister_tracer_switch(struct tracer_switch_ops *ops)
    +{
    +	unsigned long flags;
    +	struct tracer_switch_ops **p = &tracer_switch_func_ops;
    +	int ret;
    +
    +	spin_lock_irqsave(&sched_switch_func_lock, flags);
    +
    +	/*
    +	 * If the sched_switch is the only one left, then
    +	 * only call that function.
    +	 */
    +	if (*p == ops && ops->next == &sched_switch_ops) {
    +		tracer_switch_func = sched_switch_func;
    +		tracer_switch_func_ops = &sched_switch_ops;
    +		goto out;
    +	}
    +
    +	for (; *p != &sched_switch_ops; p = &(*p)->next)
    +		if (*p == ops)
    +			break;
    +
    +	if (*p != ops) {
    +		ret = -1;
    +		goto out;
    +	}
    +
    +	*p = (*p)->next;
    +
    + out:
    +	spin_unlock_irqrestore(&sched_switch_func_lock, flags);
    +
    +	return 0;
    +}
    +
    +static notrace void sched_switch_callback(const struct marker *mdata,
    +                                          void *private_data,
    +                                          const char *format, ...)
    +{
    +	struct task_struct *prev;
    +	struct task_struct *next;
    +	va_list ap;
    +
    +	if (likely(!atomic_read(&trace_record_cmdline)))
    +		return;
    +
    +	tracing_record_cmdline(current);
    +
    +	va_start(ap, format);
    +	prev = va_arg(ap, typeof(prev));
    +	next = va_arg(ap, typeof(next));
    +	va_end(ap);
    +
    +	/*
    +	 * If tracer_switch_func only points to the local
    +	 * switch func, it still needs the ptr passed to it.
    +	 */
    +	tracer_switch_func(mdata->private, prev, next);
    +}
    +
     static notrace void sched_switch_reset(struct tracing_trace *tr)
     {
     	int cpu;
    --
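
    As an aside (not part of the patch), the registration scheme above boils
    down to: the hot path always calls a single function pointer, which stays
    aimed at the built-in callback until a second consumer registers, at which
    point it is flipped to a loop that walks the ops list. A toy userspace
    sketch of that pattern, with the patch's locking and memory barriers
    omitted and all names invented for the example:

	#include <stdio.h>

	struct ops {
		void (*func)(void *private);
		void *private;
		struct ops *next;
	};

	static void builtin_func(void *private)
	{
		printf("builtin: %s\n", (char *)private);
	}

	static struct ops builtin_ops = { .func = builtin_func, .private = "ctx" };

	/* list of registered entries; starts with only the built-in one */
	static struct ops *ops_list = &builtin_ops;

	/* slow path: walk every registered entry */
	static void loop_func(void *private)
	{
		struct ops *o;

		(void)private;
		for (o = ops_list; o != NULL; o = o->next)
			o->func(o->private);
	}

	/*
	 * What the hot path calls; points straight at builtin_func until a
	 * second entry is registered.
	 */
	static void (*dispatch)(void *private) = builtin_func;

	static void register_ops(struct ops *o)
	{
		o->next = ops_list;
		ops_list = o;
		if (o->next == &builtin_ops)	/* first extra entry: switch to the loop */
			dispatch = loop_func;
	}

	static void extra_func(void *private)
	{
		(void)private;
		printf("extra\n");
	}

	static struct ops extra_ops = { .func = extra_func };

	int main(void)
	{
		dispatch(builtin_ops.private);	/* direct call, no list walk */
		register_ops(&extra_ops);
		dispatch(builtin_ops.private);	/* loop_func: extra, then builtin */
		return 0;
	}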

