Date: 2008-01-09
From: Steven Rostedt
Subject: [RFC PATCH 16/22 -v2] add get_monotonic_cycles

The latency tracer needs a way to get an accurate time
without grabbing any locks. Locks themselves might call
the latency tracer and cause, at best, a slowdown.

This patch adds get_monotonic_cycles(), which returns cycles
from a reliable clock source in a monotonic fashion.
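
For illustration only (not part of this patch), a tracer could timestamp a
section of code with the new interface roughly as sketched below;
measure_section() is a made-up caller:

#include <linux/clocksource.h>
#include <linux/kernel.h>

static void measure_section(void)
{
	cycle_t t0, t1;
	unsigned long delta_us;

	t0 = get_monotonic_cycles();	/* lock-free, safe in any context */
	/* ... code being measured ... */
	t1 = get_monotonic_cycles();

	/* convert the monotonic cycle delta to microseconds */
	delta_us = cycles_to_usecs(t1 - t0);
	printk(KERN_DEBUG "section took %lu us\n", delta_us);
}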

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
---
 include/linux/clocksource.h |  3 ++
 kernel/time/timekeeping.c   | 48 ++++++++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 51 insertions(+)

Index: linux-compile-i386.git/kernel/time/timekeeping.c
===================================================================
--- linux-compile-i386.git.orig/kernel/time/timekeeping.c	2008-01-09 14:27:26.000000000 -0500
+++ linux-compile-i386.git/kernel/time/timekeeping.c	2008-01-09 14:34:40.000000000 -0500
@@ -103,6 +103,54 @@ static inline void __get_realtime_clock_
 	timespec_add_ns(ts, nsecs);
 }

+cycle_t notrace get_monotonic_cycles(void)
+{
+	cycle_t cycle_now, cycle_delta, cycle_raw, cycle_last;
+
+	do {
+		/*
+		 * cycle_raw and cycle_last can change on another
+		 * CPU, and we need the delta calculation of cycle_now
+		 * and cycle_last to happen atomically, as well as the
+		 * adding to cycle_raw. We don't need to grab any
+		 * locks, we just keep trying until we get all the
+		 * calculations together in one state.
+		 *
+		 * In fact, we __can't__ grab any locks. This function
+		 * is called from the latency_tracer, which can be
+		 * called anywhere. If we grab any locks (including
+		 * seqlocks), we risk putting ourselves into a deadlock.
+		 */
+		cycle_raw = clock->cycle_raw;
+		cycle_last = clock->cycle_last;
+
+		/* read clocksource: */
+		cycle_now = clocksource_read(clock);
+
+		/* calculate the delta since the last update_wall_time: */
+		cycle_delta = (cycle_now - cycle_last) & clock->mask;
+
+	} while (cycle_raw != clock->cycle_raw ||
+		 cycle_last != clock->cycle_last);
+
+	return cycle_raw + cycle_delta;
+}
+
+unsigned long notrace cycles_to_usecs(cycle_t cycles)
+{
+	u64 ret = cyc2ns(clock, cycles);
+
+	ret += NSEC_PER_USEC/2; /* For rounding in do_div() */
+	do_div(ret, NSEC_PER_USEC);
+
+	return ret;
+}
+
+cycle_t notrace usecs_to_cycles(unsigned long usecs)
+{
+	return ns2cyc(clock, (u64)usecs * 1000);
+}
+
 /**
  * getnstimeofday - Returns the time of day in a timespec
  * @ts:		pointer to the timespec to be set
Index: linux-compile-i386.git/include/linux/clocksource.h
===================================================================
--- linux-compile-i386.git.orig/include/linux/clocksource.h	2008-01-09 14:27:51.000000000 -0500
+++ linux-compile-i386.git/include/linux/clocksource.h	2008-01-09 14:29:44.000000000 -0500
@@ -273,6 +273,9 @@ extern int clocksource_register(struct c
 extern struct clocksource* clocksource_get_next(void);
 extern void clocksource_change_rating(struct clocksource *cs, int rating);
 extern void clocksource_resume(void);
+extern cycle_t get_monotonic_cycles(void);
+extern unsigned long cycles_to_usecs(cycle_t cycles);
+extern cycle_t usecs_to_cycles(unsigned long usecs);

 /* used to initialize clock */
 extern struct clocksource clocksource_jiffies;
--
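
As a side note, the rounding done in cycles_to_usecs() can be seen in
isolation with a stand-alone user-space sketch. It assumes the usual
clocksource scaling, where cyc2ns() is essentially (cycles * mult) >> shift;
the mult/shift values below are invented for the example:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_USEC 1000ULL

/* same scaling as the kernel's cyc2ns(): nanoseconds = (cycles * mult) >> shift */
static uint64_t cyc2ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
	return (cycles * mult) >> shift;
}

int main(void)
{
	/* hypothetical clocksource: mult/shift chosen so one cycle is ~10 ns */
	uint32_t mult = 10 << 10;
	uint32_t shift = 10;
	uint64_t cycles = 123456;

	uint64_t ns = cyc2ns(cycles, mult, shift);

	/*
	 * Round to the nearest microsecond, as cycles_to_usecs() does by
	 * adding NSEC_PER_USEC/2 before the do_div().
	 */
	uint64_t us = (ns + NSEC_PER_USEC / 2) / NSEC_PER_USEC;

	printf("%llu cycles -> %llu ns -> %llu us\n",
	       (unsigned long long)cycles,
	       (unsigned long long)ns,
	       (unsigned long long)us);
	return 0;
}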

