    Subject: [RFC PATCH 15/30 v3] Fixup merge between xtime_cache and timekeeping starvation fix
    >  void update_wall_time(void)
    >  {
    > -        cycle_t offset;
    > +        cycle_t cycle_now, offset;
    >
    >          /* Make sure we're fully resumed: */
    >          if (unlikely(timekeeping_suspended))
    >                  return;
    >
    >  #ifdef CONFIG_GENERIC_TIME
    > -        offset = (clocksource_read(clock) - clock->cycle_last) & clock->mask;
    > +        cycle_now = clocksource_read(clock);
    >  #else
    > -        offset = clock->cycle_interval;
    > +        cycle_now = clock->cycle_last + clock->cycle_interval;
    >  #endif
    > +        offset = (cycle_now - clock->cycle_last) & clock->mask;

    It seems this offset addition was made to merge against the colliding
    xtime_cache changes in mainline. However, I don't think it's quite right,
    and it might be causing incorrect time() or vtime() results if NO_HZ is
    enabled.
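
    For context, my understanding is that clocksource_accumulate() in this
    series folds whatever cycles have elapsed since cycle_last into the new
    cycle_accumulated field and advances cycle_last, roughly along the lines
    of the sketch below (my paraphrase of the series, not the exact code):

        static inline void clocksource_accumulate(struct clocksource *cs,
                                                  cycle_t now)
        {
                /* cycles elapsed since the last accumulation point */
                cycle_t offset = (now - cs->cycle_last) & cs->mask;

                cs->cycle_last = now;
                cs->cycle_accumulated += offset;
        }

    If that is right, then once it has run, the offset computed just above
    describes cycles that are already tracked in cycle_accumulated.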

    > +        clocksource_accumulate(clock, cycle_now);
    > +
    >          clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift;
    >
    >          /* normally this loop will run just once, however in the
    >           * case of lost or late ticks, it will accumulate correctly.
    >           */
    > -        while (offset >= clock->cycle_interval) {
    > +        while (clock->cycle_accumulated >= clock->cycle_interval) {
    >                  /* accumulate one interval */
    >                  clock->xtime_nsec += clock->xtime_interval;
    > -                clock->cycle_last += clock->cycle_interval;
    > -                offset -= clock->cycle_interval;
    > +                clock->cycle_accumulated -= clock->cycle_interval;
    >
    >                  if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
    >                          clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
    > @@ -482,7 +485,7 @@ void update_wall_time(void)
    >          }
    >
    >          /* correct the clock when NTP error is too big */
    > -        clocksource_adjust(offset);
    > +        clocksource_adjust(clock->cycle_accumulated);
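
    To spell out the concern, the merged code quoted above boils down to
    roughly the following (heavily condensed, and the comments are mine):

        cycle_now = clocksource_read(clock);
        offset = (cycle_now - clock->cycle_last) & clock->mask;
        clocksource_accumulate(clock, cycle_now);

        /* whole intervals move from cycle_accumulated into xtime */
        while (clock->cycle_accumulated >= clock->cycle_interval) {
                clock->xtime_nsec += clock->xtime_interval;
                clock->cycle_accumulated -= clock->cycle_interval;
        }

        /* the NTP adjustment already uses the remainder ... */
        clocksource_adjust(clock->cycle_accumulated);
        /* ... but the xtime_cache update still uses the stale offset */
        update_xtime_cache(cyc2ns(clock, offset));

    The loop never decrements offset, so by the time it reaches
    update_xtime_cache() it still contains the intervals that were just
    folded into xtime, and it misses any sub-interval remainder carried over
    in cycle_accumulated from earlier calls. As far as I can tell, the value
    that belongs there is clock->cycle_accumulated.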


    I suspect the following is needed, but haven't been able to test it yet.


    Fixup merge between xtime_cache and timekeeping starvation fix.

    Signed-off-by: John Stultz <johnstul@us.ibm.com>
    Signed-off-by: Steven Rostedt <srostedt@redhat.com>
    ---
    kernel/time/timekeeping.c | 5 ++---
    1 file changed, 2 insertions(+), 3 deletions(-)

    Index: linux-compile.git/kernel/time/timekeeping.c
    ===================================================================
    --- linux-compile.git.orig/kernel/time/timekeeping.c 2008-01-15 10:20:15.000000000 -0500
    +++ linux-compile.git/kernel/time/timekeeping.c 2008-01-15 10:20:50.000000000 -0500
    @@ -449,7 +449,7 @@ static void clocksource_adjust(s64 offse
      */
     void update_wall_time(void)
     {
    -        cycle_t cycle_now, offset;
    +        cycle_t cycle_now;

             /* Make sure we're fully resumed: */
             if (unlikely(timekeeping_suspended))
    @@ -460,7 +460,6 @@ void update_wall_time(void)
     #else
             cycle_now = clock->cycle_last + clock->cycle_interval;
     #endif
    -        offset = (cycle_now - clock->cycle_last) & clock->mask;
             clocksource_accumulate(clock, cycle_now);

             clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift;
    @@ -491,7 +490,7 @@ void update_wall_time(void)
             xtime.tv_nsec = (s64)clock->xtime_nsec >> clock->shift;
             clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;

    -        update_xtime_cache(cyc2ns(clock, offset));
    +        update_xtime_cache(cyc2ns(clock, clock->cycle_accumulated));

             /* check to see if there is a new clocksource to use */
             change_clocksource();
    --
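
    For reference, the mainline helper whose call site is being fixed up here
    looks roughly like the following (my paraphrase of the 2.6.24-era
    kernel/time/timekeeping.c, not something added by this series):

        static struct timespec xtime_cache;

        static void update_xtime_cache(u64 nsec)
        {
                /* cached xtime plus the ns not yet folded into xtime */
                xtime_cache = xtime;
                timespec_add_ns(&xtime_cache, nsec);
        }

    Since xtime_cache is meant to be xtime plus whatever has not yet been
    accumulated into it, the argument needs to be the post-accumulation
    remainder, which with this series is clock->cycle_accumulated rather
    than the old offset value.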

