Subject: [tip: timers/core] vdso: Consolidate nanoseconds calculation
    The following commit has been merged into the timers/core branch of tip:

    Commit-ID: 5b26ef660a690e424d9548fdf0565d4172d5d88f
    Gitweb: https://git.kernel.org/tip/5b26ef660a690e424d9548fdf0565d4172d5d88f
    Author: Adrian Hunter <adrian.hunter@intel.com>
    AuthorDate: Mon, 25 Mar 2024 08:40:06 +02:00
    Committer: Thomas Gleixner <tglx@linutronix.de>
    CommitterDate: Mon, 08 Apr 2024 15:03:06 +02:00

    vdso: Consolidate nanoseconds calculation

    Consolidate nanoseconds calculation to simplify and reduce code
    duplication.

    Suggested-by: Thomas Gleixner <tglx@linutronix.de>
    Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
    Link: https://lore.kernel.org/r/20240325064023.2997-3-adrian.hunter@intel.com

    ---
    arch/x86/include/asm/vdso/gettimeofday.h | 17 ++++-----
    lib/vdso/gettimeofday.c | 43 ++++++++++-------------
    2 files changed, 27 insertions(+), 33 deletions(-)
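
    At the two high resolution call sites the patch replaces the open-coded
    conversion with a single helper call. Condensed from the do_hres() hunk
    below, as a quick before/after reference:

        /* before: delta, multiply, add base and shift done by the caller */
        ns = vdso_ts->nsec;
        last = vd->cycle_last;
        ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
        ns = vdso_shift_ns(ns, vd->shift);

        /* after: one helper performs the whole nanoseconds calculation */
        ns = vdso_calc_ns(vd, cycles, vdso_ts->nsec);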

    diff --git a/arch/x86/include/asm/vdso/gettimeofday.h b/arch/x86/include/asm/vdso/gettimeofday.h
    index 8e048ca..5727ded 100644
    --- a/arch/x86/include/asm/vdso/gettimeofday.h
    +++ b/arch/x86/include/asm/vdso/gettimeofday.h
    @@ -300,7 +300,7 @@ static inline bool arch_vdso_cycles_ok(u64 cycles)
    #define vdso_cycles_ok arch_vdso_cycles_ok

    /*
    - * x86 specific delta calculation.
    + * x86 specific calculation of nanoseconds for the current cycle count
    *
    * The regular implementation assumes that clocksource reads are globally
    * monotonic. The TSC can be slightly off across sockets which can cause
    @@ -308,8 +308,8 @@ static inline bool arch_vdso_cycles_ok(u64 cycles)
    * jump.
    *
    * Therefore it needs to be verified that @cycles are greater than
    - * @last. If not then use @last, which is the base time of the current
    - * conversion period.
    + * @vd->cycles_last. If not then use @vd->cycles_last, which is the base
    + * time of the current conversion period.
    *
    * This variant also uses a custom mask because while the clocksource mask of
    * all the VDSO capable clocksources on x86 is U64_MAX, the above code uses
    @@ -317,25 +317,24 @@ static inline bool arch_vdso_cycles_ok(u64 cycles)
    * declares everything with the MSB/Sign-bit set as invalid. Therefore the
    * effective mask is S64_MAX.
    */
    -static __always_inline
    -u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
    +static __always_inline u64 vdso_calc_ns(const struct vdso_data *vd, u64 cycles, u64 base)
    {
    /*
    * Due to the MSB/Sign-bit being used as invalid marker (see
    * arch_vdso_cycles_valid() above), the effective mask is S64_MAX.
    */
    - u64 delta = (cycles - last) & S64_MAX;
    + u64 delta = (cycles - vd->cycle_last) & S64_MAX;

    /*
    * Due to the above mentioned TSC wobbles, filter out negative motion.
    * Per the above masking, the effective sign bit is now bit 62.
    */
    if (unlikely(delta & (1ULL << 62)))
    - return 0;
    + return base >> vd->shift;

    - return delta * mult;
    + return ((delta * vd->mult) + base) >> vd->shift;
    }
    -#define vdso_calc_delta vdso_calc_delta
    +#define vdso_calc_ns vdso_calc_ns

    #endif /* !__ASSEMBLY__ */
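
    The x86 variant above can be exercised outside the kernel to see how the
    S64_MAX mask and the bit-62 test interact. This is a hedged, standalone
    sketch rather than kernel code: sketch_calc_ns() and SKETCH_S64_MAX are
    hypothetical stand-ins for vdso_calc_ns() and the vdso_data fields.

        #include <stdint.h>
        #include <stdio.h>

        /*
         * Standalone model of the x86 vdso_calc_ns() arithmetic.  After
         * masking the subtraction result with S64_MAX, a counter value
         * slightly behind cycle_last (cross-socket TSC skew) shows up as
         * bit 62 being set and is clamped to the base of the current
         * conversion period instead of moving time backwards.
         */
        #define SKETCH_S64_MAX	0x7fffffffffffffffULL

        static uint64_t sketch_calc_ns(uint64_t cycles, uint64_t cycle_last,
                                       uint32_t mult, uint32_t shift,
                                       uint64_t base)
        {
                uint64_t delta = (cycles - cycle_last) & SKETCH_S64_MAX;

                if (delta & (1ULL << 62))	/* negative motion */
                        return base >> shift;

                return ((delta * mult) + base) >> shift;
        }

        int main(void)
        {
                /* one cycle behind cycle_last: clamped, prints 10 (80 >> 3) */
                printf("%llu\n", (unsigned long long)sketch_calc_ns(999, 1000, 5, 3, 80));
                /* normal forward motion: prints 16 ((10 * 5 + 80) >> 3) */
                printf("%llu\n", (unsigned long long)sketch_calc_ns(1010, 1000, 5, 3, 80));
                return 0;
        }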

    diff --git a/lib/vdso/gettimeofday.c b/lib/vdso/gettimeofday.c
    index faccf12..9fa90e0 100644
    --- a/lib/vdso/gettimeofday.c
    +++ b/lib/vdso/gettimeofday.c
    @@ -5,31 +5,32 @@
    #include <vdso/datapage.h>
    #include <vdso/helpers.h>

    -#ifndef vdso_calc_delta
    +#ifndef vdso_calc_ns

    #ifdef VDSO_DELTA_NOMASK
    -# define VDSO_DELTA_MASK(mask) U64_MAX
    +# define VDSO_DELTA_MASK(vd) U64_MAX
    #else
    -# define VDSO_DELTA_MASK(mask) (mask)
    +# define VDSO_DELTA_MASK(vd) (vd->mask)
    +#endif
    +
    +#ifndef vdso_shift_ns
    +static __always_inline u64 vdso_shift_ns(u64 ns, u32 shift)
    +{
    + return ns >> shift;
    +}
    #endif

    /*
    * Default implementation which works for all sane clocksources. That
    * obviously excludes x86/TSC.
    */
    -static __always_inline
    -u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
    +static __always_inline u64 vdso_calc_ns(const struct vdso_data *vd, u64 cycles, u64 base)
    {
    - return ((cycles - last) & VDSO_DELTA_MASK(mask)) * mult;
    -}
    -#endif
    + u64 delta = (cycles - vd->cycle_last) & VDSO_DELTA_MASK(vd);

    -#ifndef vdso_shift_ns
    -static __always_inline u64 vdso_shift_ns(u64 ns, u32 shift)
    -{
    - return ns >> shift;
    + return vdso_shift_ns((delta * vd->mult) + base, vd->shift);
    }
    -#endif
    +#endif /* vdso_calc_ns */

    #ifndef __arch_vdso_hres_capable
    static inline bool __arch_vdso_hres_capable(void)
    @@ -56,10 +57,10 @@ static inline bool vdso_cycles_ok(u64 cycles)
    static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
    struct __kernel_timespec *ts)
    {
    - const struct vdso_data *vd;
    const struct timens_offset *offs = &vdns->offset[clk];
    const struct vdso_timestamp *vdso_ts;
    - u64 cycles, last, ns;
    + const struct vdso_data *vd;
    + u64 cycles, ns;
    u32 seq;
    s64 sec;

    @@ -80,10 +81,7 @@ static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_
    cycles = __arch_get_hw_counter(vd->clock_mode, vd);
    if (unlikely(!vdso_cycles_ok(cycles)))
    return -1;
    - ns = vdso_ts->nsec;
    - last = vd->cycle_last;
    - ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
    - ns = vdso_shift_ns(ns, vd->shift);
    + ns = vdso_calc_ns(vd, cycles, vdso_ts->nsec);
    sec = vdso_ts->sec;
    } while (unlikely(vdso_read_retry(vd, seq)));

    @@ -118,7 +116,7 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
    struct __kernel_timespec *ts)
    {
    const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
    - u64 cycles, last, sec, ns;
    + u64 cycles, sec, ns;
    u32 seq;

    /* Allows to compile the high resolution parts out */
    @@ -151,10 +149,7 @@ static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
    cycles = __arch_get_hw_counter(vd->clock_mode, vd);
    if (unlikely(!vdso_cycles_ok(cycles)))
    return -1;
    - ns = vdso_ts->nsec;
    - last = vd->cycle_last;
    - ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
    - ns = vdso_shift_ns(ns, vd->shift);
    + ns = vdso_calc_ns(vd, cycles, vdso_ts->nsec);
    sec = vdso_ts->sec;
    } while (unlikely(vdso_read_retry(vd, seq)));
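
    The resulting generic path can be modeled the same way in userspace.
    Again a hedged sketch, not the kernel code: struct sketch_vdso_data,
    SKETCH_DELTA_MASK() and SKETCH_DELTA_NOMASK are hypothetical stand-ins
    mirroring struct vdso_data, VDSO_DELTA_MASK() and VDSO_DELTA_NOMASK from
    the hunks above.

        #include <stdint.h>
        #include <stdio.h>

        /* Minimal stand-in for the vdso_data fields used by the helper. */
        struct sketch_vdso_data {
                uint64_t cycle_last;
                uint64_t mask;
                uint32_t mult;
                uint32_t shift;
        };

        /* Mirror of VDSO_DELTA_MASK(): an architecture whose counter never
         * needs the clocksource mask would define the NOMASK switch. */
        #ifdef SKETCH_DELTA_NOMASK
        # define SKETCH_DELTA_MASK(vd)	UINT64_MAX
        #else
        # define SKETCH_DELTA_MASK(vd)	((vd)->mask)
        #endif

        static uint64_t sketch_calc_ns(const struct sketch_vdso_data *vd,
                                       uint64_t cycles, uint64_t base)
        {
                uint64_t delta = (cycles - vd->cycle_last) & SKETCH_DELTA_MASK(vd);

                /* delta * mult yields shifted nanoseconds; the base taken from
                 * the timestamp is already shifted, so shift down once at the end. */
                return ((delta * vd->mult) + base) >> vd->shift;
        }

        int main(void)
        {
                struct sketch_vdso_data vd = {
                        .cycle_last = 1000, .mask = UINT64_MAX, .mult = 5, .shift = 3,
                };

                /* 10 cycles past cycle_last with a shifted base of 80: prints 16 */
                printf("%llu\n", (unsigned long long)sketch_calc_ns(&vd, 1010, 80));
                return 0;
        }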
