Subject: [tip: sched/core] sched/cputime: Spare a seqcount lock/unlock cycle on context switch

    The following commit has been merged into the sched/core branch of tip:

    Commit-ID: 8d495477d62e4397207f22a432fcaa86d9f2bc2d
    Gitweb: https://git.kernel.org/tip/8d495477d62e4397207f22a432fcaa86d9f2bc2d
    Author: Frederic Weisbecker <frederic@kernel.org>
    AuthorDate: Thu, 03 Oct 2019 18:17:45 +02:00
    Committer: Ingo Molnar <mingo@kernel.org>
    CommitterDate: Wed, 09 Oct 2019 12:39:26 +02:00

    sched/cputime: Spare a seqcount lock/unlock cycle on context switch

On context switch we lock the vtime seqcount of the scheduling-out
task twice:

* In vtime_common_task_switch(), when we flush the pending vtime through
vtime_account_system()

* In arch_vtime_task_switch(), to reset the vtime state.
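
Concretely, the pre-patch switch-out path does roughly this (a
simplified sketch; the idle and guest cases are elided):

        /* 1st lock/unlock cycle: flush the pending vtime */
        write_seqcount_begin(&prev->vtime.seqcount);
        vtime_account_system(prev, &prev->vtime);
        write_seqcount_end(&prev->vtime.seqcount);

        /* 2nd lock/unlock cycle: reset the vtime state */
        write_seqcount_begin(&prev->vtime.seqcount);
        prev->vtime.state = VTIME_INACTIVE;
        write_seqcount_end(&prev->vtime.seqcount);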

This is pointless: both actions can be performed without unlocking
and relocking in the middle. The only reason the two steps are
separated is to share a very small amount of common code between
CONFIG_VIRT_CPU_ACCOUNTING_GEN and CONFIG_VIRT_CPU_ACCOUNTING_NATIVE.

Performance in this fast path is definitely a priority over artificial
code factorization, so split the task switch code between GEN and
NATIVE and share only the parts that can run under a single
seqcount-locked block.
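
After the split, the GEN flavor performs both steps under one
critical section, roughly as follows (this mirrors the new
vtime_task_switch_generic() in the patch below):

        write_seqcount_begin(&prev->vtime.seqcount);
        if (is_idle_task(prev))
                vtime_account_idle(prev);
        else
                __vtime_account_kernel(prev, &prev->vtime);
        prev->vtime.state = VTIME_INACTIVE;
        write_seqcount_end(&prev->vtime.seqcount);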

As a side effect, vtime_account_idle() is now included in the seqcount
protection. This is welcome preparation for properly supporting
kcpustat under vtime in the future and fetching CPUTIME_IDLE without
races.
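
A reader could then sample the idle time with the usual seqcount
retry loop and never observe the flush and the state reset half-done.
A rough sketch (a hypothetical helper, not part of this patch;
kcpustat_cpu() and CPUTIME_IDLE are the existing kernel_stat.h
accessors):

        static u64 fetch_idle_cputime(int cpu, struct vtime *vtime)
        {
                unsigned int seq;
                u64 idle;

                do {
                        seq = read_seqcount_begin(&vtime->seqcount);
                        idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
                } while (read_seqcount_retry(&vtime->seqcount, seq));

                return idle;
        }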

    Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
    Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
    Cc: Linus Torvalds <torvalds@linux-foundation.org>
    Cc: Peter Zijlstra <peterz@infradead.org>
    Cc: Rik van Riel <riel@redhat.com>
    Cc: Thomas Gleixner <tglx@linutronix.de>
    Cc: Wanpeng Li <wanpengli@tencent.com>
    Cc: Yauheni Kaliuta <yauheni.kaliuta@redhat.com>
    Link: https://lkml.kernel.org/r/20191003161745.28464-3-frederic@kernel.org
    Signed-off-by: Ingo Molnar <mingo@kernel.org>
    ---
 include/linux/vtime.h  | 32 ++++++++++++++++----------------
 kernel/sched/cputime.c | 30 +++++++++++++++++++-----------
 2 files changed, 35 insertions(+), 27 deletions(-)

    diff --git a/include/linux/vtime.h b/include/linux/vtime.h
    index 2fd247f..d9160ab 100644
    --- a/include/linux/vtime.h
    +++ b/include/linux/vtime.h
@@ -14,8 +14,12 @@ struct task_struct;
  * vtime_accounting_cpu_enabled() definitions/declarations
  */
 #if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)
+
 static inline bool vtime_accounting_cpu_enabled(void) { return true; }
+extern void vtime_task_switch(struct task_struct *prev);
+
 #elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN)
+
 /*
  * Checks if vtime is enabled on some CPU. Cputime readers want to be careful
  * in that case and compute the tickless cputime.
@@ -36,33 +40,29 @@ static inline bool vtime_accounting_cpu_enabled(void)
 
         return false;
 }
+
+extern void vtime_task_switch_generic(struct task_struct *prev);
+
+static inline void vtime_task_switch(struct task_struct *prev)
+{
+        if (vtime_accounting_cpu_enabled())
+                vtime_task_switch_generic(prev);
+}
+
 #else /* !CONFIG_VIRT_CPU_ACCOUNTING */
+
 static inline bool vtime_accounting_cpu_enabled(void) { return false; }
-#endif
+static inline void vtime_task_switch(struct task_struct *prev) { }
 
+#endif
 
 /*
  * Common vtime APIs
  */
 #ifdef CONFIG_VIRT_CPU_ACCOUNTING
-
-#ifdef __ARCH_HAS_VTIME_TASK_SWITCH
-extern void vtime_task_switch(struct task_struct *prev);
-#else
-extern void vtime_common_task_switch(struct task_struct *prev);
-static inline void vtime_task_switch(struct task_struct *prev)
-{
-        if (vtime_accounting_cpu_enabled())
-                vtime_common_task_switch(prev);
-}
-#endif /* __ARCH_HAS_VTIME_TASK_SWITCH */
-
 extern void vtime_account_kernel(struct task_struct *tsk);
 extern void vtime_account_idle(struct task_struct *tsk);
-
 #else /* !CONFIG_VIRT_CPU_ACCOUNTING */
-
-static inline void vtime_task_switch(struct task_struct *prev) { }
 static inline void vtime_account_kernel(struct task_struct *tsk) { }
 #endif /* !CONFIG_VIRT_CPU_ACCOUNTING */

    diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
    index b45932e..cef23c2 100644
    --- a/kernel/sched/cputime.c
    +++ b/kernel/sched/cputime.c
@@ -405,9 +405,10 @@ static inline void irqtime_account_process_tick(struct task_struct *p, int user_
 /*
  * Use precise platform statistics if available:
  */
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
+
 # ifndef __ARCH_HAS_VTIME_TASK_SWITCH
-void vtime_common_task_switch(struct task_struct *prev)
+void vtime_task_switch(struct task_struct *prev)
 {
         if (is_idle_task(prev))
                 vtime_account_idle(prev);
@@ -418,10 +419,7 @@ void vtime_common_task_switch(struct task_struct *prev)
         arch_vtime_task_switch(prev);
 }
 # endif
-#endif /* CONFIG_VIRT_CPU_ACCOUNTING */
-
 
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
 /*
  * Archs that account the whole time spent in the idle task
  * (outside irq) as idle time can rely on this and just implement
@@ -731,6 +729,16 @@ static void vtime_account_guest(struct task_struct *tsk,
         }
 }
 
+static void __vtime_account_kernel(struct task_struct *tsk,
+                                   struct vtime *vtime)
+{
+        /* We might have scheduled out from guest path */
+        if (tsk->flags & PF_VCPU)
+                vtime_account_guest(tsk, vtime);
+        else
+                vtime_account_system(tsk, vtime);
+}
+
 void vtime_account_kernel(struct task_struct *tsk)
 {
         struct vtime *vtime = &tsk->vtime;
@@ -739,11 +747,7 @@ void vtime_account_kernel(struct task_struct *tsk)
                 return;
 
         write_seqcount_begin(&vtime->seqcount);
-        /* We might have scheduled out from guest path */
-        if (tsk->flags & PF_VCPU)
-                vtime_account_guest(tsk, vtime);
-        else
-                vtime_account_system(tsk, vtime);
+        __vtime_account_kernel(tsk, vtime);
         write_seqcount_end(&vtime->seqcount);
 }
 
@@ -804,11 +808,15 @@ void vtime_account_idle(struct task_struct *tsk)
         account_idle_time(get_vtime_delta(&tsk->vtime));
 }
 
-void arch_vtime_task_switch(struct task_struct *prev)
+void vtime_task_switch_generic(struct task_struct *prev)
 {
         struct vtime *vtime = &prev->vtime;
 
         write_seqcount_begin(&vtime->seqcount);
+        if (is_idle_task(prev))
+                vtime_account_idle(prev);
+        else
+                __vtime_account_kernel(prev, vtime);
         vtime->state = VTIME_INACTIVE;
         write_seqcount_end(&vtime->seqcount);