    Subject: [PATCH 06/10] sched/idle: Move busy_cpu accounting to idle callback
    Currently we account nr_busy_cpus in the NO_HZ idle functions. There is
    no reason why nr_busy_cpus should be updated only in NO_HZ_COMMON
    configs. Also, the scheduler can mark a CPU as non-busy as soon as an
    idle-class task starts to run. The scheduler can then mark a CPU as
    busy as soon as it is woken up from idle or a new task is placed on
    its runqueue.
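
    The sketch below is not part of the patch; it is a simplified userspace
    model (plain C11) of the accounting idea: a per-CPU idle flag gates a
    shared busy counter, so the counter only changes on a genuine
    idle<->busy transition and stays clamped to [0, number of LLC CPUs],
    the way atomic_add_unless() clamps it in the patch. All names here
    (llc_nr_busy, NR_LLC_CPUS, add_unless) are invented for the
    illustration.

    /*
     * Simplified userspace model of the busy_cpu accounting above.
     * Illustration only, not kernel code.
     */
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define NR_LLC_CPUS 8                   /* CPUs sharing one LLC (hypothetical) */

    static atomic_int llc_nr_busy;          /* plays the role of sds->nr_busy_cpus */
    static int cpu_is_idle[NR_LLC_CPUS];    /* plays the role of per_cpu(is_idle, cpu) */

    /* Add 'delta' unless the counter already equals 'limit' (cf. atomic_add_unless()). */
    static bool add_unless(atomic_int *v, int delta, int limit)
    {
            int old = atomic_load(v);

            while (old != limit) {
                    if (atomic_compare_exchange_weak(v, &old, old + delta))
                            return true;
            }
            return false;
    }

    /* CPU stops running the idle task (cf. put_prev_task_idle()). */
    static void cpu_becomes_busy(int cpu)
    {
            if (cpu_is_idle[cpu]) {
                    add_unless(&llc_nr_busy, 1, NR_LLC_CPUS);
                    cpu_is_idle[cpu] = 0;
            }
    }

    /* CPU picks the idle task (cf. pick_next_task_idle()). */
    static void cpu_becomes_idle(int cpu)
    {
            add_unless(&llc_nr_busy, -1, 0);
            cpu_is_idle[cpu] = 1;
    }

    int main(void)
    {
            for (int cpu = 0; cpu < NR_LLC_CPUS; cpu++)
                    cpu_is_idle[cpu] = 1;   /* all CPUs start idle, as after update_top_cache_domain() */

            cpu_becomes_busy(2);
            cpu_becomes_busy(2);            /* no transition: counter unchanged */
            cpu_becomes_busy(5);
            cpu_becomes_idle(2);

            printf("busy CPUs in LLC: %d\n", atomic_load(&llc_nr_busy)); /* prints 1 */
            return 0;
    }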

    Cc: LKML <linux-kernel@vger.kernel.org>
    Cc: Gautham R Shenoy <ego@linux.vnet.ibm.com>
    Cc: Parth Shah <parth@linux.ibm.com>
    Cc: Ingo Molnar <mingo@kernel.org>
    Cc: Peter Zijlstra <peterz@infradead.org>
    Cc: Valentin Schneider <valentin.schneider@arm.com>
    Cc: Dietmar Eggemann <dietmar.eggemann@arm.com>
    Cc: Mel Gorman <mgorman@techsingularity.net>
    Cc: Vincent Guittot <vincent.guittot@linaro.org>
    Cc: Rik van Riel <riel@surriel.com>
    Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
    ---
    kernel/sched/fair.c | 6 ++++--
    kernel/sched/idle.c | 29 +++++++++++++++++++++++++++--
    kernel/sched/sched.h | 1 +
    kernel/sched/topology.c | 2 ++
    4 files changed, 34 insertions(+), 4 deletions(-)

    diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
    index f8c98e544418..00bcf1d861b5 100644
    --- a/kernel/sched/fair.c
    +++ b/kernel/sched/fair.c
    @@ -10384,7 +10384,10 @@ static void set_cpu_sd_state_busy(int cpu)
                    goto unlock;
            sd->nohz_idle = 0;

    -       atomic_inc(&sd->shared->nr_busy_cpus);
    +       if (sd && per_cpu(is_idle, cpu)) {
    +               atomic_add_unless(&sd->shared->nr_busy_cpus, 1, per_cpu(sd_llc_size, cpu));
    +               per_cpu(is_idle, cpu) = 0;
    +       }
    unlock:
            rcu_read_unlock();
    }
    @@ -10414,7 +10417,6 @@ static void set_cpu_sd_state_idle(int cpu)
                    goto unlock;
            sd->nohz_idle = 1;

    -       atomic_dec(&sd->shared->nr_busy_cpus);
    unlock:
            rcu_read_unlock();
    }
    diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
    index cc828f3efe71..e624a05e48bd 100644
    --- a/kernel/sched/idle.c
    +++ b/kernel/sched/idle.c
    @@ -425,12 +425,25 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)

    static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
    {
    -#ifdef CONFIG_SCHED_SMT
    +#ifdef CONFIG_SMP
    +       struct sched_domain_shared *sds;
            int cpu = rq->cpu;

    +#ifdef CONFIG_SCHED_SMT
            if (static_branch_likely(&sched_smt_present))
                    set_core_busy(cpu);
    #endif
    +
    +       rcu_read_lock();
    +       sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
    +       if (sds) {
    +               if (per_cpu(is_idle, cpu)) {
    +                       atomic_inc(&sds->nr_busy_cpus);
    +                       per_cpu(is_idle, cpu) = 0;
    +               }
    +       }
    +       rcu_read_unlock();
    +#endif
    }

    static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
    @@ -442,9 +455,21 @@ static void set_next_task_idle(struct rq *rq, struct task_struct *next, bool first)
    struct task_struct *pick_next_task_idle(struct rq *rq)
    {
            struct task_struct *next = rq->idle;
    +#ifdef CONFIG_SMP
    +       struct sched_domain_shared *sds;
    +       int cpu = rq->cpu;

    -       set_next_task_idle(rq, next, true);
    +       rcu_read_lock();
    +       sds = rcu_dereference(per_cpu(sd_llc_shared, cpu));
    +       if (sds) {
    +               atomic_add_unless(&sds->nr_busy_cpus, -1, 0);
    +               per_cpu(is_idle, cpu) = 1;
    +       }

    +       rcu_read_unlock();
    +#endif
    +
    +       set_next_task_idle(rq, next, true);
            return next;
    }

    diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
    index 5c0bd4b0e73a..baf8d9a4cb26 100644
    --- a/kernel/sched/sched.h
    +++ b/kernel/sched/sched.h
    @@ -1483,6 +1483,7 @@ DECLARE_PER_CPU(int, sd_llc_id);
    #ifdef CONFIG_SCHED_SMT
    DECLARE_PER_CPU(int, smt_id);
    #endif
    +DECLARE_PER_CPU(int, is_idle);
    DECLARE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
    DECLARE_PER_CPU(struct sched_domain __rcu *, sd_numa);
    DECLARE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
    diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
    index 8db40c8a6ad0..00e4669bb241 100644
    --- a/kernel/sched/topology.c
    +++ b/kernel/sched/topology.c
    @@ -647,6 +647,7 @@ DEFINE_PER_CPU(int, sd_llc_id);
    #ifdef CONFIG_SCHED_SMT
    DEFINE_PER_CPU(int, smt_id);
    #endif
    +DEFINE_PER_CPU(int, is_idle);
    DEFINE_PER_CPU(struct sched_domain_shared __rcu *, sd_llc_shared);
    DEFINE_PER_CPU(struct sched_domain __rcu *, sd_numa);
    DEFINE_PER_CPU(struct sched_domain __rcu *, sd_asym_packing);
    @@ -673,6 +674,7 @@ static void update_top_cache_domain(int cpu)
    #ifdef CONFIG_SCHED_SMT
            per_cpu(smt_id, cpu) = cpumask_first(cpu_smt_mask(cpu));
    #endif
    +       per_cpu(is_idle, cpu) = 1;
            rcu_assign_pointer(per_cpu(sd_llc_shared, cpu), sds);

            sd = lowest_flag_domain(cpu, SD_NUMA);
    --
    2.18.2