 
Subject: [PATCH 4.11 78/88] sched/fair, cpumask: Export for_each_cpu_wrap()
    4.11-stable review patch.  If anyone has any objections, please let me know.

    ------------------

    From: Peter Zijlstra <peterz@infradead.org>

    commit c6508a39640b9a27fc2bc10cb708152672c82045 upstream.

    commit c743f0a5c50f2fcbc628526279cfa24f3dabe182 upstream.

More users of for_each_cpu_wrap() have appeared. Promote the construct
to the generic cpumask interface.

The implementation is slightly modified to reduce the number of
arguments.
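
To illustrate the reduced interface, a call site changes roughly as
follows (mask, target and visit() are placeholder names for this
sketch, not code from the patch):

	/* Before: each caller had to carry a scratch 'wrap' flag. */
	int cpu, wrap;

	for_each_cpu_wrap(cpu, mask, target, wrap)
		visit(cpu);

	/* After: the wrap state is kept inside cpumask_next_wrap(). */
	int cpu;

	for_each_cpu_wrap(cpu, mask, target)
		visit(cpu);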

    Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
    Cc: Lauro Ramos Venancio <lvenanci@redhat.com>
    Cc: Linus Torvalds <torvalds@linux-foundation.org>
    Cc: Mike Galbraith <efault@gmx.de>
    Cc: Peter Zijlstra <peterz@infradead.org>
    Cc: Rik van Riel <riel@redhat.com>
    Cc: Thomas Gleixner <tglx@linutronix.de>
    Cc: lwang@redhat.com
    Link: http://lkml.kernel.org/r/20170414122005.o35me2h5nowqkxbv@hirez.programming.kicks-ass.net
    Signed-off-by: Ingo Molnar <mingo@kernel.org>
    Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>

    ---
include/linux/cpumask.h |   17 +++++++++++++++++
kernel/sched/fair.c     |   45 ++++-----------------------------------------
lib/cpumask.c           |   32 ++++++++++++++++++++++++++++++++
3 files changed, 53 insertions(+), 41 deletions(-)

    --- a/include/linux/cpumask.h
    +++ b/include/linux/cpumask.h
@@ -236,6 +236,23 @@ unsigned int cpumask_local_spread(unsign
 		(cpu) = cpumask_next_zero((cpu), (mask)),	\
 		(cpu) < nr_cpu_ids;)
 
+extern int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap);
+
+/**
+ * for_each_cpu_wrap - iterate over every cpu in a mask, starting at a specified location
+ * @cpu: the (optionally unsigned) integer iterator
+ * @mask: the cpumask pointer
+ * @start: the start location
+ *
+ * The implementation does not assume any bit in @mask is set (including @start).
+ *
+ * After the loop, cpu is >= nr_cpu_ids.
+ */
+#define for_each_cpu_wrap(cpu, mask, start)					\
+	for ((cpu) = cpumask_next_wrap((start)-1, (mask), (start), false);	\
+	     (cpu) < nr_cpumask_bits;						\
+	     (cpu) = cpumask_next_wrap((cpu), (mask), (start), true))
+
 /**
  * for_each_cpu_and - iterate over every cpu in both masks
  * @cpu: the (optionally unsigned) integer iterator
    --- a/kernel/sched/fair.c
    +++ b/kernel/sched/fair.c
@@ -5615,43 +5615,6 @@ find_idlest_cpu(struct sched_group *grou
 	return shallowest_idle_cpu != -1 ? shallowest_idle_cpu : least_loaded_cpu;
 }
 
-/*
- * Implement a for_each_cpu() variant that starts the scan at a given cpu
- * (@start), and wraps around.
- *
- * This is used to scan for idle CPUs; such that not all CPUs looking for an
- * idle CPU find the same CPU. The down-side is that tasks tend to cycle
- * through the LLC domain.
- *
- * Especially tbench is found sensitive to this.
- */
-
-static int cpumask_next_wrap(int n, const struct cpumask *mask, int start, int *wrapped)
-{
-	int next;
-
-again:
-	next = find_next_bit(cpumask_bits(mask), nr_cpumask_bits, n+1);
-
-	if (*wrapped) {
-		if (next >= start)
-			return nr_cpumask_bits;
-	} else {
-		if (next >= nr_cpumask_bits) {
-			*wrapped = 1;
-			n = -1;
-			goto again;
-		}
-	}
-
-	return next;
-}
-
-#define for_each_cpu_wrap(cpu, mask, start, wrap)				\
-	for ((wrap) = 0, (cpu) = (start)-1;					\
-		(cpu) = cpumask_next_wrap((cpu), (mask), (start), &(wrap)),	\
-		(cpu) < nr_cpumask_bits; )
-
 #ifdef CONFIG_SCHED_SMT
 
 static inline void set_idle_cores(int cpu, int val)
@@ -5711,7 +5674,7 @@ unlock:
 static int select_idle_core(struct task_struct *p, struct sched_domain *sd, int target)
 {
 	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
-	int core, cpu, wrap;
+	int core, cpu;
 
 	if (!static_branch_likely(&sched_smt_present))
 		return -1;
@@ -5721,7 +5684,7 @@ static int select_idle_core(struct task_
 
 	cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed);
 
-	for_each_cpu_wrap(core, cpus, target, wrap) {
+	for_each_cpu_wrap(core, cpus, target) {
 		bool idle = true;
 
 		for_each_cpu(cpu, cpu_smt_mask(core)) {
@@ -5787,7 +5750,7 @@ static int select_idle_cpu(struct task_s
 	u64 avg_cost, avg_idle = this_rq()->avg_idle;
 	u64 time, cost;
 	s64 delta;
-	int cpu, wrap;
+	int cpu;
 
 	this_sd = rcu_dereference(*this_cpu_ptr(&sd_llc));
 	if (!this_sd)
@@ -5804,7 +5767,7 @@ static int select_idle_cpu(struct task_s
 
 	time = local_clock();
 
-	for_each_cpu_wrap(cpu, sched_domain_span(sd), target, wrap) {
+	for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
 		if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
 			continue;
 		if (idle_cpu(cpu))
    --- a/lib/cpumask.c
    +++ b/lib/cpumask.c
@@ -43,6 +43,38 @@ int cpumask_any_but(const struct cpumask
 }
 EXPORT_SYMBOL(cpumask_any_but);
 
+/**
+ * cpumask_next_wrap - helper to implement for_each_cpu_wrap
+ * @n: the cpu prior to the place to search
+ * @mask: the cpumask pointer
+ * @start: the start point of the iteration
+ * @wrap: assume @n crossing @start terminates the iteration
+ *
+ * Returns >= nr_cpu_ids on completion
+ *
+ * Note: the @wrap argument is required for the start condition when
+ * we cannot assume @start is set in @mask.
+ */
+int cpumask_next_wrap(int n, const struct cpumask *mask, int start, bool wrap)
+{
+	int next;
+
+again:
+	next = cpumask_next(n, mask);
+
+	if (wrap && n < start && next >= start) {
+		return nr_cpumask_bits;
+
+	} else if (next >= nr_cpumask_bits) {
+		wrap = true;
+		n = -1;
+		goto again;
+	}
+
+	return next;
+}
+EXPORT_SYMBOL(cpumask_next_wrap);
+
 /* These are not inline because of header tangles. */
 #ifdef CONFIG_CPUMASK_OFFSTACK
 /**
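
The wrap semantics above can be exercised with a small standalone model
(userspace C; NBITS, the 8-bit mask and all function names here are
made up for illustration, so this is a sketch of the control flow
rather than the kernel implementation):

	#include <stdio.h>

	#define NBITS 8

	/* Userspace stand-in for cpumask_next(): next set bit after n. */
	static int next_set_bit(unsigned int mask, int n)
	{
		int i;

		for (i = n + 1; i < NBITS; i++)
			if (mask & (1u << i))
				return i;
		return NBITS;
	}

	/* Same control flow as cpumask_next_wrap() in the patch. */
	static int next_wrap(int n, unsigned int mask, int start, int wrap)
	{
		int next;

	again:
		next = next_set_bit(mask, n);

		if (wrap && n < start && next >= start)
			return NBITS;
		if (next >= NBITS) {
			wrap = 1;
			n = -1;
			goto again;
		}
		return next;
	}

	int main(void)
	{
		unsigned int mask = 0x0a;	/* bits 1 and 3 set */
		int start = 3;			/* iteration starts here */
		int cpu;

		/* Expands like for_each_cpu_wrap(cpu, mask, start). */
		for (cpu = next_wrap(start - 1, mask, start, 0);
		     cpu < NBITS;
		     cpu = next_wrap(cpu, mask, start, 1))
			printf("cpu %d\n", cpu);	/* prints 3, then 1 */

		return 0;
	}

Starting at 3 it visits 3 and then wraps around to 1; starting at a cpu
whose bit is clear (say 2) it still visits 3 and 1 and terminates,
which is the "does not assume any bit in @mask is set" property the
header comment documents.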
