    From: "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
    Subject: [PATCH tip/core/rcu 09/18] rcu: Switch synchronize_sched_expedited() to IPI
    Date: 6 Oct 2015
    This commit switches synchronize_sched_expedited() from stop_one_cpu_nowait()
    to smp_call_function_single(), thus moving from an IPI and a pair of
    context switches to an IPI and a single pass through the scheduler.
    Of course, if the scheduler actually does decide to switch to a different
    task, there will still be a pair of context switches, but there would
    likely have been a pair of context switches anyway, just a bit later.

    Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
    ---
    kernel/rcu/tree.c | 32 ++++++++++++++++++++------------
    kernel/rcu/tree.h | 3 ---
    2 files changed, 20 insertions(+), 15 deletions(-)
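
    For reference, a minimal sketch of the resulting flow (commentary
    only, not part of the patch; it uses nothing beyond the calls
    visible in the diff below).  The IPI handler merely records that an
    expedited quiescent state is needed and forces one pass through the
    scheduler; the quiescent state itself is then reported from
    rcu_sched_qs():

        /* Sender: fire-and-forget IPI; the final 0 means "don't wait". */
        ret = smp_call_function_single(cpu,
                                       synchronize_sched_expedited_cpu_stop,
                                       NULL, 0);

        /* Target CPU, IPI (hardirq) context: mark the expedited QS as
         * needed and request a reschedule, i.e. a single scheduler pass. */
        static void synchronize_sched_expedited_cpu_stop(void *data)
        {
                __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);
                resched_cpu(smp_processor_id());
        }

        /* Target CPU, next call to rcu_sched_qs(): the .exp flag is seen,
         * cleared, and rcu_report_exp_rdp() reports the quiescent state. */

    If the IPI cannot be delivered (smp_call_function_single() returns
    nonzero because the CPU is offline), the CPU remains in mask_ofl_ipi
    and its quiescent state is instead reported via the offline path.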

    diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
    index 3e2875b38eae..869e58b92c53 100644
    --- a/kernel/rcu/tree.c
    +++ b/kernel/rcu/tree.c
    @@ -161,6 +161,8 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf);
     static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu);
     static void invoke_rcu_core(void);
     static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
    +static void __maybe_unused rcu_report_exp_rdp(struct rcu_state *rsp,
    +					      struct rcu_data *rdp, bool wake);
     
     /* rcuc/rcub kthread realtime priority */
     #ifdef CONFIG_RCU_KTHREAD_PRIO
    @@ -250,6 +252,12 @@ void rcu_sched_qs(void)
     				       __this_cpu_read(rcu_sched_data.gpnum),
     				       TPS("cpuqs"));
     		__this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
    +		if (__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)) {
    +			__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
    +			rcu_report_exp_rdp(&rcu_sched_state,
    +					   this_cpu_ptr(&rcu_sched_data),
    +					   true);
    +		}
     	}
     }

    @@ -3555,8 +3563,8 @@ static void rcu_report_exp_cpu_mult(struct rcu_state *rsp, struct rcu_node *rnp,
      * Report expedited quiescent state for specified rcu_data (CPU).
      * Caller must hold the root rcu_node's exp_funnel_mutex.
      */
    -static void __maybe_unused rcu_report_exp_rdp(struct rcu_state *rsp,
    -					      struct rcu_data *rdp, bool wake)
    +static void rcu_report_exp_rdp(struct rcu_state *rsp, struct rcu_data *rdp,
    +			       bool wake)
     {
     	rcu_report_exp_cpu_mult(rsp, rdp->mynode, rdp->grpmask, wake);
     }
    @@ -3637,14 +3645,10 @@ static struct rcu_node *exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
     }
     
     /* Invoked on each online non-idle CPU for expedited quiescent state. */
    -static int synchronize_sched_expedited_cpu_stop(void *data)
    +static void synchronize_sched_expedited_cpu_stop(void *data)
     {
    -	struct rcu_data *rdp = data;
    -	struct rcu_state *rsp = rdp->rsp;
    -
    -	/* Report the quiescent state. */
    -	rcu_report_exp_rdp(rsp, rdp, true);
    -	return 0;
    +	__this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, true);
    +	resched_cpu(smp_processor_id());
     }
     
     /*
    @@ -3659,6 +3663,7 @@ static void sync_sched_exp_select_cpus(struct rcu_state *rsp)
     	unsigned long mask_ofl_test;
     	unsigned long mask_ofl_ipi;
     	struct rcu_data *rdp;
    +	int ret;
     	struct rcu_node *rnp;
     
     	sync_exp_reset_tree(rsp);
    @@ -3694,9 +3699,9 @@ static void sync_sched_exp_select_cpus(struct rcu_state *rsp)
     		if (!(mask_ofl_ipi & mask))
     			continue;
     		rdp = per_cpu_ptr(rsp->rda, cpu);
    -		stop_one_cpu_nowait(cpu, synchronize_sched_expedited_cpu_stop,
    -				    rdp, &rdp->exp_stop_work);
    -		mask_ofl_ipi &= ~mask;
    +		ret = smp_call_function_single(cpu, synchronize_sched_expedited_cpu_stop, NULL, 0);
    +		if (!ret)
    +			mask_ofl_ipi &= ~mask;
     	}
     	/* Report quiescent states for those that went offline. */
     	mask_ofl_test |= mask_ofl_ipi;
    @@ -4201,6 +4206,9 @@ int rcu_cpu_notify(struct notifier_block *self,
     			rcu_cleanup_dying_cpu(rsp);
     		break;
     	case CPU_DYING_IDLE:
    +		/* QS for any half-done expedited RCU-sched GP. */
    +		rcu_sched_qs();
    +
     		for_each_rcu_flavor(rsp) {
     			rcu_cleanup_dying_idle_cpu(cpu, rsp);
     		}
    diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
    index 3eee48bcf52b..1b969cef8fe4 100644
    --- a/kernel/rcu/tree.h
    +++ b/kernel/rcu/tree.h
    @@ -324,9 +324,6 @@ struct rcu_data {
     					/* ticks this CPU has handled */
     					/* during and after the last grace */
     					/* period it is aware of. */
    -	struct cpu_stop_work exp_stop_work;
    -					/* Expedited grace-period control */
    -					/* for CPU stopping. */
     
     	/* 2) batch handling */
     	/*
    --
    2.5.2

