From: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Subject: [PATCH tip/core/rcu 04/15] rcu: Create reasonable API for do_exit() TASKS_RCU processing
Date: 2017-07-24
Currently, the exit-time support for TASKS_RCU is open-coded in do_exit().
This commit creates exit_tasks_rcu_start() and exit_tasks_rcu_finish()
APIs for do_exit() use. This has the benefit of confining the use of the
tasks_rcu_exit_srcu variable to one file, allowing it to become static.

Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
---
 include/linux/rcupdate.h |  7 ++++---
 include/linux/sched.h    |  5 +++--
 kernel/exit.c            |  7 ++-----
 kernel/rcu/update.c      | 18 +++++++++++++++++-
 4 files changed, 26 insertions(+), 11 deletions(-)
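
For orientation, here is a condensed sketch of the synchronization that the two new calls take part in. It is illustrative only and not part of this patch: the example_* function names are invented, while exit_tasks_rcu_start(), exit_tasks_rcu_finish(), tasks_rcu_exit_srcu, and synchronize_srcu() are the real identifiers touched below. An exiting task enters an SRCU read-side critical section covering the tail of do_exit(), and the Tasks RCU machinery waits for all such tasks by doing a synchronize_srcu() on the same srcu_struct.

/* Reader side: an exiting task brackets the tail of do_exit(). */
static void example_exit_side(void)
{
	exit_tasks_rcu_start();		/* __srcu_read_lock(&tasks_rcu_exit_srcu) */
	/* ... exit_notify() and the rest of the exit path ... */
	exit_tasks_rcu_finish();	/* __srcu_read_unlock(&tasks_rcu_exit_srcu) */
}

/*
 * Updater side, in kernel/rcu/update.c where the srcu_struct now lives:
 * wait until every task currently inside that window has left it.
 */
static void example_updater_side(void)
{
	synchronize_srcu(&tasks_rcu_exit_srcu);
}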

diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index c3f380befdd7..ce9d21923d75 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -162,8 +162,6 @@ static inline void rcu_init_nohz(void) { }
  * macro rather than an inline function to avoid #include hell.
  */
 #ifdef CONFIG_TASKS_RCU
-#define TASKS_RCU(x) x
-extern struct srcu_struct tasks_rcu_exit_srcu;
 #define rcu_note_voluntary_context_switch_lite(t) \
 	do { \
 		if (READ_ONCE((t)->rcu_tasks_holdout)) \
@@ -176,12 +174,15 @@ extern struct srcu_struct tasks_rcu_exit_srcu;
 	} while (0)
 void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
 void synchronize_rcu_tasks(void);
+void exit_tasks_rcu_start(void);
+void exit_tasks_rcu_finish(void);
 #else /* #ifdef CONFIG_TASKS_RCU */
-#define TASKS_RCU(x) do { } while (0)
 #define rcu_note_voluntary_context_switch_lite(t) do { } while (0)
 #define rcu_note_voluntary_context_switch(t) rcu_all_qs()
 #define call_rcu_tasks call_rcu_sched
 #define synchronize_rcu_tasks synchronize_sched
+static inline void exit_tasks_rcu_start(void) { }
+static inline void exit_tasks_rcu_finish(void) { }
 #endif /* #else #ifdef CONFIG_TASKS_RCU */
 
 /**
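
The empty static inline stubs in the #else branch mean that callers such as do_exit() can invoke the new API unconditionally, with no #ifdef CONFIG_TASKS_RCU at the call site. A minimal sketch, where example_caller() is a hypothetical caller standing in for do_exit():

/* Compiles and behaves sensibly whether or not CONFIG_TASKS_RCU is set. */
static void example_caller(void)
{
	exit_tasks_rcu_start();		/* real SRCU read lock, or a no-op stub */
	/* ... exit-path work that Tasks RCU must wait for ... */
	exit_tasks_rcu_finish();	/* matching unlock, or a no-op stub */
}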
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 12f326aa5871..f6d6ad47511d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -582,9 +582,10 @@ struct task_struct {
 
 #ifdef CONFIG_TASKS_RCU
 	unsigned long rcu_tasks_nvcsw;
-	bool rcu_tasks_holdout;
-	struct list_head rcu_tasks_holdout_list;
+	u8 rcu_tasks_holdout;
+	u8 rcu_tasks_idx;
 	int rcu_tasks_idle_cpu;
+	struct list_head rcu_tasks_holdout_list;
 #endif /* #ifdef CONFIG_TASKS_RCU */
 
 	struct sched_info sched_info;
diff --git a/kernel/exit.c b/kernel/exit.c
index c5548faa9f37..d297c525f188 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -764,7 +764,6 @@ void __noreturn do_exit(long code)
 {
 	struct task_struct *tsk = current;
 	int group_dead;
-	TASKS_RCU(int tasks_rcu_i);
 
 	profile_task_exit(tsk);
 	kcov_task_exit(tsk);
@@ -881,9 +880,7 @@ void __noreturn do_exit(long code)
 	 */
 	flush_ptrace_hw_breakpoint(tsk);
 
-	TASKS_RCU(preempt_disable());
-	TASKS_RCU(tasks_rcu_i = __srcu_read_lock(&tasks_rcu_exit_srcu));
-	TASKS_RCU(preempt_enable());
+	exit_tasks_rcu_start();
 	exit_notify(tsk, group_dead);
 	proc_exit_connector(tsk);
 	mpol_put_task_policy(tsk);
@@ -918,7 +915,7 @@ void __noreturn do_exit(long code)
 	if (tsk->nr_dirtied)
 		__this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
 	exit_rcu();
-	TASKS_RCU(__srcu_read_unlock(&tasks_rcu_exit_srcu, tasks_rcu_i));
+	exit_tasks_rcu_finish();
 
 	do_task_dead();
 }
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index 00e77c470017..5033b66d2753 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -568,7 +568,7 @@ static DECLARE_WAIT_QUEUE_HEAD(rcu_tasks_cbs_wq);
 static DEFINE_RAW_SPINLOCK(rcu_tasks_cbs_lock);
 
 /* Track exiting tasks in order to allow them to be waited for. */
-DEFINE_SRCU(tasks_rcu_exit_srcu);
+DEFINE_STATIC_SRCU(tasks_rcu_exit_srcu);
 
 /* Control stall timeouts. Disable with <= 0, otherwise jiffies till stall. */
 #define RCU_TASK_STALL_TIMEOUT (HZ * 60 * 10)
@@ -875,6 +875,22 @@ static void rcu_spawn_tasks_kthread(void)
 	mutex_unlock(&rcu_tasks_kthread_mutex);
 }
 
+/* Do the srcu_read_lock() for the above synchronize_srcu(). */
+void exit_tasks_rcu_start(void)
+{
+	preempt_disable();
+	current->rcu_tasks_idx = __srcu_read_lock(&tasks_rcu_exit_srcu);
+	preempt_enable();
+}
+
+/* Do the srcu_read_unlock() for the above synchronize_srcu(). */
+void exit_tasks_rcu_finish(void)
+{
+	preempt_disable();
+	__srcu_read_unlock(&tasks_rcu_exit_srcu, current->rcu_tasks_idx);
+	preempt_enable();
+}
+
 #endif /* #ifdef CONFIG_TASKS_RCU */
 
 #ifndef CONFIG_TINY_RCU
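
The comments on the two new functions refer to the synchronize_srcu() that the Tasks RCU grace-period kthread performs on tasks_rcu_exit_srcu earlier in this file; the index returned by __srcu_read_lock() is stashed in the new ->rcu_tasks_idx field so that the unlock, now in a separate function, can retrieve it. The following is a rough sketch of that wait, based on the surrounding kernel/rcu/update.c rather than on this patch; example_wait_for_exiting_tasks() is an invented name:

/* Sketch of the updater-side wait that the comments above refer to. */
static void example_wait_for_exiting_tasks(void)
{
	/*
	 * Any task that has called exit_tasks_rcu_start() but not yet
	 * exit_tasks_rcu_finish() holds a read-side reference on
	 * tasks_rcu_exit_srcu, so this call blocks until all such
	 * exiting tasks have passed exit_tasks_rcu_finish().
	 */
	synchronize_srcu(&tasks_rcu_exit_srcu);
}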
--
2.5.2