Subject: [PATCH tip/core/rcu 02/11] rcu: Move RCU CPU stall-warning code out of tree_plugin.h
    The RCU CPU stall-warning code for normal grace periods is currently
    scattered across two files, due to earlier Tiny RCU support for RCU
    CPU stall warnings and for old Kconfig options that have long since
    been retired. Given that it is hard for the lead RCU maintainer to
    find relevant stall-warning code, it would be good to consolidate it.
    This commit continues this process by moving stall-warning code from
    kernel/rcu/tree_plugin.h to a new kernel/rcu/tree_stall.h file.

    Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
    ---
    kernel/rcu/tree_plugin.h | 90 -------------------------------------
    kernel/rcu/tree_stall.h  | 95 ++++++++++++++++++++++++++++++++++++++++
    2 files changed, 95 insertions(+), 90 deletions(-)
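
    For readers following the consolidation described above, here is a
    minimal sketch, assuming the usual kernel/rcu/tree.c convention, of
    why the moved functions can remain static: the tree_*.h files are C
    source rather than ordinary headers, and they are #include'd into a
    single translation unit (kernel/rcu/tree.c), so no new declarations
    are needed when code migrates between them. The exact ordering below
    is illustrative and not taken from this patch.

    /*
     * Illustrative sketch only, not part of this patch: near the bottom
     * of kernel/rcu/tree.c, each of these files contributes function
     * definitions to the tree.c translation unit, so the functions they
     * define can stay static.
     */
    #include "tree_stall.h"    /* RCU CPU stall-warning code (this series) */
    #include "tree_exp.h"      /* expedited grace-period code */
    #include "tree_plugin.h"   /* preemptible-RCU and related plugin code */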

    diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
    index 97dba50f6fb2..7fa3bc4d481b 100644
    --- a/kernel/rcu/tree_plugin.h
    +++ b/kernel/rcu/tree_plugin.h
    @@ -642,79 +642,6 @@ static void rcu_read_unlock_special(struct task_struct *t)
    rcu_preempt_deferred_qs_irqrestore(t, flags);
    }

    -/*
    - * Dump detailed information for all tasks blocking the current RCU
    - * grace period on the specified rcu_node structure.
    - */
    -static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
    -{
    - unsigned long flags;
    - struct task_struct *t;
    -
    - raw_spin_lock_irqsave_rcu_node(rnp, flags);
    - if (!rcu_preempt_blocked_readers_cgp(rnp)) {
    - raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
    - return;
    - }
    - t = list_entry(rnp->gp_tasks->prev,
    - struct task_struct, rcu_node_entry);
    - list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
    - /*
    - * We could be printing a lot while holding a spinlock.
    - * Avoid triggering hard lockup.
    - */
    - touch_nmi_watchdog();
    - sched_show_task(t);
    - }
    - raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
    -}
    -
    -/*
    - * Dump detailed information for all tasks blocking the current RCU
    - * grace period.
    - */
    -static void rcu_print_detail_task_stall(void)
    -{
    - struct rcu_node *rnp = rcu_get_root();
    -
    - rcu_print_detail_task_stall_rnp(rnp);
    - rcu_for_each_leaf_node(rnp)
    - rcu_print_detail_task_stall_rnp(rnp);
    -}
    -
    -static void rcu_print_task_stall_begin(struct rcu_node *rnp)
    -{
    - pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
    - rnp->level, rnp->grplo, rnp->grphi);
    -}
    -
    -static void rcu_print_task_stall_end(void)
    -{
    - pr_cont("\n");
    -}
    -
    -/*
    - * Scan the current list of tasks blocked within RCU read-side critical
    - * sections, printing out the tid of each.
    - */
    -static int rcu_print_task_stall(struct rcu_node *rnp)
    -{
    - struct task_struct *t;
    - int ndetected = 0;
    -
    - if (!rcu_preempt_blocked_readers_cgp(rnp))
    - return 0;
    - rcu_print_task_stall_begin(rnp);
    - t = list_entry(rnp->gp_tasks->prev,
    - struct task_struct, rcu_node_entry);
    - list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
    - pr_cont(" P%d", t->pid);
    - ndetected++;
    - }
    - rcu_print_task_stall_end();
    - return ndetected;
    -}
    -
    /*
    * Scan the current list of tasks blocked within RCU read-side critical
    * sections, printing out the tid of each that is blocking the current
    @@ -979,23 +906,6 @@ static bool rcu_preempt_need_deferred_qs(struct task_struct *t)
    }
    static void rcu_preempt_deferred_qs(struct task_struct *t) { }

    -/*
    - * Because preemptible RCU does not exist, we never have to check for
    - * tasks blocked within RCU read-side critical sections.
    - */
    -static void rcu_print_detail_task_stall(void)
    -{
    -}
    -
    -/*
    - * Because preemptible RCU does not exist, we never have to check for
    - * tasks blocked within RCU read-side critical sections.
    - */
    -static int rcu_print_task_stall(struct rcu_node *rnp)
    -{
    - return 0;
    -}
    -
    /*
    * Because preemptible RCU does not exist, we never have to check for
    * tasks blocked within RCU read-side critical sections that are
    diff --git a/kernel/rcu/tree_stall.h b/kernel/rcu/tree_stall.h
    index 682189f4d083..6f5f94944f49 100644
    --- a/kernel/rcu/tree_stall.h
    +++ b/kernel/rcu/tree_stall.h
    @@ -61,3 +61,98 @@ static int __init check_cpu_stall_init(void)
    return 0;
    }
    early_initcall(check_cpu_stall_init);
    +
    +#ifdef CONFIG_PREEMPT
    +
    +/*
    + * Dump detailed information for all tasks blocking the current RCU
    + * grace period on the specified rcu_node structure.
    + */
    +static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
    +{
    + unsigned long flags;
    + struct task_struct *t;
    +
    + raw_spin_lock_irqsave_rcu_node(rnp, flags);
    + if (!rcu_preempt_blocked_readers_cgp(rnp)) {
    + raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
    + return;
    + }
    + t = list_entry(rnp->gp_tasks->prev,
    + struct task_struct, rcu_node_entry);
    + list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
    + /*
    + * We could be printing a lot while holding a spinlock.
    + * Avoid triggering hard lockup.
    + */
    + touch_nmi_watchdog();
    + sched_show_task(t);
    + }
    + raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
    +}
    +
    +/*
    + * Dump detailed information for all tasks blocking the current RCU
    + * grace period.
    + */
    +static void rcu_print_detail_task_stall(void)
    +{
    + struct rcu_node *rnp = rcu_get_root();
    +
    + rcu_print_detail_task_stall_rnp(rnp);
    + rcu_for_each_leaf_node(rnp)
    + rcu_print_detail_task_stall_rnp(rnp);
    +}
    +
    +static void rcu_print_task_stall_begin(struct rcu_node *rnp)
    +{
    + pr_err("\tTasks blocked on level-%d rcu_node (CPUs %d-%d):",
    + rnp->level, rnp->grplo, rnp->grphi);
    +}
    +
    +static void rcu_print_task_stall_end(void)
    +{
    + pr_cont("\n");
    +}
    +
    +/*
    + * Scan the current list of tasks blocked within RCU read-side critical
    + * sections, printing out the tid of each.
    + */
    +static int rcu_print_task_stall(struct rcu_node *rnp)
    +{
    + struct task_struct *t;
    + int ndetected = 0;
    +
    + if (!rcu_preempt_blocked_readers_cgp(rnp))
    + return 0;
    + rcu_print_task_stall_begin(rnp);
    + t = list_entry(rnp->gp_tasks->prev,
    + struct task_struct, rcu_node_entry);
    + list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
    + pr_cont(" P%d", t->pid);
    + ndetected++;
    + }
    + rcu_print_task_stall_end();
    + return ndetected;
    +}
    +
    +#else /* #ifdef CONFIG_PREEMPT */
    +
    +/*
    + * Because preemptible RCU does not exist, we never have to check for
    + * tasks blocked within RCU read-side critical sections.
    + */
    +static void rcu_print_detail_task_stall(void)
    +{
    +}
    +
    +/*
    + * Because preemptible RCU does not exist, we never have to check for
    + * tasks blocked within RCU read-side critical sections.
    + */
    +static int rcu_print_task_stall(struct rcu_node *rnp)
    +{
    + return 0;
    +}
    +#endif /* #else #ifdef CONFIG_PREEMPT */
    --
    2.17.1