    From: "Joel Fernandes (Google)" <joel@joelfernandes.org>
    Subject: [PATCH tip/core/rcu 1/8] treewide: Rename rcu_dereference_raw_notrace() to _check()

    The rcu_dereference_raw_notrace() API name is confusing. It is equivalent
    to rcu_dereference_raw() except that it also does sparse pointer checking.

    There are only a few users of rcu_dereference_raw_notrace(). This patch
    renames all of them to rcu_dereference_raw_check(), with the "_check()"
    suffix indicating sparse checking.

    Signed-off-by: Joel Fernandes (Google) <joel@joelfernandes.org>
    [ paulmck: Fix checkpatch warnings about parentheses. ]
    Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
    ---
    Documentation/RCU/Design/Requirements/Requirements.html | 2 +-
    arch/powerpc/include/asm/kvm_book3s_64.h | 2 +-
    include/linux/rculist.h | 6 +++---
    include/linux/rcupdate.h | 2 +-
    kernel/trace/ftrace_internal.h | 8 ++++----
    kernel/trace/trace.c | 4 ++--
    6 files changed, 12 insertions(+), 12 deletions(-)
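
    Not part of the patch itself: below is a rough usage sketch of the renamed
    API, modeled on the ftrace_exports() hunk in kernel/trace/trace.c further
    down. It shows a lockless walk of an RCU-protected singly linked list from
    a tracing-safe path, where lockdep/tracing calls must be avoided. The list
    head, node type, and function names (my_export_list, struct my_export,
    my_exports) are hypothetical.

    /* Assumes <linux/rcupdate.h> and <linux/preempt.h>, as in kernel code. */

    struct my_export {
    	void (*write)(const void *buf, unsigned int len);
    	struct my_export __rcu *next;
    };

    static struct my_export __rcu *my_export_list;

    static void my_exports(const void *buf, unsigned int len)
    {
    	struct my_export *export;

    	preempt_disable_notrace();

    	/*
    	 * rcu_dereference_raw_check() does sparse (__rcu) checking but does
    	 * not call rcu_read_lock_held(), so it is usable on the tracing
    	 * fast path without recursing back into tracing.
    	 */
    	export = rcu_dereference_raw_check(my_export_list);
    	while (export) {
    		export->write(buf, len);
    		export = rcu_dereference_raw_check(export->next);
    	}

    	preempt_enable_notrace();
    }
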

    diff --git a/Documentation/RCU/Design/Requirements/Requirements.html b/Documentation/RCU/Design/Requirements/Requirements.html
    index 5a9238a2883c..bdbc84f1b949 100644
    --- a/Documentation/RCU/Design/Requirements/Requirements.html
    +++ b/Documentation/RCU/Design/Requirements/Requirements.html
    @@ -2512,7 +2512,7 @@ disabled across the entire RCU read-side critical section.
    <p>
    It is possible to use tracing on RCU code, but tracing itself
    uses RCU.
    -For this reason, <tt>rcu_dereference_raw_notrace()</tt>
    +For this reason, <tt>rcu_dereference_raw_check()</tt>
    is provided for use by tracing, which avoids the destructive
    recursion that could otherwise ensue.
    This API is also used by virtualization in some architectures,
    diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
    index bb7c8cc77f1a..04b2b927bb5a 100644
    --- a/arch/powerpc/include/asm/kvm_book3s_64.h
    +++ b/arch/powerpc/include/asm/kvm_book3s_64.h
    @@ -535,7 +535,7 @@ static inline void note_hpte_modification(struct kvm *kvm,
    */
    static inline struct kvm_memslots *kvm_memslots_raw(struct kvm *kvm)
    {
    - return rcu_dereference_raw_notrace(kvm->memslots[0]);
    + return rcu_dereference_raw_check(kvm->memslots[0]);
    }

    extern void kvmppc_mmu_debugfs_init(struct kvm *kvm);
    diff --git a/include/linux/rculist.h b/include/linux/rculist.h
    index e91ec9ddcd30..932296144131 100644
    --- a/include/linux/rculist.h
    +++ b/include/linux/rculist.h
    @@ -622,7 +622,7 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
    * as long as the traversal is guarded by rcu_read_lock().
    */
    #define hlist_for_each_entry_rcu(pos, head, member) \
    - for (pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\
    + for (pos = hlist_entry_safe(rcu_dereference_raw(hlist_first_rcu(head)),\
    typeof(*(pos)), member); \
    pos; \
    pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
    @@ -642,10 +642,10 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
    * not do any RCU debugging or tracing.
    */
    #define hlist_for_each_entry_rcu_notrace(pos, head, member) \
    - for (pos = hlist_entry_safe (rcu_dereference_raw_notrace(hlist_first_rcu(head)),\
    + for (pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_first_rcu(head)),\
    typeof(*(pos)), member); \
    pos; \
    - pos = hlist_entry_safe(rcu_dereference_raw_notrace(hlist_next_rcu(\
    + pos = hlist_entry_safe(rcu_dereference_raw_check(hlist_next_rcu(\
    &(pos)->member)), typeof(*(pos)), member))

    /**
    diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
    index 8f7167478c1d..bfcafbc1e301 100644
    --- a/include/linux/rcupdate.h
    +++ b/include/linux/rcupdate.h
    @@ -476,7 +476,7 @@ do { \
    * The no-tracing version of rcu_dereference_raw() must not call
    * rcu_read_lock_held().
    */
    -#define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)
    +#define rcu_dereference_raw_check(p) __rcu_dereference_check((p), 1, __rcu)

    /**
    * rcu_dereference_protected() - fetch RCU pointer when updates prevented
    diff --git a/kernel/trace/ftrace_internal.h b/kernel/trace/ftrace_internal.h
    index 0515a2096f90..0456e0a3dab1 100644
    --- a/kernel/trace/ftrace_internal.h
    +++ b/kernel/trace/ftrace_internal.h
    @@ -6,22 +6,22 @@

    /*
    * Traverse the ftrace_global_list, invoking all entries. The reason that we
    - * can use rcu_dereference_raw_notrace() is that elements removed from this list
    + * can use rcu_dereference_raw_check() is that elements removed from this list
    * are simply leaked, so there is no need to interact with a grace-period
    - * mechanism. The rcu_dereference_raw_notrace() calls are needed to handle
    + * mechanism. The rcu_dereference_raw_check() calls are needed to handle
    * concurrent insertions into the ftrace_global_list.
    *
    * Silly Alpha and silly pointer-speculation compiler optimizations!
    */
    #define do_for_each_ftrace_op(op, list) \
    - op = rcu_dereference_raw_notrace(list); \
    + op = rcu_dereference_raw_check(list); \
    do

    /*
    * Optimized for just a single item in the list (as that is the normal case).
    */
    #define while_for_each_ftrace_op(op) \
    - while (likely(op = rcu_dereference_raw_notrace((op)->next)) && \
    + while (likely(op = rcu_dereference_raw_check((op)->next)) && \
    unlikely((op) != &ftrace_list_end))

    extern struct ftrace_ops __rcu *ftrace_ops_list;
    diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
    index 525a97fbbc60..642474b26ba7 100644
    --- a/kernel/trace/trace.c
    +++ b/kernel/trace/trace.c
    @@ -2642,10 +2642,10 @@ static void ftrace_exports(struct ring_buffer_event *event)

    preempt_disable_notrace();

    - export = rcu_dereference_raw_notrace(ftrace_exports_list);
    + export = rcu_dereference_raw_check(ftrace_exports_list);
    while (export) {
    trace_process_export(export, event);
    - export = rcu_dereference_raw_notrace(export->next);
    + export = rcu_dereference_raw_check(export->next);
    }

    preempt_enable_notrace();
    --
    2.17.1