    Subject: [rcu:dev.2020.04.01a 120/122] kernel/trace/ftrace.c:2914:3: error: implicit declaration of function 'synchronize_rcu_tasks_rude'; did you mean 'synchronize_rcu_tasks'?
    tree:   https://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev.2020.04.01a
    head: ffebabde5e80e76612fd06a934cd2a147128054c
    commit: ca232c9d7331168ed8495f07253cb18cb9af2053 [120/122] ftrace: Use synchronize_rcu_tasks_rude() instead of ftrace_sync()
    config: s390-randconfig-a001-20200403 (attached as .config)
    compiler: s390-linux-gcc (GCC) 9.3.0
    reproduce:
    wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
    chmod +x ~/bin/make.cross
    git checkout ca232c9d7331168ed8495f07253cb18cb9af2053
    # save the attached .config to linux build tree
    GCC_VERSION=9.3.0 make.cross ARCH=s390

    If you fix the issue, kindly add the following tag as appropriate:
    Reported-by: kbuild test robot <lkp@intel.com>

    All errors (new ones prefixed by >>):

    kernel/trace/ftrace.c: In function 'ftrace_shutdown':
    >> kernel/trace/ftrace.c:2914:3: error: implicit declaration of function 'synchronize_rcu_tasks_rude'; did you mean 'synchronize_rcu_tasks'? [-Werror=implicit-function-declaration]
    2914 |   synchronize_rcu_tasks_rude();
         |   ^~~~~~~~~~~~~~~~~~~~~~~~~~
         |   synchronize_rcu_tasks
    cc1: some warnings being treated as errors
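
    A hedged reading of the failure, not a statement about what the rcu tree
    will do: this s390 randconfig apparently leaves out the option that
    declares synchronize_rcu_tasks_rude(), so no prototype is in scope and the
    call added by the commit above trips -Werror=implicit-function-declaration.
    Typical ways out are either to have the ftrace Kconfig select whatever
    option provides the rude flavor, or to guard the call site and keep the
    old ftrace_sync()-style synchronization as a fallback. The sketch below
    illustrates only the second option; the CONFIG_TASKS_RUDE_RCU gate and the
    my_* helper names are assumptions for illustration, not this tree's fix.

    #include <linux/rcupdate.h>
    #include <linux/workqueue.h>

    #ifndef CONFIG_TASKS_RUDE_RCU
    /* Empty work function; scheduling it on every CPU forces a context
     * switch, which is what the old ftrace_sync() relied on. */
    static void my_ftrace_sync(struct work_struct *work)
    {
    }
    #endif

    static void my_hard_sched_sync(void)
    {
    #ifdef CONFIG_TASKS_RUDE_RCU
    	synchronize_rcu_tasks_rude();
    #else
    	schedule_on_each_cpu(my_ftrace_sync);
    #endif
    }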

    vim +2914 kernel/trace/ftrace.c

    2818
    2819	int ftrace_shutdown(struct ftrace_ops *ops, int command)
    2820	{
    2821		int ret;
    2822
    2823		if (unlikely(ftrace_disabled))
    2824			return -ENODEV;
    2825
    2826		ret = __unregister_ftrace_function(ops);
    2827		if (ret)
    2828			return ret;
    2829
    2830		ftrace_start_up--;
    2831		/*
    2832		 * Just warn in case of unbalance, no need to kill ftrace, it's not
    2833		 * critical but the ftrace_call callers may be never nopped again after
    2834		 * further ftrace uses.
    2835		 */
    2836		WARN_ON_ONCE(ftrace_start_up < 0);
    2837
    2838		/* Disabling ipmodify never fails */
    2839		ftrace_hash_ipmodify_disable(ops);
    2840
    2841		if (ftrace_hash_rec_disable(ops, 1))
    2842			command |= FTRACE_UPDATE_CALLS;
    2843
    2844		ops->flags &= ~FTRACE_OPS_FL_ENABLED;
    2845
    2846		if (saved_ftrace_func != ftrace_trace_function) {
    2847			saved_ftrace_func = ftrace_trace_function;
    2848			command |= FTRACE_UPDATE_TRACE_FUNC;
    2849		}
    2850
    2851		if (!command || !ftrace_enabled) {
    2852			/*
    2853			 * If these are dynamic or per_cpu ops, they still
    2854			 * need their data freed. Since, function tracing is
    2855			 * not currently active, we can just free them
    2856			 * without synchronizing all CPUs.
    2857			 */
    2858			if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
    2859				goto free_ops;
    2860
    2861			return 0;
    2862		}
    2863
    2864		/*
    2865		 * If the ops uses a trampoline, then it needs to be
    2866		 * tested first on update.
    2867		 */
    2868		ops->flags |= FTRACE_OPS_FL_REMOVING;
    2869		removed_ops = ops;
    2870
    2871		/* The trampoline logic checks the old hashes */
    2872		ops->old_hash.filter_hash = ops->func_hash->filter_hash;
    2873		ops->old_hash.notrace_hash = ops->func_hash->notrace_hash;
    2874
    2875		ftrace_run_update_code(command);
    2876
    2877		/*
    2878		 * If there's no more ops registered with ftrace, run a
    2879		 * sanity check to make sure all rec flags are cleared.
    2880		 */
    2881		if (rcu_dereference_protected(ftrace_ops_list,
    2882				lockdep_is_held(&ftrace_lock)) == &ftrace_list_end) {
    2883			struct ftrace_page *pg;
    2884			struct dyn_ftrace *rec;
    2885
    2886			do_for_each_ftrace_rec(pg, rec) {
    2887				if (FTRACE_WARN_ON_ONCE(rec->flags & ~FTRACE_FL_DISABLED))
    2888					pr_warn(" %pS flags:%lx\n",
    2889						(void *)rec->ip, rec->flags);
    2890			} while_for_each_ftrace_rec();
    2891		}
    2892
    2893		ops->old_hash.filter_hash = NULL;
    2894		ops->old_hash.notrace_hash = NULL;
    2895
    2896		removed_ops = NULL;
    2897		ops->flags &= ~FTRACE_OPS_FL_REMOVING;
    2898
    2899		/*
    2900		 * Dynamic ops may be freed, we must make sure that all
    2901		 * callers are done before leaving this function.
    2902		 * The same goes for freeing the per_cpu data of the per_cpu
    2903		 * ops.
    2904		 */
    2905		if (ops->flags & FTRACE_OPS_FL_DYNAMIC) {
    2906			/*
    2907			 * We need to do a hard force of sched synchronization.
    2908			 * This is because we use preempt_disable() to do RCU, but
    2909			 * the function tracers can be called where RCU is not watching
    2910			 * (like before user_exit()). We can not rely on the RCU
    2911			 * infrastructure to do the synchronization, thus we must do it
    2912			 * ourselves.
    2913			 */
  > 2914			synchronize_rcu_tasks_rude();
    2915
    2916			/*
    2917			 * When the kernel is preemptive, tasks can be preempted
    2918			 * while on a ftrace trampoline. Just scheduling a task on
    2919			 * a CPU is not good enough to flush them. Calling
    2920			 * synchronize_rcu_tasks() will wait for those tasks to
    2921			 * execute and either schedule voluntarily or enter user space.
    2922			 */
    2923			if (IS_ENABLED(CONFIG_PREEMPTION))
    2924				synchronize_rcu_tasks();
    2925
    2926	 free_ops:
    2927			arch_ftrace_trampoline_free(ops);
    2928		}
    2929
    2930		return 0;
    2931	}
    2932
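
    For reference, here is a minimal, self-contained sketch of the teardown
    pattern the excerpt implements around the failing line (this is not the
    ftrace code itself; my_ops, my_published_ops and my_teardown are
    hypothetical names): stop publishing the ops, wait until no CPU and no
    preempted task can still be executing through it, and only then free it.

    #include <linux/rcupdate.h>	/* RCU_INIT_POINTER(), synchronize_rcu_tasks*() */
    #include <linux/slab.h>	/* kfree() */

    /* Hypothetical stand-in for struct ftrace_ops. */
    struct my_ops {
    	void (*func)(unsigned long ip);
    };

    static struct my_ops __rcu *my_published_ops;

    static void my_teardown(struct my_ops *ops)
    {
    	/* Stop publishing the ops so no new caller can find it. */
    	RCU_INIT_POINTER(my_published_ops, NULL);

    	/*
    	 * Callers may run where RCU is not watching (e.g. before
    	 * user_exit()), so a plain grace period is not enough; the rude
    	 * tasks flavor forces every CPU through a context switch.
    	 * (Assumes that flavor is configured in; see the build error above.)
    	 */
    	synchronize_rcu_tasks_rude();

    	/*
    	 * With CONFIG_PREEMPTION a task can be preempted while still on a
    	 * trampoline; wait for it to schedule voluntarily or reach user
    	 * space before freeing.
    	 */
    	if (IS_ENABLED(CONFIG_PREEMPTION))
    		synchronize_rcu_tasks();

    	kfree(ops);
    }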

    ---
    0-DAY CI Kernel Test Service, Intel Corporation
    https://lists.01.org/hyperkitty/list/kbuild-all@lists.01.org
    [attachment: .config (application/gzip)]