Subject: [PATCH 4.19 38/57] kprobes: Fix potential deadlock in kprobe_optimizer()
    [ Upstream commit f1c6ece23729257fb46562ff9224cf5f61b818da ]

    lockdep reports the following deadlock scenario:

    WARNING: possible circular locking dependency detected

    kworker/1:1/48 is trying to acquire lock:
    000000008d7a62b2 (text_mutex){+.+.}, at: kprobe_optimizer+0x163/0x290

    but task is already holding lock:
    00000000850b5e2d (module_mutex){+.+.}, at: kprobe_optimizer+0x31/0x290

    which lock already depends on the new lock.

    the existing dependency chain (in reverse order) is:

    -> #1 (module_mutex){+.+.}:
    __mutex_lock+0xac/0x9f0
    mutex_lock_nested+0x1b/0x20
    set_all_modules_text_rw+0x22/0x90
    ftrace_arch_code_modify_prepare+0x1c/0x20
    ftrace_run_update_code+0xe/0x30
    ftrace_startup_enable+0x2e/0x50
    ftrace_startup+0xa7/0x100
    register_ftrace_function+0x27/0x70
    arm_kprobe+0xb3/0x130
    enable_kprobe+0x83/0xa0
    enable_trace_kprobe.part.0+0x2e/0x80
    kprobe_register+0x6f/0xc0
    perf_trace_event_init+0x16b/0x270
    perf_kprobe_init+0xa7/0xe0
    perf_kprobe_event_init+0x3e/0x70
    perf_try_init_event+0x4a/0x140
    perf_event_alloc+0x93a/0xde0
    __do_sys_perf_event_open+0x19f/0xf30
    __x64_sys_perf_event_open+0x20/0x30
    do_syscall_64+0x65/0x1d0
    entry_SYSCALL_64_after_hwframe+0x49/0xbe

    -> #0 (text_mutex){+.+.}:
    __lock_acquire+0xfcb/0x1b60
    lock_acquire+0xca/0x1d0
    __mutex_lock+0xac/0x9f0
    mutex_lock_nested+0x1b/0x20
    kprobe_optimizer+0x163/0x290
    process_one_work+0x22b/0x560
    worker_thread+0x50/0x3c0
    kthread+0x112/0x150
    ret_from_fork+0x3a/0x50

    other info that might help us debug this:

    Possible unsafe locking scenario:

           CPU0                    CPU1
           ----                    ----
      lock(module_mutex);
                                   lock(text_mutex);
                                   lock(module_mutex);
      lock(text_mutex);

    *** DEADLOCK ***
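
    In other words, chain #1 (the ftrace text-modification path) takes
    module_mutex in set_all_modules_text_rw() while text_mutex is already
    held, i.e. text_mutex -> module_mutex, whereas chain #0 shows
    kprobe_optimizer() grabbing text_mutex in do_optimize_kprobes() while
    already holding module_mutex, i.e. the inverse order. A minimal
    standalone sketch of the two conflicting orders (stand-in locks,
    everything else elided; not the actual kernel code):

    #include <linux/mutex.h>

    /* Stand-ins for the real kernel locks, purely for illustration. */
    static DEFINE_MUTEX(text_mutex);
    static DEFINE_MUTEX(module_mutex);

    /* Chain #1: ftrace_arch_code_modify_prepare() holds text_mutex when
     * set_all_modules_text_rw() takes module_mutex.
     */
    static void ftrace_modify_path(void)
    {
    	mutex_lock(&text_mutex);
    	mutex_lock(&module_mutex);	/* text_mutex -> module_mutex */
    	mutex_unlock(&module_mutex);
    	mutex_unlock(&text_mutex);
    }

    /* Chain #0: kprobe_optimizer() (before this patch) takes module_mutex
     * first and only acquires text_mutex inside do_optimize_kprobes().
     */
    static void kprobe_optimizer_path(void)
    {
    	mutex_lock(&module_mutex);
    	mutex_lock(&text_mutex);	/* module_mutex -> text_mutex */
    	mutex_unlock(&text_mutex);
    	mutex_unlock(&module_mutex);
    }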

    As a reproducer I've been using bcc's funccount.py
    (https://github.com/iovisor/bcc/blob/master/tools/funccount.py),
    for example:

    # ./funccount.py '*interrupt*'

    That immediately triggers the lockdep splat.

    Fix by acquiring text_mutex before module_mutex in kprobe_optimizer().
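
    With this change kprobe_optimizer() ends up with the nesting
    kprobe_mutex -> cpus_read_lock() -> text_mutex -> module_mutex,
    consistent with the ftrace path. A condensed view of the function
    after the patch below (the actual optimize/unoptimize work elided):

    static void kprobe_optimizer(struct work_struct *work)
    {
    	mutex_lock(&kprobe_mutex);
    	cpus_read_lock();
    	mutex_lock(&text_mutex);	/* now taken before module_mutex */
    	/* Lock modules while optimizing kprobes */
    	mutex_lock(&module_mutex);

    	/* do_unoptimize_kprobes(); do_optimize_kprobes(); ... */

    	mutex_unlock(&module_mutex);
    	mutex_unlock(&text_mutex);
    	cpus_read_unlock();
    	mutex_unlock(&kprobe_mutex);
    }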

    Signed-off-by: Andrea Righi <andrea.righi@canonical.com>
    Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
    Cc: Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
    Cc: David S. Miller <davem@davemloft.net>
    Cc: Linus Torvalds <torvalds@linux-foundation.org>
    Cc: Naveen N. Rao <naveen.n.rao@linux.ibm.com>
    Cc: Peter Zijlstra <peterz@infradead.org>
    Cc: Thomas Gleixner <tglx@linutronix.de>
    Fixes: d5b844a2cf50 ("ftrace/x86: Remove possible deadlock between register_kprobe() and ftrace_run_update_code()")
    Link: http://lkml.kernel.org/r/20190812184302.GA7010@xps-13
    Signed-off-by: Ingo Molnar <mingo@kernel.org>
    Signed-off-by: Sasha Levin <sashal@kernel.org>
    ---
    kernel/kprobes.c | 8 ++++----
    1 file changed, 4 insertions(+), 4 deletions(-)

    diff --git a/kernel/kprobes.c b/kernel/kprobes.c
    index 29ff6635d2597..714d63f60460b 100644
    --- a/kernel/kprobes.c
    +++ b/kernel/kprobes.c
    @@ -483,6 +483,7 @@ static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
  */
 static void do_optimize_kprobes(void)
 {
+	lockdep_assert_held(&text_mutex);
 	/*
 	 * The optimization/unoptimization refers online_cpus via
 	 * stop_machine() and cpu-hotplug modifies online_cpus.
@@ -500,9 +501,7 @@ static void do_optimize_kprobes(void)
 	    list_empty(&optimizing_list))
 		return;
 
-	mutex_lock(&text_mutex);
 	arch_optimize_kprobes(&optimizing_list);
-	mutex_unlock(&text_mutex);
 }
 
 /*
@@ -513,6 +512,7 @@ static void do_unoptimize_kprobes(void)
 {
 	struct optimized_kprobe *op, *tmp;
 
+	lockdep_assert_held(&text_mutex);
 	/* See comment in do_optimize_kprobes() */
 	lockdep_assert_cpus_held();
 
@@ -520,7 +520,6 @@ static void do_unoptimize_kprobes(void)
 	if (list_empty(&unoptimizing_list))
 		return;
 
-	mutex_lock(&text_mutex);
 	arch_unoptimize_kprobes(&unoptimizing_list, &freeing_list);
 	/* Loop free_list for disarming */
 	list_for_each_entry_safe(op, tmp, &freeing_list, list) {
@@ -537,7 +536,6 @@ static void do_unoptimize_kprobes(void)
 		} else
 			list_del_init(&op->list);
 	}
-	mutex_unlock(&text_mutex);
 }
 
 /* Reclaim all kprobes on the free_list */
@@ -563,6 +561,7 @@ static void kprobe_optimizer(struct work_struct *work)
 {
 	mutex_lock(&kprobe_mutex);
 	cpus_read_lock();
+	mutex_lock(&text_mutex);
 	/* Lock modules while optimizing kprobes */
 	mutex_lock(&module_mutex);
 
@@ -590,6 +589,7 @@ static void kprobe_optimizer(struct work_struct *work)
 	do_free_cleaned_kprobes();
 
 	mutex_unlock(&module_mutex);
+	mutex_unlock(&text_mutex);
 	cpus_read_unlock();
 	mutex_unlock(&kprobe_mutex);
 
    --
    2.20.1

