    Subject: [PATCH 5.11 723/775] printk: fix deadlock when kernel panic
    From: Muchun Song <songmuchun@bytedance.com>

    commit 8a8109f303e25a27f92c1d8edd67d7cbbc60a4eb upstream.

    printk_safe_flush_on_panic() caused the following deadlock on our
    server:

    CPU0:                                         CPU1:
    panic                                         rcu_dump_cpu_stacks
      kdump_nmi_shootdown_cpus                      nmi_trigger_cpumask_backtrace
        register_nmi_handler(crash_nmi_callback)      printk_safe_flush
                                                         __printk_safe_flush
                                                           raw_spin_lock_irqsave(&read_lock)
        // send NMI to other processors
        apic_send_IPI_allbutself(NMI_VECTOR)
                                                             // NMI interrupt, dead loop
                                                             crash_nmi_callback
                                                               printk_safe_flush_on_panic
                                                                 printk_safe_flush
                                                                   __printk_safe_flush
                                                                     // deadlock
                                                                     raw_spin_lock_irqsave(&read_lock)

    DEADLOCK: read_lock is taken on CPU1 and will never get released.

    It happens when panic() stops a CPU by NMI while it has been in
    the middle of printk_safe_flush().

    Handle the lock the same way as logbuf_lock. The printk_safe buffers
    are flushed only when both locks can be safely taken. This avoids the
    deadlock _in this particular case_ at the expense of losing the
    contents of the printk_safe buffers.
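
    For context, "the same way as logbuf_lock" means mirroring the best-effort
    check that already sits at the top of printk_safe_flush_on_panic(); its tail
    appears as context in the last hunk below. A rough sketch of that existing
    block (comments paraphrased, not taken from the source):

    	if (raw_spin_is_locked(&logbuf_lock)) {
    		/* Another CPU may still hold the lock: give up rather than deadlock. */
    		if (num_online_cpus() > 1)
    			return;

    		/* Only this CPU is online, so the previous owner cannot run anymore:
    		 * it is safe to re-initialize the lock and proceed. */
    		debug_locks_off();
    		raw_spin_lock_init(&logbuf_lock);
    	}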

    Note: It would actually be safe to re-init the locks when all CPUs were
          stopped by NMI, but that would require passing this information
          from arch-specific code. It is not worth the complexity,
          especially because logbuf_lock and the printk_safe buffers have
          been obsoleted by the lockless ring buffer.

    Fixes: cf9b1106c81c ("printk/nmi: flush NMI messages on the system panic")
    Signed-off-by: Muchun Song <songmuchun@bytedance.com>
    Reviewed-by: Petr Mladek <pmladek@suse.com>
    Cc: <stable@vger.kernel.org>
    Acked-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
    Signed-off-by: Petr Mladek <pmladek@suse.com>
    Link: https://lore.kernel.org/r/20210210034823.64867-1-songmuchun@bytedance.com
    Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
    ---
    kernel/printk/printk_safe.c | 16 ++++++++++++----
    1 file changed, 12 insertions(+), 4 deletions(-)

    --- a/kernel/printk/printk_safe.c
    +++ b/kernel/printk/printk_safe.c
    @@ -45,6 +45,8 @@ struct printk_safe_seq_buf {
     static DEFINE_PER_CPU(struct printk_safe_seq_buf, safe_print_seq);
     static DEFINE_PER_CPU(int, printk_context);

    +static DEFINE_RAW_SPINLOCK(safe_read_lock);
    +
     #ifdef CONFIG_PRINTK_NMI
     static DEFINE_PER_CPU(struct printk_safe_seq_buf, nmi_print_seq);
     #endif
    @@ -180,8 +182,6 @@ static void report_message_lost(struct p
      */
     static void __printk_safe_flush(struct irq_work *work)
     {
    -	static raw_spinlock_t read_lock =
    -		__RAW_SPIN_LOCK_INITIALIZER(read_lock);
     	struct printk_safe_seq_buf *s =
     		container_of(work, struct printk_safe_seq_buf, work);
     	unsigned long flags;
    @@ -195,7 +195,7 @@ static void __printk_safe_flush(struct i
     	 * different CPUs. This is especially important when printing
     	 * a backtrace.
     	 */
    -	raw_spin_lock_irqsave(&read_lock, flags);
    +	raw_spin_lock_irqsave(&safe_read_lock, flags);

     	i = 0;
     more:
    @@ -232,7 +232,7 @@ more:

     out:
     	report_message_lost(s);
    -	raw_spin_unlock_irqrestore(&read_lock, flags);
    +	raw_spin_unlock_irqrestore(&safe_read_lock, flags);
     }

     /**
    @@ -278,6 +278,14 @@ void printk_safe_flush_on_panic(void)
     		raw_spin_lock_init(&logbuf_lock);
     	}

    +	if (raw_spin_is_locked(&safe_read_lock)) {
    +		if (num_online_cpus() > 1)
    +			return;
    +
    +		debug_locks_off();
    +		raw_spin_lock_init(&safe_read_lock);
    +	}
    +
     	printk_safe_flush();
     }
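
    For reference, with both hunks applied printk_safe_flush_on_panic() handles
    the two locks symmetrically. A reconstruction of the resulting function (the
    logbuf_lock block comes from the surrounding 5.11 source, not from the hunks
    shown; comments are paraphrased):

    void printk_safe_flush_on_panic(void)
    {
    	/*
    	 * Best effort only: if logbuf_lock is held and other CPUs are still
    	 * online, bail out instead of risking a deadlock or a double release.
    	 */
    	if (raw_spin_is_locked(&logbuf_lock)) {
    		if (num_online_cpus() > 1)
    			return;

    		debug_locks_off();
    		raw_spin_lock_init(&logbuf_lock);
    	}

    	/* Same treatment for the safe_read_lock introduced by this patch. */
    	if (raw_spin_is_locked(&safe_read_lock)) {
    		if (num_online_cpus() > 1)
    			return;

    		debug_locks_off();
    		raw_spin_lock_init(&safe_read_lock);
    	}

    	printk_safe_flush();
    }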

