From: Frederic Weisbecker <fweisbec@gmail.com>
Subject: [RFC PATCH 29/30] softirq: Make softirq processing softinterruptible
Date: 2018-10-11 01:14

Make do_softirq() re-entrant and allow a vector, whether it is being
processed or disabled, to be interrupted by another vector. This way a
vector can no longer monopolize the CPU for a long while at the expense
of the other vectors, which may rely on predictable latency; this
matters especially for softirq-disabled sections, which used to disable
all vectors.

    Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
    Cc: Ingo Molnar <mingo@kernel.org>
    Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
    Cc: Thomas Gleixner <tglx@linutronix.de>
    Cc: Peter Zijlstra <peterz@infradead.org>
    Cc: Linus Torvalds <torvalds@linux-foundation.org>
    Cc: David S. Miller <davem@davemloft.net>
    Cc: Mauro Carvalho Chehab <mchehab@s-opensource.com>
    Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
    ---
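Note for reviewers: local_softirq_enabled() is introduced earlier in
this series as a per-CPU mask of the vectors currently allowed to run.
That is why every local_softirq_pending() test below gains an
"& local_softirq_enabled()" qualifier, and why the in_interrupt()
checks relax to in_irq(): only hard IRQ context still forbids softirq
processing, while running inside another softirq or a partially
disabled section no longer does. Below is a minimal user-space sketch
of the masking idea; the variables, helpers, and vector list are
hypothetical stand-ins for the kernel's per-CPU state, not the series'
actual implementation:

#include <stdio.h>

enum { HI_SOFTIRQ, TIMER_SOFTIRQ, NET_TX_SOFTIRQ, NET_RX_SOFTIRQ, NR_VECTORS };

static const char *const names[] = { "HI", "TIMER", "NET_TX", "NET_RX" };

/* Hypothetical stand-ins for the kernel's per-CPU state: "pending"
 * mirrors local_softirq_pending(), "enabled" mirrors
 * local_softirq_enabled(). */
static unsigned int pending;
static unsigned int enabled = ~0U;

static void raise_vector(int nr)   { pending |= 1U << nr; }
static void disable_vector(int nr) { enabled &= ~(1U << nr); }
static void enable_vector(int nr)  { enabled |= 1U << nr; }

/* The core change: service only vectors that are both pending AND
 * enabled, instead of testing the pending mask alone. */
static void do_softirq_sketch(void)
{
	unsigned int runnable = pending & enabled;
	int nr;

	for (nr = 0; nr < NR_VECTORS; nr++) {
		if (runnable & (1U << nr)) {
			pending &= ~(1U << nr);
			printf("servicing %s\n", names[nr]);
		}
	}
}

int main(void)
{
	raise_vector(TIMER_SOFTIRQ);
	raise_vector(NET_RX_SOFTIRQ);

	disable_vector(NET_RX_SOFTIRQ);	/* BH-disabled section for one vector */
	do_softirq_sketch();		/* services TIMER only */

	enable_vector(NET_RX_SOFTIRQ);	/* leaving the section re-enables it */
	do_softirq_sketch();		/* now services the still-pending NET_RX */
	return 0;
}

With NET_RX masked, the first pass services TIMER alone: a section that
disables one vector no longer holds up the others.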
    kernel/softirq.c | 16 ++++++++--------
    1 file changed, 8 insertions(+), 8 deletions(-)

diff --git a/kernel/softirq.c b/kernel/softirq.c
index 457bf60..f4cb1ea 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -195,7 +195,7 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt, unsigned int bh)
 	if (cnt)
 		preempt_count_sub(cnt - 1);
 
-	if (unlikely(!in_interrupt() && local_softirq_pending())) {
+	if (unlikely(!in_irq() && (local_softirq_pending() & local_softirq_enabled()))) {
 		/*
 		 * Run softirq if any pending. And do it in its own stack
 		 * as we may be calling this deep in a task call stack already.
@@ -387,7 +387,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
 	lockdep_softirq_end(in_hardirq);
 	account_irq_exit_time(current);
 	local_bh_exit();
-	WARN_ON_ONCE(in_interrupt());
+	WARN_ON_ONCE(in_irq());
 	current_restore_flags(old_flags, PF_MEMALLOC);
 }
 
@@ -396,12 +396,12 @@ asmlinkage __visible void do_softirq(void)
 	__u32 pending;
 	unsigned long flags;
 
-	if (in_interrupt())
+	if (in_irq())
 		return;
 
 	local_irq_save(flags);
 
-	pending = local_softirq_pending();
+	pending = local_softirq_pending() & local_softirq_enabled();
 
 	if (pending && !ksoftirqd_running(pending))
 		do_softirq_own_stack();
@@ -432,7 +432,7 @@ void irq_enter(void)
 
 static inline void invoke_softirq(void)
 {
-	if (ksoftirqd_running(local_softirq_pending()))
+	if (ksoftirqd_running(local_softirq_pending() & local_softirq_enabled()))
 		return;
 
 	if (!force_irqthreads) {
@@ -481,7 +481,7 @@ void irq_exit(void)
 #endif
 	account_irq_exit_time(current);
 	preempt_count_sub(HARDIRQ_OFFSET);
-	if (!in_interrupt() && local_softirq_pending())
+	if (!in_irq() && (local_softirq_pending() & local_softirq_enabled()))
 		invoke_softirq();
 
 	tick_irq_exit();
@@ -712,13 +712,13 @@ void __init softirq_init(void)
 
 static int ksoftirqd_should_run(unsigned int cpu)
 {
-	return local_softirq_pending();
+	return local_softirq_pending() & local_softirq_enabled();
 }
 
 static void run_ksoftirqd(unsigned int cpu)
 {
 	local_irq_disable();
-	if (local_softirq_pending()) {
+	if (local_softirq_pending() & local_softirq_enabled()) {
 		/*
 		 * We can safely run softirq on inline stack, as we are not deep
 		 * in the task stack here.
    --
    2.7.4