From: Dmitry Safonov <dima@arista.com>
Subject: [RFC 2/2] softirq: Introduce mask for __do_softirq()
Warning: not merge-ready; tested only on x86_64 and arm32.

To defer net softirqs until ksoftirqd runs, __do_softirq() needs to
process softirqs against a mask that depends on whether it is called
from the ksoftirqd thread or in the context of some other task.
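
A note on the mask arithmetic this relies on: softirq numbers
(NET_RX_SOFTIRQ and friends) are bit indices, not bitmasks, so
building a mask requires shifting them. Below is a minimal userspace
sketch (not kernel code) of that arithmetic; the enum mirrors the
kernel's ordering, and main()/printf() are demo scaffolding only:

  #include <stdio.h>

  enum { HI_SOFTIRQ, TIMER_SOFTIRQ, NET_TX_SOFTIRQ, NET_RX_SOFTIRQ };

  /* A softirq number is a bit index, so a mask has to shift it. */
  #define DEFER_SOFTIRQS ((1 << NET_RX_SOFTIRQ) | (1 << NET_TX_SOFTIRQ))

  int main(void)
  {
          unsigned int pending;

          /* Say a timer and a net-rx softirq are pending. */
          pending = (1 << TIMER_SOFTIRQ) | (1 << NET_RX_SOFTIRQ);

          if (pending & DEFER_SOFTIRQS)
                  printf("net softirq pending: would wake ksoftirqd\n");

          /* Drop the deferred bits; only the timer (0x2) is left. */
          pending &= ~DEFER_SOFTIRQS;
          printf("pending after masking: %#x\n", pending);
          return 0;
  }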

Signed-off-by: Dmitry Safonov <dima@arista.com>
---
 include/linux/interrupt.h |  8 ++++----
 kernel/softirq.c          | 41 ++++++++++++++++++++++-------------------
 2 files changed, 26 insertions(+), 23 deletions(-)

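A note on the __do_softirq() hunk below: bits outside the caller's
mask are written back as still-pending, so a later ksoftirqd pass can
pick them up, while only bits inside the mask are consumed by this
pass. A self-contained userspace sketch of that split, under the
semantics this patch intends (all names are local to the demo):

  #include <stdio.h>

  int main(void)
  {
          unsigned int pending = 0x0a; /* TIMER (bit 1) + NET_RX (bit 3) */
          unsigned int mask = 0x03;    /* caller allows bits 0-1 this pass */
          unsigned int latched;

          latched = pending & ~mask; /* left pending for ksoftirqd: 0x08 */
          pending &= mask;           /* consumed by this pass:      0x02 */

          printf("run now: %#x, latched: %#x\n", pending, latched);
          return 0;
  }

Running it prints "run now: 0x2, latched: 0x8".
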
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 69c238210325..1e943959b31a 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -489,14 +489,14 @@ struct softirq_action
};

asmlinkage void do_softirq(void);
-asmlinkage void __do_softirq(void);
+asmlinkage void __do_softirq(__u32 mask);

#ifdef __ARCH_HAS_DO_SOFTIRQ
-void do_softirq_own_stack(void);
+void do_softirq_own_stack(__u32 mask);
#else
-static inline void do_softirq_own_stack(void)
+static inline void do_softirq_own_stack(__u32 mask)
{
- __do_softirq();
+ __do_softirq(mask);
}
#endif

diff --git a/kernel/softirq.c b/kernel/softirq.c
index ee48f194dcec..5459b079bf73 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -88,26 +88,26 @@ static bool ksoftirqd_running(void)
return tsk && (tsk->state == TASK_RUNNING);
}

-static bool defer_softirq(void)
+#define DEFER_SOFTIRQS ((1 << NET_RX_SOFTIRQ) | (1 << NET_TX_SOFTIRQ))
+
+/* Mask of softirqs that are pending to be processed in the current context. */
+static __u32 current_softirq_pending(void)
{
__u32 pending = local_softirq_pending();

- if (!pending)
- return true;
-
if (ksoftirqd_running())
- return true;
+ return 0;

/*
* Defer net softirqs to ksoftirqd processing as they may
* starve userspace of CPU time.
*/
- if (pending & (NET_RX_SOFTIRQ | NET_TX_SOFTIRQ)) {
+ if (pending & DEFER_SOFTIRQS)
wakeup_softirqd();
- return true;
- }

- return false;
+ pending &= ~DEFER_SOFTIRQS;
+
+ return pending;
}

/*
@@ -261,7 +261,7 @@ static inline bool lockdep_softirq_start(void) { return false; }
static inline void lockdep_softirq_end(bool in_hardirq) { }
#endif

-asmlinkage __visible void __softirq_entry __do_softirq(void)
+asmlinkage __visible void __softirq_entry __do_softirq(__u32 mask)
{
unsigned long end = jiffies + MAX_SOFTIRQ_TIME;
unsigned long old_flags = current->flags;
@@ -286,7 +286,8 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)

restart:
/* Reset the pending bitmask before enabling irqs */
- set_softirq_pending(0);
+ set_softirq_pending(pending & ~mask);
+ pending &= mask;

local_irq_enable();

@@ -320,7 +321,7 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
local_irq_disable();

pending = local_softirq_pending();
- if (pending) {
+ if (pending & mask) {
if (time_before(jiffies, end) && !need_resched() &&
--max_restart)
goto restart;
@@ -338,14 +339,14 @@ asmlinkage __visible void __softirq_entry __do_softirq(void)
asmlinkage __visible void do_softirq(void)
{
unsigned long flags;
+ __u32 pending = current_softirq_pending();

- if (in_interrupt())
+ if (in_interrupt() || !pending)
return;

local_irq_save(flags);

- if (!defer_softirq())
- do_softirq_own_stack();
+ do_softirq_own_stack(pending);

local_irq_restore(flags);
}
@@ -371,7 +372,9 @@ void irq_enter(void)

static inline void invoke_softirq(void)
{
- if (defer_softirq())
+ __u32 pending = current_softirq_pending();
+
+ if (!pending)
return;

if (!force_irqthreads) {
@@ -381,14 +384,14 @@ static inline void invoke_softirq(void)
* it is the irq stack, because it should be near empty
* at this stage.
*/
- __do_softirq();
+ __do_softirq(pending);
#else
/*
* Otherwise, irq_exit() is called on the task stack that can
* be potentially deep already. So call softirq in its own stack
* to prevent from any overrun.
*/
- do_softirq_own_stack();
+ do_softirq_own_stack(pending);
#endif
} else {
wakeup_softirqd();
@@ -682,7 +685,7 @@ static void run_ksoftirqd(unsigned int cpu)
* We can safely run softirq on inline stack, as we are not deep
* in the task stack here.
*/
- __do_softirq();
+ __do_softirq(~0);
local_irq_enable();
cond_resched_rcu_qs();
return;
--
2.13.6