From: Scott Wood <swood@redhat.com>
Subject: [PATCH v2 2/3] sched/fair: Enable interrupts when dropping lock in newidle_balance()
When newidle_balance() is run from the balance callback path, interrupts
are disabled for the whole operation.  Re-enable interrupts across the
regions where the rq lock is dropped, so that hardirqs are not held off
for a potentially long balance pass.

When combined with the next patch, which breaks out of rebalancing
when an RT task is runnable, significant latency reductions are seen
on systems with many CPUs.

Signed-off-by: Scott Wood <swood@redhat.com>
---
kernel/sched/fair.c | 8 ++++++++
1 file changed, 8 insertions(+)
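
All four hunks below apply the same bracketing pattern around the
regions where the rq lock is dropped.  As a rough sketch (not the
literal kernel code; newidle_balance_in_callback is assumed here to be
the flag introduced earlier in this series, set when newidle_balance()
runs from a balance callback with interrupts already disabled):

	raw_spin_unlock(&this_rq->lock);
	if (newidle_balance_in_callback)
		local_irq_enable();	/* allow hardirqs during the long part */

	/* ... lock-dropped work: blocked-load updates, domain scan, ilb ... */

	if (newidle_balance_in_callback)
		local_irq_disable();	/* restore the callback's IRQ state */
	raw_spin_lock(&this_rq->lock);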

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ff369c38a5b5..aa8c87b6aff8 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10521,6 +10521,8 @@ static void nohz_newidle_balance(struct rq *this_rq)
 		return;
 
 	raw_spin_unlock(&this_rq->lock);
+	if (newidle_balance_in_callback)
+		local_irq_enable();
 	/*
 	 * This CPU is going to be idle and blocked load of idle CPUs
 	 * need to be updated. Run the ilb locally as it is a good
@@ -10529,6 +10531,8 @@ static void nohz_newidle_balance(struct rq *this_rq)
 	 */
 	if (!_nohz_idle_balance(this_rq, NOHZ_STATS_KICK, CPU_NEWLY_IDLE))
 		kick_ilb(NOHZ_STATS_KICK);
+	if (newidle_balance_in_callback)
+		local_irq_disable();
 	raw_spin_lock(&this_rq->lock);
 }
 
@@ -10599,6 +10603,8 @@ static int do_newidle_balance(struct rq *this_rq, struct rq_flags *rf)
 	}
 
 	raw_spin_unlock(&this_rq->lock);
+	if (newidle_balance_in_callback)
+		local_irq_enable();
 
 	update_blocked_averages(this_cpu);
 	rcu_read_lock();
@@ -10636,6 +10642,8 @@ static int do_newidle_balance(struct rq *this_rq, struct rq_flags *rf)
 	}
 	rcu_read_unlock();
 
+	if (newidle_balance_in_callback)
+		local_irq_disable();
 	raw_spin_lock(&this_rq->lock);
 
 	if (curr_cost > this_rq->max_idle_balance_cost)
--
2.27.0