Date: 2020-06-15
From: Peter Zijlstra (Intel) <peterz@infradead.org>
Subject: [PATCH 2/6] sched: Verify some SMP assumptions
Validate that:
- __smp_call_single_queue() is only used on remote CPUs
- task and rq CPUs match on activate_task()

(and always use activate_task() where we should)

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
 kernel/sched/core.c | 8 ++++----
 kernel/smp.c        | 2 ++
 2 files changed, 6 insertions(+), 4 deletions(-)
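For reference, SCHED_WARN_ON() is the scheduler's debug-only assertion; as defined in kernel/sched/sched.h of this era, it expands to WARN_ONCE() under CONFIG_SCHED_DEBUG and compiles away otherwise, so the new activate_task() check is free on production builds:

#ifdef CONFIG_SCHED_DEBUG
# define SCHED_WARN_ON(x)	WARN_ONCE(x, #x)
#else
# define SCHED_WARN_ON(x)	({ (void)(x), 0; })
#endif

The smp.c hunk below uses plain WARN_ON_ONCE() instead, presumably because kernel/smp.c sits outside CONFIG_SCHED_DEBUG's scope.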

--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1311,6 +1311,8 @@ static inline void dequeue_task(struct r
 
 void activate_task(struct rq *rq, struct task_struct *p, int flags)
 {
+	SCHED_WARN_ON(task_cpu(p) != cpu_of(rq));
+
 	if (task_contributes_to_load(p))
 		rq->nr_uninterruptible--;
 
@@ -1474,8 +1476,7 @@ static struct rq *move_queued_task(struc
 {
 	lockdep_assert_held(&rq->lock);
 
-	WRITE_ONCE(p->on_rq, TASK_ON_RQ_MIGRATING);
-	dequeue_task(rq, p, DEQUEUE_NOCLOCK);
+	deactivate_task(rq, p, DEQUEUE_NOCLOCK);
 	set_task_cpu(p, new_cpu);
 	rq_unlock(rq, rf);
 
@@ -1483,8 +1484,7 @@ static struct rq *move_queued_task(struc
 
 	rq_lock(rq, rf);
 	BUG_ON(task_cpu(p) != new_cpu);
-	enqueue_task(rq, p, 0);
-	p->on_rq = TASK_ON_RQ_QUEUED;
+	activate_task(rq, p, 0);
 	check_preempt_curr(rq, p, 0);
 
 	return rq;
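For comparison, deactivate_task() in this era's kernel/sched/core.c is roughly the dequeue sequence being replaced, plus the load accounting (quoted as a sketch of the ~v5.7 code, not an exact copy):

void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
	p->on_rq = (flags & DEQUEUE_SLEEP) ? 0 : TASK_ON_RQ_MIGRATING;

	if (task_contributes_to_load(p))
		rq->nr_uninterruptible++;

	dequeue_task(rq, p, flags);
}

The extra nr_uninterruptible accounting nets out here: any increment on the deactivate side of a migration is undone by the matching decrement in activate_task() when the task is re-enqueued on the new CPU.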
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -135,6 +135,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(cal
 
 void __smp_call_single_queue(int cpu, struct llist_node *node)
 {
+	WARN_ON_ONCE(cpu == smp_processor_id());
+
 	/*
 	 * The list addition should be visible before sending the IPI
 	 * handler locks the list to pull the entry off it because of
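To make the asserted calling convention concrete, a minimal sketch of what a caller is expected to do (handle_locally() is a hypothetical stand-in for the caller's local path; real callers such as smp_call_function_single() run the function directly on the local CPU rather than queueing):

/*
 * Hypothetical caller sketch, preemption disabled: the local CPU must
 * be handled directly; only remote CPUs may be queued for an IPI.
 */
static void queue_node_on(int cpu, struct llist_node *node)
{
	if (cpu == smp_processor_id())
		handle_locally(node);		/* hypothetical local path */
	else
		__smp_call_single_queue(cpu, node);
}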
