From: Peter Zijlstra <peterz@infradead.org>
Subject: [RFC PATCH v4 05/19] sched: Add task_struct pointer to sched_class::set_curr_task

In preparation for further separating pick_next_task() and
set_curr_task(), we have to pass the actual task into it; while there,
rename the thing to better pair with put_prev_task().

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
---
kernel/sched/core.c | 12 ++++++------
kernel/sched/deadline.c | 7 +------
kernel/sched/fair.c | 17 ++++++++++++++---
kernel/sched/idle.c | 27 +++++++++++++++------------
kernel/sched/rt.c | 7 +------
kernel/sched/sched.h | 8 +++++---
kernel/sched/stop_task.c | 17 +++++++----------
7 files changed, 49 insertions(+), 46 deletions(-)
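In short, the interface change boils down to the following (a condensed
sketch paraphrasing the kernel/sched/sched.h hunk below): the per-class
hook now receives the task explicitly instead of reaching for rq->curr,
mirroring put_prev_task().

	/* Old hook: each class had to look up rq->curr itself. */
	void (*set_curr_task)(struct rq *rq);

	/* New hook: the task is passed in, pairing with put_prev_task(). */
	void (*set_next_task)(struct rq *rq, struct task_struct *p);

	/* Wrapper used by core.c; for now callers still pass rq->curr. */
	static inline void set_next_task(struct rq *rq, struct task_struct *next)
	{
		WARN_ON_ONCE(rq->curr != next);
		next->sched_class->set_next_task(rq, next);
	}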

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index a9c93cf71f5f..95cc10ecc7c9 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1494,7 +1494,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
if (queued)
enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
if (running)
- set_curr_task(rq, p);
+ set_next_task(rq, p);
}

/*
@@ -4276,7 +4276,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
if (queued)
enqueue_task(rq, p, queue_flag);
if (running)
- set_curr_task(rq, p);
+ set_next_task(rq, p);

check_class_changed(rq, p, prev_class, oldprio);
out_unlock:
@@ -4343,7 +4343,7 @@ void set_user_nice(struct task_struct *p, long nice)
resched_curr(rq);
}
if (running)
- set_curr_task(rq, p);
+ set_next_task(rq, p);
out_unlock:
task_rq_unlock(rq, p, &rf);
}
@@ -4786,7 +4786,7 @@ static int __sched_setscheduler(struct task_struct *p,
enqueue_task(rq, p, queue_flags);
}
if (running)
- set_curr_task(rq, p);
+ set_next_task(rq, p);

check_class_changed(rq, p, prev_class, oldprio);

@@ -5975,7 +5975,7 @@ void sched_setnuma(struct task_struct *p, int nid)
if (queued)
enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
if (running)
- set_curr_task(rq, p);
+ set_next_task(rq, p);
task_rq_unlock(rq, p, &rf);
}
#endif /* CONFIG_NUMA_BALANCING */
@@ -6856,7 +6856,7 @@ void sched_move_task(struct task_struct *tsk)
if (queued)
enqueue_task(rq, tsk, queue_flags);
if (running)
- set_curr_task(rq, tsk);
+ set_next_task(rq, tsk);

task_rq_unlock(rq, tsk, &rf);
}
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 4b53e7c696c8..38b45f2f890b 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1812,11 +1812,6 @@ static void task_fork_dl(struct task_struct *p)
*/
}

-static void set_curr_task_dl(struct rq *rq)
-{
- set_next_task_dl(rq, rq->curr);
-}
-
#ifdef CONFIG_SMP

/* Only try algorithms three times */
@@ -2396,6 +2391,7 @@ const struct sched_class dl_sched_class = {

.pick_next_task = pick_next_task_dl,
.put_prev_task = put_prev_task_dl,
+ .set_next_task = set_next_task_dl,

#ifdef CONFIG_SMP
.select_task_rq = select_task_rq_dl,
@@ -2406,7 +2402,6 @@ const struct sched_class dl_sched_class = {
.task_woken = task_woken_dl,
#endif

- .set_curr_task = set_curr_task_dl,
.task_tick = task_tick_dl,
.task_fork = task_fork_dl,

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a1e8b811ce1f..a58e5de1732d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -10180,9 +10180,19 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p)
* This routine is mostly called to set cfs_rq->curr field when a task
* migrates between groups/classes.
*/
-static void set_curr_task_fair(struct rq *rq)
+static void set_next_task_fair(struct rq *rq, struct task_struct *p)
{
- struct sched_entity *se = &rq->curr->se;
+ struct sched_entity *se = &p->se;
+
+#ifdef CONFIG_SMP
+ if (task_on_rq_queued(p)) {
+ /*
+ * Move the next running task to the front of the list, so our
+ * cfs_tasks list becomes MRU one.
+ */
+ list_move(&se->group_node, &rq->cfs_tasks);
+ }
+#endif

for_each_sched_entity(se) {
struct cfs_rq *cfs_rq = cfs_rq_of(se);
@@ -10453,7 +10463,9 @@ const struct sched_class fair_sched_class = {
.check_preempt_curr = check_preempt_wakeup,

.pick_next_task = pick_next_task_fair,
+
.put_prev_task = put_prev_task_fair,
+ .set_next_task = set_next_task_fair,

#ifdef CONFIG_SMP
.select_task_rq = select_task_rq_fair,
@@ -10466,7 +10478,6 @@ const struct sched_class fair_sched_class = {
.set_cpus_allowed = set_cpus_allowed_common,
#endif

- .set_curr_task = set_curr_task_fair,
.task_tick = task_tick_fair,
.task_fork = task_fork_fair,

diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 0d2f83899c83..3ff4889196e1 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -374,14 +374,25 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int fl
resched_curr(rq);
}

+static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
+{
+}
+
+static void set_next_task_idle(struct rq *rq, struct task_struct *next)
+{
+ update_idle_core(rq);
+ schedstat_inc(rq->sched_goidle);
+}
+
static struct task_struct *
pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
+ struct task_struct *next = rq->idle;
+
put_prev_task(rq, prev);
- update_idle_core(rq);
- schedstat_inc(rq->sched_goidle);
+ set_next_task_idle(rq, next);

- return rq->idle;
+ return next;
}

/*
@@ -397,10 +408,6 @@ dequeue_task_idle(struct rq *rq, struct task_struct *p, int flags)
raw_spin_lock_irq(rq_lockp(rq));
}

-static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
-{
-}
-
/*
* scheduler tick hitting a task of our scheduling class.
*
@@ -413,10 +420,6 @@ static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued)
{
}

-static void set_curr_task_idle(struct rq *rq)
-{
-}
-
static void switched_to_idle(struct rq *rq, struct task_struct *p)
{
BUG();
@@ -451,13 +454,13 @@ const struct sched_class idle_sched_class = {

.pick_next_task = pick_next_task_idle,
.put_prev_task = put_prev_task_idle,
+ .set_next_task = set_next_task_idle,

#ifdef CONFIG_SMP
.select_task_rq = select_task_rq_idle,
.set_cpus_allowed = set_cpus_allowed_common,
#endif

- .set_curr_task = set_curr_task_idle,
.task_tick = task_tick_idle,

.get_rr_interval = get_rr_interval_idle,
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index f4590ac6fd92..a857945772d1 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2355,11 +2355,6 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
}
}

-static void set_curr_task_rt(struct rq *rq)
-{
- set_next_task_rt(rq, rq->curr);
-}
-
static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
{
/*
@@ -2381,6 +2376,7 @@ const struct sched_class rt_sched_class = {

.pick_next_task = pick_next_task_rt,
.put_prev_task = put_prev_task_rt,
+ .set_next_task = set_next_task_rt,

#ifdef CONFIG_SMP
.select_task_rq = select_task_rq_rt,
@@ -2392,7 +2388,6 @@ const struct sched_class rt_sched_class = {
.switched_from = switched_from_rt,
#endif

- .set_curr_task = set_curr_task_rt,
.task_tick = task_tick_rt,

.get_rr_interval = get_rr_interval_rt,
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index af2a9972149a..657831e26008 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1715,6 +1715,7 @@ struct sched_class {
struct task_struct *prev,
struct rq_flags *rf);
void (*put_prev_task)(struct rq *rq, struct task_struct *p);
+ void (*set_next_task)(struct rq *rq, struct task_struct *p);

#ifdef CONFIG_SMP
int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
@@ -1729,7 +1730,6 @@ struct sched_class {
void (*rq_offline)(struct rq *rq);
#endif

- void (*set_curr_task)(struct rq *rq);
void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
void (*task_fork)(struct task_struct *p);
void (*task_dead)(struct task_struct *p);
@@ -1759,12 +1759,14 @@ struct sched_class {

static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
{
+ WARN_ON_ONCE(rq->curr != prev);
prev->sched_class->put_prev_task(rq, prev);
}

-static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
+static inline void set_next_task(struct rq *rq, struct task_struct *next)
{
- curr->sched_class->set_curr_task(rq);
+ WARN_ON_ONCE(rq->curr != next);
+ next->sched_class->set_next_task(rq, next);
}

#ifdef CONFIG_SMP
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index c183b790ca54..47a3d2a18a9a 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -23,6 +23,11 @@ check_preempt_curr_stop(struct rq *rq, struct task_struct *p, int flags)
/* we're never preempted */
}

+static void set_next_task_stop(struct rq *rq, struct task_struct *stop)
+{
+ stop->se.exec_start = rq_clock_task(rq);
+}
+
static struct task_struct *
pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
{
@@ -32,8 +37,7 @@ pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
return NULL;

put_prev_task(rq, prev);
-
- stop->se.exec_start = rq_clock_task(rq);
+ set_next_task_stop(rq, stop);

return stop;
}
@@ -86,13 +90,6 @@ static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued)
{
}

-static void set_curr_task_stop(struct rq *rq)
-{
- struct task_struct *stop = rq->stop;
-
- stop->se.exec_start = rq_clock_task(rq);
-}
-
static void switched_to_stop(struct rq *rq, struct task_struct *p)
{
BUG(); /* its impossible to change to this class */
@@ -128,13 +125,13 @@ const struct sched_class stop_sched_class = {

.pick_next_task = pick_next_task_stop,
.put_prev_task = put_prev_task_stop,
+ .set_next_task = set_next_task_stop,

#ifdef CONFIG_SMP
.select_task_rq = select_task_rq_stop,
.set_cpus_allowed = set_cpus_allowed_common,
#endif

- .set_curr_task = set_curr_task_stop,
.task_tick = task_tick_stop,

.get_rr_interval = get_rr_interval_stop,
--
2.17.1