    Subject: [RFC][PATCH 05/13] sched: Add task_struct pointer to sched_class::set_curr_task
    In preparation for further separating pick_next_task() and
    set_curr_task(), we have to pass the actual task into it; while
    there, rename the thing to better pair with put_prev_task().
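
    For readers skimming the diff, the shape of the interface change is roughly
    the following (a minimal sketch of the before/after hook signatures distilled
    from the hunks below, not an excerpt that will apply as a patch):

        /* Before: the hook implicitly acted on rq->curr. */
        void (*set_curr_task)(struct rq *rq);

        /* After: the task is passed explicitly, pairing with put_prev_task(). */
        void (*set_next_task)(struct rq *rq, struct task_struct *p);

        /*
         * The core wrapper now takes the task as well, so per-class forwarders
         * such as set_curr_task_rt()/set_curr_task_dl() that only passed
         * rq->curr along can be removed.
         */
        static inline void set_next_task(struct rq *rq, struct task_struct *next)
        {
                WARN_ON_ONCE(rq->curr != next);
                next->sched_class->set_next_task(rq, next);
        }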

    Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
    ---
    kernel/sched/core.c      | 12 ++++++------
    kernel/sched/deadline.c  |  7 +------
    kernel/sched/fair.c      | 17 ++++++++++++++---
    kernel/sched/idle.c      | 27 +++++++++++++++------------
    kernel/sched/rt.c        |  7 +------
    kernel/sched/sched.h     |  8 +++++---
    kernel/sched/stop_task.c | 17 +++++++----------
    7 files changed, 49 insertions(+), 46 deletions(-)

    --- a/kernel/sched/core.c
    +++ b/kernel/sched/core.c
    @@ -1494,7 +1494,7 @@ void do_set_cpus_allowed(struct task_str
    if (queued)
    enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
    if (running)
    - set_curr_task(rq, p);
    + set_next_task(rq, p);
    }

    /*
    @@ -4273,7 +4273,7 @@ void rt_mutex_setprio(struct task_struct
    if (queued)
    enqueue_task(rq, p, queue_flag);
    if (running)
    - set_curr_task(rq, p);
    + set_next_task(rq, p);

    check_class_changed(rq, p, prev_class, oldprio);
    out_unlock:
    @@ -4340,7 +4340,7 @@ void set_user_nice(struct task_struct *p
    resched_curr(rq);
    }
    if (running)
    - set_curr_task(rq, p);
    + set_next_task(rq, p);
    out_unlock:
    task_rq_unlock(rq, p, &rf);
    }
    @@ -4783,7 +4783,7 @@ static int __sched_setscheduler(struct t
    enqueue_task(rq, p, queue_flags);
    }
    if (running)
    - set_curr_task(rq, p);
    + set_next_task(rq, p);

    check_class_changed(rq, p, prev_class, oldprio);

    @@ -5972,7 +5972,7 @@ void sched_setnuma(struct task_struct *p
    if (queued)
    enqueue_task(rq, p, ENQUEUE_RESTORE | ENQUEUE_NOCLOCK);
    if (running)
    - set_curr_task(rq, p);
    + set_next_task(rq, p);
    task_rq_unlock(rq, p, &rf);
    }
    #endif /* CONFIG_NUMA_BALANCING */
    @@ -6853,7 +6853,7 @@ void sched_move_task(struct task_struct
    if (queued)
    enqueue_task(rq, tsk, queue_flags);
    if (running)
    - set_curr_task(rq, tsk);
    + set_next_task(rq, tsk);

    task_rq_unlock(rq, tsk, &rf);
    }
    --- a/kernel/sched/deadline.c
    +++ b/kernel/sched/deadline.c
    @@ -1812,11 +1812,6 @@ static void task_fork_dl(struct task_str
    */
    }

    -static void set_curr_task_dl(struct rq *rq)
    -{
    - set_next_task_dl(rq, rq->curr);
    -}
    -
    #ifdef CONFIG_SMP

    /* Only try algorithms three times */
    @@ -2404,6 +2399,7 @@ const struct sched_class dl_sched_class

    .pick_next_task = pick_next_task_dl,
    .put_prev_task = put_prev_task_dl,
    + .set_next_task = set_next_task_dl,

    #ifdef CONFIG_SMP
    .select_task_rq = select_task_rq_dl,
    @@ -2414,7 +2410,6 @@ const struct sched_class dl_sched_class
    .task_woken = task_woken_dl,
    #endif

    - .set_curr_task = set_curr_task_dl,
    .task_tick = task_tick_dl,
    .task_fork = task_fork_dl,

    --- a/kernel/sched/fair.c
    +++ b/kernel/sched/fair.c
    @@ -10121,9 +10121,19 @@ static void switched_to_fair(struct rq *
    * This routine is mostly called to set cfs_rq->curr field when a task
    * migrates between groups/classes.
    */
    -static void set_curr_task_fair(struct rq *rq)
    +static void set_next_task_fair(struct rq *rq, struct task_struct *p)
    {
    - struct sched_entity *se = &rq->curr->se;
    + struct sched_entity *se = &p->se;
    +
    +#ifdef CONFIG_SMP
    + if (task_on_rq_queued(p)) {
    + /*
    + * Move the next running task to the front of the list, so our
    + * cfs_tasks list becomes MRU one.
    + */
    + list_move(&se->group_node, &rq->cfs_tasks);
    + }
    +#endif

    for_each_sched_entity(se) {
    struct cfs_rq *cfs_rq = cfs_rq_of(se);
    @@ -10394,7 +10404,9 @@ const struct sched_class fair_sched_clas
    .check_preempt_curr = check_preempt_wakeup,

    .pick_next_task = pick_next_task_fair,
    +
    .put_prev_task = put_prev_task_fair,
    + .set_next_task = set_next_task_fair,

    #ifdef CONFIG_SMP
    .select_task_rq = select_task_rq_fair,
    @@ -10407,7 +10419,6 @@ const struct sched_class fair_sched_clas
    .set_cpus_allowed = set_cpus_allowed_common,
    #endif

    - .set_curr_task = set_curr_task_fair,
    .task_tick = task_tick_fair,
    .task_fork = task_fork_fair,

    --- a/kernel/sched/idle.c
    +++ b/kernel/sched/idle.c
    @@ -374,14 +374,25 @@ static void check_preempt_curr_idle(stru
    resched_curr(rq);
    }

    +static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
    +{
    +}
    +
    +static void set_next_task_idle(struct rq *rq, struct task_struct *next)
    +{
    + update_idle_core(rq);
    + schedstat_inc(rq->sched_goidle);
    +}
    +
    static struct task_struct *
    pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
    {
    + struct task_struct *next = rq->idle;
    +
    put_prev_task(rq, prev);
    - update_idle_core(rq);
    - schedstat_inc(rq->sched_goidle);
    + set_next_task_idle(rq, next);

    - return rq->idle;
    + return next;
    }

    /*
    @@ -397,10 +408,6 @@ dequeue_task_idle(struct rq *rq, struct
    raw_spin_lock_irq(&rq->lock);
    }

    -static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
    -{
    -}
    -
    /*
    * scheduler tick hitting a task of our scheduling class.
    *
    @@ -413,10 +420,6 @@ static void task_tick_idle(struct rq *rq
    {
    }

    -static void set_curr_task_idle(struct rq *rq)
    -{
    -}
    -
    static void switched_to_idle(struct rq *rq, struct task_struct *p)
    {
    BUG();
    @@ -451,13 +454,13 @@ const struct sched_class idle_sched_clas

    .pick_next_task = pick_next_task_idle,
    .put_prev_task = put_prev_task_idle,
    + .set_next_task = set_next_task_idle,

    #ifdef CONFIG_SMP
    .select_task_rq = select_task_rq_idle,
    .set_cpus_allowed = set_cpus_allowed_common,
    #endif

    - .set_curr_task = set_curr_task_idle,
    .task_tick = task_tick_idle,

    .get_rr_interval = get_rr_interval_idle,
    --- a/kernel/sched/rt.c
    +++ b/kernel/sched/rt.c
    @@ -2355,11 +2355,6 @@ static void task_tick_rt(struct rq *rq,
    }
    }

    -static void set_curr_task_rt(struct rq *rq)
    -{
    - set_next_task_rt(rq, rq->curr);
    -}
    -
    static unsigned int get_rr_interval_rt(struct rq *rq, struct task_struct *task)
    {
    /*
    @@ -2381,6 +2376,7 @@ const struct sched_class rt_sched_class

    .pick_next_task = pick_next_task_rt,
    .put_prev_task = put_prev_task_rt,
    + .set_next_task = set_next_task_rt,

    #ifdef CONFIG_SMP
    .select_task_rq = select_task_rq_rt,
    @@ -2392,7 +2388,6 @@ const struct sched_class rt_sched_class
    .switched_from = switched_from_rt,
    #endif

    - .set_curr_task = set_curr_task_rt,
    .task_tick = task_tick_rt,

    .get_rr_interval = get_rr_interval_rt,
    --- a/kernel/sched/sched.h
    +++ b/kernel/sched/sched.h
    @@ -1711,6 +1711,7 @@ struct sched_class {
    struct task_struct *prev,
    struct rq_flags *rf);
    void (*put_prev_task)(struct rq *rq, struct task_struct *p);
    + void (*set_next_task)(struct rq *rq, struct task_struct *p);

    #ifdef CONFIG_SMP
    int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
    @@ -1725,7 +1726,6 @@ struct sched_class {
    void (*rq_offline)(struct rq *rq);
    #endif

    - void (*set_curr_task)(struct rq *rq);
    void (*task_tick)(struct rq *rq, struct task_struct *p, int queued);
    void (*task_fork)(struct task_struct *p);
    void (*task_dead)(struct task_struct *p);
    @@ -1755,12 +1755,14 @@ struct sched_class {

    static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
    {
    + WARN_ON_ONCE(rq->curr != prev);
    prev->sched_class->put_prev_task(rq, prev);
    }

    -static inline void set_curr_task(struct rq *rq, struct task_struct *curr)
    +static inline void set_next_task(struct rq *rq, struct task_struct *next)
    {
    - curr->sched_class->set_curr_task(rq);
    + WARN_ON_ONCE(rq->curr != next);
    + next->sched_class->set_next_task(rq, next);
    }

    #ifdef CONFIG_SMP
    --- a/kernel/sched/stop_task.c
    +++ b/kernel/sched/stop_task.c
    @@ -23,6 +23,11 @@ check_preempt_curr_stop(struct rq *rq, s
    /* we're never preempted */
    }

    +static void set_next_task_stop(struct rq *rq, struct task_struct *stop)
    +{
    + stop->se.exec_start = rq_clock_task(rq);
    +}
    +
    static struct task_struct *
    pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
    {
    @@ -32,8 +37,7 @@ pick_next_task_stop(struct rq *rq, struc
    return NULL;

    put_prev_task(rq, prev);
    -
    - stop->se.exec_start = rq_clock_task(rq);
    + set_next_task_stop(rq, stop);

    return stop;
    }
    @@ -86,13 +90,6 @@ static void task_tick_stop(struct rq *rq
    {
    }

    -static void set_curr_task_stop(struct rq *rq)
    -{
    - struct task_struct *stop = rq->stop;
    -
    - stop->se.exec_start = rq_clock_task(rq);
    -}
    -
    static void switched_to_stop(struct rq *rq, struct task_struct *p)
    {
    BUG(); /* its impossible to change to this class */
    @@ -128,13 +125,13 @@ const struct sched_class stop_sched_clas

    .pick_next_task = pick_next_task_stop,
    .put_prev_task = put_prev_task_stop,
    + .set_next_task = set_next_task_stop,

    #ifdef CONFIG_SMP
    .select_task_rq = select_task_rq_stop,
    .set_cpus_allowed = set_cpus_allowed_common,
    #endif

    - .set_curr_task = set_curr_task_stop,
    .task_tick = task_tick_stop,

    .get_rr_interval = get_rr_interval_stop,
