 
    From: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
    Subject: [PATCH 4/6] sched/numa: Pass destination cpu as a parameter to migrate_task_rq
    Date: 2018-08-03
    The additional parameter (new_cpu) will be used by a later patch in the
    series to identify whether a task migration is across NUMA nodes.

    No functional change.

    Signed-off-by: Srikar Dronamraju <srikar@linux.vnet.ibm.com>
    ---
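    Reviewer note (not part of the commit message): the fragment below is only
    a rough sketch of how a follow-up change might use the new new_cpu
    argument; the handling inside the branch is hypothetical and is not taken
    verbatim from any patch in this series.

    	/*
    	 * Illustrative sketch only: detect a cross-node migration by
    	 * comparing the NUMA node of the task's current CPU with the
    	 * node of the destination CPU.
    	 */
    	static void migrate_task_rq_fair(struct task_struct *p, int new_cpu)
    	{
    		if (cpu_to_node(task_cpu(p)) != cpu_to_node(new_cpu)) {
    			/* Cross-node move: hypothetical handling, e.g. */
    			/* adjusting the task's NUMA scan state, goes here. */
    		}

    		/* ... existing migrate_task_rq_fair() body unchanged ... */
    	}

    Passing the destination explicitly matters because, at the point the
    migrate_task_rq() callback runs in set_task_cpu(), task_cpu(p) still
    reports the source CPU, so the callback has no other way to learn where
    the task is headed.
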
     kernel/sched/core.c     | 2 +-
     kernel/sched/deadline.c | 2 +-
     kernel/sched/fair.c     | 2 +-
     kernel/sched/sched.h    | 2 +-
     4 files changed, 4 insertions(+), 4 deletions(-)

    diff --git a/kernel/sched/core.c b/kernel/sched/core.c
    index deafa9f..fdab290 100644
    --- a/kernel/sched/core.c
    +++ b/kernel/sched/core.c
    @@ -1167,7 +1167,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)

     	if (task_cpu(p) != new_cpu) {
     		if (p->sched_class->migrate_task_rq)
    -			p->sched_class->migrate_task_rq(p);
    +			p->sched_class->migrate_task_rq(p, new_cpu);
     		p->se.nr_migrations++;
     		rseq_migrate(p);
     		perf_event_task_migrate(p);
    diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
    index 997ea7b..91e4202 100644
    --- a/kernel/sched/deadline.c
    +++ b/kernel/sched/deadline.c
    @@ -1607,7 +1607,7 @@ static void yield_task_dl(struct rq *rq)
     	return cpu;
     }
     
    -static void migrate_task_rq_dl(struct task_struct *p)
    +static void migrate_task_rq_dl(struct task_struct *p, int new_cpu __maybe_unused)
     {
     	struct rq *rq;
     
    diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
    index a717870..a5936ed 100644
    --- a/kernel/sched/fair.c
    +++ b/kernel/sched/fair.c
    @@ -6308,7 +6308,7 @@ static int wake_cap(struct task_struct *p, int cpu, int prev_cpu)
      * cfs_rq_of(p) references at time of call are still valid and identify the
      * previous CPU. The caller guarantees p->pi_lock or task_rq(p)->lock is held.
      */
    -static void migrate_task_rq_fair(struct task_struct *p)
    +static void migrate_task_rq_fair(struct task_struct *p, int new_cpu __maybe_unused)
     {
     	/*
     	 * As blocked tasks retain absolute vruntime the migration needs to
    diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
    index 0b91612..455fa33 100644
    --- a/kernel/sched/sched.h
    +++ b/kernel/sched/sched.h
    @@ -1524,7 +1524,7 @@ struct sched_class {

     #ifdef CONFIG_SMP
     	int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
    -	void (*migrate_task_rq)(struct task_struct *p);
    +	void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
     
     	void (*task_woken)(struct rq *this_rq, struct task_struct *task);
     
    --
    1.8.3.1