Subject: [PATCH 2/2] sched, cpuset: introduce do_set_cpus_allowed() helper function
Currently there are five writers of task->cpus_allowed:

1) sched_rt.c: set_cpus_allowed_rt()
2) sched.c: set_cpus_allowed_ptr()
3) sched.c: init_idle()
4) kthread.c: kthread_bind()
5) cpuset.c: cpuset_cpus_allowed_fallback()

Of these, (3), (4) and (5) don't call p->sched_class->set_cpus_allowed().
That is fine today because there is an implicit guarantee that it is safe,
but it is still a theoretically bad habit: if a scheduler class implements
->set_cpus_allowed() in the future, those call sites will silently bypass it.

So introduce a common helper function and clean up the callers.
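
In short, instead of each writer open-coding

    cpumask_copy(&p->cpus_allowed, new_mask);
    p->rt.nr_cpus_allowed = cpumask_weight(new_mask);

every site now calls

    do_set_cpus_allowed(p, new_mask);

which also dispatches to p->sched_class->set_cpus_allowed() when the
scheduler class provides that hook (see the kernel/sched.c hunk below
for the helper itself).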

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
---
 include/linux/cpuset.h |  3 +--
 include/linux/sched.h  |  7 +++++++
 kernel/cpuset.c        |  5 ++---
 kernel/kthread.c       |  4 ++--
 kernel/sched.c         | 19 ++++++++++++-------
 5 files changed, 24 insertions(+), 14 deletions(-)

diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 42dcbdc..e9eaec5 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -146,8 +146,7 @@ static inline void cpuset_cpus_allowed(struct task_struct *p,

 static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
 {
-	cpumask_copy(&p->cpus_allowed, cpu_possible_mask);
-	p->rt.nr_cpus_allowed = cpumask_weight(&p->cpus_allowed);
+	do_set_cpus_allowed(p, cpu_possible_mask);
 	return cpumask_any(cpu_active_mask);
 }

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3f7d3f9..fc7964d 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1823,9 +1823,16 @@ static inline void rcu_copy_process(struct task_struct *p)
 #endif

 #ifdef CONFIG_SMP
+extern void do_set_cpus_allowed(struct task_struct *p,
+				const struct cpumask *new_mask);
+
 extern int set_cpus_allowed_ptr(struct task_struct *p,
 				const struct cpumask *new_mask);
 #else
+static inline void do_set_cpus_allowed(struct task_struct *p,
+				       const struct cpumask *new_mask)
+{
+}
 static inline int set_cpus_allowed_ptr(struct task_struct *p,
 				       const struct cpumask *new_mask)
 {
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 6e5bbe8..9c9b754 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2190,7 +2190,7 @@ int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
 	rcu_read_lock();
 	cs = task_cs(tsk);
 	if (cs)
-		cpumask_copy(&tsk->cpus_allowed, cs->cpus_allowed);
+		do_set_cpus_allowed(tsk, cs->cpus_allowed);
 	rcu_read_unlock();

 	/*
@@ -2217,10 +2217,9 @@ int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
 		 * Like above we can temporary set any mask and rely on
 		 * set_cpus_allowed_ptr() as synchronization point.
 		 */
-		cpumask_copy(&tsk->cpus_allowed, cpu_possible_mask);
+		do_set_cpus_allowed(tsk, cpu_possible_mask);
 		cpu = cpumask_any(cpu_active_mask);
 	}
-	tsk->rt.nr_cpus_allowed = cpumask_weight(&tsk->cpus_allowed);

 	return cpu;
 }
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 3b34d27..4ba7ccc 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -202,8 +202,8 @@ void kthread_bind(struct task_struct *p, unsigned int cpu)
 		return;
 	}

-	p->cpus_allowed = cpumask_of_cpu(cpu);
-	p->rt.nr_cpus_allowed = 1;
+	/* It's safe because the task is inactive. */
+	do_set_cpus_allowed(p, cpumask_of(cpu));
 	p->flags |= PF_THREAD_BOUND;
 }
 EXPORT_SYMBOL(kthread_bind);
diff --git a/kernel/sched.c b/kernel/sched.c
index bfcd219..7867e47 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5819,7 +5819,7 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	idle->state = TASK_RUNNING;
 	idle->se.exec_start = sched_clock();

-	cpumask_copy(&idle->cpus_allowed, cpumask_of(cpu));
+	do_set_cpus_allowed(idle, cpumask_of(cpu));
 	/*
 	 * We're having a chicken and egg problem, even though we are
 	 * holding rq->lock, the cpu isn't yet set to this cpu so the
@@ -5910,6 +5910,16 @@ static inline void sched_init_granularity(void)
 }

 #ifdef CONFIG_SMP
+void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
+{
+	if (p->sched_class && p->sched_class->set_cpus_allowed)
+		p->sched_class->set_cpus_allowed(p, new_mask);
+	else {
+		cpumask_copy(&p->cpus_allowed, new_mask);
+		p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
+	}
+}
+
 /*
  * This is how migration works:
  *
@@ -5953,12 +5963,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 		goto out;
 	}

-	if (p->sched_class->set_cpus_allowed)
-		p->sched_class->set_cpus_allowed(p, new_mask);
-	else {
-		cpumask_copy(&p->cpus_allowed, new_mask);
-		p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
-	}
+	do_set_cpus_allowed(p, new_mask);

 	/* Can the task run on the task's current CPU? If so, we're done */
 	if (cpumask_test_cpu(task_cpu(p), new_mask))
--
1.7.3.1



