Date: 2010-12-24
From: Peter Zijlstra <a.p.zijlstra@chello.nl>
Subject: [RFC][PATCH 10/17] sched: Add TASK_WAKING to task_rq_lock
In order to be able to call set_task_cpu() without holding the
appropriate rq->lock during ttwu(), add a TASK_WAKING clause to the
task_rq_lock() primitive.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
---
 kernel/sched.c |   14 +++++++-------
 1 file changed, 7 insertions(+), 7 deletions(-)

Index: linux-2.6/kernel/sched.c
===================================================================
--- linux-2.6.orig/kernel/sched.c
+++ linux-2.6/kernel/sched.c
@@ -928,12 +928,13 @@ static inline void finish_lock_switch(st
 #endif /* __ARCH_WANT_UNLOCKED_CTXSW */
 
 /*
- * Check whether the task is waking, we use this to synchronize ->cpus_allowed
- * against ttwu().
+ * In order to be able to call set_task_cpu() without holding the current
+ * task_rq(p)->lock during wake-ups we need to serialize on something else,
+ * use the wakeup task state.
  */
 static inline int task_is_waking(struct task_struct *p)
 {
-	return unlikely(p->state == TASK_WAKING);
+	return p->state == TASK_WAKING;
 }
 
 /*
@@ -948,7 +949,7 @@ static inline struct rq *__task_rq_lock(
 	for (;;) {
 		rq = task_rq(p);
 		raw_spin_lock(&rq->lock);
-		if (likely(rq == task_rq(p)))
+		if (likely(rq == task_rq(p) && !task_is_waking(p)))
 			return rq;
 		raw_spin_unlock(&rq->lock);
 	}
@@ -956,8 +957,7 @@ static inline struct rq *__task_rq_lock(
 
 /*
  * task_rq_lock - lock the runqueue a given task resides on and disable
- * interrupts. Note the ordering: we can safely lookup the task_rq without
- * explicitly disabling preemption.
+ * interrupts.
  */
 static struct rq *task_rq_lock(struct task_struct *p, unsigned long *flags)
 	__acquires(rq->lock)
@@ -968,7 +968,7 @@ static struct rq *task_rq_lock(struct ta
 		local_irq_save(*flags);
 		rq = task_rq(p);
 		raw_spin_lock(&rq->lock);
-		if (likely(rq == task_rq(p)))
+		if (likely(rq == task_rq(p) && !task_is_waking(p)))
 			return rq;
 		raw_spin_unlock_irqrestore(&rq->lock, *flags);
 	}


