Subject: [RFC][PATCH 17/17] sched: Sort hotplug vs ttwu queueing

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
---
 kernel/sched.c | 20 ++++++++++++++++++--
 1 file changed, 18 insertions(+), 2 deletions(-)

Index: linux-2.6/kernel/sched.c
===================================================================
--- linux-2.6.orig/kernel/sched.c
+++ linux-2.6/kernel/sched.c
@@ -2470,15 +2470,15 @@ static int ttwu_remote(struct task_struc
 	return ret;
 }
 
-void sched_ttwu_pending(void)
+static void __sched_ttwu_pending(struct rq *rq)
 {
 #ifdef CONFIG_SMP
-	struct rq *rq = this_rq();
 	struct task_struct *list = xchg(&rq->wake_list, NULL);
 
 	if (!list)
 		return;
 
+	rq = this_rq(); /* always enqueue locally */
 	raw_spin_lock(&rq->lock);
 
 	while (list) {
@@ -2491,6 +2491,11 @@ void sched_ttwu_pending(void)
 #endif
 }
 
+void sched_ttwu_pending(void)
+{
+	__sched_ttwu_pending(this_rq());
+}
+
 #ifdef CONFIG_SMP
 static void ttwu_queue_remote(struct task_struct *p, int cpu)
 {
@@ -6162,6 +6167,17 @@ migration_call(struct notifier_block *nf
 		migrate_nr_uninterruptible(rq);
 		calc_global_load_remove(rq);
 		break;
+
+	case CPU_DEAD:
+		/*
+		 * Queue any possible remaining pending wakeups on this cpu.
+		 * Load-balancing will sort it out eventually.
+		 */
+		local_irq_save(flags);
+		__sched_ttwu_pending(cpu_rq(cpu));
+		local_irq_restore(flags);
+		break;
+
 #endif
 	}
 	return NOTIFY_OK;
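
For readers unfamiliar with the wake_list machinery this patch touches, below is a
minimal, self-contained sketch (plain C11 userspace code, not kernel code; the
struct and function names in it are made up for illustration) of the xchg-based
handoff that __sched_ttwu_pending() relies on: producers push tasks onto a
lock-free singly linked list, and the consumer detaches the entire list with one
atomic exchange before walking it. The CPU_DEAD case above runs the same kind of
drain against the dead CPU's list and activates the tasks on the current CPU.

#include <stdatomic.h>
#include <stdio.h>

/* stand-in for struct task_struct; only the fields the sketch needs */
struct task {
        int pid;
        struct task *wake_entry;        /* next pointer in the wake list */
};

/* stand-in for one CPU's rq->wake_list head */
static _Atomic(struct task *) wake_list;

/* producer side: push a task onto the list without taking any lock */
static void queue_wakeup(struct task *p)
{
        struct task *head = atomic_load(&wake_list);

        do {
                p->wake_entry = head;
        } while (!atomic_compare_exchange_weak(&wake_list, &head, p));
}

/* consumer side: detach the whole list in one exchange, then walk it */
static void drain_wake_list(void)
{
        struct task *list = atomic_exchange(&wake_list, NULL);

        while (list) {
                struct task *p = list;

                list = list->wake_entry;
                printf("activating pid %d locally\n", p->pid);
        }
}

int main(void)
{
        struct task a = { .pid = 1 }, b = { .pid = 2 };

        queue_wakeup(&a);
        queue_wakeup(&b);
        drain_wake_list();      /* LIFO order: pid 2, then pid 1 */
        return 0;
}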


