Date: 20 Nov 2007
From: Gregory Haskins <ghaskins@novell.com>
Subject: [PATCH v4 15/20] RT: Optimize rebalancing

We already have logic to detect whether a runqueue holds migratable RT
tasks, but we are not consulting it when deciding whether to push tasks
away. Cache the overload state in a per-runqueue flag and check it first,
so the push path can bail out early when nothing can be migrated.

Signed-off-by: Gregory Haskins <ghaskins@novell.com>
Signed-off-by: Steven Rostedt <srostedt@redhat.com>
---

 kernel/sched.c    |    2 ++
 kernel/sched_rt.c |   10 ++++++++--
 2 files changed, 10 insertions(+), 2 deletions(-)
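
In effect, push_rt_task() gains a cheap early-out keyed off the cached
overload state. A minimal sketch of the resulting fast path (names as in
the diff below; locking setup and the actual migration loop are elided):

	/* Sketch only: mirrors the push_rt_task() change below. */
	static int push_rt_task(struct rq *rq)
	{
		assert_spin_locked(&rq->lock);

		/* Nothing is pushable unless this rq is RT-overloaded. */
		if (!rq->rt.overloaded)
			return 0;

		/* ...otherwise scan via pick_next_highest_task_rt() and
		 * try to migrate the chosen task, as before... */
		return 0;
	}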

Index: linux-compile.git/kernel/sched.c
===================================================================
--- linux-compile.git.orig/kernel/sched.c	2007-11-20 19:53:09.000000000 -0500
+++ linux-compile.git/kernel/sched.c	2007-11-20 19:53:10.000000000 -0500
@@ -273,6 +273,7 @@ struct rt_rq {
 	unsigned long rt_nr_migratory;
 	/* highest queued rt task prio */
 	int highest_prio;
+	int overloaded;
 };

 /*
@@ -6685,6 +6686,7 @@ void __init sched_init(void)
 		rq->migration_thread = NULL;
 		INIT_LIST_HEAD(&rq->migration_queue);
 		rq->rt.highest_prio = MAX_RT_PRIO;
+		rq->rt.overloaded = 0;
 #endif
 		atomic_set(&rq->nr_iowait, 0);

Index: linux-compile.git/kernel/sched_rt.c
===================================================================
--- linux-compile.git.orig/kernel/sched_rt.c	2007-11-20 19:53:09.000000000 -0500
+++ linux-compile.git/kernel/sched_rt.c	2007-11-20 19:53:10.000000000 -0500
@@ -16,6 +16,7 @@ static inline cpumask_t *rt_overload(voi
 }
 static inline void rt_set_overload(struct rq *rq)
 {
+	rq->rt.overloaded = 1;
 	cpu_set(rq->cpu, rt_overload_mask);
 	/*
 	 * Make sure the mask is visible before we set
@@ -32,6 +33,7 @@ static inline void rt_clear_overload(str
 	/* the order here really doesn't matter */
 	atomic_dec(&rto_count);
 	cpu_clear(rq->cpu, rt_overload_mask);
+	rq->rt.overloaded = 0;
 }

 static void update_rt_migration(struct rq *rq)
@@ -445,6 +447,9 @@ static int push_rt_task(struct rq *rq)

 	assert_spin_locked(&rq->lock);

+	if (!rq->rt.overloaded)
+		return 0;
+
 	next_task = pick_next_highest_task_rt(rq, -1);
 	if (!next_task)
 		return 0;
@@ -672,7 +677,7 @@ static void schedule_tail_balance_rt(str
 	 * the lock was owned by prev, we need to release it
 	 * first via finish_lock_switch and then reaquire it here.
 	 */
-	if (unlikely(rq->rt.rt_nr_running > 1)) {
+	if (unlikely(rq->rt.overloaded)) {
 		spin_lock_irq(&rq->lock);
 		push_rt_tasks(rq);
 		spin_unlock_irq(&rq->lock);
@@ -684,7 +689,8 @@ static void wakeup_balance_rt(struct rq
 {
 	if (unlikely(rt_task(p)) &&
 	    !task_running(rq, p) &&
-	    (p->prio >= rq->curr->prio))
+	    (p->prio >= rq->rt.highest_prio) &&
+	    rq->rt.overloaded)
 		push_rt_tasks(rq);
 }
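
For context, the flag stays coherent with the existing overload tracking:
rt_set_overload()/rt_clear_overload() flip it alongside rt_overload_mask
under rq->lock, so testing rq->rt.overloaded is as current as the mask
itself. The updater presumably reduces to something like the sketch below
(assumed shape of update_rt_migration(); the real function may differ):

	/* Assumed sketch: keep rq->rt.overloaded in sync with the mask. */
	static void update_rt_migration(struct rq *rq)
	{
		if (rq->rt.rt_nr_migratory && (rq->rt.rt_nr_running > 1))
			rt_set_overload(rq);	/* also sets rq->rt.overloaded = 1 */
		else
			rt_clear_overload(rq);	/* also clears rq->rt.overloaded */
	}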
