From: Ingo Molnar <mingo@elte.hu>
Subject: [patch 49/61] lock validator: special locking: sched.c

Teach the lock validator about the special (recursive) locking code in sched.c.
This has no effect on non-lockdep kernels.

    Signed-off-by: Ingo Molnar <mingo@elte.hu>
    Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
    ---
    kernel/sched.c | 16 ++++++++--------
    1 file changed, 8 insertions(+), 8 deletions(-)
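
The special case the validator has to handle here is that sched.c releases
runqueue locks out of strict LIFO order: double_rq_unlock() drops rq1->lock
while rq2->lock can still be held, and the load-balancing paths likewise drop
a lock acquired earlier while a later-acquired lock is still held. The
following is a minimal user-space sketch of that release pattern, assuming
POSIX mutexes and invented names purely for illustration; it is not the
kernel code the patch touches.

	/*
	 * Sketch only (assumption: POSIX threads, not the kernel API) of the
	 * out-of-order unlock pattern used on the two runqueue locks.
	 */
	#include <pthread.h>
	#include <stdio.h>

	static pthread_mutex_t rq1_lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_mutex_t rq2_lock = PTHREAD_MUTEX_INITIALIZER;

	int main(void)
	{
		/* double_rq_lock(): take both runqueue locks in a fixed order */
		pthread_mutex_lock(&rq1_lock);
		pthread_mutex_lock(&rq2_lock);

		/* ... move tasks between the two runqueues ... */
		printf("holding both runqueue locks\n");

		/*
		 * double_rq_unlock(): rq1's lock is dropped first, while rq2's
		 * lock is still held, so the lock being released is not
		 * necessarily the most recently acquired one.
		 */
		pthread_mutex_unlock(&rq1_lock);
		pthread_mutex_unlock(&rq2_lock);
		return 0;
	}

Judging by the call sites below, spin_unlock_non_nested() marks exactly this
kind of release, where the lock being dropped is not the most recently
acquired one, so the validator does not treat it as a nesting violation.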

    Index: linux/kernel/sched.c
    ===================================================================
    --- linux.orig/kernel/sched.c
    +++ linux/kernel/sched.c
    @@ -1963,7 +1963,7 @@ static void double_rq_unlock(runqueue_t
    __releases(rq1->lock)
    __releases(rq2->lock)
    {
    - spin_unlock(&rq1->lock);
    + spin_unlock_non_nested(&rq1->lock);
    if (rq1 != rq2)
    spin_unlock(&rq2->lock);
    else
    @@ -1980,7 +1980,7 @@ static void double_lock_balance(runqueue
    {
    if (unlikely(!spin_trylock(&busiest->lock))) {
    if (busiest->cpu < this_rq->cpu) {
    - spin_unlock(&this_rq->lock);
    + spin_unlock_non_nested(&this_rq->lock);
    spin_lock(&busiest->lock);
    spin_lock(&this_rq->lock);
    } else
    @@ -2602,7 +2602,7 @@ static int load_balance_newidle(int this
    nr_moved = move_tasks(this_rq, this_cpu, busiest,
    minus_1_or_zero(busiest->nr_running),
    imbalance, sd, NEWLY_IDLE, NULL);
    - spin_unlock(&busiest->lock);
    + spin_unlock_non_nested(&busiest->lock);
    }

    if (!nr_moved) {
    @@ -2687,7 +2687,7 @@ static void active_load_balance(runqueue
    else
    schedstat_inc(sd, alb_failed);
    out:
    - spin_unlock(&target_rq->lock);
    + spin_unlock_non_nested(&target_rq->lock);
    }

    /*
    @@ -3032,7 +3032,7 @@ static void wake_sleeping_dependent(int
    }

    for_each_cpu_mask(i, sibling_map)
    - spin_unlock(&cpu_rq(i)->lock);
    + spin_unlock_non_nested(&cpu_rq(i)->lock);
    /*
    * We exit with this_cpu's rq still held and IRQs
    * still disabled:
    @@ -3068,7 +3068,7 @@ static int dependent_sleeper(int this_cp
    * The same locking rules and details apply as for
    * wake_sleeping_dependent():
    */
    - spin_unlock(&this_rq->lock);
    + spin_unlock_non_nested(&this_rq->lock);
    sibling_map = sd->span;
    for_each_cpu_mask(i, sibling_map)
    spin_lock(&cpu_rq(i)->lock);
    @@ -3146,7 +3146,7 @@ check_smt_task:
    }
    out_unlock:
    for_each_cpu_mask(i, sibling_map)
    - spin_unlock(&cpu_rq(i)->lock);
    + spin_unlock_non_nested(&cpu_rq(i)->lock);
    return ret;
    }
    #else
    @@ -6680,7 +6680,7 @@ void __init sched_init(void)
    prio_array_t *array;

    rq = cpu_rq(i);
    - spin_lock_init(&rq->lock);
    + spin_lock_init_static(&rq->lock);
    rq->nr_running = 0;
    rq->active = rq->arrays;
    rq->expired = rq->arrays + 1;
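
The double_lock_balance() hunk above also shows the deadlock-avoidance
discipline these annotations sit on top of: if the trylock on the busiest
runqueue fails and that runqueue ranks lower in the global lock order (lower
CPU number), the local runqueue lock is dropped and both locks are retaken in
order. A minimal sketch of that pattern, again with POSIX mutexes and
simplified types rather than the kernel primitives:

	/*
	 * Sketch only (assumption: POSIX threads, simplified struct) of the
	 * "trylock, else drop and retake in global order" pattern from the
	 * double_lock_balance() hunk above.
	 */
	#include <pthread.h>

	struct rq {
		int cpu;		/* position in the global lock order */
		pthread_mutex_t lock;
	};

	/* Caller already holds this_rq->lock. */
	static void double_lock_balance(struct rq *this_rq, struct rq *busiest)
	{
		if (pthread_mutex_trylock(&busiest->lock) != 0) {
			if (busiest->cpu < this_rq->cpu) {
				/*
				 * busiest ranks lower: drop our lock (the
				 * non-nested release in the patch) and retake
				 * both locks in order.
				 */
				pthread_mutex_unlock(&this_rq->lock);
				pthread_mutex_lock(&busiest->lock);
				pthread_mutex_lock(&this_rq->lock);
			} else {
				/* busiest ranks higher: safe to block on it */
				pthread_mutex_lock(&busiest->lock);
			}
		}
	}

	int main(void)
	{
		static struct rq a = { .cpu = 0, .lock = PTHREAD_MUTEX_INITIALIZER };
		static struct rq b = { .cpu = 1, .lock = PTHREAD_MUTEX_INITIALIZER };

		pthread_mutex_lock(&b.lock);	/* "this" runqueue's lock */
		double_lock_balance(&b, &a);	/* safely take the other one */

		pthread_mutex_unlock(&a.lock);
		pthread_mutex_unlock(&b.lock);
		return 0;
	}

Ordering the blocking acquisitions by CPU number means two CPUs that each
need the other's runqueue lock always contend on the lower-numbered lock
first, so an AB-BA deadlock cannot form; the cost is that this_rq->lock has
to be dropped and retaken, which is the non-nested release annotated above.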