Subject: Re: [PATCH][5/8] Arch agnostic completely out of line locks / ppc64
    On Fri, Sep 10, 2004 at 11:50:41AM +1000, Anton Blanchard wrote:
    >> Lets just make __preempt_spin_lock inline, then everything should work
    >> as is.

    On Thu, Sep 09, 2004 at 07:22:04PM -0700, William Lee Irwin III wrote:
    > Well, there are patches that do this along with other more useful
    > things in the works (my spin on this is en route shortly, sorry the
    > response was delayed due to a power failure).

    I ran into instant pain because various read_lock()-related locking
    primitives don't exist; here's a first approximation:

    This patch folds __preempt_spin_lock() and __preempt_write_lock() into
    their callers and, following Ingo's patch, makes the various other
    locking variants do likewise for CONFIG_PREEMPT=y && CONFIG_SMP=y.

    Compile-tested on ia64.
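
    For anyone skimming the diff, the contended slow path that gets
    open-coded into each lock variant below follows the same pattern.
    A minimal standalone sketch (illustrative only; the function name is
    made up and this is not part of the patch):

	/*
	 * Sketch of the slow path each _*_lock() variant now open-codes:
	 * if we are already non-preemptible for some other reason, just
	 * spin with preemption off; otherwise busy-wait with preemption
	 * enabled and only take the lock via trylock once it looks free.
	 */
	void example_preemptible_spin_lock(spinlock_t *lock)
	{
		preempt_disable();
		if (likely(_raw_spin_trylock(lock)))
			return;				/* uncontended fast path */

		if (preempt_count() > 1) {		/* caller already atomic */
			_raw_spin_lock(lock);
			return;
		}

		do {
			preempt_enable();		/* allow preemption while waiting */
			while (spin_is_locked(lock))
				cpu_relax();
			preempt_disable();
		} while (!_raw_spin_trylock(lock));
	}

    The irq/irqsave/bh variants additionally re-enable interrupts or
    bottom halves around the busy-wait, as the diff shows.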


    Index: mm4-2.6.9-rc1/kernel/sched.c
    ===================================================================
    --- mm4-2.6.9-rc1.orig/kernel/sched.c 2004-09-08 06:10:47.000000000 -0700
    +++ mm4-2.6.9-rc1/kernel/sched.c 2004-09-09 18:59:53.723177997 -0700
    @@ -4572,49 +4572,3 @@
    }
    EXPORT_SYMBOL(__might_sleep);
    #endif
    -
    -
    -#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
    -/*
    - * This could be a long-held lock. If another CPU holds it for a long time,
    - * and that CPU is not asked to reschedule then *this* CPU will spin on the
    - * lock for a long time, even if *this* CPU is asked to reschedule.
    - *
    - * So what we do here, in the slow (contended) path is to spin on the lock by
    - * hand while permitting preemption.
    - *
    - * Called inside preempt_disable().
    - */
    -void __sched __preempt_spin_lock(spinlock_t *lock)
    -{
    - if (preempt_count() > 1) {
    - _raw_spin_lock(lock);
    - return;
    - }
    - do {
    - preempt_enable();
    - while (spin_is_locked(lock))
    - cpu_relax();
    - preempt_disable();
    - } while (!_raw_spin_trylock(lock));
    -}
    -
    -EXPORT_SYMBOL(__preempt_spin_lock);
    -
    -void __sched __preempt_write_lock(rwlock_t *lock)
    -{
    - if (preempt_count() > 1) {
    - _raw_write_lock(lock);
    - return;
    - }
    -
    - do {
    - preempt_enable();
    - while (rwlock_is_locked(lock))
    - cpu_relax();
    - preempt_disable();
    - } while (!_raw_write_trylock(lock));
    -}
    -
    -EXPORT_SYMBOL(__preempt_write_lock);
    -#endif /* defined(CONFIG_SMP) && defined(CONFIG_PREEMPT) */
    Index: mm4-2.6.9-rc1/kernel/spinlock.c
    ===================================================================
    --- mm4-2.6.9-rc1.orig/kernel/spinlock.c 2004-09-08 06:10:36.000000000 -0700
    +++ mm4-2.6.9-rc1/kernel/spinlock.c 2004-09-09 19:34:54.890144445 -0700
    @@ -33,90 +33,274 @@
    }
    EXPORT_SYMBOL(_write_trylock);

    +
    #if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT)
    +/*
    + * This could be a long-held lock. If another CPU holds it for a long time,
    + * and that CPU is not asked to reschedule then *this* CPU will spin on the
    + * lock for a long time, even if *this* CPU is asked to reschedule.
    + * So what we do here, in the slow (contended) path is to spin on the lock by
    + * hand while permitting preemption.
    + */
    void __lockfunc _spin_lock(spinlock_t *lock)
    {
    preempt_disable();
    - if (unlikely(!_raw_spin_trylock(lock)))
    - __preempt_spin_lock(lock);
    + if (unlikely(!_raw_spin_trylock(lock))) {
    + if (preempt_count() > 1)
    + _raw_spin_lock(lock);
    + else {
    + do {
    + preempt_enable();
    + while (spin_is_locked(lock))
    + cpu_relax();
    + preempt_disable();
    + } while (!_raw_spin_trylock(lock));
    + }
    + }
    }

    -void __lockfunc _write_lock(rwlock_t *lock)
    +void __lockfunc _spin_lock_irq(spinlock_t *lock)
    {
    + local_irq_disable();
    preempt_disable();
    - if (unlikely(!_raw_write_trylock(lock)))
    - __preempt_write_lock(lock);
    + if (unlikely(!_raw_spin_trylock(lock))) {
    + if (preempt_count() > 1)
    + _raw_spin_lock(lock);
    + else {
    + do {
    + preempt_enable();
    + local_irq_enable();
    + while (spin_is_locked(lock))
    + cpu_relax();
    + preempt_disable();
    + local_irq_disable();
    + } while (!_raw_spin_trylock(lock));
    + }
    + }
    }
    -#else
    -void __lockfunc _spin_lock(spinlock_t *lock)
    +
    +unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
    {
    + unsigned long flags;
    +
    + local_irq_save(flags);
    preempt_disable();
    - _raw_spin_lock(lock);
    + if (unlikely(!_raw_spin_trylock(lock))) {
    + if (preempt_count() > 1)
    + _raw_spin_lock_flags(lock, flags);
    + else {
    + do {
    + preempt_enable();
    + local_irq_restore(flags);
    + while (spin_is_locked(lock))
    + cpu_relax();
    + preempt_disable();
    + local_irq_save(flags);
    + } while (!_raw_spin_trylock(lock));
    + }
    + }
    + return flags;
    +}
    +
    +void __lockfunc _spin_lock_bh(spinlock_t *lock)
    +{
    + local_bh_disable();
    + preempt_disable();
    + if (unlikely(!_raw_spin_trylock(lock))) {
    + if (preempt_count() > 1)
    + _raw_spin_lock(lock);
    + else {
    + do {
    + preempt_enable();
    + local_bh_enable();
    + while (spin_is_locked(lock))
    + cpu_relax();
    + preempt_disable();
    + local_bh_disable();
    + } while (!_raw_spin_trylock(lock));
    + }
    + }
    }

    void __lockfunc _write_lock(rwlock_t *lock)
    {
    preempt_disable();
    - _raw_write_lock(lock);
    + if (unlikely(!_raw_write_trylock(lock))) {
    + if (preempt_count() > 1)
    + _raw_write_lock(lock);
    + else {
    + do {
    + preempt_enable();
    + while (rwlock_is_locked(lock))
    + cpu_relax();
    + preempt_disable();
    + } while (!_raw_write_trylock(lock));
    + }
    + }
    }
    -#endif
    -EXPORT_SYMBOL(_spin_lock);
    -EXPORT_SYMBOL(_write_lock);

    -void __lockfunc _read_lock(rwlock_t *lock)
    +void __lockfunc _write_lock_irq(rwlock_t *lock)
    {
    + local_irq_disable();
    preempt_disable();
    - _raw_read_lock(lock);
    + if (unlikely(!_raw_write_trylock(lock))) {
    + if (preempt_count() > 1)
    + _raw_write_lock(lock);
    + else {
    + do {
    + preempt_enable();
    + local_irq_enable();
    + while (rwlock_is_locked(lock))
    + cpu_relax();
    + preempt_disable();
    + local_irq_disable();
    + } while (!_raw_write_trylock(lock));
    + }
    + }
    }
    -EXPORT_SYMBOL(_read_lock);

    -void __lockfunc _spin_unlock(spinlock_t *lock)
    +unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
    {
    - _raw_spin_unlock(lock);
    - preempt_enable();
    + unsigned long flags;
    +
    + local_irq_save(flags);
    + preempt_disable();
    + if (unlikely(!_raw_write_trylock(lock))) {
    + if (preempt_count() > 1)
    + _raw_write_lock(lock);
    + else {
    + do {
    + preempt_enable();
    + local_irq_restore(flags);
    + while (rwlock_is_locked(lock))
    + cpu_relax();
    + local_irq_save(flags);
    + preempt_disable();
    + } while (!_raw_write_trylock(lock));
    + }
    + }
    + return flags;
    }
    -EXPORT_SYMBOL(_spin_unlock);

    -void __lockfunc _write_unlock(rwlock_t *lock)
    +void __lockfunc _write_lock_bh(rwlock_t *lock)
    {
    - _raw_write_unlock(lock);
    - preempt_enable();
    + local_bh_disable();
    + preempt_disable();
    + if (unlikely(!_raw_write_trylock(lock))) {
    + if (preempt_count() > 1)
    + _raw_write_lock(lock);
    + else {
    + do {
    + preempt_enable();
    + local_bh_enable();
    + while (rwlock_is_locked(lock))
    + cpu_relax();
    + local_bh_disable();
    + preempt_disable();
    + } while (!_raw_write_trylock(lock));
    + }
    + }
    +}
    +
    +#ifdef NOTYET
    +/*
    + * XXX: FIXME architectures don't implement locking primitives used here:
    + * _raw_read_trylock()
    + * _raw_read_lock_flags()
    + * rwlock_is_write_locked()
    + */
    +void __lockfunc _read_lock(rwlock_t *lock)
    +{
    + preempt_disable();
    + if (unlikely(!_raw_read_trylock(lock))) {
    + if (preempt_count() > 1)
    + _raw_read_lock(lock);
    + else {
    + do {
    + preempt_enable();
    + while (rwlock_is_write_locked(lock))
    + cpu_relax();
    + preempt_disable();
    + } while (!_raw_read_trylock(lock));
    + }
    + }
    }
    -EXPORT_SYMBOL(_write_unlock);

    -void __lockfunc _read_unlock(rwlock_t *lock)
    +void __lockfunc _read_lock_irq(rwlock_t *lock)
    {
    - _raw_read_unlock(lock);
    - preempt_enable();
    + local_irq_disable();
    + preempt_disable();
    + if (unlikely(!_raw_read_trylock(lock))) {
    + if (preempt_count() > 1)
    + _raw_read_lock(lock);
    + else {
    + do {
    + preempt_enable();
    + local_irq_enable();
    + while (rwlock_is_write_locked(lock))
    + cpu_relax();
    + local_irq_disable();
    + preempt_disable();
    + } while (!_raw_read_trylock(lock));
    + }
    + }
    }
    -EXPORT_SYMBOL(_read_unlock);

    -unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
    +unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
    {
    unsigned long flags;

    local_irq_save(flags);
    preempt_disable();
    - _raw_spin_lock_flags(lock, flags);
    + if (unlikely(!_raw_read_trylock(lock))) {
    + if (preempt_count() > 1)
    + _raw_read_lock_flags(lock, flags);
    + else {
    + do {
    + preempt_enable();
    + local_irq_restore(flags);
    + while (rwlock_is_write_locked(lock))
    + cpu_relax();
    + local_irq_save(flags);
    + preempt_disable();
    + } while (!_raw_read_trylock(lock));
    + }
    + }
    return flags;
    }
    -EXPORT_SYMBOL(_spin_lock_irqsave);

    -void __lockfunc _spin_lock_irq(spinlock_t *lock)
    +void __lockfunc _read_lock_bh(rwlock_t *lock)
    {
    - local_irq_disable();
    + local_bh_disable();
    preempt_disable();
    - _raw_spin_lock(lock);
    + if (unlikely(!_raw_read_trylock(lock))) {
    + if (preempt_count() > 1)
    + _raw_read_lock(lock);
    + else {
    + do {
    + preempt_enable();
    + local_bh_enable();
    + while (rwlock_is_write_locked(lock))
    + cpu_relax();
    + local_bh_disable();
    + preempt_disable();
    + } while (!_raw_read_trylock(lock));
    + }
    + }
    +}
    +#else /* NOTYET */
    +void __lockfunc _read_lock(rwlock_t *lock)
    +{
    + preempt_disable();
    + _raw_read_lock(lock);
    }
    -EXPORT_SYMBOL(_spin_lock_irq);

    -void __lockfunc _spin_lock_bh(spinlock_t *lock)
    +void __lockfunc _read_lock_irq(rwlock_t *lock)
    {
    - local_bh_disable();
    + local_irq_disable();
    preempt_disable();
    - _raw_spin_lock(lock);
    + _raw_read_lock(lock);
    }
    -EXPORT_SYMBOL(_spin_lock_bh);

    unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
    {
    @@ -127,34 +311,51 @@
    _raw_read_lock(lock);
    return flags;
    }
    -EXPORT_SYMBOL(_read_lock_irqsave);

    -void __lockfunc _read_lock_irq(rwlock_t *lock)
    +void __lockfunc _read_lock_bh(rwlock_t *lock)
    {
    - local_irq_disable();
    + local_bh_disable();
    preempt_disable();
    _raw_read_lock(lock);
    }
    -EXPORT_SYMBOL(_read_lock_irq);
    +#endif /* NOTYET */

    -void __lockfunc _read_lock_bh(rwlock_t *lock)
    +#else /* !CONFIG_PREEMPT */
    +void __lockfunc _spin_lock(spinlock_t *lock)
    {
    - local_bh_disable();
    preempt_disable();
    - _raw_read_lock(lock);
    + _raw_spin_lock(lock);
    }
    -EXPORT_SYMBOL(_read_lock_bh);

    -unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
    +void __lockfunc _spin_lock_irq(spinlock_t *lock)
    +{
    + local_irq_disable();
    + preempt_disable();
    + _raw_spin_lock(lock);
    +}
    +
    +unsigned long __lockfunc _spin_lock_irqsave(spinlock_t *lock)
    {
    unsigned long flags;

    local_irq_save(flags);
    preempt_disable();
    - _raw_write_lock(lock);
    + _raw_spin_lock_flags(lock, flags);
    return flags;
    }
    -EXPORT_SYMBOL(_write_lock_irqsave);
    +
    +void __lockfunc _spin_lock_bh(spinlock_t *lock)
    +{
    + local_bh_disable();
    + preempt_disable();
    + _raw_spin_lock(lock);
    +}
    +
    +void __lockfunc _write_lock(rwlock_t *lock)
    +{
    + preempt_disable();
    + _raw_write_lock(lock);
    +}

    void __lockfunc _write_lock_irq(rwlock_t *lock)
    {
    @@ -162,7 +363,16 @@
    preempt_disable();
    _raw_write_lock(lock);
    }
    -EXPORT_SYMBOL(_write_lock_irq);
    +
    +unsigned long __lockfunc _write_lock_irqsave(rwlock_t *lock)
    +{
    + unsigned long flags;
    +
    + local_irq_save(flags);
    + preempt_disable();
    + _raw_write_lock(lock);
    + return flags;
    +}

    void __lockfunc _write_lock_bh(rwlock_t *lock)
    {
    @@ -170,8 +380,71 @@
    preempt_disable();
    _raw_write_lock(lock);
    }
    +
    +void __lockfunc _read_lock(rwlock_t *lock)
    +{
    + preempt_disable();
    + _raw_read_lock(lock);
    +}
    +
    +void __lockfunc _read_lock_irq(rwlock_t *lock)
    +{
    + local_irq_disable();
    + preempt_disable();
    + _raw_read_lock(lock);
    +}
    +
    +unsigned long __lockfunc _read_lock_irqsave(rwlock_t *lock)
    +{
    + unsigned long flags;
    +
    + local_irq_save(flags);
    + preempt_disable();
    + _raw_read_lock(lock);
    + return flags;
    +}
    +
    +void __lockfunc _read_lock_bh(rwlock_t *lock)
    +{
    + local_bh_disable();
    + preempt_disable();
    + _raw_read_lock(lock);
    +}
    +#endif
    +EXPORT_SYMBOL(_spin_lock);
    +EXPORT_SYMBOL(_write_lock);
    +EXPORT_SYMBOL(_read_lock);
    +EXPORT_SYMBOL(_spin_lock_irq);
    +EXPORT_SYMBOL(_spin_lock_irqsave);
    +EXPORT_SYMBOL(_spin_lock_bh);
    +EXPORT_SYMBOL(_read_lock_irq);
    +EXPORT_SYMBOL(_read_lock_irqsave);
    +EXPORT_SYMBOL(_read_lock_bh);
    +EXPORT_SYMBOL(_write_lock_irq);
    +EXPORT_SYMBOL(_write_lock_irqsave);
    EXPORT_SYMBOL(_write_lock_bh);

    +void __lockfunc _spin_unlock(spinlock_t *lock)
    +{
    + _raw_spin_unlock(lock);
    + preempt_enable();
    +}
    +EXPORT_SYMBOL(_spin_unlock);
    +
    +void __lockfunc _write_unlock(rwlock_t *lock)
    +{
    + _raw_write_unlock(lock);
    + preempt_enable();
    +}
    +EXPORT_SYMBOL(_write_unlock);
    +
    +void __lockfunc _read_unlock(rwlock_t *lock)
    +{
    + _raw_read_unlock(lock);
    + preempt_enable();
    +}
    +EXPORT_SYMBOL(_read_unlock);
    +
    void __lockfunc _spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
    {
    _raw_spin_unlock(lock);
    -
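
    As the XXX/NOTYET comment in kernel/spinlock.c notes, the read-lock
    variants stay on the old non-preemptible path until architectures grow
    _raw_read_trylock(), _raw_read_lock_flags() and rwlock_is_write_locked().
    Purely as an illustration of what one of those could look like (my
    sketch, assuming an i386-style rwlock encoding where the lock word
    starts at RW_LOCK_BIAS, each reader subtracts 1 and a writer subtracts
    RW_LOCK_BIAS; untested and not part of this patch):

	/*
	 * Hypothetical _raw_read_trylock() for an i386-style rwlock.
	 * Optimistically take a reader reference; if the count went
	 * negative a writer holds the lock, so back it out and fail.
	 */
	static inline int _raw_read_trylock(rwlock_t *lock)
	{
		atomic_t *count = (atomic_t *)lock;

		atomic_dec(count);
		if (atomic_read(count) >= 0)
			return 1;
		atomic_inc(count);
		return 0;
	}

    _raw_read_lock_flags() and rwlock_is_write_locked() would need similar
    per-architecture definitions before the NOTYET block can be enabled.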