Date: Tue, 11 Aug 2009
From: Thomas Gleixner <tglx@linutronix.de>
Subject: Re: [ANNOUNCE] 2.6.31-rc4-rt1

    On Wed, 5 Aug 2009, Will Schmidt wrote:
    > /test/willschm/linux-2.6.31-rtx/kernel/spinlock.c:103: error: implicit
    > declaration of function ‘_raw_atomic_spin_relax’

    Fix below. Thanks,

    tglx

    -------
    commit ffc969930727238b847176c203bdbe1f9dffe403
    Author: Thomas Gleixner <tglx@linutronix.de>
    Date: Tue Aug 11 20:03:47 2009 +0200

    locks: Fix PREEMPT=y, LOCKBREAK=y, DEBUG_LOCK_ALLOC=n compile

    Should be folded back into the atomic lock conversion

    Reported-by: Will Schmidt <will_schmidt@vnet.ibm.com>
    Signed-off-by: Thomas Gleixner <tglx@linutronix.de>

    diff --git a/kernel/lock-internals.h b/kernel/lock-internals.h
    index 4f0bc8b..76f694c 100644
    --- a/kernel/lock-internals.h
    +++ b/kernel/lock-internals.h
    @@ -9,8 +9,8 @@
  * (We do this in a function because inlining it would be excessive.)
  */

-#define BUILD_LOCK_OPS(op, locktype)					\
-void __lockfunc _##op##_lock(locktype##_t *lock)			\
+#define BUILD_LOCK_OPS(prefix, op, locktype)				\
+void __lockfunc _##prefix##_lock(locktype##_t *lock)			\
 {									\
 	for (;;) {							\
 		preempt_disable();					\
    @@ -20,15 +20,15 @@ void __lockfunc _##op##_lock(locktype##_t *lock) \
 									\
 		if (!(lock)->break_lock)				\
 			(lock)->break_lock = 1;				\
-		while (!op##_can_lock(lock) && (lock)->break_lock)	\
+		while (!prefix##_can_lock(lock) && (lock)->break_lock)	\
 			_raw_##op##_relax(&lock->raw_lock);		\
 	}								\
 	(lock)->break_lock = 0;						\
 }									\
 									\
-EXPORT_SYMBOL(_##op##_lock);						\
+EXPORT_SYMBOL(_##prefix##_lock);					\
 									\
-unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock)	\
+unsigned long __lockfunc _##prefix##_lock_irqsave(locktype##_t *lock)	\
 {									\
 	unsigned long flags;						\
 									\
    @@ -42,23 +42,23 @@ unsigned long __lockfunc _##op##_lock_irqsave(locktype##_t *lock) \
 									\
 		if (!(lock)->break_lock)				\
 			(lock)->break_lock = 1;				\
-		while (!op##_can_lock(lock) && (lock)->break_lock)	\
+		while (!prefix##_can_lock(lock) && (lock)->break_lock)	\
 			_raw_##op##_relax(&lock->raw_lock);		\
 	}								\
 	(lock)->break_lock = 0;						\
 	return flags;							\
 }									\
 									\
-EXPORT_SYMBOL(_##op##_lock_irqsave);					\
+EXPORT_SYMBOL(_##prefix##_lock_irqsave);				\
 									\
-void __lockfunc _##op##_lock_irq(locktype##_t *lock)			\
+void __lockfunc _##prefix##_lock_irq(locktype##_t *lock)		\
 {									\
-	_##op##_lock_irqsave(lock);					\
+	_##prefix##_lock_irqsave(lock);					\
 }									\
 									\
-EXPORT_SYMBOL(_##op##_lock_irq);					\
+EXPORT_SYMBOL(_##prefix##_lock_irq);					\
 									\
-void __lockfunc _##op##_lock_bh(locktype##_t *lock)			\
+void __lockfunc _##prefix##_lock_bh(locktype##_t *lock)			\
 {									\
 	unsigned long flags;						\
 									\
    @@ -67,9 +67,9 @@ void __lockfunc _##op##_lock_bh(locktype##_t *lock) \
 	/* irq-disabling. We use the generic preemption-aware */	\
 	/* function: */							\
 	/**/								\
-	flags = _##op##_lock_irqsave(lock);				\
+	flags = _##prefix##_lock_irqsave(lock);				\
 	local_bh_disable();						\
 	local_irq_restore(flags);					\
 }									\
 									\
-EXPORT_SYMBOL(_##op##_lock_bh)
+EXPORT_SYMBOL(_##prefix##_lock_bh)
    diff --git a/kernel/rwlock.c b/kernel/rwlock.c
    index 35460b3..eaf34c8 100644
    --- a/kernel/rwlock.c
    +++ b/kernel/rwlock.c
    @@ -146,8 +146,8 @@ EXPORT_SYMBOL(_write_lock);
  * _[read|write]_lock_irqsave()
  * _[read|write]_lock_bh()
  */
-BUILD_LOCK_OPS(read, rwlock);
-BUILD_LOCK_OPS(write, rwlock);
+BUILD_LOCK_OPS(read, read, rwlock);
+BUILD_LOCK_OPS(write, write, rwlock);

 #endif /* CONFIG_PREEMPT */

    diff --git a/kernel/spinlock.c b/kernel/spinlock.c
    index 6a3c0c4..e3194d5 100644
    --- a/kernel/spinlock.c
    +++ b/kernel/spinlock.c
    @@ -100,7 +100,7 @@ EXPORT_SYMBOL(_atomic_spin_lock);
  * _atomic_spin_lock_irqsave()
  * _atomic_spin_lock_bh()
  */
-BUILD_LOCK_OPS(atomic_spin, atomic_spinlock);
+BUILD_LOCK_OPS(atomic_spin, spin, atomic_spinlock);

 #endif /* CONFIG_PREEMPT */
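
-------

For reference, a minimal sketch of what the fixed macro expands to in the
atomic_spin case, i.e. BUILD_LOCK_OPS(atomic_spin, spin, atomic_spinlock)
with prefix=atomic_spin and op=spin. __lockfunc and the EXPORT_SYMBOL lines
are elided, and the trylock line is not visible in the hunks above, so it
is assumed here to follow the mainline _raw_##op##_trylock() pattern:

void _atomic_spin_lock(atomic_spinlock_t *lock)
{
	for (;;) {
		preempt_disable();
		/* assumed from mainline: the raw trylock keeps the 'op' name */
		if (likely(_raw_spin_trylock(lock)))
			break;
		preempt_enable();

		if (!(lock)->break_lock)
			(lock)->break_lock = 1;
		/* public name comes from 'prefix', raw relax from 'op' */
		while (!atomic_spin_can_lock(lock) && (lock)->break_lock)
			_raw_spin_relax(&lock->raw_lock);
	}
	(lock)->break_lock = 0;
}

With the old single-parameter macro, both names were derived from 'op', so
the relax call expanded to _raw_atomic_spin_relax(), which is declared
nowhere; that is the implicit-declaration error Will reported.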