From: Ingo Molnar <mingo@elte.hu>
Date: 20 Jan 2005
Subject: [patch] stricter type-checking rwlock primitives, x86

Turn the x86 rwlock macros into inline functions, to get stricter
type-checking. Test-built and booted on x86. (This patch comes after all
previous spinlock patches.)
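
(Not part of the patch - just an illustration of the type-checking argument.
With the macro form, any pointer whose target happens to have a ->lock member
is accepted silently; with the inline-function form the compiler rejects a
mismatched pointer type. The struct names below are made up for this
user-space sketch, only the ->lock member mirrors rwlock_t.)

/* Sketch (user-space, not kernel code): why inlines are stricter than macros. */
#include <stdio.h>

struct fake_rwlock { volatile unsigned int lock; };	/* stand-in for rwlock_t */
struct fake_spinlock { volatile unsigned int lock; };	/* unrelated type with a ->lock member */

#define FAKE_RW_LOCK_BIAS 0x01000000

/* old style: the macro expands for anything that has a ->lock member */
#define rwlock_is_locked_macro(x) ((x)->lock != FAKE_RW_LOCK_BIAS)

/* new style: the argument must really be a struct fake_rwlock * */
static inline int rwlock_is_locked_inline(struct fake_rwlock *rw)
{
	return rw->lock != FAKE_RW_LOCK_BIAS;
}

int main(void)
{
	struct fake_rwlock rw = { FAKE_RW_LOCK_BIAS };
	struct fake_spinlock sl = { 0 };

	printf("%d\n", rwlock_is_locked_macro(&rw));	/* ok */
	printf("%d\n", rwlock_is_locked_macro(&sl));	/* wrong type, compiles silently */
	printf("%d\n", rwlock_is_locked_inline(&rw));	/* ok */
	/* rwlock_is_locked_inline(&sl); */		/* now an incompatible-pointer-type warning/error */
	return 0;
}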

	Ingo

Signed-off-by: Ingo Molnar <mingo@elte.hu>

--- linux/include/asm-i386/spinlock.h.orig
+++ linux/include/asm-i386/spinlock.h
@@ -198,21 +198,33 @@ typedef struct {
 
 #define RW_LOCK_UNLOCKED (rwlock_t) { RW_LOCK_BIAS RWLOCK_MAGIC_INIT }
 
-#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
+static inline void rwlock_init(rwlock_t *rw)
+{
+	*rw = RW_LOCK_UNLOCKED;
+}
 
-#define rwlock_is_locked(x) ((x)->lock != RW_LOCK_BIAS)
+static inline int rwlock_is_locked(rwlock_t *rw)
+{
+	return rw->lock != RW_LOCK_BIAS;
+}
 
 /**
  * read_trylock_test - would read_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define read_trylock_test(x) (atomic_read((atomic_t *)&(x)->lock) > 0)
+static inline int read_trylock_test(rwlock_t *rw)
+{
+	return atomic_read((atomic_t *)&rw->lock) > 0;
+}
 
 /**
  * write_trylock_test - would write_trylock() succeed?
  * @lock: the rwlock in question.
  */
-#define write_trylock_test(x) ((x)->lock == RW_LOCK_BIAS)
+static inline int write_trylock_test(rwlock_t *rw)
+{
+	return atomic_read((atomic_t *)&rw->lock) == RW_LOCK_BIAS;
+}
 
 /*
  * On x86, we implement read-write locks as a 32-bit counter
@@ -241,8 +253,16 @@ static inline void _raw_write_lock(rwloc
 	__build_write_lock(rw, "__write_lock_failed");
 }
 
-#define _raw_read_unlock(rw) asm volatile("lock ; incl %0" :"=m" ((rw)->lock) : : "memory")
-#define _raw_write_unlock(rw) asm volatile("lock ; addl $" RW_LOCK_BIAS_STR ",%0":"=m" ((rw)->lock) : : "memory")
+static inline void _raw_read_unlock(rwlock_t *rw)
+{
+	asm volatile("lock ; incl %0" :"=m" (rw->lock) : : "memory");
+}
+
+static inline void _raw_write_unlock(rwlock_t *rw)
+{
+	asm volatile("lock ; addl $" RW_LOCK_BIAS_STR
+			",%0":"=m" (rw->lock) : : "memory");
+}
 
 static inline int _raw_read_trylock(rwlock_t *lock)
 {
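
(Also not part of the patch: a tiny user-space model of the RW_LOCK_BIAS
counter scheme that read_trylock_test()/write_trylock_test() check against.
The C11 atomics and the compare-and-swap in the write path are simplifications
of the real locked asm; only the counter values and the two test conditions
mirror the i386 code.)

/* Sketch: model of the bias-counter scheme (C11 atomics, not the kernel's asm). */
#include <stdatomic.h>
#include <stdio.h>

#define RW_LOCK_BIAS 0x01000000

static atomic_int count = RW_LOCK_BIAS;		/* RW_LOCK_BIAS == unlocked */

static int model_read_trylock(void)
{
	/* each reader takes one unit; <= 0 before the decrement means a writer holds it */
	if (atomic_fetch_sub(&count, 1) <= 0) {
		atomic_fetch_add(&count, 1);	/* back out */
		return 0;
	}
	return 1;
}

static int model_write_trylock(void)
{
	/* a writer needs the whole bias: no readers, no other writer */
	int expected = RW_LOCK_BIAS;
	return atomic_compare_exchange_strong(&count, &expected, 0);
}

int main(void)
{
	printf("read_trylock_test:  %d\n", atomic_load(&count) > 0);		  /* 1 */
	printf("write_trylock_test: %d\n", atomic_load(&count) == RW_LOCK_BIAS); /* 1 */

	model_read_trylock();							  /* one reader in */
	printf("write_trylock_test: %d\n", atomic_load(&count) == RW_LOCK_BIAS); /* 0 */
	return 0;
}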