X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=include%2Fasm-arm%2Fspinlock.h;h=1f906d09b6880f2ac04e9bb0dedf3dd0eafdafee;hb=0e4e4220f10bf8f58a8606f0cb28538088c64b1a;hp=9705d5eec94c69c2e795a7c130af164b09fe2434;hpb=4e8fd22bd421d7aa279bcb76189505a1f96bb7bf;p=linux-2.6

diff --git a/include/asm-arm/spinlock.h b/include/asm-arm/spinlock.h
index 9705d5eec9..1f906d09b6 100644
--- a/include/asm-arm/spinlock.h
+++ b/include/asm-arm/spinlock.h
@@ -8,9 +8,10 @@
 /*
  * ARMv6 Spin-locking.
  *
- * We (exclusively) read the old value, and decrement it. If it
- * hits zero, we may have won the lock, so we try (exclusively)
- * storing it.
+ * We exclusively read the old value. If it is zero, we may have
+ * won the lock, so we try exclusively storing it. A memory barrier
+ * is required after we get a lock, and before we release it, because
+ * V6 CPUs are assumed to have weakly ordered memory.
  *
  * Unlocked value: 0
  * Locked value: 1
@@ -41,7 +42,9 @@ static inline void _raw_spin_lock(spinlock_t *lock)
 "	bne	1b"
 	: "=&r" (tmp)
 	: "r" (&lock->lock), "r" (1)
-	: "cc", "memory");
+	: "cc");
+
+	smp_mb();
 }
 
 static inline int _raw_spin_trylock(spinlock_t *lock)
@@ -54,18 +57,25 @@ static inline int _raw_spin_trylock(spinlock_t *lock)
 "	strexeq	%0, %2, [%1]"
 	: "=&r" (tmp)
 	: "r" (&lock->lock), "r" (1)
-	: "cc", "memory");
-
-	return tmp == 0;
+	: "cc");
+
+	if (tmp == 0) {
+		smp_mb();
+		return 1;
+	} else {
+		return 0;
+	}
 }
 
 static inline void _raw_spin_unlock(spinlock_t *lock)
 {
+	smp_mb();
+
 	__asm__ __volatile__(
 "	str	%1, [%0]"
 	:
 	: "r" (&lock->lock), "r" (0)
-	: "cc", "memory");
+	: "cc");
 }
 
 /*
@@ -98,7 +108,9 @@ static inline void _raw_write_lock(rwlock_t *rw)
 "	bne	1b"
 	: "=&r" (tmp)
 	: "r" (&rw->lock), "r" (0x80000000)
-	: "cc", "memory");
+	: "cc");
+
+	smp_mb();
 }
 
 static inline int _raw_write_trylock(rwlock_t *rw)
@@ -111,18 +123,25 @@ static inline int _raw_write_trylock(rwlock_t *rw)
 "	strexeq	%0, %2, [%1]"
 	: "=&r" (tmp)
 	: "r" (&rw->lock), "r" (0x80000000)
-	: "cc", "memory");
-
-	return tmp == 0;
+	: "cc");
+
+	if (tmp == 0) {
+		smp_mb();
+		return 1;
+	} else {
+		return 0;
+	}
 }
 
 static inline void _raw_write_unlock(rwlock_t *rw)
 {
+	smp_mb();
+
 	__asm__ __volatile__(
 	"str	%1, [%0]"
 	:
 	: "r" (&rw->lock), "r" (0)
-	: "cc", "memory");
+	: "cc");
 }
 
 /*
@@ -149,13 +168,17 @@ static inline void _raw_read_lock(rwlock_t *rw)
 "	bmi	1b"
 	: "=&r" (tmp), "=&r" (tmp2)
 	: "r" (&rw->lock)
-	: "cc", "memory");
+	: "cc");
+
+	smp_mb();
 }
 
 static inline void _raw_read_unlock(rwlock_t *rw)
 {
 	unsigned long tmp, tmp2;
 
+	smp_mb();
+
 	__asm__ __volatile__(
 "1:	ldrex	%0, [%2]\n"
 "	sub	%0, %0, #1\n"
@@ -164,7 +187,7 @@ static inline void _raw_read_unlock(rwlock_t *rw)
 "	bne	1b"
 	: "=&r" (tmp), "=&r" (tmp2)
 	: "r" (&rw->lock)
-	: "cc", "memory");
+	: "cc");
 }
 
 #define _raw_read_trylock(lock)	generic_raw_read_trylock(lock)
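
The change is the same at every locking site: the "memory" clobber moves out of
the inline asm and into smp_mb(), which acts as both a compiler barrier and a
CPU barrier, placed after an acquire and before a release so that accesses
inside the critical section cannot drift past the lock operations. Below is a
minimal userspace sketch of the same ldrex/strex acquire/release pattern,
assuming an ARMv6 target and a GCC-style toolchain; the arm_* names are
illustrative, not kernel API.

/*
 * Illustrative userspace analogue of the patched primitives.  On ARMv6
 * the kernel's smp_mb() expands to the CP15 data memory barrier used in
 * arm_dmb() below (the dmb instruction proper only arrives with ARMv7).
 */
typedef struct {
	volatile unsigned long lock;		/* 0 = unlocked, 1 = locked */
} arm_spinlock_t;

static inline void arm_dmb(void)
{
	/* CP15 c7, c10, 5: data memory barrier.  The "memory" clobber
	 * also stops the compiler moving accesses across it. */
	__asm__ __volatile__("mcr p15, 0, %0, c7, c10, 5"
			     : : "r" (0) : "memory");
}

static inline void arm_spin_lock(arm_spinlock_t *l)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"		/* exclusively read the value     */
"	teq	%0, #0\n"		/* already held?                  */
"	strexeq	%0, %2, [%1]\n"		/* if free, try to store 1        */
"	teqeq	%0, #0\n"		/* did the exclusive store stick? */
"	bne	1b"			/* held or raced: try again       */
	: "=&r" (tmp)
	: "r" (&l->lock), "r" (1)
	: "cc");

	arm_dmb();			/* barrier after taking the lock */
}

static inline void arm_spin_unlock(arm_spinlock_t *l)
{
	arm_dmb();			/* barrier before releasing it */

	__asm__ __volatile__(
"	str	%1, [%0]"		/* plain word store is atomic */
	:
	: "r" (&l->lock), "r" (0)
	: "cc");
}

Note also that the trylock variants issue the barrier only on the success
path: a failed attempt takes no lock, so there is nothing to order.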
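
To see what the barriers buy, consider a hypothetical writer/reader pair built
on the sketch above. Without the smp_mb() before the releasing store, a weakly
ordered V6 CPU could make the unlock visible before the store to shared_data,
letting another CPU take the lock and still read the stale value; the barrier
after the acquire prevents the symmetric leak on the reader side. The globals
here are illustrative only.

static arm_spinlock_t shared_lock;	/* zero-initialized: unlocked */
static int shared_data;

void writer(void)
{
	arm_spin_lock(&shared_lock);	/* barrier keeps the store below */
	shared_data = 42;		/* inside the critical section   */
	arm_spin_unlock(&shared_lock);	/* barrier keeps it above this   */
}

void reader(void)
{
	int copy;

	arm_spin_lock(&shared_lock);
	copy = shared_data;		/* sees 42 once writer() has unlocked */
	arm_spin_unlock(&shared_lock);
	(void)copy;
}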
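
The rwlock hunks apply the same rule, but the lock word encoding deserves a
note: a writer claims the whole word with 0x80000000 (bit 31), while readers
count in the low bits, so _raw_read_lock can detect a writer purely from the
sign of the incremented value. A sketch of the reader-side acquire, reusing
arm_dmb() from the sketch above (names again illustrative):

typedef struct {
	volatile unsigned long lock;	/* bit 31 = writer, rest = readers */
} arm_rwlock_t;

static inline void arm_read_lock(arm_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"		/* read the current word         */
"	adds	%0, %0, #1\n"		/* one more reader; sets N/Z     */
"	strexpl	%1, %0, [%2]\n"		/* store only if result >= 0     */
"	rsbpls	%0, %1, #0\n"		/* failed strex (1) -> negative  */
"	bmi	1b"			/* writer present or raced: loop */
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	arm_dmb();			/* barrier after taking the read lock */
}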