[MIPS] Make support for weakly ordered LL/SC a config option.
author		Ralf Baechle <ralf@linux-mips.org>	Sat, 14 Jul 2007 12:24:05 +0000 (13:24 +0100)
committer	Ralf Baechle <ralf@linux-mips.org>	Fri, 20 Jul 2007 17:57:39 +0000 (18:57 +0100)
None of the weakly ordered processors supported in-tree need this, but it
seems like this could change ...

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
arch/mips/Kconfig
include/asm-mips/atomic.h
include/asm-mips/barrier.h
include/asm-mips/bitops.h
include/asm-mips/futex.h
include/asm-mips/spinlock.h
include/asm-mips/system.h

diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 5c863bcd5614e4076d4417e294458e4bbd5cae8f..1e3aeccd7322cfdc06067c46dea49950687a22f0 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1190,8 +1190,19 @@ config SYS_HAS_CPU_RM9000
 config SYS_HAS_CPU_SB1
        bool
 
+#
+# CPU may reorder R->R, R->W, W->R, W->W
+# Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC
+#
 config WEAK_ORDERING
        bool
+
+#
+# CPU may reorder reads and writes beyond LL/SC
+# CPU may reorder R->LL, R->LL, W->LL, W->LL, R->SC, R->SC, W->SC, W->SC
+#
+config WEAK_REORDERING_BEYOND_LLSC
+       bool
 endmenu
 
 #
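
No in-tree processor selects the new symbol yet. For a hypothetical future
core whose reordering extends beyond its LL/SC pairs, the wiring would look
roughly like this (CPU_FOO is an invented placeholder, not part of this
commit):

	# hypothetical example only
	config CPU_FOO
		bool "Support for the FOO processor"
		select WEAK_ORDERING
		select WEAK_REORDERING_BEYOND_LLSC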
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index 1b60624dab7e4aacca7518818aeaf2c6214baa33..7d8003769a44d28a5d4b8f330d70068ecd69210d 100644
--- a/include/asm-mips/atomic.h
+++ b/include/asm-mips/atomic.h
@@ -138,7 +138,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
 {
        unsigned long result;
 
-       smp_mb();
+       smp_llsc_mb();
 
        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long temp;
@@ -181,7 +181,7 @@ static __inline__ int atomic_add_return(int i, atomic_t * v)
                raw_local_irq_restore(flags);
        }
 
-       smp_mb();
+       smp_llsc_mb();
 
        return result;
 }
@@ -190,7 +190,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
 {
        unsigned long result;
 
-       smp_mb();
+       smp_llsc_mb();
 
        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long temp;
@@ -233,7 +233,7 @@ static __inline__ int atomic_sub_return(int i, atomic_t * v)
                raw_local_irq_restore(flags);
        }
 
-       smp_mb();
+       smp_llsc_mb();
 
        return result;
 }
@@ -250,7 +250,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
 {
        unsigned long result;
 
-       smp_mb();
+       smp_llsc_mb();
 
        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long temp;
@@ -302,7 +302,7 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
                raw_local_irq_restore(flags);
        }
 
-       smp_mb();
+       smp_llsc_mb();
 
        return result;
 }
@@ -519,7 +519,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
 {
        unsigned long result;
 
-       smp_mb();
+       smp_llsc_mb();
 
        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long temp;
@@ -562,7 +562,7 @@ static __inline__ long atomic64_add_return(long i, atomic64_t * v)
                raw_local_irq_restore(flags);
        }
 
-       smp_mb();
+       smp_llsc_mb();
 
        return result;
 }
@@ -571,7 +571,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 {
        unsigned long result;
 
-       smp_mb();
+       smp_llsc_mb();
 
        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long temp;
@@ -614,7 +614,7 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
                raw_local_irq_restore(flags);
        }
 
-       smp_mb();
+       smp_llsc_mb();
 
        return result;
 }
@@ -631,7 +631,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 {
        unsigned long result;
 
-       smp_mb();
+       smp_llsc_mb();
 
        if (cpu_has_llsc && R10000_LLSC_WAR) {
                unsigned long temp;
@@ -683,7 +683,7 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
                raw_local_irq_restore(flags);
        }
 
-       smp_mb();
+       smp_llsc_mb();
 
        return result;
 }
@@ -791,10 +791,11 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
  * atomic*_return operations are serializing but not the non-*_return
  * versions.
  */
-#define smp_mb__before_atomic_dec()    smp_mb()
-#define smp_mb__after_atomic_dec()     smp_mb()
-#define smp_mb__before_atomic_inc()    smp_mb()
-#define smp_mb__after_atomic_inc()     smp_mb()
+#define smp_mb__before_atomic_dec()    smp_llsc_mb()
+#define smp_mb__after_atomic_dec()     smp_llsc_mb()
+#define smp_mb__before_atomic_inc()    smp_llsc_mb()
+#define smp_mb__after_atomic_inc()     smp_llsc_mb()
 
 #include <asm-generic/atomic.h>
+
 #endif /* _ASM_ATOMIC_H */
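
To show where these barriers sit, here is the shape of one of the functions
being touched, including the LL/SC loop the hunks elide (a simplified sketch
of the non-R10000 path, not the verbatim kernel source):

	static __inline__ int atomic_add_return(int i, atomic_t * v)
	{
		unsigned long result, temp;

		smp_llsc_mb();	/* was smp_mb(): order earlier accesses before the LL */

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%1, %2		# load-linked counter	\n"
		"	addu	%0, %1, %3	# compute new value	\n"
		"	sc	%0, %2		# store-conditional	\n"
		"	beqz	%0, 1b		# retry if SC failed	\n"
		"	addu	%0, %1, %3	# delay slot: redo sum	\n"
		"	.set	mips0					\n"
		: "=&r" (result), "=&r" (temp), "=m" (v->counter)
		: "Ir" (i), "m" (v->counter)
		: "memory");

		smp_llsc_mb();	/* was smp_mb(): order the SC before later accesses */

		return result;
	}

On every CPU supported in-tree at the time, LL/SC already orders the
surrounding accesses, so both smp_llsc_mb() calls compile to nothing.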
diff --git a/include/asm-mips/barrier.h b/include/asm-mips/barrier.h
index ed82631b0017256747e798e5b69350269576f05b..9d8cfbb5e7967c97d2f03bc79ac430335260d962 100644
--- a/include/asm-mips/barrier.h
+++ b/include/asm-mips/barrier.h
 #else
 #define __WEAK_ORDERING_MB     "               \n"
 #endif
+#if defined(CONFIG_WEAK_REORDERING_BEYOND_LLSC) && defined(CONFIG_SMP)
+#define __WEAK_LLSC_MB         "       sync    \n"
+#else
+#define __WEAK_LLSC_MB         "               \n"
+#endif
 
 #define smp_mb()       __asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
 #define smp_rmb()      __asm__ __volatile__(__WEAK_ORDERING_MB : : :"memory")
 #define set_mb(var, value) \
        do { var = value; smp_mb(); } while (0)
 
+#define smp_llsc_mb()  __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
+#define smp_llsc_rmb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
+#define smp_llsc_wmb() __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
+
 #endif /* __ASM_BARRIER_H */
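
After preprocessing, the new macro boils down to one of two forms (a sketch
of the two configurations, not literal compiler output):

	/* CONFIG_WEAK_REORDERING_BEYOND_LLSC=y && CONFIG_SMP=y: a real SYNC */
	__asm__ __volatile__("	sync	\n" : : : "memory");

	/* otherwise: empty asm, still a compiler barrier thanks to the
	 * "memory" clobber, but zero instructions at run time */
	__asm__ __volatile__("		\n" : : : "memory");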
diff --git a/include/asm-mips/bitops.h b/include/asm-mips/bitops.h
index d9e81af53f78b3f1528cd3ed27b165fca5cf0e61..148bc79557f12b94f22b4669d7a3a6d344f24b12 100644
--- a/include/asm-mips/bitops.h
+++ b/include/asm-mips/bitops.h
@@ -38,8 +38,8 @@
 /*
  * clear_bit() doesn't provide any barrier for the compiler.
  */
-#define smp_mb__before_clear_bit()     smp_mb()
-#define smp_mb__after_clear_bit()      smp_mb()
+#define smp_mb__before_clear_bit()     smp_llsc_mb()
+#define smp_mb__after_clear_bit()      smp_llsc_mb()
 
 /*
  * set_bit - Atomically set a bit in memory
@@ -289,7 +289,7 @@ static inline int test_and_set_bit(unsigned long nr,
                raw_local_irq_restore(flags);
        }
 
-       smp_mb();
+       smp_llsc_mb();
 
        return res != 0;
 }
@@ -377,7 +377,7 @@ static inline int test_and_clear_bit(unsigned long nr,
                raw_local_irq_restore(flags);
        }
 
-       smp_mb();
+       smp_llsc_mb();
 
        return res != 0;
 }
@@ -445,7 +445,7 @@ static inline int test_and_change_bit(unsigned long nr,
                raw_local_irq_restore(flags);
        }
 
-       smp_mb();
+       smp_llsc_mb();
 
        return res != 0;
 }
diff --git a/include/asm-mips/futex.h b/include/asm-mips/futex.h
index 47e5679c235303f5812eb1d4cd39a2140af89016..b623882bce192287e76d8cf7a59d42f710d8deef 100644
--- a/include/asm-mips/futex.h
+++ b/include/asm-mips/futex.h
@@ -29,7 +29,7 @@
                "       .set    mips3                           \n"     \
                "2:     sc      $1, %2                          \n"     \
                "       beqzl   $1, 1b                          \n"     \
-               __WEAK_ORDERING_MB                                      \
+               __WEAK_LLSC_MB                                          \
                "3:                                             \n"     \
                "       .set    pop                             \n"     \
                "       .set    mips0                           \n"     \
@@ -55,7 +55,7 @@
                "       .set    mips3                           \n"     \
                "2:     sc      $1, %2                          \n"     \
                "       beqz    $1, 1b                          \n"     \
-               __WEAK_ORDERING_MB                                      \
+               __WEAK_LLSC_MB                                          \
                "3:                                             \n"     \
                "       .set    pop                             \n"     \
                "       .set    mips0                           \n"     \
@@ -152,7 +152,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
                "       .set    mips3                                   \n"
                "2:     sc      $1, %1                                  \n"
                "       beqzl   $1, 1b                                  \n"
-               __WEAK_ORDERING_MB
+               __WEAK_LLSC_MB
                "3:                                                     \n"
                "       .set    pop                                     \n"
                "       .section .fixup,\"ax\"                          \n"
@@ -179,7 +179,7 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
                "       .set    mips3                                   \n"
                "2:     sc      $1, %1                                  \n"
                "       beqz    $1, 1b                                  \n"
-               __WEAK_ORDERING_MB
+               __WEAK_LLSC_MB
                "3:                                                     \n"
                "       .set    pop                                     \n"
                "       .section .fixup,\"ax\"                          \n"
diff --git a/include/asm-mips/spinlock.h b/include/asm-mips/spinlock.h
index 35e431cd796bf499bccfc2d6ef4bd76ad578009e..bb897016c491a24e8369d461a50a59a892457154 100644
--- a/include/asm-mips/spinlock.h
+++ b/include/asm-mips/spinlock.h
@@ -67,7 +67,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
                : "memory");
        }
 
-       smp_mb();
+       smp_llsc_mb();
 }
 
 static inline void __raw_spin_unlock(raw_spinlock_t *lock)
@@ -118,7 +118,7 @@ static inline unsigned int __raw_spin_trylock(raw_spinlock_t *lock)
                : "memory");
        }
 
-       smp_mb();
+       smp_llsc_mb();
 
        return res == 0;
 }
@@ -183,7 +183,7 @@ static inline void __raw_read_lock(raw_rwlock_t *rw)
                : "memory");
        }
 
-       smp_mb();
+       smp_llsc_mb();
 }
 
 /* Note the use of sub, not subu which will make the kernel die with an
@@ -193,7 +193,7 @@ static inline void __raw_read_unlock(raw_rwlock_t *rw)
 {
        unsigned int tmp;
 
-       smp_mb();
+       smp_llsc_mb();
 
        if (R10000_LLSC_WAR) {
                __asm__ __volatile__(
@@ -262,7 +262,7 @@ static inline void __raw_write_lock(raw_rwlock_t *rw)
                : "memory");
        }
 
-       smp_mb();
+       smp_llsc_mb();
 }
 
 static inline void __raw_write_unlock(raw_rwlock_t *rw)
@@ -293,7 +293,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
                "       .set    reorder                                 \n"
                "       beqzl   %1, 1b                                  \n"
                "        nop                                            \n"
-               __WEAK_ORDERING_MB
+               __WEAK_LLSC_MB
                "       li      %2, 1                                   \n"
                "2:                                                     \n"
                : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
@@ -310,7 +310,7 @@ static inline int __raw_read_trylock(raw_rwlock_t *rw)
                "       beqz    %1, 1b                                  \n"
                "        nop                                            \n"
                "       .set    reorder                                 \n"
-               __WEAK_ORDERING_MB
+               __WEAK_LLSC_MB
                "       li      %2, 1                                   \n"
                "2:                                                     \n"
                : "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
@@ -336,7 +336,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
                "       sc      %1, %0                                  \n"
                "       beqzl   %1, 1b                                  \n"
                "        nop                                            \n"
-               __WEAK_ORDERING_MB
+               __WEAK_LLSC_MB
                "       li      %2, 1                                   \n"
                "       .set    reorder                                 \n"
                "2:                                                     \n"
@@ -354,7 +354,7 @@ static inline int __raw_write_trylock(raw_rwlock_t *rw)
                "       beqz    %1, 3f                                  \n"
                "        li     %2, 1                                   \n"
                "2:                                                     \n"
-               __WEAK_ORDERING_MB
+               __WEAK_LLSC_MB
                "       .subsection 2                                   \n"
                "3:     b       1b                                      \n"
                "        li     %2, 0                                   \n"
diff --git a/include/asm-mips/system.h b/include/asm-mips/system.h
index 76339165bc20e90e84089f8da7fb795f9b9b00c2..eba2e3da9abe6195143cbffc960bd290511313a6 100644
--- a/include/asm-mips/system.h
+++ b/include/asm-mips/system.h
@@ -117,7 +117,7 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
                raw_local_irq_restore(flags);   /* implies memory barrier  */
        }
 
-       smp_mb();
+       smp_llsc_mb();
 
        return retval;
 }
@@ -165,7 +165,7 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
                raw_local_irq_restore(flags);   /* implies memory barrier  */
        }
 
-       smp_mb();
+       smp_llsc_mb();
 
        return retval;
 }
@@ -246,7 +246,7 @@ static inline unsigned long __cmpxchg_u32(volatile int * m, unsigned long old,
                raw_local_irq_restore(flags);   /* implies memory barrier  */
        }
 
-       smp_mb();
+       smp_llsc_mb();
 
        return retval;
 }
@@ -352,7 +352,7 @@ static inline unsigned long __cmpxchg_u64(volatile int * m, unsigned long old,
                raw_local_irq_restore(flags);   /* implies memory barrier  */
        }
 
-       smp_mb();
+       smp_llsc_mb();
 
        return retval;
 }