#include <asm/barrier.h>
#include <asm/cpu-features.h>
#include <asm/war.h>
+#include <asm/system.h>
typedef struct { volatile int counter; } atomic_t;
*
* Atomically sets the value of @v to @i.
*/
-#define atomic_set(v,i) ((v)->counter = (i))
+#define atomic_set(v, i) ((v)->counter = (i))
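(Not part of the patch: a minimal usage sketch, with a made-up counter name, showing how an atomic_t is declared, initialised with atomic_set() and read back.)

static atomic_t example_count = ATOMIC_INIT(0);	/* hypothetical counter */

static void example_init(void)
{
	atomic_set(&example_count, 5);		/* plain volatile store, no barrier implied */
	BUG_ON(atomic_read(&example_count) != 5);
}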
/*
* atomic_add - add integer to atomic variable
{
unsigned long result;
- smp_mb();
+ smp_llsc_mb();
if (cpu_has_llsc && R10000_LLSC_WAR) {
unsigned long temp;
raw_local_irq_restore(flags);
}
- smp_mb();
+ smp_llsc_mb();
return result;
}
{
unsigned long result;
- smp_mb();
+ smp_llsc_mb();
if (cpu_has_llsc && R10000_LLSC_WAR) {
unsigned long temp;
raw_local_irq_restore(flags);
}
- smp_mb();
+ smp_llsc_mb();
return result;
}
{
unsigned long result;
- smp_mb();
+ smp_llsc_mb();
if (cpu_has_llsc && R10000_LLSC_WAR) {
unsigned long temp;
raw_local_irq_restore(flags);
}
- smp_mb();
+ smp_llsc_mb();
return result;
}
* Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
-#define atomic_add_unless(v, a, u) \
-({ \
- __typeof__((v)->counter) c, old; \
- c = atomic_read(v); \
- while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
- c = old; \
- c != (u); \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+ int c, old;
+ c = atomic_read(v);
+ for (;;) {
+ if (unlikely(c == (u)))
+ break;
+ old = atomic_cmpxchg((v), c, c + (a));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != (u);
+}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
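(Illustration only, not part of the patch; the struct and helper below are hypothetical. The cmpxchg() loop above is most often reached through atomic_inc_not_zero(), taking a reference only while the count is still non-zero.)

struct obj {
	atomic_t refs;				/* hypothetical reference count */
};

/* Grab a reference iff the object is still live; returns non-zero on success. */
static inline int obj_get(struct obj *o)
{
	return atomic_inc_not_zero(&o->refs);
}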
-#define atomic_dec_return(v) atomic_sub_return(1,(v))
-#define atomic_inc_return(v) atomic_add_return(1,(v))
+#define atomic_dec_return(v) atomic_sub_return(1, (v))
+#define atomic_inc_return(v) atomic_add_return(1, (v))
/*
* atomic_sub_and_test - subtract value from variable and test result
* true if the result is zero, or false for all
* other cases.
*/
-#define atomic_sub_and_test(i,v) (atomic_sub_return((i), (v)) == 0)
+#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
/*
* atomic_inc_and_test - increment and test
*
* Atomically increments @v by 1.
*/
-#define atomic_inc(v) atomic_add(1,(v))
+#define atomic_inc(v) atomic_add(1, (v))
/*
* atomic_dec - decrement and test
*
* Atomically decrements @v by 1.
*/
-#define atomic_dec(v) atomic_sub(1,(v))
+#define atomic_dec(v) atomic_sub(1, (v))
/*
* atomic_add_negative - add and test if negative
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
-#define atomic_add_negative(i,v) (atomic_add_return(i, (v)) < 0)
+#define atomic_add_negative(i, v) (atomic_add_return(i, (v)) < 0)
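(Sketch only; obj_put() and the use of kfree() are assumptions, not part of this file. The matching release path usually keys off the *_and_test form so that exactly one caller sees the count reach zero.)

/* Drop a reference and free the object when the last one goes away. */
static inline void obj_put(struct obj *o)
{
	if (atomic_sub_and_test(1, &o->refs))
		kfree(o);
}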
#ifdef CONFIG_64BIT
* @v: pointer of type atomic64_t
* @i: required value
*/
-#define atomic64_set(v,i) ((v)->counter = (i))
+#define atomic64_set(v, i) ((v)->counter = (i))
/*
* atomic64_add - add integer to atomic variable
{
unsigned long result;
- smp_mb();
+ smp_llsc_mb();
if (cpu_has_llsc && R10000_LLSC_WAR) {
unsigned long temp;
raw_local_irq_restore(flags);
}
- smp_mb();
+ smp_llsc_mb();
return result;
}
{
unsigned long result;
- smp_mb();
+ smp_llsc_mb();
if (cpu_has_llsc && R10000_LLSC_WAR) {
unsigned long temp;
raw_local_irq_restore(flags);
}
- smp_mb();
+ smp_llsc_mb();
return result;
}
{
unsigned long result;
- smp_mb();
+ smp_llsc_mb();
if (cpu_has_llsc && R10000_LLSC_WAR) {
unsigned long temp;
raw_local_irq_restore(flags);
}
- smp_mb();
+ smp_llsc_mb();
return result;
}
#define atomic64_cmpxchg(v, o, n) \
- (((__typeof__((v)->counter)))cmpxchg(&((v)->counter), (o), (n)))
+ ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
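(Illustration only; the state values and claim() helper are invented. The cast in atomic64_cmpxchg() is needed because cmpxchg() hands back an unsigned long that must be converted to the counter type; a typical caller uses it for a one-shot transition.)

enum { IDLE = 0, BUSY = 1 };			/* hypothetical states */
static atomic64_t busy_flag = ATOMIC64_INIT(IDLE);

/* Claim the resource only if nobody else has; returns non-zero on success. */
static inline int claim(void)
{
	return atomic64_cmpxchg(&busy_flag, IDLE, BUSY) == IDLE;
}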
/**
* Atomically adds @a to @v, so long as it was not @u.
* Returns non-zero if @v was not @u, and zero otherwise.
*/
-#define atomic64_add_unless(v, a, u) \
-({ \
- __typeof__((v)->counter) c, old; \
- c = atomic_read(v); \
- while (c != (u) && (old = atomic64_cmpxchg((v), c, c + (a))) != c) \
- c = old; \
- c != (u); \
-})
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+ long c, old;
+ c = atomic64_read(v);
+ for (;;) {
+ if (unlikely(c == (u)))
+ break;
+ old = atomic64_cmpxchg((v), c, c + (a));
+ if (likely(old == c))
+ break;
+ c = old;
+ }
+ return c != (u);
+}
+
#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
-#define atomic64_dec_return(v) atomic64_sub_return(1,(v))
-#define atomic64_inc_return(v) atomic64_add_return(1,(v))
+#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
+#define atomic64_inc_return(v) atomic64_add_return(1, (v))
/*
* atomic64_sub_and_test - subtract value from variable and test result
* true if the result is zero, or false for all
* other cases.
*/
-#define atomic64_sub_and_test(i,v) (atomic64_sub_return((i), (v)) == 0)
+#define atomic64_sub_and_test(i, v) (atomic64_sub_return((i), (v)) == 0)
/*
* atomic64_inc_and_test - increment and test
*
* Atomically increments @v by 1.
*/
-#define atomic64_inc(v) atomic64_add(1,(v))
+#define atomic64_inc(v) atomic64_add(1, (v))
/*
* atomic64_dec - decrement and test
*
* Atomically decrements @v by 1.
*/
-#define atomic64_dec(v) atomic64_sub(1,(v))
+#define atomic64_dec(v) atomic64_sub(1, (v))
/*
* atomic64_add_negative - add and test if negative
* if the result is negative, or false when
* result is greater than or equal to zero.
*/
-#define atomic64_add_negative(i,v) (atomic64_add_return(i, (v)) < 0)
+#define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
#endif /* CONFIG_64BIT */
* atomic*_return operations are serializing but not the non-*_return
* versions.
*/
-#define smp_mb__before_atomic_dec() smp_mb()
-#define smp_mb__after_atomic_dec() smp_mb()
-#define smp_mb__before_atomic_inc() smp_mb()
-#define smp_mb__after_atomic_inc() smp_mb()
+#define smp_mb__before_atomic_dec() smp_llsc_mb()
+#define smp_mb__after_atomic_dec() smp_llsc_mb()
+#define smp_mb__before_atomic_inc() smp_llsc_mb()
+#define smp_mb__after_atomic_inc() smp_llsc_mb()
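(For reference, a sketch of how the companion <asm/barrier.h> change is expected to define smp_llsc_mb(): a real SYNC only on cores whose LL/SC sequences do not already order memory, and a no-op elsewhere. The config option name is an assumption here.)

#ifdef CONFIG_WEAK_REORDERING_BEYOND_LLSC
#define __WEAK_LLSC_MB		"	sync	\n"
#else
#define __WEAK_LLSC_MB		"		\n"
#endif

#define smp_llsc_mb()	__asm__ __volatile__(__WEAK_LLSC_MB : : : "memory")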
#include <asm-generic/atomic.h>
+
#endif /* _ASM_ATOMIC_H */