err.no Git - linux-2.6/commitdiff
atomic.h: atomic_add_unless as inline. Remove system.h atomic.h circular dependency
author     Mathieu Desnoyers <compudj@krystal.dyndns.org>
           Tue, 8 May 2007 07:34:38 +0000 (00:34 -0700)
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>
           Tue, 8 May 2007 18:15:20 +0000 (11:15 -0700)
atomic_add_unless as inline. Remove system.h atomic.h circular dependency.
I agree (with Andi Kleen) that this typeof is not needed and is more
error-prone. All the original atomic.h code that uses cmpxchg (which
includes atomic_add_unless) uses defines instead of inline functions,
probably to circumvent a circular dependency between system.h and
atomic.h on powerpc (which my patch addresses). It therefore makes
sense to use inline functions, which provide type checking.
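
As an illustration of the type checking gained (a hypothetical misuse,
not part of the patch, assuming the alpha definitions shown below): the
macro form's __typeof__((v)->counter) adapts to whatever is passed, so
the wrong counter type compiles silently, while the inline function
makes gcc warn about the incompatible pointer type at the call site.

	#include <asm/atomic.h>

	/* hypothetical misuse, not from the patch */
	static atomic64_t big = ATOMIC64_INIT(0);

	static int example(void)
	{
		/* With the old macro this expands and compiles silently
		 * (the cmpxchg width simply follows sizeof(counter));
		 * with the new inline, gcc flags the incompatible
		 * pointer type here. */
		return atomic_add_unless(&big, 1, 0);
	}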

atomic_add_unless as inline. Remove system.h atomic.h circular dependency.
Digging into the FRV architecture shows that it is also affected by
such a circular dependency. Here is the diff that applies this change
on top of the rest of my atomic.h patches.
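
A quick way to see that the FRV cycle is gone (a hypothetical test
file, not part of the patch): a translation unit that pulls in only
asm/atomic.h now reaches cmpxchg() through asm/system.h, and system.h
no longer pulls atomic.h back in.

	/* hypothetical check, assuming the post-patch FRV headers */
	#include <asm/atomic.h>	/* now includes <asm/system.h> itself */

	static atomic_t refs = ATOMIC_INIT(1);

	int grab_ref(void)
	{
		/* atomic_inc_not_zero() -> atomic_add_unless() ->
		 * atomic_cmpxchg() -> cmpxchg(), which lives in system.h */
		return atomic_inc_not_zero(&refs);
	}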

It applies on top of the atomic.h standardization patches.

Signed-off-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
18 files changed:
include/asm-alpha/atomic.h
include/asm-arm/atomic.h
include/asm-arm26/atomic.h
include/asm-frv/atomic.h
include/asm-frv/system.h
include/asm-generic/atomic.h
include/asm-i386/atomic.h
include/asm-ia64/atomic.h
include/asm-m32r/atomic.h
include/asm-m68k/atomic.h
include/asm-m68knommu/atomic.h
include/asm-mips/atomic.h
include/asm-parisc/atomic.h
include/asm-powerpc/atomic.h
include/asm-ppc/system.h
include/asm-sparc64/atomic.h
include/asm-x86_64/atomic.h
include/asm-xtensa/atomic.h

diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h
index 7b4fba88cbebcffa022d6c1fc472ca37c418f9f7..f5cb7b878af25ba2dd69abb0343327db8c183dd4 100644
@@ -2,6 +2,7 @@
 #define _ALPHA_ATOMIC_H
 
 #include <asm/barrier.h>
+#include <asm/system.h>
 
 /*
  * Atomic operations that C can't guarantee us.  Useful for
@@ -190,20 +191,21 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_add_unless(v, a, u)                             \
-({                                                             \
-       __typeof__((v)->counter) c, old;                        \
-       c = atomic_read(v);                                     \
-       for (;;) {                                              \
-               if (unlikely(c == (u)))                         \
-                       break;                                  \
-               old = atomic_cmpxchg((v), c, c + (a));          \
-               if (likely(old == c))                           \
-                       break;                                  \
-               c = old;                                        \
-       }                                                       \
-       c != (u);                                               \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+       int c, old;
+       c = atomic_read(v);
+       for (;;) {
+               if (unlikely(c == (u)))
+                       break;
+               old = atomic_cmpxchg((v), c, c + (a));
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 /**
@@ -215,20 +217,21 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic64_add_unless(v, a, u)                           \
-({                                                             \
-       __typeof__((v)->counter) c, old;                        \
-       c = atomic64_read(v);                                   \
-       for (;;) {                                              \
-               if (unlikely(c == (u)))                         \
-                       break;                                  \
-               old = atomic64_cmpxchg((v), c, c + (a));        \
-               if (likely(old == c))                           \
-                       break;                                  \
-               c = old;                                        \
-       }                                                       \
-       c != (u);                                               \
-})
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+       long c, old;
+       c = atomic64_read(v);
+       for (;;) {
+               if (unlikely(c == (u)))
+                       break;
+               old = atomic64_cmpxchg((v), c, c + (a));
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return c != (u);
+}
+
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
 #define atomic_add_negative(a, v) (atomic_add_return((a), (v)) < 0)
diff --git a/include/asm-arm/atomic.h b/include/asm-arm/atomic.h
index f266c2795124901fc05b6ba3cff0bb607e393655..3b59f94b5a3d3f345bf5bfeff53440063ae3f426 100644
@@ -12,6 +12,7 @@
 #define __ASM_ARM_ATOMIC_H
 
 #include <linux/compiler.h>
+#include <asm/system.h>
 
 typedef struct { volatile int counter; } atomic_t;
 
diff --git a/include/asm-arm26/atomic.h b/include/asm-arm26/atomic.h
index 97e944fe1cff4c9bf4d9c0eb7603fca6e43bf916..d6dd42374cf3aaef8f2586efb13da4d948451af2 100644
@@ -20,7 +20,6 @@
 #ifndef __ASM_ARM_ATOMIC_H
 #define __ASM_ARM_ATOMIC_H
 
-
 #ifdef CONFIG_SMP
 #error SMP is NOT supported
 #endif
diff --git a/include/asm-frv/atomic.h b/include/asm-frv/atomic.h
index 066386ac238e9719fde46e714da1cce668e2f77b..d425d8d0ad7758c73d40000a9a792f740ff78a5d 100644
@@ -16,6 +16,7 @@
 
 #include <linux/types.h>
 #include <asm/spr-regs.h>
+#include <asm/system.h>
 
 #ifdef CONFIG_SMP
 #error not SMP safe
@@ -258,85 +259,23 @@ extern uint32_t __xchg_32(uint32_t i, volatile void *v);
 
 #define tas(ptr) (xchg((ptr), 1))
 
-/*****************************************************************************/
-/*
- * compare and conditionally exchange value with memory
- * - if (*ptr == test) then orig = *ptr; *ptr = test;
- * - if (*ptr != test) then orig = *ptr;
- */
-#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
-
-#define cmpxchg(ptr, test, new)                                                        \
-({                                                                             \
-       __typeof__(ptr) __xg_ptr = (ptr);                                       \
-       __typeof__(*(ptr)) __xg_orig, __xg_tmp;                                 \
-       __typeof__(*(ptr)) __xg_test = (test);                                  \
-       __typeof__(*(ptr)) __xg_new = (new);                                    \
-                                                                               \
-       switch (sizeof(__xg_orig)) {                                            \
-       case 4:                                                                 \
-               asm volatile(                                                   \
-                       "0:                                             \n"     \
-                       "       orcc            gr0,gr0,gr0,icc3        \n"     \
-                       "       ckeq            icc3,cc7                \n"     \
-                       "       ld.p            %M0,%1                  \n"     \
-                       "       orcr            cc7,cc7,cc3             \n"     \
-                       "       sub%I4cc        %1,%4,%2,icc0           \n"     \
-                       "       bne             icc0,#0,1f              \n"     \
-                       "       cst.p           %3,%M0          ,cc3,#1 \n"     \
-                       "       corcc           gr29,gr29,gr0   ,cc3,#1 \n"     \
-                       "       beq             icc3,#0,0b              \n"     \
-                       "1:                                             \n"     \
-                       : "+U"(*__xg_ptr), "=&r"(__xg_orig), "=&r"(__xg_tmp)    \
-                       : "r"(__xg_new), "NPr"(__xg_test)                       \
-                       : "memory", "cc7", "cc3", "icc3", "icc0"                \
-                       );                                                      \
-               break;                                                          \
-                                                                               \
-       default:                                                                \
-               __xg_orig = 0;                                                  \
-               asm volatile("break");                                          \
-               break;                                                          \
-       }                                                                       \
-                                                                               \
-       __xg_orig;                                                              \
-})
-
-#else
-
-extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);
-
-#define cmpxchg(ptr, test, new)                                                        \
-({                                                                             \
-       __typeof__(ptr) __xg_ptr = (ptr);                                       \
-       __typeof__(*(ptr)) __xg_orig;                                           \
-       __typeof__(*(ptr)) __xg_test = (test);                                  \
-       __typeof__(*(ptr)) __xg_new = (new);                                    \
-                                                                               \
-       switch (sizeof(__xg_orig)) {                                            \
-       case 4: __xg_orig = __cmpxchg_32(__xg_ptr, __xg_test, __xg_new); break; \
-       default:                                                                \
-               __xg_orig = 0;                                                  \
-               asm volatile("break");                                          \
-               break;                                                          \
-       }                                                                       \
-                                                                               \
-       __xg_orig;                                                              \
-})
-
-#endif
-
 #define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
-#define atomic_add_unless(v, a, u)                             \
-({                                                             \
-       int c, old;                                             \
-       c = atomic_read(v);                                     \
-       while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-               c = old;                                        \
-       c != (u);                                               \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+       int c, old;
+       c = atomic_read(v);
+       for (;;) {
+               if (unlikely(c == (u)))
+                       break;
+               old = atomic_cmpxchg((v), c, c + (a));
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return c != (u);
+}
 
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
diff --git a/include/asm-frv/system.h b/include/asm-frv/system.h
index 1166899317d78a8ca248d798216d3391e0484b8e..be303b3eef4096e06be42497583fec69ea097e94 100644
@@ -13,7 +13,6 @@
 #define _ASM_SYSTEM_H
 
 #include <linux/linkage.h>
-#include <asm/atomic.h>
 
 struct thread_struct;
 
@@ -197,4 +196,73 @@ extern void free_initmem(void);
 
 #define arch_align_stack(x) (x)
 
+/*****************************************************************************/
+/*
+ * compare and conditionally exchange value with memory
+ * - if (*ptr == test) then orig = *ptr; *ptr = test;
+ * - if (*ptr != test) then orig = *ptr;
+ */
+#ifndef CONFIG_FRV_OUTOFLINE_ATOMIC_OPS
+
+#define cmpxchg(ptr, test, new)                                                        \
+({                                                                             \
+       __typeof__(ptr) __xg_ptr = (ptr);                                       \
+       __typeof__(*(ptr)) __xg_orig, __xg_tmp;                                 \
+       __typeof__(*(ptr)) __xg_test = (test);                                  \
+       __typeof__(*(ptr)) __xg_new = (new);                                    \
+                                                                               \
+       switch (sizeof(__xg_orig)) {                                            \
+       case 4:                                                                 \
+               asm volatile(                                                   \
+                       "0:                                             \n"     \
+                       "       orcc            gr0,gr0,gr0,icc3        \n"     \
+                       "       ckeq            icc3,cc7                \n"     \
+                       "       ld.p            %M0,%1                  \n"     \
+                       "       orcr            cc7,cc7,cc3             \n"     \
+                       "       sub%I4cc        %1,%4,%2,icc0           \n"     \
+                       "       bne             icc0,#0,1f              \n"     \
+                       "       cst.p           %3,%M0          ,cc3,#1 \n"     \
+                       "       corcc           gr29,gr29,gr0   ,cc3,#1 \n"     \
+                       "       beq             icc3,#0,0b              \n"     \
+                       "1:                                             \n"     \
+                       : "+U"(*__xg_ptr), "=&r"(__xg_orig), "=&r"(__xg_tmp)    \
+                       : "r"(__xg_new), "NPr"(__xg_test)                       \
+                       : "memory", "cc7", "cc3", "icc3", "icc0"                \
+                       );                                                      \
+               break;                                                          \
+                                                                               \
+       default:                                                                \
+               __xg_orig = 0;                                                  \
+               asm volatile("break");                                          \
+               break;                                                          \
+       }                                                                       \
+                                                                               \
+       __xg_orig;                                                              \
+})
+
+#else
+
+extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);
+
+#define cmpxchg(ptr, test, new)                                                        \
+({                                                                             \
+       __typeof__(ptr) __xg_ptr = (ptr);                                       \
+       __typeof__(*(ptr)) __xg_orig;                                           \
+       __typeof__(*(ptr)) __xg_test = (test);                                  \
+       __typeof__(*(ptr)) __xg_new = (new);                                    \
+                                                                               \
+       switch (sizeof(__xg_orig)) {                                            \
+       case 4: __xg_orig = __cmpxchg_32(__xg_ptr, __xg_test, __xg_new); break; \
+       default:                                                                \
+               __xg_orig = 0;                                                  \
+               asm volatile("break");                                          \
+               break;                                                          \
+       }                                                                       \
+                                                                               \
+       __xg_orig;                                                              \
+})
+
+#endif
+
+
 #endif /* _ASM_SYSTEM_H */
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index 5ae6dce1cba282a852eecf04a38d4ee90236f22d..85fd0aa27a8cd3f48a8ee9a9b47646316ddbed15 100644
@@ -9,7 +9,6 @@
  */
 
 #include <asm/types.h>
-#include <asm/system.h>
 
 /*
  * Suppport for atomic_long_t
@@ -123,8 +122,12 @@ static inline long atomic_long_dec_return(atomic_long_t *l)
        return (long)atomic64_dec_return(v);
 }
 
-#define atomic_long_add_unless(l, a, u) \
-       atomic64_add_unless((atomic64_t *)(l), (a), (u))
+static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
+{
+       atomic64_t *v = (atomic64_t *)l;
+
+       return (long)atomic64_add_unless(v, a, u);
+}
 
 #define atomic_long_inc_not_zero(l) atomic64_inc_not_zero((atomic64_t *)(l))
 
@@ -236,8 +239,12 @@ static inline long atomic_long_dec_return(atomic_long_t *l)
        return (long)atomic_dec_return(v);
 }
 
-#define atomic_long_add_unless(l, a, u) \
-       atomic_add_unless((atomic_t *)(l), (a), (u))
+static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
+{
+       atomic_t *v = (atomic_t *)l;
+
+       return (long)atomic_add_unless(v, a, u);
+}
 
 #define atomic_long_inc_not_zero(l) atomic_inc_not_zero((atomic_t *)(l))
 
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index 08935113206a4b1cec25e87896ce91583feffc49..ff90c6e3fcb4ab846f3bcc013279f8f64c566b35 100644
@@ -219,20 +219,21 @@ static __inline__ int atomic_sub_return(int i, atomic_t *v)
  * Atomically adds @a to @v, so long as @v was not already @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_add_unless(v, a, u)                             \
-({                                                             \
-       __typeof__((v)->counter) c, old;                        \
-       c = atomic_read(v);                                     \
-       for (;;) {                                              \
-               if (unlikely(c == (u)))                         \
-                       break;                                  \
-               old = atomic_cmpxchg((v), c, c + (a));          \
-               if (likely(old == c))                           \
-                       break;                                  \
-               c = old;                                        \
-       }                                                       \
-       c != (u);                                               \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+       int c, old;
+       c = atomic_read(v);
+       for (;;) {
+               if (unlikely(c == (u)))
+                       break;
+               old = atomic_cmpxchg((v), c, c + (a));
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 #define atomic_inc_return(v)  (atomic_add_return(1,v))
diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h
index b16ad235c7ee797da4b4704f7793b02e1fe2b045..1fc3b83325dab4fa0c20fb46abbc6d16228b34fa 100644
@@ -15,6 +15,7 @@
 #include <linux/types.h>
 
 #include <asm/intrinsics.h>
+#include <asm/system.h>
 
 /*
  * On IA-64, counter must always be volatile to ensure that that the
@@ -95,36 +96,38 @@ ia64_atomic64_sub (__s64 i, atomic64_t *v)
        (cmpxchg(&((v)->counter), old, new))
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
-#define atomic_add_unless(v, a, u)                             \
-({                                                             \
-       __typeof__(v->counter) c, old;                          \
-       c = atomic_read(v);                                     \
-       for (;;) {                                              \
-               if (unlikely(c == (u)))                         \
-                       break;                                  \
-               old = atomic_cmpxchg((v), c, c + (a));          \
-               if (likely(old == c))                           \
-                       break;                                  \
-               c = old;                                        \
-       }                                                       \
-       c != (u);                                               \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+       int c, old;
+       c = atomic_read(v);
+       for (;;) {
+               if (unlikely(c == (u)))
+                       break;
+               old = atomic_cmpxchg((v), c, c + (a));
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
-#define atomic64_add_unless(v, a, u)                           \
-({                                                             \
-       __typeof__(v->counter) c, old;                          \
-       c = atomic64_read(v);                                   \
-       for (;;) {                                              \
-               if (unlikely(c == (u)))                         \
-                       break;                                  \
-               old = atomic64_cmpxchg((v), c, c + (a));        \
-               if (likely(old == c))                           \
-                       break;                                  \
-               c = old;                                        \
-       }                                                       \
-       c != (u);                                               \
-})
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+       long c, old;
+       c = atomic64_read(v);
+       for (;;) {
+               if (unlikely(c == (u)))
+                       break;
+               old = atomic64_cmpxchg((v), c, c + (a));
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return c != (u);
+}
+
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
 #define atomic_add_return(i,v)                                         \
diff --git a/include/asm-m32r/atomic.h b/include/asm-m32r/atomic.h
index f5a7d7301c72170523cc265402acfc487d16b553..3a38ffe4a4f49991e43b59eb9f18dcb1cb3d32d0 100644
@@ -253,14 +253,21 @@ static __inline__ int atomic_dec_return(atomic_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_add_unless(v, a, u)                             \
-({                                                             \
-       int c, old;                                             \
-       c = atomic_read(v);                                     \
-       while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-               c = old;                                        \
-       c != (u);                                               \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+       int c, old;
+       c = atomic_read(v);
+       for (;;) {
+               if (unlikely(c == (u)))
+                       break;
+               old = atomic_cmpxchg((v), c, c + (a));
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 static __inline__ void atomic_clear_mask(unsigned long  mask, atomic_t *addr)
diff --git a/include/asm-m68k/atomic.h b/include/asm-m68k/atomic.h
index d5eed64cb833d277bcc459ab0175022c045232be..4915294fea63a726cb901ec1799ed9e7bc4586f9 100644
@@ -2,7 +2,7 @@
 #define __ARCH_M68K_ATOMIC__
 
 
-#include <asm/system.h>        /* local_irq_XXX() */
+#include <asm/system.h>
 
 /*
  * Atomic operations that C can't guarantee us.  Useful for
@@ -170,20 +170,21 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
        __asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
 }
 
-#define atomic_add_unless(v, a, u)                             \
-({                                                             \
-       int c, old;                                             \
-       c = atomic_read(v);                                     \
-       for (;;) {                                              \
-               if (unlikely(c == (u)))                         \
-                       break;                                  \
-               old = atomic_cmpxchg((v), c, c + (a));          \
-               if (likely(old == c))                           \
-                       break;                                  \
-               c = old;                                        \
-       }                                                       \
-       c != (u);                                               \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+       int c, old;
+       c = atomic_read(v);
+       for (;;) {
+               if (unlikely(c == (u)))
+                       break;
+               old = atomic_cmpxchg((v), c, c + (a));
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 /* Atomic operations are already serializing */
diff --git a/include/asm-m68knommu/atomic.h b/include/asm-m68knommu/atomic.h
index 6c4e4b63e45454f018e5f3bfc599a506bcaa4827..d5632a305dae8e992626dc7d79efc778805f5a51 100644
@@ -1,7 +1,7 @@
 #ifndef __ARCH_M68KNOMMU_ATOMIC__
 #define __ARCH_M68KNOMMU_ATOMIC__
 
-#include <asm/system.h>        /* local_irq_XXX() */
+#include <asm/system.h>
 
 /*
  * Atomic operations that C can't guarantee us.  Useful for
@@ -131,14 +131,21 @@ static inline int atomic_sub_return(int i, atomic_t * v)
 #define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
-#define atomic_add_unless(v, a, u)                             \
-({                                                             \
-       int c, old;                                             \
-       c = atomic_read(v);                                     \
-       while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-               c = old;                                        \
-       c != (u);                                               \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+       int c, old;
+       c = atomic_read(v);
+       for (;;) {
+               if (unlikely(c == (u)))
+                       break;
+               old = atomic_cmpxchg((v), c, c + (a));
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 #define atomic_dec_return(v) atomic_sub_return(1,(v))
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index 6423ffa195a409f59b3d6a6881d1b1b866c29dbe..62daa746a9c9180101af1e566635778c36d1d269 100644
@@ -18,6 +18,7 @@
 #include <asm/barrier.h>
 #include <asm/cpu-features.h>
 #include <asm/war.h>
+#include <asm/system.h>
 
 typedef struct { volatile int counter; } atomic_t;
 
@@ -318,14 +319,20 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_add_unless(v, a, u)                             \
-({                                                             \
-       __typeof__((v)->counter) c, old;                        \
-       c = atomic_read(v);                                     \
-       while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-               c = old;                                        \
-       c != (u);                                               \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+       int c, old;
+       c = atomic_read(v);
+       for (;;) {
+               if (unlikely(c == (u)))
+                       break;
+               old = atomic_cmpxchg((v), c, c + (a));
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return c != (u);
+}
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 #define atomic_dec_return(v) atomic_sub_return(1,(v))
@@ -694,14 +701,21 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic64_add_unless(v, a, u)                           \
-({                                                             \
-       __typeof__((v)->counter) c, old;                        \
-       c = atomic_read(v);                                     \
-       while (c != (u) && (old = atomic64_cmpxchg((v), c, c + (a))) != c) \
-               c = old;                                        \
-       c != (u);                                               \
-})
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+       long c, old;
+       c = atomic64_read(v);
+       for (;;) {
+               if (unlikely(c == (u)))
+                       break;
+               old = atomic64_cmpxchg((v), c, c + (a));
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return c != (u);
+}
+
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
 #define atomic64_dec_return(v) atomic64_sub_return(1,(v))
diff --git a/include/asm-parisc/atomic.h b/include/asm-parisc/atomic.h
index 66a0edbb51f4cd3f67ec67d2398303785bc87143..e894ee35074b05c0890692584ecf4c85bcc2128e 100644
@@ -6,6 +6,7 @@
 #define _ASM_PARISC_ATOMIC_H_
 
 #include <linux/types.h>
+#include <asm/system.h>
 
 /*
  * Atomic operations that C can't guarantee us.  Useful for
@@ -174,14 +175,21 @@ static __inline__ int atomic_read(const atomic_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_add_unless(v, a, u)                             \
-({                                                             \
-       __typeof__((v)->counter) c, old;                                                \
-       c = atomic_read(v);                                     \
-       while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-               c = old;                                        \
-       c != (u);                                               \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+       int c, old;
+       c = atomic_read(v);
+       for (;;) {
+               if (unlikely(c == (u)))
+                       break;
+               old = atomic_cmpxchg((v), c, c + (a));
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 #define atomic_add(i,v)        ((void)(__atomic_add_return( ((int)i),(v))))
@@ -283,14 +291,21 @@ atomic64_read(const atomic64_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic64_add_unless(v, a, u)                           \
-({                                                             \
-       __typeof__((v)->counter) c, old;                                                \
-       c = atomic64_read(v);                                   \
-       while (c != (u) && (old = atomic64_cmpxchg((v), c, c + (a))) != c) \
-               c = old;                                        \
-       c != (u);                                               \
-})
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+       long c, old;
+       c = atomic64_read(v);
+       for (;;) {
+               if (unlikely(c == (u)))
+                       break;
+               old = atomic64_cmpxchg((v), c, c + (a));
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return c != (u);
+}
+
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
 #endif /* CONFIG_64BIT */
diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h
index 438a7fcfba58dfd0f0a95137b952d0c85c45e6b5..c44810b9d3224dbc54474b4a6cb7eab5a2a9af3d 100644
@@ -11,6 +11,7 @@ typedef struct { volatile int counter; } atomic_t;
 #include <linux/compiler.h>
 #include <asm/synch.h>
 #include <asm/asm-compat.h>
+#include <asm/system.h>
 
 #define ATOMIC_INIT(i)         { (i) }
 
diff --git a/include/asm-ppc/system.h b/include/asm-ppc/system.h
index 738943584c017547b765998214cdecb4db235397..56abe5e9e15519d4401fa230d708d7f6050865af 100644
@@ -6,7 +6,6 @@
 
 #include <linux/kernel.h>
 
-#include <asm/atomic.h>
 #include <asm/hw_irq.h>
 
 /*
diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h
index c3feb3af2cfe2e453f310ab6645c6ad15a2ec73f..3fb4e1f7f186066d23962ee7c786cd5e7ad93bcd 100644
@@ -9,6 +9,7 @@
 #define __ARCH_SPARC64_ATOMIC__
 
 #include <linux/types.h>
+#include <asm/system.h>
 
 typedef struct { volatile int counter; } atomic_t;
 typedef struct { volatile __s64 counter; } atomic64_t;
@@ -73,40 +74,42 @@ extern int atomic64_sub_ret(int, atomic64_t *);
 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
-#define atomic_add_unless(v, a, u)                             \
-({                                                             \
-       __typeof__((v)->counter) c, old;                        \
-       c = atomic_read(v);                                     \
-       for (;;) {                                              \
-               if (unlikely(c == (u)))                         \
-                       break;                                  \
-               old = atomic_cmpxchg((v), c, c + (a));          \
-               if (likely(old == c))                           \
-                       break;                                  \
-               c = old;                                        \
-       }                                                       \
-       likely(c != (u));                                       \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+       int c, old;
+       c = atomic_read(v);
+       for (;;) {
+               if (unlikely(c == (u)))
+                       break;
+               old = atomic_cmpxchg((v), c, c + (a));
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 #define atomic64_cmpxchg(v, o, n) \
        ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
 
-#define atomic64_add_unless(v, a, u)                           \
-({                                                             \
-       __typeof__((v)->counter) c, old;                        \
-       c = atomic64_read(v);                                   \
-       for (;;) {                                              \
-               if (unlikely(c == (u)))                         \
-                       break;                                  \
-               old = atomic64_cmpxchg((v), c, c + (a));        \
-               if (likely(old == c))                           \
-                       break;                                  \
-               c = old;                                        \
-       }                                                       \
-       likely(c != (u));                                       \
-})
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+       long c, old;
+       c = atomic64_read(v);
+       for (;;) {
+               if (unlikely(c == (u)))
+                       break;
+               old = atomic64_cmpxchg((v), c, c + (a));
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return c != (u);
+}
+
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
 /* Atomic operations are already serializing */
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 80e4fdbe220402f1551570736e6ccf5f2bfa43ba..19e0c607b568165be8fa468151b3efb27e158023 100644
@@ -2,6 +2,7 @@
 #define __ARCH_X86_64_ATOMIC__
 
 #include <asm/alternative.h>
+#include <asm/system.h>
 
 /* atomic_t should be 32 bit signed type */
 
@@ -403,20 +404,21 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_add_unless(v, a, u)                             \
-({                                                             \
-       __typeof__((v)->counter) c, old;                        \
-       c = atomic_read(v);                                     \
-       for (;;) {                                              \
-               if (unlikely(c == (u)))                         \
-                       break;                                  \
-               old = atomic_cmpxchg((v), c, c + (a));          \
-               if (likely(old == c))                           \
-                       break;                                  \
-               c = old;                                        \
-       }                                                       \
-       c != (u);                                               \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+       int c, old;
+       c = atomic_read(v);
+       for (;;) {
+               if (unlikely(c == (u)))
+                       break;
+               old = atomic_cmpxchg((v), c, c + (a));
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 /**
@@ -428,20 +430,21 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic64_add_unless(v, a, u)                           \
-({                                                             \
-       __typeof__((v)->counter) c, old;                        \
-       c = atomic64_read(v);                                   \
-       for (;;) {                                              \
-               if (unlikely(c == (u)))                         \
-                       break;                                  \
-               old = atomic64_cmpxchg((v), c, c + (a));        \
-               if (likely(old == c))                           \
-                       break;                                  \
-               c = old;                                        \
-       }                                                       \
-       c != (u);                                               \
-})
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+       long c, old;
+       c = atomic64_read(v);
+       for (;;) {
+               if (unlikely(c == (u)))
+                       break;
+               old = atomic64_cmpxchg((v), c, c + (a));
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return c != (u);
+}
+
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
 /* These are x86-specific, used by some header files */
diff --git a/include/asm-xtensa/atomic.h b/include/asm-xtensa/atomic.h
index 5c2672021068670758d9d9d3d4f8ef30abaa5705..b3b23540f14d2c5087e4b8d09d519378e73e726a 100644
@@ -234,14 +234,21 @@ static inline int atomic_sub_return(int i, atomic_t * v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_add_unless(v, a, u)                             \
-({                                                             \
-       int c, old;                                             \
-       c = atomic_read(v);                                     \
-       while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-               c = old;                                        \
-       c != (u);                                               \
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+       int c, old;
+       c = atomic_read(v);
+       for (;;) {
+               if (unlikely(c == (u)))
+                       break;
+               old = atomic_cmpxchg((v), c, c + (a));
+               if (likely(old == c))
+                       break;
+               c = old;
+       }
+       return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)