err.no Git - linux-2.6/commitdiff
[PATCH] m68k: fix cmpxchg compile errors if CONFIG_RMW_INSNS=n
author	Roman Zippel <zippel@linux-m68k.org>
Fri, 10 Mar 2006 01:33:53 +0000 (17:33 -0800)
committer	Linus Torvalds <torvalds@g5.osdl.org>
Fri, 10 Mar 2006 03:47:38 +0000 (19:47 -0800)
We require that all archs implement atomic_cmpxchg(), for the generic
version of atomic_add_unless().
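
For reference, that generic version is a cmpxchg retry loop; a minimal sketch of
the pattern (assuming the atomic_read()/atomic_cmpxchg() helpers from this
header, not the exact upstream macro) is:

	static inline int atomic_add_unless(atomic_t *v, int a, int u)
	{
		int c, old;

		/* Re-read and retry whenever another CPU updates the
		 * counter between the read and the compare-and-exchange. */
		c = atomic_read(v);
		while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
			c = old;

		/* Non-zero if the old value was not 'u', i.e. the add happened. */
		return c != u;
	}

The patch keeps that requirement satisfied in both configurations: the
CONFIG_RMW_INSNS=y branch keeps the cmpxchg()-based defines, while the =n
branch falls back to an interrupt-disabling implementation.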

Signed-off-by: Roman Zippel <zippel@linux-m68k.org>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Adrian Bunk <bunk@stusta.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
include/asm-m68k/atomic.h

index a4a84d5c65d50eff5cfeca8239a8c6bb16cf71c1..862e497c26453070674af81cd0786f5d1c5ad30d 100644
@@ -55,6 +55,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
 }
 
 #ifdef CONFIG_RMW_INSNS
+
 static inline int atomic_add_return(int i, atomic_t *v)
 {
        int t, tmp;
@@ -82,7 +83,12 @@ static inline int atomic_sub_return(int i, atomic_t *v)
                        : "g" (i), "2" (atomic_read(v)));
        return t;
 }
+
+#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+
 #else /* !CONFIG_RMW_INSNS */
+
 static inline int atomic_add_return(int i, atomic_t * v)
 {
        unsigned long flags;
@@ -110,6 +116,32 @@ static inline int atomic_sub_return(int i, atomic_t * v)
 
        return t;
 }
+
+static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+{
+       unsigned long flags;
+       int prev;
+
+       local_irq_save(flags);
+       prev = atomic_read(v);
+       if (prev == old)
+               atomic_set(v, new);
+       local_irq_restore(flags);
+       return prev;
+}
+
+static inline int atomic_xchg(atomic_t *v, int new)
+{
+       unsigned long flags;
+       int prev;
+
+       local_irq_save(flags);
+       prev = atomic_read(v);
+       atomic_set(v, new);
+       local_irq_restore(flags);
+       return prev;
+}
+
 #endif /* !CONFIG_RMW_INSNS */
 
 #define atomic_dec_return(v)   atomic_sub_return(1, (v))
@@ -139,9 +171,6 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
        __asm__ __volatile__("orl %1,%0" : "+m" (*v) : "id" (mask));
 }
 
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-
 #define atomic_add_unless(v, a, u)                             \
 ({                                                             \
        int c, old;                                             \