X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=include%2Fasm-parisc%2Fatomic.h;h=57fcc4a5ebb4e93188fa338f786f49bcf509be35;hb=82638844d9a8581bbf33201cc209a14876eca167;hp=48bf9b8ab8ff7fe0f22c049712993881d429c79f;hpb=34641a58a227e498adf471ab016bd054cc399d7e;p=linux-2.6

diff --git a/include/asm-parisc/atomic.h b/include/asm-parisc/atomic.h
index 48bf9b8ab8..57fcc4a5eb 100644
--- a/include/asm-parisc/atomic.h
+++ b/include/asm-parisc/atomic.h
@@ -58,7 +58,7 @@ extern void __xchg_called_with_bad_pointer(void);
 /* __xchg32/64 defined in arch/parisc/lib/bitops.c */
 extern unsigned long __xchg8(char, char *);
 extern unsigned long __xchg32(int, int *);
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 extern unsigned long __xchg64(unsigned long, unsigned long *);
 #endif
 
@@ -67,7 +67,7 @@ static __inline__ unsigned long
 __xchg(unsigned long x, __volatile__ void * ptr, int size)
 {
 	switch(size) {
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	case 8: return __xchg64(x,(unsigned long *) ptr);
 #endif
 	case 4: return __xchg32((int) x, (int *) ptr);
@@ -81,7 +81,7 @@ __xchg(unsigned long x, __volatile__ void * ptr, int size)
 /*
 ** REVISIT - Abandoned use of LDCW in xchg() for now:
 **	o need to test sizeof(*ptr) to avoid clearing adjacent bytes
-**	o and while we are at it, could __LP64__ code use LDCD too?
+**	o and while we are at it, could CONFIG_64BIT code use LDCD too?
 **
 **	if (__builtin_constant_p(x) && (x == NULL))
 **		if (((unsigned long)p & 0xf) == 0)
@@ -105,7 +105,7 @@ static __inline__ unsigned long
 __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
 {
 	switch(size) {
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 	case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
 #endif
 	case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int) old, (unsigned int) new_);
@@ -122,6 +122,39 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
 				    (unsigned long)_n_, sizeof(*(ptr))); \
   })
 
+#include <asm-generic/cmpxchg-local.h>
+
+static inline unsigned long __cmpxchg_local(volatile void *ptr,
+				      unsigned long old,
+				      unsigned long new_, int size)
+{
+	switch (size) {
+#ifdef CONFIG_64BIT
+	case 8:	return __cmpxchg_u64((unsigned long *)ptr, old, new_);
+#endif
+	case 4:	return __cmpxchg_u32(ptr, old, new_);
+	default:
+		return __cmpxchg_local_generic(ptr, old, new_, size);
+	}
+}
+
+/*
+ * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
+ * them available.
+ */
+#define cmpxchg_local(ptr, o, n) \
+	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
+			(unsigned long)(n), sizeof(*(ptr))))
+#ifdef CONFIG_64BIT
+#define cmpxchg64_local(ptr, o, n) \
+({ \
+	BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+	cmpxchg_local((ptr), (o), (n)); \
+})
+#else
+#define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
+#endif
+
 /* Note that we need not lock read accesses - aligned word writes/reads
  * are atomic, so a reader never sees inconsistent values.
  *
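For illustration, a minimal sketch of how the cmpxchg_local() primitive added
by the hunk above might be used; the pcpu_counter variable and
pcpu_counter_inc() helper are hypothetical, not part of this patch:

/* cmpxchg_local() is atomic only wrt the current CPU, so it suits data
 * that is never updated from other CPUs, e.g. a per-CPU statistic that
 * interrupt context on the same CPU may also touch. */
static unsigned long pcpu_counter;	/* hypothetical example variable */

static void pcpu_counter_inc(void)
{
	unsigned long old, new_;

	do {
		old = pcpu_counter;
		new_ = old + 1;
		/* retry if another context on this CPU raced with us */
	} while (cmpxchg_local(&pcpu_counter, old, new_) != old);
}

On sizes other than 4 (and 8 on CONFIG_64BIT), __cmpxchg_local falls back to
__cmpxchg_local_generic() from asm-generic/cmpxchg-local.h, which protects the
update by disabling interrupts rather than with a hardware primitive.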
@@ -163,7 +196,7 @@ static __inline__ int atomic_read(const atomic_t *v)
 }
 
 /* exported interface */
-#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
+#define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
 
 /**
@@ -175,14 +208,21 @@ static __inline__ int atomic_read(const atomic_t *v)
  * Atomically adds @a to @v, so long as it was not @u.
  * Returns non-zero if @v was not @u, and zero otherwise.
  */
-#define atomic_add_unless(v, a, u)				\
-({								\
-	int c, old;						\
-	c = atomic_read(v);					\
-	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c) \
-		c = old;					\
-	c != (u);						\
-})
+static __inline__ int atomic_add_unless(atomic_t *v, int a, int u)
+{
+	int c, old;
+	c = atomic_read(v);
+	for (;;) {
+		if (unlikely(c == (u)))
+			break;
+		old = atomic_cmpxchg((v), c, c + (a));
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+	return c != (u);
+}
+
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
 #define atomic_add(i,v)	((void)(__atomic_add_return( ((int)i),(v))))
@@ -218,7 +258,7 @@ static __inline__ int atomic_read(const atomic_t *v)
 #define smp_mb__before_atomic_inc()	smp_mb()
 #define smp_mb__after_atomic_inc()	smp_mb()
 
-#ifdef __LP64__
+#ifdef CONFIG_64BIT
 
 typedef struct { volatile s64 counter; } atomic64_t;
 
@@ -270,7 +310,38 @@ atomic64_read(const atomic64_t *v)
 #define atomic64_dec_and_test(v)	(atomic64_dec_return(v) == 0)
 #define atomic64_sub_and_test(i,v)	(atomic64_sub_return((i),(v)) == 0)
 
-#endif /* __LP64__ */
+/* exported interface */
+#define atomic64_cmpxchg(v, o, n) \
+	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
+#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+
+/**
+ * atomic64_add_unless - add unless the number is a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, so long as it was not @u.
+ * Returns non-zero if @v was not @u, and zero otherwise.
+ */
+static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
+{
+	long c, old;
+	c = atomic64_read(v);
+	for (;;) {
+		if (unlikely(c == (u)))
+			break;
+		old = atomic64_cmpxchg((v), c, c + (a));
+		if (likely(old == c))
+			break;
+		c = old;
+	}
+	return c != (u);
+}
+
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+
+#endif /* CONFIG_64BIT */
 
 #include <asm-generic/atomic.h>
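As a usage note, a minimal sketch of the refcounting pattern that the
atomic_add_unless()/atomic_inc_not_zero() helpers converted above enable;
struct obj and obj_get() are hypothetical names, not part of this patch:

/* Take a reference only while the object is still live: the increment
 * fails (returns zero) once the count has already dropped to zero, so a
 * racing teardown never sees the object resurrected. */
struct obj {
	atomic_t refcount;	/* hypothetical example structure */
};

static int obj_get(struct obj *o)
{
	return atomic_inc_not_zero(&o->refcount);
}

On CONFIG_64BIT kernels, the newly added atomic64_inc_not_zero() supports the
same pattern for 64-bit counters.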