[PATCH] atomic_long_t & include/asm-generic/atomic.h V2
author    Christoph Lameter <clameter@engr.sgi.com>
          Fri, 6 Jan 2006 08:11:20 +0000 (00:11 -0800)
committer Linus Torvalds <torvalds@g5.osdl.org>
          Fri, 6 Jan 2006 16:33:29 +0000 (08:33 -0800)
Several counters already need 64 bit atomic variables on 64 bit platforms
(see mm_counter_t in sched.h), and we have to do ugly ifdefs to fall back
to 32 bit atomics on 32 bit platforms.

The VM statistics patch that I am working on will also make more extensive
use of atomic64.

This patch introduces a new type, atomic_long_t, by providing definitions in
asm-generic/atomic.h that work similarly to the C "long" type: it is 32 bits
on 32 bit platforms and 64 bits on 64 bit platforms.

Also cleans up the determination of mm_counter_t in sched.h.
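
For illustration, a minimal sketch (not part of the patch) of a counter built
on the new type; the names nr_events, record_event and nr_events_seen are
hypothetical:

    #include <asm/atomic.h>  /* each arch's atomic.h now pulls in asm-generic/atomic.h */

    /* 32 bits wide on 32 bit kernels, 64 bits wide on 64 bit kernels */
    static atomic_long_t nr_events = ATOMIC_LONG_INIT(0);

    static void record_event(void)
    {
            atomic_long_inc(&nr_events);
    }

    static long nr_events_seen(void)
    {
            return atomic_long_read(&nr_events);
    }

No ifdefs are needed at the usage site; the width of the counter simply
follows BITS_PER_LONG.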

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
24 files changed:
include/asm-alpha/atomic.h
include/asm-arm/atomic.h
include/asm-arm26/atomic.h
include/asm-cris/atomic.h
include/asm-frv/atomic.h
include/asm-generic/atomic.h [new file with mode: 0644]
include/asm-h8300/atomic.h
include/asm-i386/atomic.h
include/asm-ia64/atomic.h
include/asm-m32r/atomic.h
include/asm-m68k/atomic.h
include/asm-m68knommu/atomic.h
include/asm-mips/atomic.h
include/asm-parisc/atomic.h
include/asm-powerpc/atomic.h
include/asm-s390/atomic.h
include/asm-sh/atomic.h
include/asm-sh64/atomic.h
include/asm-sparc/atomic.h
include/asm-sparc64/atomic.h
include/asm-v850/atomic.h
include/asm-x86_64/atomic.h
include/asm-xtensa/atomic.h
include/linux/sched.h

diff --git a/include/asm-alpha/atomic.h b/include/asm-alpha/atomic.h
index 6183eab006d43a3c30c334a3f2d9ed334c512431..cb03bbe92cdfbf503acdcb168384de49cde5d475 100644
@@ -216,4 +216,5 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
 #define smp_mb__before_atomic_inc()    smp_mb()
 #define smp_mb__after_atomic_inc()     smp_mb()
 
+#include <asm-generic/atomic.h>
 #endif /* _ALPHA_ATOMIC_H */
diff --git a/include/asm-arm/atomic.h b/include/asm-arm/atomic.h
index d586f65c8228ee8bad32aaf945edc47ee727e731..f72b63309bc5c80b54fd0f9dc27a46f56a47c0e5 100644
@@ -205,5 +205,6 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 #define smp_mb__before_atomic_inc()    barrier()
 #define smp_mb__after_atomic_inc()     barrier()
 
+#include <asm-generic/atomic.h>
 #endif
 #endif
diff --git a/include/asm-arm26/atomic.h b/include/asm-arm26/atomic.h
index a47cadc59686237260877f1874b2c18ff0ed9421..3074b0e76343138f7ba8eed0931603f3ec58f550 100644
@@ -118,5 +118,6 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 #define smp_mb__before_atomic_inc()    barrier()
 #define smp_mb__after_atomic_inc()     barrier()
 
+#include <asm-generic/atomic.h>
 #endif
 #endif
diff --git a/include/asm-cris/atomic.h b/include/asm-cris/atomic.h
index 683b05a57d88068527bea8d99343c2425503318c..2df2c7aa19b7bd373f96ce1859edc3b169d395b8 100644
@@ -156,4 +156,5 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 #define smp_mb__before_atomic_inc()    barrier()
 #define smp_mb__after_atomic_inc()     barrier()
 
+#include <asm-generic/atomic.h>
 #endif
diff --git a/include/asm-frv/atomic.h b/include/asm-frv/atomic.h
index f6539ff569c5278abece2c6442cbab205dd80311..3f54fea2b051b65570b556891c04d527a94f47e7 100644
@@ -426,4 +426,5 @@ extern uint32_t __cmpxchg_32(uint32_t *v, uint32_t test, uint32_t new);
 })
 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
 
+#include <asm-generic/atomic.h>
 #endif /* _ASM_ATOMIC_H */
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
new file mode 100644
index 0000000..e0a28b9
--- /dev/null
@@ -0,0 +1,116 @@
+#ifndef _ASM_GENERIC_ATOMIC_H
+#define _ASM_GENERIC_ATOMIC_H
+/*
+ * Copyright (C) 2005 Silicon Graphics, Inc.
+ *     Christoph Lameter <clameter@sgi.com>
+ *
+ * Allows arch-independent atomic definitions to be provided without the
+ * need to edit all the arch-specific atomic.h files.
+ */
+
+
+/*
+ * Support for atomic_long_t
+ *
+ * Casts for parameters are avoided for existing atomic functions in order to
+ * avoid issues with cast-as-lval under gcc 4.x and other limitations that the
+ * macros of a platform may have.
+ */
+
+#if BITS_PER_LONG == 64
+
+typedef atomic64_t atomic_long_t;
+
+#define ATOMIC_LONG_INIT(i)    ATOMIC64_INIT(i)
+
+static inline long atomic_long_read(atomic_long_t *l)
+{
+       atomic64_t *v = (atomic64_t *)l;
+
+       return (long)atomic64_read(v);
+}
+
+static inline void atomic_long_set(atomic_long_t *l, long i)
+{
+       atomic64_t *v = (atomic64_t *)l;
+
+       atomic64_set(v, i);
+}
+
+static inline void atomic_long_inc(atomic_long_t *l)
+{
+       atomic64_t *v = (atomic64_t *)l;
+
+       atomic64_inc(v);
+}
+
+static inline void atomic_long_dec(atomic_long_t *l)
+{
+       atomic64_t *v = (atomic64_t *)l;
+
+       atomic64_dec(v);
+}
+
+static inline void atomic_long_add(long i, atomic_long_t *l)
+{
+       atomic64_t *v = (atomic64_t *)l;
+
+       atomic64_add(i, v);
+}
+
+static inline void atomic_long_sub(long i, atomic_long_t *l)
+{
+       atomic64_t *v = (atomic64_t *)l;
+
+       atomic64_sub(i, v);
+}
+
+#else
+
+typedef atomic_t atomic_long_t;
+
+#define ATOMIC_LONG_INIT(i)    ATOMIC_INIT(i)
+static inline long atomic_long_read(atomic_long_t *l)
+{
+       atomic_t *v = (atomic_t *)l;
+
+       return (long)atomic_read(v);
+}
+
+static inline void atomic_long_set(atomic_long_t *l, long i)
+{
+       atomic_t *v = (atomic_t *)l;
+
+       atomic_set(v, i);
+}
+
+static inline void atomic_long_inc(atomic_long_t *l)
+{
+       atomic_t *v = (atomic_t *)l;
+
+       atomic_inc(v);
+}
+
+static inline void atomic_long_dec(atomic_long_t *l)
+{
+       atomic_t *v = (atomic_t *)l;
+
+       atomic_dec(v);
+}
+
+static inline void atomic_long_add(long i, atomic_long_t *l)
+{
+       atomic_t *v = (atomic_t *)l;
+
+       atomic_add(i, v);
+}
+
+static inline void atomic_long_sub(long i, atomic_long_t *l)
+{
+       atomic_t *v = (atomic_t *)l;
+
+       atomic_sub(i, v);
+}
+
+#endif
+#endif
diff --git a/include/asm-h8300/atomic.h b/include/asm-h8300/atomic.h
index f23d86819ea82b83fb75bcce32516b78817524e0..d891541e89c35f09392aa07aa6bbe37f505174b6 100644
@@ -137,4 +137,5 @@ static __inline__ void atomic_set_mask(unsigned long mask, unsigned long *v)
 #define smp_mb__before_atomic_inc()    barrier()
 #define smp_mb__after_atomic_inc() barrier()
 
+#include <asm-generic/atomic.h>
 #endif /* __ARCH_H8300_ATOMIC __ */
diff --git a/include/asm-i386/atomic.h b/include/asm-i386/atomic.h
index c68557aa04b2b0ef9246086f6d52b156c72966c0..7a5472d77091beafea340e772861c0c73b15f02d 100644
@@ -254,4 +254,5 @@ __asm__ __volatile__(LOCK "orl %0,%1" \
 #define smp_mb__before_atomic_inc()    barrier()
 #define smp_mb__after_atomic_inc()     barrier()
 
+#include <asm-generic/atomic.h>
 #endif
diff --git a/include/asm-ia64/atomic.h b/include/asm-ia64/atomic.h
index 2fbebf85c31d55a1b1ac8b5f7d69a62f5c554860..15cf7984c48e92849058e5f8c5a006aa1ff312c3 100644
@@ -192,4 +192,5 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
 #define smp_mb__before_atomic_inc()    barrier()
 #define smp_mb__after_atomic_inc()     barrier()
 
+#include <asm-generic/atomic.h>
 #endif /* _ASM_IA64_ATOMIC_H */
diff --git a/include/asm-m32r/atomic.h b/include/asm-m32r/atomic.h
index ef1fb8ea4726b0c7b3aea02db8d0fc40d4ee6cdf..70761278b6cb95b1364308b32154c95a37c85dcf 100644
@@ -313,4 +313,5 @@ static __inline__ void atomic_set_mask(unsigned long  mask, atomic_t *addr)
 #define smp_mb__before_atomic_inc()    barrier()
 #define smp_mb__after_atomic_inc()     barrier()
 
+#include <asm-generic/atomic.h>
 #endif /* _ASM_M32R_ATOMIC_H */
diff --git a/include/asm-m68k/atomic.h b/include/asm-m68k/atomic.h
index e3c962eeabf3d37871d24e87638fcc8005bf3048..b8a4e75d679d810cd2948a71932851d8b09ee195 100644
@@ -157,4 +157,5 @@ static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
 #define smp_mb__before_atomic_inc()    barrier()
 #define smp_mb__after_atomic_inc()     barrier()
 
+#include <asm-generic/atomic.h>
 #endif /* __ARCH_M68K_ATOMIC __ */
diff --git a/include/asm-m68knommu/atomic.h b/include/asm-m68knommu/atomic.h
index 3c1cc153c4154c233fc05dc6f0f0b8f7f3b8e9f9..1702dbe9318c12c774b81439f6c32075ea99487d 100644
@@ -143,4 +143,5 @@ static inline int atomic_sub_return(int i, atomic_t * v)
 #define atomic_dec_return(v) atomic_sub_return(1,(v))
 #define atomic_inc_return(v) atomic_add_return(1,(v))
 
+#include <asm-generic/atomic.h>
 #endif /* __ARCH_M68KNOMMU_ATOMIC __ */
diff --git a/include/asm-mips/atomic.h b/include/asm-mips/atomic.h
index 55c37c106ef00a364a532bb966019ddda9fbcd9e..92256e43a938e5c637904ddd7c3c77fd440293fc 100644
@@ -713,4 +713,5 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
 #define smp_mb__before_atomic_inc()    smp_mb()
 #define smp_mb__after_atomic_inc()     smp_mb()
 
+#include <asm-generic/atomic.h>
 #endif /* _ASM_ATOMIC_H */
diff --git a/include/asm-parisc/atomic.h b/include/asm-parisc/atomic.h
index 983e9a2b6042858b492d8dbde8b97b9d5f96b701..64ebd086c40d231f4d7eb0e39c10f957cc69b50c 100644
@@ -216,4 +216,5 @@ static __inline__ int atomic_read(const atomic_t *v)
 #define smp_mb__before_atomic_inc()    smp_mb()
 #define smp_mb__after_atomic_inc()     smp_mb()
 
+#include <asm-generic/atomic.h>
 #endif
diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h
index ec4b14468959e295dc29ebbd0b212b6899597f70..ae395a0632a60893626efcf527a1b64bef8a6cd3 100644
@@ -402,5 +402,6 @@ static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
 
 #endif /* __powerpc64__ */
 
+#include <asm-generic/atomic.h>
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_ATOMIC_H_ */
diff --git a/include/asm-s390/atomic.h b/include/asm-s390/atomic.h
index b3bd4f679f727d2c3bc24293d845e6f6166ff471..6d07c7df4b4000bc8e1faddebddfd0551cc139c9 100644
@@ -215,5 +215,6 @@ atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v)
 #define smp_mb__before_atomic_inc()    smp_mb()
 #define smp_mb__after_atomic_inc()     smp_mb()
 
+#include <asm-generic/atomic.h>
 #endif /* __KERNEL__ */
 #endif /* __ARCH_S390_ATOMIC__  */
diff --git a/include/asm-sh/atomic.h b/include/asm-sh/atomic.h
index aabfd334462c178bbba76327a86a48a1efef70ab..618d8e0de3480e7b4c8f8610c3d8a35628846bbf 100644
@@ -140,4 +140,5 @@ static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
 #define smp_mb__before_atomic_inc()    barrier()
 #define smp_mb__after_atomic_inc()     barrier()
 
+#include <asm-generic/atomic.h>
 #endif /* __ASM_SH_ATOMIC_H */
diff --git a/include/asm-sh64/atomic.h b/include/asm-sh64/atomic.h
index 927a2bc27b3086140bea65c7345f49b2f97bd995..f3ce5c0df13a242549f3f052cfa7aed79b341105 100644
@@ -152,4 +152,5 @@ static __inline__ void atomic_set_mask(unsigned int mask, atomic_t *v)
 #define smp_mb__before_atomic_inc()    barrier()
 #define smp_mb__after_atomic_inc()     barrier()
 
+#include <asm-generic/atomic.h>
 #endif /* __ASM_SH64_ATOMIC_H */
diff --git a/include/asm-sparc/atomic.h b/include/asm-sparc/atomic.h
index 62bec7ad271c496e81a8f2d95d4f4377cb16c952..accb4967e9d2a82b5f5c8e49edf59894f742b496 100644
@@ -159,4 +159,5 @@ static inline int __atomic24_sub(int i, atomic24_t *v)
 
 #endif /* !(__KERNEL__) */
 
+#include <asm-generic/atomic.h>
 #endif /* !(__ARCH_SPARC_ATOMIC__) */
diff --git a/include/asm-sparc64/atomic.h b/include/asm-sparc64/atomic.h
index 3789fe3159923ef5bbe8bc6bad873b5b851bf78d..11f5aa5d108c33e287da725893c856df9b915e64 100644
@@ -96,4 +96,5 @@ extern int atomic64_sub_ret(int, atomic64_t *);
 #define smp_mb__after_atomic_inc()     barrier()
 #endif
 
+#include <asm-generic/atomic.h>
 #endif /* !(__ARCH_SPARC64_ATOMIC__) */
diff --git a/include/asm-v850/atomic.h b/include/asm-v850/atomic.h
index bede3172ce7f7e999e202fde6b04d31a6e281478..f5b9ab6f4e70da97b847db5e910c5956a63a9acd 100644
@@ -126,4 +126,5 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
 #define smp_mb__before_atomic_inc()    barrier()
 #define smp_mb__after_atomic_inc()     barrier()
 
+#include <asm-generic/atomic.h>
 #endif /* __V850_ATOMIC_H__ */
diff --git a/include/asm-x86_64/atomic.h b/include/asm-x86_64/atomic.h
index 50db9f39274f8d1994844b58094ebdf1e16da570..72eb071488c77e98130d8ddb326c18850c8112c3 100644
@@ -424,4 +424,5 @@ __asm__ __volatile__(LOCK "orl %0,%1" \
 #define smp_mb__before_atomic_inc()    barrier()
 #define smp_mb__after_atomic_inc()     barrier()
 
+#include <asm-generic/atomic.h>
 #endif
diff --git a/include/asm-xtensa/atomic.h b/include/asm-xtensa/atomic.h
index 3670cc7695dad79d3326323e8c974abf417851f6..e2ce06b101ad0096df8894faf7b360be4d66b2ad 100644
@@ -286,6 +286,7 @@ static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
 #define smp_mb__before_atomic_inc()    barrier()
 #define smp_mb__after_atomic_inc()     barrier()
 
+#include <asm-generic/atomic.h>
 #endif /* __KERNEL__ */
 
 #endif /* _XTENSA_ATOMIC_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b0ad6f30679eac4cd128f924bd8b1010b4c75be1..7da33619d5d02c78a8f86be4c2e26ca55f3801c2 100644
@@ -254,25 +254,12 @@ extern void arch_unmap_area_topdown(struct mm_struct *, unsigned long);
  * The mm counters are not protected by its page_table_lock,
  * so must be incremented atomically.
  */
-#ifdef ATOMIC64_INIT
-#define set_mm_counter(mm, member, value) atomic64_set(&(mm)->_##member, value)
-#define get_mm_counter(mm, member) ((unsigned long)atomic64_read(&(mm)->_##member))
-#define add_mm_counter(mm, member, value) atomic64_add(value, &(mm)->_##member)
-#define inc_mm_counter(mm, member) atomic64_inc(&(mm)->_##member)
-#define dec_mm_counter(mm, member) atomic64_dec(&(mm)->_##member)
-typedef atomic64_t mm_counter_t;
-#else /* !ATOMIC64_INIT */
-/*
- * The counters wrap back to 0 at 2^32 * PAGE_SIZE,
- * that is, at 16TB if using 4kB page size.
- */
-#define set_mm_counter(mm, member, value) atomic_set(&(mm)->_##member, value)
-#define get_mm_counter(mm, member) ((unsigned long)atomic_read(&(mm)->_##member))
-#define add_mm_counter(mm, member, value) atomic_add(value, &(mm)->_##member)
-#define inc_mm_counter(mm, member) atomic_inc(&(mm)->_##member)
-#define dec_mm_counter(mm, member) atomic_dec(&(mm)->_##member)
-typedef atomic_t mm_counter_t;
-#endif /* !ATOMIC64_INIT */
+#define set_mm_counter(mm, member, value) atomic_long_set(&(mm)->_##member, value)
+#define get_mm_counter(mm, member) ((unsigned long)atomic_long_read(&(mm)->_##member))
+#define add_mm_counter(mm, member, value) atomic_long_add(value, &(mm)->_##member)
+#define inc_mm_counter(mm, member) atomic_long_inc(&(mm)->_##member)
+#define dec_mm_counter(mm, member) atomic_long_dec(&(mm)->_##member)
+typedef atomic_long_t mm_counter_t;
 
 #else  /* NR_CPUS < CONFIG_SPLIT_PTLOCK_CPUS */
 /*