#ifndef _ARCH_I386_LOCAL_H
#define _ARCH_I386_LOCAL_H

#include <linux/percpu.h>

typedef struct
{
	volatile long counter;
} local_t;

#define LOCAL_INIT(i)	{ (i) }

#define local_read(v)	((v)->counter)
#define local_set(v,i)	(((v)->counter) = (i))
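/*
 * Example (an illustrative sketch, not part of this header; the variable
 * name "pkt_count" is hypothetical): declaring, initializing, reading and
 * setting a local_t.  local_read() and local_set() are plain accesses of
 * the volatile counter; only the modify operations below are single
 * read-modify-write instructions.
 *
 *	static local_t pkt_count = LOCAL_INIT(0);
 *
 *	long n = local_read(&pkt_count);
 *	local_set(&pkt_count, 0);
 */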
static __inline__ void local_inc(local_t *v)
{
	__asm__ __volatile__(
		"incl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}

static __inline__ void local_dec(local_t *v)
{
	__asm__ __volatile__(
		"decl %0"
		:"=m" (v->counter)
		:"m" (v->counter));
}

static __inline__ void local_add(long i, local_t *v)
{
	__asm__ __volatile__(
		"addl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}

static __inline__ void local_sub(long i, local_t *v)
{
	__asm__ __volatile__(
		"subl %1,%0"
		:"=m" (v->counter)
		:"ir" (i), "m" (v->counter));
}
/* On x86, these are no better than the atomic variants. */
#define __local_inc(l)		local_inc(l)
#define __local_dec(l)		local_dec(l)
#define __local_add(i,l)	local_add((i),(l))
#define __local_sub(i,l)	local_sub((i),(l))
/* Use these for per-cpu local_t variables: on some archs they are
 * much more efficient than these naive implementations.  Note they take
 * a variable, not an address; see the example at the end of this file.
 */

/* Need to disable preemption for the cpu local counters, otherwise we
   could still access a variable of a previous CPU in a non-atomic way. */
#define cpu_local_wrap_v(v)		\
	({ local_t res__;		\
	   preempt_disable();		\
	   res__ = (v);			\
	   preempt_enable();		\
	   res__; })
#define cpu_local_wrap(v)		\
	({ preempt_disable();		\
	   v;				\
	   preempt_enable(); })
#define cpu_local_read(v)	cpu_local_wrap_v(local_read(&__get_cpu_var(v)))
#define cpu_local_set(v, i)	cpu_local_wrap(local_set(&__get_cpu_var(v), (i)))
#define cpu_local_inc(v)	cpu_local_wrap(local_inc(&__get_cpu_var(v)))
#define cpu_local_dec(v)	cpu_local_wrap(local_dec(&__get_cpu_var(v)))
#define cpu_local_add(i, v)	cpu_local_wrap(local_add((i), &__get_cpu_var(v)))
#define cpu_local_sub(i, v)	cpu_local_wrap(local_sub((i), &__get_cpu_var(v)))
#define __cpu_local_inc(v)	cpu_local_inc(v)
#define __cpu_local_dec(v)	cpu_local_dec(v)
#define __cpu_local_add(i, v)	cpu_local_add((i), (v))
#define __cpu_local_sub(i, v)	cpu_local_sub((i), (v))
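/*
 * Example of the per-cpu wrappers above (an illustrative sketch; the
 * variable name "hits" is hypothetical).  Note that, unlike local_inc(),
 * cpu_local_inc() takes the variable itself, not its address; the wrapper
 * disables preemption around the access so we cannot race with a
 * migration to another CPU.
 *
 *	static DEFINE_PER_CPU(local_t, hits) = LOCAL_INIT(0);
 *
 *	cpu_local_inc(hits);
 *	printk("this cpu: %ld\n", cpu_local_read(hits));
 */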
#endif /* _ARCH_I386_LOCAL_H */