/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000, 06 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000 Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/bug.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/cpu-features.h>
#include <asm/sgidefs.h>
#include <asm/war.h>
#if (_MIPS_SZLONG == 32)
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL		"ll	"
#define __SC		"sc	"
#elif (_MIPS_SZLONG == 64)
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL		"lld	"
#define __SC		"scd	"
#endif
/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()
/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"1:	" __LL "%0, %1		# set_bit	\n"
		"	or	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"1:	" __LL "%0, %1		# set_bit	\n"
		"	or	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqz	%0, 1b				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		*a |= mask;
		local_irq_restore(flags);
	}
}
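
/*
 * Illustrative usage sketch (not part of the original header; the bitmap and
 * bit names below are hypothetical).  Callers pass a bit number and the base
 * of an unsigned long array; set_bit() selects the containing word itself,
 * so the bitmap may span several longs:
 *
 *	static unsigned long device_flags[BITS_TO_LONGS(128)];
 *
 *	set_bit(RESET_PENDING, device_flags);    atomic read-modify-write
 *	__set_bit(RESET_PENDING, device_flags);  non-atomic variant, only if
 *	                                         the caller already serializes
 */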
/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"1:	" __LL "%0, %1		# clear_bit	\n"
		"	and	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"1:	" __LL "%0, %1		# clear_bit	\n"
		"	and	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqz	%0, 1b				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		*a &= ~mask;
		local_irq_restore(flags);
	}
}
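
/*
 * Illustrative sketch of the barrier pairing described above (not from the
 * original file; the names are hypothetical).  When clear_bit() is used as
 * an unlock/completion signal, order the preceding stores explicitly,
 * because clear_bit() itself contains no barrier:
 *
 *	req->status = REQ_DONE;            store that other CPUs will read
 *	smp_mb__before_clear_bit();        make the store visible first
 *	clear_bit(REQ_BUSY, &req->flags);  then publish "not busy"
 */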
/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqzl	%0, 1b				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2				\n"
		"	" __SC "%0, %1				\n"
		"	beqz	%0, 1b				\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		*a ^= mask;
		local_irq_restore(flags);
	}
}
/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3			\n"
		"	" __SC "%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3			\n"
		"	" __SC "%2, %1				\n"
		"	beqz	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask, retval;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a |= mask;
		local_irq_restore(flags);

		return retval;
	}
}
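
/*
 * Illustrative sketch (not from the original file; the names are
 * hypothetical).  Because test_and_set_bit() atomically returns the old
 * value and implies a memory barrier, it can serve as a simple try-lock
 * style guard:
 *
 *	if (test_and_set_bit(RESOURCE_IN_USE, &resource_flags))
 *		return -EBUSY;          someone else already owns it
 *	... use the resource ...
 *	smp_mb__before_clear_bit();
 *	clear_bit(RESOURCE_IN_USE, &resource_flags);
 */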
/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"1:	" __LL "%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3			\n"
		"	xor	%2, %3				\n"
		"	" __SC "%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"1:	" __LL "%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3			\n"
		"	xor	%2, %3				\n"
		"	" __SC "%2, %1				\n"
		"	beqz	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask, retval;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a &= ~mask;
		local_irq_restore(flags);

		return retval;
	}
}
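
/*
 * Illustrative sketch (not from the original file; the names are
 * hypothetical).  test_and_clear_bit() is useful for consuming a pending
 * flag exactly once, even if several CPUs race to service it:
 *
 *	if (test_and_clear_bit(WORK_PENDING, &unit->flags))
 *		process_work(unit);     only the CPU that saw the bit set
 *		                        gets to handle the work
 */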
/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"1:	" __LL "%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3			\n"
		"	" __SC "%2, %1				\n"
		"	beqzl	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"1:	" __LL "%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3			\n"
		"	" __SC "%2, %1				\n"
		"	beqz	%2, 1b				\n"
		"	and	%2, %0, %3			\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask, retval;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a ^= mask;
		local_irq_restore(flags);

		return retval;
	}
}
#include <asm-generic/bitops/non-atomic.h>
/*
 * Return the bit position (0..63) of the most significant 1 bit in a word
 * Returns -1 if no 1 bit exists
 */
static inline int __ilog2(unsigned long x)
{
	int lz;

	if (sizeof(x) == 4) {
		__asm__ ("clz %0, %1" : "=r" (lz) : "r" (x));

		return 31 - lz;
	}

	BUG_ON(sizeof(x) != 8);

	__asm__ ("dclz %0, %1" : "=r" (lz) : "r" (x));

	return 63 - lz;
}
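
/*
 * Worked examples (added for clarity, not in the original): clz/dclz count
 * leading zero bits, so 31 - clz(x) (or 63 - dclz(x)) is the index of the
 * most significant set bit:
 *
 *	__ilog2(1)            = 0     clz(1) = 31 on a 32-bit word
 *	__ilog2(0x80000000UL) = 31    clz = 0
 *	__ilog2(0)            = -1    clz(0) = 32, i.e. no bit set
 */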
#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __ilog2(word & -word);
}
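
/*
 * Added note (not in the original): "word & -word" isolates the lowest set
 * bit in two's complement, so __ffs() reduces to __ilog2() of that single
 * bit.  For example:
 *
 *	word = 0x28 (0b101000)  ->  word & -word = 0x08  ->  __ffs = 3
 */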
/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(int word)
{
	__asm__ ("clz %0, %1" : "=r" (word) : "r" (word));

	return 32 - word;
}
#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPS64)
static inline int fls64(__u64 word)
{
	__asm__ ("dclz %0, %1" : "=r" (word) : "r" (word));

	return 64 - word;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif
/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
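
/*
 * Added note (not in the original): ffs() uses 1-based libc numbering while
 * __ffs() above is 0-based, so for non-zero x, ffs(x) == __ffs(x) + 1, and
 * ffs(0) == 0.  E.g. ffs(0x08) = 4, __ffs(0x08) = 3.
 */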
#else

#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>
#endif /*defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) */
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */