/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000, 06  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/barrier.h>
#include <asm/bug.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/cpu-features.h>
#include <asm/sgidefs.h>
#include <asm/war.h>
#if (_MIPS_SZLONG == 32)
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL		"ll	"
#define __SC		"sc	"
#elif (_MIPS_SZLONG == 64)
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL		"lld	"
#define __SC		"scd	"
#endif
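
/*
 * A bit number is split into a word index and an offset inside that word.
 * Worked example (illustrative only; 64-bit kernel, so SZLONG_LOG == 6 and
 * SZLONG_MASK == 63): for nr = 70,
 *
 *	nr >> SZLONG_LOG  ==  1		word index: addr[1]
 *	nr & SZLONG_MASK  ==  6		bit 6 within that word
 */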

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqz	%0, 2f					\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	.previous					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		*a |= mask;
		local_irq_restore(flags);
	}
}
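
/*
 * The ll/sc loop above retries until the store-conditional succeeds,
 * i.e. until no other CPU modified the word between the load-linked
 * and the store-conditional.  Usage sketch (hypothetical bitmap, not
 * part of this header):
 *
 *	static unsigned long pending[BITS_TO_LONGS(64)];
 *
 *	set_bit(17, pending);	// atomically mark source 17 pending
 */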

/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqz	%0, 2f					\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	.previous					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		*a &= ~mask;
		local_irq_restore(flags);
	}
}
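
/*
 * Sketch of the barrier pattern described above when clear_bit() acts
 * as an unlock operation (LOCK_BIT and lockword are hypothetical names,
 * illustrative only):
 *
 *	// ... critical section stores ...
 *	smp_mb__before_clear_bit();	// make prior stores visible first
 *	clear_bit(LOCK_BIT, &lockword);	// then release the bit lock
 */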

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqz	%0, 2f					\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	.previous					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		*a ^= mask;
		local_irq_restore(flags);
	}
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqz	%2, 2f					\n"
		"	 and	%2, %0, %3				\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a |= mask;
		local_irq_restore(flags);

		return retval;
	}
}
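
/*
 * Because the old value is returned, test_and_set_bit() can implement
 * a simple try-lock.  Hedged sketch (busy_word is a hypothetical name):
 *
 *	if (!test_and_set_bit(0, &busy_word)) {
 *		// bit was clear before: we now own the resource, and the
 *		// implied barrier orders subsequent accesses after this
 *	}
 */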

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqz	%2, 2f					\n"
		"	 and	%2, %0, %3				\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a &= ~mask;
		local_irq_restore(flags);

		return retval;
	}
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqz	%2, 2f					\n"
		"	 and	%2, %0, %3				\n"
		"	.subsection 2					\n"
		"2:	b	1b					\n"
		"	 nop						\n"
		"	.previous					\n"
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask, retval;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a ^= mask;
		local_irq_restore(flags);

		return retval;
	}
}

#include <asm-generic/bitops/non-atomic.h>

/*
 * Return the bit position (0..63) of the most significant 1 bit in a word
 * Returns -1 if no 1 bit exists
 */
static inline int __ilog2(unsigned long x)
{
	int lz;

	if (sizeof(x) == 4) {
		__asm__ (
		"	.set	push					\n"
		"	.set	mips32					\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (lz)
		: "r" (x));

		return 31 - lz;
	}

	BUG_ON(sizeof(x) != 8);

	__asm__ (
	"	.set	push						\n"
	"	.set	mips64						\n"
	"	dclz	%0, %1						\n"
	"	.set	pop						\n"
	: "=r" (lz)
	: "r" (x));

	return 63 - lz;
}
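
/*
 * Worked example: x = 0x50 (binary 0101 0000) has 25 leading zeroes in
 * 32 bits, so clz returns 25 and __ilog2() yields 31 - 25 = 6, the
 * position of the most significant 1 bit.  For x = 0, clz returns 32
 * and the result is 31 - 32 = -1, as documented above.
 */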

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __ilog2(word & -word);
}
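
/*
 * (word & -word) isolates the lowest set bit in two's complement.
 * Worked example: word = 0b1100 gives word & -word == 0b0100, and
 * __ilog2(0b0100) == 2, so __ffs(0b1100) == 2.
 */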

/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(int word)
{
	__asm__ ("clz %0, %1" : "=r" (word) : "r" (word));

	return 32 - word;
}

#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPS64)
static inline int fls64(__u64 word)
{
	__asm__ ("dclz %0, %1" : "=r" (word) : "r" (word));

	return 64 - word;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif

/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as
 * the libc and compiler builtin ffs routines, therefore
 * differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
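
/*
 * Worked example: ffs(0x18) isolates the lowest set bit, 0x18 & -0x18
 * == 0x08, and fls(0x08) == 4, matching the 1-based libc convention
 * (bit 3 set, so ffs returns 4).
 */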

#else

#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) */

#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix.h>

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */