/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H
#include <linux/compiler.h>
#include <linux/irqflags.h>
#include <linux/types.h>
#include <asm/bug.h>		/* for BUG_ON() below */
#include <asm/byteorder.h>	/* sigh ... */
#include <asm/cpu-features.h>
#include <asm/sgidefs.h>
#include <asm/war.h>		/* for R10000_LLSC_WAR */
#if (_MIPS_SZLONG == 32)
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL		"ll	"
#define __SC		"sc	"
#define cpu_to_lelongp(x) cpu_to_le32p((__u32 *) (x))
#elif (_MIPS_SZLONG == 64)
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL		"lld	"
#define __SC		"scd	"
#define cpu_to_lelongp(x) cpu_to_le64p((__u64 *) (x))
#endif
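/*
 * Worked example (illustrative, not part of the original header): with
 * _MIPS_SZLONG == 64, bit index nr = 70 selects word
 * 70 >> SZLONG_LOG == 70 >> 6 == 1 of the bitmap and bit
 * 70 & SZLONG_MASK == 70 & 63 == 6 within that word.
 */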
/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()
/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		/* branch-likely (beqzl) form works around R10000 ll/sc errata */
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqz	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		/* no ll/sc: fall back to disabling interrupts */
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		*a |= mask;
		local_irq_restore(flags);
	}
}
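/*
 * Usage sketch (illustrative only; `pending_map` is a hypothetical
 * bitmap).  Use set_bit() when other CPUs or interrupt handlers may
 * modify the same word concurrently; __set_bit() is enough when the
 * caller already serializes access to the bitmap:
 *
 *	static unsigned long pending_map[BITS_TO_LONGS(256)];
 *
 *	set_bit(42, pending_map);
 */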
/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqz	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		*a &= ~mask;
		local_irq_restore(flags);
	}
}
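/*
 * Usage sketch (illustrative; `owner_data`, `lock_word` and LOCK_BIT
 * are hypothetical names): when a set bit acts as a lock, data stores
 * made while holding it must be ordered before the releasing clear:
 *
 *	owner_data = result;
 *	smp_mb__before_clear_bit();
 *	clear_bit(LOCK_BIT, &lock_word);
 */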
/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# change_bit	\n"
		"	xor	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# change_bit	\n"
		"	xor	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqz	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		*a ^= mask;
		local_irq_restore(flags);
	}
}
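/*
 * Illustrative example (`led_state` and LED_BIT are hypothetical
 * names): atomically flip a state bit that an interrupt handler may
 * also touch:
 *
 *	change_bit(LED_BIT, &led_state);
 */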
/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqz	%2, 1b					\n"
		"	 and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a |= mask;
		local_irq_restore(flags);

		return retval;
	}
}
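/*
 * Usage sketch (illustrative; `busy_map` and `idx` are hypothetical
 * names): because the old value is returned atomically and a barrier
 * is implied, test_and_set_bit() can serve as a simple trylock; a
 * return of 0 means this caller set the bit and owns the slot:
 *
 *	if (test_and_set_bit(idx, busy_map))
 *		return -EBUSY;
 */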
/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqz	%2, 1b					\n"
		"	 and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a &= ~mask;
		local_irq_restore(flags);

		return retval;
	}
}
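/*
 * Usage sketch (illustrative; `pending_map` and process_work() are
 * hypothetical): even if several CPUs race here, only the one that
 * observes the bit as set consumes the pending work:
 *
 *	if (test_and_clear_bit(nr, pending_map))
 *		process_work(nr);
 */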
/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	push					\n"
		"	.set	noreorder				\n"
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqz	%2, 1b					\n"
		"	 and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	pop					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask, retval;
		unsigned long flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a ^= mask;
		local_irq_restore(flags);

		return retval;
	}
}
#include <asm-generic/bitops/non-atomic.h>

/*
 * Return the bit position (0..63) of the most significant 1 bit in a word
 * Returns -1 if no 1 bit exists
 */
static inline int __ilog2(unsigned long x)
{
	int lz;

	if (sizeof(x) == 4) {
		__asm__ (
		"	.set	push					\n"
		"	.set	mips32					\n"
		"	clz	%0, %1					\n"
		"	.set	pop					\n"
		: "=r" (lz)
		: "r" (x));

		return 31 - lz;
	}

	BUG_ON(sizeof(x) != 8);

	__asm__ (
	"	.set	push						\n"
	"	.set	mips64						\n"
	"	dclz	%0, %1						\n"
	"	.set	pop						\n"
	: "=r" (lz)
	: "r" (x));

	return 63 - lz;
}
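/*
 * Illustrative values (not part of the original header): clz/dclz of 0
 * return the full register width, so __ilog2(0) == 31 - 32 == -1 on a
 * 32-bit kernel; __ilog2(1) == 0 and __ilog2(8) == 3.
 */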
#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)

/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Returns 0..SZLONG-1
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return __ilog2(word & -word);
}
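/*
 * Illustrative example: `word & -word` isolates the lowest set bit in
 * two's complement, so for word == 0x58 only bit 3 survives and
 * __ffs(0x58) == 3.
 */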
/*
 * fls - find last bit set.
 * @word: The word to search
 *
 * This is defined the same way as ffs.
 * Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 */
static inline int fls(int word)
{
	__asm__ ("clz %0, %1" : "=r" (word) : "r" (word));

	return 32 - word;
}
#if defined(CONFIG_64BIT) && defined(CONFIG_CPU_MIPS64)
static inline int fls64(__u64 word)
{
	__asm__ ("dclz %0, %1" : "=r" (word) : "r" (word));

	return 64 - word;
}
#else
#include <asm-generic/bitops/fls64.h>
#endif
/*
 * ffs - find first bit set.
 * @word: The word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, therefore differs in spirit from the above ffz (man ffs).
 */
static inline int ffs(int word)
{
	if (!word)
		return 0;

	return fls(word & -word);
}
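/*
 * Illustrative comparison: ffs() is 1-based with ffs(0) == 0, while
 * __ffs() is 0-based and undefined for 0, so ffs(0x08) == 4 but
 * __ffs(0x08) == 3.
 */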
#else

#include <asm-generic/bitops/__ffs.h>
#include <asm-generic/bitops/ffs.h>
#include <asm-generic/bitops/fls.h>
#include <asm-generic/bitops/fls64.h>

#endif /* defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) */
#include <asm-generic/bitops/ffz.h>
#include <asm-generic/bitops/find.h>

#ifdef __KERNEL__

#include <asm-generic/bitops/sched.h>
#include <asm-generic/bitops/hweight.h>
#include <asm-generic/bitops/ext2-non-atomic.h>
#include <asm-generic/bitops/ext2-atomic.h>
#include <asm-generic/bitops/minix.h>
#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */