/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (c) 1994 - 1997, 1999, 2000  Ralf Baechle (ralf@gnu.org)
 * Copyright (c) 1999, 2000  Silicon Graphics, Inc.
 */
#ifndef _ASM_BITOPS_H
#define _ASM_BITOPS_H

#include <linux/config.h>
#include <linux/compiler.h>
#include <linux/types.h>
#include <asm/byteorder.h>		/* sigh ... */
#include <asm/cpu-features.h>

#if (_MIPS_SZLONG == 32)
#define SZLONG_LOG 5
#define SZLONG_MASK 31UL
#define __LL	"ll	"
#define __SC	"sc	"
#define cpu_to_lelongp(x) cpu_to_le32p((__u32 *) (x))
#elif (_MIPS_SZLONG == 64)
#define SZLONG_LOG 6
#define SZLONG_MASK 63UL
#define __LL	"lld	"
#define __SC	"scd	"
#define cpu_to_lelongp(x) cpu_to_le64p((__u64 *) (x))
#endif

#ifdef __KERNEL__

#include <asm/interrupt.h>
#include <asm/sgidefs.h>
#include <asm/war.h>

/*
 * clear_bit() doesn't provide any barrier for the compiler.
 */
#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

/*
 * Only disable interrupts for kernel-mode code, to keep usermode code
 * that dares to use kernel include files alive.
 */
#define __bi_flags			unsigned long flags
#define __bi_local_irq_save(x)		local_irq_save(x)
#define __bi_local_irq_restore(x)	local_irq_restore(x)
#else
#define __bi_flags
#define __bi_local_irq_save(x)
#define __bi_local_irq_restore(x)
#endif /* __KERNEL__ */

/*
 * set_bit - Atomically set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * This function is atomic and may not be reordered.  See __set_bit()
 * if you do not require the atomic guarantees.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# set_bit	\n"
		"	or	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqz	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a |= mask;
		__bi_local_irq_restore(flags);
	}
}

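/*
 * Usage sketch (editor's illustration, not part of the original header):
 * set_bit() is safe against concurrent bit operations on the same word,
 * e.g. marking a device flag from interrupt context.  The bit name and
 * flag word below are hypothetical:
 *
 *	#define MYDEV_IRQ_PENDING	3
 *	static unsigned long mydev_flags;
 *
 *	set_bit(MYDEV_IRQ_PENDING, &mydev_flags);
 *
 * Note that @nr indexes bits across the whole array at @addr, so
 * set_bit(65, bitmap) touches the second (64-bit) or third (32-bit) word.
 */
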
/*
 * __set_bit - Set a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 *
 * Unlike set_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __set_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

	*m |= 1UL << (nr & SZLONG_MASK);
}

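/*
 * Sketch (editor's illustration): the non-atomic variant is appropriate
 * when no concurrent access is possible, e.g. while initializing a
 * hypothetical bitmap before it is published to other CPUs:
 *
 *	unsigned long map[2] = { 0, 0 };
 *
 *	__set_bit(0, map);
 *	__set_bit(33, map);	(plain load/store, cheaper than set_bit)
 */
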
/*
 * clear_bit - Clears a bit in memory
 * @nr: Bit to clear
 * @addr: Address to start counting from
 *
 * clear_bit() is atomic and may not be reordered.  However, it does
 * not contain a memory barrier, so if it is used for locking purposes,
 * you should call smp_mb__before_clear_bit() and/or smp_mb__after_clear_bit()
 * in order to ensure changes are visible on other processors.
 */
static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
	unsigned long temp;

	if (cpu_has_llsc && R10000_LLSC_WAR) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1			# clear_bit	\n"
		"	and	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqz	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (~(1UL << (nr & SZLONG_MASK))), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a &= ~mask;
		__bi_local_irq_restore(flags);
	}
}

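/*
 * Sketch (editor's illustration): when a bit acts as a lock, order prior
 * stores before the release by pairing clear_bit() with the barrier
 * macros defined above.  Names here are hypothetical:
 *
 *	buffer->len = n;			(publish data first)
 *	smp_mb__before_clear_bit();
 *	clear_bit(MYDEV_BUSY, &mydev_flags);	(then release the bit)
 */
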
/*
 * __clear_bit - Clears a bit in memory
 * @nr: the bit to clear
 * @addr: Address to start counting from
 *
 * Unlike clear_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __clear_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

	*m &= ~(1UL << (nr & SZLONG_MASK));
}

/*
 * change_bit - Toggle a bit in memory
 * @nr: Bit to change
 * @addr: Address to start counting from
 *
 * change_bit() is atomic and may not be reordered.
 * Note that @nr may be almost arbitrarily large; this function is not
 * restricted to acting on a single-word quantity.
 */
static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqzl	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# change_bit	\n"
		"	xor	%0, %2					\n"
		"	" __SC	"%0, %1					\n"
		"	beqz	%0, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m)
		: "ir" (1UL << (nr & SZLONG_MASK)), "m" (*m));
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		*a ^= mask;
		__bi_local_irq_restore(flags);
	}
}

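/*
 * Sketch (editor's illustration): change_bit() atomically flips a bit,
 * e.g. toggling a hypothetical LED state shared with an interrupt
 * handler, without a read-modify-write race:
 *
 *	change_bit(MYDEV_LED_ON, &mydev_flags);
 */
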
/*
 * __change_bit - Toggle a bit in memory
 * @nr: the bit to change
 * @addr: the address to start counting from
 *
 * Unlike change_bit(), this function is non-atomic and may be reordered.
 * If it's called on the same region of memory simultaneously, the effect
 * may be that only one operation succeeds.
 */
static inline void __change_bit(unsigned long nr, volatile unsigned long *addr)
{
	unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);

	*m ^= 1UL << (nr & SZLONG_MASK);
}

/*
 * test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_set_bit	\n"
		"	or	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqz	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a |= mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}

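/*
 * Sketch (editor's illustration): test_and_set_bit() is the classic
 * building block for a try-lock; since it implies a memory barrier,
 * accesses inside the critical section cannot leak out of it.  The bit
 * name and flag word are hypothetical:
 *
 *	if (!test_and_set_bit(MYDEV_BUSY, &mydev_flags)) {
 *		... we own the device, do the work ...
 *		smp_mb__before_clear_bit();
 *		clear_bit(MYDEV_BUSY, &mydev_flags);
 *	} else {
 *		... somebody else got there first ...
 *	}
 */
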
/*
 * __test_and_set_bit - Set a bit and return its old value
 * @nr: Bit to set
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_set_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;

	a += nr >> SZLONG_LOG;
	mask = 1UL << (nr & SZLONG_MASK);
	retval = (mask & *a) != 0;
	*a |= mask;

	return retval;
}

/*
 * test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_clear_bit	\n"
		"	or	%2, %0, %3				\n"
		"	xor	%2, %3					\n"
		"	" __SC	"%2, %1					\n"
		"	beqz	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask;
		int retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a &= ~mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}

/*
 * __test_and_clear_bit - Clear a bit and return its old value
 * @nr: Bit to clear
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_clear_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;

	a += (nr >> SZLONG_LOG);
	mask = 1UL << (nr & SZLONG_MASK);
	retval = ((mask & *a) != 0);
	*a &= ~mask;

	return retval;
}

/*
 * test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is atomic and cannot be reordered.
 * It also implies a memory barrier.
 */
static inline int test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	if (cpu_has_llsc && R10000_LLSC_WAR) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqzl	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else if (cpu_has_llsc) {
		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
		unsigned long temp, res;

		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	" __LL "%0, %1		# test_and_change_bit	\n"
		"	xor	%2, %0, %3				\n"
		"	" __SC	"%2, %1					\n"
		"	beqz	%2, 1b					\n"
		"	and	%2, %0, %3				\n"
#ifdef CONFIG_SMP
		"	sync						\n"
#endif
		"	.set	mips0					\n"
		: "=&r" (temp), "=m" (*m), "=&r" (res)
		: "r" (1UL << (nr & SZLONG_MASK)), "m" (*m)
		: "memory");

		return res != 0;
	} else {
		volatile unsigned long *a = addr;
		unsigned long mask, retval;
		__bi_flags;

		a += nr >> SZLONG_LOG;
		mask = 1UL << (nr & SZLONG_MASK);
		__bi_local_irq_save(flags);
		retval = (mask & *a) != 0;
		*a ^= mask;
		__bi_local_irq_restore(flags);

		return retval;
	}
}

/*
 * __test_and_change_bit - Change a bit and return its old value
 * @nr: Bit to change
 * @addr: Address to count from
 *
 * This operation is non-atomic and can be reordered.
 * If two instances of this operation race, one can appear to succeed
 * but actually fail.  You must protect multiple accesses with a lock.
 */
static inline int __test_and_change_bit(unsigned long nr,
	volatile unsigned long *addr)
{
	volatile unsigned long *a = addr;
	unsigned long mask;
	int retval;

	a += (nr >> SZLONG_LOG);
	mask = 1UL << (nr & SZLONG_MASK);
	retval = ((mask & *a) != 0);
	*a ^= mask;

	return retval;
}

#undef __bi_flags
#undef __bi_local_irq_save
#undef __bi_local_irq_restore

/*
 * test_bit - Determine whether a bit is set
 * @nr: bit number to test
 * @addr: Address to start counting from
 */
static inline int test_bit(unsigned long nr, const volatile unsigned long *addr)
{
	return 1UL & (addr[nr >> SZLONG_LOG] >> (nr & SZLONG_MASK));
}

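/*
 * Sketch (editor's illustration): test_bit() only reads, so it is
 * commonly used to poll a flag set elsewhere; the name is hypothetical:
 *
 *	if (test_bit(MYDEV_IRQ_PENDING, &mydev_flags))
 *		... handle the pending work ...
 */
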
/*
 * ffz - find first zero in word.
 * @word: The word to search
 *
 * Undefined if no zero exists, so code should check against ~0UL first.
 */
static inline unsigned long ffz(unsigned long word)
{
	int b = 0, s;

	word = ~word;
#ifdef CONFIG_MIPS32
	s = 16; if (word << 16 != 0) s = 0; b += s; word >>= s;
	s =  8; if (word << 24 != 0) s = 0; b += s; word >>= s;
	s =  4; if (word << 28 != 0) s = 0; b += s; word >>= s;
	s =  2; if (word << 30 != 0) s = 0; b += s; word >>= s;
	s =  1; if (word << 31 != 0) s = 0; b += s;
#endif
#ifdef CONFIG_MIPS64
	s = 32; if (word << 32 != 0) s = 0; b += s; word >>= s;
	s = 16; if (word << 48 != 0) s = 0; b += s; word >>= s;
	s =  8; if (word << 56 != 0) s = 0; b += s; word >>= s;
	s =  4; if (word << 60 != 0) s = 0; b += s; word >>= s;
	s =  2; if (word << 62 != 0) s = 0; b += s; word >>= s;
	s =  1; if (word << 63 != 0) s = 0; b += s;
#endif

	return b;
}

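/*
 * Worked example (editor's illustration): for word = 0x0000000f the low
 * four bits are set, so the first zero is bit 4 and ffz() returns 4.
 * For word = 0 it returns 0, and for word = ~0UL the result is
 * undefined, hence the check against ~0UL demanded above.
 */
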
/*
 * __ffs - find first bit in word.
 * @word: The word to search
 *
 * Undefined if no bit exists, so code should check against 0 first.
 */
static inline unsigned long __ffs(unsigned long word)
{
	return ffz(~word);
}

/*
 * fls: find last bit set.
 */
#define fls(x) generic_fls(x)

/*
 * find_next_zero_bit - find the first zero bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static inline unsigned long find_next_zero_bit(const unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + (offset >> SZLONG_LOG);
	unsigned long result = offset & ~SZLONG_MASK;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= SZLONG_MASK;
	if (offset) {
		tmp = *(p++);
		tmp |= ~0UL >> (_MIPS_SZLONG-offset);
		if (size < _MIPS_SZLONG)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= _MIPS_SZLONG;
		result += _MIPS_SZLONG;
	}
	while (size & ~SZLONG_MASK) {
		if (~(tmp = *(p++)))
			goto found_middle;
		result += _MIPS_SZLONG;
		size -= _MIPS_SZLONG;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp |= ~0UL << size;
	if (tmp == ~0UL)		/* Are any bits zero? */
		return result + size;	/* Nope. */
found_middle:
	return result + ffz(tmp);
}

#define find_first_zero_bit(addr, size) \
	find_next_zero_bit((addr), (size), 0)

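/*
 * Sketch (editor's illustration): a typical allocation scan over a
 * hypothetical fixed-size table, claiming the first free slot (caller
 * provides locking):
 *
 *	unsigned long used[128 / _MIPS_SZLONG];
 *	unsigned long slot;
 *
 *	slot = find_first_zero_bit(used, 128);
 *	if (slot < 128)
 *		__set_bit(slot, used);
 */
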
/*
 * find_next_bit - find the next set bit in a memory region
 * @addr: The address to base the search on
 * @offset: The bitnumber to start searching at
 * @size: The maximum size to search
 */
static inline unsigned long find_next_bit(const unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	const unsigned long *p = addr + (offset >> SZLONG_LOG);
	unsigned long result = offset & ~SZLONG_MASK;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= SZLONG_MASK;
	if (offset) {
		tmp = *(p++);
		tmp &= ~0UL << offset;
		if (size < _MIPS_SZLONG)
			goto found_first;
		if (tmp)
			goto found_middle;
		size -= _MIPS_SZLONG;
		result += _MIPS_SZLONG;
	}
	while (size & ~SZLONG_MASK) {
		if ((tmp = *(p++)))
			goto found_middle;
		result += _MIPS_SZLONG;
		size -= _MIPS_SZLONG;
	}
	if (!size)
		return result;
	tmp = *p;

found_first:
	tmp &= ~0UL >> (_MIPS_SZLONG - size);
	if (tmp == 0UL)			/* Are any bits set? */
		return result + size;	/* Nope. */
found_middle:
	return result + __ffs(tmp);
}

/*
 * find_first_bit - find the first set bit in a memory region
 * @addr: The address to start the search at
 * @size: The maximum size to search
 *
 * Returns the bit-number of the first set bit, not the number of the byte
 * containing a bit.
 */
#define find_first_bit(addr, size) \
	find_next_bit((addr), (size), 0)

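/*
 * Sketch (editor's illustration): visiting every set bit in a bitmap by
 * chaining find_first_bit()/find_next_bit(); @map and @nbits are
 * hypothetical:
 *
 *	for (n = find_first_bit(map, nbits); n < nbits;
 *	     n = find_next_bit(map, nbits, n + 1)) {
 *		... process bit n ...
 *	}
 */
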
#ifdef __KERNEL__

/*
 * Every architecture must define this function.  It's the fastest
 * way of searching a 140-bit bitmap where the first 100 bits are
 * unlikely to be set.  It's guaranteed that at least one of the 140
 * bits is set.
 */
static inline int sched_find_first_bit(const unsigned long *b)
{
#ifdef CONFIG_MIPS32
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 32;
	if (unlikely(b[2]))
		return __ffs(b[2]) + 64;
	if (unlikely(b[3]))
		return __ffs(b[3]) + 96;
	return __ffs(b[4]) + 128;
#endif
#ifdef CONFIG_MIPS64
	if (unlikely(b[0]))
		return __ffs(b[0]);
	if (unlikely(b[1]))
		return __ffs(b[1]) + 64;
	return __ffs(b[2]) + 128;
#endif
}

/*
 * ffs - find first bit set
 * @x: the word to search
 *
 * This is defined the same way as the libc and compiler builtin ffs
 * routines, and therefore differs in spirit from the above ffz (man ffs).
 */
#define ffs(x) generic_ffs(x)

/*
 * hweightN - returns the Hamming weight of an N-bit word
 * @x: the word to weigh
 *
 * The Hamming weight of a number is the total number of bits set in it.
 */
#define hweight64(x) generic_hweight64(x)
#define hweight32(x) generic_hweight32(x)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x)  generic_hweight8(x)

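/*
 * Worked example (editor's illustration): hweight8(0xb3) counts the set
 * bits of 1011 0011 and yields 5; hweight32(0) is 0 and hweight32(~0u)
 * is 32.  The generic_hweight*() helpers compute this with shift/mask
 * arithmetic rather than a loop over individual bits.
 */
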
static inline int __test_and_set_le_bit(unsigned long nr, unsigned long *addr)
{
	unsigned char *ADDR = (unsigned char *) addr;
	int mask, retval;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	retval = (mask & *ADDR) != 0;
	*ADDR |= mask;

	return retval;
}

static inline int __test_and_clear_le_bit(unsigned long nr, unsigned long *addr)
{
	unsigned char *ADDR = (unsigned char *) addr;
	int mask, retval;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);
	retval = (mask & *ADDR) != 0;
	*ADDR &= ~mask;

	return retval;
}

static inline int test_le_bit(unsigned long nr, const unsigned long *addr)
{
	const unsigned char *ADDR = (const unsigned char *) addr;
	int mask;

	ADDR += nr >> 3;
	mask = 1 << (nr & 0x07);

	return ((mask & *ADDR) != 0);
}

static inline unsigned long find_next_zero_le_bit(unsigned long *addr,
	unsigned long size, unsigned long offset)
{
	unsigned long *p = ((unsigned long *) addr) + (offset >> SZLONG_LOG);
	unsigned long result = offset & ~SZLONG_MASK;
	unsigned long tmp;

	if (offset >= size)
		return size;
	size -= result;
	offset &= SZLONG_MASK;
	if (offset) {
		tmp = cpu_to_lelongp(p++);
		tmp |= ~0UL >> (_MIPS_SZLONG-offset); /* bug or feature ? */
		if (size < _MIPS_SZLONG)
			goto found_first;
		if (~tmp)
			goto found_middle;
		size -= _MIPS_SZLONG;
		result += _MIPS_SZLONG;
	}
	while (size & ~SZLONG_MASK) {
		if (~(tmp = cpu_to_lelongp(p++)))
			goto found_middle;
		result += _MIPS_SZLONG;
		size -= _MIPS_SZLONG;
	}
	if (!size)
		return result;
	tmp = cpu_to_lelongp(p);

found_first:
	tmp |= ~0UL << size;
	if (tmp == ~0UL)		/* Are any bits zero? */
		return result + size;	/* Nope. */
found_middle:
	return result + ffz(tmp);
}

#define find_first_zero_le_bit(addr, size) \
	find_next_zero_le_bit((addr), (size), 0)

#define ext2_set_bit(nr,addr) \
	__test_and_set_le_bit((nr),(unsigned long*)addr)
#define ext2_clear_bit(nr, addr) \
	__test_and_clear_le_bit((nr),(unsigned long*)addr)
#define ext2_set_bit_atomic(lock, nr, addr)		\
({							\
	int ret;					\
	spin_lock(lock);				\
	ret = ext2_set_bit((nr), (addr));		\
	spin_unlock(lock);				\
	ret;						\
})
#define ext2_clear_bit_atomic(lock, nr, addr)		\
({							\
	int ret;					\
	spin_lock(lock);				\
	ret = ext2_clear_bit((nr), (addr));		\
	spin_unlock(lock);				\
	ret;						\
})
#define ext2_test_bit(nr, addr) test_le_bit((nr),(unsigned long*)addr)
#define ext2_find_first_zero_bit(addr, size) \
	find_first_zero_le_bit((unsigned long*)addr, size)
#define ext2_find_next_zero_bit(addr, size, off) \
	find_next_zero_le_bit((unsigned long*)addr, size, off)

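/*
 * Note (editor's illustration): the ext2_*() ops use little-endian bit
 * numbering, i.e. bit @nr lives in byte (nr >> 3) at position (nr & 7)
 * regardless of host endianness, so on-disk bitmaps stay portable.  On a
 * big-endian MIPS kernel, ext2_set_bit(0, map) and set_bit(0, map) can
 * therefore touch different bits of the same long word.
 */
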
/*
 * Bitmap functions for the minix filesystem.
 *
 * FIXME: These assume that Minix uses the native byte/bitorder.
 * This severely limits the Minix filesystem's value for data exchange.
 */
#define minix_test_and_set_bit(nr,addr) test_and_set_bit(nr,addr)
#define minix_set_bit(nr,addr) set_bit(nr,addr)
#define minix_test_and_clear_bit(nr,addr) test_and_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) find_first_zero_bit(addr,size)

#endif /* __KERNEL__ */

#endif /* _ASM_BITOPS_H */