/* spinlock.h: 64-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC64_SPINLOCK_H
#define __SPARC64_SPINLOCK_H

#include <linux/config.h>
#include <linux/threads.h>	/* For NR_CPUS */

#ifndef __ASSEMBLY__

/* To get debugging spinlocks which detect and catch
 * deadlock situations, set CONFIG_DEBUG_SPINLOCK
 * and rebuild your kernel.
 */

/* All of these locking primitives are expected to work properly
 * even in an RMO memory model, which currently is what the kernel
 * runs in.
 *
 * There is another issue.  Because we play games to save cycles
 * in the non-contention case, we need to be extra careful about
 * branch targets into the "spinning" code.  They live in their
 * own section, but the newer V9 branches have a shorter range
 * than the traditional 32-bit sparc branch variants.  The rule
 * is that the branches that go into and out of the spinner sections
 * must be pre-V9 branches.
 */
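
/* Lock word convention: a raw_spinlock_t holds a single lock byte
 * which is zero when the lock is free.  __raw_spin_lock() sets it
 * non-zero with ldstub (which stores 0xff), and __raw_spin_unlock()
 * clears it again by storing %g0.
 */
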
#define __raw_spin_is_locked(lp)	((lp)->lock != 0)

#define __raw_spin_unlock_wait(lp)	\
	do {	rmb();			\
	} while((lp)->lock)
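
/* Grab the lock with ldstub, an atomic test-and-set of the lock byte
 * to 0xff.  If the old value was non-zero somebody else holds the
 * lock, and we fall into the out-of-line spinner in .subsection 2,
 * which re-reads the byte with a plain ldub until it goes clear
 * before retrying the atomic.  Spinning on the plain load keeps the
 * cache line shared while we wait.
 */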
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldstub		[%1], %0\n"
"	membar		#StoreLoad | #StoreStore\n"
"	brnz,pn		%0, 2f\n"
"	 nop\n"
"	.subsection	2\n"
"2:	ldub		[%1], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 1b\n"
"	.previous"
	: "=&r" (tmp)
	: "r" (lock)
	: "memory");
}
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	unsigned long result;

	__asm__ __volatile__(
"	ldstub		[%1], %0\n"
"	membar		#StoreLoad | #StoreStore"
	: "=r" (result)
	: "r" (lock)
	: "memory");

	return (result == 0UL);
}
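
/* Release: the membar orders all loads and stores in the critical
 * section before the store that clears the lock byte.
 */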
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
"	membar		#StoreStore | #LoadStore\n"
"	stb		%%g0, [%0]"
	: /* No outputs */
	: "r" (lock)
	: "memory");
}
static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"1:	ldstub		[%2], %0\n"
"	membar		#StoreLoad | #StoreStore\n"
"	brnz,pn		%0, 2f\n"
"	 nop\n"
"	.subsection	2\n"
"2:	rdpr		%%pil, %1\n"
"	wrpr		%3, %%pil\n"
"3:	ldub		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 3b\n"
"	 nop\n"
"	ba,pt		%%xcc, 1b\n"
"	 wrpr		%1, %%pil\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r"(lock), "r"(flags)
	: "memory");
}

/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
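
/* The rwlock word is read as a signed 32-bit counter: bit 31 is the
 * writer bit (the 0x80000000UL mask below), the low bits count the
 * readers.  A reader loads the word with ldsw; a negative value means
 * a writer holds the lock, so we spin out of line until the sign bit
 * clears, then try to bump the count with cas.
 */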
static inline void __read_lock(raw_rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__ (
"1:	ldsw		[%2], %0\n"
"	brlz,pn		%0, 2f\n"
"4:	 add		%0, 1, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	.subsection	2\n"
"2:	ldsw		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brlz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}
static inline void __read_unlock(raw_rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	membar	#StoreLoad | #LoadLoad\n"
"1:	lduw	[%2], %0\n"
"	sub	%0, 1, %1\n"
"	cas	[%2], %0, %1\n"
"	cmp	%0, %1\n"
"	bne,pn	%%xcc, 1b\n"
"	 nop"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}
static inline void __write_lock(raw_rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"1:	lduw		[%2], %0\n"
"	brnz,pn		%0, 2f\n"
"4:	 or		%0, %3, %1\n"
"	cas		[%2], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	.subsection	2\n"
"2:	lduw		[%2], %0\n"
"	membar		#LoadLoad\n"
"	brnz,pt		%0, 2b\n"
"	 nop\n"
"	ba,a,pt		%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock), "r" (mask)
	: "memory");
}
static inline void __write_unlock(raw_rwlock_t *lock)
{
	__asm__ __volatile__(
"	membar		#LoadStore | #StoreStore\n"
"	stw		%%g0, [%0]"
	: /* No outputs */
	: "r" (lock)
	: "memory");
}
static inline int __write_trylock(raw_rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2, result;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"	mov		0, %2\n"
"1:	lduw		[%3], %0\n"
"	brnz,pn		%0, 2f\n"
"	 or		%0, %4, %1\n"
"	cas		[%3], %0, %1\n"
"	cmp		%0, %1\n"
"	membar		#StoreLoad | #StoreStore\n"
"	bne,pn		%%icc, 1b\n"
"	 nop\n"
"	mov		1, %2\n"
"2:"
	: "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
	: "r" (lock), "r" (mask)
	: "memory");

	return result;
}

#define __raw_read_lock(p)	__read_lock(p)
#define __raw_read_unlock(p)	__read_unlock(p)
#define __raw_write_lock(p)	__write_lock(p)
#define __raw_write_unlock(p)	__write_unlock(p)
#define __raw_write_trylock(p)	__write_trylock(p)

#define __raw_read_trylock(lock)	generic__raw_read_trylock(lock)
#define __raw_read_can_lock(rw)		(!((rw)->lock & 0x80000000UL))
#define __raw_write_can_lock(rw)	(!(rw)->lock)

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_SPINLOCK_H) */