/*
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 */
#ifndef _ASM_POWERPC_SYSTEM_H
#define _ASM_POWERPC_SYSTEM_H

#include <linux/config.h>
#include <linux/kernel.h>

#include <asm/hw_irq.h>
#include <asm/ppc_asm.h>

/*
 * Memory barrier.
 * The sync instruction guarantees that all memory accesses initiated
 * by this processor have been performed (with respect to all other
 * mechanisms that access memory).  The eieio instruction is a barrier
 * providing an ordering (separately) for (a) cacheable stores and (b)
 * loads and stores to non-cacheable memory (e.g. I/O devices).
 *
 * mb() prevents loads and stores being reordered across this point.
 * rmb() prevents loads being reordered across this point.
 * wmb() prevents stores being reordered across this point.
 * read_barrier_depends() prevents data-dependent loads being reordered
 *	across this point (nop on PPC).
 *
 * We have to use the sync instructions for mb(), since lwsync doesn't
 * order loads with respect to previous stores.  Lwsync is fine for
 * rmb(), though.  Note that lwsync is interpreted as sync by
 * 32-bit and older 64-bit CPUs.
 *
 * For wmb(), we use sync since wmb is used in drivers to order
 * stores to system memory with respect to writes to the device.
 * However, smp_wmb() can be a lighter-weight eieio barrier on
 * SMP since it is only used to order updates to system memory.
 */
#define mb()	__asm__ __volatile__ ("sync" : : : "memory")
#define rmb()	__asm__ __volatile__ ("lwsync" : : : "memory")
#define wmb()	__asm__ __volatile__ ("sync" : : : "memory")
#define read_barrier_depends()	do { } while(0)

#define set_mb(var, value)	do { var = value; mb(); } while (0)
#define set_wmb(var, value)	do { var = value; wmb(); } while (0)

#ifdef CONFIG_SMP
#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	__asm__ __volatile__ ("eieio" : : : "memory")
#define smp_read_barrier_depends()	read_barrier_depends()
#else
#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)
#endif /* CONFIG_SMP */
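
/*
 * Illustrative sketch (not part of this header): the classic
 * producer/consumer pairing of smp_wmb() and smp_rmb().  The names
 * example_data and example_flag are hypothetical.
 */
#if 0
static int example_data;
static int example_flag;

static void example_producer(void)
{
	example_data = 42;	/* write the payload... */
	smp_wmb();		/* ...and order it before the flag update */
	example_flag = 1;	/* publish */
}

static void example_consumer(void)
{
	while (!example_flag)	/* wait for the producer to publish */
		barrier();	/* force the compiler to re-read the flag */
	smp_rmb();		/* order the flag read before the data read */
	/* example_data now reads as 42 */
}
#endif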

#ifdef __KERNEL__
struct task_struct;
struct pt_regs;

#ifdef CONFIG_DEBUGGER

extern int (*__debugger)(struct pt_regs *regs);
extern int (*__debugger_ipi)(struct pt_regs *regs);
extern int (*__debugger_bpt)(struct pt_regs *regs);
extern int (*__debugger_sstep)(struct pt_regs *regs);
extern int (*__debugger_iabr_match)(struct pt_regs *regs);
extern int (*__debugger_dabr_match)(struct pt_regs *regs);
extern int (*__debugger_fault_handler)(struct pt_regs *regs);

#define DEBUGGER_BOILERPLATE(__NAME) \
static inline int __NAME(struct pt_regs *regs) \
{ \
	if (unlikely(__ ## __NAME)) \
		return __ ## __NAME(regs); \
	return 0; \
}

DEBUGGER_BOILERPLATE(debugger)
DEBUGGER_BOILERPLATE(debugger_ipi)
DEBUGGER_BOILERPLATE(debugger_bpt)
DEBUGGER_BOILERPLATE(debugger_sstep)
DEBUGGER_BOILERPLATE(debugger_iabr_match)
DEBUGGER_BOILERPLATE(debugger_dabr_match)
DEBUGGER_BOILERPLATE(debugger_fault_handler)
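
/*
 * For reference, DEBUGGER_BOILERPLATE(debugger) expands to:
 *
 *	static inline int debugger(struct pt_regs *regs)
 *	{
 *		if (unlikely(__debugger))
 *			return __debugger(regs);
 *		return 0;
 *	}
 *
 * i.e. each wrapper calls through its hook pointer when a debugger has
 * installed one, and returns 0 otherwise.
 */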

#ifdef CONFIG_XMON
extern void xmon_init(int enable);
#endif

#else
static inline int debugger(struct pt_regs *regs) { return 0; }
static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
#endif

extern int set_dabr(unsigned long dabr);
extern void print_backtrace(unsigned long *);
extern void show_regs(struct pt_regs * regs);
extern void flush_instruction_cache(void);
extern void hard_reset_now(void);
extern void poweroff_now(void);

#ifdef CONFIG_6xx
extern long _get_L2CR(void);
extern long _get_L3CR(void);
extern void _set_L2CR(unsigned long);
extern void _set_L3CR(unsigned long);
#else
#define _get_L2CR()	0L
#define _get_L3CR()	0L
#define _set_L2CR(val)	do { } while(0)
#define _set_L3CR(val)	do { } while(0)
#endif

extern void via_cuda_init(void);
extern void pmac_nvram_init(void);
extern void read_rtc_time(void);
extern void pmac_find_display(void);
extern void giveup_fpu(struct task_struct *);
extern void enable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void enable_kernel_altivec(void);
extern void giveup_altivec(struct task_struct *);
extern void load_up_altivec(struct task_struct *);
extern void giveup_spe(struct task_struct *);
extern void load_up_spe(struct task_struct *);
extern int fix_alignment(struct pt_regs *);
extern void cvt_fd(float *from, double *to, unsigned long *fpscr);
extern void cvt_df(double *from, float *to, unsigned long *fpscr);

#ifdef CONFIG_ALTIVEC
extern void flush_altivec_to_thread(struct task_struct *);
#else
static inline void flush_altivec_to_thread(struct task_struct *t)
{
}
#endif

#ifdef CONFIG_SPE
extern void flush_spe_to_thread(struct task_struct *);
#else
static inline void flush_spe_to_thread(struct task_struct *t)
{
}
#endif

extern int call_rtas(const char *, int, int, unsigned long *, ...);
extern void cacheable_memzero(void *p, unsigned int nb);
extern void *cacheable_memcpy(void *, const void *, unsigned int);
extern int do_page_fault(struct pt_regs *, unsigned long, unsigned long);
extern void bad_page_fault(struct pt_regs *, unsigned long, int);
extern int die(const char *, struct pt_regs *, long);
extern void _exception(int, struct pt_regs *, int, unsigned long);
#ifdef CONFIG_BOOKE_WDT
extern u32 booke_wdt_enabled;
extern u32 booke_wdt_period;
#endif /* CONFIG_BOOKE_WDT */

/* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */
extern unsigned char e2a(unsigned char);

struct device_node;
extern void note_scsi_host(struct device_node *, void *);

extern struct task_struct *__switch_to(struct task_struct *,
	struct task_struct *);
#define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))

struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
				   struct thread_struct *next);

extern unsigned int rtas_data;

/*
 * Atomic exchange
 *
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 */
static __inline__ unsigned long
__xchg_u32(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:	lwarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stwcx.	%3,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (prev), "=m" (*(volatile unsigned int *)p)
	: "r" (p), "r" (val), "m" (*(volatile unsigned int *)p)
	: "cc", "memory");

	return prev;
}

#ifdef __powerpc64__
static __inline__ unsigned long
__xchg_u64(volatile void *p, unsigned long val)
{
	unsigned long prev;

	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:	ldarx	%0,0,%2 \n"
	PPC405_ERR77(0,%2)
"	stdcx.	%3,0,%2 \n\
	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (prev), "=m" (*(volatile unsigned long *)p)
	: "r" (p), "r" (val), "m" (*(volatile unsigned long *)p)
	: "cc", "memory");

	return prev;
}
#endif

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__xchg(volatile void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
#ifdef __powerpc64__
	case 8:
		return __xchg_u64(ptr, x);
#endif
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define xchg(ptr,x)							     \
  ({									     \
     __typeof__(*(ptr)) _x_ = (x);					     \
     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
  })

#define tas(ptr) (xchg((ptr),1))
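
/*
 * Illustrative sketch (not part of this header): a minimal test-and-set
 * lock built on tas()/xchg().  example_lock_word and the two functions
 * are hypothetical names; real code should use the kernel spinlock API.
 */
#if 0
static unsigned int example_lock_word;

static void example_lock(void)
{
	while (tas(&example_lock_word))	/* atomically set to 1; loop while */
		barrier();		/* the previous value was non-zero */
}

static void example_unlock(void)
{
	smp_mb();		/* order the critical section before release */
	example_lock_word = 0;
}
#endif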

/*
 * Compare and exchange - if *p == old, set it to new,
 * and return the old value of *p.
 */
#define __HAVE_ARCH_CMPXCHG	1

static __inline__ unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
	EIEIO_ON_SMP
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n"
	PPC405_ERR77(0,%2)
"	stwcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}

#ifdef __powerpc64__
static __inline__ unsigned long
__cmpxchg_u64(volatile long *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
	EIEIO_ON_SMP
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}
#endif

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
#ifdef __powerpc64__
	case 8:
		return __cmpxchg_u64(ptr, old, new);
#endif
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)						 \
  ({									 \
     __typeof__(*(ptr)) _o_ = (o);					 \
     __typeof__(*(ptr)) _n_ = (n);					 \
     (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_,		 \
				    (unsigned long)_n_, sizeof(*(ptr))); \
  })
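
/*
 * Illustrative sketch (not part of this header): the usual cmpxchg()
 * retry loop, here adding to a counter atomically.  example_counter and
 * example_atomic_add are hypothetical; the atomic_t API does this for real.
 */
#if 0
static unsigned int example_counter;

static void example_atomic_add(unsigned int n)
{
	unsigned int old;

	do {
		old = example_counter;	/* snapshot the current value */
		/* retry if another CPU changed it since the snapshot */
	} while (cmpxchg(&example_counter, old, old + n) != old);
}
#endif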

/*
 * We handle most unaligned accesses in hardware.  On the other hand
 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
 * powers of 2 writes until it reaches sufficient alignment).
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0
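
/*
 * Illustrative sketch (hypothetical driver fragment, not part of this
 * header): network drivers pad their receive buffers by NET_IP_ALIGN
 * before DMA so the IP header ends up 4-byte aligned.  With the value 0
 * above, the skb_reserve() below is a no-op on powerpc, keeping the DMA
 * itself naturally aligned.
 */
#if 0
	struct sk_buff *skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);

	if (skb)
		skb_reserve(skb, NET_IP_ALIGN);
#endif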

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_SYSTEM_H */