#ifndef __ASM_ARM_SYSTEM_H
#define __ASM_ARM_SYSTEM_H

#ifdef __KERNEL__

#include <linux/config.h>
#define CPU_ARCH_UNKNOWN	0
#define CPU_ARCH_ARMv3		1
#define CPU_ARCH_ARMv4		2
#define CPU_ARCH_ARMv4T		3
#define CPU_ARCH_ARMv5		4
#define CPU_ARCH_ARMv5T		5
#define CPU_ARCH_ARMv5TE	6
#define CPU_ARCH_ARMv5TEJ	7
#define CPU_ARCH_ARMv6		8
/*
 * CR1 bits (CP#15 CR1)
 */
#define CR_M	(1 << 0)	/* MMU enable */
#define CR_A	(1 << 1)	/* Alignment abort enable */
#define CR_C	(1 << 2)	/* Dcache enable */
#define CR_W	(1 << 3)	/* Write buffer enable */
#define CR_P	(1 << 4)	/* 32-bit exception handler */
#define CR_D	(1 << 5)	/* 32-bit data address range */
#define CR_L	(1 << 6)	/* Implementation defined */
#define CR_B	(1 << 7)	/* Big endian */
#define CR_S	(1 << 8)	/* System MMU protection */
#define CR_R	(1 << 9)	/* ROM MMU protection */
#define CR_F	(1 << 10)	/* Implementation defined */
#define CR_Z	(1 << 11)	/* Implementation defined */
#define CR_I	(1 << 12)	/* Icache enable */
#define CR_V	(1 << 13)	/* Vectors relocated to 0xffff0000 */
#define CR_RR	(1 << 14)	/* Round Robin cache replacement */
#define CR_L4	(1 << 15)	/* LDR pc can set T bit */
#define CR_DT	(1 << 16)
#define CR_IT	(1 << 18)
#define CR_ST	(1 << 19)
#define CR_FI	(1 << 21)	/* Fast interrupt (lower latency mode) */
#define CR_U	(1 << 22)	/* Unaligned access operation */
#define CR_XP	(1 << 23)	/* Extended page tables */
#define CR_VE	(1 << 24)	/* Vectored interrupts */
#define CPUID_ID	0
#define CPUID_CACHETYPE	1
#define CPUID_TCM	2
#define CPUID_TLBTYPE	3
#define read_cpuid(reg)						\
	({							\
		unsigned int __val;				\
		asm("mrc p15, 0, %0, c0, c0, " __stringify(reg)	\
		    : "=r" (__val) : : "cc");			\
		__val;						\
	})
/*
 * This is used to ensure the compiler did actually allocate the register we
 * asked it for some inline assembly sequences.  Apparently we can't trust
 * the compiler from one version to another so a bit of paranoia won't hurt.
 * This string is meant to be concatenated with the inline asm string and
 * will cause compilation to stop on mismatch.
 * (for details, see gcc PR 15089)
 */
#define __asmeq(x, y)  ".ifnc " x "," y " ; .err ; .endif\n\t"
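/*
 * Illustrative use (not part of this header): when a value must live in a
 * specific register for an inline asm sequence, __asmeq() makes the build
 * fail if the compiler allocated a different one.  A hedged sketch:
 *
 *	register unsigned long r0 asm("r0") = arg;
 *	asm volatile(
 *		__asmeq("%0", "r0")
 *		"swi	#0"
 *		: "+r" (r0) : : "memory");
 */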
#ifndef __ASSEMBLY__

#include <linux/linkage.h>
struct thread_info;
struct task_struct;

/* information about the system we're running on */
extern unsigned int system_rev;
extern unsigned int system_serial_low;
extern unsigned int system_serial_high;
extern unsigned int mem_fclk_21285;
struct pt_regs;

void die(const char *msg, struct pt_regs *regs, int err)
		__attribute__((noreturn));

struct siginfo;
void notify_die(const char *str, struct pt_regs *regs, struct siginfo *info,
		unsigned long err, unsigned long trap);

void hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int,
				       struct pt_regs *),
		     int sig, const char *name);
#define xchg(ptr,x) \
	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

#define tas(ptr) (xchg((ptr),1))
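/*
 * Illustrative use (not part of this header): xchg() atomically stores a
 * new value and returns the old one, e.g. claiming a hypothetical busy
 * flag and bailing out if someone else already set it:
 *
 *	static unsigned long in_use;
 *
 *	if (xchg(&in_use, 1) != 0)
 *		return -EBUSY;
 */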
extern asmlinkage void __backtrace(void);
extern asmlinkage void c_backtrace(unsigned long fp, int pmode);

struct mm_struct;
extern void show_pte(struct mm_struct *mm, unsigned long addr);
extern void __show_regs(struct pt_regs *);

extern int cpu_architecture(void);
extern void cpu_init(void);

void arm_machine_restart(char mode);
extern void (*arm_pm_restart)(char str);
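/*
 * Illustrative use (not part of this header): cpu_architecture() returns
 * one of the CPU_ARCH_* values above, so feature decisions can be made at
 * run time.  A minimal sketch:
 *
 *	if (cpu_architecture() >= CPU_ARCH_ARMv5TE)
 *		enable_edsp();
 *
 * where enable_edsp() stands in for whatever v5TE-only setup the caller
 * actually needs.
 */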
/*
 * Intel's XScale3 core supports some v6 features (supersections, L2)
 * but advertises itself as v5 as it does not support the v6 ISA.  For
 * this reason, we need a way to explicitly test for this type of CPU.
 */
#ifndef CONFIG_CPU_XSC3
#define cpu_is_xsc3()	0
#else
static inline int cpu_is_xsc3(void)
{
	extern unsigned int processor_id;

	if ((processor_id & 0xffffe000) == 0x69056000)
		return 1;
	return 0;
}
#endif
#if !defined(CONFIG_CPU_XSCALE) && !defined(CONFIG_CPU_XSC3)
#define cpu_is_xscale()	0
#else
#define cpu_is_xscale()	1
#endif
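/*
 * Illustrative use (not part of this header): code that depends on the
 * XScale3 extensions can test for the core explicitly, e.g.
 *
 *	if (cpu_is_xsc3())
 *		setup_l2_cache();
 *
 * where setup_l2_cache() is a hypothetical platform helper.
 */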
#define set_cr(x)					\
	__asm__ __volatile__(				\
	"mcr p15, 0, %0, c1, c0, 0 @ set CR"		\
	: : "r" (x) : "cc")

#define get_cr()					\
	({						\
	unsigned int __val;				\
	__asm__ __volatile__(				\
	"mrc p15, 0, %0, c1, c0, 0 @ get CR"		\
	: "=r" (__val) : : "cc");			\
	__val;						\
	})
extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
extern unsigned long cr_alignment;	/* defined in entry-armv.S */
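/*
 * Illustrative use (not part of this header): the CR_* bits above are
 * meant to be combined with get_cr()/set_cr(), e.g. a read-modify-write
 * that turns the instruction cache on:
 *
 *	unsigned int cr = get_cr();
 *	set_cr(cr | CR_I);
 */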
#define UDBG_UNDEFINED	(1 << 0)
#define UDBG_SYSCALL	(1 << 1)
#define UDBG_BADABORT	(1 << 2)
#define UDBG_SEGV	(1 << 3)
#define UDBG_BUS	(1 << 4)

extern unsigned int user_debug;
#if __LINUX_ARM_ARCH__ >= 4
#define vectors_high()	(cr_alignment & CR_V)
#else
#define vectors_high()	(0)
#endif
#if __LINUX_ARM_ARCH__ >= 6
#define mb() __asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
                                   : : "r" (0) : "memory")
#else
#define mb() __asm__ __volatile__ ("" : : : "memory")
#endif
#define rmb() mb()
#define wmb() mb()
#define read_barrier_depends() do { } while(0)
#define set_mb(var, value)  do { var = value; mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
#define nop() __asm__ __volatile__("mov\tr0,r0\t@ nop\n\t");
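/*
 * Illustrative use (not part of this header): wmb() orders a data write
 * before the write that publishes it, e.g. a producer filling a buffer
 * before raising a hypothetical "ready" flag:
 *
 *	buf->data = val;
 *	wmb();
 *	buf->ready = 1;
 */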
/*
 * switch_mm() may do a full cache flush over the context switch,
 * so enable interrupts over the context switch to avoid high
 * latency.
 */
#define __ARCH_WANT_INTERRUPTS_ON_CTXSW
/*
 * switch_to(prev, next) should switch from task `prev' to `next'.
 * `prev' will never be the same as `next'.  schedule() itself
 * contains the memory barrier to tell GCC not to cache `current'.
 */
extern struct task_struct *__switch_to(struct task_struct *, struct thread_info *, struct thread_info *);

#define switch_to(prev,next,last)					\
do {									\
	last = __switch_to(prev,task_thread_info(prev), task_thread_info(next)); \
} while (0)
/*
 * On SMP systems, when the scheduler does migration-cost autodetection,
 * it needs a way to flush as much of the CPU's caches as possible.
 * TODO: fill this in!
 */
static inline void sched_cacheflush(void)
{
}
/*
 * CPU interrupt mask handling.
 */
#if __LINUX_ARM_ARCH__ >= 6

#define local_irq_save(x)					\
	({							\
	__asm__ __volatile__(					\
	"mrs %0, cpsr @ local_irq_save\n"			\
	"cpsid i"						\
	: "=r" (x) : : "memory", "cc");				\
	})

#define local_irq_enable()  __asm__("cpsie i @ __sti" : : : "memory", "cc")
#define local_irq_disable() __asm__("cpsid i @ __cli" : : : "memory", "cc")
#define local_fiq_enable()  __asm__("cpsie f @ __stf" : : : "memory", "cc")
#define local_fiq_disable() __asm__("cpsid f @ __clf" : : : "memory", "cc")

#else
/*
 * Save the current interrupt enable state & disable IRQs
 */
#define local_irq_save(x)					\
	({							\
	unsigned long temp;					\
	(void) (&temp == &x);					\
	__asm__ __volatile__(					\
	"mrs %0, cpsr @ local_irq_save\n"			\
"	orr %1, %0, #128\n"					\
"	msr cpsr_c, %1"						\
	: "=r" (x), "=r" (temp)					\
	: : "memory", "cc");					\
	})
#define local_irq_enable()					\
	({							\
	unsigned long temp;					\
	__asm__ __volatile__(					\
	"mrs %0, cpsr @ local_irq_enable\n"			\
"	bic %0, %0, #128\n"					\
"	msr cpsr_c, %0"						\
	: "=r" (temp) : : "memory", "cc");			\
	})
#define local_irq_disable()					\
	({							\
	unsigned long temp;					\
	__asm__ __volatile__(					\
	"mrs %0, cpsr @ local_irq_disable\n"			\
"	orr %0, %0, #128\n"					\
"	msr cpsr_c, %0"						\
	: "=r" (temp) : : "memory", "cc");			\
	})
#define local_fiq_enable()					\
	({							\
	unsigned long temp;					\
	__asm__ __volatile__(					\
	"mrs %0, cpsr @ stf\n"					\
"	bic %0, %0, #64\n"					\
"	msr cpsr_c, %0"						\
	: "=r" (temp) : : "memory", "cc");			\
	})
#define local_fiq_disable()					\
	({							\
	unsigned long temp;					\
	__asm__ __volatile__(					\
	"mrs %0, cpsr @ clf\n"					\
"	orr %0, %0, #64\n"					\
"	msr cpsr_c, %0"						\
	: "=r" (temp) : : "memory", "cc");			\
	})

#endif
/*
 * Save the current interrupt enable state.
 */
#define local_save_flags(x)					\
	({							\
	__asm__ __volatile__(					\
	"mrs %0, cpsr @ local_save_flags"			\
	: "=r" (x) : : "memory", "cc");				\
	})
/*
 * restore saved IRQ & FIQ state
 */
#define local_irq_restore(x)					\
	__asm__ __volatile__(					\
	"msr cpsr_c, %0 @ local_irq_restore\n"			\
	: : "r" (x) : "memory", "cc")
#define irqs_disabled()			\
({					\
	unsigned long flags;		\
	local_save_flags(flags);	\
	(int)(flags & PSR_I_BIT);	\
})
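/*
 * Illustrative use (not part of this header): the usual pattern for a
 * short critical section on the local CPU is save/disable, do the work,
 * then restore the original state:
 *
 *	unsigned long flags;
 *
 *	local_irq_save(flags);
 *	...touch data shared with an interrupt handler...
 *	local_irq_restore(flags);
 */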
#ifdef CONFIG_SMP

#define smp_mb()	mb()
#define smp_rmb()	rmb()
#define smp_wmb()	wmb()
#define smp_read_barrier_depends()	read_barrier_depends()

#else

#define smp_mb()	barrier()
#define smp_rmb()	barrier()
#define smp_wmb()	barrier()
#define smp_read_barrier_depends()	do { } while(0)

#endif /* CONFIG_SMP */
#if defined(CONFIG_CPU_SA1100) || defined(CONFIG_CPU_SA110)
/*
 * On the StrongARM, "swp" is terminally broken since it bypasses the
 * cache totally.  This means that the cache becomes inconsistent, and,
 * since we use normal loads/stores as well, this is really bad.
 * Typically, this causes oopsen in filp_close, but could have other,
 * more disastrous effects.  There are two work-arounds:
 *  1. Disable interrupts and emulate the atomic swap
 *  2. Clean the cache, perform atomic swap, flush the cache
 *
 * We choose (1) since it's the "easiest" to achieve here and is not
 * dependent on the processor type.
 *
 * NOTE that this solution won't work on an SMP system, so explicitly
 * forbid it here.
 */
#define swp_is_buggy
#endif
/*
 * Atomically exchange *ptr with x and return the previous value.  ARMv6
 * uses ldrex/strex, StrongARMs with a buggy "swp" briefly disable
 * interrupts, and everything else uses the swp instruction directly.
 */
static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
{
	extern void __bad_xchg(volatile void *, int);
	unsigned long ret;
#ifdef swp_is_buggy
	unsigned long flags;
#endif
#if __LINUX_ARM_ARCH__ >= 6
	unsigned int tmp;
#endif

	switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
	case 1:
		asm volatile("@ __xchg1\n"
		"1:	ldrexb	%0, [%3]\n"
		"	strexb	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@ __xchg4\n"
		"1:	ldrex	%0, [%3]\n"
		"	strex	%1, %2, [%3]\n"
		"	teq	%1, #0\n"
		"	bne	1b"
			: "=&r" (ret), "=&r" (tmp)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#elif defined(swp_is_buggy)
#ifdef CONFIG_SMP
#error SMP is not supported on this platform
#endif
	case 1:
		local_irq_save(flags);
		ret = *(volatile unsigned char *)ptr;
		*(volatile unsigned char *)ptr = x;
		local_irq_restore(flags);
		break;
	case 4:
		local_irq_save(flags);
		ret = *(volatile unsigned long *)ptr;
		*(volatile unsigned long *)ptr = x;
		local_irq_restore(flags);
		break;
#else
	case 1:
		asm volatile("@ __xchg1\n"
		"	swpb	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
	case 4:
		asm volatile("@ __xchg4\n"
		"	swp	%0, %1, [%2]"
			: "=&r" (ret)
			: "r" (x), "r" (ptr)
			: "memory", "cc");
		break;
#endif
	default:
		__bad_xchg(ptr, size), ret = 0;
	}

	return ret;
}
extern void disable_hlt(void);
extern void enable_hlt(void);

#endif /* __ASSEMBLY__ */

#define arch_align_stack(x) (x)

#endif /* __KERNEL__ */

#endif