2 * Copyright (C) 2004-2006 Atmel Corporation
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
10 * This file contains the low-level entry-points into the kernel, that is,
11 * exception handlers, debug trap handlers, interrupt handlers and the
12 * system call handler.
14 #include <linux/errno.h>
17 #include <asm/hardirq.h>
21 #include <asm/pgtable.h>
22 #include <asm/ptrace.h>
23 #include <asm/sysreg.h>
24 #include <asm/thread_info.h>
25 #include <asm/unistd.h>
/*
 * NOTE(review): this file is a sparse excerpt of the AVR32 low-level
 * entry code -- interior lines are missing, so the #ifdef context
 * surrounding these definitions is not visible here.
 */
28 # define preempt_stop mask_interrupts
31 # define fault_resume_kernel fault_restore_all
/* Mask covering the softirq and hardirq count fields of preempt_count;
 * presumably used to detect interrupt context -- the use site is not
 * visible in this excerpt. */
34 #define __MASK(x) ((1 << (x)) - 1)
35 #define IRQ_MASK ((__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT) | \
36 (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT))
38 .section .ex.text,"ax",@progbits
/* Exception vector stubs: each entry branches to the common handler for
 * its exception cause.  (Excerpt: the alignment/padding and the labels
 * between these entries are not visible here, so which cause maps to
 * which entry cannot be confirmed from this view.) */
45 bral do_bus_error_write
47 bral do_bus_error_read
51 bral handle_address_fault
53 bral handle_protection_fault
57 bral do_illegal_opcode_ll
59 bral do_illegal_opcode_ll
61 bral do_illegal_opcode_ll
65 bral do_illegal_opcode_ll
67 bral handle_address_fault
69 bral handle_address_fault
71 bral handle_protection_fault
73 bral handle_protection_fault
79 * r1 : Offending address
80 * r2 : Scratch register
81 * r3 : Cause (5, 12 or 13)
/* Save/restore the four scratch registers used by the TLB miss
 * fast path below. */
83 #define tlbmiss_save pushm r0-r3
84 #define tlbmiss_restore popm r0-r3
86 .section .tlbx.ex.text,"ax",@progbits
92 .section .tlbr.ex.text,"ax",@progbits
97 .section .tlbw.ex.text,"ax",@progbits
/*
 * Common TLB-miss fast path shared by the execute/read/write miss
 * vectors (the .tlbx/.tlbr/.tlbw sections above).  Does a two-level
 * page-table walk for the address in TLBEAR and loads the resulting
 * entry into the TLB; bails out to the slow paths when a level is not
 * present.  (Excerpt: several instructions, including the pgd base
 * load and the TLB write, are not visible here.)
 */
101 .global tlb_miss_common
104 mfsr r1, SYSREG_TLBEAR /* r1 = offending address (see notes above) */
106 /* Is it the vmalloc space? */
108 brcs handle_vmalloc_miss
110 /* First level lookup */
112 lsr r2, r1, PGDIR_SHIFT /* r2 = first-level (pgd) index */
114 bld r0, _PAGE_BIT_PRESENT
115 brcc page_table_not_present
117 /* TODO: Check access rights on page table if necessary */
119 /* Translate to virtual address in P1. */
123 /* Second level lookup */
124 lsl r1, (32 - PGDIR_SHIFT)
125 lsr r1, (32 - PGDIR_SHIFT) + PAGE_SHIFT /* r1 = second-level index */
128 bld r1, _PAGE_BIT_PRESENT
129 brcc page_not_present
131 /* Mark the page as accessed */
132 sbr r1, _PAGE_BIT_ACCESSED
135 /* Drop software flags */
136 andl r1, _PAGE_FLAGS_HARDWARE_MASK & 0xffff
137 mtsr SYSREG_TLBELO, r1
139 /* Figure out which entry we want to replace */
140 mfsr r0, SYSREG_TLBARLO
143 mov r1, -1 /* All entries have been accessed, */
144 mtsr SYSREG_TLBARLO, r1 /* so reset TLBAR */
145 mov r2, 0 /* and start at 0 */
146 1: mfsr r1, SYSREG_MMUCR
150 mtsr SYSREG_MMUCR, r1
/*
 * vmalloc-space miss: kernel mappings live only in init's page table,
 * so the walk uses swapper_pg_dir instead of the current process pgd.
 */
158 /* Simply do the lookup in init's page table */
159 mov r0, lo(swapper_pg_dir)
160 orh r0, hi(swapper_pg_dir)
164 /* --- System Call --- */
166 .section .scall.text,"ax",@progbits
/*
 * System call entry.  Saves the original r12 (syscall number/return
 * slot), fetches the user PC/SR from RAR_SUP/RSR_SUP, checks TI_flags
 * for syscall tracing, then dispatches through the syscall table.
 * (Excerpt: the frame push and the indirect call are not all visible.)
 */
168 pushm r12 /* r12_orig */
171 mfsr r0, SYSREG_RAR_SUP
172 mfsr r1, SYSREG_RSR_SUP
175 /* check for syscall tracing */
177 ld.w r1, r0[TI_flags]
178 bld r1, TIF_SYSCALL_TRACE
179 brcs syscall_trace_enter
185 lddpc lr, syscall_table_addr
187 mov r8, r5 /* 5th argument (6th is pushed by stub) */
/*
 * Common syscall return path: with interrupts masked, store the return
 * value into the saved frame, then check TI_flags for pending work
 * before restoring the user PC/SR and returning.
 */
190 .global syscall_return
193 mask_interrupts /* make sure we don't miss an interrupt
194 setting need_resched or sigpending
195 between sampling and the rets */
197 /* Store the return value so that the correct value is loaded below */
198 stdsp sp[REG_R12], r12
200 ld.w r1, r0[TI_flags]
201 andl r1, _TIF_ALLWORK_MASK, COH
202 brne syscall_exit_work
206 mtsr SYSREG_RAR_SUP, r8
207 mtsr SYSREG_RSR_SUP, r9
209 sub sp, -4 /* r12_orig */
/*
 * Entry point for a child task's first return to user space after
 * fork/vfork/clone: re-checks work flags, then joins the normal
 * syscall exit path.  (Excerpt: the schedule_tail call that normally
 * precedes this check is not visible here.)
 */
220 .global ret_from_fork
224 /* check for syscall tracing */
226 ld.w r1, r0[TI_flags]
227 andl r1, _TIF_ALLWORK_MASK, COH
228 brne syscall_exit_work
229 rjmp syscall_exit_cont
235 rjmp syscall_trace_cont
238 bld r1, TIF_SYSCALL_TRACE
/*
 * Slow syscall-exit path: handle rescheduling, then pending signals,
 * re-reading TI_flags after each action since the handlers may set
 * new work bits.
 */
243 ld.w r1, r0[TI_flags]
245 1: bld r1, TIF_NEED_RESCHED
250 ld.w r1, r0[TI_flags]
253 2: mov r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
259 rcall do_notify_resume
261 ld.w r1, r0[TI_flags]
/* TIF_BREAKPOINT: program a hardware breakpoint via the debug
 * registers before returning to user space.  (Excerpt: the setup of
 * r2 between these lines is not visible -- confirm against full file.) */
264 3: bld r1, TIF_BREAKPOINT
265 brcc syscall_exit_cont
266 mfsr r3, SYSREG_TLBEHI
272 mtdr DBGREG_BWA2A, r2
273 mtdr DBGREG_BWC2A, r3
274 rjmp syscall_exit_cont
277 /* The slow path of the TLB miss handler */
278 page_table_not_present:
283 rcall save_full_context_ex
287 rjmp ret_from_exception
289 /* This function expects to find offending PC in SYSREG_RAR_EX */
/*
 * Build a full register frame from exception mode: read RSR_EX/RAR_EX,
 * test the saved mode bits, and complete the frame accordingly.
 * NOTE(review): the branch between the two numbered labels is not
 * visible in this excerpt; label 2 appears to be the privileged-entry
 * case (it recomputes and patches the saved SP) -- confirm against the
 * full file.
 */
290 save_full_context_ex:
291 mfsr r8, SYSREG_RSR_EX
293 andh r8, (MODE_MASK >> 16), COH
294 mfsr r11, SYSREG_RAR_EX
297 1: pushm r11, r12 /* PC and SR */
301 2: sub r10, sp, -(FRAME_SIZE_FULL - REG_LR)
302 stdsp sp[4], r10 /* replace saved SP */
305 /* Low-level exception handlers */
/* Critical exception: save full context, call the C handler; it is not
 * expected to return (the string below is used if it ever does). */
309 rcall save_full_context_ex
312 rcall do_critical_exception
314 /* We should never get here... */
316 sub r12, pc, (. - 1f)
319 1: .asciz "Return from critical exception!"
/* Bus error handlers (write and read variants, per the vector table):
 * save context, fetch the faulting address from the Bus Error Address
 * Register, and hand off to C code (call not visible in this excerpt). */
325 rcall save_full_context_ex
332 rcall save_full_context_ex
334 1: mfsr r12, SYSREG_BEAR
337 rjmp ret_from_exception
/*
 * NMI entry: save PC/SR from RAR_NMI/RSR_NMI, extract the interrupted
 * mode, build a frame, and restore the NMI return registers on the way
 * out.  (Excerpt: the C handler call between these lines is missing;
 * label 2 patches the saved SP for a privileged-mode entry.)
 */
343 mfsr r9, SYSREG_RSR_NMI
344 mfsr r8, SYSREG_RAR_NMI
345 bfextu r0, r9, MODE_SHIFT, 3
348 1: pushm r8, r9 /* PC and SR */
353 mtsr SYSREG_RAR_NMI, r8
355 mtsr SYSREG_RSR_NMI, r9
359 sub sp, -4 /* skip r12_orig */
362 2: sub r10, sp, -(FRAME_SIZE_FULL - REG_LR)
363 stdsp sp[4], r10 /* replace saved SP */
367 sub sp, -4 /* skip sp */
369 sub sp, -4 /* skip r12_orig */
/* Address fault: full context save, then the C address-exception
 * handler, then the common exception return path. */
372 handle_address_fault:
375 rcall save_full_context_ex
378 rcall do_address_exception
379 rjmp ret_from_exception
/* MMU protection fault: save context and return via the common path.
 * (Excerpt: the C handler call between these lines is not visible.) */
381 handle_protection_fault:
384 rcall save_full_context_ex
388 rjmp ret_from_exception
/* Illegal opcode and related traps funnel here from several vector
 * entries (see the branch table at the top of the file). */
391 do_illegal_opcode_ll:
394 rcall save_full_context_ex
397 rcall do_illegal_opcode
398 rjmp ret_from_exception
/*
 * TLB write / dirty-bit fast path: re-walk the page table for the
 * address in TLBEAR, set the dirty bit in the pte, and rewrite the
 * TLB entry with the hardware flags.  (Excerpt: the pgd base load,
 * the pte store, and the entry/exit labels are not visible here.)
 */
402 mfsr r1, SYSREG_TLBEAR
404 lsr r2, r1, PGDIR_SHIFT /* first-level index */
406 lsl r1, (32 - PGDIR_SHIFT)
407 lsr r1, (32 - PGDIR_SHIFT) + PAGE_SHIFT /* second-level index */
409 /* Translate to virtual address in P1 */
414 sbr r3, _PAGE_BIT_DIRTY
418 /* The page table is up-to-date. Update the TLB entry as well */
419 andl r0, lo(_PAGE_FLAGS_HARDWARE_MASK)
420 mtsr SYSREG_TLBELO, r0
422 /* MMUCR[DRP] is updated automatically, so let's go... */
/* Slow path: fall back to the full-context C page-fault handler. */
431 rcall save_full_context_ex
436 rjmp ret_from_exception
/*
 * Common exception return: pick the user or kernel resume path from
 * the mode bits of the saved status register, check TI_flags for
 * pending work, then restore RAR_EX/RSR_EX and unwind the frame.
 */
441 andh r4, (MODE_MASK >> 16), COH
442 brne fault_resume_kernel
445 ld.w r1, r0[TI_flags]
446 andl r1, _TIF_WORK_MASK, COH
452 mtsr SYSREG_RAR_EX, r8
453 mtsr SYSREG_RSR_EX, r9
/*
 * Kernel-mode resume with CONFIG_PREEMPT: tests preempt_count,
 * TIF_NEED_RESCHED and the GM (global interrupt mask) bit of the
 * interrupted context before calling preempt_schedule_irq.  (Excerpt:
 * the branches between these tests are not visible here.)
 */
459 #ifdef CONFIG_PREEMPT
461 ld.w r2, r0[TI_preempt_count]
464 ld.w r1, r0[TI_flags]
465 bld r1, TIF_NEED_RESCHED
468 bld r4, SYSREG_GM_OFFSET
470 rcall preempt_schedule_irq
477 mtsr SYSREG_RAR_EX, r8
478 mtsr SYSREG_RSR_EX, r9
480 sub sp, -4 /* ignore SP */
482 sub sp, -4 /* ignore r12_orig */
/* Pending-work path for returning to user space: loop over reschedule,
 * signal delivery and breakpoint setup until TI_flags is clean. */
486 /* Switch to exception mode so that we can share the same code. */
488 cbr r8, SYSREG_M0_OFFSET
489 orh r8, hi(SYSREG_BIT(M1) | SYSREG_BIT(M2))
493 ld.w r1, r0[TI_flags]
496 bld r1, TIF_NEED_RESCHED
501 ld.w r1, r0[TI_flags]
504 1: mov r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
510 rcall do_notify_resume
512 ld.w r1, r0[TI_flags]
/* TIF_BREAKPOINT: program a hardware breakpoint via the debug
 * registers before resuming (mirrors the syscall-exit variant). */
515 2: bld r1, TIF_BREAKPOINT
516 brcc fault_resume_user
517 mfsr r3, SYSREG_TLBEHI
523 mtdr DBGREG_BWA2A, r2
524 mtdr DBGREG_BWC2A, r3
525 rjmp fault_resume_user
527 /* If we get a debug trap from privileged context we end up here */
529 /* Fix up LR and SP in regs. r11 contains the mode we came from */
532 andh r8, hi(~MODE_MASK)
539 sub r10, sp, -FRAME_SIZE_FULL
540 stdsp sp[REG_SP], r10
544 /* Now, put everything back */
547 mtsr SYSREG_RAR_DBG, r10
548 mtsr SYSREG_RSR_DBG, r11
551 andh r8, hi(~MODE_MASK)
552 andh r11, hi(MODE_MASK)
559 sub sp, -4 /* skip SP */
565 * At this point, everything is masked, that is, interrupts,
566 * exceptions and debugging traps. We might get called from
567 * interrupt or exception context in some rare cases, but this
568 * will be taken care of by do_debug(), so we're not going to
569 * do a 100% correct context save here.
/* Debug trap entry: save PC/SR from RAR_DBG/RSR_DBG and branch to the
 * privileged-context fixup above if we did not come from user mode. */
572 sub sp, 4 /* r12_orig */
574 mfsr r10, SYSREG_RAR_DBG
575 mfsr r11, SYSREG_RSR_DBG
578 andh r11, (MODE_MASK >> 16), COH
579 brne handle_debug_priv
/* Debug-trap exit: choose user vs. kernel resume from the saved SR,
 * then restore the debug return registers. */
584 lddsp r10, sp[REG_SR]
585 andh r10, (MODE_MASK >> 16), COH
586 breq debug_resume_user
591 mtsr SYSREG_RSR_DBG, r11
592 mtsr SYSREG_RAR_DBG, r10
/* Pending-work loop before returning to user space from a debug trap:
 * reschedule, deliver signals, then handle single-step. */
601 ld.w r1, r0[TI_flags]
602 andl r1, _TIF_DBGWORK_MASK, COH
603 breq debug_restore_all
605 1: bld r1, TIF_NEED_RESCHED
610 ld.w r1, r0[TI_flags]
613 2: mov r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK
619 rcall do_notify_resume
621 ld.w r1, r0[TI_flags]
624 3: bld r1, TIF_SINGLE_STEP
625 brcc debug_restore_all
629 rjmp debug_restore_all
/* Lower-case aliases for the per-level return-address/status system
 * registers, so the IRQ_LEVEL macro below can form the register name
 * textually with \level. */
631 .set rsr_int0, SYSREG_RSR_INT0
632 .set rsr_int1, SYSREG_RSR_INT1
633 .set rsr_int2, SYSREG_RSR_INT2
634 .set rsr_int3, SYSREG_RSR_INT3
635 .set rar_int0, SYSREG_RAR_INT0
636 .set rar_int1, SYSREG_RAR_INT1
637 .set rar_int2, SYSREG_RAR_INT2
638 .set rar_int3, SYSREG_RAR_INT3
/*
 * Template for the four interrupt-level entry points (irq_level0..3).
 * Builds a frame, fetches PC/SR from the per-level RAR/RSR registers,
 * calls the IRQ dispatcher (not visible in this excerpt), and on the
 * way out handles return-to-user work, idle-loop wakeup and, under
 * CONFIG_PREEMPT, kernel preemption.
 */
640 .macro IRQ_LEVEL level
641 .type irq_level\level, @function
643 sub sp, 4 /* r12_orig */
645 mfsr r8, rar_int\level
646 mfsr r9, rsr_int\level
/* Classify the interrupted mode (supervisor vs. user vs. other
 * privileged context) from the saved status register. */
655 bfextu r4, r4, SYSREG_M0_OFFSET, 3
656 cp.w r4, MODE_SUPERVISOR >> SYSREG_M0_OFFSET
658 cp.w r4, MODE_USER >> SYSREG_M0_OFFSET
659 #ifdef CONFIG_PREEMPT
666 ld.w r1, r0[TI_flags]
667 andl r1, _TIF_WORK_MASK, COH
671 mtsr rar_int\level, r8
672 mtsr rsr_int\level, r9
674 sub sp, -4 /* ignore r12_orig */
/* Interrupted the idle loop: if TIF_CPU_GOING_TO_SLEEP is set, redirect
 * the return address to cpu_idle_skip_sleep so the sleep instruction
 * is skipped after wakeup. */
677 2: get_thread_info r0
678 ld.w r1, r0[TI_flags]
679 bld r1, TIF_CPU_GOING_TO_SLEEP
680 #ifdef CONFIG_PREEMPT
685 sub r1, pc, . - cpu_idle_skip_sleep
687 #ifdef CONFIG_PREEMPT
/* Kernel preemption check: preempt_count, TIF_NEED_RESCHED and the GM
 * bit are tested before calling preempt_schedule_irq (branches between
 * the tests are not visible in this excerpt). */
688 3: get_thread_info r0
689 ld.w r2, r0[TI_preempt_count]
692 ld.w r1, r0[TI_flags]
693 bld r1, TIF_NEED_RESCHED
696 bld r4, SYSREG_GM_OFFSET
698 rcall preempt_schedule_irq
703 .section .irq.text,"ax",@progbits
/*
 * cpu_idle_sleep: unless a reschedule is already pending, set
 * TIF_CPU_GOING_TO_SLEEP before sleeping; the IRQ entry code above
 * tests this flag to skip the sleep instruction on wakeup.  (Excerpt:
 * the sleep instruction itself is not visible here.)
 */
705 .global cpu_idle_sleep
709 ld.w r9, r8[TI_flags]
710 bld r9, TIF_NEED_RESCHED
711 brcs cpu_idle_enable_int_and_exit
712 sbr r9, TIF_CPU_GOING_TO_SLEEP
713 st.w r8[TI_flags], r9
/* Woken up: clear the going-to-sleep flag before re-enabling
 * interrupts and returning to the idle loop. */
718 ld.w r9, r8[TI_flags]
719 cbr r9, TIF_CPU_GOING_TO_SLEEP
720 st.w r8[TI_flags], r9
721 cpu_idle_enable_int_and_exit: