/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
 * Copyright (C) 1995, 1996 Paul M. Antoine
 * Copyright (C) 1998 Ulf Carlsson
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000, 01 MIPS Technologies, Inc.
 * Copyright (C) 2002, 2003, 2004, 2005 Maciej W. Rozycki
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <linux/interrupt.h>
#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/watch.h>
#include <asm/types.h>
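/*
 * First-level exception entry points. These are implemented in assembly;
 * each one saves processor state and then dispatches to the matching C
 * handler below.
 */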
extern asmlinkage void handle_int(void);
extern asmlinkage void handle_tlbm(void);
extern asmlinkage void handle_tlbl(void);
extern asmlinkage void handle_tlbs(void);
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);
extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
        struct mips_fpu_struct *ctx);
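/*
 * Board-level hooks. Platform setup code may point these at its own bus
 * error, NMI and EJTAG handlers; they are checked for NULL before use.
 */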
void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
static void show_raw_backtrace(unsigned long *sp)
{
        printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
        printk("\n");
#endif
        while (!kstack_end(sp)) {
                unsigned long addr = *sp++;

                if (__kernel_text_address(addr))
                        print_ip_sym(addr);
        }
        printk("\n");
}
#ifdef CONFIG_KALLSYMS
static int raw_show_trace;
static int __init set_raw_show_trace(char *str)
{
        raw_show_trace = 1;
        return 1;
}
__setup("raw_show_trace", set_raw_show_trace);
extern unsigned long unwind_stack(struct task_struct *task,
        unsigned long **sp, unsigned long pc);
static void show_backtrace(struct task_struct *task, struct pt_regs *regs)
{
        unsigned long *sp = (unsigned long *)regs->regs[29];
        unsigned long pc = regs->cp0_epc;

        if (raw_show_trace || !__kernel_text_address(pc)) {
                show_raw_backtrace(sp);
                return;
        }
        printk("Call Trace:\n");
        while (__kernel_text_address(pc)) {
                print_ip_sym(pc);
                pc = unwind_stack(task, &sp, pc);
                if (!pc)
                        pc = regs->regs[31];    /* leaf? */
        }
        printk("\n");
}
#else
#define show_backtrace(task, r) show_raw_backtrace((unsigned long *)(r)->regs[29])
#endif
/*
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
 */
static void show_stacktrace(struct task_struct *task, struct pt_regs *regs)
{
        const int field = 2 * sizeof(unsigned long);
        long stackdata;
        int i;
        unsigned long *sp = (unsigned long *)regs->regs[29];

        printk("Stack :");
        i = 0;
        while ((unsigned long) sp & (PAGE_SIZE - 1)) {
                if (i && ((i % (64 / field)) == 0))
                        printk("\n       ");

                if (__get_user(stackdata, sp++)) {
                        printk(" (Bad stack address)");
                        break;
                }

                printk(" %0*lx", field, stackdata);
                i++;
        }
        printk("\n");
        show_backtrace(task, regs);
}
static __always_inline void prepare_frametrace(struct pt_regs *regs)
{
        __asm__ __volatile__(
                : "=m" (regs->cp0_epc),
                  "=m" (regs->regs[29]), "=m" (regs->regs[31])
                : : "memory");
}
void show_stack(struct task_struct *task, unsigned long *sp)
{
        struct pt_regs regs;

        if (sp) {
                regs.regs[29] = (unsigned long)sp;
                regs.regs[31] = 0;
                regs.cp0_epc = 0;
        } else {
                if (task && task != current) {
                        regs.regs[29] = task->thread.reg29;
                        regs.regs[31] = 0;
                        regs.cp0_epc = task->thread.reg31;
                } else {
                        prepare_frametrace(&regs);
                }
        }
        show_stacktrace(task, &regs);
}
/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
        struct pt_regs regs;

        /*
         * Remove any garbage that may be in regs (especially function
         * addresses) so that show_raw_backtrace() does not report it.
         */
        memset(&regs, 0, sizeof(regs));
        prepare_frametrace(&regs);
        show_backtrace(current, &regs);
}

EXPORT_SYMBOL(dump_stack);
void show_code(unsigned int *pc)
{
        long i;

        printk("\nCode:");

        for (i = -3; i < 6; i++) {
                unsigned int insn;

                if (__get_user(insn, pc + i)) {
                        printk(" (Bad address in epc)\n");
                        break;
                }
                printk("%c%08x%c", (i ? ' ' : '<'), insn, (i ? ' ' : '>'));
        }
}
void show_regs(struct pt_regs *regs)
{
        const int field = 2 * sizeof(unsigned long);
        unsigned int cause = regs->cp0_cause;
        int i;

        printk("Cpu %d\n", smp_processor_id());

        /*
         * Saved main processor registers
         */
        for (i = 0; i < 32; ) {
                if ((i % 4) == 0)
                        printk("$%2d   :", i);
                if (i == 0)
                        printk(" %0*lx", field, 0UL);
                else if (i == 26 || i == 27)
                        printk(" %*s", field, "");
                else
                        printk(" %0*lx", field, regs->regs[i]);

                i++;
                if ((i % 4) == 0)
                        printk("\n");
        }

        printk("Hi : %0*lx\n", field, regs->hi);
        printk("Lo : %0*lx\n", field, regs->lo);

        /*
         * Saved cp0 registers
         */
        printk("epc : %0*lx ", field, regs->cp0_epc);
        print_symbol("%s ", regs->cp0_epc);
        printk(" %s\n", print_tainted());
        printk("ra : %0*lx ", field, regs->regs[31]);
        print_symbol("%s\n", regs->regs[31]);

        printk("Status: %08x ", (uint32_t) regs->cp0_status);

        if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) {
                if (regs->cp0_status & ST0_KUO)
                        printk("KUo ");
                if (regs->cp0_status & ST0_IEO)
                        printk("IEo ");
                if (regs->cp0_status & ST0_KUP)
                        printk("KUp ");
                if (regs->cp0_status & ST0_IEP)
                        printk("IEp ");
                if (regs->cp0_status & ST0_KUC)
                        printk("KUc ");
                if (regs->cp0_status & ST0_IEC)
                        printk("IEc ");
        } else {
                if (regs->cp0_status & ST0_KX)
                        printk("KX ");
                if (regs->cp0_status & ST0_SX)
                        printk("SX ");
                if (regs->cp0_status & ST0_UX)
                        printk("UX ");
                switch (regs->cp0_status & ST0_KSU) {
                case KSU_USER:
                        printk("USER ");
                        break;
                case KSU_SUPERVISOR:
                        printk("SUPERVISOR ");
                        break;
                case KSU_KERNEL:
                        printk("KERNEL ");
                        break;
                default:
                        printk("BAD_MODE ");
                        break;
                }
                if (regs->cp0_status & ST0_ERL)
                        printk("ERL ");
                if (regs->cp0_status & ST0_EXL)
                        printk("EXL ");
                if (regs->cp0_status & ST0_IE)
                        printk("IE ");
        }
        printk("\n");

        printk("Cause : %08x\n", cause);

        cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
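        /*
         * Only ExcCodes 1-5 (TLB modified, TLB load/store, address error
         * load/store) latch a meaningful bad virtual address.
         */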
        if (1 <= cause && cause <= 5)
                printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);

        printk("PrId : %08x\n", read_c0_prid());
}
void show_registers(struct pt_regs *regs)
{
        show_regs(regs);
        printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
               current->comm, current->pid, current_thread_info(), current);
        show_stacktrace(current, regs);
        show_code((unsigned int *) regs->cp0_epc);
}
static DEFINE_SPINLOCK(die_lock);

NORET_TYPE void ATTRIB_NORET die(const char *str, struct pt_regs *regs)
{
        static int die_counter;
#ifdef CONFIG_MIPS_MT_SMTC
        unsigned long dvpret = dvpe();
#endif /* CONFIG_MIPS_MT_SMTC */

        spin_lock_irq(&die_lock);
#ifdef CONFIG_MIPS_MT_SMTC
        mips_mt_regdump(dvpret);
#endif /* CONFIG_MIPS_MT_SMTC */
        printk("%s[#%d]:\n", str, ++die_counter);
        show_registers(regs);
        spin_unlock_irq(&die_lock);

        if (in_interrupt())
                panic("Fatal exception in interrupt");

        if (panic_on_oops) {
                printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");
                ssleep(5);
                panic("Fatal exception");
        }

        do_exit(SIGSEGV);
}
extern const struct exception_table_entry __start___dbe_table[];
extern const struct exception_table_entry __stop___dbe_table[];

void __declare_dbe_table(void)
{
        __asm__ __volatile__(
        ".section\t__dbe_table,\"a\"\n\t"
        ".previous"
        );
}

/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
{
        const struct exception_table_entry *e;

        e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
        if (!e)
                e = search_module_dbetables(addr);
        return e;
}
asmlinkage void do_be(struct pt_regs *regs)
{
        const int field = 2 * sizeof(unsigned long);
        const struct exception_table_entry *fixup = NULL;
        int data = regs->cp0_cause & 4;
        int action = MIPS_BE_FATAL;

        /* XXX For now. Fixme, this searches the wrong table ... */
        if (data && !user_mode(regs))
                fixup = search_dbe_tables(exception_epc(regs));

        if (fixup)
                action = MIPS_BE_FIXUP;

        if (board_be_handler)
                action = board_be_handler(regs, fixup != 0);

        switch (action) {
        case MIPS_BE_DISCARD:
                return;
        case MIPS_BE_FIXUP:
                if (fixup) {
                        regs->cp0_epc = fixup->nextinsn;
                        return;
                }
                break;
        default:
                break;
        }

        /*
         * Assume it would be too dangerous to continue ...
         */
        printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
               data ? "Data" : "Instruction",
               field, regs->cp0_epc, field, regs->regs[31]);
        die_if_kernel("Oops", regs);
        force_sig(SIGBUS, current);
}
static inline int get_insn_opcode(struct pt_regs *regs, unsigned int *opcode)
{
        unsigned int __user *epc;
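        /*
         * If the exception hit a branch delay slot (Cause.BD set), EPC
         * points at the branch; the faulting instruction is one word later.
         */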
        epc = (unsigned int __user *) regs->cp0_epc +
              ((regs->cp0_cause & CAUSEF_BD) != 0);
        if (!get_user(*opcode, epc))
                return 0;

        force_sig(SIGSEGV, current);
        return 1;
}
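/*
 * Field masks for decoding MIPS I-type instructions (major opcode in bits
 * 31:26, base register in 25:21, rt in 20:16, 16-bit offset in 15:0) and
 * for the SPEC3-major RDHWR encoding (rd in 15:11, function in 5:0).
 */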
#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define RDHWR  0x0000003b
/*
 * The ll_bit is cleared by r*_switch.S
 */

unsigned long ll_bit;

static struct task_struct *ll_task = NULL;
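/*
 * The single LL/SC reservation is emulated in software: ll_bit records an
 * outstanding reservation and ll_task records which task owns it, so an
 * sc issued by any other task (or after a context switch) is made to fail.
 */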
static inline void simulate_ll(struct pt_regs *regs, unsigned int opcode)
{
        unsigned long value, __user *vaddr;
        long offset;
        int signal = 0;

        /*
         * analyse the ll instruction that just caused an RI exception
         * and compute the address it references.
         */

        /* sign-extend the 16-bit offset */
        offset = opcode & OFFSET;
        offset <<= 16;
        offset >>= 16;
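        /*
         * Standard I-type addressing: the effective address is the base
         * register plus the sign-extended 16-bit offset.
         */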
        vaddr = (unsigned long __user *)
                ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);

        if ((unsigned long)vaddr & 3) {
                signal = SIGBUS;
                goto sig;
        }
        if (get_user(value, vaddr)) {
                signal = SIGSEGV;
                goto sig;
        }

        if (ll_task == NULL || ll_task == current) {
                ll_bit = 1;
        } else {
                ll_bit = 0;
        }
        ll_task = current;

        compute_return_epc(regs);

        regs->regs[(opcode & RT) >> 16] = value;

        return;

sig:
        force_sig(signal, current);
}
static inline void simulate_sc(struct pt_regs *regs, unsigned int opcode)
{
        unsigned long __user *vaddr;
        unsigned int reg;
        long offset;
        int signal = 0;

        /*
         * analyse the sc instruction that just caused an RI exception
         * and compute the address it references.
         */

        /* sign-extend the 16-bit offset */
        offset = opcode & OFFSET;
        offset <<= 16;
        offset >>= 16;

        vaddr = (unsigned long __user *)
                ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
        reg = (opcode & RT) >> 16;

        if ((unsigned long)vaddr & 3) {
                signal = SIGBUS;
                goto sig;
        }

        if (ll_bit == 0 || ll_task != current) {
                compute_return_epc(regs);
                regs->regs[reg] = 0;
                return;
        }

        if (put_user(regs->regs[reg], vaddr)) {
                signal = SIGSEGV;
                goto sig;
        }

        compute_return_epc(regs);
        regs->regs[reg] = 1;

        return;

sig:
        force_sig(signal, current);
}
/*
 * ll uses the opcode of lwc0 and sc uses the opcode of swc0. That is, both
 * opcodes are supposed to result in coprocessor unusable exceptions if
 * executed on ll/sc-less processors. That's the theory. In practice a
 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 * instead, so we're doing the emulation thing in both exception handlers.
 */
static inline int simulate_llsc(struct pt_regs *regs)
{
        unsigned int opcode;

        if (unlikely(get_insn_opcode(regs, &opcode)))
                return -EFAULT;

        if ((opcode & OPCODE) == LL) {
                simulate_ll(regs, opcode);
                return 0;
        }
        if ((opcode & OPCODE) == SC) {
                simulate_sc(regs, opcode);
                return 0;
        }

        return -EFAULT;                 /* Strange things going on ... */
}
/*
 * Simulate trapping 'rdhwr' instructions to provide user accessible
 * registers not implemented in hardware. The only current use of this
 * is the thread area pointer.
 */
static inline int simulate_rdhwr(struct pt_regs *regs)
{
        struct thread_info *ti = task_thread_info(current);
        unsigned int opcode;

        if (unlikely(get_insn_opcode(regs, &opcode)))
                return -EFAULT;

        if (unlikely(compute_return_epc(regs)))
                return -EFAULT;

        if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
                int rd = (opcode & RD) >> 11;
                int rt = (opcode & RT) >> 16;
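                /*
                 * Hardware register 29 (ULR) is the userlocal/TLS pointer;
                 * emulate it by returning the thread area pointer.
                 */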
                switch (rd) {
                case 29:
                        regs->regs[rt] = ti->tp_value;
                        return 0;
                default:
                        return -EFAULT;
                }
        }

        /* Not ours. */
        return -EFAULT;
}
asmlinkage void do_ov(struct pt_regs *regs)
{
        siginfo_t info;

        die_if_kernel("Integer overflow", regs);

        info.si_code = FPE_INTOVF;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_addr = (void __user *) regs->cp0_epc;
        force_sig_info(SIGFPE, &info, current);
}
/*
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
 */
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
{
        die_if_kernel("FP exception in kernel code", regs);

        if (fcr31 & FPU_CSR_UNI_X) {
                int sig;

#ifdef CONFIG_PREEMPT
                if (!is_fpu_owner()) {
                        /* We might lose the FPU before disabling preempt... */
                        own_fpu();
                        BUG_ON(!used_math());
                        restore_fp(current);
                }
#endif
                /*
                 * Unimplemented operation exception. If we've got the full
                 * software emulator on-board, let's use it...
                 *
                 * Force FPU to dump state into task/thread context. We're
                 * moving a lot of data here for what is probably a single
                 * instruction, but the alternative is to pre-decode the FP
                 * register operands before invoking the emulator, which seems
                 * a bit extreme for what should be an infrequent event.
                 */
                save_fp(current);
                /* Ensure 'resume' does not overwrite the saved FP context again. */
                lose_fpu();

                /* Run the emulator */
                sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu);

                own_fpu();      /* Using the FPU again. */
                /*
                 * We can't allow the emulated instruction to leave any of
                 * the cause bits set in $fcr31.
                 */
                current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;

                /* Restore the hardware register state */
                restore_fp(current);

                /* If something went wrong, signal */
                if (sig)
                        force_sig(sig, current);

                return;
        }

        force_sig(SIGFPE, current);
}
asmlinkage void do_bp(struct pt_regs *regs)
{
        unsigned int opcode, bcode;
        siginfo_t info;

        die_if_kernel("Break instruction in kernel code", regs);

        if (get_insn_opcode(regs, &opcode))
                return;

        /*
         * There is an ancient bug in MIPS assemblers that causes the break
         * code to start at bit 16 instead of bit 6 in the opcode.
         * Gas is bug-compatible, but not always, grrr...
         * We handle both cases with a simple heuristic. --macro
         */
        bcode = ((opcode >> 6) & ((1 << 20) - 1));
        if (bcode < (1 << 10))
                bcode <<= 10;

        /*
         * (A short test says that IRIX 5.3 sends SIGTRAP for all break
         * insns, even for break codes that indicate arithmetic failures.
         * Weird ...)
         * But should we continue the brokenness??? --macro
         */
        switch (bcode) {
        case BRK_OVERFLOW << 10:
        case BRK_DIVZERO << 10:
                if (bcode == (BRK_DIVZERO << 10))
                        info.si_code = FPE_INTDIV;
                else
                        info.si_code = FPE_INTOVF;
                info.si_signo = SIGFPE;
                info.si_errno = 0;
                info.si_addr = (void __user *) regs->cp0_epc;
                force_sig_info(SIGFPE, &info, current);
                break;
        default:
                force_sig(SIGTRAP, current);
        }
}
asmlinkage void do_tr(struct pt_regs *regs)
{
        unsigned int opcode, tcode = 0;
        siginfo_t info;

        die_if_kernel("Trap instruction in kernel code", regs);

        if (get_insn_opcode(regs, &opcode))
                return;

        /* Immediate versions don't provide a code. */
        if (!(opcode & OPCODE))
                tcode = ((opcode >> 6) & ((1 << 10) - 1));

        /*
         * (A short test says that IRIX 5.3 sends SIGTRAP for all trap
         * insns, even for trap codes that indicate arithmetic failures.
         * Weird ...)
         * But should we continue the brokenness??? --macro
         */
        switch (tcode) {
        case BRK_OVERFLOW:
        case BRK_DIVZERO:
                if (tcode == BRK_DIVZERO)
                        info.si_code = FPE_INTDIV;
                else
                        info.si_code = FPE_INTOVF;
                info.si_signo = SIGFPE;
                info.si_errno = 0;
                info.si_addr = (void __user *) regs->cp0_epc;
                force_sig_info(SIGFPE, &info, current);
                break;
        default:
                force_sig(SIGTRAP, current);
        }
}
asmlinkage void do_ri(struct pt_regs *regs)
{
        die_if_kernel("Reserved instruction in kernel code", regs);

        if (!cpu_has_llsc)
                if (!simulate_llsc(regs))
                        return;

        if (!simulate_rdhwr(regs))
                return;

        force_sig(SIGILL, current);
}
asmlinkage void do_cpu(struct pt_regs *regs)
{
        unsigned int cpid;

        die_if_kernel("do_cpu invoked from kernel context!", regs);

        cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

        switch (cpid) {
        case 0:
                if (!cpu_has_llsc)
                        if (!simulate_llsc(regs))
                                return;

                if (!simulate_rdhwr(regs))
                        return;

                break;

        case 1:
                if (used_math()) {      /* Using the FPU again. */
                        own_fpu();
                } else {                /* First time FPU user. */
                        init_fpu();
                        set_used_math();
                }

                if (!cpu_has_fpu) {
                        int sig = fpu_emulator_cop1Handler(regs,
                                        &current->thread.fpu);
                        if (sig)
                                force_sig(sig, current);
#ifdef CONFIG_MIPS_MT_FPAFF
                        /*
                         * MIPS MT processors may have fewer FPU contexts
                         * than CPU threads. If we've emulated more than
                         * some threshold number of instructions, force
                         * migration to a "CPU" that has FP support.
                         */
                        if (mt_fpemul_threshold > 0 &&
                            ((current->thread.emulated_fp++
                              > mt_fpemul_threshold))) {
                                /*
                                 * If there's no FPU present, or if the
                                 * application has already restricted
                                 * the allowed set to exclude any CPUs
                                 * with FPUs, we'll skip the procedure.
                                 */
                                if (cpus_intersects(current->cpus_allowed,
                                                    mt_fpu_cpumask)) {
                                        cpumask_t tmask;

                                        cpus_and(tmask,
                                                 current->thread.user_cpus_allowed,
                                                 mt_fpu_cpumask);
                                        set_cpus_allowed(current, tmask);
                                        current->thread.mflags |= MF_FPUBOUND;
                                }
                        }
#endif /* CONFIG_MIPS_MT_FPAFF */
                }

                return;

        case 2:
        case 3:
                die_if_kernel("do_cpu invoked from kernel context!", regs);
                break;
        }

        force_sig(SIGILL, current);
}
asmlinkage void do_mdmx(struct pt_regs *regs)
{
        force_sig(SIGILL, current);
}
asmlinkage void do_watch(struct pt_regs *regs)
{
        /*
         * We use the watch exception where available to detect stack
         * overflows.
         */
        dump_tlb_all();
        show_regs(regs);
        panic("Caught WATCH exception - probably caused by stack overflow.");
}
asmlinkage void do_mcheck(struct pt_regs *regs)
{
        const int field = 2 * sizeof(unsigned long);
        int multi_match = regs->cp0_status & ST0_TS;

        show_regs(regs);

        if (multi_match) {
                printk("Index   : %0x\n", read_c0_index());
                printk("Pagemask: %0x\n", read_c0_pagemask());
                printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
                printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
                printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
                printk("\n");
        }

        show_code((unsigned int *) regs->cp0_epc);

        /*
         * Some chips may have other causes of machine check (e.g. SB1
         * graduation timeout)
         */
        panic("Caught Machine Check exception - %scaused by multiple "
              "matching entries in the TLB.",
              (multi_match) ? "" : "not ");
}
asmlinkage void do_mt(struct pt_regs *regs)
{
        int subcode;

        subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
                        >> VPECONTROL_EXCPT_SHIFT;
        switch (subcode) {
        case 0:
                printk(KERN_DEBUG "Thread Underflow\n");
                break;
        case 1:
                printk(KERN_DEBUG "Thread Overflow\n");
                break;
        case 2:
                printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
                break;
        case 3:
                printk(KERN_DEBUG "Gating Storage Exception\n");
                break;
        case 4:
                printk(KERN_DEBUG "YIELD Scheduler Exception\n");
                break;
        case 5:
                printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
                break;
        default:
                printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
                       subcode);
                break;
        }
        die_if_kernel("MIPS MT Thread exception in kernel", regs);

        force_sig(SIGILL, current);
}
asmlinkage void do_dsp(struct pt_regs *regs)
{
        if (cpu_has_dsp)
                panic("Unexpected DSP exception\n");

        force_sig(SIGILL, current);
}
asmlinkage void do_reserved(struct pt_regs *regs)
{
        /*
         * Game over - no way to handle this if it ever occurs. Most probably
         * caused by a new, unknown cpu type or by a preceding fatal
         * hardware/software error.
         */
        show_regs(regs);
        panic("Caught reserved exception %ld - should not happen.",
              (regs->cp0_cause & 0x7f) >> 2);
}
asmlinkage void do_default_vi(struct pt_regs *regs)
{
        show_regs(regs);
        panic("Caught unexpected vectored interrupt.");
}
/*
 * Some MIPS CPUs can enable/disable cache parity detection, but they
 * do it in different ways.
 */
static inline void parity_protection_init(void)
{
        switch (current_cpu_data.cputype) {
        case CPU_24K:
        case CPU_34K:
        case CPU_5KC:
                write_c0_ecc(0x80000000);
                back_to_back_c0_hazard();
                /* Set the PE bit (bit 31) in the c0_errctl register. */
                printk(KERN_INFO "Cache parity protection %sabled\n",
                       (read_c0_ecc() & 0x80000000) ? "en" : "dis");
                break;
        case CPU_20KC:
        case CPU_25KF:
                /* Clear the DE bit (bit 16) in the c0_status register. */
                printk(KERN_INFO "Enable cache parity protection for "
                       "MIPS 20KC/25KF CPUs.\n");
                clear_c0_status(ST0_DE);
                break;
        default:
                break;
        }
}
asmlinkage void cache_parity_error(void)
{
        const int field = 2 * sizeof(unsigned long);
        unsigned int reg_val;

        /* For the moment, report the problem and hang. */
        printk("Cache error exception:\n");
        printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
        reg_val = read_c0_cacheerr();
        printk("c0_cacheerr == %08x\n", reg_val);

        printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
               reg_val & (1<<30) ? "secondary" : "primary",
               reg_val & (1<<31) ? "data" : "insn");
        printk("Error bits: %s%s%s%s%s%s%s\n",
               reg_val & (1<<29) ? "ED " : "",
               reg_val & (1<<28) ? "ET " : "",
               reg_val & (1<<26) ? "EE " : "",
               reg_val & (1<<25) ? "EB " : "",
               reg_val & (1<<24) ? "EI " : "",
               reg_val & (1<<23) ? "E1 " : "",
               reg_val & (1<<22) ? "E0 " : "");
        printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
        if (reg_val & (1<<22))
                printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

        if (reg_val & (1<<23))
                printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
#endif

        panic("Can't handle the cache error!");
}
/*
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
 */
void ejtag_exception_handler(struct pt_regs *regs)
{
        const int field = 2 * sizeof(unsigned long);
        unsigned long depc, old_epc;
        unsigned int debug;

        printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
        depc = read_c0_depc();
        debug = read_c0_debug();
        printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
        if (debug & 0x80000000) {
                /*
                 * In branch delay slot.
                 * We cheat a little bit here and use EPC to calculate the
                 * debug return address (DEPC). EPC is restored after the
                 * calculation.
                 */
                old_epc = regs->cp0_epc;
                regs->cp0_epc = depc;
                __compute_return_epc(regs);
                depc = regs->cp0_epc;
                regs->cp0_epc = old_epc;
        } else
                depc += 4;
        write_c0_depc(depc);

#if 0
        printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
        write_c0_debug(debug | 0x100);
#endif
}
/*
 * NMI exception handler.
 */
void nmi_exception_handler(struct pt_regs *regs)
{
#ifdef CONFIG_MIPS_MT_SMTC
        unsigned long dvpret = dvpe();
        bust_spinlocks(1);
        printk("NMI taken!!!!\n");
        mips_mt_regdump(dvpret);
#else
        bust_spinlocks(1);
        printk("NMI taken!!!!\n");
#endif /* CONFIG_MIPS_MT_SMTC */
        die("NMI", regs);
}
#define VECTORSPACING 0x100     /* for EI/VI mode */

unsigned long ebase;
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];
/*
 * As a side effect of the way this is implemented we're limited
 * to interrupt handlers in the address range from
 * KSEG0 <= x < KSEG0 + 256MB on the Nevada. Oh well ...
 */
void *set_except_vector(int n, void *addr)
{
        unsigned long handler = (unsigned long) addr;
        unsigned long old_handler = exception_handlers[n];

        exception_handlers[n] = handler;
        if (n == 0 && cpu_has_divec) {
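                /*
                 * Patch a "j handler" instruction at the dispatch vector:
                 * opcode 0x08000000 plus the low 26 bits of the target's
                 * word address.
                 */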
                *(volatile u32 *)(ebase + 0x200) = 0x08000000 |
                        (0x03ffffff & (handler >> 2));
                flush_icache_range(ebase + 0x200, ebase + 0x204);
        }
        return (void *)old_handler;
}
#ifdef CONFIG_CPU_MIPSR2_SRS
/*
 * MIPSR2 shadow register set allocation
 */

static struct shadow_registers {
        /*
         * Number of shadow register sets supported
         */
        unsigned long sr_supported;
        /*
         * Bitmap of allocated shadow registers
         */
        unsigned long sr_allocated;
} shadow_registers;
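/*
 * SRSCtl.HSS (bits 29:26) holds the highest shadow set number, so the
 * number of sets implemented is HSS + 1; set 0 is the normal GPR file.
 */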
static void mips_srs_init(void)
{
        shadow_registers.sr_supported = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
        printk(KERN_INFO "%lu MIPSR2 register sets available\n",
               shadow_registers.sr_supported);
        shadow_registers.sr_allocated = 1;      /* Set 0 used by kernel */
}
int mips_srs_max(void)
{
        return shadow_registers.sr_supported;
}

int mips_srs_alloc(void)
{
        struct shadow_registers *sr = &shadow_registers;
        int set;

again:
        set = find_first_zero_bit(&sr->sr_allocated, sr->sr_supported);
        if (set >= sr->sr_supported)
                return -1;

        if (test_and_set_bit(set, &sr->sr_allocated))
                goto again;

        return set;
}

void mips_srs_free(int set)
{
        struct shadow_registers *sr = &shadow_registers;

        clear_bit(set, &sr->sr_allocated);
}
static void *set_vi_srs_handler(int n, void *addr, int srs)
{
        unsigned long handler;
        unsigned long old_handler = vi_handlers[n];
        u32 *w;
        unsigned char *b;

        if (!cpu_has_veic && !cpu_has_vint)
                return NULL;

        if (addr == NULL) {
                handler = (unsigned long) do_default_vi;
                srs = 0;
        } else
                handler = (unsigned long) addr;
        vi_handlers[n] = (unsigned long) addr;

        b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);

        if (srs >= mips_srs_max())
                panic("Shadow register set %d not supported", srs);

        if (cpu_has_veic) {
                if (board_bind_eic_interrupt)
                        board_bind_eic_interrupt(n, srs);
        } else if (cpu_has_vint) {
                /* SRSMap is only defined if shadow sets are implemented */
                if (mips_srs_max() > 1)
                        change_c0_srsmap(0xf << n*4, srs << n*4);
        }
        if (srs == 0) {
                /*
                 * If no shadow set is selected then use the default handler
                 * that does normal register saving and a standard interrupt exit
                 */
                extern char except_vec_vi, except_vec_vi_lui;
                extern char except_vec_vi_ori, except_vec_vi_end;
#ifdef CONFIG_MIPS_MT_SMTC
                /*
                 * We need to provide the SMTC vectored interrupt handler
                 * not only with the address of the handler, but with the
                 * Status.IM bit to be masked before going there.
                 */
                extern char except_vec_vi_mori;
                const int mori_offset = &except_vec_vi_mori - &except_vec_vi;
#endif /* CONFIG_MIPS_MT_SMTC */
                const int handler_len = &except_vec_vi_end - &except_vec_vi;
                const int lui_offset = &except_vec_vi_lui - &except_vec_vi;
                const int ori_offset = &except_vec_vi_ori - &except_vec_vi;
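                /*
                 * The assembly stub at except_vec_vi is copied into the
                 * vector slot, and the immediates of its lui/ori pair are
                 * patched below to form the 32-bit handler address.
                 */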
                if (handler_len > VECTORSPACING) {
                        /*
                         * Sigh... panicking won't help as the console
                         * is probably not configured :(
                         */
                        panic("VECTORSPACING too small");
                }

                memcpy(b, &except_vec_vi, handler_len);
#ifdef CONFIG_MIPS_MT_SMTC
                if (n > 7)
                        printk("Vector index %d exceeds SMTC maximum\n", n);
                w = (u32 *)(b + mori_offset);
                *w = (*w & 0xffff0000) | (0x100 << n);
#endif /* CONFIG_MIPS_MT_SMTC */
                w = (u32 *)(b + lui_offset);
                *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
                w = (u32 *)(b + ori_offset);
                *w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
                flush_icache_range((unsigned long)b, (unsigned long)(b+handler_len));
        }
        else {
                /*
                 * In other cases jump directly to the interrupt handler.
                 *
                 * It is the handler's responsibility to save registers if
                 * required (e.g. hi/lo) and return from the exception using
                 * "eret".
                 */
                w = (u32 *)b;
                *w++ = 0x08000000 | (((u32)handler >> 2) & 0x03ffffff); /* j handler */
                *w = 0;
                flush_icache_range((unsigned long)b, (unsigned long)(b+8));
        }

        return (void *)old_handler;
}
void *set_vi_handler(int n, void *addr)
{
        return set_vi_srs_handler(n, addr, 0);
}

#else

static inline void mips_srs_init(void)
{
}

#endif /* CONFIG_CPU_MIPSR2_SRS */
/*
 * This is used by native signal handling
 */
asmlinkage int (*save_fp_context)(struct sigcontext *sc);
asmlinkage int (*restore_fp_context)(struct sigcontext *sc);

extern asmlinkage int _save_fp_context(struct sigcontext *sc);
extern asmlinkage int _restore_fp_context(struct sigcontext *sc);

extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc);
extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc);

#ifdef CONFIG_SMP
static int smp_save_fp_context(struct sigcontext *sc)
{
        return cpu_has_fpu
               ? _save_fp_context(sc)
               : fpu_emulator_save_context(sc);
}

static int smp_restore_fp_context(struct sigcontext *sc)
{
        return cpu_has_fpu
               ? _restore_fp_context(sc)
               : fpu_emulator_restore_context(sc);
}
#endif
static inline void signal_init(void)
{
#ifdef CONFIG_SMP
        /* For now just do the cpu_has_fpu check when the functions are invoked */
        save_fp_context = smp_save_fp_context;
        restore_fp_context = smp_restore_fp_context;
#else
        if (cpu_has_fpu) {
                save_fp_context = _save_fp_context;
                restore_fp_context = _restore_fp_context;
        } else {
                save_fp_context = fpu_emulator_save_context;
                restore_fp_context = fpu_emulator_restore_context;
        }
#endif
}
#ifdef CONFIG_MIPS32_COMPAT

/*
 * This is used by 32-bit signal stuff on the 64-bit kernel
 */
asmlinkage int (*save_fp_context32)(struct sigcontext32 *sc);
asmlinkage int (*restore_fp_context32)(struct sigcontext32 *sc);

extern asmlinkage int _save_fp_context32(struct sigcontext32 *sc);
extern asmlinkage int _restore_fp_context32(struct sigcontext32 *sc);

extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 *sc);
extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 *sc);

static inline void signal32_init(void)
{
        if (cpu_has_fpu) {
                save_fp_context32 = _save_fp_context32;
                restore_fp_context32 = _restore_fp_context32;
        } else {
                save_fp_context32 = fpu_emulator_save_context32;
                restore_fp_context32 = fpu_emulator_restore_context32;
        }
}
#endif
extern void cpu_cache_init(void);
extern void tlb_init(void);
extern void flush_tlb_handlers(void);
void __init per_cpu_trap_init(void)
{
        unsigned int cpu = smp_processor_id();
        unsigned int status_set = ST0_CU0;
#ifdef CONFIG_MIPS_MT_SMTC
        int secondaryTC = 0;
        int bootTC = (cpu == 0);

        /*
         * Only do per_cpu_trap_init() for the first TC of each VPE.
         * Note that this hack assumes that the SMTC init code
         * assigns TCs consecutively and in ascending order.
         */

        if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
            ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
                secondaryTC = 1;
#endif /* CONFIG_MIPS_MT_SMTC */

        /*
         * Disable coprocessors and select 32-bit or 64-bit addressing
         * and the 16/32 or 32/32 FPR register model. Reset the BEV
         * flag that some firmware may have left set and the TS bit (for
         * IP27). Set XX for ISA IV code to work.
         */
#ifdef CONFIG_64BIT
        status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
#endif
        if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV)
                status_set |= ST0_XX;
        change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
                         status_set);

        if (cpu_has_dsp)
                set_c0_status(ST0_MX);

#ifdef CONFIG_CPU_MIPSR2
        write_c0_hwrena(0x0000000f);    /* Allow rdhwr to all registers */
#endif

#ifdef CONFIG_MIPS_MT_SMTC
        if (!secondaryTC) {
#endif /* CONFIG_MIPS_MT_SMTC */
        /*
         * Interrupt handling.
         */
        if (cpu_has_veic || cpu_has_vint) {
                write_c0_ebase(ebase);
                /* Setting vector spacing enables EI/VI mode */
                change_c0_intctl(0x3e0, VECTORSPACING);
        }
        if (cpu_has_divec) {
                if (cpu_has_mipsmt) {
                        unsigned int vpflags = dvpe();
                        set_c0_cause(CAUSEF_IV);
                        evpe(vpflags);
                } else
                        set_c0_cause(CAUSEF_IV);
        }
#ifdef CONFIG_MIPS_MT_SMTC
        }
#endif /* CONFIG_MIPS_MT_SMTC */

        cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
        TLBMISS_HANDLER_SETUP();

        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
        BUG_ON(current->mm);
        enter_lazy_tlb(&init_mm, current);

#ifdef CONFIG_MIPS_MT_SMTC
        if (bootTC) {
#endif /* CONFIG_MIPS_MT_SMTC */
                cpu_cache_init();
                tlb_init();
#ifdef CONFIG_MIPS_MT_SMTC
        }
#endif /* CONFIG_MIPS_MT_SMTC */
}
/* Install CPU exception handler */
void __init set_handler(unsigned long offset, void *addr, unsigned long size)
{
        memcpy((void *)(ebase + offset), addr, size);
        flush_icache_range(ebase + offset, ebase + offset + size);
}

/* Install uncached CPU exception handler */
void __init set_uncached_handler(unsigned long offset, void *addr, unsigned long size)
{
#ifdef CONFIG_32BIT
        unsigned long uncached_ebase = KSEG1ADDR(ebase);
#endif
#ifdef CONFIG_64BIT
        unsigned long uncached_ebase = TO_UNCAC(ebase);
#endif

        memcpy((void *)(uncached_ebase + offset), addr, size);
}
void __init trap_init(void)
{
        extern char except_vec3_generic, except_vec3_r4000;
        extern char except_vec4;
        unsigned long i;

        if (cpu_has_veic || cpu_has_vint)
                ebase = (unsigned long) alloc_bootmem_low_pages(0x200 + VECTORSPACING*64);
        else
                ebase = CAC_BASE;

        mips_srs_init();

        per_cpu_trap_init();

        /*
         * Copy the generic exception handlers to their final destination.
         * This will be overridden later as suitable for a particular
         * configuration.
         */
        set_handler(0x180, &except_vec3_generic, 0x80);

        /*
         * Setup default vectors
         */
        for (i = 0; i <= 31; i++)
                set_except_vector(i, handle_reserved);

        /*
         * Copy the EJTAG debug exception vector handler code to its final
         * destination.
         */
        if (cpu_has_ejtag && board_ejtag_handler_setup)
                board_ejtag_handler_setup();

        /*
         * Only some CPUs have the watch exceptions.
         */
        if (cpu_has_watch)
                set_except_vector(23, handle_watch);

        /*
         * Initialise interrupt handlers
         */
        if (cpu_has_veic || cpu_has_vint) {
                int nvec = cpu_has_veic ? 64 : 8;
                for (i = 0; i < nvec; i++)
                        set_vi_handler(i, NULL);
        }
        else if (cpu_has_divec)
                set_handler(0x200, &except_vec4, 0x8);

        /*
         * Some CPUs can enable/disable cache parity detection, but they
         * do it in different ways.
         */
        parity_protection_init();
        /*
         * The Data Bus Errors / Instruction Bus Errors are signaled
         * by external hardware. Therefore these two exceptions
         * may have board-specific handlers.
         */
        if (board_be_init)
                board_be_init();

        set_except_vector(0, handle_int);
        set_except_vector(1, handle_tlbm);
        set_except_vector(2, handle_tlbl);
        set_except_vector(3, handle_tlbs);

        set_except_vector(4, handle_adel);
        set_except_vector(5, handle_ades);

        set_except_vector(6, handle_ibe);
        set_except_vector(7, handle_dbe);

        set_except_vector(8, handle_sys);
        set_except_vector(9, handle_bp);
        set_except_vector(10, handle_ri);
        set_except_vector(11, handle_cpu);
        set_except_vector(12, handle_ov);
        set_except_vector(13, handle_tr);
        if (current_cpu_data.cputype == CPU_R6000 ||
            current_cpu_data.cputype == CPU_R6000A) {
                /*
                 * The R6000 is the only R-series CPU that features a machine
                 * check exception (similar to the R4000 cache error) and
                 * unaligned ldc1/sdc1 exception. The handlers have not been
                 * written yet. Well, anyway there is no R6000 machine on the
                 * current list of targets for Linux/MIPS.
                 * (Duh, crap, there is someone with a triple R6k machine)
                 */
                //set_except_vector(14, handle_mc);
                //set_except_vector(15, handle_ndc);
        }
        if (board_nmi_handler_setup)
                board_nmi_handler_setup();

        if (cpu_has_fpu && !cpu_has_nofpuex)
                set_except_vector(15, handle_fpe);

        if (cpu_has_mdmx)
                set_except_vector(22, handle_mdmx);

        if (cpu_has_mcheck)
                set_except_vector(24, handle_mcheck);

        if (cpu_has_mipsmt)
                set_except_vector(25, handle_mt);

        if (cpu_has_dsp)
                set_except_vector(26, handle_dsp);

        if (cpu_has_vce)
                /* Special exception: R4[04]00 also uses the divec space. */
                memcpy((void *)(CAC_BASE + 0x180), &except_vec3_r4000, 0x100);
        else if (cpu_has_4kex)
                memcpy((void *)(CAC_BASE + 0x180), &except_vec3_generic, 0x80);
        else
                memcpy((void *)(CAC_BASE + 0x080), &except_vec3_generic, 0x80);

        signal_init();
#ifdef CONFIG_MIPS32_COMPAT
        signal32_init();
#endif

        flush_icache_range(ebase, ebase + 0x400);
        flush_tlb_handlers();
}