 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
 * Copyright (C) 1995, 1996 Paul M. Antoine
 * Copyright (C) 1998 Ulf Carlsson
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2000, 01 MIPS Technologies, Inc.
 * Copyright (C) 2002, 2003, 2004, 2005  Maciej W. Rozycki
#include <linux/init.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/bootmem.h>
#include <linux/interrupt.h>

#include <asm/bootinfo.h>
#include <asm/branch.h>
#include <asm/break.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/module.h>
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/system.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/watch.h>
#include <asm/types.h>
extern asmlinkage void handle_int(void);
extern asmlinkage void handle_tlbm(void);
extern asmlinkage void handle_tlbl(void);
extern asmlinkage void handle_tlbs(void);
extern asmlinkage void handle_adel(void);
extern asmlinkage void handle_ades(void);
extern asmlinkage void handle_ibe(void);
extern asmlinkage void handle_dbe(void);
extern asmlinkage void handle_sys(void);
extern asmlinkage void handle_bp(void);
extern asmlinkage void handle_ri(void);
extern asmlinkage void handle_cpu(void);
extern asmlinkage void handle_ov(void);
extern asmlinkage void handle_tr(void);
extern asmlinkage void handle_fpe(void);
extern asmlinkage void handle_mdmx(void);
extern asmlinkage void handle_watch(void);
extern asmlinkage void handle_mt(void);
extern asmlinkage void handle_dsp(void);
extern asmlinkage void handle_mcheck(void);
extern asmlinkage void handle_reserved(void);

extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
        struct mips_fpu_struct *ctx);
void (*board_be_init)(void);
int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
void (*board_nmi_handler_setup)(void);
void (*board_ejtag_handler_setup)(void);
void (*board_bind_eic_interrupt)(int irq, int regset);
static void show_trace(unsigned long *stack)
        const int field = 2 * sizeof(unsigned long);

        printk("Call Trace:");
#ifdef CONFIG_KALLSYMS
        while (!kstack_end(stack)) {
                if (__kernel_text_address(addr)) {
                        printk(" [<%0*lx>] ", field, addr);
                        print_symbol("%s\n", addr);

#ifdef CONFIG_KALLSYMS
static int raw_show_trace;
static int __init set_raw_show_trace(char *str)
__setup("raw_show_trace", set_raw_show_trace);
extern unsigned long unwind_stack(struct task_struct *task,
        unsigned long **sp, unsigned long pc);

static void show_frametrace(struct task_struct *task, struct pt_regs *regs)
        const int field = 2 * sizeof(unsigned long);
        unsigned long *stack = (unsigned long *)regs->regs[29];
        unsigned long pc = regs->cp0_epc;

        if (raw_show_trace || !__kernel_text_address(pc)) {

        printk("Call Trace:\n");
        while (__kernel_text_address(pc)) {
                printk(" [<%0*lx>] ", field, pc);
                print_symbol("%s\n", pc);
                pc = unwind_stack(task, &stack, pc);
                        pc = regs->regs[31];    /* leaf? */

#define show_frametrace(task, r) show_trace((unsigned long *)(r)->regs[29])
 * This routine abuses get_user()/put_user() to reference pointers
 * with at least a bit of error checking ...
static void show_stacktrace(struct task_struct *task, struct pt_regs *regs)
        const int field = 2 * sizeof(unsigned long);
        unsigned long *sp = (unsigned long *)regs->regs[29];

        while ((unsigned long) sp & (PAGE_SIZE - 1)) {
                if (i && ((i % (64 / field)) == 0))
                if (__get_user(stackdata, sp++)) {
                        printk(" (Bad stack address)");
                printk(" %0*lx", field, stackdata);

        show_frametrace(task, regs);
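
/*
 * Note: prepare_frametrace() below snapshots the caller's own epc, sp ($29)
 * and ra ($31) into a pt_regs, so the unwinder has a starting point even
 * when no exception frame is available (e.g. a plain dump_stack() call).
 */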
static noinline void prepare_frametrace(struct pt_regs *regs)
        __asm__ __volatile__(
        : "=m" (regs->cp0_epc),
          "=m" (regs->regs[29]), "=m" (regs->regs[31])

void show_stack(struct task_struct *task, unsigned long *sp)
                regs.regs[29] = (unsigned long)sp;
                if (task && task != current) {
                        regs.regs[29] = task->thread.reg29;
                        regs.cp0_epc = task->thread.reg31;
                        prepare_frametrace(&regs);
        show_stacktrace(task, &regs);
 * The architecture-independent dump_stack generator
void dump_stack(void)
#ifdef CONFIG_KALLSYMS
        if (!raw_show_trace) {
                prepare_frametrace(&regs);
                show_frametrace(current, &regs);

EXPORT_SYMBOL(dump_stack);

void show_code(unsigned int *pc)
        for (i = -3; i < 6; i++) {
                if (__get_user(insn, pc + i)) {
                        printk(" (Bad address in epc)\n");
                printk("%c%08x%c", (i ? ' ' : '<'), insn, (i ? ' ' : '>'));
void show_regs(struct pt_regs *regs)
        const int field = 2 * sizeof(unsigned long);
        unsigned int cause = regs->cp0_cause;

        printk("Cpu %d\n", smp_processor_id());

         * Saved main processor registers
        for (i = 0; i < 32; ) {
                        printk(" %0*lx", field, 0UL);
                else if (i == 26 || i == 27)
                        printk(" %*s", field, "");
                        printk(" %0*lx", field, regs->regs[i]);

        printk("Hi    : %0*lx\n", field, regs->hi);
        printk("Lo    : %0*lx\n", field, regs->lo);

         * Saved cp0 registers
        printk("epc   : %0*lx ", field, regs->cp0_epc);
        print_symbol("%s ", regs->cp0_epc);
        printk("    %s\n", print_tainted());
        printk("ra    : %0*lx ", field, regs->regs[31]);
        print_symbol("%s\n", regs->regs[31]);

        printk("Status: %08x    ", (uint32_t) regs->cp0_status);

        if (current_cpu_data.isa_level == MIPS_CPU_ISA_I) {
                if (regs->cp0_status & ST0_KUO)
                if (regs->cp0_status & ST0_IEO)
                if (regs->cp0_status & ST0_KUP)
                if (regs->cp0_status & ST0_IEP)
                if (regs->cp0_status & ST0_KUC)
                if (regs->cp0_status & ST0_IEC)
                if (regs->cp0_status & ST0_KX)
                if (regs->cp0_status & ST0_SX)
                if (regs->cp0_status & ST0_UX)
                switch (regs->cp0_status & ST0_KSU) {
                        printk("SUPERVISOR ");

        if (regs->cp0_status & ST0_ERL)
        if (regs->cp0_status & ST0_EXL)
        if (regs->cp0_status & ST0_IE)

        printk("Cause : %08x\n", cause);

        cause = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
        if (1 <= cause && cause <= 5)
                printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);

        printk("PrId  : %08x\n", read_c0_prid());
void show_registers(struct pt_regs *regs)
        printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
               current->comm, current->pid, current_thread_info(), current);
        show_stacktrace(current, regs);
        show_code((unsigned int *) regs->cp0_epc);
static DEFINE_SPINLOCK(die_lock);

NORET_TYPE void ATTRIB_NORET die(const char *str, struct pt_regs *regs)
        static int die_counter;
#ifdef CONFIG_MIPS_MT_SMTC
        unsigned long dvpret = dvpe();
#endif /* CONFIG_MIPS_MT_SMTC */

        spin_lock_irq(&die_lock);
#ifdef CONFIG_MIPS_MT_SMTC
        mips_mt_regdump(dvpret);
#endif /* CONFIG_MIPS_MT_SMTC */
        printk("%s[#%d]:\n", str, ++die_counter);
        show_registers(regs);
        spin_unlock_irq(&die_lock);

                panic("Fatal exception in interrupt");

                printk(KERN_EMERG "Fatal exception: panic in 5 seconds\n");

        panic("Fatal exception");
extern const struct exception_table_entry __start___dbe_table[];
extern const struct exception_table_entry __stop___dbe_table[];

void __declare_dbe_table(void)
        __asm__ __volatile__(
        ".section\t__dbe_table,\"a\"\n\t"

/* Given an address, look for it in the exception tables. */
static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
        const struct exception_table_entry *e;

        e = search_extable(__start___dbe_table, __stop___dbe_table - 1, addr);
                e = search_module_dbetables(addr);
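
/*
 * Bus errors are reported by external hardware, so a board_be_handler,
 * if registered, gets the final say: it returns MIPS_BE_DISCARD (ignore
 * the faulting access), MIPS_BE_FIXUP (resume at the __dbe_table fixup
 * address) or MIPS_BE_FATAL (fall through to the oops/SIGBUS path below).
 */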
asmlinkage void do_be(struct pt_regs *regs)
        const int field = 2 * sizeof(unsigned long);
        const struct exception_table_entry *fixup = NULL;
        int data = regs->cp0_cause & 4;
        int action = MIPS_BE_FATAL;

        /* XXX For now.  Fixme, this searches the wrong table ...  */
        if (data && !user_mode(regs))
                fixup = search_dbe_tables(exception_epc(regs));

                action = MIPS_BE_FIXUP;

        if (board_be_handler)
                action = board_be_handler(regs, fixup != 0);

        case MIPS_BE_DISCARD:
                        regs->cp0_epc = fixup->nextinsn;

         * Assume it would be too dangerous to continue ...
        printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
               data ? "Data" : "Instruction",
               field, regs->cp0_epc, field, regs->regs[31]);
        die_if_kernel("Oops", regs);
        force_sig(SIGBUS, current);
static inline int get_insn_opcode(struct pt_regs *regs, unsigned int *opcode)
        unsigned int __user *epc;

        epc = (unsigned int __user *) regs->cp0_epc +
              ((regs->cp0_cause & CAUSEF_BD) != 0);
        if (!get_user(*opcode, epc))

        force_sig(SIGSEGV, current);
#define OPCODE 0xfc000000
#define BASE   0x03e00000
#define RT     0x001f0000
#define OFFSET 0x0000ffff
#define LL     0xc0000000
#define SC     0xe0000000
#define SPEC3  0x7c000000
#define RD     0x0000f800
#define FUNC   0x0000003f
#define RDHWR  0x0000003b
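
/*
 * The masks above pick fields out of a 32-bit MIPS instruction word:
 * OPCODE is bits 31:26, BASE the rs/base register in bits 25:21, RT
 * bits 20:16, RD bits 15:11, FUNC bits 5:0 and OFFSET the signed 16-bit
 * immediate.  E.g. "ll $8, 16($5)" decodes as OPCODE = LL, BASE = 5,
 * RT = 8, OFFSET = 16; "rdhwr $3, $29" is OPCODE = SPEC3 with
 * FUNC = RDHWR, RD = 29 and RT = 3.
 */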
 * The ll_bit is cleared by r*_switch.S
unsigned long ll_bit;

static struct task_struct *ll_task = NULL;

static inline void simulate_ll(struct pt_regs *regs, unsigned int opcode)
        unsigned long value, __user *vaddr;

         * analyse the ll instruction that just caused an RI exception
         * and put the referenced address into vaddr.

        /* sign extend offset */
        offset = opcode & OFFSET;

        vaddr = (unsigned long __user *)
                ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);

        if ((unsigned long)vaddr & 3) {
        if (get_user(value, vaddr)) {

        if (ll_task == NULL || ll_task == current) {

        compute_return_epc(regs);
        regs->regs[(opcode & RT) >> 16] = value;

        force_sig(signal, current);
static inline void simulate_sc(struct pt_regs *regs, unsigned int opcode)
        unsigned long __user *vaddr;

         * analyse the sc instruction that just caused an RI exception
         * and put the referenced address into vaddr.

        /* sign extend offset */
        offset = opcode & OFFSET;

        vaddr = (unsigned long __user *)
                ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
        reg = (opcode & RT) >> 16;

        if ((unsigned long)vaddr & 3) {

        if (ll_bit == 0 || ll_task != current) {
                compute_return_epc(regs);

        if (put_user(regs->regs[reg], vaddr)) {

        compute_return_epc(regs);

        force_sig(signal, current);
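
/*
 * Together these two routines emulate the LL/SC link: simulate_ll()
 * loads the word and records current as ll_task (setting ll_bit along
 * the way), while simulate_sc() lets the store succeed only while
 * ll_bit is still set and ll_task is still current; otherwise the rt
 * register reports failure, mirroring real LL/SC semantics.
 */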
 * ll uses the opcode of lwc0 and sc uses the opcode of swc0.  That is, both
 * opcodes are supposed to result in coprocessor unusable exceptions if
 * executed on ll/sc-less processors.  That's the theory.  In practice a
 * few processors such as NEC's VR4100 throw reserved instruction exceptions
 * instead, so we're doing the emulation thing in both exception handlers.
static inline int simulate_llsc(struct pt_regs *regs)
        if (unlikely(get_insn_opcode(regs, &opcode)))

        if ((opcode & OPCODE) == LL) {
                simulate_ll(regs, opcode);

        if ((opcode & OPCODE) == SC) {
                simulate_sc(regs, opcode);

        return -EFAULT;                 /* Strange things going on ... */
 * Simulate trapping 'rdhwr' instructions to provide user accessible
 * registers not implemented in hardware.  The only current use of this
 * is the thread area pointer.
static inline int simulate_rdhwr(struct pt_regs *regs)
        struct thread_info *ti = task_thread_info(current);

        if (unlikely(get_insn_opcode(regs, &opcode)))

        if (unlikely(compute_return_epc(regs)))

        if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
                int rd = (opcode & RD) >> 11;
                int rt = (opcode & RT) >> 16;
                        regs->regs[rt] = ti->tp_value;
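
/*
 * Example: the TLS access sequence emitted by glibc is "rdhwr $3, $29".
 * On cores without a usable rdhwr this traps as a reserved instruction,
 * and the code above supplies hardware register 29 (the userlocal/TLS
 * pointer) from thread_info->tp_value instead.
 */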
asmlinkage void do_ov(struct pt_regs *regs)
        die_if_kernel("Integer overflow", regs);

        info.si_code = FPE_INTOVF;
        info.si_signo = SIGFPE;
        info.si_addr = (void __user *) regs->cp0_epc;
        force_sig_info(SIGFPE, &info, current);
 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
        die_if_kernel("FP exception in kernel code", regs);

        if (fcr31 & FPU_CSR_UNI_X) {
#ifdef CONFIG_PREEMPT
                if (!is_fpu_owner()) {
                        /* We might lose fpu before disabling preempt... */
                        BUG_ON(!used_math());

                 * Unimplemented operation exception.  If we've got the full
                 * software emulator on-board, let's use it...
                 * Force FPU to dump state into task/thread context.  We're
                 * moving a lot of data here for what is probably a single
                 * instruction, but the alternative is to pre-decode the FP
                 * register operands before invoking the emulator, which seems
                 * a bit extreme for what should be an infrequent event.
                /* Ensure 'resume' does not overwrite the saved FP context again. */

                /* Run the emulator */
                sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu);

                own_fpu();      /* Using the FPU again.  */
                 * We can't allow the emulated instruction to leave any of
                 * the cause bits set in $fcr31.
                current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;

                /* Restore the hardware register state */

                /* If something went wrong, signal */
                        force_sig(sig, current);

        force_sig(SIGFPE, current);
asmlinkage void do_bp(struct pt_regs *regs)
        unsigned int opcode, bcode;

        die_if_kernel("Break instruction in kernel code", regs);

        if (get_insn_opcode(regs, &opcode))

         * There is an ancient bug in MIPS assemblers that places the break
         * code starting at bit 16 instead of bit 6 in the opcode.
         * Gas is bug-compatible, but not always, grrr...
         * We handle both cases with a simple heuristic.  --macro
        bcode = ((opcode >> 6) & ((1 << 20) - 1));
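        /*
         * bcode now holds the full 20-bit code field (bits 25:6).  A code
         * assembled the old, buggy way sits at bit 16 and so already
         * appears here as code << 10; a correctly assembled code sits in
         * the low ten bits and gets normalised to the same position, so
         * both forms compare equal against the BRK_* constants << 10.
         */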
        if (bcode < (1 << 10))

         * (A short test says that IRIX 5.3 sends SIGTRAP for all break
         * insns, even for break codes that indicate arithmetic failures.
         * But should we continue the brokenness???  --macro
        case BRK_OVERFLOW << 10:
        case BRK_DIVZERO << 10:
                if (bcode == (BRK_DIVZERO << 10))
                        info.si_code = FPE_INTDIV;
                        info.si_code = FPE_INTOVF;
                info.si_signo = SIGFPE;
                info.si_addr = (void __user *) regs->cp0_epc;
                force_sig_info(SIGFPE, &info, current);
                force_sig(SIGTRAP, current);
asmlinkage void do_tr(struct pt_regs *regs)
        unsigned int opcode, tcode = 0;

        die_if_kernel("Trap instruction in kernel code", regs);

        if (get_insn_opcode(regs, &opcode))

        /* Immediate versions don't provide a code.  */
        if (!(opcode & OPCODE))
                tcode = ((opcode >> 6) & ((1 << 10) - 1));

         * (A short test says that IRIX 5.3 sends SIGTRAP for all trap
         * insns, even for trap codes that indicate arithmetic failures.
         * But should we continue the brokenness???  --macro
                if (tcode == BRK_DIVZERO)
                        info.si_code = FPE_INTDIV;
                        info.si_code = FPE_INTOVF;
                info.si_signo = SIGFPE;
                info.si_addr = (void __user *) regs->cp0_epc;
                force_sig_info(SIGFPE, &info, current);
                force_sig(SIGTRAP, current);
asmlinkage void do_ri(struct pt_regs *regs)
        die_if_kernel("Reserved instruction in kernel code", regs);

        if (!simulate_llsc(regs))

        if (!simulate_rdhwr(regs))

        force_sig(SIGILL, current);
asmlinkage void do_cpu(struct pt_regs *regs)
        die_if_kernel("do_cpu invoked from kernel context!", regs);

        cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;

                if (!simulate_llsc(regs))
                if (!simulate_rdhwr(regs))

                if (used_math()) {      /* Using the FPU again.  */
                } else {                /* First time FPU user.  */

                        int sig = fpu_emulator_cop1Handler(regs,
                                                &current->thread.fpu);
                                force_sig(sig, current);
#ifdef CONFIG_MIPS_MT_FPAFF
                         * MIPS MT processors may have fewer FPU contexts
                         * than CPU threads.  If we've emulated more than
                         * some threshold number of instructions, force
                         * migration to a "CPU" that has FP support.
                        if (mt_fpemul_threshold > 0 &&
                            ((current->thread.emulated_fp++ >
                              mt_fpemul_threshold))) {
                                 * If there's no FPU present, or if the
                                 * application has already restricted
                                 * the allowed set to exclude any CPUs
                                 * with FPUs, we'll skip the procedure.
                                if (cpus_intersects(current->cpus_allowed,
                                                current->thread.user_cpus_allowed,
                                        set_cpus_allowed(current, tmask);
                                        current->thread.mflags |= MF_FPUBOUND;
#endif /* CONFIG_MIPS_MT_FPAFF */

                die_if_kernel("do_cpu invoked from kernel context!", regs);

        force_sig(SIGILL, current);
asmlinkage void do_mdmx(struct pt_regs *regs)
        force_sig(SIGILL, current);

asmlinkage void do_watch(struct pt_regs *regs)
         * We use the watch exception where available to detect stack
         * overflows.
        panic("Caught WATCH exception - probably caused by stack overflow.");
asmlinkage void do_mcheck(struct pt_regs *regs)
        const int field = 2 * sizeof(unsigned long);
        int multi_match = regs->cp0_status & ST0_TS;

                printk("Index   : %0x\n", read_c0_index());
                printk("Pagemask: %0x\n", read_c0_pagemask());
                printk("EntryHi : %0*lx\n", field, read_c0_entryhi());
                printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
                printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1());

        show_code((unsigned int *) regs->cp0_epc);

         * Some chips may have other causes of machine check (e.g. SB1
        panic("Caught Machine Check exception - %scaused by multiple "
              "matching entries in the TLB.",
              (multi_match) ? "" : "not ");
asmlinkage void do_mt(struct pt_regs *regs)
        subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
                  >> VPECONTROL_EXCPT_SHIFT;
                printk(KERN_DEBUG "Thread Underflow\n");
                printk(KERN_DEBUG "Thread Overflow\n");
                printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
                printk(KERN_DEBUG "Gating Storage Exception\n");
                printk(KERN_DEBUG "YIELD Scheduler Exception\n");
                printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
                printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",

        die_if_kernel("MIPS MT Thread exception in kernel", regs);

        force_sig(SIGILL, current);
asmlinkage void do_dsp(struct pt_regs *regs)
                panic("Unexpected DSP exception\n");

        force_sig(SIGILL, current);

asmlinkage void do_reserved(struct pt_regs *regs)
         * Game over - no way to handle this if it ever occurs.  Most probably
         * caused by a new unknown cpu type or after another deadly
         * hardware/software error.
        panic("Caught reserved exception %ld - should not happen.",
              (regs->cp0_cause & 0x7f) >> 2);

asmlinkage void do_default_vi(struct pt_regs *regs)
        panic("Caught unexpected vectored interrupt.");
 * Some MIPS CPUs can enable/disable cache parity detection, but they
 * do it in different ways.
static inline void parity_protection_init(void)
        switch (current_cpu_data.cputype) {
                write_c0_ecc(0x80000000);
                back_to_back_c0_hazard();
                /* Set the PE bit (bit 31) in the c0_errctl register. */
                printk(KERN_INFO "Cache parity protection %sabled\n",
                       (read_c0_ecc() & 0x80000000) ? "en" : "dis");

                /* Clear the DE bit (bit 16) in the c0_status register. */
                printk(KERN_INFO "Enable cache parity protection for "
                       "MIPS 20KC/25KF CPUs.\n");
                clear_c0_status(ST0_DE);
asmlinkage void cache_parity_error(void)
        const int field = 2 * sizeof(unsigned long);
        unsigned int reg_val;

        /* For the moment, report the problem and hang. */
        printk("Cache error exception:\n");
        printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
        reg_val = read_c0_cacheerr();
        printk("c0_cacheerr == %08x\n", reg_val);

        printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
               reg_val & (1<<30) ? "secondary" : "primary",
               reg_val & (1<<31) ? "data" : "insn");
        printk("Error bits: %s%s%s%s%s%s%s\n",
               reg_val & (1<<29) ? "ED " : "",
               reg_val & (1<<28) ? "ET " : "",
               reg_val & (1<<26) ? "EE " : "",
               reg_val & (1<<25) ? "EB " : "",
               reg_val & (1<<24) ? "EI " : "",
               reg_val & (1<<23) ? "E1 " : "",
               reg_val & (1<<22) ? "E0 " : "");
        printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));

#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
        if (reg_val & (1<<22))
                printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());

        if (reg_val & (1<<23))
                printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());

        panic("Can't handle the cache error!");
 * SDBBP EJTAG debug exception handler.
 * We skip the instruction and return to the next instruction.
void ejtag_exception_handler(struct pt_regs *regs)
        const int field = 2 * sizeof(unsigned long);
        unsigned long depc, old_epc;

        printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
        depc = read_c0_depc();
        debug = read_c0_debug();
        printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
        if (debug & 0x80000000) {
                 * In branch delay slot.
                 * We cheat a little bit here and use EPC to calculate the
                 * debug return address (DEPC).  EPC is restored after the
                old_epc = regs->cp0_epc;
                regs->cp0_epc = depc;
                __compute_return_epc(regs);
                depc = regs->cp0_epc;
                regs->cp0_epc = old_epc;
        write_c0_depc(depc);

        printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
        write_c0_debug(debug | 0x100);
 * NMI exception handler.
void nmi_exception_handler(struct pt_regs *regs)
#ifdef CONFIG_MIPS_MT_SMTC
        unsigned long dvpret = dvpe();
        printk("NMI taken!!!!\n");
        mips_mt_regdump(dvpret);
        printk("NMI taken!!!!\n");
#endif /* CONFIG_MIPS_MT_SMTC */
#define VECTORSPACING 0x100     /* for EI/VI mode */

unsigned long ebase;
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];
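
/*
 * exception_handlers[] holds one entry per ExcCode (0-31) and is
 * consulted by the low-level vector code; vi_handlers[] covers the up
 * to 64 vectored/EIC interrupt sources installed via set_vi_handler().
 */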
 * As a side effect of the way this is implemented we're limited
 * to interrupt handlers in the address range from
 * KSEG0 <= x < KSEG0 + 256mb on the Nevada.  Oh well ...
void *set_except_vector(int n, void *addr)
        unsigned long handler = (unsigned long) addr;
        unsigned long old_handler = exception_handlers[n];

        exception_handlers[n] = handler;
        if (n == 0 && cpu_has_divec) {
                *(volatile u32 *)(ebase + 0x200) = 0x08000000 |
                        (0x03ffffff & (handler >> 2));
                flush_icache_range(ebase + 0x200, ebase + 0x204);

        return (void *)old_handler;
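
/*
 * The word stored at ebase + 0x200 above is a MIPS "j" instruction:
 * opcode 000010 in the top six bits (0x08000000) with the low 26 bits
 * holding handler >> 2, which is what restricts dedicated-interrupt
 * handlers to the 256MB segment mentioned in the comment above.
 */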
#ifdef CONFIG_CPU_MIPSR2_SRS
 * MIPSR2 shadow register set allocation
static struct shadow_registers {
         * Number of shadow register sets supported
        unsigned long sr_supported;
         * Bitmap of allocated shadow registers
        unsigned long sr_allocated;

static void mips_srs_init(void)
        shadow_registers.sr_supported = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
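        /*
         * Bits 29:26 of c0_srsctl are the HSS field, the highest shadow
         * set number implemented, so the count of usable sets is HSS + 1.
         */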
        printk(KERN_INFO "%lu MIPSR2 register sets available\n",
               shadow_registers.sr_supported);
        shadow_registers.sr_allocated = 1;      /* Set 0 used by kernel */

int mips_srs_max(void)
        return shadow_registers.sr_supported;

int mips_srs_alloc(void)
        struct shadow_registers *sr = &shadow_registers;

        set = find_first_zero_bit(&sr->sr_allocated, sr->sr_supported);
        if (set >= sr->sr_supported)

        if (test_and_set_bit(set, &sr->sr_allocated))

void mips_srs_free(int set)
        struct shadow_registers *sr = &shadow_registers;

        clear_bit(set, &sr->sr_allocated);
static void *set_vi_srs_handler(int n, void *addr, int srs)
        unsigned long handler;
        unsigned long old_handler = vi_handlers[n];

        if (!cpu_has_veic && !cpu_has_vint)

                handler = (unsigned long) do_default_vi;
                handler = (unsigned long) addr;
                vi_handlers[n] = (unsigned long) addr;

        b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);

        if (srs >= mips_srs_max())
                panic("Shadow register set %d not supported", srs);

                if (board_bind_eic_interrupt)
                        board_bind_eic_interrupt(n, srs);
        } else if (cpu_has_vint) {
                /* SRSMap is only defined if shadow sets are implemented */
                if (mips_srs_max() > 1)
                        change_c0_srsmap(0xf << n*4, srs << n*4);

                 * If no shadow set is selected then use the default handler
                 * that does normal register saving and a standard interrupt exit
                extern char except_vec_vi, except_vec_vi_lui;
                extern char except_vec_vi_ori, except_vec_vi_end;
#ifdef CONFIG_MIPS_MT_SMTC
                 * We need to provide the SMTC vectored interrupt handler
                 * not only with the address of the handler, but with the
                 * Status.IM bit to be masked before going there.
                extern char except_vec_vi_mori;
                const int mori_offset = &except_vec_vi_mori - &except_vec_vi;
#endif /* CONFIG_MIPS_MT_SMTC */
                const int handler_len = &except_vec_vi_end - &except_vec_vi;
                const int lui_offset = &except_vec_vi_lui - &except_vec_vi;
                const int ori_offset = &except_vec_vi_ori - &except_vec_vi;
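
                /*
                 * except_vec_vi is a small template stub: a private copy is
                 * made for this vector and its lui/ori (and, for SMTC, mori)
                 * immediates are patched below with the two halves of the
                 * real handler address before the icache is flushed.
                 */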
                if (handler_len > VECTORSPACING) {
                         * Sigh... panicking won't help as the console
                         * is probably not configured :(
                        panic("VECTORSPACING too small");

                memcpy(b, &except_vec_vi, handler_len);
#ifdef CONFIG_MIPS_MT_SMTC
                        printk("Vector index %d exceeds SMTC maximum\n", n);
                w = (u32 *)(b + mori_offset);
                *w = (*w & 0xffff0000) | (0x100 << n);
#endif /* CONFIG_MIPS_MT_SMTC */
                w = (u32 *)(b + lui_offset);
                *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
                w = (u32 *)(b + ori_offset);
                *w = (*w & 0xffff0000) | ((u32)handler & 0xffff);
                flush_icache_range((unsigned long)b, (unsigned long)(b+handler_len));

                 * In other cases jump directly to the interrupt handler.
                 * It is the handler's responsibility to save registers if
                 * required (e.g. hi/lo) and return from the exception using
                 * "eret".
                *w++ = 0x08000000 | (((u32)handler >> 2) & 0x03ffffff);        /* j handler */
                flush_icache_range((unsigned long)b, (unsigned long)(b+8));

        return (void *)old_handler;

void *set_vi_handler(int n, void *addr)
        return set_vi_srs_handler(n, addr, 0);
static inline void mips_srs_init(void)

#endif /* CONFIG_CPU_MIPSR2_SRS */
 * This is used by native signal handling
asmlinkage int (*save_fp_context)(struct sigcontext *sc);
asmlinkage int (*restore_fp_context)(struct sigcontext *sc);

extern asmlinkage int _save_fp_context(struct sigcontext *sc);
extern asmlinkage int _restore_fp_context(struct sigcontext *sc);

extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc);
extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc);

static int smp_save_fp_context(struct sigcontext *sc)
                ? _save_fp_context(sc)
                : fpu_emulator_save_context(sc);

static int smp_restore_fp_context(struct sigcontext *sc)
                ? _restore_fp_context(sc)
                : fpu_emulator_restore_context(sc);

static inline void signal_init(void)
        /* For now just do the cpu_has_fpu check when the functions are invoked */
        save_fp_context = smp_save_fp_context;
        restore_fp_context = smp_restore_fp_context;

                save_fp_context = _save_fp_context;
                restore_fp_context = _restore_fp_context;

                save_fp_context = fpu_emulator_save_context;
                restore_fp_context = fpu_emulator_restore_context;
#ifdef CONFIG_MIPS32_COMPAT

 * This is used by 32-bit signal stuff on the 64-bit kernel
asmlinkage int (*save_fp_context32)(struct sigcontext32 *sc);
asmlinkage int (*restore_fp_context32)(struct sigcontext32 *sc);

extern asmlinkage int _save_fp_context32(struct sigcontext32 *sc);
extern asmlinkage int _restore_fp_context32(struct sigcontext32 *sc);

extern asmlinkage int fpu_emulator_save_context32(struct sigcontext32 *sc);
extern asmlinkage int fpu_emulator_restore_context32(struct sigcontext32 *sc);

static inline void signal32_init(void)
                save_fp_context32 = _save_fp_context32;
                restore_fp_context32 = _restore_fp_context32;

                save_fp_context32 = fpu_emulator_save_context32;
                restore_fp_context32 = fpu_emulator_restore_context32;
extern void cpu_cache_init(void);
extern void tlb_init(void);
extern void flush_tlb_handlers(void);
void __init per_cpu_trap_init(void)
        unsigned int cpu = smp_processor_id();
        unsigned int status_set = ST0_CU0;
#ifdef CONFIG_MIPS_MT_SMTC
        int secondaryTC = 0;
        int bootTC = (cpu == 0);

         * Only do per_cpu_trap_init() for the first TC of each VPE.
         * Note that this hack assumes that the SMTC init code
         * assigns TCs consecutively and in ascending order.
        if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
            ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
#endif /* CONFIG_MIPS_MT_SMTC */

         * Disable coprocessors and select 32-bit or 64-bit addressing
         * and the 16/32 or 32/32 FPR register model.  Reset the BEV
         * flag that some firmware may have left set and the TS bit (for
         * IP27).  Set XX for ISA IV code to work.
        status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
        if (current_cpu_data.isa_level == MIPS_CPU_ISA_IV)
                status_set |= ST0_XX;
        change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
                         status_set);

                set_c0_status(ST0_MX);

#ifdef CONFIG_CPU_MIPSR2
        write_c0_hwrena(0x0000000f);    /* Allow rdhwr to all registers */
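        /*
         * HWREna bits 0-3 expose CPUNum, SYNCI_Step, CC and CCRes to user
         * rdhwr; the UserLocal bit is left clear here, so rdhwr of
         * register 29 still traps and is handled by simulate_rdhwr() above.
         */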
#ifdef CONFIG_MIPS_MT_SMTC
#endif /* CONFIG_MIPS_MT_SMTC */

         * Interrupt handling.
        if (cpu_has_veic || cpu_has_vint) {
                write_c0_ebase(ebase);
                /* Setting vector spacing enables EI/VI mode */
                change_c0_intctl(0x3e0, VECTORSPACING);
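                /*
                 * The VS field of c0_intctl (bits 9:5, mask 0x3e0) selects
                 * the spacing between vectored interrupt entry points; the
                 * value 0x100 lands in that field as VS = 8, i.e. 256-byte
                 * spacing, matching VECTORSPACING.
                 */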
        if (cpu_has_divec) {
                if (cpu_has_mipsmt) {
                        unsigned int vpflags = dvpe();
                        set_c0_cause(CAUSEF_IV);
                        set_c0_cause(CAUSEF_IV);

#ifdef CONFIG_MIPS_MT_SMTC
#endif /* CONFIG_MIPS_MT_SMTC */

        cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
        TLBMISS_HANDLER_SETUP();

        atomic_inc(&init_mm.mm_count);
        current->active_mm = &init_mm;
        BUG_ON(current->mm);
        enter_lazy_tlb(&init_mm, current);

#ifdef CONFIG_MIPS_MT_SMTC
#endif /* CONFIG_MIPS_MT_SMTC */

#ifdef CONFIG_MIPS_MT_SMTC
#endif /* CONFIG_MIPS_MT_SMTC */
/* Install CPU exception handler */
void __init set_handler(unsigned long offset, void *addr, unsigned long size)
        memcpy((void *)(ebase + offset), addr, size);
        flush_icache_range(ebase + offset, ebase + offset + size);

/* Install uncached CPU exception handler */
void __init set_uncached_handler(unsigned long offset, void *addr, unsigned long size)
#ifdef CONFIG_32BIT
        unsigned long uncached_ebase = KSEG1ADDR(ebase);
#endif
#ifdef CONFIG_64BIT
        unsigned long uncached_ebase = TO_UNCAC(ebase);
#endif

        memcpy((void *)(uncached_ebase + offset), addr, size);
void __init trap_init(void)
        extern char except_vec3_generic, except_vec3_r4000;
        extern char except_vec4;

        if (cpu_has_veic || cpu_has_vint)
                ebase = (unsigned long) alloc_bootmem_low_pages(0x200 + VECTORSPACING*64);

        per_cpu_trap_init();

         * Copy the generic exception handlers to their final destination.
         * This will be overridden later as suitable for a particular
        set_handler(0x180, &except_vec3_generic, 0x80);

         * Set up default vectors
        for (i = 0; i <= 31; i++)
                set_except_vector(i, handle_reserved);

         * Copy the EJTAG debug exception vector handler code to its final
        if (cpu_has_ejtag && board_ejtag_handler_setup)
                board_ejtag_handler_setup();

         * Only some CPUs have the watch exceptions.
                set_except_vector(23, handle_watch);

         * Initialise interrupt handlers
        if (cpu_has_veic || cpu_has_vint) {
                int nvec = cpu_has_veic ? 64 : 8;
                for (i = 0; i < nvec; i++)
                        set_vi_handler(i, NULL);
        else if (cpu_has_divec)
                set_handler(0x200, &except_vec4, 0x8);

         * Some CPUs can enable/disable cache parity detection, but they
         * do it in different ways.
        parity_protection_init();

         * The Data Bus Errors / Instruction Bus Errors are signaled
         * by external hardware.  Therefore these two exceptions
         * may have board specific handlers.
        set_except_vector(0, handle_int);
        set_except_vector(1, handle_tlbm);
        set_except_vector(2, handle_tlbl);
        set_except_vector(3, handle_tlbs);

        set_except_vector(4, handle_adel);
        set_except_vector(5, handle_ades);

        set_except_vector(6, handle_ibe);
        set_except_vector(7, handle_dbe);

        set_except_vector(8, handle_sys);
        set_except_vector(9, handle_bp);
        set_except_vector(10, handle_ri);
        set_except_vector(11, handle_cpu);
        set_except_vector(12, handle_ov);
        set_except_vector(13, handle_tr);

        if (current_cpu_data.cputype == CPU_R6000 ||
            current_cpu_data.cputype == CPU_R6000A) {
                 * The R6000 is the only R-series CPU that features a machine
                 * check exception (similar to the R4000 cache error) and
                 * unaligned ldc1/sdc1 exception.  The handlers have not been
                 * written yet.  Well, anyway there is no R6000 machine on the
                 * current list of targets for Linux/MIPS.
                 * (Duh, crap, there is someone with a triple R6k machine)
                //set_except_vector(14, handle_mc);
                //set_except_vector(15, handle_ndc);

        if (board_nmi_handler_setup)
                board_nmi_handler_setup();

        if (cpu_has_fpu && !cpu_has_nofpuex)
                set_except_vector(15, handle_fpe);

        set_except_vector(22, handle_mdmx);

                set_except_vector(24, handle_mcheck);

                set_except_vector(25, handle_mt);

                set_except_vector(26, handle_dsp);

                /* Special exception: R4[04]00 also uses the divec space. */
                memcpy((void *)(CAC_BASE + 0x180), &except_vec3_r4000, 0x100);
        else if (cpu_has_4kex)
                memcpy((void *)(CAC_BASE + 0x180), &except_vec3_generic, 0x80);
                memcpy((void *)(CAC_BASE + 0x080), &except_vec3_generic, 0x80);

#ifdef CONFIG_MIPS32_COMPAT

        flush_icache_range(ebase, ebase + 0x400);
        flush_tlb_handlers();