/*
 *  linux/arch/i386/traps.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * 'Traps.c' handles hardware traps and faults after we have saved some
 * state in 'asm.s'.
 */
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/kprobes.h>
#include <linux/kexec.h>
#include <linux/unwind.h>

#ifdef CONFIG_EISA
#include <linux/ioport.h>
#include <linux/eisa.h>
#endif

#ifdef CONFIG_MCA
#include <linux/mca.h>
#endif

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/debugreg.h>
#include <asm/desc.h>
#include <asm/i387.h>
#include <asm/nmi.h>
#include <asm/unwind.h>
#include <asm/smp.h>
#include <asm/arch_hooks.h>
#include <asm/kdebug.h>

#include <linux/module.h>

#include "mach_traps.h"
asmlinkage int system_call(void);

struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
                { 0, 0 }, { 0, 0 } };
/* Do we ignore FPU interrupts ? */
char ignore_fpu_irq = 0;
/*
 * The IDT has to be page-aligned to simplify the Pentium
 * F0 0F bug workaround.. We have a special link segment
 * for this.
 */
struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
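/*
 * Note: the dedicated .data.idt section is what lets the F00F
 * workaround (trap_init_f00f_bug() below) remap the IDT read-only
 * through a fixmap without disturbing neighbouring data.
 */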
asmlinkage void divide_error(void);
asmlinkage void debug(void);
asmlinkage void nmi(void);
asmlinkage void int3(void);
asmlinkage void overflow(void);
asmlinkage void bounds(void);
asmlinkage void invalid_op(void);
asmlinkage void device_not_available(void);
asmlinkage void coprocessor_segment_overrun(void);
asmlinkage void invalid_TSS(void);
asmlinkage void segment_not_present(void);
asmlinkage void stack_segment(void);
asmlinkage void general_protection(void);
asmlinkage void page_fault(void);
asmlinkage void coprocessor_error(void);
asmlinkage void simd_coprocessor_error(void);
asmlinkage void alignment_check(void);
asmlinkage void spurious_interrupt_bug(void);
asmlinkage void machine_check(void);
static int kstack_depth_to_print = 24;
static int call_trace = 1;
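/*
 * call_trace selects the backtrace style (see show_trace_log_lvl() and
 * call_trace_setup() below): -1 = old frame-pointer/address scan only,
 * 0 = both, 1 = new DWARF2 unwinder with inexact fallback (default),
 * 2 = new unwinder only.
 */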
ATOMIC_NOTIFIER_HEAD(i386die_chain);
int register_die_notifier(struct notifier_block *nb)
{
        vmalloc_sync_all();
        return atomic_notifier_chain_register(&i386die_chain, nb);
}
EXPORT_SYMBOL(register_die_notifier); /* used modular by kdb */

int unregister_die_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&i386die_chain, nb);
}
EXPORT_SYMBOL(unregister_die_notifier); /* used modular by kdb */
static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
{
        return  p > (void *)tinfo &&
                p < (void *)tinfo + THREAD_SIZE - 3;
}
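/*
 * The "- 3" above keeps a full 4-byte load at p inside this task's
 * stack: the highest accepted p sits THREAD_SIZE - 4 bytes past the
 * thread_info base, so p[0..3] never crosses the stack boundary.
 */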
/*
 * Print one address/symbol entry per line.
 */
static inline void print_addr_and_symbol(unsigned long addr, char *log_lvl)
{
        printk(" [<%08lx>] ", addr);

        print_symbol("%s\n", addr);
}
static inline unsigned long print_context_stack(struct thread_info *tinfo,
                                unsigned long *stack, unsigned long ebp,
                                char *log_lvl)
{
        unsigned long addr;

#ifdef  CONFIG_FRAME_POINTER
        while (valid_stack_ptr(tinfo, (void *)ebp)) {
                addr = *(unsigned long *)(ebp + 4);
                print_addr_and_symbol(addr, log_lvl);
                /*
                 * break out of recursive entries (such as
                 * end_of_stack_stop_unwind_function):
                 */
                if (ebp == *(unsigned long *)ebp)
                        break;
                ebp = *(unsigned long *)ebp;
        }
#else
        while (valid_stack_ptr(tinfo, stack)) {
                addr = *stack++;
                if (__kernel_text_address(addr))
                        print_addr_and_symbol(addr, log_lvl);
        }
#endif
        return ebp;
}
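/*
 * Frame layout assumed by the CONFIG_FRAME_POINTER walk above:
 *
 *      ebp + 4:  return address into the caller
 *      ebp + 0:  caller's saved ebp (next link in the chain)
 *
 * hence the *(ebp + 4) print and the *ebp step. Without frame
 * pointers we fall back to scanning every stack word for kernel text
 * addresses, which is why that trace can contain stale entries.
 */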
static asmlinkage int
show_trace_unwind(struct unwind_frame_info *info, void *log_lvl)
{
        int n = 0;

        while (unwind(info) == 0 && UNW_PC(info)) {
                n++;
                print_addr_and_symbol(UNW_PC(info), log_lvl);
                if (arch_unw_user_mode(info))
                        break;
        }
        return n;
}
static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
                               unsigned long *stack, char *log_lvl)
{
        unsigned long ebp;

        if (!task)
                task = current;

        if (call_trace >= 0) {
                int unw_ret = 0;
                struct unwind_frame_info info;

                if (regs) {
                        if (unwind_init_frame_info(&info, task, regs) == 0)
                                unw_ret = show_trace_unwind(&info, log_lvl);
                } else if (task == current)
                        unw_ret = unwind_init_running(&info, show_trace_unwind, log_lvl);
                else {
                        if (unwind_init_blocked(&info, task) == 0)
                                unw_ret = show_trace_unwind(&info, log_lvl);
                }
                if (unw_ret > 0 && !arch_unw_user_mode(&info)) {
#ifdef CONFIG_STACK_UNWIND
                        print_symbol("DWARF2 unwinder stuck at %s\n",
                                     UNW_PC(&info));
                        if (call_trace == 1) {
                                printk("Leftover inexact backtrace:\n");
                                if (UNW_SP(&info))
                                        stack = (void *)UNW_SP(&info);
                        } else if (call_trace > 1)
                                return;
                        else
                                printk("Full inexact backtrace again:\n");
#else
                        printk("Inexact backtrace:\n");
#endif
                }
        }

        if (task == current) {
                /* Grab ebp right from our regs */
                asm ("movl %%ebp, %0" : "=r" (ebp) : );
        } else {
                /* ebp is the last reg pushed by switch_to */
                ebp = *(unsigned long *) task->thread.esp;
        }

        while (1) {
                struct thread_info *context;
                context = (struct thread_info *)
                        ((unsigned long)stack & (~(THREAD_SIZE - 1)));
                ebp = print_context_stack(context, stack, ebp, log_lvl);
                stack = (unsigned long*)context->previous_esp;
                if (!stack)
                        break;
                printk("%s =======================\n", log_lvl);
        }
}
void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long * stack)
{
        show_trace_log_lvl(task, regs, stack, "");
}
static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
                               unsigned long *esp, char *log_lvl)
{
        unsigned long *stack;
        int i;

        if (esp == NULL) {
                if (task)
                        esp = (unsigned long*)task->thread.esp;
                else
                        esp = (unsigned long *)&esp;
        }

        stack = esp;
        for(i = 0; i < kstack_depth_to_print; i++) {
                if (kstack_end(stack))
                        break;
                if (i && ((i % 8) == 0))
                        printk("\n%s       ", log_lvl);
                printk("%08lx ", *stack++);
        }
        printk("\n%sCall Trace:\n", log_lvl);
        show_trace_log_lvl(task, regs, esp, log_lvl);
}
void show_stack(struct task_struct *task, unsigned long *esp)
{
        printk("       ");
        show_stack_log_lvl(task, NULL, esp, "");
}
/*
 * The architecture-independent dump_stack generator
 */
void dump_stack(void)
{
        unsigned long stack;

        show_trace(current, NULL, &stack);
}

EXPORT_SYMBOL(dump_stack);
void show_registers(struct pt_regs *regs)
{
        int i;
        int in_kernel = 1;
        unsigned long esp;
        unsigned short ss;

        esp = (unsigned long) (&regs->esp);
        savesegment(ss, ss);
        if (user_mode_vm(regs)) {
                in_kernel = 0;
                esp = regs->esp;
                ss = regs->xss & 0xffff;
        }
        print_modules();
        printk(KERN_EMERG "CPU:    %d\nEIP:    %04x:[<%08lx>]    %s VLI\n"
                        "EFLAGS: %08lx   (%s %.*s) \n",
                smp_processor_id(), 0xffff & regs->xcs, regs->eip,
                print_tainted(), regs->eflags, system_utsname.release,
                (int)strcspn(system_utsname.version, " "),
                system_utsname.version);
        print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
        printk(KERN_EMERG "eax: %08lx   ebx: %08lx   ecx: %08lx   edx: %08lx\n",
                regs->eax, regs->ebx, regs->ecx, regs->edx);
        printk(KERN_EMERG "esi: %08lx   edi: %08lx   ebp: %08lx   esp: %08lx\n",
                regs->esi, regs->edi, regs->ebp, esp);
        printk(KERN_EMERG "ds: %04x   es: %04x   ss: %04x\n",
                regs->xds & 0xffff, regs->xes & 0xffff, ss);
        printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
                TASK_COMM_LEN, current->comm, current->pid,
                current_thread_info(), current, current->thread_info);
        /*
         * When in-kernel, we also print out the stack and code at the
         * time of the fault..
         */
        if (in_kernel) {
                u8 __user *eip;

                printk("\n" KERN_EMERG "Stack: ");
                show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG);

                printk(KERN_EMERG "Code: ");

                eip = (u8 __user *)regs->eip - 43;
                for (i = 0; i < 64; i++, eip++) {
                        unsigned char c;

                        if (eip < (u8 __user *)PAGE_OFFSET || __get_user(c, eip)) {
                                printk(" Bad EIP value.");
                                break;
                        }
                        if (eip == (u8 __user *)regs->eip)
                                printk("<%02x> ", c);
                        else
                                printk("%02x ", c);
                }
        }
        printk("\n");
}
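/*
 * The code dump above starts 43 bytes before the faulting EIP so the
 * trapping instruction is shown in context within the 64-byte window,
 * highlighted as <xx>; reading stops early if EIP turns out bogus.
 */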
static void handle_BUG(struct pt_regs *regs)
{
        unsigned long eip = regs->eip;
        unsigned short ud2;

        if (eip < PAGE_OFFSET)
                return;
        if (__get_user(ud2, (unsigned short __user *)eip))
                return;
        if (ud2 != 0x0b0f)
                return;

        printk(KERN_EMERG "------------[ cut here ]------------\n");

#ifdef CONFIG_DEBUG_BUGVERBOSE
        do {
                unsigned short line;
                char *file;
                char c;

                if (__get_user(line, (unsigned short __user *)(eip + 2)))
                        break;
                if (__get_user(file, (char * __user *)(eip + 4)) ||
                    (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
                        file = "<bad filename>";

                printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line);
                return;
        } while (0);
#endif
        printk(KERN_EMERG "Kernel BUG at [verbose debug info unavailable]\n");
}
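/*
 * 0x0b0f is the two-byte ud2 opcode (0x0f 0x0b) read as a
 * little-endian word. With CONFIG_DEBUG_BUGVERBOSE, BUG() places the
 * line number (2 bytes) and a pointer to the file name (4 bytes)
 * directly after the ud2, which is what the eip + 2 and eip + 4
 * reads above recover.
 */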
/* This is gone through when something in the kernel
 * has done something bad and is about to be terminated.
 */
void die(const char * str, struct pt_regs * regs, long err)
{
        static struct {
                spinlock_t lock;
                u32 lock_owner;
                int lock_owner_depth;
        } die = {
                .lock =                 SPIN_LOCK_UNLOCKED,
                .lock_owner =           -1,
                .lock_owner_depth =     0
        };
        static int die_counter;
        unsigned long flags;

        oops_enter();

        if (die.lock_owner != raw_smp_processor_id()) {
                console_verbose();
                spin_lock_irqsave(&die.lock, flags);
                die.lock_owner = smp_processor_id();
                die.lock_owner_depth = 0;
                bust_spinlocks(1);
        } else
                local_save_flags(flags);

        if (++die.lock_owner_depth < 3) {
                int nl = 0;
                unsigned long esp;
                unsigned short ss;

                handle_BUG(regs);
                printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
                printk(KERN_EMERG "PREEMPT ");
                nl = 1;
#endif
#ifdef CONFIG_SMP
                if (!nl)
                        printk(KERN_EMERG);
                printk("SMP ");
                nl = 1;
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
                if (!nl)
                        printk(KERN_EMERG);
                printk("DEBUG_PAGEALLOC");
                nl = 1;
#endif
                if (nl)
                        printk("\n");
                if (notify_die(DIE_OOPS, str, regs, err,
                                current->thread.trap_no, SIGSEGV) != NOTIFY_STOP) {
                        show_registers(regs);
                        /* Executive summary in case the oops scrolled away */
                        esp = (unsigned long) (&regs->esp);
                        savesegment(ss, ss);
                        if (user_mode(regs)) {
                                esp = regs->esp;
                                ss = regs->xss & 0xffff;
                        }
                        printk(KERN_EMERG "EIP: [<%08lx>] ", regs->eip);
                        print_symbol("%s", regs->eip);
                        printk(" SS:ESP %04x:%08lx\n", ss, esp);
                } else
                        regs = NULL;
        } else
                printk(KERN_EMERG "Recursive die() failure, output suppressed\n");

        bust_spinlocks(0);
        die.lock_owner = -1;
        spin_unlock_irqrestore(&die.lock, flags);

        if (!regs)
                return;

        if (kexec_should_crash(current))
                crash_kexec(regs);

        if (in_interrupt())
                panic("Fatal exception in interrupt");

        if (panic_on_oops)
                panic("Fatal exception");

        oops_exit();
        do_exit(SIGSEGV);
}
static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
{
        if (!user_mode_vm(regs))
                die(str, regs, err);
}
static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
                              struct pt_regs * regs, long error_code,
                              siginfo_t *info)
{
        struct task_struct *tsk = current;
        tsk->thread.error_code = error_code;
        tsk->thread.trap_no = trapnr;

        if (regs->eflags & VM_MASK) {
                if (vm86)
                        goto vm86_trap;
                goto trap_signal;
        }

        if (!user_mode(regs))
                goto kernel_trap;

        trap_signal: {
                if (info)
                        force_sig_info(signr, info, tsk);
                else
                        force_sig(signr, tsk);
                return;
        }

        kernel_trap: {
                if (!fixup_exception(regs))
                        die(str, regs, error_code);
                return;
        }

        vm86_trap: {
                int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
                if (ret) goto trap_signal;
                return;
        }
}
#define DO_ERROR(trapnr, signr, str, name) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
}

#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
}

#define DO_VM86_ERROR(trapnr, signr, str, name) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
}

#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
fastcall void do_##name(struct pt_regs * regs, long error_code) \
{ \
        siginfo_t info; \
        info.si_signo = signr; \
        info.si_errno = 0; \
        info.si_code = sicode; \
        info.si_addr = (void __user *)siaddr; \
        if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
                                                == NOTIFY_STOP) \
                return; \
        do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
}
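/*
 * For reference, DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
 * expands to roughly:
 *
 *      fastcall void do_invalid_TSS(struct pt_regs * regs, long error_code)
 *      {
 *              if (notify_die(DIE_TRAP, "invalid TSS", regs, error_code,
 *                                              10, SIGSEGV) == NOTIFY_STOP)
 *                      return;
 *              do_trap(10, SIGSEGV, "invalid TSS", 0, regs, error_code, NULL);
 *      }
 *
 * The _INFO variants additionally fill in a siginfo_t, and the VM86
 * variants pass vm86 = 1 so do_trap() hands the trap to the vm86 code.
 */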
DO_VM86_ERROR_INFO( 0, SIGFPE,  "divide error", divide_error, FPE_INTDIV, regs->eip)
#ifndef CONFIG_KPROBES
DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
#endif
DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
DO_ERROR_INFO( 6, SIGILL,  "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip)
DO_ERROR( 9, SIGFPE,  "coprocessor segment overrun", coprocessor_segment_overrun)
DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
DO_ERROR(11, SIGBUS,  "segment not present", segment_not_present)
DO_ERROR(12, SIGBUS,  "stack segment", stack_segment)
DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
fastcall void __kprobes do_general_protection(struct pt_regs * regs,
                                              long error_code)
{
        int cpu = get_cpu();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);
        struct thread_struct *thread = &current->thread;

        /*
         * Perform the lazy TSS's I/O bitmap copy. If the TSS has an
         * invalid offset set (the LAZY one) and the faulting thread has
         * a valid I/O bitmap pointer, we copy the I/O bitmap in the TSS
         * and we set the offset field correctly. Then we let the CPU
         * restart the faulting instruction.
         */
        if (tss->io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
            thread->io_bitmap_ptr) {
                memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
                       thread->io_bitmap_max);
                /*
                 * If the previously set map was extending to higher ports
                 * than the current one, pad extra space with 0xff (no access).
                 */
                if (thread->io_bitmap_max < tss->io_bitmap_max)
                        memset((char *) tss->io_bitmap +
                                thread->io_bitmap_max, 0xff,
                                tss->io_bitmap_max - thread->io_bitmap_max);
                tss->io_bitmap_max = thread->io_bitmap_max;
                tss->io_bitmap_base = IO_BITMAP_OFFSET;
                tss->io_bitmap_owner = thread;
                put_cpu();
                return;
        }
        put_cpu();

        current->thread.error_code = error_code;
        current->thread.trap_no = 13;

        if (regs->eflags & VM_MASK)
                goto gp_in_vm86;

        if (!user_mode(regs))
                goto gp_in_kernel;

        current->thread.error_code = error_code;
        current->thread.trap_no = 13;
        force_sig(SIGSEGV, current);
        return;

gp_in_vm86:
        local_irq_enable();
        handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
        return;

gp_in_kernel:
        if (!fixup_exception(regs)) {
                if (notify_die(DIE_GPF, "general protection fault", regs,
                                error_code, 13, SIGSEGV) == NOTIFY_STOP)
                        return;
                die("general protection fault", regs, error_code);
        }
}
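/*
 * For reference (architecturally defined, not specific to this file):
 * a segment-related #GP pushes an error code holding the selector
 * index involved, with bit 0 flagging an external event, bit 1 an
 * IDT-resident descriptor and bit 2 LDT vs GDT; it is 0 for most
 * other causes. We only record it in thread.error_code above.
 */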
static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
{
        printk(KERN_EMERG "Uhhuh. NMI received. Dazed and confused, but trying "
                "to continue\n");
        printk(KERN_EMERG "You probably have a hardware problem with your RAM "
                        "chips\n");

        /* Clear and disable the memory parity error line. */
        clear_mem_error(reason);
}
static void io_check_error(unsigned char reason, struct pt_regs * regs)
{
        unsigned long i;

        printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
        show_registers(regs);

        /* Re-enable the IOCK line, wait for a few seconds */
        reason = (reason & 0xf) | 8;
        outb(reason, 0x61);
        i = 2000;
        while (--i) udelay(1000);
        reason &= ~8;
        outb(reason, 0x61);
}
static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
{
#ifdef CONFIG_MCA
        /* Might actually be able to figure out what the guilty party
         * is.
         */
        if (MCA_bus) {
                mca_handle_nmi();
                return;
        }
#endif
        printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
                reason, smp_processor_id());
        printk("Dazed and confused, but trying to continue\n");
        printk("Do you have a strange power saving mode enabled?\n");
}
static DEFINE_SPINLOCK(nmi_print_lock);

void die_nmi (struct pt_regs *regs, const char *msg)
{
        if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) ==
            NOTIFY_STOP)
                return;

        spin_lock(&nmi_print_lock);
        /*
         * We are in trouble anyway, lets at least try
         * to get a message out.
         */
        bust_spinlocks(1);
        printk(KERN_EMERG "%s", msg);
        printk(" on CPU%d, eip %08lx, registers:\n",
                smp_processor_id(), regs->eip);
        show_registers(regs);
        printk(KERN_EMERG "console shuts up ...\n");
        console_silent();
        spin_unlock(&nmi_print_lock);
        bust_spinlocks(0);

        /* If we are in kernel we are probably nested up pretty bad
         * and might as well get out now while we still can.
         */
        if (!user_mode_vm(regs)) {
                current->thread.trap_no = 2;
                crash_kexec(regs);
        }

        do_exit(SIGSEGV);
}
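/*
 * NMI reason bits, as returned by get_nmi_reason() from system
 * control port B (0x61): 0x80 = memory parity/SERR#, 0x40 = I/O
 * check/IOCHK#. NMIs with neither bit set are IPIs, the watchdog or
 * unknown sources; default_do_nmi() below sorts those out.
 */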
static void default_do_nmi(struct pt_regs * regs)
{
        unsigned char reason = 0;

        /* Only the BSP gets external NMIs from the system. */
        if (!smp_processor_id())
                reason = get_nmi_reason();

        if (!(reason & 0xc0)) {
                if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
                                                        == NOTIFY_STOP)
                        return;
#ifdef CONFIG_X86_LOCAL_APIC
                /*
                 * Ok, so this is none of the documented NMI sources,
                 * so it must be the NMI watchdog.
                 */
                if (nmi_watchdog) {
                        nmi_watchdog_tick(regs);
                        return;
                }
#endif
                unknown_nmi_error(reason, regs);
                return;
        }
        if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
                return;
        if (reason & 0x80)
                mem_parity_error(reason, regs);
        if (reason & 0x40)
                io_check_error(reason, regs);
        /*
         * Reassert NMI in case it became active meanwhile
         * as it's edge-triggered.
         */
        reassert_nmi();
}
static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
{
        return 0;
}

static nmi_callback_t nmi_callback = dummy_nmi_callback;

fastcall void do_nmi(struct pt_regs * regs, long error_code)
{
        int cpu;

        nmi_enter();

        cpu = smp_processor_id();

        ++nmi_count(cpu);

        if (!rcu_dereference(nmi_callback)(regs, cpu))
                default_do_nmi(regs);

        nmi_exit();
}

void set_nmi_callback(nmi_callback_t callback)
{
        vmalloc_sync_all();
        rcu_assign_pointer(nmi_callback, callback);
}
EXPORT_SYMBOL_GPL(set_nmi_callback);

void unset_nmi_callback(void)
{
        nmi_callback = dummy_nmi_callback;
}
EXPORT_SYMBOL_GPL(unset_nmi_callback);
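/*
 * A hypothetical user of this hook (sketch only, not from this file):
 *
 *      static int my_nmi_handler(struct pt_regs *regs, int cpu)
 *      {
 *              return 0;       // nonzero = handled, skips default_do_nmi()
 *      }
 *
 *      set_nmi_callback(my_nmi_handler);
 *      ...
 *      unset_nmi_callback();
 */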
#ifdef CONFIG_KPROBES
fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
{
        if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
                        == NOTIFY_STOP)
                return;
        /* This is an interrupt gate, because kprobes wants interrupts
           disabled. Normal trap handlers don't. */
        restore_interrupts(regs);
        do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
}
#endif
/*
 * Our handling of the processor debug registers is non-trivial.
 * We do not clear them on entry and exit from the kernel. Therefore
 * it is possible to get a watchpoint trap here from inside the kernel.
 * However, the code in ./ptrace.c has ensured that the user can
 * only set watchpoints on userspace addresses. Therefore the in-kernel
 * watchpoint trap can only occur in code which is reading/writing
 * from user space. Such code must not hold kernel locks (since it
 * can equally take a page fault), therefore it is safe to call
 * force_sig_info even though that claims and releases locks.
 *
 * Code in ./signal.c ensures that the debug control register
 * is restored before we deliver any signal, and therefore that
 * user code runs with the correct debug control register even though
 * we clear it here.
 *
 * Being careful here means that we don't have to be as careful in a
 * lot of more complicated places (task switching can be a bit lazy
 * about restoring all the debug state, and ptrace doesn't have to
 * find every occurrence of the TF bit that could be saved away even
 * by user code)
 */
fastcall void __kprobes do_debug(struct pt_regs * regs, long error_code)
{
        unsigned int condition;
        struct task_struct *tsk = current;

        get_debugreg(condition, 6);

        if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
                                        SIGTRAP) == NOTIFY_STOP)
                return;
        /* It's safe to allow irq's after DR6 has been saved */
        if (regs->eflags & X86_EFLAGS_IF)
                local_irq_enable();

        /* Mask out spurious debug traps due to lazy DR7 setting */
        if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
                if (!tsk->thread.debugreg[7])
                        goto clear_dr7;
        }

        if (regs->eflags & VM_MASK)
                goto debug_vm86;

        /* Save debug status register where ptrace can see it */
        tsk->thread.debugreg[6] = condition;

        /*
         * Single-stepping through TF: make sure we ignore any events in
         * kernel space (but re-enable TF when returning to user mode).
         */
        if (condition & DR_STEP) {
                /*
                 * We already checked v86 mode above, so we can
                 * check for kernel mode by just checking the CPL
                 * of CS.
                 */
                if (!user_mode(regs))
                        goto clear_TF_reenable;
        }

        /* Ok, finally something we can handle */
        send_sigtrap(tsk, regs, error_code);

        /* Disable additional traps. They'll be re-enabled when
         * the signal is delivered.
         */
clear_dr7:
        set_debugreg(0, 7);
        return;

debug_vm86:
        handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
        return;

clear_TF_reenable:
        set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
        regs->eflags &= ~TF_MASK;
        return;
}
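/*
 * DR6 bits consumed above, for reference: DR_TRAP0..DR_TRAP3 (bits
 * 0-3) identify which hardware breakpoint fired and DR_STEP (bit 14)
 * marks a single-step trap. A DR_TRAPx bit with thread.debugreg[7]
 * clear means the trap is stale from lazy DR7 switching, hence the
 * clear_dr7 path.
 */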
/*
 * Note that we play around with the 'TS' bit in an attempt to get
 * the correct behaviour even in the presence of the asynchronous
 * IRQ13 behaviour
 */
void math_error(void __user *eip)
{
        struct task_struct * task;
        siginfo_t info;
        unsigned short cwd, swd;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 16;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = eip;
        /*
         * (~cwd & swd) will mask out exceptions that are not set to unmasked
         * status.  0x3f is the exception bits in these regs, 0x200 is the
         * C1 reg you need in case of a stack fault, 0x040 is the stack
         * fault bit.  We should only be taking one exception at a time,
         * so if this combination doesn't produce any single exception,
         * then we have a bad program that isn't synchronizing its FPU usage
         * and it will suffer the consequences since we won't be able to
         * fully reproduce the context of the exception
         */
        cwd = get_fpu_cwd(task);
        swd = get_fpu_swd(task);
        switch (swd & ~cwd & 0x3f) {
                case 0x000: /* No unmasked exception */
                        return;
                default:    /* Multiple exceptions */
                        break;
                case 0x001: /* Invalid Op */
                        /*
                         * swd & 0x240 == 0x040: Stack Underflow
                         * swd & 0x240 == 0x240: Stack Overflow
                         * User must clear the SF bit (0x40) if set
                         */
                        info.si_code = FPE_FLTINV;
                        break;
                case 0x002: /* Denormalize */
                case 0x010: /* Underflow */
                        info.si_code = FPE_FLTUND;
                        break;
                case 0x004: /* Zero Divide */
                        info.si_code = FPE_FLTDIV;
                        break;
                case 0x008: /* Overflow */
                        info.si_code = FPE_FLTOVF;
                        break;
                case 0x020: /* Precision */
                        info.si_code = FPE_FLTRES;
                        break;
        }
        force_sig_info(SIGFPE, &info, task);
}
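/*
 * x87 status-word exception flags tested above, for reference:
 * 0x01 invalid op, 0x02 denormal, 0x04 zero divide, 0x08 overflow,
 * 0x10 underflow, 0x20 precision; 0x40 is the stack-fault bit and
 * C1 (0x200) then distinguishes stack overflow from underflow.
 */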
fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code)
{
        ignore_fpu_irq = 1;
        math_error((void __user *)regs->eip);
}
static void simd_math_error(void __user *eip)
{
        struct task_struct * task;
        siginfo_t info;
        unsigned short mxcsr;

        /*
         * Save the info for the exception handler and clear the error.
         */
        task = current;
        save_init_fpu(task);
        task->thread.trap_no = 19;
        task->thread.error_code = 0;
        info.si_signo = SIGFPE;
        info.si_errno = 0;
        info.si_code = __SI_FAULT;
        info.si_addr = eip;
        /*
         * The SIMD FPU exceptions are handled a little differently, as there
         * is only a single status/control register.  Thus, to determine which
         * unmasked exception was caught we must mask the exception mask bits
         * at 0x1f80, and then use these to mask the exception bits at 0x3f.
         */
        mxcsr = get_fpu_mxcsr(task);
        switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
                case 0x000:
                default:
                        break;
                case 0x001: /* Invalid Op */
                        info.si_code = FPE_FLTINV;
                        break;
                case 0x002: /* Denormalize */
                case 0x010: /* Underflow */
                        info.si_code = FPE_FLTUND;
                        break;
                case 0x004: /* Zero Divide */
                        info.si_code = FPE_FLTDIV;
                        break;
                case 0x008: /* Overflow */
                        info.si_code = FPE_FLTOVF;
                        break;
                case 0x020: /* Precision */
                        info.si_code = FPE_FLTRES;
                        break;
        }
        force_sig_info(SIGFPE, &info, task);
}
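/*
 * Worked example of the mxcsr test above: with divide-by-zero
 * unmasked (mask bit 9 of mxcsr clear) and the ZE flag (bit 2) set,
 * ~((mxcsr & 0x1f80) >> 7) has bit 2 set, the expression evaluates
 * to 0x004 and we report FPE_FLTDIV. With the power-on default mxcsr
 * of 0x1f80 every exception is masked and the expression is always 0.
 */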
fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
                                        long error_code)
{
        if (cpu_has_xmm) {
                /* Handle SIMD FPU exceptions on PIII+ processors. */
                ignore_fpu_irq = 1;
                simd_math_error((void __user *)regs->eip);
        } else {
                /*
                 * Handle strange cache flush from user space exception
                 * in all other cases.  This is undocumented behaviour.
                 */
                if (regs->eflags & VM_MASK) {
                        handle_vm86_fault((struct kernel_vm86_regs *)regs,
                                          error_code);
                        return;
                }
                current->thread.trap_no = 19;
                current->thread.error_code = error_code;
                die_if_kernel("cache flush denied", regs, error_code);
                force_sig(SIGSEGV, current);
        }
}
fastcall void do_spurious_interrupt_bug(struct pt_regs * regs,
                                        long error_code)
{
#if 0
        /* No need to warn about this any longer. */
        printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
#endif
}
fastcall void setup_x86_bogus_stack(unsigned char * stk)
{
        unsigned long *switch16_ptr, *switch32_ptr;
        struct pt_regs *regs;
        unsigned long stack_top, stack_bot;
        unsigned short iret_frame16_off;
        int cpu = smp_processor_id();
        /* reserve the space on 32bit stack for the magic switch16 pointer */
        memmove(stk, stk + 8, sizeof(struct pt_regs));
        switch16_ptr = (unsigned long *)(stk + sizeof(struct pt_regs));
        regs = (struct pt_regs *)stk;
        /* now the switch32 on 16bit stack */
        stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
        stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
        switch32_ptr = (unsigned long *)(stack_top - 8);
        iret_frame16_off = CPU_16BIT_STACK_SIZE - 8 - 20;
        /* copy iret frame on 16bit stack */
        memcpy((void *)(stack_bot + iret_frame16_off), &regs->eip, 20);
        /* fill in the switch pointers */
        switch16_ptr[0] = (regs->esp & 0xffff0000) | iret_frame16_off;
        switch16_ptr[1] = __ESPFIX_SS;
        switch32_ptr[0] = (unsigned long)stk + sizeof(struct pt_regs) +
                8 - CPU_16BIT_STACK_SIZE;
        switch32_ptr[1] = __KERNEL_DS;
}
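/*
 * The 20 bytes copied above are the five-word iret frame (eip, cs,
 * eflags, esp, ss) that starts at &regs->eip. Replaying it from the
 * per-cpu 16-bit stack lets the final iret run on a stack segment
 * whose base matches the 16-bit ss being restored, so stale upper
 * esp bits cannot leak; fixup_x86_bogus_stack() below undoes the
 * switch on the way back into the kernel.
 */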
fastcall unsigned char * fixup_x86_bogus_stack(unsigned short sp)
{
        unsigned long *switch32_ptr;
        unsigned char *stack16, *stack32;
        unsigned long stack_top, stack_bot;
        int len;
        int cpu = smp_processor_id();
        stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
        stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
        switch32_ptr = (unsigned long *)(stack_top - 8);
        /* copy the data from 16bit stack to 32bit stack */
        len = CPU_16BIT_STACK_SIZE - 8 - sp;
        stack16 = (unsigned char *)(stack_bot + sp);
        stack32 = (unsigned char *)
                (switch32_ptr[0] + CPU_16BIT_STACK_SIZE - 8 - len);
        memcpy(stack32, stack16, len);
        return stack32;
}
/*
 * 'math_state_restore()' saves the current math information in the
 * old math state array, and gets the new ones from the current task
 *
 * Careful.. There are problems with IBM-designed IRQ13 behaviour.
 * Don't touch unless you *really* know how it works.
 *
 * Must be called with kernel preemption disabled (in this case,
 * local interrupts are disabled at the call-site in entry.S).
 */
asmlinkage void math_state_restore(struct pt_regs regs)
{
        struct thread_info *thread = current_thread_info();
        struct task_struct *tsk = thread->task;

        clts();         /* Allow maths ops (or we recurse) */
        if (!tsk_used_math(tsk))
                init_fpu(tsk);
        restore_fpu(tsk);
        thread->status |= TS_USEDFPU;   /* So we fnsave on switch_to() */
}
#ifndef CONFIG_MATH_EMULATION

asmlinkage void math_emulate(long arg)
{
        printk(KERN_EMERG "math-emulation not enabled and no coprocessor found.\n");
        printk(KERN_EMERG "killing %s.\n",current->comm);
        force_sig(SIGFPE,current);
        schedule();
}

#endif /* CONFIG_MATH_EMULATION */
#ifdef CONFIG_X86_F00F_BUG
void __init trap_init_f00f_bug(void)
{
        __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);

        /*
         * Update the IDT descriptor and reload the IDT so that
         * it uses the read-only mapped virtual address.
         */
        idt_descr.address = fix_to_virt(FIX_F00F_IDT);
        load_idt(&idt_descr);
}
#endif
#define _set_gate(gate_addr,type,dpl,addr,seg) \
do { \
  int __d0, __d1; \
  __asm__ __volatile__ ("movw %%dx,%%ax\n\t" \
        "movw %4,%%dx\n\t" \
        "movl %%eax,%0\n\t" \
        "movl %%edx,%1" \
        :"=m" (*((long *) (gate_addr))), \
         "=m" (*(1+(long *) (gate_addr))), "=&a" (__d0), "=&d" (__d1) \
        :"i" ((short) (0x8000+(dpl<<13)+(type<<8))), \
         "3" ((char *) (addr)),"2" ((seg) << 16)); \
} while (0)
/*
 * This needs to use 'idt_table' rather than 'idt', and
 * thus use the _nonmapped_ version of the IDT, as the
 * Pentium F0 0F bugfix can have resulted in the mapped
 * IDT being write-protected.
 */
void set_intr_gate(unsigned int n, void *addr)
{
        _set_gate(idt_table+n,14,0,addr,__KERNEL_CS);
}

/*
 * This routine sets up an interrupt gate at descriptor privilege level 3.
 */
static inline void set_system_intr_gate(unsigned int n, void *addr)
{
        _set_gate(idt_table+n, 14, 3, addr, __KERNEL_CS);
}

static void __init set_trap_gate(unsigned int n, void *addr)
{
        _set_gate(idt_table+n,15,0,addr,__KERNEL_CS);
}

static void __init set_system_gate(unsigned int n, void *addr)
{
        _set_gate(idt_table+n,15,3,addr,__KERNEL_CS);
}

static void __init set_task_gate(unsigned int n, unsigned int gdt_entry)
{
        _set_gate(idt_table+n,5,0,0,(gdt_entry<<3));
}
void __init trap_init(void)
{
#ifdef CONFIG_EISA
        void __iomem *p = ioremap(0x0FFFD9, 4);
        if (readl(p) == 'E'+('I'<<8)+('S'<<16)+('A'<<24)) {
                EISA_bus = 1;
        }
        iounmap(p);
#endif

#ifdef CONFIG_X86_LOCAL_APIC
        init_apic_mappings();
#endif

        set_trap_gate(0,&divide_error);
        set_intr_gate(1,&debug);
        set_intr_gate(2,&nmi);
        set_system_intr_gate(3, &int3); /* int3/4 can be called from all */
        set_system_gate(4,&overflow);
        set_trap_gate(5,&bounds);
        set_trap_gate(6,&invalid_op);
        set_trap_gate(7,&device_not_available);
        set_task_gate(8,GDT_ENTRY_DOUBLEFAULT_TSS);
        set_trap_gate(9,&coprocessor_segment_overrun);
        set_trap_gate(10,&invalid_TSS);
        set_trap_gate(11,&segment_not_present);
        set_trap_gate(12,&stack_segment);
        set_trap_gate(13,&general_protection);
        set_intr_gate(14,&page_fault);
        set_trap_gate(15,&spurious_interrupt_bug);
        set_trap_gate(16,&coprocessor_error);
        set_trap_gate(17,&alignment_check);
#ifdef CONFIG_X86_MCE
        set_trap_gate(18,&machine_check);
#endif
        set_trap_gate(19,&simd_coprocessor_error);

        if (cpu_has_fxsr) {
                /*
                 * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
                 * Generates a compile-time "error: zero width for bit-field" if
                 * the alignment is wrong.
                 */
                struct fxsrAlignAssert {
                        int _:!(offsetof(struct task_struct,
                                        thread.i387.fxsave) & 15);
                };

                printk(KERN_INFO "Enabling fast FPU save and restore... ");
                set_in_cr4(X86_CR4_OSFXSR);
                printk("done.\n");
        }
        if (cpu_has_xmm) {
                printk(KERN_INFO "Enabling unmasked SIMD FPU exception "
                                "support... ");
                set_in_cr4(X86_CR4_OSXMMEXCPT);
                printk("done.\n");
        }

        set_system_gate(SYSCALL_VECTOR,&system_call);

        /*
         * Should be a barrier for any external CPU state.
         */
        cpu_init();

        trap_init_hook();
}
static int __init kstack_setup(char *s)
{
        kstack_depth_to_print = simple_strtoul(s, NULL, 0);
        return 1;
}
__setup("kstack=", kstack_setup);

static int __init call_trace_setup(char *s)
{
        if (strcmp(s, "old") == 0)
                call_trace = -1;
        else if (strcmp(s, "both") == 0)
                call_trace = 0;
        else if (strcmp(s, "newfallback") == 0)
                call_trace = 1;
        else if (strcmp(s, "new") == 0)
                call_trace = 2;
        return 1;
}
__setup("call_trace=", call_trace_setup);
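/*
 * Both knobs are boot parameters, e.g.:
 *
 *      kstack=48               dump 48 words of stack in an oops
 *      call_trace=old          use only the frame-pointer/scan backtrace
 */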