/*
 * Copyright (C) 1995 Linus Torvalds
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/tty.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */
#include <linux/highmem.h>
#include <linux/bootmem.h>		/* for max_low_pfn */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>

#include <asm/system.h>
#include <asm/desc.h>
#include <asm/segment.h>
/*
 * Page fault error code bits
 *	bit 0 == 0 means no page found, 1 means protection fault
 *	bit 1 == 0 means read, 1 means write
 *	bit 2 == 0 means kernel, 1 means user-mode
 *	bit 3 == 1 means use of reserved bit detected
 *	bit 4 == 1 means fault was an instruction fetch
 */
#define PF_PROT		(1<<0)
#define PF_WRITE	(1<<1)
#define PF_USER		(1<<2)
#define PF_RSVD		(1<<3)
#define PF_INSTR	(1<<4)
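
/*
 * Example decodings: a user-mode write to an unmapped page faults with
 * PF_USER|PF_WRITE (error_code == 6), while a user-mode write to a
 * present read-only page adds PF_PROT (error_code == 7).
 */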
static inline int notify_page_fault(struct pt_regs *regs)
{
	int ret = 0;

	/* kprobe_running() needs smp_processor_id() */
	if (!user_mode_vm(regs)) {
		preempt_disable();
		if (kprobe_running() && kprobe_fault_handler(regs, 14))
			ret = 1;
		preempt_enable();
	}
	return ret;
}
/*
 * Return EIP plus the CS segment base.  The segment limit is also
 * adjusted, clamped to the kernel/user address space (whichever is
 * appropriate), and returned in *eip_limit.
 *
 * The segment is checked, because it might have been changed by another
 * task between the original faulting instruction and here.
 *
 * If CS is no longer a valid code segment, or if EIP is beyond the
 * limit, or if it is a kernel address when CS is not a kernel segment,
 * then the returned value will be greater than *eip_limit.
 *
 * This is slow, but is very rarely executed.
 */
static inline unsigned long get_segment_eip(struct pt_regs *regs,
					    unsigned long *eip_limit)
{
	unsigned long ip = regs->ip;
	unsigned seg = regs->cs & 0xffff;
	u32 seg_ar, seg_limit, base, *desc;

	/* Unlikely, but must come before segment checks. */
	if (unlikely(regs->flags & VM_MASK)) {
		base = seg << 4;
		*eip_limit = base + 0xffff;
		return base + (ip & 0xffff);
	}
	/* The standard kernel/user address space limit. */
	*eip_limit = user_mode(regs) ? USER_DS.seg : KERNEL_DS.seg;

	/* By far the most common cases. */
	if (likely(SEGMENT_IS_FLAT_CODE(seg)))
		return ip;
	/* Check the segment exists, is within the current LDT/GDT size,
	   that kernel/user (ring 0..3) has the appropriate privilege,
	   that it's a code segment, and get the limit. */
	__asm__ ("larl %3,%0; lsll %3,%1"
		 : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
	if ((~seg_ar & 0x9800) || ip > seg_limit) {
		*eip_limit = 0;
		return 1;	 /* So that returned ip > *eip_limit. */
	}
	/* Get the GDT/LDT descriptor base.
	   When you look for races in this code remember that
	   LDT and other horrors are only used in user space. */
	if (seg & (1<<2)) {
		/* Must lock the LDT while reading it. */
		mutex_lock(&current->mm->context.lock);
		desc = current->mm->context.ldt;
		desc = (void *)desc + (seg & ~7);
	} else {
		/* Must disable preemption while reading the GDT. */
		desc = (u32 *)get_cpu_gdt_table(get_cpu());
		desc = (void *)desc + (seg & ~7);
	}

	/* Decode the code segment base from the descriptor */
	base = get_desc_base((struct desc_struct *)desc);

	if (seg & (1<<2))
		mutex_unlock(&current->mm->context.lock);
	else
		put_cpu();
	/* Adjust EIP and segment limit, and clamp at the kernel limit.
	   It's legitimate for segments to wrap at 0xffffffff. */
	seg_limit += base;
	if (seg_limit < *eip_limit && seg_limit >= base)
		*eip_limit = seg_limit;
	return ip + base;
}
/*
 * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
 * Check that here and ignore it.
 */
static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
{
	unsigned long limit;
	unsigned char *instr = (unsigned char *)get_segment_eip(regs, &limit);
	int scan_more = 1;
	int prefetch = 0;
	int i;

	for (i = 0; scan_more && i < 15; i++) {
		unsigned char opcode;
		unsigned char instr_hi;
		unsigned char instr_lo;

		if (instr > (unsigned char *)limit)
			break;
		if (probe_kernel_address(instr, opcode))
			break;

		instr_hi = opcode & 0xf0;
		instr_lo = opcode & 0x0f;
		instr++;
		switch (instr_hi) {
		case 0x20:
		case 0x30:
			/*
			 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
			 * In X86_64 long mode, the CPU will signal invalid
			 * opcode if some of these prefixes are present so
			 * X86_64 will never get here anyway
			 */
			scan_more = ((instr_lo & 7) == 0x6);
			break;
		case 0x40:
			/*
			 * In AMD64 long mode 0x40..0x4F are valid REX prefixes
			 * Need to figure out under what instruction mode the
			 * instruction was issued. Could check the LDT for lm,
			 * but for now it's good enough to assume that long
			 * mode only uses well known segments or kernel.
			 */
			scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
			break;
		case 0x60:
			/* 0x64 thru 0x67 are valid prefixes in all modes. */
			scan_more = (instr_lo & 0xC) == 0x4;
			break;
		case 0xF0:
			/* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
			scan_more = !instr_lo || (instr_lo>>1) == 1;
			break;
		case 0x00:
			/* Prefetch instruction is 0x0F0D or 0x0F18 */
			scan_more = 0;
			if (instr > (unsigned char *)limit)
				break;
			if (probe_kernel_address(instr, opcode))
				break;
			prefetch = (instr_lo == 0xF) &&
				(opcode == 0x0D || opcode == 0x18);
			break;
		default:
			scan_more = 0;
			break;
		}
	}
	return prefetch;
}
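
/*
 * The opcodes recognized above are the AMD/SSE prefetch hints:
 * "prefetch"/"prefetchw" encode as 0F 0D /r, and "prefetchnta" and
 * "prefetcht0/1/2" as 0F 18 /r.  For example, prefetchnta (%eax)
 * assembles to the bytes 0F 18 00.
 */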
static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
			      unsigned long error_code)
{
	if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
		     boot_cpu_data.x86 >= 6)) {
		/* Catch an obscure case of prefetch inside an NX page. */
		if (nx_enabled && (error_code & PF_INSTR))
			return 0;
		return __is_prefetch(regs, addr);
	}
	return 0;
}
static noinline void force_sig_info_fault(int si_signo, int si_code,
					  unsigned long address, struct task_struct *tsk)
{
	siginfo_t info;

	info.si_signo = si_signo;
	info.si_errno = 0;
	info.si_code = si_code;
	info.si_addr = (void __user *)address;
	force_sig_info(si_signo, &info, tsk);
}
void do_invalid_op(struct pt_regs *, unsigned long);
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
	unsigned index = pgd_index(address);
	pgd_t *pgd_k;
	pud_t *pud, *pud_k;
	pmd_t *pmd, *pmd_k;

	pgd += index;
	pgd_k = init_mm.pgd + index;

	if (!pgd_present(*pgd_k))
		return NULL;

	/*
	 * set_pgd(pgd, *pgd_k); here would be useless on PAE
	 * and redundant with the set_pmd() on non-PAE. As would
	 * set_pud.
	 */
	pud = pud_offset(pgd, address);
	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd = pmd_offset(pud, address);
	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;
	if (!pmd_present(*pmd)) {
		set_pmd(pmd, *pmd_k);
		arch_flush_lazy_mmu_mode();
	} else
		BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
	return pmd_k;
}
/*
 * Handle a fault on the vmalloc or module mapping area
 *
 * This assumes no large pages in there.
 */
static inline int vmalloc_fault(unsigned long address)
{
	unsigned long pgd_paddr;
	pmd_t *pmd_k;
	pte_t *pte_k;
	/*
	 * Synchronize this task's top level page-table
	 * with the 'reference' page table.
	 *
	 * Do _not_ use "current" here. We might be inside
	 * an interrupt in the middle of a task switch..
	 */
	pgd_paddr = read_cr3();
	pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
	if (!pmd_k)
		return -1;
	pte_k = pte_offset_kernel(pmd_k, address);
	if (!pte_present(*pte_k))
		return -1;
	return 0;
}
int show_unhandled_signals = 1;
/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
	struct task_struct *tsk;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned long address;
	int write, si_code;
	int fault;
	/*
	 * We can fault from pretty much anywhere, with unknown IRQ state.
	 */
	trace_hardirqs_fixup();

	/* get the address */
	address = read_cr2();

	tsk = current;

	si_code = SEGV_MAPERR;
	/*
	 * We fault-in kernel-space virtual memory on-demand. The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case. We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * (error_code & 4) == 0, and that the fault was not a
	 * protection error (error_code & 9) == 0.
	 */
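	/*
	 * 0x0000000d below is PF_PROT|PF_USER|PF_RSVD; all three must be
	 * clear for this to be a plain not-present fault on a kernel
	 * address.
	 */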
	if (unlikely(address >= TASK_SIZE)) {
		if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
			return;
		if (notify_page_fault(regs))
			return;
		/*
		 * Don't take the mm semaphore here. If we fixup a prefetch
		 * fault we could otherwise deadlock.
		 */
		goto bad_area_nosemaphore;
	}

	if (notify_page_fault(regs))
		return;
	/* It's safe to allow irq's after cr2 has been saved and the vmalloc
	   fault has been handled. */
	if (regs->flags & (X86_EFLAGS_IF|VM_MASK))
		local_irq_enable();
	mm = tsk->mm;

	/*
	 * If we're in an interrupt, have no user context or are running in an
	 * atomic region then we must not take the fault.
	 */
	if (in_atomic() || !mm)
		goto bad_area_nosemaphore;
	/* When running in the kernel we expect faults to occur only to
	 * addresses in user space.  All other faults represent errors in the
	 * kernel and should generate an OOPS.  Unfortunately, in the case of an
	 * erroneous fault occurring in a code path which already holds mmap_sem
	 * we will deadlock attempting to validate the fault against the
	 * address space.  Luckily the kernel only validly references user
	 * space from well defined areas of code, which are listed in the
	 * exceptions table.
	 *
	 * As the vast majority of faults will be valid we will only perform
	 * the source reference check when there is a possibility of a deadlock.
	 * Attempt to lock the address space, if we cannot we then validate the
	 * source.  If this is invalid we can skip the address space check,
	 * thus avoiding the deadlock.
	 */
	if (!down_read_trylock(&mm->mmap_sem)) {
		if ((error_code & PF_USER) == 0 &&
		    !search_exception_tables(regs->ip))
			goto bad_area_nosemaphore;
		down_read(&mm->mmap_sem);
	}
	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;
	if (error_code & PF_USER) {
		/*
		 * Accessing the stack below %sp is always a bug.
		 * The large cushion allows instructions like enter
		 * and pusha to work. ("enter $65535,$31" pushes
		 * 32 pointers and then decrements %sp by 65535.)
		 */
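		/*
		 * On i386 the cushion works out to 65536 + 32 * 4 =
		 * 65664 bytes, covering the worst-case "enter" above.
		 */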
		if (address + 65536 + 32 * sizeof(unsigned long) < regs->sp)
			goto bad_area;
	}
	if (expand_stack(vma, address))
		goto bad_area;
	/*
	 * Ok, we have a good vm_area for this memory access, so
	 * we can handle it..
	 */
good_area:
	si_code = SEGV_ACCERR;
	write = 0;
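	/*
	 * Decode of (error_code & (PF_PROT|PF_WRITE)), handled by the
	 * switch below:
	 *   3        - write, present (protection fault)
	 *   PF_WRITE - write, not present
	 *   PF_PROT  - read, present
	 *   0        - read, not present
	 */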
	switch (error_code & (PF_PROT|PF_WRITE)) {
	default:	/* 3: write, present */
		/* fall through */
	case PF_WRITE:	/* write, not present */
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		write++;
		break;
	case PF_PROT:	/* read, present */
		goto bad_area;
	case 0:		/* read, not present */
		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
			goto bad_area;
	}
 survive:
	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		tsk->maj_flt++;
	else
		tsk->min_flt++;
	/*
	 * Did it hit the DOS screen memory VA from vm86 mode?
	 */
	if (regs->flags & VM_MASK) {
		unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
		if (bit < 32)
			tsk->thread.screen_bitmap |= 1 << bit;
	}
	up_read(&mm->mmap_sem);
	return;
	/*
	 * Something tried to access memory that isn't in our memory map..
	 * Fix it, but check if it's kernel or user first..
	 */
bad_area:
	up_read(&mm->mmap_sem);

bad_area_nosemaphore:
	/* User mode accesses just cause a SIGSEGV */
	if (error_code & PF_USER) {
		/*
		 * It's possible to have interrupts off here.
		 */
		local_irq_enable();

		/*
		 * Valid to do another page fault here because this one came
		 * from user space.
		 */
		if (is_prefetch(regs, address, error_code))
			return;

		if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
		    printk_ratelimit()) {
			printk("%s%s[%d]: segfault at %08lx ip %08lx "
			    "sp %08lx error %lx\n",
			    task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
			    tsk->comm, task_pid_nr(tsk), address, regs->ip,
			    regs->sp, error_code);
		}
		tsk->thread.cr2 = address;
		/* Kernel addresses are always protection faults */
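		/*
		 * (address >= TASK_SIZE) evaluates to 0 or 1, so the line
		 * below ORs PF_PROT into the error code reported for any
		 * fault on a kernel address.
		 */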
		tsk->thread.error_code = error_code | (address >= TASK_SIZE);
		tsk->thread.trap_no = 14;
		force_sig_info_fault(SIGSEGV, si_code, address, tsk);
		return;
	}
#ifdef CONFIG_X86_F00F_BUG
	/*
	 * Pentium F0 0F C7 C8 bug workaround.
	 */
	if (boot_cpu_data.f00f_bug) {
		unsigned long nr;

		nr = (address - idt_descr.address) >> 3;

		if (nr == 6) {
			do_invalid_op(regs, 0);
			return;
		}
	}
#endif
no_context:
	/* Are we prepared to handle this kernel fault? */
	if (fixup_exception(regs))
		return;

	/*
	 * Valid to do another page fault here, because if this fault
	 * had been triggered by is_prefetch fixup_exception would have
	 * handled it.
	 */
	if (is_prefetch(regs, address, error_code))
		return;
	/*
	 * Oops. The kernel tried to access some bad page. We'll have to
	 * terminate things with extreme prejudice.
	 */
	bust_spinlocks(1);

	if (oops_may_print()) {
		__typeof__(pte_val(__pte(0))) page;

#ifdef CONFIG_X86_PAE
		if (error_code & PF_INSTR) {
			pte_t *pte = lookup_address(address);

			if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
				printk(KERN_CRIT "kernel tried to execute "
					"NX-protected page - exploit attempt? "
					"(uid: %d)\n", current->uid);
		}
#endif
		if (address < PAGE_SIZE)
			printk(KERN_ALERT "BUG: unable to handle kernel NULL "
					"pointer dereference");
		else
			printk(KERN_ALERT "BUG: unable to handle kernel paging"
					" request");
		printk(" at virtual address %08lx\n", address);
		printk(KERN_ALERT "printing ip: %08lx ", regs->ip);
		page = read_cr3();
		page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
#ifdef CONFIG_X86_PAE
		printk("*pdpt = %016Lx ", page);
		if ((page >> PAGE_SHIFT) < max_low_pfn
		    && page & _PAGE_PRESENT) {
			page &= PAGE_MASK;
			page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
								 & (PTRS_PER_PMD - 1)];
			printk(KERN_CONT "*pde = %016Lx ", page);
			page &= ~_PAGE_NX;
		}
#else
		printk("*pde = %08lx ", page);
#endif
		/*
		 * We must not directly access the pte in the highpte
		 * case if the page table is located in highmem.
		 * And let's rather not kmap-atomic the pte, just in case
		 * it's allocated already.
		 */
		if ((page >> PAGE_SHIFT) < max_low_pfn
		    && (page & _PAGE_PRESENT)
		    && !(page & _PAGE_PSE)) {
			page &= PAGE_MASK;
			page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
								 & (PTRS_PER_PTE - 1)];
			printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
		}
	}
	tsk->thread.cr2 = address;
	tsk->thread.trap_no = 14;
	tsk->thread.error_code = error_code;
	die("Oops", regs, error_code);
	bust_spinlocks(0);
	do_exit(SIGKILL);
	/*
	 * We ran out of memory, or some other thing happened to us that made
	 * us unable to handle the page fault gracefully.
	 */
out_of_memory:
	up_read(&mm->mmap_sem);
	if (is_global_init(tsk)) {
		yield();
		down_read(&mm->mmap_sem);
		goto survive;
	}
	printk("VM: killing process %s\n", tsk->comm);
	if (error_code & PF_USER)
		do_group_exit(SIGKILL);
	goto no_context;
do_sigbus:
	up_read(&mm->mmap_sem);

	/* Kernel mode? Handle exceptions or die */
	if (!(error_code & PF_USER))
		goto no_context;

	/* User space => ok to do another page fault */
	if (is_prefetch(regs, address, error_code))
		return;

	tsk->thread.cr2 = address;
	tsk->thread.error_code = error_code;
	tsk->thread.trap_no = 14;
	force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
}
void vmalloc_sync_all(void)
{
	/*
	 * Note that races in the updates of insync and start aren't
	 * problematic: insync can only get set bits added, and updates to
	 * start are only improving performance (without affecting correctness
	 * at all).
	 */
	static DECLARE_BITMAP(insync, PTRS_PER_PGD);
	static unsigned long start = TASK_SIZE;
	unsigned long address;
	if (SHARED_KERNEL_PMD)
		return;

	BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
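	/*
	 * The BUILD_BUG_ON above guarantees that TASK_SIZE is a multiple
	 * of PGDIR_SIZE, so the loop below steps through whole pgd
	 * entries.
	 */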
	for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
		if (!test_bit(pgd_index(address), insync)) {
			unsigned long flags;
			struct page *page;

			spin_lock_irqsave(&pgd_lock, flags);
			for (page = pgd_list; page; page =
			     (struct page *)page->index)
				if (!vmalloc_sync_one(page_address(page),
						      address)) {
					BUG_ON(page != pgd_list);
					break;
				}
			spin_unlock_irqrestore(&pgd_lock, flags);
			if (!page)
				set_bit(pgd_index(address), insync);
		}
		if (address == start && test_bit(pgd_index(address), insync))
			start = address + PGDIR_SIZE;
	}
}