/*
- * linux/arch/i386/mm/fault.c
- *
* Copyright (C) 1995 Linus Torvalds
*/
#include <linux/kprobes.h>
#include <linux/uaccess.h>
#include <linux/kdebug.h>
-#include <linux/kprobes.h>
#include <asm/system.h>
#include <asm/desc.h>
#include <asm/segment.h>
-extern void die(const char *,struct pt_regs *,long);
+/*
+ * Page fault error code bits
+ * bit 0 == 0 means no page found, 1 means protection fault
+ * bit 1 == 0 means read, 1 means write
+ * bit 2 == 0 means kernel, 1 means user-mode
+ * bit 3 == 1 means use of reserved bit detected
+ * bit 4 == 1 means fault was an instruction fetch
+ */
+#define PF_PROT (1<<0)
+#define PF_WRITE (1<<1)
+#define PF_USER (1<<2)
+#define PF_RSVD (1<<3)
+#define PF_INSTR (1<<4)
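+
+/*
+ * Editorial example (not part of this patch): an error_code of 6
+ * (PF_WRITE|PF_USER) is a user-mode write to a not-present page;
+ * 0x15 (PF_PROT|PF_USER|PF_INSTR) is a user-mode instruction fetch
+ * that hit a present page whose protection (e.g. NX) forbids it.
+ */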
-#ifdef CONFIG_KPROBES
static inline int notify_page_fault(struct pt_regs *regs)
{
+#ifdef CONFIG_KPROBES
int ret = 0;
/* kprobe_running() needs smp_processor_id() */
}
return ret;
-}
#else
-static inline int notify_page_fault(struct pt_regs *regs)
-{
return 0;
-}
#endif
+}
/*
- * Return EIP plus the CS segment base. The segment limit is also
- * adjusted, clamped to the kernel/user address space (whichever is
- * appropriate), and returned in *eip_limit.
+ * X86_32
+ * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
+ * Check that here and ignore it.
*
- * The segment is checked, because it might have been changed by another
- * task between the original faulting instruction and here.
+ * X86_64
+ * Sometimes the CPU reports invalid exceptions on prefetch.
+ * Check that here and ignore it.
*
- * If CS is no longer a valid code segment, or if EIP is beyond the
- * limit, or if it is a kernel address when CS is not a kernel segment,
- * then the returned value will be greater than *eip_limit.
- *
- * This is slow, but is very rarely executed.
+ * Opcode checker based on code by Richard Brunner
*/
-static inline unsigned long get_segment_eip(struct pt_regs *regs,
- unsigned long *eip_limit)
+static int is_prefetch(struct pt_regs *regs, unsigned long addr,
+ unsigned long error_code)
{
- unsigned long eip = regs->eip;
- unsigned seg = regs->xcs & 0xffff;
- u32 seg_ar, seg_limit, base, *desc;
-
- /* Unlikely, but must come before segment checks. */
- if (unlikely(regs->eflags & VM_MASK)) {
- base = seg << 4;
- *eip_limit = base + 0xffff;
- return base + (eip & 0xffff);
- }
-
- /* The standard kernel/user address space limit. */
- *eip_limit = user_mode(regs) ? USER_DS.seg : KERNEL_DS.seg;
-
- /* By far the most common cases. */
- if (likely(SEGMENT_IS_FLAT_CODE(seg)))
- return eip;
-
- /* Check the segment exists, is within the current LDT/GDT size,
- that kernel/user (ring 0..3) has the appropriate privilege,
- that it's a code segment, and get the limit. */
- __asm__ ("larl %3,%0; lsll %3,%1"
- : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
- if ((~seg_ar & 0x9800) || eip > seg_limit) {
- *eip_limit = 0;
- return 1; /* So that returned eip > *eip_limit. */
- }
+ unsigned char *instr;
+ int scan_more = 1;
+ int prefetch = 0;
+ unsigned char *max_instr;
- /* Get the GDT/LDT descriptor base.
- When you look for races in this code remember that
- LDT and other horrors are only used in user space. */
- if (seg & (1<<2)) {
- /* Must lock the LDT while reading it. */
-		mutex_lock(&current->mm->context.lock);
- desc = current->mm->context.ldt;
- desc = (void *)desc + (seg & ~7);
+#ifdef CONFIG_X86_32
+ if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
+ boot_cpu_data.x86 >= 6)) {
+ /* Catch an obscure case of prefetch inside an NX page. */
+ if (nx_enabled && (error_code & PF_INSTR))
+ return 0;
} else {
- /* Must disable preemption while reading the GDT. */
- desc = (u32 *)get_cpu_gdt_table(get_cpu());
- desc = (void *)desc + (seg & ~7);
+ return 0;
}
+#else
+	/* If it was an exec fault, ignore it. */
+ if (error_code & PF_INSTR)
+ return 0;
+#endif
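+
+	/*
+	 * Editorial note: a genuine instruction fetch (PF_INSTR) can never
+	 * be caused by a prefetch instruction, so such faults are excluded
+	 * above; on X86_32 the opcode scan below is further limited to the
+	 * AMD CPUs (family >= 6) known to exhibit the erratum.
+	 */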
- /* Decode the code segment base from the descriptor */
- base = get_desc_base((unsigned long *)desc);
-
- if (seg & (1<<2)) {
-		mutex_unlock(&current->mm->context.lock);
- } else
- put_cpu();
-
- /* Adjust EIP and segment limit, and clamp at the kernel limit.
- It's legitimate for segments to wrap at 0xffffffff. */
- seg_limit += base;
- if (seg_limit < *eip_limit && seg_limit >= base)
- *eip_limit = seg_limit;
- return eip + base;
-}
+ instr = (unsigned char *)convert_ip_to_linear(current, regs);
+ max_instr = instr + 15;
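+
+	/*
+	 * Editorial note: 15 bytes is the architectural maximum length of
+	 * a single x86 instruction, so the scan below is bounded.
+	 */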
-/*
- * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
- * Check that here and ignore it.
- */
-static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
-{
- unsigned long limit;
- unsigned char *instr = (unsigned char *)get_segment_eip (regs, &limit);
- int scan_more = 1;
- int prefetch = 0;
- int i;
+ if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
+ return 0;
- for (i = 0; scan_more && i < 15; i++) {
+ while (scan_more && instr < max_instr) {
unsigned char opcode;
unsigned char instr_hi;
unsigned char instr_lo;
- if (instr > (unsigned char *)limit)
- break;
if (probe_kernel_address(instr, opcode))
- break;
+ break;
- instr_hi = opcode & 0xf0;
- instr_lo = opcode & 0x0f;
+ instr_hi = opcode & 0xf0;
+ instr_lo = opcode & 0x0f;
instr++;
- switch (instr_hi) {
+ switch (instr_hi) {
case 0x20:
case 0x30:
- /* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. */
+		/*
+		 * Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes.
+		 * In X86_64 long mode, the CPU will signal invalid
+		 * opcode if some of these prefixes are present, so
+		 * X86_64 will never get here anyway.
+		 */
scan_more = ((instr_lo & 7) == 0x6);
break;
-
+#ifdef CONFIG_X86_64
+ case 0x40:
+ /*
+	 * In AMD64 long mode 0x40..0x4F are valid REX prefixes.
+	 * Need to figure out under what instruction mode the
+	 * instruction was issued. Could check the LDT for lm,
+	 * but for now it's good enough to assume that long
+	 * mode only uses well-known segments or kernel.
+ */
+ scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
+ break;
+#endif
case 0x60:
/* 0x64 thru 0x67 are valid prefixes in all modes. */
scan_more = (instr_lo & 0xC) == 0x4;
- break;
+ break;
case 0xF0:
- /* 0xF0, 0xF2, and 0xF3 are valid prefixes */
+ /* 0xF0, 0xF2, 0xF3 are valid prefixes in all modes. */
scan_more = !instr_lo || (instr_lo>>1) == 1;
- break;
+ break;
case 0x00:
/* Prefetch instruction is 0x0F0D or 0x0F18 */
scan_more = 0;
- if (instr > (unsigned char *)limit)
- break;
+
if (probe_kernel_address(instr, opcode))
break;
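			/*
			 * Editorial note: 0x0F 0x0D is AMD 3DNow!
			 * PREFETCH/PREFETCHW; 0x0F 0x18 encodes the SSE
			 * PREFETCHNTA/T0/T1/T2 hints.
			 */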
prefetch = (instr_lo == 0xF) &&
(opcode == 0x0D || opcode == 0x18);
- break;
+ break;
default:
scan_more = 0;
break;
- }
+ }
}
return prefetch;
}
-static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
- unsigned long error_code)
-{
- if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
- boot_cpu_data.x86 >= 6)) {
- /* Catch an obscure case of prefetch inside an NX page. */
- if (nx_enabled && (error_code & 16))
- return 0;
- return __is_prefetch(regs, addr);
- }
- return 0;
-}
-
-static noinline void force_sig_info_fault(int si_signo, int si_code,
+static void force_sig_info_fault(int si_signo, int si_code,
unsigned long address, struct task_struct *tsk)
{
siginfo_t info;
force_sig_info(si_signo, &info, tsk);
}
-fastcall void do_invalid_op(struct pt_regs *, unsigned long);
+void dump_pagetable(unsigned long address)
+{
+ __typeof__(pte_val(__pte(0))) page;
+
+ page = read_cr3();
+ page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
+#ifdef CONFIG_X86_PAE
+ printk("*pdpt = %016Lx ", page);
+ if ((page >> PAGE_SHIFT) < max_low_pfn
+ && page & _PAGE_PRESENT) {
+ page &= PAGE_MASK;
+ page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
+ & (PTRS_PER_PMD - 1)];
+ printk(KERN_CONT "*pde = %016Lx ", page);
+ page &= ~_PAGE_NX;
+ }
+#else
+ printk("*pde = %08lx ", page);
+#endif
+
+ /*
+ * We must not directly access the pte in the highpte
+ * case if the page table is located in highmem.
+ * And let's rather not kmap-atomic the pte, just in case
+ * it's allocated already.
+ */
+ if ((page >> PAGE_SHIFT) < max_low_pfn
+ && (page & _PAGE_PRESENT)
+ && !(page & _PAGE_PSE)) {
+ page &= PAGE_MASK;
+ page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
+ & (PTRS_PER_PTE - 1)];
+ printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
+ }
+
+ printk("\n");
+}
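+
+/*
+ * Example output (PAE build, hypothetical values):
+ *	*pdpt = 0000000021a44001 *pde = 000000003f6ac067 *pte = 000000002a1f3067
+ */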
+
+void do_invalid_op(struct pt_regs *, unsigned long);
static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
{
return pmd_k;
}
+#ifdef CONFIG_X86_64
+static const char errata93_warning[] =
+KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
+KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
+KERN_ERR "******* Please consider a BIOS update.\n"
+KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
+#endif
+
+/* Workaround for K8 erratum #93 & buggy BIOS.
+   BIOS SMM functions are required to use a specific workaround
+   to avoid corruption of the 64-bit RIP register on C stepping K8.
+   Many BIOSes that weren't tested properly miss this.
+   The OS sees this as a page fault with the upper 32 bits of RIP cleared.
+   Try to work around it here.
+   Note we only handle faults in the kernel here.
+   Does nothing on X86_32.
+ */
+static int is_errata93(struct pt_regs *regs, unsigned long address)
+{
+#ifdef CONFIG_X86_64
+ static int warned;
+ if (address != regs->ip)
+ return 0;
+ if ((address >> 32) != 0)
+ return 0;
+ address |= 0xffffffffUL << 32;
+ if ((address >= (u64)_stext && address <= (u64)_etext) ||
+ (address >= MODULES_VADDR && address <= MODULES_END)) {
+ if (!warned) {
+ printk(errata93_warning);
+ warned = 1;
+ }
+ regs->ip = address;
+ return 1;
+ }
+#endif
+ return 0;
+}
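+
+/*
+ * Worked example for the above (hypothetical address): under erratum
+ * #93 a fault at ip 0x0000000080105000, where the kernel text really
+ * lives at 0xffffffff80105000, is repaired by OR-ing the upper 32 bits
+ * back into both the address and regs->ip.
+ */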
+
+
/*
* Handle a fault on the vmalloc or module mapping area
*
*/
static inline int vmalloc_fault(unsigned long address)
{
+#ifdef CONFIG_X86_32
unsigned long pgd_paddr;
pmd_t *pmd_k;
pte_t *pte_k;
if (!pte_present(*pte_k))
return -1;
return 0;
+#else
+ pgd_t *pgd, *pgd_ref;
+ pud_t *pud, *pud_ref;
+ pmd_t *pmd, *pmd_ref;
+ pte_t *pte, *pte_ref;
+
+	/* Copy kernel mappings over when needed. This can also
+	   happen within a race in page table update. In the latter
+	   case just flush. */
+
+ pgd = pgd_offset(current->mm ?: &init_mm, address);
+ pgd_ref = pgd_offset_k(address);
+ if (pgd_none(*pgd_ref))
+ return -1;
+ if (pgd_none(*pgd))
+ set_pgd(pgd, *pgd_ref);
+ else
+ BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
+
+ /* Below here mismatches are bugs because these lower tables
+ are shared */
+
+ pud = pud_offset(pgd, address);
+ pud_ref = pud_offset(pgd_ref, address);
+ if (pud_none(*pud_ref))
+ return -1;
+ if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
+ BUG();
+ pmd = pmd_offset(pud, address);
+ pmd_ref = pmd_offset(pud_ref, address);
+ if (pmd_none(*pmd_ref))
+ return -1;
+ if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
+ BUG();
+ pte_ref = pte_offset_kernel(pmd_ref, address);
+ if (!pte_present(*pte_ref))
+ return -1;
+ pte = pte_offset_kernel(pmd, address);
+ /* Don't use pte_page here, because the mappings can point
+ outside mem_map, and the NUMA hash lookup cannot handle
+ that. */
+ if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
+ BUG();
+ return 0;
+#endif
}
int show_unhandled_signals = 1;
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
- *
- * error_code:
- * bit 0 == 0 means no page found, 1 means protection fault
- * bit 1 == 0 means read, 1 means write
- * bit 2 == 0 means kernel, 1 means user-mode
- * bit 3 == 1 means use of reserved bit detected
- * bit 4 == 1 means fault was an instruction fetch
*/
-fastcall void __kprobes do_page_fault(struct pt_regs *regs,
- unsigned long error_code)
+void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
{
struct task_struct *tsk;
struct mm_struct *mm;
- struct vm_area_struct * vma;
+ struct vm_area_struct *vma;
unsigned long address;
int write, si_code;
int fault;
- /* get the address */
- address = read_cr2();
+ /*
+ * We can fault from pretty much anywhere, with unknown IRQ state.
+ */
+ trace_hardirqs_fixup();
tsk = current;
+ mm = tsk->mm;
+ prefetchw(&mm->mmap_sem);
+
+ /* get the address */
+ address = read_cr2();
si_code = SEGV_MAPERR;
+ if (notify_page_fault(regs))
+ return;
+
/*
* We fault-in kernel-space virtual memory on-demand. The
* 'reference' page table is init_mm.pgd.
* protection error (error_code & 9) == 0.
*/
if (unlikely(address >= TASK_SIZE)) {
- if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
- return;
- if (notify_page_fault(regs))
+ if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
+ vmalloc_fault(address) >= 0)
return;
/*
* Don't take the mm semaphore here. If we fixup a prefetch
goto bad_area_nosemaphore;
}
- if (notify_page_fault(regs))
- return;
-
/* It's safe to allow irq's after cr2 has been saved and the vmalloc
fault has been handled. */
- if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
+ if (regs->flags & (X86_EFLAGS_IF|VM_MASK))
local_irq_enable();
- mm = tsk->mm;
-
/*
* If we're in an interrupt, have no user context or are running in an
- * atomic region then we must not take the fault..
+ * atomic region then we must not take the fault.
*/
if (in_atomic() || !mm)
goto bad_area_nosemaphore;
* thus avoiding the deadlock.
*/
if (!down_read_trylock(&mm->mmap_sem)) {
- if ((error_code & 4) == 0 &&
- !search_exception_tables(regs->eip))
+ if ((error_code & PF_USER) == 0 &&
+ !search_exception_tables(regs->ip))
goto bad_area_nosemaphore;
down_read(&mm->mmap_sem);
}
goto good_area;
if (!(vma->vm_flags & VM_GROWSDOWN))
goto bad_area;
- if (error_code & 4) {
+ if (error_code & PF_USER) {
/*
- * Accessing the stack below %esp is always a bug.
+ * Accessing the stack below %sp is always a bug.
* The large cushion allows instructions like enter
* and pusha to work. ("enter $65535,$31" pushes
- * 32 pointers and then decrements %esp by 65535.)
+ * 32 pointers and then decrements %sp by 65535.)
*/
- if (address + 65536 + 32 * sizeof(unsigned long) < regs->esp)
+ if (address + 65536 + 32 * sizeof(unsigned long) < regs->sp)
goto bad_area;
}
if (expand_stack(vma, address))
good_area:
si_code = SEGV_ACCERR;
write = 0;
- switch (error_code & 3) {
- default: /* 3: write, present */
- /* fall through */
- case 2: /* write, not present */
- if (!(vma->vm_flags & VM_WRITE))
- goto bad_area;
- write++;
- break;
- case 1: /* read, present */
+ switch (error_code & (PF_PROT|PF_WRITE)) {
+ default: /* 3: write, present */
+ /* fall through */
+ case PF_WRITE: /* write, not present */
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ write++;
+ break;
+ case PF_PROT: /* read, present */
+ goto bad_area;
+ case 0: /* read, not present */
+ if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
goto bad_area;
- case 0: /* read, not present */
- if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
- goto bad_area;
}
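	/*
	 * Editorial summary of the switch above:
	 *	PF_PROT|PF_WRITE: write to a present page -> need VM_WRITE
	 *	PF_WRITE:         write, page not present -> need VM_WRITE
	 *	PF_PROT:          read of a present page  -> always bad
	 *	0:                read, page not present  -> need VM_READ,
	 *			  VM_EXEC or VM_WRITE
	 */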
survive:
else
tsk->min_flt++;
+#ifdef CONFIG_X86_32
/*
* Did it hit the DOS screen memory VA from vm86 mode?
*/
- if (regs->eflags & VM_MASK) {
+ if (v8086_mode(regs)) {
unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
if (bit < 32)
tsk->thread.screen_bitmap |= 1 << bit;
}
+#endif
up_read(&mm->mmap_sem);
return;
bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
- if (error_code & 4) {
+ if (error_code & PF_USER) {
/*
* It's possible to have interrupts off here.
*/
local_irq_enable();
- /*
- * Valid to do another page fault here because this one came
+ /*
+ * Valid to do another page fault here because this one came
* from user space.
*/
if (is_prefetch(regs, address, error_code))
if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
printk_ratelimit()) {
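		/*
		 * Editorial note: the printk below produces a line like
		 * (hypothetical values):
		 * a.out[1234]: segfault at 10 ip 08048fc0 sp bfc59ab0 error 6 in a.out[8048000+1000]
		 */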
- printk("%s%s[%d]: segfault at %08lx eip %08lx "
- "esp %08lx error %lx\n",
- task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
- tsk->comm, task_pid_nr(tsk), address, regs->eip,
- regs->esp, error_code);
+ printk(
+#ifdef CONFIG_X86_32
+ "%s%s[%d]: segfault at %lx ip %08lx sp %08lx error %lx",
+#else
+ "%s%s[%d]: segfault at %lx ip %lx sp %lx error %lx",
+#endif
+ task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
+ tsk->comm, task_pid_nr(tsk), address, regs->ip,
+ regs->sp, error_code);
+ print_vma_addr(" in ", regs->ip);
+ printk("\n");
}
tsk->thread.cr2 = address;
/* Kernel addresses are always protection faults */
*/
if (boot_cpu_data.f00f_bug) {
unsigned long nr;
-
+
nr = (address - idt_descr.address) >> 3;
if (nr == 6) {
if (fixup_exception(regs))
return;
- /*
+ /*
* Valid to do another page fault here, because if this fault
- * had been triggered by is_prefetch fixup_exception would have
+ * had been triggered by is_prefetch fixup_exception would have
* handled it.
*/
- if (is_prefetch(regs, address, error_code))
- return;
+ if (is_prefetch(regs, address, error_code))
+ return;
+
+ if (is_errata93(regs, address))
+ return;
/*
* Oops. The kernel tried to access some bad page. We'll have to
bust_spinlocks(1);
if (oops_may_print()) {
- __typeof__(pte_val(__pte(0))) page;
#ifdef CONFIG_X86_PAE
- if (error_code & 16) {
- pte_t *pte = lookup_address(address);
+ if (error_code & PF_INSTR) {
+ int level;
+ pte_t *pte = lookup_address(address, &level);
- if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
+ if (pte && pte_present(*pte) && !pte_exec(*pte))
printk(KERN_CRIT "kernel tried to execute "
"NX-protected page - exploit attempt? "
"(uid: %d)\n", current->uid);
else
printk(KERN_ALERT "BUG: unable to handle kernel paging"
" request");
- printk(" at virtual address %08lx\n",address);
- printk(KERN_ALERT "printing eip: %08lx ", regs->eip);
-
- page = read_cr3();
- page = ((__typeof__(page) *) __va(page))[address >> PGDIR_SHIFT];
-#ifdef CONFIG_X86_PAE
- printk("*pdpt = %016Lx ", page);
- if ((page >> PAGE_SHIFT) < max_low_pfn
- && page & _PAGE_PRESENT) {
- page &= PAGE_MASK;
- page = ((__typeof__(page) *) __va(page))[(address >> PMD_SHIFT)
- & (PTRS_PER_PMD - 1)];
- printk(KERN_CONT "*pde = %016Lx ", page);
- page &= ~_PAGE_NX;
- }
-#else
- printk("*pde = %08lx ", page);
-#endif
-
- /*
- * We must not directly access the pte in the highpte
- * case if the page table is located in highmem.
- * And let's rather not kmap-atomic the pte, just in case
- * it's allocated already.
- */
- if ((page >> PAGE_SHIFT) < max_low_pfn
- && (page & _PAGE_PRESENT)
- && !(page & _PAGE_PSE)) {
- page &= PAGE_MASK;
- page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
- & (PTRS_PER_PTE - 1)];
- printk("*pte = %0*Lx ", sizeof(page)*2, (u64)page);
- }
+ printk(" at virtual address %08lx\n", address);
+ printk(KERN_ALERT "printing ip: %08lx ", regs->ip);
- printk("\n");
+ dump_pagetable(address);
}
tsk->thread.cr2 = address;
goto survive;
}
printk("VM: killing process %s\n", tsk->comm);
- if (error_code & 4)
+ if (error_code & PF_USER)
do_group_exit(SIGKILL);
goto no_context;
up_read(&mm->mmap_sem);
/* Kernel mode? Handle exceptions or die */
- if (!(error_code & 4))
+ if (!(error_code & PF_USER))
goto no_context;
/* User space => ok to do another page fault */