X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=arch%2Fx86%2Fmm%2Ffault.c;h=ad8b9733d6b3fee4d2f90e2981e54dc2b71299e4;hb=b5360222273cb3e57a119c18eef42f59da4da87b;hp=e28cc5277b167c3907075b0f610342cd7a2ceec8;hpb=75659ca0c10992dcb39258518368a0f6f56e935d;p=linux-2.6

diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index e28cc5277b..ad8b9733d6 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -240,7 +240,8 @@ void dump_pagetable(unsigned long address)
 	pud = pud_offset(pgd, address);
 	if (bad_address(pud)) goto bad;
 	printk("PUD %lx ", pud_val(*pud));
-	if (!pud_present(*pud)) goto ret;
+	if (!pud_present(*pud) || pud_large(*pud))
+		goto ret;
 
 	pmd = pmd_offset(pud, address);
 	if (bad_address(pmd)) goto bad;
@@ -382,7 +383,7 @@ static void show_fault_oops(struct pt_regs *regs, unsigned long error_code,
 
 #ifdef CONFIG_X86_PAE
 	if (error_code & PF_INSTR) {
-		int level;
+		unsigned int level;
 		pte_t *pte = lookup_address(address, &level);
 
 		if (pte && pte_present(*pte) && !pte_exec(*pte))
@@ -508,6 +509,10 @@ static int vmalloc_fault(unsigned long address)
 	pmd_t *pmd, *pmd_ref;
 	pte_t *pte, *pte_ref;
 
+	/* Make sure we are in vmalloc area */
+	if (!(address >= VMALLOC_START && address < VMALLOC_END))
+		return -1;
+
 	/* Copy kernel mappings over when needed. This can also
 	   happen within a race in page table update. In the later
 	   case just flush. */
@@ -603,6 +608,9 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	 */
 #ifdef CONFIG_X86_32
 	if (unlikely(address >= TASK_SIZE)) {
+#else
+	if (unlikely(address >= TASK_SIZE64)) {
+#endif
 		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
 		    vmalloc_fault(address) >= 0)
 			return;
@@ -618,6 +626,8 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 		goto bad_area_nosemaphore;
 	}
 
+
+#ifdef CONFIG_X86_32
 	/* It's safe to allow irq's after cr2 has been saved and the
 	   vmalloc fault has been handled. */
 	if (regs->flags & (X86_EFLAGS_IF|VM_MASK))
@@ -630,28 +640,6 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	if (in_atomic() || !mm)
 		goto bad_area_nosemaphore;
 #else /* CONFIG_X86_64 */
-	if (unlikely(address >= TASK_SIZE64)) {
-		/*
-		 * Don't check for the module range here: its PML4
-		 * is always initialized because it's shared with the main
-		 * kernel text. Only vmalloc may need PML4 syncups.
-		 */
-		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
-		    ((address >= VMALLOC_START && address < VMALLOC_END))) {
-			if (vmalloc_fault(address) >= 0)
-				return;
-		}
-
-		/* Can handle a stale RO->RW TLB */
-		if (spurious_fault(address, error_code))
-			return;
-
-		/*
-		 * Don't take the mm semaphore here. If we fixup a prefetch
-		 * fault we could otherwise deadlock.
-		 */
-		goto bad_area_nosemaphore;
-	}
 
 	if (likely(regs->flags & X86_EFLAGS_IF))
 		local_irq_enable();
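
Note on the vmalloc_fault() hunk at +509: moving the VMALLOC_START/VMALLOC_END range check into vmalloc_fault() itself means do_page_fault() no longer has to repeat that test before calling the helper, which is what lets the x86-64-only block removed at -630 disappear and the TASK_SIZE/TASK_SIZE64 checks share one body. The user-space sketch below is illustrative only; the fake_* names and the placeholder address constants are assumptions for the demo, not the kernel's real values or API.

#include <stdio.h>

/* Placeholder bounds, chosen only for this demo; not the kernel's layout. */
#define FAKE_VMALLOC_START 0xffffc20000000000ULL
#define FAKE_VMALLOC_END   0xffffe1ffffffffffULL

/* Mirrors the guard the patch adds at the top of vmalloc_fault():
 * reject any address outside the vmalloc range and report -1,
 * which is what the caller in do_page_fault() checks for. */
static int fake_vmalloc_fault(unsigned long long address)
{
	if (!(address >= FAKE_VMALLOC_START && address < FAKE_VMALLOC_END))
		return -1;

	/* The real function would sync the vmalloc page tables here. */
	return 0;
}

int main(void)
{
	printf("%d\n", fake_vmalloc_fault(0xffffc20000001000ULL)); /* 0  (in range) */
	printf("%d\n", fake_vmalloc_fault(0x0000000000400000ULL)); /* -1 (user address) */
	return 0;
}

The design point is simply that a helper which validates its own preconditions lets the 32-bit and 64-bit callers collapse into a single shared path.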