x86: check vmlinux limits, 64-bit
index ad8b9733d6b3fee4d2f90e2981e54dc2b71299e4..81fcbeec389279de08c9ce0a83d6260e1bdf4777 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -91,12 +91,10 @@ static int is_prefetch(struct pt_regs *regs, unsigned long addr,
        int prefetch = 0;
        unsigned char *max_instr;
 
-#ifdef CONFIG_X86_32
-       if (!(__supported_pte_mask & _PAGE_NX))
-               return 0;
-#endif
-
-       /* If it was a exec fault on NX page, ignore */
+       /*
+        * If it was an exec (instruction fetch) fault on an NX page, then
+        * do not ignore the fault:
+        */
        if (error_code & PF_INSTR)
                return 0;
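
For reference, the x86 CPU reports the fault type in the error code it pushes:
PF_INSTR (bit 4) is set for an instruction fetch, alongside PF_PROT (bit 0),
PF_WRITE (bit 1), PF_USER (bit 2) and PF_RSVD (bit 3). Below is a minimal
userspace sketch of the early-out above, assuming only those hardware-defined
bit values (could_be_prefetch() is an illustrative stand-in, not a kernel
function):

#include <stdio.h>

/* x86 page-fault error-code bits (hardware-defined) */
#define PF_PROT   0x1   /* protection violation, not a missing page */
#define PF_WRITE  0x2   /* write access */
#define PF_USER   0x4   /* fault taken in user mode */
#define PF_RSVD   0x8   /* reserved bit set in a paging entry */
#define PF_INSTR  0x10  /* instruction fetch */

/* An instruction fetch that faulted on an NX page is a genuine
 * exec fault, never a stale prefetch, so give up immediately;
 * otherwise it is worth decoding the faulting instruction. */
static int could_be_prefetch(unsigned long error_code)
{
        if (error_code & PF_INSTR)
                return 0;
        return 1;
}

int main(void)
{
        printf("%d\n", could_be_prefetch(PF_INSTR | PF_PROT)); /* 0 */
        printf("%d\n", could_be_prefetch(PF_WRITE));           /* 1 */
        return 0;
}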
 
@@ -186,7 +184,7 @@ static int bad_address(void *p)
 }
 #endif
 
-void dump_pagetable(unsigned long address)
+static void dump_pagetable(unsigned long address)
 {
 #ifdef CONFIG_X86_32
        __typeof__(pte_val(__pte(0))) page;
@@ -428,6 +426,16 @@ static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
 }
 #endif
 
+static int spurious_fault_check(unsigned long error_code, pte_t *pte)
+{
+       if ((error_code & PF_WRITE) && !pte_write(*pte))
+               return 0;
+       if ((error_code & PF_INSTR) && !pte_exec(*pte))
+               return 0;
+
+       return 1;
+}
+
 /*
  * Handle a spurious fault caused by a stale TLB entry.  This allows
  * us to lazily refresh the TLB when increasing the permissions of a
@@ -457,20 +465,21 @@ static int spurious_fault(unsigned long address,
        if (!pud_present(*pud))
                return 0;
 
+       if (pud_large(*pud))
+               return spurious_fault_check(error_code, (pte_t *) pud);
+
        pmd = pmd_offset(pud, address);
        if (!pmd_present(*pmd))
                return 0;
 
+       if (pmd_large(*pmd))
+               return spurious_fault_check(error_code, (pte_t *) pmd);
+
        pte = pte_offset_kernel(pmd, address);
        if (!pte_present(*pte))
                return 0;
 
-       if ((error_code & PF_WRITE) && !pte_write(*pte))
-               return 0;
-       if ((error_code & PF_INSTR) && !pte_exec(*pte))
-               return 0;
-
-       return 1;
+       return spurious_fault_check(error_code, pte);
 }
 
 /*
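
The (pte_t *) casts above are safe because the bits spurious_fault_check()
tests sit at the same positions in x86 PUD, PMD and PTE entries: _PAGE_PRESENT
at bit 0, _PAGE_RW at bit 1 and _PAGE_NX at bit 63. One checker therefore
covers large and small mappings alike. A self-contained sketch of that idea
(entry_allows() is illustrative, not the kernel helper):

#include <stdint.h>
#include <stdio.h>

/* Same bit positions at every x86 paging level */
#define _PAGE_PRESENT (1ULL << 0)
#define _PAGE_RW      (1ULL << 1)
#define _PAGE_NX      (1ULL << 63)

/* One permission predicate serves PUD-, PMD- and PTE-level entries */
static int entry_allows(uint64_t entry, int is_write, int is_exec)
{
        if (is_write && !(entry & _PAGE_RW))
                return 0;
        if (is_exec && (entry & _PAGE_NX))
                return 0;
        return 1;
}

int main(void)
{
        uint64_t huge_pmd = _PAGE_PRESENT | _PAGE_RW;  /* writable large page */
        uint64_t ro_pte   = _PAGE_PRESENT | _PAGE_NX;  /* read-only, no-exec */

        printf("%d\n", entry_allows(huge_pmd, 1, 0));  /* 1: write allowed */
        printf("%d\n", entry_allows(ro_pte, 1, 0));    /* 0: write denied */
        return 0;
}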
@@ -947,11 +956,12 @@ void vmalloc_sync_all(void)
        for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
                if (!test_bit(pgd_index(address), insync)) {
                        const pgd_t *pgd_ref = pgd_offset_k(address);
+                       unsigned long flags;
                        struct page *page;
 
                        if (pgd_none(*pgd_ref))
                                continue;
-                       spin_lock(&pgd_lock);
+                       spin_lock_irqsave(&pgd_lock, flags);
                        list_for_each_entry(page, &pgd_list, lru) {
                                pgd_t *pgd;
                                pgd = (pgd_t *)page_address(page) + pgd_index(address);
@@ -960,15 +970,11 @@ void vmalloc_sync_all(void)
                                else
                                        BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref));
                        }
-                       spin_unlock(&pgd_lock);
+                       spin_unlock_irqrestore(&pgd_lock, flags);
                        set_bit(pgd_index(address), insync);
                }
                if (address == start)
                        start = address + PGDIR_SIZE;
        }
-       /* Check that there is no need to do the same for the modules area. */
-       BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
-       BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
-                               (__START_KERNEL & PGDIR_MASK)));
 #endif
 }
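
The switch from spin_lock() to spin_lock_irqsave() on pgd_lock reflects the
usual motivation for irqsave: if a lock can ever be wanted by code that runs
on top of the holder (an interrupt, or a fault taken with interrupts off),
the holder must keep interrupts masked for the duration, or the CPU can
deadlock waiting on itself. A userspace analogue, with a POSIX signal handler
standing in for the interrupt and pthread_sigmask() for the irqsave/irqrestore
pair (illustrative only, not kernel code):

#include <pthread.h>
#include <signal.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static volatile sig_atomic_t shared;

/* Plays the role of the interrupt handler: it would self-deadlock
 * if it tried to take 'lock' while the code it interrupted holds it. */
static void handler(int sig)
{
        (void)sig;
}

int main(void)
{
        sigset_t block, old;

        sigemptyset(&block);
        sigaddset(&block, SIGALRM);
        signal(SIGALRM, handler);

        /* "irqsave": mask the interrupt source before taking the lock */
        pthread_sigmask(SIG_BLOCK, &block, &old);
        pthread_mutex_lock(&lock);
        shared++;
        pthread_mutex_unlock(&lock);
        /* "irqrestore": put the previous mask back */
        pthread_sigmask(SIG_SETMASK, &old, NULL);

        printf("%d\n", (int)shared);
        return 0;
}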