err.no Git - linux-2.6/commitdiff
x86: fix early_ioremap() on 64-bit
authorAndi Kleen <andi@firstfloor.org>
Wed, 30 Jan 2008 12:33:54 +0000 (13:33 +0100)
committerIngo Molnar <mingo@elte.hu>
Wed, 30 Jan 2008 12:33:54 +0000 (13:33 +0100)
Fix early_ioremap() on x86-64

I had ACPI failures on several machines for a few days. The symptom
was NUMA nodes not getting detected or worse cores not getting detected.
They all came from ACPI not being able to read various of its tables. I finally
bisected it down to Jeremy's "put _PAGE_GLOBAL into PAGE_KERNEL" change.
With that the fix was fairly obvious. The problem was that early_ioremap()
didn't use a "_all" flush that would affect the global PTEs too. So
with global bits getting used everywhere now an early_ioremap would
not actually flush a mapping if something else was mapped previously
on that slot (which can happen with early_iounmap in between).

This patch changes all flushes in init_64.c to be __flush_tlb_all()
and fixes the problem here.

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
arch/x86/mm/init_64.c

index 0fd9d7f77786193a7fc77f18e1093fa150118d84..c250580a9432a207f9f37e43ac9e13f964c870c7 100644 (file)
@@ -228,7 +228,7 @@ __meminit void *early_ioremap(unsigned long addr, unsigned long size)
                addr &= PMD_MASK;
                for (i = 0; i < pmds; i++, addr += PMD_SIZE)
                        set_pmd(pmd+i, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
-               __flush_tlb();
+               __flush_tlb_all();
                return (void *)vaddr;
        next:
                ;
@@ -249,7 +249,7 @@ __meminit void early_iounmap(void *addr, unsigned long size)
        pmd = level2_kernel_pgt + pmd_index(vaddr);
        for (i = 0; i < pmds; i++)
                pmd_clear(pmd + i);
-       __flush_tlb();
+       __flush_tlb_all();
 }
 
 static void __meminit
@@ -317,7 +317,7 @@ static void __meminit phys_pud_init(pud_t *pud_page, unsigned long addr, unsigne
                spin_unlock(&init_mm.page_table_lock);
                unmap_low_page(pmd);
        }
-       __flush_tlb();
+       __flush_tlb_all();
 } 
 
 static void __init find_early_table_space(unsigned long end)