From a04ad82d0bff4bb564f290eb50982e02458592d9 Mon Sep 17 00:00:00 2001
From: Yinghai Lu <yhlu.kernel@gmail.com>
Date: Sun, 29 Jun 2008 00:39:06 -0700
Subject: [PATCH] x86: fix init_memory_mapping over boundary, v4

use PMD_SHIFT to calculate boundary
also adjust size for pre-allocated table size

Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
---
 arch/x86/mm/init_32.c | 89 ++++++++++++++++++++++++++++++++-----------
 1 file changed, 67 insertions(+), 22 deletions(-)

diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index 90ca67be96..aa5e37c9f4 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -184,8 +184,9 @@ static inline int is_kernel_text(unsigned long addr)
  * PAGE_OFFSET:
  */
 static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
-						unsigned long start,
-						unsigned long end)
+						unsigned long start_pfn,
+						unsigned long end_pfn,
+						int use_pse)
 {
 	int pgd_idx, pmd_idx, pte_ofs;
 	unsigned long pfn;
@@ -193,33 +194,33 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
 	pmd_t *pmd;
 	pte_t *pte;
 	unsigned pages_2m = 0, pages_4k = 0;
-	unsigned limit_pfn = end >> PAGE_SHIFT;
-	pgd_idx = pgd_index(start + PAGE_OFFSET);
-	pgd = pgd_base + pgd_idx;
-	pfn = start >> PAGE_SHIFT;
+	if (!cpu_has_pse)
+		use_pse = 0;
 
+	pfn = start_pfn;
+	pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
+	pgd = pgd_base + pgd_idx;
 	for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
 		pmd = one_md_table_init(pgd);
-		if (pfn >= limit_pfn)
-			continue;
-		for (pmd_idx = 0;
-		     pmd_idx < PTRS_PER_PMD && pfn < limit_pfn;
+		if (pfn >= end_pfn)
+			continue;
+#ifdef CONFIG_X86_PAE
+		pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
+		pmd += pmd_idx;
+#else
+		pmd_idx = 0;
+#endif
+		for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
 		     pmd++, pmd_idx++) {
 			unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
 
 			/*
 			 * Map with big pages if possible, otherwise
 			 * create normal page tables:
-			 *
-			 * Don't use a large page for the first 2/4MB of memory
-			 * because there are often fixed size MTRRs in there
-			 * and overlapping MTRRs into large pages can cause
-			 * slowdowns.
 			 */
-			if (cpu_has_pse && !(pgd_idx == 0 && pmd_idx == 0)) {
+			if (use_pse) {
 				unsigned int addr2;
 				pgprot_t prot = PAGE_KERNEL_LARGE;
 
@@ -237,8 +238,9 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base,
 			}
 			pte = one_page_table_init(pmd);
 
-			for (pte_ofs = 0;
-			     pte_ofs < PTRS_PER_PTE && pfn < limit_pfn;
+			pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
+			pte += pte_ofs;
+			for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
 			     pte++, pfn++, pte_ofs++) {
 				pgprot_t prot = PAGE_KERNEL;
 
@@ -745,14 +747,18 @@ static void __init find_early_table_space(unsigned long end)
 
 	if (cpu_has_pse) {
 		unsigned long extra;
-		extra = end - ((end>>21) << 21);
-		extra += (2UL<<20);
+
+		extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT);
+		extra += PMD_SIZE;
 		ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
 	} else
 		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
 	tables += PAGE_ALIGN(ptes * sizeof(pte_t));
 
+	/* for fixmap */
+	tables += PAGE_SIZE * 2;
+
 	/*
 	 * RED-PEN putting page tables only on node 0 could
 	 * cause a hotspot and fill up ZONE_DMA. The page tables
@@ -770,6 +776,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 						unsigned long end)
 {
 	pgd_t *pgd_base = swapper_pg_dir;
+	unsigned long start_pfn, end_pfn;
+	unsigned long big_page_start;
 
 	/*
 	 * Find space for the kernel direct mapping tables.
@@ -794,7 +802,44 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
 		__PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
 	}
 
-	kernel_physical_mapping_init(pgd_base, start, end);
+	/*
+	 * Don't use a large page for the first 2/4MB of memory
+	 * because there are often fixed size MTRRs in there
+	 * and overlapping MTRRs into large pages can cause
+	 * slowdowns.
+	 */
+	big_page_start = PMD_SIZE;
+
+	if (start < big_page_start) {
+		start_pfn = start >> PAGE_SHIFT;
+		end_pfn = min(big_page_start>>PAGE_SHIFT, end>>PAGE_SHIFT);
+	} else {
+		/* head is not big page alignment ? */
+		start_pfn = start >> PAGE_SHIFT;
+		end_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
+				 << (PMD_SHIFT - PAGE_SHIFT);
+	}
+	if (start_pfn < end_pfn)
+		kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn, 0);
+
+	/* big page range */
+	start_pfn = ((start + (PMD_SIZE - 1))>>PMD_SHIFT)
+			 << (PMD_SHIFT - PAGE_SHIFT);
+	if (start_pfn < (big_page_start >> PAGE_SHIFT))
+		start_pfn = big_page_start >> PAGE_SHIFT;
+	end_pfn = (end>>PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
+	if (start_pfn < end_pfn)
+		kernel_physical_mapping_init(pgd_base, start_pfn, end_pfn,
+						cpu_has_pse);
+
+	/* tail is not big page alignment ? */
+	start_pfn = end_pfn;
+	if (start_pfn > (big_page_start>>PAGE_SHIFT)) {
+		end_pfn = end >> PAGE_SHIFT;
+		if (start_pfn < end_pfn)
+			kernel_physical_mapping_init(pgd_base, start_pfn,
+							end_pfn, 0);
+	}
 
 	early_ioremap_page_table_range_init(pgd_base);
-- 
2.39.5
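
For readers tracing the arithmetic: the init_memory_mapping() hunk above splits [start, end) into up to three ranges, a 4k-mapped head below big_page_start (or up to the first PMD boundary), a PMD-aligned middle that may use big pages, and a 4k-mapped unaligned tail, and hands each to kernel_physical_mapping_init(). The stand-alone C sketch below mirrors that split so it can be compiled and tested in user space. It is illustrative only: map_range() is a hypothetical stand-in for kernel_physical_mapping_init(), min() is open-coded, and the hard-coded shifts assume PAE (PMD_SHIFT == 21, 2MB big pages); a non-PAE 32-bit kernel has PMD_SHIFT == 22, hence the "2/4MB" wording in the comment.

/*
 * Stand-alone sketch of the three-way split done by the patched
 * init_memory_mapping().  Not kernel code: map_range() stands in for
 * kernel_physical_mapping_init(), and the shift values assume PAE.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21			/* 2MB big pages (PAE) */
#define PMD_SIZE	(1UL << PMD_SHIFT)

static void map_range(unsigned long start_pfn, unsigned long end_pfn,
		      int use_pse)
{
	printf("  pfn %#lx-%#lx  %s\n", start_pfn, end_pfn,
	       use_pse ? "big pages" : "4k pages");
}

static void split(unsigned long start, unsigned long end)
{
	unsigned long big_page_start = PMD_SIZE;
	unsigned long start_pfn, end_pfn;

	printf("init_memory_mapping(%#lx, %#lx):\n", start, end);

	/* head: 4k pages up to the first PMD boundary, and never
	 * big pages below big_page_start (MTRR rule) */
	if (start < big_page_start) {
		start_pfn = start >> PAGE_SHIFT;
		end_pfn = (end < big_page_start ? end : big_page_start)
				>> PAGE_SHIFT;
	} else {
		start_pfn = start >> PAGE_SHIFT;
		end_pfn = ((start + (PMD_SIZE - 1)) >> PMD_SHIFT)
				<< (PMD_SHIFT - PAGE_SHIFT);
	}
	if (start_pfn < end_pfn)
		map_range(start_pfn, end_pfn, 0);

	/* middle: whole PMD_SIZE-aligned chunks, big pages allowed */
	start_pfn = ((start + (PMD_SIZE - 1)) >> PMD_SHIFT)
			<< (PMD_SHIFT - PAGE_SHIFT);
	if (start_pfn < (big_page_start >> PAGE_SHIFT))
		start_pfn = big_page_start >> PAGE_SHIFT;
	end_pfn = (end >> PMD_SHIFT) << (PMD_SHIFT - PAGE_SHIFT);
	if (start_pfn < end_pfn)
		map_range(start_pfn, end_pfn, 1);

	/* tail: remainder above the last PMD boundary, 4k pages again */
	start_pfn = end_pfn;
	if (start_pfn > (big_page_start >> PAGE_SHIFT)) {
		end_pfn = end >> PAGE_SHIFT;
		if (start_pfn < end_pfn)
			map_range(start_pfn, end_pfn, 0);
	}
}

int main(void)
{
	split(0x0UL, 0x36dfe000UL);	/* unaligned end, like ~878MB RAM */
	return 0;
}

With the sample bounds in main(), the split comes out as pfns 0x0-0x200 (4k head), 0x200-0x36c00 (big pages) and 0x36c00-0x36dfe (4k tail). Those unaligned boundary chunks are also why the find_early_table_space() hunk now sizes the PTE reservation as the tail remainder plus one full PMD_SIZE: even with PSE enabled, the head and tail still need ordinary 4k page tables.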