* need roughly 0.5KB per GB.
*/
start = 0x8000;
- table_start = find_e820_area(start, end, tables);
+ table_start = find_e820_area(start, end, tables, PAGE_SIZE);
if (table_start == -1UL)
panic("Cannot find space for the kernel page tables");
- /*
- * When you have a lot of RAM like 256GB, early_table will not fit
- * into 0x8000 range, find_e820_area() will find area after kernel
- * bss but the table_start is not page aligned, so need to round it
- * up to avoid overlap with bss:
- */
- table_start = round_up(table_start, PAGE_SIZE);
table_start >>= PAGE_SHIFT;
table_end = table_start;
mmu_cr4_features = read_cr4();
__flush_tlb_all();
- reserve_early(table_start << PAGE_SHIFT, table_end << PAGE_SHIFT);
+ if (!after_bootmem)
+ reserve_early(table_start << PAGE_SHIFT,
+ table_end << PAGE_SHIFT, "PGTABLE");
}
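
The two hunks above are related: find_e820_area() now takes the required
alignment as a fourth argument and rounds candidate addresses itself, which
is what makes the explicit round_up(table_start, PAGE_SIZE) (and its comment
about 256GB machines) redundant. A minimal user-space sketch of the idea,
with a hypothetical find_area() standing in for the real e820-walking
allocator:

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Hypothetical, simplified stand-in for find_e820_area(): the
	 * caller passes the alignment it needs and the allocator rounds
	 * the candidate start up itself. The real function additionally
	 * walks the e820 map and skips ranges that are already reserved.
	 */
	static uint64_t find_area(uint64_t start, uint64_t end,
				  uint64_t size, uint64_t align)
	{
		uint64_t addr = (start + align - 1) & ~(align - 1);

		if (addr + size > end)
			return -1ULL;	/* no hole large enough */
		return addr;
	}

	int main(void)
	{
		/*
		 * Tables that would start just past an unaligned kernel
		 * bss end are pushed up to the next 4KB boundary by the
		 * allocator itself, with no round_up() in the caller.
		 */
		printf("%#llx\n", (unsigned long long)
		       find_area(0x1000123, 0x2000000, 0x20000, 0x1000));
		return 0;
	}

The reserve_early() hunk follows the same cleanup: the reservation now
carries a "PGTABLE" label so the range is identifiable among the early
reservations, and, presumably because early reservations only make sense
before the bootmem allocator takes over, it is skipped when after_bootmem
is set.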
#ifndef CONFIG_NUMA
}
#endif
-/*
- * Unmap a kernel mapping if it exists. This is useful to avoid
- * prefetches from the CPU leading to inconsistent cache lines.
- * address and size must be aligned to 2MB boundaries.
- * Does nothing when the mapping doesn't exist.
- */
-void __init clear_kernel_mapping(unsigned long address, unsigned long size)
-{
- unsigned long end = address + size;
-
- BUG_ON(address & ~LARGE_PAGE_MASK);
- BUG_ON(size & ~LARGE_PAGE_MASK);
-
- for (; address < end; address += LARGE_PAGE_SIZE) {
- pgd_t *pgd = pgd_offset_k(address);
- pud_t *pud;
- pmd_t *pmd;
-
- if (pgd_none(*pgd))
- continue;
-
- pud = pud_offset(pgd, address);
- if (pud_none(*pud))
- continue;
-
- pmd = pmd_offset(pud, address);
- if (!pmd || pmd_none(*pmd))
- continue;
-
- if (!(pmd_val(*pmd) & _PAGE_PSE)) {
- /*
- * Could handle this, but it should not happen
- * currently:
- */
- printk(KERN_ERR "clear_kernel_mapping: "
- "mapping has been split. will leak memory\n");
- pmd_ERROR(*pmd);
- }
- set_pmd(pmd, __pmd(0));
- }
- __flush_tlb_all();
-}
-
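For readers following the removal: clear_kernel_mapping() walked
pgd -> pud -> pmd and zapped whole 2MB (PSE) entries, warning rather than
splitting a mapping that had already been broken into 4KB pages. A toy
user-space sketch of that loop, assuming flat, hypothetical types in place
of the kernel's page-table API (a single mock PMD table stands in for the
full three-level walk):

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PMD_SHIFT	21
	#define LARGE_PAGE_SIZE	(1ULL << PMD_SHIFT)	/* 2MB */
	#define LARGE_PAGE_MASK	(~(LARGE_PAGE_SIZE - 1))
	#define _PAGE_PSE	(1ULL << 7)
	#define PTRS_PER_PMD	512

	typedef uint64_t pmd_t;	/* toy: one 8-byte entry per 2MB slot */

	/*
	 * Mirror of the removed loop body over a single mock PMD table:
	 * clear every 2MB mapping in [address, address + size).
	 */
	static void clear_2mb_range(pmd_t *pmd_table, uint64_t address,
				    uint64_t size)
	{
		uint64_t end = address + size;

		/* both bounds must be 2MB-aligned, as the BUG_ON()s enforced */
		assert((address & ~LARGE_PAGE_MASK) == 0);
		assert((size & ~LARGE_PAGE_MASK) == 0);

		for (; address < end; address += LARGE_PAGE_SIZE) {
			pmd_t *pmd = &pmd_table[(address >> PMD_SHIFT) &
						(PTRS_PER_PMD - 1)];

			if (*pmd == 0)
				continue;	/* nothing mapped here */
			if (!(*pmd & _PAGE_PSE))
				/* split mapping: clearing it would leak
				 * the 4KB table it points to */
				fprintf(stderr, "mapping was split\n");
			*pmd = 0;		/* unmap the 2MB page */
		}
		/* the kernel version ends with __flush_tlb_all() here */
	}

	int main(void)
	{
		pmd_t table[PTRS_PER_PMD] = { 0 };

		table[1] = 0x200000 | _PAGE_PSE;	/* one 2MB page at 2MB */
		clear_2mb_range(table, 0x200000, LARGE_PAGE_SIZE);
		printf("entry after clear: %#llx\n",
		       (unsigned long long)table[1]);
		return 0;
	}
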
/*
* Memory hotplug specific functions
*/