X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=blobdiff_plain;f=arch%2Fx86%2Fmm%2Finit_64.c;h=05f12c527b0257cf55719124ed9cfa769468cffa;hb=f2633105cd92b793dd6a6f623b4140287d46160a;hp=1e3862e410658b7cd3d2422476e0797b8ff4b511;hpb=c4ec20717313daafba59225f812db89595952b83;p=linux-2.6

diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 1e3862e410..05f12c527b 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -43,6 +43,8 @@
 #include
 #include
 #include
+#include
+#include
 
 #ifndef Dprintk
 #define Dprintk(x...)
@@ -224,7 +226,7 @@ __meminit void *early_ioremap(unsigned long addr, unsigned long size)
 	vaddr += addr & ~PMD_MASK;
 	addr &= PMD_MASK;
 	for (i = 0; i < pmds; i++, addr += PMD_SIZE)
-		set_pmd(pmd + i,__pmd(addr | _KERNPG_TABLE | _PAGE_PSE));
+		set_pmd(pmd+i, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
 	__flush_tlb();
 	return (void *)vaddr;
 next:
@@ -268,7 +270,7 @@ phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end)
 		if (pmd_val(*pmd))
 			continue;
 
-		entry = _PAGE_NX|_PAGE_PSE|_KERNPG_TABLE|_PAGE_GLOBAL|address;
+		entry = __PAGE_KERNEL_LARGE|_PAGE_GLOBAL|address;
 		entry &= __supported_pte_mask;
 		set_pmd(pmd, __pmd(entry));
 	}
@@ -345,7 +347,7 @@ static void __init find_early_table_space(unsigned long end)
 /* Setup the direct mapping of the physical memory at PAGE_OFFSET.
    This runs before bootmem is initialized and gets pages directly from the
    physical memory. To access them they are temporarily mapped. */
-void __meminit init_memory_mapping(unsigned long start, unsigned long end)
+void __init_refok init_memory_mapping(unsigned long start, unsigned long end)
 {
 	unsigned long next;
 
@@ -484,34 +486,6 @@ EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
 
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
-#ifdef CONFIG_MEMORY_HOTPLUG_RESERVE
-/*
- * Memory Hotadd without sparsemem. The mem_maps have been allocated in advance,
- * just online the pages.
- */
-int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
-{
-	int err = -EIO;
-	unsigned long pfn;
-	unsigned long total = 0, mem = 0;
-	for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
-		if (pfn_valid(pfn)) {
-			online_page(pfn_to_page(pfn));
-			err = 0;
-			mem++;
-		}
-		total++;
-	}
-	if (!err) {
-		z->spanned_pages += total;
-		z->present_pages += mem;
-		z->zone_pgdat->node_spanned_pages += total;
-		z->zone_pgdat->node_present_pages += mem;
-	}
-	return err;
-}
-#endif
-
 static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
 			 kcore_vsyscall;
@@ -521,8 +495,15 @@ void __init mem_init(void)
 
 	pci_iommu_alloc();
 
-	/* clear the zero-page */
-	memset(empty_zero_page, 0, PAGE_SIZE);
+	/* clear_bss() already clear the empty_zero_page */
+
+	/* temporary debugging - double check it's true: */
+	{
+		int i;
+
+		for (i = 0; i < 1024; i++)
+			WARN_ON_ONCE(empty_zero_page[i]);
+	}
 
 	reservedpages = 0;
@@ -728,12 +709,6 @@ int in_gate_area_no_task(unsigned long addr)
 	return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
 }
 
-void * __init alloc_bootmem_high_node(pg_data_t *pgdat, unsigned long size)
-{
-	return __alloc_bootmem_core(pgdat->bdata, size,
-			SMP_CACHE_BYTES, (4UL*1024*1024*1024), 0);
-}
-
 const char *arch_vma_name(struct vm_area_struct *vma)
 {
 	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
@@ -774,8 +749,7 @@ int __meminit vmemmap_populate(struct page *start_page,
 			if (!p)
 				return -ENOMEM;
 
-			entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL);
-			mk_pte_huge(entry);
+			entry = pfn_pte(__pa(p) >> PAGE_SHIFT, PAGE_KERNEL_LARGE);
 			set_pmd(pmd, __pmd(pte_val(entry)));
 			printk(KERN_DEBUG " [%lx-%lx] PMD ->%p on node %d\n",