unsigned long pernode_size;
struct bootmem_data bootmem_data;
unsigned long num_physpages;
+#ifdef CONFIG_ZONE_DMA
unsigned long num_dma_physpages;
+#endif
unsigned long min_pfn;
unsigned long max_pfn;
};
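/*
 * ZONE_DMA is now optional: the per-node DMA page counter above, and every
 * place below that updates or consumes it, is compiled only when
 * CONFIG_ZONE_DMA is set.
 */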
bdp->node_low_pfn = max(epfn, bdp->node_low_pfn);
}
- min_low_pfn = min(min_low_pfn, bdp->node_boot_start>>PAGE_SHIFT);
- max_low_pfn = max(max_low_pfn, bdp->node_low_pfn);
-
return 0;
}
/* These actually end up getting called by call_pernode_memory() */
efi_memmap_walk(filter_rsvd_memory, build_node_maps);
efi_memmap_walk(filter_rsvd_memory, find_pernode_space);
+ efi_memmap_walk(find_max_min_low_pfn, NULL);
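/*
 * min_low_pfn and max_low_pfn are now computed in one dedicated walk of the
 * EFI memory map rather than as a side effect of the per-node callbacks
 * (see the lines removed above); an illustrative sketch of such a callback
 * follows this function.
 */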
for_each_online_node(node)
if (mem_data[node].bootmem_data.node_low_pfn) {
max_pfn = max_low_pfn;
find_initrd();
-
-#ifdef CONFIG_CRASH_DUMP
- /* If we are doing a crash dump, we still need to know the real mem
- * size before original memory map is reset. */
- saved_max_pfn = max_pfn;
-#endif
}
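/*
 * Illustrative only -- not the kernel's actual implementation.  A minimal
 * sketch of what the find_max_min_low_pfn() callback registered above has
 * to do, assuming efi_memmap_walk() hands it the virtual start/end of each
 * usable range: fold every range's page frame numbers into the global
 * min_low_pfn/max_low_pfn, replacing the per-node updates removed earlier.
 */
static int __init find_max_min_low_pfn(u64 start, u64 end, void *arg)
{
	unsigned long pfn_start = __pa(start) >> PAGE_SHIFT;
	unsigned long pfn_end = __pa(end - 1) >> PAGE_SHIFT;

	min_low_pfn = min(min_low_pfn, pfn_start);
	max_low_pfn = max(max_low_pfn, pfn_end);
	return 0;
}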
#ifdef CONFIG_SMP
unsigned long end = start + len;
mem_data[node].num_physpages += len >> PAGE_SHIFT;
+#ifdef CONFIG_ZONE_DMA
if (start <= __pa(MAX_DMA_ADDRESS))
mem_data[node].num_dma_physpages +=
(min(end, __pa(MAX_DMA_ADDRESS)) - start) >> PAGE_SHIFT;
+#endif
start = GRANULEROUNDDOWN(start);
start = ORDERROUNDDOWN(start);
end = GRANULEROUNDUP(end);
}
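/*
 * Per-node page accounting: every page in the range counts toward
 * num_physpages, but only pages below __pa(MAX_DMA_ADDRESS) count as
 * DMA-capable, and only when CONFIG_ZONE_DMA is configured.
 */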
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+#ifdef CONFIG_ZONE_DMA
max_zone_pfns[ZONE_DMA] = max_dma;
+#endif
max_zone_pfns[ZONE_NORMAL] = max_pfn;
free_area_init_nodes(max_zone_pfns);
zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
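/*
 * Zone sizing: with CONFIG_ZONE_DMA set, ZONE_DMA ends at max_dma and
 * ZONE_NORMAL at max_pfn; without it, all memory lands in ZONE_NORMAL.
 * free_area_init_nodes() then builds the per-node zones from these
 * boundary PFNs.
 */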
+#ifdef CONFIG_MEMORY_HOTPLUG
pg_data_t *arch_alloc_nodedata(int nid)
{
unsigned long size = compute_pernodesize(nid);
pgdat_list[update_node] = update_pgdat;
scatter_node_data();
}
+#endif
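/*
 * Memory-hotplug-only helpers (excerpted): arch_alloc_nodedata() sizes a
 * new per-node data area with compute_pernodesize(), and the update path
 * points pgdat_list[] at the new pgdat before calling scatter_node_data()
 * to propagate the change.  None of this is built without
 * CONFIG_MEMORY_HOTPLUG.
 */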