From: Yinghai Lu
Date: Wed, 4 Jun 2008 02:32:30 +0000 (-0700)
Subject: x86, numa, 32-bit: make sure we get kva space
X-Git-Tag: v2.6.27-rc1~1106^2~247^2~48
X-Git-Url: https://err.no/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=84b56fa46b36c2df508e7d421feab514fad30f81;p=linux-2.6

x86, numa, 32-bit: make sure we get kva space

When a 1/3 user/kernel split is used and less memory is installed, or when
there is a big hole below 4g, max_low_pfn still ends up at 3g-128m, so the
fixed KVA window may not fall inside usable RAM.  Try stepping down from
max_low_pfn until find_e820_area() succeeds; otherwise the boot will panic.

The 32-bit code still needs to be converted to use
register_e820_active_regions() ... later.

Signed-off-by: Yinghai Lu
Signed-off-by: Ingo Molnar

---

diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c
index 914a81ee78..7ced26ab9a 100644
--- a/arch/x86/mm/discontig_32.c
+++ b/arch/x86/mm/discontig_32.c
@@ -328,6 +328,7 @@ unsigned long __init setup_memory(void)
 {
 	int nid;
 	unsigned long system_start_pfn, system_max_low_pfn;
+	long kva_target_pfn;
 
 	/*
 	 * When mapping a NUMA machine we allocate the node_mem_map arrays
@@ -344,11 +345,17 @@ unsigned long __init setup_memory(void)
 	system_start_pfn = min_low_pfn = PFN_UP(init_pg_tables_end);
 
 	system_max_low_pfn = max_low_pfn = find_max_low_pfn();
-	kva_start_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE);
-	kva_start_pfn = find_e820_area(kva_start_pfn<<PAGE_SHIFT,
-				max_low_pfn<<PAGE_SHIFT,
-				kva_pages<<PAGE_SHIFT,
-				PAGE_SIZE) >> PAGE_SHIFT;
+	kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE);
+	do {
+		kva_start_pfn = find_e820_area(kva_target_pfn<<PAGE_SHIFT,
+					max_low_pfn<<PAGE_SHIFT,
+					kva_pages<<PAGE_SHIFT,
+					PAGE_SIZE) >> PAGE_SHIFT;
+		kva_target_pfn -= PTRS_PER_PTE;
+	} while (kva_start_pfn == -1UL && kva_target_pfn > min_low_pfn);
+
+	if (kva_start_pfn == -1UL)
+		panic("Can not get kva space\n");
 
 	printk("kva_start_pfn ~ %ld find_max_low_pfn() ~ %ld\n",
 		kva_start_pfn, max_low_pfn);