x86, numa, 32-bit: use find_e820_area() to find KVA RAM on node
author Yinghai Lu <yhlu.kernel@gmail.com>
Sat, 7 Jun 2008 01:53:33 +0000 (18:53 -0700)
committer Ingo Molnar <mingo@elte.hu>
Tue, 10 Jun 2008 09:31:52 +0000 (11:31 +0200)
Don't assume we can use the RAM near the end of every node.
In particular, systems with little memory may end up with both the
KVA address range and the KVA RAM below max_low_pfn.

Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
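
For context, the new search logic can be exercised outside the kernel. The
sketch below is a minimal userspace model: e820_find_free() is a hypothetical
stand-in for the kernel's find_e820_area(), and the memory map and node values
are fabricated, not real firmware data. Like the do/while loop in the patch,
it starts from a PMD-aligned base just below the node end and steps downward
one large page at a time until a block big enough for the node's KVA RAM turns
up or the node start is reached.

#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT       12
#define PTRS_PER_PTE     1024                       /* non-PAE */
#define LARGE_PAGE_BYTES ((uint64_t)PTRS_PER_PTE << PAGE_SHIFT)  /* 4 MB */

struct range { uint64_t start, end; };              /* free RAM, end exclusive */

/*
 * Hypothetical stand-in for find_e820_area(): return the lowest
 * align-aligned base in [start, end) that holds size bytes of free RAM,
 * or -1ULL if nothing fits.
 */
static uint64_t e820_find_free(const struct range *map, int n,
                               uint64_t start, uint64_t end,
                               uint64_t size, uint64_t align)
{
        for (int i = 0; i < n; i++) {
                uint64_t base = map[i].start > start ? map[i].start : start;

                base = (base + align - 1) & ~(align - 1);
                if (base + size <= map[i].end && base + size <= end)
                        return base;
        }
        return -1ULL;
}

int main(void)
{
        /* Fabricated map: free RAM ends 4 MB short of the node end. */
        struct range map[] = { { 0x00100000ULL, 0x07c00000ULL } };
        uint64_t node_start = 0x00100000ULL;        /* 1 MB   */
        uint64_t node_end   = 0x08000000ULL;        /* 128 MB */
        uint64_t size       = 2 * LARGE_PAGE_BYTES; /* KVA RAM needed */
        uint64_t target     = (node_end - size) & ~(LARGE_PAGE_BYTES - 1);
        uint64_t found;

        /* Step downward one large page at a time, as the patch does. */
        do {
                found = e820_find_free(map, 1, target, node_end,
                                       size, LARGE_PAGE_BYTES);
                target -= LARGE_PAGE_BYTES;
        } while (found == -1ULL && target > node_start);

        if (found == -1ULL)
                printf("no room for KVA RAM on this node\n");
        else
                printf("KVA RAM at [%#llx-%#llx)\n",
                       (unsigned long long)found,
                       (unsigned long long)(found + size));
        return 0;
}

With this fabricated map the first probe fails, because the last 4 MB of the
node are not free RAM, and the second probe succeeds at 0x7400000. The old
code handled the same situation by tail-scanning with page_is_ram() and simply
giving up on the remap (size = 0) when any page was missing; the new code
keeps searching downward instead.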
arch/x86/mm/discontig_32.c

index 489605bab85ad6c0636ba6c4d67ec8fe0133bb68..accc7c6c57fc1dace92139a5981460f7a213822a 100644
@@ -228,17 +228,21 @@ static unsigned long calculate_numa_remap_pages(void)
 {
        int nid;
        unsigned long size, reserve_pages = 0;
-       unsigned long pfn;
 
        for_each_online_node(nid) {
-               unsigned old_end_pfn = node_end_pfn[nid];
+               u64 node_end_target;
+               u64 node_end_final;
 
                /*
                 * The acpi/srat node info can show hot-add memory zones
                 * where memory could be added but not currently present.
                 */
+               printk("node %d pfn: [%lx - %lx]\n",
+                       nid, node_start_pfn[nid], node_end_pfn[nid]);
                if (node_start_pfn[nid] > max_pfn)
                        continue;
+               if (!node_end_pfn[nid])
+                       continue;
                if (node_end_pfn[nid] > max_pfn)
                        node_end_pfn[nid] = max_pfn;
 
@@ -250,37 +254,40 @@ static unsigned long calculate_numa_remap_pages(void)
                /* now the roundup is correct, convert to PAGE_SIZE pages */
                size = size * PTRS_PER_PTE;
 
-               /*
-                * Validate the region we are allocating only contains valid
-                * pages.
-                */
-               for (pfn = node_end_pfn[nid] - size;
-                    pfn < node_end_pfn[nid]; pfn++)
-                       if (!page_is_ram(pfn))
-                               break;
-
-               if (pfn != node_end_pfn[nid])
-                       size = 0;
+               node_end_target = round_down(node_end_pfn[nid] - size,
+                                                PTRS_PER_PTE);
+               node_end_target <<= PAGE_SHIFT;
+               do {
+                       node_end_final = find_e820_area(node_end_target,
+                                       ((u64)node_end_pfn[nid])<<PAGE_SHIFT,
+                                               ((u64)size)<<PAGE_SHIFT,
+                                               LARGE_PAGE_BYTES);
+                       node_end_target -= LARGE_PAGE_BYTES;
+               } while (node_end_final == -1ULL &&
+                        (node_end_target>>PAGE_SHIFT) > (node_start_pfn[nid]));
+
+               if (node_end_final == -1ULL)
+                       panic("Can not get kva ram\n");
 
                printk("Reserving %ld pages of KVA for lmem_map of node %d\n",
                                size, nid);
                node_remap_size[nid] = size;
                node_remap_offset[nid] = reserve_pages;
                reserve_pages += size;
-               printk("Shrinking node %d from %ld pages to %ld pages\n",
-                       nid, node_end_pfn[nid], node_end_pfn[nid] - size);
-
-               if (node_end_pfn[nid] & (PTRS_PER_PTE-1)) {
-                       /*
-                        * Align node_end_pfn[] and node_remap_start_pfn[] to
-                        * pmd boundary. remap_numa_kva will barf otherwise.
-                        */
-                       printk("Shrinking node %d further by %ld pages for proper alignment\n",
-                               nid, node_end_pfn[nid] & (PTRS_PER_PTE-1));
-                       size +=  node_end_pfn[nid] & (PTRS_PER_PTE-1);
-               }
+               printk("Shrinking node %d from %ld pages to %lld pages\n",
+                       nid, node_end_pfn[nid], node_end_final>>PAGE_SHIFT);
+
+               /*
+                * If the KVA RAM we found lies below max_low_pfn, reserve
+                * it now so nothing else grabs it early.  On systems with
+                * less memory the layout will be: KVA address space, KVA RAM.
+                */
+               if ((node_end_final>>PAGE_SHIFT) < max_low_pfn)
+                       reserve_early(node_end_final,
+                                     node_end_final+(((u64)size)<<PAGE_SHIFT),
+                                     "KVA RAM");
 
-               node_end_pfn[nid] -= size;
+               node_end_pfn[nid] = node_end_final>>PAGE_SHIFT;
                node_remap_start_pfn[nid] = node_end_pfn[nid];
                shrink_active_range(nid, node_end_pfn[nid]);
        }
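
The reserve_early() call is what makes the found block safe to sit below
max_low_pfn: the KVA RAM is only consumed later in boot, so without an early
reservation that lowmem range could be handed out to someone else in the
meantime. Below is a toy model of that bookkeeping, assuming a fixed-size
table; the kernel's real early-reservation machinery lives with the e820 code
and does more (overlap checking, eventually handing the ranges over to
bootmem), but the collision test is the essential idea.

#include <stdio.h>
#include <stdint.h>

#define MAX_EARLY_RES 8

struct early_res {
        uint64_t start, end;            /* [start, end), bytes */
        const char *name;
};

static struct early_res early_res[MAX_EARLY_RES];
static int nr_early_res;

/* Record [start, end) so later early allocations steer around it. */
static void reserve_early(uint64_t start, uint64_t end, const char *name)
{
        if (nr_early_res >= MAX_EARLY_RES)
                return;                 /* toy model: silently drop overflow */
        early_res[nr_early_res].start = start;
        early_res[nr_early_res].end = end;
        early_res[nr_early_res].name = name;
        nr_early_res++;
}

/* Would an allocation of [start, end) collide with a reservation? */
static int early_res_overlaps(uint64_t start, uint64_t end)
{
        for (int i = 0; i < nr_early_res; i++)
                if (start < early_res[i].end && end > early_res[i].start)
                        return 1;
        return 0;
}

int main(void)
{
        /* The block found in the earlier sketch, below max_low_pfn. */
        reserve_early(0x07400000ULL, 0x07c00000ULL, "KVA RAM");

        printf("early alloc at 0x07600000 %s\n",
               early_res_overlaps(0x07600000ULL, 0x07700000ULL) ?
               "collides with KVA RAM" : "is free");
        return 0;
}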