/*
 * Generic VM initialization for x86-64 NUMA setups.
 * Copyright 2002,2003 Andi Kleen, SuSE Labs.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/numa.h>
#include <asm/acpi.h>
#include <asm/k8.h>
struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

static bootmem_data_t plat_node_bdata[MAX_NUMNODES];

struct memnode memnode;

s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
        [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

int numa_off __initdata;
static unsigned long __initdata nodemap_addr;
static unsigned long __initdata nodemap_size;
/*
 * Given a shift value, try to populate memnodemap[].
 * Returns:
 * 1 if OK
 * 0 if memnodemap[] is too small (or shift too small)
 * -1 on node overlap or lost RAM (shift too big)
 */
static int __init populate_memnodemap(const struct bootnode *nodes,
                                      int numnodes, int shift, int *nodeids)
{
        unsigned long addr, end;
        int i, res = -1;

        memset(memnodemap, 0xff, sizeof(s16)*memnodemapsize);
        for (i = 0; i < numnodes; i++) {
                addr = nodes[i].start;
                end = nodes[i].end;
                if (addr >= end)
                        continue;
                if ((end >> shift) >= memnodemapsize)
                        return 0;
                do {
                        if (memnodemap[addr >> shift] != NUMA_NO_NODE)
                                return -1;
                        memnodemap[addr >> shift] = nodeids ? nodeids[i] : i;
                        addr += (1UL << shift);
                } while (addr < end);
                res = 1;
        }
        return res;
}
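/*
 * Illustrative sketch (not in the original source): once memnodemap[] is
 * populated, translating a physical address to its node id is a single
 * shift plus table lookup, which is what the phys_to_nid() helper boils
 * down to:
 *
 *      s16 nid = memnodemap[paddr >> memnode_shift];
 *
 * For example, with a shift of 24 each entry covers a 16MB chunk, so
 * physical address 0x104000000 maps through entry 0x104.
 */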
static int __init allocate_cachealigned_memnodemap(void)
{
        unsigned long addr;

        memnodemap = memnode.embedded_map;
        if (memnodemapsize <= ARRAY_SIZE(memnode.embedded_map))
                return 0;

        addr = 0x8000;
        nodemap_size = round_up(sizeof(s16) * memnodemapsize, L1_CACHE_BYTES);
        nodemap_addr = find_e820_area(addr, max_pfn<<PAGE_SHIFT,
                                      nodemap_size, L1_CACHE_BYTES);
        if (nodemap_addr == -1UL) {
                printk(KERN_ERR
                       "NUMA: Unable to allocate Memory to Node hash map\n");
                nodemap_addr = nodemap_size = 0;
                return -1;
        }
        memnodemap = phys_to_virt(nodemap_addr);
        reserve_early(nodemap_addr, nodemap_addr + nodemap_size, "MEMNODEMAP");

        printk(KERN_DEBUG "NUMA: Allocated memnodemap from %lx - %lx\n",
               nodemap_addr, nodemap_addr + nodemap_size);
        return 0;
}
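/*
 * Sizing example (illustrative): memnodemap[] holds one s16 per
 * (1 << shift)-sized chunk up to the top of memory.  With shift == 24
 * and 64GB of RAM that is 4096 entries (8KB), far more than the small
 * embedded_map inside struct memnode, so the map is carved out of a
 * free e820 range instead.
 */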
/*
 * The LSB of all start and end addresses in the node map is the value of the
 * maximum possible shift.
 */
static int __init extract_lsb_from_nodes(const struct bootnode *nodes,
                                         int numnodes)
{
        int i, nodes_used = 0;
        unsigned long start, end;
        unsigned long bitfield = 0, memtop = 0;

        for (i = 0; i < numnodes; i++) {
                start = nodes[i].start;
                end = nodes[i].end;
                if (start >= end)
                        continue;
                bitfield |= start;
                nodes_used++;
                if (end > memtop)
                        memtop = end;
        }
        if (nodes_used <= 1)
                i = 63;
        else
                i = find_first_bit(&bitfield, sizeof(unsigned long)*8);
        memnodemapsize = (memtop >> i)+1;
        return i;
}
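/*
 * Worked example (assumed layout): two nodes covering 0-4GB and 4GB-8GB
 * contribute start addresses 0x0 and 0x100000000, so the lowest set bit
 * in the bitfield is bit 32.  The shift is therefore 32 and
 * memnodemapsize = (0x200000000 >> 32) + 1 = 3 entries.
 */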
int __init compute_hash_shift(struct bootnode *nodes, int numnodes,
                              int *nodeids)
{
        int shift;

        shift = extract_lsb_from_nodes(nodes, numnodes);
        if (allocate_cachealigned_memnodemap())
                return -1;
        printk(KERN_DEBUG "NUMA: Using %d for the hash shift.\n", shift);

        if (populate_memnodemap(nodes, numnodes, shift, nodeids) != 1) {
                printk(KERN_INFO "Your memory is not aligned; you need to "
                       "rebuild your kernel with a bigger NODEMAPSIZE, "
                       "shift=%d\n", shift);
                return -1;
        }
        return shift;
}
int early_pfn_to_nid(unsigned long pfn)
{
        return phys_to_nid(pfn << PAGE_SHIFT);
}
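/*
 * Usage sketch (illustrative): discovery code such as the SRAT or K8
 * scanners is expected to pass its bootnode array through
 * compute_hash_shift() and install the result, e.g.:
 *
 *      memnode_shift = compute_hash_shift(nodes, num_nodes, NULL);
 *      if (memnode_shift < 0)
 *              ... fall back to a flat single-node setup ...
 *
 * which is exactly what numa_emulation() below does.
 */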
static void * __init early_node_mem(int nodeid, unsigned long start,
                                    unsigned long end, unsigned long size,
                                    unsigned long align)
{
        unsigned long mem = find_e820_area(start, end, size, align);
        void *ptr;

        if (mem != -1L)
                return __va(mem);

        ptr = __alloc_bootmem_nopanic(size, align, __pa(MAX_DMA_ADDRESS));
        if (ptr == NULL) {
                printk(KERN_ERR "Cannot find %lu bytes in node %d\n",
                       size, nodeid);
                return NULL;
        }
        return ptr;
}
/* Initialize bootmem allocator for a node */
void __init setup_node_bootmem(int nodeid, unsigned long start,
                               unsigned long end)
{
        unsigned long start_pfn, last_pfn, bootmap_pages, bootmap_size;
        unsigned long bootmap_start, nodedata_phys;
        void *bootmap;
        const int pgdat_size = round_up(sizeof(pg_data_t), PAGE_SIZE);
        int nid;

        start = round_up(start, ZONE_ALIGN);

        printk(KERN_INFO "Bootmem setup node %d %016lx-%016lx\n", nodeid,
               start, end);

        start_pfn = start >> PAGE_SHIFT;
        last_pfn = end >> PAGE_SHIFT;

        node_data[nodeid] = early_node_mem(nodeid, start, end, pgdat_size,
                                           SMP_CACHE_BYTES);
        if (node_data[nodeid] == NULL)
                return;
        nodedata_phys = __pa(node_data[nodeid]);
        printk(KERN_INFO "  NODE_DATA [%016lx - %016lx]\n", nodedata_phys,
               nodedata_phys + pgdat_size - 1);

        memset(NODE_DATA(nodeid), 0, sizeof(pg_data_t));
        NODE_DATA(nodeid)->bdata = &plat_node_bdata[nodeid];
        NODE_DATA(nodeid)->node_start_pfn = start_pfn;
        NODE_DATA(nodeid)->node_spanned_pages = last_pfn - start_pfn;

        /*
         * Find a place for the bootmem map.
         * nodedata_phys could be on another node if it came from
         * alloc_bootmem, so make sure bootmap_start is not too small;
         * otherwise early_node_mem will grab it with find_e820_area
         * instead of alloc_bootmem, which could clash with the
         * reserved range.
         */
        bootmap_pages = bootmem_bootmap_pages(last_pfn - start_pfn);
        nid = phys_to_nid(nodedata_phys);
        if (nid == nodeid)
                bootmap_start = round_up(nodedata_phys + pgdat_size, PAGE_SIZE);
        else
                bootmap_start = round_up(start, PAGE_SIZE);
        /*
         * SMP_CACHE_BYTES alignment could be enough, but
         * init_bootmem_node likes the bootmap aligned to PAGE_SIZE.
         */
        bootmap = early_node_mem(nodeid, bootmap_start, end,
                                 bootmap_pages<<PAGE_SHIFT, PAGE_SIZE);
        if (bootmap == NULL) {
                if (nodedata_phys < start || nodedata_phys >= end)
                        free_bootmem(nodedata_phys, pgdat_size);
                node_data[nodeid] = NULL;
                return;
        }
        bootmap_start = __pa(bootmap);

        bootmap_size = init_bootmem_node(NODE_DATA(nodeid),
                                         bootmap_start >> PAGE_SHIFT,
                                         start_pfn, last_pfn);

        printk(KERN_INFO "  bootmap [%016lx - %016lx] pages %lx\n",
               bootmap_start, bootmap_start + bootmap_size - 1,
               bootmap_pages);

        free_bootmem_with_active_regions(nodeid, end);

        /*
         * Convert early reservations to bootmem reservations now;
         * otherwise early_node_mem could hand out early-reserved memory.
         */
        early_res_to_bootmem(start, end);

        /*
         * In some cases early_node_mem could use alloc_bootmem to get a
         * range on another node; don't reserve that again.
         */
        if (nid != nodeid)
                printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nodeid, nid);
        else
                reserve_bootmem_node(NODE_DATA(nodeid), nodedata_phys,
                                     pgdat_size, BOOTMEM_DEFAULT);
        nid = phys_to_nid(bootmap_start);
        if (nid != nodeid)
                printk(KERN_INFO "    bootmap(%d) on node %d\n", nodeid, nid);
        else
                reserve_bootmem_node(NODE_DATA(nodeid), bootmap_start,
                                     bootmap_pages<<PAGE_SHIFT, BOOTMEM_DEFAULT);

#ifdef CONFIG_ACPI_NUMA
        srat_reserve_add_area(nodeid);
#endif
        node_set_online(nodeid);
}
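/*
 * Layout example (assumed values): for a node spanning 0-4GB the bootmem
 * bitmap needs one bit per 4KB page, i.e. 1M bits = 128KB (32 pages).
 * When NODE_DATA lands on-node, the bitmap is placed page-aligned
 * directly behind the pgdat.
 */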
/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
 * mapping. To avoid this fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet. We round robin the existing
 * nodes.
 */
void __init numa_init_array(void)
{
        int rr, i;

        rr = first_node(node_online_map);
        for (i = 0; i < NR_CPUS; i++) {
                if (early_cpu_to_node(i) != NUMA_NO_NODE)
                        continue;
                numa_set_node(i, rr);
                rr = next_node(rr, node_online_map);
                if (rr == MAX_NUMNODES)
                        rr = first_node(node_online_map);
        }
}
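/*
 * Example (assumed topology): with nodes 0 and 1 online and no
 * APIC-derived mapping available, CPUs 0..7 are assigned to nodes
 * 0,1,0,1,0,1,0,1 by the round robin above.
 */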
#ifdef CONFIG_NUMA_EMU
/* Numa emulation */
static char *cmdline __initdata;
/*
 * Sets up nid to cover the range from addr to addr + size.  If the end
 * boundary is greater than max_addr, then max_addr is used instead.
 * The return value is 0 if there is additional memory left for
 * allocation past addr and -1 otherwise.  addr is adjusted to be at
 * the end of the node.
 */
static int __init setup_node_range(int nid, struct bootnode *nodes, u64 *addr,
                                   u64 size, u64 max_addr)
{
        int ret = 0;

        nodes[nid].start = *addr;
        *addr += size;
        if (*addr >= max_addr) {
                *addr = max_addr;
                ret = -1;
        }
        nodes[nid].end = *addr;
        node_set(nid, node_possible_map);
        printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid,
               nodes[nid].start, nodes[nid].end,
               (nodes[nid].end - nodes[nid].start) >> 20);
        return ret;
}
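/*
 * Example: setup_node_range(0, nodes, &addr, 512ULL << 20, max_addr)
 * fakes a 512MB node 0 at the current addr and advances addr past it;
 * the call returns -1 (and clamps the node) once addr reaches max_addr.
 */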
/*
 * Splits num_nodes nodes up equally starting at node_start.  The return value
 * is the number of nodes split up and addr is adjusted to be at the end of the
 * last node allocated.
 */
static int __init split_nodes_equally(struct bootnode *nodes, u64 *addr,
                                      u64 max_addr, int node_start,
                                      int num_nodes)
{
        unsigned int big;
        u64 size;
        int i;

        if (num_nodes <= 0)
                return -1;
        if (num_nodes > MAX_NUMNODES)
                num_nodes = MAX_NUMNODES;
        size = (max_addr - *addr - e820_hole_size(*addr, max_addr)) /
               num_nodes;
        /*
         * Calculate the number of big nodes that can be allocated as a result
         * of consolidating the leftovers.
         */
        big = ((size & ~FAKE_NODE_MIN_HASH_MASK) * num_nodes) /
              FAKE_NODE_MIN_SIZE;

        /* Round down to nearest FAKE_NODE_MIN_SIZE. */
        size &= FAKE_NODE_MIN_HASH_MASK;
        if (!size) {
                printk(KERN_ERR "Not enough memory for each node.  "
                       "NUMA emulation disabled.\n");
                return -1;
        }

        for (i = node_start; i < num_nodes + node_start; i++) {
                u64 end = *addr + size;

                if (i < big)
                        end += FAKE_NODE_MIN_SIZE;
                /*
                 * The final node can have the remaining system RAM.  Other
                 * nodes receive roughly the same amount of available pages.
                 */
                if (i == num_nodes + node_start - 1)
                        end = max_addr;
                else
                        while (end - *addr - e820_hole_size(*addr, end) <
                               size) {
                                end += FAKE_NODE_MIN_SIZE;
                                if (end > max_addr) {
                                        end = max_addr;
                                        break;
                                }
                        }
                if (setup_node_range(i, nodes, addr, end - *addr, max_addr) < 0)
                        break;
        }
        return i - node_start + 1;
}
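/*
 * Worked example (assumed: 8GB of hole-free RAM, the usual 64MB
 * FAKE_NODE_MIN_SIZE, numa=fake=3): 8GB/3 rounds down to 2688MB and the
 * roughly 128MB of consolidated leftovers fund one "big" node, so the
 * fake nodes come out as 2752MB, 2688MB and 2752MB (the final node
 * absorbs the remainder).
 */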
/*
 * Splits the remaining system RAM into chunks of size.  The remaining memory
 * is always assigned to a final node and can be asymmetric.  Returns the
 * number of nodes split.
 */
static int __init split_nodes_by_size(struct bootnode *nodes, u64 *addr,
                                      u64 max_addr, int node_start, u64 size)
{
        int i = node_start;

        size = (size << 20) & FAKE_NODE_MIN_HASH_MASK;
        while (!setup_node_range(i++, nodes, addr, size, max_addr))
                ;
        return i - node_start;
}
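/*
 * Example: with 900MB left below max_addr, split_nodes_by_size(nodes,
 * &addr, max_addr, 4, 256) appends 256MB fake nodes starting at node 4;
 * the last one is clamped to the remaining 132MB and the function
 * returns 4.
 */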
/*
 * Sets up the system RAM area from start_pfn to last_pfn according to the
 * numa=fake command-line option.
 */
static struct bootnode nodes[MAX_NUMNODES] __initdata;

static int __init numa_emulation(unsigned long start_pfn, unsigned long last_pfn)
{
        u64 size, addr = start_pfn << PAGE_SHIFT;
        u64 max_addr = last_pfn << PAGE_SHIFT;
        int num_nodes = 0, num = 0, coeff_flag, coeff = -1, i;

        memset(&nodes, 0, sizeof(nodes));
        /*
         * If the numa=fake command-line is just a single number N, split the
         * system RAM into N fake nodes.
         */
        if (!strchr(cmdline, '*') && !strchr(cmdline, ',')) {
                long n = simple_strtol(cmdline, NULL, 0);

                num_nodes = split_nodes_equally(nodes, &addr, max_addr, 0, n);
                if (num_nodes < 0)
                        return num_nodes;
                goto out;
        }

        /* Parse the command line. */
        for (coeff_flag = 0; ; cmdline++) {
                if (*cmdline && isdigit(*cmdline)) {
                        num = num * 10 + *cmdline - '0';
                        continue;
                }
                if (*cmdline == '*') {
                        if (num > 0)
                                coeff = num;
                        coeff_flag = 1;
                }
                if (!*cmdline || *cmdline == ',') {
                        if (!coeff_flag)
                                coeff = 1;
                        /*
                         * Round down to the nearest FAKE_NODE_MIN_SIZE.
                         * Command-line coefficients are in megabytes.
                         */
                        size = ((u64)num << 20) & FAKE_NODE_MIN_HASH_MASK;
                        if (size)
                                for (i = 0; i < coeff; i++, num_nodes++)
                                        if (setup_node_range(num_nodes, nodes,
                                                &addr, size, max_addr) < 0)
                                                goto done;
                        if (!*cmdline)
                                break;
                        coeff_flag = 0;
                        coeff = -1;
                }
                num = 0;
        }
done:
        if (!num_nodes)
                return -1;
        /* Fill remainder of system RAM, if appropriate. */
        if (addr < max_addr) {
                if (coeff_flag && coeff < 0) {
                        /* Split remaining nodes into num-sized chunks */
                        num_nodes += split_nodes_by_size(nodes, &addr, max_addr,
                                                         num_nodes, num);
                        goto out;
                }
                switch (*(cmdline - 1)) {
                case '*':
                        /* Split remaining nodes into coeff chunks */
                        if (coeff <= 0)
                                break;
                        num_nodes += split_nodes_equally(nodes, &addr, max_addr,
                                                         num_nodes, coeff);
                        break;
                case ',':
                        /* Do not allocate remaining system RAM */
                        break;
                default:
                        /* Give one final node */
                        setup_node_range(num_nodes, nodes, &addr,
                                         max_addr - addr, max_addr);
                        num_nodes++;
                }
        }
out:
        memnode_shift = compute_hash_shift(nodes, num_nodes, NULL);
        if (memnode_shift < 0) {
                memnode_shift = 0;
                printk(KERN_ERR "No NUMA hash function found.  NUMA emulation "
                       "disabled.\n");
                return -1;
        }

        /*
         * We need to vacate all active ranges that may have been registered by
         * SRAT and set acpi_numa to -1 so that srat_disabled() always returns
         * true.  NUMA emulation has succeeded so we will not scan ACPI nodes.
         */
        remove_all_active_ranges();
#ifdef CONFIG_ACPI_NUMA
        acpi_numa = -1;
#endif
        for_each_node_mask(i, node_possible_map) {
                e820_register_active_regions(i, nodes[i].start >> PAGE_SHIFT,
                                             nodes[i].end >> PAGE_SHIFT);
                setup_node_bootmem(i, nodes[i].start, nodes[i].end);
        }
        acpi_fake_nodes(nodes, num_nodes);
        numa_init_array();
        return 0;
}
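/*
 * Example numa=fake command lines accepted by the parser above
 * (sizes in megabytes):
 *
 *      numa=fake=4             split all RAM into 4 equal nodes
 *      numa=fake=2*512         two 512MB nodes, remainder as one node
 *      numa=fake=1024,*128     one 1GB node, rest in 128MB chunks
 */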
#endif /* CONFIG_NUMA_EMU */
void __init initmem_init(unsigned long start_pfn, unsigned long last_pfn)
{
        int i;

        nodes_clear(node_possible_map);
        nodes_clear(node_online_map);

#ifdef CONFIG_NUMA_EMU
        if (cmdline && !numa_emulation(start_pfn, last_pfn))
                return;
        nodes_clear(node_possible_map);
        nodes_clear(node_online_map);
#endif

#ifdef CONFIG_ACPI_NUMA
        if (!numa_off && !acpi_scan_nodes(start_pfn << PAGE_SHIFT,
                                          last_pfn << PAGE_SHIFT))
                return;
        nodes_clear(node_possible_map);
        nodes_clear(node_online_map);
#endif

#ifdef CONFIG_K8_NUMA
        if (!numa_off && !k8_scan_nodes(start_pfn<<PAGE_SHIFT,
                                        last_pfn<<PAGE_SHIFT))
                return;
        nodes_clear(node_possible_map);
        nodes_clear(node_online_map);
#endif
        printk(KERN_INFO "%s\n",
               numa_off ? "NUMA turned off" : "No NUMA configuration found");

        printk(KERN_INFO "Faking a node at %016lx-%016lx\n",
               start_pfn << PAGE_SHIFT,
               last_pfn << PAGE_SHIFT);
        /* setup dummy node covering all memory */
        memnode_shift = 63;
        memnodemap = memnode.embedded_map;
        memnodemap[0] = 0;
        node_set_online(0);
        node_set(0, node_possible_map);
        for (i = 0; i < NR_CPUS; i++)
                numa_set_node(i, 0);
        e820_register_active_regions(0, start_pfn, last_pfn);
        setup_node_bootmem(0, start_pfn << PAGE_SHIFT, last_pfn << PAGE_SHIFT);
}
unsigned long __init numa_free_all_bootmem(void)
{
        unsigned long pages = 0;
        int i;

        for_each_online_node(i)
                pages += free_all_bootmem_node(NODE_DATA(i));

        return pages;
}
void __init paging_init(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
        max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
        max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
        max_zone_pfns[ZONE_NORMAL] = max_pfn;

        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();

        free_area_init_nodes(max_zone_pfns);
}
static __init int numa_setup(char *opt)
{
        if (!opt)
                return -EINVAL;
        if (!strncmp(opt, "off", 3))
                numa_off = 1;
#ifdef CONFIG_NUMA_EMU
        if (!strncmp(opt, "fake=", 5))
                cmdline = opt + 5;
#endif
#ifdef CONFIG_ACPI_NUMA
        if (!strncmp(opt, "noacpi", 6))
                acpi_numa = -1;
        if (!strncmp(opt, "hotadd=", 7))
                hotadd_percent = simple_strtoul(opt+7, NULL, 10);
#endif
        return 0;
}
early_param("numa", numa_setup);
/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[] and
 * apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for NUMA
 * emulation and the fake-node case (when running a kernel compiled
 * for NUMA on a non-NUMA box), which is OK as cpu_to_node[]
 * is already initialized in a round-robin manner by numa_init_array()
 * prior to this call, and that initialization is good enough
 * for the fake NUMA cases.
 *
 * Called before the per_cpu areas are set up.
 */
void __init init_cpu_to_node(void)
{
        int cpu;
        u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

        BUG_ON(cpu_to_apicid == NULL);

        for_each_possible_cpu(cpu) {
                int node;
                u16 apicid = cpu_to_apicid[cpu];

                if (apicid == BAD_APICID)
                        continue;
                node = apicid_to_node[apicid];
                if (node == NUMA_NO_NODE)
                        continue;
                if (!node_online(node))
                        continue;
                numa_set_node(cpu, node);
        }
}