/*
 * Copyright (C) 2002 Anton Blanchard <anton@au.ibm.com>, IBM
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/threads.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/lmb.h>
#include <asm/sparsemem.h>
#include <asm/prom.h>
#include <asm/system.h>
#include <asm/smp.h>
static int numa_enabled = 1;

static char *cmdline __initdata;

static int numa_debug;
/* Wrapped in do { } while (0) so dbg() nests safely in unbraced if/else. */
#define dbg(args...) do { if (numa_debug) printk(KERN_INFO args); } while (0)
int numa_cpu_lookup_table[NR_CPUS];
cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
struct pglist_data *node_data[MAX_NUMNODES];

EXPORT_SYMBOL(numa_cpu_lookup_table);
EXPORT_SYMBOL(numa_cpumask_lookup_table);
EXPORT_SYMBOL(node_data);

static bootmem_data_t __initdata plat_node_bdata[MAX_NUMNODES];
static int min_common_depth;
static int n_mem_addr_cells, n_mem_size_cells;
static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn,
						unsigned int *nid)
{
	unsigned long long mem;
	char *p = cmdline;
	static unsigned int fake_nid;
	static unsigned long long curr_boundary;

	/*
	 * Modify node id only if we started creating NUMA nodes.
	 * We want to continue from where we left off the last time.
	 */
	if (fake_nid)
		*nid = fake_nid;
	/*
	 * In case there are no more arguments to parse, the
	 * node_id should be the same as the last fake node id
	 * (we've handled this above).
	 */
	if (!p)
		return 0;

	mem = memparse(p, &p);
	if (!mem)
		return 0;

	if (mem < curr_boundary)
		return 0;

	curr_boundary = mem;

	if ((end_pfn << PAGE_SHIFT) > mem) {
		/* Skip commas and spaces */
		while (*p == ',' || *p == ' ' || *p == '\t')
			p++;

		cmdline = p;
		fake_nid++;
		*nid = fake_nid;
		dbg("created new fake_node with id %d\n", fake_nid);
		return 1;
	}
	return 0;
}
static void __cpuinit map_cpu_to_node(int cpu, int node)
{
	numa_cpu_lookup_table[cpu] = node;

	dbg("adding cpu %d to node %d\n", cpu, node);

	if (!cpu_isset(cpu, numa_cpumask_lookup_table[node]))
		cpu_set(cpu, numa_cpumask_lookup_table[node]);
}
#ifdef CONFIG_HOTPLUG_CPU
static void unmap_cpu_from_node(unsigned long cpu)
{
	int node = numa_cpu_lookup_table[cpu];

	dbg("removing cpu %lu from node %d\n", cpu, node);

	if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
		cpu_clear(cpu, numa_cpumask_lookup_table[node]);
	} else {
		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
		       cpu, node);
	}
}
#endif /* CONFIG_HOTPLUG_CPU */
static struct device_node * __cpuinit find_cpu_node(unsigned int cpu)
{
	unsigned int hw_cpuid = get_hard_smp_processor_id(cpu);
	struct device_node *cpu_node = NULL;
	const unsigned int *interrupt_server, *reg;
	int len;

	while ((cpu_node = of_find_node_by_type(cpu_node, "cpu")) != NULL) {
		/* Try interrupt server first */
		interrupt_server = of_get_property(cpu_node,
					"ibm,ppc-interrupt-server#s", &len);

		len = len / sizeof(u32);

		if (interrupt_server && (len > 0)) {
			/* len is a count here; walk it down so the index
			 * stays within the property's bounds */
			while (len--) {
				if (interrupt_server[len] == hw_cpuid)
					return cpu_node;
			}
		} else {
			reg = of_get_property(cpu_node, "reg", &len);
			if (reg && (len > 0) && (reg[0] == hw_cpuid))
				return cpu_node;
		}
	}

	return NULL;
}
/* must hold reference to node during call */
static const int *of_get_associativity(struct device_node *dev)
{
	return of_get_property(dev, "ibm,associativity", NULL);
}
/* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
 * info is found.
 */
static int of_node_to_nid_single(struct device_node *device)
{
	int nid = -1;
	const unsigned int *tmp;

	if (min_common_depth == -1)
		goto out;

	tmp = of_get_associativity(device);
	if (!tmp)
		goto out;

	if (tmp[0] >= min_common_depth)
		nid = tmp[min_common_depth];

	/* POWER4 LPAR uses 0xffff as invalid node */
	if (nid == 0xffff || nid >= MAX_NUMNODES)
		nid = -1;
out:
	return nid;
}
/* Walk the device tree upwards, looking for an associativity id */
int of_node_to_nid(struct device_node *device)
{
	struct device_node *tmp;
	int nid = -1;

	of_node_get(device);
	while (device) {
		nid = of_node_to_nid_single(device);
		if (nid != -1)
			break;

		tmp = device;
		device = of_get_parent(tmp);
		of_node_put(tmp);
	}
	of_node_put(device);

	return nid;
}
EXPORT_SYMBOL_GPL(of_node_to_nid);
/*
 * In theory, the "ibm,associativity" property may contain multiple
 * associativity lists because a resource may be multiply connected
 * into the machine.  This resource then has different associativity
 * characteristics relative to its multiple connections.  We ignore
 * this for now.  We also assume that all cpu and memory sets have
 * their distances represented at a common level.  This won't be
 * true for hierarchical NUMA.
 *
 * In any case the ibm,associativity-reference-points should give
 * the correct depth for a normal NUMA system.
 *
 * - Dave Hansen <haveblue@us.ibm.com>
 */
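/*
 * Illustrative sketch (editorial addition, hypothetical values): firmware
 * might expose
 *	ibm,associativity-reference-points = <0x2 0x2>
 * and a cpu node with
 *	ibm,associativity = <0x3 0x0 0x1 0x5>
 * find_min_common_depth() below would then pick depth 2 (the second
 * reference point), and of_node_to_nid_single() would read tmp[2] == 0x1
 * as the nid, since tmp[0] (the list length, 3) is >= min_common_depth.
 */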
static int __init find_min_common_depth(void)
{
	int depth;
	const unsigned int *ref_points;
	struct device_node *rtas_root;
	unsigned int len;

	rtas_root = of_find_node_by_path("/rtas");

	if (!rtas_root)
		return -1;

	/*
	 * this property is 2 32-bit integers, each representing a level of
	 * depth in the associativity nodes.  The first is for an SMP
	 * configuration (should be all 0's) and the second is for a normal
	 * NUMA configuration.
	 */
	ref_points = of_get_property(rtas_root,
			"ibm,associativity-reference-points", &len);

	/* len is in bytes; we dereference ref_points[1], so require both cells */
	if (ref_points && (len >= 2 * sizeof(unsigned int))) {
		depth = ref_points[1];
	} else {
		dbg("NUMA: ibm,associativity-reference-points not found.\n");
		depth = -1;
	}

	of_node_put(rtas_root);
	return depth;
}
static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells)
{
	struct device_node *memory = NULL;

	memory = of_find_node_by_type(memory, "memory");
	if (!memory)
		panic("numa.c: No memory nodes found!");

	*n_addr_cells = of_n_addr_cells(memory);
	*n_size_cells = of_n_size_cells(memory);
	of_node_put(memory);
}
static unsigned long __devinit read_n_cells(int n, const unsigned int **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | **buf;
		(*buf)++;
	}
	return result;
}
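/*
 * Worked example (editorial addition): with n = 2 and cells
 * {0x00000001, 0x00008000} the loop folds the two 32-bit cells into the
 * 64-bit value 0x0000000100008000 and leaves *buf pointing past the
 * cells consumed, which is why callers can read address and size cells
 * back to back from the same buffer.
 */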
struct of_drconf_cell {
	u64	base_addr;
	u32	drc_index;
	u32	reserved;
	u32	aa_index;
	u32	flags;
};

#define DRCONF_MEM_ASSIGNED	0x00000008
#define DRCONF_MEM_AI_INVALID	0x00000040
#define DRCONF_MEM_RESERVED	0x00000080
/*
 * Read the next lmb list entry from the ibm,dynamic-memory property
 * and return the information in the provided of_drconf_cell structure.
 */
static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp)
{
	const u32 *cp;

	drmem->base_addr = read_n_cells(n_mem_addr_cells, cellp);

	cp = *cellp;
	drmem->drc_index = cp[0];
	drmem->reserved = cp[1];
	drmem->aa_index = cp[2];
	drmem->flags = cp[3];

	*cellp = cp + 4;
}
/*
 * Retrieve and validate the ibm,dynamic-memory property of the device tree.
 *
 * The layout of the ibm,dynamic-memory property is a number N of lmb
 * list entries followed by N lmb list entries.  Each lmb list entry
 * contains information as laid out in the of_drconf_cell struct above.
 */
static int of_get_drconf_memory(struct device_node *memory, const u32 **dm)
{
	const u32 *prop;
	u32 len, entries;

	prop = of_get_property(memory, "ibm,dynamic-memory", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	entries = *prop++;

	/* Now that we know the number of entries, revalidate the size
	 * of the property read in to ensure we have everything
	 */
	if (len < (entries * (n_mem_addr_cells + 4) + 1) * sizeof(unsigned int))
		return 0;

	*dm = prop;
	return entries;
}
/*
 * Retrieve and validate the ibm,lmb-size property for drconf memory
 * from the device tree.
 */
static u64 of_get_lmb_size(struct device_node *memory)
{
	const u32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,lmb-size", &len);
	if (!prop || len < sizeof(unsigned int))
		return 0;

	return read_n_cells(n_mem_size_cells, &prop);
}
struct assoc_arrays {
	u32	n_arrays;
	u32	array_sz;
	const u32 *arrays;
};
/*
 * Retrieve and validate the list of associativity arrays for drconf
 * memory from the ibm,associativity-lookup-arrays property of the
 * device tree.
 *
 * The layout of the ibm,associativity-lookup-arrays property is a number N
 * indicating the number of associativity arrays, followed by a number M
 * indicating the size of each associativity array, followed by a list
 * of N associativity arrays.
 */
static int of_get_assoc_arrays(struct device_node *memory,
			       struct assoc_arrays *aa)
{
	const u32 *prop;
	u32 len;

	prop = of_get_property(memory, "ibm,associativity-lookup-arrays", &len);
	if (!prop || len < 2 * sizeof(unsigned int))
		return -1;

	aa->n_arrays = *prop++;
	aa->array_sz = *prop++;

	/* Now that we know the number of arrays and size of each array,
	 * revalidate the size of the property read in.
	 */
	if (len < (aa->n_arrays * aa->array_sz + 2) * sizeof(unsigned int))
		return -1;

	aa->arrays = prop;
	return 0;
}
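/*
 * Worked example (editorial addition): a property beginning <0x2 0x3 ...>
 * declares N = 2 arrays of M = 3 cells each, so the property must be at
 * least (2 * 3 + 2) * sizeof(u32) = 32 bytes long, which is exactly the
 * revalidation performed above.
 */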
/*
 * This is like of_node_to_nid_single() for memory represented in the
 * ibm,dynamic-reconfiguration-memory node.
 */
static int of_drconf_to_nid_single(struct of_drconf_cell *drmem,
				   struct assoc_arrays *aa)
{
	int default_nid = 0;
	int nid = default_nid;
	int index;

	if (min_common_depth > 0 && min_common_depth <= aa->array_sz &&
	    !(drmem->flags & DRCONF_MEM_AI_INVALID) &&
	    drmem->aa_index < aa->n_arrays) {
		index = drmem->aa_index * aa->array_sz + min_common_depth - 1;
		nid = aa->arrays[index];

		if (nid == 0xffff || nid >= MAX_NUMNODES)
			nid = default_nid;
	}

	return nid;
}
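/*
 * Worked example (editorial addition): with array_sz = 4, aa_index = 1
 * and min_common_depth = 2, the lookup reads arrays[1 * 4 + 2 - 1], i.e.
 * the second cell of the second associativity array, matching the
 * 1-based depth convention used by of_node_to_nid_single().
 */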
/*
 * Figure out to which domain a cpu belongs and stick it there.
 * Return the id of the domain used.
 */
static int __cpuinit numa_setup_cpu(unsigned long lcpu)
{
	int nid = 0;
	struct device_node *cpu = find_cpu_node(lcpu);

	if (!cpu) {
		WARN_ON(1);
		goto out;
	}

	nid = of_node_to_nid_single(cpu);

	if (nid < 0 || !node_online(nid))
		nid = any_online_node(NODE_MASK_ALL);
out:
	map_cpu_to_node(lcpu, nid);

	of_node_put(cpu);

	return nid;
}
static int __cpuinit cpu_numa_callback(struct notifier_block *nfb,
				       unsigned long action,
				       void *hcpu)
{
	unsigned long lcpu = (unsigned long)hcpu;
	int ret = NOTIFY_DONE;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		numa_setup_cpu(lcpu);
		ret = NOTIFY_OK;
		break;
#ifdef CONFIG_HOTPLUG_CPU
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		unmap_cpu_from_node(lcpu);
		ret = NOTIFY_OK;
		break;
#endif
	}
	return ret;
}
/*
 * Check and possibly modify a memory region to enforce the memory limit.
 *
 * Returns the size the region should have to enforce the memory limit.
 * This will either be the original value of size, a truncated value,
 * or zero.  If the returned value of size is 0 the region should be
 * discarded as it lies wholly above the memory limit.
 */
static unsigned long __init numa_enforce_memory_limit(unsigned long start,
						      unsigned long size)
{
	/*
	 * We use lmb_end_of_DRAM() in here instead of memory_limit because
	 * we've already adjusted it for the limit and it takes care of
	 * having memory holes below the limit.
	 */

	if (!memory_limit)
		return size;

	if (start + size <= lmb_end_of_DRAM())
		return size;

	if (start >= lmb_end_of_DRAM())
		return 0;

	return lmb_end_of_DRAM() - start;
}
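/*
 * Worked example (editorial addition): if lmb_end_of_DRAM() is
 * 0x40000000 (1GB), a region [0x20000000, +0x10000000) fits and keeps
 * its size, a region starting at 0x50000000 is discarded (returns 0),
 * and a region [0x30000000, +0x20000000) is truncated to 0x10000000
 * bytes so it ends exactly at the limit.
 */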
/*
 * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
 * node.  This assumes n_mem_{addr,size}_cells have been set.
 */
static void __init parse_drconf_memory(struct device_node *memory)
{
	const u32 *dm;
	unsigned int n, rc;
	unsigned long lmb_size, size;
	int nid;
	struct assoc_arrays aa;

	n = of_get_drconf_memory(memory, &dm);
	if (!n)
		return;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return;

	for (; n != 0; --n) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if the reserved bit is set in flags (0x80)
		   or if the block is not assigned to this partition (0x8) */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		nid = of_drconf_to_nid_single(&drmem, &aa);

		fake_numa_create_new_node(
				((drmem.base_addr + lmb_size) >> PAGE_SHIFT),
				&nid);

		node_set_online(nid);

		size = numa_enforce_memory_limit(drmem.base_addr, lmb_size);
		if (!size)
			continue;

		add_active_range(nid, drmem.base_addr >> PAGE_SHIFT,
				 (drmem.base_addr >> PAGE_SHIFT)
				 + (size >> PAGE_SHIFT));
	}
}
static int __init parse_numa_properties(void)
{
	struct device_node *cpu = NULL;
	struct device_node *memory = NULL;
	int default_nid = 0;
	unsigned long i;

	if (numa_enabled == 0) {
		printk(KERN_WARNING "NUMA disabled by user\n");
		return -1;
	}

	min_common_depth = find_min_common_depth();

	if (min_common_depth < 0)
		return min_common_depth;

	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);

	/*
	 * Even though we connect cpus to numa domains later in SMP
	 * init, we need to know the node ids now.  This is because
	 * each node to be onlined must have NODE_DATA etc backing it.
	 */
	for_each_present_cpu(i) {
		int nid;

		cpu = find_cpu_node(i);
		BUG_ON(!cpu);
		nid = of_node_to_nid_single(cpu);
		of_node_put(cpu);

		/*
		 * Don't fall back to default_nid yet -- we will plug
		 * cpus into nodes once the memory scan has discovered
		 * the topology.
		 */
		if (nid < 0)
			continue;
		node_set_online(nid);
	}

	get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells);
	memory = NULL;
	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start;
		unsigned long size;
		int nid;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory,
			"linux,usable-memory", &len);
		if (!memcell_buf || len <= 0)
			memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
new_range:
		/* these are order-sensitive, and modify the buffer pointer */
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);

		/*
		 * Assumption: either all memory nodes or none will
		 * have associativity properties.  If none, then
		 * everything goes to default_nid.
		 */
		nid = of_node_to_nid_single(memory);
		if (nid < 0)
			nid = default_nid;

		fake_numa_create_new_node(((start + size) >> PAGE_SHIFT), &nid);
		node_set_online(nid);

		if (!(size = numa_enforce_memory_limit(start, size))) {
			if (--ranges)
				goto new_range;
			else
				continue;
		}

		add_active_range(nid, start >> PAGE_SHIFT,
				 (start >> PAGE_SHIFT) + (size >> PAGE_SHIFT));

		if (--ranges)
			goto new_range;
	}

	/*
	 * Now do the same thing for each LMB listed in the ibm,dynamic-memory
	 * property in the ibm,dynamic-reconfiguration-memory node.
	 */
	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		parse_drconf_memory(memory);
		of_node_put(memory);
	}

	return 0;
}
static void __init setup_nonnuma(void)
{
	unsigned long top_of_ram = lmb_end_of_DRAM();
	unsigned long total_ram = lmb_phys_mem_size();
	unsigned long start_pfn, end_pfn;
	unsigned int i, nid = 0;

	printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
	       top_of_ram, total_ram);
	printk(KERN_DEBUG "Memory hole size: %ldMB\n",
	       (top_of_ram - total_ram) >> 20);

	for (i = 0; i < lmb.memory.cnt; ++i) {
		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);

		fake_numa_create_new_node(end_pfn, &nid);
		add_active_range(nid, start_pfn, end_pfn);
		node_set_online(nid);
	}
}
void __init dump_numa_cpu_topology(void)
{
	unsigned int node;
	unsigned int cpu, count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		printk(KERN_DEBUG "Node %d CPUs:", node);

		count = 0;
		/*
		 * If we used a CPU iterator here we would miss printing
		 * the holes in the cpumap.
		 */
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
				if (count == 0)
					printk(" %u", cpu);
				++count;
			} else {
				if (count > 1)
					printk("-%u", cpu - 1);
				count = 0;
			}
		}

		if (count > 1)
			printk("-%u", NR_CPUS - 1);
		printk("\n");
	}
}
static void __init dump_numa_memory_topology(void)
{
	unsigned int node;
	unsigned int count;

	if (min_common_depth == -1 || !numa_enabled)
		return;

	for_each_online_node(node) {
		unsigned long i;

		printk(KERN_DEBUG "Node %d Memory:", node);

		count = 0;

		for (i = 0; i < lmb_end_of_DRAM();
		     i += (1 << SECTION_SIZE_BITS)) {
			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
				if (count == 0)
					printk(" 0x%lx", i);
				++count;
			} else {
				if (count > 0)
					printk("-0x%lx", i);
				count = 0;
			}
		}

		if (count > 0)
			printk("-0x%lx", i);
		printk("\n");
	}
}
/*
 * Allocate some memory, satisfying the lmb or bootmem allocator where
 * required.  nid is the preferred node and end is the physical address of
 * the highest address in the node.
 *
 * Returns the physical address of the memory.
 */
static void __init *careful_allocation(int nid, unsigned long size,
				       unsigned long align,
				       unsigned long end_pfn)
{
	int new_nid;
	unsigned long ret = __lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);

	/* retry over all memory */
	if (!ret)
		ret = __lmb_alloc_base(size, align, lmb_end_of_DRAM());

	if (!ret)
		panic("numa.c: cannot allocate %lu bytes on node %d",
		      size, nid);

	/*
	 * If the memory came from a previously allocated node, we must
	 * retry with the bootmem allocator.
	 */
	new_nid = early_pfn_to_nid(ret >> PAGE_SHIFT);
	if (new_nid < nid) {
		ret = (unsigned long)__alloc_bootmem_node(NODE_DATA(new_nid),
				size, align, 0);

		if (!ret)
			panic("numa.c: cannot allocate %lu bytes on node %d",
			      size, new_nid);

		ret = __pa(ret);

		dbg("alloc_bootmem %lx %lx\n", ret, size);
	}

	return (void *)ret;
}
static struct notifier_block __cpuinitdata ppc64_numa_nb = {
	.notifier_call = cpu_numa_callback,
	.priority = 1 /* Must run before sched domains notifier. */
};
void __init do_init_bootmem(void)
{
	int nid;
	unsigned int i;

	min_low_pfn = 0;
	max_low_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
	max_pfn = max_low_pfn;

	if (parse_numa_properties())
		setup_nonnuma();
	else
		dump_numa_memory_topology();

	register_cpu_notifier(&ppc64_numa_nb);
	cpu_numa_callback(&ppc64_numa_nb, CPU_UP_PREPARE,
			  (void *)(unsigned long)boot_cpuid);

	for_each_online_node(nid) {
		unsigned long start_pfn, end_pfn;
		unsigned long bootmem_paddr;
		unsigned long bootmap_pages;

		get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

		/* Allocate the node structure node local if possible */
		NODE_DATA(nid) = careful_allocation(nid,
					sizeof(struct pglist_data),
					SMP_CACHE_BYTES, end_pfn);
		NODE_DATA(nid) = __va(NODE_DATA(nid));
		memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

		dbg("node %d\n", nid);
		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));

		NODE_DATA(nid)->bdata = &plat_node_bdata[nid];
		NODE_DATA(nid)->node_start_pfn = start_pfn;
		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

		if (NODE_DATA(nid)->node_spanned_pages == 0)
			continue;

		dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);

		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
		bootmem_paddr = (unsigned long)careful_allocation(nid,
					bootmap_pages << PAGE_SHIFT,
					PAGE_SIZE, end_pfn);
		memset(__va(bootmem_paddr), 0, bootmap_pages << PAGE_SHIFT);

		dbg("bootmap_paddr = %lx\n", bootmem_paddr);

		init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
				  start_pfn, end_pfn);

		free_bootmem_with_active_regions(nid, end_pfn);

		/* Mark reserved regions on this node */
		for (i = 0; i < lmb.reserved.cnt; i++) {
			unsigned long physbase = lmb.reserved.region[i].base;
			unsigned long size = lmb.reserved.region[i].size;
			unsigned long start_paddr = start_pfn << PAGE_SHIFT;
			unsigned long end_paddr = end_pfn << PAGE_SHIFT;

			if (early_pfn_to_nid(physbase >> PAGE_SHIFT) != nid &&
			    early_pfn_to_nid((physbase+size-1) >> PAGE_SHIFT) != nid)
				continue;

			if (physbase < end_paddr &&
			    (physbase+size) > start_paddr) {
				/* overlaps */
				if (physbase < start_paddr) {
					size -= start_paddr - physbase;
					physbase = start_paddr;
				}

				if (size > end_paddr - physbase)
					size = end_paddr - physbase;

				dbg("reserve_bootmem %lx %lx\n", physbase,
				    size);
				reserve_bootmem_node(NODE_DATA(nid), physbase,
						     size, BOOTMEM_DEFAULT);
			}
		}

		sparse_memory_present_with_active_regions(nid);
	}
}
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] = lmb_end_of_DRAM() >> PAGE_SHIFT;
	free_area_init_nodes(max_zone_pfns);
}
static int __init early_numa(char *p)
{
	if (!p)
		return 0;

	if (strstr(p, "off"))
		numa_enabled = 0;

	if (strstr(p, "debug"))
		numa_debug = 1;

	p = strstr(p, "fake=");
	if (p)
		cmdline = p + strlen("fake=");

	return 0;
}
early_param("numa", early_numa);
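/*
 * Usage example (editorial addition, from the strstr() checks above):
 * the kernel command line may contain "numa=off" to disable NUMA,
 * "numa=debug" to enable the dbg() messages, or "numa=fake=1G,4G" to
 * hand the boundary list to fake_numa_create_new_node() via cmdline.
 */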
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Validate the node associated with the memory section we are
 * trying to add.
 */
int valid_hot_add_scn(int *nid, unsigned long start, u32 lmb_size,
		      unsigned long scn_addr)
{
	nodemask_t nodes;

	if (*nid < 0 || !node_online(*nid))
		*nid = any_online_node(NODE_MASK_ALL);

	if ((scn_addr >= start) && (scn_addr < (start + lmb_size))) {
		nodes_setall(nodes);
		while (NODE_DATA(*nid)->node_spanned_pages == 0) {
			node_clear(*nid, nodes);
			*nid = any_online_node(nodes);
		}

		return 1;
	}

	return 0;
}
/*
 * Find the node associated with a hot added memory section represented
 * by the ibm,dynamic-reconfiguration-memory node.
 */
static int hot_add_drconf_scn_to_nid(struct device_node *memory,
				     unsigned long scn_addr)
{
	const u32 *dm;
	unsigned int n, rc;
	unsigned long lmb_size;
	int default_nid = any_online_node(NODE_MASK_ALL);
	int nid;
	struct assoc_arrays aa;

	n = of_get_drconf_memory(memory, &dm);
	if (!n)
		return default_nid;

	lmb_size = of_get_lmb_size(memory);
	if (!lmb_size)
		return default_nid;

	rc = of_get_assoc_arrays(memory, &aa);
	if (rc)
		return default_nid;

	for (; n != 0; --n) {
		struct of_drconf_cell drmem;

		read_drconf_cell(&drmem, &dm);

		/* skip this block if it is reserved or not assigned to
		 * this partition */
		if ((drmem.flags & DRCONF_MEM_RESERVED)
		    || !(drmem.flags & DRCONF_MEM_ASSIGNED))
			continue;

		nid = of_drconf_to_nid_single(&drmem, &aa);

		if (valid_hot_add_scn(&nid, drmem.base_addr, lmb_size,
				      scn_addr))
			return nid;
	}

	BUG();	/* section address should be found above */
	return 0;
}
/*
 * Find the node associated with a hot added memory section.  Section
 * corresponds to a SPARSEMEM section, not an LMB.  It is assumed that
 * sections are fully contained within a single LMB.
 */
int hot_add_scn_to_nid(unsigned long scn_addr)
{
	struct device_node *memory = NULL;
	int nid;

	if (!numa_enabled || (min_common_depth < 0))
		return any_online_node(NODE_MASK_ALL);

	memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
	if (memory) {
		nid = hot_add_drconf_scn_to_nid(memory, scn_addr);
		of_node_put(memory);
		return nid;
	}

	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
		unsigned long start, size;
		int ranges;
		const unsigned int *memcell_buf;
		unsigned int len;

		memcell_buf = of_get_property(memory, "reg", &len);
		if (!memcell_buf || len <= 0)
			continue;

		/* ranges in cell */
		ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
ha_new_range:
		start = read_n_cells(n_mem_addr_cells, &memcell_buf);
		size = read_n_cells(n_mem_size_cells, &memcell_buf);
		nid = of_node_to_nid_single(memory);

		if (valid_hot_add_scn(&nid, start, size, scn_addr)) {
			of_node_put(memory);
			return nid;
		}

		if (--ranges)		/* process all ranges in cell */
			goto ha_new_range;
	}
	BUG();	/* section address should be found above */
	return 0;
}
#endif /* CONFIG_MEMORY_HOTPLUG */