/*
 * Written by: Patricia Gaughen <gone@us.ibm.com>, IBM Corporation
 * August 2002: added remote node KVA remap - Martin J. Bligh
 *
 * Copyright (C) 2002, IBM Corp.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/nodemask.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/pfn.h>
#include <linux/swap.h>
#include <linux/acpi.h>

#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/mmzone.h>
#include <asm/bios_ebda.h>

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);
static bootmem_data_t node0_bdata;

/*
 * numa interface - we expect the numa architecture specific code to have
 *                  populated the following initialisation.
 *
 * 1) node_online_map  - the map of all nodes configured (online) in the system
 * 2) node_start_pfn   - the starting page frame number for a node
 * 3) node_end_pfn     - the ending page frame number for a node
 */
unsigned long node_start_pfn[MAX_NUMNODES] __read_mostly;
unsigned long node_end_pfn[MAX_NUMNODES] __read_mostly;
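
/*
 * For example (a hypothetical layout, not taken from real firmware): on a
 * two-node box with 1Gb of RAM per node, the arch code (SRAT parsing, or
 * get_memcfg_numa_flat() below for the single-node case) would leave:
 *
 *	node_online_map = { 0, 1 }
 *	node_start_pfn[0] = 0;		node_end_pfn[0] = 0x40000;
 *	node_start_pfn[1] = 0x40000;	node_end_pfn[1] = 0x80000;
 */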

#ifdef CONFIG_DISCONTIGMEM
/*
 * 4) physnode_map     - the mapping between a pfn and owning node
 * physnode_map keeps track of the physical memory layout of a generic
 * numa node at a 64Mb granularity: each element of the array represents
 * 64Mb of memory and holds the id of the owning node.  So, if the first
 * gig is on node 0 and the second gig is on node 1, physnode_map will
 * contain:
 *
 *     physnode_map[0-15] = 0;
 *     physnode_map[16-31] = 1;
 *     physnode_map[32- ] = -1;
 */
s8 physnode_map[MAX_ELEMENTS] __read_mostly = { [0 ... (MAX_ELEMENTS - 1)] = -1};
EXPORT_SYMBOL(physnode_map);
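
/*
 * Sketch of the consumer side: the real lookup is pfn_to_nid() in
 * <asm/mmzone.h>; shown here under an example name for illustration only.
 */
static inline int example_pfn_to_nid(unsigned long pfn)
{
	/* each element covers PAGES_PER_ELEMENT pfns, i.e. 64Mb of memory */
	return (int)physnode_map[pfn / PAGES_PER_ELEMENT];
}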

void memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	printk(KERN_INFO "Node: %d, start_pfn: %ld, end_pfn: %ld\n",
			nid, start, end);
	printk(KERN_DEBUG "  Setting physnode_map array to node %d for pfns:\n", nid);
	printk(KERN_DEBUG "  ");
	for (pfn = start; pfn < end; pfn += PAGES_PER_ELEMENT) {
		physnode_map[pfn / PAGES_PER_ELEMENT] = nid;
		printk(KERN_CONT "%ld ", pfn);
	}
	printk(KERN_CONT "\n");
}

unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
					      unsigned long end_pfn)
{
	unsigned long nr_pages = end_pfn - start_pfn;

	if (!nr_pages)
		return 0;
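
	/*
	 * Worked example (assuming a 32-byte struct page on 32-bit): a
	 * node spanning 0x40000 pfns (1Gb) needs (0x40000 + 1) * 32
	 * bytes, just over 8Mb, of node-local memmap.
	 */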
	return (nr_pages + 1) * sizeof(struct page);
}
#endif

extern unsigned long find_max_low_pfn(void);
extern void add_one_highpage_init(struct page *, int, int);
extern unsigned long highend_pfn, highstart_pfn;

#define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)

unsigned long node_remap_size[MAX_NUMNODES];
static void *node_remap_start_vaddr[MAX_NUMNODES];
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);

static unsigned long kva_start_pfn;
static unsigned long kva_pages;

/*
 * FLAT - support for basic PC memory model with discontig enabled, essentially
 *        a single node with all available processors in it with a flat
 *        memory map.
 */
int __init get_memcfg_numa_flat(void)
{
	printk("NUMA - single node, flat memory mode\n");

	/* Run the memory configuration and find the top of memory. */
	propagate_e820_map();
	node_start_pfn[0] = 0;
	node_end_pfn[0] = max_pfn;
	memory_present(0, 0, max_pfn);
	node_remap_size[0] = node_memmap_size_bytes(0, 0, max_pfn);

	/* Indicate there is one node available. */
	nodes_clear(node_online_map);
	node_set_online(0);
	return 1;
}

/*
 * Find the highest page frame number we have available for the node
 */
static void __init propagate_e820_map_node(int nid)
{
	if (node_end_pfn[nid] > max_pfn)
		node_end_pfn[nid] = max_pfn;
	/*
	 * if a user has given mem=XXXX, then we need to make sure
	 * that the node _starts_ before that, too, not just ends
	 */
	if (node_start_pfn[nid] > max_pfn)
		node_start_pfn[nid] = max_pfn;
	BUG_ON(node_start_pfn[nid] > node_end_pfn[nid]);
}
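
/*
 * Worked example (hypothetical numbers): booting with mem=512M gives
 * max_pfn == 0x20000, so a node reported by SRAT as spanning pfns
 * 0x40000-0x80000 collapses to the empty range 0x20000-0x20000 above
 * rather than tripping the BUG_ON().
 */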

/*
 * Allocate memory for the pg_data_t for this node via a crude pre-bootmem
 * method.  For node zero take this from the bottom of memory, for
 * subsequent nodes place them at node_remap_start_vaddr which contains
 * node local data in physically node local memory.  See setup_memory()
 * for details.
 */
static void __init allocate_pgdat(int nid)
{
	if (nid && node_has_online_mem(nid))
		NODE_DATA(nid) = (pg_data_t *)node_remap_start_vaddr[nid];
	else {
		unsigned long pgdat_phys;
		pgdat_phys = find_e820_area(min_low_pfn<<PAGE_SHIFT,
				max_low_pfn<<PAGE_SHIFT, sizeof(pg_data_t),
				PAGE_SIZE);
		NODE_DATA(nid) = (pg_data_t *)(pfn_to_kaddr(pgdat_phys>>PAGE_SHIFT));
		reserve_early(pgdat_phys, pgdat_phys + sizeof(pg_data_t),
			      "NODE_DATA");
	}
	printk(KERN_DEBUG "allocate_pgdat: node %d NODE_DATA %08lx\n",
		nid, (unsigned long)NODE_DATA(nid));
}

#ifdef CONFIG_DISCONTIGMEM
/*
 * In the discontig memory model, a portion of the kernel virtual area (KVA)
 * is reserved and portions of nodes are mapped using it. This is to allow
 * node-local memory to be allocated for structures that would normally
 * require ZONE_NORMAL. The memory is allocated with alloc_remap() and
 * callers should be prepared to allocate from the bootmem allocator
 * instead. This KVA mechanism is incompatible with SPARSEMEM as it makes
 * assumptions about the layout of memory that are broken if alloc_remap()
 * succeeds for some of the map and fails for others.
 */
static unsigned long node_remap_start_pfn[MAX_NUMNODES];
static void *node_remap_end_vaddr[MAX_NUMNODES];
static void *node_remap_alloc_vaddr[MAX_NUMNODES];
static unsigned long node_remap_offset[MAX_NUMNODES];

void *alloc_remap(int nid, unsigned long size)
{
	void *allocation = node_remap_alloc_vaddr[nid];

	size = ALIGN(size, L1_CACHE_BYTES);

	if (!allocation || (allocation + size) >= node_remap_end_vaddr[nid])
		return NULL;

	node_remap_alloc_vaddr[nid] += size;
	memset(allocation, 0, size);

	return allocation;
}
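
/*
 * Sketch of the expected caller pattern (cf. alloc_node_mem_map() in
 * mm/page_alloc.c): try the node-local remap area first, and fall back
 * to bootmem when the remap space is exhausted or was never set up:
 *
 *	map = alloc_remap(nid, size);
 *	if (!map)
 *		map = alloc_bootmem_node(NODE_DATA(nid), size);
 */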

void __init remap_numa_kva(void)
{
	void *vaddr;
	unsigned long pfn;
	int node;

	for_each_online_node(node) {
		printk(KERN_DEBUG "remap_numa_kva: node %d\n", node);
		for (pfn=0; pfn < node_remap_size[node]; pfn += PTRS_PER_PTE) {
			vaddr = node_remap_start_vaddr[node]+(pfn<<PAGE_SHIFT);
			printk(KERN_DEBUG "remap_numa_kva: %08lx to pfn %08lx\n",
				(unsigned long)vaddr,
				node_remap_start_pfn[node] + pfn);
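			/* each pmd entry maps one large page: 4Mb here,
			 * or 2Mb when PAE is enabled */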
			set_pmd_pfn((ulong) vaddr,
				node_remap_start_pfn[node] + pfn,
				PAGE_KERNEL_LARGE);
		}
	}
}

static unsigned long calculate_numa_remap_pages(void)
{
	int nid;
	unsigned long size, reserve_pages = 0;
	unsigned long pfn;

	for_each_online_node(nid) {
		unsigned old_end_pfn = node_end_pfn[nid];

		/*
		 * The acpi/srat node info can show hot-add memory zones
		 * where memory could be added but is not currently present.
		 */
		if (node_start_pfn[nid] > max_pfn)
			continue;
		if (node_end_pfn[nid] > max_pfn)
			node_end_pfn[nid] = max_pfn;

		/* ensure the remap includes space for the pgdat. */
		size = node_remap_size[nid] + sizeof(pg_data_t);

		/* convert size to large (pmd size) pages, rounding up */
		size = (size + LARGE_PAGE_BYTES - 1) / LARGE_PAGE_BYTES;
		/* now the roundup is correct, convert to PAGE_SIZE pages */
		size = size * PTRS_PER_PTE;
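
		/*
		 * Worked example (non-PAE, so PTRS_PER_PTE == 1024 and
		 * LARGE_PAGE_BYTES == 4Mb): a 9Mb memmap rounds up to
		 * 3 large pages, i.e. 3 * 1024 == 3072 4Kb pages.
		 */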

		/*
		 * Validate the region we are allocating only contains valid
		 * pages.
		 */
		for (pfn = node_end_pfn[nid] - size;
		     pfn < node_end_pfn[nid]; pfn++)
			if (!page_is_ram(pfn))
				break;

		if (pfn != node_end_pfn[nid])
			size = 0;

		printk("Reserving %ld pages of KVA for lmem_map of node %d\n",
				size, nid);
		node_remap_size[nid] = size;
		node_remap_offset[nid] = reserve_pages;
		reserve_pages += size;
		printk("Shrinking node %d from %ld pages to %ld pages\n",
			nid, node_end_pfn[nid], node_end_pfn[nid] - size);

		if (node_end_pfn[nid] & (PTRS_PER_PTE-1)) {
			/*
			 * Align node_end_pfn[] and node_remap_start_pfn[] to
			 * pmd boundary. remap_numa_kva will barf otherwise.
			 */
			printk("Shrinking node %d further by %ld pages for proper alignment\n",
				nid, node_end_pfn[nid] & (PTRS_PER_PTE-1));
			size += node_end_pfn[nid] & (PTRS_PER_PTE-1);
		}

		node_end_pfn[nid] -= size;
		node_remap_start_pfn[nid] = node_end_pfn[nid];
		shrink_active_range(nid, old_end_pfn, node_end_pfn[nid]);
	}
	printk("Reserving total of %ld pages for numa KVA remap\n",
			reserve_pages);
	return reserve_pages;
}

static void init_remap_allocator(int nid)
{
	node_remap_start_vaddr[nid] = pfn_to_kaddr(
			kva_start_pfn + node_remap_offset[nid]);
	node_remap_end_vaddr[nid] = node_remap_start_vaddr[nid] +
		(node_remap_size[nid] * PAGE_SIZE);
	node_remap_alloc_vaddr[nid] = node_remap_start_vaddr[nid] +
		ALIGN(sizeof(pg_data_t), PAGE_SIZE);

	printk("node %d will remap to vaddr %08lx - %08lx\n", nid,
		(ulong) node_remap_start_vaddr[nid],
		(ulong) node_remap_end_vaddr[nid]);
}
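
/*
 * A sketch of one node's remap window after init_remap_allocator() (the
 * pg_data_t placement applies to nid != 0, see allocate_pgdat()):
 *
 *	node_remap_start_vaddr[nid]		-> pg_data_t for the node
 *	+ ALIGN(sizeof(pg_data_t), PAGE_SIZE)	-> alloc_remap() arena
 *	+ node_remap_size[nid] * PAGE_SIZE	== node_remap_end_vaddr[nid]
 */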
#else
void *alloc_remap(int nid, unsigned long size)
{
	return NULL;
}

static unsigned long calculate_numa_remap_pages(void)
{
	return 0;
}

static void init_remap_allocator(int nid)
{
}

void __init remap_numa_kva(void)
{
}
#endif /* CONFIG_DISCONTIGMEM */

extern void setup_bootmem_allocator(void);

unsigned long __init setup_memory(void)
{
	int nid;
	unsigned long system_start_pfn, system_max_low_pfn;
	unsigned long wasted_pages;

	/*
	 * When mapping a NUMA machine we allocate the node_mem_map arrays
	 * from node local memory.  They are then mapped directly into KVA
	 * between zone normal and vmalloc space.  Calculate the size of
	 * this space and use it to adjust the boundary between ZONE_NORMAL
	 * and ZONE_HIGHMEM.
	 */
	get_memcfg_numa();

	kva_pages = calculate_numa_remap_pages();

	/* partially used pages are not usable - thus round upwards */
	system_start_pfn = min_low_pfn = PFN_UP(init_pg_tables_end);

	kva_start_pfn = find_max_low_pfn() - kva_pages;

#ifdef CONFIG_BLK_DEV_INITRD
	/* Numa kva area is below the initrd */
	if (initrd_start)
		kva_start_pfn = PFN_DOWN(initrd_start - PAGE_OFFSET)
			- kva_pages;
#endif

	/*
	 * We waste pages past the end of the KVA for no good reason other
	 * than how it is located. This is bad.
	 */
	wasted_pages = kva_start_pfn & (PTRS_PER_PTE-1);
	kva_start_pfn -= wasted_pages;
	kva_pages += wasted_pages;
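
	/*
	 * Lowmem layout at this point, as a sketch (assuming an initrd is
	 * present; the KVA boundaries are pmd aligned):
	 *
	 *	pfn 0 .. kva_start_pfn			ZONE_NORMAL (bootmem)
	 *	kva_start_pfn .. + kva_pages		per-node remap KVA
	 *	above the KVA area			initrd, then highmem
	 */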

	system_max_low_pfn = max_low_pfn = find_max_low_pfn();
	printk("kva_start_pfn ~ %ld find_max_low_pfn() ~ %ld\n",
		kva_start_pfn, max_low_pfn);
	printk("max_pfn = %ld\n", max_pfn);

	/* avoid clash with initrd */
	reserve_early(kva_start_pfn<<PAGE_SHIFT,
		      (kva_start_pfn + kva_pages)<<PAGE_SHIFT,
		      "KVA PG");
#ifdef CONFIG_HIGHMEM
	highstart_pfn = highend_pfn = max_pfn;
	if (max_pfn > system_max_low_pfn)
		highstart_pfn = system_max_low_pfn;
	printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
	       pages_to_mb(highend_pfn - highstart_pfn));
	num_physpages = highend_pfn;
	high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
#else
	num_physpages = system_max_low_pfn;
	high_memory = (void *) __va(system_max_low_pfn * PAGE_SIZE - 1) + 1;
#endif
	printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
			pages_to_mb(system_max_low_pfn));
	printk("min_low_pfn = %ld, max_low_pfn = %ld, highstart_pfn = %ld\n",
			min_low_pfn, max_low_pfn, highstart_pfn);

	printk("Low memory ends at vaddr %08lx\n",
			(ulong) pfn_to_kaddr(max_low_pfn));
	for_each_online_node(nid) {
		init_remap_allocator(nid);

		allocate_pgdat(nid);
	}
	printk("High memory starts at vaddr %08lx\n",
			(ulong) pfn_to_kaddr(highstart_pfn));
	for_each_online_node(nid)
		propagate_e820_map_node(nid);

	memset(NODE_DATA(0), 0, sizeof(struct pglist_data));
	NODE_DATA(0)->bdata = &node0_bdata;
	setup_bootmem_allocator();
	return max_low_pfn;
}

void __init zone_sizes_init(void)
{
	int nid;
	unsigned long max_zone_pfns[MAX_NR_ZONES];

	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_DMA] =
		virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
#ifdef CONFIG_HIGHMEM
	max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
#endif

	/* If SRAT has not registered memory, register it now */
	if (find_max_pfn_with_active_regions() == 0) {
		for_each_online_node(nid) {
			if (node_has_online_mem(nid))
				add_active_range(nid, node_start_pfn[nid],
						 node_end_pfn[nid]);
		}
	}

	free_area_init_nodes(max_zone_pfns);
}

void __init set_highmem_pages_init(int bad_ppro)
{
#ifdef CONFIG_HIGHMEM
	struct zone *zone;
	struct page *page;

	for_each_zone(zone) {
		unsigned long node_pfn, zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		printk("Initializing %s for node %d (%08lx:%08lx)\n",
				zone->name, zone_to_nid(zone),
				zone_start_pfn, zone_end_pfn);

		for (node_pfn = zone_start_pfn; node_pfn < zone_end_pfn; node_pfn++) {
			if (!pfn_valid(node_pfn))
				continue;
			page = pfn_to_page(node_pfn);
			add_one_highpage_init(page, node_pfn, bad_ppro);
		}
	}
	totalram_pages += totalhigh_pages;
#endif
}

#ifdef CONFIG_MEMORY_HOTPLUG
static int paddr_to_nid(u64 addr)
{
	int nid;
	unsigned long pfn = PFN_DOWN(addr);

	for_each_node(nid)
		if (node_start_pfn[nid] <= pfn &&
		    pfn < node_end_pfn[nid])
			return nid;

	return -1;
}

/*
 * This function is used to ask for a node id BEFORE memmap and mem_section's
 * initialization (pfn_to_nid() can't be used yet).
 * If _PXM is not defined in ACPI's DSDT, the node id must be found by this.
 */
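/*
 * Worked example (hypothetical layout): with node 1 spanning pfns
 * 0x40000-0x80000, a DIMM hot-added at physical address 0x40000000
 * resolves via pfn 0x40000 to node 1; an address outside every node's
 * range falls back to node 0 below.
 */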
int memory_add_physaddr_to_nid(u64 addr)
{
	int nid = paddr_to_nid(addr);
	return (nid >= 0) ? nid : 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif