/*
 * Copyright (c) 2000, 2003 Silicon Graphics, Inc.  All rights reserved.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Tony Luck <tony.luck@intel.com>
 * Copyright (c) 2002 NEC Corp.
 * Copyright (c) 2002 Kimio Suganuma <k-suganuma@da.jp.nec.com>
 * Copyright (c) 2004 Silicon Graphics, Inc
 *	Russ Anderson <rja@sgi.com>
 *	Jesse Barnes <jbarnes@sgi.com>
 *	Jack Steiner <steiner@sgi.com>
 */

/*
 * Platform initialization for Discontig Memory
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/nodemask.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/meminit.h>
#include <asm/numa.h>
#include <asm/sections.h>
/*
 * Track per-node information needed to setup the boot memory allocator, the
 * per-node areas, and the real VM.
 */
struct early_node_data {
	struct ia64_node_data *node_data;
	pg_data_t *pgdat;
	unsigned long pernode_addr;
	unsigned long pernode_size;
	struct bootmem_data bootmem_data;
	unsigned long num_physpages;
	unsigned long num_dma_physpages;
	unsigned long min_pfn;
	unsigned long max_pfn;
};

static struct early_node_data mem_data[MAX_NUMNODES] __initdata;
static nodemask_t memory_less_mask __initdata;
/*
 * To prevent cache aliasing effects, align per-node structures so that they
 * start at addresses that are strided by node number.
 */
#define NODEDATA_ALIGN(addr, node)					\
	((((addr) + 1024*1024-1) & ~(1024*1024-1)) + (node)*PERCPU_PAGE_SIZE)
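/*
 * Worked example (illustrative values, assuming the usual 64KB
 * PERCPU_PAGE_SIZE): NODEDATA_ALIGN(0x4100400, 2) rounds 0x4100400 up to
 * the 1MB boundary 0x4200000, then adds 2 * 0x10000, giving 0x4220000.
 * Consecutive nodes' structures therefore never share the same offset
 * relative to a 1MB boundary, which is the aliasing avoidance noted above.
 */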
/**
 * build_node_maps - callback to setup bootmem structs for each node
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * We allocate a struct bootmem_data for each piece of memory that we wish to
 * treat as a virtually contiguous block (i.e. each node). Each such block
 * must start on an %IA64_GRANULE_SIZE boundary, so we round the address down
 * if necessary.  Any non-existent pages will simply be part of the virtual
 * memmap.  We also update min_low_pfn and max_low_pfn here as we receive
 * memory ranges from the caller.
 */
static int __init build_node_maps(unsigned long start, unsigned long len,
				  int node)
{
	unsigned long cstart, epfn, end = start + len;
	struct bootmem_data *bdp = &mem_data[node].bootmem_data;

	epfn = GRANULEROUNDUP(end) >> PAGE_SHIFT;
	cstart = GRANULEROUNDDOWN(start);

	if (!bdp->node_low_pfn) {
		bdp->node_boot_start = cstart;
		bdp->node_low_pfn = epfn;
	} else {
		bdp->node_boot_start = min(cstart, bdp->node_boot_start);
		bdp->node_low_pfn = max(epfn, bdp->node_low_pfn);
	}
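	/*
	 * Illustration: two ranges on the same node, say [64MB, 128MB) and
	 * [192MB, 256MB), collapse into node_boot_start = 64MB and
	 * node_low_pfn = pfn(256MB); the hole between them is covered by
	 * the virtual memmap rather than by real pages.
	 */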
	min_low_pfn = min(min_low_pfn, bdp->node_boot_start>>PAGE_SHIFT);
	max_low_pfn = max(max_low_pfn, bdp->node_low_pfn);

	return 0;
}
/**
 * early_nr_cpus_node - return number of cpus on a given node
 * @node: node to check
 *
 * Count the number of cpus on @node.  We can't use nr_cpus_node() yet because
 * acpi_boot_init() (which builds the node_to_cpu_mask array) hasn't been
 * called yet.  Note that node 0 will also count all non-existent cpus.
 */
static int __init early_nr_cpus_node(int node)
{
	int cpu, n = 0;

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		if (node == node_cpuid[cpu].nid)
			n++;

	return n;
}
/**
 * compute_pernodesize - compute size of pernode data
 * @node: the node id.
 */
static unsigned long __init compute_pernodesize(int node)
{
	unsigned long pernodesize = 0, cpus;

	cpus = early_nr_cpus_node(node);
	pernodesize += PERCPU_PAGE_SIZE * cpus;
	pernodesize += node * L1_CACHE_BYTES;
	pernodesize += L1_CACHE_ALIGN(sizeof(pg_data_t));
	pernodesize += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));
	pernodesize = PAGE_ALIGN(pernodesize);

	return pernodesize;
}
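/*
 * NB: fill_pernode() below carves up the pernode region in the same order
 * these sizes are accumulated, so the two functions must be kept in sync.
 */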
/**
 * fill_pernode - initialize pernode data.
 * @node: the node id.
 * @pernode: physical address of pernode data
 * @pernodesize: size of the pernode data
 */
static void __init fill_pernode(int node, unsigned long pernode,
	unsigned long pernodesize)
{
	void *cpu_data;
	int cpus = early_nr_cpus_node(node), cpu;
	struct bootmem_data *bdp = &mem_data[node].bootmem_data;

	mem_data[node].pernode_addr = pernode;
	mem_data[node].pernode_size = pernodesize;
	memset(__va(pernode), 0, pernodesize);

	cpu_data = (void *)pernode;
	pernode += PERCPU_PAGE_SIZE * cpus;
	pernode += node * L1_CACHE_BYTES;

	mem_data[node].pgdat = __va(pernode);
	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));

	mem_data[node].node_data = __va(pernode);
	pernode += L1_CACHE_ALIGN(sizeof(struct ia64_node_data));

	mem_data[node].pgdat->bdata = bdp;
	pernode += L1_CACHE_ALIGN(sizeof(pg_data_t));
	/*
	 * Copy the static per-cpu data into the region we
	 * just set aside and then setup __per_cpu_offset
	 * for each CPU on this node.
	 */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		if (node == node_cpuid[cpu].nid) {
			memcpy(__va(cpu_data), __phys_per_cpu_start,
			       __per_cpu_end - __per_cpu_start);
			__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
				__per_cpu_start;
			cpu_data += PERCPU_PAGE_SIZE;
		}
	}

	return;
}
/**
 * find_pernode_space - allocate memory for memory map and per-node structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * This routine reserves space for the per-cpu data struct, the list of
 * pg_data_ts and the per-node data struct.  Each node will have something like
 * the following in the first chunk of addr. space large enough to hold it.
 *
 *    ________________________
 *   |                        |
 *   |~~~~~~~~~~~~~~~~~~~~~~~~| <-- NODEDATA_ALIGN(start, node) for the first
 *   |    PERCPU_PAGE_SIZE *  |     start and length big enough
 *   |    cpus_on_this_node   | Node 0 will also have entries for all non-existent cpus.
 *   |------------------------|
 *   |   local pg_data_t *    |
 *   |------------------------|
 *   |  local ia64_node_data  |
 *   |------------------------|
 *   |          ???           |
 *   |________________________|
 *
 * Once this space has been set aside, the bootmem maps are initialized.  We
 * could probably move the allocation of the per-cpu and ia64_node_data space
 * outside of this function and use alloc_bootmem_node(), but doing it here
 * is straightforward and we get the alignments we want so...
 */
static int __init find_pernode_space(unsigned long start, unsigned long len,
				     int node)
{
	unsigned long epfn;
	unsigned long pernodesize = 0, pernode, pages, mapsize;
	struct bootmem_data *bdp = &mem_data[node].bootmem_data;

	epfn = (start + len) >> PAGE_SHIFT;

	pages = bdp->node_low_pfn - (bdp->node_boot_start >> PAGE_SHIFT);
	mapsize = bootmem_bootmap_pages(pages) << PAGE_SHIFT;

	/*
	 * Make sure this memory falls within this node's usable memory
	 * since we may have thrown some away in build_maps().
	 */
	if (start < bdp->node_boot_start || epfn > bdp->node_low_pfn)
		return 0;

	/* Don't setup this node's local space twice... */
	if (mem_data[node].pernode_addr)
		return 0;

	/*
	 * Calculate total size needed, incl. what's necessary
	 * for good alignment and alias prevention.
	 */
	pernodesize = compute_pernodesize(node);
	pernode = NODEDATA_ALIGN(start, node);

	/* Is this range big enough for what we want to store here? */
	if (start + len > (pernode + pernodesize + mapsize))
		fill_pernode(node, pernode, pernodesize);

	return 0;
}
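/*
 * Note that the bootmem bitmap itself is not placed here; find_memory()
 * later puts it immediately after the pernode area (map = pernode +
 * pernodesize), which is why the size check above also accounts for mapsize.
 */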
/**
 * free_node_bootmem - free bootmem allocator memory for use
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Simply calls the bootmem allocator to free the specified range from
 * the given pg_data_t's bdata struct.  After this function has been called
 * for all the entries in the EFI memory map, the bootmem allocator will
 * be ready to service allocation requests.
 */
static int __init free_node_bootmem(unsigned long start, unsigned long len,
				    int node)
{
	free_bootmem_node(mem_data[node].pgdat, start, len);

	return 0;
}
/**
 * reserve_pernode_space - reserve memory for per-node space
 *
 * Reserve the space used by the bootmem maps & per-node space in the boot
 * allocator so that when we actually create the real mem maps we don't
 * use their memory.
 */
static void __init reserve_pernode_space(void)
{
	unsigned long base, size, pages;
	struct bootmem_data *bdp;
	int node;

	for_each_online_node(node) {
		pg_data_t *pdp = mem_data[node].pgdat;

		if (node_isset(node, memory_less_mask))
			continue;

		bdp = pdp->bdata;

		/* First the bootmem_map itself */
		pages = bdp->node_low_pfn - (bdp->node_boot_start>>PAGE_SHIFT);
		size = bootmem_bootmap_pages(pages) << PAGE_SHIFT;
		base = __pa(bdp->node_bootmem_map);
		reserve_bootmem_node(pdp, base, size);

		/* Now the per-node space */
		size = mem_data[node].pernode_size;
		base = __pa(mem_data[node].pernode_addr);
		reserve_bootmem_node(pdp, base, size);
	}
}
/**
 * initialize_pernode_data - fixup per-cpu & per-node pointers
 *
 * Each node's per-node area has a copy of the global pg_data_t list, so
 * we copy that to each node here, as well as setting the per-cpu pointer
 * to the local node data structure.  The active_cpus field of the per-node
 * structure gets setup by the platform_cpu_init() function later.
 */
static void __init initialize_pernode_data(void)
{
	int cpu, node;
	pg_data_t *pgdat_list[MAX_NUMNODES];

	for_each_online_node(node)
		pgdat_list[node] = mem_data[node].pgdat;

	/* Copy the pg_data_t list to each node and init the node field */
	for_each_online_node(node) {
		memcpy(mem_data[node].node_data->pg_data_ptrs, pgdat_list,
		       sizeof(pgdat_list));
	}

	/* Set the node_data pointer for each per-cpu struct */
	for (cpu = 0; cpu < NR_CPUS; cpu++) {
		node = node_cpuid[cpu].nid;
		per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
	}
}
/**
 * memory_less_node_alloc - attempt to allocate memory on the best NUMA SLIT
 *	node, but fall back to any other node when __alloc_bootmem_node fails
 *	for best.
 * @nid: node id
 * @pernodesize: size of this node's pernode data
 * @align: alignment to use for this node's pernode data
 */
static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize,
	unsigned long align)
{
	void *ptr = NULL;
	u8 best = 0xff;
	int bestnode = -1, node;

	for_each_online_node(node) {
		if (node_isset(node, memory_less_mask))
			continue;
		else if (node_distance(nid, node) < best) {
			best = node_distance(nid, node);
			bestnode = node;
		}
	}
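	/*
	 * node_distance() values come from the ACPI SLIT; smaller means
	 * closer, so bestnode ends up as the nearest node that has memory
	 * of its own.
	 */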
	ptr = __alloc_bootmem_node(mem_data[bestnode].pgdat,
		pernodesize, align, __pa(MAX_DMA_ADDRESS));

	if (ptr == NULL)
		panic("NO memory for memory less node\n");

	return ptr;
}
/**
 * pgdat_insert - insert the pgdat into global pgdat_list
 * @pgdat: the pgdat for a node.
 */
static void __init pgdat_insert(pg_data_t *pgdat)
{
	pg_data_t *prev = NULL, *next;

	/* Walk the list, sorted by node_id, to find the insertion point */
	for_each_pgdat(next)
		if (pgdat->node_id < next->node_id)
			break;
		else
			prev = next;

	if (prev) {
		prev->pgdat_next = pgdat;
		pgdat->pgdat_next = next;
	} else {
		pgdat->pgdat_next = pgdat_list;
		pgdat_list = pgdat;
	}

	return;
}
/**
 * memory_less_nodes - allocate and initialize CPU only nodes pernode
 *	information.
 */
static void __init memory_less_nodes(void)
{
	unsigned long pernodesize;
	void *pernode;
	int node;

	for_each_node_mask(node, memory_less_mask) {
		pernodesize = compute_pernodesize(node);
		/* node 0 has no stride, so use 1MB alignment instead */
		pernode = memory_less_node_alloc(node, pernodesize,
			(node) ? (node * PERCPU_PAGE_SIZE) : (1024*1024));
		fill_pernode(node, __pa(pernode), pernodesize);
	}

	return;
}
/**
 * find_memory - walk the EFI memory map and setup the bootmem allocator
 *
 * Called early in boot to setup the bootmem allocator, and to
 * allocate the per-cpu and per-node structures.
 */
void __init find_memory(void)
{
	int node;

	reserve_memory();

	if (num_online_nodes() == 0) {
		printk(KERN_ERR "node info missing!\n");
		node_set_online(0);
	}

	nodes_or(memory_less_mask, memory_less_mask, node_online_map);
	min_low_pfn = -1;
	max_low_pfn = 0;
	/* These actually end up getting called by call_pernode_memory() */
	efi_memmap_walk(filter_rsvd_memory, build_node_maps);
	efi_memmap_walk(filter_rsvd_memory, find_pernode_space);

	for_each_online_node(node)
		if (mem_data[node].bootmem_data.node_low_pfn) {
			node_clear(node, memory_less_mask);
			mem_data[node].min_pfn = ~0UL;
		}
	/*
	 * Initialize the boot memory maps in reverse order since that's
	 * what the bootmem allocator expects
	 */
	for (node = MAX_NUMNODES - 1; node >= 0; node--) {
		unsigned long pernode, pernodesize, map;
		struct bootmem_data *bdp;

		if (!node_online(node))
			continue;
		else if (node_isset(node, memory_less_mask))
			continue;

		bdp = &mem_data[node].bootmem_data;
		pernode = mem_data[node].pernode_addr;
		pernodesize = mem_data[node].pernode_size;
		map = pernode + pernodesize;

		init_bootmem_node(mem_data[node].pgdat,
				  map>>PAGE_SHIFT,
				  bdp->node_boot_start>>PAGE_SHIFT,
				  bdp->node_low_pfn);
	}
	efi_memmap_walk(filter_rsvd_memory, free_node_bootmem);

	reserve_pernode_space();
	memory_less_nodes();
	initialize_pernode_data();

	max_pfn = max_low_pfn;

	find_initrd();
}
/**
 * per_cpu_init - setup per-cpu variables
 *
 * find_pernode_space() does most of this already, we just need to set
 * local_per_cpu_offset
 */
void *per_cpu_init(void)
{
	int cpu;

	if (smp_processor_id() == 0) {
		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			per_cpu(local_per_cpu_offset, cpu) =
				__per_cpu_offset[cpu];
		}
	}

	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
}
/**
 * show_mem - give short summary of memory stats
 *
 * Shows a simple page count of reserved and used pages in the system.
 * For discontig machines, it does this on a per-pgdat basis.
 */
void show_mem(void)
{
	int i, total_reserved = 0;
	int total_shared = 0, total_cached = 0;
	unsigned long total_present = 0;
	pg_data_t *pgdat;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_pgdat(pgdat) {
		unsigned long present = pgdat->node_present_pages;
		int shared = 0, cached = 0, reserved = 0;

		printk("Node ID: %d\n", pgdat->node_id);
		for(i = 0; i < pgdat->node_spanned_pages; i++) {
			struct page *page = pgdat_page_nr(pgdat, i);

			if (!ia64_pfn_valid(pgdat->node_start_pfn+i))
				continue;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page)-1;
		}
		total_present += present;
		total_reserved += reserved;
		total_cached += cached;
		total_shared += shared;
		printk("\t%ld pages of RAM\n", present);
		printk("\t%d reserved pages\n", reserved);
		printk("\t%d pages shared\n", shared);
		printk("\t%d pages swap cached\n", cached);
	}
	printk("%ld pages of RAM\n", total_present);
	printk("%d reserved pages\n", total_reserved);
	printk("%d pages shared\n", total_shared);
	printk("%d pages swap cached\n", total_cached);
	printk("Total of %ld pages in page table cache\n",
		pgtable_quicklist_total_size());
	printk("%d free buffer pages\n", nr_free_buffer_pages());
}
/**
 * call_pernode_memory - use SRAT to call callback functions with node info
 * @start: physical start of range
 * @len: length of range
 * @arg: function to call for each range
 *
 * efi_memmap_walk() knows nothing about layout of memory across nodes. Find
 * out to which node a block of memory belongs.  Ignore memory that we cannot
 * identify, and split blocks that run across multiple nodes.
 *
 * Take this opportunity to round the start address up and the end address
 * down to page boundaries.
 */
void call_pernode_memory(unsigned long start, unsigned long len, void *arg)
{
	unsigned long rs, re, end = start + len;
	void (*func)(unsigned long, unsigned long, int);
	int i;

	start = PAGE_ALIGN(start);
	end &= PAGE_MASK;
	if (start >= end)
		return;

	func = arg;

	if (!num_node_memblks) {
		/* No SRAT table, so assume one node (node 0) */
		if (start < end)
			(*func)(start, end - start, 0);
		return;
	}
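	/*
	 * Illustration: if a descriptor spans two SRAT memblks, e.g.
	 * [1GB, 3GB) with node 0 owning [0, 2GB) and node 1 owning
	 * [2GB, 4GB), the loop below invokes the callback twice:
	 * func(1GB, 1GB, 0) and func(2GB, 1GB, 1).
	 */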
	for (i = 0; i < num_node_memblks; i++) {
		rs = max(start, node_memblk[i].start_paddr);
		re = min(end, node_memblk[i].start_paddr +
			 node_memblk[i].size);

		if (rs < re)
			(*func)(rs, re - rs, node_memblk[i].nid);

		if (re == end)
			break;
	}
}
/**
 * count_node_pages - callback to build per-node memory info structures
 * @start: physical start of range
 * @len: length of range
 * @node: node where this range resides
 *
 * Each node has its own number of physical pages, DMAable pages, start, and
 * end page frame number.  This routine will be called by call_pernode_memory()
 * for each piece of usable memory and will setup these values for each node.
 * Very similar to build_maps().
 */
static __init int count_node_pages(unsigned long start, unsigned long len, int node)
{
	unsigned long end = start + len;

	mem_data[node].num_physpages += len >> PAGE_SHIFT;
	if (start <= __pa(MAX_DMA_ADDRESS))
		mem_data[node].num_dma_physpages +=
			(min(end, __pa(MAX_DMA_ADDRESS)) - start) >> PAGE_SHIFT;
	start = GRANULEROUNDDOWN(start);
	start = ORDERROUNDDOWN(start);
	end = GRANULEROUNDUP(end);
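	/*
	 * Example (assuming the default 16MB granule): a range starting at
	 * 16MB + 64KB is accounted above with its exact page count, but
	 * min_pfn/max_pfn below come from the granule-rounded span starting
	 * at 16MB, so the memmap always covers whole granules.
	 */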
	mem_data[node].max_pfn = max(mem_data[node].max_pfn,
				     end >> PAGE_SHIFT);
	mem_data[node].min_pfn = min(mem_data[node].min_pfn,
				     start >> PAGE_SHIFT);

	return 0;
}
/**
 * paging_init - setup page tables
 *
 * paging_init() sets up the page tables for each node of the system and frees
 * the bootmem allocator memory for general use.
 */
void __init paging_init(void)
{
	unsigned long max_dma;
	unsigned long zones_size[MAX_NR_ZONES];
	unsigned long zholes_size[MAX_NR_ZONES];
	unsigned long pfn_offset = 0;
	int node;

	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;

	efi_memmap_walk(filter_rsvd_memory, count_node_pages);

	vmalloc_end -= PAGE_ALIGN(max_low_pfn * sizeof(struct page));
	vmem_map = (struct page *) vmalloc_end;
	efi_memmap_walk(create_mem_map_page_table, NULL);
	printk("Virtual mem_map starts at 0x%p\n", vmem_map);
	for_each_online_node(node) {
		memset(zones_size, 0, sizeof(zones_size));
		memset(zholes_size, 0, sizeof(zholes_size));

		num_physpages += mem_data[node].num_physpages;

		if (mem_data[node].min_pfn >= max_dma) {
			/* All of this node's memory is above ZONE_DMA */
			zones_size[ZONE_NORMAL] = mem_data[node].max_pfn -
				mem_data[node].min_pfn;
			zholes_size[ZONE_NORMAL] = mem_data[node].max_pfn -
				mem_data[node].min_pfn -
				mem_data[node].num_physpages;
		} else if (mem_data[node].max_pfn < max_dma) {
			/* All of this node's memory is in ZONE_DMA */
			zones_size[ZONE_DMA] = mem_data[node].max_pfn -
				mem_data[node].min_pfn;
			zholes_size[ZONE_DMA] = mem_data[node].max_pfn -
				mem_data[node].min_pfn -
				mem_data[node].num_dma_physpages;
		} else {
			/* This node has memory in both zones */
			zones_size[ZONE_DMA] = max_dma -
				mem_data[node].min_pfn;
			zholes_size[ZONE_DMA] = zones_size[ZONE_DMA] -
				mem_data[node].num_dma_physpages;
			zones_size[ZONE_NORMAL] = mem_data[node].max_pfn -
				max_dma;
			zholes_size[ZONE_NORMAL] = zones_size[ZONE_NORMAL] -
				(mem_data[node].num_physpages -
				 mem_data[node].num_dma_physpages);
		}
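		/*
		 * In every branch above, zholes is the spanned pfn range
		 * minus the pages that actually exist; free_area_init_node()
		 * subtracts these holes when sizing each zone.
		 */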
		pfn_offset = mem_data[node].min_pfn;

		NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
		free_area_init_node(node, NODE_DATA(node), zones_size,
				    pfn_offset, zholes_size);
	}
	/*
	 * Make memory-less nodes become members of the known nodes.
	 */
	for_each_node_mask(node, memory_less_mask)
		pgdat_insert(mem_data[node].pgdat);

	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}