diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 1e79551231b91de2bdac6de77ef03b688c31434f..7e9c275ea148579b8b792583f497ae15e21927f3 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -17,6 +17,7 @@
 #include <linux/bootmem.h>
 #include <linux/efi.h>
 #include <linux/mm.h>
+#include <linux/nmi.h>
 #include <linux/swap.h>
 
 #include <asm/meminit.h>
@@ -30,70 +31,74 @@ static unsigned long max_gap;
 #endif
 
 /**
- * show_mem - display a memory statistics summary
+ * show_mem - give short summary of memory stats
  *
- * Just walks the pages in the system and describes where they're allocated.
+ * Shows a simple page count of reserved and used pages in the system.
+ * For discontig machines, it does this on a per-pgdat basis.
  */
-void
-show_mem (void)
+void show_mem(void)
 {
-       int i, total = 0, reserved = 0;
-       int shared = 0, cached = 0;
+       int i, total_reserved = 0;
+       int total_shared = 0, total_cached = 0;
+       unsigned long total_present = 0;
+       pg_data_t *pgdat;
 
        printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
-
        printk(KERN_INFO "Free swap:       %6ldkB\n",
               nr_swap_pages<<(PAGE_SHIFT-10));
-       i = max_mapnr;
-       for (i = 0; i < max_mapnr; i++) {
-               if (!pfn_valid(i)) {
+       printk(KERN_INFO "Node memory in pages:\n");
+       for_each_online_pgdat(pgdat) {
+               unsigned long present;
+               unsigned long flags;
+               int shared = 0, cached = 0, reserved = 0;
+
+               pgdat_resize_lock(pgdat, &flags);
+               present = pgdat->node_present_pages;
+               for(i = 0; i < pgdat->node_spanned_pages; i++) {
+                       struct page *page;
+                       if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
+                               touch_nmi_watchdog();
+                       if (pfn_valid(pgdat->node_start_pfn + i))
+                               page = pfn_to_page(pgdat->node_start_pfn + i);
+                       else {
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-                       if (max_gap < LARGE_GAP)
-                               continue;
-                       i = vmemmap_find_next_valid_pfn(0, i) - 1;
+                               if (max_gap < LARGE_GAP)
+                                       continue;
 #endif
-                       continue;
+                               i = vmemmap_find_next_valid_pfn(pgdat->node_id,
+                                        i) - 1;
+                               continue;
+                       }
+                       if (PageReserved(page))
+                               reserved++;
+                       else if (PageSwapCache(page))
+                               cached++;
+                       else if (page_count(page))
+                               shared += page_count(page)-1;
                }
-               total++;
-               if (PageReserved(mem_map+i))
-                       reserved++;
-               else if (PageSwapCache(mem_map+i))
-                       cached++;
-               else if (page_count(mem_map + i))
-                       shared += page_count(mem_map + i) - 1;
+               pgdat_resize_unlock(pgdat, &flags);
+               total_present += present;
+               total_reserved += reserved;
+               total_cached += cached;
+               total_shared += shared;
+               printk(KERN_INFO "Node %4d:  RAM: %11ld, rsvd: %8d, "
+                      "shrd: %10d, swpd: %10d\n", pgdat->node_id,
+                      present, reserved, shared, cached);
        }
-       printk(KERN_INFO "%d pages of RAM\n", total);
-       printk(KERN_INFO "%d reserved pages\n", reserved);
-       printk(KERN_INFO "%d pages shared\n", shared);
-       printk(KERN_INFO "%d pages swap cached\n", cached);
-       printk(KERN_INFO "%ld pages in page table cache\n",
-              pgtable_quicklist_total_size());
+       printk(KERN_INFO "%ld pages of RAM\n", total_present);
+       printk(KERN_INFO "%d reserved pages\n", total_reserved);
+       printk(KERN_INFO "%d pages shared\n", total_shared);
+       printk(KERN_INFO "%d pages swap cached\n", total_cached);
+       printk(KERN_INFO "Total of %ld pages in page table cache\n",
+              quicklist_total_size());
+       printk(KERN_INFO "%d free buffer pages\n", nr_free_buffer_pages());
 }
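
For readers skimming the new loop: per node it simply classifies each valid page as reserved, swap-cached, or shared (a page_count above one) and prints one summary line. A minimal user-space sketch of that tally, using mock types rather than the kernel's struct page and pg_data_t:

/* Illustrative sketch only: mock_page/mock_node stand in for the kernel's
 * struct page and pg_data_t; they are not the real structures. */
#include <stdio.h>

enum page_state { PG_FREE, PG_RESERVED, PG_SWAPCACHE, PG_MAPPED };

struct mock_page { enum page_state state; int count; };
struct mock_node { int id; long present; struct mock_page *pages; };

static void tally_node(const struct mock_node *n)
{
	long reserved = 0, cached = 0, shared = 0;
	long i;

	for (i = 0; i < n->present; i++) {
		const struct mock_page *p = &n->pages[i];

		if (p->state == PG_RESERVED)
			reserved++;
		else if (p->state == PG_SWAPCACHE)
			cached++;
		else if (p->count)
			shared += p->count - 1;	/* references beyond the first count as "shared" */
	}
	printf("Node %4d:  RAM: %11ld, rsvd: %8ld, shrd: %10ld, swpd: %10ld\n",
	       n->id, n->present, reserved, shared, cached);
}

int main(void)
{
	struct mock_page pages[] = {
		{ PG_RESERVED, 1 }, { PG_SWAPCACHE, 1 }, { PG_MAPPED, 3 }, { PG_FREE, 0 },
	};
	struct mock_node node0 = { 0, 4, pages };

	tally_node(&node0);
	return 0;
}

The real loop additionally skips invalid pfns (jumping over holes via vmemmap_find_next_valid_pfn()) and touches the NMI watchdog every MAX_ORDER_NR_PAGES iterations so a long scan does not trip the watchdog.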
 
+
 /* physical address where the bootmem map is located */
 unsigned long bootmap_start;
 
-/**
- * find_max_pfn - adjust the maximum page number callback
- * @start: start of range
- * @end: end of range
- * @arg: address of pointer to global max_pfn variable
- *
- * Passed as a callback function to efi_memmap_walk() to determine the highest
- * available page frame number in the system.
- */
-int
-find_max_pfn (unsigned long start, unsigned long end, void *arg)
-{
-       unsigned long *max_pfnp = arg, pfn;
-
-       pfn = (PAGE_ALIGN(end - 1) - PAGE_OFFSET) >> PAGE_SHIFT;
-       if (pfn > *max_pfnp)
-               *max_pfnp = pfn;
-       return 0;
-}
-
 /**
  * find_bootmap_location - callback to find a memory area for the bootmap
  * @start: start of region
@@ -141,6 +146,46 @@ find_bootmap_location (unsigned long start, unsigned long end, void *arg)
        return 0;
 }
 
+#ifdef CONFIG_SMP
+static void *cpu_data;
+/**
+ * per_cpu_init - setup per-cpu variables
+ *
+ * Allocate and setup per-cpu data areas.
+ */
+void * __cpuinit
+per_cpu_init (void)
+{
+       int cpu;
+       static int first_time=1;
+
+       /*
+        * get_free_pages() cannot be used before cpu_init() done.  BSP
+        * allocates "NR_CPUS" pages for all CPUs to avoid that AP calls
+        * get_zeroed_page().
+        */
+       if (first_time) {
+               first_time=0;
+               for (cpu = 0; cpu < NR_CPUS; cpu++) {
+                       memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
+                       __per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
+                       cpu_data += PERCPU_PAGE_SIZE;
+                       per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
+               }
+       }
+       return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
+}
+
+static inline void
+alloc_per_cpu_data(void)
+{
+       cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
+                                  PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
+}
+#else
+#define alloc_per_cpu_data() do { } while (0)
+#endif /* CONFIG_SMP */
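
The per_cpu_init()/alloc_per_cpu_data() pair moved into this file implements the usual base-plus-offset per-CPU scheme: one bootmem block holds a copy of the per-CPU template for every possible CPU, and __per_cpu_offset[cpu] records how far that CPU's copy sits from the template. A hedged user-space sketch of the idea (all names and sizes below are invented for illustration, not the kernel's API):

/* Illustrative base-plus-offset per-CPU layout; FAKE_NR_CPUS, AREA_SIZE and
 * the variable names are made up. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

#define FAKE_NR_CPUS	4
#define AREA_SIZE	64	/* stand-in for __per_cpu_end - __per_cpu_start */

static char template_area[AREA_SIZE];		/* the per-CPU template */
static uintptr_t cpu_offset[FAKE_NR_CPUS];	/* plays the role of __per_cpu_offset[] */

static void setup_per_cpu_areas(char *block)
{
	int cpu;

	for (cpu = 0; cpu < FAKE_NR_CPUS; cpu++) {
		memcpy(block, template_area, AREA_SIZE);	/* replicate the template */
		cpu_offset[cpu] = (uintptr_t)block - (uintptr_t)template_area;
		block += AREA_SIZE;				/* next CPU's copy */
	}
}

/* Reach a given CPU's copy: template address plus that CPU's offset. */
static void *this_cpu_ptr_of(void *template_addr, int cpu)
{
	return (char *)((uintptr_t)template_addr + cpu_offset[cpu]);
}

int main(void)
{
	char *block = malloc((size_t)AREA_SIZE * FAKE_NR_CPUS);

	if (!block)
		return 1;
	strcpy(template_area, "per-cpu template");
	setup_per_cpu_areas(block);
	/* CPU 2 sees its own private copy of the template contents. */
	printf("cpu2: %s\n", (char *)this_cpu_ptr_of(template_area, 2));
	free(block);
	return 0;
}

In the patch the replication is done out of a bootmem block reserved by alloc_per_cpu_data(), because, as the comment above notes, the page allocator is not yet usable when the APs first need their areas.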
+
 /**
  * find_memory - setup memory map
  *
@@ -155,9 +200,10 @@ find_memory (void)
        reserve_memory();
 
        /* first find highest page frame number */
-       max_pfn = 0;
-       efi_memmap_walk(find_max_pfn, &max_pfn);
-
+       min_low_pfn = ~0UL;
+       max_low_pfn = 0;
+       efi_memmap_walk(find_max_min_low_pfn, NULL);
+       max_pfn = max_low_pfn;
        /* how many bytes to cover all the pages */
        bootmap_size = bootmem_bootmap_pages(max_pfn) << PAGE_SHIFT;
 
@@ -167,7 +213,8 @@ find_memory (void)
        if (bootmap_start == ~0UL)
                panic("Cannot find %ld bytes for bootmap\n", bootmap_size);
 
-       bootmap_size = init_bootmem(bootmap_start >> PAGE_SHIFT, max_pfn);
+       bootmap_size = init_bootmem_node(NODE_DATA(0),
+                       (bootmap_start >> PAGE_SHIFT), 0, max_pfn);
 
        /* Free all available memory, then mark bootmem-map as being in use. */
        efi_memmap_walk(filter_rsvd_memory, free_bootmem);
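
The bootmap sizing in find_memory() comes down to one bit per page frame, rounded up to whole pages; init_bootmem_node() then places that bitmap at bootmap_start for node 0, covering pfns 0..max_pfn. A small worked example of the arithmetic, assuming 4 KiB pages purely for illustration:

/* Worked example of the bitmap sizing: one bit per page frame, rounded up to
 * whole pages.  EX_PAGE_SHIFT is an assumed example value (4 KiB pages). */
#include <stdio.h>

#define EX_PAGE_SHIFT	12UL
#define EX_PAGE_SIZE	(1UL << EX_PAGE_SHIFT)

static unsigned long example_bootmap_pages(unsigned long max_pfn)
{
	unsigned long bytes = (max_pfn + 7) / 8;		/* one bit per frame */

	return (bytes + EX_PAGE_SIZE - 1) >> EX_PAGE_SHIFT;	/* round up to pages */
}

int main(void)
{
	unsigned long max_pfn = 1UL << 20;	/* 1M frames, i.e. 4 GiB of 4 KiB pages */
	unsigned long pages = example_bootmap_pages(max_pfn);

	printf("max_pfn=%lu -> bitmap: %lu page(s), %lu bytes\n",
	       max_pfn, pages, pages << EX_PAGE_SHIFT);
	return 0;
}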
@@ -175,45 +222,8 @@ find_memory (void)
 
        find_initrd();
 
-#ifdef CONFIG_CRASH_DUMP
-       /* If we are doing a crash dump, we still need to know the real mem
-        * size before original memory map is * reset. */
-       saved_max_pfn = max_pfn;
-#endif
-}
-
-#ifdef CONFIG_SMP
-/**
- * per_cpu_init - setup per-cpu variables
- *
- * Allocate and setup per-cpu data areas.
- */
-void * __cpuinit
-per_cpu_init (void)
-{
-       void *cpu_data;
-       int cpu;
-       static int first_time=1;
-
-       /*
-        * get_free_pages() cannot be used before cpu_init() done.  BSP
-        * allocates "NR_CPUS" pages for all CPUs to avoid that AP calls
-        * get_zeroed_page().
-        */
-       if (first_time) {
-               first_time=0;
-               cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS,
-                                          PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
-               for (cpu = 0; cpu < NR_CPUS; cpu++) {
-                       memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
-                       __per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
-                       cpu_data += PERCPU_PAGE_SIZE;
-                       per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
-               }
-       }
-       return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
+       alloc_per_cpu_data();
 }
-#endif /* CONFIG_SMP */
 
 static int
 count_pages (u64 start, u64 end, void *arg)
@@ -237,9 +247,11 @@ paging_init (void)
        num_physpages = 0;
        efi_memmap_walk(count_pages, &num_physpages);
 
-       max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+#ifdef CONFIG_ZONE_DMA
+       max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
        max_zone_pfns[ZONE_DMA] = max_dma;
+#endif
        max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP