diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 93a5c53e3423fe7c5663b411cfe53cd82da73ad9..16def4dcff6d33ccadcf2b2b53c65628519fa901 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -31,6 +31,7 @@
 #include <linux/initrd.h>
 #include <linux/pagemap.h>
 #include <linux/suspend.h>
+#include <linux/lmb.h>
 
 #include <asm/pgalloc.h>
 #include <asm/prom.h>
@@ -42,7 +43,6 @@
 #include <asm/machdep.h>
 #include <asm/btext.h>
 #include <asm/tlb.h>
-#include <asm/lmb.h>
 #include <asm/sections.h>
 #include <asm/vdso.h>
 
@@ -111,7 +111,7 @@ int memory_add_physaddr_to_nid(u64 start)
 }
 #endif
 
-int __devinit arch_add_memory(int nid, u64 start, u64 size)
+int arch_add_memory(int nid, u64 start, u64 size)
 {
        struct pglist_data *pgdata;
        struct zone *zone;
@@ -129,6 +129,39 @@ int __devinit arch_add_memory(int nid, u64 start, u64 size)
        return __add_pages(zone, start_pfn, nr_pages);
 }
 
+#ifdef CONFIG_MEMORY_HOTREMOVE
+int remove_memory(u64 start, u64 size)
+{
+       unsigned long start_pfn, end_pfn;
+       int ret;
+
+       start_pfn = start >> PAGE_SHIFT;
+       end_pfn = start_pfn + (size >> PAGE_SHIFT);
+       ret = offline_pages(start_pfn, end_pfn, 120 * HZ);
+       if (ret)
+               goto out;
+       /* Arch-specific calls go here - next patch */
+out:
+       return ret;
+}
+#endif /* CONFIG_MEMORY_HOTREMOVE */
+
+/*
+ * walk_memory_resource() needs to make sure there are no holes in a given
+ * memory range.  On PPC64, since this range comes from /sysfs, the range
+ * is guaranteed to be valid, non-overlapping and cannot contain any
+ * holes.  By the time we get here (memory add or remove), /proc/device-tree
+ * is updated and correct.  The only reason we would need to check against the
+ * device-tree is if user-land were allowed to specify a memory range through
+ * a system call/ioctl etc. instead of doing offline/online through /sysfs.
+ */
+int
+walk_memory_resource(unsigned long start_pfn, unsigned long nr_pages, void *arg,
+                       int (*func)(unsigned long, unsigned long, void *))
+{
+       return  (*func)(start_pfn, nr_pages, arg);
+}
+
 #endif /* CONFIG_MEMORY_HOTPLUG */
 
 void show_mem(void)
@@ -142,7 +175,6 @@ void show_mem(void)
 
        printk("Mem-info:\n");
        show_free_areas();
-       printk("Free swap:       %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
        for_each_online_pgdat(pgdat) {
                unsigned long flags;
                pgdat_resize_lock(pgdat, &flags);
@@ -184,9 +216,11 @@ void __init do_init_bootmem(void)
        unsigned long total_pages;
        int boot_mapsize;
 
-       max_pfn = total_pages = lmb_end_of_DRAM() >> PAGE_SHIFT;
+       max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
+       total_pages = (lmb_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT;
 #ifdef CONFIG_HIGHMEM
        total_pages = total_lowmem >> PAGE_SHIFT;
+       max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
 #endif
 
        /*
@@ -212,18 +246,18 @@ void __init do_init_bootmem(void)
         * present.
         */
 #ifdef CONFIG_HIGHMEM
-       free_bootmem_with_active_regions(0, total_lowmem >> PAGE_SHIFT);
+       free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);
 
        /* reserve the sections we're already using */
        for (i = 0; i < lmb.reserved.cnt; i++) {
                unsigned long addr = lmb.reserved.region[i].base +
                                     lmb_size_bytes(&lmb.reserved, i) - 1;
-               if (addr < total_lowmem)
+               if (addr < lowmem_end_addr)
                        reserve_bootmem(lmb.reserved.region[i].base,
                                        lmb_size_bytes(&lmb.reserved, i),
                                        BOOTMEM_DEFAULT);
-               else if (lmb.reserved.region[i].base < total_lowmem) {
-                       unsigned long adjusted_size = total_lowmem -
+               else if (lmb.reserved.region[i].base < lowmem_end_addr) {
+                       unsigned long adjusted_size = lowmem_end_addr -
                                      lmb.reserved.region[i].base;
                        reserve_bootmem(lmb.reserved.region[i].base,
                                        adjusted_size, BOOTMEM_DEFAULT);
@@ -293,7 +327,7 @@ void __init paging_init(void)
               (top_of_ram - total_ram) >> 20);
        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
 #ifdef CONFIG_HIGHMEM
-       max_zone_pfns[ZONE_DMA] = total_lowmem >> PAGE_SHIFT;
+       max_zone_pfns[ZONE_DMA] = lowmem_end_addr >> PAGE_SHIFT;
        max_zone_pfns[ZONE_HIGHMEM] = top_of_ram >> PAGE_SHIFT;
 #else
        max_zone_pfns[ZONE_DMA] = top_of_ram >> PAGE_SHIFT;
@@ -348,7 +382,7 @@ void __init mem_init(void)
        {
                unsigned long pfn, highmem_mapnr;
 
-               highmem_mapnr = total_lowmem >> PAGE_SHIFT;
+               highmem_mapnr = lowmem_end_addr >> PAGE_SHIFT;
                for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
                        struct page *page = pfn_to_page(pfn);
                        if (lmb_is_reserved(pfn << PAGE_SHIFT))
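---

For context, a minimal sketch (not part of the patch) of how a caller might use the walk_memory_resource() helper added above. On powerpc the walker simply invokes the callback once for the entire [start_pfn, start_pfn + nr_pages) range, since ranges coming through /sysfs contain no holes. The callback and wrapper names below are illustrative assumptions, not code from this diff.

/* Hypothetical callback: accumulate the number of pages handed to us
 * by walk_memory_resource().  On powerpc it is called exactly once
 * for the whole range. */
static int count_pages_cb(unsigned long start_pfn, unsigned long nr_pages,
			  void *arg)
{
	unsigned long *total = arg;

	*total += nr_pages;
	return 0;
}

static unsigned long count_range_pages(unsigned long start_pfn,
				       unsigned long nr_pages)
{
	unsigned long total = 0;

	/* Return value is whatever the callback returns (0 on success here). */
	walk_memory_resource(start_pfn, nr_pages, &total, count_pages_cb);
	return total;
}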