[MIPS] 64-bit Sibyte kernels need DMA32.
author    Ralf Baechle <ralf@linux-mips.org>
          Sat, 3 Nov 2007 02:05:43 +0000 (02:05 +0000)
committer Ralf Baechle <ralf@linux-mips.org>
          Mon, 26 Nov 2007 17:26:14 +0000 (17:26 +0000)
Sibyte SOCs only have 32-bit PCI.  Due to the sparse use of the address
space, only the first 1GB of memory is mapped at physical addresses
below 1GB.  If a system has more than 1GB of memory, 32-bit DMA will
not be able to reach all of it.

For now this patch is good enough to keep Sibyte users happy, but it seems
that eventually something like swiotlb will be needed for Sibyte.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
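
As an editorial illustration of what the new zone buys (not part of this
commit; the driver and its mydev_probe() function are made up), a PCI
device whose coherent DMA mask is narrower than 32 bits now has its
coherent allocations steered into ZONE_DMA32, i.e. below the 4GB
boundary, which on Sibyte means into the first 1GB of memory:

#include <linux/pci.h>
#include <linux/dma-mapping.h>

static int mydev_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	dma_addr_t ring_dma;
	void *ring;

	/* Hypothetical device that can only address the low 1GB. */
	if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(30)))
		return -EIO;

	/*
	 * With CONFIG_ZONE_DMA32 and a coherent mask below 4GB,
	 * massage_gfp_flags() adds __GFP_DMA32 to this allocation, so the
	 * ring buffer comes from memory the device can actually reach.
	 */
	ring = dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &ring_dma,
				  GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	/* ... the rest of the probe would go here ... */
	return 0;
}
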
arch/mips/Kconfig
arch/mips/kernel/setup.c
arch/mips/mm/dma-default.c
arch/mips/mm/init.c
include/asm-mips/dma.h

diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 7750829b416a8ee3f9cceba0668c6c891923994f..4c6ba7b30a68e4956708db406994d3603a9eb8b3 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -515,6 +515,7 @@ config SIBYTE_SWARM
        select SYS_SUPPORTS_HIGHMEM
        select SYS_SUPPORTS_KGDB
        select SYS_SUPPORTS_LITTLE_ENDIAN
+       select ZONE_DMA32 if 64BIT
 
 config SIBYTE_LITTLESUR
        bool "Sibyte BCM91250C2-LittleSur"
@@ -565,6 +566,7 @@ config SIBYTE_BIGSUR
        select SYS_SUPPORTS_BIG_ENDIAN
        select SYS_SUPPORTS_HIGHMEM
        select SYS_SUPPORTS_LITTLE_ENDIAN
+       select ZONE_DMA32 if 64BIT
 
 config SNI_RM
        bool "SNI RM200/300/400"
@@ -1664,6 +1666,9 @@ config ARCH_DISCONTIGMEM_ENABLE
          or have huge holes in the physical address space for other reasons.
          See <file:Documentation/vm/numa> for more.
 
+config ARCH_POPULATES_NODE_MAP
+       def_bool y
+
 config ARCH_SPARSEMEM_ENABLE
        bool
        select SPARSEMEM_STATIC
@@ -1969,6 +1974,9 @@ config I8253
 config PCSPEAKER
        bool
 
+config ZONE_DMA32
+       bool
+
 source "drivers/pcmcia/Kconfig"
 
 source "drivers/pci/hotplug/Kconfig"
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index a06a27d6cfcdef8050f57f48d31d933c00b9ddbc..7f6ddcb5d48532657a52b80461644dd05c467551 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -269,7 +269,7 @@ static void __init bootmem_init(void)
 
 static void __init bootmem_init(void)
 {
-       unsigned long reserved_end;
+       unsigned long init_begin, reserved_end;
        unsigned long mapstart = ~0UL;
        unsigned long bootmap_size;
        int i;
@@ -342,6 +342,35 @@ static void __init bootmem_init(void)
         */
        bootmap_size = init_bootmem_node(NODE_DATA(0), mapstart,
                                         min_low_pfn, max_low_pfn);
+
+
+       init_begin = PFN_UP(__pa_symbol(&__init_begin));
+       for (i = 0; i < boot_mem_map.nr_map; i++) {
+               unsigned long start, end;
+
+               start = PFN_UP(boot_mem_map.map[i].addr);
+               end = PFN_DOWN(boot_mem_map.map[i].addr
+                               + boot_mem_map.map[i].size);
+
+               if (start <= init_begin)
+                       start = init_begin;
+               if (start >= end)
+                       continue;
+
+#ifndef CONFIG_HIGHMEM
+               if (end > max_low_pfn)
+                       end = max_low_pfn;
+
+               /*
+                * ... finally, is the area going away?
+                */
+               if (end <= start)
+                       continue;
+#endif
+
+               add_active_range(0, start, end);
+       }
+
        /*
         * Register fully available low RAM pages with the bootmem allocator.
         */
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 33519ce4954043c8c16683ce88fa1b82be36b5e1..ae76795685cc2ee2789596dfbdfeba6c40b47808 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -40,16 +40,38 @@ static inline int cpu_is_noncoherent_r10000(struct device *dev)
               current_cpu_type() == CPU_R12000);
 }
 
+static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
+{
+       /* ignore region specifiers */
+       gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
+
+#ifdef CONFIG_ZONE_DMA
+       if (dev == NULL)
+               gfp |= __GFP_DMA;
+       else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
+               gfp |= __GFP_DMA;
+       else
+#endif
+#ifdef CONFIG_ZONE_DMA32
+            if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
+               gfp |= __GFP_DMA32;
+       else
+#endif
+               ;
+
+       /* Don't invoke OOM killer */
+       gfp |= __GFP_NORETRY;
+
+       return gfp;
+}
+
 void *dma_alloc_noncoherent(struct device *dev, size_t size,
        dma_addr_t * dma_handle, gfp_t gfp)
 {
        void *ret;
 
-       /* ignore region specifiers */
-       gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
+       gfp = massage_gfp_flags(dev, gfp);
 
-       if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
-               gfp |= GFP_DMA;
        ret = (void *) __get_free_pages(gfp, get_order(size));
 
        if (ret != NULL) {
@@ -67,11 +89,8 @@ void *dma_alloc_coherent(struct device *dev, size_t size,
 {
        void *ret;
 
-       /* ignore region specifiers */
-       gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
+       gfp = massage_gfp_flags(dev, gfp);
 
-       if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
-               gfp |= GFP_DMA;
        ret = (void *) __get_free_pages(gfp, get_order(size));
 
        if (ret) {
@@ -343,7 +362,7 @@ int dma_supported(struct device *dev, u64 mask)
         * so we can't guarantee allocations that must be
         * within a tighter range than GFP_DMA..
         */
-       if (mask < 0x00ffffff)
+       if (mask < DMA_BIT_MASK(24))
                return 0;
 
        return 1;
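
The #ifdef chain in the new massage_gfp_flags() relies on a dangling
else that continues across preprocessor blocks, which is easy to
misread.  As an editorial sketch (plain userspace C, not kernel code;
pick_zone() and BIT_MASK_N() are made-up names, the latter mirroring the
kernel's DMA_BIT_MASK()), the zone choice reduces to the following when
both CONFIG_ZONE_DMA and CONFIG_ZONE_DMA32 are enabled; the
__GFP_NORETRY part is left out:

#include <stdio.h>
#include <stdint.h>

/* Mirrors DMA_BIT_MASK(n) for this sketch. */
#define BIT_MASK_N(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

static const char *pick_zone(int have_dev, uint64_t coherent_dma_mask)
{
	if (!have_dev)
		return "__GFP_DMA";		/* no device info, be conservative */
	if (coherent_dma_mask < BIT_MASK_N(24))
		return "__GFP_DMA";		/* ISA-class devices, below 16MB */
	if (coherent_dma_mask < BIT_MASK_N(32))
		return "__GFP_DMA32";		/* below 4GB, e.g. narrow 32-bit PCI */
	return "(no zone modifier)";		/* device can reach all of memory */
}

int main(void)
{
	printf("%s\n", pick_zone(1, BIT_MASK_N(30)));	/* __GFP_DMA32 */
	printf("%s\n", pick_zone(1, BIT_MASK_N(64)));	/* no zone modifier */
	printf("%s\n", pick_zone(0, 0));		/* __GFP_DMA */
	return 0;
}
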
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index ec3b9e9f30f4fd2ae5ad9edc9951564889eae58a..480dec04f552cbe086f6fa96aedbaf73ff09ff7c 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -347,11 +347,8 @@ static int __init page_is_ram(unsigned long pagenr)
 
 void __init paging_init(void)
 {
-       unsigned long zones_size[MAX_NR_ZONES] = { 0, };
-#ifndef CONFIG_FLATMEM
-       unsigned long zholes_size[MAX_NR_ZONES] = { 0, };
-       unsigned long i, j, pfn;
-#endif
+       unsigned long max_zone_pfns[MAX_NR_ZONES];
+       unsigned long lastpfn;
 
        pagetable_init();
 
@@ -361,35 +358,27 @@ void __init paging_init(void)
        kmap_coherent_init();
 
 #ifdef CONFIG_ZONE_DMA
-       if (min_low_pfn < MAX_DMA_PFN && MAX_DMA_PFN <= max_low_pfn) {
-               zones_size[ZONE_DMA] = MAX_DMA_PFN - min_low_pfn;
-               zones_size[ZONE_NORMAL] = max_low_pfn - MAX_DMA_PFN;
-       } else if (max_low_pfn < MAX_DMA_PFN)
-               zones_size[ZONE_DMA] = max_low_pfn - min_low_pfn;
-       else
+       max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
 #endif
-       zones_size[ZONE_NORMAL] = max_low_pfn - min_low_pfn;
-
+#ifdef CONFIG_ZONE_DMA32
+       max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
+#endif
+       max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+       lastpfn = max_low_pfn;
 #ifdef CONFIG_HIGHMEM
-       zones_size[ZONE_HIGHMEM] = highend_pfn - highstart_pfn;
+       max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
+       lastpfn = highend_pfn;
 
-       if (cpu_has_dc_aliases && zones_size[ZONE_HIGHMEM]) {
+       if (cpu_has_dc_aliases && max_low_pfn != highend_pfn) {
                printk(KERN_WARNING "This processor doesn't support highmem."
-                      " %ldk highmem ignored\n", zones_size[ZONE_HIGHMEM]);
-               zones_size[ZONE_HIGHMEM] = 0;
+                      " %ldk highmem ignored\n",
+                      (highend_pfn - max_low_pfn) << (PAGE_SHIFT - 10));
+               max_zone_pfns[ZONE_HIGHMEM] = max_low_pfn;
+               lastpfn = max_low_pfn;
        }
 #endif
 
-#ifdef CONFIG_FLATMEM
-       free_area_init(zones_size);
-#else
-       pfn = min_low_pfn;
-       for (i = 0; i < MAX_NR_ZONES; i++)
-               for (j = 0; j < zones_size[i]; j++, pfn++)
-                       if (!page_is_ram(pfn))
-                               zholes_size[i]++;
-       free_area_init_node(0, NODE_DATA(0), zones_size, 0, zholes_size);
-#endif
+       free_area_init_nodes(max_zone_pfns);
 }
 
 static struct kcore_list kcore_mem, kcore_vmalloc;
diff --git a/include/asm-mips/dma.h b/include/asm-mips/dma.h
index 833437d31ef1634c1286d957b8cca8971bc158f2..d6a6c21f16db507f7e8f362f63427fdfbd922d43 100644
--- a/include/asm-mips/dma.h
+++ b/include/asm-mips/dma.h
@@ -92,6 +92,7 @@
 #define MAX_DMA_ADDRESS                (PAGE_OFFSET + 0x01000000)
 #endif
 #define MAX_DMA_PFN            PFN_DOWN(virt_to_phys((void *)MAX_DMA_ADDRESS))
+#define MAX_DMA32_PFN          (1UL << (32 - PAGE_SHIFT))
 
 /* 8237 DMA controllers */
 #define IO_DMA1_BASE   0x00    /* 8 bit slave DMA, channels 0..3 */
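
For reference, MAX_DMA32_PFN is simply the page frame number of the 4GB
boundary.  A quick editorial sanity check of the arithmetic, assuming
the usual 4KB page size (PAGE_SHIFT = 12):

#include <stdio.h>

#define PAGE_SHIFT	12			/* assumed 4KB pages */
#define MAX_DMA32_PFN	(1UL << (32 - PAGE_SHIFT))

int main(void)
{
	unsigned long long bytes = (unsigned long long)MAX_DMA32_PFN << PAGE_SHIFT;

	/* 1UL << 20 = 0x100000 page frames; 0x100000 * 4KB = 4GB. */
	printf("MAX_DMA32_PFN = %#lx\n", (unsigned long)MAX_DMA32_PFN);
	printf("covers %llu bytes (%llu GB)\n", bytes, bytes >> 30);
	return 0;
}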