#include <linux/dma-mapping.h>
#include <linux/dmar.h>
#include <linux/bootmem.h>
#include <linux/pci.h>

#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/gart.h>
#include <asm/calgary.h>
#include <asm/amd_iommu.h>
int forbid_dac __read_mostly;
EXPORT_SYMBOL(forbid_dac);

const struct dma_mapping_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

static int iommu_sac_force __read_mostly;
#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;
/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;
/* This tells the BIO block layer to assume merging. Default to off
   because we cannot guarantee merging later. */
int iommu_bio_merge __read_mostly = 0;
EXPORT_SYMBOL(iommu_bio_merge);

dma_addr_t bad_dma_address __read_mostly = 0;
EXPORT_SYMBOL(bad_dma_address);
/* Dummy device used for NULL arguments (normally ISA). A smaller DMA
   mask would probably be better, but this is bug-to-bug compatible
   with older i386. */
struct device fallback_dev = {
	.bus_id = "fallback device",
	.coherent_dma_mask = DMA_32BIT_MASK,
	.dma_mask = &fallback_dev.coherent_dma_mask,
};
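/*
 * Validate @mask against dma_supported() and install it as the
 * device's streaming DMA mask.  A hypothetical driver probe (names
 * invented for illustration) would use it like:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_32BIT_MASK))
 *		return -ENODEV;
 */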
int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);
#ifdef CONFIG_X86_64
static __initdata void *dma32_bootmem_ptr;
static unsigned long dma32_bootmem_size __initdata = (128ULL<<20);

static int __init parse_dma32_size_opt(char *p)
{
	if (!p)
		return -EINVAL;
	dma32_bootmem_size = memparse(p, &p);
	return 0;
}
early_param("dma32_size", parse_dma32_size_opt);
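/*
 * Reserve a block of bootmem below 4GB early in boot;
 * pci_iommu_alloc() frees it again before the IOMMU backends probe, so
 * whichever backend is chosen can still find contiguous memory under
 * 4GB for its own tables and bounce buffers.  "dma32_size=" accepts
 * memparse() suffixes, e.g. dma32_size=256M.
 */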
void __init dma32_reserve_bootmem(void)
{
	unsigned long size, align;

	if (max_pfn <= MAX_DMA32_PFN)
		return;

	/*
	 * See aperture_64.c allocate_aperture() for the reason why
	 * 512M is used as the allocation goal here.
	 */
	align = 64ULL<<20;
	size = round_up(dma32_bootmem_size, align);
	dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align,
				 512ULL<<20);
	if (dma32_bootmem_ptr)
		dma32_bootmem_size = size;
	else
		dma32_bootmem_size = 0;
}
static void __init dma32_free_bootmem(void)
{
	if (max_pfn <= MAX_DMA32_PFN)
		return;

	if (!dma32_bootmem_ptr)
		return;

	free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size);

	dma32_bootmem_ptr = NULL;
	dma32_bootmem_size = 0;
}
void __init pci_iommu_alloc(void)
{
	/* free the range so iommu could get some range less than 4G */
	dma32_free_bootmem();
	/*
	 * The order of these functions is important for
	 * fall-back/fail-over reasons.
	 */
	gart_iommu_hole_init();

	detect_calgary();

	detect_intel_iommu();

	amd_iommu_detect();

#ifdef CONFIG_SWIOTLB
	pci_swiotlb_init();
#endif
}
#endif /* CONFIG_X86_64 */
/*
 * See <Documentation/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}

		if (!strncmp(p, "biomerge", 8)) {
			iommu_bio_merge = 4096;
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		if (!strncmp(p, "forcesac", 8))
			iommu_sac_force = 1;
		if (!strncmp(p, "allowdac", 8))
			forbid_dac = 0;
		if (!strncmp(p, "nodac", 5))
			forbid_dac = 1;
		if (!strncmp(p, "usedac", 6)) {
			forbid_dac = -1;
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif

		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);
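/*
 * Declared coherent memory (X86_32 only): a driver may donate a chunk
 * of device-addressable memory (e.g. RAM behind a PCI BAR) that
 * dma_alloc_coherent() then carves allocations out of via a page
 * bitmap.  A hypothetical caller (names invented for illustration):
 *
 *	if (dma_declare_coherent_memory(&pdev->dev, bar_bus_addr,
 *					bar_dev_addr, bar_size,
 *					DMA_MEMORY_MAP) != DMA_MEMORY_MAP)
 *		goto err_unmap;
 */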
#ifdef CONFIG_X86_32
int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
				dma_addr_t device_addr, size_t size, int flags)
{
	void __iomem *mem_base = NULL;
	int pages = size >> PAGE_SHIFT;
	int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);

	if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
		goto out;
	if (!size)
		goto out;
	if (dev->dma_mem)
		goto out;

	/* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */

	mem_base = ioremap(bus_addr, size);
	if (!mem_base)
		goto out;

	dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
	if (!dev->dma_mem)
		goto out;
	dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!dev->dma_mem->bitmap)
		goto free1_out;

	dev->dma_mem->virt_base = mem_base;
	dev->dma_mem->device_base = device_addr;
	dev->dma_mem->size = pages;
	dev->dma_mem->flags = flags;

	if (flags & DMA_MEMORY_MAP)
		return DMA_MEMORY_MAP;

	return DMA_MEMORY_IO;

 free1_out:
	kfree(dev->dma_mem);
 out:
	if (mem_base)
		iounmap(mem_base);
	return 0;
}
EXPORT_SYMBOL(dma_declare_coherent_memory);
void dma_release_declared_memory(struct device *dev)
{
	struct dma_coherent_mem *mem = dev->dma_mem;

	if (!mem)
		return;
	dev->dma_mem = NULL;
	iounmap(mem->virt_base);
	kfree(mem->bitmap);
	kfree(mem);
}
EXPORT_SYMBOL(dma_release_declared_memory);
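/*
 * Pin a fixed range inside declared memory (for instance a descriptor
 * ring the hardware expects at a known device address) so that
 * dma_alloc_coherent() can never hand it out.
 */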
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size)
{
	struct dma_coherent_mem *mem = dev->dma_mem;
	int pos, err;
	int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1);

	pages >>= PAGE_SHIFT;

	if (!mem)
		return ERR_PTR(-EINVAL);

	pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
	/* bitmap_allocate_region() takes a region order (log2 of the
	   page count); get_order(pages) would treat the count as bytes,
	   so convert it back to bytes first */
	err = bitmap_allocate_region(mem->bitmap, pos,
				     get_order(pages << PAGE_SHIFT));
	if (err != 0)
		return ERR_PTR(err);
	return mem->virt_base + (pos << PAGE_SHIFT);
}
EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
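/*
 * Try to satisfy an allocation from the device's declared memory.  A
 * nonzero return means the device has declared memory and the caller
 * must not fall back to the generic page allocator; if the region is
 * exhausted the caller ends up returning NULL instead.
 */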
static int dma_alloc_from_coherent_mem(struct device *dev, ssize_t size,
				       dma_addr_t *dma_handle, void **ret)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
	int order = get_order(size);

	if (mem) {
		int page = bitmap_find_free_region(mem->bitmap, mem->size,
						     order);
		if (page >= 0) {
			*dma_handle = mem->device_base + (page << PAGE_SHIFT);
			*ret = mem->virt_base + (page << PAGE_SHIFT);
			memset(*ret, 0, size);
		} else if (mem->flags & DMA_MEMORY_EXCLUSIVE)
			*ret = NULL;
	}
	return (mem != NULL);
}
static int dma_release_coherent(struct device *dev, int order, void *vaddr)
{
	struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;

	if (mem && vaddr >= mem->virt_base && vaddr <
		   (mem->virt_base + (mem->size << PAGE_SHIFT))) {
		int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;

		bitmap_release_region(mem->bitmap, page, order);
		return 1;
	}
	return 0;
}
#else
#define dma_alloc_from_coherent_mem(dev, size, handle, ret) (0)
#define dma_release_coherent(dev, order, vaddr) (0)
#endif /* CONFIG_X86_32 */
int dma_supported(struct device *dev, u64 mask)
{
#ifdef CONFIG_PCI
	if (mask > 0xffffffff && forbid_dac > 0) {
		printk(KERN_INFO "PCI: Disallowing DAC for device %s\n",
				 dev->bus_id);
		return 0;
	}
#endif

	if (dma_ops->dma_supported)
		return dma_ops->dma_supported(dev, mask);

	/* Copied from i386. Doesn't make much sense, because it will
	   only work for pci_alloc_coherent.
	   The caller just has to use GFP_DMA in this case. */
	if (mask < DMA_24BIT_MASK)
		return 0;

	/* Tell the device to use SAC when IOMMU force is on. This
	   allows the driver to use cheaper accesses in some cases.

	   Problem with this is that if we overflow the IOMMU area and
	   return DAC as fallback address the device may not handle it
	   correctly.

	   As a special case some controllers have a 39bit address
	   mode that is as efficient as 32bit (aic79xx). Don't force
	   SAC for these. Assume all masks <= 40 bits are of this
	   type. Normally this doesn't make any difference, but gives
	   more gentle handling of IOMMU overflow. */
	if (iommu_sac_force && (mask >= DMA_40BIT_MASK)) {
		printk(KERN_INFO "%s: Force SAC with mask %Lx\n",
				 dev->bus_id, mask);
		return 0;
	}

	return 1;
}
EXPORT_SYMBOL(dma_supported);
/* Allocate DMA memory on node near device */
static noinline struct page *
dma_alloc_pages(struct device *dev, gfp_t gfp, unsigned order)
{
	int node;

	node = dev_to_node(dev);

	return alloc_pages_node(node, gfp, order);
}
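/*
 * The allocation strategy below, in order of preference:
 *   1. memory the driver declared with dma_declare_coherent_memory()
 *   2. pages near the device that already satisfy the coherent mask,
 *      retrying with ZONE_DMA32/ZONE_DMA for small masks
 *   3. whatever the registered dma_ops backend (hardware IOMMU,
 *      swiotlb, nommu) can remap or allocate
 */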
/*
 * Allocate memory for a coherent mapping.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	void *memory = NULL;
	struct page *page;
	unsigned long dma_mask = 0;
	dma_addr_t bus;
	int noretry = 0;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent_mem(dev, size, dma_handle, &memory))
		return memory;

	if (!dev) {
		dev = &fallback_dev;
		gfp |= GFP_DMA;
	}
	dma_mask = dev->coherent_dma_mask;
	if (dma_mask == 0)
		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

	/* Device not DMA able */
	if (dev->dma_mask == NULL)
		return NULL;

	/* Don't invoke OOM killer or retry in lower 16MB DMA zone */
	if (gfp & __GFP_DMA)
		noretry = 1;

#ifdef CONFIG_X86_64
	/* Why <=? Even when the mask is smaller than 4GB it is often
	   larger than 16MB and in this case we have a chance of
	   finding fitting memory in the next higher zone first. If
	   not retry with true GFP_DMA. -AK */
	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
		gfp |= GFP_DMA32;
		if (dma_mask < DMA_32BIT_MASK)
			noretry = 1;
	}
#endif

 again:
	page = dma_alloc_pages(dev,
		noretry ? gfp | __GFP_NORETRY : gfp, get_order(size));
	if (page == NULL)
		return NULL;

	{
		int high, mmu;
		bus = page_to_phys(page);
		memory = page_address(page);
		high = (bus + size) >= dma_mask;
		mmu = high;
		if (force_iommu && !(gfp & GFP_DMA))
			mmu = 1;
		else if (high) {
			free_pages((unsigned long)memory,
				   get_order(size));

			/* Don't use the 16MB ZONE_DMA unless absolutely
			   needed. It's better to use remapping first. */
			if (dma_mask < DMA_32BIT_MASK && !(gfp & GFP_DMA)) {
				gfp = (gfp & ~GFP_DMA32) | GFP_DMA;
				goto again;
			}

			/* Let low level make its own zone decisions */
			gfp &= ~(GFP_DMA32|GFP_DMA);

			if (dma_ops->alloc_coherent)
				return dma_ops->alloc_coherent(dev, size,
							   dma_handle, gfp);
			return NULL;
		}

		memset(memory, 0, size);
		if (!mmu) {
			*dma_handle = bus;
			return memory;
		}
	}

	if (dma_ops->alloc_coherent) {
		free_pages((unsigned long)memory, get_order(size));
		gfp &= ~(GFP_DMA|GFP_DMA32);
		return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
	}

	if (dma_ops->map_simple) {
		*dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
					      size,
					      PCI_DMA_BIDIRECTIONAL);
		if (*dma_handle != bad_dma_address)
			return memory;
	}

	if (panic_on_overflow)
		panic("dma_alloc_coherent: IOMMU overflow by %lu bytes\n",
		      (unsigned long)size);
	free_pages((unsigned long)memory, get_order(size));
	return NULL;
}
EXPORT_SYMBOL(dma_alloc_coherent);
/*
 * Unmap coherent memory.
 * The caller must ensure that the device has finished accessing the mapping.
 */
void dma_free_coherent(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t bus)
{
	int order = get_order(size);

	WARN_ON(irqs_disabled());	/* for portability */
	if (dma_release_coherent(dev, order, vaddr))
		return;
	if (dma_ops->unmap_single)
		dma_ops->unmap_single(dev, bus, size, 0);
	free_pages((unsigned long)vaddr, order);
}
EXPORT_SYMBOL(dma_free_coherent);
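/*
 * Late initcall: initialize whichever IOMMU was detected in
 * pci_iommu_alloc().  Each *_init() backs off when its hardware is
 * absent, and no_iommu_init() installs the trivial dma_ops as the
 * final fallback so dma_ops is never left NULL.
 */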
static int __init pci_iommu_init(void)
{
	calgary_iommu_init();

	intel_iommu_init();

	amd_iommu_init();

	gart_iommu_init();

	no_iommu_init();
	return 0;
}

void pci_iommu_shutdown(void)
{
	gart_iommu_shutdown();
}
/* Must execute after PCI subsystem */
fs_initcall(pci_iommu_init);
#ifdef CONFIG_PCI
/* Many VIA bridges seem to corrupt data for DAC. Disable it here. */

static __devinit void via_no_dac(struct pci_dev *dev)
{
	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) {
		printk(KERN_INFO "PCI: VIA PCI bridge detected. "
				 "Disabling DAC.\n");
		forbid_dac = 1;
	}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac);
#endif