diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index cae806d1ef..10625785ee 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -17,17 +17,17 @@
 */
 
 #include <linux/cache.h>
+#include <linux/dma-mapping.h>
 #include <linux/mm.h>
 #include <linux/module.h>
-#include <linux/pci.h>
 #include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/ctype.h>
 
 #include <asm/io.h>
-#include <asm/pci.h>
 #include <asm/dma.h>
+#include <asm/scatterlist.h>
 
 #include <linux/init.h>
 #include <linux/bootmem.h>
@@ -127,7 +127,7 @@ __setup("swiotlb=", setup_io_tlb_npages);
 
 /*
  * Statically reserve bounce buffer space and initialize bounce buffer data
- * structures for the software IO TLB used to implement the PCI DMA API.
+ * structures for the software IO TLB used to implement the DMA API.
  */
 void
 swiotlb_init_with_default_size (size_t default_size)
@@ -142,8 +142,7 @@ swiotlb_init_with_default_size (size_t default_size)
 	/*
 	 * Get IO TLB memory from the low pages
 	 */
-	io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs *
-					       (1 << IO_TLB_SHIFT));
+	io_tlb_start = alloc_bootmem_low_pages(io_tlb_nslabs * (1 << IO_TLB_SHIFT));
 	if (!io_tlb_start)
 		panic("Cannot allocate SWIOTLB buffer");
 	io_tlb_end = io_tlb_start + io_tlb_nslabs * (1 << IO_TLB_SHIFT);
@@ -297,8 +296,7 @@ map_single(struct device *hwdev, char *buffer, size_t size, int dir)
 	else
 		stride = 1;
 
-	if (!nslots)
-		BUG();
+	BUG_ON(!nslots);
 
 	/*
 	 * Find suitable number of IO TLB entries size that will fit this
@@ -417,14 +415,14 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
 	case SYNC_FOR_CPU:
 		if (likely(dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL))
 			memcpy(buffer, dma_addr, size);
-		else if (dir != DMA_TO_DEVICE)
-			BUG();
+		else
+			BUG_ON(dir != DMA_TO_DEVICE);
 		break;
 	case SYNC_FOR_DEVICE:
 		if (likely(dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL))
 			memcpy(dma_addr, buffer, size);
-		else if (dir != DMA_FROM_DEVICE)
-			BUG();
+		else
+			BUG_ON(dir != DMA_FROM_DEVICE);
 		break;
 	default:
 		BUG();
@@ -433,7 +431,7 @@ sync_single(struct device *hwdev, char *dma_addr, size_t size,
 
 void *
 swiotlb_alloc_coherent(struct device *hwdev, size_t size,
-		       dma_addr_t *dma_handle, int flags)
+		       dma_addr_t *dma_handle, gfp_t flags)
 {
 	unsigned long dev_addr;
 	void *ret;
@@ -464,7 +462,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 		 */
 		dma_addr_t handle;
 		handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
-		if (dma_mapping_error(handle))
+		if (swiotlb_dma_mapping_error(handle))
 			return NULL;
 
 		ret = phys_to_virt(handle);
@@ -502,24 +500,24 @@ swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
 	/*
 	 * Ran out of IOMMU space for this operation. This is very bad.
 	 * Unfortunately the drivers cannot handle this operation properly.
-	 * unless they check for pci_dma_mapping_error (most don't)
+	 * unless they check for dma_mapping_error (most don't)
 	 * When the mapping is small enough return a static buffer to limit
 	 * the damage, or panic when the transfer is too big.
 	 */
-	printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %lu bytes at "
+	printk(KERN_ERR "DMA: Out of SW-IOMMU space for %lu bytes at "
 	       "device %s\n", size, dev ? dev->bus_id : "?");
 
 	if (size > io_tlb_overflow && do_panic) {
-		if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
-			panic("PCI-DMA: Memory would be corrupted\n");
-		if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
-			panic("PCI-DMA: Random memory would be DMAed\n");
+		if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL)
+			panic("DMA: Memory would be corrupted\n");
+		if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
+			panic("DMA: Random memory would be DMAed\n");
 	}
 }
 
 /*
  * Map a single buffer of the indicated size for DMA in streaming mode. The
- * PCI address to use is returned.
+ * physical address to use is returned.
 *
 * Once the device is given the dma address, the device owns this memory until
 * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
@@ -530,8 +528,7 @@ swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
 	unsigned long dev_addr = virt_to_phys(ptr);
 	void *map;
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 	/*
 	 * If the pointer passed in happens to be in the device's DMA window,
 	 * we can safely return the device addr and not worry about bounce
@@ -593,8 +590,7 @@ swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
 {
 	char *dma_addr = phys_to_virt(dev_addr);
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
 		unmap_single(hwdev, dma_addr, size, dir);
 	else if (dir == DMA_FROM_DEVICE)
@@ -606,8 +602,8 @@ swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
 * after a transfer.
 *
 * If you perform a swiotlb_map_single() but wish to interrogate the buffer
- * using the cpu, yet do not wish to teardown the PCI dma mapping, you must
- * call this function before doing so. At the next point you give the PCI dma
+ * using the cpu, yet do not wish to teardown the dma mapping, you must
+ * call this function before doing so. At the next point you give the dma
 * address back to the card, you must first perform a
 * swiotlb_dma_sync_for_device, and then the device again owns the buffer
 */
@@ -617,8 +613,7 @@ swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
 {
 	char *dma_addr = phys_to_virt(dev_addr);
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
 		sync_single(hwdev, dma_addr, size, dir, target);
 	else if (dir == DMA_FROM_DEVICE)
@@ -649,8 +644,7 @@ swiotlb_sync_single_range(struct device *hwdev, dma_addr_t dev_addr,
 {
 	char *dma_addr = phys_to_virt(dev_addr) + offset;
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 	if (dma_addr >= io_tlb_start && dma_addr < io_tlb_end)
 		sync_single(hwdev, dma_addr, size, dir, target);
 	else if (dir == DMA_FROM_DEVICE)
@@ -697,15 +691,15 @@ swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
 	unsigned long dev_addr;
 	int i;
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 
 	for (i = 0; i < nelems; i++, sg++) {
 		addr = SG_ENT_VIRT_ADDRESS(sg);
 		dev_addr = virt_to_phys(addr);
 		if (swiotlb_force || address_needs_mapping(hwdev, dev_addr)) {
-			sg->dma_address = (dma_addr_t) virt_to_phys(map_single(hwdev, addr, sg->length, dir));
-			if (!sg->dma_address) {
+			void *map = map_single(hwdev, addr, sg->length, dir);
+			sg->dma_address = virt_to_bus(map);
+			if (!map) {
 				/* Don't panic here, we expect map_sg users
 				   to do proper error handling. */
 				swiotlb_full(hwdev, sg->length, dir, 0);
@@ -730,8 +724,7 @@ swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
 {
 	int i;
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 
 	for (i = 0; i < nelems; i++, sg++)
 		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
@@ -753,8 +746,7 @@ swiotlb_sync_sg(struct device *hwdev, struct scatterlist *sg,
 {
 	int i;
 
-	if (dir == DMA_NONE)
-		BUG();
+	BUG_ON(dir == DMA_NONE);
 
 	for (i = 0; i < nelems; i++, sg++)
 		if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
@@ -783,9 +775,9 @@ swiotlb_dma_mapping_error(dma_addr_t dma_addr)
 }
 
 /*
- * Return whether the given PCI device DMA address mask can be supported
+ * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24-bits
- * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
+ * during bus mastering, then you would pass 0x00ffffff as the mask to
 * this function.
 */
 int
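
The swiotlb_full() comment above observes that most drivers never check for
mapping failures. Below is a minimal sketch of the driver-side check that
comment asks for, written against the one-argument dma_mapping_error() of this
kernel generation; the device pointer, buffer, and helper name are hypothetical
and not part of the patch.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Map a buffer for device-to-memory DMA and fail cleanly instead of
 * handing the device the static overflow buffer's address. */
static int example_map_rx_buffer(struct device *dev, void *buf, size_t len,
				 dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(*handle))
		return -ENOMEM;		/* e.g. swiotlb out of bounce slots */
	return 0;
}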
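
The swiotlb_sync_single() comment above describes interrogating a mapped buffer
from the CPU without tearing down the mapping. A sketch of that ownership
handoff through the generic API, assuming a hypothetical DMA_FROM_DEVICE
mapping (dev, handle, cpu_buf, and len are placeholders):

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/types.h>

/* Peek at a live DMA_FROM_DEVICE mapping: claim the buffer for the CPU,
 * read it, then give ownership back to the device. */
static u32 example_peek_status(struct device *dev, dma_addr_t handle,
			       void *cpu_buf, size_t len)
{
	u32 status;

	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
	status = *(u32 *)cpu_buf;	/* CPU owns the buffer here */
	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
	return status;			/* device owns the buffer again */
}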
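
The closing comment's 24-bit example corresponds to a probe-time mask
negotiation like the sketch below; the 0x00ffffff literal is taken from the
comment, and the surrounding function is hypothetical.

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* A device that can only drive the low 24 address bits while bus
 * mastering must say so before any mapping is made. */
static int example_probe_dma(struct device *dev)
{
	if (dma_set_mask(dev, 0x00ffffffULL))
		return -EIO;	/* platform cannot satisfy a 24-bit mask */
	return 0;
}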