err.no Git - linux-2.6/commitdiff
dma-mapping: add the device argument to dma_mapping_error()
author FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Sat, 26 Jul 2008 02:44:49 +0000 (19:44 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 26 Jul 2008 19:00:03 +0000 (12:00 -0700)
Add per-device dma_mapping_ops support for CONFIG_X86_64, as the POWER
architecture already does:

This enables us to cleanly fix the Calgary IOMMU issue whereby some devices
are not behind the IOMMU (http://lkml.org/lkml/2008/5/8/423).

I think that per-device dma_mapping_ops support would also be helpful for
the KVM people to support PCI passthrough, but Andi thinks that it makes PCI
passthrough harder to support (see the above thread).  So I CC'ed this to
the KVM camp.  Comments are appreciated.

A pointer to dma_mapping_ops is added to struct dev_archdata.  If the
pointer is non-NULL, the DMA operations in asm/dma-mapping.h use it.  If
it's NULL, the system-wide dma_ops pointer is used as before.
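
In outline, the per-device lookup added on x86 behaves like the following
sketch (paraphrased from the include/asm-x86/dma-mapping.h changes in this
series; exact details may differ):

    static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
    {
    #ifdef CONFIG_X86_32
            return dma_ops;
    #else
            /* fall back to the system-wide ops when no override is set */
            if (unlikely(!dev) || !dev->archdata.dma_ops)
                    return dma_ops;
            return dev->archdata.dma_ops;   /* per-device override */
    #endif
    }

    static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
    {
            struct dma_mapping_ops *ops = get_dma_ops(dev);

            if (ops->mapping_error)
                    return ops->mapping_error(dev, dma_addr);
            return (dma_addr == bad_dma_address);
    }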

If it's useful for the KVM people, I plan to implement a mechanism to
register a hook that is called when a new PCI (or DMA-capable) device is
created (it works with hot plugging).  It enables IOMMUs to set up an
appropriate dma_mapping_ops per device.
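
A minimal sketch of such a hook, assuming it is wired up as a PCI bus
notifier (the notifier itself, device_is_behind_iommu() and
my_iommu_dma_ops are illustrative names, not part of this patch):

    static int iommu_add_device(struct notifier_block *nb,
                                unsigned long action, void *data)
    {
            struct device *dev = data;

            /* hypothetical predicate: is this device behind our IOMMU? */
            if (action == BUS_NOTIFY_ADD_DEVICE && device_is_behind_iommu(dev))
                    dev->archdata.dma_ops = &my_iommu_dma_ops;

            return NOTIFY_DONE;
    }

    static struct notifier_block iommu_nb = {
            .notifier_call = iommu_add_device,
    };

    /* at init time: bus_register_notifier(&pci_bus_type, &iommu_nb); */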

The major obstacle is that dma_mapping_error doesn't take a pointer to the
device, unlike the other DMA operations, so x86 can't have dma_mapping_ops
per device.  Note that all the POWER IOMMUs use the same dma_mapping_error
function, so this is not a problem for POWER, but x86 IOMMUs use different
dma_mapping_error functions.

The first patch adds the device argument to dma_mapping_error.  The patch
is trivial but large since it touches lots of drivers and dma-mapping.h in
all the architectures.

This patch:

dma_mapping_error() doesn't take a pointer to the device, unlike the other
DMA operations, so we can't have dma_mapping_ops per device.

Note that POWER already has dma_mapping_ops per device, but all the POWER
IOMMUs use the same dma_mapping_error function.  x86 IOMMUs, on the other
hand, use different dma_mapping_error functions, so they need the device
argument.
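
For a typical driver the conversion is mechanical, mirroring the hunks
below: every caller simply passes the device it already used for the
mapping call, for example:

    dma_addr_t addr = dma_map_single(dev, buf, size, DMA_TO_DEVICE);

    if (dma_mapping_error(dev, addr))       /* was: dma_mapping_error(addr) */
            return -ENOMEM;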

[akpm@linux-foundation.org: fix sge]
[akpm@linux-foundation.org: fix svc_rdma]
[akpm@linux-foundation.org: build fix]
[akpm@linux-foundation.org: fix bnx2x]
[akpm@linux-foundation.org: fix s2io]
[akpm@linux-foundation.org: fix pasemi_mac]
[akpm@linux-foundation.org: fix sdhci]
[akpm@linux-foundation.org: build fix]
[akpm@linux-foundation.org: fix sparc]
[akpm@linux-foundation.org: fix ibmvscsi]
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
Cc: Muli Ben-Yehuda <muli@il.ibm.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Avi Kivity <avi@qumranet.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
76 files changed:
Documentation/DMA-API.txt
arch/arm/common/dmabounce.c
arch/ia64/hp/common/hwsw_iommu.c
arch/ia64/hp/common/sba_iommu.c
arch/ia64/sn/pci/pci_dma.c
arch/mips/mm/dma-default.c
arch/powerpc/platforms/cell/celleb_scc_pciex.c
arch/powerpc/platforms/cell/spider-pci.c
arch/powerpc/platforms/iseries/mf.c
arch/x86/kernel/pci-calgary_64.c
arch/x86/kernel/pci-dma.c
arch/x86/kernel/pci-gart_64.c
arch/x86/kernel/pci-nommu.c
arch/x86/kernel/pci-swiotlb_64.c
drivers/firewire/fw-iso.c
drivers/firewire/fw-ohci.c
drivers/firewire/fw-sbp2.c
drivers/infiniband/hw/ipath/ipath_sdma.c
drivers/infiniband/hw/ipath/ipath_user_sdma.c
drivers/infiniband/hw/mthca/mthca_eq.c
drivers/media/dvb/pluto2/pluto2.c
drivers/mmc/host/sdhci.c
drivers/net/arm/ep93xx_eth.c
drivers/net/bnx2x_main.c
drivers/net/cxgb3/sge.c
drivers/net/e100.c
drivers/net/e1000e/ethtool.c
drivers/net/e1000e/netdev.c
drivers/net/ibmveth.c
drivers/net/iseries_veth.c
drivers/net/mlx4/eq.c
drivers/net/pasemi_mac.c
drivers/net/qla3xxx.c
drivers/net/s2io.c
drivers/net/sfc/rx.c
drivers/net/sfc/tx.c
drivers/net/spider_net.c
drivers/net/tc35815.c
drivers/net/wireless/ath5k/base.c
drivers/scsi/ibmvscsi/ibmvfc.c
drivers/scsi/ibmvscsi/ibmvscsi.c
drivers/scsi/ibmvscsi/ibmvstgt.c
drivers/scsi/ibmvscsi/rpa_vscsi.c
drivers/spi/atmel_spi.c
drivers/spi/au1550_spi.c
drivers/spi/omap2_mcspi.c
drivers/spi/pxa2xx_spi.c
drivers/spi/spi_imx.c
include/asm-alpha/dma-mapping.h
include/asm-alpha/pci.h
include/asm-arm/dma-mapping.h
include/asm-avr32/dma-mapping.h
include/asm-cris/dma-mapping.h
include/asm-frv/dma-mapping.h
include/asm-generic/dma-mapping-broken.h
include/asm-generic/dma-mapping.h
include/asm-generic/pci-dma-compat.h
include/asm-ia64/machvec.h
include/asm-m68k/dma-mapping.h
include/asm-mips/dma-mapping.h
include/asm-mn10300/dma-mapping.h
include/asm-parisc/dma-mapping.h
include/asm-powerpc/dma-mapping.h
include/asm-sh/dma-mapping.h
include/asm-sparc/dma-mapping_64.h
include/asm-sparc/pci_32.h
include/asm-sparc/pci_64.h
include/asm-x86/device.h
include/asm-x86/dma-mapping.h
include/asm-x86/swiotlb.h
include/asm-xtensa/dma-mapping.h
include/linux/i2o.h
include/linux/ssb/ssb.h
include/rdma/ib_verbs.h
lib/swiotlb.c
net/sunrpc/xprtrdma/svc_rdma_sendto.c

diff --git a/Documentation/DMA-API.txt b/Documentation/DMA-API.txt
index 80d150458c80c5ac7f7f5f75b3e6ff8a602a19ca..d8b63d164e41193927af2c7fb41dcb0893f57878 100644
@@ -298,10 +298,10 @@ recommended that you never use these unless you really know what the
 cache width is.
 
 int
-dma_mapping_error(dma_addr_t dma_addr)
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 
 int
-pci_dma_mapping_error(dma_addr_t dma_addr)
+pci_dma_mapping_error(struct pci_dev *hwdev, dma_addr_t dma_addr)
 
 In some circumstances dma_map_single and dma_map_page will fail to create
 a mapping. A driver can check for these errors by testing the returned
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index dd2947342604b2ea43bdfa409dcc32b4b2609cf2..69130f365904bc0889e3f99d75af10834c9a9f8d 100644
@@ -280,7 +280,7 @@ unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
        /*
         * Trying to unmap an invalid mapping
         */
-       if (dma_mapping_error(dma_addr)) {
+       if (dma_mapping_error(dev, dma_addr)) {
                dev_err(dev, "Trying to unmap invalid mapping\n");
                return;
        }
diff --git a/arch/ia64/hp/common/hwsw_iommu.c b/arch/ia64/hp/common/hwsw_iommu.c
index 1c44ec2a1d58e2495cb21f41759dcee218471ffc..88b6e6f3fd88b6245079c966b1a721fbee65f89d 100644
@@ -186,9 +186,10 @@ hwsw_dma_supported (struct device *dev, u64 mask)
 }
 
 int
-hwsw_dma_mapping_error (dma_addr_t dma_addr)
+hwsw_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-       return hwiommu_dma_mapping_error (dma_addr) || swiotlb_dma_mapping_error(dma_addr);
+       return hwiommu_dma_mapping_error(dev, dma_addr) ||
+               swiotlb_dma_mapping_error(dev, dma_addr);
 }
 
 EXPORT_SYMBOL(hwsw_dma_mapping_error);
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 34421aed1e2ab95ff0d4f4d4e458003b409fd1a2..4956be40d7b56d9c03a3224e45f743e8f6e34d39 100644
@@ -2147,7 +2147,7 @@ sba_dma_supported (struct device *dev, u64 mask)
 }
 
 int
-sba_dma_mapping_error (dma_addr_t dma_addr)
+sba_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
        return 0;
 }
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 52175af299a0424516fc3299c8509487581956dc..53ebb6484495545550e3f07e149bea575db07981 100644
@@ -350,7 +350,7 @@ void sn_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 }
 EXPORT_SYMBOL(sn_dma_sync_sg_for_device);
 
-int sn_dma_mapping_error(dma_addr_t dma_addr)
+int sn_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
        return 0;
 }
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index ae39dd88b9aa0eb97cb63b4c56bbe41c1811ad94..891312f8e5a6b19d01b6485cda535821520e346c 100644
@@ -348,7 +348,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nele
 
 EXPORT_SYMBOL(dma_sync_sg_for_device);
 
-int dma_mapping_error(dma_addr_t dma_addr)
+int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
        return 0;
 }
diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
index 0e04f8fb152a55a9e5ad0aa9c97bd164738392ea..3e7e0f1568ef50d705de43fa889ce8722c2fd352 100644
@@ -281,7 +281,7 @@ static int __init scc_pciex_iowa_init(struct iowa_bus *bus, void *data)
 
        dummy_page_da = dma_map_single(bus->phb->parent, dummy_page_va,
                                       PAGE_SIZE, DMA_FROM_DEVICE);
-       if (dma_mapping_error(dummy_page_da)) {
+       if (dma_mapping_error(bus->phb->parent, dummy_page_da)) {
                pr_err("PCIEX:Map dummy page failed.\n");
                kfree(dummy_page_va);
                return -1;
diff --git a/arch/powerpc/platforms/cell/spider-pci.c b/arch/powerpc/platforms/cell/spider-pci.c
index 418b605ac35aefff35224aaabccc1d086473ac44..5122ec145271e0e65608718a0233138144a77561 100644
@@ -111,7 +111,7 @@ static int __init spiderpci_pci_setup_chip(struct pci_controller *phb,
 
        dummy_page_da = dma_map_single(phb->parent, dummy_page_va,
                                       PAGE_SIZE, DMA_FROM_DEVICE);
-       if (dma_mapping_error(dummy_page_da)) {
+       if (dma_mapping_error(phb->parent, dummy_page_da)) {
                pr_err("SPIDER-IOWA:Map dummy page filed.\n");
                kfree(dummy_page_va);
                return -1;
diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c
index 1dc7295746dac6471503b098182c9f1e128239d8..731d7b157749e29547c0a615e66b35e0329ea72d 100644
@@ -871,7 +871,7 @@ static int proc_mf_dump_cmdline(char *page, char **start, off_t off,
                count = 256 - off;
 
        dma_addr = iseries_hv_map(page, off + count, DMA_FROM_DEVICE);
-       if (dma_mapping_error(dma_addr))
+       if (dma_mapping_error(NULL, dma_addr))
                return -ENOMEM;
        memset(page, 0, off + count);
        memset(&vsp_cmd, 0, sizeof(vsp_cmd));
diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
index 19e7fc7c2c4ff4c9be369fec3b3fdfae3b3d2467..1eb86be93d7aa9d96bd12e3e173ef3203ff13e66 100644
@@ -544,7 +544,7 @@ error:
        return ret;
 }
 
-static const struct dma_mapping_ops calgary_dma_ops = {
+static struct dma_mapping_ops calgary_dma_ops = {
        .alloc_coherent = calgary_alloc_coherent,
        .map_single = calgary_map_single,
        .unmap_single = calgary_unmap_single,
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index cbecb05551bbcefeb68e6048bd1dd5aeab5376f7..37544123896db54a58ac8cb3ee746d6c114ff686 100644
@@ -11,7 +11,7 @@
 
 static int forbid_dac __read_mostly;
 
-const struct dma_mapping_ops *dma_ops;
+struct dma_mapping_ops *dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
 static int iommu_sac_force __read_mostly;
@@ -312,6 +312,8 @@ static int dma_release_coherent(struct device *dev, int order, void *vaddr)
 
 int dma_supported(struct device *dev, u64 mask)
 {
+       struct dma_mapping_ops *ops = get_dma_ops(dev);
+
 #ifdef CONFIG_PCI
        if (mask > 0xffffffff && forbid_dac > 0) {
                dev_info(dev, "PCI: Disallowing DAC for device\n");
@@ -319,8 +321,8 @@ int dma_supported(struct device *dev, u64 mask)
        }
 #endif
 
-       if (dma_ops->dma_supported)
-               return dma_ops->dma_supported(dev, mask);
+       if (ops->dma_supported)
+               return ops->dma_supported(dev, mask);
 
        /* Copied from i386. Doesn't make much sense, because it will
           only work for pci_alloc_coherent.
@@ -367,6 +369,7 @@ void *
 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                   gfp_t gfp)
 {
+       struct dma_mapping_ops *ops = get_dma_ops(dev);
        void *memory = NULL;
        struct page *page;
        unsigned long dma_mask = 0;
@@ -435,8 +438,8 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                        /* Let low level make its own zone decisions */
                        gfp &= ~(GFP_DMA32|GFP_DMA);
 
-                       if (dma_ops->alloc_coherent)
-                               return dma_ops->alloc_coherent(dev, size,
+                       if (ops->alloc_coherent)
+                               return ops->alloc_coherent(dev, size,
                                                           dma_handle, gfp);
                        return NULL;
                }
@@ -448,14 +451,14 @@ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                }
        }
 
-       if (dma_ops->alloc_coherent) {
+       if (ops->alloc_coherent) {
                free_pages((unsigned long)memory, get_order(size));
                gfp &= ~(GFP_DMA|GFP_DMA32);
-               return dma_ops->alloc_coherent(dev, size, dma_handle, gfp);
+               return ops->alloc_coherent(dev, size, dma_handle, gfp);
        }
 
-       if (dma_ops->map_simple) {
-               *dma_handle = dma_ops->map_simple(dev, virt_to_phys(memory),
+       if (ops->map_simple) {
+               *dma_handle = ops->map_simple(dev, virt_to_phys(memory),
                                              size,
                                              PCI_DMA_BIDIRECTIONAL);
                if (*dma_handle != bad_dma_address)
@@ -477,12 +480,14 @@ EXPORT_SYMBOL(dma_alloc_coherent);
 void dma_free_coherent(struct device *dev, size_t size,
                         void *vaddr, dma_addr_t bus)
 {
+       struct dma_mapping_ops *ops = get_dma_ops(dev);
+
        int order = get_order(size);
        WARN_ON(irqs_disabled());       /* for portability */
        if (dma_release_coherent(dev, order, vaddr))
                return;
-       if (dma_ops->unmap_single)
-               dma_ops->unmap_single(dev, bus, size, 0);
+       if (ops->unmap_single)
+               ops->unmap_single(dev, bus, size, 0);
        free_pages((unsigned long)vaddr, order);
 }
 EXPORT_SYMBOL(dma_free_coherent);
diff --git a/arch/x86/kernel/pci-gart_64.c b/arch/x86/kernel/pci-gart_64.c
index df5f142657d27352a9e3c1bbd911fd89f677c1fa..744126e64950ee5a7bbceb66ec31d7084dc6b42e 100644
@@ -692,8 +692,7 @@ static __init int init_k8_gatt(struct agp_kern_info *info)
 
 extern int agp_amd64_init(void);
 
-static const struct dma_mapping_ops gart_dma_ops = {
-       .mapping_error                  = NULL,
+static struct dma_mapping_ops gart_dma_ops = {
        .map_single                     = gart_map_single,
        .map_simple                     = gart_map_simple,
        .unmap_single                   = gart_unmap_single,
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index 792b9179eff315ecf3ae26e7e1635073e1a42c26..3f91f71cdc3eac766be0891511afd738001c6918 100644
@@ -72,21 +72,9 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
        return nents;
 }
 
-/* Make sure we keep the same behaviour */
-static int nommu_mapping_error(dma_addr_t dma_addr)
-{
-#ifdef CONFIG_X86_32
-       return 0;
-#else
-       return (dma_addr == bad_dma_address);
-#endif
-}
-
-
-const struct dma_mapping_ops nommu_dma_ops = {
+struct dma_mapping_ops nommu_dma_ops = {
        .map_single = nommu_map_single,
        .map_sg = nommu_map_sg,
-       .mapping_error = nommu_mapping_error,
        .is_phys = 1,
 };
 
diff --git a/arch/x86/kernel/pci-swiotlb_64.c b/arch/x86/kernel/pci-swiotlb_64.c
index 20df839b9c2012c12fa082c7b42ada6e63879a87..c4ce0332759ee08413e78e6623b5a023f6404fc7 100644
@@ -18,7 +18,7 @@ swiotlb_map_single_phys(struct device *hwdev, phys_addr_t paddr, size_t size,
        return swiotlb_map_single(hwdev, phys_to_virt(paddr), size, direction);
 }
 
-const struct dma_mapping_ops swiotlb_dma_ops = {
+struct dma_mapping_ops swiotlb_dma_ops = {
        .mapping_error = swiotlb_dma_mapping_error,
        .alloc_coherent = swiotlb_alloc_coherent,
        .free_coherent = swiotlb_free_coherent,
diff --git a/drivers/firewire/fw-iso.c b/drivers/firewire/fw-iso.c
index bcbe794a3ea5ca3198df3c1593ef696edf0f716c..e14c03dc006588d1b98d0b10ded8bd210d8fdb8d 100644
@@ -50,7 +50,7 @@ fw_iso_buffer_init(struct fw_iso_buffer *buffer, struct fw_card *card,
 
                address = dma_map_page(card->device, buffer->pages[i],
                                       0, PAGE_SIZE, direction);
-               if (dma_mapping_error(address)) {
+               if (dma_mapping_error(card->device, address)) {
                        __free_page(buffer->pages[i]);
                        goto out_pages;
                }
diff --git a/drivers/firewire/fw-ohci.c b/drivers/firewire/fw-ohci.c
index 333b12544dd1b0991bd9cc22ac079e2f3313210a..566672e0bcffbb6902d5174899b7b1933213847a 100644
@@ -953,7 +953,7 @@ at_context_queue_packet(struct context *ctx, struct fw_packet *packet)
                payload_bus =
                        dma_map_single(ohci->card.device, packet->payload,
                                       packet->payload_length, DMA_TO_DEVICE);
-               if (dma_mapping_error(payload_bus)) {
+               if (dma_mapping_error(ohci->card.device, payload_bus)) {
                        packet->ack = RCODE_SEND_ERROR;
                        return -1;
                }
diff --git a/drivers/firewire/fw-sbp2.c b/drivers/firewire/fw-sbp2.c
index 53fc5a641e6d85de37c5c40b3147d4df009b72a6..aaff50ebba1def2892a8945be8b9289d39921c4f 100644
@@ -543,7 +543,7 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
        orb->response_bus =
                dma_map_single(device->card->device, &orb->response,
                               sizeof(orb->response), DMA_FROM_DEVICE);
-       if (dma_mapping_error(orb->response_bus))
+       if (dma_mapping_error(device->card->device, orb->response_bus))
                goto fail_mapping_response;
 
        orb->request.response.high = 0;
@@ -577,7 +577,7 @@ sbp2_send_management_orb(struct sbp2_logical_unit *lu, int node_id,
        orb->base.request_bus =
                dma_map_single(device->card->device, &orb->request,
                               sizeof(orb->request), DMA_TO_DEVICE);
-       if (dma_mapping_error(orb->base.request_bus))
+       if (dma_mapping_error(device->card->device, orb->base.request_bus))
                goto fail_mapping_request;
 
        sbp2_send_orb(&orb->base, lu, node_id, generation,
@@ -1424,7 +1424,7 @@ sbp2_map_scatterlist(struct sbp2_command_orb *orb, struct fw_device *device,
        orb->page_table_bus =
                dma_map_single(device->card->device, orb->page_table,
                               sizeof(orb->page_table), DMA_TO_DEVICE);
-       if (dma_mapping_error(orb->page_table_bus))
+       if (dma_mapping_error(device->card->device, orb->page_table_bus))
                goto fail_page_table;
 
        /*
@@ -1509,7 +1509,7 @@ static int sbp2_scsi_queuecommand(struct scsi_cmnd *cmd, scsi_done_fn_t done)
        orb->base.request_bus =
                dma_map_single(device->card->device, &orb->request,
                               sizeof(orb->request), DMA_TO_DEVICE);
-       if (dma_mapping_error(orb->base.request_bus))
+       if (dma_mapping_error(device->card->device, orb->base.request_bus))
                goto out;
 
        sbp2_send_orb(&orb->base, lu, lu->tgt->node_id, lu->generation,
diff --git a/drivers/infiniband/hw/ipath/ipath_sdma.c b/drivers/infiniband/hw/ipath/ipath_sdma.c
index eaba03273e4febd20ca45ce88e3b63fbd7e83f40..284c9bca517e3bc7f55bd7661c3cb2a0ab7bfa7e 100644
@@ -698,7 +698,7 @@ retry:
 
        addr = dma_map_single(&dd->pcidev->dev, tx->txreq.map_addr,
                              tx->map_len, DMA_TO_DEVICE);
-       if (dma_mapping_error(addr)) {
+       if (dma_mapping_error(&dd->pcidev->dev, addr)) {
                ret = -EIO;
                goto unlock;
        }
diff --git a/drivers/infiniband/hw/ipath/ipath_user_sdma.c b/drivers/infiniband/hw/ipath/ipath_user_sdma.c
index 86e016916cd1d473d60f74fe5be5544d89897e0e..82d9a0b5ca2fb06039398164f52d46efa1d8082d 100644
@@ -206,7 +206,7 @@ static int ipath_user_sdma_coalesce(const struct ipath_devdata *dd,
 
        dma_addr = dma_map_page(&dd->pcidev->dev, page, 0, len,
                                DMA_TO_DEVICE);
-       if (dma_mapping_error(dma_addr)) {
+       if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
                ret = -ENOMEM;
                goto free_unmap;
        }
@@ -301,7 +301,7 @@ static int ipath_user_sdma_pin_pages(const struct ipath_devdata *dd,
                                     pages[j], 0, flen, DMA_TO_DEVICE);
                unsigned long fofs = addr & ~PAGE_MASK;
 
-               if (dma_mapping_error(dma_addr)) {
+               if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
                        ret = -ENOMEM;
                        goto done;
                }
@@ -508,7 +508,7 @@ static int ipath_user_sdma_queue_pkts(const struct ipath_devdata *dd,
                if (page) {
                        dma_addr = dma_map_page(&dd->pcidev->dev,
                                                page, 0, len, DMA_TO_DEVICE);
-                       if (dma_mapping_error(dma_addr)) {
+                       if (dma_mapping_error(&dd->pcidev->dev, dma_addr)) {
                                ret = -ENOMEM;
                                goto free_pbc;
                        }
diff --git a/drivers/infiniband/hw/mthca/mthca_eq.c b/drivers/infiniband/hw/mthca/mthca_eq.c
index 4e36aa7cb3d2f7cfc9eba226b0315f9a30083939..cc6858f0b65bb1d6b6d168cb2ad8aa73315fb817 100644
@@ -780,7 +780,7 @@ int mthca_map_eq_icm(struct mthca_dev *dev, u64 icm_virt)
                return -ENOMEM;
        dev->eq_table.icm_dma  = pci_map_page(dev->pdev, dev->eq_table.icm_page, 0,
                                              PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-       if (pci_dma_mapping_error(dev->eq_table.icm_dma)) {
+       if (pci_dma_mapping_error(dev->pdev, dev->eq_table.icm_dma)) {
                __free_page(dev->eq_table.icm_page);
                return -ENOMEM;
        }
diff --git a/drivers/media/dvb/pluto2/pluto2.c b/drivers/media/dvb/pluto2/pluto2.c
index 1360403b88b6953d778e300717e5e8c646bf2da0..a9653c63f4db8350b5fb66c4e184b6d44a16421f 100644
@@ -242,7 +242,7 @@ static int __devinit pluto_dma_map(struct pluto *pluto)
        pluto->dma_addr = pci_map_single(pluto->pdev, pluto->dma_buf,
                        TS_DMA_BYTES, PCI_DMA_FROMDEVICE);
 
-       return pci_dma_mapping_error(pluto->dma_addr);
+       return pci_dma_mapping_error(pluto->pdev, pluto->dma_addr);
 }
 
 static void pluto_dma_unmap(struct pluto *pluto)
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index c3a5db72ddd73b7fd48a29cd467c889ba36978a5..5f95e10229b5f64cf55e79e8930b2c69d4f9181f 100644
@@ -337,7 +337,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
 
        host->align_addr = dma_map_single(mmc_dev(host->mmc),
                host->align_buffer, 128 * 4, direction);
-       if (dma_mapping_error(host->align_addr))
+       if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
                goto fail;
        BUG_ON(host->align_addr & 0x3);
 
@@ -439,7 +439,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host,
 
        host->adma_addr = dma_map_single(mmc_dev(host->mmc),
                host->adma_desc, (128 * 2 + 1) * 4, DMA_TO_DEVICE);
-       if (dma_mapping_error(host->align_addr))
+       if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr))
                goto unmap_entries;
        BUG_ON(host->adma_addr & 0x3);
 
diff --git a/drivers/net/arm/ep93xx_eth.c b/drivers/net/arm/ep93xx_eth.c
index 7a14980f3472d133da546174cef0f6365ffb4772..18d3eeb7eab26188501967dcf4fc9b95d773210c 100644
@@ -482,7 +482,7 @@ static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
                        goto err;
 
                d = dma_map_single(NULL, page, PAGE_SIZE, DMA_FROM_DEVICE);
-               if (dma_mapping_error(d)) {
+               if (dma_mapping_error(NULL, d)) {
                        free_page((unsigned long)page);
                        goto err;
                }
@@ -505,7 +505,7 @@ static int ep93xx_alloc_buffers(struct ep93xx_priv *ep)
                        goto err;
 
                d = dma_map_single(NULL, page, PAGE_SIZE, DMA_TO_DEVICE);
-               if (dma_mapping_error(d)) {
+               if (dma_mapping_error(NULL, d)) {
                        free_page((unsigned long)page);
                        goto err;
                }
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 0263bef9cc6ddfb2cfa428ebe1ad6a104d27b49c..c7cc760a1777f96fad3ba631c76df774e4579adf 100644
@@ -1020,7 +1020,7 @@ static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
 
        mapping = pci_map_page(bp->pdev, page, 0, BCM_PAGE_SIZE*PAGES_PER_SGE,
                               PCI_DMA_FROMDEVICE);
-       if (unlikely(dma_mapping_error(mapping))) {
+       if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                __free_pages(page, PAGES_PER_SGE_SHIFT);
                return -ENOMEM;
        }
@@ -1048,7 +1048,7 @@ static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
 
        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                                 PCI_DMA_FROMDEVICE);
-       if (unlikely(dma_mapping_error(mapping))) {
+       if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
                dev_kfree_skb(skb);
                return -ENOMEM;
        }
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index a96331c875e69cf0d07a9398cf4f74692c06faae..1b0861d73ab7d00ef103ab2d58de7975b134ab20 100644
@@ -386,7 +386,7 @@ static inline int add_one_rx_buf(void *va, unsigned int len,
        dma_addr_t mapping;
 
        mapping = pci_map_single(pdev, va, len, PCI_DMA_FROMDEVICE);
-       if (unlikely(pci_dma_mapping_error(mapping)))
+       if (unlikely(pci_dma_mapping_error(pdev, mapping)))
                return -ENOMEM;
 
        pci_unmap_addr_set(sd, dma_addr, mapping);
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index 1037b1332312998aa36274b279381263419f0371..19d32a227be14c3202f1773da8dda65898503bc0 100644
@@ -1790,7 +1790,7 @@ static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
        rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
                RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
 
-       if (pci_dma_mapping_error(rx->dma_addr)) {
+       if (pci_dma_mapping_error(nic->pdev, rx->dma_addr)) {
                dev_kfree_skb_any(rx->skb);
                rx->skb = NULL;
                rx->dma_addr = 0;
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index a14561f40db0fddd497b4894c6b67db1d8996dc2..9350564065e7713f900dcbc77acae0fc463728b7 100644
@@ -1090,7 +1090,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
                tx_ring->buffer_info[i].dma =
                        pci_map_single(pdev, skb->data, skb->len,
                                       PCI_DMA_TODEVICE);
-               if (pci_dma_mapping_error(tx_ring->buffer_info[i].dma)) {
+               if (pci_dma_mapping_error(pdev, tx_ring->buffer_info[i].dma)) {
                        ret_val = 4;
                        goto err_nomem;
                }
@@ -1153,7 +1153,7 @@ static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
                rx_ring->buffer_info[i].dma =
                        pci_map_single(pdev, skb->data, 2048,
                                       PCI_DMA_FROMDEVICE);
-               if (pci_dma_mapping_error(rx_ring->buffer_info[i].dma)) {
+               if (pci_dma_mapping_error(pdev, rx_ring->buffer_info[i].dma)) {
                        ret_val = 8;
                        goto err_nomem;
                }
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 9c0f56b3c51871d0f36fc1bb8165ffe42ea5771f..d1367789976740f829083d63ed787b788e854e01 100644
@@ -195,7 +195,7 @@ map_skb:
                buffer_info->dma = pci_map_single(pdev, skb->data,
                                                  adapter->rx_buffer_len,
                                                  PCI_DMA_FROMDEVICE);
-               if (pci_dma_mapping_error(buffer_info->dma)) {
+               if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
                        dev_err(&pdev->dev, "RX DMA map failed\n");
                        adapter->rx_dma_failed++;
                        break;
@@ -265,7 +265,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
                                                   ps_page->page,
                                                   0, PAGE_SIZE,
                                                   PCI_DMA_FROMDEVICE);
-                               if (pci_dma_mapping_error(ps_page->dma)) {
+                               if (pci_dma_mapping_error(pdev, ps_page->dma)) {
                                        dev_err(&adapter->pdev->dev,
                                          "RX DMA page map failed\n");
                                        adapter->rx_dma_failed++;
@@ -300,7 +300,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
                buffer_info->dma = pci_map_single(pdev, skb->data,
                                                  adapter->rx_ps_bsize0,
                                                  PCI_DMA_FROMDEVICE);
-               if (pci_dma_mapping_error(buffer_info->dma)) {
+               if (pci_dma_mapping_error(pdev, buffer_info->dma)) {
                        dev_err(&pdev->dev, "RX DMA map failed\n");
                        adapter->rx_dma_failed++;
                        /* cleanup skb */
@@ -3344,7 +3344,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
                                skb->data + offset,
                                size,
                                PCI_DMA_TODEVICE);
-               if (pci_dma_mapping_error(buffer_info->dma)) {
+               if (pci_dma_mapping_error(adapter->pdev, buffer_info->dma)) {
                        dev_err(&adapter->pdev->dev, "TX DMA map failed\n");
                        adapter->tx_dma_failed++;
                        return -1;
@@ -3382,7 +3382,8 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
                                        offset,
                                        size,
                                        PCI_DMA_TODEVICE);
-                       if (pci_dma_mapping_error(buffer_info->dma)) {
+                       if (pci_dma_mapping_error(adapter->pdev,
+                                                 buffer_info->dma)) {
                                dev_err(&adapter->pdev->dev,
                                        "TX DMA page map failed\n");
                                adapter->tx_dma_failed++;
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index e5a6e2e84540f841a91f98c171cce122045724ea..91ec9fdc718482247e5058d74893f61b6e0230e0 100644
@@ -260,7 +260,7 @@ static void ibmveth_replenish_buffer_pool(struct ibmveth_adapter *adapter, struc
                dma_addr = dma_map_single(&adapter->vdev->dev, skb->data,
                                pool->buff_size, DMA_FROM_DEVICE);
 
-               if (dma_mapping_error(dma_addr))
+               if (dma_mapping_error(&adapter->vdev->dev, dma_addr))
                        goto failure;
 
                pool->free_map[free_index] = IBM_VETH_INVALID_MAP;
@@ -294,7 +294,7 @@ failure:
                pool->consumer_index = pool->size - 1;
        else
                pool->consumer_index--;
-       if (!dma_mapping_error(dma_addr))
+       if (!dma_mapping_error(&adapter->vdev->dev, dma_addr))
                dma_unmap_single(&adapter->vdev->dev,
                                 pool->dma_addr[index], pool->buff_size,
                                 DMA_FROM_DEVICE);
@@ -448,11 +448,11 @@ static void ibmveth_rxq_harvest_buffer(struct ibmveth_adapter *adapter)
 static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
 {
        int i;
+       struct device *dev = &adapter->vdev->dev;
 
        if(adapter->buffer_list_addr != NULL) {
-               if(!dma_mapping_error(adapter->buffer_list_dma)) {
-                       dma_unmap_single(&adapter->vdev->dev,
-                                       adapter->buffer_list_dma, 4096,
+               if (!dma_mapping_error(dev, adapter->buffer_list_dma)) {
+                       dma_unmap_single(dev, adapter->buffer_list_dma, 4096,
                                        DMA_BIDIRECTIONAL);
                        adapter->buffer_list_dma = DMA_ERROR_CODE;
                }
@@ -461,9 +461,8 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
        }
 
        if(adapter->filter_list_addr != NULL) {
-               if(!dma_mapping_error(adapter->filter_list_dma)) {
-                       dma_unmap_single(&adapter->vdev->dev,
-                                       adapter->filter_list_dma, 4096,
+               if (!dma_mapping_error(dev, adapter->filter_list_dma)) {
+                       dma_unmap_single(dev, adapter->filter_list_dma, 4096,
                                        DMA_BIDIRECTIONAL);
                        adapter->filter_list_dma = DMA_ERROR_CODE;
                }
@@ -472,8 +471,8 @@ static void ibmveth_cleanup(struct ibmveth_adapter *adapter)
        }
 
        if(adapter->rx_queue.queue_addr != NULL) {
-               if(!dma_mapping_error(adapter->rx_queue.queue_dma)) {
-                       dma_unmap_single(&adapter->vdev->dev,
+               if (!dma_mapping_error(dev, adapter->rx_queue.queue_dma)) {
+                       dma_unmap_single(dev,
                                        adapter->rx_queue.queue_dma,
                                        adapter->rx_queue.queue_len,
                                        DMA_BIDIRECTIONAL);
@@ -535,6 +534,7 @@ static int ibmveth_open(struct net_device *netdev)
        int rc;
        union ibmveth_buf_desc rxq_desc;
        int i;
+       struct device *dev;
 
        ibmveth_debug_printk("open starting\n");
 
@@ -563,17 +563,19 @@ static int ibmveth_open(struct net_device *netdev)
                return -ENOMEM;
        }
 
-       adapter->buffer_list_dma = dma_map_single(&adapter->vdev->dev,
+       dev = &adapter->vdev->dev;
+
+       adapter->buffer_list_dma = dma_map_single(dev,
                        adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
-       adapter->filter_list_dma = dma_map_single(&adapter->vdev->dev,
+       adapter->filter_list_dma = dma_map_single(dev,
                        adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
-       adapter->rx_queue.queue_dma = dma_map_single(&adapter->vdev->dev,
+       adapter->rx_queue.queue_dma = dma_map_single(dev,
                        adapter->rx_queue.queue_addr,
                        adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);
 
-       if((dma_mapping_error(adapter->buffer_list_dma) ) ||
-          (dma_mapping_error(adapter->filter_list_dma)) ||
-          (dma_mapping_error(adapter->rx_queue.queue_dma))) {
+       if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
+           (dma_mapping_error(dev, adapter->filter_list_dma)) ||
+           (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
                ibmveth_error_printk("unable to map filter or buffer list pages\n");
                ibmveth_cleanup(adapter);
                napi_disable(&adapter->napi);
@@ -645,7 +647,7 @@ static int ibmveth_open(struct net_device *netdev)
        adapter->bounce_buffer_dma =
            dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
                           netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
-       if (dma_mapping_error(adapter->bounce_buffer_dma)) {
+       if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
                ibmveth_error_printk("unable to map bounce buffer\n");
                ibmveth_cleanup(adapter);
                napi_disable(&adapter->napi);
@@ -922,7 +924,7 @@ static int ibmveth_start_xmit(struct sk_buff *skb, struct net_device *netdev)
                buf[1] = 0;
        }
 
-       if (dma_mapping_error(data_dma_addr)) {
+       if (dma_mapping_error(&adapter->vdev->dev, data_dma_addr)) {
                if (!firmware_has_feature(FW_FEATURE_CMO))
                        ibmveth_error_printk("tx: unable to map xmit buffer\n");
                skb_copy_from_linear_data(skb, adapter->bounce_buffer,
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index b8d0639c1cdfa655c2b3ab5f499de6310668cc02..c46864d626b28e44b7e25669ea319d1f74a2dd53 100644
@@ -1128,7 +1128,7 @@ static int veth_transmit_to_one(struct sk_buff *skb, HvLpIndex rlp,
        msg->data.addr[0] = dma_map_single(port->dev, skb->data,
                                skb->len, DMA_TO_DEVICE);
 
-       if (dma_mapping_error(msg->data.addr[0]))
+       if (dma_mapping_error(port->dev, msg->data.addr[0]))
                goto recycle_and_drop;
 
        msg->dev = port->dev;
@@ -1226,7 +1226,7 @@ static void veth_recycle_msg(struct veth_lpar_connection *cnx,
                dma_address = msg->data.addr[0];
                dma_length = msg->data.len[0];
 
-               if (!dma_mapping_error(dma_address))
+               if (!dma_mapping_error(msg->dev, dma_address))
                        dma_unmap_single(msg->dev, dma_address, dma_length,
                                        DMA_TO_DEVICE);
 
diff --git a/drivers/net/mlx4/eq.c b/drivers/net/mlx4/eq.c
index ea3a09aaa8440ac87a3ea98d85bc6ce1f0bc7fd1..7df928d3a3d82a55db3f970cf20257e6168161ec 100644
@@ -526,7 +526,7 @@ int mlx4_map_eq_icm(struct mlx4_dev *dev, u64 icm_virt)
                return -ENOMEM;
        priv->eq_table.icm_dma  = pci_map_page(dev->pdev, priv->eq_table.icm_page, 0,
                                               PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-       if (pci_dma_mapping_error(priv->eq_table.icm_dma)) {
+       if (pci_dma_mapping_error(dev->pdev, priv->eq_table.icm_dma)) {
                __free_page(priv->eq_table.icm_page);
                return -ENOMEM;
        }
diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c
index 993d87c9296f92361763e1662eae7ce545cb7aaf..edc0fd588985f51da906e325398ae36bac2f715c 100644
@@ -650,7 +650,7 @@ static void pasemi_mac_replenish_rx_ring(const struct net_device *dev,
                                     mac->bufsz - LOCAL_SKB_ALIGN,
                                     PCI_DMA_FROMDEVICE);
 
-               if (unlikely(dma_mapping_error(dma))) {
+               if (unlikely(pci_dma_mapping_error(mac->dma_pdev, dma))) {
                        dev_kfree_skb_irq(info->skb);
                        break;
                }
@@ -1519,7 +1519,7 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
        map[0] = pci_map_single(mac->dma_pdev, skb->data, skb_headlen(skb),
                                PCI_DMA_TODEVICE);
        map_size[0] = skb_headlen(skb);
-       if (dma_mapping_error(map[0]))
+       if (pci_dma_mapping_error(mac->dma_pdev, map[0]))
                goto out_err_nolock;
 
        for (i = 0; i < nfrags; i++) {
@@ -1529,7 +1529,7 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev)
                                        frag->page_offset, frag->size,
                                        PCI_DMA_TODEVICE);
                map_size[i+1] = frag->size;
-               if (dma_mapping_error(map[i+1])) {
+               if (pci_dma_mapping_error(mac->dma_pdev, map[i+1])) {
                        nfrags = i;
                        goto out_err_nolock;
                }
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index e7d48a352beb0dabe4f3a70e5f4512949eae1f6a..e82b37bbd6c3534c244ef41d7d848c04d1d19caa 100644
@@ -328,7 +328,7 @@ static void ql_release_to_lrg_buf_free_list(struct ql3_adapter *qdev,
                                             qdev->lrg_buffer_len -
                                             QL_HEADER_SPACE,
                                             PCI_DMA_FROMDEVICE);
-                       err = pci_dma_mapping_error(map);
+                       err = pci_dma_mapping_error(qdev->pdev, map);
                        if(err) {
                                printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
                                       qdev->ndev->name, err);
@@ -1919,7 +1919,7 @@ static int ql_populate_free_queue(struct ql3_adapter *qdev)
                                                     QL_HEADER_SPACE,
                                                     PCI_DMA_FROMDEVICE);
 
-                               err = pci_dma_mapping_error(map);
+                               err = pci_dma_mapping_error(qdev->pdev, map);
                                if(err) {
                                        printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
                                               qdev->ndev->name, err);
@@ -2454,7 +2454,7 @@ static int ql_send_map(struct ql3_adapter *qdev,
         */
        map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
 
-       err = pci_dma_mapping_error(map);
+       err = pci_dma_mapping_error(qdev->pdev, map);
        if(err) {
                printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
                       qdev->ndev->name, err);
@@ -2487,7 +2487,7 @@ static int ql_send_map(struct ql3_adapter *qdev,
                                                     sizeof(struct oal),
                                                     PCI_DMA_TODEVICE);
 
-                               err = pci_dma_mapping_error(map);
+                               err = pci_dma_mapping_error(qdev->pdev, map);
                                if(err) {
 
                                        printk(KERN_ERR "%s: PCI mapping outbound address list with error: %d\n",
@@ -2514,7 +2514,7 @@ static int ql_send_map(struct ql3_adapter *qdev,
                                         frag->page_offset, frag->size,
                                         PCI_DMA_TODEVICE);
 
-                       err = pci_dma_mapping_error(map);
+                       err = pci_dma_mapping_error(qdev->pdev, map);
                        if(err) {
                                printk(KERN_ERR "%s: PCI mapping frags failed with error: %d\n",
                                       qdev->ndev->name, err);
@@ -2916,7 +2916,7 @@ static int ql_alloc_large_buffers(struct ql3_adapter *qdev)
                                             QL_HEADER_SPACE,
                                             PCI_DMA_FROMDEVICE);
 
-                       err = pci_dma_mapping_error(map);
+                       err = pci_dma_mapping_error(qdev->pdev, map);
                        if(err) {
                                printk(KERN_ERR "%s: PCI mapping failed with error: %d\n",
                                       qdev->ndev->name, err);
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index 9dae40ccf0482cdce2a9df3bb25ee0e7730a35ae..86d77d05190aca1a89f038b5a1e90dae584d5345 100644
@@ -2512,8 +2512,8 @@ static void stop_nic(struct s2io_nic *nic)
  *   Return Value:
  *  SUCCESS on success or an appropriate -ve value on failure.
  */
-
-static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
+static int fill_rx_buffers(struct s2io_nic *nic, struct ring_info *ring,
+                               int from_card_up)
 {
        struct sk_buff *skb;
        struct RxD_t *rxdp;
@@ -2602,7 +2602,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
                        rxdp1->Buffer0_ptr = pci_map_single
                            (ring->pdev, skb->data, size - NET_IP_ALIGN,
                                PCI_DMA_FROMDEVICE);
-                       if(pci_dma_mapping_error(rxdp1->Buffer0_ptr))
+                       if (pci_dma_mapping_error(nic->pdev,
+                                               rxdp1->Buffer0_ptr))
                                goto pci_map_failed;
 
                        rxdp->Control_2 =
@@ -2636,7 +2637,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
                                rxdp3->Buffer0_ptr =
                                   pci_map_single(ring->pdev, ba->ba_0,
                                        BUF0_LEN, PCI_DMA_FROMDEVICE);
-                               if (pci_dma_mapping_error(rxdp3->Buffer0_ptr))
+                               if (pci_dma_mapping_error(nic->pdev,
+                                               rxdp3->Buffer0_ptr))
                                        goto pci_map_failed;
                        } else
                                pci_dma_sync_single_for_device(ring->pdev,
@@ -2655,7 +2657,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
                                (ring->pdev, skb->data, ring->mtu + 4,
                                                PCI_DMA_FROMDEVICE);
 
-                               if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
+                               if (pci_dma_mapping_error(nic->pdev,
+                                                       rxdp3->Buffer2_ptr))
                                        goto pci_map_failed;
 
                                if (from_card_up) {
@@ -2664,8 +2667,8 @@ static int fill_rx_buffers(struct ring_info *ring, int from_card_up)
                                                ba->ba_1, BUF1_LEN,
                                                PCI_DMA_FROMDEVICE);
 
-                                       if (pci_dma_mapping_error
-                                               (rxdp3->Buffer1_ptr)) {
+                                       if (pci_dma_mapping_error(nic->pdev,
+                                               rxdp3->Buffer1_ptr)) {
                                                pci_unmap_single
                                                        (ring->pdev,
                                                    (dma_addr_t)(unsigned long)
@@ -2806,9 +2809,9 @@ static void free_rx_buffers(struct s2io_nic *sp)
        }
 }
 
-static int s2io_chk_rx_buffers(struct ring_info *ring)
+static int s2io_chk_rx_buffers(struct s2io_nic *nic, struct ring_info *ring)
 {
-       if (fill_rx_buffers(ring, 0) == -ENOMEM) {
+       if (fill_rx_buffers(nic, ring, 0) == -ENOMEM) {
                DBG_PRINT(INFO_DBG, "%s:Out of memory", ring->dev->name);
                DBG_PRINT(INFO_DBG, " in Rx Intr!!\n");
        }
@@ -2848,7 +2851,7 @@ static int s2io_poll_msix(struct napi_struct *napi, int budget)
                return 0;
 
        pkts_processed = rx_intr_handler(ring, budget);
-       s2io_chk_rx_buffers(ring);
+       s2io_chk_rx_buffers(nic, ring);
 
        if (pkts_processed < budget_org) {
                netif_rx_complete(dev, napi);
@@ -2882,7 +2885,7 @@ static int s2io_poll_inta(struct napi_struct *napi, int budget)
        for (i = 0; i < config->rx_ring_num; i++) {
                ring = &mac_control->rings[i];
                ring_pkts_processed = rx_intr_handler(ring, budget);
-               s2io_chk_rx_buffers(ring);
+               s2io_chk_rx_buffers(nic, ring);
                pkts_processed += ring_pkts_processed;
                budget -= ring_pkts_processed;
                if (budget <= 0)
@@ -2939,7 +2942,8 @@ static void s2io_netpoll(struct net_device *dev)
                rx_intr_handler(&mac_control->rings[i], 0);
 
        for (i = 0; i < config->rx_ring_num; i++) {
-               if (fill_rx_buffers(&mac_control->rings[i], 0) == -ENOMEM) {
+               if (fill_rx_buffers(nic, &mac_control->rings[i], 0) ==
+                               -ENOMEM) {
                        DBG_PRINT(INFO_DBG, "%s:Out of memory", dev->name);
                        DBG_PRINT(INFO_DBG, " in Rx Netpoll!!\n");
                        break;
@@ -4235,14 +4239,14 @@ static int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
                txdp->Buffer_Pointer = pci_map_single(sp->pdev,
                                        fifo->ufo_in_band_v,
                                        sizeof(u64), PCI_DMA_TODEVICE);
-               if (pci_dma_mapping_error(txdp->Buffer_Pointer))
+               if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
                        goto pci_map_failed;
                txdp++;
        }
 
        txdp->Buffer_Pointer = pci_map_single
            (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
-       if (pci_dma_mapping_error(txdp->Buffer_Pointer))
+       if (pci_dma_mapping_error(sp->pdev, txdp->Buffer_Pointer))
                goto pci_map_failed;
 
        txdp->Host_Control = (unsigned long) skb;
@@ -4345,7 +4349,7 @@ static irqreturn_t s2io_msix_ring_handle(int irq, void *dev_id)
                netif_rx_schedule(dev, &ring->napi);
        } else {
                rx_intr_handler(ring, 0);
-               s2io_chk_rx_buffers(ring);
+               s2io_chk_rx_buffers(sp, ring);
        }
 
        return IRQ_HANDLED;
@@ -4826,7 +4830,7 @@ static irqreturn_t s2io_isr(int irq, void *dev_id)
                 */
                if (!config->napi) {
                        for (i = 0; i < config->rx_ring_num; i++)
-                               s2io_chk_rx_buffers(&mac_control->rings[i]);
+                               s2io_chk_rx_buffers(sp, &mac_control->rings[i]);
                }
                writeq(sp->general_int_mask, &bar0->general_int_mask);
                readl(&bar0->general_int_status);
@@ -6859,7 +6863,7 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
                                pci_map_single( sp->pdev, (*skb)->data,
                                        size - NET_IP_ALIGN,
                                        PCI_DMA_FROMDEVICE);
-                       if (pci_dma_mapping_error(rxdp1->Buffer0_ptr))
+                       if (pci_dma_mapping_error(sp->pdev, rxdp1->Buffer0_ptr))
                                goto memalloc_failed;
                        rxdp->Host_Control = (unsigned long) (*skb);
                }
@@ -6886,12 +6890,13 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
                                pci_map_single(sp->pdev, (*skb)->data,
                                               dev->mtu + 4,
                                               PCI_DMA_FROMDEVICE);
-                       if (pci_dma_mapping_error(rxdp3->Buffer2_ptr))
+                       if (pci_dma_mapping_error(sp->pdev, rxdp3->Buffer2_ptr))
                                goto memalloc_failed;
                        rxdp3->Buffer0_ptr = *temp0 =
                                pci_map_single( sp->pdev, ba->ba_0, BUF0_LEN,
                                                PCI_DMA_FROMDEVICE);
-                       if (pci_dma_mapping_error(rxdp3->Buffer0_ptr)) {
+                       if (pci_dma_mapping_error(sp->pdev,
+                                               rxdp3->Buffer0_ptr)) {
                                pci_unmap_single (sp->pdev,
                                        (dma_addr_t)rxdp3->Buffer2_ptr,
                                        dev->mtu + 4, PCI_DMA_FROMDEVICE);
@@ -6903,7 +6908,8 @@ static int set_rxd_buffer_pointer(struct s2io_nic *sp, struct RxD_t *rxdp,
                        rxdp3->Buffer1_ptr = *temp1 =
                                pci_map_single(sp->pdev, ba->ba_1, BUF1_LEN,
                                                PCI_DMA_FROMDEVICE);
-                       if (pci_dma_mapping_error(rxdp3->Buffer1_ptr)) {
+                       if (pci_dma_mapping_error(sp->pdev,
+                                               rxdp3->Buffer1_ptr)) {
                                pci_unmap_single (sp->pdev,
                                        (dma_addr_t)rxdp3->Buffer0_ptr,
                                        BUF0_LEN, PCI_DMA_FROMDEVICE);
@@ -7187,7 +7193,7 @@ static int s2io_card_up(struct s2io_nic * sp)
 
        for (i = 0; i < config->rx_ring_num; i++) {
                mac_control->rings[i].mtu = dev->mtu;
-               ret = fill_rx_buffers(&mac_control->rings[i], 1);
+               ret = fill_rx_buffers(sp, &mac_control->rings[i], 1);
                if (ret) {
                        DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
                                  dev->name);
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 601b001437c01b8bf13966448641e197e4eb749d..0d27dd39bc0938c95585137156365fa910e161d0 100644
@@ -233,7 +233,7 @@ static inline int efx_init_rx_buffer_skb(struct efx_rx_queue *rx_queue,
                                          rx_buf->data, rx_buf->len,
                                          PCI_DMA_FROMDEVICE);
 
-       if (unlikely(pci_dma_mapping_error(rx_buf->dma_addr))) {
+       if (unlikely(pci_dma_mapping_error(efx->pci_dev, rx_buf->dma_addr))) {
                dev_kfree_skb_any(rx_buf->skb);
                rx_buf->skb = NULL;
                return -EIO;
@@ -275,7 +275,7 @@ static inline int efx_init_rx_buffer_page(struct efx_rx_queue *rx_queue,
                                        0, efx_rx_buf_size(efx),
                                        PCI_DMA_FROMDEVICE);
 
-               if (unlikely(pci_dma_mapping_error(dma_addr))) {
+               if (unlikely(pci_dma_mapping_error(efx->pci_dev, dma_addr))) {
                        __free_pages(rx_buf->page, efx->rx_buffer_order);
                        rx_buf->page = NULL;
                        return -EIO;
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 5cdd082ab8f6aba562838dada2af5bd9ec214067..5e8374ab28eea5a9fc5198c0f955d8fdc614a622 100644
@@ -172,7 +172,7 @@ static inline int efx_enqueue_skb(struct efx_tx_queue *tx_queue,
 
        /* Process all fragments */
        while (1) {
-               if (unlikely(pci_dma_mapping_error(dma_addr)))
+               if (unlikely(pci_dma_mapping_error(pci_dev, dma_addr)))
                        goto pci_err;
 
                /* Store fields for marking in the per-fragment final
@@ -661,7 +661,8 @@ efx_tsoh_heap_alloc(struct efx_tx_queue *tx_queue, size_t header_len)
        tsoh->dma_addr = pci_map_single(tx_queue->efx->pci_dev,
                                        TSOH_BUFFER(tsoh), header_len,
                                        PCI_DMA_TODEVICE);
-       if (unlikely(pci_dma_mapping_error(tsoh->dma_addr))) {
+       if (unlikely(pci_dma_mapping_error(tx_queue->efx->pci_dev,
+                                          tsoh->dma_addr))) {
                kfree(tsoh);
                return NULL;
        }
@@ -863,7 +864,7 @@ static inline int tso_get_fragment(struct tso_state *st, struct efx_nic *efx,
 
        st->ifc.unmap_addr = pci_map_page(efx->pci_dev, page, page_off,
                                          len, PCI_DMA_TODEVICE);
-       if (likely(!pci_dma_mapping_error(st->ifc.unmap_addr))) {
+       if (likely(!pci_dma_mapping_error(efx->pci_dev, st->ifc.unmap_addr))) {
                st->ifc.unmap_len = len;
                st->ifc.len = len;
                st->ifc.dma_addr = st->ifc.unmap_addr;
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 00aa0b108cb9691982991bd819c53b1ac9b3252f..b6435d0d71f9a067f887f7102159ae0d9a651ed5 100644
@@ -452,7 +452,7 @@ spider_net_prepare_rx_descr(struct spider_net_card *card,
        /* iommu-map the skb */
        buf = pci_map_single(card->pdev, descr->skb->data,
                        SPIDER_NET_MAX_FRAME, PCI_DMA_FROMDEVICE);
-       if (pci_dma_mapping_error(buf)) {
+       if (pci_dma_mapping_error(card->pdev, buf)) {
                dev_kfree_skb_any(descr->skb);
                descr->skb = NULL;
                if (netif_msg_rx_err(card) && net_ratelimit())
@@ -691,7 +691,7 @@ spider_net_prepare_tx_descr(struct spider_net_card *card,
        unsigned long flags;
 
        buf = pci_map_single(card->pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
-       if (pci_dma_mapping_error(buf)) {
+       if (pci_dma_mapping_error(card->pdev, buf)) {
                if (netif_msg_tx_err(card) && net_ratelimit())
                        dev_err(&card->netdev->dev, "could not iommu-map packet (%p, %i). "
                                  "Dropping packet\n", skb->data, skb->len);
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index a645e5028c14bcd12053d77b31f4f0cfd7003c9b..8487ace9d2e3a8d73a8fedbe32a2447ec9575222 100644
@@ -506,7 +506,7 @@ static void *alloc_rxbuf_page(struct pci_dev *hwdev, dma_addr_t *dma_handle)
                return NULL;
        *dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE,
                                     PCI_DMA_FROMDEVICE);
-       if (pci_dma_mapping_error(*dma_handle)) {
+       if (pci_dma_mapping_error(hwdev, *dma_handle)) {
                free_page((unsigned long)buf);
                return NULL;
        }
@@ -536,7 +536,7 @@ static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev,
                return NULL;
        *dma_handle = pci_map_single(hwdev, skb->data, RX_BUF_SIZE,
                                     PCI_DMA_FROMDEVICE);
-       if (pci_dma_mapping_error(*dma_handle)) {
+       if (pci_dma_mapping_error(hwdev, *dma_handle)) {
                dev_kfree_skb_any(skb);
                return NULL;
        }
diff --git a/drivers/net/wireless/ath5k/base.c b/drivers/net/wireless/ath5k/base.c
index 217d506527a96d2f72e887f75b9cab2425554e0f..d9769c527346874f6f876babf37f67a11ebcd32e 100644
@@ -1166,7 +1166,7 @@ ath5k_rxbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
                bf->skb = skb;
                bf->skbaddr = pci_map_single(sc->pdev,
                        skb->data, sc->rxbufsize, PCI_DMA_FROMDEVICE);
-               if (unlikely(pci_dma_mapping_error(bf->skbaddr))) {
+               if (unlikely(pci_dma_mapping_error(sc->pdev, bf->skbaddr))) {
                        ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
                        dev_kfree_skb(skb);
                        bf->skb = NULL;
@@ -1918,7 +1918,7 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
        ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] "
                        "skbaddr %llx\n", skb, skb->data, skb->len,
                        (unsigned long long)bf->skbaddr);
-       if (pci_dma_mapping_error(bf->skbaddr)) {
+       if (pci_dma_mapping_error(sc->pdev, bf->skbaddr)) {
                ATH5K_ERR(sc, "beacon DMA mapping failed\n");
                return -EIO;
        }
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index c4a7c06793c5ff6bc96257cbc77868dd1c25e49c..61f8fdea2d96e24824bb2926e1f4cd126912377f 100644
@@ -3525,7 +3525,7 @@ static int ibmvfc_init_crq(struct ibmvfc_host *vhost)
        crq->msg_token = dma_map_single(dev, crq->msgs,
                                        PAGE_SIZE, DMA_BIDIRECTIONAL);
 
-       if (dma_mapping_error(crq->msg_token))
+       if (dma_mapping_error(dev, crq->msg_token))
                goto map_failed;
 
        retrc = rc = plpar_hcall_norets(H_REG_CRQ, vdev->unit_address,
@@ -3618,7 +3618,7 @@ static int ibmvfc_alloc_mem(struct ibmvfc_host *vhost)
                                            async_q->size * sizeof(*async_q->msgs),
                                            DMA_BIDIRECTIONAL);
 
-       if (dma_mapping_error(async_q->msg_token)) {
+       if (dma_mapping_error(dev, async_q->msg_token)) {
                dev_err(dev, "Failed to map async queue\n");
                goto free_async_crq;
        }
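The same rule holds for the generic API: the struct device handed to dma_mapping_error() must be the one the buffer was mapped against, because on x86 it now selects which dma_mapping_ops performs the check. A hedged sketch of the pattern the ibmvfc/ibmvscsi hunks follow:

	/* Sketch; "dev" is the device used for the mapping, e.g.
	 * hostdata->dev in the hunks around here. */
	dma_addr_t token;

	token = dma_map_single(dev, queue->msgs,
			       queue->size * sizeof(*queue->msgs),
			       DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, token)) {
		dev_err(dev, "failed to map queue\n");
		goto map_failed;
	}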
index 20000ec79b043a9a17fcfc54b45e58cce556f064..6b24b9cdb04cda6666837684cd232d271dfc4b10 100644
@@ -859,7 +859,7 @@ static void send_mad_adapter_info(struct ibmvscsi_host_data *hostdata)
                                            sizeof(hostdata->madapter_info),
                                            DMA_BIDIRECTIONAL);
 
-       if (dma_mapping_error(req->buffer)) {
+       if (dma_mapping_error(hostdata->dev, req->buffer)) {
                if (!firmware_has_feature(FW_FEATURE_CMO))
                        dev_err(hostdata->dev,
                                "Unable to map request_buffer for "
@@ -1407,7 +1407,7 @@ static int ibmvscsi_do_host_config(struct ibmvscsi_host_data *hostdata,
                                                    length,
                                                    DMA_BIDIRECTIONAL);
 
-       if (dma_mapping_error(host_config->buffer)) {
+       if (dma_mapping_error(hostdata->dev, host_config->buffer)) {
                if (!firmware_has_feature(FW_FEATURE_CMO))
                        dev_err(hostdata->dev,
                                "dma_mapping error getting host config\n");
index 3b9514c8f1f17321a2b3532b9572336f81cf9c74..2e13ec00172aca14ccdea9da8daf6ed339e179ef 100644
@@ -564,7 +564,7 @@ static int crq_queue_create(struct crq_queue *queue, struct srp_target *target)
                                          queue->size * sizeof(*queue->msgs),
                                          DMA_BIDIRECTIONAL);
 
-       if (dma_mapping_error(queue->msg_token))
+       if (dma_mapping_error(target->dev, queue->msg_token))
                goto map_failed;
 
        err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,
index 182146100dc12220ee20e55d44cb8e0d3a7bf618..462a8574dad96ffa6020c9aa27886c545b8a7988 100644
@@ -253,7 +253,7 @@ static int rpavscsi_init_crq_queue(struct crq_queue *queue,
                                          queue->size * sizeof(*queue->msgs),
                                          DMA_BIDIRECTIONAL);
 
-       if (dma_mapping_error(queue->msg_token))
+       if (dma_mapping_error(hostdata->dev, queue->msg_token))
                goto map_failed;
 
        gather_partition_info();
index e81d59d789108041fee33e62b22b82d14e34fef3..0c7165660853a7194b16f8b2ed3cbf4849d78eb2 100644
@@ -313,14 +313,14 @@ atmel_spi_dma_map_xfer(struct atmel_spi *as, struct spi_transfer *xfer)
                xfer->tx_dma = dma_map_single(dev,
                                (void *) xfer->tx_buf, xfer->len,
                                DMA_TO_DEVICE);
-               if (dma_mapping_error(xfer->tx_dma))
+               if (dma_mapping_error(dev, xfer->tx_dma))
                        return -ENOMEM;
        }
        if (xfer->rx_buf) {
                xfer->rx_dma = dma_map_single(dev,
                                xfer->rx_buf, xfer->len,
                                DMA_FROM_DEVICE);
-               if (dma_mapping_error(xfer->rx_dma)) {
+               if (dma_mapping_error(dev, xfer->rx_dma)) {
                        if (xfer->tx_buf)
                                dma_unmap_single(dev,
                                                xfer->tx_dma, xfer->len,
index 9149689c79d9e4ac198fefe778a9aa2de27e065e..87b73e0169c51eb2137d29679ceccbbf179b1568 100644
@@ -334,7 +334,7 @@ static int au1550_spi_dma_rxtmp_alloc(struct au1550_spi *hw, unsigned size)
        hw->dma_rx_tmpbuf_size = size;
        hw->dma_rx_tmpbuf_addr = dma_map_single(hw->dev, hw->dma_rx_tmpbuf,
                        size, DMA_FROM_DEVICE);
-       if (dma_mapping_error(hw->dma_rx_tmpbuf_addr)) {
+       if (dma_mapping_error(hw->dev, hw->dma_rx_tmpbuf_addr)) {
                kfree(hw->dma_rx_tmpbuf);
                hw->dma_rx_tmpbuf = 0;
                hw->dma_rx_tmpbuf_size = 0;
@@ -378,7 +378,7 @@ static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t)
                        dma_rx_addr = dma_map_single(hw->dev,
                                        (void *)t->rx_buf,
                                        t->len, DMA_FROM_DEVICE);
-                       if (dma_mapping_error(dma_rx_addr))
+                       if (dma_mapping_error(hw->dev, dma_rx_addr))
                                dev_err(hw->dev, "rx dma map error\n");
                }
        } else {
@@ -401,7 +401,7 @@ static int au1550_spi_dma_txrxb(struct spi_device *spi, struct spi_transfer *t)
                        dma_tx_addr = dma_map_single(hw->dev,
                                        (void *)t->tx_buf,
                                        t->len, DMA_TO_DEVICE);
-                       if (dma_mapping_error(dma_tx_addr))
+                       if (dma_mapping_error(hw->dev, dma_tx_addr))
                                dev_err(hw->dev, "tx dma map error\n");
                }
        } else {
index b1cc148036c1e378ef870c0fa30f614f2a92745a..f6f987bb71ca39413192c0a3de0e8f2a2ac2e6a0 100644
@@ -836,7 +836,7 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
                if (tx_buf != NULL) {
                        t->tx_dma = dma_map_single(&spi->dev, (void *) tx_buf,
                                        len, DMA_TO_DEVICE);
-                       if (dma_mapping_error(t->tx_dma)) {
+                       if (dma_mapping_error(&spi->dev, t->tx_dma)) {
                                dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
                                                'T', len);
                                return -EINVAL;
@@ -845,7 +845,7 @@ static int omap2_mcspi_transfer(struct spi_device *spi, struct spi_message *m)
                if (rx_buf != NULL) {
                        t->rx_dma = dma_map_single(&spi->dev, rx_buf, t->len,
                                        DMA_FROM_DEVICE);
-                       if (dma_mapping_error(t->rx_dma)) {
+                       if (dma_mapping_error(&spi->dev, t->rx_dma)) {
                                dev_dbg(&spi->dev, "dma %cX %d bytes error\n",
                                                'R', len);
                                if (tx_buf != NULL)
index 0c452c46ab0778bf3e1c5458dcec0c270a979016..067299d6d1921045f81771530c0ebba268e51b3b 100644
@@ -353,7 +353,7 @@ static int map_dma_buffers(struct driver_data *drv_data)
        drv_data->rx_dma = dma_map_single(dev, drv_data->rx,
                                                drv_data->rx_map_len,
                                                DMA_FROM_DEVICE);
-       if (dma_mapping_error(drv_data->rx_dma))
+       if (dma_mapping_error(dev, drv_data->rx_dma))
                return 0;
 
        /* Stream map the tx buffer */
@@ -361,7 +361,7 @@ static int map_dma_buffers(struct driver_data *drv_data)
                                                drv_data->tx_map_len,
                                                DMA_TO_DEVICE);
 
-       if (dma_mapping_error(drv_data->tx_dma)) {
+       if (dma_mapping_error(dev, drv_data->tx_dma)) {
                dma_unmap_single(dev, drv_data->rx_dma,
                                        drv_data->rx_map_len, DMA_FROM_DEVICE);
                return 0;
index 54ac7bea5f8c0e35204d20d004ca4be3d6da736d..6fb77fcc49711548bed2cd1248cb8246b5b770fb 100644
@@ -491,7 +491,7 @@ static int map_dma_buffers(struct driver_data *drv_data)
                                                        buf,
                                                        drv_data->tx_map_len,
                                                        DMA_TO_DEVICE);
-                       if (dma_mapping_error(drv_data->tx_dma))
+                       if (dma_mapping_error(dev, drv_data->tx_dma))
                                return -1;
 
                        drv_data->tx_dma_needs_unmap = 1;
@@ -516,7 +516,7 @@ static int map_dma_buffers(struct driver_data *drv_data)
                                        buf,
                                        drv_data->len,
                                        DMA_FROM_DEVICE);
-               if (dma_mapping_error(drv_data->rx_dma))
+               if (dma_mapping_error(dev, drv_data->rx_dma))
                        return -1;
                drv_data->rx_dma_needs_unmap = 1;
        }
@@ -534,7 +534,7 @@ static int map_dma_buffers(struct driver_data *drv_data)
                                        buf,
                                        drv_data->tx_map_len,
                                        DMA_TO_DEVICE);
-       if (dma_mapping_error(drv_data->tx_dma)) {
+       if (dma_mapping_error(dev, drv_data->tx_dma)) {
                if (drv_data->rx_dma) {
                        dma_unmap_single(dev,
                                        drv_data->rx_dma,
index db351d1296f4bb13a9dfab082c7ce2dc66969e62..a5801ae02e4be31cb7232626b6f6468a9495244e 100644
@@ -24,8 +24,8 @@
                pci_unmap_sg(alpha_gendev_to_pci(dev), sg, nents, dir)
 #define dma_supported(dev, mask)                       \
                pci_dma_supported(alpha_gendev_to_pci(dev), mask)
-#define dma_mapping_error(addr)                                \
-               pci_dma_mapping_error(addr)
+#define dma_mapping_error(dev, addr)                           \
+               pci_dma_mapping_error(alpha_gendev_to_pci(dev), addr)
 
 #else  /* no PCI - no IOMMU. */
 
@@ -45,7 +45,7 @@ int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
 #define dma_unmap_page(dev, addr, size, dir)   ((void)0)
 #define dma_unmap_sg(dev, sg, nents, dir)      ((void)0)
 
-#define dma_mapping_error(addr)  (0)
+#define dma_mapping_error(dev, addr)  (0)
 
 #endif /* !CONFIG_PCI */
 
index d31fd49ff79a40fc7d00d22528f9b955739786e9..2a14302c17a337194cf3df0a7d20ae4dfa28273d 100644
@@ -106,7 +106,7 @@ extern dma_addr_t pci_map_page(struct pci_dev *, struct page *,
 /* Test for pci_map_single or pci_map_page having generated an error.  */
 
 static inline int
-pci_dma_mapping_error(dma_addr_t dma_addr)
+pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
 {
        return dma_addr == 0;
 }
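These per-architecture stubs show why a global test cannot survive per-device dma_ops: there is no universal error cookie. Collected from the hunks in this patch (illustrative summary, not new code):

	return dma_addr == 0;			/* alpha's PCI stub */
	return dma_addr == ~0;			/* the "all-bits-set" convention */
	return (dma_addr == DMA_ERROR_CODE);	/* powerpc64 and others */
	return (dma_addr == bad_dma_address);	/* x86_64 */

Once one machine can mix IOMMUs, only the device identifies which comparison applies.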
index e99406a7bece7264569542b85a72ea8c7d3aaa1e..f41335ba63379ad77d5e75599be68b6f010ecc87 100644
@@ -56,7 +56,7 @@ static inline int dma_is_consistent(struct device *dev, dma_addr_t handle)
 /*
  * DMA errors are defined by all-bits-set in the DMA address.
  */
-static inline int dma_mapping_error(dma_addr_t dma_addr)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
        return dma_addr == ~0;
 }
index 57dc672bab8ed3fb9666c15039fa07260fe80a4c..0399359ab5d80f043b54dfe792baa65c1e2364fa 100644
@@ -35,7 +35,7 @@ static inline int dma_set_mask(struct device *dev, u64 dma_mask)
 /*
  * dma_map_single can't fail as it is implemented now.
  */
-static inline int dma_mapping_error(dma_addr_t addr)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t addr)
 {
        return 0;
 }
index edc8d1bfaae2a6fa303f7dfa1f844fdff7eac812..cb2fb25ff8d9dcb79729b6b0865193357b5eda7d 100644
@@ -120,7 +120,7 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
 }
 
 static inline int
-dma_mapping_error(dma_addr_t dma_addr)
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
        return 0;
 }
index 2e8966ca030d46cd4b7514a3a0c4589aeba667b7..b2898877c07b5048fb820fe1b5e1350a53ccf5fe 100644
@@ -126,7 +126,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nele
 }
 
 static inline
-int dma_mapping_error(dma_addr_t dma_addr)
+int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
        return 0;
 }
index e2468f894d2aab0d2e22d1fa87b86ab03160b7cb..82cd0cb1c3fe0100f6a0194491324e11d6deb95b 100644
@@ -61,7 +61,7 @@ dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
 #define dma_sync_sg_for_device dma_sync_sg_for_cpu
 
 extern int
-dma_mapping_error(dma_addr_t dma_addr);
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
 
 extern int
 dma_supported(struct device *dev, u64 mask);
index 783ab9944d701234cfd80eeaf34da6ab90af6f04..189486c3f92e3e775d21bf56444e5c6b2d576216 100644
@@ -144,9 +144,9 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
 }
 
 static inline int
-dma_mapping_error(dma_addr_t dma_addr)
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
-       return pci_dma_mapping_error(dma_addr);
+       return pci_dma_mapping_error(to_pci_dev(dev), dma_addr);
 }
 
 
index 25c10e96b2b794099ead8769dad4b5f8146a19ba..37b3706226e74f67e52d80dce326968e05c0655c 100644
@@ -99,9 +99,9 @@ pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg,
 }
 
 static inline int
-pci_dma_mapping_error(dma_addr_t dma_addr)
+pci_dma_mapping_error(struct pci_dev *pdev, dma_addr_t dma_addr)
 {
-       return dma_mapping_error(dma_addr);
+       return dma_mapping_error(&pdev->dev, dma_addr);
 }
 
 #endif
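The pci_* spellings stay thin wrappers that forward the struct device embedded in the pci_dev, so after this patch the two checks below are equivalent:

	pci_dma_mapping_error(pdev, addr);
	dma_mapping_error(&pdev->dev, addr);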
index 0721a5e8271e7ffe2181a487689f155b7b85cb89..a6d50c77b6bfaa467eb5ef51c645348316082316 100644
@@ -54,7 +54,7 @@ typedef void ia64_mv_dma_sync_single_for_cpu (struct device *, dma_addr_t, size_
 typedef void ia64_mv_dma_sync_sg_for_cpu (struct device *, struct scatterlist *, int, int);
 typedef void ia64_mv_dma_sync_single_for_device (struct device *, dma_addr_t, size_t, int);
 typedef void ia64_mv_dma_sync_sg_for_device (struct device *, struct scatterlist *, int, int);
-typedef int ia64_mv_dma_mapping_error (dma_addr_t dma_addr);
+typedef int ia64_mv_dma_mapping_error(struct device *, dma_addr_t dma_addr);
 typedef int ia64_mv_dma_supported (struct device *, u64);
 
 typedef dma_addr_t ia64_mv_dma_map_single_attrs (struct device *, void *, size_t, int, struct dma_attrs *);
index a26cdeb46a575e2a0756ed8c4226fe64a4139a9c..91f7944333d49d6ce2f861036cddf0492ce76b9c 100644
@@ -84,7 +84,7 @@ static inline void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *s
 {
 }
 
-static inline int dma_mapping_error(dma_addr_t handle)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t handle)
 {
        return 0;
 }
index 230b3f1b69b1015b3d639691200f97ebad45052e..c64afb40cd0698dce532067e66e70d2a27bbbea9 100644
@@ -42,7 +42,7 @@ extern void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
        int nelems, enum dma_data_direction direction);
 extern void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
        int nelems, enum dma_data_direction direction);
-extern int dma_mapping_error(dma_addr_t dma_addr);
+extern int dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
 extern int dma_supported(struct device *dev, u64 mask);
 
 static inline int
index 7c882fca9ec86cd056aa776782f4d2d8e15c01d0..ccae8f6c6326b17f6224ad1e555999a9c9cd5d5a 100644
@@ -182,7 +182,7 @@ void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
 }
 
 static inline
-int dma_mapping_error(dma_addr_t dma_addr)
+int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
        return 0;
 }
index c6c0e9ff6bdea3eeb623893fb2d8d3a078d893f9..53af696f23d2fda10025551686d9ce15e01f1f00 100644
@@ -248,6 +248,6 @@ void * sba_get_iommu(struct parisc_device *dev);
 #endif
 
 /* At the moment, we panic on error for IOMMU resource exhaustion */
-#define dma_mapping_error(x)   0
+#define dma_mapping_error(dev, x)      0
 
 #endif
index 74c549780987ba8e9feb48a299c9455c32cfce93..c7ca45f97dd210e9ef611ba8d4877b6f64067021 100644
@@ -415,7 +415,7 @@ static inline void dma_sync_sg_for_device(struct device *dev,
                __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
 }
 
-static inline int dma_mapping_error(dma_addr_t dma_addr)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
 #ifdef CONFIG_PPC64
        return (dma_addr == DMA_ERROR_CODE);
index 22cc419389fe6b54717f2c13ed8814fa87703ef1..6c0b8a2de14323d63c57d45dc17fa3d1bc41d950 100644
@@ -171,7 +171,7 @@ static inline int dma_get_cache_alignment(void)
        return L1_CACHE_BYTES;
 }
 
-static inline int dma_mapping_error(dma_addr_t dma_addr)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
        return dma_addr == 0;
 }
index 38cbec76a33f1c04bd14948786891e90185c9277..bfa64f9702d548806c9bd2fa0a117195d6c25ef0 100644
@@ -135,7 +135,7 @@ static inline void dma_sync_sg_for_device(struct device *dev,
        /* No flushing needed to sync cpu writes to the device.  */
 }
 
-static inline int dma_mapping_error(dma_addr_t dma_addr)
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
        return (dma_addr == DMA_ERROR_CODE);
 }
index b93b6c79e08f6d48692f1539ced9fb7dbfeb8629..0ee949d220c06d3c3a7beec15b19ca3abf33e367 100644
@@ -154,7 +154,8 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
 
 #define PCI_DMA_ERROR_CODE      (~(dma_addr_t)0x0)
 
-static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
+static inline int pci_dma_mapping_error(struct pci_dev *pdev,
+                                       dma_addr_t dma_addr)
 {
         return (dma_addr == PCI_DMA_ERROR_CODE);
 }
index f59f2571295b69e8304ba406d54d17d7e6260b49..4f79a54948f6913c00c8b4400a9e1849cbc7b803 100644
@@ -140,9 +140,10 @@ extern int pci_dma_supported(struct pci_dev *hwdev, u64 mask);
 #define PCI64_REQUIRED_MASK    (~(dma64_addr_t)0)
 #define PCI64_ADDR_BASE                0xfffc000000000000UL
 
-static inline int pci_dma_mapping_error(dma_addr_t dma_addr)
+static inline int pci_dma_mapping_error(struct pci_dev *pdev,
+                                       dma_addr_t dma_addr)
 {
-       return dma_mapping_error(dma_addr);
+       return dma_mapping_error(&pdev->dev, dma_addr);
 }
 
 #ifdef CONFIG_PCI
index 87a715367a1b11a9db24d569682d22f780352be7..3c034f48fdb0a12f5a3fefe1499d014b68e6f4a2 100644
@@ -5,6 +5,9 @@ struct dev_archdata {
 #ifdef CONFIG_ACPI
        void    *acpi_handle;
 #endif
+#ifdef CONFIG_X86_64
+struct dma_mapping_ops *dma_ops;
+#endif
 #ifdef CONFIG_DMAR
        void *iommu; /* hook for IOMMU specific extension */
 #endif
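This archdata pointer is the hook the changelog describes: when it is non-NULL the x86 DMA inlines use it, otherwise they fall back to the system-wide dma_ops. A hypothetical sketch (not part of this patch) of how the Calgary fix could use it; calgary_device_is_translated() is an invented helper, while nommu_dma_ops is x86's existing no-IOMMU implementation:

	/* Hypothetical: route devices that are not behind the IOMMU to
	 * the nommu ops while the global dma_ops stay Calgary's. */
	static void calgary_fixup_one(struct pci_dev *pdev)
	{
		if (!calgary_device_is_translated(pdev))	/* invented */
			pdev->dev.archdata.dma_ops = &nommu_dma_ops;
	}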
index c2ddd3d1b8831a98fe139639e131f458bfc41d99..0eaa9bf6011fca2987c0b8731c3b25c8113dc820 100644
@@ -17,7 +17,8 @@ extern int panic_on_overflow;
 extern int force_iommu;
 
 struct dma_mapping_ops {
-       int             (*mapping_error)(dma_addr_t dma_addr);
+       int             (*mapping_error)(struct device *dev,
+                                        dma_addr_t dma_addr);
        void*           (*alloc_coherent)(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp);
        void            (*free_coherent)(struct device *dev, size_t size,
@@ -56,14 +57,32 @@ struct dma_mapping_ops {
        int             is_phys;
 };
 
-extern const struct dma_mapping_ops *dma_ops;
+extern struct dma_mapping_ops *dma_ops;
 
-static inline int dma_mapping_error(dma_addr_t dma_addr)
+static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
 {
-       if (dma_ops->mapping_error)
-               return dma_ops->mapping_error(dma_addr);
+#ifdef CONFIG_X86_32
+       return dma_ops;
+#else
+       if (unlikely(!dev) || !dev->archdata.dma_ops)
+               return dma_ops;
+       else
+               return dev->archdata.dma_ops;
+#endif
+}
+
+/* Make sure we keep the same behaviour */
+static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
+{
+#ifdef CONFIG_X86_32
+       return 0;
+#else
+       struct dma_mapping_ops *ops = get_dma_ops(dev);
+       if (ops->mapping_error)
+               return ops->mapping_error(dev, dma_addr);
 
        return (dma_addr == bad_dma_address);
+#endif
 }
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
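The dispatch order is now uniform with the rest of the DMA inlines: per-device ops when dev->archdata.dma_ops is set, the global dma_ops otherwise (including for a NULL dev), then the ops' mapping_error hook when one exists, with the bad_dma_address compare as the fallback; 32-bit, where mappings cannot fail, keeps returning 0. Callers only see the extra argument:

	/* Usage sketch: the check is routed through whatever ops
	 * performed the mapping. */
	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, handle))
		return -ENOMEM;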
@@ -83,44 +102,53 @@ static inline dma_addr_t
 dma_map_single(struct device *hwdev, void *ptr, size_t size,
               int direction)
 {
+       struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
        BUG_ON(!valid_dma_direction(direction));
-       return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
+       return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
 }
 
 static inline void
 dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
                 int direction)
 {
+       struct dma_mapping_ops *ops = get_dma_ops(dev);
+
        BUG_ON(!valid_dma_direction(direction));
-       if (dma_ops->unmap_single)
-               dma_ops->unmap_single(dev, addr, size, direction);
+       if (ops->unmap_single)
+               ops->unmap_single(dev, addr, size, direction);
 }
 
 static inline int
 dma_map_sg(struct device *hwdev, struct scatterlist *sg,
           int nents, int direction)
 {
+       struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
        BUG_ON(!valid_dma_direction(direction));
-       return dma_ops->map_sg(hwdev, sg, nents, direction);
+       return ops->map_sg(hwdev, sg, nents, direction);
 }
 
 static inline void
 dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
             int direction)
 {
+       struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
        BUG_ON(!valid_dma_direction(direction));
-       if (dma_ops->unmap_sg)
-               dma_ops->unmap_sg(hwdev, sg, nents, direction);
+       if (ops->unmap_sg)
+               ops->unmap_sg(hwdev, sg, nents, direction);
 }
 
 static inline void
 dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                        size_t size, int direction)
 {
+       struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
        BUG_ON(!valid_dma_direction(direction));
-       if (dma_ops->sync_single_for_cpu)
-               dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
-                                            direction);
+       if (ops->sync_single_for_cpu)
+               ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
        flush_write_buffers();
 }
 
@@ -128,10 +156,11 @@ static inline void
 dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
                           size_t size, int direction)
 {
+       struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
        BUG_ON(!valid_dma_direction(direction));
-       if (dma_ops->sync_single_for_device)
-               dma_ops->sync_single_for_device(hwdev, dma_handle, size,
-                                               direction);
+       if (ops->sync_single_for_device)
+               ops->sync_single_for_device(hwdev, dma_handle, size, direction);
        flush_write_buffers();
 }
 
@@ -139,11 +168,12 @@ static inline void
 dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size, int direction)
 {
-       BUG_ON(!valid_dma_direction(direction));
-       if (dma_ops->sync_single_range_for_cpu)
-               dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
-                                                  size, direction);
+       struct dma_mapping_ops *ops = get_dma_ops(hwdev);
 
+       BUG_ON(!valid_dma_direction(direction));
+       if (ops->sync_single_range_for_cpu)
+               ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
+                                              size, direction);
        flush_write_buffers();
 }
 
@@ -152,11 +182,12 @@ dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 int direction)
 {
-       BUG_ON(!valid_dma_direction(direction));
-       if (dma_ops->sync_single_range_for_device)
-               dma_ops->sync_single_range_for_device(hwdev, dma_handle,
-                                                     offset, size, direction);
+       struct dma_mapping_ops *ops = get_dma_ops(hwdev);
 
+       BUG_ON(!valid_dma_direction(direction));
+       if (ops->sync_single_range_for_device)
+               ops->sync_single_range_for_device(hwdev, dma_handle,
+                                                 offset, size, direction);
        flush_write_buffers();
 }
 
@@ -164,9 +195,11 @@ static inline void
 dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                    int nelems, int direction)
 {
+       struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
        BUG_ON(!valid_dma_direction(direction));
-       if (dma_ops->sync_sg_for_cpu)
-               dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
+       if (ops->sync_sg_for_cpu)
+               ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
        flush_write_buffers();
 }
 
@@ -174,9 +207,11 @@ static inline void
 dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                       int nelems, int direction)
 {
+       struct dma_mapping_ops *ops = get_dma_ops(hwdev);
+
        BUG_ON(!valid_dma_direction(direction));
-       if (dma_ops->sync_sg_for_device)
-               dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
+       if (ops->sync_sg_for_device)
+               ops->sync_sg_for_device(hwdev, sg, nelems, direction);
 
        flush_write_buffers();
 }
@@ -185,9 +220,11 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      size_t offset, size_t size,
                                      int direction)
 {
+       struct dma_mapping_ops *ops = get_dma_ops(dev);
+
        BUG_ON(!valid_dma_direction(direction));
-       return dma_ops->map_single(dev, page_to_phys(page)+offset,
-                                  size, direction);
+       return ops->map_single(dev, page_to_phys(page) + offset,
+                              size, direction);
 }
 
 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
index c706a7442633f1f3fdf051598f06326157d51b7a..2730b351afcf2422695ca176cf8a7edbee57b9cc 100644
@@ -35,7 +35,7 @@ extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
                          int nents, int direction);
 extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
                             int nents, int direction);
-extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr);
+extern int swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr);
 extern void swiotlb_free_coherent(struct device *hwdev, size_t size,
                                  void *vaddr, dma_addr_t dma_handle);
 extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
index 3c7d537dd15d36a775cbe098f9948be8a5c680bf..51882ae3db4d18ceb3a1536745d55ca00948ea65 100644
@@ -139,7 +139,7 @@ dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
                consistent_sync(sg_virt(sg), sg->length, dir);
 }
 static inline int
-dma_mapping_error(dma_addr_t dma_addr)
+dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 {
        return 0;
 }
index 7d51cbca49ab230f15d5e8f1276d8ad5f5537b82..75ae6d8aba4fff5167faf5e8b570f78956ccef91 100644
@@ -758,7 +758,7 @@ static inline dma_addr_t i2o_dma_map_single(struct i2o_controller *c, void *ptr,
        }
 
        dma_addr = dma_map_single(&c->pdev->dev, ptr, size, direction);
-       if (!dma_mapping_error(dma_addr)) {
+       if (!dma_mapping_error(&c->pdev->dev, dma_addr)) {
 #ifdef CONFIG_I2O_EXT_ADAPTEC_DMA64
                if ((sizeof(dma_addr_t) > 4) && c->pae_support) {
                        *mptr++ = cpu_to_le32(0x7C020002);
index 4bf8cade9dbc5fa534963e40fe99de303f54296a..e530026eedf76b6f40402b3ecd03d1b05c234fd1 100644
@@ -427,9 +427,9 @@ static inline int ssb_dma_mapping_error(struct ssb_device *dev, dma_addr_t addr)
 {
        switch (dev->bus->bustype) {
        case SSB_BUSTYPE_PCI:
-               return pci_dma_mapping_error(addr);
+               return pci_dma_mapping_error(dev->bus->host_pci, addr);
        case SSB_BUSTYPE_SSB:
-               return dma_mapping_error(addr);
+               return dma_mapping_error(dev->dev, addr);
        default:
                __ssb_dma_not_implemented(dev);
        }
index 90b529f7a154efdc26c02b43b7e591022b491987..936e333e7ce598e9fc46a7b5d96c9ad233a34924 100644
@@ -1590,7 +1590,7 @@ static inline int ib_dma_mapping_error(struct ib_device *dev, u64 dma_addr)
 {
        if (dev->dma_ops)
                return dev->dma_ops->mapping_error(dev, dma_addr);
-       return dma_mapping_error(dma_addr);
+       return dma_mapping_error(dev->dma_device, dma_addr);
 }
 
 /**
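Subsystems that keep their own ops tables need no structural change: ib_dma_mapping_error() already receives the ib_device and only its generic fallback now forwards dev->dma_device, and ssb likewise picks the mapping device by bus type (host_pci for PCI hosts, the generic device otherwise), as the hunks above show:

	/* Both resolve to a device-aware check after this patch: */
	ib_dma_mapping_error(ibdev, addr);	/* -> ibdev->dma_device */
	ssb_dma_mapping_error(sdev, addr);	/* -> host_pci or sdev->dev */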
index d568894df8ccd335c4acdcb47da4a7b3c4830150..977edbdbc1debada5937958f5835b1dc88c9beba 100644
@@ -492,7 +492,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                 */
                dma_addr_t handle;
                handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
-               if (swiotlb_dma_mapping_error(handle))
+               if (swiotlb_dma_mapping_error(hwdev, handle))
                        return NULL;
 
                ret = bus_to_virt(handle);
@@ -824,7 +824,7 @@ swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
 }
 
 int
-swiotlb_dma_mapping_error(dma_addr_t dma_addr)
+swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
 {
        return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
 }
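swiotlb's error cookie is the bus address of its overflow buffer, and the check never dereferences hwdev, so the new argument may be NULL here; the swiotlb_alloc_coherent() hunk above relies on exactly that when it maps through a NULL device:

	/* From the hunk above: mapping via NULL, checking via hwdev. */
	handle = swiotlb_map_single(NULL, NULL, size, DMA_FROM_DEVICE);
	if (swiotlb_dma_mapping_error(hwdev, handle))
		return NULL;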
index a19b22b452a37c1a814d5464842afbc96fa6bc37..84d328329d98575d624557fa75a7e159436ece85 100644
@@ -169,7 +169,8 @@ static int send_write(struct svcxprt_rdma *xprt, struct svc_rqst *rqstp,
                                          (void *)
                                          vec->sge[xdr_sge_no].iov_base + sge_off,
                                          sge_bytes, DMA_TO_DEVICE);
-               if (dma_mapping_error(sge[sge_no].addr))
+               if (dma_mapping_error(xprt->sc_cm_id->device->dma_device,
+                                       sge[sge_no].addr))
                        goto err;
                sge_off = 0;
                sge_no++;