[POWERPC] Pointers marked as __iomem do not need to be volatile
diff --git a/include/asm-powerpc/dma-mapping.h b/include/asm-powerpc/dma-mapping.h
index 65be95dd03a57407db066b1317d9d6f0fc4c2e1f..e974876e18d2c98c7239981c1c49b693a73b11a6 100644
--- a/include/asm-powerpc/dma-mapping.h
+++ b/include/asm-powerpc/dma-mapping.h
@@ -87,6 +87,9 @@ static inline int dma_supported(struct device *dev, u64 mask)
        return dma_ops->dma_supported(dev, mask);
 }
 
+/* We have our own implementation of pci_set_dma_mask() */
+#define HAVE_ARCH_PCI_SET_DMA_MASK
+
 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
 {
        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);
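
For context, defining HAVE_ARCH_PCI_SET_DMA_MASK tells the generic PCI code not to provide its own pci_set_dma_mask(); the architecture supplies one instead. A minimal sketch of such an override, assuming it simply forwards to the dma_set_mask() declared just below the new define so the per-device dma_mapping_ops get the final say (the real powerpc definition lives in the arch PCI headers and may differ):

/*
 * Illustrative only: an arch pci_set_dma_mask() that defers to the
 * struct-device-based dma_set_mask() shown in the hunk above.
 */
static inline int pci_set_dma_mask(struct pci_dev *pdev, u64 mask)
{
	return dma_set_mask(&pdev->dev, mask);
}
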
@@ -285,9 +288,9 @@ dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
        BUG_ON(direction == DMA_NONE);
 
        for_each_sg(sgl, sg, nents, i) {
-               BUG_ON(!sg->page);
-               __dma_sync_page(sg->page, sg->offset, sg->length, direction);
-               sg->dma_address = page_to_bus(sg->page) + sg->offset;
+               BUG_ON(!sg_page(sg));
+               __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
+               sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
        }
 
        return nents;
@@ -328,7 +331,7 @@ static inline void dma_sync_sg_for_cpu(struct device *dev,
        BUG_ON(direction == DMA_NONE);
 
        for_each_sg(sgl, sg, nents, i)
-               __dma_sync_page(sg->page, sg->offset, sg->length, direction);
+               __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
 }
 
 static inline void dma_sync_sg_for_device(struct device *dev,
@@ -341,7 +344,7 @@ static inline void dma_sync_sg_for_device(struct device *dev,
        BUG_ON(direction == DMA_NONE);
 
        for_each_sg(sgl, sg, nents, i)
-               __dma_sync_page(sg->page, sg->offset, sg->length, direction);
+               __dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
 }
 
 static inline int dma_mapping_error(dma_addr_t dma_addr)
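
The remaining hunks are mechanical: every direct sg->page access becomes sg_page(sg). The accessor is needed because the scatterlist's bare page pointer was folded into a page_link field whose low bits carry chain/termination markers, so the raw value can no longer be dereferenced as a pointer. Below is a standalone user-space sketch of that masking idea using mocked-up types rather than the real <linux/scatterlist.h> definitions; the flag values and struct layout are illustrative only:

#include <stdio.h>
#include <stdint.h>
#include <stdalign.h>

/* Mock stand-ins for struct page and struct scatterlist, only to show
 * why an accessor is needed once the low pointer bits carry flags. */
struct page { alignas(4) char payload[64]; };

struct scatterlist {
	uintptr_t page_link;	/* page pointer with flag bits in bits 0-1 */
	unsigned int offset;
	unsigned int length;
};

#define SG_CHAIN 0x01UL		/* entry points to another scatterlist */
#define SG_END   0x02UL		/* entry terminates the list */

/* sg_page()-style accessor: strip the flag bits to recover the pointer. */
static struct page *sg_page(struct scatterlist *sg)
{
	return (struct page *)(sg->page_link & ~(SG_CHAIN | SG_END));
}

int main(void)
{
	static struct page pg;
	struct scatterlist sg = {
		.page_link = (uintptr_t)&pg | SG_END,	/* mark as last entry */
		.offset = 0,
		.length = sizeof(pg),
	};

	/* The raw field is off by the flag bits; the accessor recovers &pg. */
	printf("raw 0x%lx, via accessor %p\n",
	       (unsigned long)sg.page_link, (void *)sg_page(&sg));
	return 0;
}

In the kernel itself the setter side is paired with this accessor (sg_set_page() and friends), so callers never touch page_link directly, which is exactly what the conversion above relies on.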