err.no Git - linux-2.6/commitdiff
SPARC64: sg chaining support
author     Jens Axboe <jens.axboe@oracle.com>
           Tue, 7 Aug 2007 07:37:10 +0000 (09:37 +0200)
committer  Jens Axboe <jens.axboe@oracle.com>
           Tue, 16 Oct 2007 09:27:32 +0000 (11:27 +0200)
This updates the sparc64 iommu/pci dma mappers to use sg chaining; a short sketch of the chained-list walk this requires follows the file list below.

Acked-by: David S. Miller <davem@davemloft.net>
Later updated to a newer kernel with unified sparc64 iommu sg handling.

Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
arch/sparc64/kernel/iommu.c
arch/sparc64/kernel/pci_sun4v.c
include/asm-sparc64/scatterlist.h

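Aside (not part of the commit): with sg chaining, a scatterlist is no longer guaranteed to be one contiguous array — it may be several smaller arrays stitched together by chain entries — so walking it with pointer arithmetic (sg++ or sglist[i]) can step onto a chain link or off the end of a segment. The diffs below convert every such walk to sg_next()/for_each_sg(). A minimal sketch of the pattern, mirroring the map_sg fix-up loop changed below; the helper name apply_dma_base() is hypothetical:

#include <linux/types.h>
#include <linux/scatterlist.h>

static void apply_dma_base(struct scatterlist *sglist, int used,
			   dma_addr_t dma_base)
{
	struct scatterlist *sg = sglist;

	/* Advance with sg_next() rather than sg++ so that chained
	 * scatterlists are traversed correctly. */
	while (used && sg->dma_length) {
		sg->dma_address += dma_base;
		sg = sg_next(sg);
		used--;
	}
}
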
diff --git a/arch/sparc64/kernel/iommu.c b/arch/sparc64/kernel/iommu.c
index b35a62167e9ce31af6f957b28902ca0259bf3b5d..db3ffcf7a12036bee200f0ca207cf52bcab95e23 100644
--- a/arch/sparc64/kernel/iommu.c
+++ b/arch/sparc64/kernel/iommu.c
@@ -10,6 +10,7 @@
 #include <linux/device.h>
 #include <linux/dma-mapping.h>
 #include <linux/errno.h>
+#include <linux/scatterlist.h>
 
 #ifdef CONFIG_PCI
 #include <linux/pci.h>
@@ -480,7 +481,7 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
                           unsigned long iopte_protection)
 {
        struct scatterlist *dma_sg = sg;
-       struct scatterlist *sg_end = sg + nelems;
+       struct scatterlist *sg_end = sg_last(sg, nelems);
        int i;
 
        for (i = 0; i < nused; i++) {
@@ -515,7 +516,7 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
                                        len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
                                        break;
                                }
-                               sg++;
+                               sg = sg_next(sg);
                        }
 
                        pteval = iopte_protection | (pteval & IOPTE_PAGE);
@@ -528,24 +529,24 @@ static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
                        }
 
                        pteval = (pteval & IOPTE_PAGE) + len;
-                       sg++;
+                       sg = sg_next(sg);
 
                        /* Skip over any tail mappings we've fully mapped,
                         * adjusting pteval along the way.  Stop when we
                         * detect a page crossing event.
                         */
-                       while (sg < sg_end &&
+                       while (sg != sg_end &&
                               (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
                               (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
                               ((pteval ^
                                 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
                                pteval += sg->length;
-                               sg++;
+                               sg = sg_next(sg);
                        }
                        if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
                                pteval = ~0UL;
                } while (dma_npages != 0);
-               dma_sg++;
+               dma_sg = sg_next(dma_sg);
        }
 }
 
@@ -606,7 +607,7 @@ static int dma_4u_map_sg(struct device *dev, struct scatterlist *sglist,
        sgtmp = sglist;
        while (used && sgtmp->dma_length) {
                sgtmp->dma_address += dma_base;
-               sgtmp++;
+               sgtmp = sg_next(sgtmp);
                used--;
        }
        used = nelems - used;
@@ -642,6 +643,7 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
        struct strbuf *strbuf;
        iopte_t *base;
        unsigned long flags, ctx, i, npages;
+       struct scatterlist *sg, *sgprv;
        u32 bus_addr;
 
        if (unlikely(direction == DMA_NONE)) {
@@ -654,11 +656,14 @@ static void dma_4u_unmap_sg(struct device *dev, struct scatterlist *sglist,
 
        bus_addr = sglist->dma_address & IO_PAGE_MASK;
 
-       for (i = 1; i < nelems; i++)
-               if (sglist[i].dma_length == 0)
+       sgprv = NULL;
+       for_each_sg(sglist, sg, nelems, i) {
+               if (sg->dma_length == 0)
                        break;
-       i--;
-       npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
+               sgprv = sg;
+       }
+
+       npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
                  bus_addr) >> IO_PAGE_SHIFT;
 
        base = iommu->page_table +
@@ -730,6 +735,7 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
        struct iommu *iommu;
        struct strbuf *strbuf;
        unsigned long flags, ctx, npages, i;
+       struct scatterlist *sg, *sgprv;
        u32 bus_addr;
 
        iommu = dev->archdata.iommu;
@@ -753,11 +759,14 @@ static void dma_4u_sync_sg_for_cpu(struct device *dev,
 
        /* Step 2: Kick data out of streaming buffers. */
        bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
-       for(i = 1; i < nelems; i++)
-               if (!sglist[i].dma_length)
+       sgprv = NULL;
+       for_each_sg(sglist, sg, nelems, i) {
+               if (sg->dma_length == 0)
                        break;
-       i--;
-       npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
+               sgprv = sg;
+       }
+
+       npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length)
                  - bus_addr) >> IO_PAGE_SHIFT;
        strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);
 
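Aside (not part of the diff): the unmap/sync hunks above replace sglist[i] indexing with a for_each_sg() walk that remembers the last entry carrying a non-zero dma_length, since with chaining the i-th entry can no longer be reached by array indexing. A sketch of that pattern; the helper name last_mapped_sg() is hypothetical:

#include <linux/scatterlist.h>

static struct scatterlist *last_mapped_sg(struct scatterlist *sglist, int nelems)
{
	struct scatterlist *sg, *sgprv = NULL;
	int i;

	/* Stop at the first entry the mapper left empty; sgprv then points
	 * at the final entry that actually received a DMA mapping. */
	for_each_sg(sglist, sg, nelems, i) {
		if (sg->dma_length == 0)
			break;
		sgprv = sg;
	}

	return sgprv;
}
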
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index 95de1444ee674c912a4fc51cf66a2b0b2f64754b..cacacfae5451a7ec68600a0e1438319dd7b6e4e3 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -13,6 +13,7 @@
 #include <linux/irq.h>
 #include <linux/msi.h>
 #include <linux/log2.h>
+#include <linux/scatterlist.h>
 
 #include <asm/iommu.h>
 #include <asm/irq.h>
@@ -373,7 +374,7 @@ static inline long fill_sg(long entry, struct device *dev,
                           int nused, int nelems, unsigned long prot)
 {
        struct scatterlist *dma_sg = sg;
-       struct scatterlist *sg_end = sg + nelems;
+       struct scatterlist *sg_end = sg_last(sg, nelems);
        unsigned long flags;
        int i;
 
@@ -413,7 +414,7 @@ static inline long fill_sg(long entry, struct device *dev,
                                        len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
                                        break;
                                }
-                               sg++;
+                               sg = sg_next(sg);
                        }
 
                        pteval = (pteval & IOPTE_PAGE);
@@ -431,24 +432,25 @@ static inline long fill_sg(long entry, struct device *dev,
                        }
 
                        pteval = (pteval & IOPTE_PAGE) + len;
-                       sg++;
+                       sg = sg_next(sg);
 
                        /* Skip over any tail mappings we've fully mapped,
                         * adjusting pteval along the way.  Stop when we
                         * detect a page crossing event.
                         */
-                       while (sg < sg_end &&
-                              (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
+                       while ((pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
                               (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
                               ((pteval ^
                                 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
                                pteval += sg->length;
-                               sg++;
+                               if (sg == sg_end)
+                                       break;
+                               sg = sg_next(sg);
                        }
                        if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
                                pteval = ~0UL;
                } while (dma_npages != 0);
-               dma_sg++;
+               dma_sg = sg_next(dma_sg);
        }
 
        if (unlikely(iommu_batch_end() < 0L))
@@ -510,7 +512,7 @@ static int dma_4v_map_sg(struct device *dev, struct scatterlist *sglist,
        sgtmp = sglist;
        while (used && sgtmp->dma_length) {
                sgtmp->dma_address += dma_base;
-               sgtmp++;
+               sgtmp = sg_next(sgtmp);
                used--;
        }
        used = nelems - used;
@@ -545,6 +547,7 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
        struct pci_pbm_info *pbm;
        struct iommu *iommu;
        unsigned long flags, i, npages;
+       struct scatterlist *sg, *sgprv;
        long entry;
        u32 devhandle, bus_addr;
 
@@ -558,12 +561,15 @@ static void dma_4v_unmap_sg(struct device *dev, struct scatterlist *sglist,
        devhandle = pbm->devhandle;
        
        bus_addr = sglist->dma_address & IO_PAGE_MASK;
-
-       for (i = 1; i < nelems; i++)
-               if (sglist[i].dma_length == 0)
+       sgprv = NULL;
+       for_each_sg(sglist, sg, nelems, i) {
+               if (sg->dma_length == 0)
                        break;
-       i--;
-       npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) -
+
+               sgprv = sg;
+       }
+
+       npages = (IO_PAGE_ALIGN(sgprv->dma_address + sgprv->dma_length) -
                  bus_addr) >> IO_PAGE_SHIFT;
 
        entry = ((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
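 
Aside (not from the commit): in the fill_sg() hunks above, the old "sg < sg_end" pointer comparison is meaningless once entries can live in separate chained arrays, so the bound becomes the entry returned by sg_last() and the test becomes an equality check before advancing with sg_next(). A sketch of that loop shape; sum_sg_lengths() is a hypothetical name:

#include <linux/scatterlist.h>

static unsigned long sum_sg_lengths(struct scatterlist *sgl, int nelems)
{
	struct scatterlist *sg = sgl;
	struct scatterlist *sg_end = sg_last(sgl, nelems);
	unsigned long total = 0;

	for (;;) {
		total += sg->length;
		if (sg == sg_end)	/* last real entry reached */
			break;
		sg = sg_next(sg);	/* follows chain links if present */
	}

	return total;
}
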
diff --git a/include/asm-sparc64/scatterlist.h b/include/asm-sparc64/scatterlist.h
index 048fdb40e81dc17a1d319222556344aaf5408b2c..703c5bbe6c8cdc4a06aa44f2a64c5d4bde5adc8d 100644
--- a/include/asm-sparc64/scatterlist.h
+++ b/include/asm-sparc64/scatterlist.h
@@ -20,4 +20,6 @@ struct scatterlist {
 
 #define ISA_DMA_THRESHOLD      (~0UL)
 
+#define ARCH_HAS_SG_CHAIN
+
 #endif /* !(_SPARC64_SCATTERLIST_H) */
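
Aside (not part of the commit): defining ARCH_HAS_SG_CHAIN advertises that this architecture's DMA mapping code can follow chained scatterlists, so subsystems may link two sg tables with sg_chain() instead of allocating one large contiguous array. A hedged sketch of that use; chain_sg_tables() is a hypothetical name:

#include <linux/scatterlist.h>

static void chain_sg_tables(struct scatterlist *first, unsigned int first_nents,
			    struct scatterlist *second)
{
	/* The last slot of 'first' (first[first_nents - 1]) is consumed as
	 * the chain link to 'second'; sg_next()/for_each_sg() then walk both
	 * tables as one logical scatterlist. */
	sg_chain(first, first_nents, second);
}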