err.no Git - linux-2.6/blobdiff - arch/sparc/kernel/ioport.c
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
[linux-2.6] / arch / sparc / kernel / ioport.c
index 62182d2d7b0de2b1a9b64d8a7316c8cb3d47caf2..7b17522f59bfbf680b808c41a41fbdf56511f060 100644 (file)
@@ -1,4 +1,4 @@
-/* $Id: ioport.c,v 1.45 2001/10/30 04:54:21 davem Exp $
+/*
  * ioport.c:  Simple io mapping allocator.
  *
  * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
@@ -35,6 +35,7 @@
 #include <linux/slab.h>
 #include <linux/pci.h>         /* struct pci_dev */
 #include <linux/proc_fs.h>
+#include <linux/scatterlist.h>
 
 #include <asm/io.h>
 #include <asm/vaddrs.h>
@@ -304,7 +305,7 @@ void *sbus_alloc_consistent(struct sbus_dev *sdev, long len, u32 *dma_addrp)
        struct resource *res;
        int order;
 
-       /* XXX why are some lenghts signed, others unsigned? */
+       /* XXX why are some lengths signed, others unsigned? */
        if (len <= 0) {
                return NULL;
        }
@@ -392,7 +393,7 @@ void sbus_free_consistent(struct sbus_dev *sdev, long n, void *p, u32 ba)
  */
 dma_addr_t sbus_map_single(struct sbus_dev *sdev, void *va, size_t len, int direction)
 {
-       /* XXX why are some lenghts signed, others unsigned? */
+       /* XXX why are some lengths signed, others unsigned? */
        if (len <= 0) {
                return 0;
        }
@@ -717,19 +718,18 @@ void pci_unmap_page(struct pci_dev *hwdev,
  * Device ownership issues as mentioned above for pci_map_single are
  * the same here.
  */
-int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
+int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
     int direction)
 {
+       struct scatterlist *sg;
        int n;
 
        BUG_ON(direction == PCI_DMA_NONE);
        /* IIep is write-through, not flushing. */
-       for (n = 0; n < nents; n++) {
-               BUG_ON(page_address(sg->page) == NULL);
-               sg->dvma_address =
-                       virt_to_phys(page_address(sg->page)) + sg->offset;
+       for_each_sg(sgl, sg, nents, n) {
+               BUG_ON(page_address(sg_page(sg)) == NULL);
+               sg->dvma_address = virt_to_phys(sg_virt(sg));
                sg->dvma_length = sg->length;
-               sg++;
        }
        return nents;
 }
@@ -738,19 +738,19 @@ int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
  * Again, cpu read rules concerning calls here are the same as for
  * pci_unmap_single() above.
  */
-void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
+void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
     int direction)
 {
+       struct scatterlist *sg;
        int n;
 
        BUG_ON(direction == PCI_DMA_NONE);
        if (direction != PCI_DMA_TODEVICE) {
-               for (n = 0; n < nents; n++) {
-                       BUG_ON(page_address(sg->page) == NULL);
+               for_each_sg(sgl, sg, nents, n) {
+                       BUG_ON(page_address(sg_page(sg)) == NULL);
                        mmu_inval_dma_area(
-                           (unsigned long) page_address(sg->page),
+                           (unsigned long) page_address(sg_page(sg)),
                            (sg->length + PAGE_SIZE-1) & PAGE_MASK);
-                       sg++;
                }
        }
 }
@@ -789,34 +789,34 @@ void pci_dma_sync_single_for_device(struct pci_dev *hwdev, dma_addr_t ba, size_t
  * The same as pci_dma_sync_single_* but for a scatter-gather list,
  * same rules and usage.
  */
-void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
+void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
 {
+       struct scatterlist *sg;
        int n;
 
        BUG_ON(direction == PCI_DMA_NONE);
        if (direction != PCI_DMA_TODEVICE) {
-               for (n = 0; n < nents; n++) {
-                       BUG_ON(page_address(sg->page) == NULL);
+               for_each_sg(sgl, sg, nents, n) {
+                       BUG_ON(page_address(sg_page(sg)) == NULL);
                        mmu_inval_dma_area(
-                           (unsigned long) page_address(sg->page),
+                           (unsigned long) page_address(sg_page(sg)),
                            (sg->length + PAGE_SIZE-1) & PAGE_MASK);
-                       sg++;
                }
        }
 }
 
-void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
+void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
 {
+       struct scatterlist *sg;
        int n;
 
        BUG_ON(direction == PCI_DMA_NONE);
        if (direction != PCI_DMA_TODEVICE) {
-               for (n = 0; n < nents; n++) {
-                       BUG_ON(page_address(sg->page) == NULL);
+               for_each_sg(sgl, sg, nents, n) {
+                       BUG_ON(page_address(sg_page(sg)) == NULL);
                        mmu_inval_dma_area(
-                           (unsigned long) page_address(sg->page),
+                           (unsigned long) page_address(sg_page(sg)),
                            (sg->length + PAGE_SIZE-1) & PAGE_MASK);
-                       sg++;
                }
        }
 }