#include <linux/slab.h>
#include <linux/pci.h> /* struct pci_dev */
#include <linux/proc_fs.h>
+#include <linux/scatterlist.h>

#include <asm/io.h>
#include <asm/vaddrs.h>
* Device ownership issues as mentioned above for pci_map_single are
* the same here.
*/
-int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
+int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
int direction)
{
+ struct scatterlist *sg;
int n;
BUG_ON(direction == PCI_DMA_NONE);
/* IIep is write-through, not flushing. */
- for (n = 0; n < nents; n++) {
+ for_each_sg(sgl, sg, nents, n) {
BUG_ON(page_address(sg->page) == NULL);
sg->dvma_address =
virt_to_phys(page_address(sg->page)) + sg->offset;
sg->dvma_length = sg->length;
- sg++;
}
return nents;
}
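For reference, for_each_sg() replaces the open-coded walk: it advances through sg_next() instead of raw pointer arithmetic, so it keeps working once a scatterlist is chained across several arrays. A simplified sketch of the helper as it appears in <linux/scatterlist.h>:

#define for_each_sg(sglist, sg, nr, __i) \
        for (__i = 0, sg = (sglist); __i < (nr); __i++, sg = sg_next(sg))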
* Again, cpu read rules concerning calls here are the same as for
* pci_unmap_single() above.
*/
-void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg, int nents,
+void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sgl, int nents,
int direction)
{
+ struct scatterlist *sg;
int n;
BUG_ON(direction == PCI_DMA_NONE);
if (direction != PCI_DMA_TODEVICE) {
- for (n = 0; n < nents; n++) {
+ for_each_sg(sgl, sg, nents, n) {
BUG_ON(page_address(sg->page) == NULL);
mmu_inval_dma_area(
(unsigned long) page_address(sg->page),
(sg->length + PAGE_SIZE-1) & PAGE_MASK);
- sg++;
}
}
}
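A minimal sketch of the caller side, assuming the usual sg_dma_address()/sg_dma_len() accessors (on sparc32 these resolve to the dvma_address/dvma_length fields filled in above); program_descriptor() is a hypothetical device-specific helper:

/* Hypothetical driver fragment; program_descriptor() is illustrative only. */
int i, count;
struct scatterlist *sg;

count = pci_map_sg(pdev, sgl, nents, PCI_DMA_TODEVICE);
for_each_sg(sgl, sg, count, i)
        program_descriptor(sg_dma_address(sg), sg_dma_len(sg));
/* ... start the transfer and wait for completion ... */
pci_unmap_sg(pdev, sgl, nents, PCI_DMA_TODEVICE);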
* The same as pci_dma_sync_single_* but for a scatter-gather list,
* same rules and usage.
*/
-void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
+void pci_dma_sync_sg_for_cpu(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
{
+ struct scatterlist *sg;
int n;
BUG_ON(direction == PCI_DMA_NONE);
if (direction != PCI_DMA_TODEVICE) {
- for (n = 0; n < nents; n++) {
+ for_each_sg(sgl, sg, nents, n) {
BUG_ON(page_address(sg->page) == NULL);
mmu_inval_dma_area(
(unsigned long) page_address(sg->page),
(sg->length + PAGE_SIZE-1) & PAGE_MASK);
- sg++;
}
}
}
-void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sg, int nents, int direction)
+void pci_dma_sync_sg_for_device(struct pci_dev *hwdev, struct scatterlist *sgl, int nents, int direction)
{
+ struct scatterlist *sg;
int n;
BUG_ON(direction == PCI_DMA_NONE);
if (direction != PCI_DMA_TODEVICE) {
- for (n = 0; n < nents; n++) {
+ for_each_sg(sgl, sg, nents, n) {
BUG_ON(page_address(sg->page) == NULL);
mmu_inval_dma_area(
(unsigned long) page_address(sg->page),
(sg->length + PAGE_SIZE-1) & PAGE_MASK);
- sg++;
}
}
}
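The loops above advance with sg_next() rather than sg++: a plain entry just advances by one, while an entry marked as a chain link redirects the walk into the next scatterlist array. A simplified sketch of the helper:

/* Simplified sketch; the real helper lives in <linux/scatterlist.h>. */
static inline struct scatterlist *sg_next(struct scatterlist *sg)
{
        sg++;
        if (unlikely(sg_is_chain(sg)))
                sg = sg_chain_ptr(sg);  /* follow the link into the next array */
        return sg;
}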
#include <linux/mm.h>
#include <linux/highmem.h> /* pte_offset_map => kmap_atomic */
#include <linux/bitops.h>
+#include <linux/scatterlist.h>

-#include <asm/scatterlist.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sbus.h>
spin_lock_irqsave(&iounit->lock, flags);
while (sz != 0) {
--sz;
- sg[sz].dvma_address = iounit_get_area(iounit, (unsigned long)page_address(sg[sz].page) + sg[sz].offset, sg[sz].length);
- sg[sz].dvma_length = sg[sz].length;
+ sg->dvma_address = iounit_get_area(iounit, (unsigned long)page_address(sg->page) + sg->offset, sg->length);
+ sg->dvma_length = sg->length;
+ sg = sg_next(sg);
}
spin_unlock_irqrestore(&iounit->lock, flags);
}
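Note the direction change here: the old code indexed sg[sz] from the tail back to the head, while the rewrite visits entries head to tail and keeps sz purely as a countdown. Each entry is mapped independently, so the order is immaterial; the same walk written as a plain for loop:

/* Equivalent forward walk; sz is only a remaining-entry count. */
for (; sz != 0; sz--, sg = sg_next(sg)) {
        sg->dvma_address = iounit_get_area(iounit,
                (unsigned long)page_address(sg->page) + sg->offset,
                sg->length);
        sg->dvma_length = sg->length;
}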
spin_lock_irqsave(&iounit->lock, flags);
while (sz != 0) {
--sz;
- len = ((sg[sz].dvma_address & ~PAGE_MASK) + sg[sz].length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
- vaddr = (sg[sz].dvma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
+ len = ((sg->dvma_address & ~PAGE_MASK) + sg->length + (PAGE_SIZE-1)) >> PAGE_SHIFT;
+ vaddr = (sg->dvma_address - IOUNIT_DMA_BASE) >> PAGE_SHIFT;
IOD(("iounit_release %08lx-%08lx\n", (long)vaddr, (long)len+vaddr));
for (len += vaddr; vaddr < len; vaddr++)
clear_bit(vaddr, iounit->bmap);
+ sg = sg_next(sg);
}
spin_unlock_irqrestore(&iounit->lock, flags);
}
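A worked example of the page-count arithmetic above, assuming 4 KiB pages: a 0x1000-byte buffer whose DVMA address starts 0x800 bytes into a page straddles a page boundary, so two bitmap bits must be cleared.

/*
 * len = ((dvma & ~PAGE_MASK) + length + (PAGE_SIZE-1)) >> PAGE_SHIFT
 *     = (0x800 + 0x1000 + 0xfff) >> 12
 *     = 2 pages, i.e. both pages the buffer touches.
 */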
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/highmem.h> /* pte_offset_map => kmap_atomic */
+#include <linux/scatterlist.h>

-#include <asm/scatterlist.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sbus.h>
n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
sg->dvma_length = (__u32) sg->length;
- sg++;
+ sg = sg_next(sg);
}
}
n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
sg->dvma_length = (__u32) sg->length;
- sg++;
+ sg = sg_next(sg);
}
}
sg->dvma_address = iommu_get_one(sg->page, n, sbus) + sg->offset;
sg->dvma_length = (__u32) sg->length;
- sg++;
+ sg = sg_next(sg);
}
}
n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
iommu_release_one(sg->dvma_address & PAGE_MASK, n, sbus);
sg->dvma_address = 0x21212121;
- sg++;
+ sg = sg_next(sg);
}
}
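Two details in this release path are worth calling out: n repeats the round-up used at map time, so exactly the pages claimed by iommu_get_one() are returned, and the value written back into dvma_address is a deliberate poison.

/*
 * 0x21212121 is ASCII "!!!!": a driver that keeps using dvma_address
 * after the release shows up unmistakably in a register or memory dump.
 */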
#include <linux/highmem.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
+#include <linux/scatterlist.h>

-#include <asm/scatterlist.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
{
while (sz != 0) {
--sz;
- sg[sz].dvma_address = (__u32)sun4c_lockarea(page_address(sg[sz].page) + sg[sz].offset, sg[sz].length);
- sg[sz].dvma_length = sg[sz].length;
+ sg->dvma_address = (__u32)sun4c_lockarea(page_address(sg->page) + sg->offset, sg->length);
+ sg->dvma_length = sg->length;
+ sg = sg_next(sg);
}
}
{
while (sz != 0) {
--sz;
- sun4c_unlockarea((char *)sg[sz].dvma_address, sg[sz].length);
+ sun4c_unlockarea((char *)sg->dvma_address, sg->length);
+ sg = sg_next(sg);
}
}
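A minimal sketch of the pairing a caller must observe; on sparc32 these sun4c routines sit behind the mmu_get_scsi_sgl()/mmu_release_scsi_sgl() indirection, and the driver fragment below is hypothetical:

/* Hypothetical caller; DMA start and completion handling omitted. */
mmu_get_scsi_sgl(sgl, nents, sbus);     /* fills dvma_address/dvma_length */
/* ... hand each entry's dvma_address to the device ... */
mmu_release_scsi_sgl(sgl, nents, sbus); /* must mirror the get exactly */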