/* $Id: pci_iommu.c,v 1.17 2001/12/17 07:05:09 davem Exp $
 * pci_iommu.c: UltraSparc PCI controller IOM/STC support.
 *
 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/delay.h>

#include <asm/pbm.h>

#include "iommu_common.h"
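
/* Each streaming buffer context has a 64-bit context match register;
 * the match register for context CTX lives CTX * 8 bytes past
 * strbuf_ctxmatch_base, hence the shift by 3 below.
 */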
#define PCI_STC_CTXMATCH_ADDR(STC, CTX)	\
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
/* Accessing IOMMU and Streaming Buffer registers.
 * REG parameter is a physical address.  All registers
 * are 64-bits in size.
 */
#define pci_iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})
#define pci_iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))
/* Must be invoked under the IOMMU lock. */
static void __iommu_flushall(struct pci_iommu *iommu)
{
	unsigned long tag;
	int entry;

	/* Invalidate all sixteen IOMMU TLB entries via the tag diagnostic registers. */
	tag = iommu->iommu_flush + (0xa580UL - 0x0210UL);
	for (entry = 0; entry < 16; entry++) {
		pci_iommu_write(tag, 0);
		tag += 8UL;
	}

	/* Ensure completion of previous PIO writes. */
	(void) pci_iommu_read(iommu->write_complete_reg);

	/* Now update everyone's flush point. */
	for (entry = 0; entry < PBM_NCLUSTERS; entry++) {
		iommu->alloc_info[entry].flush =
			iommu->alloc_info[entry].next;
	}
}
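
/* IOPTE encoding: consistent mappings are valid and cacheable, with the
 * DMA context stored in the IOPTE_CONTEXT field (bits 47 and up).
 * Streaming mappings additionally set IOPTE_STBUF so that accesses pass
 * through the streaming buffer.
 */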
#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)
/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
static inline void iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;
	iopte_val(*iopte) = val;
}
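
/* Point every entry of the IOMMU TSB at the dummy page, so that no
 * entry is ever left truly invalid.
 */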
void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize)
{
	int i;

	tsbsize /= sizeof(iopte_t);
	for (i = 0; i < tsbsize; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);
}
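
/* The streaming map is carved into PBM_NCLUSTERS clusters, one per
 * power-of-two allocation size.  Allocation scans a cluster for an
 * entry still pointing at the dummy page, and defers IOMMU flushes
 * until the scan catches up with the recorded flush point.
 */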
static iopte_t *alloc_streaming_cluster(struct pci_iommu *iommu, unsigned long npages)
{
	iopte_t *iopte, *limit, *first;
	unsigned long cnum, ent, flush_point;

	cnum = 0;
	while ((1UL << cnum) < npages)
		cnum++;
	iopte = (iommu->page_table +
		 (cnum << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));

	if (cnum == 0)
		limit = (iommu->page_table +
			 iommu->lowest_consistent_map);
	else
		limit = (iopte +
			 (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));

	iopte += ((ent = iommu->alloc_info[cnum].next) << cnum);
	flush_point = iommu->alloc_info[cnum].flush;

	first = iopte;
	for (;;) {
		if (IOPTE_IS_DUMMY(iommu, iopte)) {
			if ((iopte + (1 << cnum)) >= limit)
				ent = 0;
			else
				ent = ent + 1;
			iommu->alloc_info[cnum].next = ent;
			if (ent == flush_point)
				__iommu_flushall(iommu);
			break;
		}
		iopte += (1 << cnum);
		ent++;
		if (iopte >= limit) {
			iopte = (iommu->page_table +
				 (cnum << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
			ent = 0;
		}
		if (ent == flush_point)
			__iommu_flushall(iommu);
		if (iopte == first)
			goto bad;
	}

	/* I've got your streaming cluster right here buddy boy... */
	return iopte;

bad:
	printk(KERN_EMERG "pci_iommu: alloc_streaming_cluster of npages(%ld) failed!\n",
	       npages);
	return NULL;
}
static void free_streaming_cluster(struct pci_iommu *iommu, dma_addr_t base,
				   unsigned long npages, unsigned long ctx)
{
	unsigned long cnum, ent;

	cnum = 0;
	while ((1UL << cnum) < npages)
		cnum++;

	ent = (base << (32 - IO_PAGE_SHIFT + PBM_LOGCLUSTERS - iommu->page_table_sz_bits))
		>> (32 + PBM_LOGCLUSTERS + cnum - iommu->page_table_sz_bits);

	/* If the global flush might not have caught this entry,
	 * adjust the flush point such that we will flush before
	 * ever trying to reuse it.
	 */
#define between(X, Y, Z)	(((Z) - (Y)) >= ((X) - (Y)))
	if (between(ent, iommu->alloc_info[cnum].next, iommu->alloc_info[cnum].flush))
		iommu->alloc_info[cnum].flush = ent;
#undef between
}
/* We allocate consistent mappings from the end of cluster zero. */
static iopte_t *alloc_consistent_cluster(struct pci_iommu *iommu, unsigned long npages)
{
	iopte_t *iopte;

	iopte = iommu->page_table + (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS));
	while (iopte > iommu->page_table) {
		iopte--;
		if (IOPTE_IS_DUMMY(iommu, iopte)) {
			unsigned long tmp = npages;

			while (--tmp) {
				iopte--;
				if (!IOPTE_IS_DUMMY(iommu, iopte))
					break;
			}
			if (tmp == 0) {
				u32 entry = (iopte - iommu->page_table);

				if (entry < iommu->lowest_consistent_map)
					iommu->lowest_consistent_map = entry;
				return iopte;
			}
		}
	}
	return NULL;
}
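
/* DMA contexts tag streaming buffer entries so that a whole mapping can
 * be flushed with a single context flush.  Context zero is never handed
 * out by the allocator; it doubles as the fallback when all contexts
 * are in use.
 */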
static int iommu_alloc_ctx(struct pci_iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	int sz = IOMMU_NUM_CTXS - lowest;
	int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);

	if (unlikely(n == sz)) {
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (unlikely(n == lowest)) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;
		}
	}
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}
static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx)
{
	if (likely(ctx)) {
		__clear_bit(ctx, iommu->ctx_bitmap);
		if (ctx < iommu->ctx_lowest_free)
			iommu->ctx_lowest_free = ctx;
	}
}
/* Allocate and map kernel buffer of size SIZE using consistent mode
 * DMA for PCI device PDEV.  Return non-NULL cpu-side address if
 * successful and set *DMA_ADDRP to the PCI side dma address.
 */
void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, first_page, ctx;
	void *ret;
	int npages;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	first_page = __get_free_pages(GFP_ATOMIC, order);
	if (first_page == 0UL)
		return NULL;
	memset((char *)first_page, 0, PAGE_SIZE << order);

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_consistent_cluster(iommu, size >> IO_PAGE_SHIFT);
	if (iopte == NULL) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->page_table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(ctx) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	/* Flush the new mappings out of the IOMMU TLB. */
	{
		int i;
		u32 daddr = *dma_addrp;

		npages = size >> IO_PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			pci_iommu_write(iommu->iommu_flush, daddr);
			daddr += IO_PAGE_SIZE;
		}
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}
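
/* Typical driver usage (a sketch; RING_BYTES and the descriptor ring
 * are hypothetical, not part of this file):
 *
 *	dma_addr_t ring_dvma;
 *	void *ring = pci_alloc_consistent(pdev, RING_BYTES, &ring_dvma);
 *	if (ring == NULL)
 *		return -ENOMEM;
 *	... hand ring_dvma to the device, access ring from the CPU ...
 *	pci_free_consistent(pdev, RING_BYTES, ring, ring_dvma);
 */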
/* Free and unmap a consistent DMA translation. */
void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, npages, i, ctx;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	iopte = iommu->page_table +
		((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	if ((iopte - iommu->page_table) ==
	    iommu->lowest_consistent_map) {
		iopte_t *walk = iopte + npages;
		iopte_t *limit;

		limit = (iommu->page_table +
			 (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
		while (walk < limit) {
			if (!IOPTE_IS_DUMMY(iommu, walk))
				break;
			walk++;
		}
		iommu->lowest_consistent_map =
			(walk - iommu->page_table);
	}

	/* Data for consistent mappings cannot enter the streaming
	 * buffers, so we only need to update the TSB.  We flush
	 * the IOMMU here as well to prevent conflicts with the
	 * streaming mapping deferred tlb flush scheme.
	 */

	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;

	for (i = 0; i < npages; i++, iopte++)
		iopte_make_dummy(iommu, iopte);

	if (iommu->iommu_ctxflush) {
		pci_iommu_write(iommu->iommu_ctxflush, ctx);
	} else {
		for (i = 0; i < npages; i++) {
			u32 daddr = dvma + (i << IO_PAGE_SHIFT);

			pci_iommu_write(iommu->iommu_flush, daddr);
		}
	}

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}
/* Map a single buffer at PTR of SZ bytes for PCI DMA
 * in streaming mode.
 */
dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	if (direction == PCI_DMA_NONE)
		BUG();

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);

	base = alloc_streaming_cluster(iommu, npages);
	if (base == NULL)
		goto bad;
	bus_addr = (iommu->page_table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != PCI_DMA_TODEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;

bad:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return PCI_DMA_ERROR_CODE;
}
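
/* Write back and invalidate a range of streaming buffer entries.  When
 * both the IOMMU and the streaming buffer support contexts, the flush is
 * done by context tag, otherwise page by page.  Except for
 * PCI_DMA_TODEVICE transfers, which cannot dirty the streaming cache,
 * completion is detected by waiting for the flush-flag word to be
 * written back.
 */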
static void pci_strbuf_flush(struct pci_strbuf *strbuf, struct pci_iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, int direction)
{
	int limit;

	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;
		u64 val;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);

		if (pci_iommu_read(matchreg) == 0)
			goto do_flush_sync;

		pci_iommu_write(flushreg, ctx);
		if ((val = pci_iommu_read(matchreg)) == 0)
			goto do_flush_sync;

		/* Retry the context flush for any tags still
		 * reported as dirty by the match register.
		 */
		val &= 0xffff;
		while (val) {
			if (val & 0x1)
				pci_iommu_write(flushreg, ctx);
			val >>= 1;
		}
		val = pci_iommu_read(matchreg);
		if (unlikely(val)) {
			printk(KERN_WARNING "pci_strbuf_flush: ctx flush "
			       "timeout matchreg[%lx] ctx[%lx]\n",
			       val, ctx);
			goto do_page_flush;
		}
	} else {
		unsigned long i;

	do_page_flush:
		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			pci_iommu_write(strbuf->strbuf_pflush, vaddr);
	}

do_flush_sync:
	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == PCI_DMA_TODEVICE)
		return;

	PCI_STC_FLUSHFLAG_INIT(strbuf);
	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) pci_iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!PCI_STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "pci_strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}
/* Unmap a single streaming mode DMA translation. */
void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx;

	if (direction == PCI_DMA_NONE)
		BUG();

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
#ifdef DEBUG_PCI_IOMMU
	if (IOPTE_IS_DUMMY(iommu, base))
		printk("pci_unmap_single called on non-mapped region %08x,%08x from %016lx\n",
		       bus_addr, sz, __builtin_return_address(0));
#endif
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	/* Step 2: Clear out first TSB entry. */
	iopte_make_dummy(iommu, base);

	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
			       npages, ctx);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
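
/* Typical streaming usage (a sketch; buf and len are hypothetical):
 *
 *	dma_addr_t mapping = pci_map_single(pdev, buf, len, PCI_DMA_FROMDEVICE);
 *	if (mapping == PCI_DMA_ERROR_CODE)
 *		return -ENOMEM;
 *	... let the device DMA into the buffer ...
 *	pci_unmap_single(pdev, mapping, len, PCI_DMA_FROMDEVICE);
 */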
#define SG_ENT_PHYS_ADDRESS(SG)	\
	(__pa(page_address((SG)->page)) + (SG)->offset)
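
/* Fill IOPTEs for a scatterlist whose entries prepare_sg() has already
 * coalesced into NUSED DMA segments.  For each segment we walk the
 * original entries and start a new IOPTE whenever the underlying
 * physical pages stop being contiguous.
 */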
static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
			   int nused, int nelems, unsigned long iopte_protection)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	int i;

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			pteval = iopte_protection | (pteval & IOPTE_PAGE);
			while (len > 0) {
				*iopte++ = __iopte(pteval);
				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0UL;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg < sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}
}
/* Map a set of buffers described by SGLIST with NELEMS array
 * elements in streaming mode for PCI DMA.
 * When making changes here, inspect the assembly output; it took some
 * effort to keep this routine from using stack slots for its variables.
 */
int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	unsigned long flags, ctx, npages, iopte_protection;
	iopte_t *base;
	u32 dma_base;
	struct scatterlist *sgtmp;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			pci_map_single(pdev,
				       (page_address(sglist->page) + sglist->offset),
				       sglist->length, direction);
		sglist->dma_length = sglist->length;
		return 1;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	if (direction == PCI_DMA_NONE)
		BUG();

	/* Step 1: Prepare scatter list. */

	npages = prepare_sg(sglist, nelems);

	/* Step 2: Allocate a cluster. */

	spin_lock_irqsave(&iommu->lock, flags);

	base = alloc_streaming_cluster(iommu, npages);
	if (base == NULL)
		goto bad;
	dma_base = iommu->page_table_map_base + ((base - iommu->page_table) << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses. */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nelems - used;

	/* Step 4: Choose a context if necessary. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

	/* Step 5: Create the mappings. */
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != PCI_DMA_TODEVICE)
		iopte_protection |= IOPTE_WRITE;
	fill_sg(base, sglist, used, nelems, iopte_protection);
#ifdef VERIFY_SG
	verify_sglist(sglist, nelems, base, npages);
#endif

	spin_unlock_irqrestore(&iommu->lock, flags);

	return used;

bad:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return PCI_DMA_ERROR_CODE;
}
/* Unmap a set of streaming mode DMA translations. */
void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, ctx, i, npages;
	u32 bus_addr;

	if (direction == PCI_DMA_NONE)
		BUG();

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	for (i = 1; i < nelems; i++)
		if (sglist[i].dma_length == 0)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT;

	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

#ifdef DEBUG_PCI_IOMMU
	if (IOPTE_IS_DUMMY(iommu, base))
		printk("pci_unmap_sg called on non-mapped region %016lx,%d from %016lx\n", sglist->dma_address, nelems, __builtin_return_address(0));
#endif

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	/* Step 2: Clear out first TSB entry. */
	iopte_make_dummy(iommu, base);

	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
			       npages, ctx);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
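
/* Typical scatter/gather usage (a sketch; sglist and nelems come from
 * the driver):
 *
 *	int count = pci_map_sg(pdev, sglist, nelems, PCI_DMA_TODEVICE);
 *	... program the <count> coalesced segments into the device ...
 *	pci_unmap_sg(pdev, sglist, nelems, PCI_DMA_TODEVICE);
 */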
/* Make physical memory consistent for a single
 * streaming mode DMA translation after a transfer.
 */
void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	unsigned long flags, ctx, npages;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 */
void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	u32 bus_addr;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	for (i = 1; i < nelems; i++)
		if (!sglist[i].dma_length)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
{
	struct pci_dev *ali_isa_bridge;
	u8 val;

	/* ALI sound chips generate 31-bits of DMA, a special register
	 * determines what bit 31 is emitted as.
	 */
	ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL,
					PCI_DEVICE_ID_AL_M1533,
					NULL);
	pci_read_config_byte(ali_isa_bridge, 0x7e, &val);
	if (set_bit)
		val |= 0x01;
	else
		val &= ~0x01;
	pci_write_config_byte(ali_isa_bridge, 0x7e, val);
	pci_dev_put(ali_isa_bridge);
}
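
/* Report whether DEVICE_MASK is usable behind this IOMMU.  DMA always
 * goes through the 32-bit IOMMU window, so masks of 1UL << 32 and above
 * are refused; smaller masks must still cover the IOMMU's own
 * dma_addr_mask.
 */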
int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
{
	u64 dma_addr_mask;

	if (pdev == NULL) {
		dma_addr_mask = 0xffffffff;
	} else {
		struct pcidev_cookie *pcp = pdev->sysdata;
		struct pci_iommu *iommu = pcp->pbm->iommu;

		dma_addr_mask = iommu->dma_addr_mask;

		if (pdev->vendor == PCI_VENDOR_ID_AL &&
		    pdev->device == PCI_DEVICE_ID_AL_M5451 &&
		    device_mask == 0x7fffffff) {
			ali_sound_dma_hack(pdev,
					   (dma_addr_mask & 0x80000000) != 0);
			return 1;
		}
	}

	if (device_mask >= (1UL << 32UL))
		return 0;

	return (device_mask & dma_addr_mask) == dma_addr_mask;
}