/* $Id: pci_iommu.c,v 1.17 2001/12/17 07:05:09 davem Exp $
 * pci_iommu.c: UltraSparc PCI controller IOM/STC support.
 *
 * Copyright (C) 1999 David S. Miller (davem@redhat.com)
 * Copyright (C) 1999, 2000 Jakub Jelinek (jakub@redhat.com)
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/delay.h>

#include <asm/pbm.h>

#include "iommu_common.h"
#define PCI_STC_CTXMATCH_ADDR(STC, CTX)	\
	((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
/* Accessing IOMMU and Streaming Buffer registers.
 * REG parameter is a physical address.  All registers
 * are 64-bits in size.
 */
#define pci_iommu_read(__reg) \
({	u64 __ret; \
	__asm__ __volatile__("ldxa [%1] %2, %0" \
			     : "=r" (__ret) \
			     : "r" (__reg), "i" (ASI_PHYS_BYPASS_EC_E) \
			     : "memory"); \
	__ret; \
})
#define pci_iommu_write(__reg, __val) \
	__asm__ __volatile__("stxa %0, [%1] %2" \
			     : /* no outputs */ \
			     : "r" (__val), "r" (__reg), \
			       "i" (ASI_PHYS_BYPASS_EC_E))
/* Must be invoked under the IOMMU lock. */
static void __iommu_flushall(struct pci_iommu *iommu)
{
	unsigned long tag;
	int entry;

	tag = iommu->iommu_flush + (0xa580UL - 0x0210UL);
	for (entry = 0; entry < 16; entry++) {
		pci_iommu_write(tag, 0);
		tag += 8;
	}

	/* Ensure completion of previous PIO writes. */
	(void) pci_iommu_read(iommu->write_complete_reg);

	/* Now update everyone's flush point. */
	for (entry = 0; entry < PBM_NCLUSTERS; entry++) {
		iommu->alloc_info[entry].flush =
			iommu->alloc_info[entry].next;
	}
}
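/* Illustration of the deferred flush scheme: the allocators only pay
 * for an IOMMU flush when a cluster's next pointer catches up with
 * its recorded flush point.  Resetting every cluster's flush point
 * to its current next pointer here records that everything allocated
 * before this moment is covered by the flush we just performed.
 */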
#define IOPTE_CONSISTENT(CTX) \
	(IOPTE_VALID | IOPTE_CACHE | \
	 (((CTX) << 47) & IOPTE_CONTEXT))

#define IOPTE_STREAMING(CTX) \
	(IOPTE_CONSISTENT(CTX) | IOPTE_STBUF)
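/* The context number lives in the high bits of the IOPTE; for
 * example, IOPTE_CONSISTENT(5) ORs (5UL << 47) into the entry, and
 * the unmap/sync paths below recover it again with
 * "(iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL".
 */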
/* Existing mappings are never marked invalid, instead they
 * are pointed to a dummy page.
 */
#define IOPTE_IS_DUMMY(iommu, iopte)	\
	((iopte_val(*iopte) & IOPTE_PAGE) == (iommu)->dummy_page_pa)
static inline void iopte_make_dummy(struct pci_iommu *iommu, iopte_t *iopte)
{
	unsigned long val = iopte_val(*iopte);

	val &= ~IOPTE_PAGE;
	val |= iommu->dummy_page_pa;

	iopte_val(*iopte) = val;
}
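/* Because a "free" entry is still a valid mapping (of the dummy
 * page), the allocators can recognize it by its page frame alone via
 * IOPTE_IS_DUMMY(), and tearing down a mapping never makes the IOMMU
 * see a transition through an invalid entry; the IOPTE is simply
 * re-pointed at dummy_page_pa.
 */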
void pci_iommu_table_init(struct pci_iommu *iommu, int tsbsize)
{
	int i;

	tsbsize /= sizeof(iopte_t);

	for (i = 0; i < tsbsize; i++)
		iopte_make_dummy(iommu, &iommu->page_table[i]);
}
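/* For example, a 64KB TSB holds 64KB / sizeof(iopte_t) == 8192
 * 8-byte entries, every one of which starts life pointing at the
 * dummy page, i.e. free.
 */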
static iopte_t *alloc_streaming_cluster(struct pci_iommu *iommu, unsigned long npages)
{
	iopte_t *iopte, *limit, *first;
	unsigned long cnum, ent, flush_point;

	cnum = 0;
	while ((1UL << cnum) < npages)
		cnum++;
	iopte = (iommu->page_table +
		 (cnum << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));

	if (cnum == 0)
		limit = (iommu->page_table +
			 iommu->lowest_consistent_map);
	else
		limit = (iopte +
			 (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));

	iopte += ((ent = iommu->alloc_info[cnum].next) << cnum);
	flush_point = iommu->alloc_info[cnum].flush;

	first = iopte;
	for (;;) {
		if (IOPTE_IS_DUMMY(iommu, iopte)) {
			if ((iopte + (1 << cnum)) >= limit)
				ent = 0;
			else
				ent = ent + 1;
			iommu->alloc_info[cnum].next = ent;
			if (ent == flush_point)
				__iommu_flushall(iommu);
			break;
		}
		iopte += (1 << cnum);
		ent++;
		if (iopte >= limit) {
			iopte = (iommu->page_table +
				 (cnum <<
				  (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
			ent = 0;
		}
		if (ent == flush_point)
			__iommu_flushall(iommu);
		if (iopte == first)
			goto bad;
	}

	/* I've got your streaming cluster right here buddy boy... */
	return iopte;

bad:
	printk(KERN_EMERG "pci_iommu: alloc_streaming_cluster of npages(%ld) failed!\n",
	       npages);
	return NULL;
}
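/* Cluster sizing is power-of-two: cnum is the smallest value with
 * (1UL << cnum) >= npages, so a 3-page request (cnum == 2) is carved
 * out of the pool of 4-entry clusters.  Assuming PBM_LOGCLUSTERS is 4
 * and a TSB of 2^13 entries, each of the 16 per-size pools spans
 * 2^9 == 512 entries.
 */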
static void free_streaming_cluster(struct pci_iommu *iommu, dma_addr_t base,
				   unsigned long npages, unsigned long ctx)
{
	unsigned long cnum, ent;

	cnum = 0;
	while ((1UL << cnum) < npages)
		cnum++;

	ent = (base << (32 - IO_PAGE_SHIFT + PBM_LOGCLUSTERS - iommu->page_table_sz_bits))
		>> (32 + PBM_LOGCLUSTERS + cnum - iommu->page_table_sz_bits);

	/* If the global flush might not have caught this entry,
	 * adjust the flush point such that we will flush before
	 * ever trying to reuse it.
	 */
#define between(X,Y,Z)	(((Z) - (Y)) >= ((X) - (Y)))
	if (between(ent, iommu->alloc_info[cnum].next, iommu->alloc_info[cnum].flush))
		iommu->alloc_info[cnum].flush = ent;
#undef between
}
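/* between() is a modular-arithmetic trick: with unsigned wraparound,
 * ((Z) - (Y)) >= ((X) - (Y)) holds exactly when X lies on the arc
 * from Y to Z.  E.g. with next == 10 and a wrapped flush point of 3,
 * a freed entry 14 gives (3 - 10) >= (14 - 10) in unsigned math, so
 * the flush point is pulled back to 14 and the entry is flushed
 * before reuse.
 */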
/* We allocate consistent mappings from the end of cluster zero. */
static iopte_t *alloc_consistent_cluster(struct pci_iommu *iommu, unsigned long npages)
{
	iopte_t *iopte;

	iopte = iommu->page_table + (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS));
	while (iopte > iommu->page_table) {
		iopte--;
		if (IOPTE_IS_DUMMY(iommu, iopte)) {
			unsigned long tmp = npages;

			while (--tmp) {
				iopte--;
				if (!IOPTE_IS_DUMMY(iommu, iopte))
					break;
			}
			if (tmp == 0) {
				u32 entry = (iopte - iommu->page_table);

				if (entry < iommu->lowest_consistent_map)
					iommu->lowest_consistent_map = entry;
				return iopte;
			}
		}
	}
	return NULL;
}
static int iommu_alloc_ctx(struct pci_iommu *iommu)
{
	int lowest = iommu->ctx_lowest_free;
	int sz = IOMMU_NUM_CTXS - lowest;
	int n = find_next_zero_bit(iommu->ctx_bitmap, sz, lowest);

	if (unlikely(n == sz)) {
		n = find_next_zero_bit(iommu->ctx_bitmap, lowest, 1);
		if (unlikely(n == lowest)) {
			printk(KERN_WARNING "IOMMU: Ran out of contexts.\n");
			n = 0;
		}
	}
	if (n)
		__set_bit(n, iommu->ctx_bitmap);

	return n;
}
static inline void iommu_free_ctx(struct pci_iommu *iommu, int ctx)
{
	if (likely(ctx)) {
		__clear_bit(ctx, iommu->ctx_bitmap);
		if (ctx < iommu->ctx_lowest_free)
			iommu->ctx_lowest_free = ctx;
	}
}
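/* Context zero doubles as "no context": the fallback search above
 * starts at bit 1, exhaustion yields n == 0, only non-zero contexts
 * are marked in the bitmap, and iommu_free_ctx() ignores ctx 0.
 * Code running on hardware without iommu_ctxflush simply leaves
 * ctx at 0.
 */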
/* Allocate and map kernel buffer of size SIZE using consistent mode
 * DMA for PCI device PDEV.  Return non-NULL cpu-side address if
 * successful and set *DMA_ADDRP to the PCI side dma address.
 */
void *pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, first_page, ctx;
	void *ret;
	int npages;

	size = IO_PAGE_ALIGN(size);
	order = get_order(size);
	if (order >= 10)
		return NULL;

	first_page = __get_free_pages(GFP_ATOMIC, order);
	if (first_page == 0UL)
		return NULL;
	memset((char *)first_page, 0, PAGE_SIZE << order);

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;

	spin_lock_irqsave(&iommu->lock, flags);
	iopte = alloc_consistent_cluster(iommu, size >> IO_PAGE_SHIFT);
	if (iopte == NULL) {
		spin_unlock_irqrestore(&iommu->lock, flags);
		free_pages(first_page, order);
		return NULL;
	}

	*dma_addrp = (iommu->page_table_map_base +
		      ((iopte - iommu->page_table) << IO_PAGE_SHIFT));
	ret = (void *) first_page;
	npages = size >> IO_PAGE_SHIFT;
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	first_page = __pa(first_page);
	while (npages--) {
		iopte_val(*iopte) = (IOPTE_CONSISTENT(ctx) |
				     IOPTE_WRITE |
				     (first_page & IOPTE_PAGE));
		iopte++;
		first_page += IO_PAGE_SIZE;
	}

	{
		int i;
		u32 daddr = *dma_addrp;

		npages = size >> IO_PAGE_SHIFT;
		for (i = 0; i < npages; i++) {
			pci_iommu_write(iommu->iommu_flush, daddr);
			daddr += IO_PAGE_SIZE;
		}
	}

	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;
}
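/* Hypothetical driver usage of the consistent API (MY_DESC_BYTES is
 * a made-up size, not something defined in this file):
 *
 *	dma_addr_t dvma;
 *	void *cpu = pci_alloc_consistent(pdev, MY_DESC_BYTES, &dvma);
 *
 *	if (cpu != NULL) {
 *		... hand dvma to the device, touch the ring via cpu ...
 *		pci_free_consistent(pdev, MY_DESC_BYTES, cpu, dvma);
 *	}
 */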
/* Free and unmap a consistent DMA translation. */
void pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu, dma_addr_t dvma)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	iopte_t *iopte;
	unsigned long flags, order, npages, i, ctx;

	npages = IO_PAGE_ALIGN(size) >> IO_PAGE_SHIFT;
	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	iopte = iommu->page_table +
		((dvma - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

	spin_lock_irqsave(&iommu->lock, flags);

	if ((iopte - iommu->page_table) ==
	    iommu->lowest_consistent_map) {
		iopte_t *walk = iopte + npages;
		iopte_t *limit;

		limit = (iommu->page_table +
			 (1 << (iommu->page_table_sz_bits - PBM_LOGCLUSTERS)));
		while (walk < limit) {
			if (!IOPTE_IS_DUMMY(iommu, walk))
				break;
			walk++;
		}
		iommu->lowest_consistent_map =
			(walk - iommu->page_table);
	}

	/* Data for consistent mappings cannot enter the streaming
	 * buffers, so we only need to update the TSB.  We flush
	 * the IOMMU here as well to prevent conflicts with the
	 * streaming mapping deferred tlb flush scheme.
	 */

	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;

	for (i = 0; i < npages; i++, iopte++)
		iopte_make_dummy(iommu, iopte);

	if (iommu->iommu_ctxflush) {
		pci_iommu_write(iommu->iommu_ctxflush, ctx);
	} else {
		for (i = 0; i < npages; i++) {
			u32 daddr = dvma + (i << IO_PAGE_SHIFT);

			pci_iommu_write(iommu->iommu_flush, daddr);
		}
	}

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);

	order = get_order(size);
	if (order < 10)
		free_pages((unsigned long)cpu, order);
}
/* Map a single buffer at PTR of SZ bytes for PCI DMA
 * in streaming mode.
 */
dma_addr_t pci_map_single(struct pci_dev *pdev, void *ptr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, oaddr;
	unsigned long i, base_paddr, ctx;
	u32 bus_addr, ret;
	unsigned long iopte_protection;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	if (direction == PCI_DMA_NONE)
		BUG();

	oaddr = (unsigned long)ptr;
	npages = IO_PAGE_ALIGN(oaddr + sz) - (oaddr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;

	spin_lock_irqsave(&iommu->lock, flags);

	base = alloc_streaming_cluster(iommu, npages);
	if (base == NULL)
		goto bad;
	bus_addr = (iommu->page_table_map_base +
		    ((base - iommu->page_table) << IO_PAGE_SHIFT));
	ret = bus_addr | (oaddr & ~IO_PAGE_MASK);
	base_paddr = __pa(oaddr & IO_PAGE_MASK);
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != PCI_DMA_TODEVICE)
		iopte_protection |= IOPTE_WRITE;

	for (i = 0; i < npages; i++, base++, base_paddr += IO_PAGE_SIZE)
		iopte_val(*base) = iopte_protection | base_paddr;

	spin_unlock_irqrestore(&iommu->lock, flags);

	return ret;

bad:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return PCI_DMA_ERROR_CODE;
}
static void pci_strbuf_flush(struct pci_strbuf *strbuf, struct pci_iommu *iommu, u32 vaddr, unsigned long ctx, unsigned long npages, int direction)
{
	int limit;

	if (strbuf->strbuf_ctxflush &&
	    iommu->iommu_ctxflush) {
		unsigned long matchreg, flushreg;
		u64 val;

		flushreg = strbuf->strbuf_ctxflush;
		matchreg = PCI_STC_CTXMATCH_ADDR(strbuf, ctx);

		pci_iommu_write(flushreg, ctx);
		val = pci_iommu_read(matchreg);
		val &= 0xffff;
		if (!val)
			goto do_flush_sync;

		while (val) {
			if (val & 0x1)
				pci_iommu_write(flushreg, ctx);
			val >>= 1;
		}
		val = pci_iommu_read(matchreg);
		if (unlikely(val)) {
			printk(KERN_WARNING "pci_strbuf_flush: ctx flush "
			       "timeout matchreg[%lx] ctx[%lx]\n",
			       val, ctx);
		}
	} else {
		unsigned long i;

		for (i = 0; i < npages; i++, vaddr += IO_PAGE_SIZE)
			pci_iommu_write(strbuf->strbuf_pflush, vaddr);
	}

do_flush_sync:
	/* If the device could not have possibly put dirty data into
	 * the streaming cache, no flush-flag synchronization needs
	 * to be performed.
	 */
	if (direction == PCI_DMA_TODEVICE)
		return;

	PCI_STC_FLUSHFLAG_INIT(strbuf);
	pci_iommu_write(strbuf->strbuf_fsync, strbuf->strbuf_flushflag_pa);
	(void) pci_iommu_read(iommu->write_complete_reg);

	limit = 100000;
	while (!PCI_STC_FLUSHFLAG_SET(strbuf)) {
		limit--;
		if (!limit)
			break;
		udelay(1);
		rmb();
	}
	if (!limit)
		printk(KERN_WARNING "pci_strbuf_flush: flushflag timeout "
		       "vaddr[%08x] ctx[%lx] npages[%ld]\n",
		       vaddr, ctx, npages);
}
/* Unmap a single streaming mode DMA translation. */
void pci_unmap_single(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, npages, ctx;

	if (direction == PCI_DMA_NONE)
		BUG();

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
#ifdef DEBUG_PCI_IOMMU
	if (IOPTE_IS_DUMMY(iommu, base))
		printk("pci_unmap_single called on non-mapped region %08x,%08x from %016lx\n",
		       bus_addr, sz, __builtin_return_address(0));
#endif
	bus_addr &= IO_PAGE_MASK;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	/* Step 2: Clear out first TSB entry. */
	iopte_make_dummy(iommu, base);

	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
			       npages, ctx);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
#define SG_ENT_PHYS_ADDRESS(SG)	\
	(__pa(page_address((SG)->page)) + (SG)->offset)

static inline void fill_sg(iopte_t *iopte, struct scatterlist *sg,
			   int nused, int nelems, unsigned long iopte_protection)
{
	struct scatterlist *dma_sg = sg;
	struct scatterlist *sg_end = sg + nelems;
	int i;

	for (i = 0; i < nused; i++) {
		unsigned long pteval = ~0UL;
		u32 dma_npages;

		dma_npages = ((dma_sg->dma_address & (IO_PAGE_SIZE - 1UL)) +
			      dma_sg->dma_length +
			      ((IO_PAGE_SIZE - 1UL))) >> IO_PAGE_SHIFT;
		do {
			unsigned long offset;
			signed int len;

			/* If we are here, we know we have at least one
			 * more page to map.  So walk forward until we
			 * hit a page crossing, and begin creating new
			 * mappings from that spot.
			 */
			for (;;) {
				unsigned long tmp;

				tmp = SG_ENT_PHYS_ADDRESS(sg);
				len = sg->length;
				if (((tmp ^ pteval) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = tmp & IO_PAGE_MASK;
					offset = tmp & (IO_PAGE_SIZE - 1UL);
					break;
				}
				if (((tmp ^ (tmp + len - 1UL)) >> IO_PAGE_SHIFT) != 0UL) {
					pteval = (tmp + IO_PAGE_SIZE) & IO_PAGE_MASK;
					offset = 0UL;
					len -= (IO_PAGE_SIZE - (tmp & (IO_PAGE_SIZE - 1UL)));
					break;
				}
				sg++;
			}

			pteval = iopte_protection | (pteval & IOPTE_PAGE);
			while (len > 0) {
				*iopte++ = __iopte(pteval);
				pteval += IO_PAGE_SIZE;
				len -= (IO_PAGE_SIZE - offset);
				offset = 0UL;
				dma_npages--;
			}

			pteval = (pteval & IOPTE_PAGE) + len;
			sg++;

			/* Skip over any tail mappings we've fully mapped,
			 * adjusting pteval along the way.  Stop when we
			 * detect a page crossing event.
			 */
			while (sg < sg_end &&
			       (pteval << (64 - IO_PAGE_SHIFT)) != 0UL &&
			       (pteval == SG_ENT_PHYS_ADDRESS(sg)) &&
			       ((pteval ^
				 (SG_ENT_PHYS_ADDRESS(sg) + sg->length - 1UL)) >> IO_PAGE_SHIFT) == 0UL) {
				pteval += sg->length;
				sg++;
			}
			if ((pteval << (64 - IO_PAGE_SHIFT)) == 0UL)
				pteval = ~0UL;
		} while (dma_npages != 0);
		dma_sg++;
	}
}
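/* fill_sg() relies on prepare_sg() having coalesced the list: each
 * resulting DMA segment (nused of them) carries the combined
 * dma_length, while entries merged into a predecessor are left with
 * dma_length == 0 (the same sentinel pci_unmap_sg() scans for).
 * E.g. two adjacent, physically contiguous entries end up as one
 * dma_sg and one run of IOPTEs here.
 */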
/* Map a set of buffers described by SGLIST with NELEMS array
 * elements in streaming mode for PCI DMA.
 * When making changes here, inspect the assembly output; it was
 * hard to keep this routine from using stack slots to hold variables.
 */
int pci_map_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	unsigned long flags, ctx, npages, iopte_protection;
	iopte_t *base;
	u32 dma_base;
	struct scatterlist *sgtmp;
	int used;

	/* Fast path single entry scatterlists. */
	if (nelems == 1) {
		sglist->dma_address =
			pci_map_single(pdev,
				       (page_address(sglist->page) + sglist->offset),
				       sglist->length, direction);
		sglist->dma_length = sglist->length;
		return 1;
	}

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	if (direction == PCI_DMA_NONE)
		BUG();

	/* Step 1: Prepare scatter list. */

	npages = prepare_sg(sglist, nelems);

	/* Step 2: Allocate a cluster. */

	spin_lock_irqsave(&iommu->lock, flags);

	base = alloc_streaming_cluster(iommu, npages);
	if (base == NULL)
		goto bad;
	dma_base = iommu->page_table_map_base + ((base - iommu->page_table) << IO_PAGE_SHIFT);

	/* Step 3: Normalize DMA addresses. */
	used = nelems;

	sgtmp = sglist;
	while (used && sgtmp->dma_length) {
		sgtmp->dma_address += dma_base;
		sgtmp++;
		used--;
	}
	used = nelems - used;

	/* Step 4: Choose a context if necessary. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = iommu_alloc_ctx(iommu);

	/* Step 5: Create the mappings. */
	if (strbuf->strbuf_enabled)
		iopte_protection = IOPTE_STREAMING(ctx);
	else
		iopte_protection = IOPTE_CONSISTENT(ctx);
	if (direction != PCI_DMA_TODEVICE)
		iopte_protection |= IOPTE_WRITE;
	fill_sg(base, sglist, used, nelems, iopte_protection);
#ifdef VERIFY_SG
	verify_sglist(sglist, nelems, base, npages);
#endif

	spin_unlock_irqrestore(&iommu->lock, flags);

	return used;

bad:
	spin_unlock_irqrestore(&iommu->lock, flags);
	return PCI_DMA_ERROR_CODE;
}
/* Unmap a set of streaming mode DMA translations. */
void pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	iopte_t *base;
	unsigned long flags, ctx, i, npages;
	u32 bus_addr;

	if (direction == PCI_DMA_NONE)
		BUG();

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	bus_addr = sglist->dma_address & IO_PAGE_MASK;

	for (i = 1; i < nelems; i++)
		if (sglist[i].dma_length == 0)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length) - bus_addr) >> IO_PAGE_SHIFT;

	base = iommu->page_table +
		((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);

#ifdef DEBUG_PCI_IOMMU
	if (IOPTE_IS_DUMMY(iommu, base))
		printk("pci_unmap_sg called on non-mapped region %016lx,%d from %016lx\n", sglist->dma_address, nelems, __builtin_return_address(0));
#endif

	spin_lock_irqsave(&iommu->lock, flags);

	/* Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush)
		ctx = (iopte_val(*base) & IOPTE_CONTEXT) >> 47UL;

	/* Step 1: Kick data out of streaming buffers if necessary. */
	if (strbuf->strbuf_enabled)
		pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	/* Step 2: Clear out first TSB entry. */
	iopte_make_dummy(iommu, base);

	free_streaming_cluster(iommu, bus_addr - iommu->page_table_map_base,
			       npages, ctx);

	iommu_free_ctx(iommu, ctx);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
/* Make physical memory consistent for a single
 * streaming mode DMA translation after a transfer.
 */
void pci_dma_sync_single_for_cpu(struct pci_dev *pdev, dma_addr_t bus_addr, size_t sz, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	unsigned long flags, ctx, npages;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	npages = IO_PAGE_ALIGN(bus_addr + sz) - (bus_addr & IO_PAGE_MASK);
	npages >>= IO_PAGE_SHIFT;
	bus_addr &= IO_PAGE_MASK;

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((bus_addr - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
/* Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 */
void pci_dma_sync_sg_for_cpu(struct pci_dev *pdev, struct scatterlist *sglist, int nelems, int direction)
{
	struct pcidev_cookie *pcp;
	struct pci_iommu *iommu;
	struct pci_strbuf *strbuf;
	unsigned long flags, ctx, npages, i;
	u32 bus_addr;

	pcp = pdev->sysdata;
	iommu = pcp->pbm->iommu;
	strbuf = &pcp->pbm->stc;

	if (!strbuf->strbuf_enabled)
		return;

	spin_lock_irqsave(&iommu->lock, flags);

	/* Step 1: Record the context, if any. */
	ctx = 0;
	if (iommu->iommu_ctxflush &&
	    strbuf->strbuf_ctxflush) {
		iopte_t *iopte;

		iopte = iommu->page_table +
			((sglist[0].dma_address - iommu->page_table_map_base) >> IO_PAGE_SHIFT);
		ctx = (iopte_val(*iopte) & IOPTE_CONTEXT) >> 47UL;
	}

	/* Step 2: Kick data out of streaming buffers. */
	bus_addr = sglist[0].dma_address & IO_PAGE_MASK;
	for (i = 1; i < nelems; i++)
		if (!sglist[i].dma_length)
			break;
	i--;
	npages = (IO_PAGE_ALIGN(sglist[i].dma_address + sglist[i].dma_length)
		  - bus_addr) >> IO_PAGE_SHIFT;
	pci_strbuf_flush(strbuf, iommu, bus_addr, ctx, npages, direction);

	spin_unlock_irqrestore(&iommu->lock, flags);
}
static void ali_sound_dma_hack(struct pci_dev *pdev, int set_bit)
{
	struct pci_dev *ali_isa_bridge;
	u8 val;

	/* ALI sound chips generate 31-bits of DMA, a special register
	 * determines what bit 31 is emitted as.
	 */
	ali_isa_bridge = pci_get_device(PCI_VENDOR_ID_AL,
					PCI_DEVICE_ID_AL_M1533,
					NULL);

	pci_read_config_byte(ali_isa_bridge, 0x7e, &val);
	if (set_bit)
		val |= 0x01;
	else
		val &= ~0x01;
	pci_write_config_byte(ali_isa_bridge, 0x7e, val);
	pci_dev_put(ali_isa_bridge);
}
int pci_dma_supported(struct pci_dev *pdev, u64 device_mask)
{
	struct pcidev_cookie *pcp = pdev->sysdata;
	u64 dma_addr_mask;

	if (pdev == NULL) {
		dma_addr_mask = 0xffffffff;
	} else {
		struct pci_iommu *iommu = pcp->pbm->iommu;

		dma_addr_mask = iommu->dma_addr_mask;

		if (pdev->vendor == PCI_VENDOR_ID_AL &&
		    pdev->device == PCI_DEVICE_ID_AL_M5451 &&
		    device_mask == 0x7fffffff) {
			ali_sound_dma_hack(pdev,
					   (dma_addr_mask & 0x80000000) != 0);
			return 1;
		}
	}

	if (device_mask >= (1UL << 32UL))
		return 0;

	return (device_mask & dma_addr_mask) == dma_addr_mask;
}
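/* For example, with a 32-bit IOMMU dma_addr_mask a device asking for
 * a 24-bit mask is refused, while a full 32-bit request succeeds:
 *
 *	(0x00ffffff & 0xffffffff) != 0xffffffff	--> 0
 *	(0xffffffff & 0xffffffff) == 0xffffffff	--> 1
 */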