2 ** IA64 System Bus Adapter (SBA) I/O MMU manager
4 ** (c) Copyright 2002-2005 Alex Williamson
5 ** (c) Copyright 2002-2003 Grant Grundler
6 ** (c) Copyright 2002-2005 Hewlett-Packard Company
8 ** Portions (c) 2000 Grant Grundler (from parisc I/O MMU code)
9 ** Portions (c) 1999 Dave S. Miller (from sparc64 I/O MMU code)
11 ** This program is free software; you can redistribute it and/or modify
12 ** it under the terms of the GNU General Public License as published by
13 ** the Free Software Foundation; either version 2 of the License, or
14 ** (at your option) any later version.
17 ** This module initializes the IOC (I/O Controller) found on HP
18 ** McKinley machines and their successors.
22 #include <linux/types.h>
23 #include <linux/kernel.h>
24 #include <linux/module.h>
25 #include <linux/spinlock.h>
26 #include <linux/slab.h>
27 #include <linux/init.h>
29 #include <linux/string.h>
30 #include <linux/pci.h>
31 #include <linux/proc_fs.h>
32 #include <linux/seq_file.h>
33 #include <linux/acpi.h>
34 #include <linux/efi.h>
35 #include <linux/nodemask.h>
36 #include <linux/bitops.h> /* hweight64() */
37 #include <linux/crash_dump.h>
39 #include <asm/delay.h> /* ia64_get_itc() */
41 #include <asm/page.h> /* PAGE_OFFSET */
43 #include <asm/system.h> /* wmb() */
45 #include <asm/acpi-ext.h>
47 extern int swiotlb_late_init_with_default_size (size_t size);
52 ** Enables timing of pdir resource map searches. Output in /proc.
53 ** Disabled by default to optimize performance.
55 #undef PDIR_SEARCH_TIMING
58 ** This option allows cards capable of 64bit DMA to bypass the IOMMU. If
59 ** not defined, all DMA will be 32bit and go through the TLB.
60 ** There's potentially a conflict in the bio merge code with us
61 ** advertising an iommu, but then bypassing it. Since I/O MMU bypassing
62 ** appears to give more performance than bio-level virtual merging, we'll
63 ** do the former for now. NOTE: BYPASS_SG also needs to be undef'd to
64 ** completely restrict DMA to the IOMMU.
66 #define ALLOW_IOV_BYPASS
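/*
** Illustrative sketch of the bypass decision (assuming a 64-bit capable
** device; the real check lives in sba_map_single() below): the physical
** address is used directly whenever it fits under the device's DMA mask,
** otherwise an IOVA mapping is allocated.
**
**	u64 phys = virt_to_phys(addr);
**	if ((phys & ~to_pci_dev(dev)->dma_mask) == 0)
**		return phys;	/* bypass the IOMMU entirely */
**	/* else fall through and build a pdir mapping */
*/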
69 ** This option specifically allows/disallows bypassing scatterlists with
70 ** multiple entries. Coalescing these entries can allow better DMA streaming
71 ** and in some cases shows better performance than entirely bypassing the
72 ** IOMMU. Performance increase on the order of 1-2% sequential output/input
73 ** using bonnie++ on a RAID0 MD device (sym2 & mpt).
75 #undef ALLOW_IOV_BYPASS_SG
78 ** If a device prefetches beyond the end of a valid pdir entry, it will cause
79 ** a hard failure, ie. MCA. Version 3.0 and later of the zx1 LBA should
80 ** disconnect on 4k boundaries and prevent such issues. If the device is
81 ** particularly aggressive, this option will keep the entire pdir valid such
82 ** that prefetching will hit a valid address. This could severely impact
83 ** error containment, and is therefore off by default. The page that is
84 ** used for spill-over is poisoned, so that should help debugging somewhat.
86 #undef FULL_VALID_PDIR
88 #define ENABLE_MARK_CLEAN
91 ** The number of debug flags is a clue - this code is fragile. NOTE: since
92 ** tightening the use of res_lock the resource bitmap and actual pdir are no
93 ** longer guaranteed to stay in sync. The sanity checking code isn't going to be very useful either.
98 #undef DEBUG_SBA_RUN_SG
99 #undef DEBUG_SBA_RESOURCE
100 #undef ASSERT_PDIR_SANITY
101 #undef DEBUG_LARGE_SG_ENTRIES
104 #if defined(FULL_VALID_PDIR) && defined(ASSERT_PDIR_SANITY)
105 #error FULL_VALID_PDIR and ASSERT_PDIR_SANITY are mutually exclusive
108 #define SBA_INLINE __inline__
109 /* #define SBA_INLINE */
111 #ifdef DEBUG_SBA_INIT
112 #define DBG_INIT(x...) printk(x)
114 #define DBG_INIT(x...)
118 #define DBG_RUN(x...) printk(x)
120 #define DBG_RUN(x...)
123 #ifdef DEBUG_SBA_RUN_SG
124 #define DBG_RUN_SG(x...) printk(x)
126 #define DBG_RUN_SG(x...)
130 #ifdef DEBUG_SBA_RESOURCE
131 #define DBG_RES(x...) printk(x)
133 #define DBG_RES(x...)
137 #define DBG_BYPASS(x...) printk(x)
139 #define DBG_BYPASS(x...)
142 #ifdef ASSERT_PDIR_SANITY
143 #define ASSERT(expr) \
145 printk( "\n" __FILE__ ":%d: Assertion " #expr " failed!\n",__LINE__); \
153 ** The number of pdir entries to "free" before issuing
154 ** a read to PCOM register to flush out PCOM writes.
155 ** Interacts with allocation granularity (ie 4 or 8 entries
156 ** allocated and free'd/purged at a time might make this
157 ** less interesting).
159 #define DELAYED_RESOURCE_CNT 64
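/*
** Rough sketch of how the delayed-free ring is used by sba_unmap_single()
** (see below); the point is that one PCOM read flushes a whole batch of
** posted purges instead of one read per unmap:
**
**	d = &ioc->saved[ioc->saved_cnt];
**	d->iova = iova;
**	d->size = size;
**	if (++ioc->saved_cnt >= DELAYED_RESOURCE_CNT) {
**		... sba_mark_invalid()/sba_free_range() for each saved pair ...
**		READ_REG(ioc->ioc_hpa + IOC_PCOM);	/* flush purges */
**	}
*/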
161 #define PCI_DEVICE_ID_HP_SX2000_IOC 0x12ec
163 #define ZX1_IOC_ID ((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP)
164 #define ZX2_IOC_ID ((PCI_DEVICE_ID_HP_ZX2_IOC << 16) | PCI_VENDOR_ID_HP)
165 #define REO_IOC_ID ((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP)
166 #define SX1000_IOC_ID ((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP)
167 #define SX2000_IOC_ID ((PCI_DEVICE_ID_HP_SX2000_IOC << 16) | PCI_VENDOR_ID_HP)
169 #define ZX1_IOC_OFFSET 0x1000 /* ACPI reports SBA, we want IOC */
171 #define IOC_FUNC_ID 0x000
172 #define IOC_FCLASS 0x008 /* function class, bist, header, rev... */
173 #define IOC_IBASE 0x300 /* IO TLB */
174 #define IOC_IMASK 0x308
175 #define IOC_PCOM 0x310
176 #define IOC_TCNFG 0x318
177 #define IOC_PDIR_BASE 0x320
179 #define IOC_ROPE0_CFG 0x500
180 #define IOC_ROPE_AO 0x10 /* Allow "Relaxed Ordering" */
183 /* AGP GART driver looks for this */
184 #define ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL
187 ** The zx1 IOC supports 4/8/16/64KB page sizes (see TCNFG register)
189 ** Some IOCs (sx1000) can run at the above page sizes, but are
190 ** really only supported using the IOC at a 4k page size.
192 ** iovp_size could only be greater than PAGE_SIZE if we are
193 ** confident the drivers really only touch the next physical
194 ** page iff that driver instance owns it.
196 static unsigned long iovp_size;
197 static unsigned long iovp_shift;
198 static unsigned long iovp_mask;
201 void __iomem *ioc_hpa; /* I/O MMU base address */
202 char *res_map; /* resource map, bit == pdir entry */
203 u64 *pdir_base; /* physical base address */
204 unsigned long ibase; /* pdir IOV Space base */
205 unsigned long imask; /* pdir IOV Space mask */
207 unsigned long *res_hint; /* next avail IOVP - circular search */
208 unsigned long dma_mask;
209 spinlock_t res_lock; /* protects the resource bitmap, but must be held when */
210 /* clearing pdir to prevent races with allocations. */
211 unsigned int res_bitshift; /* from the RIGHT! */
212 unsigned int res_size; /* size of resource map in bytes */
214 unsigned int node; /* node where this IOC lives */
216 #if DELAYED_RESOURCE_CNT > 0
217 spinlock_t saved_lock; /* may want to try to get this on a separate cacheline */
218 /* from res_lock for bigger systems. */
220 struct sba_dma_pair {
223 } saved[DELAYED_RESOURCE_CNT];
226 #ifdef PDIR_SEARCH_TIMING
227 #define SBA_SEARCH_SAMPLE 0x100
228 unsigned long avg_search[SBA_SEARCH_SAMPLE];
229 unsigned long avg_idx; /* current index into avg_search */
232 /* Stuff we don't need in performance path */
233 struct ioc *next; /* list of IOC's in system */
234 acpi_handle handle; /* for multiple IOC's */
236 unsigned int func_id;
237 unsigned int rev; /* HW revision of chip */
239 unsigned int pdir_size; /* in bytes, determined by IOV Space size */
240 struct pci_dev *sac_only_dev;
243 static struct ioc *ioc_list;
244 static int reserve_sba_gart = 1;
246 static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
247 static SBA_INLINE void sba_free_range(struct ioc *, dma_addr_t, size_t);
249 #define sba_sg_address(sg) sg_virt((sg))
251 #ifdef FULL_VALID_PDIR
252 static u64 prefetch_spill_page;
256 # define GET_IOC(dev) (((dev)->bus == &pci_bus_type) \
257 ? ((struct ioc *) PCI_CONTROLLER(to_pci_dev(dev))->iommu) : NULL)
259 # define GET_IOC(dev) NULL
263 ** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
264 ** (or rather not merge) DMAs into manageable chunks.
265 ** On parisc, this is more of a software/tuning constraint
266 ** than a hardware one. I/O MMU allocation algorithms can be
267 ** faster with smaller sizes (to some degree).
269 #define DMA_CHUNK_SIZE (BITS_PER_LONG*iovp_size)
271 #define ROUNDUP(x,y) (((x) + ((y)-1)) & ~((y)-1))
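/*
** Worked example, assuming 4KB IOVP pages (iovp_size == 4096) and 64-bit
** longs: DMA_CHUNK_SIZE == 64 * 4096 == 256KB, and
** ROUNDUP(0x1234, iovp_size) == 0x2000 -- lengths handed to the mapping
** code are always rounded up to whole IOVP pages.
*/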
273 /************************************
274 ** SBA register read and write support
276 ** BE WARNED: register writes are posted.
277 ** (ie follow writes which must reach HW with a read)
280 #define READ_REG(addr) __raw_readq(addr)
281 #define WRITE_REG(val, addr) __raw_writeq(val, addr)
283 #ifdef DEBUG_SBA_INIT
286 * sba_dump_tlb - debugging only - print IOMMU operating parameters
287 * @hpa: base address of the IOMMU
289 * Print the size/location of the IO MMU PDIR.
292 sba_dump_tlb(char *hpa)
294 DBG_INIT("IO TLB at 0x%p\n", (void *)hpa);
295 DBG_INIT("IOC_IBASE : %016lx\n", READ_REG(hpa+IOC_IBASE));
296 DBG_INIT("IOC_IMASK : %016lx\n", READ_REG(hpa+IOC_IMASK));
297 DBG_INIT("IOC_TCNFG : %016lx\n", READ_REG(hpa+IOC_TCNFG));
298 DBG_INIT("IOC_PDIR_BASE: %016lx\n", READ_REG(hpa+IOC_PDIR_BASE));
304 #ifdef ASSERT_PDIR_SANITY
307 * sba_dump_pdir_entry - debugging only - print one IOMMU PDIR entry
308 * @ioc: IO MMU structure which owns the pdir we are interested in.
309 * @msg: text to print on the output line.
312 * Print one entry of the IO MMU PDIR in human readable form.
315 sba_dump_pdir_entry(struct ioc *ioc, char *msg, uint pide)
317 /* start printing from lowest pde in rval */
318 u64 *ptr = &ioc->pdir_base[pide & ~(BITS_PER_LONG - 1)];
319 unsigned long *rptr = (unsigned long *) &ioc->res_map[(pide >>3) & -sizeof(unsigned long)];
322 printk(KERN_DEBUG "SBA: %s rp %p bit %d rval 0x%lx\n",
323 msg, rptr, pide & (BITS_PER_LONG - 1), *rptr);
326 while (rcnt < BITS_PER_LONG) {
327 printk(KERN_DEBUG "%s %2d %p %016Lx\n",
328 (rcnt == (pide & (BITS_PER_LONG - 1)))
330 rcnt, ptr, (unsigned long long) *ptr );
334 printk(KERN_DEBUG "%s", msg);
339 * sba_check_pdir - debugging only - consistency checker
340 * @ioc: IO MMU structure which owns the pdir we are interested in.
341 * @msg: text to print on the output line.
343 * Verify the resource map and pdir state is consistent
346 sba_check_pdir(struct ioc *ioc, char *msg)
348 u64 *rptr_end = (u64 *) &(ioc->res_map[ioc->res_size]);
349 u64 *rptr = (u64 *) ioc->res_map; /* resource map ptr */
350 u64 *pptr = ioc->pdir_base; /* pdir ptr */
353 while (rptr < rptr_end) {
355 int rcnt; /* number of bits we might check */
361 /* Get last byte and highest bit from that */
362 u32 pde = ((u32)((*pptr >> (63)) & 0x1));
363 if ((rval & 0x1) ^ pde)
366 ** BUMMER! -- res_map != pdir --
367 ** Dump rval and matching pdir entries
369 sba_dump_pdir_entry(ioc, msg, pide);
373 rval >>= 1; /* try the next bit */
377 rptr++; /* look at next word of res_map */
379 /* It'd be nice if we always got here :^) */
385 * sba_dump_sg - debugging only - print Scatter-Gather list
386 * @ioc: IO MMU structure which owns the pdir we are interested in.
387 * @startsg: head of the SG list
388 * @nents: number of entries in SG list
390 * print the SG list so we can verify it's correct by hand.
393 sba_dump_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
395 while (nents-- > 0) {
396 printk(KERN_DEBUG " %d : DMA %08lx/%05x CPU %p\n", nents,
397 startsg->dma_address, startsg->dma_length,
398 sba_sg_address(startsg));
399 startsg = sg_next(startsg);
404 sba_check_sg( struct ioc *ioc, struct scatterlist *startsg, int nents)
406 struct scatterlist *the_sg = startsg;
407 int the_nents = nents;
409 while (the_nents-- > 0) {
410 if (sba_sg_address(the_sg) == 0x0UL)
411 sba_dump_sg(NULL, startsg, nents);
412 the_sg = sg_next(the_sg);
416 #endif /* ASSERT_PDIR_SANITY */
421 /**************************************************************
423 * I/O Pdir Resource Management
425 * Bits set in the resource map are in use.
426 * Each bit can represent a number of pages.
427 * LSbs represent lower addresses (IOVA's).
429 ***************************************************************/
430 #define PAGES_PER_RANGE 1 /* could increase this to 4 or 8 if needed */
432 /* Convert from IOVP to IOVA and vice versa. */
433 #define SBA_IOVA(ioc,iovp,offset) ((ioc->ibase) | (iovp) | (offset))
434 #define SBA_IOVP(ioc,iova) ((iova) & ~(ioc->ibase))
436 #define PDIR_ENTRY_SIZE sizeof(u64)
438 #define PDIR_INDEX(iovp) ((iovp)>>iovp_shift)
440 #define RESMAP_MASK(n) ~(~0UL << (n))
441 #define RESMAP_IDX_MASK (sizeof(unsigned long) - 1)
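/*
** Worked example (illustrative values: 4KB IOVP pages, ioc->ibase ==
** 0x40000000): pdir index pide == 0x10 corresponds to
** iovp == pide << iovp_shift == 0x10000, so
** SBA_IOVA(ioc, 0x10000, 0x234) == 0x40010234.  Going the other way,
** SBA_IOVP(ioc, 0x40010234) == 0x10234 and PDIR_INDEX(0x10234) == 0x10.
*/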
445 * For most cases the normal get_order is sufficient; however, it limits us
446 * to PAGE_SIZE being the minimum mapping alignment and TC flush granularity.
447 * Using this one (with the static iovp_shift) only costs about one clock
448 * cycle and makes the code more intuitive.
450 static SBA_INLINE int
451 get_iovp_order (unsigned long size)
453 long double d = size - 1;
456 order = ia64_getf_exp(d);
457 order = order - iovp_shift - 0xffff + 1;
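/*
** Worked example (assuming iovp_shift == 12): get_iovp_order(4096) == 0,
** get_iovp_order(4097) == 1 and get_iovp_order(12288) == 2 -- i.e. the order
** of the smallest power-of-two run of IOVP pages covering the size.  The
** getf.exp trick simply reads floor(log2(size - 1)) out of the 0xffff-biased
** exponent field instead of looping over shifts.
*/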
464 * sba_search_bitmap - find free space in IO PDIR resource bitmap
465 * @ioc: IO MMU structure which owns the pdir we are interested in.
466 * @bits_wanted: number of entries we need.
467 * @use_hint: use res_hint to indicate where to start looking
469 * Find consecutive free bits in resource bitmap.
470 * Each bit represents one entry in the IO Pdir.
471 * Cool perf optimization: search for log2(size) bits at a time.
473 static SBA_INLINE unsigned long
474 sba_search_bitmap(struct ioc *ioc, unsigned long bits_wanted, int use_hint)
476 unsigned long *res_ptr;
477 unsigned long *res_end = (unsigned long *) &(ioc->res_map[ioc->res_size]);
478 unsigned long flags, pide = ~0UL;
480 ASSERT(((unsigned long) ioc->res_hint & (sizeof(unsigned long) - 1UL)) == 0);
481 ASSERT(res_ptr < res_end);
483 spin_lock_irqsave(&ioc->res_lock, flags);
485 /* Allow caller to force a search through the entire resource space */
486 if (likely(use_hint)) {
487 res_ptr = ioc->res_hint;
489 res_ptr = (ulong *)ioc->res_map;
490 ioc->res_bitshift = 0;
494 * N.B. REO/Grande defect AR2305 can cause TLB fetch timeouts
495 * if a TLB entry is purged while in use. sba_mark_invalid()
496 * purges IOTLB entries in power-of-two sizes, so we also
497 * allocate IOVA space in power-of-two sizes.
499 bits_wanted = 1UL << get_iovp_order(bits_wanted << iovp_shift);
501 if (likely(bits_wanted == 1)) {
502 unsigned int bitshiftcnt;
503 for(; res_ptr < res_end ; res_ptr++) {
504 if (likely(*res_ptr != ~0UL)) {
505 bitshiftcnt = ffz(*res_ptr);
506 *res_ptr |= (1UL << bitshiftcnt);
507 pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
508 pide <<= 3; /* convert to bit address */
510 ioc->res_bitshift = bitshiftcnt + bits_wanted;
518 if (likely(bits_wanted <= BITS_PER_LONG/2)) {
520 ** Search the resource bit map on well-aligned values.
521 ** "o" is the alignment.
522 ** We need the alignment to invalidate I/O TLB using
523 ** SBA HW features in the unmap path.
525 unsigned long o = 1 << get_iovp_order(bits_wanted << iovp_shift);
526 uint bitshiftcnt = ROUNDUP(ioc->res_bitshift, o);
527 unsigned long mask, base_mask;
529 base_mask = RESMAP_MASK(bits_wanted);
530 mask = base_mask << bitshiftcnt;
532 DBG_RES("%s() o %ld %p", __FUNCTION__, o, res_ptr);
533 for(; res_ptr < res_end ; res_ptr++)
535 DBG_RES(" %p %lx %lx\n", res_ptr, mask, *res_ptr);
537 for (; mask ; mask <<= o, bitshiftcnt += o) {
538 if(0 == ((*res_ptr) & mask)) {
539 *res_ptr |= mask; /* mark resources busy! */
540 pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
541 pide <<= 3; /* convert to bit address */
543 ioc->res_bitshift = bitshiftcnt + bits_wanted;
557 qwords = bits_wanted >> 6; /* /64 */
558 bits = bits_wanted - (qwords * BITS_PER_LONG);
560 end = res_end - qwords;
562 for (; res_ptr < end; res_ptr++) {
563 for (i = 0 ; i < qwords ; i++) {
567 if (bits && res_ptr[i] && (__ffs(res_ptr[i]) < bits))
570 /* Found it, mark it */
571 for (i = 0 ; i < qwords ; i++)
573 res_ptr[i] |= RESMAP_MASK(bits);
575 pide = ((unsigned long)res_ptr - (unsigned long)ioc->res_map);
576 pide <<= 3; /* convert to bit address */
578 ioc->res_bitshift = bits;
586 prefetch(ioc->res_map);
587 ioc->res_hint = (unsigned long *) ioc->res_map;
588 ioc->res_bitshift = 0;
589 spin_unlock_irqrestore(&ioc->res_lock, flags);
593 ioc->res_hint = res_ptr;
594 spin_unlock_irqrestore(&ioc->res_lock, flags);
600 * sba_alloc_range - find free bits and mark them in IO PDIR resource bitmap
601 * @ioc: IO MMU structure which owns the pdir we are interested in.
602 * @size: number of bytes to create a mapping for
604 * Given a size, find consecutive unmarked bits in the resource bitmap and then mark them.
608 sba_alloc_range(struct ioc *ioc, size_t size)
610 unsigned int pages_needed = size >> iovp_shift;
611 #ifdef PDIR_SEARCH_TIMING
612 unsigned long itc_start;
616 ASSERT(pages_needed);
617 ASSERT(0 == (size & ~iovp_mask));
619 #ifdef PDIR_SEARCH_TIMING
620 itc_start = ia64_get_itc();
623 ** "seek and ye shall find"...praying never hurts either...
625 pide = sba_search_bitmap(ioc, pages_needed, 1);
626 if (unlikely(pide >= (ioc->res_size << 3))) {
627 pide = sba_search_bitmap(ioc, pages_needed, 0);
628 if (unlikely(pide >= (ioc->res_size << 3))) {
629 #if DELAYED_RESOURCE_CNT > 0
633 ** With delayed resource freeing, we can give this one more shot. We're
634 ** getting close to being in trouble here, so do what we can to make this allocation work.
637 spin_lock_irqsave(&ioc->saved_lock, flags);
638 if (ioc->saved_cnt > 0) {
639 struct sba_dma_pair *d;
640 int cnt = ioc->saved_cnt;
642 d = &(ioc->saved[ioc->saved_cnt - 1]);
644 spin_lock(&ioc->res_lock);
646 sba_mark_invalid(ioc, d->iova, d->size);
647 sba_free_range(ioc, d->iova, d->size);
651 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
652 spin_unlock(&ioc->res_lock);
654 spin_unlock_irqrestore(&ioc->saved_lock, flags);
656 pide = sba_search_bitmap(ioc, pages_needed, 0);
657 if (unlikely(pide >= (ioc->res_size << 3)))
658 panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
661 panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
667 #ifdef PDIR_SEARCH_TIMING
668 ioc->avg_search[ioc->avg_idx++] = (ia64_get_itc() - itc_start) / pages_needed;
669 ioc->avg_idx &= SBA_SEARCH_SAMPLE - 1;
672 prefetchw(&(ioc->pdir_base[pide]));
674 #ifdef ASSERT_PDIR_SANITY
675 /* verify the first enable bit is clear */
676 if(0x00 != ((u8 *) ioc->pdir_base)[pide*PDIR_ENTRY_SIZE + 7]) {
677 sba_dump_pdir_entry(ioc, "sba_search_bitmap() botched it?", pide);
681 DBG_RES("%s(%x) %d -> %lx hint %x/%x\n",
682 __FUNCTION__, size, pages_needed, pide,
683 (uint) ((unsigned long) ioc->res_hint - (unsigned long) ioc->res_map),
691 * sba_free_range - unmark bits in IO PDIR resource bitmap
692 * @ioc: IO MMU structure which owns the pdir we are interested in.
693 * @iova: IO virtual address which was previously allocated.
694 * @size: number of bytes that were mapped
696 * clear bits in the ioc's resource map
698 static SBA_INLINE void
699 sba_free_range(struct ioc *ioc, dma_addr_t iova, size_t size)
701 unsigned long iovp = SBA_IOVP(ioc, iova);
702 unsigned int pide = PDIR_INDEX(iovp);
703 unsigned int ridx = pide >> 3; /* convert bit to byte address */
704 unsigned long *res_ptr = (unsigned long *) &((ioc)->res_map[ridx & ~RESMAP_IDX_MASK]);
705 int bits_not_wanted = size >> iovp_shift;
708 /* Round up to power-of-two size: see AR2305 note above */
709 bits_not_wanted = 1UL << get_iovp_order(bits_not_wanted << iovp_shift);
710 for (; bits_not_wanted > 0 ; res_ptr++) {
712 if (unlikely(bits_not_wanted > BITS_PER_LONG)) {
714 /* these mappings start 64bit aligned */
716 bits_not_wanted -= BITS_PER_LONG;
717 pide += BITS_PER_LONG;
721 /* 3-bits "bit" address plus 2 (or 3) bits for "byte" == bit in word */
722 m = RESMAP_MASK(bits_not_wanted) << (pide & (BITS_PER_LONG - 1));
725 DBG_RES("%s( ,%x,%x) %x/%lx %x %p %lx\n", __FUNCTION__, (uint) iova, size,
726 bits_not_wanted, m, pide, res_ptr, *res_ptr);
729 ASSERT(bits_not_wanted);
730 ASSERT((*res_ptr & m) == m); /* verify same bits are set */
737 /**************************************************************
739 * "Dynamic DMA Mapping" support (aka "Coherent I/O")
741 ***************************************************************/
744 * sba_io_pdir_entry - fill in one IO PDIR entry
745 * @pdir_ptr: pointer to IO PDIR entry
746 * @vba: Virtual CPU address of buffer to map
748 * SBA Mapping Routine
750 * Given a virtual address (vba, arg1) sba_io_pdir_entry()
751 * loads the I/O PDIR entry pointed to by pdir_ptr (arg0).
752 * Each IO Pdir entry consists of 8 bytes as shown below
756 * +-+---------------------+----------------------------------+----+--------+
757 * |V| U | PPN[39:12] | U | FF |
758 * +-+---------------------+----------------------------------+----+--------+
762 * PPN == Physical Page Number
764 * The physical address fields are filled with the results of virt_to_phys()
769 #define sba_io_pdir_entry(pdir_ptr, vba) *pdir_ptr = ((vba & ~0xE000000000000FFFULL) \
770 | 0x8000000000000000ULL)
773 sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
775 *pdir_ptr = ((vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL);
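/*
** Worked example (illustrative region-7 kernel address): for
** vba == 0xe000000012345678 the entry becomes
** (vba & ~0xE000000000000FFFULL) | 0x80000000000000FFULL
** == 0x80000000123450FF -- valid bit set, PPN taken from the physical page
** behind the identity-mapped address, low byte carrying the hint bits.
*/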
779 #ifdef ENABLE_MARK_CLEAN
781 * Since DMA is i-cache coherent, any (complete) pages that were written via
782 * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
783 * flush them when they get mapped into an executable vm-area.
786 mark_clean (void *addr, size_t size)
788 unsigned long pg_addr, end;
790 pg_addr = PAGE_ALIGN((unsigned long) addr);
791 end = (unsigned long) addr + size;
792 while (pg_addr + PAGE_SIZE <= end) {
793 struct page *page = virt_to_page((void *)pg_addr);
794 set_bit(PG_arch_1, &page->flags);
795 pg_addr += PAGE_SIZE;
801 * sba_mark_invalid - invalidate one or more IO PDIR entries
802 * @ioc: IO MMU structure which owns the pdir we are interested in.
803 * @iova: IO Virtual Address mapped earlier
804 * @byte_cnt: number of bytes this mapping covers.
806 * Mark the IO PDIR entry(ies) as invalid and purge the corresponding
807 * IO TLB entries. The PCOM (Purge Command) register is used to flush
808 * stale entries from the IO TLB when mappings are torn down.
810 * The PCOM register supports purging of multiple pages, with a minimum
811 * of 1 page and a maximum of 2GB. Hardware requires the address be
812 * aligned to the size of the range being purged. The size of the range
813 * must be a power of 2. The "Cool perf optimization" in the
814 * allocation routine helps keep that true.
816 static SBA_INLINE void
817 sba_mark_invalid(struct ioc *ioc, dma_addr_t iova, size_t byte_cnt)
819 u32 iovp = (u32) SBA_IOVP(ioc,iova);
821 int off = PDIR_INDEX(iovp);
823 /* Must be non-zero and rounded up */
824 ASSERT(byte_cnt > 0);
825 ASSERT(0 == (byte_cnt & ~iovp_mask));
827 #ifdef ASSERT_PDIR_SANITY
828 /* Assert first pdir entry is set */
829 if (!(ioc->pdir_base[off] >> 60)) {
830 sba_dump_pdir_entry(ioc,"sba_mark_invalid()", PDIR_INDEX(iovp));
834 if (byte_cnt <= iovp_size)
836 ASSERT(off < ioc->pdir_size);
838 iovp |= iovp_shift; /* set "size" field for PCOM */
840 #ifndef FULL_VALID_PDIR
842 ** clear I/O PDIR entry "valid" bit
843 ** Do NOT clear the rest - save it for debugging.
844 ** We should only clear bits that have previously been enabled.
847 ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
850 ** If we want to maintain the PDIR as valid, put in
851 ** the spill page so devices prefetching won't
852 ** cause a hard fail.
854 ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
857 u32 t = get_iovp_order(byte_cnt) + iovp_shift;
860 ASSERT(t <= 31); /* 2GB! Max value of "size" field */
863 /* verify this pdir entry is enabled */
864 ASSERT(ioc->pdir_base[off] >> 63);
865 #ifndef FULL_VALID_PDIR
866 /* clear I/O Pdir entry "valid" bit first */
867 ioc->pdir_base[off] &= ~(0x80000000000000FFULL);
869 ioc->pdir_base[off] = (0x80000000000000FFULL | prefetch_spill_page);
872 byte_cnt -= iovp_size;
873 } while (byte_cnt > 0);
876 WRITE_REG(iovp | ioc->ibase, ioc->ioc_hpa+IOC_PCOM);
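/*
** Worked example (assuming iovp_shift == 12): purging a single 4KB page at
** iovp 0x10000 writes (0x10000 | 12) | ioc->ibase to IOC_PCOM -- the low
** bits encode log2 of the purge size in bytes and the upper bits the
** page-aligned address, which is why allocations are forced to power-of-two
** sizes and alignment (see the AR2305 note in sba_search_bitmap()).
*/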
880 * sba_map_single - map one buffer and return IOVA for DMA
881 * @dev: instance of PCI owned by the driver that's asking.
882 * @addr: driver buffer to map.
883 * @size: number of bytes to map in driver buffer.
886 * See Documentation/DMA-mapping.txt
889 sba_map_single(struct device *dev, void *addr, size_t size, int dir)
896 #ifdef ASSERT_PDIR_SANITY
899 #ifdef ALLOW_IOV_BYPASS
900 unsigned long pci_addr = virt_to_phys(addr);
903 #ifdef ALLOW_IOV_BYPASS
904 ASSERT(to_pci_dev(dev)->dma_mask);
906 ** Check if the PCI device can DMA to ptr... if so, just return ptr
908 if (likely((pci_addr & ~to_pci_dev(dev)->dma_mask) == 0)) {
910 ** Device is capable of DMA'ing directly to the buffer...
911 ** just return the PCI address of ptr
913 DBG_BYPASS("sba_map_single() bypass mask/addr: 0x%lx/0x%lx\n",
914 to_pci_dev(dev)->dma_mask, pci_addr);
921 prefetch(ioc->res_hint);
924 ASSERT(size <= DMA_CHUNK_SIZE);
926 /* save offset bits */
927 offset = ((dma_addr_t) (long) addr) & ~iovp_mask;
929 /* round up to nearest iovp_size */
930 size = (size + offset + ~iovp_mask) & iovp_mask;
932 #ifdef ASSERT_PDIR_SANITY
933 spin_lock_irqsave(&ioc->res_lock, flags);
934 if (sba_check_pdir(ioc,"Check before sba_map_single()"))
935 panic("Sanity check failed");
936 spin_unlock_irqrestore(&ioc->res_lock, flags);
939 pide = sba_alloc_range(ioc, size);
941 iovp = (dma_addr_t) pide << iovp_shift;
943 DBG_RUN("%s() 0x%p -> 0x%lx\n",
944 __FUNCTION__, addr, (long) iovp | offset);
946 pdir_start = &(ioc->pdir_base[pide]);
949 ASSERT(((u8 *)pdir_start)[7] == 0); /* verify availability */
950 sba_io_pdir_entry(pdir_start, (unsigned long) addr);
952 DBG_RUN(" pdir 0x%p %lx\n", pdir_start, *pdir_start);
958 /* force pdir update */
961 /* form complete address */
962 #ifdef ASSERT_PDIR_SANITY
963 spin_lock_irqsave(&ioc->res_lock, flags);
964 sba_check_pdir(ioc,"Check after sba_map_single()");
965 spin_unlock_irqrestore(&ioc->res_lock, flags);
967 return SBA_IOVA(ioc, iovp, offset);
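/*
** Hedged usage sketch (names are illustrative; real callers normally reach
** this through the generic dma_map_single()/dma_unmap_single() wrappers
** rather than calling the sba_* entry points directly):
**
**	dma_addr_t handle = sba_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
**	... hand "handle" to the device, wait for the DMA to complete ...
**	sba_unmap_single(&pdev->dev, handle, len, DMA_TO_DEVICE);
*/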
970 #ifdef ENABLE_MARK_CLEAN
971 static SBA_INLINE void
972 sba_mark_clean(struct ioc *ioc, dma_addr_t iova, size_t size)
974 u32 iovp = (u32) SBA_IOVP(ioc,iova);
975 int off = PDIR_INDEX(iovp);
978 if (size <= iovp_size) {
979 addr = phys_to_virt(ioc->pdir_base[off] &
980 ~0xE000000000000FFFULL);
981 mark_clean(addr, size);
984 addr = phys_to_virt(ioc->pdir_base[off] &
985 ~0xE000000000000FFFULL);
986 mark_clean(addr, min(size, iovp_size));
995 * sba_unmap_single - unmap one IOVA and free resources
996 * @dev: instance of PCI owned by the driver that's asking.
997 * @iova: IOVA of driver buffer previously mapped.
998 * @size: number of bytes mapped in driver buffer.
1001 * See Documentation/DMA-mapping.txt
1003 void sba_unmap_single(struct device *dev, dma_addr_t iova, size_t size, int dir)
1006 #if DELAYED_RESOURCE_CNT > 0
1007 struct sba_dma_pair *d;
1009 unsigned long flags;
1015 #ifdef ALLOW_IOV_BYPASS
1016 if (likely((iova & ioc->imask) != ioc->ibase)) {
1018 ** Address does not fall w/in IOVA, must be bypassing
1020 DBG_BYPASS("sba_unmap_single() bypass addr: 0x%lx\n", iova);
1022 #ifdef ENABLE_MARK_CLEAN
1023 if (dir == DMA_FROM_DEVICE) {
1024 mark_clean(phys_to_virt(iova), size);
1030 offset = iova & ~iovp_mask;
1032 DBG_RUN("%s() iovp 0x%lx/%x\n",
1033 __FUNCTION__, (long) iova, size);
1035 iova ^= offset; /* clear offset bits */
1037 size = ROUNDUP(size, iovp_size);
1039 #ifdef ENABLE_MARK_CLEAN
1040 if (dir == DMA_FROM_DEVICE)
1041 sba_mark_clean(ioc, iova, size);
1044 #if DELAYED_RESOURCE_CNT > 0
1045 spin_lock_irqsave(&ioc->saved_lock, flags);
1046 d = &(ioc->saved[ioc->saved_cnt]);
1049 if (unlikely(++(ioc->saved_cnt) >= DELAYED_RESOURCE_CNT)) {
1050 int cnt = ioc->saved_cnt;
1051 spin_lock(&ioc->res_lock);
1053 sba_mark_invalid(ioc, d->iova, d->size);
1054 sba_free_range(ioc, d->iova, d->size);
1058 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
1059 spin_unlock(&ioc->res_lock);
1061 spin_unlock_irqrestore(&ioc->saved_lock, flags);
1062 #else /* DELAYED_RESOURCE_CNT == 0 */
1063 spin_lock_irqsave(&ioc->res_lock, flags);
1064 sba_mark_invalid(ioc, iova, size);
1065 sba_free_range(ioc, iova, size);
1066 READ_REG(ioc->ioc_hpa+IOC_PCOM); /* flush purges */
1067 spin_unlock_irqrestore(&ioc->res_lock, flags);
1068 #endif /* DELAYED_RESOURCE_CNT == 0 */
1073 * sba_alloc_coherent - allocate/map shared mem for DMA
1074 * @dev: instance of PCI owned by the driver that's asking.
1075 * @size: number of bytes mapped in driver buffer.
1076 * @dma_handle: IOVA of new buffer.
1078 * See Documentation/DMA-mapping.txt
1081 sba_alloc_coherent (struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t flags)
1092 page = alloc_pages_node(ioc->node == MAX_NUMNODES ?
1093 numa_node_id() : ioc->node, flags,
1096 if (unlikely(!page))
1099 addr = page_address(page);
1102 addr = (void *) __get_free_pages(flags, get_order(size));
1104 if (unlikely(!addr))
1107 memset(addr, 0, size);
1108 *dma_handle = virt_to_phys(addr);
1110 #ifdef ALLOW_IOV_BYPASS
1111 ASSERT(dev->coherent_dma_mask);
1113 ** Check if the PCI device can DMA to ptr... if so, just return ptr
1115 if (likely((*dma_handle & ~dev->coherent_dma_mask) == 0)) {
1116 DBG_BYPASS("sba_alloc_coherent() bypass mask/addr: 0x%lx/0x%lx\n",
1117 dev->coherent_dma_mask, *dma_handle);
1124 * If device can't bypass or bypass is disabled, pass the 32bit fake
1125 * device to map single to get an iova mapping.
1127 *dma_handle = sba_map_single(&ioc->sac_only_dev->dev, addr, size, 0);
1134 * sba_free_coherent - free/unmap shared mem for DMA
1135 * @dev: instance of PCI owned by the driver that's asking.
1136 * @size: number of bytes mapped in driver buffer.
1137 * @vaddr: virtual address IOVA of "consistent" buffer.
1138 * @dma_handle: IO virtual address of "consistent" buffer.
1140 * See Documentation/DMA-mapping.txt
1142 void sba_free_coherent (struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle)
1144 sba_unmap_single(dev, dma_handle, size, 0);
1145 free_pages((unsigned long) vaddr, get_order(size));
1150 ** Since 0 is a valid pdir_base index value, we can't use that
1151 ** to determine if a value is valid or not. Use a flag to indicate
1152 ** the SG list entry contains a valid pdir index.
1154 #define PIDE_FLAG 0x1UL
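/*
** Illustrative encoding: sba_coalesce_chunks() stores
** PIDE_FLAG | (pide << iovp_shift) | dma_offset in the dma_address of the
** SG entry heading each DMA stream; sba_fill_pdir() strips the flag and ORs
** in ioc->ibase to turn it into the final IOVA.
*/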
1156 #ifdef DEBUG_LARGE_SG_ENTRIES
1157 int dump_run_sg = 0;
1162 * sba_fill_pdir - write allocated SG entries into IO PDIR
1163 * @ioc: IO MMU structure which owns the pdir we are interested in.
1164 * @startsg: list of IOVA/size pairs
1165 * @nents: number of entries in startsg list
1167 * Take the preprocessed SG list and write the corresponding entries into the IO PDIR.
1171 static SBA_INLINE int
1174 struct scatterlist *startsg,
1177 struct scatterlist *dma_sg = startsg; /* pointer to current DMA */
1180 unsigned long dma_offset = 0;
1182 while (nents-- > 0) {
1183 int cnt = startsg->dma_length;
1184 startsg->dma_length = 0;
1186 #ifdef DEBUG_LARGE_SG_ENTRIES
1188 printk(" %2d : %08lx/%05x %p\n",
1189 nents, startsg->dma_address, cnt,
1190 sba_sg_address(startsg));
1192 DBG_RUN_SG(" %d : %08lx/%05x %p\n",
1193 nents, startsg->dma_address, cnt,
1194 sba_sg_address(startsg));
1197 ** Look for the start of a new DMA stream
1199 if (startsg->dma_address & PIDE_FLAG) {
1200 u32 pide = startsg->dma_address & ~PIDE_FLAG;
1201 dma_offset = (unsigned long) pide & ~iovp_mask;
1202 startsg->dma_address = 0;
1204 dma_sg = sg_next(dma_sg);
1205 dma_sg->dma_address = pide | ioc->ibase;
1206 pdirp = &(ioc->pdir_base[pide >> iovp_shift]);
1211 ** Look for a VCONTIG chunk
1214 unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
1217 /* Since multiple Vcontig blocks could make up
1218 ** one DMA stream, *add* cnt to dma_len.
1220 dma_sg->dma_length += cnt;
1222 dma_offset=0; /* only want offset on first chunk */
1223 cnt = ROUNDUP(cnt, iovp_size);
1225 sba_io_pdir_entry(pdirp, vaddr);
1231 startsg = sg_next(startsg);
1233 /* force pdir update */
1236 #ifdef DEBUG_LARGE_SG_ENTRIES
1244 ** Two address ranges are DMA contiguous *iff* "end of prev" and
1245 ** "start of next" are both on an IOV page boundary.
1247 ** (shift left is a quick trick to mask off upper bits)
1249 #define DMA_CONTIG(__X, __Y) \
1250 (((((unsigned long) __X) | ((unsigned long) __Y)) << (BITS_PER_LONG - iovp_shift)) == 0UL)
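/*
** Worked example (assuming iovp_shift == 12, BITS_PER_LONG == 64):
** DMA_CONTIG(0x2000, 0x7000) is true -- both addresses shift off the top,
** so the previous chunk ends on an IOV page boundary and the next starts on
** one.  DMA_CONTIG(0x2000, 0x7010) is false because the 0x010 offset
** survives the 52-bit left shift.
*/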
1254 * sba_coalesce_chunks - preprocess the SG list
1255 * @ioc: IO MMU structure which owns the pdir we are interested in.
1256 * @startsg: list of IOVA/size pairs
1257 * @nents: number of entries in startsg list
1259 * First pass is to walk the SG list and determine where the breaks are
1260 * in the DMA stream. Allocates PDIR entries but does not fill them.
1261 * Returns the number of DMA chunks.
1263 * Doing the fill separate from the coalescing/allocation keeps the
1264 * code simpler. Future enhancement could make one pass through
1265 * the sglist do both.
1267 static SBA_INLINE int
1268 sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
1269 struct scatterlist *startsg,
1272 struct scatterlist *vcontig_sg; /* VCONTIG chunk head */
1273 unsigned long vcontig_len; /* len of VCONTIG chunk */
1274 unsigned long vcontig_end;
1275 struct scatterlist *dma_sg; /* next DMA stream head */
1276 unsigned long dma_offset, dma_len; /* start/len of DMA stream */
1278 unsigned int max_seg_size = dma_get_max_seg_size(dev);
1281 unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
1284 ** Prepare for first/next DMA stream
1286 dma_sg = vcontig_sg = startsg;
1287 dma_len = vcontig_len = vcontig_end = startsg->length;
1288 vcontig_end += vaddr;
1289 dma_offset = vaddr & ~iovp_mask;
1291 /* PARANOID: clear entries */
1292 startsg->dma_address = startsg->dma_length = 0;
1295 ** This loop terminates one iteration "early" since
1296 ** it's always looking one "ahead".
1298 while (--nents > 0) {
1299 unsigned long vaddr; /* tmp */
1301 startsg = sg_next(startsg);
1304 startsg->dma_address = startsg->dma_length = 0;
1306 /* catch brokenness in SCSI layer */
1307 ASSERT(startsg->length <= DMA_CHUNK_SIZE);
1310 ** First make sure current dma stream won't
1311 ** exceed DMA_CHUNK_SIZE if we coalesce the
1314 if (((dma_len + dma_offset + startsg->length + ~iovp_mask) & iovp_mask)
1318 if (dma_len + startsg->length > max_seg_size)
1322 ** Then look for virtually contiguous blocks.
1324 ** append the next transaction?
1326 vaddr = (unsigned long) sba_sg_address(startsg);
1327 if (vcontig_end == vaddr)
1329 vcontig_len += startsg->length;
1330 vcontig_end += startsg->length;
1331 dma_len += startsg->length;
1335 #ifdef DEBUG_LARGE_SG_ENTRIES
1336 dump_run_sg = (vcontig_len > iovp_size);
1340 ** Not virtually contiguous.
1341 ** Terminate prev chunk.
1342 ** Start a new chunk.
1344 ** Once we start a new VCONTIG chunk, dma_offset
1345 ** can't change. And we need the offset from the first
1346 ** chunk - not the last one. Ergo, successive chunks
1347 ** must start on page boundaries and dovetail
1348 ** with their predecessors.
1350 vcontig_sg->dma_length = vcontig_len;
1352 vcontig_sg = startsg;
1353 vcontig_len = startsg->length;
1356 ** 3) do the entries end/start on page boundaries?
1357 ** Don't update vcontig_end until we've checked.
1359 if (DMA_CONTIG(vcontig_end, vaddr))
1361 vcontig_end = vcontig_len + vaddr;
1362 dma_len += vcontig_len;
1370 ** End of DMA Stream
1371 ** Terminate last VCONTIG block.
1372 ** Allocate space for DMA stream.
1374 vcontig_sg->dma_length = vcontig_len;
1375 dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
1376 ASSERT(dma_len <= DMA_CHUNK_SIZE);
1377 dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG
1378 | (sba_alloc_range(ioc, dma_len) << iovp_shift)
1388 * sba_map_sg - map Scatter/Gather list
1389 * @dev: instance of PCI owned by the driver that's asking.
1390 * @sglist: array of buffer/length pairs
1391 * @nents: number of entries in list
1392 * @dir: R/W or both.
1394 * See Documentation/DMA-mapping.txt
1396 int sba_map_sg(struct device *dev, struct scatterlist *sglist, int nents, int dir)
1399 int coalesced, filled = 0;
1400 #ifdef ASSERT_PDIR_SANITY
1401 unsigned long flags;
1403 #ifdef ALLOW_IOV_BYPASS_SG
1404 struct scatterlist *sg;
1407 DBG_RUN_SG("%s() START %d entries\n", __FUNCTION__, nents);
1411 #ifdef ALLOW_IOV_BYPASS_SG
1412 ASSERT(to_pci_dev(dev)->dma_mask);
1413 if (likely((ioc->dma_mask & ~to_pci_dev(dev)->dma_mask) == 0)) {
1414 for_each_sg(sglist, sg, nents, filled) {
1415 sg->dma_length = sg->length;
1416 sg->dma_address = virt_to_phys(sba_sg_address(sg));
1421 /* Fast path single entry scatterlists. */
1423 sglist->dma_length = sglist->length;
1424 sglist->dma_address = sba_map_single(dev, sba_sg_address(sglist), sglist->length, dir);
1428 #ifdef ASSERT_PDIR_SANITY
1429 spin_lock_irqsave(&ioc->res_lock, flags);
1430 if (sba_check_pdir(ioc,"Check before sba_map_sg()"))
1432 sba_dump_sg(ioc, sglist, nents);
1433 panic("Check before sba_map_sg()");
1435 spin_unlock_irqrestore(&ioc->res_lock, flags);
1438 prefetch(ioc->res_hint);
1441 ** First coalesce the chunks and allocate I/O pdir space
1443 ** If this is one DMA stream, we can properly map using the
1444 ** correct virtual address associated with each DMA page.
1445 ** w/o this association, we wouldn't have coherent DMA!
1446 ** Access to the virtual address is what forces a two-pass algorithm.
1448 coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);
1451 ** Program the I/O Pdir
1453 ** map the virtual addresses to the I/O Pdir
1454 ** o dma_address will contain the pdir index
1455 ** o dma_len will contain the number of bytes to map
1456 ** o address contains the virtual address.
1458 filled = sba_fill_pdir(ioc, sglist, nents);
1460 #ifdef ASSERT_PDIR_SANITY
1461 spin_lock_irqsave(&ioc->res_lock, flags);
1462 if (sba_check_pdir(ioc,"Check after sba_map_sg()"))
1464 sba_dump_sg(ioc, sglist, nents);
1465 panic("Check after sba_map_sg()\n");
1467 spin_unlock_irqrestore(&ioc->res_lock, flags);
1470 ASSERT(coalesced == filled);
1471 DBG_RUN_SG("%s() DONE %d mappings\n", __FUNCTION__, filled);
1478 * sba_unmap_sg - unmap Scatter/Gather list
1479 * @dev: instance of PCI owned by the driver that's asking.
1480 * @sglist: array of buffer/length pairs
1481 * @nents: number of entries in list
1482 * @dir: R/W or both.
1484 * See Documentation/DMA-mapping.txt
1486 void sba_unmap_sg (struct device *dev, struct scatterlist *sglist, int nents, int dir)
1488 #ifdef ASSERT_PDIR_SANITY
1490 unsigned long flags;
1493 DBG_RUN_SG("%s() START %d entries, %p,%x\n",
1494 __FUNCTION__, nents, sba_sg_address(sglist), sglist->length);
1496 #ifdef ASSERT_PDIR_SANITY
1500 spin_lock_irqsave(&ioc->res_lock, flags);
1501 sba_check_pdir(ioc,"Check before sba_unmap_sg()");
1502 spin_unlock_irqrestore(&ioc->res_lock, flags);
1505 while (nents && sglist->dma_length) {
1507 sba_unmap_single(dev, sglist->dma_address, sglist->dma_length, dir);
1508 sglist = sg_next(sglist);
1512 DBG_RUN_SG("%s() DONE (nents %d)\n", __FUNCTION__, nents);
1514 #ifdef ASSERT_PDIR_SANITY
1515 spin_lock_irqsave(&ioc->res_lock, flags);
1516 sba_check_pdir(ioc,"Check after sba_unmap_sg()");
1517 spin_unlock_irqrestore(&ioc->res_lock, flags);
1522 /**************************************************************
1524 * Initialization and claim
1526 ***************************************************************/
1529 ioc_iova_init(struct ioc *ioc)
1533 struct pci_dev *device = NULL;
1534 #ifdef FULL_VALID_PDIR
1535 unsigned long index;
1539 ** Firmware programs the base and size of a "safe IOVA space"
1540 ** (one that doesn't overlap memory or LMMIO space) in the
1541 ** IBASE and IMASK registers.
1543 ioc->ibase = READ_REG(ioc->ioc_hpa + IOC_IBASE) & ~0x1UL;
1544 ioc->imask = READ_REG(ioc->ioc_hpa + IOC_IMASK) | 0xFFFFFFFF00000000UL;
1546 ioc->iov_size = ~ioc->imask + 1;
1548 DBG_INIT("%s() hpa %p IOV base 0x%lx mask 0x%lx (%dMB)\n",
1549 __FUNCTION__, ioc->ioc_hpa, ioc->ibase, ioc->imask,
1550 ioc->iov_size >> 20);
1552 switch (iovp_size) {
1553 case 4*1024: tcnfg = 0; break;
1554 case 8*1024: tcnfg = 1; break;
1555 case 16*1024: tcnfg = 2; break;
1556 case 64*1024: tcnfg = 3; break;
1558 panic(PFX "Unsupported IOTLB page size %ldK",
1562 WRITE_REG(tcnfg, ioc->ioc_hpa + IOC_TCNFG);
1564 ioc->pdir_size = (ioc->iov_size / iovp_size) * PDIR_ENTRY_SIZE;
1565 ioc->pdir_base = (void *) __get_free_pages(GFP_KERNEL,
1566 get_order(ioc->pdir_size));
1567 if (!ioc->pdir_base)
1568 panic(PFX "Couldn't allocate I/O Page Table\n");
1570 memset(ioc->pdir_base, 0, ioc->pdir_size);
1572 DBG_INIT("%s() IOV page size %ldK pdir %p size %x\n", __FUNCTION__,
1573 iovp_size >> 10, ioc->pdir_base, ioc->pdir_size);
1575 ASSERT(ALIGN((unsigned long) ioc->pdir_base, 4*1024) == (unsigned long) ioc->pdir_base);
1576 WRITE_REG(virt_to_phys(ioc->pdir_base), ioc->ioc_hpa + IOC_PDIR_BASE);
1579 ** If an AGP device is present, only use half of the IOV space
1580 ** for PCI DMA. Unfortunately we can't know ahead of time
1581 ** whether GART support will actually be used, so for now we
1582 ** just key on any AGP device found in the system.
1583 ** We program the next pdir index after we stop w/ a key for
1584 ** the GART code to handshake on.
1586 for_each_pci_dev(device)
1587 agp_found |= pci_find_capability(device, PCI_CAP_ID_AGP);
1589 if (agp_found && reserve_sba_gart) {
1590 printk(KERN_INFO PFX "reserving %dMb of IOVA space at 0x%lx for agpgart\n",
1591 ioc->iov_size/2 >> 20, ioc->ibase + ioc->iov_size/2);
1592 ioc->pdir_size /= 2;
1593 ((u64 *)ioc->pdir_base)[PDIR_INDEX(ioc->iov_size/2)] = ZX1_SBA_IOMMU_COOKIE;
1595 #ifdef FULL_VALID_PDIR
1597 ** Check to see if the spill page has already been allocated; we don't need more than
1598 ** one across multiple SBAs.
1600 if (!prefetch_spill_page) {
1601 char *spill_poison = "SBAIOMMU POISON";
1602 int poison_size = 16;
1603 void *poison_addr, *addr;
1605 addr = (void *)__get_free_pages(GFP_KERNEL, get_order(iovp_size));
1607 panic(PFX "Couldn't allocate PDIR spill page\n");
1610 for ( ; (u64) poison_addr < addr + iovp_size; poison_addr += poison_size)
1611 memcpy(poison_addr, spill_poison, poison_size);
1613 prefetch_spill_page = virt_to_phys(addr);
1615 DBG_INIT("%s() prefetch spill addr: 0x%lx\n", __FUNCTION__, prefetch_spill_page);
1618 ** Set all the PDIR entries valid w/ the spill page as the target
1620 for (index = 0 ; index < (ioc->pdir_size / PDIR_ENTRY_SIZE) ; index++)
1621 ((u64 *)ioc->pdir_base)[index] = (0x80000000000000FF | prefetch_spill_page);
1624 /* Clear I/O TLB of any possible entries */
1625 WRITE_REG(ioc->ibase | (get_iovp_order(ioc->iov_size) + iovp_shift), ioc->ioc_hpa + IOC_PCOM);
1626 READ_REG(ioc->ioc_hpa + IOC_PCOM);
1628 /* Enable IOVA translation */
1629 WRITE_REG(ioc->ibase | 1, ioc->ioc_hpa + IOC_IBASE);
1630 READ_REG(ioc->ioc_hpa + IOC_IBASE);
1634 ioc_resource_init(struct ioc *ioc)
1636 spin_lock_init(&ioc->res_lock);
1637 #if DELAYED_RESOURCE_CNT > 0
1638 spin_lock_init(&ioc->saved_lock);
1641 /* resource map size dictated by pdir_size */
1642 ioc->res_size = ioc->pdir_size / PDIR_ENTRY_SIZE; /* entries */
1643 ioc->res_size >>= 3; /* convert bit count to byte count */
1644 DBG_INIT("%s() res_size 0x%x\n", __FUNCTION__, ioc->res_size);
1646 ioc->res_map = (char *) __get_free_pages(GFP_KERNEL,
1647 get_order(ioc->res_size));
1649 panic(PFX "Couldn't allocate resource map\n");
1651 memset(ioc->res_map, 0, ioc->res_size);
1652 /* next available IOVP - circular search */
1653 ioc->res_hint = (unsigned long *) ioc->res_map;
1655 #ifdef ASSERT_PDIR_SANITY
1656 /* Mark first bit busy - ie no IOVA 0 */
1657 ioc->res_map[0] = 0x1;
1658 ioc->pdir_base[0] = 0x8000000000000000ULL | ZX1_SBA_IOMMU_COOKIE;
1660 #ifdef FULL_VALID_PDIR
1661 /* Mark the last resource used so we don't prefetch beyond IOVA space */
1662 ioc->res_map[ioc->res_size - 1] |= 0x80UL; /* res_map is chars */
1663 ioc->pdir_base[(ioc->pdir_size / PDIR_ENTRY_SIZE) - 1] = (0x80000000000000FF
1664 | prefetch_spill_page);
1667 DBG_INIT("%s() res_map %x %p\n", __FUNCTION__,
1668 ioc->res_size, (void *) ioc->res_map);
1672 ioc_sac_init(struct ioc *ioc)
1674 struct pci_dev *sac = NULL;
1675 struct pci_controller *controller = NULL;
1678 * pci_alloc_coherent() must return a DMA address which is
1679 * SAC (single address cycle) addressable, so allocate a
1680 * pseudo-device to enforce that.
1682 sac = kzalloc(sizeof(*sac), GFP_KERNEL);
1684 panic(PFX "Couldn't allocate struct pci_dev");
1686 controller = kzalloc(sizeof(*controller), GFP_KERNEL);
1688 panic(PFX "Couldn't allocate struct pci_controller");
1690 controller->iommu = ioc;
1691 sac->sysdata = controller;
1692 sac->dma_mask = 0xFFFFFFFFUL;
1694 sac->dev.bus = &pci_bus_type;
1696 ioc->sac_only_dev = sac;
1700 ioc_zx1_init(struct ioc *ioc)
1702 unsigned long rope_config;
1705 if (ioc->rev < 0x20)
1706 panic(PFX "IOC 2.0 or later required for IOMMU support\n");
1708 /* 38 bit memory controller + extra bit for range displaced by MMIO */
1709 ioc->dma_mask = (0x1UL << 39) - 1;
1712 ** Clear ROPE(N)_CONFIG AO bit.
1713 ** Disables "NT Ordering" (~= !"Relaxed Ordering")
1714 ** Overrides bit 1 in DMA Hint Sets.
1715 ** Improves netperf UDP_STREAM by ~10% for tg3 on bcm5701.
1717 for (i=0; i<(8*8); i+=8) {
1718 rope_config = READ_REG(ioc->ioc_hpa + IOC_ROPE0_CFG + i);
1719 rope_config &= ~IOC_ROPE_AO;
1720 WRITE_REG(rope_config, ioc->ioc_hpa + IOC_ROPE0_CFG + i);
1724 typedef void (initfunc)(struct ioc *);
1732 static struct ioc_iommu ioc_iommu_info[] __initdata = {
1733 { ZX1_IOC_ID, "zx1", ioc_zx1_init },
1734 { ZX2_IOC_ID, "zx2", NULL },
1735 { SX1000_IOC_ID, "sx1000", NULL },
1736 { SX2000_IOC_ID, "sx2000", NULL },
1739 static struct ioc * __init
1740 ioc_init(u64 hpa, void *handle)
1743 struct ioc_iommu *info;
1745 ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
1749 ioc->next = ioc_list;
1752 ioc->handle = handle;
1753 ioc->ioc_hpa = ioremap(hpa, 0x1000);
1755 ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID);
1756 ioc->rev = READ_REG(ioc->ioc_hpa + IOC_FCLASS) & 0xFFUL;
1757 ioc->dma_mask = 0xFFFFFFFFFFFFFFFFUL; /* conservative */
1759 for (info = ioc_iommu_info; info < ioc_iommu_info + ARRAY_SIZE(ioc_iommu_info); info++) {
1760 if (ioc->func_id == info->func_id) {
1761 ioc->name = info->name;
1767 iovp_size = (1 << iovp_shift);
1768 iovp_mask = ~(iovp_size - 1);
1770 DBG_INIT("%s: PAGE_SIZE %ldK, iovp_size %ldK\n", __FUNCTION__,
1771 PAGE_SIZE >> 10, iovp_size >> 10);
1774 ioc->name = kmalloc(24, GFP_KERNEL);
1776 sprintf((char *) ioc->name, "Unknown (%04x:%04x)",
1777 ioc->func_id & 0xFFFF, (ioc->func_id >> 16) & 0xFFFF);
1779 ioc->name = "Unknown";
1783 ioc_resource_init(ioc);
1786 if ((long) ~iovp_mask > (long) ia64_max_iommu_merge_mask)
1787 ia64_max_iommu_merge_mask = ~iovp_mask;
1789 printk(KERN_INFO PFX
1790 "%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n",
1791 ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF,
1792 hpa, ioc->iov_size >> 20, ioc->ibase);
1799 /**************************************************************************
1801 ** SBA initialization code (HW and SW)
1803 ** o identify SBA chip itself
1804 ** o FIXME: initialize DMA hints for reasonable defaults
1806 **************************************************************************/
1808 #ifdef CONFIG_PROC_FS
1810 ioc_start(struct seq_file *s, loff_t *pos)
1815 for (ioc = ioc_list; ioc; ioc = ioc->next)
1823 ioc_next(struct seq_file *s, void *v, loff_t *pos)
1825 struct ioc *ioc = v;
1832 ioc_stop(struct seq_file *s, void *v)
1837 ioc_show(struct seq_file *s, void *v)
1839 struct ioc *ioc = v;
1840 unsigned long *res_ptr = (unsigned long *)ioc->res_map;
1843 seq_printf(s, "Hewlett Packard %s IOC rev %d.%d\n",
1844 ioc->name, ((ioc->rev >> 4) & 0xF), (ioc->rev & 0xF));
1846 if (ioc->node != MAX_NUMNODES)
1847 seq_printf(s, "NUMA node : %d\n", ioc->node);
1849 seq_printf(s, "IOVA size : %ld MB\n", ((ioc->pdir_size >> 3) * iovp_size)/(1024*1024));
1850 seq_printf(s, "IOVA page size : %ld kb\n", iovp_size/1024);
1852 for (i = 0; i < (ioc->res_size / sizeof(unsigned long)); ++i, ++res_ptr)
1853 used += hweight64(*res_ptr);
1855 seq_printf(s, "PDIR size : %d entries\n", ioc->pdir_size >> 3);
1856 seq_printf(s, "PDIR used : %d entries\n", used);
1858 #ifdef PDIR_SEARCH_TIMING
1860 unsigned long i = 0, avg = 0, min, max;
1861 min = max = ioc->avg_search[0];
1862 for (i = 0; i < SBA_SEARCH_SAMPLE; i++) {
1863 avg += ioc->avg_search[i];
1864 if (ioc->avg_search[i] > max) max = ioc->avg_search[i];
1865 if (ioc->avg_search[i] < min) min = ioc->avg_search[i];
1867 avg /= SBA_SEARCH_SAMPLE;
1868 seq_printf(s, "Bitmap search : %ld/%ld/%ld (min/avg/max CPU Cycles/IOVA page)\n",
1872 #ifndef ALLOW_IOV_BYPASS
1873 seq_printf(s, "IOVA bypass disabled\n");
1878 static const struct seq_operations ioc_seq_ops = {
1886 ioc_open(struct inode *inode, struct file *file)
1888 return seq_open(file, &ioc_seq_ops);
1891 static const struct file_operations ioc_fops = {
1894 .llseek = seq_lseek,
1895 .release = seq_release
1901 struct proc_dir_entry *dir, *entry;
1903 dir = proc_mkdir("bus/mckinley", NULL);
1907 entry = create_proc_entry(ioc_list->name, 0, dir);
1909 entry->proc_fops = &ioc_fops;
1914 sba_connect_bus(struct pci_bus *bus)
1916 acpi_handle handle, parent;
1920 if (!PCI_CONTROLLER(bus))
1921 panic(PFX "no sysdata on bus %d!\n", bus->number);
1923 if (PCI_CONTROLLER(bus)->iommu)
1926 handle = PCI_CONTROLLER(bus)->acpi_handle;
1931 * The IOC scope encloses PCI root bridges in the ACPI
1932 * namespace, so work our way out until we find an IOC we
1933 * claimed previously.
1936 for (ioc = ioc_list; ioc; ioc = ioc->next)
1937 if (ioc->handle == handle) {
1938 PCI_CONTROLLER(bus)->iommu = ioc;
1942 status = acpi_get_parent(handle, &parent);
1944 } while (ACPI_SUCCESS(status));
1946 printk(KERN_WARNING "No IOC for PCI Bus %04x:%02x in ACPI\n", pci_domain_nr(bus), bus->number);
1951 sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
1956 ioc->node = MAX_NUMNODES;
1958 pxm = acpi_get_pxm(handle);
1963 node = pxm_to_node(pxm);
1965 if (node >= MAX_NUMNODES || !node_online(node))
1972 #define sba_map_ioc_to_node(ioc, handle)
1976 acpi_sba_ioc_add(struct acpi_device *device)
1981 struct acpi_buffer buffer;
1982 struct acpi_device_info *dev_info;
1984 status = hp_acpi_csr_space(device->handle, &hpa, &length);
1985 if (ACPI_FAILURE(status))
1988 buffer.length = ACPI_ALLOCATE_LOCAL_BUFFER;
1989 status = acpi_get_object_info(device->handle, &buffer);
1990 if (ACPI_FAILURE(status))
1992 dev_info = buffer.pointer;
1995 * For HWP0001, only SBA appears in ACPI namespace. It encloses the PCI
1996 * root bridges, and its CSR space includes the IOC function.
1998 if (strncmp("HWP0001", dev_info->hardware_id.value, 7) == 0) {
1999 hpa += ZX1_IOC_OFFSET;
2000 /* zx1 based systems default to kernel page size iommu pages */
2002 iovp_shift = min(PAGE_SHIFT, 16);
2007 * default anything not caught above or specified on cmdline to 4k
2013 ioc = ioc_init(hpa, device->handle);
2017 /* setup NUMA node association */
2018 sba_map_ioc_to_node(ioc, device->handle);
2022 static const struct acpi_device_id hp_ioc_iommu_device_ids[] = {
2027 static struct acpi_driver acpi_sba_ioc_driver = {
2028 .name = "IOC IOMMU Driver",
2029 .ids = hp_ioc_iommu_device_ids,
2031 .add = acpi_sba_ioc_add,
2038 if (!ia64_platform_is("hpzx1") && !ia64_platform_is("hpzx1_swiotlb"))
2041 #if defined(CONFIG_IA64_GENERIC) && defined(CONFIG_CRASH_DUMP) && \
2042 defined(CONFIG_PROC_FS)
2043 /* If we are booting a kdump kernel, the sba_iommu will
2044 * cause devices that were not shut down properly to MCA
2045 * as soon as they are turned back on. Our only option for
2046 * a successful kdump kernel boot is to use the swiotlb.
2048 if (elfcorehdr_addr < ELFCORE_ADDR_MAX) {
2049 if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
2050 panic("Unable to initialize software I/O TLB:"
2051 " Try machvec=dig boot option");
2052 machvec_init("dig");
2057 acpi_bus_register_driver(&acpi_sba_ioc_driver);
2059 #ifdef CONFIG_IA64_GENERIC
2061 * If we didn't find something sba_iommu can claim, we
2062 * need to setup the swiotlb and switch to the dig machvec.
2064 if (swiotlb_late_init_with_default_size(64 * (1<<20)) != 0)
2065 panic("Unable to find SBA IOMMU or initialize "
2066 "software I/O TLB: Try machvec=dig boot option");
2067 machvec_init("dig");
2069 panic("Unable to find SBA IOMMU: Try a generic or DIG kernel");
2074 #if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_HP_ZX1_SWIOTLB)
2076 * hpzx1_swiotlb needs to have a fairly small swiotlb bounce
2077 * buffer set up to support devices with smaller DMA masks than
2078 * sba_iommu can handle.
2080 if (ia64_platform_is("hpzx1_swiotlb")) {
2081 extern void hwsw_init(void);
2089 struct pci_bus *b = NULL;
2090 while ((b = pci_find_next_bus(b)) != NULL)
2095 #ifdef CONFIG_PROC_FS
2101 subsys_initcall(sba_init); /* must be initialized after ACPI etc., but before any drivers... */
2104 nosbagart(char *str)
2106 reserve_sba_gart = 0;
2111 sba_dma_supported (struct device *dev, u64 mask)
2113 /* make sure it's at least 32bit capable */
2114 return ((mask & 0xFFFFFFFFUL) == 0xFFFFFFFFUL);
2118 sba_dma_mapping_error (dma_addr_t dma_addr)
2123 __setup("nosbagart", nosbagart);
2126 sba_page_override(char *str)
2128 unsigned long page_size;
2130 page_size = memparse(str, &str);
2131 switch (page_size) {
2136 iovp_shift = ffs(page_size) - 1;
2139 printk("%s: unknown/unsupported iommu page size %ld\n",
2140 __FUNCTION__, page_size);
2146 __setup("sbapagesize=",sba_page_override);
2148 EXPORT_SYMBOL(sba_dma_mapping_error);
2149 EXPORT_SYMBOL(sba_map_single);
2150 EXPORT_SYMBOL(sba_unmap_single);
2151 EXPORT_SYMBOL(sba_map_sg);
2152 EXPORT_SYMBOL(sba_unmap_sg);
2153 EXPORT_SYMBOL(sba_dma_supported);
2154 EXPORT_SYMBOL(sba_alloc_coherent);
2155 EXPORT_SYMBOL(sba_free_coherent);