/*
 * Copyright (c) 2006, 2007 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/cdev.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <asm/pgtable.h>

#include "ipath_kernel.h"
#include "ipath_common.h"

static int ipath_open(struct inode *, struct file *);
static int ipath_close(struct inode *, struct file *);
static ssize_t ipath_write(struct file *, const char __user *, size_t,
			   loff_t *);
static unsigned int ipath_poll(struct file *, struct poll_table_struct *);
static int ipath_mmap(struct file *, struct vm_area_struct *);

static const struct file_operations ipath_file_ops = {
	.owner = THIS_MODULE,
	.write = ipath_write,
	.open = ipath_open,
	.release = ipath_close,
	.poll = ipath_poll,
	.mmap = ipath_mmap
};

/*
 * Convert kernel virtual addresses to physical addresses so they don't
 * potentially conflict with the chip addresses used as mmap offsets.
 * It doesn't really matter what mmap offset we use as long as we can
 * interpret it correctly.
 */
static u64 cvt_kvaddr(void *p)
{
	struct page *page;
	u64 paddr = 0;

	page = vmalloc_to_page(p);
	if (page)
		paddr = page_to_pfn(page) << PAGE_SHIFT;

	return paddr;
}
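
/*
 * The cookie scheme in practice: a vmalloc'ed subport page is advertised
 * to user code as cvt_kvaddr(p), i.e. the physical address of its
 * backing page.  For example, a page backed by pfn 0x1234 becomes the
 * mmap offset 0x1234 << PAGE_SHIFT; mmap_kvaddr() below recognizes such
 * an offset by recomputing cvt_kvaddr() on each candidate kernel address.
 */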

static int ipath_get_base_info(struct file *fp,
			       void __user *ubase, size_t ubase_size)
{
	struct ipath_portdata *pd = port_fp(fp);
	int ret = 0;
	struct ipath_base_info *kinfo = NULL;
	struct ipath_devdata *dd = pd->port_dd;
	unsigned subport_cnt;
	int shared, master;
	size_t sz;

	subport_cnt = pd->port_subport_cnt;
	if (!subport_cnt) {
		shared = 0;
		master = 0;
		subport_cnt = 1;
	} else {
		shared = 1;
		master = !subport_fp(fp);
	}

	sz = sizeof(*kinfo);
	/* If port sharing is not requested, allow the old size structure */
	if (!shared)
		sz -= 7 * sizeof(u64);
	if (ubase_size < sz) {
		ipath_cdbg(PROC,
			   "Base size %zu, need %zu (version mismatch?)\n",
			   ubase_size, sz);
		ret = -EINVAL;
		goto bail;
	}

	kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
	if (kinfo == NULL) {
		ret = -ENOMEM;
		goto bail;
	}

	ret = dd->ipath_f_get_base_info(pd, kinfo);
	if (ret < 0)
		goto bail;

	kinfo->spi_rcvhdr_cnt = dd->ipath_rcvhdrcnt;
	kinfo->spi_rcvhdrent_size = dd->ipath_rcvhdrentsize;
	kinfo->spi_tidegrcnt = dd->ipath_rcvegrcnt;
	kinfo->spi_rcv_egrbufsize = dd->ipath_rcvegrbufsize;
	/*
	 * have to mmap whole thing
	 */
	kinfo->spi_rcv_egrbuftotlen =
		pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;
	kinfo->spi_rcv_egrperchunk = pd->port_rcvegrbufs_perchunk;
	kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
		pd->port_rcvegrbuf_chunks;
	kinfo->spi_tidcnt = dd->ipath_rcvtidcnt / subport_cnt;
	if (master)
		kinfo->spi_tidcnt += dd->ipath_rcvtidcnt % subport_cnt;
	/*
	 * for this use, may be ipath_cfgports summed over all chips that
	 * are configured and present
	 */
	kinfo->spi_nports = dd->ipath_cfgports;
	/* unit (chip/board) our port is on */
	kinfo->spi_unit = dd->ipath_unit;
	/* for now, only a single page */
	kinfo->spi_tid_maxsize = PAGE_SIZE;

	/*
	 * Doing this per port, and based on the skip value, etc.  This has
	 * to be the actual buffer size, since the protocol code treats it
	 * as an array.
	 *
	 * These have to be set to user addresses in the user code via mmap.
	 * These values are used on return to user code for the mmap target
	 * addresses only.  For 32 bit, same 44 bit address problem, so use
	 * the physical address, not virtual.  Before 2.6.11, using the
	 * page_address() macro worked, but in 2.6.11, even that returns the
	 * full 64 bit address (upper bits all 1's).  So far, using the
	 * physical addresses (or chip offsets, for chip mapping) works, but
	 * no doubt some future kernel release will change that, and we'll be
	 * on to yet another method of dealing with this.
	 */
	kinfo->spi_rcvhdr_base = (u64) pd->port_rcvhdrq_phys;
	kinfo->spi_rcvhdr_tailaddr = (u64) pd->port_rcvhdrqtailaddr_phys;
	kinfo->spi_rcv_egrbufs = (u64) pd->port_rcvegr_phys;
	kinfo->spi_pioavailaddr = (u64) dd->ipath_pioavailregs_phys;
	kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
		(void *) dd->ipath_statusp -
		(void *) dd->ipath_pioavailregs_dma;
	if (!shared) {
		kinfo->spi_piocnt = dd->ipath_pbufsport;
		kinfo->spi_piobufbase = (u64) pd->port_piobufs;
		kinfo->__spi_uregbase = (u64) dd->ipath_uregbase +
			dd->ipath_palign * pd->port_port;
	} else if (master) {
		kinfo->spi_piocnt = (dd->ipath_pbufsport / subport_cnt) +
				    (dd->ipath_pbufsport % subport_cnt);
		/* Master's PIO buffers are after all the slave's */
		kinfo->spi_piobufbase = (u64) pd->port_piobufs +
			dd->ipath_palign *
			(dd->ipath_pbufsport - kinfo->spi_piocnt);
	} else {
		unsigned slave = subport_fp(fp) - 1;

		kinfo->spi_piocnt = dd->ipath_pbufsport / subport_cnt;
		kinfo->spi_piobufbase = (u64) pd->port_piobufs +
			dd->ipath_palign * kinfo->spi_piocnt * slave;
	}
	if (shared) {
		kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase +
			dd->ipath_palign * pd->port_port;
		kinfo->spi_port_rcvegrbuf = kinfo->spi_rcv_egrbufs;
		kinfo->spi_port_rcvhdr_base = kinfo->spi_rcvhdr_base;
		kinfo->spi_port_rcvhdr_tailaddr = kinfo->spi_rcvhdr_tailaddr;

		kinfo->__spi_uregbase = cvt_kvaddr(pd->subport_uregbase +
			PAGE_SIZE * subport_fp(fp));

		kinfo->spi_rcvhdr_base = cvt_kvaddr(pd->subport_rcvhdr_base +
			pd->port_rcvhdrq_size * subport_fp(fp));
		kinfo->spi_rcvhdr_tailaddr = 0;
		kinfo->spi_rcv_egrbufs = cvt_kvaddr(pd->subport_rcvegrbuf +
			pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size *
			(subport_fp(fp) - 1));

		kinfo->spi_subport_uregbase =
			cvt_kvaddr(pd->subport_uregbase);
		kinfo->spi_subport_rcvegrbuf =
			cvt_kvaddr(pd->subport_rcvegrbuf);
		kinfo->spi_subport_rcvhdr_base =
			cvt_kvaddr(pd->subport_rcvhdr_base);
		ipath_cdbg(PROC, "port %u flags %x %llx %llx %llx\n",
			kinfo->spi_port, kinfo->spi_runtime_flags,
			(unsigned long long) kinfo->spi_subport_uregbase,
			(unsigned long long) kinfo->spi_subport_rcvegrbuf,
			(unsigned long long) kinfo->spi_subport_rcvhdr_base);
	}

	kinfo->spi_pioindex = (kinfo->spi_piobufbase - dd->ipath_piobufbase) /
		dd->ipath_palign;
	kinfo->spi_pioalign = dd->ipath_palign;

	kinfo->spi_qpair = IPATH_KD_QP;
	kinfo->spi_piosize = dd->ipath_ibmaxlen;
	kinfo->spi_mtu = dd->ipath_ibmaxlen;	/* maxlen, not ibmtu */
	kinfo->spi_port = pd->port_port;
	kinfo->spi_subport = subport_fp(fp);
	kinfo->spi_sw_version = IPATH_KERN_SWVERSION;
	kinfo->spi_hw_version = dd->ipath_revision;

	if (master)
		kinfo->spi_runtime_flags |= IPATH_RUNTIME_MASTER;

	sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
	if (copy_to_user(ubase, kinfo, sz))
		ret = -EFAULT;

bail:
	kfree(kinfo);
	return ret;
}
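
#if 0
/*
 * Illustrative userspace sketch, not driver code: one plausible way a
 * library could request this base info through the write() command
 * channel (see ipath_write() below).  Assumes the ipath_common.h ABI,
 * an open /dev/ipath descriptor "fd", and userspace headers
 * (<string.h>, <unistd.h>); error handling is reduced to a boolean.
 */
static int example_get_base_info(int fd, struct ipath_base_info *binfo)
{
	struct ipath_cmd c;

	memset(&c, 0, sizeof(c));	/* zero spu_subport_cnt etc. */
	c.type = IPATH_CMD_USER_INIT;
	c.cmd.user_info.spu_userversion = IPATH_USER_SWVERSION;
	c.cmd.user_info.spu_base_info_size = sizeof(*binfo);
	c.cmd.user_info.spu_base_info = (__u64) (unsigned long) binfo;
	/* on success the driver fills *binfo via ipath_get_base_info() */
	return write(fd, &c, sizeof(c)) < 0 ? -1 : 0;
}
#endif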

/**
 * ipath_tid_update - update a port TID
 * @pd: the port
 * @fp: the ipath device file
 * @ti: the TID information
 *
 * The new implementation as of Oct 2004 is that the driver assigns
 * the tid and returns it to the caller.   To make it easier to
 * catch bugs, and to reduce search time, we keep a cursor for
 * each port, walking the shadow tid array to find one that's not
 * in use.
 *
 * For now, if we can't allocate the full list, we fail, although
 * in the long run, we'll allocate as many as we can, and the
 * caller will deal with that by trying the remaining pages later.
 * That means that when we fail, we have to mark the tids as not in
 * use again, in our shadow copy.
 *
 * It's up to the caller to free the tids when they are done.
 * We'll unlock the pages as they free them.
 *
 * Also, right now we are locking one page at a time, but since
 * the intended use of this routine is for a single group of
 * virtually contiguous pages, that should change to improve
 * performance.
 */
static int ipath_tid_update(struct ipath_portdata *pd, struct file *fp,
			    const struct ipath_tid_info *ti)
{
	int ret = 0, ntids;
	u32 tid, porttid, cnt, i, tidcnt, tidoff;
	u16 *tidlist;
	struct ipath_devdata *dd = pd->port_dd;
	u64 physaddr;
	unsigned long vaddr;
	u64 __iomem *tidbase;
	unsigned long tidmap[8];
	struct page **pagep = NULL;
	unsigned subport = subport_fp(fp);

	if (!dd->ipath_pageshadow) {
		ret = -ENOMEM;
		goto done;
	}

	cnt = ti->tidcnt;
	if (!cnt) {
		ipath_dbg("After copyin, tidcnt 0, tidlist %llx\n",
			  (unsigned long long) ti->tidlist);
		/*
		 * Should we treat as success?  likely a bug
		 */
		ret = -EFAULT;
		goto done;
	}
	porttid = pd->port_port * dd->ipath_rcvtidcnt;
	if (!pd->port_subport_cnt) {
		tidcnt = dd->ipath_rcvtidcnt;
		tid = pd->port_tidcursor;
		tidoff = 0;
	} else if (!subport) {
		tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) +
			 (dd->ipath_rcvtidcnt % pd->port_subport_cnt);
		tidoff = dd->ipath_rcvtidcnt - tidcnt;
		porttid += tidoff;
		tid = tidcursor_fp(fp);
	} else {
		tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt;
		tidoff = tidcnt * (subport - 1);
		porttid += tidoff;
		tid = tidcursor_fp(fp);
	}
	if (cnt > tidcnt) {
		/* make sure it all fits in port_tid_pg_list */
		dev_info(&dd->pcidev->dev, "Process tried to allocate %u "
			 "TIDs, only trying max (%u)\n", cnt, tidcnt);
		cnt = tidcnt;
	}
	pagep = &((struct page **) pd->port_tid_pg_list)[tidoff];
	tidlist = &((u16 *) &pagep[dd->ipath_rcvtidcnt])[tidoff];

	memset(tidmap, 0, sizeof(tidmap));
	/* before decrement; chip actual # */
	ntids = tidcnt;
	tidbase = (u64 __iomem *) (((char __iomem *) dd->ipath_kregbase) +
				   dd->ipath_rcvtidbase +
				   porttid * sizeof(*tidbase));

	ipath_cdbg(VERBOSE, "Port%u %u tids, cursor %u, tidbase %p\n",
		   pd->port_port, cnt, tid, tidbase);

	/* virtual address of first page in transfer */
	vaddr = ti->tidvaddr;
	if (!access_ok(VERIFY_WRITE, (void __user *) vaddr,
		       cnt * PAGE_SIZE)) {
		ipath_dbg("Fail vaddr %p, %u pages, !access_ok\n",
			  (void *)vaddr, cnt);
		ret = -EFAULT;
		goto done;
	}
	ret = ipath_get_user_pages(vaddr, cnt, pagep);
	if (ret) {
		if (ret == -EBUSY) {
			ipath_dbg("Failed to lock addr %p, %u pages "
				  "(already locked)\n",
				  (void *) vaddr, cnt);
			/*
			 * for now, continue, and see what happens but with
			 * the new implementation, this should never happen,
			 * unless perhaps the user has mpin'ed the pages
			 * themselves (something we need to test)
			 */
			ret = 0;
		} else {
			dev_info(&dd->pcidev->dev,
				 "Failed to lock addr %p, %u pages: "
				 "errno %d\n", (void *) vaddr, cnt, -ret);
			goto done;
		}
	}
	for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
		for (; ntids--; tid++) {
			if (tid == tidcnt)
				tid = 0;
			if (!dd->ipath_pageshadow[porttid + tid])
				break;
		}
		if (ntids < 0) {
			/*
			 * oops, wrapped all the way through their TIDs,
			 * and didn't have enough free; see comments at
			 * start of routine
			 */
			ipath_dbg("Not enough free TIDs for %u pages "
				  "(index %d), failing\n", cnt, i);
			i--;	/* last tidlist[i] not filled in */
			ret = -ENOMEM;
			goto cleanup;
		}
		tidlist[i] = tid + tidoff;
		ipath_cdbg(VERBOSE, "Updating idx %u to TID %u, "
			   "vaddr %lx\n", i, tid + tidoff, vaddr);
		/* we "know" system pages and TID pages are same size */
		dd->ipath_pageshadow[porttid + tid] = pagep[i];
		dd->ipath_physshadow[porttid + tid] = ipath_map_page(
			dd->pcidev, pagep[i], 0, PAGE_SIZE,
			PCI_DMA_FROMDEVICE);
		/*
		 * don't need atomic or it's overhead
		 */
		__set_bit(tid, tidmap);
		physaddr = dd->ipath_physshadow[porttid + tid];
		ipath_stats.sps_pagelocks++;
		ipath_cdbg(VERBOSE,
			   "TID %u, vaddr %lx, physaddr %llx pgp %p\n",
			   tid, vaddr, (unsigned long long) physaddr,
			   pagep[i]);
		dd->ipath_f_put_tid(dd, &tidbase[tid], RCVHQ_RCV_TYPE_EXPECTED,
				    physaddr);
		/*
		 * don't check this tid in ipath_portshadow, since we
		 * just filled it in; start with the next one.
		 */
		tid++;
	}

	if (ret) {
		u32 limit;
	cleanup:
		/* jump here if copy out of updated info failed... */
		ipath_dbg("After failure (ret=%d), undo %d of %d entries\n",
			  -ret, i, cnt);
		/* same code that's in ipath_free_tid() */
		limit = sizeof(tidmap) * BITS_PER_BYTE;
		if (limit > tidcnt)
			/* just in case size changes in future */
			limit = tidcnt;
		tid = find_first_bit((const unsigned long *)tidmap, limit);
		for (; tid < limit; tid++) {
			if (!test_bit(tid, tidmap))
				continue;
			if (dd->ipath_pageshadow[porttid + tid]) {
				ipath_cdbg(VERBOSE, "Freeing TID %u\n",
					   tid);
				dd->ipath_f_put_tid(dd, &tidbase[tid],
						    RCVHQ_RCV_TYPE_EXPECTED,
						    dd->ipath_tidinvalid);
				pci_unmap_page(dd->pcidev,
					dd->ipath_physshadow[porttid + tid],
					PAGE_SIZE, PCI_DMA_FROMDEVICE);
				dd->ipath_pageshadow[porttid + tid] = NULL;
				ipath_stats.sps_pageunlocks++;
			}
		}
		ipath_release_user_pages(pagep, cnt);
	} else {
		/*
		 * Copy the updated array, with ipath_tid's filled in, back
		 * to user.  Since we did the copy in already, this "should
		 * never fail" If it does, we have to clean up...
		 */
		if (copy_to_user((void __user *)
				 (unsigned long) ti->tidlist,
				 tidlist, cnt * sizeof(*tidlist))) {
			ret = -EFAULT;
			goto cleanup;
		}
		if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
				 tidmap, sizeof tidmap)) {
			ret = -EFAULT;
			goto cleanup;
		}
		if (tid == tidcnt)
			tid = 0;
		if (!pd->port_subport_cnt)
			pd->port_tidcursor = tid;
		else
			tidcursor_fp(fp) = tid;
	}

done:
	if (ret)
		ipath_dbg("Failed to map %u TID pages, failing with %d\n",
			  ti->tidcnt, -ret);
	return ret;
}
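
#if 0
/*
 * Illustrative userspace sketch, not driver code: programming expected
 * TIDs for a virtually contiguous buffer via the command channel.
 * tidlist/tidmap point at caller-provided arrays, per the
 * struct ipath_tid_info layout in ipath_common.h; assumes <string.h>
 * and <unistd.h> in userspace.
 */
static int example_tid_update(int fd, void *buf, unsigned npages,
			      __u16 *tidlist, __u64 *tidmap)
{
	struct ipath_cmd c;

	memset(&c, 0, sizeof(c));
	c.type = IPATH_CMD_TID_UPDATE;
	c.cmd.tid_info.tidcnt = npages;
	c.cmd.tid_info.tidvaddr = (__u64) (unsigned long) buf;
	c.cmd.tid_info.tidlist = (__u64) (unsigned long) tidlist;
	c.cmd.tid_info.tidmap = (__u64) (unsigned long) tidmap;
	/* driver writes the assigned TIDs back through tidlist/tidmap */
	return write(fd, &c, sizeof(c)) < 0 ? -1 : 0;
}
#endif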

/**
 * ipath_tid_free - free a port TID
 * @pd: the port
 * @subport: the subport
 * @ti: the TID info
 *
 * right now we are unlocking one page at a time, but since
 * the intended use of this routine is for a single group of
 * virtually contiguous pages, that should change to improve
 * performance.  We check that the TID is in range for this port
 * but otherwise don't check validity; if user has an error and
 * frees the wrong tid, it's only their own data that can thereby
 * be corrupted.  We do check that the TID was in use, for sanity.
 * We always use our idea of the saved address, not the address that
 * they pass in to us.
 */
static int ipath_tid_free(struct ipath_portdata *pd, unsigned subport,
			  const struct ipath_tid_info *ti)
{
	int ret = 0;
	u32 tid, porttid, cnt, limit, tidcnt;
	struct ipath_devdata *dd = pd->port_dd;
	u64 __iomem *tidbase;
	unsigned long tidmap[8];

	if (!dd->ipath_pageshadow) {
		ret = -ENOMEM;
		goto done;
	}

	if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
			   sizeof tidmap)) {
		ret = -EFAULT;
		goto done;
	}

	porttid = pd->port_port * dd->ipath_rcvtidcnt;
	if (!pd->port_subport_cnt)
		tidcnt = dd->ipath_rcvtidcnt;
	else if (!subport) {
		tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) +
			 (dd->ipath_rcvtidcnt % pd->port_subport_cnt);
		porttid += dd->ipath_rcvtidcnt - tidcnt;
	} else {
		tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt;
		porttid += tidcnt * (subport - 1);
	}
	tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
				   dd->ipath_rcvtidbase +
				   porttid * sizeof(*tidbase));

	limit = sizeof(tidmap) * BITS_PER_BYTE;
	if (limit > tidcnt)
		/* just in case size changes in future */
		limit = tidcnt;
	tid = find_first_bit(tidmap, limit);
	ipath_cdbg(VERBOSE, "Port%u free %u tids; first bit (max=%d) "
		   "set is %d, porttid %u\n", pd->port_port, ti->tidcnt,
		   limit, tid, porttid);
	for (cnt = 0; tid < limit; tid++) {
		/*
		 * small optimization; if we detect a run of 3 or so without
		 * any set, use find_first_bit again.  That's mainly to
		 * accelerate the case where we wrapped, so we have some at
		 * the beginning, and some at the end, and a big gap
		 * in the middle.
		 */
		if (!test_bit(tid, tidmap))
			continue;
		cnt++;
		if (dd->ipath_pageshadow[porttid + tid]) {
			struct page *p;
			p = dd->ipath_pageshadow[porttid + tid];
			dd->ipath_pageshadow[porttid + tid] = NULL;
			ipath_cdbg(VERBOSE, "PID %u freeing TID %u\n",
				   pd->port_pid, tid);
			dd->ipath_f_put_tid(dd, &tidbase[tid],
					    RCVHQ_RCV_TYPE_EXPECTED,
					    dd->ipath_tidinvalid);
			pci_unmap_page(dd->pcidev,
				dd->ipath_physshadow[porttid + tid],
				PAGE_SIZE, PCI_DMA_FROMDEVICE);
			ipath_release_user_pages(&p, 1);
			ipath_stats.sps_pageunlocks++;
		} else
			ipath_dbg("Unused tid %u, ignoring\n", tid);
	}
	if (cnt != ti->tidcnt)
		ipath_dbg("passed in tidcnt %d, only %d bits set in map\n",
			  ti->tidcnt, cnt);
done:
	if (ret)
		ipath_dbg("Failed to unmap %u TID pages, failing with %d\n",
			  ti->tidcnt, -ret);
	return ret;
}
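
/*
 * Sizing note on the bitmap walked above: tidmap is 8 unsigned longs,
 * i.e. 512 bits on a 64-bit kernel (256 on 32-bit), which bounds how
 * many TIDs a single TID_FREE/TID_UPDATE call can describe; the
 * "limit > tidcnt" clamp then keeps the walk within this port's share.
 */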

/**
 * ipath_set_part_key - set a partition key
 * @pd: the port
 * @key: the key
 *
 * We can have up to 4 active at a time (other than the default, which is
 * always allowed).  This is somewhat tricky, since multiple ports may set
 * the same key, so we reference count them, and clean up at exit.  All 4
 * partition keys are packed into a single infinipath register.  It's an
 * error for a process to set the same pkey multiple times.  We provide no
 * mechanism to de-allocate a pkey at this time, we may eventually need to
 * do that.  I've used the atomic operations, and no locking, and only make
 * a single pass through what's available.  This should be more than
 * adequate for some time.  I'll think about spinlocks or the like if and as
 * it's necessary.
 */
static int ipath_set_part_key(struct ipath_portdata *pd, u16 key)
{
	struct ipath_devdata *dd = pd->port_dd;
	int i, any = 0, pidx = -1;
	u16 lkey = key & 0x7FFF;
	int ret;

	if (lkey == (IPATH_DEFAULT_P_KEY & 0x7FFF)) {
		/* nothing to do; this key always valid */
		ret = 0;
		goto bail;
	}

	ipath_cdbg(VERBOSE, "p%u try to set pkey %hx, current keys "
		   "%hx:%x %hx:%x %hx:%x %hx:%x\n",
		   pd->port_port, key, dd->ipath_pkeys[0],
		   atomic_read(&dd->ipath_pkeyrefs[0]), dd->ipath_pkeys[1],
		   atomic_read(&dd->ipath_pkeyrefs[1]), dd->ipath_pkeys[2],
		   atomic_read(&dd->ipath_pkeyrefs[2]), dd->ipath_pkeys[3],
		   atomic_read(&dd->ipath_pkeyrefs[3]));

	if (!lkey) {
		ipath_cdbg(PROC, "p%u tries to set key 0, not allowed\n",
			   pd->port_port);
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * Set the full membership bit, because it has to be
	 * set in the register or the packet, and it seems
	 * cleaner to set in the register than to force all
	 * callers to set it. (see bug 4331)
	 */
	key |= 0x8000;

	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
		if (!pd->port_pkeys[i] && pidx == -1)
			pidx = i;
		if (pd->port_pkeys[i] == key) {
			ipath_cdbg(VERBOSE, "p%u tries to set same pkey "
				   "(%x) more than once\n",
				   pd->port_port, key);
			ret = -EEXIST;
			goto bail;
		}
	}
	if (pidx == -1) {
		ipath_dbg("All pkeys for port %u already in use, "
			  "can't set %x\n", pd->port_port, key);
		ret = -EBUSY;
		goto bail;
	}
	for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i]) {
			any++;
			continue;
		}
		if (dd->ipath_pkeys[i] == key) {
			atomic_t *pkrefs = &dd->ipath_pkeyrefs[i];

			if (atomic_inc_return(pkrefs) > 1) {
				pd->port_pkeys[pidx] = key;
				ipath_cdbg(VERBOSE, "p%u set key %x "
					   "matches #%d, count now %d\n",
					   pd->port_port, key, i,
					   atomic_read(pkrefs));
				ret = 0;
				goto bail;
			} else {
				/*
				 * lost race, decrement count, catch below
				 */
				atomic_dec(pkrefs);
				ipath_cdbg(VERBOSE, "Lost race, count was "
					   "0, after dec, it's %d\n",
					   atomic_read(pkrefs));
				any++;
			}
		}
		if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
			/*
			 * It makes no sense to have both the limited and
			 * full membership PKEY set at the same time since
			 * the unlimited one will disable the limited one.
			 */
			ret = -EEXIST;
			goto bail;
		}
	}
	if (!any) {
		ipath_dbg("port %u, all pkeys already in use, "
			  "can't set %x\n", pd->port_port, key);
		ret = -EBUSY;
		goto bail;
	}
	for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i] &&
		    atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
			u64 pkey;

			/* for ipathstats, etc. */
			ipath_stats.sps_pkeys[i] = lkey;
			pd->port_pkeys[pidx] = dd->ipath_pkeys[i] = key;
			pkey =
				(u64) dd->ipath_pkeys[0] |
				((u64) dd->ipath_pkeys[1] << 16) |
				((u64) dd->ipath_pkeys[2] << 32) |
				((u64) dd->ipath_pkeys[3] << 48);
			ipath_cdbg(PROC, "p%u set key %x in #%d, "
				   "portidx %d, new pkey reg %llx\n",
				   pd->port_port, key, i, pidx,
				   (unsigned long long) pkey);
			ipath_write_kreg(
				dd, dd->ipath_kregs->kr_partitionkey, pkey);

			ret = 0;
			goto bail;
		}
	}
	ipath_dbg("port %u, all pkeys already in use 2nd pass, "
		  "can't set %x\n", pd->port_port, key);
	ret = -EBUSY;

bail:
	return ret;
}
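
/*
 * Worked example of the register packing above: with ipath_pkeys[] =
 * { 0xFFFF, 0x8001, 0, 0 }, the value written to kr_partitionkey is
 * 0xFFFF | ((u64) 0x8001 << 16) = 0x000000008001ffff, i.e. one 16-bit
 * slot per key, low slot first.
 */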

/**
 * ipath_manage_rcvq - manage a port's receive queue
 * @pd: the port
 * @subport: the subport
 * @start_stop: action to carry out
 *
 * start_stop == 0 disables receive on the port, for use in queue
 * overflow conditions.  start_stop==1 re-enables, to be used to
 * re-init the software copy of the head register
 */
static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
			     int start_stop)
{
	struct ipath_devdata *dd = pd->port_dd;

	ipath_cdbg(PROC, "%sabling rcv for unit %u port %u:%u\n",
		   start_stop ? "en" : "dis", dd->ipath_unit,
		   pd->port_port, subport);
	if (subport)
		goto bail;
	/* atomically clear receive enable port. */
	if (start_stop) {
		/*
		 * On enable, force in-memory copy of the tail register to
		 * 0, so that protocol code doesn't have to worry about
		 * whether or not the chip has yet updated the in-memory
		 * copy or not on return from the system call. The chip
		 * always resets its tail register back to 0 on a
		 * transition from disabled to enabled. This could cause a
		 * problem if software was broken, and did the enable w/o
		 * the disable, but eventually the in-memory copy will be
		 * updated and correct itself, even in the face of software
		 * bugs.
		 */
		if (pd->port_rcvhdrtail_kvaddr)
			ipath_clear_rcvhdrtail(pd);
		set_bit(dd->ipath_r_portenable_shift + pd->port_port,
			&dd->ipath_rcvctrl);
	} else
		clear_bit(dd->ipath_r_portenable_shift + pd->port_port,
			  &dd->ipath_rcvctrl);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl);
	/* now be sure chip saw it before we return */
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	if (start_stop) {
		/*
		 * And try to be sure that tail reg update has happened too.
		 * This should in theory interlock with the RXE changes to
		 * the tail register.  Don't assign it to the tail register
		 * in memory copy, since we could overwrite an update by the
		 * chip if we did.
		 */
		ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
	}
	/* always; new head should be equal to new tail; see above */
bail:
	return 0;
}

static void ipath_clean_part_key(struct ipath_portdata *pd,
				 struct ipath_devdata *dd)
{
	int i, j, pchanged = 0;
	u64 oldpkey;

	/* for debugging only */
	oldpkey = (u64) dd->ipath_pkeys[0] |
		((u64) dd->ipath_pkeys[1] << 16) |
		((u64) dd->ipath_pkeys[2] << 32) |
		((u64) dd->ipath_pkeys[3] << 48);

	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
		if (!pd->port_pkeys[i])
			continue;
		ipath_cdbg(VERBOSE, "look for key[%d] %hx in pkeys\n", i,
			   pd->port_pkeys[i]);
		for (j = 0; j < ARRAY_SIZE(dd->ipath_pkeys); j++) {
			/* check for match independent of the global bit */
			if ((dd->ipath_pkeys[j] & 0x7fff) !=
			    (pd->port_pkeys[i] & 0x7fff))
				continue;
			if (atomic_dec_and_test(&dd->ipath_pkeyrefs[j])) {
				ipath_cdbg(VERBOSE, "p%u clear key "
					   "%x matches #%d\n",
					   pd->port_port,
					   pd->port_pkeys[i], j);
				ipath_stats.sps_pkeys[j] =
					dd->ipath_pkeys[j] = 0;
				pchanged++;
			}
			else ipath_cdbg(
				VERBOSE, "p%u key %x matches #%d, "
				"but ref still %d\n", pd->port_port,
				pd->port_pkeys[i], j,
				atomic_read(&dd->ipath_pkeyrefs[j]));
			break;
		}
		pd->port_pkeys[i] = 0;
	}
	if (pchanged) {
		u64 pkey = (u64) dd->ipath_pkeys[0] |
			((u64) dd->ipath_pkeys[1] << 16) |
			((u64) dd->ipath_pkeys[2] << 32) |
			((u64) dd->ipath_pkeys[3] << 48);
		ipath_cdbg(VERBOSE, "p%u old pkey reg %llx, "
			   "new pkey reg %llx\n", pd->port_port,
			   (unsigned long long) oldpkey,
			   (unsigned long long) pkey);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
				 pkey);
	}
}

/*
 * Initialize the port data with the receive buffer sizes
 * so this can be done while the master port is locked.
 * Otherwise, there is a race with a slave opening the port
 * and seeing these fields uninitialized.
 */
static void init_user_egr_sizes(struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	unsigned egrperchunk, egrcnt, size;

	/*
	 * to avoid wasting a lot of memory, we allocate 32KB chunks of
	 * physically contiguous memory, advance through it until used up
	 * and then allocate more.  Of course, we need memory to store those
	 * extra pointers, now.  Started out with 256KB, but under heavy
	 * memory pressure (creating large files and then copying them over
	 * NFS while doing lots of MPI jobs), we hit some allocation
	 * failures, even though we can sleep...  (2.6.10) Still get
	 * failures at 64K.  32K is the lowest we can go without wasting
	 * additional memory.
	 */
	size = 0x8000;
	egrperchunk = size / dd->ipath_rcvegrbufsize;
	egrcnt = dd->ipath_rcvegrcnt;
	pd->port_rcvegrbuf_chunks = (egrcnt + egrperchunk - 1) / egrperchunk;
	pd->port_rcvegrbufs_perchunk = egrperchunk;
	pd->port_rcvegrbuf_size = size;
}
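
/*
 * Worked example with hypothetical numbers: at the 32KB chunk size
 * above and a 2048-byte eager buffer, egrperchunk = 0x8000 / 2048 = 16
 * buffers per chunk, so an egrcnt of 2048 needs
 * (2048 + 16 - 1) / 16 = 128 chunks.
 */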

/**
 * ipath_create_user_egr - allocate eager TID buffers
 * @pd: the port to allocate TID buffers for
 *
 * This routine is now quite different for user and kernel, because
 * the kernel uses skb's, for the accelerated network performance.
 * This is the user port version.
 *
 * Allocate the eager TID buffers and program them into infinipath.
 * They are no longer completely contiguous, we do multiple allocation
 * calls.
 */
static int ipath_create_user_egr(struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
	size_t size;
	int ret;
	gfp_t gfp_flags;

	/*
	 * GFP_USER, but without GFP_FS, so buffer cache can be
	 * coalesced (we hope); otherwise, even at order 4,
	 * heavy filesystem activity makes these fail, and we can
	 * use compound pages.
	 */
	gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;

	egrcnt = dd->ipath_rcvegrcnt;
	/* TID number offset for this port */
	egroff = pd->port_port * egrcnt;
	egrsize = dd->ipath_rcvegrbufsize;
	ipath_cdbg(VERBOSE, "Allocating %d egr buffers, at egrtid "
		   "offset %x, egrsize %u\n", egrcnt, egroff, egrsize);

	chunk = pd->port_rcvegrbuf_chunks;
	egrperchunk = pd->port_rcvegrbufs_perchunk;
	size = pd->port_rcvegrbuf_size;
	pd->port_rcvegrbuf = kmalloc(chunk * sizeof(pd->port_rcvegrbuf[0]),
				     GFP_KERNEL);
	if (!pd->port_rcvegrbuf) {
		ret = -ENOMEM;
		goto bail;
	}
	pd->port_rcvegrbuf_phys =
		kmalloc(chunk * sizeof(pd->port_rcvegrbuf_phys[0]),
			GFP_KERNEL);
	if (!pd->port_rcvegrbuf_phys) {
		ret = -ENOMEM;
		goto bail_rcvegrbuf;
	}
	for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {

		pd->port_rcvegrbuf[e] = dma_alloc_coherent(
			&dd->pcidev->dev, size, &pd->port_rcvegrbuf_phys[e],
			gfp_flags);

		if (!pd->port_rcvegrbuf[e]) {
			ret = -ENOMEM;
			goto bail_rcvegrbuf_phys;
		}
	}

	pd->port_rcvegr_phys = pd->port_rcvegrbuf_phys[0];

	for (e = chunk = 0; chunk < pd->port_rcvegrbuf_chunks; chunk++) {
		dma_addr_t pa = pd->port_rcvegrbuf_phys[chunk];
		unsigned i;

		for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
			dd->ipath_f_put_tid(dd, e + egroff +
					    (u64 __iomem *)
					    ((char __iomem *)
					     dd->ipath_kregbase +
					     dd->ipath_rcvegrbase),
					    RCVHQ_RCV_TYPE_EAGER, pa);
			pa += egrsize;
		}
		cond_resched();	/* don't hog the cpu */
	}

	ret = 0;
	goto bail;

bail_rcvegrbuf_phys:
	for (e = 0; e < pd->port_rcvegrbuf_chunks &&
		pd->port_rcvegrbuf[e]; e++) {
		dma_free_coherent(&dd->pcidev->dev, size,
				  pd->port_rcvegrbuf[e],
				  pd->port_rcvegrbuf_phys[e]);

	}
	kfree(pd->port_rcvegrbuf_phys);
	pd->port_rcvegrbuf_phys = NULL;
bail_rcvegrbuf:
	kfree(pd->port_rcvegrbuf);
	pd->port_rcvegrbuf = NULL;
bail:
	return ret;
}

/* common code for the mappings on dma_alloc_coherent mem */
static int ipath_mmap_mem(struct vm_area_struct *vma,
	struct ipath_portdata *pd, unsigned len, int write_ok,
	void *kvaddr, char *what)
{
	struct ipath_devdata *dd = pd->port_dd;
	unsigned long pfn;
	int ret;

	if ((vma->vm_end - vma->vm_start) > len) {
		dev_info(&dd->pcidev->dev,
			 "FAIL on %s: len %lx > %x\n", what,
			 vma->vm_end - vma->vm_start, len);
		ret = -EFAULT;
		goto bail;
	}

	if (!write_ok) {
		if (vma->vm_flags & VM_WRITE) {
			dev_info(&dd->pcidev->dev,
				 "%s must be mapped readonly\n", what);
			ret = -EPERM;
			goto bail;
		}

		/* don't allow them to later change with mprotect */
		vma->vm_flags &= ~VM_MAYWRITE;
	}

	pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
	ret = remap_pfn_range(vma, vma->vm_start, pfn,
			      len, vma->vm_page_prot);
	if (ret)
		dev_info(&dd->pcidev->dev, "%s port%u mmap of %lx, %x "
			 "bytes r%c failed: %d\n", what, pd->port_port,
			 pfn, len, write_ok?'w':'o', ret);
	else
		ipath_cdbg(VERBOSE, "%s port%u mmaped %lx, %x bytes "
			   "r%c\n", what, pd->port_port, pfn, len,
			   write_ok?'w':'o');
bail:
	return ret;
}

static int mmap_ureg(struct vm_area_struct *vma, struct ipath_devdata *dd,
		     u64 ureg)
{
	unsigned long phys;
	int ret;

	/*
	 * This is real hardware, so use io_remap.  This is the mechanism
	 * for the user process to update the head registers for their port
	 * in the chip.
	 */
	if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
		dev_info(&dd->pcidev->dev, "FAIL mmap userreg: reqlen "
			 "%lx > PAGE\n", vma->vm_end - vma->vm_start);
		ret = -EFAULT;
	} else {
		phys = dd->ipath_physaddr + ureg;
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 phys >> PAGE_SHIFT,
					 vma->vm_end - vma->vm_start,
					 vma->vm_page_prot);
	}
	return ret;
}

static int mmap_piobufs(struct vm_area_struct *vma,
			struct ipath_devdata *dd,
			struct ipath_portdata *pd,
			unsigned piobufs, unsigned piocnt)
{
	unsigned long phys;
	int ret;

	/*
	 * When we map the PIO buffers in the chip, we want to map them as
	 * writeonly, no read possible.   This prevents access to previous
	 * process data, and catches users who might try to read the i/o
	 * space due to a bug.
	 */
	if ((vma->vm_end - vma->vm_start) > (piocnt * dd->ipath_palign)) {
		dev_info(&dd->pcidev->dev, "FAIL mmap piobufs: "
			 "reqlen %lx > PAGE\n",
			 vma->vm_end - vma->vm_start);
		ret = -EINVAL;
		goto bail;
	}

	phys = dd->ipath_physaddr + piobufs;

	/*
	 * Don't mark this as non-cached, or we don't get the
	 * write combining behavior we want on the PIO buffers!
	 */

#if defined(__powerpc__)
	/* There isn't a generic way to specify writethrough mappings */
	pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
	pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
	pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED;
#endif

	/*
	 * don't allow them to later change to readable with mprotect (for when
	 * not initially mapped readable, as is normally the case)
	 */
	vma->vm_flags &= ~VM_MAYREAD;
	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;

	ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
				 vma->vm_end - vma->vm_start,
				 vma->vm_page_prot);
bail:
	return ret;
}

static int mmap_rcvegrbufs(struct vm_area_struct *vma,
			   struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	unsigned long start, size;
	size_t total_size, i;
	unsigned long pfn;
	int ret;

	size = pd->port_rcvegrbuf_size;
	total_size = pd->port_rcvegrbuf_chunks * size;
	if ((vma->vm_end - vma->vm_start) > total_size) {
		dev_info(&dd->pcidev->dev, "FAIL on egr bufs: "
			 "reqlen %lx > actual %lx\n",
			 vma->vm_end - vma->vm_start,
			 (unsigned long) total_size);
		ret = -EINVAL;
		goto bail;
	}

	if (vma->vm_flags & VM_WRITE) {
		dev_info(&dd->pcidev->dev, "Can't map eager buffers as "
			 "writable (flags=%lx)\n", vma->vm_flags);
		ret = -EPERM;
		goto bail;
	}

	/* don't allow them to later change to writeable with mprotect */
	vma->vm_flags &= ~VM_MAYWRITE;

	start = vma->vm_start;

	for (i = 0; i < pd->port_rcvegrbuf_chunks; i++, start += size) {
		pfn = virt_to_phys(pd->port_rcvegrbuf[i]) >> PAGE_SHIFT;
		ret = remap_pfn_range(vma, start, pfn, size,
				      vma->vm_page_prot);
		if (ret < 0)
			goto bail;
	}
	ret = 0;

bail:
	return ret;
}

/*
 * ipath_file_vma_fault - handle a VMA page fault.
 */
static int ipath_file_vma_fault(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	struct page *page;

	page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	return 0;
}

static struct vm_operations_struct ipath_file_vm_ops = {
	.fault = ipath_file_vma_fault,
};

static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
		       struct ipath_portdata *pd, unsigned subport)
{
	unsigned long len;
	struct ipath_devdata *dd;
	void *addr;
	size_t size;
	int ret = 0;

	/* If the port is not shared, all addresses should be physical */
	if (!pd->port_subport_cnt)
		goto bail;

	dd = pd->port_dd;
	size = pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;

	/*
	 * Each process has all the subport uregbase, rcvhdrq, and
	 * rcvegrbufs mmapped - as an array for all the processes,
	 * and also separately for this process.
	 */
	if (pgaddr == cvt_kvaddr(pd->subport_uregbase)) {
		addr = pd->subport_uregbase;
		size = PAGE_SIZE * pd->port_subport_cnt;
	} else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base)) {
		addr = pd->subport_rcvhdr_base;
		size = pd->port_rcvhdrq_size * pd->port_subport_cnt;
	} else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf)) {
		addr = pd->subport_rcvegrbuf;
		size *= pd->port_subport_cnt;
	} else if (pgaddr == cvt_kvaddr(pd->subport_uregbase +
					PAGE_SIZE * subport)) {
		addr = pd->subport_uregbase + PAGE_SIZE * subport;
		size = PAGE_SIZE;
	} else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base +
				pd->port_rcvhdrq_size * subport)) {
		addr = pd->subport_rcvhdr_base +
			pd->port_rcvhdrq_size * subport;
		size = pd->port_rcvhdrq_size;
	} else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf +
			       size * subport)) {
		addr = pd->subport_rcvegrbuf + size * subport;
		/* rcvegrbufs are read-only on the slave */
		if (vma->vm_flags & VM_WRITE) {
			dev_info(&dd->pcidev->dev,
				 "Can't map eager buffers as "
				 "writable (flags=%lx)\n", vma->vm_flags);
			ret = -EPERM;
			goto bail;
		}
		/*
		 * Don't allow permission to later change to writeable
		 * with mprotect.
		 */
		vma->vm_flags &= ~VM_MAYWRITE;
	} else
		goto bail;
	len = vma->vm_end - vma->vm_start;
	if (len > size) {
		ipath_cdbg(MM, "FAIL: reqlen %lx > %zx\n", len, size);
		ret = -EINVAL;
		goto bail;
	}

	vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
	vma->vm_ops = &ipath_file_vm_ops;
	vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
	ret = 0;

bail:
	return ret;
}
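
/*
 * Note on the path above: nothing is remapped here.  The kernel virtual
 * address is stashed in vm_pgoff (the same pgoff encoding as the
 * cvt_kvaddr() cookies) and ipath_file_vm_ops is installed, so each
 * page is resolved lazily in ipath_file_vma_fault() via
 * vmalloc_to_page() on first touch.
 */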

/**
 * ipath_mmap - mmap various structures into user space
 * @fp: the file pointer
 * @vma: the VM area
 *
 * We use this to have a shared buffer between the kernel and the user code
 * for the rcvhdr queue, egr buffers, and the per-port user regs and pio
 * buffers in the chip.  We have the open and close entries so we can bump
 * the ref count and keep the driver from being unloaded while still mapped.
 */
static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
{
	struct ipath_portdata *pd;
	struct ipath_devdata *dd;
	u64 pgaddr, ureg;
	unsigned piobufs, piocnt;
	int ret;

	pd = port_fp(fp);
	if (!pd) {
		ret = -EINVAL;
		goto bail;
	}
	dd = pd->port_dd;

	/*
	 * This is the ipath_do_user_init() code, mapping the shared buffers
	 * into the user process. The address referred to by vm_pgoff is the
	 * file offset passed via mmap().  For shared ports, this is the
	 * kernel vmalloc() address of the pages to share with the master.
	 * For non-shared or master ports, this is a physical address.
	 * We only do one mmap for each space mapped.
	 */
	pgaddr = vma->vm_pgoff << PAGE_SHIFT;

	/*
	 * Check for 0 in case one of the allocations failed, but user
	 * called mmap anyway.
	 */
	if (!pgaddr)  {
		ret = -EINVAL;
		goto bail;
	}

	ipath_cdbg(MM, "pgaddr %llx vm_start=%lx len %lx port %u:%u:%u\n",
		   (unsigned long long) pgaddr, vma->vm_start,
		   vma->vm_end - vma->vm_start, dd->ipath_unit,
		   pd->port_port, subport_fp(fp));

	/*
	 * Physical addresses must fit in 40 bits for our hardware.
	 * Check for kernel virtual addresses first, anything else must
	 * match a HW or memory address.
	 */
	ret = mmap_kvaddr(vma, pgaddr, pd, subport_fp(fp));
	if (ret) {
		if (ret > 0)
			ret = 0;
		goto bail;
	}

	ureg = dd->ipath_uregbase + dd->ipath_palign * pd->port_port;
	if (!pd->port_subport_cnt) {
		/* port is not shared */
		piocnt = dd->ipath_pbufsport;
		piobufs = pd->port_piobufs;
	} else if (!subport_fp(fp)) {
		/* caller is the master */
		piocnt = (dd->ipath_pbufsport / pd->port_subport_cnt) +
			 (dd->ipath_pbufsport % pd->port_subport_cnt);
		piobufs = pd->port_piobufs +
			dd->ipath_palign * (dd->ipath_pbufsport - piocnt);
	} else {
		unsigned slave = subport_fp(fp) - 1;

		/* caller is a slave */
		piocnt = dd->ipath_pbufsport / pd->port_subport_cnt;
		piobufs = pd->port_piobufs + dd->ipath_palign * piocnt * slave;
	}

	if (pgaddr == ureg)
		ret = mmap_ureg(vma, dd, ureg);
	else if (pgaddr == piobufs)
		ret = mmap_piobufs(vma, dd, pd, piobufs, piocnt);
	else if (pgaddr == dd->ipath_pioavailregs_phys)
		/* in-memory copy of pioavail registers */
		ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
				     (void *) dd->ipath_pioavailregs_dma,
				     "pioavail registers");
	else if (pgaddr == pd->port_rcvegr_phys)
		ret = mmap_rcvegrbufs(vma, pd);
	else if (pgaddr == (u64) pd->port_rcvhdrq_phys)
		/*
		 * The rcvhdrq itself; readonly except on HT (so have
		 * to allow writable mapping), multiple pages, contiguous
		 * from an i/o perspective.
		 */
		ret = ipath_mmap_mem(vma, pd, pd->port_rcvhdrq_size, 1,
				     pd->port_rcvhdrq,
				     "rcvhdrq");
	else if (pgaddr == (u64) pd->port_rcvhdrqtailaddr_phys)
		/* in-memory copy of rcvhdrq tail register */
		ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
				     pd->port_rcvhdrtail_kvaddr,
				     "rcvhdrq tail");
	else
		ret = -EINVAL;

	vma->vm_private_data = NULL;

	if (ret < 0)
		dev_info(&dd->pcidev->dev,
			 "Failure %d on off %llx len %lx\n",
			 -ret, (unsigned long long)pgaddr,
			 vma->vm_end - vma->vm_start);
bail:
	return ret;
}
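
/*
 * Summary of the offset dispatch above, one mmap per space: the ureg
 * page and PIO buffers are chip addresses and go through
 * io_remap_pfn_range(); the pioavail shadow, eager buffers, rcvhdrq and
 * its tail shadow are coherent DMA memory remapped with
 * remap_pfn_range() (via ipath_mmap_mem()); shared-port vmalloc pages
 * were already claimed by mmap_kvaddr().
 */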

static unsigned ipath_poll_hdrqfull(struct ipath_portdata *pd)
{
	unsigned pollflag = 0;

	if ((pd->poll_type & IPATH_POLL_TYPE_OVERFLOW) &&
	    pd->port_hdrqfull != pd->port_hdrqfull_poll) {
		pollflag |= POLLIN | POLLRDNORM;
		pd->port_hdrqfull_poll = pd->port_hdrqfull;
	}

	return pollflag;
}

static unsigned int ipath_poll_urgent(struct ipath_portdata *pd,
				      struct file *fp,
				      struct poll_table_struct *pt)
{
	unsigned pollflag = 0;
	struct ipath_devdata *dd;

	dd = pd->port_dd;

	/* variable access in ipath_poll_hdrqfull() needs this */
	rmb();
	pollflag = ipath_poll_hdrqfull(pd);

	if (pd->port_urgent != pd->port_urgent_poll) {
		pollflag |= POLLIN | POLLRDNORM;
		pd->port_urgent_poll = pd->port_urgent;
	}

	if (!pollflag) {
		/* this saves a spin_lock/unlock in interrupt handler... */
		set_bit(IPATH_PORT_WAITING_URG, &pd->port_flag);
		/* flush waiting flag so don't miss an event... */
		wmb();
		poll_wait(fp, &pd->port_wait, pt);
	}

	return pollflag;
}

static unsigned int ipath_poll_next(struct ipath_portdata *pd,
				    struct file *fp,
				    struct poll_table_struct *pt)
{
	u32 head;
	u32 tail;
	unsigned pollflag = 0;
	struct ipath_devdata *dd;

	dd = pd->port_dd;

	/* variable access in ipath_poll_hdrqfull() needs this */
	rmb();
	pollflag = ipath_poll_hdrqfull(pd);

	head = ipath_read_ureg32(dd, ur_rcvhdrhead, pd->port_port);
	if (pd->port_rcvhdrtail_kvaddr)
		tail = ipath_get_rcvhdrtail(pd);
	else
		tail = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);

	if (head != tail)
		pollflag |= POLLIN | POLLRDNORM;
	else {
		/* this saves a spin_lock/unlock in interrupt handler */
		set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
		/* flush waiting flag so we don't miss an event */
		wmb();

		set_bit(pd->port_port + dd->ipath_r_intravail_shift,
			&dd->ipath_rcvctrl);

		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
				 dd->ipath_rcvctrl);

		if (dd->ipath_rhdrhead_intr_off) /* arm rcv interrupt */
			ipath_write_ureg(dd, ur_rcvhdrhead,
					 dd->ipath_rhdrhead_intr_off | head,
					 pd->port_port);

		poll_wait(fp, &pd->port_wait, pt);
	}

	return pollflag;
}

static unsigned int ipath_poll(struct file *fp,
			       struct poll_table_struct *pt)
{
	struct ipath_portdata *pd;
	unsigned pollflag;

	pd = port_fp(fp);
	if (!pd)
		pollflag = 0;
	else if (pd->poll_type & IPATH_POLL_TYPE_URGENT)
		pollflag = ipath_poll_urgent(pd, fp, pt);
	else
		pollflag = ipath_poll_next(pd, fp, pt);

	return pollflag;
}

static int ipath_supports_subports(int user_swmajor, int user_swminor)
{
	/* no subport implementation prior to software version 1.3 */
	return (user_swmajor > 1) || (user_swminor >= 3);
}

static int ipath_compatible_subports(int user_swmajor, int user_swminor)
{
	/* this code is written long-hand for clarity */
	if (IPATH_USER_SWMAJOR != user_swmajor) {
		/* no promise of compatibility if major mismatch */
		return 0;
	}
	if (IPATH_USER_SWMAJOR == 1) {
		switch (IPATH_USER_SWMINOR) {
		case 0:
		case 1:
		case 2:
			/* no subport implementation so cannot be compatible */
			return 0;
		case 3:
			/* 3 is only compatible with itself */
			return user_swminor == 3;
		default:
			/* >= 4 are compatible (or are expected to be) */
			return user_swminor >= 4;
		}
	}
	/* make no promises yet for future major versions */
	return 0;
}

static int init_subports(struct ipath_devdata *dd,
			 struct ipath_portdata *pd,
			 const struct ipath_user_info *uinfo)
{
	int ret = 0;
	unsigned num_subports;
	size_t size;

	/*
	 * If the user is requesting zero subports,
	 * skip the subport allocation.
	 */
	if (uinfo->spu_subport_cnt <= 0)
		goto bail;

	/* Self-consistency check for ipath_compatible_subports() */
	if (ipath_supports_subports(IPATH_USER_SWMAJOR, IPATH_USER_SWMINOR) &&
	    !ipath_compatible_subports(IPATH_USER_SWMAJOR,
				       IPATH_USER_SWMINOR)) {
		dev_info(&dd->pcidev->dev,
			 "Inconsistent ipath_compatible_subports()\n");
		goto bail;
	}

	/* Check for subport compatibility */
	if (!ipath_compatible_subports(uinfo->spu_userversion >> 16,
				       uinfo->spu_userversion & 0xffff)) {
		dev_info(&dd->pcidev->dev,
			 "Mismatched user version (%d.%d) and driver "
			 "version (%d.%d) while port sharing. Ensure "
			 "that driver and library are from the same "
			 "release.\n",
			 (int) (uinfo->spu_userversion >> 16),
			 (int) (uinfo->spu_userversion & 0xffff),
			 IPATH_USER_SWMAJOR,
			 IPATH_USER_SWMINOR);
		goto bail;
	}
	if (uinfo->spu_subport_cnt > INFINIPATH_MAX_SUBPORT) {
		ret = -EINVAL;
		goto bail;
	}

	num_subports = uinfo->spu_subport_cnt;
	pd->subport_uregbase = vmalloc(PAGE_SIZE * num_subports);
	if (!pd->subport_uregbase) {
		ret = -ENOMEM;
		goto bail;
	}
	/* Note: pd->port_rcvhdrq_size isn't initialized yet. */
	size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
		     sizeof(u32), PAGE_SIZE) * num_subports;
	pd->subport_rcvhdr_base = vmalloc(size);
	if (!pd->subport_rcvhdr_base) {
		ret = -ENOMEM;
		goto bail_ureg;
	}

	pd->subport_rcvegrbuf = vmalloc(pd->port_rcvegrbuf_chunks *
					pd->port_rcvegrbuf_size *
					num_subports);
	if (!pd->subport_rcvegrbuf) {
		ret = -ENOMEM;
		goto bail_rhdr;
	}

	pd->port_subport_cnt = uinfo->spu_subport_cnt;
	pd->port_subport_id = uinfo->spu_subport_id;
	pd->active_slaves = 1;
	set_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
	memset(pd->subport_uregbase, 0, PAGE_SIZE * num_subports);
	memset(pd->subport_rcvhdr_base, 0, size);
	memset(pd->subport_rcvegrbuf, 0, pd->port_rcvegrbuf_chunks *
				       pd->port_rcvegrbuf_size *
				       num_subports);
	goto bail;

bail_rhdr:
	vfree(pd->subport_rcvhdr_base);
bail_ureg:
	vfree(pd->subport_uregbase);
	pd->subport_uregbase = NULL;
bail:
	return ret;
}
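
/*
 * Layout note: each shared-port scratch area allocated above is a
 * simple array indexed by subport, e.g. subport i's uregbase page
 * starts at subport_uregbase + PAGE_SIZE * i and its rcvhdrq copy at
 * subport_rcvhdr_base + port_rcvhdrq_size * i.  mmap_kvaddr() relies
 * on exactly this layout.
 */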

static int try_alloc_port(struct ipath_devdata *dd, int port,
			  struct file *fp,
			  const struct ipath_user_info *uinfo)
{
	struct ipath_portdata *pd;
	int ret;

	if (!(pd = dd->ipath_pd[port])) {
		void *ptmp;

		pd = kzalloc(sizeof(struct ipath_portdata), GFP_KERNEL);

		/*
		 * Allocate memory for use in ipath_tid_update() just once
		 * at open, not per call.  Reduces cost of expected send
		 * setup.
		 */
		ptmp = kmalloc(dd->ipath_rcvtidcnt * sizeof(u16) +
			       dd->ipath_rcvtidcnt * sizeof(struct page **),
			       GFP_KERNEL);
		if (!pd || !ptmp) {
			ipath_dev_err(dd, "Unable to allocate portdata "
				      "memory, failing open\n");
			ret = -ENOMEM;
			kfree(pd);
			kfree(ptmp);
			goto bail;
		}
		dd->ipath_pd[port] = pd;
		dd->ipath_pd[port]->port_port = port;
		dd->ipath_pd[port]->port_dd = dd;
		dd->ipath_pd[port]->port_tid_pg_list = ptmp;
		init_waitqueue_head(&dd->ipath_pd[port]->port_wait);
	}
	if (!pd->port_cnt) {
		pd->userversion = uinfo->spu_userversion;
		init_user_egr_sizes(pd);
		if ((ret = init_subports(dd, pd, uinfo)) != 0)
			goto bail;
		ipath_cdbg(PROC, "%s[%u] opened unit:port %u:%u\n",
			   current->comm, current->pid, dd->ipath_unit,
			   port);
		pd->port_cnt = 1;
		port_fp(fp) = pd;
		pd->port_pid = current->pid;
		strncpy(pd->port_comm, current->comm, sizeof(pd->port_comm));
		ipath_stats.sps_ports++;
		ret = 0;
	} else
		ret = -EBUSY;

bail:
	return ret;
}

static inline int usable(struct ipath_devdata *dd)
{
	return dd &&
		(dd->ipath_flags & IPATH_PRESENT) &&
		dd->ipath_kregbase &&
		dd->ipath_lid &&
		!(dd->ipath_flags & (IPATH_LINKDOWN | IPATH_DISABLED
				     | IPATH_LINKUNK));
}

static int find_free_port(int unit, struct file *fp,
			  const struct ipath_user_info *uinfo)
{
	struct ipath_devdata *dd = ipath_lookup(unit);
	int ret, i;

	if (!dd) {
		ret = -ENODEV;
		goto bail;
	}

	if (!usable(dd)) {
		ret = -ENETDOWN;
		goto bail;
	}

	for (i = 1; i < dd->ipath_cfgports; i++) {
		ret = try_alloc_port(dd, i, fp, uinfo);
		if (ret != -EBUSY)
			goto bail;
	}
	ret = -EBUSY;

bail:
	return ret;
}

static int find_best_unit(struct file *fp,
			  const struct ipath_user_info *uinfo)
{
	int ret = 0, i, prefunit = -1, devmax;
	int maxofallports, npresent, nup;
	int ndev;

	devmax = ipath_count_units(&npresent, &nup, &maxofallports);

	/*
	 * This code is present to allow a knowledgeable person to
	 * specify the layout of processes to processors before opening
	 * this driver, and then we'll assign the process to the "closest"
	 * InfiniPath chip to that processor (we assume reasonable connectivity,
	 * for now).  This code assumes that if affinity has been set
	 * before this point, that at most one cpu is set; for now this
	 * is reasonable.  I check for both cpus_empty() and cpus_full(),
	 * in case some kernel variant sets none of the bits when no
	 * affinity is set.  2.6.11 and 12 kernels have all present
	 * cpus set.  Some day we'll have to fix it up further to handle
	 * a cpu subset.  This algorithm fails for two HT chips connected
	 * in tunnel fashion.  Eventually this needs real topology
	 * information.  There may be some issues with dual core numbering
	 * as well.  This needs more work prior to release.
	 */
	if (!cpus_empty(current->cpus_allowed) &&
	    !cpus_full(current->cpus_allowed)) {
		int ncpus = num_online_cpus(), curcpu = -1, nset = 0;
		for (i = 0; i < ncpus; i++)
			if (cpu_isset(i, current->cpus_allowed)) {
				ipath_cdbg(PROC, "%s[%u] affinity set for "
					   "cpu %d/%d\n", current->comm,
					   current->pid, i, ncpus);
				curcpu = i;
				nset++;
			}
		if (curcpu != -1 && nset != ncpus) {
			if (npresent) {
				prefunit = curcpu / (ncpus / npresent);
				ipath_cdbg(PROC,"%s[%u] %d chips, %d cpus, "
					  "%d cpus/chip, select unit %d\n",
					  current->comm, current->pid,
					  npresent, ncpus, ncpus / npresent,
					  prefunit);
			}
		}
	}

	/*
	 * user ports start at 1, kernel port is 0
	 * For now, we do round-robin access across all chips
	 */

	if (prefunit != -1)
		devmax = prefunit + 1;
recheck:
	for (i = 1; i < maxofallports; i++) {
		for (ndev = prefunit != -1 ? prefunit : 0; ndev < devmax;
		     ndev++) {
			struct ipath_devdata *dd = ipath_lookup(ndev);

			if (!usable(dd))
				continue; /* can't use this unit */
			if (i >= dd->ipath_cfgports)
				/*
				 * Maxed out on users of this unit. Try
				 * next.
				 */
				continue;
			ret = try_alloc_port(dd, i, fp, uinfo);
			if (!ret)
				goto done;
		}
	}

	if (npresent) {
		if (nup == 0) {
			ret = -ENETDOWN;
			ipath_dbg("No ports available (none initialized "
				  "and ready)\n");
		} else {
			if (prefunit > 0) {
				/* if started above 0, retry from 0 */
				ipath_cdbg(PROC,
					   "%s[%u] no ports on prefunit "
					   "%d, clear and re-check\n",
					   current->comm, current->pid,
					   prefunit);
				devmax = ipath_count_units(NULL, NULL,
							   NULL);
				prefunit = -1;
				goto recheck;
			}
			ret = -EBUSY;
			ipath_dbg("No ports available\n");
		}
	} else {
		ret = -ENXIO;
		ipath_dbg("No boards found\n");
	}

done:
	return ret;
}

static int find_shared_port(struct file *fp,
			    const struct ipath_user_info *uinfo)
{
	int devmax, ndev, i;
	int ret = 0;

	devmax = ipath_count_units(NULL, NULL, NULL);

	for (ndev = 0; ndev < devmax; ndev++) {
		struct ipath_devdata *dd = ipath_lookup(ndev);

		if (!dd)
			continue;
		for (i = 1; i < dd->ipath_cfgports; i++) {
			struct ipath_portdata *pd = dd->ipath_pd[i];

			/* Skip ports which are not yet open */
			if (!pd || !pd->port_cnt)
				continue;
			/* Skip port if it doesn't match the requested one */
			if (pd->port_subport_id != uinfo->spu_subport_id)
				continue;
			/* Verify the sharing process matches the master */
			if (pd->port_subport_cnt != uinfo->spu_subport_cnt ||
			    pd->userversion != uinfo->spu_userversion ||
			    pd->port_cnt >= pd->port_subport_cnt) {
				ret = -EINVAL;
				goto done;
			}
			port_fp(fp) = pd;
			subport_fp(fp) = pd->port_cnt++;
			tidcursor_fp(fp) = 0;
			pd->active_slaves |= 1 << subport_fp(fp);
			ipath_cdbg(PROC,
				   "%s[%u] %u sharing %s[%u] unit:port %u:%u\n",
				   current->comm, current->pid,
				   subport_fp(fp),
				   pd->port_comm, pd->port_pid,
				   dd->ipath_unit, pd->port_port);
			ret = 1;
			goto done;
		}
	}

done:
	return ret;
}

static int ipath_open(struct inode *in, struct file *fp)
{
	/* The real work is performed later in ipath_assign_port() */
	fp->private_data = kzalloc(sizeof(struct ipath_filedata), GFP_KERNEL);
	return fp->private_data ? 0 : -ENOMEM;
}

/* Get port early, so can set affinity prior to memory allocation */
static int ipath_assign_port(struct file *fp,
			     const struct ipath_user_info *uinfo)
{
	int ret;
	int i_minor;
	unsigned swmajor, swminor;

	/* Check to be sure we haven't already initialized this file */
	if (port_fp(fp)) {
		ret = -EINVAL;
		goto done;
	}

	/* for now, if major version is different, bail */
	swmajor = uinfo->spu_userversion >> 16;
	if (swmajor != IPATH_USER_SWMAJOR) {
		ipath_dbg("User major version %d not same as driver "
			  "major %d\n", uinfo->spu_userversion >> 16,
			  IPATH_USER_SWMAJOR);
		ret = -ENODEV;
		goto done;
	}

	swminor = uinfo->spu_userversion & 0xffff;
	if (swminor != IPATH_USER_SWMINOR)
		ipath_dbg("User minor version %d not same as driver "
			  "minor %d\n", swminor, IPATH_USER_SWMINOR);

	mutex_lock(&ipath_mutex);

	if (ipath_compatible_subports(swmajor, swminor) &&
	    uinfo->spu_subport_cnt &&
	    (ret = find_shared_port(fp, uinfo))) {
		mutex_unlock(&ipath_mutex);
		if (ret > 0)
			ret = 0;
		goto done;
	}

	i_minor = iminor(fp->f_path.dentry->d_inode) - IPATH_USER_MINOR_BASE;
	ipath_cdbg(VERBOSE, "open on dev %lx (minor %d)\n",
		   (long)fp->f_path.dentry->d_inode->i_rdev, i_minor);

	if (i_minor)
		ret = find_free_port(i_minor - 1, fp, uinfo);
	else
		ret = find_best_unit(fp, uinfo);

	mutex_unlock(&ipath_mutex);

done:
	return ret;
}

static int ipath_do_user_init(struct file *fp,
			      const struct ipath_user_info *uinfo)
{
	int ret;
	struct ipath_portdata *pd = port_fp(fp);
	struct ipath_devdata *dd;
	u32 head32;

	/* Subports don't need to initialize anything since master did it. */
	if (subport_fp(fp)) {
		ret = wait_event_interruptible(pd->port_wait,
			!test_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag));
		goto done;
	}

	dd = pd->port_dd;

	if (uinfo->spu_rcvhdrsize) {
		ret = ipath_setrcvhdrsize(dd, uinfo->spu_rcvhdrsize);
		if (ret)
			goto done;
	}

	/* for now we do nothing with rcvhdrcnt: uinfo->spu_rcvhdrcnt */

	/* for right now, kernel piobufs are at end, so port 1 is at 0 */
	pd->port_piobufs = dd->ipath_piobufbase +
		dd->ipath_pbufsport * (pd->port_port - 1) * dd->ipath_palign;
	ipath_cdbg(VERBOSE, "Set base of piobufs for port %u to 0x%x\n",
		   pd->port_port, pd->port_piobufs);

	/*
	 * Now allocate the rcvhdr Q and eager TIDs; skip the TID
	 * array for time being.  If pd->port_port > chip-supported,
	 * we need to do extra stuff here to handle by handling overflow
	 * through port 0, someday
	 */
	ret = ipath_create_rcvhdrq(dd, pd);
	if (!ret)
		ret = ipath_create_user_egr(pd);
	if (ret)
		goto done;

	/*
	 * set the eager head register for this port to the current values
	 * of the tail pointers, since we don't know if they were
	 * updated on last use of the port.
	 */
	head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port);
	ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port);
	dd->ipath_lastegrheads[pd->port_port] = -1;
	dd->ipath_lastrcvhdrqtails[pd->port_port] = -1;
	ipath_cdbg(VERBOSE, "Wrote port%d egrhead %x from tail regs\n",
		pd->port_port, head32);
	pd->port_tidcursor = 0;	/* start at beginning after open */

	/* initialize poll variables... */
	pd->port_urgent = 0;
	pd->port_urgent_poll = 0;
	pd->port_hdrqfull_poll = pd->port_hdrqfull;

	/*
	 * now enable the port; the tail registers will be written to memory
	 * by the chip as soon as it sees the write to
	 * dd->ipath_kregs->kr_rcvctrl.  The update only happens on
	 * transition from 0 to 1, so clear it first, then set it as part of
	 * enabling the port.  This will (very briefly) affect any other
	 * open ports, but it shouldn't be long enough to be an issue.
	 * We explicitly set the in-memory copy to 0 beforehand, so we don't
	 * have to wait to be sure the DMA update has happened.
	 */
	if (pd->port_rcvhdrtail_kvaddr)
		ipath_clear_rcvhdrtail(pd);
	set_bit(dd->ipath_r_portenable_shift + pd->port_port,
		&dd->ipath_rcvctrl);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			dd->ipath_rcvctrl &
			~(1ULL << dd->ipath_r_tailupd_shift));
	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			dd->ipath_rcvctrl);
	/* Notify any waiting slaves */
	if (pd->port_subport_cnt) {
		clear_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
		wake_up(&pd->port_wait);
	}
done:
	return ret;
}

/**
 * unlock_expected_tids - unlock any expected TID entries port still had in use
 * @pd: port
 *
 * We don't actually update the chip here, because we do a bulk update
 * below, using ipath_f_clear_tids.
 */
static void unlock_expected_tids(struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	int port_tidbase = pd->port_port * dd->ipath_rcvtidcnt;
	int i, cnt = 0, maxtid = port_tidbase + dd->ipath_rcvtidcnt;

	ipath_cdbg(VERBOSE, "Port %u unlocking any locked expTID pages\n",
		   pd->port_port);
	for (i = port_tidbase; i < maxtid; i++) {
		if (!dd->ipath_pageshadow[i])
			continue;

		pci_unmap_page(dd->pcidev, dd->ipath_physshadow[i],
			PAGE_SIZE, PCI_DMA_FROMDEVICE);
		ipath_release_user_pages_on_close(&dd->ipath_pageshadow[i],
						  1);
		dd->ipath_pageshadow[i] = NULL;
		cnt++;
		ipath_stats.sps_pageunlocks++;
	}
	if (cnt)
		ipath_cdbg(VERBOSE, "Port %u locked %u expTID entries\n",
			   pd->port_port, cnt);

	if (ipath_stats.sps_pagelocks || ipath_stats.sps_pageunlocks)
		ipath_cdbg(VERBOSE, "%llu pages locked, %llu unlocked\n",
			   (unsigned long long) ipath_stats.sps_pagelocks,
			   (unsigned long long)
			   ipath_stats.sps_pageunlocks);
}

static int ipath_close(struct inode *in, struct file *fp)
{
	int ret = 0;
	struct ipath_filedata *fd;
	struct ipath_portdata *pd;
	struct ipath_devdata *dd;
	unsigned port;

	ipath_cdbg(VERBOSE, "close on dev %lx, private data %p\n",
		   (long)in->i_rdev, fp->private_data);

	mutex_lock(&ipath_mutex);

	fd = (struct ipath_filedata *) fp->private_data;
	fp->private_data = NULL;
	pd = fd->pd;
	if (!pd) {
		mutex_unlock(&ipath_mutex);
		goto bail;
	}
	if (--pd->port_cnt) {
		/*
		 * XXX If the master closes the port before the slave(s),
		 * revoke the mmap for the eager receive queue so
		 * the slave(s) don't wait for receive data forever.
		 */
		pd->active_slaves &= ~(1 << fd->subport);
		mutex_unlock(&ipath_mutex);
		goto bail;
	}
	port = pd->port_port;
	dd = pd->port_dd;

	if (pd->port_hdrqfull) {
		ipath_cdbg(PROC, "%s[%u] had %u rcvhdrqfull errors "
			   "during run\n", pd->port_comm, pd->port_pid,
			   pd->port_hdrqfull);
		pd->port_hdrqfull = 0;
	}

	if (pd->port_rcvwait_to || pd->port_piowait_to
	    || pd->port_rcvnowait || pd->port_pionowait) {
		ipath_cdbg(VERBOSE, "port%u, %u rcv, %u pio wait timeo; "
			   "%u rcv %u, pio already\n",
			   pd->port_port, pd->port_rcvwait_to,
			   pd->port_piowait_to, pd->port_rcvnowait,
			   pd->port_pionowait);
		pd->port_rcvwait_to = pd->port_piowait_to =
			pd->port_rcvnowait = pd->port_pionowait = 0;
	}
	if (pd->port_flag) {
		ipath_dbg("port %u port_flag still set to 0x%lx\n",
			  pd->port_port, pd->port_flag);
		pd->port_flag = 0;
	}

	if (dd->ipath_kregbase) {
		int i;
		/* atomically clear receive enable port and intr avail. */
		clear_bit(dd->ipath_r_portenable_shift + port,
			  &dd->ipath_rcvctrl);
		clear_bit(pd->port_port + dd->ipath_r_intravail_shift,
			  &dd->ipath_rcvctrl);
		ipath_write_kreg( dd, dd->ipath_kregs->kr_rcvctrl,
			dd->ipath_rcvctrl);
		/* and read back from chip to be sure that nothing
		 * else is in flight when we do the rest */
		(void)ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);

		/* clean up the pkeys for this port user */
		ipath_clean_part_key(pd, dd);
		/*
		 * be paranoid, and never write 0's to these, just use an
		 * unused part of the port 0 tail page.  Of course,
		 * rcvhdraddr points to a large chunk of memory, so this
		 * could still trash things, but at least it won't trash
		 * page 0, and by disabling the port, it should stop "soon",
		 * even if a packet or two is already in flight after we
		 * disabled the port.
		 */
		ipath_write_kreg_port(dd,
			dd->ipath_kregs->kr_rcvhdrtailaddr, port,
			dd->ipath_dummy_hdrq_phys);
		ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
			pd->port_port, dd->ipath_dummy_hdrq_phys);

		i = dd->ipath_pbufsport * (port - 1);
		ipath_disarm_piobufs(dd, i, dd->ipath_pbufsport);

		dd->ipath_f_clear_tids(dd, pd->port_port);

		if (dd->ipath_pageshadow)
			unlock_expected_tids(pd);
		ipath_stats.sps_ports--;
		ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n",
			   pd->port_comm, pd->port_pid,
			   dd->ipath_unit, port);
	}

	pd->port_pid = 0;
	dd->ipath_pd[pd->port_port] = NULL; /* before releasing mutex */
	mutex_unlock(&ipath_mutex);
	ipath_free_pddata(dd, pd); /* after releasing the mutex */

bail:
	kfree(fd);
	return ret;
}

static int ipath_port_info(struct ipath_portdata *pd, u16 subport,
			   struct ipath_port_info __user *uinfo)
{
	struct ipath_port_info info;
	int nup;
	int ret;
	size_t sz;

	(void) ipath_count_units(NULL, &nup, NULL);
	info.num_active = nup;
	info.unit = pd->port_dd->ipath_unit;
	info.port = pd->port_port;
	info.subport = subport;
	/* Don't return new fields if old library opened the port. */
	if (ipath_supports_subports(pd->userversion >> 16,
				    pd->userversion & 0xffff)) {
		/* Number of user ports available for this device. */
		info.num_ports = pd->port_dd->ipath_cfgports - 1;
		info.num_subports = pd->port_subport_cnt;
		sz = sizeof(info);
	} else
		sz = sizeof(info) - 2 * sizeof(u16);

	if (copy_to_user(uinfo, &info, sz)) {
		ret = -EFAULT;
		goto bail;
	}
	ret = 0;

bail:
	return ret;
}

static int ipath_get_slave_info(struct ipath_portdata *pd,
				void __user *slave_mask_addr)
{
	int ret = 0;

	if (copy_to_user(slave_mask_addr, &pd->active_slaves, sizeof(u32)))
		ret = -EFAULT;
	return ret;
}
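
/*
 * Toggle the chip's automatic update of the in-memory PIO-buffer-
 * available shadow: clearing INFINIPATH_S_PIOBUFAVAILUPD and setting it
 * again forces a fresh DMA of the shadow copy; the scratch reads in
 * between are believed to serialize the two sendctrl writes.
 */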
static int ipath_force_pio_avail_update(struct ipath_devdata *dd)
{
	unsigned long flags;

	spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
		dd->ipath_sendctrl & ~INFINIPATH_S_PIOBUFAVAILUPD);
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl, dd->ipath_sendctrl);
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);

	return 0;
}

static ssize_t ipath_write(struct file *fp, const char __user *data,
			   size_t count, loff_t *off)
{
	const struct ipath_cmd __user *ucmd;
	struct ipath_portdata *pd;
	const void __user *src;
	size_t consumed, copy;
	struct ipath_cmd cmd;
	ssize_t ret = 0;
	void *dest;

	if (count < sizeof(cmd.type)) {
		ret = -EINVAL;
		goto bail;
	}

	ucmd = (const struct ipath_cmd __user *) data;

	if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
		ret = -EFAULT;
		goto bail;
	}

	consumed = sizeof(cmd.type);

	switch (cmd.type) {
	case IPATH_CMD_ASSIGN_PORT:
	case __IPATH_CMD_USER_INIT:
	case IPATH_CMD_USER_INIT:
		copy = sizeof(cmd.cmd.user_info);
		dest = &cmd.cmd.user_info;
		src = &ucmd->cmd.user_info;
		break;
	case IPATH_CMD_RECV_CTRL:
		copy = sizeof(cmd.cmd.recv_ctrl);
		dest = &cmd.cmd.recv_ctrl;
		src = &ucmd->cmd.recv_ctrl;
		break;
	case IPATH_CMD_PORT_INFO:
		copy = sizeof(cmd.cmd.port_info);
		dest = &cmd.cmd.port_info;
		src = &ucmd->cmd.port_info;
		break;
	case IPATH_CMD_TID_UPDATE:
	case IPATH_CMD_TID_FREE:
		copy = sizeof(cmd.cmd.tid_info);
		dest = &cmd.cmd.tid_info;
		src = &ucmd->cmd.tid_info;
		break;
	case IPATH_CMD_SET_PART_KEY:
		copy = sizeof(cmd.cmd.part_key);
		dest = &cmd.cmd.part_key;
		src = &ucmd->cmd.part_key;
		break;
	case __IPATH_CMD_SLAVE_INFO:
		copy = sizeof(cmd.cmd.slave_mask_addr);
		dest = &cmd.cmd.slave_mask_addr;
		src = &ucmd->cmd.slave_mask_addr;
		break;
	case IPATH_CMD_PIOAVAILUPD:	/* force an update of PIOAvail reg */
		copy = 0;
		src = NULL;
		dest = NULL;
		break;
	case IPATH_CMD_POLL_TYPE:
		copy = sizeof(cmd.cmd.poll_type);
		dest = &cmd.cmd.poll_type;
		src = &ucmd->cmd.poll_type;
		break;
	default:
		ret = -EINVAL;
		goto bail;
	}

	if (copy) {
		if ((count - consumed) < copy) {
			ret = -EINVAL;
			goto bail;
		}

		if (copy_from_user(dest, src, copy)) {
			ret = -EFAULT;
			goto bail;
		}

		consumed += copy;
	}

	pd = port_fp(fp);
	if (!pd && cmd.type != __IPATH_CMD_USER_INIT &&
	    cmd.type != IPATH_CMD_ASSIGN_PORT) {
		ret = -EINVAL;
		goto bail;
	}

	switch (cmd.type) {
	case IPATH_CMD_ASSIGN_PORT:
		ret = ipath_assign_port(fp, &cmd.cmd.user_info);
		if (ret)
			goto bail;
		break;
	case __IPATH_CMD_USER_INIT:
		/* backwards compatibility, get port first */
		ret = ipath_assign_port(fp, &cmd.cmd.user_info);
		if (ret)
			goto bail;
		/* and fall through to current version. */
	case IPATH_CMD_USER_INIT:
		ret = ipath_do_user_init(fp, &cmd.cmd.user_info);
		if (ret)
			goto bail;
		ret = ipath_get_base_info(
			fp, (void __user *) (unsigned long)
			cmd.cmd.user_info.spu_base_info,
			cmd.cmd.user_info.spu_base_info_size);
		break;
	case IPATH_CMD_RECV_CTRL:
		ret = ipath_manage_rcvq(pd, subport_fp(fp), cmd.cmd.recv_ctrl);
		break;
	case IPATH_CMD_PORT_INFO:
		ret = ipath_port_info(pd, subport_fp(fp),
				      (struct ipath_port_info __user *)
				      (unsigned long) cmd.cmd.port_info);
		break;
	case IPATH_CMD_TID_UPDATE:
		ret = ipath_tid_update(pd, fp, &cmd.cmd.tid_info);
		break;
	case IPATH_CMD_TID_FREE:
		ret = ipath_tid_free(pd, subport_fp(fp), &cmd.cmd.tid_info);
		break;
	case IPATH_CMD_SET_PART_KEY:
		ret = ipath_set_part_key(pd, cmd.cmd.part_key);
		break;
	case __IPATH_CMD_SLAVE_INFO:
		ret = ipath_get_slave_info(pd,
					   (void __user *) (unsigned long)
					   cmd.cmd.slave_mask_addr);
		break;
	case IPATH_CMD_PIOAVAILUPD:
		ret = ipath_force_pio_avail_update(pd->port_dd);
		break;
	case IPATH_CMD_POLL_TYPE:
		pd->poll_type = cmd.cmd.poll_type;
		break;
	}

	if (ret >= 0)
		ret = consumed;

bail:
	return ret;
}
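
#if 0
/*
 * Illustrative userspace sketch, not driver code: issuing one of the
 * commands parsed above.  0x8001 is just an example partition key;
 * assumes <string.h> and <unistd.h> in userspace.
 */
static int example_set_pkey(int fd, unsigned short pkey)
{
	struct ipath_cmd c;

	memset(&c, 0, sizeof(c));
	c.type = IPATH_CMD_SET_PART_KEY;
	c.cmd.part_key = pkey;	/* e.g. 0x8001 */
	/* a successful write returns the number of bytes consumed */
	return write(fd, &c, sizeof(c)) < 0 ? -1 : 0;
}
#endif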

static struct class *ipath_class;

static int init_cdev(int minor, char *name, const struct file_operations *fops,
		     struct cdev **cdevp, struct class_device **class_devp)
{
	const dev_t dev = MKDEV(IPATH_MAJOR, minor);
	struct cdev *cdev = NULL;
	struct class_device *class_dev = NULL;
	int ret;

	cdev = cdev_alloc();
	if (!cdev) {
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Could not allocate cdev for minor %d, %s\n",
		       minor, name);
		ret = -ENOMEM;
		goto done;
	}

	cdev->owner = THIS_MODULE;
	cdev->ops = fops;
	kobject_set_name(&cdev->kobj, name);

	ret = cdev_add(cdev, dev, 1);
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Could not add cdev for minor %d, %s (err %d)\n",
		       minor, name, -ret);
		goto err_cdev;
	}

	class_dev = class_device_create(ipath_class, NULL, dev, NULL, name);

	if (IS_ERR(class_dev)) {
		ret = PTR_ERR(class_dev);
		printk(KERN_ERR IPATH_DRV_NAME ": Could not create "
		       "class_dev for minor %d, %s (err %d)\n",
		       minor, name, -ret);
		goto err_cdev;
	}

	goto done;

err_cdev:
	cdev_del(cdev);
	cdev = NULL;

done:
	if (ret >= 0) {
		*cdevp = cdev;
		*class_devp = class_dev;
	}

	return ret;
}

int ipath_cdev_init(int minor, char *name, const struct file_operations *fops,
		    struct cdev **cdevp, struct class_device **class_devp)
{
	return init_cdev(minor, name, fops, cdevp, class_devp);
}

static void cleanup_cdev(struct cdev **cdevp,
			 struct class_device **class_devp)
{
	struct class_device *class_dev = *class_devp;

	if (class_dev) {
		class_device_unregister(class_dev);
		*class_devp = NULL;
	}

	if (*cdevp) {
		cdev_del(*cdevp);
		*cdevp = NULL;
	}
}

void ipath_cdev_cleanup(struct cdev **cdevp,
			struct class_device **class_devp)
{
	cleanup_cdev(cdevp, class_devp);
}

static struct cdev *wildcard_cdev;
static struct class_device *wildcard_class_dev;

static const dev_t dev = MKDEV(IPATH_MAJOR, 0);

static int user_init(void)
{
	int ret;

	ret = register_chrdev_region(dev, IPATH_NMINORS, IPATH_DRV_NAME);
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME ": Could not register "
		       "chrdev region (err %d)\n", -ret);
		goto done;
	}

	ipath_class = class_create(THIS_MODULE, IPATH_DRV_NAME);

	if (IS_ERR(ipath_class)) {
		ret = PTR_ERR(ipath_class);
		printk(KERN_ERR IPATH_DRV_NAME ": Could not create "
		       "device class (err %d)\n", -ret);
		goto bail_chrdev;
	}

	goto done;

bail_chrdev:
	unregister_chrdev_region(dev, IPATH_NMINORS);

done:
	return ret;
}

static void user_cleanup(void)
{
	if (ipath_class) {
		class_destroy(ipath_class);
		ipath_class = NULL;
	}

	unregister_chrdev_region(dev, IPATH_NMINORS);
}

static atomic_t user_count = ATOMIC_INIT(0);
static atomic_t user_setup = ATOMIC_INIT(0);

int ipath_user_add(struct ipath_devdata *dd)
{
	char name[10];
	int ret;

	if (atomic_inc_return(&user_count) == 1) {
		ret = user_init();
		if (ret < 0) {
			ipath_dev_err(dd, "Unable to set up user support: "
				      "error %d\n", -ret);
			goto bail;
		}
		ret = init_cdev(0, "ipath", &ipath_file_ops, &wildcard_cdev,
				&wildcard_class_dev);
		if (ret < 0) {
			ipath_dev_err(dd, "Could not create wildcard "
				      "minor: error %d\n", -ret);
			goto bail_user;
		}

		atomic_set(&user_setup, 1);
	}

	snprintf(name, sizeof(name), "ipath%d", dd->ipath_unit);

	ret = init_cdev(dd->ipath_unit + 1, name, &ipath_file_ops,
			&dd->user_cdev, &dd->user_class_dev);
	if (ret < 0)
		ipath_dev_err(dd, "Could not create user minor %d, %s\n",
			      dd->ipath_unit + 1, name);

	goto bail;

bail_user:
	user_cleanup();
bail:
	return ret;
}

void ipath_user_remove(struct ipath_devdata *dd)
{
	cleanup_cdev(&dd->user_cdev, &dd->user_class_dev);

	if (atomic_dec_return(&user_count) == 0) {
		if (atomic_read(&user_setup) == 0)
			goto bail;

		cleanup_cdev(&wildcard_cdev, &wildcard_class_dev);
		user_cleanup();

		atomic_set(&user_setup, 0);
	}
bail:
	return;
}