/*
 * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/spinlock.h>

#include "ipath_verbs.h"
#include "ipath_kernel.h"
/*
 * Convert the AETH RNR timeout code into the number of milliseconds.
 */
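/*
 * Background note: the index into this table is the 5-bit RNR NAK timer
 * code carried in the AETH. Per the InfiniBand spec that encoding is
 * non-monotonic (code 0 stands for the largest delay, roughly 655 ms),
 * which is why a lookup table is used here rather than a formula.
 */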
const u32 ib_ipath_rnr_table[32] = {
/**
 * ipath_insert_rnr_queue - put QP on the RNR timeout list for the device
 * @qp: the QP
 *
 * XXX Use a simple list for now. We might need a priority
 * queue if we have lots of QPs waiting for RNR timeouts
 * but that should be rare.
 */
void ipath_insert_rnr_queue(struct ipath_qp *qp)
{
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
        unsigned long flags;

        spin_lock_irqsave(&dev->pending_lock, flags);
        if (list_empty(&dev->rnrwait))
                list_add(&qp->timerwait, &dev->rnrwait);
        else {
                struct list_head *l = &dev->rnrwait;
                struct ipath_qp *nqp = list_entry(l->next, struct ipath_qp,
                                                  timerwait);

                while (qp->s_rnr_timeout >= nqp->s_rnr_timeout) {
                        qp->s_rnr_timeout -= nqp->s_rnr_timeout;
                        l = l->next;
                        if (l->next == &dev->rnrwait) {
                                nqp = NULL;
                                break;
                        }
                        nqp = list_entry(l->next, struct ipath_qp,
                                         timerwait);
                }
                if (nqp)
                        nqp->s_rnr_timeout -= qp->s_rnr_timeout;
                list_add(&qp->timerwait, l);
        }
        spin_unlock_irqrestore(&dev->pending_lock, flags);
}
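/*
 * Illustrative note: entries on dev->rnrwait store their timeout relative to
 * the entries ahead of them, so the insertion loop above subtracts each
 * predecessor's remaining time while walking the list. For example, a QP
 * inserted with a 5 ms timeout behind entries holding 2 ms and 1 ms
 * (relative) ends up stored with s_rnr_timeout == 2 ms, and any entry after
 * the insertion point is reduced by that amount so its absolute expiry time
 * is unchanged.
 */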
/**
 * ipath_init_sge - validate an RWQE and fill in the SGE state
 */
int ipath_init_sge(struct ipath_qp *qp, struct ipath_rwqe *wqe,
                   u32 *lengthp, struct ipath_sge_state *ss)
        for (i = j = 0; i < wqe->num_sge; i++) {
                if (wqe->sg_list[i].length == 0)
                if (!ipath_lkey_ok(qp, j ? &ss->sg_list[j - 1] : &ss->sge,
                                   &wqe->sg_list[i], IB_ACCESS_LOCAL_WRITE))
                *lengthp += wqe->sg_list[i].length;

        memset(&wc, 0, sizeof(wc));
        wc.wr_id = wqe->wr_id;
        wc.status = IB_WC_LOC_PROT_ERR;
        wc.opcode = IB_WC_RECV;
        /* Signal solicited completion event. */
        ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, 1);
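        /*
         * Note: passing 1 as the "solicited" argument above makes the
         * completion behave as if the solicited bit had been set, so a
         * consumer that armed the CQ for solicited-only notification still
         * learns about the dropped receive right away.
         */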
/**
 * ipath_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update wr_id only, not SGEs
 *
 * Return 0 if no RWQE is available, otherwise return 1.
 *
 * Can be called from interrupt level.
 */
int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
        struct ipath_rwq *wq;
        struct ipath_srq *srq;
        struct ipath_rwqe *wqe;
        void (*handler)(struct ib_event *, void *);

        qp->r_sge.sg_list = qp->r_sg_list;

                srq = to_isrq(qp->ibqp.srq);
                handler = srq->ibsrq.event_handler;

        spin_lock_irqsave(&rq->lock, flags);

                /* Validate tail before using it since it is user writable. */
                if (tail >= rq->size)
                if (unlikely(tail == wq->head)) {
                        spin_unlock_irqrestore(&rq->lock, flags);
                /* Make sure entry is read after head index is read. */
                wqe = get_rwqe_ptr(rq, tail);
                if (++tail >= rq->size)
        } while (!wr_id_only && !ipath_init_sge(qp, wqe, &qp->r_len,
        qp->r_wr_id = wqe->wr_id;

        qp->r_wrid_valid = 1;

                /*
                 * validate head pointer value and compute
                 * the number of remaining WQEs.
                 */
                        n += rq->size - tail;
                if (n < srq->limit) {
                        spin_unlock_irqrestore(&rq->lock, flags);
                        ev.device = qp->ibqp.device;
                        ev.element.srq = qp->ibqp.srq;
                        ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
                        handler(&ev, srq->ibsrq.srq_context);
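                        /*
                         * The SRQ limit event is effectively one-shot: once
                         * it has fired, the consumer has to re-arm the limit
                         * (e.g. ib_modify_srq() with IB_SRQ_LIMIT) to be
                         * notified again as the queue drains further.
                         */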
        spin_unlock_irqrestore(&rq->lock, flags);
/**
 * ipath_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the sending QP
 *
 * This is called from ipath_do_send() to
 * forward a WQE addressed to the same HCA.
 * Note that although we are single threaded due to the tasklet, we still
 * have to protect against post_send(). We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
static void ipath_ruc_loopback(struct ipath_qp *sqp)
        struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
        struct ipath_swqe *wqe;
        struct ipath_sge *sge;
        enum ib_wc_status send_status;

        qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn);

        spin_lock_irqsave(&sqp->s_lock, flags);

        if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_SEND_OK) ||
            sqp->s_rnr_timeout) {
                spin_unlock_irqrestore(&sqp->s_lock, flags);

        /* Get the next send request. */
        if (sqp->s_last == sqp->s_head) {
                /* Send work queue is empty. */
                spin_unlock_irqrestore(&sqp->s_lock, flags);

        /*
         * We can rely on the entry not changing without the s_lock
         * being held until we update s_last.
         */
        wqe = get_swqe_ptr(sqp, sqp->s_last);
        spin_unlock_irqrestore(&sqp->s_lock, flags);

        memset(&wc, 0, sizeof wc);
        send_status = IB_WC_SUCCESS;

        sqp->s_sge.sge = wqe->sg_list[0];
        sqp->s_sge.sg_list = wqe->sg_list + 1;
        sqp->s_sge.num_sge = wqe->wr.num_sge;
        sqp->s_len = wqe->length;
        switch (wqe->wr.opcode) {
        case IB_WR_SEND_WITH_IMM:
                wc.wc_flags = IB_WC_WITH_IMM;
                wc.imm_data = wqe->wr.ex.imm_data;
                if (!ipath_get_rwqe(qp, 0))

        case IB_WR_RDMA_WRITE_WITH_IMM:
                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
                wc.wc_flags = IB_WC_WITH_IMM;
                wc.imm_data = wqe->wr.ex.imm_data;
                if (!ipath_get_rwqe(qp, 1))

        case IB_WR_RDMA_WRITE:
                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE)))
                if (wqe->length == 0)
                if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, wqe->length,
                                            wqe->wr.wr.rdma.remote_addr,
                                            wqe->wr.wr.rdma.rkey,
                                            IB_ACCESS_REMOTE_WRITE)))

        case IB_WR_RDMA_READ:
                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ)))
                if (unlikely(!ipath_rkey_ok(qp, &sqp->s_sge, wqe->length,
                                            wqe->wr.wr.rdma.remote_addr,
                                            wqe->wr.wr.rdma.rkey,
                                            IB_ACCESS_REMOTE_READ)))
                qp->r_sge.sge = wqe->sg_list[0];
                qp->r_sge.sg_list = wqe->sg_list + 1;
                qp->r_sge.num_sge = wqe->wr.num_sge;

        case IB_WR_ATOMIC_CMP_AND_SWP:
        case IB_WR_ATOMIC_FETCH_AND_ADD:
                if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC)))
                if (unlikely(!ipath_rkey_ok(qp, &qp->r_sge, sizeof(u64),
                                            wqe->wr.wr.atomic.remote_addr,
                                            wqe->wr.wr.atomic.rkey,
                                            IB_ACCESS_REMOTE_ATOMIC)))
                /* Perform atomic OP and save result. */
                maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
                sdata = wqe->wr.wr.atomic.compare_add;
                *(u64 *) sqp->s_sge.sge.vaddr =
                        (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
                        (u64) atomic64_add_return(sdata, maddr) - sdata :
                        (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
                                      sdata, wqe->wr.wr.atomic.swap);
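                /*
                 * Loopback atomics are emulated with host atomics: for
                 * FETCH_AND_ADD, atomic64_add_return() returns the new
                 * value, so sdata is subtracted back out to yield the
                 * original; for CMP_AND_SWP, cmpxchg() returns the prior
                 * value whether or not the swap took place. Either way the
                 * original value is stored through the requester's first
                 * SGE, as an atomic response would be.
                 */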
                send_status = IB_WC_LOC_QP_OP_ERR;

        sge = &sqp->s_sge.sge;
                u32 len = sqp->s_len;

                if (len > sge->length)
                if (len > sge->sge_length)
                        len = sge->sge_length;
                ipath_copy_sge(&qp->r_sge, sge->vaddr, len);
                sge->sge_length -= len;
                if (sge->sge_length == 0) {
                        if (--sqp->s_sge.num_sge)
                                *sge = *sqp->s_sge.sg_list++;
                } else if (sge->length == 0 && sge->mr != NULL) {
                        if (++sge->n >= IPATH_SEGSZ) {
                                if (++sge->m >= sge->mr->mapsz)
                                sge->mr->map[sge->m]->segs[sge->n].vaddr;
                                sge->mr->map[sge->m]->segs[sge->n].length;
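                        /*
                         * A memory region's page list is split into maps of
                         * IPATH_SEGSZ segments, so once the current segment
                         * is consumed the walk above may step to the next
                         * map (sge->m) and restart the segment index
                         * (sge->n) before reloading the segment's vaddr and
                         * length into the SGE.
                         */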
        if (wqe->wr.opcode == IB_WR_RDMA_WRITE ||
            wqe->wr.opcode == IB_WR_RDMA_READ)
        if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
                wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
                wc.opcode = IB_WC_RECV;
        wc.wr_id = qp->r_wr_id;
        wc.status = IB_WC_SUCCESS;
        wc.byte_len = wqe->length;
        wc.src_qp = qp->remote_qpn;
        wc.slid = qp->remote_ah_attr.dlid;
        wc.sl = qp->remote_ah_attr.sl;

        /* Signal completion event if the solicited bit is set. */
        ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
                       wqe->wr.send_flags & IB_SEND_SOLICITED);

        sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;
        ipath_send_complete(sqp, wqe, send_status);

        if (qp->ibqp.qp_type == IB_QPT_UC)
        /*
         * Note: we don't need the s_lock held since the BUSY flag
         * makes this single threaded.
         */
        if (sqp->s_rnr_retry == 0) {
                send_status = IB_WC_RNR_RETRY_EXC_ERR;
        if (sqp->s_rnr_retry_cnt < 7)
        spin_lock_irqsave(&sqp->s_lock, flags);
        if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_RECV_OK))
        sqp->s_rnr_timeout = ib_ipath_rnr_table[qp->r_min_rnr_timer];
        ipath_insert_rnr_queue(sqp);
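        /*
         * The receiver's minimum RNR NAK timer code (qp->r_min_rnr_timer)
         * indexes ib_ipath_rnr_table above to get the delay in milliseconds;
         * the sending QP then sits on dev->rnrwait until that delay expires
         * and the send is retried.
         */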
        send_status = IB_WC_REM_INV_REQ_ERR;
        wc.status = IB_WC_LOC_QP_OP_ERR;

        send_status = IB_WC_REM_ACCESS_ERR;
        wc.status = IB_WC_LOC_PROT_ERR;

        /* responder goes to error state */
        ipath_rc_error(qp, wc.status);

        spin_lock_irqsave(&sqp->s_lock, flags);
        ipath_send_complete(sqp, wqe, send_status);
        if (sqp->ibqp.qp_type == IB_QPT_RC) {
                int lastwqe = ipath_error_qp(sqp, IB_WC_WR_FLUSH_ERR);

                sqp->s_flags &= ~IPATH_S_BUSY;
                spin_unlock_irqrestore(&sqp->s_lock, flags);
                        ev.device = sqp->ibqp.device;
                        ev.element.qp = &sqp->ibqp;
                        ev.event = IB_EVENT_QP_LAST_WQE_REACHED;
                        sqp->ibqp.event_handler(&ev, sqp->ibqp.qp_context);

        spin_unlock_irqrestore(&sqp->s_lock, flags);

        if (atomic_dec_and_test(&qp->refcount))

static void want_buffer(struct ipath_devdata *dd, struct ipath_qp *qp)
        if (!(dd->ipath_flags & IPATH_HAS_SEND_DMA) ||
            qp->ibqp.qp_type == IB_QPT_SMI) {

                spin_lock_irqsave(&dd->ipath_sendctrl_lock, flags);
                dd->ipath_sendctrl |= INFINIPATH_S_PIOINTBUFAVAIL;
                ipath_write_kreg(dd, dd->ipath_kregs->kr_sendctrl,
                ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
                spin_unlock_irqrestore(&dd->ipath_sendctrl_lock, flags);
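                /*
                 * The read of kr_scratch above is the usual ipath idiom for
                 * flushing a posted MMIO write, so the PIOINTBUFAVAIL
                 * interrupt enable is known to have reached the chip before
                 * the sendctrl lock is dropped.
                 */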
/**
 * ipath_no_bufs_available - tell the layer driver we need buffers
 * @qp: the QP that caused the problem
 * @dev: the device we ran out of buffers on
 *
 * Called when we run out of PIO buffers.
 */
static void ipath_no_bufs_available(struct ipath_qp *qp,
                                    struct ipath_ibdev *dev)
        /*
         * Note that as soon as want_buffer() is called and
         * possibly before it returns, ipath_ib_piobufavail()
         * could be called. If we are still in the tasklet function,
         * tasklet_hi_schedule() will not call us until the next time
         * tasklet_hi_schedule() is called.
         * We leave the busy flag set so that another post send doesn't
         * try to put the same QP on the piowait list again.
         */
        spin_lock_irqsave(&dev->pending_lock, flags);
        list_add_tail(&qp->piowait, &dev->piowait);
        spin_unlock_irqrestore(&dev->pending_lock, flags);
        want_buffer(dev->dd, qp);
/**
 * ipath_make_grh - construct a GRH header
 * @dev: a pointer to the ipath device
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32-bit words of header being sent
 * @nwords: the number of 32-bit words of data being sent
 *
 * Return the size of the header in 32-bit words.
 */
u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
                   struct ib_global_route *grh, u32 hwords, u32 nwords)
        hdr->version_tclass_flow =
                cpu_to_be32((6 << 28) |
                            (grh->traffic_class << 20) |
        hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
        /* next_hdr is defined by C8-7 in ch. 8.4.1 */
        hdr->next_hdr = 0x1B;
        hdr->hop_limit = grh->hop_limit;
        /* The SGID is 32-bit aligned. */
        hdr->sgid.global.subnet_prefix = dev->gid_prefix;
        hdr->sgid.global.interface_id = dev->dd->ipath_guid;
        hdr->dgid = grh->dgid;

        /* GRH header size in 32-bit words. */
        return sizeof(struct ib_grh) / sizeof(u32);
void ipath_make_ruc_header(struct ipath_ibdev *dev, struct ipath_qp *qp,
                           struct ipath_other_headers *ohdr,

        /* Construct the header. */
        extra_bytes = -qp->s_cur_size & 3;
        nwords = (qp->s_cur_size + extra_bytes) >> 2;
        lrh0 = IPATH_LRH_BTH;
        if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
                qp->s_hdrwords += ipath_make_grh(dev, &qp->s_hdr.u.l.grh,
                                                 &qp->remote_ah_attr.grh,
                                                 qp->s_hdrwords, nwords);
                lrh0 = IPATH_LRH_GRH;

        lrh0 |= qp->remote_ah_attr.sl << 4;
        qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
        qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
        qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords + SIZE_OF_CRC);
        qp->s_hdr.lrh[3] = cpu_to_be16(dev->dd->ipath_lid);
        bth0 |= ipath_get_pkey(dev->dd, qp->s_pkey_index);
        bth0 |= extra_bytes << 20;
        ohdr->bth[0] = cpu_to_be32(bth0 | (1 << 22));
        ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
        ohdr->bth[2] = cpu_to_be32(bth2);
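        /*
         * A note on the BTH word 0 built above: bth0 carries the opcode from
         * the caller in the top byte, the 2-bit pad count (extra_bytes) in
         * bits 21:20 and the P_Key in the low 16 bits; the (1 << 22) sets
         * what is, per the BTH layout, the MigReq bit, so the QP is always
         * reported in the migrated state.
         */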
/**
 * ipath_do_send - perform a send on a QP
 * @data: contains a pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted. Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, two threads could send packets out of order.
 */
void ipath_do_send(unsigned long data)
        struct ipath_qp *qp = (struct ipath_qp *)data;
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
        int (*make_req)(struct ipath_qp *qp);

        if (test_and_set_bit(IPATH_S_BUSY, &qp->s_busy))

        if ((qp->ibqp.qp_type == IB_QPT_RC ||
             qp->ibqp.qp_type == IB_QPT_UC) &&
            qp->remote_ah_attr.dlid == dev->dd->ipath_lid) {
                ipath_ruc_loopback(qp);

        if (qp->ibqp.qp_type == IB_QPT_RC)
                make_req = ipath_make_rc_req;
        else if (qp->ibqp.qp_type == IB_QPT_UC)
                make_req = ipath_make_uc_req;
                make_req = ipath_make_ud_req;

        /* Check for a constructed packet to be sent. */
        if (qp->s_hdrwords != 0) {
                /*
                 * If no PIO bufs are available, return. An interrupt will
                 * call ipath_ib_piobufavail() when one is available.
                 */
                if (ipath_verbs_send(qp, &qp->s_hdr, qp->s_hdrwords,
                                     qp->s_cur_sge, qp->s_cur_size)) {
                        ipath_no_bufs_available(qp, dev);
                dev->n_unicast_xmit++;
                /* Record that we sent the packet and s_hdr is empty. */

        clear_bit(IPATH_S_BUSY, &qp->s_busy);
void ipath_send_complete(struct ipath_qp *qp, struct ipath_swqe *wqe,
                         enum ib_wc_status status)

        /* See ch. 11.2.4.1 and 10.7.3.1 */
        if (!(qp->s_flags & IPATH_S_SIGNAL_REQ_WR) ||
            (wqe->wr.send_flags & IB_SEND_SIGNALED) ||
            status != IB_WC_SUCCESS) {

                memset(&wc, 0, sizeof wc);
                wc.wr_id = wqe->wr.wr_id;
                wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
                if (status == IB_WC_SUCCESS)
                        wc.byte_len = wqe->length;
                ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc,
                               status != IB_WC_SUCCESS);

        spin_lock_irqsave(&qp->s_lock, flags);
        if (++last >= qp->s_size)
        spin_unlock_irqrestore(&qp->s_lock, flags);