/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "ipath_verbs.h"
#include "ips_common.h"
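/*
 * The table below has 32 entries, indexed by the 5-bit RNR NAK timer
 * code carried in the AETH (or by a QP's minimum RNR timer, as in
 * ipath_ruc_loopback() below); each entry is the delay in milliseconds
 * to wait before the send queue is retried.
 */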
/*
 * Convert the AETH RNR timeout code into the number of milliseconds.
 */
const u32 ib_ipath_rnr_table[32] = {
/**
 * ipath_insert_rnr_queue - put QP on the RNR timeout list for the device
 * @qp: the QP
 *
 * XXX Use a simple list for now.  We might need a priority
 * queue if we have lots of QPs waiting for RNR timeouts
 * but that should be rare.
 */
void ipath_insert_rnr_queue(struct ipath_qp *qp)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	unsigned long flags;

	spin_lock_irqsave(&dev->pending_lock, flags);
	if (list_empty(&dev->rnrwait))
		list_add(&qp->timerwait, &dev->rnrwait);
	else {
		struct list_head *l = &dev->rnrwait;
		struct ipath_qp *nqp = list_entry(l->next, struct ipath_qp,
						  timerwait);

		while (qp->s_rnr_timeout >= nqp->s_rnr_timeout) {
			qp->s_rnr_timeout -= nqp->s_rnr_timeout;
			l = l->next;
			if (l->next == &dev->rnrwait)
				break;
			nqp = list_entry(l->next, struct ipath_qp,
					 timerwait);
		}
		list_add(&qp->timerwait, l);
	}
	spin_unlock_irqrestore(&dev->pending_lock, flags);
}
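/*
 * Worked example of the delta encoding above: if the rnrwait list
 * already holds two QPs with stored timeouts 3 and 4 (absolute waits
 * of 3 ms and 7 ms), inserting a QP with an absolute wait of 5 ms
 * walks past the first entry, reduces its own value to 2, and is
 * linked between the two, so each entry keeps its wait relative to
 * its predecessor.
 */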
/**
 * ipath_get_rwqe - copy the next RWQE into the QP's RWQE
 * @qp: the QP
 * @wr_id_only: update wr_id only, not SGEs
 *
 * Return 0 if no RWQE is available, otherwise return 1.
 *
 * Called at interrupt level with the QP r_rq.lock held.
 */
int ipath_get_rwqe(struct ipath_qp *qp, int wr_id_only)
{
	struct ipath_rq *rq;
	struct ipath_srq *srq;
	struct ipath_rwqe *wqe;
	int ret;

	if (!qp->ibqp.srq) {
		rq = &qp->r_rq;
		if (unlikely(rq->tail == rq->head)) {
			ret = 0;
			goto bail;
		}
		wqe = get_rwqe_ptr(rq, rq->tail);
		qp->r_wr_id = wqe->wr_id;
		if (!wr_id_only) {
			qp->r_sge.sge = wqe->sg_list[0];
			qp->r_sge.sg_list = wqe->sg_list + 1;
			qp->r_sge.num_sge = wqe->num_sge;
			qp->r_len = wqe->length;
		}
		if (++rq->tail >= rq->size)
			rq->tail = 0;
		ret = 1;
		goto bail;
	}

	srq = to_isrq(qp->ibqp.srq);
	rq = &srq->rq;
	spin_lock(&rq->lock);
	if (unlikely(rq->tail == rq->head)) {
		spin_unlock(&rq->lock);
		ret = 0;
		goto bail;
	}
	wqe = get_rwqe_ptr(rq, rq->tail);
	qp->r_wr_id = wqe->wr_id;
	if (!wr_id_only) {
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->num_sge;
		qp->r_len = wqe->length;
	}
	if (++rq->tail >= rq->size)
		rq->tail = 0;
	if (srq->ibsrq.event_handler) {
		struct ib_event ev;
		u32 n;

		if (rq->head < rq->tail)
			n = rq->size + rq->head - rq->tail;
		else
			n = rq->head - rq->tail;
		if (n < srq->limit) {
			srq->limit = 0;
			spin_unlock(&rq->lock);
			ev.device = qp->ibqp.device;
			ev.element.srq = qp->ibqp.srq;
			ev.event = IB_EVENT_SRQ_LIMIT_REACHED;
			srq->ibsrq.event_handler(&ev,
						 srq->ibsrq.srq_context);
		} else
			spin_unlock(&rq->lock);
	} else
		spin_unlock(&rq->lock);
	ret = 1;

bail:
	return ret;
}
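/*
 * Note on the SRQ limit test above: head and tail index a circular
 * buffer of rq->size entries, so the number of receive WQEs still
 * posted is head - tail, with rq->size added when head has wrapped.
 * For example, with size 8, head 2 and tail 6, n = 8 + 2 - 6 = 4, so
 * an armed limit of 5 would generate IB_EVENT_SRQ_LIMIT_REACHED here.
 */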
/**
 * ipath_ruc_loopback - handle UC and RC loopback requests
 * @sqp: the loopback QP
 *
 * This is called from ipath_do_uc_send() or ipath_do_rc_send() to
 * forward a WQE addressed to the same HCA.
 * Note that although we are single threaded due to the tasklet, we still
 * have to protect against post_send().  We don't have to worry about
 * receive interrupts since this is a connected protocol and all packets
 * will pass through here.
 */
static void ipath_ruc_loopback(struct ipath_qp *sqp)
{
	struct ipath_ibdev *dev = to_idev(sqp->ibqp.device);
	struct ipath_qp *qp;
	struct ipath_swqe *wqe;
	struct ipath_sge *sge;
	unsigned long flags;
	struct ib_wc wc;
	u64 sdata;

	qp = ipath_lookup_qpn(&dev->qp_table, sqp->remote_qpn);
	if (!qp) {
		dev->n_pkt_drops++;
		return;
	}

again:
	spin_lock_irqsave(&sqp->s_lock, flags);

	if (!(ib_ipath_state_ops[sqp->state] & IPATH_PROCESS_SEND_OK)) {
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		goto done;
	}

	/* Get the next send request. */
	if (sqp->s_last == sqp->s_head) {
		/* Send work queue is empty. */
		spin_unlock_irqrestore(&sqp->s_lock, flags);
		goto done;
	}

	/*
	 * We can rely on the entry not changing without the s_lock
	 * being held until we update s_last.
	 */
	wqe = get_swqe_ptr(sqp, sqp->s_last);
	spin_unlock_irqrestore(&sqp->s_lock, flags);
	wc.wc_flags = 0;
	wc.imm_data = 0;

	sqp->s_sge.sge = wqe->sg_list[0];
	sqp->s_sge.sg_list = wqe->sg_list + 1;
	sqp->s_sge.num_sge = wqe->wr.num_sge;
	sqp->s_len = wqe->length;
	switch (wqe->wr.opcode) {
	case IB_WR_SEND_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.imm_data = wqe->wr.imm_data;
		/* FALLTHROUGH */
	case IB_WR_SEND:
		spin_lock_irqsave(&qp->r_rq.lock, flags);
		if (!ipath_get_rwqe(qp, 0)) {
		rnr_nak:
			spin_unlock_irqrestore(&qp->r_rq.lock, flags);
			/* Handle RNR NAK */
			if (qp->ibqp.qp_type == IB_QPT_UC)
				goto send_comp;
			if (sqp->s_rnr_retry == 0) {
				wc.status = IB_WC_RNR_RETRY_EXC_ERR;
				goto err;
			}
			if (sqp->s_rnr_retry_cnt < 7)
				sqp->s_rnr_retry--;
			dev->n_rnr_naks++;
			sqp->s_rnr_timeout =
				ib_ipath_rnr_table[sqp->s_min_rnr_timer];
			ipath_insert_rnr_queue(sqp);
			goto done;
		}
		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
		break;

	case IB_WR_RDMA_WRITE_WITH_IMM:
		wc.wc_flags = IB_WC_WITH_IMM;
		wc.imm_data = wqe->wr.imm_data;
		spin_lock_irqsave(&qp->r_rq.lock, flags);
		if (!ipath_get_rwqe(qp, 1))
			goto rnr_nak;
		spin_unlock_irqrestore(&qp->r_rq.lock, flags);
		/* FALLTHROUGH */
	case IB_WR_RDMA_WRITE:
		if (wqe->length == 0)
			break;
		if (unlikely(!ipath_rkey_ok(dev, &qp->r_sge, wqe->length,
					    wqe->wr.wr.rdma.remote_addr,
					    wqe->wr.wr.rdma.rkey,
					    IB_ACCESS_REMOTE_WRITE))) {
		acc_err:
			wc.status = IB_WC_REM_ACCESS_ERR;
		err:
			wc.wr_id = wqe->wr.wr_id;
			wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
			wc.vendor_err = 0;
			wc.byte_len = 0;
			wc.qp_num = sqp->ibqp.qp_num;
			wc.src_qp = sqp->remote_qpn;
			wc.pkey_index = 0;
			wc.slid = sqp->remote_ah_attr.dlid;
			wc.sl = sqp->remote_ah_attr.sl;
			wc.dlid_path_bits = 0;
			wc.port_num = 0;
			ipath_sqerror_qp(sqp, &wc);
			goto done;
		}
		break;

	case IB_WR_RDMA_READ:
		if (unlikely(!ipath_rkey_ok(dev, &sqp->s_sge, wqe->length,
					    wqe->wr.wr.rdma.remote_addr,
					    wqe->wr.wr.rdma.rkey,
					    IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_READ)))
			goto acc_err;
		qp->r_sge.sge = wqe->sg_list[0];
		qp->r_sge.sg_list = wqe->sg_list + 1;
		qp->r_sge.num_sge = wqe->wr.num_sge;
		break;

	case IB_WR_ATOMIC_CMP_AND_SWP:
	case IB_WR_ATOMIC_FETCH_AND_ADD:
		if (unlikely(!ipath_rkey_ok(dev, &qp->r_sge, sizeof(u64),
					    wqe->wr.wr.rdma.remote_addr,
					    wqe->wr.wr.rdma.rkey,
					    IB_ACCESS_REMOTE_ATOMIC)))
			goto acc_err;
		/* Perform atomic OP and save result. */
		sdata = wqe->wr.wr.atomic.swap;
		spin_lock_irqsave(&dev->pending_lock, flags);
		qp->r_atomic_data = *(u64 *) qp->r_sge.sge.vaddr;
		if (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)
			*(u64 *) qp->r_sge.sge.vaddr =
				qp->r_atomic_data + sdata;
		else if (qp->r_atomic_data == wqe->wr.wr.atomic.compare_add)
			*(u64 *) qp->r_sge.sge.vaddr = sdata;
		spin_unlock_irqrestore(&dev->pending_lock, flags);
		*(u64 *) sqp->s_sge.sge.vaddr = qp->r_atomic_data;
		goto send_comp;

	default:
		goto done;
	}
	sge = &sqp->s_sge.sge;
	while (sqp->s_len) {
		u32 len = sqp->s_len;

		if (len > sge->length)
			len = sge->length;
		BUG_ON(len == 0);
		ipath_copy_sge(&qp->r_sge, sge->vaddr, len);
		sge->vaddr += len;
		sge->length -= len;
		sge->sge_length -= len;
		if (sge->sge_length == 0) {
			if (--sqp->s_sge.num_sge)
				*sge = *sqp->s_sge.sg_list++;
		} else if (sge->length == 0 && sge->mr != NULL) {
			if (++sge->n >= IPATH_SEGSZ) {
				if (++sge->m >= sge->mr->mapsz)
					break;
				sge->n = 0;
			}
			sge->vaddr =
				sge->mr->map[sge->m]->segs[sge->n].vaddr;
			sge->length =
				sge->mr->map[sge->m]->segs[sge->n].length;
		}
		sqp->s_len -= len;
	}

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE ||
	    wqe->wr.opcode == IB_WR_RDMA_READ)
		goto send_comp;

	if (wqe->wr.opcode == IB_WR_RDMA_WRITE_WITH_IMM)
		wc.opcode = IB_WC_RECV_RDMA_WITH_IMM;
	else
		wc.opcode = IB_WC_RECV;
	wc.wr_id = qp->r_wr_id;
	wc.status = IB_WC_SUCCESS;
	wc.vendor_err = 0;
	wc.byte_len = wqe->length;
	wc.qp_num = qp->ibqp.qp_num;
	wc.src_qp = qp->remote_qpn;
	/* XXX do we know which pkey matched? Only needed for GSI. */
	wc.pkey_index = 0;
	wc.slid = qp->remote_ah_attr.dlid;
	wc.sl = qp->remote_ah_attr.sl;
	wc.dlid_path_bits = 0;
	/* Signal completion event if the solicited bit is set. */
	ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
		       wqe->wr.send_flags & IB_SEND_SOLICITED);

send_comp:
	sqp->s_rnr_retry = sqp->s_rnr_retry_cnt;

	if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &sqp->s_flags) ||
	    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
		wc.wr_id = wqe->wr.wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		wc.vendor_err = 0;
		wc.byte_len = wqe->length;
		wc.qp_num = sqp->ibqp.qp_num;
		wc.src_qp = 0;
		wc.pkey_index = 0;
		wc.slid = 0;
		wc.sl = 0;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		ipath_cq_enter(to_icq(sqp->ibqp.send_cq), &wc, 0);
	}

	/* Update s_last now that we are finished with the SWQE */
	spin_lock_irqsave(&sqp->s_lock, flags);
	if (++sqp->s_last >= sqp->s_size)
		sqp->s_last = 0;
	spin_unlock_irqrestore(&sqp->s_lock, flags);
	goto again;

done:
	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}
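/*
 * Note: ipath_lookup_qpn() above returns the destination QP with a
 * reference held; the atomic_dec_and_test()/wake_up() pair at "done"
 * drops that reference and wakes anyone waiting for the QP's
 * reference count to reach zero (e.g. a destroy in progress).
 */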
/**
 * ipath_no_bufs_available - tell the layer driver we need buffers
 * @qp: the QP that caused the problem
 * @dev: the device we ran out of buffers on
 *
 * Called when we run out of PIO buffers.
 */
void ipath_no_bufs_available(struct ipath_qp *qp, struct ipath_ibdev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->pending_lock, flags);
	if (list_empty(&qp->piowait))
		list_add_tail(&qp->piowait, &dev->piowait);
	spin_unlock_irqrestore(&dev->pending_lock, flags);
	/*
	 * Note that as soon as ipath_layer_want_buffer() is called and
	 * possibly before it returns, ipath_ib_piobufavail()
	 * could be called.  If we are still in the tasklet function,
	 * tasklet_hi_schedule() will not call us until the next time
	 * tasklet_hi_schedule() is called.
	 * We clear the tasklet flag now since we are committing to return
	 * from the tasklet function.
	 */
	clear_bit(IPATH_S_BUSY, &qp->s_flags);
	tasklet_unlock(&qp->s_task);
	ipath_layer_want_buffer(dev->dd);
	dev->n_piowait++;
}
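/*
 * The expectation here (per the comment above) is that once the chip
 * frees a PIO buffer, ipath_ib_piobufavail() walks dev->piowait and
 * reschedules the send tasklet of each waiting QP, which re-enters
 * ipath_do_ruc_send() and retries the already constructed header.
 */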
/**
 * ipath_post_ruc_send - post RC and UC sends
 * @qp: the QP to post on
 * @wr: the work request to send
 */
int ipath_post_ruc_send(struct ipath_qp *qp, struct ib_send_wr *wr)
{
	struct ipath_swqe *wqe;
	unsigned long flags;
	u32 next;
	int i, j, acc, ret;

	/*
	 * Don't allow RDMA reads or atomic operations on UC or
	 * undefined operations.
	 * Make sure buffer is large enough to hold the result for atomics.
	 */
	if (qp->ibqp.qp_type == IB_QPT_UC) {
		if ((unsigned) wr->opcode >= IB_WR_RDMA_READ) {
			ret = -EINVAL;
			goto bail;
		}
	} else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) {
		ret = -EINVAL;
		goto bail;
	} else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP &&
		   (wr->num_sge == 0 ||
		    wr->sg_list[0].length < sizeof(u64) ||
		    wr->sg_list[0].addr & (sizeof(u64) - 1))) {
		ret = -EINVAL;
		goto bail;
	}
	/* IB spec says that num_sge == 0 is OK. */
	if (wr->num_sge > qp->s_max_sge) {
		ret = -ENOMEM;
		goto bail;
	}
	spin_lock_irqsave(&qp->s_lock, flags);
	next = qp->s_head + 1;
	if (next >= qp->s_size)
		next = 0;
	if (next == qp->s_last) {
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ret = -EINVAL;
		goto bail;
	}
	wqe = get_swqe_ptr(qp, qp->s_head);
	wqe->wr = *wr;
	wqe->ssn = qp->s_ssn++;
	wqe->sg_list[0].mr = NULL;
	wqe->sg_list[0].vaddr = NULL;
	wqe->sg_list[0].length = 0;
	wqe->sg_list[0].sge_length = 0;
	wqe->length = 0;
	acc = wr->opcode >= IB_WR_RDMA_READ ? IB_ACCESS_LOCAL_WRITE : 0;
	for (i = 0, j = 0; i < wr->num_sge; i++) {
		if (to_ipd(qp->ibqp.pd)->user && wr->sg_list[i].lkey == 0) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			ret = -EINVAL;
			goto bail;
		}
		if (wr->sg_list[i].length == 0)
			continue;
		if (!ipath_lkey_ok(&to_idev(qp->ibqp.device)->lk_table,
				   &wqe->sg_list[j], &wr->sg_list[i],
				   acc)) {
			spin_unlock_irqrestore(&qp->s_lock, flags);
			ret = -EINVAL;
			goto bail;
		}
		wqe->length += wr->sg_list[i].length;
		j++;
	}
	wqe->wr.num_sge = j;
	qp->s_head = next;
	spin_unlock_irqrestore(&qp->s_lock, flags);

	ipath_do_ruc_send((unsigned long) qp);

	ret = 0;
bail:
	return ret;
}
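/*
 * Illustrative sketch (hypothetical caller, not part of this driver):
 * a ULP reaches the function above through the usual verbs post path,
 * roughly
 *
 *	struct ib_sge sge = {
 *		.addr   = dma_addr,
 *		.length = len,
 *		.lkey   = mr->lkey,
 *	};
 *	struct ib_send_wr wr = {
 *		.wr_id      = 1,
 *		.sg_list    = &sge,
 *		.num_sge    = 1,
 *		.opcode     = IB_WR_SEND,
 *		.send_flags = IB_SEND_SIGNALED,
 *	};
 *	ib_post_send(qp, &wr, &bad_wr);
 *
 * where dma_addr, len, mr, qp and bad_wr are assumed to exist in the
 * caller; the driver's post_send entry point is expected to dispatch
 * RC and UC QPs to ipath_post_ruc_send().
 */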
/**
 * ipath_make_grh - construct a GRH header
 * @dev: a pointer to the ipath device
 * @hdr: a pointer to the GRH header being constructed
 * @grh: the global route address to send to
 * @hwords: the number of 32 bit words of header being sent
 * @nwords: the number of 32 bit words of data being sent
 *
 * Return the size of the header in 32 bit words.
 */
u32 ipath_make_grh(struct ipath_ibdev *dev, struct ib_grh *hdr,
		   struct ib_global_route *grh, u32 hwords, u32 nwords)
{
	hdr->version_tclass_flow =
		cpu_to_be32((6 << 28) |
			    (grh->traffic_class << 20) |
			    grh->flow_label);
	hdr->paylen = cpu_to_be16((hwords - 2 + nwords + SIZE_OF_CRC) << 2);
	/* next_hdr is defined by C8-7 in ch. 8.4.1 */
	hdr->next_hdr = 0x1B;
	hdr->hop_limit = grh->hop_limit;
	/* The SGID is 32-bit aligned. */
	hdr->sgid.global.subnet_prefix = dev->gid_prefix;
	hdr->sgid.global.interface_id = ipath_layer_get_guid(dev->dd);
	hdr->dgid = grh->dgid;

	/* GRH header size in 32-bit words. */
	return sizeof(struct ib_grh) / sizeof(u32);
}
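/*
 * Example of the paylen arithmetic above (assuming SIZE_OF_CRC counts
 * the single 32-bit ICRC word): with hwords = 5 (two LRH words plus
 * three BTH words) and nwords = 64 payload words, paylen =
 * (5 - 2 + 64 + 1) << 2 = 272 bytes, i.e. the BTH, the payload and
 * the ICRC; the "- 2" removes the LRH, which the GRH payload length
 * does not cover.
 */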
/**
 * ipath_do_ruc_send - perform a send on an RC or UC QP
 * @data: contains a pointer to the QP
 *
 * Process entries in the send work queue until credit or queue is
 * exhausted.  Only allow one CPU to send a packet per QP (tasklet).
 * Otherwise, after we drop the QP s_lock, two threads could send
 * packets out of order.
 */
void ipath_do_ruc_send(unsigned long data)
{
	struct ipath_qp *qp = (struct ipath_qp *)data;
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	unsigned long flags;
	u16 lrh0;
	u32 nwords;
	u32 extra_bytes;
	u32 bth0;
	u32 bth2;
	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
	struct ipath_other_headers *ohdr;

	if (test_and_set_bit(IPATH_S_BUSY, &qp->s_flags))
		goto bail;

	if (unlikely(qp->remote_ah_attr.dlid ==
		     ipath_layer_get_lid(dev->dd))) {
		ipath_ruc_loopback(qp);
		goto clear;
	}

	ohdr = &qp->s_hdr.u.oth;
	if (qp->remote_ah_attr.ah_flags & IB_AH_GRH)
		ohdr = &qp->s_hdr.u.l.oth;

again:
	/* Check for a constructed packet to be sent. */
	if (qp->s_hdrwords != 0) {
		/*
		 * If no PIO bufs are available, return.  An interrupt will
		 * call ipath_ib_piobufavail() when one is available.
		 */
		if (ipath_verbs_send(dev->dd, qp->s_hdrwords,
				     (u32 *) &qp->s_hdr, qp->s_cur_size,
				     qp->s_cur_sge)) {
			ipath_no_bufs_available(qp, dev);
			goto bail;
		}
		dev->n_unicast_xmit++;
		/* Record that we sent the packet and s_hdr is empty. */
		qp->s_hdrwords = 0;
	}

	/*
	 * The lock is needed to synchronize between setting
	 * qp->s_ack_state, resend timer, and post_send().
	 */
	spin_lock_irqsave(&qp->s_lock, flags);

	/* Sending responses has higher priority over sending requests. */
	if (qp->s_ack_state != IB_OPCODE_RC_ACKNOWLEDGE &&
	    (bth0 = ipath_make_rc_ack(qp, ohdr, pmtu)) != 0)
		bth2 = qp->s_ack_psn++ & IPS_PSN_MASK;
	else if (!((qp->ibqp.qp_type == IB_QPT_RC) ?
		   ipath_make_rc_req(qp, ohdr, pmtu, &bth0, &bth2) :
		   ipath_make_uc_req(qp, ohdr, pmtu, &bth0, &bth2))) {
		/*
		 * Clear the busy bit before unlocking to avoid races with
		 * adding new work queue items and then failing to process
		 * them.
		 */
		clear_bit(IPATH_S_BUSY, &qp->s_flags);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		goto bail;
	}

	spin_unlock_irqrestore(&qp->s_lock, flags);

	/* Construct the header. */
	extra_bytes = (4 - qp->s_cur_size) & 3;
	nwords = (qp->s_cur_size + extra_bytes) >> 2;
	lrh0 = IPS_LRH_BTH;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		qp->s_hdrwords += ipath_make_grh(dev, &qp->s_hdr.u.l.grh,
						 &qp->remote_ah_attr.grh,
						 qp->s_hdrwords, nwords);
		lrh0 = IPS_LRH_GRH;
	}
	lrh0 |= qp->remote_ah_attr.sl << 4;
	qp->s_hdr.lrh[0] = cpu_to_be16(lrh0);
	qp->s_hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	qp->s_hdr.lrh[2] = cpu_to_be16(qp->s_hdrwords + nwords +
				       SIZE_OF_CRC);
	qp->s_hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
	bth0 |= ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
	bth0 |= extra_bytes << 20;
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(bth2);

	/* Check for more work to do. */
	goto again;

clear:
	clear_bit(IPATH_S_BUSY, &qp->s_flags);
bail:
	return;
}
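/*
 * Note on the BTH packing in ipath_do_ruc_send() above: bth0 is
 * expected to arrive from ipath_make_rc_ack()/ipath_make_rc_req()/
 * ipath_make_uc_req() with the opcode already in bits 31:24; the
 * P_Key is then OR'd into bits 15:0 and the pad count (extra_bytes)
 * into bits 21:20, matching the IBA BTH layout, before the word is
 * byte-swapped into ohdr->bth[0].
 */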