/*
 * Copyright (c) 2006 QLogic, Inc. All rights reserved.
 * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ipath_verbs.h"
#include "ips_common.h"
/* cut down ridiculously long IB macro names */
#define OP(x) IB_OPCODE_RC_##x
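/*
 * Editorial example (not part of the original source): OP(SEND_FIRST)
 * expands to IB_OPCODE_RC_SEND_FIRST, so the opcode switches below can
 * name RC opcodes compactly.
 */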
/**
 * ipath_init_restart - initialize the qp->s_sge after a restart
 * @qp: the QP whose SGE we're restarting
 * @wqe: the work queue entry to initialize the QP's SGE from
 *
 * The QP s_lock should be held.
 */
static void ipath_init_restart(struct ipath_qp *qp, struct ipath_swqe *wqe)
{
	struct ipath_ibdev *dev;
	u32 len;

	len = ((qp->s_psn - wqe->psn) & IPS_PSN_MASK) *
		ib_mtu_enum_to_int(qp->path_mtu);
	qp->s_sge.sge = wqe->sg_list[0];
	qp->s_sge.sg_list = wqe->sg_list + 1;
	qp->s_sge.num_sge = wqe->wr.num_sge;
	ipath_skip_sge(&qp->s_sge, len);
	qp->s_len = wqe->length - len;
	dev = to_idev(qp->ibqp.device);
	spin_lock(&dev->pending_lock);
	if (list_empty(&qp->timerwait))
		list_add_tail(&qp->timerwait,
			      &dev->pending[dev->pending_index]);
	spin_unlock(&dev->pending_lock);
}
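/*
 * Worked example of the restart arithmetic above (editorial; assumes
 * the 24-bit PSN space implied by IPS_PSN_MASK): if wqe->psn is
 * 0xfffffe and qp->s_psn has wrapped to 0x000002, then
 * (0x000002 - 0xfffffe) & IPS_PSN_MASK == 4, so four path-MTU sized
 * payloads are skipped before the resend resumes mid-request.
 */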
/**
 * ipath_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read)
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @pmtu: the path MTU
 *
 * Return bth0 if constructed; otherwise, return 0.
 * Note the QP s_lock must be held.
 */
u32 ipath_make_rc_ack(struct ipath_qp *qp,
		      struct ipath_other_headers *ohdr,
		      u32 pmtu)
{
	struct ipath_sge_state *ss;
	u32 hwords;
	u32 len;
	u32 bth0;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;

	/*
	 * Send a response.  Note that we are in the responder's
	 * side of the QP context.
	 */
	switch (qp->s_ack_state) {
	case OP(RDMA_READ_REQUEST):
		ss = &qp->s_rdma_sge;
		len = qp->s_rdma_len;
		if (len > pmtu) {
			len = pmtu;
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST);
		} else
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY);
		qp->s_rdma_len -= len;
		bth0 = qp->s_ack_state << 24;
		ohdr->u.aeth = ipath_compute_aeth(qp);
		hwords++;
		break;

	case OP(RDMA_READ_RESPONSE_FIRST):
		qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		ss = &qp->s_rdma_sge;
		len = qp->s_rdma_len;
		if (len > pmtu)
			len = pmtu;
		else {
			ohdr->u.aeth = ipath_compute_aeth(qp);
			hwords++;
			qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST);
		}
		qp->s_rdma_len -= len;
		bth0 = qp->s_ack_state << 24;
		break;

	case OP(RDMA_READ_RESPONSE_LAST):
	case OP(RDMA_READ_RESPONSE_ONLY):
		/*
		 * We have to prevent new requests from changing
		 * the r_sge state while an ipath_verbs_send()
		 * is in progress.
		 * Changing r_state allows the receiver
		 * to continue processing new packets.
		 * We do it here now instead of above so
		 * that we are sure the packet was sent before
		 * changing the state.
		 */
		qp->r_state = OP(RDMA_READ_RESPONSE_LAST);
		qp->s_ack_state = OP(ACKNOWLEDGE);
		return 0;

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD):
		ss = NULL;
		len = 0;
		qp->r_state = OP(SEND_LAST);
		qp->s_ack_state = OP(ACKNOWLEDGE);
		bth0 = IB_OPCODE_ATOMIC_ACKNOWLEDGE << 24;
		ohdr->u.at.aeth = ipath_compute_aeth(qp);
		ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->s_ack_atomic);
		hwords += sizeof(ohdr->u.at) / 4;
		break;

	default:
		/* Send a regular ACK. */
		ss = NULL;
		len = 0;
		qp->s_ack_state = OP(ACKNOWLEDGE);
		bth0 = qp->s_ack_state << 24;
		ohdr->u.aeth = ipath_compute_aeth(qp);
		hwords++;
	}
	qp->s_hdrwords = hwords;
	qp->s_cur_sge = ss;
	qp->s_cur_size = len;

	return bth0;
}
/**
 * ipath_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC)
 * @qp: a pointer to the QP
 * @ohdr: a pointer to the IB header being constructed
 * @pmtu: the path MTU
 * @bth0p: pointer to the BTH opcode word
 * @bth2p: pointer to the BTH PSN word
 *
 * Return 1 if constructed; otherwise, return 0.
 * Note the QP s_lock must be held.
 */
int ipath_make_rc_req(struct ipath_qp *qp,
		      struct ipath_other_headers *ohdr,
		      u32 pmtu, u32 *bth0p, u32 *bth2p)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ipath_sge_state *ss;
	struct ipath_swqe *wqe;
	u32 hwords;
	u32 len;
	u32 bth0;
	u32 bth2;
	char newreq;

	if (!(ib_ipath_state_ops[qp->state] & IPATH_PROCESS_SEND_OK) ||
	    qp->s_rnr_timeout)
		goto done;

	/* header size in 32-bit words LRH+BTH = (8+12)/4. */
	hwords = 5;
	bth0 = 0;

	/* Send a request. */
	wqe = get_swqe_ptr(qp, qp->s_cur);
	switch (qp->s_state) {
	default:
		/*
		 * Resend an old request or start a new one.
		 *
		 * We keep track of the current SWQE so that
		 * we don't reset the "furthest progress" state
		 * if we need to back up.
		 */
		newreq = 0;
		if (qp->s_cur == qp->s_tail) {
			/* Check if send work queue is empty. */
			if (qp->s_tail == qp->s_head)
				goto done;
			qp->s_psn = wqe->psn = qp->s_next_psn;
			newreq = 1;
		}
		/*
		 * Note that we have to be careful not to modify the
		 * original work request since we may need to resend
		 * it.
		 */
		qp->s_sge.sge = wqe->sg_list[0];
		qp->s_sge.sg_list = wqe->sg_list + 1;
		qp->s_sge.num_sge = wqe->wr.num_sge;
		qp->s_len = len = wqe->length;
		ss = &qp->s_sge;
		bth2 = 0;
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			/* If no credit, return. */
			if (qp->s_lsn != (u32) -1 &&
			    ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0)
				goto done;
			wqe->lpsn = wqe->psn;
			if (len > pmtu) {
				wqe->lpsn += (len - 1) / pmtu;
				qp->s_state = OP(SEND_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_SEND)
				qp->s_state = OP(SEND_ONLY);
			else {
				qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes after the BTH */
				ohdr->u.imm_data = wqe->wr.imm_data;
				hwords += 1;
			}
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= 1 << 23;
			bth2 = 1 << 31;	/* Request ACK. */
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;
		case IB_WR_RDMA_WRITE:
			if (newreq)
				qp->s_lsn++;
			/* FALLTHROUGH */
		case IB_WR_RDMA_WRITE_WITH_IMM:
			/* If no credit, return. */
			if (qp->s_lsn != (u32) -1 &&
			    ipath_cmp24(wqe->ssn, qp->s_lsn + 1) > 0)
				goto done;
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->wr.wr.rdma.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			hwords += sizeof(struct ib_reth) / 4;
			wqe->lpsn = wqe->psn;
			if (len > pmtu) {
				wqe->lpsn += (len - 1) / pmtu;
				qp->s_state = OP(RDMA_WRITE_FIRST);
				len = pmtu;
				break;
			}
			if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
				qp->s_state = OP(RDMA_WRITE_ONLY);
			else {
				qp->s_state =
					OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE);
				/* Immediate data comes
				 * after RETH */
				ohdr->u.rc.imm_data = wqe->wr.imm_data;
				hwords += 1;
				if (wqe->wr.send_flags & IB_SEND_SOLICITED)
					bth0 |= 1 << 23;
			}
			bth2 = 1 << 31;	/* Request ACK. */
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;
		case IB_WR_RDMA_READ:
			ohdr->u.rc.reth.vaddr =
				cpu_to_be64(wqe->wr.wr.rdma.remote_addr);
			ohdr->u.rc.reth.rkey =
				cpu_to_be32(wqe->wr.wr.rdma.rkey);
			ohdr->u.rc.reth.length = cpu_to_be32(len);
			qp->s_state = OP(RDMA_READ_REQUEST);
			hwords += sizeof(ohdr->u.rc.reth) / 4;
			if (newreq) {
				qp->s_lsn++;
				/*
				 * Adjust s_next_psn to count the
				 * expected number of responses.
				 */
				if (len > pmtu)
					qp->s_next_psn += (len - 1) / pmtu;
				wqe->lpsn = qp->s_next_psn++;
			}
			ss = NULL;
			len = 0;
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			break;
		case IB_WR_ATOMIC_CMP_AND_SWP:
		case IB_WR_ATOMIC_FETCH_AND_ADD:
			if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP)
				qp->s_state = OP(COMPARE_SWAP);
			else
				qp->s_state = OP(FETCH_ADD);
			ohdr->u.atomic_eth.vaddr = cpu_to_be64(
				wqe->wr.wr.atomic.remote_addr);
			ohdr->u.atomic_eth.rkey = cpu_to_be32(
				wqe->wr.wr.atomic.rkey);
			ohdr->u.atomic_eth.swap_data = cpu_to_be64(
				wqe->wr.wr.atomic.swap);
			ohdr->u.atomic_eth.compare_data = cpu_to_be64(
				wqe->wr.wr.atomic.compare_add);
			hwords += sizeof(struct ib_atomic_eth) / 4;
			if (newreq) {
				qp->s_lsn++;
				wqe->lpsn = wqe->psn;
			}
			if (++qp->s_cur == qp->s_size)
				qp->s_cur = 0;
			ss = NULL;
			len = 0;
			break;

		default:
			goto done;
		}
		if (newreq) {
			qp->s_tail++;
			if (qp->s_tail >= qp->s_size)
				qp->s_tail = 0;
		}
		bth2 |= qp->s_psn++ & IPS_PSN_MASK;
		if ((int)(qp->s_psn - qp->s_next_psn) > 0)
			qp->s_next_psn = qp->s_psn;
		spin_lock(&dev->pending_lock);
		if (list_empty(&qp->timerwait))
			list_add_tail(&qp->timerwait,
				      &dev->pending[dev->pending_index]);
		spin_unlock(&dev->pending_lock);
		break;
	case OP(RDMA_READ_RESPONSE_FIRST):
		/*
		 * This case can only happen if a send is restarted.  See
		 * ipath_restart_rc().
		 */
		ipath_init_restart(qp, wqe);
		/* FALLTHROUGH */
	case OP(SEND_FIRST):
		qp->s_state = OP(SEND_MIDDLE);
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
		bth2 = qp->s_psn++ & IPS_PSN_MASK;
		if ((int)(qp->s_psn - qp->s_next_psn) > 0)
			qp->s_next_psn = qp->s_psn;
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			/*
			 * Request an ACK every 1/2 MB to avoid retransmit
			 * timeouts.
			 */
			if (((wqe->length - len) % (512 * 1024)) == 0)
				bth2 |= 1 << 31;
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_SEND)
			qp->s_state = OP(SEND_LAST);
		else {
			qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.imm_data;
			hwords += 1;
		}
		if (wqe->wr.send_flags & IB_SEND_SOLICITED)
			bth0 |= 1 << 23;
		bth2 |= 1 << 31;	/* Request ACK. */
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;
	case OP(RDMA_READ_RESPONSE_LAST):
		/*
		 * This case can only happen if an RDMA write is restarted.
		 * See ipath_restart_rc().
		 */
		ipath_init_restart(qp, wqe);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_FIRST):
		qp->s_state = OP(RDMA_WRITE_MIDDLE);
		/* FALLTHROUGH */
	case OP(RDMA_WRITE_MIDDLE):
		bth2 = qp->s_psn++ & IPS_PSN_MASK;
		if ((int)(qp->s_psn - qp->s_next_psn) > 0)
			qp->s_next_psn = qp->s_psn;
		ss = &qp->s_sge;
		len = qp->s_len;
		if (len > pmtu) {
			/*
			 * Request an ACK every 1/2 MB to avoid retransmit
			 * timeouts.
			 */
			if (((wqe->length - len) % (512 * 1024)) == 0)
				bth2 |= 1 << 31;
			len = pmtu;
			break;
		}
		if (wqe->wr.opcode == IB_WR_RDMA_WRITE)
			qp->s_state = OP(RDMA_WRITE_LAST);
		else {
			qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE);
			/* Immediate data comes after the BTH */
			ohdr->u.imm_data = wqe->wr.imm_data;
			hwords += 1;
			if (wqe->wr.send_flags & IB_SEND_SOLICITED)
				bth0 |= 1 << 23;
		}
		bth2 |= 1 << 31;	/* Request ACK. */
		qp->s_cur++;
		if (qp->s_cur >= qp->s_size)
			qp->s_cur = 0;
		break;
	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/*
		 * This case can only happen if an RDMA read is restarted.
		 * See ipath_restart_rc().
		 */
		ipath_init_restart(qp, wqe);
		len = ((qp->s_psn - wqe->psn) & IPS_PSN_MASK) * pmtu;
		ohdr->u.rc.reth.vaddr =
			cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len);
		ohdr->u.rc.reth.rkey =
			cpu_to_be32(wqe->wr.wr.rdma.rkey);
		ohdr->u.rc.reth.length = cpu_to_be32(qp->s_len);
		qp->s_state = OP(RDMA_READ_REQUEST);
		hwords += sizeof(ohdr->u.rc.reth) / 4;
		bth2 = qp->s_psn++ & IPS_PSN_MASK;
		if ((int)(qp->s_psn - qp->s_next_psn) > 0)
			qp->s_next_psn = qp->s_psn;
		ss = NULL;
		len = 0;
		qp->s_cur++;
		if (qp->s_cur == qp->s_size)
			qp->s_cur = 0;
		break;
	case OP(RDMA_READ_REQUEST):
	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD):
		/*
		 * We shouldn't start anything new until this request is
		 * finished.  The ACK will handle rescheduling us.  XXX The
		 * number of outstanding ones is negotiated at connection
		 * setup time (see pg. 258,289)?  XXX Also, if we support
		 * multiple outstanding requests, we need to check the WQE
		 * IB_SEND_FENCE flag and not send a new request if an RDMA
		 * read or atomic is pending.
		 */
		goto done;
	}
	qp->s_len -= len;
	qp->s_hdrwords = hwords;
	qp->s_cur_sge = ss;
	qp->s_cur_size = len;
	*bth0p = bth0 | (qp->s_state << 24);
	*bth2p = bth2;
	return 1;

done:
	return 0;
}
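/*
 * Editorial note on the BTH words returned above: bth0 combines the
 * per-packet flags with the opcode in bits 31:24, and bth2 carries the
 * 24-bit PSN with bit 31 used as the "request ACK" bit.  For example,
 * OP(SEND_ONLY) (opcode 0x04) at PSN 0x10 with an ACK requested yields
 * bth2 == 0x80000010.
 */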
/**
 * send_rc_ack - Construct an ACK packet and send it
 * @qp: a pointer to the QP
 *
 * This is called from ipath_rc_rcv() and only uses the receive
 * side QP state.
 * Note that RDMA reads are handled in the send side QP state and tasklet.
 */
static void send_rc_ack(struct ipath_qp *qp)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	u16 lrh0;
	u32 bth0;
	u32 hwords;
	struct ipath_ib_header hdr;
	struct ipath_other_headers *ohdr;

	/* Construct the header. */
	ohdr = &hdr.u.oth;
	lrh0 = IPS_LRH_BTH;
	/* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */
	hwords = 6;
	if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) {
		hwords += ipath_make_grh(dev, &hdr.u.l.grh,
					 &qp->remote_ah_attr.grh,
					 hwords, 0);
		ohdr = &hdr.u.l.oth;
		lrh0 = IPS_LRH_GRH;
	}
	bth0 = ipath_layer_get_pkey(dev->dd, qp->s_pkey_index);
	ohdr->u.aeth = ipath_compute_aeth(qp);
	if (qp->s_ack_state >= OP(COMPARE_SWAP)) {
		bth0 |= IB_OPCODE_ATOMIC_ACKNOWLEDGE << 24;
		ohdr->u.at.atomic_ack_eth = cpu_to_be64(qp->s_ack_atomic);
		hwords += sizeof(ohdr->u.at.atomic_ack_eth) / 4;
	} else
		bth0 |= OP(ACKNOWLEDGE) << 24;
	lrh0 |= qp->remote_ah_attr.sl << 4;
	hdr.lrh[0] = cpu_to_be16(lrh0);
	hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid);
	hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC);
	hdr.lrh[3] = cpu_to_be16(ipath_layer_get_lid(dev->dd));
	ohdr->bth[0] = cpu_to_be32(bth0);
	ohdr->bth[1] = cpu_to_be32(qp->remote_qpn);
	ohdr->bth[2] = cpu_to_be32(qp->s_ack_psn & IPS_PSN_MASK);

	/*
	 * If we can send the ACK, clear the ACK state.
	 */
	if (ipath_verbs_send(dev->dd, hwords, (u32 *) &hdr, 0, NULL) == 0) {
		qp->s_ack_state = OP(ACKNOWLEDGE);
		dev->n_unicast_xmit++;
	}
}
/**
 * ipath_restart_rc - back up requester to resend the last un-ACKed request
 * @qp: the QP to restart
 * @psn: packet sequence number for the request
 * @wc: the work completion request
 *
 * The QP s_lock should be held.
 */
void ipath_restart_rc(struct ipath_qp *qp, u32 psn, struct ib_wc *wc)
{
	struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
	struct ipath_ibdev *dev;
	u32 n;

	/*
	 * If there are no requests pending, we are done.
	 */
	if (ipath_cmp24(psn, qp->s_next_psn) >= 0 ||
	    qp->s_last == qp->s_tail)
		goto done;

	if (qp->s_retry == 0) {
		wc->wr_id = wqe->wr.wr_id;
		wc->status = IB_WC_RETRY_EXC_ERR;
		wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
		wc->vendor_err = 0;
		wc->byte_len = 0;
		wc->qp_num = qp->ibqp.qp_num;
		wc->src_qp = qp->remote_qpn;
		wc->pkey_index = 0;
		wc->slid = qp->remote_ah_attr.dlid;
		wc->sl = qp->remote_ah_attr.sl;
		wc->dlid_path_bits = 0;
		wc->port_num = 0;
		ipath_sqerror_qp(qp, wc);
		goto bail;
	}
	qp->s_retry--;

	/*
	 * Remove the QP from the timeout queue.
	 * Note: it may already have been removed by ipath_ib_timer().
	 */
	dev = to_idev(qp->ibqp.device);
	spin_lock(&dev->pending_lock);
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	spin_unlock(&dev->pending_lock);

	if (wqe->wr.opcode == IB_WR_RDMA_READ)
		dev->n_rc_resends++;
	else
		dev->n_rc_resends += (int)qp->s_psn - (int)psn;

	/*
	 * If we are starting the request from the beginning, let the normal
	 * send code handle initialization.
	 */
	qp->s_cur = qp->s_last;
	if (ipath_cmp24(psn, wqe->psn) <= 0) {
		qp->s_state = OP(SEND_LAST);
		qp->s_psn = wqe->psn;
	} else {
		n = qp->s_cur;
		for (;;) {
			if (++n == qp->s_size)
				n = 0;
			if (n == qp->s_tail) {
				if (ipath_cmp24(psn, qp->s_next_psn) >= 0) {
					qp->s_cur = n;
					wqe = get_swqe_ptr(qp, n);
				}
				break;
			}
			wqe = get_swqe_ptr(qp, n);
			if (ipath_cmp24(psn, wqe->psn) < 0)
				break;
			qp->s_cur = n;
		}
		qp->s_psn = psn;

		/*
		 * Reset the state to restart in the middle of a request.
		 * Don't change the s_sge, s_cur_sge, or s_cur_size.
		 * See ipath_do_rc_send().
		 */
		switch (wqe->wr.opcode) {
		case IB_WR_SEND:
		case IB_WR_SEND_WITH_IMM:
			qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
			break;

		case IB_WR_RDMA_WRITE:
		case IB_WR_RDMA_WRITE_WITH_IMM:
			qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
			break;

		case IB_WR_RDMA_READ:
			qp->s_state =
				OP(RDMA_READ_RESPONSE_MIDDLE);
			break;

		default:
			/*
			 * This case shouldn't happen since only the
			 * opcodes above are posted on an RC send queue.
			 */
			qp->s_state = OP(SEND_LAST);
		}
	}

done:
	tasklet_hi_schedule(&qp->s_task);

bail:
	return;
}
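/*
 * The restart logic above leans on ipath_cmp24(), defined elsewhere in
 * this driver, to compare PSNs in a 24-bit circular space.  A sketch of
 * an equivalent comparison follows (illustrative only; example_cmp24 is
 * not a driver symbol):
 */
#if 0	/* editorial illustration, not compiled */
static inline int example_cmp24(u32 a, u32 b)
{
	/*
	 * Sign-extend the 24-bit difference so that PSN 0x000001
	 * compares as "after" 0xffffff across the wrap.
	 */
	return ((int)((a - b) << 8)) >> 8;
}
#endif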
/**
 * reset_psn - reset the QP state to send starting from PSN
 * @qp: the QP
 * @psn: the packet sequence number to restart at
 *
 * This is called from ipath_rc_rcv_resp() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 */
static void reset_psn(struct ipath_qp *qp, u32 psn)
{
	struct ipath_swqe *wqe;
	u32 n;

	n = qp->s_cur;
	wqe = get_swqe_ptr(qp, n);
	for (;;) {
		if (++n == qp->s_size)
			n = 0;
		if (n == qp->s_tail) {
			if (ipath_cmp24(psn, qp->s_next_psn) >= 0) {
				qp->s_cur = n;
				wqe = get_swqe_ptr(qp, n);
			}
			break;
		}
		wqe = get_swqe_ptr(qp, n);
		if (ipath_cmp24(psn, wqe->psn) < 0)
			break;
		qp->s_cur = n;
	}
	qp->s_psn = psn;

	/*
	 * Set the state to restart in the middle of a
	 * request.  Don't change the s_sge, s_cur_sge, or
	 * s_cur_size.  See ipath_do_rc_send().
	 */
	switch (wqe->wr.opcode) {
	case IB_WR_SEND:
	case IB_WR_SEND_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_FIRST);
		break;

	case IB_WR_RDMA_WRITE:
	case IB_WR_RDMA_WRITE_WITH_IMM:
		qp->s_state = OP(RDMA_READ_RESPONSE_LAST);
		break;

	case IB_WR_RDMA_READ:
		qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE);
		break;

	default:
		/*
		 * This case shouldn't happen since only the
		 * opcodes above are posted on an RC send queue.
		 */
		qp->s_state = OP(SEND_LAST);
	}
}
/**
 * do_rc_ack - process an incoming RC ACK
 * @qp: the QP the ACK came in on
 * @aeth: the AETH carried in the ACK
 * @psn: the packet sequence number of the ACK
 * @opcode: the opcode of the request that resulted in the ACK
 *
 * This is called from ipath_rc_rcv() to process an incoming RC ACK
 * for the given QP.
 * Called at interrupt level with the QP s_lock held.
 * Returns 1 if OK, 0 if current operation should be aborted (NAK).
 */
static int do_rc_ack(struct ipath_qp *qp, u32 aeth, u32 psn, int opcode)
{
	struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
	struct ib_wc wc;
	struct ipath_swqe *wqe;
	int ret = 0;

	/*
	 * Remove the QP from the timeout queue (or RNR timeout queue).
	 * If ipath_ib_timer() has already removed it,
	 * it's OK since we hold the QP s_lock and ipath_restart_rc()
	 * just won't find anything to restart if we ACK everything.
	 */
	spin_lock(&dev->pending_lock);
	if (!list_empty(&qp->timerwait))
		list_del_init(&qp->timerwait);
	spin_unlock(&dev->pending_lock);

	/*
	 * Note that NAKs implicitly ACK outstanding SEND and RDMA write
	 * requests and implicitly NAK RDMA read and atomic requests issued
	 * before the NAK'ed request.  The MSN won't include the NAK'ed
	 * request but will include the ACK'ed request(s).
	 */
	wqe = get_swqe_ptr(qp, qp->s_last);

	/* Nothing is pending to ACK/NAK. */
	if (qp->s_last == qp->s_tail)
		goto bail;

	/*
	 * The MSN might be for a later WQE than the PSN indicates so
	 * only complete WQEs that the PSN finishes.
	 */
	while (ipath_cmp24(psn, wqe->lpsn) >= 0) {
		/* If we are ACKing a WQE, the MSN should be >= the SSN. */
		if (ipath_cmp24(aeth, wqe->ssn) < 0)
			break;
		/*
		 * If this request is an RDMA read or atomic, and the ACK is
		 * for a later operation, this ACK NAKs the RDMA read or
		 * atomic.  In other words, only a RDMA_READ_LAST or ONLY
		 * can ACK an RDMA read and likewise for atomic ops.  Note
		 * that the NAK case can only happen if relaxed ordering is
		 * used and requests are sent after an RDMA read or atomic
		 * is sent but before the response is received.
		 */
		if ((wqe->wr.opcode == IB_WR_RDMA_READ &&
		     opcode != OP(RDMA_READ_RESPONSE_LAST)) ||
		    ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP ||
		      wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) &&
		     (opcode != OP(ATOMIC_ACKNOWLEDGE) ||
		      ipath_cmp24(wqe->psn, psn) != 0))) {
			/*
			 * The last valid PSN seen is the previous
			 * request's.
			 */
			qp->s_last_psn = wqe->psn - 1;
			/* Retry this request. */
			ipath_restart_rc(qp, wqe->psn, &wc);
			/*
			 * No need to process the ACK/NAK since we are
			 * restarting an earlier request.
			 */
			goto bail;
		}
		/* Post a send completion queue entry if requested. */
		if (!test_bit(IPATH_S_SIGNAL_REQ_WR, &qp->s_flags) ||
		    (wqe->wr.send_flags & IB_SEND_SIGNALED)) {
			wc.wr_id = wqe->wr.wr_id;
			wc.status = IB_WC_SUCCESS;
			wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
			wc.vendor_err = 0;
			wc.byte_len = wqe->length;
			wc.qp_num = qp->ibqp.qp_num;
			wc.src_qp = qp->remote_qpn;
			wc.pkey_index = 0;
			wc.slid = qp->remote_ah_attr.dlid;
			wc.sl = qp->remote_ah_attr.sl;
			wc.dlid_path_bits = 0;
			wc.port_num = 0;
			ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0);
		}
		qp->s_retry = qp->s_retry_cnt;
		/*
		 * If we are completing a request which is in the process of
		 * being resent, we can stop resending it since we know the
		 * responder has already seen it.
		 */
		if (qp->s_last == qp->s_cur) {
			if (++qp->s_cur >= qp->s_size)
				qp->s_cur = 0;
			wqe = get_swqe_ptr(qp, qp->s_cur);
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = wqe->psn;
		}
		if (++qp->s_last >= qp->s_size)
			qp->s_last = 0;
		wqe = get_swqe_ptr(qp, qp->s_last);
		if (qp->s_last == qp->s_tail)
			break;
	}
	switch (aeth >> 29) {
	case 0:		/* ACK */
		/* If this is a partial ACK, reset the retransmit timer. */
		if (qp->s_last != qp->s_tail) {
			spin_lock(&dev->pending_lock);
			list_add_tail(&qp->timerwait,
				      &dev->pending[dev->pending_index]);
			spin_unlock(&dev->pending_lock);
		}
		ipath_get_credit(qp, aeth);
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		qp->s_retry = qp->s_retry_cnt;
		qp->s_last_psn = psn;
		ret = 1;
		goto bail;

	case 1:		/* RNR NAK */
		if (qp->s_rnr_retry == 0) {
			if (qp->s_last == qp->s_tail)
				goto bail;

			wc.status = IB_WC_RNR_RETRY_EXC_ERR;
			goto class_b;
		}
		if (qp->s_rnr_retry_cnt < 7)
			qp->s_rnr_retry--;
		if (qp->s_last == qp->s_tail)
			goto bail;

		/* The last valid PSN seen is the previous request's. */
		qp->s_last_psn = wqe->psn - 1;

		dev->n_rc_resends += (int)qp->s_psn - (int)psn;

		/*
		 * If we are starting the request from the beginning, let
		 * the normal send code handle initialization.
		 */
		qp->s_cur = qp->s_last;
		wqe = get_swqe_ptr(qp, qp->s_cur);
		if (ipath_cmp24(psn, wqe->psn) <= 0) {
			qp->s_state = OP(SEND_LAST);
			qp->s_psn = wqe->psn;
		} else
			reset_psn(qp, psn);

		qp->s_rnr_timeout =
			ib_ipath_rnr_table[(aeth >> IPS_AETH_CREDIT_SHIFT) &
					   IPS_AETH_CREDIT_MASK];
		ipath_insert_rnr_queue(qp);
		goto bail;

	case 3:		/* NAK */
		/* The last valid PSN seen is the previous request's. */
		if (qp->s_last != qp->s_tail)
			qp->s_last_psn = wqe->psn - 1;
		switch ((aeth >> IPS_AETH_CREDIT_SHIFT) &
			IPS_AETH_CREDIT_MASK) {
		case 0:	/* PSN sequence error */
			/*
			 * Back up to the responder's expected PSN.  XXX
			 * Note that we might get a NAK in the middle of an
			 * RDMA READ response which terminates the RDMA
			 * READ.
			 */
			if (qp->s_last == qp->s_tail)
				break;

			if (ipath_cmp24(psn, wqe->psn) < 0)
				break;

			/* Retry the request. */
			ipath_restart_rc(qp, psn, &wc);
			break;

		case 1:	/* Invalid Request */
			wc.status = IB_WC_REM_INV_REQ_ERR;
			goto class_b;

		case 2:	/* Remote Access Error */
			wc.status = IB_WC_REM_ACCESS_ERR;
			goto class_b;

		case 3:	/* Remote Operation Error */
			wc.status = IB_WC_REM_OP_ERR;
		class_b:
			wc.wr_id = wqe->wr.wr_id;
			wc.opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
			wc.vendor_err = 0;
			wc.byte_len = 0;
			wc.qp_num = qp->ibqp.qp_num;
			wc.src_qp = qp->remote_qpn;
			wc.pkey_index = 0;
			wc.slid = qp->remote_ah_attr.dlid;
			wc.sl = qp->remote_ah_attr.sl;
			wc.dlid_path_bits = 0;
			wc.port_num = 0;
			ipath_sqerror_qp(qp, &wc);
			break;

		default:
			/* Ignore other reserved NAK error codes */
			goto reserved;
		}
		qp->s_rnr_retry = qp->s_rnr_retry_cnt;
		goto bail;

	default:		/* 2: reserved */
	reserved:
		/* Ignore reserved NAK codes. */
		goto bail;
	}

bail:
	return ret;
}
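/*
 * Editorial note on the AETH decoding in do_rc_ack() above (per the
 * InfiniBand spec): bits 31:29 of the AETH select ACK (0), RNR NAK (1),
 * or NAK (3); bits 28:24 (IPS_AETH_CREDIT_SHIFT/MASK) carry the credit
 * count, RNR timer index, or NAK code; and bits 23:0 carry the MSN.
 */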
/**
 * ipath_rc_rcv_resp - process an incoming RC response packet
 * @dev: the device this packet came in on
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @hdrsize: the header length
 * @pmtu: the path MTU
 * @header_in_data: true if part of the header data is in the data buffer
 *
 * This is called from ipath_rc_rcv() to process an incoming RC response
 * packet for the given QP.
 * Called at interrupt level.
 */
static inline void ipath_rc_rcv_resp(struct ipath_ibdev *dev,
				     struct ipath_other_headers *ohdr,
				     void *data, u32 tlen,
				     struct ipath_qp *qp,
				     u32 opcode,
				     u32 psn, u32 hdrsize, u32 pmtu,
				     int header_in_data)
{
	unsigned long flags;
	struct ib_wc wc;
	int diff;
	u32 pad;
	u32 aeth;

	spin_lock_irqsave(&qp->s_lock, flags);

	/* Ignore invalid responses. */
	if (ipath_cmp24(psn, qp->s_next_psn) >= 0)
		goto ack_done;

	/* Ignore duplicate responses. */
	diff = ipath_cmp24(psn, qp->s_last_psn);
	if (unlikely(diff <= 0)) {
		/* Update credits for "ghost" ACKs */
		if (diff == 0 && opcode == OP(ACKNOWLEDGE)) {
			if (!header_in_data)
				aeth = be32_to_cpu(ohdr->u.aeth);
			else {
				aeth = be32_to_cpu(((__be32 *) data)[0]);
				data += sizeof(__be32);
			}
			if ((aeth >> 29) == 0)
				ipath_get_credit(qp, aeth);
		}
		goto ack_done;
	}

	switch (opcode) {
	case OP(ACKNOWLEDGE):
	case OP(ATOMIC_ACKNOWLEDGE):
	case OP(RDMA_READ_RESPONSE_FIRST):
		if (!header_in_data)
			aeth = be32_to_cpu(ohdr->u.aeth);
		else {
			aeth = be32_to_cpu(((__be32 *) data)[0]);
			data += sizeof(__be32);
		}
		if (opcode == OP(ATOMIC_ACKNOWLEDGE))
			*(u64 *) qp->s_sge.sge.vaddr = *(u64 *) data;
		if (!do_rc_ack(qp, aeth, psn, opcode) ||
		    opcode != OP(RDMA_READ_RESPONSE_FIRST))
			goto ack_done;
		hdrsize += 4;
		/*
		 * do_rc_ack() has already checked the PSN so skip
		 * the sequence check.
		 */
		goto rdma_read;

	case OP(RDMA_READ_RESPONSE_MIDDLE):
		/* no AETH, no ACK */
		if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
			ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
			goto ack_done;
		}
	rdma_read:
		if (unlikely(qp->s_state != OP(RDMA_READ_REQUEST)))
			goto ack_done;
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto ack_done;
		if (unlikely(pmtu >= qp->s_len))
			goto ack_done;
		/* We got a response so update the timeout. */
		if (unlikely(qp->s_last == qp->s_tail ||
			     get_swqe_ptr(qp, qp->s_last)->wr.opcode !=
			     IB_WR_RDMA_READ))
			goto ack_done;
		spin_lock(&dev->pending_lock);
		if (qp->s_rnr_timeout == 0 && !list_empty(&qp->timerwait))
			list_move_tail(&qp->timerwait,
				       &dev->pending[dev->pending_index]);
		spin_unlock(&dev->pending_lock);
		/*
		 * Update the RDMA receive state but do the copy w/o holding the
		 * locks and blocking interrupts.  XXX Yet another place that
		 * affects relaxed RDMA order since we don't want s_sge modified.
		 */
		qp->s_len -= pmtu;
		qp->s_last_psn = psn;
		spin_unlock_irqrestore(&qp->s_lock, flags);
		ipath_copy_sge(&qp->s_sge, data, pmtu);
		goto bail;
	case OP(RDMA_READ_RESPONSE_LAST):
		/* ACKs READ req. */
		if (unlikely(ipath_cmp24(psn, qp->s_last_psn + 1))) {
			ipath_restart_rc(qp, qp->s_last_psn + 1, &wc);
			goto ack_done;
		}
		/* FALLTHROUGH */
	case OP(RDMA_READ_RESPONSE_ONLY):
		if (unlikely(qp->s_state != OP(RDMA_READ_REQUEST)))
			goto ack_done;
		/*
		 * Get the number of bytes the message was padded by.
		 */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/*
		 * Check that the data size is >= 1 && <= pmtu.
		 * Remember to account for the AETH header (4) and
		 * ICRC (4).
		 */
		if (unlikely(tlen <= (hdrsize + pad + 8))) {
			/* XXX Need to generate an error CQ entry. */
			goto ack_done;
		}
		tlen -= hdrsize + pad + 8;
		if (unlikely(tlen != qp->s_len)) {
			/* XXX Need to generate an error CQ entry. */
			goto ack_done;
		}
		if (!header_in_data)
			aeth = be32_to_cpu(ohdr->u.aeth);
		else {
			aeth = be32_to_cpu(((__be32 *) data)[0]);
			data += sizeof(__be32);
		}
		ipath_copy_sge(&qp->s_sge, data, tlen);
		if (do_rc_ack(qp, aeth, psn, OP(RDMA_READ_RESPONSE_LAST))) {
			/*
			 * Change the state so we continue
			 * processing new requests.
			 */
			qp->s_state = OP(SEND_LAST);
		}
		goto ack_done;
	}

ack_done:
	spin_unlock_irqrestore(&qp->s_lock, flags);
bail:
	return;
}
/**
 * ipath_rc_rcv_error - process an incoming duplicate or error RC packet
 * @dev: the device this packet came in on
 * @ohdr: the other headers for this packet
 * @data: the packet data
 * @qp: the QP for this packet
 * @opcode: the opcode for this packet
 * @psn: the packet sequence number for this packet
 * @diff: the difference between the PSN and the expected PSN
 * @header_in_data: true if part of the header data is in the data buffer
 *
 * This is called from ipath_rc_rcv() to process an unexpected
 * incoming RC packet for the given QP.
 * Called at interrupt level.
 * Return 1 if no more processing is needed; otherwise return 0 to
 * schedule a response to be sent and the s_lock unlocked.
 */
static inline int ipath_rc_rcv_error(struct ipath_ibdev *dev,
				     struct ipath_other_headers *ohdr,
				     void *data,
				     struct ipath_qp *qp,
				     u32 opcode,
				     u32 psn,
				     int diff,
				     int header_in_data)
{
	struct ib_reth *reth;

	if (diff > 0) {
		/*
		 * Packet sequence error.
		 * A NAK will ACK earlier sends and RDMA writes.
		 * Don't queue the NAK if an RDMA read, atomic, or
		 * NAK is pending though.
		 */
		spin_lock(&qp->s_lock);
		if ((qp->s_ack_state >= OP(RDMA_READ_REQUEST) &&
		     qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE) ||
		    qp->s_nak_state != 0) {
			spin_unlock(&qp->s_lock);
			goto done;
		}
		qp->s_ack_state = OP(SEND_ONLY);
		qp->s_nak_state = IB_NAK_PSN_ERROR;
		/* Use the expected PSN. */
		qp->s_ack_psn = qp->r_psn;
		goto resched;
	}
	/*
	 * Handle a duplicate request.  Don't re-execute SEND, RDMA
	 * write or atomic op.  Don't NAK errors, just silently drop
	 * the duplicate request.  Note that r_sge, r_len, and
	 * r_rcv_len may be in use so don't modify them.
	 *
	 * We are supposed to ACK the earliest duplicate PSN but we
	 * can coalesce an outstanding duplicate ACK.  We have to
	 * send the earliest so that RDMA reads can be restarted at
	 * the requester's expected PSN.
	 */
	spin_lock(&qp->s_lock);
	if (qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE &&
	    ipath_cmp24(psn, qp->s_ack_psn) >= 0) {
		if (qp->s_ack_state < IB_OPCODE_RDMA_READ_REQUEST)
			qp->s_ack_psn = psn;
		spin_unlock(&qp->s_lock);
		goto done;
	}
	switch (opcode) {
	case OP(RDMA_READ_REQUEST):
		/*
		 * We have to be careful to not change s_rdma_sge
		 * while ipath_do_rc_send() is using it and not
		 * holding the s_lock.
		 */
		if (qp->s_ack_state != OP(ACKNOWLEDGE) &&
		    qp->s_ack_state >= IB_OPCODE_RDMA_READ_REQUEST) {
			spin_unlock(&qp->s_lock);
			dev->n_rdma_dup_busy++;
			goto done;
		}
		/* RETH comes after BTH */
		if (!header_in_data)
			reth = &ohdr->u.rc.reth;
		else {
			reth = (struct ib_reth *)data;
			data += sizeof(*reth);
		}
		qp->s_rdma_len = be32_to_cpu(reth->length);
		if (qp->s_rdma_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/*
			 * Address range must be a subset of the original
			 * request and start on pmtu boundaries.
			 */
			ok = ipath_rkey_ok(dev, &qp->s_rdma_sge,
					   qp->s_rdma_len, vaddr, rkey,
					   IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok))
				goto done;
		} else {
			qp->s_rdma_sge.sg_list = NULL;
			qp->s_rdma_sge.num_sge = 0;
			qp->s_rdma_sge.sge.mr = NULL;
			qp->s_rdma_sge.sge.vaddr = NULL;
			qp->s_rdma_sge.sge.length = 0;
			qp->s_rdma_sge.sge.sge_length = 0;
		}
		break;

	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD):
		/*
		 * Check for the PSN of the last atomic operation
		 * performed and resend the result if found.
		 */
		if ((psn & IPS_PSN_MASK) != qp->r_atomic_psn) {
			spin_unlock(&qp->s_lock);
			goto done;
		}
		qp->s_ack_atomic = qp->r_atomic_data;
		break;
	}
	qp->s_ack_state = opcode;
	qp->s_nak_state = 0;
	qp->s_ack_psn = psn;
resched:
	return 0;

done:
	return 1;
}
/**
 * ipath_rc_rcv - process an incoming RC packet
 * @dev: the device this packet came in on
 * @hdr: the header of this packet
 * @has_grh: true if the header has a GRH
 * @data: the packet data
 * @tlen: the packet length
 * @qp: the QP for this packet
 *
 * This is called from ipath_qp_rcv() to process an incoming RC packet
 * for the given QP.
 * Called at interrupt level.
 */
void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
		  int has_grh, void *data, u32 tlen, struct ipath_qp *qp)
{
	struct ipath_other_headers *ohdr;
	u32 opcode;
	u32 hdrsize;
	u32 psn;
	u32 pad;
	unsigned long flags;
	struct ib_wc wc;
	u32 pmtu = ib_mtu_enum_to_int(qp->path_mtu);
	int diff;
	struct ib_reth *reth;
	int header_in_data;

	/* Check for GRH */
	if (!has_grh) {
		ohdr = &hdr->u.oth;
		hdrsize = 8 + 12;	/* LRH + BTH */
		psn = be32_to_cpu(ohdr->bth[2]);
		header_in_data = 0;
	} else {
		ohdr = &hdr->u.l.oth;
		hdrsize = 8 + 40 + 12;	/* LRH + GRH + BTH */
		/*
		 * The header with GRH is 60 bytes and the core driver sets
		 * the eager header buffer size to 56 bytes so the last 4
		 * bytes of the BTH header (PSN) is in the data buffer.
		 */
		header_in_data =
			ipath_layer_get_rcvhdrentsize(dev->dd) == 16;
		if (header_in_data) {
			psn = be32_to_cpu(((__be32 *) data)[0]);
			data += sizeof(__be32);
		} else
			psn = be32_to_cpu(ohdr->bth[2]);
	}

	/*
	 * Process responses (ACKs) before anything else.  Note that the
	 * packet sequence number will be for something in the send work
	 * queue rather than the expected receive packet sequence number.
	 * In other words, this QP is the requester.
	 */
	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) &&
	    opcode <= OP(ATOMIC_ACKNOWLEDGE)) {
		ipath_rc_rcv_resp(dev, ohdr, data, tlen, qp, opcode, psn,
				  hdrsize, pmtu, header_in_data);
		return;
	}
	spin_lock_irqsave(&qp->r_rq.lock, flags);

	/* Compute 24 bits worth of difference. */
	diff = ipath_cmp24(psn, qp->r_psn);
	if (unlikely(diff)) {
		if (ipath_rc_rcv_error(dev, ohdr, data, qp, opcode,
				       psn, diff, header_in_data))
			goto done;
		goto resched;
	}

	/* Check for opcode sequence errors. */
	switch (qp->r_state) {
	case OP(SEND_FIRST):
	case OP(SEND_MIDDLE):
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE))
			break;
	nack_inv:
		/*
		 * A NAK will ACK earlier sends and RDMA writes.  Don't queue the
		 * NAK if an RDMA read, atomic, or NAK is pending though.
		 */
		spin_lock(&qp->s_lock);
		if (qp->s_ack_state >= OP(RDMA_READ_REQUEST) &&
		    qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE) {
			spin_unlock(&qp->s_lock);
			goto done;
		}
		/* XXX Flush WQEs */
		qp->state = IB_QPS_ERR;
		qp->s_ack_state = OP(SEND_ONLY);
		qp->s_nak_state = IB_NAK_INVALID_REQUEST;
		qp->s_ack_psn = qp->r_psn;
		goto resched;

	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_MIDDLE):
		if (opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			break;
		goto nack_inv;

	case OP(RDMA_READ_REQUEST):
	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD):
		/*
		 * Drop all new requests until a response has been sent.  A
		 * new request then ACKs the RDMA response we sent.  Relaxed
		 * ordering would allow new requests to be processed but we
		 * would need to keep a queue of rwqe's for all that are in
		 * progress.  Note that we can't RNR NAK this request since
		 * the RDMA READ or atomic response is already queued to be
		 * sent (unless we implement a response send queue).
		 */
		goto done;

	default:
		if (opcode == OP(SEND_MIDDLE) ||
		    opcode == OP(SEND_LAST) ||
		    opcode == OP(SEND_LAST_WITH_IMMEDIATE) ||
		    opcode == OP(RDMA_WRITE_MIDDLE) ||
		    opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE))
			goto nack_inv;
		break;
	}

	wc.imm_data = 0;
	wc.wc_flags = 0;
	/* OK, process the packet. */
	switch (opcode) {
	case OP(SEND_FIRST):
		if (!ipath_get_rwqe(qp, 0)) {
		rnr_nak:
			/*
			 * An RNR NAK will ACK earlier sends and RDMA writes.
			 * Don't queue the NAK if an RDMA read or atomic
			 * is pending though.
			 */
			spin_lock(&qp->s_lock);
			if (qp->s_ack_state >=
			    OP(RDMA_READ_REQUEST) &&
			    qp->s_ack_state != IB_OPCODE_ACKNOWLEDGE) {
				spin_unlock(&qp->s_lock);
				goto done;
			}
			qp->s_ack_state = OP(SEND_ONLY);
			qp->s_nak_state = IB_RNR_NAK | qp->s_min_rnr_timer;
			qp->s_ack_psn = qp->r_psn;
			goto resched;
		}
		qp->r_rcv_len = 0;
		/* FALLTHROUGH */
	case OP(SEND_MIDDLE):
	case OP(RDMA_WRITE_MIDDLE):
	send_middle:
		/* Check for invalid length PMTU or posted rwqe len. */
		if (unlikely(tlen != (hdrsize + pmtu + 4)))
			goto nack_inv;
		qp->r_rcv_len += pmtu;
		if (unlikely(qp->r_rcv_len > qp->r_len))
			goto nack_inv;
		ipath_copy_sge(&qp->r_sge, data, pmtu);
		break;

	case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE):
		/* consume RWQE */
		if (!ipath_get_rwqe(qp, 1))
			goto rnr_nak;
		goto send_last_imm;

	case OP(SEND_ONLY):
	case OP(SEND_ONLY_WITH_IMMEDIATE):
		if (!ipath_get_rwqe(qp, 0))
			goto rnr_nak;
		qp->r_rcv_len = 0;
		if (opcode == OP(SEND_ONLY))
			goto send_last;
		/* FALLTHROUGH */
	case OP(SEND_LAST_WITH_IMMEDIATE):
	send_last_imm:
		if (header_in_data) {
			wc.imm_data = *(__be32 *) data;
			data += sizeof(__be32);
		} else {
			/* Immediate data comes after BTH */
			wc.imm_data = ohdr->u.imm_data;
		}
		hdrsize += 4;
		wc.wc_flags = IB_WC_WITH_IMM;
		/* FALLTHROUGH */
	case OP(SEND_LAST):
	case OP(RDMA_WRITE_LAST):
	send_last:
		/* Get the number of bytes the message was padded by. */
		pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3;
		/* Check for invalid length. */
		/* XXX LAST len should be >= 1 */
		if (unlikely(tlen < (hdrsize + pad + 4)))
			goto nack_inv;
		/* Don't count the CRC. */
		tlen -= (hdrsize + pad + 4);
		wc.byte_len = tlen + qp->r_rcv_len;
		if (unlikely(wc.byte_len > qp->r_len))
			goto nack_inv;
		ipath_copy_sge(&qp->r_sge, data, tlen);
		atomic_inc(&qp->msn);
		if (opcode == OP(RDMA_WRITE_LAST) ||
		    opcode == OP(RDMA_WRITE_ONLY))
			break;
		wc.wr_id = qp->r_wr_id;
		wc.status = IB_WC_SUCCESS;
		wc.opcode = IB_WC_RECV;
		wc.vendor_err = 0;
		wc.qp_num = qp->ibqp.qp_num;
		wc.src_qp = qp->remote_qpn;
		wc.pkey_index = 0;
		wc.slid = qp->remote_ah_attr.dlid;
		wc.sl = qp->remote_ah_attr.sl;
		wc.dlid_path_bits = 0;
		wc.port_num = 0;
		/* Signal completion event if the solicited bit is set. */
		ipath_cq_enter(to_icq(qp->ibqp.recv_cq), &wc,
			       (ohdr->bth[0] &
				__constant_cpu_to_be32(1 << 23)) != 0);
		break;
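	/*
	 * Editorial note: the bth[0] test above checks the BTH
	 * Solicited Event bit (bit 23), so a completion event is
	 * signaled only when the sender set IB_SEND_SOLICITED.
	 */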
	case OP(RDMA_WRITE_FIRST):
	case OP(RDMA_WRITE_ONLY):
	case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE):
		/* RETH comes after BTH */
		if (!header_in_data)
			reth = &ohdr->u.rc.reth;
		else {
			reth = (struct ib_reth *)data;
			data += sizeof(*reth);
		}
		hdrsize += sizeof(*reth);
		qp->r_len = be32_to_cpu(reth->length);
		qp->r_rcv_len = 0;
		if (qp->r_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey & NAK */
			ok = ipath_rkey_ok(dev, &qp->r_sge,
					   qp->r_len, vaddr, rkey,
					   IB_ACCESS_REMOTE_WRITE);
			if (unlikely(!ok)) {
			nack_acc:
				/*
				 * A NAK will ACK earlier sends and RDMA
				 * writes.  Don't queue the NAK if an RDMA
				 * read, atomic, or NAK is pending though.
				 */
				spin_lock(&qp->s_lock);
				if (qp->s_ack_state >=
				    OP(RDMA_READ_REQUEST) &&
				    qp->s_ack_state !=
				    IB_OPCODE_ACKNOWLEDGE) {
					spin_unlock(&qp->s_lock);
					goto done;
				}
				/* XXX Flush WQEs */
				qp->state = IB_QPS_ERR;
				qp->s_ack_state = OP(RDMA_WRITE_ONLY);
				qp->s_nak_state =
					IB_NAK_REMOTE_ACCESS_ERROR;
				qp->s_ack_psn = qp->r_psn;
				goto resched;
			}
		} else {
			qp->r_sge.sg_list = NULL;
			qp->r_sge.sge.mr = NULL;
			qp->r_sge.sge.vaddr = NULL;
			qp->r_sge.sge.length = 0;
			qp->r_sge.sge.sge_length = 0;
		}
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_WRITE)))
			goto nack_acc;
		if (opcode == OP(RDMA_WRITE_FIRST))
			goto send_middle;
		else if (opcode == OP(RDMA_WRITE_ONLY))
			goto send_last;
		if (!ipath_get_rwqe(qp, 1))
			goto rnr_nak;
		goto send_last_imm;
	case OP(RDMA_READ_REQUEST):
		/* RETH comes after BTH */
		if (!header_in_data)
			reth = &ohdr->u.rc.reth;
		else {
			reth = (struct ib_reth *)data;
			data += sizeof(*reth);
		}
		spin_lock(&qp->s_lock);
		if (qp->s_ack_state != OP(ACKNOWLEDGE) &&
		    qp->s_ack_state >= IB_OPCODE_RDMA_READ_REQUEST) {
			spin_unlock(&qp->s_lock);
			goto done;
		}
		qp->s_rdma_len = be32_to_cpu(reth->length);
		if (qp->s_rdma_len != 0) {
			u32 rkey = be32_to_cpu(reth->rkey);
			u64 vaddr = be64_to_cpu(reth->vaddr);
			int ok;

			/* Check rkey & NAK */
			ok = ipath_rkey_ok(dev, &qp->s_rdma_sge,
					   qp->s_rdma_len, vaddr, rkey,
					   IB_ACCESS_REMOTE_READ);
			if (unlikely(!ok)) {
				spin_unlock(&qp->s_lock);
				goto nack_acc;
			}
			/*
			 * Update the next expected PSN.  We add 1 later
			 * below, so only add the remainder here.
			 */
			if (qp->s_rdma_len > pmtu)
				qp->r_psn += (qp->s_rdma_len - 1) / pmtu;
		} else {
			qp->s_rdma_sge.sg_list = NULL;
			qp->s_rdma_sge.num_sge = 0;
			qp->s_rdma_sge.sge.mr = NULL;
			qp->s_rdma_sge.sge.vaddr = NULL;
			qp->s_rdma_sge.sge.length = 0;
			qp->s_rdma_sge.sge.sge_length = 0;
		}
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_READ)))
			goto nack_acc;
		/*
		 * We need to increment the MSN here instead of when we
		 * finish sending the result since a duplicate request would
		 * increment it more than once.
		 */
		atomic_inc(&qp->msn);
		qp->s_ack_state = opcode;
		qp->s_nak_state = 0;
		qp->s_ack_psn = psn;
		qp->r_psn++;
		qp->r_state = opcode;
		goto rdmadone;
	case OP(COMPARE_SWAP):
	case OP(FETCH_ADD): {
		struct ib_atomic_eth *ateth;
		u64 vaddr;
		u64 sdata;
		u32 rkey;

		if (!header_in_data)
			ateth = &ohdr->u.atomic_eth;
		else {
			ateth = (struct ib_atomic_eth *)data;
			data += sizeof(*ateth);
		}
		vaddr = be64_to_cpu(ateth->vaddr);
		if (unlikely(vaddr & (sizeof(u64) - 1)))
			goto nack_inv;
		rkey = be32_to_cpu(ateth->rkey);
		/* Check rkey & NAK */
		if (unlikely(!ipath_rkey_ok(dev, &qp->r_sge,
					    sizeof(u64), vaddr, rkey,
					    IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_acc;
		if (unlikely(!(qp->qp_access_flags &
			       IB_ACCESS_REMOTE_ATOMIC)))
			goto nack_acc;
		/* Perform atomic OP and save result. */
		sdata = be64_to_cpu(ateth->swap_data);
		spin_lock(&dev->pending_lock);
		qp->r_atomic_data = *(u64 *) qp->r_sge.sge.vaddr;
		if (opcode == OP(FETCH_ADD))
			*(u64 *) qp->r_sge.sge.vaddr =
				qp->r_atomic_data + sdata;
		else if (qp->r_atomic_data ==
			 be64_to_cpu(ateth->compare_data))
			*(u64 *) qp->r_sge.sge.vaddr = sdata;
		spin_unlock(&dev->pending_lock);
		atomic_inc(&qp->msn);
		qp->r_atomic_psn = psn & IPS_PSN_MASK;
		psn |= 1 << 31;
		break;
	}
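	/*
	 * Worked example of the atomic handling above (editorial): with
	 * target memory holding 5, swap_data 7, and compare_data 5, a
	 * FETCH_ADD leaves 12 in memory and returns 5 in the ACK, while
	 * a COMPARE_SWAP leaves 7 and also returns 5.  The original
	 * value is kept in qp->r_atomic_data so a duplicate request can
	 * be answered without re-executing the atomic.
	 */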
	default:
		/* Drop packet for unknown opcodes. */
		goto done;
	}
	qp->r_psn++;
	qp->r_state = opcode;
	/* Send an ACK if requested or required. */
	if (psn & (1 << 31)) {
		/*
		 * Coalesce ACKs unless there is an RDMA READ or
		 * ATOMIC pending.
		 */
		spin_lock(&qp->s_lock);
		if (qp->s_ack_state == OP(ACKNOWLEDGE) ||
		    qp->s_ack_state < IB_OPCODE_RDMA_READ_REQUEST) {
			qp->s_ack_state = opcode;
			qp->s_nak_state = 0;
			qp->s_ack_psn = psn;
			qp->s_ack_atomic = qp->r_atomic_data;
			goto resched;
		}
		spin_unlock(&qp->s_lock);
	}

done:
	spin_unlock_irqrestore(&qp->r_rq.lock, flags);
	return;

resched:
	/*
	 * Try to send ACK right away but not if ipath_do_rc_send() is
	 * active.
	 */
	if (qp->s_hdrwords == 0 &&
	    (qp->s_ack_state < IB_OPCODE_RDMA_READ_REQUEST ||
	     qp->s_ack_state >= IB_OPCODE_COMPARE_SWAP))
		send_rc_ack(qp);

rdmadone:
	spin_unlock(&qp->s_lock);
	spin_unlock_irqrestore(&qp->r_rq.lock, flags);

	/* Call ipath_do_rc_send() in another thread. */
	tasklet_hi_schedule(&qp->s_task);
}