/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2004 Voltaire, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_qp.c 1355 2004-12-17 15:23:43Z roland $
 */
#include <linux/init.h>
#include <linux/string.h>
#include <linux/slab.h>

#include <rdma/ib_verbs.h>
#include <rdma/ib_cache.h>
#include <rdma/ib_pack.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"
#include "mthca_wqe.h"
enum {
	MTHCA_MAX_DIRECT_QP_SIZE = 4 * PAGE_SIZE,
	MTHCA_ACK_REQ_FREQ       = 10,
	MTHCA_FLIGHT_LIMIT       = 9,
	MTHCA_UD_HEADER_SIZE     = 72, /* largest UD header possible */
	MTHCA_INLINE_HEADER_SIZE = 4,  /* data segment overhead for inline */
	MTHCA_INLINE_CHUNK_SIZE  = 16  /* inline data segment chunk */
};
enum {
	MTHCA_QP_STATE_RST      = 0,
	MTHCA_QP_STATE_INIT     = 1,
	MTHCA_QP_STATE_RTR      = 2,
	MTHCA_QP_STATE_RTS      = 3,
	MTHCA_QP_STATE_SQE      = 4,
	MTHCA_QP_STATE_SQD      = 5,
	MTHCA_QP_STATE_ERR      = 6,
	MTHCA_QP_STATE_DRAINING = 7
};
enum {
	MTHCA_QP_PM_MIGRATED = 0x3,
	MTHCA_QP_PM_ARMED    = 0x0,
	MTHCA_QP_PM_REARM    = 0x1
};
enum {
	/* qp_context flags */
	MTHCA_QP_BIT_DE  = 1 <<  8,

	/* params1 */
	MTHCA_QP_BIT_SRE = 1 << 15,
	MTHCA_QP_BIT_SWE = 1 << 14,
	MTHCA_QP_BIT_SAE = 1 << 13,
	MTHCA_QP_BIT_SIC = 1 <<  4,
	MTHCA_QP_BIT_SSC = 1 <<  3,

	/* params2 */
	MTHCA_QP_BIT_RRE = 1 << 15,
	MTHCA_QP_BIT_RWE = 1 << 14,
	MTHCA_QP_BIT_RAE = 1 << 13,
	MTHCA_QP_BIT_RIC = 1 <<  4,
	MTHCA_QP_BIT_RSC = 1 <<  3
};
struct mthca_qp_path {
	__be32 port_pkey;
	u8     rnr_retry;
	u8     g_mylmc;
	__be16 rlid;
	u8     ackto;
	u8     mgid_index;
	u8     static_rate;
	u8     hop_limit;
	__be32 sl_tclass_flowlabel;
	u8     rgid[16];
} __attribute__((packed));
struct mthca_qp_context {
	__be32 flags;
	__be32 tavor_sched_queue; /* Reserved on Arbel */
	u8     mtu_msgmax;
	u8     rq_size_stride;	/* Reserved on Tavor */
	u8     sq_size_stride;	/* Reserved on Tavor */
	u8     rlkey_arbel_sched_queue;	/* Reserved on Tavor */
	__be32 usr_page;
	__be32 local_qpn;
	__be32 remote_qpn;
	u32    reserved1[2];
	struct mthca_qp_path pri_path;
	struct mthca_qp_path alt_path;
	__be32 rdd;
	__be32 pd;
	__be32 wqe_base;
	__be32 wqe_lkey;
	__be32 params1;
	__be32 reserved2;
	__be32 next_send_psn;
	__be32 cqn_snd;
	__be32 snd_wqe_base_l;	/* Next send WQE on Tavor */
	__be32 snd_db_index;	/* (debugging only entries) */
	__be32 last_acked_psn;
	__be32 ssn;
	__be32 params2;
	__be32 rnr_nextrecvpsn;
	__be32 ra_buff_indx;
	__be32 cqn_rcv;
	__be32 rcv_wqe_base_l;	/* Next recv WQE on Tavor */
	__be32 rcv_db_index;	/* (debugging only entries) */
	__be32 qkey;
	__be32 srqn;
	__be32 rmsn;
	__be16 rq_wqe_counter;	/* reserved on Tavor */
	__be16 sq_wqe_counter;	/* reserved on Tavor */
	u32    reserved3[18];
} __attribute__((packed));
struct mthca_qp_param {
	__be32 opt_param_mask;
	u32    reserved1;
	struct mthca_qp_context context;
	u32    reserved2[62];
} __attribute__((packed));
enum {
	MTHCA_QP_OPTPAR_ALT_ADDR_PATH     = 1 << 0,
	MTHCA_QP_OPTPAR_RRE               = 1 << 1,
	MTHCA_QP_OPTPAR_RAE               = 1 << 2,
	MTHCA_QP_OPTPAR_RWE               = 1 << 3,
	MTHCA_QP_OPTPAR_PKEY_INDEX        = 1 << 4,
	MTHCA_QP_OPTPAR_Q_KEY             = 1 << 5,
	MTHCA_QP_OPTPAR_RNR_TIMEOUT       = 1 << 6,
	MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH = 1 << 7,
	MTHCA_QP_OPTPAR_SRA_MAX           = 1 << 8,
	MTHCA_QP_OPTPAR_RRA_MAX           = 1 << 9,
	MTHCA_QP_OPTPAR_PM_STATE          = 1 << 10,
	MTHCA_QP_OPTPAR_PORT_NUM          = 1 << 11,
	MTHCA_QP_OPTPAR_RETRY_COUNT       = 1 << 12,
	MTHCA_QP_OPTPAR_ALT_RNR_RETRY     = 1 << 13,
	MTHCA_QP_OPTPAR_ACK_TIMEOUT       = 1 << 14,
	MTHCA_QP_OPTPAR_RNR_RETRY         = 1 << 15,
	MTHCA_QP_OPTPAR_SCHED_QUEUE       = 1 << 16
};
static const u8 mthca_opcode[] = {
	[IB_WR_SEND]                 = MTHCA_OPCODE_SEND,
	[IB_WR_SEND_WITH_IMM]        = MTHCA_OPCODE_SEND_IMM,
	[IB_WR_RDMA_WRITE]           = MTHCA_OPCODE_RDMA_WRITE,
	[IB_WR_RDMA_WRITE_WITH_IMM]  = MTHCA_OPCODE_RDMA_WRITE_IMM,
	[IB_WR_RDMA_READ]            = MTHCA_OPCODE_RDMA_READ,
	[IB_WR_ATOMIC_CMP_AND_SWP]   = MTHCA_OPCODE_ATOMIC_CS,
	[IB_WR_ATOMIC_FETCH_AND_ADD] = MTHCA_OPCODE_ATOMIC_FA,
};
static int is_sqp(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 3;
}
static int is_qp0(struct mthca_dev *dev, struct mthca_qp *qp)
{
	return qp->qpn >= dev->qp_table.sqp_start &&
		qp->qpn <= dev->qp_table.sqp_start + 1;
}
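/*
 * A QP's work queues live in a single buffer: the receive queue comes
 * first, followed by the send queue at send_wqe_offset.  The buffer is
 * either one physically contiguous ("direct") chunk or a list of
 * pages, which is why the WQE lookups below each have two cases.
 */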
static void *get_recv_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + (n << qp->rq.wqe_shift);
	else
		return qp->queue.page_list[(n << qp->rq.wqe_shift) >> PAGE_SHIFT].buf +
			((n << qp->rq.wqe_shift) & (PAGE_SIZE - 1));
}
static void *get_send_wqe(struct mthca_qp *qp, int n)
{
	if (qp->is_direct)
		return qp->queue.direct.buf + qp->send_wqe_offset +
			(n << qp->sq.wqe_shift);
	else
		return qp->queue.page_list[(qp->send_wqe_offset +
					    (n << qp->sq.wqe_shift)) >>
					   PAGE_SHIFT].buf +
			((qp->send_wqe_offset + (n << qp->sq.wqe_shift)) &
			 (PAGE_SIZE - 1));
}
static void mthca_wq_init(struct mthca_wq *wq)
{
	spin_lock_init(&wq->lock);
	wq->next_ind  = 0;
	wq->last_comp = wq->max - 1;
	wq->head      = 0;
	wq->tail      = 0;
}
void mthca_qp_event(struct mthca_dev *dev, u32 qpn,
		    enum ib_event_type event_type)
{
	struct mthca_qp *qp;
	struct ib_event event;

	spin_lock(&dev->qp_table.lock);
	qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));
	if (qp)
		atomic_inc(&qp->refcount);
	spin_unlock(&dev->qp_table.lock);

	if (!qp) {
		mthca_warn(dev, "Async event for bogus QP %08x\n", qpn);
		return;
	}

	event.device      = &dev->ib_dev;
	event.event       = event_type;
	event.element.qp  = &qp->ibqp;
	if (qp->ibqp.event_handler)
		qp->ibqp.event_handler(&event, qp->ibqp.qp_context);

	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
}
static int to_mthca_state(enum ib_qp_state ib_state)
{
	switch (ib_state) {
	case IB_QPS_RESET: return MTHCA_QP_STATE_RST;
	case IB_QPS_INIT:  return MTHCA_QP_STATE_INIT;
	case IB_QPS_RTR:   return MTHCA_QP_STATE_RTR;
	case IB_QPS_RTS:   return MTHCA_QP_STATE_RTS;
	case IB_QPS_SQD:   return MTHCA_QP_STATE_SQD;
	case IB_QPS_SQE:   return MTHCA_QP_STATE_SQE;
	case IB_QPS_ERR:   return MTHCA_QP_STATE_ERR;
	default:           return -1;
	}
}
enum { RC, UC, UD, RD, RDEE, MLX, NUM_TRANS };
static int to_mthca_st(int transport)
{
	switch (transport) {
	case RC:  return MTHCA_QP_ST_RC;
	case UC:  return MTHCA_QP_ST_UC;
	case UD:  return MTHCA_QP_ST_UD;
	case RD:  return MTHCA_QP_ST_RD;
	case MLX: return MTHCA_QP_ST_MLX;
	default:  return -1;
	}
}
static const struct {
	int trans;
	u32 req_param[NUM_TRANS];
	u32 opt_param[NUM_TRANS];
} state_table[IB_QPS_ERR + 1][IB_QPS_ERR + 1] = {
	[IB_QPS_RESET] = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_INIT]  = {
			.trans = MTHCA_TRANS_RST2INIT,
			.req_param = {
				[UD]  = (IB_QP_PKEY_INDEX |
					 IB_QP_PORT       |
					 IB_QP_QKEY),
				[UC]  = (IB_QP_PKEY_INDEX |
					 IB_QP_PORT       |
					 IB_QP_ACCESS_FLAGS),
				[RC]  = (IB_QP_PKEY_INDEX |
					 IB_QP_PORT       |
					 IB_QP_ACCESS_FLAGS),
				[MLX] = (IB_QP_PKEY_INDEX |
					 IB_QP_QKEY),
			},
			/* bug-for-bug compatibility with VAPI: */
			.opt_param = {
				[MLX] = IB_QP_PORT
			}
		},
	},
	[IB_QPS_INIT]  = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_INIT]  = {
			.trans = MTHCA_TRANS_INIT2INIT,
			.opt_param = {
				[UD]  = (IB_QP_PKEY_INDEX |
					 IB_QP_PORT       |
					 IB_QP_QKEY),
				[UC]  = (IB_QP_PKEY_INDEX |
					 IB_QP_PORT       |
					 IB_QP_ACCESS_FLAGS),
				[RC]  = (IB_QP_PKEY_INDEX |
					 IB_QP_PORT       |
					 IB_QP_ACCESS_FLAGS),
				[MLX] = (IB_QP_PKEY_INDEX |
					 IB_QP_QKEY),
			}
		},
		[IB_QPS_RTR]   = {
			.trans = MTHCA_TRANS_INIT2RTR,
			.req_param = {
				[UC]  = (IB_QP_AV       |
					 IB_QP_PATH_MTU |
					 IB_QP_DEST_QPN |
					 IB_QP_RQ_PSN),
				[RC]  = (IB_QP_AV                 |
					 IB_QP_PATH_MTU           |
					 IB_QP_DEST_QPN           |
					 IB_QP_RQ_PSN             |
					 IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_MIN_RNR_TIMER),
			},
			.opt_param = {
				[UD]  = (IB_QP_PKEY_INDEX |
					 IB_QP_QKEY),
				[UC]  = (IB_QP_ALT_PATH     |
					 IB_QP_ACCESS_FLAGS |
					 IB_QP_PKEY_INDEX),
				[RC]  = (IB_QP_ALT_PATH     |
					 IB_QP_ACCESS_FLAGS |
					 IB_QP_PKEY_INDEX),
				[MLX] = (IB_QP_PKEY_INDEX |
					 IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTR]   = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_RTS]   = {
			.trans = MTHCA_TRANS_RTR2RTS,
			.req_param = {
				[UD]  = IB_QP_SQ_PSN,
				[UC]  = IB_QP_SQ_PSN,
				[RC]  = (IB_QP_TIMEOUT   |
					 IB_QP_RETRY_CNT |
					 IB_QP_RNR_RETRY |
					 IB_QP_SQ_PSN    |
					 IB_QP_MAX_QP_RD_ATOMIC),
				[MLX] = IB_QP_SQ_PSN,
			},
			.opt_param = {
				[UD]  = (IB_QP_CUR_STATE |
					 IB_QP_QKEY),
				[UC]  = (IB_QP_CUR_STATE    |
					 IB_QP_ALT_PATH     |
					 IB_QP_ACCESS_FLAGS |
					 IB_QP_PATH_MIG_STATE),
				[RC]  = (IB_QP_CUR_STATE     |
					 IB_QP_ALT_PATH      |
					 IB_QP_ACCESS_FLAGS  |
					 IB_QP_MIN_RNR_TIMER |
					 IB_QP_PATH_MIG_STATE),
				[MLX] = (IB_QP_CUR_STATE |
					 IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_RTS]   = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_RTS]   = {
			.trans = MTHCA_TRANS_RTS2RTS,
			.opt_param = {
				[UD]  = (IB_QP_CUR_STATE |
					 IB_QP_QKEY),
				[UC]  = (IB_QP_ACCESS_FLAGS |
					 IB_QP_ALT_PATH     |
					 IB_QP_PATH_MIG_STATE),
				[RC]  = (IB_QP_ACCESS_FLAGS   |
					 IB_QP_ALT_PATH       |
					 IB_QP_PATH_MIG_STATE |
					 IB_QP_MIN_RNR_TIMER),
				[MLX] = (IB_QP_CUR_STATE |
					 IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.trans = MTHCA_TRANS_RTS2SQD,
			.opt_param = {
				[UD]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[UC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[RC]  = IB_QP_EN_SQD_ASYNC_NOTIFY,
				[MLX] = IB_QP_EN_SQD_ASYNC_NOTIFY
			}
		},
	},
	[IB_QPS_SQD]   = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_RTS]   = {
			.trans = MTHCA_TRANS_SQD2RTS,
			.opt_param = {
				[UD]  = (IB_QP_CUR_STATE |
					 IB_QP_QKEY),
				[UC]  = (IB_QP_CUR_STATE    |
					 IB_QP_ALT_PATH     |
					 IB_QP_ACCESS_FLAGS |
					 IB_QP_PATH_MIG_STATE),
				[RC]  = (IB_QP_CUR_STATE     |
					 IB_QP_ALT_PATH      |
					 IB_QP_ACCESS_FLAGS  |
					 IB_QP_MIN_RNR_TIMER |
					 IB_QP_PATH_MIG_STATE),
				[MLX] = (IB_QP_CUR_STATE |
					 IB_QP_QKEY),
			}
		},
		[IB_QPS_SQD]   = {
			.trans = MTHCA_TRANS_SQD2SQD,
			.opt_param = {
				[UD]  = (IB_QP_PKEY_INDEX |
					 IB_QP_QKEY),
				[UC]  = (IB_QP_AV            |
					 IB_QP_CUR_STATE     |
					 IB_QP_ALT_PATH      |
					 IB_QP_ACCESS_FLAGS  |
					 IB_QP_PKEY_INDEX    |
					 IB_QP_PATH_MIG_STATE),
				[RC]  = (IB_QP_AV                 |
					 IB_QP_TIMEOUT            |
					 IB_QP_RETRY_CNT          |
					 IB_QP_RNR_RETRY          |
					 IB_QP_MAX_QP_RD_ATOMIC   |
					 IB_QP_MAX_DEST_RD_ATOMIC |
					 IB_QP_CUR_STATE          |
					 IB_QP_ALT_PATH           |
					 IB_QP_ACCESS_FLAGS       |
					 IB_QP_PKEY_INDEX         |
					 IB_QP_MIN_RNR_TIMER      |
					 IB_QP_PATH_MIG_STATE),
				[MLX] = (IB_QP_PKEY_INDEX |
					 IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_SQE]   = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR },
		[IB_QPS_RTS]   = {
			.trans = MTHCA_TRANS_SQERR2RTS,
			.opt_param = {
				[UD]  = (IB_QP_CUR_STATE |
					 IB_QP_QKEY),
				[UC]  = (IB_QP_CUR_STATE |
					 IB_QP_ACCESS_FLAGS),
				[MLX] = (IB_QP_CUR_STATE |
					 IB_QP_QKEY),
			}
		}
	},
	[IB_QPS_ERR] = {
		[IB_QPS_RESET] = { .trans = MTHCA_TRANS_ANY2RST },
		[IB_QPS_ERR]   = { .trans = MTHCA_TRANS_ANY2ERR }
	}
};
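/*
 * Example of how this table is used: for an RC QP going from RESET to
 * INIT, state_table[IB_QPS_RESET][IB_QPS_INIT].req_param[RC] says the
 * caller must supply IB_QP_PKEY_INDEX, IB_QP_PORT and
 * IB_QP_ACCESS_FLAGS.  mthca_modify_qp() rejects the transition if a
 * required attribute is missing, or if an attribute outside
 * req_param | opt_param is supplied.
 */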
static void store_attrs(struct mthca_sqp *sqp, struct ib_qp_attr *attr,
			int attr_mask)
{
	if (attr_mask & IB_QP_PKEY_INDEX)
		sqp->pkey_index = attr->pkey_index;
	if (attr_mask & IB_QP_QKEY)
		sqp->qkey = attr->qkey;
	if (attr_mask & IB_QP_SQ_PSN)
		sqp->send_psn = attr->sq_psn;
}
static void init_port(struct mthca_dev *dev, int port)
{
	int err;
	u8 status;
	struct mthca_init_ib_param param;

	memset(&param, 0, sizeof param);

	param.port_width = dev->limits.port_width_cap;
	param.vl_cap     = dev->limits.vl_cap;
	param.mtu_cap    = dev->limits.mtu_cap;
	param.gid_cap    = dev->limits.gid_table_len;
	param.pkey_cap   = dev->limits.pkey_table_len;

	err = mthca_INIT_IB(dev, &param, port, &status);
	if (err)
		mthca_warn(dev, "INIT_IB failed, return code %d.\n", err);
	if (status)
		mthca_warn(dev, "INIT_IB returned status %02x.\n", status);
}
static __be32 get_hw_access_flags(struct mthca_qp *qp, struct ib_qp_attr *attr,
				  int attr_mask)
{
	u8 dest_rd_atomic;
	u32 access_flags;
	u32 hw_access_flags = 0;

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
		dest_rd_atomic = attr->max_dest_rd_atomic;
	else
		dest_rd_atomic = qp->resp_depth;

	if (attr_mask & IB_QP_ACCESS_FLAGS)
		access_flags = attr->qp_access_flags;
	else
		access_flags = qp->atomic_rd_en;

	if (!dest_rd_atomic)
		access_flags &= IB_ACCESS_REMOTE_WRITE;

	if (access_flags & IB_ACCESS_REMOTE_READ)
		hw_access_flags |= MTHCA_QP_BIT_RRE;
	if (access_flags & IB_ACCESS_REMOTE_ATOMIC)
		hw_access_flags |= MTHCA_QP_BIT_RAE;
	if (access_flags & IB_ACCESS_REMOTE_WRITE)
		hw_access_flags |= MTHCA_QP_BIT_RWE;

	return cpu_to_be32(hw_access_flags);
}
static void mthca_path_set(struct ib_ah_attr *ah, struct mthca_qp_path *path)
{
	path->g_mylmc     = ah->src_path_bits & 0x7f;
	path->rlid        = cpu_to_be16(ah->dlid);
	path->static_rate = !!ah->static_rate;

	if (ah->ah_flags & IB_AH_GRH) {
		path->g_mylmc   |= 1 << 7;
		path->mgid_index = ah->grh.sgid_index;
		path->hop_limit  = ah->grh.hop_limit;
		path->sl_tclass_flowlabel =
			cpu_to_be32((ah->sl << 28)                |
				    (ah->grh.traffic_class << 20) |
				    (ah->grh.flow_label));
		memcpy(path->rgid, ah->grh.dgid.raw, 16);
	} else
		path->sl_tclass_flowlabel = cpu_to_be32(ah->sl << 28);
}
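/*
 * mthca_modify_qp() below works in three steps: validate the requested
 * transition against state_table, build the new QP context in a
 * firmware mailbox, and then execute the MODIFY_QP firmware command
 * that corresponds to the transition.
 */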
int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	enum ib_qp_state cur_state, new_state;
	struct mthca_mailbox *mailbox;
	struct mthca_qp_param *qp_param;
	struct mthca_qp_context *qp_context;
	u32 req_param, opt_param;
	u32 sqd_event = 0;
	u8 status;
	int err;

	if (attr_mask & IB_QP_CUR_STATE) {
		if (attr->cur_qp_state != IB_QPS_RTR &&
		    attr->cur_qp_state != IB_QPS_RTS &&
		    attr->cur_qp_state != IB_QPS_SQD &&
		    attr->cur_qp_state != IB_QPS_SQE)
			return -EINVAL;
		else
			cur_state = attr->cur_qp_state;
	} else {
		spin_lock_irq(&qp->sq.lock);
		spin_lock(&qp->rq.lock);
		cur_state = qp->state;
		spin_unlock(&qp->rq.lock);
		spin_unlock_irq(&qp->sq.lock);
	}

	if (attr_mask & IB_QP_STATE) {
		if (attr->qp_state < 0 || attr->qp_state > IB_QPS_ERR)
			return -EINVAL;
		new_state = attr->qp_state;
	} else
		new_state = cur_state;

	if (state_table[cur_state][new_state].trans == MTHCA_TRANS_INVALID) {
		mthca_dbg(dev, "Illegal QP transition "
			  "%d->%d\n", cur_state, new_state);
		return -EINVAL;
	}
	req_param = state_table[cur_state][new_state].req_param[qp->transport];
	opt_param = state_table[cur_state][new_state].opt_param[qp->transport];

	if ((req_param & attr_mask) != req_param) {
		mthca_dbg(dev, "QP transition "
			  "%d->%d missing req attr 0x%08x\n",
			  cur_state, new_state,
			  req_param & ~attr_mask);
		return -EINVAL;
	}

	if (attr_mask & ~(req_param | opt_param | IB_QP_STATE)) {
		mthca_dbg(dev, "QP transition (transport %d) "
			  "%d->%d has extra attr 0x%08x\n",
			  qp->transport,
			  cur_state, new_state,
			  attr_mask & ~(req_param | opt_param |
					IB_QP_STATE));
		return -EINVAL;
	}

	if ((attr_mask & IB_QP_PKEY_INDEX) &&
	     attr->pkey_index >= dev->limits.pkey_table_len) {
		mthca_dbg(dev, "PKey index (%u) too large. max is %d\n",
			  attr->pkey_index, dev->limits.pkey_table_len - 1);
		return -EINVAL;
	}

	if ((attr_mask & IB_QP_PORT) &&
	    (attr->port_num == 0 || attr->port_num > dev->limits.num_ports)) {
		mthca_dbg(dev, "Port number (%u) is invalid\n", attr->port_num);
		return -EINVAL;
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
	    attr->max_rd_atomic > dev->limits.max_qp_init_rdma) {
		mthca_dbg(dev, "Max rdma_atomic as initiator %u too large (max is %d)\n",
			  attr->max_rd_atomic, dev->limits.max_qp_init_rdma);
		return -EINVAL;
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
	    attr->max_dest_rd_atomic > 1 << dev->qp_table.rdb_shift) {
		mthca_dbg(dev, "Max rdma_atomic as responder %u too large (max %d)\n",
			  attr->max_dest_rd_atomic, 1 << dev->qp_table.rdb_shift);
		return -EINVAL;
	}
	mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	qp_param = mailbox->buf;
	qp_context = &qp_param->context;
	memset(qp_param, 0, sizeof *qp_param);

	qp_context->flags  = cpu_to_be32((to_mthca_state(new_state) << 28) |
					 (to_mthca_st(qp->transport) << 16));
	qp_context->flags |= cpu_to_be32(MTHCA_QP_BIT_DE);
	if (!(attr_mask & IB_QP_PATH_MIG_STATE))
		qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
	else {
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PM_STATE);
		switch (attr->path_mig_state) {
		case IB_MIG_MIGRATED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_MIGRATED << 11);
			break;
		case IB_MIG_REARM:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_REARM << 11);
			break;
		case IB_MIG_ARMED:
			qp_context->flags |= cpu_to_be32(MTHCA_QP_PM_ARMED << 11);
			break;
		}
	}

	/* leave tavor_sched_queue as 0 */

	if (qp->transport == MLX || qp->transport == UD)
		qp_context->mtu_msgmax = (IB_MTU_2048 << 5) | 11;
	else if (attr_mask & IB_QP_PATH_MTU)
		qp_context->mtu_msgmax = (attr->path_mtu << 5) | 31;
	if (mthca_is_memfree(dev)) {
		if (qp->rq.max)
			qp_context->rq_size_stride = long_log2(qp->rq.max) << 3;
		qp_context->rq_size_stride |= qp->rq.wqe_shift - 4;

		if (qp->sq.max)
			qp_context->sq_size_stride = long_log2(qp->sq.max) << 3;
		qp_context->sq_size_stride |= qp->sq.wqe_shift - 4;
	}

	/* leave arbel_sched_queue as 0 */

	if (qp->ibqp.uobject)
		qp_context->usr_page =
			cpu_to_be32(to_mucontext(qp->ibqp.uobject->context)->uar.index);
	else
		qp_context->usr_page = cpu_to_be32(dev->driver_uar.index);
	qp_context->local_qpn = cpu_to_be32(qp->qpn);
	if (attr_mask & IB_QP_DEST_QPN) {
		qp_context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
	}

	if (qp->transport == MLX)
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(to_msqp(qp)->port << 24);
	else {
		if (attr_mask & IB_QP_PORT) {
			qp_context->pri_path.port_pkey |=
				cpu_to_be32(attr->port_num << 24);
			qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PORT_NUM);
		}
	}

	if (attr_mask & IB_QP_PKEY_INDEX) {
		qp_context->pri_path.port_pkey |=
			cpu_to_be32(attr->pkey_index);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PKEY_INDEX);
	}

	if (attr_mask & IB_QP_RNR_RETRY) {
		qp_context->alt_path.rnr_retry = qp_context->pri_path.rnr_retry =
			attr->rnr_retry << 5;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_RETRY |
							MTHCA_QP_OPTPAR_ALT_RNR_RETRY);
	}
	if (attr_mask & IB_QP_AV) {
		mthca_path_set(&attr->ah_attr, &qp_context->pri_path);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_PRIMARY_ADDR_PATH);
	}

	if (attr_mask & IB_QP_TIMEOUT) {
		qp_context->pri_path.ackto = attr->timeout << 3;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ACK_TIMEOUT);
	}

	if (attr_mask & IB_QP_ALT_PATH) {
		if (attr->alt_port_num == 0 || attr->alt_port_num > dev->limits.num_ports) {
			mthca_dbg(dev, "Alternate port number (%u) is invalid\n",
				  attr->alt_port_num);
			mthca_free_mailbox(dev, mailbox);
			return -EINVAL;
		}

		mthca_path_set(&attr->alt_ah_attr, &qp_context->alt_path);
		qp_context->alt_path.port_pkey |= cpu_to_be32(attr->alt_pkey_index |
							      attr->alt_port_num << 24);
		qp_context->alt_path.ackto = attr->alt_timeout << 3;
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_ALT_ADDR_PATH);
	}

	/* leave rdd as 0 */
	qp_context->pd       = cpu_to_be32(to_mpd(ibqp->pd)->pd_num);
	/* leave wqe_base as 0 (we always create an MR based at 0 for WQs) */
	qp_context->wqe_lkey = cpu_to_be32(qp->mr.ibmr.lkey);
	qp_context->params1  = cpu_to_be32((MTHCA_ACK_REQ_FREQ << 28) |
					   (MTHCA_FLIGHT_LIMIT << 24) |
					   MTHCA_QP_BIT_SWE);
	if (qp->sq_policy == IB_SIGNAL_ALL_WR)
		qp_context->params1 |= cpu_to_be32(MTHCA_QP_BIT_SSC);
	if (attr_mask & IB_QP_RETRY_CNT) {
		qp_context->params1 |= cpu_to_be32(attr->retry_cnt << 16);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RETRY_COUNT);
	}

	if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC) {
		if (attr->max_rd_atomic) {
			qp_context->params1 |=
				cpu_to_be32(MTHCA_QP_BIT_SRE |
					    MTHCA_QP_BIT_SAE);
			qp_context->params1 |=
				cpu_to_be32(fls(attr->max_rd_atomic - 1) << 21);
		}
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_SRA_MAX);
	}
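	/*
	 * Note on the fls() math above and below: the HCA stores the
	 * number of outstanding RDMA/atomic operations as a power-of-two
	 * exponent, so fls(n - 1) rounds n up to the next power of two
	 * (e.g. max_rd_atomic = 5 gives fls(4) = 3, i.e. 8 outstanding
	 * requests) before it is shifted into params1 or params2.
	 */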
	if (attr_mask & IB_QP_SQ_PSN)
		qp_context->next_send_psn = cpu_to_be32(attr->sq_psn);
	qp_context->cqn_snd = cpu_to_be32(to_mcq(ibqp->send_cq)->cqn);

	if (mthca_is_memfree(dev)) {
		qp_context->snd_wqe_base_l = cpu_to_be32(qp->send_wqe_offset);
		qp_context->snd_db_index   = cpu_to_be32(qp->sq.db_index);
	}

	if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC) {
		if (attr->max_dest_rd_atomic)
			qp_context->params2 |=
				cpu_to_be32(fls(attr->max_dest_rd_atomic - 1) << 21);

		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RRA_MAX);
	}

	if (attr_mask & (IB_QP_ACCESS_FLAGS | IB_QP_MAX_DEST_RD_ATOMIC)) {
		qp_context->params2 |= get_hw_access_flags(qp, attr, attr_mask);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RWE |
							MTHCA_QP_OPTPAR_RRE |
							MTHCA_QP_OPTPAR_RAE);
	}

	qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RSC);

	if (ibqp->srq)
		qp_context->params2 |= cpu_to_be32(MTHCA_QP_BIT_RIC);

	if (attr_mask & IB_QP_MIN_RNR_TIMER) {
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->min_rnr_timer << 24);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_RNR_TIMEOUT);
	}
	if (attr_mask & IB_QP_RQ_PSN)
		qp_context->rnr_nextrecvpsn |= cpu_to_be32(attr->rq_psn);
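	/*
	 * Point the QP at its slice of the RDB (RDMA response database):
	 * each QP gets 2^rdb_shift entries of MTHCA_RDB_ENTRY_SIZE bytes,
	 * which the HCA uses to track incoming RDMA read and atomic
	 * requests (the responder side of max_dest_rd_atomic).
	 */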
	qp_context->ra_buff_indx =
		cpu_to_be32(dev->qp_table.rdb_base +
			    ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<
			     dev->qp_table.rdb_shift));

	qp_context->cqn_rcv = cpu_to_be32(to_mcq(ibqp->recv_cq)->cqn);

	if (mthca_is_memfree(dev))
		qp_context->rcv_db_index = cpu_to_be32(qp->rq.db_index);

	if (attr_mask & IB_QP_QKEY) {
		qp_context->qkey = cpu_to_be32(attr->qkey);
		qp_param->opt_param_mask |= cpu_to_be32(MTHCA_QP_OPTPAR_Q_KEY);
	}

	if (ibqp->srq)
		qp_context->srqn = cpu_to_be32(1 << 24 |
					       to_msrq(ibqp->srq)->srqn);
	if (cur_state == IB_QPS_RTS && new_state == IB_QPS_SQD &&
	    attr_mask & IB_QP_EN_SQD_ASYNC_NOTIFY         &&
	    attr->en_sqd_async_notify)
		sqd_event = 1 << 31;

	err = mthca_MODIFY_QP(dev, state_table[cur_state][new_state].trans,
			      qp->qpn, 0, mailbox, sqd_event, &status);
	if (status) {
		mthca_warn(dev, "modify QP %d returned status %02x.\n",
			   state_table[cur_state][new_state].trans, status);
		err = -EINVAL;
	}

	if (!err) {
		qp->state = new_state;
		if (attr_mask & IB_QP_ACCESS_FLAGS)
			qp->atomic_rd_en = attr->qp_access_flags;
		if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
			qp->resp_depth = attr->max_dest_rd_atomic;
	}

	mthca_free_mailbox(dev, mailbox);

	if (is_sqp(dev, qp))
		store_attrs(to_msqp(qp), attr, attr_mask);

	/*
	 * If we moved QP0 to RTR, bring the IB link up; if we moved
	 * QP0 to RESET or ERROR, bring the link back down.
	 */
	if (is_qp0(dev, qp)) {
		if (cur_state != IB_QPS_RTR &&
		    new_state == IB_QPS_RTR)
			init_port(dev, to_msqp(qp)->port);

		if (cur_state != IB_QPS_RESET &&
		    cur_state != IB_QPS_ERR &&
		    (new_state == IB_QPS_RESET ||
		     new_state == IB_QPS_ERR))
			mthca_CLOSE_IB(dev, to_msqp(qp)->port, &status);
	}

	/*
	 * If we moved a kernel QP to RESET, clean up all old CQ
	 * entries and reinitialize the QP.
	 */
	if (!err && new_state == IB_QPS_RESET && !qp->ibqp.uobject) {
		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
				       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);

		mthca_wq_init(&qp->sq);
		qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);

		mthca_wq_init(&qp->rq);
		qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

		if (mthca_is_memfree(dev)) {
			*qp->rq.db = 0;
			*qp->sq.db = 0;
		}
	}

	return err;
}
static int mthca_max_data_size(struct mthca_dev *dev, struct mthca_qp *qp, int desc_sz)
{
	/*
	 * Calculate the maximum size of WQE s/g segments, excluding
	 * the next segment and other non-data segments.
	 */
	int max_data_size = desc_sz - sizeof (struct mthca_next_seg);

	switch (qp->transport) {
	case MLX:
		max_data_size -= 2 * sizeof (struct mthca_data_seg);
		break;

	case UD:
		if (mthca_is_memfree(dev))
			max_data_size -= sizeof (struct mthca_arbel_ud_seg);
		else
			max_data_size -= sizeof (struct mthca_tavor_ud_seg);
		break;

	default:
		max_data_size -= sizeof (struct mthca_raddr_seg);
		break;
	}

	return max_data_size;
}
static inline int mthca_max_inline_data(struct mthca_pd *pd, int max_data_size)
{
	/* We don't support inline data for kernel QPs (yet). */
	return pd->ibpd.uobject ? max_data_size - MTHCA_INLINE_HEADER_SIZE : 0;
}
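/*
 * Rough worked example: with a 256-byte descriptor on an RC QP,
 * mthca_max_data_size() leaves 256 bytes minus the next segment and
 * remote-address segment for data, and inline sends additionally burn
 * MTHCA_INLINE_HEADER_SIZE bytes of that for the inline header.
 */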
static void mthca_adjust_qp_caps(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_qp *qp)
{
	int max_data_size = mthca_max_data_size(dev, qp,
						min(dev->limits.max_desc_sz,
						    1 << qp->sq.wqe_shift));

	qp->max_inline_data = mthca_max_inline_data(pd, max_data_size);

	qp->sq.max_gs = min_t(int, dev->limits.max_sg,
			      max_data_size / sizeof (struct mthca_data_seg));
	qp->rq.max_gs = min_t(int, dev->limits.max_sg,
			      (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
			       sizeof (struct mthca_next_seg)) /
			      sizeof (struct mthca_data_seg));
}
/*
 * Allocate and register buffer for WQEs.  qp->rq.max, sq.max,
 * rq.max_gs and sq.max_gs must all be assigned.
 * mthca_alloc_wqe_buf will calculate rq.wqe_shift and
 * sq.wqe_shift (as well as send_wqe_offset, is_direct, and
 * queue)
 */
static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
			       struct mthca_pd *pd,
			       struct mthca_qp *qp)
{
	int size;
	int err = -ENOMEM;

	size = sizeof (struct mthca_next_seg) +
		qp->rq.max_gs * sizeof (struct mthca_data_seg);

	if (size > dev->limits.max_desc_sz)
		return -EINVAL;

	for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
	     qp->rq.wqe_shift++)
		; /* nothing */

	size = qp->sq.max_gs * sizeof (struct mthca_data_seg);
	switch (qp->transport) {
	case MLX:
		size += 2 * sizeof (struct mthca_data_seg);
		break;

	case UD:
		size += mthca_is_memfree(dev) ?
			sizeof (struct mthca_arbel_ud_seg) :
			sizeof (struct mthca_tavor_ud_seg);
		break;

	case UC:
		size += sizeof (struct mthca_raddr_seg);
		break;

	case RC:
		size += sizeof (struct mthca_raddr_seg);
		/*
		 * An atomic op will require an atomic segment, a
		 * remote address segment and one scatter entry.
		 */
		size = max_t(int, size,
			     sizeof (struct mthca_atomic_seg) +
			     sizeof (struct mthca_raddr_seg) +
			     sizeof (struct mthca_data_seg));
		break;

	default:
		break;
	}

	/* Make sure that we have enough space for a bind request */
	size = max_t(int, size, sizeof (struct mthca_bind_seg));

	size += sizeof (struct mthca_next_seg);

	if (size > dev->limits.max_desc_sz)
		return -EINVAL;

	for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
	     qp->sq.wqe_shift++)
		; /* nothing */

	qp->send_wqe_offset = ALIGN(qp->rq.max << qp->rq.wqe_shift,
				    1 << qp->sq.wqe_shift);

	/*
	 * If this is a userspace QP, we don't actually have to
	 * allocate anything.  All we need is to calculate the WQE
	 * sizes and the send_wqe_offset, so we're done now.
	 */
	if (pd->ibpd.uobject)
		return 0;

	size = PAGE_ALIGN(qp->send_wqe_offset +
			  (qp->sq.max << qp->sq.wqe_shift));

	qp->wrid = kmalloc((qp->rq.max + qp->sq.max) * sizeof (u64),
			   GFP_KERNEL);
	if (!qp->wrid)
		goto err_out;

	err = mthca_buf_alloc(dev, size, MTHCA_MAX_DIRECT_QP_SIZE,
			      &qp->queue, &qp->is_direct, pd, 0, &qp->mr);
	if (err)
		goto err_out;

	return 0;

err_out:
	kfree(qp->wrid);
	return err;
}
static void mthca_free_wqe_buf(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	mthca_buf_free(dev, PAGE_ALIGN(qp->send_wqe_offset +
				       (qp->sq.max << qp->sq.wqe_shift)),
		       &qp->queue, qp->is_direct, &qp->mr);
	kfree(qp->wrid);
}
static int mthca_map_memfree(struct mthca_dev *dev,
			     struct mthca_qp *qp)
{
	int ret;

	if (mthca_is_memfree(dev)) {
		ret = mthca_table_get(dev, dev->qp_table.qp_table, qp->qpn);
		if (ret)
			return ret;

		ret = mthca_table_get(dev, dev->qp_table.eqp_table, qp->qpn);
		if (ret)
			goto err_qpc;

		ret = mthca_table_get(dev, dev->qp_table.rdb_table,
				      qp->qpn << dev->qp_table.rdb_shift);
		if (ret)
			goto err_eqpc;
	}

	return 0;

err_eqpc:
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);

err_qpc:
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);

	return ret;
}
static void mthca_unmap_memfree(struct mthca_dev *dev,
				struct mthca_qp *qp)
{
	mthca_table_put(dev, dev->qp_table.rdb_table,
			qp->qpn << dev->qp_table.rdb_shift);
	mthca_table_put(dev, dev->qp_table.eqp_table, qp->qpn);
	mthca_table_put(dev, dev->qp_table.qp_table, qp->qpn);
}
static int mthca_alloc_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (mthca_is_memfree(dev)) {
		qp->rq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_RQ,
						 qp->qpn, &qp->rq.db);
		if (qp->rq.db_index < 0)
			return -ENOMEM;

		qp->sq.db_index = mthca_alloc_db(dev, MTHCA_DB_TYPE_SQ,
						 qp->qpn, &qp->sq.db);
		if (qp->sq.db_index < 0) {
			mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
			return -ENOMEM;
		}
	}

	return 0;
}
static void mthca_free_memfree(struct mthca_dev *dev,
			       struct mthca_qp *qp)
{
	if (mthca_is_memfree(dev)) {
		mthca_free_db(dev, MTHCA_DB_TYPE_SQ, qp->sq.db_index);
		mthca_free_db(dev, MTHCA_DB_TYPE_RQ, qp->rq.db_index);
	}
}
static int mthca_alloc_qp_common(struct mthca_dev *dev,
				 struct mthca_pd *pd,
				 struct mthca_cq *send_cq,
				 struct mthca_cq *recv_cq,
				 enum ib_sig_type send_policy,
				 struct mthca_qp *qp)
{
	int ret;
	int i;

	atomic_set(&qp->refcount, 1);
	init_waitqueue_head(&qp->wait);
	qp->state        = IB_QPS_RESET;
	qp->atomic_rd_en = 0;
	qp->resp_depth   = 0;
	qp->sq_policy    = send_policy;
	mthca_wq_init(&qp->sq);
	mthca_wq_init(&qp->rq);

	ret = mthca_map_memfree(dev, qp);
	if (ret)
		return ret;

	ret = mthca_alloc_wqe_buf(dev, pd, qp);
	if (ret) {
		mthca_unmap_memfree(dev, qp);
		return ret;
	}

	mthca_adjust_qp_caps(dev, pd, qp);

	/*
	 * If this is a userspace QP, we're done now.  The doorbells
	 * will be allocated and buffers will be initialized in
	 * userspace.
	 */
	if (pd->ibpd.uobject)
		return 0;

	ret = mthca_alloc_memfree(dev, qp);
	if (ret) {
		mthca_free_wqe_buf(dev, qp);
		mthca_unmap_memfree(dev, qp);
		return ret;
	}
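	/*
	 * On mem-free HCAs (Arbel), the hardware expects the WQE chain
	 * to be fully linked before any work is posted: each next
	 * segment's nda_op points at the following WQE, and unused
	 * scatter entries carry the invalid lkey so the HCA knows
	 * where a receive's S/G list ends.
	 */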
	if (mthca_is_memfree(dev)) {
		struct mthca_next_seg *next;
		struct mthca_data_seg *scatter;
		int size = (sizeof (struct mthca_next_seg) +
			    qp->rq.max_gs * sizeof (struct mthca_data_seg)) / 16;

		for (i = 0; i < qp->rq.max; ++i) {
			next = get_recv_wqe(qp, i);
			next->nda_op = cpu_to_be32(((i + 1) & (qp->rq.max - 1)) <<
						   qp->rq.wqe_shift);
			next->ee_nds = cpu_to_be32(size);

			for (scatter = (void *) (next + 1);
			     (void *) scatter < (void *) next + (1 << qp->rq.wqe_shift);
			     ++scatter)
				scatter->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
		}

		for (i = 0; i < qp->sq.max; ++i) {
			next = get_send_wqe(qp, i);
			next->nda_op = cpu_to_be32((((i + 1) & (qp->sq.max - 1)) <<
						    qp->sq.wqe_shift) +
						   qp->send_wqe_offset);
		}
	}

	qp->sq.last = get_send_wqe(qp, qp->sq.max - 1);
	qp->rq.last = get_recv_wqe(qp, qp->rq.max - 1);

	return 0;
}
static int mthca_set_qp_size(struct mthca_dev *dev, struct ib_qp_cap *cap,
			     struct mthca_pd *pd, struct mthca_qp *qp)
{
	int max_data_size = mthca_max_data_size(dev, qp, dev->limits.max_desc_sz);

	/* Sanity check QP size before proceeding */
	if (cap->max_send_wr     > dev->limits.max_wqes ||
	    cap->max_recv_wr     > dev->limits.max_wqes ||
	    cap->max_send_sge    > dev->limits.max_sg   ||
	    cap->max_recv_sge    > dev->limits.max_sg   ||
	    cap->max_inline_data > mthca_max_inline_data(pd, max_data_size))
		return -EINVAL;

	/*
	 * For MLX transport we need 2 extra S/G entries:
	 * one for the header and one for the checksum at the end
	 */
	if (qp->transport == MLX && cap->max_recv_sge + 2 > dev->limits.max_sg)
		return -EINVAL;

	if (mthca_is_memfree(dev)) {
		qp->rq.max = cap->max_recv_wr ?
			roundup_pow_of_two(cap->max_recv_wr) : 0;
		qp->sq.max = cap->max_send_wr ?
			roundup_pow_of_two(cap->max_send_wr) : 0;
	} else {
		qp->rq.max = cap->max_recv_wr;
		qp->sq.max = cap->max_send_wr;
	}

	qp->rq.max_gs = cap->max_recv_sge;
	qp->sq.max_gs = max_t(int, cap->max_send_sge,
			      ALIGN(cap->max_inline_data + MTHCA_INLINE_HEADER_SIZE,
				    MTHCA_INLINE_CHUNK_SIZE) /
			      sizeof (struct mthca_data_seg));

	return 0;
}
int mthca_alloc_qp(struct mthca_dev *dev,
		   struct mthca_pd *pd,
		   struct mthca_cq *send_cq,
		   struct mthca_cq *recv_cq,
		   enum ib_qp_type type,
		   enum ib_sig_type send_policy,
		   struct ib_qp_cap *cap,
		   struct mthca_qp *qp)
{
	int err;

	err = mthca_set_qp_size(dev, cap, pd, qp);
	if (err)
		return err;

	switch (type) {
	case IB_QPT_RC: qp->transport = RC; break;
	case IB_QPT_UC: qp->transport = UC; break;
	case IB_QPT_UD: qp->transport = UD; break;
	default: return -EINVAL;
	}

	qp->qpn = mthca_alloc(&dev->qp_table.alloc);
	if (qp->qpn == -1)
		return -ENOMEM;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, qp);
	if (err) {
		mthca_free(&dev->qp_table.alloc, qp->qpn);
		return err;
	}

	spin_lock_irq(&dev->qp_table.lock);
	mthca_array_set(&dev->qp_table.qp,
			qp->qpn & (dev->limits.num_qps - 1), qp);
	spin_unlock_irq(&dev->qp_table.lock);

	return 0;
}
int mthca_alloc_sqp(struct mthca_dev *dev,
		    struct mthca_pd *pd,
		    struct mthca_cq *send_cq,
		    struct mthca_cq *recv_cq,
		    enum ib_sig_type send_policy,
		    struct ib_qp_cap *cap,
		    int qpn,
		    int port,
		    struct mthca_sqp *sqp)
{
	u32 mqpn = qpn * 2 + dev->qp_table.sqp_start + port - 1;
	int err;
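	/*
	 * Special QPs occupy four consecutive QPNs starting at
	 * sqp_start: QP0 for ports 1 and 2, then QP1 for ports 1 and 2.
	 * With qpn being 0 (SMI) or 1 (GSI), the computation above maps
	 * (qpn, port) onto that range.
	 */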
	err = mthca_set_qp_size(dev, cap, pd, &sqp->qp);
	if (err)
		return err;

	sqp->header_buf_size = sqp->qp.sq.max * MTHCA_UD_HEADER_SIZE;
	sqp->header_buf = dma_alloc_coherent(&dev->pdev->dev, sqp->header_buf_size,
					     &sqp->header_dma, GFP_KERNEL);
	if (!sqp->header_buf)
		return -ENOMEM;

	spin_lock_irq(&dev->qp_table.lock);
	if (mthca_array_get(&dev->qp_table.qp, mqpn))
		err = -EBUSY;
	else
		mthca_array_set(&dev->qp_table.qp, mqpn, sqp);
	spin_unlock_irq(&dev->qp_table.lock);

	if (err)
		goto err_out;

	sqp->port         = port;
	sqp->qp.qpn       = mqpn;
	sqp->qp.transport = MLX;

	err = mthca_alloc_qp_common(dev, pd, send_cq, recv_cq,
				    send_policy, &sqp->qp);
	if (err)
		goto err_out_free;

	atomic_inc(&pd->sqp_count);

	return 0;

 err_out_free:
	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	spin_lock_irq(&send_cq->lock);
	if (send_cq != recv_cq)
		spin_lock(&recv_cq->lock);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp, mqpn);
	spin_unlock(&dev->qp_table.lock);

	if (send_cq != recv_cq)
		spin_unlock(&recv_cq->lock);
	spin_unlock_irq(&send_cq->lock);

 err_out:
	dma_free_coherent(&dev->pdev->dev, sqp->header_buf_size,
			  sqp->header_buf, sqp->header_dma);

	return err;
}
void mthca_free_qp(struct mthca_dev *dev,
		   struct mthca_qp *qp)
{
	u8 status;
	struct mthca_cq *send_cq;
	struct mthca_cq *recv_cq;

	send_cq = to_mcq(qp->ibqp.send_cq);
	recv_cq = to_mcq(qp->ibqp.recv_cq);

	/*
	 * Lock CQs here, so that CQ polling code can do QP lookup
	 * without taking a lock.
	 */
	spin_lock_irq(&send_cq->lock);
	if (send_cq != recv_cq)
		spin_lock(&recv_cq->lock);

	spin_lock(&dev->qp_table.lock);
	mthca_array_clear(&dev->qp_table.qp,
			  qp->qpn & (dev->limits.num_qps - 1));
	spin_unlock(&dev->qp_table.lock);

	if (send_cq != recv_cq)
		spin_unlock(&recv_cq->lock);
	spin_unlock_irq(&send_cq->lock);

	atomic_dec(&qp->refcount);
	wait_event(qp->wait, !atomic_read(&qp->refcount));

	if (qp->state != IB_QPS_RESET)
		mthca_MODIFY_QP(dev, MTHCA_TRANS_ANY2RST, qp->qpn, 0, NULL, 0, &status);

	/*
	 * If this is a userspace QP, the buffers, MR, CQs and so on
	 * will be cleaned up in userspace, so all we have to do is
	 * unref the mem-free tables and free the QPN in our table.
	 */
	if (!qp->ibqp.uobject) {
		mthca_cq_clean(dev, to_mcq(qp->ibqp.send_cq)->cqn, qp->qpn,
			       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);
		if (qp->ibqp.send_cq != qp->ibqp.recv_cq)
			mthca_cq_clean(dev, to_mcq(qp->ibqp.recv_cq)->cqn, qp->qpn,
				       qp->ibqp.srq ? to_msrq(qp->ibqp.srq) : NULL);

		mthca_free_memfree(dev, qp);
		mthca_free_wqe_buf(dev, qp);
	}

	mthca_unmap_memfree(dev, qp);

	if (is_sqp(dev, qp)) {
		atomic_dec(&(to_mpd(qp->ibqp.pd)->sqp_count));
		dma_free_coherent(&dev->pdev->dev,
				  to_msqp(qp)->header_buf_size,
				  to_msqp(qp)->header_buf,
				  to_msqp(qp)->header_dma);
	} else
		mthca_free(&dev->qp_table.alloc, qp->qpn);
}
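/*
 * MLX is the raw transport used by the special QPs (QP0 and QP1): the
 * driver builds the full UD header in software and the HCA transmits
 * it as-is, which is what build_mlx_header() below takes care of.
 */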
/* Create UD header for an MLX send and build a data segment for it */
static int build_mlx_header(struct mthca_dev *dev, struct mthca_sqp *sqp,
			    int ind, struct ib_send_wr *wr,
			    struct mthca_mlx_seg *mlx,
			    struct mthca_data_seg *data)
{
	int header_size;
	int err;
	u16 pkey;

	ib_ud_header_init(256, /* assume a MAD */
			  mthca_ah_grh_present(to_mah(wr->wr.ud.ah)),
			  &sqp->ud_header);

	err = mthca_read_ah(dev, to_mah(wr->wr.ud.ah), &sqp->ud_header);
	if (err)
		return err;
	mlx->flags &= ~cpu_to_be32(MTHCA_NEXT_SOLICIT | 1);
	mlx->flags |= cpu_to_be32((!sqp->qp.ibqp.qp_num ? MTHCA_MLX_VL15 : 0) |
				  (sqp->ud_header.lrh.destination_lid ==
				   IB_LID_PERMISSIVE ? MTHCA_MLX_SLR : 0) |
				  (sqp->ud_header.lrh.service_level << 8));
	mlx->rlid = sqp->ud_header.lrh.destination_lid;

	switch (wr->opcode) {
	case IB_WR_SEND:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY;
		sqp->ud_header.immediate_present = 0;
		break;
	case IB_WR_SEND_WITH_IMM:
		sqp->ud_header.bth.opcode = IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE;
		sqp->ud_header.immediate_present = 1;
		sqp->ud_header.immediate_data = wr->imm_data;
		break;
	default:
		return -EINVAL;
	}

	sqp->ud_header.lrh.virtual_lane    = !sqp->qp.ibqp.qp_num ? 15 : 0;
	if (sqp->ud_header.lrh.destination_lid == IB_LID_PERMISSIVE)
		sqp->ud_header.lrh.source_lid = IB_LID_PERMISSIVE;
	sqp->ud_header.bth.solicited_event = !!(wr->send_flags & IB_SEND_SOLICITED);
	if (!sqp->qp.ibqp.qp_num)
		ib_get_cached_pkey(&dev->ib_dev, sqp->port,
				   sqp->pkey_index, &pkey);
	else
		ib_get_cached_pkey(&dev->ib_dev, sqp->port,
				   wr->wr.ud.pkey_index, &pkey);
	sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
	sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->wr.ud.remote_qpn);
	sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
	sqp->ud_header.deth.qkey = cpu_to_be32(wr->wr.ud.remote_qkey & 0x80000000 ?
					       sqp->qkey : wr->wr.ud.remote_qkey);
	sqp->ud_header.deth.source_qpn = cpu_to_be32(sqp->qp.ibqp.qp_num);

	header_size = ib_ud_header_pack(&sqp->ud_header,
					sqp->header_buf +
					ind * MTHCA_UD_HEADER_SIZE);

	data->byte_count = cpu_to_be32(header_size);
	data->lkey       = cpu_to_be32(to_mpd(sqp->qp.ibqp.pd)->ntmr.ibmr.lkey);
	data->addr       = cpu_to_be64(sqp->header_dma +
				       ind * MTHCA_UD_HEADER_SIZE);

	return 0;
}
static inline int mthca_wq_overflow(struct mthca_wq *wq, int nreq,
				    struct ib_cq *ib_cq)
{
	unsigned cur;
	struct mthca_cq *cq;

	cur = wq->head - wq->tail;
	if (likely(cur + nreq < wq->max))
		return 0;

	cq = to_mcq(ib_cq);
	spin_lock(&cq->lock);
	cur = wq->head - wq->tail;
	spin_unlock(&cq->lock);

	return cur + nreq >= wq->max;
}
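/*
 * Note on mthca_wq_overflow() above: wq->tail is only advanced by the
 * CQ poll path, so the fast path reads head - tail without any lock;
 * only on apparent overflow do we take cq->lock and re-read, to pick
 * up completions that raced with us.
 */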
int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	u32 f0 = 0;
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.next_ind;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->sq.head, qp->sq.tail,
					qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds = 0;
		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.atomic.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.atomic.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;

				wqe += sizeof (struct mthca_raddr_seg);

				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.swap);
					((struct mthca_atomic_seg *) wqe)->compare =
						cpu_to_be64(wr->wr.atomic.compare_add);
				} else {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.compare_add);
					((struct mthca_atomic_seg *) wqe)->compare = 0;
				}

				wqe += sizeof (struct mthca_atomic_seg);
				size += (sizeof (struct mthca_raddr_seg) +
					 sizeof (struct mthca_atomic_seg)) / 16;
				break;

			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
			case IB_WR_RDMA_READ:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			((struct mthca_tavor_ud_seg *) wqe)->lkey =
				cpu_to_be32(to_mah(wr->wr.ud.ah)->key);
			((struct mthca_tavor_ud_seg *) wqe)->av_addr =
				cpu_to_be64(to_mah(wr->wr.ud.ah)->avdma);
			((struct mthca_tavor_ud_seg *) wqe)->dqpn =
				cpu_to_be32(wr->wr.ud.remote_qpn);
			((struct mthca_tavor_ud_seg *) wqe)->qkey =
				cpu_to_be32(wr->wr.ud.remote_qkey);

			wqe += sizeof (struct mthca_tavor_ud_seg);
			size += sizeof (struct mthca_tavor_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32(((ind << qp->sq.wqe_shift) +
				     qp->send_wqe_offset) |
				    mthca_opcode[wr->opcode]);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32((size0 ? 0 : MTHCA_NEXT_DBD) | size);

		if (!size0) {
			size0 = size;
			op0   = mthca_opcode[wr->opcode];
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}
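	/*
	 * Ring the send doorbell once for the whole chain: word 0 gives
	 * the offset and opcode of the first WQE, word 1 the QP number
	 * and the size of that first WQE in 16-byte units.
	 */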
out:
	if (likely(nreq)) {
		__be32 doorbell[2];

		doorbell[0] = cpu_to_be32(((qp->sq.next_ind << qp->sq.wqe_shift) +
					   qp->send_wqe_offset) | f0 | op0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);

		wmb();

		mthca_write64(doorbell,
			      dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	qp->sq.next_ind = ind;
	qp->sq.head    += nreq;

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}
int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	__be32 doorbell[2];
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	int ind;
	void *wqe;
	void *prev_wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.next_ind;

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
			nreq = 0;

			doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
			doorbell[1] = cpu_to_be32(qp->qpn << 8);

			wmb();

			mthca_write64(doorbell,
				      dev->kar + MTHCA_RECEIVE_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));

			qp->rq.next_ind = ind;
			qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
			size0 = 0;
		}
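		/*
		 * Tavor encodes the WQE count in the low byte of
		 * doorbell word 1, so only a limited number of receives
		 * (MTHCA_TAVOR_MAX_WQES_PER_RECV_DB) can be linked per
		 * doorbell; longer chains are flushed in batches above.
		 */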
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->rq.head, qp->rq.tail,
					qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);
		prev_wqe = qp->rq.last;
		qp->rq.last = wqe;

		((struct mthca_next_seg *) wqe)->nda_op = 0;
		((struct mthca_next_seg *) wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD);
		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind] = wr->wr_id;

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32((ind << qp->rq.wqe_shift) | 1);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD | size);

		if (!size0)
			size0 = size;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;
	}

out:
	if (likely(nreq)) {
		doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq);

		wmb();

		mthca_write64(doorbell,
			      dev->kar + MTHCA_RECEIVE_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	qp->rq.next_ind = ind;
	qp->rq.head    += nreq;

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}
int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
			  struct ib_send_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	__be32 doorbell[2];
	void *wqe;
	void *prev_wqe;
	unsigned long flags;
	int err = 0;
	int nreq;
	int i;
	int size;
	int size0 = 0;
	u32 f0 = 0;
	int ind;
	u8 op0 = 0;

	spin_lock_irqsave(&qp->sq.lock, flags);

	/* XXX check that state is OK to post send */

	ind = qp->sq.head & (qp->sq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (unlikely(nreq == MTHCA_ARBEL_MAX_WQES_PER_SEND_DB)) {
			nreq = 0;

			doorbell[0] = cpu_to_be32((MTHCA_ARBEL_MAX_WQES_PER_SEND_DB << 24) |
						  ((qp->sq.head & 0xffff) << 8) |
						  f0 | op0);
			doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);

			qp->sq.head += MTHCA_ARBEL_MAX_WQES_PER_SEND_DB;
			size0 = 0;

			/*
			 * Make sure that descriptors are written before
			 * doorbell record.
			 */
			wmb();
			*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);

			/*
			 * Make sure doorbell record is written before we
			 * write MMIO send doorbell.
			 */
			wmb();
			mthca_write64(doorbell,
				      dev->kar + MTHCA_SEND_DOORBELL,
				      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
		}
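		/*
		 * Arbel (mem-free) needs both a doorbell *record* in
		 * memory (qp->sq.db, which the HCA may read instead of
		 * the UAR) and an MMIO doorbell write; the two wmb()s
		 * above order WQE writes, record update and MMIO access.
		 */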
		if (mthca_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) {
			mthca_err(dev, "SQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->sq.head, qp->sq.tail,
					qp->sq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_send_wqe(qp, ind);
		prev_wqe = qp->sq.last;
		qp->sq.last = wqe;

		((struct mthca_next_seg *) wqe)->flags =
			((wr->send_flags & IB_SEND_SIGNALED) ?
			 cpu_to_be32(MTHCA_NEXT_CQ_UPDATE) : 0) |
			((wr->send_flags & IB_SEND_SOLICITED) ?
			 cpu_to_be32(MTHCA_NEXT_SOLICIT) : 0)   |
			cpu_to_be32(1);
		if (wr->opcode == IB_WR_SEND_WITH_IMM ||
		    wr->opcode == IB_WR_RDMA_WRITE_WITH_IMM)
			((struct mthca_next_seg *) wqe)->imm = wr->imm_data;

		wqe += sizeof (struct mthca_next_seg);
		size = sizeof (struct mthca_next_seg) / 16;

		switch (qp->transport) {
		case RC:
			switch (wr->opcode) {
			case IB_WR_ATOMIC_CMP_AND_SWP:
			case IB_WR_ATOMIC_FETCH_AND_ADD:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.atomic.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.atomic.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;

				wqe += sizeof (struct mthca_raddr_seg);

				if (wr->opcode == IB_WR_ATOMIC_CMP_AND_SWP) {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.swap);
					((struct mthca_atomic_seg *) wqe)->compare =
						cpu_to_be64(wr->wr.atomic.compare_add);
				} else {
					((struct mthca_atomic_seg *) wqe)->swap_add =
						cpu_to_be64(wr->wr.atomic.compare_add);
					((struct mthca_atomic_seg *) wqe)->compare = 0;
				}

				wqe += sizeof (struct mthca_atomic_seg);
				size += (sizeof (struct mthca_raddr_seg) +
					 sizeof (struct mthca_atomic_seg)) / 16;
				break;

			case IB_WR_RDMA_READ:
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UC:
			switch (wr->opcode) {
			case IB_WR_RDMA_WRITE:
			case IB_WR_RDMA_WRITE_WITH_IMM:
				((struct mthca_raddr_seg *) wqe)->raddr =
					cpu_to_be64(wr->wr.rdma.remote_addr);
				((struct mthca_raddr_seg *) wqe)->rkey =
					cpu_to_be32(wr->wr.rdma.rkey);
				((struct mthca_raddr_seg *) wqe)->reserved = 0;
				wqe += sizeof (struct mthca_raddr_seg);
				size += sizeof (struct mthca_raddr_seg) / 16;
				break;

			default:
				/* No extra segments required for sends */
				break;
			}

			break;

		case UD:
			memcpy(((struct mthca_arbel_ud_seg *) wqe)->av,
			       to_mah(wr->wr.ud.ah)->av, MTHCA_AV_SIZE);
			((struct mthca_arbel_ud_seg *) wqe)->dqpn =
				cpu_to_be32(wr->wr.ud.remote_qpn);
			((struct mthca_arbel_ud_seg *) wqe)->qkey =
				cpu_to_be32(wr->wr.ud.remote_qkey);

			wqe += sizeof (struct mthca_arbel_ud_seg);
			size += sizeof (struct mthca_arbel_ud_seg) / 16;
			break;

		case MLX:
			err = build_mlx_header(dev, to_msqp(qp), ind, wr,
					       wqe - sizeof (struct mthca_next_seg),
					       wqe);
			if (err) {
				*bad_wr = wr;
				goto out;
			}
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
			break;
		}

		if (wr->num_sge > qp->sq.max_gs) {
			mthca_err(dev, "too many gathers\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		/* Add one more inline data segment for ICRC */
		if (qp->transport == MLX) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32((1 << 31) | 4);
			((u32 *) wqe)[1] = 0;
			wqe += sizeof (struct mthca_data_seg);
			size += sizeof (struct mthca_data_seg) / 16;
		}

		qp->wrid[ind + qp->rq.max] = wr->wr_id;

		if (wr->opcode >= ARRAY_SIZE(mthca_opcode)) {
			mthca_err(dev, "opcode invalid\n");
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		((struct mthca_next_seg *) prev_wqe)->nda_op =
			cpu_to_be32(((ind << qp->sq.wqe_shift) +
				     qp->send_wqe_offset) |
				    mthca_opcode[wr->opcode]);
		wmb();
		((struct mthca_next_seg *) prev_wqe)->ee_nds =
			cpu_to_be32(MTHCA_NEXT_DBD | size);

		if (!size0) {
			size0 = size;
			op0   = mthca_opcode[wr->opcode];
		}

		++ind;
		if (unlikely(ind >= qp->sq.max))
			ind -= qp->sq.max;
	}

out:
	if (likely(nreq)) {
		doorbell[0] = cpu_to_be32((nreq << 24) |
					  ((qp->sq.head & 0xffff) << 8) |
					  f0 | op0);
		doorbell[1] = cpu_to_be32((qp->qpn << 8) | size0);

		qp->sq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->sq.db = cpu_to_be32(qp->sq.head & 0xffff);

		/*
		 * Make sure doorbell record is written before we
		 * write MMIO send doorbell.
		 */
		wmb();
		mthca_write64(doorbell,
			      dev->kar + MTHCA_SEND_DOORBELL,
			      MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
	}

	spin_unlock_irqrestore(&qp->sq.lock, flags);
	return err;
}
int mthca_arbel_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
			     struct ib_recv_wr **bad_wr)
{
	struct mthca_dev *dev = to_mdev(ibqp->device);
	struct mthca_qp *qp = to_mqp(ibqp);
	unsigned long flags;
	int err = 0;
	int nreq;
	int ind;
	int i;
	void *wqe;

	spin_lock_irqsave(&qp->rq.lock, flags);

	/* XXX check that state is OK to post receive */

	ind = qp->rq.head & (qp->rq.max - 1);

	for (nreq = 0; wr; ++nreq, wr = wr->next) {
		if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
			mthca_err(dev, "RQ %06x full (%u head, %u tail,"
					" %d max, %d nreq)\n", qp->qpn,
					qp->rq.head, qp->rq.tail,
					qp->rq.max, nreq);
			err = -ENOMEM;
			*bad_wr = wr;
			goto out;
		}

		wqe = get_recv_wqe(qp, ind);

		((struct mthca_next_seg *) wqe)->flags = 0;

		wqe += sizeof (struct mthca_next_seg);

		if (unlikely(wr->num_sge > qp->rq.max_gs)) {
			err = -EINVAL;
			*bad_wr = wr;
			goto out;
		}

		for (i = 0; i < wr->num_sge; ++i) {
			((struct mthca_data_seg *) wqe)->byte_count =
				cpu_to_be32(wr->sg_list[i].length);
			((struct mthca_data_seg *) wqe)->lkey =
				cpu_to_be32(wr->sg_list[i].lkey);
			((struct mthca_data_seg *) wqe)->addr =
				cpu_to_be64(wr->sg_list[i].addr);
			wqe += sizeof (struct mthca_data_seg);
		}
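		/*
		 * If fewer than max_gs entries were used, terminate the
		 * S/G list with a zero-length entry carrying the
		 * invalid lkey so the HCA knows where the list ends.
		 */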
		if (i < qp->rq.max_gs) {
			((struct mthca_data_seg *) wqe)->byte_count = 0;
			((struct mthca_data_seg *) wqe)->lkey = cpu_to_be32(MTHCA_INVAL_LKEY);
			((struct mthca_data_seg *) wqe)->addr = 0;
		}

		qp->wrid[ind] = wr->wr_id;

		++ind;
		if (unlikely(ind >= qp->rq.max))
			ind -= qp->rq.max;
	}
out:
	if (likely(nreq)) {
		qp->rq.head += nreq;

		/*
		 * Make sure that descriptors are written before
		 * doorbell record.
		 */
		wmb();
		*qp->rq.db = cpu_to_be32(qp->rq.head & 0xffff);
	}

	spin_unlock_irqrestore(&qp->rq.lock, flags);
	return err;
}
void mthca_free_err_wqe(struct mthca_dev *dev, struct mthca_qp *qp, int is_send,
			int index, int *dbd, __be32 *new_wqe)
{
	struct mthca_next_seg *next;

	/*
	 * For SRQs, all WQEs generate a CQE, so we're always at the
	 * end of the doorbell chain.
	 */
	if (qp->ibqp.srq) {
		*new_wqe = 0;
		return;
	}

	if (is_send)
		next = get_send_wqe(qp, index);
	else
		next = get_recv_wqe(qp, index);

	*dbd = !!(next->ee_nds & cpu_to_be32(MTHCA_NEXT_DBD));
	if (next->ee_nds & cpu_to_be32(0x3f))
		*new_wqe = (next->nda_op & cpu_to_be32(~0x3f)) |
			(next->ee_nds & cpu_to_be32(0x3f));
	else
		*new_wqe = 0;
}
int __devinit mthca_init_qp_table(struct mthca_dev *dev)
{
	int err;
	u8 status;
	int i;

	spin_lock_init(&dev->qp_table.lock);

	/*
	 * We reserve 2 extra QPs per port for the special QPs.  The
	 * special QP for port 1 has to be even, so round up.
	 */
	dev->qp_table.sqp_start = (dev->limits.reserved_qps + 1) & ~1UL;
	err = mthca_alloc_init(&dev->qp_table.alloc,
			       dev->limits.num_qps,
			       (1 << 24) - 1,
			       dev->qp_table.sqp_start +
			       MTHCA_MAX_PORTS * 2);
	if (err)
		return err;

	err = mthca_array_init(&dev->qp_table.qp,
			       dev->limits.num_qps);
	if (err) {
		mthca_alloc_cleanup(&dev->qp_table.alloc);
		return err;
	}

	for (i = 0; i < 2; ++i) {
		err = mthca_CONF_SPECIAL_QP(dev, i ? IB_QPT_GSI : IB_QPT_SMI,
					    dev->qp_table.sqp_start + i * 2,
					    &status);
		if (err)
			goto err_out;
		if (status) {
			mthca_warn(dev, "CONF_SPECIAL_QP returned "
				   "status %02x, aborting.\n",
				   status);
			err = -EINVAL;
			goto err_out;
		}
	}
	return 0;

 err_out:
	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0, &status);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);

	return err;
}
void __devexit mthca_cleanup_qp_table(struct mthca_dev *dev)
{
	int i;
	u8 status;

	for (i = 0; i < 2; ++i)
		mthca_CONF_SPECIAL_QP(dev, i, 0, &status);

	mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);
	mthca_alloc_cleanup(&dev->qp_table.alloc);
}