Git - linux-2.6/blobdiff - drivers/infiniband/hw/ipath/ipath_qp.c
IB/ipath: Fix CQ flushing when QP is modified to error state
[linux-2.6] / drivers / infiniband / hw / ipath / ipath_qp.c
index 9228d59b59d46d632ade5823cf504972a5804eaa..f671fd07325302977e83d08040bb31d08877ec30 100644 (file)
@@ -274,7 +274,7 @@ void ipath_free_all_qps(struct ipath_qp_table *qpt)
                                free_qpn(qpt, qp->ibqp.qp_num);
                        if (!atomic_dec_and_test(&qp->refcount) ||
                            !ipath_destroy_qp(&qp->ibqp))
-                               _VERBS_INFO("QP memory leak!\n");
+                               ipath_dbg("QP memory leak!\n");
                        qp = nqp;
                }
        }
@@ -320,7 +320,8 @@ static void ipath_reset_qp(struct ipath_qp *qp)
        qp->remote_qpn = 0;
        qp->qkey = 0;
        qp->qp_access_flags = 0;
-       clear_bit(IPATH_S_BUSY, &qp->s_flags);
+       qp->s_busy = 0;
+       qp->s_flags &= ~IPATH_S_SIGNAL_REQ_WR;
        qp->s_hdrwords = 0;
        qp->s_psn = 0;
        qp->r_psn = 0;
@@ -333,8 +334,8 @@ static void ipath_reset_qp(struct ipath_qp *qp)
                qp->r_state = IB_OPCODE_UC_SEND_LAST;
        }
        qp->s_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
-       qp->r_ack_state = IB_OPCODE_RC_ACKNOWLEDGE;
        qp->r_nak_state = 0;
+       qp->r_wrid_valid = 0;
        qp->s_rnr_timeout = 0;
        qp->s_head = 0;
        qp->s_tail = 0;
@@ -342,6 +343,11 @@ static void ipath_reset_qp(struct ipath_qp *qp)
        qp->s_last = 0;
        qp->s_ssn = 1;
        qp->s_lsn = 0;
+       qp->s_wait_credit = 0;
+       memset(qp->s_ack_queue, 0, sizeof(qp->s_ack_queue));
+       qp->r_head_ack_queue = 0;
+       qp->s_tail_ack_queue = 0;
+       qp->s_num_rd_atomic = 0;
        if (qp->r_rq.wq) {
                qp->r_rq.wq->head = 0;
                qp->r_rq.wq->tail = 0;
@@ -352,18 +358,19 @@ static void ipath_reset_qp(struct ipath_qp *qp)
 /**
  * ipath_error_qp - put a QP into an error state
  * @qp: the QP to put into an error state
+ * @err: the receive completion error to signal if a RWQE is active
  *
  * Flushes both send and receive work queues.
- * QP s_lock should be held and interrupts disabled.
+ * The QP s_lock should be held and interrupts disabled.
  */
 
-void ipath_error_qp(struct ipath_qp *qp)
+void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
 {
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
        struct ib_wc wc;
 
-       _VERBS_INFO("QP%d/%d in error state\n",
-                   qp->ibqp.qp_num, qp->remote_qpn);
+       ipath_dbg("QP%d/%d in error state\n",
+                 qp->ibqp.qp_num, qp->remote_qpn);
 
        spin_lock(&dev->pending_lock);
        /* XXX What if its already removed by the timeout code? */
@@ -373,11 +380,10 @@ void ipath_error_qp(struct ipath_qp *qp)
                list_del_init(&qp->piowait);
        spin_unlock(&dev->pending_lock);
 
-       wc.status = IB_WC_WR_FLUSH_ERR;
        wc.vendor_err = 0;
        wc.byte_len = 0;
        wc.imm_data = 0;
-       wc.qp_num = qp->ibqp.qp_num;
+       wc.qp = &qp->ibqp;
        wc.src_qp = 0;
        wc.wc_flags = 0;
        wc.pkey_index = 0;
@@ -385,6 +391,14 @@ void ipath_error_qp(struct ipath_qp *qp)
        wc.sl = 0;
        wc.dlid_path_bits = 0;
        wc.port_num = 0;
+       if (qp->r_wrid_valid) {
+               qp->r_wrid_valid = 0;
+               wc.wr_id = qp->r_wr_id;
+               wc.opcode = IB_WC_RECV;
+               wc.status = err;
+               ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
+       }
+       wc.status = IB_WC_WR_FLUSH_ERR;
 
        while (qp->s_last != qp->s_head) {
                struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
@@ -455,11 +469,16 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                                attr_mask))
                goto inval;
 
-       if (attr_mask & IB_QP_AV)
+       if (attr_mask & IB_QP_AV) {
                if (attr->ah_attr.dlid == 0 ||
                    attr->ah_attr.dlid >= IPATH_MULTICAST_LID_BASE)
                        goto inval;
 
+               if ((attr->ah_attr.ah_flags & IB_AH_GRH) &&
+                   (attr->ah_attr.grh.sgid_index > 1))
+                       goto inval;
+       }
+
        if (attr_mask & IB_QP_PKEY_INDEX)
                if (attr->pkey_index >= ipath_get_npkeys(dev->dd))
                        goto inval;
@@ -468,13 +487,39 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                if (attr->min_rnr_timer > 31)
                        goto inval;
 
+       if (attr_mask & IB_QP_PORT)
+               if (attr->port_num == 0 ||
+                   attr->port_num > ibqp->device->phys_port_cnt)
+                       goto inval;
+
+       if (attr_mask & IB_QP_PATH_MTU)
+               if (attr->path_mtu > IB_MTU_4096)
+                       goto inval;
+
+       if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+               if (attr->max_dest_rd_atomic > 1)
+                       goto inval;
+
+       if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
+               if (attr->max_rd_atomic > 1)
+                       goto inval;
+
+       if (attr_mask & IB_QP_PATH_MIG_STATE)
+               if (attr->path_mig_state != IB_MIG_MIGRATED &&
+                   attr->path_mig_state != IB_MIG_REARM)
+                       goto inval;
+
+       if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+               if (attr->max_dest_rd_atomic > IPATH_MAX_RDMA_ATOMIC)
+                       goto inval;
+
        switch (new_state) {
        case IB_QPS_RESET:
                ipath_reset_qp(qp);
                break;
 
        case IB_QPS_ERR:
-               ipath_error_qp(qp);
+               ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
                break;
 
        default:
@@ -489,7 +534,7 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                qp->remote_qpn = attr->dest_qp_num;
 
        if (attr_mask & IB_QP_SQ_PSN) {
-               qp->s_next_psn = attr->sq_psn;
+               qp->s_psn = qp->s_next_psn = attr->sq_psn;
                qp->s_last_psn = qp->s_next_psn - 1;
        }
 
@@ -518,9 +563,18 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        if (attr_mask & IB_QP_MIN_RNR_TIMER)
                qp->r_min_rnr_timer = attr->min_rnr_timer;
 
+       if (attr_mask & IB_QP_TIMEOUT)
+               qp->timeout = attr->timeout;
+
        if (attr_mask & IB_QP_QKEY)
                qp->qkey = attr->qkey;
 
+       if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC)
+               qp->r_max_rd_atomic = attr->max_dest_rd_atomic;
+
+       if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC)
+               qp->s_max_rd_atomic = attr->max_rd_atomic;
+
        qp->state = new_state;
        spin_unlock_irqrestore(&qp->s_lock, flags);
 
@@ -560,11 +614,11 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        attr->alt_pkey_index = 0;
        attr->en_sqd_async_notify = 0;
        attr->sq_draining = 0;
-       attr->max_rd_atomic = 1;
-       attr->max_dest_rd_atomic = 1;
+       attr->max_rd_atomic = qp->s_max_rd_atomic;
+       attr->max_dest_rd_atomic = qp->r_max_rd_atomic;
        attr->min_rnr_timer = qp->r_min_rnr_timer;
        attr->port_num = 1;
-       attr->timeout = 0;
+       attr->timeout = qp->timeout;
        attr->retry_cnt = qp->s_retry_cnt;
        attr->rnr_retry = qp->s_rnr_retry;
        attr->alt_port_num = 0;
@@ -576,9 +630,10 @@ int ipath_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        init_attr->recv_cq = qp->ibqp.recv_cq;
        init_attr->srq = qp->ibqp.srq;
        init_attr->cap = attr->cap;
-       init_attr->sq_sig_type =
-               (qp->s_flags & (1 << IPATH_S_SIGNAL_REQ_WR))
-               ? IB_SIGNAL_REQ_WR : 0;
+       if (qp->s_flags & IPATH_S_SIGNAL_REQ_WR)
+               init_attr->sq_sig_type = IB_SIGNAL_REQ_WR;
+       else
+               init_attr->sq_sig_type = IB_SIGNAL_ALL_WR;
        init_attr->qp_type = qp->ibqp.qp_type;
        init_attr->port_num = 1;
        return 0;
@@ -644,33 +699,6 @@ __be32 ipath_compute_aeth(struct ipath_qp *qp)
        return cpu_to_be32(aeth);
 }
 
-/**
- * set_verbs_flags - set the verbs layer flags
- * @dd: the infinipath device
- * @flags: the flags to set
- */
-static int set_verbs_flags(struct ipath_devdata *dd, unsigned flags)
-{
-       struct ipath_devdata *ss;
-       unsigned long lflags;
-
-       spin_lock_irqsave(&ipath_devs_lock, lflags);
-
-       list_for_each_entry(ss, &ipath_dev_list, ipath_list) {
-               if (!(ss->ipath_flags & IPATH_INITTED))
-                       continue;
-               if ((flags & IPATH_VERBS_KERNEL_SMA) &&
-                   !(*ss->ipath_statusp & IPATH_STATUS_SMA))
-                       *ss->ipath_statusp |= IPATH_STATUS_OIB_SMA;
-               else
-                       *ss->ipath_statusp &= ~IPATH_STATUS_OIB_SMA;
-       }
-
-       spin_unlock_irqrestore(&ipath_devs_lock, lflags);
-
-       return 0;
-}
-
 /**
  * ipath_create_qp - create a queue pair for a device
  * @ibpd: the protection domain who's device we create the queue pair for
@@ -773,8 +801,10 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
                qp->s_wq = swq;
                qp->s_size = init_attr->cap.max_send_wr + 1;
                qp->s_max_sge = init_attr->cap.max_send_sge;
-               qp->s_flags = init_attr->sq_sig_type == IB_SIGNAL_REQ_WR ?
-                       1 << IPATH_S_SIGNAL_REQ_WR : 0;
+               if (init_attr->sq_sig_type == IB_SIGNAL_REQ_WR)
+                       qp->s_flags = IPATH_S_SIGNAL_REQ_WR;
+               else
+                       qp->s_flags = 0;
                dev = to_idev(ibpd->device);
                err = ipath_alloc_qpn(&dev->qp_table, qp,
                                      init_attr->qp_type);
@@ -784,10 +814,6 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
                }
                qp->ip = NULL;
                ipath_reset_qp(qp);
-
-               /* Tell the core driver that the kernel SMA is present. */
-               if (init_attr->qp_type == IB_QPT_SMI)
-                       set_verbs_flags(dev->dd, IPATH_VERBS_KERNEL_SMA);
                break;
 
        default:
@@ -834,9 +860,21 @@ struct ib_qp *ipath_create_qp(struct ib_pd *ibpd,
                }
        }
 
+       spin_lock(&dev->n_qps_lock);
+       if (dev->n_qps_allocated == ib_ipath_max_qps) {
+               spin_unlock(&dev->n_qps_lock);
+               ret = ERR_PTR(-ENOMEM);
+               goto bail_ip;
+       }
+
+       dev->n_qps_allocated++;
+       spin_unlock(&dev->n_qps_lock);
+
        ret = &qp->ibqp;
        goto bail;
 
+bail_ip:
+       kfree(qp->ip);
 bail_rwq:
        vfree(qp->r_rq.wq);
 bail_qp:
@@ -862,13 +900,12 @@ int ipath_destroy_qp(struct ib_qp *ibqp)
        struct ipath_ibdev *dev = to_idev(ibqp->device);
        unsigned long flags;
 
-       /* Tell the core driver that the kernel SMA is gone. */
-       if (qp->ibqp.qp_type == IB_QPT_SMI)
-               set_verbs_flags(dev->dd, 0);
-
        spin_lock_irqsave(&qp->s_lock, flags);
        qp->state = IB_QPS_ERR;
        spin_unlock_irqrestore(&qp->s_lock, flags);
+       spin_lock(&dev->n_qps_lock);
+       dev->n_qps_allocated--;
+       spin_unlock(&dev->n_qps_lock);
 
        /* Stop the sending tasklet. */
        tasklet_kill(&qp->s_task);
@@ -937,7 +974,7 @@ bail:
  * @wc: the WC responsible for putting the QP in this state
  *
  * Flushes the send work queue.
- * The QP s_lock should be held.
+ * The QP s_lock should be held and interrupts disabled.
  */
 
 void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
@@ -945,8 +982,8 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
        struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
 
-       _VERBS_INFO("Send queue error on QP%d/%d: err: %d\n",
-                   qp->ibqp.qp_num, qp->remote_qpn, wc->status);
+       ipath_dbg("Send queue error on QP%d/%d: err: %d\n",
+                 qp->ibqp.qp_num, qp->remote_qpn, wc->status);
 
        spin_lock(&dev->pending_lock);
        /* XXX What if its already removed by the timeout code? */
@@ -963,12 +1000,12 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
        wc->status = IB_WC_WR_FLUSH_ERR;
 
        while (qp->s_last != qp->s_head) {
+               wqe = get_swqe_ptr(qp, qp->s_last);
                wc->wr_id = wqe->wr.wr_id;
                wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
                ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
                if (++qp->s_last >= qp->s_size)
                        qp->s_last = 0;
-               wqe = get_swqe_ptr(qp, qp->s_last);
        }
        qp->s_cur = qp->s_tail = qp->s_head;
        qp->state = IB_QPS_SQE;