Git - linux-2.6/blobdiff - drivers/infiniband/hw/ipath/ipath_qp.c
IB/ipath: Fix CQ flushing when QP is modified to error state
[linux-2.6] / drivers / infiniband / hw / ipath / ipath_qp.c
index c122fea9145b5cc88acc15923b628d8b2052ed56..f671fd07325302977e83d08040bb31d08877ec30 100644 (file)
@@ -274,7 +274,7 @@ void ipath_free_all_qps(struct ipath_qp_table *qpt)
                                free_qpn(qpt, qp->ibqp.qp_num);
                        if (!atomic_dec_and_test(&qp->refcount) ||
                            !ipath_destroy_qp(&qp->ibqp))
-                               ipath_dbg(KERN_INFO "QP memory leak!\n");
+                               ipath_dbg("QP memory leak!\n");
                        qp = nqp;
                }
        }
@@ -361,7 +361,7 @@ static void ipath_reset_qp(struct ipath_qp *qp)
  * @err: the receive completion error to signal if a RWQE is active
  *
  * Flushes both send and receive work queues.
- * QP s_lock should be held and interrupts disabled.
+ * The QP s_lock should be held and interrupts disabled.
  */
 
 void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
@@ -369,7 +369,7 @@ void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
        struct ib_wc wc;
 
-       ipath_dbg(KERN_INFO "QP%d/%d in error state\n",
+       ipath_dbg("QP%d/%d in error state\n",
                  qp->ibqp.qp_num, qp->remote_qpn);
 
        spin_lock(&dev->pending_lock);
@@ -393,6 +393,8 @@ void ipath_error_qp(struct ipath_qp *qp, enum ib_wc_status err)
        wc.port_num = 0;
        if (qp->r_wrid_valid) {
                qp->r_wrid_valid = 0;
+               wc.wr_id = qp->r_wr_id;
+               wc.opcode = IB_WC_RECV;
                wc.status = err;
                ipath_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 1);
        }
@@ -517,7 +519,7 @@ int ipath_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                break;
 
        case IB_QPS_ERR:
-               ipath_error_qp(qp, IB_WC_GENERAL_ERR);
+               ipath_error_qp(qp, IB_WC_WR_FLUSH_ERR);
                break;
 
        default:
@@ -972,7 +974,7 @@ bail:
  * @wc: the WC responsible for putting the QP in this state
  *
  * Flushes the send work queue.
- * The QP s_lock should be held.
+ * The QP s_lock should be held and interrupts disabled.
  */
 
 void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
@@ -980,7 +982,7 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
        struct ipath_ibdev *dev = to_idev(qp->ibqp.device);
        struct ipath_swqe *wqe = get_swqe_ptr(qp, qp->s_last);
 
-       ipath_dbg(KERN_INFO "Send queue error on QP%d/%d: err: %d\n",
+       ipath_dbg("Send queue error on QP%d/%d: err: %d\n",
                  qp->ibqp.qp_num, qp->remote_qpn, wc->status);
 
        spin_lock(&dev->pending_lock);
@@ -998,12 +1000,12 @@ void ipath_sqerror_qp(struct ipath_qp *qp, struct ib_wc *wc)
        wc->status = IB_WC_WR_FLUSH_ERR;
 
        while (qp->s_last != qp->s_head) {
+               wqe = get_swqe_ptr(qp, qp->s_last);
                wc->wr_id = wqe->wr.wr_id;
                wc->opcode = ib_ipath_wc_opcode[wqe->wr.opcode];
                ipath_cq_enter(to_icq(qp->ibqp.send_cq), wc, 1);
                if (++qp->s_last >= qp->s_size)
                        qp->s_last = 0;
-               wqe = get_swqe_ptr(qp, qp->s_last);
        }
        qp->s_cur = qp->s_tail = qp->s_head;
        qp->state = IB_QPS_SQE;