diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index 31b1927b5ee61a96078c4ec8ba3409de36b14468..e132509d1db06285e6af8695c0e5171db0d9f766 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -162,10 +162,10 @@ void svc_rdma_put_context(struct svc_rdma_op_ctxt *ctxt, int free_pages)
                        put_page(ctxt->pages[i]);
 
        for (i = 0; i < ctxt->count; i++)
-               dma_unmap_single(xprt->sc_cm_id->device->dma_device,
-                                ctxt->sge[i].addr,
-                                ctxt->sge[i].length,
-                                ctxt->direction);
+               ib_dma_unmap_single(xprt->sc_cm_id->device,
+                                   ctxt->sge[i].addr,
+                                   ctxt->sge[i].length,
+                                   ctxt->direction);
 
        spin_lock_bh(&xprt->sc_ctxt_lock);
        list_add(&ctxt->free_list, &xprt->sc_ctxt_free);
@@ -252,11 +252,15 @@ static void rq_comp_handler(struct ib_cq *cq, void *cq_context)
        struct svcxprt_rdma *xprt = cq_context;
        unsigned long flags;
 
+       /* Guard against unconditional flush call for destroyed QP */
+       if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
+               return;
+
        /*
         * Set the bit regardless of whether or not it's on the list
         * because it may be on the list already due to an SQ
         * completion.
-       */
+        */
        set_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags);
 
        /*
@@ -393,11 +397,15 @@ static void sq_comp_handler(struct ib_cq *cq, void *cq_context)
        struct svcxprt_rdma *xprt = cq_context;
        unsigned long flags;
 
+       /* Guard against unconditional flush call for destroyed QP */
+       if (atomic_read(&xprt->sc_xprt.xpt_ref.refcount) == 0)
+               return;
+
        /*
         * Set the bit regardless of whether or not it's on the list
         * because it may be on the list already due to an RQ
         * completion.
-       */
+        */
        set_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags);
 
        /*
@@ -562,6 +570,7 @@ static void handle_connect_req(struct rdma_cm_id *new_cma_id)
 {
        struct svcxprt_rdma *listen_xprt = new_cma_id->context;
        struct svcxprt_rdma *newxprt;
+       struct sockaddr *sa;
 
        /* Create a new transport */
        newxprt = rdma_create_xprt(listen_xprt->sc_xprt.xpt_server, 0);
@@ -574,6 +583,12 @@ static void handle_connect_req(struct rdma_cm_id *new_cma_id)
        dprintk("svcrdma: Creating newxprt=%p, cm_id=%p, listenxprt=%p\n",
                newxprt, newxprt->sc_cm_id, listen_xprt);
 
+       /* Set the local and remote addresses in the transport */
+       sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
+       svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
+       sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
+       svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));
+
        /*
         * Enqueue the new transport on the accept queue of the listening
         * transport
@@ -742,7 +757,6 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
        struct rdma_conn_param conn_param;
        struct ib_qp_init_attr qp_attr;
        struct ib_device_attr devattr;
-       struct sockaddr *sa;
        int ret;
        int i;
 
@@ -852,7 +866,6 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
                newxprt->sc_sq_depth = qp_attr.cap.max_send_wr;
                newxprt->sc_max_requests = qp_attr.cap.max_recv_wr;
        }
-       svc_xprt_get(&newxprt->sc_xprt);
        newxprt->sc_qp = newxprt->sc_cm_id->qp;
 
        /* Register all of physical memory */
@@ -876,6 +889,13 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
        /* Swap out the handler */
        newxprt->sc_cm_id->event_handler = rdma_cma_handler;
 
+       /*
+        * Arm the CQs for the SQ and RQ before accepting so we can't
+        * miss the first message
+        */
+       ib_req_notify_cq(newxprt->sc_sq_cq, IB_CQ_NEXT_COMP);
+       ib_req_notify_cq(newxprt->sc_rq_cq, IB_CQ_NEXT_COMP);
+
        /* Accept Connection */
        set_bit(RDMAXPRT_CONN_PENDING, &newxprt->sc_flags);
        memset(&conn_param, 0, sizeof conn_param);
@@ -912,24 +932,14 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
                newxprt->sc_max_requests,
                newxprt->sc_ord);
 
-       /* Set the local and remote addresses in the transport */
-       sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
-       svc_xprt_set_remote(&newxprt->sc_xprt, sa, svc_addr_len(sa));
-       sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
-       svc_xprt_set_local(&newxprt->sc_xprt, sa, svc_addr_len(sa));
-
-       ib_req_notify_cq(newxprt->sc_sq_cq, IB_CQ_NEXT_COMP);
-       ib_req_notify_cq(newxprt->sc_rq_cq, IB_CQ_NEXT_COMP);
        return &newxprt->sc_xprt;
 
  errout:
        dprintk("svcrdma: failure accepting new connection rc=%d.\n", ret);
        /* Take a reference in case the DTO handler runs */
        svc_xprt_get(&newxprt->sc_xprt);
-       if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp)) {
+       if (newxprt->sc_qp && !IS_ERR(newxprt->sc_qp))
                ib_destroy_qp(newxprt->sc_qp);
-               svc_xprt_put(&newxprt->sc_xprt);
-       }
        rdma_destroy_id(newxprt->sc_cm_id);
        /* This call to put will destroy the transport */
        svc_xprt_put(&newxprt->sc_xprt);
@@ -941,10 +951,7 @@ static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
 }
 
 /*
- * When connected, an svc_xprt has at least three references:
- *
- * - A reference held by the QP. We still hold that here because this
- *   code deletes the QP and puts the reference.
+ * When connected, an svc_xprt has at least two references:
  *
  * - A reference held by the cm_id between the ESTABLISHED and
  *   DISCONNECTED events. If the remote peer disconnected first, this
@@ -953,7 +960,7 @@ static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
  * - A reference held by the svc_recv code that called this function
  *   as part of close processing.
  *
- * At a minimum two references should still be held.
+ * At a minimum one reference should still be held.
  */
 static void svc_rdma_detach(struct svc_xprt *xprt)
 {
@@ -963,15 +970,6 @@ static void svc_rdma_detach(struct svc_xprt *xprt)
 
        /* Disconnect and flush posted WQE */
        rdma_disconnect(rdma->sc_cm_id);
-
-       /* Destroy the QP if present (not a listener) */
-       if (rdma->sc_qp && !IS_ERR(rdma->sc_qp)) {
-               ib_destroy_qp(rdma->sc_qp);
-               svc_xprt_put(xprt);
-       }
-
-       /* Destroy the CM ID */
-       rdma_destroy_id(rdma->sc_cm_id);
 }
 
 static void __svc_rdma_free(struct work_struct *work)
@@ -983,6 +981,42 @@ static void __svc_rdma_free(struct work_struct *work)
        /* We should only be called from kref_put */
        BUG_ON(atomic_read(&rdma->sc_xprt.xpt_ref.refcount) != 0);
 
+       /*
+        * Destroy read completions that were queued but never
+        * processed. This cleanup must happen before the cm_id is
+        * destroyed because the device pointer is needed to unmap
+        * the DMA in svc_rdma_put_context.
+        */
+       spin_lock_bh(&rdma->sc_read_complete_lock);
+       while (!list_empty(&rdma->sc_read_complete_q)) {
+               struct svc_rdma_op_ctxt *ctxt;
+               ctxt = list_entry(rdma->sc_read_complete_q.next,
+                                 struct svc_rdma_op_ctxt,
+                                 dto_q);
+               list_del_init(&ctxt->dto_q);
+               svc_rdma_put_context(ctxt, 1);
+       }
+       spin_unlock_bh(&rdma->sc_read_complete_lock);
+
+       /* Destroy recv completions that were queued but never processed */
+       spin_lock_bh(&rdma->sc_rq_dto_lock);
+       while (!list_empty(&rdma->sc_rq_dto_q)) {
+               struct svc_rdma_op_ctxt *ctxt;
+               ctxt = list_entry(rdma->sc_rq_dto_q.next,
+                                 struct svc_rdma_op_ctxt,
+                                 dto_q);
+               list_del_init(&ctxt->dto_q);
+               svc_rdma_put_context(ctxt, 1);
+       }
+       spin_unlock_bh(&rdma->sc_rq_dto_lock);
+
+       /* Warn if we leaked a resource or under-referenced */
+       WARN_ON(atomic_read(&rdma->sc_ctxt_used) != 0);
+
+       /* Destroy the QP if present (not a listener) */
+       if (rdma->sc_qp && !IS_ERR(rdma->sc_qp))
+               ib_destroy_qp(rdma->sc_qp);
+
        if (rdma->sc_sq_cq && !IS_ERR(rdma->sc_sq_cq))
                ib_destroy_cq(rdma->sc_sq_cq);
 
@@ -995,6 +1029,9 @@ static void __svc_rdma_free(struct work_struct *work)
        if (rdma->sc_pd && !IS_ERR(rdma->sc_pd))
                ib_dealloc_pd(rdma->sc_pd);
 
+       /* Destroy the CM ID */
+       rdma_destroy_id(rdma->sc_cm_id);
+
        destroy_context_cache(rdma);
        kfree(rdma);
 }
@@ -1077,8 +1114,8 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
        return ret;
 }
 
-int svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
-                       enum rpcrdma_errcode err)
+void svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
+                        enum rpcrdma_errcode err)
 {
        struct ib_send_wr err_wr;
        struct ib_sge sge;
@@ -1116,9 +1153,8 @@ int svc_rdma_send_error(struct svcxprt_rdma *xprt, struct rpcrdma_msg *rmsgp,
        /* Post It */
        ret = svc_rdma_send(xprt, &err_wr);
        if (ret) {
-               dprintk("svcrdma: Error posting send = %d\n", ret);
+               dprintk("svcrdma: Error %d posting send for protocol error\n",
+                       ret);
                svc_rdma_put_context(ctxt, 1);
        }
-
-       return ret;
 }
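
Note on the teardown ordering above: in __svc_rdma_free() the queued-but-unprocessed read and receive contexts must be released while the cm_id is still valid, because svc_rdma_put_context() needs the ib_device pointer to unmap their DMA; only after both DTO queues are drained is rdma_destroy_id() called. The standalone C sketch below (hypothetical, simplified types and names, not kernel code) illustrates that drain-before-destroy pattern.

#include <stdio.h>
#include <stdlib.h>

/*
 * Hypothetical, simplified analogues of the structures involved: each
 * queued "context" must be unmapped against a "device" before the
 * handle that owns the device is torn down.
 */
struct device  { const char *name; };
struct context { struct context *next; int id; };

struct transport {
	struct device  *dev;            /* analogue of cm_id->device      */
	struct context *read_complete;  /* analogue of sc_read_complete_q */
	struct context *rq_dto;         /* analogue of sc_rq_dto_q        */
};

/* Analogue of svc_rdma_put_context(): needs a live device pointer. */
static void put_context(struct transport *t, struct context *c)
{
	printf("unmap ctxt %d via device %s\n", c->id, t->dev->name);
	free(c);
}

static void drain(struct transport *t, struct context **q)
{
	while (*q) {
		struct context *c = *q;
		*q = c->next;
		put_context(t, c);
	}
}

/*
 * Analogue of __svc_rdma_free(): drain both queues *before* dropping
 * the device handle, mirroring the ordering relative to
 * rdma_destroy_id() in the patch above.
 */
static void transport_free(struct transport *t)
{
	drain(t, &t->read_complete);
	drain(t, &t->rq_dto);
	free(t->dev);           /* only now is the device handle dropped */
	free(t);
}

int main(void)
{
	struct transport *t = calloc(1, sizeof(*t));
	struct context *c = malloc(sizeof(*c));

	t->dev = malloc(sizeof(*t->dev));
	t->dev->name = "dev0";
	c->id = 1;
	c->next = NULL;
	t->rq_dto = c;          /* pretend one recv completion is still queued */

	transport_free(t);
	return 0;
}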