svcrdma: Fix error handling during listening endpoint creation
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c
index af408fc1263403beb37989bff89006fc7d42a669..d9ed5f24c3626e5d3442c5657a763c62a670fca3 100644
--- a/net/sunrpc/xprtrdma/svc_rdma_transport.c
+++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c
@@ -228,23 +228,8 @@ static void dto_tasklet_func(unsigned long data)
                list_del_init(&xprt->sc_dto_q);
                spin_unlock_irqrestore(&dto_lock, flags);
 
-               if (test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags)) {
-                       ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
-                       rq_cq_reap(xprt);
-                       set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
-                       /*
-                        * If data arrived before established event,
-                        * don't enqueue. This defers RPC I/O until the
-                        * RDMA connection is complete.
-                        */
-                       if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
-                               svc_xprt_enqueue(&xprt->sc_xprt);
-               }
-
-               if (test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags)) {
-                       ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
-                       sq_cq_reap(xprt);
-               }
+               rq_cq_reap(xprt);
+               sq_cq_reap(xprt);
 
                svc_xprt_put(&xprt->sc_xprt);
                spin_lock_irqsave(&dto_lock, flags);
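
Note on the hunk above: the RQ/SQ "pending" tests and the completion-queue re-arm move out of the tasklet and into the reap helpers themselves (see the following hunks), so the tasklet can simply call both helpers unconditionally. A minimal sketch of the gating idiom that now opens each helper, using the names from this file:

    /* Reaping is a no-op unless the interrupt handler marked work
     * pending; the CQ is re-armed before polling, so a completion
     * that arrives while we poll still raises a fresh event. */
    if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags))
            return;
    ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
    /* ... then poll sc_rq_cq and queue the completed contexts ... */
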
@@ -297,6 +282,10 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
        struct ib_wc wc;
        struct svc_rdma_op_ctxt *ctxt = NULL;
 
+       if (!test_and_clear_bit(RDMAXPRT_RQ_PENDING, &xprt->sc_flags))
+               return;
+
+       ib_req_notify_cq(xprt->sc_rq_cq, IB_CQ_NEXT_COMP);
        atomic_inc(&rdma_stat_rq_poll);
 
        spin_lock_bh(&xprt->sc_rq_dto_lock);
@@ -316,6 +305,15 @@ static void rq_cq_reap(struct svcxprt_rdma *xprt)
 
        if (ctxt)
                atomic_inc(&rdma_stat_rq_prod);
+
+       set_bit(XPT_DATA, &xprt->sc_xprt.xpt_flags);
+       /*
+        * If data arrived before established event,
+        * don't enqueue. This defers RPC I/O until the
+        * RDMA connection is complete.
+        */
+       if (!test_bit(RDMAXPRT_CONN_PENDING, &xprt->sc_flags))
+               svc_xprt_enqueue(&xprt->sc_xprt);
 }
 
 /*
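
The enqueue added at the end of rq_cq_reap() is skipped while RDMAXPRT_CONN_PENDING is set, so receive data that races ahead of the connection-established event is held back. The counterpart that lifts the deferral is not part of this diff; assuming it lives in the CM event handler's established case (with rdma being the connection's svcxprt_rdma), it would look roughly like:

    case RDMA_CM_EVENT_ESTABLISHED:
            /* Connection is now usable: release any RPC I/O that
             * rq_cq_reap() deferred while the connection was pending. */
            clear_bit(RDMAXPRT_CONN_PENDING, &rdma->sc_flags);
            svc_xprt_enqueue(&rdma->sc_xprt);
            break;
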
@@ -328,6 +326,11 @@ static void sq_cq_reap(struct svcxprt_rdma *xprt)
        struct ib_cq *cq = xprt->sc_sq_cq;
        int ret;
 
+
+       if (!test_and_clear_bit(RDMAXPRT_SQ_PENDING, &xprt->sc_flags))
+               return;
+
+       ib_req_notify_cq(xprt->sc_sq_cq, IB_CQ_NEXT_COMP);
        atomic_inc(&rdma_stat_sq_poll);
        while ((ret = ib_poll_cq(cq, 1, &wc)) > 0) {
                ctxt = (struct svc_rdma_op_ctxt *)(unsigned long)wc.wr_id;
@@ -521,6 +524,8 @@ int svc_rdma_post_recv(struct svcxprt_rdma *xprt)
        recv_wr.wr_id = (u64)(unsigned long)ctxt;
 
        ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
+       if (ret)
+               svc_rdma_put_context(ctxt, 1);
        return ret;
 }
 
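
The two added lines above close a small leak: if ib_post_recv() rejects the work request, the receive context (and the pages attached to it) was previously abandoned. A sketch of the resulting tail of svc_rdma_post_recv(), reconstructed from this hunk; the second argument to svc_rdma_put_context() presumably asks it to release the attached pages as well:

    ret = ib_post_recv(xprt->sc_qp, &recv_wr, &bad_recv_wr);
    if (ret)
            svc_rdma_put_context(ctxt, 1);  /* return ctxt and its pages */
    return ret;
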
@@ -627,6 +632,7 @@ static int rdma_cma_handler(struct rdma_cm_id *cma_id,
                if (xprt) {
                        set_bit(XPT_CLOSE, &xprt->xpt_flags);
                        svc_xprt_enqueue(xprt);
+                       svc_xprt_put(xprt);
                }
                break;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
@@ -661,31 +667,27 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
 
        cma_xprt = rdma_create_xprt(serv, 1);
        if (!cma_xprt)
-               return ERR_PTR(ENOMEM);
+               return ERR_PTR(-ENOMEM);
        xprt = &cma_xprt->sc_xprt;
 
        listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP);
        if (IS_ERR(listen_id)) {
-               svc_xprt_put(&cma_xprt->sc_xprt);
-               dprintk("svcrdma: rdma_create_id failed = %ld\n",
-                       PTR_ERR(listen_id));
-               return (void *)listen_id;
+               ret = PTR_ERR(listen_id);
+               dprintk("svcrdma: rdma_create_id failed = %d\n", ret);
+               goto err0;
        }
+
        ret = rdma_bind_addr(listen_id, sa);
        if (ret) {
-               rdma_destroy_id(listen_id);
-               svc_xprt_put(&cma_xprt->sc_xprt);
                dprintk("svcrdma: rdma_bind_addr failed = %d\n", ret);
-               return ERR_PTR(ret);
+               goto err1;
        }
        cma_xprt->sc_cm_id = listen_id;
 
        ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
        if (ret) {
-               rdma_destroy_id(listen_id);
-               svc_xprt_put(&cma_xprt->sc_xprt);
                dprintk("svcrdma: rdma_listen failed = %d\n", ret);
-               return ERR_PTR(ret);
+               goto err1;
        }
 
        /*
@@ -696,6 +698,12 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv,
        svc_xprt_set_local(&cma_xprt->sc_xprt, sa, salen);
 
        return &cma_xprt->sc_xprt;
+
+ err1:
+       rdma_destroy_id(listen_id);
+ err0:
+       kfree(cma_xprt);
+       return ERR_PTR(ret);
 }
 
 /*
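
Taken together, the two svc_rdma_create() hunks are the core of this patch: the positive ERR_PTR(ENOMEM) becomes a proper negative errno, and the duplicated per-call-site cleanup is replaced by a single unwind path. The error paths now free the transport with kfree() rather than svc_xprt_put(), presumably because it is not yet fully set up at this point. A condensed sketch of the resulting flow, reconstructed from the hunks above (error reporting abbreviated):

    cma_xprt = rdma_create_xprt(serv, 1);
    if (!cma_xprt)
            return ERR_PTR(-ENOMEM);

    listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP);
    if (IS_ERR(listen_id)) {
            ret = PTR_ERR(listen_id);
            goto err0;                      /* only cma_xprt to undo */
    }

    ret = rdma_bind_addr(listen_id, sa);
    if (ret)
            goto err1;
    cma_xprt->sc_cm_id = listen_id;

    ret = rdma_listen(listen_id, RPCRDMA_LISTEN_BACKLOG);
    if (ret)
            goto err1;

    /* ... record the local address, return &cma_xprt->sc_xprt ... */

    err1:
            rdma_destroy_id(listen_id);     /* undo rdma_create_id() */
    err0:
            kfree(cma_xprt);
            return ERR_PTR(ret);
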
@@ -910,27 +918,8 @@ static struct svc_xprt *svc_rdma_accept(struct svc_xprt *xprt)
        return NULL;
 }
 
-/*
- * Post an RQ WQE to the RQ when the rqst is being released. This
- * effectively returns an RQ credit to the client. The rq_xprt_ctxt
- * will be null if the request is deferred due to an RDMA_READ or the
- * transport had no data ready (EAGAIN). Note that an RPC deferred in
- * svc_process will still return the credit, this is because the data
- * is copied and no longer consume a WQE/WC.
- */
 static void svc_rdma_release_rqst(struct svc_rqst *rqstp)
 {
-       int err;
-       struct svcxprt_rdma *rdma =
-               container_of(rqstp->rq_xprt, struct svcxprt_rdma, sc_xprt);
-       if (rqstp->rq_xprt_ctxt) {
-               BUG_ON(rqstp->rq_xprt_ctxt != rdma);
-               err = svc_rdma_post_recv(rdma);
-               if (err)
-                       dprintk("svcrdma: failed to post an RQ WQE error=%d\n",
-                               err);
-       }
-       rqstp->rq_xprt_ctxt = NULL;
 }
 
 /*
@@ -1018,7 +1007,7 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
        int ret;
 
        if (test_bit(XPT_CLOSE, &xprt->sc_xprt.xpt_flags))
-               return 0;
+               return -ENOTCONN;
 
        BUG_ON(wr->send_flags != IB_SEND_SIGNALED);
        BUG_ON(((struct svc_rdma_op_ctxt *)(unsigned long)wr->wr_id)->wr_op !=
@@ -1029,7 +1018,8 @@ int svc_rdma_send(struct svcxprt_rdma *xprt, struct ib_send_wr *wr)
                if (xprt->sc_sq_depth == atomic_read(&xprt->sc_sq_count)) {
                        spin_unlock_bh(&xprt->sc_lock);
                        atomic_inc(&rdma_stat_sq_starve);
-                       /* See if we can reap some SQ WR */
+
+                       /* See if we can opportunistically reap SQ WR to make room */
                        sq_cq_reap(xprt);
 
                        /* Wait until SQ WR available if SQ still full */
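
With the XPT_CLOSE check now returning -ENOTCONN instead of 0, a send attempted on a transport that is already being torn down is reported to the caller rather than silently dropped; the reworded comment in the last hunk also makes clear that the extra sq_cq_reap() call when the SQ is full is an opportunistic attempt to free slots before waiting. A hypothetical caller pattern (names illustrative, not taken from this diff):

    ret = svc_rdma_send(rdma, &send_wr);
    if (ret) {
            /* -ENOTCONN if the transport was already marked XPT_CLOSE,
             * otherwise the posting error from the SQ path. */
            goto err;
    }
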