/*
 * Copyright (c) 2005-2006 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Author: Tom Tucker <tom@opengridcomputing.com>
 */
#ifndef SVC_RDMA_H
#define SVC_RDMA_H
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/svcsock.h>
#include <linux/sunrpc/rpc_rdma.h>
#include <rdma/ib_verbs.h>
#include <rdma/rdma_cm.h>
/* RPC/RDMA parameters and stats */
extern unsigned int svcrdma_ord;
extern unsigned int svcrdma_max_requests;
extern unsigned int svcrdma_max_req_size;

extern atomic_t rdma_stat_recv;
extern atomic_t rdma_stat_read;
extern atomic_t rdma_stat_write;
extern atomic_t rdma_stat_sq_starve;
extern atomic_t rdma_stat_rq_starve;
extern atomic_t rdma_stat_rq_poll;
extern atomic_t rdma_stat_rq_prod;
extern atomic_t rdma_stat_sq_poll;
extern atomic_t rdma_stat_sq_prod;

#define RPCRDMA_VERSION 1
/*
 * Contexts are built when an RDMA request is created and are a
 * record of the resources that can be recovered when the request
 * completes.
 */
struct svc_rdma_op_ctxt {
	struct svc_rdma_op_ctxt *read_hdr;
	struct list_head free_list;
	struct list_head dto_q;
	enum ib_wr_opcode wr_op;
	enum ib_wc_status wc_status;
	struct svcxprt_rdma *xprt;
	enum dma_data_direction direction;
	struct ib_sge sge[RPCSVC_MAXPAGES];
	struct page *pages[RPCSVC_MAXPAGES];
};
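
/*
 * Usage sketch (an added illustration, not part of the original header),
 * based on svc_rdma_get_context()/svc_rdma_put_context() declared below:
 *
 *	ctxt = svc_rdma_get_context(rdma_xprt);
 *	ctxt->direction = DMA_FROM_DEVICE;
 *	(fill ctxt->sge[] and ctxt->pages[], then post the work request)
 *
 *	(when the WR's completion is reaped, or the post fails)
 *	svc_rdma_put_context(ctxt, 1);	(1 => also release the pages)
 */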
/*
 * NFS requests are mapped on the client side by the chunk lists in
 * the RPCRDMA header. During the fetching of the RPC from the client
 * and the writing of the reply to the client, the memory in the
 * client and the memory in the server must be mapped as contiguous
 * vaddr/len for access by the hardware. These data structures keep
 * these mappings.
 *
 * For an RDMA_WRITE, the 'sge' maps the RPC REPLY. For RDMA_READ, the
 * 'sge' in the svc_rdma_req_map maps the server side RPC reply and the
 * 'ch' field maps the read-list of the RPCRDMA header to the 'sge'
 * mapping of the reply.
 */
struct svc_rdma_chunk_sge {
	int start;	/* sge no for this chunk */
	int count;	/* sge count for this chunk */
};
struct svc_rdma_req_map {
	struct kvec sge[RPCSVC_MAXPAGES];
	struct svc_rdma_chunk_sge ch[RPCSVC_MAXPAGES];
};
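
/*
 * Illustrative helper (an addition, not in the original header): returns
 * the total byte count described by one chunk's scatter/gather entries.
 * It shows how ch[chunk] selects the run of sge[] entries holding that
 * chunk's contiguous vaddr/len segments.
 */
static inline size_t
svc_rdma_req_map_chunk_len(struct svc_rdma_req_map *map, int chunk)
{
	struct svc_rdma_chunk_sge *ch = &map->ch[chunk];
	size_t len = 0;
	int i;

	for (i = ch->start; i < ch->start + ch->count; i++)
		len += map->sge[i].iov_len;	/* one contiguous segment */
	return len;
}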
#define RDMACTXT_F_LAST_CTXT	2

struct svcxprt_rdma {
	struct svc_xprt      sc_xprt;		/* SVC transport structure */
	struct rdma_cm_id    *sc_cm_id;		/* RDMA connection id */
	struct list_head     sc_accept_q;	/* Conn. waiting accept */
	int		     sc_ord;		/* RDMA read limit */
	wait_queue_head_t    sc_read_wait;

	int		     sc_sq_depth;	/* Depth of SQ */
	atomic_t	     sc_sq_count;	/* Number of SQ WR on queue */

	int		     sc_max_requests;	/* Depth of RQ */
	int		     sc_max_req_size;	/* Size of each RQ WR buf */

	atomic_t	     sc_ctxt_used;
	struct list_head     sc_ctxt_free;
	spinlock_t	     sc_ctxt_lock;
	struct list_head     sc_rq_dto_q;
	spinlock_t	     sc_rq_dto_lock;
	struct ib_cq	     *sc_rq_cq;
	struct ib_cq	     *sc_sq_cq;
	struct ib_mr	     *sc_phys_mr;	/* MR for server memory */

	spinlock_t	     sc_lock;		/* transport lock */

	wait_queue_head_t    sc_send_wait;	/* SQ exhaustion waitlist */
	unsigned long	     sc_flags;
	struct list_head     sc_dto_q;		/* DTO tasklet I/O pending Q */
	struct list_head     sc_read_complete_q;
	spinlock_t	     sc_read_complete_lock;
	struct work_struct   sc_work;
};
/* sc_flags */
#define RDMAXPRT_RQ_PENDING	1
#define RDMAXPRT_SQ_PENDING	2
#define RDMAXPRT_CONN_PENDING	3
#define RPCRDMA_LISTEN_BACKLOG	10
/* The default ORD value is based on two outstanding full-size writes with a
 * page size of 4k, or 32k * 2 ops / 4k = 16 outstanding RDMA_READs. */
#define RPCRDMA_ORD		(64/4)
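/*
 * Worked example (added note): a full-size 32k write spans 32k/4k = 8
 * pages, so two outstanding writes require up to 2 * 8 = 16 concurrent
 * RDMA_READs, which is what 64/4 yields.
 */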
#define RPCRDMA_SQ_DEPTH_MULT	8
#define RPCRDMA_MAX_THREADS	16
#define RPCRDMA_MAX_REQUESTS	16
#define RPCRDMA_MAX_REQ_SIZE	4096
/* svc_rdma_marshal.c */
extern void svc_rdma_rcl_chunk_counts(struct rpcrdma_read_chunk *,
				      int *, int *);
extern int svc_rdma_xdr_decode_req(struct rpcrdma_msg **, struct svc_rqst *);
extern int svc_rdma_xdr_decode_deferred_req(struct svc_rqst *);
extern int svc_rdma_xdr_encode_error(struct svcxprt_rdma *,
				     struct rpcrdma_msg *,
				     enum rpcrdma_errcode, u32 *);
extern void svc_rdma_xdr_encode_write_list(struct rpcrdma_msg *, int);
extern void svc_rdma_xdr_encode_reply_array(struct rpcrdma_write_array *, int);
extern void svc_rdma_xdr_encode_array_chunk(struct rpcrdma_write_array *, int,
					    u32, u64, u32);
extern void svc_rdma_xdr_encode_reply_header(struct svcxprt_rdma *,
					     struct rpcrdma_msg *,
					     struct rpcrdma_msg *,
					     enum rpcrdma_proc);
extern int svc_rdma_xdr_get_reply_hdr_len(struct rpcrdma_msg *);
/* svc_rdma_recvfrom.c */
extern int svc_rdma_recvfrom(struct svc_rqst *);

/* svc_rdma_sendto.c */
extern int svc_rdma_sendto(struct svc_rqst *);

/* svc_rdma_transport.c */
extern int svc_rdma_send(struct svcxprt_rdma *, struct ib_send_wr *);
extern void svc_rdma_send_error(struct svcxprt_rdma *, struct rpcrdma_msg *,
				enum rpcrdma_errcode);
extern struct page *svc_rdma_get_page(void);
extern int svc_rdma_post_recv(struct svcxprt_rdma *);
extern int svc_rdma_create_listen(struct svc_serv *, int, struct sockaddr *);
extern struct svc_rdma_op_ctxt *svc_rdma_get_context(struct svcxprt_rdma *);
extern void svc_rdma_put_context(struct svc_rdma_op_ctxt *, int);
extern struct svc_rdma_req_map *svc_rdma_get_req_map(void);
extern void svc_rdma_put_req_map(struct svc_rdma_req_map *);
extern void svc_sq_reap(struct svcxprt_rdma *);
extern void svc_rq_reap(struct svcxprt_rdma *);
extern struct svc_xprt_class svc_rdma_class;
extern void svc_rdma_prep_reply_hdr(struct svc_rqst *);
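
/*
 * Illustrative sketch (an addition, not part of the original header):
 * building a signaled SEND WR around an op context and posting it with
 * svc_rdma_send(), which waits on sc_send_wait if the SQ is exhausted.
 */
static inline int svc_rdma_post_send_example(struct svcxprt_rdma *xprt,
					     struct svc_rdma_op_ctxt *ctxt)
{
	struct ib_send_wr send_wr = {
		.wr_id		= (unsigned long)ctxt,
		.sg_list	= ctxt->sge,
		.num_sge	= 1,
		.opcode		= IB_WR_SEND,
		.send_flags	= IB_SEND_SIGNALED,
	};

	return svc_rdma_send(xprt, &send_wr);
}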
/* svc_rdma.c */
extern int svc_rdma_init(void);
extern void svc_rdma_cleanup(void);
/*
 * Returns the address of the first read chunk, or NULL if no read chunk
 * is present.
 */
static inline struct rpcrdma_read_chunk *
svc_rdma_get_read_chunk(struct rpcrdma_msg *rmsgp)
{
	struct rpcrdma_read_chunk *ch =
		(struct rpcrdma_read_chunk *)&rmsgp->rm_body.rm_chunks[0];

	if (ch->rc_discrim == 0)
		return NULL;

	return ch;
}
/*
 * Returns the address of the first write array element, or NULL if no
 * write array list is present.
 */
static inline struct rpcrdma_write_array *
svc_rdma_get_write_array(struct rpcrdma_msg *rmsgp)
{
	if (rmsgp->rm_body.rm_chunks[0] != 0
	    || rmsgp->rm_body.rm_chunks[1] == 0)
		return NULL;

	return (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[1];
}
/*
 * Returns the address of the first reply array element, or NULL if no
 * reply array is present.
 */
static inline struct rpcrdma_write_array *
svc_rdma_get_reply_array(struct rpcrdma_msg *rmsgp)
{
	struct rpcrdma_read_chunk *rch;
	struct rpcrdma_write_array *wr_ary;
	struct rpcrdma_write_array *rp_ary;

	/* XXX: Need to fix when reply list may occur with read-list and/or
	 * write-list */
	if (rmsgp->rm_body.rm_chunks[0] != 0 ||
	    rmsgp->rm_body.rm_chunks[1] != 0)
		return NULL;

	rch = svc_rdma_get_read_chunk(rmsgp);
	if (rch) {
		while (rch->rc_discrim)
			rch++;

		/* The reply list follows an empty write array located
		 * at 'rc_position' here. The reply array is at rc_target.
		 */
		rp_ary = (struct rpcrdma_write_array *)&rch->rc_target;
		goto found_it;
	}

	wr_ary = svc_rdma_get_write_array(rmsgp);
	if (wr_ary) {
		rp_ary = (struct rpcrdma_write_array *)
			&wr_ary->wc_array[wr_ary->wc_nchunks].wc_target.rs_length;
		goto found_it;
	}

	/* No read list, no write list */
	rp_ary = (struct rpcrdma_write_array *)&rmsgp->rm_body.rm_chunks[2];

 found_it:
	if (rp_ary->wc_discrim == 0)
		return NULL;

	return rp_ary;
}
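
/*
 * Illustrative sketch (an addition, not part of the original header):
 * how a caller might use the accessors above to classify an incoming
 * RPC/RDMA message.
 */
static inline void svc_rdma_classify_msg_example(struct rpcrdma_msg *rmsgp)
{
	if (svc_rdma_get_read_chunk(rmsgp)) {
		/* Client supplied a read list: fetch the RPC call
		 * arguments with RDMA_READ. */
	}
	if (svc_rdma_get_write_array(rmsgp)) {
		/* Client supplied a write list: push bulk reply data
		 * with RDMA_WRITE. */
	}
	if (svc_rdma_get_reply_array(rmsgp)) {
		/* Client supplied a reply array: the RPC reply itself
		 * is written via RDMA_WRITE. */
	}
}

#endif /* SVC_RDMA_H */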