/*
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */

#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/prefetch.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>
#include <asm/bitops.h>
#include <linux/module.h> /* try_module_get()/module_put() */
#include <rdma/ib_cm.h>

#include "xprt_rdma.h"

/*
 * Globals/Macros
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/*
 * internal functions
 */

static struct workqueue_struct *rpcrdma_receive_wq;

int
rpcrdma_alloc_wq(void)
{
	struct workqueue_struct *recv_wq;

	recv_wq = alloc_workqueue("xprtrdma_receive",
				  WQ_MEM_RECLAIM | WQ_UNBOUND | WQ_HIGHPRI,
				  0);
	if (!recv_wq)
		return -ENOMEM;

	rpcrdma_receive_wq = recv_wq;
	return 0;
}

void
rpcrdma_destroy_wq(void)
{
	struct workqueue_struct *wq;

	if (rpcrdma_receive_wq) {
		wq = rpcrdma_receive_wq;
		rpcrdma_receive_wq = NULL;
		destroy_workqueue(wq);
	}
}

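/* QP event handler, wired up via ep->rep_attr.event_handler.
 * Log the event; if the transport was connected, mark it failed
 * (-EIO), kick the connect worker via rpcrdma_conn_func(), and
 * wake anyone waiting on ep->rep_connect_wait.
 */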
static void
rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
{
	struct rpcrdma_ep *ep = context;

	pr_err("rpcrdma: %s on device %s ep %p\n",
	       ib_event_msg(event->event), event->device->name, context);

	if (ep->rep_connected == 1) {
		ep->rep_connected = -EIO;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
	}
}

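/* Note: Sends are mostly unsignaled. rpcrdma_ep_post() requests a
 * completion only for every ep->rep_cqinit-th Send WR (see
 * rpcrdma_set_signaled()), so this handler sees those periodic
 * completions plus any Send that completes in error.
 */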
/**
 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: Send: %s (%u/0x%x)\n",
		       ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
}

/* Perform basic sanity checking to avoid using garbage
 * to update the credit grant value.
 */
static void
rpcrdma_update_granted_credits(struct rpcrdma_rep *rep)
{
	struct rpcrdma_msg *rmsgp = rdmab_to_msg(rep->rr_rdmabuf);
	struct rpcrdma_buffer *buffer = &rep->rr_rxprt->rx_buf;
	u32 credits;

	if (rep->rr_len < RPCRDMA_HDRLEN_ERR)
		return;

	credits = be32_to_cpu(rmsgp->rm_credit);
	if (credits == 0)
		credits = 1;	/* don't deadlock */
	else if (credits > buffer->rb_max_requests)
		credits = buffer->rb_max_requests;

	atomic_set(&buffer->rb_credits, credits);
}

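/* Receive completions are polled in softirq context (the receive CQ
 * is allocated with IB_POLL_SOFTIRQ in rpcrdma_ep_create()). Capture
 * what is needed from the wc here, then hand the rep off to
 * rpcrdma_receive_wq, where rpcrdma_reply_handler() completes
 * reply processing.
 */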
/**
 * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
					       rr_cqe);

	/* WARNING: Only wr_id and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS)
		goto out_fail;

	/* status == SUCCESS means all fields in wc are trustworthy */
	if (wc->opcode != IB_WC_RECV)
		return;

	dprintk("RPC:       %s: rep %p opcode 'recv', length %u: success\n",
		__func__, rep, wc->byte_len);

	rep->rr_len = wc->byte_len;
	rep->rr_wc_flags = wc->wc_flags;
	rep->rr_inv_rkey = wc->ex.invalidate_rkey;

	ib_dma_sync_single_for_cpu(rep->rr_device,
				   rdmab_addr(rep->rr_rdmabuf),
				   rep->rr_len, DMA_FROM_DEVICE);

	rpcrdma_update_granted_credits(rep);

out_schedule:
	queue_work(rpcrdma_receive_wq, &rep->rr_work);
	return;

out_fail:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: Recv: %s (%u/0x%x)\n",
		       ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
	rep->rr_len = RPCRDMA_BAD_LEN;
	goto out_schedule;
}

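/* The server can include RPC-over-RDMA CM private data in its
 * connection reply. If the magic and version match, shrink this
 * transport's inline thresholds to the remote's advertised buffer
 * sizes, and expect the peer to use Remote Invalidation.
 */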
static void
rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt,
			       struct rdma_conn_param *param)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	const struct rpcrdma_connect_private *pmsg = param->private_data;
	unsigned int rsize, wsize;

	/* Default settings for RPC-over-RDMA Version One */
	r_xprt->rx_ia.ri_reminv_expected = false;
	r_xprt->rx_ia.ri_implicit_roundup = xprt_rdma_pad_optimize;
	rsize = RPCRDMA_V1_DEF_INLINE_SIZE;
	wsize = RPCRDMA_V1_DEF_INLINE_SIZE;

	if (pmsg &&
	    pmsg->cp_magic == rpcrdma_cmp_magic &&
	    pmsg->cp_version == RPCRDMA_CMP_VERSION) {
		r_xprt->rx_ia.ri_reminv_expected = true;
		r_xprt->rx_ia.ri_implicit_roundup = true;
		rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
		wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
	}

	if (rsize < cdata->inline_rsize)
		cdata->inline_rsize = rsize;
	if (wsize < cdata->inline_wsize)
		cdata->inline_wsize = wsize;
	dprintk("RPC:       %s: max send %u, max recv %u\n",
		__func__, cdata->inline_wsize, cdata->inline_rsize);
	rpcrdma_set_max_header_sizes(r_xprt);
}

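/* CM event upcall. Address and route resolution events complete
 * &ia->ri_done so rpcrdma_create_id() can continue; connection
 * events are folded into a connstate value that is latched into
 * ep->rep_connected before waiters on ep->rep_connect_wait run.
 */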
static int
rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct rpcrdma_xprt *xprt = id->context;
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	struct rpcrdma_ep *ep = &xprt->rx_ep;
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	struct sockaddr *sap = (struct sockaddr *)&ep->rep_remote_addr;
#endif
	struct ib_qp_attr *attr = &ia->ri_qp_attr;
	struct ib_qp_init_attr *iattr = &ia->ri_qp_init_attr;
	int connstate = 0;

	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ia->ri_async_rc = 0;
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ia->ri_async_rc = -EHOSTUNREACH;
		dprintk("RPC:       %s: CM address resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ia->ri_async_rc = -ENETUNREACH;
		dprintk("RPC:       %s: CM route resolution error, ep 0x%p\n",
			__func__, ep);
		complete(&ia->ri_done);
		break;
	case RDMA_CM_EVENT_ESTABLISHED:
		connstate = 1;
		ib_query_qp(ia->ri_id->qp, attr,
			    IB_QP_MAX_QP_RD_ATOMIC | IB_QP_MAX_DEST_RD_ATOMIC,
			    iattr);
		dprintk("RPC:       %s: %d responder resources"
			" (%d initiator)\n",
			__func__, attr->max_dest_rd_atomic,
			attr->max_rd_atomic);
		rpcrdma_update_connect_private(xprt, &event->param.conn);
		goto connected;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		connstate = -ENOTCONN;
		goto connected;
	case RDMA_CM_EVENT_UNREACHABLE:
		connstate = -ENETDOWN;
		goto connected;
	case RDMA_CM_EVENT_REJECTED:
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
		pr_info("rpcrdma: connection to %pIS:%u on %s rejected: %s\n",
			sap, rpc_get_port(sap), ia->ri_device->name,
			rdma_reject_msg(id, event->status));
#endif
		connstate = -ECONNREFUSED;
		if (event->status == IB_CM_REJ_STALE_CONN)
			connstate = -EAGAIN;
		goto connected;
	case RDMA_CM_EVENT_DISCONNECTED:
		connstate = -ECONNABORTED;
		goto connected;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
		connstate = -ENODEV;
connected:
		dprintk("RPC:       %s: %sconnected\n",
			__func__, connstate > 0 ? "" : "dis");
		atomic_set(&xprt->rx_buf.rb_credits, 1);
		ep->rep_connected = connstate;
		rpcrdma_conn_func(ep);
		wake_up_all(&ep->rep_connect_wait);
		/*FALLTHROUGH*/
	default:
		dprintk("RPC:       %s: %pIS:%u (ep 0x%p): %s\n",
			__func__, sap, rpc_get_port(sap), ep,
			rdma_event_msg(event->event));
		break;
	}

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
	if (connstate == 1) {
		int ird = attr->max_dest_rd_atomic;
		int tird = ep->rep_remote_cma.responder_resources;

		pr_info("rpcrdma: connection to %pIS:%u on %s, memreg '%s', %d credits, %d responders%s\n",
			sap, rpc_get_port(sap),
			ia->ri_device->name,
			ia->ri_ops->ro_displayname,
			xprt->rx_buf.rb_max_requests,
			ird, ird < 4 && ird < tird / 2 ? " (low!)" : "");
	} else if (connstate < 0) {
		pr_info("rpcrdma: connection to %pIS:%u closed (%d)\n",
			sap, rpc_get_port(sap), connstate);
	}
#endif

	return 0;
}

static void rpcrdma_destroy_id(struct rdma_cm_id *id)
{
	if (id) {
		module_put(id->device->owner);
		rdma_destroy_id(id);
	}
}

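/* Create a new rdma_cm_id and synchronously resolve the server's
 * address and route. Each wait allows one jiffy more than
 * RDMA_RESOLVE_TIMEOUT so that the CM, not this waiter, decides
 * when to time out. On success the device module stays pinned
 * (see the FIXME below) until rpcrdma_destroy_id() releases it.
 */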
static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt,
		  struct rpcrdma_ia *ia, struct sockaddr *addr)
{
	unsigned long wtimeout = msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1;
	struct rdma_cm_id *id;
	int rc;

	init_completion(&ia->ri_done);

	id = rdma_create_id(&init_net, rpcrdma_conn_upcall, xprt, RDMA_PS_TCP,
			    IB_QPT_RC);
	if (IS_ERR(id)) {
		rc = PTR_ERR(id);
		dprintk("RPC:       %s: rdma_create_id() failed %i\n",
			__func__, rc);
		return id;
	}

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_addr(id, NULL, addr, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC:       %s: rdma_resolve_addr() failed %i\n",
			__func__, rc);
		goto out;
	}
	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
	if (rc < 0) {
		dprintk("RPC:       %s: wait() exited: %i\n",
			__func__, rc);
		goto out;
	}

	/* FIXME:
	 * Until xprtrdma supports DEVICE_REMOVAL, the provider must
	 * be pinned while there are active NFS/RDMA mounts to prevent
	 * hangs and crashes at umount time.
	 */
	if (!ia->ri_async_rc && !try_module_get(id->device->owner)) {
		dprintk("RPC:       %s: Failed to get device module\n",
			__func__);
		ia->ri_async_rc = -ENODEV;
	}
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC:       %s: rdma_resolve_route() failed %i\n",
			__func__, rc);
		goto put;
	}
	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
	if (rc < 0) {
		dprintk("RPC:       %s: wait() exited: %i\n",
			__func__, rc);
		goto put;
	}
	rc = ia->ri_async_rc;
	if (rc)
		goto put;

	return id;
put:
	module_put(id->device->owner);
out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}

/*
 * Exported functions.
 */

/*
 * Open and initialize an Interface Adapter.
 *  o initializes fields of struct rpcrdma_ia, including
 *    interface and provider attributes and protection domain.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt, struct sockaddr *addr, int memreg)
{
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	int rc;

	ia->ri_id = rpcrdma_create_id(xprt, ia, addr);
	if (IS_ERR(ia->ri_id)) {
		rc = PTR_ERR(ia->ri_id);
		goto out1;
	}
	ia->ri_device = ia->ri_id->device;

	ia->ri_pd = ib_alloc_pd(ia->ri_device, 0);
	if (IS_ERR(ia->ri_pd)) {
		rc = PTR_ERR(ia->ri_pd);
		pr_err("rpcrdma: ib_alloc_pd() returned %d\n", rc);
		goto out2;
	}

	switch (memreg) {
	case RPCRDMA_FRMR:
		if (frwr_is_supported(ia)) {
			ia->ri_ops = &rpcrdma_frwr_memreg_ops;
			break;
		}
		/*FALLTHROUGH*/
	case RPCRDMA_MTHCAFMR:
		if (fmr_is_supported(ia)) {
			ia->ri_ops = &rpcrdma_fmr_memreg_ops;
			break;
		}
		/*FALLTHROUGH*/
	default:
		pr_err("rpcrdma: Unsupported memory registration mode: %d\n",
		       memreg);
		rc = -EINVAL;
		goto out3;
	}

	return 0;

out3:
	ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;
out2:
	rpcrdma_destroy_id(ia->ri_id);
	ia->ri_id = NULL;
out1:
	return rc;
}

/*
 * Clean up/close an IA.
 *  o if event handles and PD have been initialized, free them.
 *  o close the IA
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
	dprintk("RPC:       %s: entering\n", __func__);
	if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
		if (ia->ri_id->qp)
			rdma_destroy_qp(ia->ri_id);
		rpcrdma_destroy_id(ia->ri_id);
		ia->ri_id = NULL;
	}

	/* If the pd is still busy, xprtrdma missed freeing a resource */
	if (ia->ri_pd && !IS_ERR(ia->ri_pd))
		ib_dealloc_pd(ia->ri_pd);
}

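/* Note on work request budgeting: each work queue must hold
 * cdata->max_requests WRs, plus RPCRDMA_BACKWARD_WRS for the
 * backchannel, plus one WR for draining the queue at disconnect
 * time. rpcrdma_ep_create() checks this budget against the
 * device's reported max_qp_wr before sizing the CQs.
 */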
/*
 * Create unconnected endpoint.
 */
int
rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
		  struct rpcrdma_create_data_internal *cdata)
{
	struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private;
	unsigned int max_qp_wr, max_sge;
	struct ib_cq *sendcq, *recvcq;
	int rc;

	max_sge = min_t(unsigned int, ia->ri_device->attrs.max_sge,
			RPCRDMA_MAX_SEND_SGES);
	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
		pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
		return -ENOMEM;
	}
	ia->ri_max_send_sges = max_sge - RPCRDMA_MIN_SEND_SGES;

	if (ia->ri_device->attrs.max_qp_wr <= RPCRDMA_BACKWARD_WRS) {
		dprintk("RPC:       %s: insufficient wqe's available\n",
			__func__);
		return -ENOMEM;
	}
	max_qp_wr = ia->ri_device->attrs.max_qp_wr - RPCRDMA_BACKWARD_WRS - 1;

	/* check provider's send/recv wr limits */
	if (cdata->max_requests > max_qp_wr)
		cdata->max_requests = max_qp_wr;

	ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
	ep->rep_attr.qp_context = ep;
	ep->rep_attr.srq = NULL;
	ep->rep_attr.cap.max_send_wr = cdata->max_requests;
	ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_send_wr += 1;	/* drain cqe */
	rc = ia->ri_ops->ro_open(ia, ep, cdata);
	if (rc)
		return rc;
	ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
	ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_recv_wr += 1;	/* drain cqe */
	ep->rep_attr.cap.max_send_sge = max_sge;
	ep->rep_attr.cap.max_recv_sge = 1;
	ep->rep_attr.cap.max_inline_data = 0;
	ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	ep->rep_attr.qp_type = IB_QPT_RC;
	ep->rep_attr.port_num = ~0;

	dprintk("RPC:       %s: requested max: dtos: send %d recv %d; "
		"iovs: send %d recv %d\n",
		__func__,
		ep->rep_attr.cap.max_send_wr,
		ep->rep_attr.cap.max_recv_wr,
		ep->rep_attr.cap.max_send_sge,
		ep->rep_attr.cap.max_recv_sge);

	/* set trigger for requesting send completion */
	ep->rep_cqinit = ep->rep_attr.cap.max_send_wr/2 - 1;
	if (ep->rep_cqinit <= 2)
		ep->rep_cqinit = 0;	/* always signal? */
	rpcrdma_init_cqcount(ep, 0);
	init_waitqueue_head(&ep->rep_connect_wait);
	INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);

	sendcq = ib_alloc_cq(ia->ri_device, NULL,
			     ep->rep_attr.cap.max_send_wr + 1,
			     0, IB_POLL_SOFTIRQ);
	if (IS_ERR(sendcq)) {
		rc = PTR_ERR(sendcq);
		dprintk("RPC:       %s: failed to create send CQ: %i\n",
			__func__, rc);
		goto out1;
	}

	recvcq = ib_alloc_cq(ia->ri_device, NULL,
			     ep->rep_attr.cap.max_recv_wr + 1,
			     0, IB_POLL_SOFTIRQ);
	if (IS_ERR(recvcq)) {
		rc = PTR_ERR(recvcq);
		dprintk("RPC:       %s: failed to create recv CQ: %i\n",
			__func__, rc);
		goto out2;
	}

	ep->rep_attr.send_cq = sendcq;
	ep->rep_attr.recv_cq = recvcq;

	/* Initialize cma parameters */
	memset(&ep->rep_remote_cma, 0, sizeof(ep->rep_remote_cma));

	/* Prepare RDMA-CM private message */
	pmsg->cp_magic = rpcrdma_cmp_magic;
	pmsg->cp_version = RPCRDMA_CMP_VERSION;
	pmsg->cp_flags |= ia->ri_ops->ro_send_w_inv_ok;
	pmsg->cp_send_size = rpcrdma_encode_buffer_size(cdata->inline_wsize);
	pmsg->cp_recv_size = rpcrdma_encode_buffer_size(cdata->inline_rsize);
	ep->rep_remote_cma.private_data = pmsg;
	ep->rep_remote_cma.private_data_len = sizeof(*pmsg);

	/* Client offers RDMA Read but does not initiate */
	ep->rep_remote_cma.initiator_depth = 0;
	if (ia->ri_device->attrs.max_qp_rd_atom > 32)	/* arbitrary but <= 255 */
		ep->rep_remote_cma.responder_resources = 32;
	else
		ep->rep_remote_cma.responder_resources =
						ia->ri_device->attrs.max_qp_rd_atom;

	/* Limit transport retries so client can detect server
	 * GID changes quickly. RPC layer handles re-establishing
	 * transport connection and retransmission.
	 */
	ep->rep_remote_cma.retry_count = 6;

	/* RPC-over-RDMA handles its own flow control. In addition,
	 * make all RNR NAKs visible so we know that RPC-over-RDMA
	 * flow control is working correctly (no NAKs should be seen).
	 */
	ep->rep_remote_cma.flow_control = 0;
	ep->rep_remote_cma.rnr_retry_count = 0;

	return 0;

out2:
	ib_free_cq(sendcq);
out1:
	return rc;
}

/*
 * rpcrdma_ep_destroy
 *
 * Disconnect and destroy endpoint. After this, the only
 * valid operations on the ep are to free it (if dynamically
 * allocated) or re-create it.
 */
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	dprintk("RPC:       %s: entering, connected is %d\n",
		__func__, ep->rep_connected);

	cancel_delayed_work_sync(&ep->rep_connect_worker);

	if (ia->ri_id->qp) {
		rpcrdma_ep_disconnect(ep, ia);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}

	ib_free_cq(ep->rep_attr.recv_cq);
	ib_free_cq(ep->rep_attr.send_cq);
}

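/* On reconnect, a fresh rdma_cm_id and QP are created and swapped
 * in before the old pair is destroyed, so a failed attempt leaves
 * the existing resources intact. A rejection with
 * IB_CM_REJ_STALE_CONN is reported as -EAGAIN and retried here.
 */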
/*
 * Connect unconnected endpoint.
 */
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
						   rx_ia);
	struct rdma_cm_id *id, *old;
	struct sockaddr *sap;
	unsigned int extras;
	int rc = 0;

	if (ep->rep_connected != 0) {
retry:
		dprintk("RPC:       %s: reconnecting...\n", __func__);

		rpcrdma_ep_disconnect(ep, ia);

		sap = (struct sockaddr *)&r_xprt->rx_data.addr;
		id = rpcrdma_create_id(r_xprt, ia, sap);
		if (IS_ERR(id)) {
			rc = -EHOSTUNREACH;
			goto out;
		}
		/* TEMP TEMP TEMP - fail if new device:
		 * Deregister/remarshal *all* requests!
		 * Close and recreate adapter, pd, etc!
		 * Re-determine all attributes still sane!
		 * More stuff I haven't thought of!
		 * Rrrgh!
		 */
		if (ia->ri_device != id->device) {
			printk("RPC:       %s: can't reconnect on "
				"different device!\n", __func__);
			rpcrdma_destroy_id(id);
			rc = -ENETUNREACH;
			goto out;
		}
		/* END TEMP */
		rc = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			dprintk("RPC:       %s: rdma_create_qp failed %i\n",
				__func__, rc);
			rpcrdma_destroy_id(id);
			rc = -ENETUNREACH;
			goto out;
		}

		old = ia->ri_id;
		ia->ri_id = id;

		rdma_destroy_qp(old);
		rpcrdma_destroy_id(old);
	} else {
		dprintk("RPC:       %s: connecting...\n", __func__);
		rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			dprintk("RPC:       %s: rdma_create_qp failed %i\n",
				__func__, rc);
			/* do not update ep->rep_connected */
			return -ENETUNREACH;
		}
	}

	ep->rep_connected = 0;

	rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
	if (rc) {
		dprintk("RPC:       %s: rdma_connect() failed with %i\n",
			__func__, rc);
		goto out;
	}

	wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);
	if (ep->rep_connected <= 0) {
		if (ep->rep_connected == -EAGAIN)
			goto retry;
		rc = ep->rep_connected;
		goto out;
	}

	dprintk("RPC:       %s: connected\n", __func__);
	extras = r_xprt->rx_buf.rb_bc_srv_max_requests;
	if (extras)
		rpcrdma_ep_post_extra_recv(r_xprt, extras);

out:
	if (rc)
		ep->rep_connected = rc;
	return rc;
}

/*
 * rpcrdma_ep_disconnect
 *
 * This is separate from destroy to facilitate the ability
 * to reconnect without recreating the endpoint.
 *
 * This call is not reentrant, and must not be made in parallel
 * on the same endpoint.
 */
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	rc = rdma_disconnect(ia->ri_id);
	if (!rc) {
		/* returns without wait if not connected */
		wait_event_interruptible(ep->rep_connect_wait,
					 ep->rep_connected != 1);
		dprintk("RPC:       %s: after wait, %sconnected\n", __func__,
			(ep->rep_connected == 1) ? "still " : "dis");
	} else {
		dprintk("RPC:       %s: rdma_disconnect %i\n", __func__, rc);
		ep->rep_connected = rc;
	}

	ib_drain_qp(ia->ri_id->qp);
}

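/* MRs flagged for recovery (for example, after a failed or flushed
 * invalidation) are parked on rb_stale_mrs by
 * rpcrdma_defer_mr_recovery(). This worker drains that list,
 * resetting each MR via the registration ops' ro_recover_mr
 * before it can be reused.
 */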
static void
rpcrdma_mr_recovery_worker(struct work_struct *work)
{
	struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
						  rb_recovery_worker.work);
	struct rpcrdma_mw *mw;

	spin_lock(&buf->rb_recovery_lock);
	while (!list_empty(&buf->rb_stale_mrs)) {
		mw = rpcrdma_pop_mw(&buf->rb_stale_mrs);
		spin_unlock(&buf->rb_recovery_lock);

		dprintk("RPC:       %s: recovering MR %p\n", __func__, mw);
		mw->mw_xprt->rx_ia.ri_ops->ro_recover_mr(mw);

		spin_lock(&buf->rb_recovery_lock);
	}
	spin_unlock(&buf->rb_recovery_lock);
}

void
rpcrdma_defer_mr_recovery(struct rpcrdma_mw *mw)
{
	struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;

	spin_lock(&buf->rb_recovery_lock);
	rpcrdma_push_mw(mw, &buf->rb_stale_mrs);
	spin_unlock(&buf->rb_recovery_lock);

	schedule_delayed_work(&buf->rb_recovery_worker, 0);
}

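/* Allocate and initialize MRs in batches of 32, then splice them
 * onto the free (rb_mws) and all (rb_all) lists under rb_mwlock.
 * Called when the transport's buffers are created, and again from
 * the refresh worker when rpcrdma_get_mw() finds the pool empty.
 */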
static void
rpcrdma_create_mrs(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	unsigned int count;
	LIST_HEAD(free);
	LIST_HEAD(all);

	for (count = 0; count < 32; count++) {
		struct rpcrdma_mw *mw;
		int rc;

		mw = kzalloc(sizeof(*mw), GFP_KERNEL);
		if (!mw)
			break;

		rc = ia->ri_ops->ro_init_mr(ia, mw);
		if (rc) {
			kfree(mw);
			break;
		}

		mw->mw_xprt = r_xprt;

		list_add(&mw->mw_list, &free);
		list_add(&mw->mw_all, &all);
	}

	spin_lock(&buf->rb_mwlock);
	list_splice(&free, &buf->rb_mws);
	list_splice(&all, &buf->rb_all);
	r_xprt->rx_stats.mrs_allocated += count;
	spin_unlock(&buf->rb_mwlock);

	dprintk("RPC:       %s: created %u MRs\n", __func__, count);
}

static void
rpcrdma_mr_refresh_worker(struct work_struct *work)
{
	struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
						  rb_refresh_worker.work);
	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
						   rx_buf);

	rpcrdma_create_mrs(r_xprt);
}

struct rpcrdma_req *
rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
	struct rpcrdma_req *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&req->rl_free);
	spin_lock(&buffer->rb_reqslock);
	list_add(&req->rl_all, &buffer->rb_allreqs);
	spin_unlock(&buffer->rb_reqslock);
	req->rl_cqe.done = rpcrdma_wc_send;
	req->rl_buffer = &r_xprt->rx_buf;
	INIT_LIST_HEAD(&req->rl_registered);
	req->rl_send_wr.next = NULL;
	req->rl_send_wr.wr_cqe = &req->rl_cqe;
	req->rl_send_wr.sg_list = req->rl_send_sge;
	req->rl_send_wr.opcode = IB_WR_SEND;
	return req;
}

struct rpcrdma_rep *
rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_rep *rep;
	int rc;

	rc = -ENOMEM;
	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (rep == NULL)
		goto out;

	rep->rr_rdmabuf = rpcrdma_alloc_regbuf(cdata->inline_rsize,
					       DMA_FROM_DEVICE, GFP_KERNEL);
	if (IS_ERR(rep->rr_rdmabuf)) {
		rc = PTR_ERR(rep->rr_rdmabuf);
		goto out_free;
	}

	rep->rr_device = ia->ri_device;
	rep->rr_cqe.done = rpcrdma_wc_receive;
	rep->rr_rxprt = r_xprt;
	INIT_WORK(&rep->rr_work, rpcrdma_reply_handler);
	rep->rr_recv_wr.next = NULL;
	rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
	rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
	rep->rr_recv_wr.num_sge = 1;
	return rep;

out_free:
	kfree(rep);
out:
	return ERR_PTR(rc);
}

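/* Allocate the transport's buffer pools: one rpcrdma_req per
 * credit (rb_max_requests), and enough rpcrdma_reps to cover the
 * forward channel plus RPCRDMA_MAX_BC_REQUESTS backchannel
 * requests.
 */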
int
rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	int i, rc;

	buf->rb_max_requests = r_xprt->rx_data.max_requests;
	buf->rb_bc_srv_max_requests = 0;
	atomic_set(&buf->rb_credits, 1);
	spin_lock_init(&buf->rb_mwlock);
	spin_lock_init(&buf->rb_lock);
	spin_lock_init(&buf->rb_recovery_lock);
	INIT_LIST_HEAD(&buf->rb_mws);
	INIT_LIST_HEAD(&buf->rb_all);
	INIT_LIST_HEAD(&buf->rb_stale_mrs);
	INIT_DELAYED_WORK(&buf->rb_refresh_worker,
			  rpcrdma_mr_refresh_worker);
	INIT_DELAYED_WORK(&buf->rb_recovery_worker,
			  rpcrdma_mr_recovery_worker);

	rpcrdma_create_mrs(r_xprt);

	INIT_LIST_HEAD(&buf->rb_send_bufs);
	INIT_LIST_HEAD(&buf->rb_allreqs);
	spin_lock_init(&buf->rb_reqslock);
	for (i = 0; i < buf->rb_max_requests; i++) {
		struct rpcrdma_req *req;

		req = rpcrdma_create_req(r_xprt);
		if (IS_ERR(req)) {
			dprintk("RPC:       %s: request buffer %d alloc"
				" failed\n", __func__, i);
			rc = PTR_ERR(req);
			goto out;
		}
		req->rl_backchannel = false;
		list_add(&req->rl_free, &buf->rb_send_bufs);
	}

	INIT_LIST_HEAD(&buf->rb_recv_bufs);
	for (i = 0; i < buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS; i++) {
		struct rpcrdma_rep *rep;

		rep = rpcrdma_create_rep(r_xprt);
		if (IS_ERR(rep)) {
			dprintk("RPC:       %s: reply buffer %d alloc failed\n",
				__func__, i);
			rc = PTR_ERR(rep);
			goto out;
		}
		list_add(&rep->rr_list, &buf->rb_recv_bufs);
	}

	return 0;
out:
	rpcrdma_buffer_destroy(buf);
	return rc;
}

static struct rpcrdma_req *
rpcrdma_buffer_get_req_locked(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_req *req;

	req = list_first_entry(&buf->rb_send_bufs,
			       struct rpcrdma_req, rl_free);
	list_del(&req->rl_free);
	return req;
}

static struct rpcrdma_rep *
rpcrdma_buffer_get_rep_locked(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_rep *rep;

	rep = list_first_entry(&buf->rb_recv_bufs,
			       struct rpcrdma_rep, rr_list);
	list_del(&rep->rr_list);
	return rep;
}

static void
rpcrdma_destroy_rep(struct rpcrdma_rep *rep)
{
	rpcrdma_free_regbuf(rep->rr_rdmabuf);
	kfree(rep);
}

void
rpcrdma_destroy_req(struct rpcrdma_req *req)
{
	rpcrdma_free_regbuf(req->rl_recvbuf);
	rpcrdma_free_regbuf(req->rl_sendbuf);
	rpcrdma_free_regbuf(req->rl_rdmabuf);
	kfree(req);
}

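/* Strip every MR from rb_all and hand each back to the provider.
 * rb_mwlock is dropped around ro_release_mr(), which may sleep.
 */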
static void
rpcrdma_destroy_mrs(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
						   rx_buf);
	struct rpcrdma_ia *ia = rdmab_to_ia(buf);
	struct rpcrdma_mw *mw;
	unsigned int count;

	count = 0;
	spin_lock(&buf->rb_mwlock);
	while (!list_empty(&buf->rb_all)) {
		mw = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
		list_del(&mw->mw_all);

		spin_unlock(&buf->rb_mwlock);
		ia->ri_ops->ro_release_mr(mw);
		count++;
		spin_lock(&buf->rb_mwlock);
	}
	spin_unlock(&buf->rb_mwlock);
	r_xprt->rx_stats.mrs_allocated = 0;

	dprintk("RPC:       %s: released %u MRs\n", __func__, count);
}

void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
	cancel_delayed_work_sync(&buf->rb_recovery_worker);

	while (!list_empty(&buf->rb_recv_bufs)) {
		struct rpcrdma_rep *rep;

		rep = rpcrdma_buffer_get_rep_locked(buf);
		rpcrdma_destroy_rep(rep);
	}
	buf->rb_send_count = 0;

	spin_lock(&buf->rb_reqslock);
	while (!list_empty(&buf->rb_allreqs)) {
		struct rpcrdma_req *req;

		req = list_first_entry(&buf->rb_allreqs,
				       struct rpcrdma_req, rl_all);
		list_del(&req->rl_all);

		spin_unlock(&buf->rb_reqslock);
		rpcrdma_destroy_req(req);
		spin_lock(&buf->rb_reqslock);
	}
	spin_unlock(&buf->rb_reqslock);
	buf->rb_recv_count = 0;

	rpcrdma_destroy_mrs(buf);
}

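/* Supply an MW for registering the chunks of an outgoing RPC.
 * When the pool is exhausted, schedule the refresh worker to
 * allocate more and return NULL; cond_resched() gives the reply
 * handler and the refresh worker a chance to replenish the pool.
 */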
struct rpcrdma_mw *
rpcrdma_get_mw(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_mw *mw = NULL;

	spin_lock(&buf->rb_mwlock);
	if (!list_empty(&buf->rb_mws))
		mw = rpcrdma_pop_mw(&buf->rb_mws);
	spin_unlock(&buf->rb_mwlock);

	if (!mw)
		goto out_nomws;
	return mw;

out_nomws:
	dprintk("RPC:       %s: no MWs available\n", __func__);
	schedule_delayed_work(&buf->rb_refresh_worker, 0);

	/* Allow the reply handler and refresh worker to run */
	cond_resched();

	return NULL;
}

void
rpcrdma_put_mw(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mw *mw)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;

	spin_lock(&buf->rb_mwlock);
	rpcrdma_push_mw(mw, &buf->rb_mws);
	spin_unlock(&buf->rb_mwlock);
}

static struct rpcrdma_rep *
rpcrdma_buffer_get_rep(struct rpcrdma_buffer *buffers)
{
	/* If an RPC previously completed without a reply (say, a
	 * credential problem or a soft timeout occurs) then hold off
	 * on supplying more Receive buffers until the number of new
	 * pending RPCs catches up to the number of posted Receives.
	 */
	if (unlikely(buffers->rb_send_count < buffers->rb_recv_count))
		return NULL;

	if (unlikely(list_empty(&buffers->rb_recv_bufs)))
		return NULL;
	buffers->rb_recv_count++;
	return rpcrdma_buffer_get_rep_locked(buffers);
}

/*
 * Get a set of request/reply buffers.
 *
 * Reply buffer (if available) is attached to send buffer upon return.
 */
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
{
	struct rpcrdma_req *req;

	spin_lock(&buffers->rb_lock);
	if (list_empty(&buffers->rb_send_bufs))
		goto out_reqbuf;
	buffers->rb_send_count++;
	req = rpcrdma_buffer_get_req_locked(buffers);
	req->rl_reply = rpcrdma_buffer_get_rep(buffers);
	spin_unlock(&buffers->rb_lock);
	return req;

out_reqbuf:
	spin_unlock(&buffers->rb_lock);
	pr_warn("RPC:       %s: out of request buffers\n", __func__);
	return NULL;
}

/*
 * Put request/reply buffers back into the pool, decrementing
 * the send and receive in-use counts.
 */
void
rpcrdma_buffer_put(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;
	struct rpcrdma_rep *rep = req->rl_reply;

	req->rl_send_wr.num_sge = 0;
	req->rl_reply = NULL;

	spin_lock(&buffers->rb_lock);
	buffers->rb_send_count--;
	list_add_tail(&req->rl_free, &buffers->rb_send_bufs);
	if (rep) {
		buffers->rb_recv_count--;
		list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
	}
	spin_unlock(&buffers->rb_lock);
}

/*
 * Recover reply buffers from pool.
 * This happens when recovering from disconnect.
 */
void
rpcrdma_recv_buffer_get(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;

	spin_lock(&buffers->rb_lock);
	req->rl_reply = rpcrdma_buffer_get_rep(buffers);
	spin_unlock(&buffers->rb_lock);
}

/*
 * Put reply buffers back into pool when not attached to
 * request. This happens in error conditions.
 */
void
rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
	struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;

	spin_lock(&buffers->rb_lock);
	buffers->rb_recv_count--;
	list_add_tail(&rep->rr_list, &buffers->rb_recv_bufs);
	spin_unlock(&buffers->rb_lock);
}

/**
 * rpcrdma_alloc_regbuf - allocate and DMA-map memory for SEND/RECV buffers
 * @size: size of buffer to be allocated, in bytes
 * @direction: direction of data movement
 * @flags: GFP flags
 *
 * Returns an ERR_PTR, or a pointer to a regbuf, a buffer that
 * can be persistently DMA-mapped for I/O.
 *
 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
 * receiving the payload of RDMA RECV operations. During Long Calls
 * or Replies they may be registered externally via ro_map.
 */
struct rpcrdma_regbuf *
rpcrdma_alloc_regbuf(size_t size, enum dma_data_direction direction,
		     gfp_t flags)
{
	struct rpcrdma_regbuf *rb;

	rb = kmalloc(sizeof(*rb) + size, flags);
	if (rb == NULL)
		return ERR_PTR(-ENOMEM);

	rb->rg_device = NULL;
	rb->rg_direction = direction;
	rb->rg_iov.length = size;

	return rb;
}

/**
 * __rpcrdma_dma_map_regbuf - DMA-map a regbuf
 * @ia: controlling rpcrdma_ia
 * @rb: regbuf to be mapped
 */
bool
__rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
{
	if (rb->rg_direction == DMA_NONE)
		return false;

	rb->rg_iov.addr = ib_dma_map_single(ia->ri_device,
					    (void *)rb->rg_base,
					    rdmab_length(rb),
					    rb->rg_direction);
	if (ib_dma_mapping_error(ia->ri_device, rdmab_addr(rb)))
		return false;

	rb->rg_device = ia->ri_device;
	rb->rg_iov.lkey = ia->ri_pd->local_dma_lkey;
	return true;
}

static void
rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb)
{
	if (!rpcrdma_regbuf_is_mapped(rb))
		return;

	ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb),
			    rdmab_length(rb), rb->rg_direction);
	rb->rg_device = NULL;
}

/**
 * rpcrdma_free_regbuf - deregister and free registered buffer
 * @rb: regbuf to be deregistered and freed
 */
void
rpcrdma_free_regbuf(struct rpcrdma_regbuf *rb)
{
	if (!rb)
		return;

	rpcrdma_dma_unmap_regbuf(rb);
	kfree(rb);
}

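/* Most Send WRs are posted unsignaled: rpcrdma_set_signaled()
 * requests a completion only on every ep->rep_cqinit-th Send, so
 * the send queue is retired in batches instead of one completion
 * per RPC.
 */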
/*
 * Prepost any receive buffer, then post send.
 *
 * Receive buffer is donated to hardware, reclaimed upon recv completion.
 */
int
rpcrdma_ep_post(struct rpcrdma_ia *ia,
		struct rpcrdma_ep *ep,
		struct rpcrdma_req *req)
{
	struct ib_send_wr *send_wr = &req->rl_send_wr;
	struct ib_send_wr *send_wr_fail;
	int rc;

	if (req->rl_reply) {
		rc = rpcrdma_ep_post_recv(ia, req->rl_reply);
		if (rc)
			return rc;
		req->rl_reply = NULL;
	}

	dprintk("RPC:       %s: posting %d s/g entries\n",
		__func__, send_wr->num_sge);

	rpcrdma_set_signaled(ep, send_wr);
	rc = ib_post_send(ia->ri_id->qp, send_wr, &send_wr_fail);
	if (rc)
		goto out_postsend_err;
	return 0;

out_postsend_err:
	pr_err("rpcrdma: RDMA Send ib_post_send returned %i\n", rc);
	return -ENOTCONN;
}

int
rpcrdma_ep_post_recv(struct rpcrdma_ia *ia,
		     struct rpcrdma_rep *rep)
{
	struct ib_recv_wr *recv_wr_fail;
	int rc;

	if (!rpcrdma_dma_map_regbuf(ia, rep->rr_rdmabuf))
		goto out_map;
	rc = ib_post_recv(ia->ri_id->qp, &rep->rr_recv_wr, &recv_wr_fail);
	if (rc)
		goto out_postrecv;
	return 0;

out_map:
	pr_err("rpcrdma: failed to DMA map the Receive buffer\n");
	return -EIO;

out_postrecv:
	pr_err("rpcrdma: ib_post_recv returned %i\n", rc);
	return -ENOTCONN;
}

/**
 * rpcrdma_ep_post_extra_recv - Post buffers for incoming backchannel requests
 * @r_xprt: transport associated with these backchannel resources
 * @count: minimum number of incoming requests expected
 *
 * Returns zero if all requested buffers were posted, or a negative errno.
 */
int
rpcrdma_ep_post_extra_recv(struct rpcrdma_xprt *r_xprt, unsigned int count)
{
	struct rpcrdma_buffer *buffers = &r_xprt->rx_buf;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_rep *rep;
	int rc;

	while (count--) {
		spin_lock(&buffers->rb_lock);
		if (list_empty(&buffers->rb_recv_bufs))
			goto out_reqbuf;
		rep = rpcrdma_buffer_get_rep_locked(buffers);
		spin_unlock(&buffers->rb_lock);

		rc = rpcrdma_ep_post_recv(ia, rep);
		if (rc)
			goto out_rc;
	}

	return 0;

out_reqbuf:
	spin_unlock(&buffers->rb_lock);
	pr_warn("%s: no extra receive buffers\n", __func__);
	return -ENOMEM;

out_rc:
	rpcrdma_recv_buffer_put(rep);
	return rc;
}