// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>

#include <asm-generic/barrier.h>
#include <asm/bitops.h>

#include <rdma/ib_cm.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>
/*
 * Globals/Macros
 */

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif
/*
 * internal functions
 */
static void rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc);
static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf);
static int rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt, bool temp);
static void rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb);

struct workqueue_struct *rpcrdma_receive_wq __read_mostly;
int
rpcrdma_alloc_wq(void)
{
	struct workqueue_struct *recv_wq;

	recv_wq = alloc_workqueue("xprtrdma_receive",
				  WQ_MEM_RECLAIM | WQ_HIGHPRI,
				  0);
	if (!recv_wq)
		return -ENOMEM;

	rpcrdma_receive_wq = recv_wq;
	return 0;
}
void
rpcrdma_destroy_wq(void)
{
	struct workqueue_struct *wq;

	if (rpcrdma_receive_wq) {
		wq = rpcrdma_receive_wq;
		rpcrdma_receive_wq = NULL;
		destroy_workqueue(wq);
	}
}
/**
 * rpcrdma_disconnect_worker - Force a disconnect
 * @work: endpoint to be disconnected
 *
 * Provider callbacks can possibly run in an IRQ context. This function
 * is invoked in a worker thread to guarantee that disconnect wake-up
 * calls are always done in process context.
 */
static void
rpcrdma_disconnect_worker(struct work_struct *work)
{
	struct rpcrdma_ep *ep = container_of(work, struct rpcrdma_ep,
					     rep_disconnect_worker.work);
	struct rpcrdma_xprt *r_xprt =
		container_of(ep, struct rpcrdma_xprt, rx_ep);

	xprt_force_disconnect(&r_xprt->rx_xprt);
}
/**
 * rpcrdma_qp_event_handler - Handle one QP event (error notification)
 * @event: details of the event
 * @context: ep that owns QP where event occurred
 *
 * Called from the RDMA provider (device driver) possibly in an interrupt
 * context.
 */
static void
rpcrdma_qp_event_handler(struct ib_event *event, void *context)
{
	struct rpcrdma_ep *ep = context;
	struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt,
						   rx_ep);

	trace_xprtrdma_qp_event(r_xprt, event);
	pr_err("rpcrdma: %s on device %s connected to %s:%s\n",
	       ib_event_msg(event->event), event->device->name,
	       rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt));

	if (ep->rep_connected == 1) {
		ep->rep_connected = -EIO;
		schedule_delayed_work(&ep->rep_disconnect_worker, 0);
		wake_up_all(&ep->rep_connect_wait);
	}
}
/**
 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_sendctx *sc =
		container_of(cqe, struct rpcrdma_sendctx, sc_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_send(sc, wc);
	if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: Send: %s (%u/0x%x)\n",
		       ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);

	rpcrdma_sendctx_put_locked(sc);
}
/**
 * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
					       rr_cqe);

	/* WARNING: Only wr_id and status are reliable at this point */
	trace_xprtrdma_wc_receive(wc);
	if (wc->status != IB_WC_SUCCESS)
		goto out_fail;

	/* status == SUCCESS means all fields in wc are trustworthy */
	rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len);
	rep->rr_wc_flags = wc->wc_flags;
	rep->rr_inv_rkey = wc->ex.invalidate_rkey;

	ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf),
				   rdmab_addr(rep->rr_rdmabuf),
				   wc->byte_len, DMA_FROM_DEVICE);

out_schedule:
	rpcrdma_reply_handler(rep);
	return;

out_fail:
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: Recv: %s (%u/0x%x)\n",
		       ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
	rpcrdma_set_xdrlen(&rep->rr_hdrbuf, 0);
	goto out_schedule;
}
static void
rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt,
			       struct rdma_conn_param *param)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	const struct rpcrdma_connect_private *pmsg = param->private_data;
	unsigned int rsize, wsize;

	/* Default settings for RPC-over-RDMA Version One */
	r_xprt->rx_ia.ri_implicit_roundup = xprt_rdma_pad_optimize;
	rsize = RPCRDMA_V1_DEF_INLINE_SIZE;
	wsize = RPCRDMA_V1_DEF_INLINE_SIZE;

	if (pmsg &&
	    pmsg->cp_magic == rpcrdma_cmp_magic &&
	    pmsg->cp_version == RPCRDMA_CMP_VERSION) {
		r_xprt->rx_ia.ri_implicit_roundup = true;
		rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
		wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
	}

	if (rsize < cdata->inline_rsize)
		cdata->inline_rsize = rsize;
	if (wsize < cdata->inline_wsize)
		cdata->inline_wsize = wsize;
	dprintk("RPC:       %s: max send %u, max recv %u\n",
		__func__, cdata->inline_wsize, cdata->inline_rsize);
	rpcrdma_set_max_header_sizes(r_xprt);
}
/**
 * rpcrdma_cm_event_handler - Handle RDMA CM events
 * @id: rdma_cm_id on which an event has occurred
 * @event: details of the event
 *
 * Called with @id's mutex held. Returns 1 if caller should
 * destroy @id, otherwise 0.
 */
static int
rpcrdma_cm_event_handler(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
	struct rpcrdma_xprt *r_xprt = id->context;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;

	might_sleep();

	trace_xprtrdma_cm_event(r_xprt, event);
	switch (event->event) {
	case RDMA_CM_EVENT_ADDR_RESOLVED:
	case RDMA_CM_EVENT_ROUTE_RESOLVED:
		ia->ri_async_rc = 0;
		complete(&ia->ri_done);
		return 0;
	case RDMA_CM_EVENT_ADDR_ERROR:
		ia->ri_async_rc = -EPROTO;
		complete(&ia->ri_done);
		return 0;
	case RDMA_CM_EVENT_ROUTE_ERROR:
		ia->ri_async_rc = -ENETUNREACH;
		complete(&ia->ri_done);
		return 0;
	case RDMA_CM_EVENT_DEVICE_REMOVAL:
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
		pr_info("rpcrdma: removing device %s for %s:%s\n",
			ia->ri_device->name,
			rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt));
#endif
		set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags);
		ep->rep_connected = -ENODEV;
		xprt_force_disconnect(xprt);
		wait_for_completion(&ia->ri_remove_done);

		ia->ri_id = NULL;
		ia->ri_device = NULL;
		/* Return 1 to ensure the core destroys the id. */
		return 1;
	case RDMA_CM_EVENT_ESTABLISHED:
		++xprt->connect_cookie;
		ep->rep_connected = 1;
		rpcrdma_update_connect_private(r_xprt, &event->param.conn);
		wake_up_all(&ep->rep_connect_wait);
		break;
	case RDMA_CM_EVENT_CONNECT_ERROR:
		ep->rep_connected = -ENOTCONN;
		goto disconnected;
	case RDMA_CM_EVENT_UNREACHABLE:
		ep->rep_connected = -ENETUNREACH;
		goto disconnected;
	case RDMA_CM_EVENT_REJECTED:
		dprintk("rpcrdma: connection to %s:%s rejected: %s\n",
			rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
			rdma_reject_msg(id, event->status));
		ep->rep_connected = -ECONNREFUSED;
		if (event->status == IB_CM_REJ_STALE_CONN)
			ep->rep_connected = -EAGAIN;
		goto disconnected;
	case RDMA_CM_EVENT_DISCONNECTED:
		++xprt->connect_cookie;
		ep->rep_connected = -ECONNABORTED;
disconnected:
		xprt_force_disconnect(xprt);
		wake_up_all(&ep->rep_connect_wait);
		break;
	default:
		break;
	}

	dprintk("RPC:       %s: %s:%s on %s/%s: %s\n", __func__,
		rpcrdma_addrstr(r_xprt), rpcrdma_portstr(r_xprt),
		ia->ri_device->name, ia->ri_ops->ro_displayname,
		rdma_event_msg(event->event));
	return 0;
}
static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt, struct rpcrdma_ia *ia)
{
	unsigned long wtimeout = msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1;
	struct rdma_cm_id *id;
	int rc;

	trace_xprtrdma_conn_start(xprt);

	init_completion(&ia->ri_done);
	init_completion(&ia->ri_remove_done);

	id = rdma_create_id(xprt->rx_xprt.xprt_net, rpcrdma_cm_event_handler,
			    xprt, RDMA_PS_TCP, IB_QPT_RC);
	if (IS_ERR(id)) {
		rc = PTR_ERR(id);
		dprintk("RPC:       %s: rdma_create_id() failed %i\n",
			__func__, rc);
		return id;
	}

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_addr(id, NULL,
			       (struct sockaddr *)&xprt->rx_xprt.addr,
			       RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC:       %s: rdma_resolve_addr() failed %i\n",
			__func__, rc);
		goto out;
	}
	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
	if (rc < 0) {
		trace_xprtrdma_conn_tout(xprt);
		goto out;
	}

	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	ia->ri_async_rc = -ETIMEDOUT;
	rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
	if (rc) {
		dprintk("RPC:       %s: rdma_resolve_route() failed %i\n",
			__func__, rc);
		goto out;
	}
	rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
	if (rc < 0) {
		trace_xprtrdma_conn_tout(xprt);
		goto out;
	}
	rc = ia->ri_async_rc;
	if (rc)
		goto out;

	return id;

out:
	rdma_destroy_id(id);
	return ERR_PTR(rc);
}
/*
 * Exported functions.
 */

/**
 * rpcrdma_ia_open - Open and initialize an Interface Adapter.
 * @xprt: transport with IA to (re)initialize
 *
 * Returns 0 on success, negative errno if an appropriate
 * Interface Adapter could not be found and opened.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt)
{
	struct rpcrdma_ia *ia = &xprt->rx_ia;
	int rc;

	ia->ri_id = rpcrdma_create_id(xprt, ia);
	if (IS_ERR(ia->ri_id)) {
		rc = PTR_ERR(ia->ri_id);
		goto out_err;
	}
	ia->ri_device = ia->ri_id->device;

	ia->ri_pd = ib_alloc_pd(ia->ri_device, 0);
	if (IS_ERR(ia->ri_pd)) {
		rc = PTR_ERR(ia->ri_pd);
		pr_err("rpcrdma: ib_alloc_pd() returned %d\n", rc);
		goto out_err;
	}

	switch (xprt_rdma_memreg_strategy) {
	case RPCRDMA_FRWR:
		if (frwr_is_supported(ia)) {
			ia->ri_ops = &rpcrdma_frwr_memreg_ops;
			break;
		}
		/*FALLTHROUGH*/
	case RPCRDMA_MTHCAFMR:
		if (fmr_is_supported(ia)) {
			ia->ri_ops = &rpcrdma_fmr_memreg_ops;
			break;
		}
		/*FALLTHROUGH*/
	default:
		pr_err("rpcrdma: Device %s does not support memreg mode %d\n",
		       ia->ri_device->name, xprt_rdma_memreg_strategy);
		rc = -EINVAL;
		goto out_err;
	}

	return 0;

out_err:
	rpcrdma_ia_close(ia);
	return rc;
}
/**
 * rpcrdma_ia_remove - Handle device driver unload
 * @ia: interface adapter being removed
 *
 * Divest transport H/W resources associated with this adapter,
 * but allow it to be restored later.
 */
void
rpcrdma_ia_remove(struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
						   rx_ia);
	struct rpcrdma_ep *ep = &r_xprt->rx_ep;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_req *req;
	struct rpcrdma_rep *rep;

	cancel_delayed_work_sync(&buf->rb_refresh_worker);

	/* This is similar to rpcrdma_ep_destroy, but:
	 * - Don't cancel the connect worker.
	 * - Don't call rpcrdma_ep_disconnect, which waits
	 *   for another conn upcall, which will deadlock.
	 * - rdma_disconnect is unneeded, the underlying
	 *   connection is already gone.
	 */
	if (ia->ri_id->qp) {
		ib_drain_qp(ia->ri_id->qp);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}
	ib_free_cq(ep->rep_attr.recv_cq);
	ep->rep_attr.recv_cq = NULL;
	ib_free_cq(ep->rep_attr.send_cq);
	ep->rep_attr.send_cq = NULL;

	/* The ULP is responsible for ensuring all DMA
	 * mappings and MRs are gone.
	 */
	list_for_each_entry(rep, &buf->rb_recv_bufs, rr_list)
		rpcrdma_dma_unmap_regbuf(rep->rr_rdmabuf);
	list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
		rpcrdma_dma_unmap_regbuf(req->rl_rdmabuf);
		rpcrdma_dma_unmap_regbuf(req->rl_sendbuf);
		rpcrdma_dma_unmap_regbuf(req->rl_recvbuf);
	}
	rpcrdma_mrs_destroy(buf);
	ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;

	/* Allow waiters to continue */
	complete(&ia->ri_remove_done);

	trace_xprtrdma_remove(r_xprt);
}
/**
 * rpcrdma_ia_close - Clean up/close an IA.
 * @ia: interface adapter to close
 *
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
	if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
		if (ia->ri_id->qp)
			rdma_destroy_qp(ia->ri_id);
		rdma_destroy_id(ia->ri_id);
	}
	ia->ri_id = NULL;
	ia->ri_device = NULL;

	/* If the pd is still busy, xprtrdma missed freeing a resource */
	if (ia->ri_pd && !IS_ERR(ia->ri_pd))
		ib_dealloc_pd(ia->ri_pd);
	ia->ri_pd = NULL;
}
/*
 * Create unconnected endpoint.
 */
int
rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
		  struct rpcrdma_create_data_internal *cdata)
{
	struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private;
	struct ib_cq *sendcq, *recvcq;
	unsigned int max_sge;
	int rc;

	max_sge = min_t(unsigned int, ia->ri_device->attrs.max_send_sge,
			RPCRDMA_MAX_SEND_SGES);
	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
		pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
		return -ENOMEM;
	}
	ia->ri_max_send_sges = max_sge;

	rc = ia->ri_ops->ro_open(ia, ep, cdata);
	if (rc)
		return rc;

	ep->rep_attr.event_handler = rpcrdma_qp_event_handler;
	ep->rep_attr.qp_context = ep;
	ep->rep_attr.srq = NULL;
	ep->rep_attr.cap.max_send_sge = max_sge;
	ep->rep_attr.cap.max_recv_sge = 1;
	ep->rep_attr.cap.max_inline_data = 0;
	ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
	ep->rep_attr.qp_type = IB_QPT_RC;
	ep->rep_attr.port_num = ~0;

	dprintk("RPC:       %s: requested max: dtos: send %d recv %d; "
		"iovs: send %d recv %d\n",
		__func__,
		ep->rep_attr.cap.max_send_wr,
		ep->rep_attr.cap.max_recv_wr,
		ep->rep_attr.cap.max_send_sge,
		ep->rep_attr.cap.max_recv_sge);

	/* set trigger for requesting send completion */
	ep->rep_send_batch = min_t(unsigned int, RPCRDMA_MAX_SEND_BATCH,
				   cdata->max_requests >> 2);
	ep->rep_send_count = ep->rep_send_batch;
	init_waitqueue_head(&ep->rep_connect_wait);
	INIT_DELAYED_WORK(&ep->rep_disconnect_worker,
			  rpcrdma_disconnect_worker);

	sendcq = ib_alloc_cq(ia->ri_device, NULL,
			     ep->rep_attr.cap.max_send_wr + 1,
			     1, IB_POLL_WORKQUEUE);
	if (IS_ERR(sendcq)) {
		rc = PTR_ERR(sendcq);
		dprintk("RPC:       %s: failed to create send CQ: %i\n",
			__func__, rc);
		goto out1;
	}

	recvcq = ib_alloc_cq(ia->ri_device, NULL,
			     ep->rep_attr.cap.max_recv_wr + 1,
			     0, IB_POLL_WORKQUEUE);
	if (IS_ERR(recvcq)) {
		rc = PTR_ERR(recvcq);
		dprintk("RPC:       %s: failed to create recv CQ: %i\n",
			__func__, rc);
		goto out2;
	}

	ep->rep_attr.send_cq = sendcq;
	ep->rep_attr.recv_cq = recvcq;

	/* Initialize cma parameters */
	memset(&ep->rep_remote_cma, 0, sizeof(ep->rep_remote_cma));

	/* Prepare RDMA-CM private message */
	pmsg->cp_magic = rpcrdma_cmp_magic;
	pmsg->cp_version = RPCRDMA_CMP_VERSION;
	pmsg->cp_flags |= ia->ri_ops->ro_send_w_inv_ok;
	pmsg->cp_send_size = rpcrdma_encode_buffer_size(cdata->inline_wsize);
	pmsg->cp_recv_size = rpcrdma_encode_buffer_size(cdata->inline_rsize);
	ep->rep_remote_cma.private_data = pmsg;
	ep->rep_remote_cma.private_data_len = sizeof(*pmsg);

	/* Client offers RDMA Read but does not initiate */
	ep->rep_remote_cma.initiator_depth = 0;
	ep->rep_remote_cma.responder_resources =
		min_t(int, U8_MAX, ia->ri_device->attrs.max_qp_rd_atom);

	/* Limit transport retries so client can detect server
	 * GID changes quickly. RPC layer handles re-establishing
	 * transport connection and retransmission.
	 */
	ep->rep_remote_cma.retry_count = 6;

	/* RPC-over-RDMA handles its own flow control. In addition,
	 * make all RNR NAKs visible so we know that RPC-over-RDMA
	 * flow control is working correctly (no NAKs should be seen).
	 */
	ep->rep_remote_cma.flow_control = 0;
	ep->rep_remote_cma.rnr_retry_count = 0;

	return 0;

out2:
	ib_free_cq(sendcq);
out1:
	return rc;
}
/*
 * rpcrdma_ep_destroy
 *
 * Disconnect and destroy endpoint. After this, the only
 * valid operations on the ep are to free it (if dynamically
 * allocated) or re-create it.
 */
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	cancel_delayed_work_sync(&ep->rep_disconnect_worker);

	if (ia->ri_id && ia->ri_id->qp) {
		rpcrdma_ep_disconnect(ep, ia);
		rdma_destroy_qp(ia->ri_id);
		ia->ri_id->qp = NULL;
	}

	if (ep->rep_attr.recv_cq)
		ib_free_cq(ep->rep_attr.recv_cq);
	if (ep->rep_attr.send_cq)
		ib_free_cq(ep->rep_attr.send_cq);
}
/* Re-establish a connection after a device removal event.
 * Unlike a normal reconnection, a fresh PD and a new set
 * of MRs and buffers is needed.
 */
static int
rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
			 struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc, err;

	trace_xprtrdma_reinsert(r_xprt);

	rc = -EHOSTUNREACH;
	if (rpcrdma_ia_open(r_xprt))
		goto out1;

	rc = -ENOMEM;
	err = rpcrdma_ep_create(ep, ia, &r_xprt->rx_data);
	if (err) {
		pr_err("rpcrdma: rpcrdma_ep_create returned %d\n", err);
		goto out2;
	}

	rc = -ENETUNREACH;
	err = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
	if (err) {
		pr_err("rpcrdma: rdma_create_qp returned %d\n", err);
		goto out3;
	}

	rpcrdma_mrs_create(r_xprt);
	return 0;

out3:
	rpcrdma_ep_destroy(ep, ia);
out2:
	rpcrdma_ia_close(ia);
out1:
	return rc;
}
static int
rpcrdma_ep_reconnect(struct rpcrdma_xprt *r_xprt, struct rpcrdma_ep *ep,
		     struct rpcrdma_ia *ia)
{
	struct rdma_cm_id *id, *old;
	int err, rc;

	trace_xprtrdma_reconnect(r_xprt);

	rpcrdma_ep_disconnect(ep, ia);

	rc = -EHOSTUNREACH;
	id = rpcrdma_create_id(r_xprt, ia);
	if (IS_ERR(id))
		goto out;

	/* As long as the new ID points to the same device as the
	 * old ID, we can reuse the transport's existing PD and all
	 * previously allocated MRs. Also, the same device means
	 * the transport's previous DMA mappings are still valid.
	 *
	 * This is a sanity check only. There should be no way these
	 * point to two different devices here.
	 */
	old = id;
	rc = -ENETUNREACH;
	if (ia->ri_device != id->device) {
		pr_err("rpcrdma: can't reconnect on different device!\n");
		goto out_destroy;
	}

	err = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
	if (err) {
		dprintk("RPC:       %s: rdma_create_qp returned %d\n",
			__func__, err);
		goto out_destroy;
	}

	/* Atomically replace the transport's ID and QP. */
	rc = 0;
	old = ia->ri_id;
	ia->ri_id = id;
	rdma_destroy_qp(old);

out_destroy:
	rdma_destroy_id(old);
out:
	return rc;
}
/*
 * Connect unconnected endpoint.
 */
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
						   rx_ia);
	struct rpc_xprt *xprt = &r_xprt->rx_xprt;
	int rc;

retry:
	switch (ep->rep_connected) {
	case 0:
		dprintk("RPC:       %s: connecting...\n", __func__);
		rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
		if (rc) {
			dprintk("RPC:       %s: rdma_create_qp failed %i\n",
				__func__, rc);
			rc = -ENETUNREACH;
			goto out_noupdate;
		}
		break;
	case -ENODEV:
		rc = rpcrdma_ep_recreate_xprt(r_xprt, ep, ia);
		if (rc)
			goto out_noupdate;
		break;
	default:
		rc = rpcrdma_ep_reconnect(r_xprt, ep, ia);
		if (rc)
			goto out;
	}

	ep->rep_connected = 0;
	xprt_clear_connected(xprt);

	rpcrdma_post_recvs(r_xprt, true);

	rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
	if (rc) {
		dprintk("RPC:       %s: rdma_connect() failed with %i\n",
			__func__, rc);
		goto out;
	}

	wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);
	if (ep->rep_connected <= 0) {
		if (ep->rep_connected == -EAGAIN)
			goto retry;
		rc = ep->rep_connected;
		goto out;
	}

	dprintk("RPC:       %s: connected\n", __func__);

out:
	if (rc)
		ep->rep_connected = rc;

out_noupdate:
	return rc;
}
/*
 * rpcrdma_ep_disconnect
 *
 * This is separate from destroy to facilitate the ability
 * to reconnect without recreating the endpoint.
 *
 * This call is not reentrant, and must not be made in parallel
 * on the same endpoint.
 */
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
	int rc;

	rc = rdma_disconnect(ia->ri_id);
	if (!rc)
		/* returns without wait if not connected */
		wait_event_interruptible(ep->rep_connect_wait,
					 ep->rep_connected != 1);
	else
		ep->rep_connected = rc;
	trace_xprtrdma_disconnect(container_of(ep, struct rpcrdma_xprt,
					       rx_ep), rc);

	ib_drain_qp(ia->ri_id->qp);
}
/* Fixed-size circular FIFO queue. This implementation is wait-free and
 * lock-free.
 *
 * Consumer is the code path that posts Sends. This path dequeues a
 * sendctx for use by a Send operation. Multiple consumer threads
 * are serialized by the RPC transport lock, which allows only one
 * ->send_request call at a time.
 *
 * Producer is the code path that handles Send completions. This path
 * enqueues a sendctx that has been completed. Multiple producer
 * threads are serialized by the ib_poll_cq() function.
 */

/* rpcrdma_sendctxs_destroy() assumes caller has already quiesced
 * queue activity, and ib_drain_qp has flushed all remaining Send
 * requests.
 */
static void rpcrdma_sendctxs_destroy(struct rpcrdma_buffer *buf)
{
	unsigned long i;

	for (i = 0; i <= buf->rb_sc_last; i++)
		kfree(buf->rb_sc_ctxs[i]);
	kfree(buf->rb_sc_ctxs);
}
static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ia *ia)
{
	struct rpcrdma_sendctx *sc;

	sc = kzalloc(sizeof(*sc) +
		     ia->ri_max_send_sges * sizeof(struct ib_sge),
		     GFP_KERNEL);
	if (!sc)
		return NULL;

	sc->sc_wr.wr_cqe = &sc->sc_cqe;
	sc->sc_wr.sg_list = sc->sc_sges;
	sc->sc_wr.opcode = IB_WR_SEND;
	sc->sc_cqe.done = rpcrdma_wc_send;
	return sc;
}
static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_sendctx *sc;
	unsigned long i;

	/* Maximum number of concurrent outstanding Send WRs. Capping
	 * the circular queue size stops Send Queue overflow by causing
	 * the ->send_request call to fail temporarily before too many
	 * Sends are posted.
	 */
	i = buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS;
	dprintk("RPC:       %s: allocating %lu send_ctxs\n", __func__, i);
	buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL);
	if (!buf->rb_sc_ctxs)
		return -ENOMEM;

	buf->rb_sc_last = i - 1;
	for (i = 0; i <= buf->rb_sc_last; i++) {
		sc = rpcrdma_sendctx_create(&r_xprt->rx_ia);
		if (!sc)
			goto out_destroy;

		sc->sc_xprt = r_xprt;
		buf->rb_sc_ctxs[i] = sc;
	}

	return 0;

out_destroy:
	rpcrdma_sendctxs_destroy(buf);
	return -ENOMEM;
}
/* The sendctx queue is not guaranteed to have a size that is a
 * power of two, thus the helpers in circ_buf.h cannot be used.
 * The other option is to use modulus (%), which can be expensive.
 */
static unsigned long rpcrdma_sendctx_next(struct rpcrdma_buffer *buf,
					  unsigned long item)
{
	return likely(item < buf->rb_sc_last) ? item + 1 : 0;
}
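
/* Worked example of the ring arithmetic above: if rb_sc_last is 3, the
 * index sequence is 0 -> 1 -> 2 -> 3 and then wraps back to 0. The
 * consumer in rpcrdma_sendctx_get_locked() refuses to advance rb_sc_head
 * onto rb_sc_tail, so at most rb_sc_last sendctxs are ever outstanding;
 * the one slot deliberately left unused is what distinguishes an
 * exhausted ring from an idle one.
 */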
/**
 * rpcrdma_sendctx_get_locked - Acquire a send context
 * @buf: transport buffers from which to acquire an unused context
 *
 * Returns pointer to a free send completion context; or NULL if
 * the queue is empty.
 *
 * Usage: Called to acquire an SGE array before preparing a Send WR.
 *
 * The caller serializes calls to this function (per rpcrdma_buffer),
 * and provides an effective memory barrier that flushes the new value
 * of rb_sc_head.
 */
struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_xprt *r_xprt;
	struct rpcrdma_sendctx *sc;
	unsigned long next_head;

	next_head = rpcrdma_sendctx_next(buf, buf->rb_sc_head);

	if (next_head == READ_ONCE(buf->rb_sc_tail))
		goto out_emptyq;

	/* ORDER: item must be accessed _before_ head is updated */
	sc = buf->rb_sc_ctxs[next_head];

	/* Releasing the lock in the caller acts as a memory
	 * barrier that flushes rb_sc_head.
	 */
	buf->rb_sc_head = next_head;

	return sc;

out_emptyq:
	/* The queue is "empty" if there have not been enough Send
	 * completions recently. This is a sign the Send Queue is
	 * backing up. Cause the caller to pause and try again.
	 */
	set_bit(RPCRDMA_BUF_F_EMPTY_SCQ, &buf->rb_flags);
	r_xprt = container_of(buf, struct rpcrdma_xprt, rx_buf);
	r_xprt->rx_stats.empty_sendctx_q++;
	return NULL;
}
/**
 * rpcrdma_sendctx_put_locked - Release a send context
 * @sc: send context to release
 *
 * Usage: Called from Send completion to return a sendctx
 * to the queue.
 *
 * The caller serializes calls to this function (per rpcrdma_buffer).
 */
static void
rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc)
{
	struct rpcrdma_buffer *buf = &sc->sc_xprt->rx_buf;
	unsigned long next_tail;

	/* Unmap SGEs of previously completed but unsignaled
	 * Sends by walking up the queue until @sc is found.
	 */
	next_tail = buf->rb_sc_tail;
	do {
		next_tail = rpcrdma_sendctx_next(buf, next_tail);

		/* ORDER: item must be accessed _before_ tail is updated */
		rpcrdma_unmap_sendctx(buf->rb_sc_ctxs[next_tail]);

	} while (buf->rb_sc_ctxs[next_tail] != sc);

	/* Paired with READ_ONCE */
	smp_store_release(&buf->rb_sc_tail, next_tail);

	if (test_and_clear_bit(RPCRDMA_BUF_F_EMPTY_SCQ, &buf->rb_flags)) {
		smp_mb__after_atomic();
		xprt_write_space(&sc->sc_xprt->rx_xprt);
	}
}
static void
rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	unsigned int count;
	LIST_HEAD(free);
	LIST_HEAD(all);

	for (count = 0; count < ia->ri_max_segs; count++) {
		struct rpcrdma_mr *mr;
		int rc;

		mr = kzalloc(sizeof(*mr), GFP_KERNEL);
		if (!mr)
			break;

		rc = ia->ri_ops->ro_init_mr(ia, mr);
		if (rc) {
			kfree(mr);
			break;
		}

		mr->mr_xprt = r_xprt;

		list_add(&mr->mr_list, &free);
		list_add(&mr->mr_all, &all);
	}

	spin_lock(&buf->rb_mrlock);
	list_splice(&free, &buf->rb_mrs);
	list_splice(&all, &buf->rb_all);
	r_xprt->rx_stats.mrs_allocated += count;
	spin_unlock(&buf->rb_mrlock);
	trace_xprtrdma_createmrs(r_xprt, count);

	xprt_write_space(&r_xprt->rx_xprt);
}
static void
rpcrdma_mr_refresh_worker(struct work_struct *work)
{
	struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
						  rb_refresh_worker.work);
	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
						   rx_buf);

	rpcrdma_mrs_create(r_xprt);
}
struct rpcrdma_req *
rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
	struct rpcrdma_regbuf *rb;
	struct rpcrdma_req *req;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	if (req == NULL)
		return ERR_PTR(-ENOMEM);

	rb = rpcrdma_alloc_regbuf(RPCRDMA_HDRBUF_SIZE,
				  DMA_TO_DEVICE, GFP_KERNEL);
	if (IS_ERR(rb)) {
		kfree(req);
		return ERR_PTR(-ENOMEM);
	}
	req->rl_rdmabuf = rb;
	xdr_buf_init(&req->rl_hdrbuf, rb->rg_base, rdmab_length(rb));
	req->rl_buffer = buffer;
	INIT_LIST_HEAD(&req->rl_registered);

	spin_lock(&buffer->rb_reqslock);
	list_add(&req->rl_all, &buffer->rb_allreqs);
	spin_unlock(&buffer->rb_reqslock);
	return req;
}
static int
rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt, bool temp)
{
	struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_rep *rep;
	int rc;

	rc = -ENOMEM;
	rep = kzalloc(sizeof(*rep), GFP_KERNEL);
	if (rep == NULL)
		goto out;

	rep->rr_rdmabuf = rpcrdma_alloc_regbuf(cdata->inline_rsize,
					       DMA_FROM_DEVICE, GFP_KERNEL);
	if (IS_ERR(rep->rr_rdmabuf)) {
		rc = PTR_ERR(rep->rr_rdmabuf);
		goto out_free;
	}
	xdr_buf_init(&rep->rr_hdrbuf, rep->rr_rdmabuf->rg_base,
		     rdmab_length(rep->rr_rdmabuf));

	rep->rr_cqe.done = rpcrdma_wc_receive;
	rep->rr_rxprt = r_xprt;
	INIT_WORK(&rep->rr_work, rpcrdma_deferred_completion);
	rep->rr_recv_wr.next = NULL;
	rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
	rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
	rep->rr_recv_wr.num_sge = 1;
	rep->rr_temp = temp;

	spin_lock(&buf->rb_lock);
	list_add(&rep->rr_list, &buf->rb_recv_bufs);
	spin_unlock(&buf->rb_lock);
	return 0;

out_free:
	kfree(rep);
out:
	dprintk("RPC:       %s: reply buffer %d alloc failed\n",
		__func__, rc);
	return rc;
}
int
rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	int i, rc;

	buf->rb_flags = 0;
	buf->rb_max_requests = r_xprt->rx_data.max_requests;
	buf->rb_bc_srv_max_requests = 0;
	spin_lock_init(&buf->rb_mrlock);
	spin_lock_init(&buf->rb_lock);
	INIT_LIST_HEAD(&buf->rb_mrs);
	INIT_LIST_HEAD(&buf->rb_all);
	INIT_DELAYED_WORK(&buf->rb_refresh_worker,
			  rpcrdma_mr_refresh_worker);

	rpcrdma_mrs_create(r_xprt);

	INIT_LIST_HEAD(&buf->rb_send_bufs);
	INIT_LIST_HEAD(&buf->rb_allreqs);
	spin_lock_init(&buf->rb_reqslock);
	for (i = 0; i < buf->rb_max_requests; i++) {
		struct rpcrdma_req *req;

		req = rpcrdma_create_req(r_xprt);
		if (IS_ERR(req)) {
			dprintk("RPC:       %s: request buffer %d alloc"
				" failed\n", __func__, i);
			rc = PTR_ERR(req);
			goto out;
		}
		list_add(&req->rl_list, &buf->rb_send_bufs);
	}

	buf->rb_credits = 1;
	buf->rb_posted_receives = 0;
	INIT_LIST_HEAD(&buf->rb_recv_bufs);

	rc = rpcrdma_sendctxs_create(r_xprt);
	if (rc)
		goto out;

	return 0;
out:
	rpcrdma_buffer_destroy(buf);
	return rc;
}
static void
rpcrdma_destroy_rep(struct rpcrdma_rep *rep)
{
	rpcrdma_free_regbuf(rep->rr_rdmabuf);
	kfree(rep);
}

void
rpcrdma_destroy_req(struct rpcrdma_req *req)
{
	rpcrdma_free_regbuf(req->rl_recvbuf);
	rpcrdma_free_regbuf(req->rl_sendbuf);
	rpcrdma_free_regbuf(req->rl_rdmabuf);
	kfree(req);
}
static void
rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
						   rx_buf);
	struct rpcrdma_ia *ia = rdmab_to_ia(buf);
	struct rpcrdma_mr *mr;
	unsigned int count;

	count = 0;
	spin_lock(&buf->rb_mrlock);
	while (!list_empty(&buf->rb_all)) {
		mr = list_entry(buf->rb_all.next, struct rpcrdma_mr, mr_all);
		list_del(&mr->mr_all);

		spin_unlock(&buf->rb_mrlock);

		/* Ensure MW is not on any rl_registered list */
		if (!list_empty(&mr->mr_list))
			list_del(&mr->mr_list);

		ia->ri_ops->ro_release_mr(mr);
		count++;
		spin_lock(&buf->rb_mrlock);
	}
	spin_unlock(&buf->rb_mrlock);
	r_xprt->rx_stats.mrs_allocated = 0;

	dprintk("RPC:       %s: released %u MRs\n", __func__, count);
}
void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
	cancel_delayed_work_sync(&buf->rb_refresh_worker);

	rpcrdma_sendctxs_destroy(buf);

	while (!list_empty(&buf->rb_recv_bufs)) {
		struct rpcrdma_rep *rep;

		rep = list_first_entry(&buf->rb_recv_bufs,
				       struct rpcrdma_rep, rr_list);
		list_del(&rep->rr_list);
		rpcrdma_destroy_rep(rep);
	}

	spin_lock(&buf->rb_reqslock);
	while (!list_empty(&buf->rb_allreqs)) {
		struct rpcrdma_req *req;

		req = list_first_entry(&buf->rb_allreqs,
				       struct rpcrdma_req, rl_all);
		list_del(&req->rl_all);

		spin_unlock(&buf->rb_reqslock);
		rpcrdma_destroy_req(req);
		spin_lock(&buf->rb_reqslock);
	}
	spin_unlock(&buf->rb_reqslock);

	rpcrdma_mrs_destroy(buf);
}
/**
 * rpcrdma_mr_get - Allocate an rpcrdma_mr object
 * @r_xprt: controlling transport
 *
 * Returns an initialized rpcrdma_mr or NULL if no free
 * rpcrdma_mr objects are available.
 */
struct rpcrdma_mr *
rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct rpcrdma_mr *mr = NULL;

	spin_lock(&buf->rb_mrlock);
	if (!list_empty(&buf->rb_mrs))
		mr = rpcrdma_mr_pop(&buf->rb_mrs);
	spin_unlock(&buf->rb_mrlock);

	if (!mr)
		goto out_nomrs;
	return mr;

out_nomrs:
	trace_xprtrdma_nomrs(r_xprt);
	if (r_xprt->rx_ep.rep_connected != -ENODEV)
		schedule_delayed_work(&buf->rb_refresh_worker, 0);

	/* Allow the reply handler and refresh worker to run */
	cond_resched();

	return NULL;
}
static void
__rpcrdma_mr_put(struct rpcrdma_buffer *buf, struct rpcrdma_mr *mr)
{
	spin_lock(&buf->rb_mrlock);
	rpcrdma_mr_push(mr, &buf->rb_mrs);
	spin_unlock(&buf->rb_mrlock);
}

/**
 * rpcrdma_mr_put - Release an rpcrdma_mr object
 * @mr: object to release
 *
 */
void
rpcrdma_mr_put(struct rpcrdma_mr *mr)
{
	__rpcrdma_mr_put(&mr->mr_xprt->rx_buf, mr);
}

/**
 * rpcrdma_mr_unmap_and_put - DMA unmap an MR and release it
 * @mr: object to release
 *
 */
void
rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr)
{
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

	trace_xprtrdma_mr_unmap(mr);
	ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
			mr->mr_sg, mr->mr_nents, mr->mr_dir);
	__rpcrdma_mr_put(&r_xprt->rx_buf, mr);
}
/**
 * rpcrdma_buffer_get - Get a request buffer
 * @buffers: Buffer pool from which to obtain a buffer
 *
 * Returns a fresh rpcrdma_req, or NULL if none are available.
 */
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
{
	struct rpcrdma_req *req;

	spin_lock(&buffers->rb_lock);
	req = list_first_entry_or_null(&buffers->rb_send_bufs,
				       struct rpcrdma_req, rl_list);
	if (req)
		list_del_init(&req->rl_list);
	spin_unlock(&buffers->rb_lock);
	return req;
}
/**
 * rpcrdma_buffer_put - Put request/reply buffers back into pool
 * @req: object to return
 *
 */
void
rpcrdma_buffer_put(struct rpcrdma_req *req)
{
	struct rpcrdma_buffer *buffers = req->rl_buffer;
	struct rpcrdma_rep *rep = req->rl_reply;

	req->rl_reply = NULL;

	spin_lock(&buffers->rb_lock);
	list_add(&req->rl_list, &buffers->rb_send_bufs);
	if (rep) {
		if (!rep->rr_temp) {
			list_add(&rep->rr_list, &buffers->rb_recv_bufs);
			rep = NULL;
		}
	}
	spin_unlock(&buffers->rb_lock);
	if (rep)
		rpcrdma_destroy_rep(rep);
}
/*
 * Put reply buffers back into pool when not attached to
 * request. This happens in error conditions.
 */
void
rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
	struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;

	if (!rep->rr_temp) {
		spin_lock(&buffers->rb_lock);
		list_add(&rep->rr_list, &buffers->rb_recv_bufs);
		spin_unlock(&buffers->rb_lock);
	} else {
		rpcrdma_destroy_rep(rep);
	}
}
/**
 * rpcrdma_alloc_regbuf - allocate and DMA-map memory for SEND/RECV buffers
 * @size: size of buffer to be allocated, in bytes
 * @direction: direction of data movement
 * @flags: GFP flags
 *
 * Returns an ERR_PTR, or a pointer to a regbuf, a buffer that
 * can be persistently DMA-mapped for I/O.
 *
 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
 * receiving the payload of RDMA RECV operations. During Long Calls
 * or Replies they may be registered externally via ro_map.
 */
struct rpcrdma_regbuf *
rpcrdma_alloc_regbuf(size_t size, enum dma_data_direction direction,
		     gfp_t flags)
{
	struct rpcrdma_regbuf *rb;

	rb = kmalloc(sizeof(*rb) + size, flags);
	if (rb == NULL)
		return ERR_PTR(-ENOMEM);

	rb->rg_device = NULL;
	rb->rg_direction = direction;
	rb->rg_iov.length = size;

	return rb;
}
/**
 * __rpcrdma_dma_map_regbuf - DMA-map a regbuf
 * @ia: controlling rpcrdma_ia
 * @rb: regbuf to be mapped
 */
bool
__rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
{
	struct ib_device *device = ia->ri_device;

	if (rb->rg_direction == DMA_NONE)
		return false;

	rb->rg_iov.addr = ib_dma_map_single(device,
					    (void *)rb->rg_base,
					    rdmab_length(rb),
					    rb->rg_direction);
	if (ib_dma_mapping_error(device, rdmab_addr(rb)))
		return false;

	rb->rg_device = device;
	rb->rg_iov.lkey = ia->ri_pd->local_dma_lkey;
	return true;
}
static void
rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb)
{
	if (!rb)
		return;

	if (!rpcrdma_regbuf_is_mapped(rb))
		return;

	ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb),
			    rdmab_length(rb), rb->rg_direction);
	rb->rg_device = NULL;
}
/**
 * rpcrdma_free_regbuf - deregister and free registered buffer
 * @rb: regbuf to be deregistered and freed
 */
void
rpcrdma_free_regbuf(struct rpcrdma_regbuf *rb)
{
	rpcrdma_dma_unmap_regbuf(rb);
	kfree(rb);
}
/*
 * Prepost any receive buffer, then post send.
 *
 * Receive buffer is donated to hardware, reclaimed upon recv completion.
 */
int
rpcrdma_ep_post(struct rpcrdma_ia *ia,
		struct rpcrdma_ep *ep,
		struct rpcrdma_req *req)
{
	struct ib_send_wr *send_wr = &req->rl_sendctx->sc_wr;
	int rc;

	if (!ep->rep_send_count ||
	    test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) {
		send_wr->send_flags |= IB_SEND_SIGNALED;
		ep->rep_send_count = ep->rep_send_batch;
	} else {
		send_wr->send_flags &= ~IB_SEND_SIGNALED;
		--ep->rep_send_count;
	}

	rc = ia->ri_ops->ro_send(ia, req);
	trace_xprtrdma_post_send(req, rc);
	if (rc)
		return -ENOTCONN;
	return 0;
}
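
/* Example of the completion batching above: with rep_send_batch set to 8,
 * eight consecutive Sends are posted unsignaled and the ninth requests a
 * completion (rep_send_count has reached zero). That single Send
 * completion lets rpcrdma_sendctx_put_locked() retire the entire run of
 * unsignaled sendctxs in one pass.
 */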
/**
 * rpcrdma_post_recvs - Maybe post some Receive buffers
 * @r_xprt: controlling transport
 * @temp: when true, allocate temp rpcrdma_rep objects
 *
 */
void
rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	struct ib_recv_wr *wr, *bad_wr;
	int needed, count, rc;

	rc = 0;
	count = 0;
	needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1);
	if (buf->rb_posted_receives > needed)
		goto out;
	needed -= buf->rb_posted_receives;

	count = 0;
	wr = NULL;
	while (needed) {
		struct rpcrdma_regbuf *rb;
		struct rpcrdma_rep *rep;

		spin_lock(&buf->rb_lock);
		rep = list_first_entry_or_null(&buf->rb_recv_bufs,
					       struct rpcrdma_rep, rr_list);
		if (likely(rep))
			list_del(&rep->rr_list);
		spin_unlock(&buf->rb_lock);
		if (!rep) {
			if (rpcrdma_create_rep(r_xprt, temp))
				break;
			continue;
		}

		rb = rep->rr_rdmabuf;
		if (!rpcrdma_regbuf_is_mapped(rb)) {
			if (!__rpcrdma_dma_map_regbuf(&r_xprt->rx_ia, rb)) {
				rpcrdma_recv_buffer_put(rep);
				break;
			}
		}

		trace_xprtrdma_post_recv(rep->rr_recv_wr.wr_cqe);
		rep->rr_recv_wr.next = wr;
		wr = &rep->rr_recv_wr;
		++count;
		--needed;
	}
	if (!count)
		goto out;

	rc = ib_post_recv(r_xprt->rx_ia.ri_id->qp, wr,
			  (const struct ib_recv_wr **)&bad_wr);
	if (rc) {
		for (wr = bad_wr; wr; wr = wr->next) {
			struct rpcrdma_rep *rep;

			rep = container_of(wr, struct rpcrdma_rep, rr_recv_wr);
			rpcrdma_recv_buffer_put(rep);
			--count;
		}
	}
	buf->rb_posted_receives += count;
out:
	trace_xprtrdma_post_recvs(r_xprt, count, rc);
}