// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014-2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the BSD-type
 * license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *      Redistributions of source code must retain the above copyright
 *      notice, this list of conditions and the following disclaimer.
 *
 *      Redistributions in binary form must reproduce the above
 *      copyright notice, this list of conditions and the following
 *      disclaimer in the documentation and/or other materials provided
 *      with the distribution.
 *
 *      Neither the name of the Network Appliance, Inc. nor the names of
 *      its contributors may be used to endorse or promote products
 *      derived from this software without specific prior written
 *      permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * verbs.c
 *
 * Encapsulates the major functions managing:
 *  o adapters
 *  o endpoints
 *  o connections
 *  o buffer memory
 */
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/sunrpc/addr.h>
#include <linux/sunrpc/svc_rdma.h>

#include <asm-generic/barrier.h>
#include <asm/bitops.h>

#include <rdma/ib_cm.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif
static void rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc);
static void rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt);
static void rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf);
static int rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt, bool temp);
static void rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb);

struct workqueue_struct *rpcrdma_receive_wq __read_mostly;
int
rpcrdma_alloc_wq(void)
{
        struct workqueue_struct *recv_wq;

        recv_wq = alloc_workqueue("xprtrdma_receive",
                                  WQ_MEM_RECLAIM | WQ_HIGHPRI,
                                  0);
        if (!recv_wq)
                return -ENOMEM;

        rpcrdma_receive_wq = recv_wq;
        return 0;
}

void
rpcrdma_destroy_wq(void)
{
        struct workqueue_struct *wq;

        if (rpcrdma_receive_wq) {
                wq = rpcrdma_receive_wq;
                rpcrdma_receive_wq = NULL;
                destroy_workqueue(wq);
        }
}
static void
rpcrdma_qp_async_error_upcall(struct ib_event *event, void *context)
{
        struct rpcrdma_ep *ep = context;
        struct rpcrdma_xprt *r_xprt = container_of(ep, struct rpcrdma_xprt,
                                                   rx_ep);

        trace_xprtrdma_qp_error(r_xprt, event);
        pr_err("rpcrdma: %s on device %s ep %p\n",
               ib_event_msg(event->event), event->device->name, context);

        if (ep->rep_connected == 1) {
                ep->rep_connected = -EIO;
                rpcrdma_conn_func(ep);
                wake_up_all(&ep->rep_connect_wait);
        }
}
/**
 * rpcrdma_wc_send - Invoked by RDMA provider for each polled Send WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
rpcrdma_wc_send(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct rpcrdma_sendctx *sc =
                container_of(cqe, struct rpcrdma_sendctx, sc_cqe);

        /* WARNING: Only wr_cqe and status are reliable at this point */
        trace_xprtrdma_wc_send(sc, wc);
        if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR)
                pr_err("rpcrdma: Send: %s (%u/0x%x)\n",
                       ib_wc_status_msg(wc->status),
                       wc->status, wc->vendor_err);

        rpcrdma_sendctx_put_locked(sc);
}
/**
 * rpcrdma_wc_receive - Invoked by RDMA provider for each polled Receive WC
 * @cq: completion queue (ignored)
 * @wc: completed WR
 *
 */
static void
rpcrdma_wc_receive(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct rpcrdma_rep *rep = container_of(cqe, struct rpcrdma_rep,
                                               rr_cqe);

        /* WARNING: Only wr_id and status are reliable at this point */
        trace_xprtrdma_wc_receive(wc);
        if (wc->status != IB_WC_SUCCESS)
                goto out_fail;

        /* status == SUCCESS means all fields in wc are trustworthy */
        rpcrdma_set_xdrlen(&rep->rr_hdrbuf, wc->byte_len);
        rep->rr_wc_flags = wc->wc_flags;
        rep->rr_inv_rkey = wc->ex.invalidate_rkey;

        ib_dma_sync_single_for_cpu(rdmab_device(rep->rr_rdmabuf),
                                   rdmab_addr(rep->rr_rdmabuf),
                                   wc->byte_len, DMA_FROM_DEVICE);

out_schedule:
        rpcrdma_reply_handler(rep);
        return;

out_fail:
        if (wc->status != IB_WC_WR_FLUSH_ERR)
                pr_err("rpcrdma: Recv: %s (%u/0x%x)\n",
                       ib_wc_status_msg(wc->status),
                       wc->status, wc->vendor_err);
        rpcrdma_set_xdrlen(&rep->rr_hdrbuf, 0);
        goto out_schedule;
}
static void
rpcrdma_update_connect_private(struct rpcrdma_xprt *r_xprt,
                               struct rdma_conn_param *param)
{
        struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
        const struct rpcrdma_connect_private *pmsg = param->private_data;
        unsigned int rsize, wsize;

        /* Default settings for RPC-over-RDMA Version One */
        r_xprt->rx_ia.ri_implicit_roundup = xprt_rdma_pad_optimize;
        rsize = RPCRDMA_V1_DEF_INLINE_SIZE;
        wsize = RPCRDMA_V1_DEF_INLINE_SIZE;

        if (pmsg &&
            pmsg->cp_magic == rpcrdma_cmp_magic &&
            pmsg->cp_version == RPCRDMA_CMP_VERSION) {
                r_xprt->rx_ia.ri_implicit_roundup = true;
                rsize = rpcrdma_decode_buffer_size(pmsg->cp_send_size);
                wsize = rpcrdma_decode_buffer_size(pmsg->cp_recv_size);
        }

        if (rsize < cdata->inline_rsize)
                cdata->inline_rsize = rsize;
        if (wsize < cdata->inline_wsize)
                cdata->inline_wsize = wsize;
        dprintk("RPC: %s: max send %u, max recv %u\n",
                __func__, cdata->inline_wsize, cdata->inline_rsize);
        rpcrdma_set_max_header_sizes(r_xprt);
}
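/* For illustration only (example values, not protocol requirements):
 * if the peer's private message advertises a 4096-byte send buffer and
 * a 1024-byte receive buffer, the decoded rsize/wsize above clamp
 * cdata->inline_rsize toward 4096 and cdata->inline_wsize toward 1024,
 * but each is lowered only if it is smaller than the current setting.
 * Without a valid private message, both sides fall back to
 * RPCRDMA_V1_DEF_INLINE_SIZE.
 */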
static int
rpcrdma_conn_upcall(struct rdma_cm_id *id, struct rdma_cm_event *event)
{
        struct rpcrdma_xprt *xprt = id->context;
        struct rpcrdma_ia *ia = &xprt->rx_ia;
        struct rpcrdma_ep *ep = &xprt->rx_ep;
        int connstate = 0;

        trace_xprtrdma_conn_upcall(xprt, event);
        switch (event->event) {
        case RDMA_CM_EVENT_ADDR_RESOLVED:
        case RDMA_CM_EVENT_ROUTE_RESOLVED:
                ia->ri_async_rc = 0;
                complete(&ia->ri_done);
                break;
        case RDMA_CM_EVENT_ADDR_ERROR:
                ia->ri_async_rc = -EPROTO;
                complete(&ia->ri_done);
                break;
        case RDMA_CM_EVENT_ROUTE_ERROR:
                ia->ri_async_rc = -ENETUNREACH;
                complete(&ia->ri_done);
                break;
        case RDMA_CM_EVENT_DEVICE_REMOVAL:
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
                pr_info("rpcrdma: removing device %s for %s:%s\n",
                        ia->ri_device->name,
                        rpcrdma_addrstr(xprt), rpcrdma_portstr(xprt));
#endif
                init_completion(&ia->ri_remove_done);
                set_bit(RPCRDMA_IAF_REMOVING, &ia->ri_flags);
                ep->rep_connected = -ENODEV;
                xprt_force_disconnect(&xprt->rx_xprt);
                wait_for_completion(&ia->ri_remove_done);

                ia->ri_device = NULL;
                /* Return 1 to ensure the core destroys the id. */
                return 1;
        case RDMA_CM_EVENT_ESTABLISHED:
                ++xprt->rx_xprt.connect_cookie;
                connstate = 1;
                rpcrdma_update_connect_private(xprt, &event->param.conn);
                goto connected;
        case RDMA_CM_EVENT_CONNECT_ERROR:
                connstate = -ENOTCONN;
                goto connected;
        case RDMA_CM_EVENT_UNREACHABLE:
                connstate = -ENETUNREACH;
                goto connected;
        case RDMA_CM_EVENT_REJECTED:
                dprintk("rpcrdma: connection to %s:%s rejected: %s\n",
                        rpcrdma_addrstr(xprt), rpcrdma_portstr(xprt),
                        rdma_reject_msg(id, event->status));
                connstate = -ECONNREFUSED;
                if (event->status == IB_CM_REJ_STALE_CONN)
                        connstate = -EAGAIN;
                goto connected;
        case RDMA_CM_EVENT_DISCONNECTED:
                ++xprt->rx_xprt.connect_cookie;
                connstate = -ECONNABORTED;
connected:
                ep->rep_connected = connstate;
                rpcrdma_conn_func(ep);
                wake_up_all(&ep->rep_connect_wait);
                /*FALLTHROUGH*/
        default:
                dprintk("RPC: %s: %s:%s on %s/%s (ep 0x%p): %s\n",
                        __func__,
                        rpcrdma_addrstr(xprt), rpcrdma_portstr(xprt),
                        ia->ri_device->name, ia->ri_ops->ro_displayname,
                        ep, rdma_event_msg(event->event));
                break;
        }

        return 0;
}
static struct rdma_cm_id *
rpcrdma_create_id(struct rpcrdma_xprt *xprt, struct rpcrdma_ia *ia)
{
        unsigned long wtimeout = msecs_to_jiffies(RDMA_RESOLVE_TIMEOUT) + 1;
        struct rdma_cm_id *id;
        int rc;

        trace_xprtrdma_conn_start(xprt);

        init_completion(&ia->ri_done);

        id = rdma_create_id(xprt->rx_xprt.xprt_net, rpcrdma_conn_upcall,
                            xprt, RDMA_PS_TCP, IB_QPT_RC);
        if (IS_ERR(id)) {
                rc = PTR_ERR(id);
                dprintk("RPC: %s: rdma_create_id() failed %i\n",
                        __func__, rc);
                return id;
        }

        ia->ri_async_rc = -ETIMEDOUT;
        rc = rdma_resolve_addr(id, NULL,
                               (struct sockaddr *)&xprt->rx_xprt.addr,
                               RDMA_RESOLVE_TIMEOUT);
        if (rc) {
                dprintk("RPC: %s: rdma_resolve_addr() failed %i\n",
                        __func__, rc);
                goto out;
        }
        rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
        if (rc < 0) {
                trace_xprtrdma_conn_tout(xprt);
                goto out;
        }

        rc = ia->ri_async_rc;
        if (rc)
                goto out;

        ia->ri_async_rc = -ETIMEDOUT;
        rc = rdma_resolve_route(id, RDMA_RESOLVE_TIMEOUT);
        if (rc) {
                dprintk("RPC: %s: rdma_resolve_route() failed %i\n",
                        __func__, rc);
                goto out;
        }
        rc = wait_for_completion_interruptible_timeout(&ia->ri_done, wtimeout);
        if (rc < 0) {
                trace_xprtrdma_conn_tout(xprt);
                goto out;
        }
        rc = ia->ri_async_rc;
        if (rc)
                goto out;

        return id;

out:
        rdma_destroy_id(id);
        return ERR_PTR(rc);
}
/*
 * Exported functions.
 */

/**
 * rpcrdma_ia_open - Open and initialize an Interface Adapter.
 * @xprt: transport with IA to (re)initialize
 *
 * Returns 0 on success, negative errno if an appropriate
 * Interface Adapter could not be found and opened.
 */
int
rpcrdma_ia_open(struct rpcrdma_xprt *xprt)
{
        struct rpcrdma_ia *ia = &xprt->rx_ia;
        int rc;

        ia->ri_id = rpcrdma_create_id(xprt, ia);
        if (IS_ERR(ia->ri_id)) {
                rc = PTR_ERR(ia->ri_id);
                goto out_err;
        }
        ia->ri_device = ia->ri_id->device;

        ia->ri_pd = ib_alloc_pd(ia->ri_device, 0);
        if (IS_ERR(ia->ri_pd)) {
                rc = PTR_ERR(ia->ri_pd);
                pr_err("rpcrdma: ib_alloc_pd() returned %d\n", rc);
                goto out_err;
        }

        switch (xprt_rdma_memreg_strategy) {
        case RPCRDMA_FRWR:
                if (frwr_is_supported(ia)) {
                        ia->ri_ops = &rpcrdma_frwr_memreg_ops;
                        break;
                }
                /*FALLTHROUGH*/
        case RPCRDMA_MTHCAFMR:
                if (fmr_is_supported(ia)) {
                        ia->ri_ops = &rpcrdma_fmr_memreg_ops;
                        break;
                }
                /*FALLTHROUGH*/
        default:
                pr_err("rpcrdma: Device %s does not support memreg mode %d\n",
                       ia->ri_device->name, xprt_rdma_memreg_strategy);
                rc = -EINVAL;
                goto out_err;
        }

        return 0;

out_err:
        rpcrdma_ia_close(ia);
        return rc;
}
/**
 * rpcrdma_ia_remove - Handle device driver unload
 * @ia: interface adapter being removed
 *
 * Divest transport H/W resources associated with this adapter,
 * but allow it to be restored later.
 */
void
rpcrdma_ia_remove(struct rpcrdma_ia *ia)
{
        struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
                                                   rx_ia);
        struct rpcrdma_ep *ep = &r_xprt->rx_ep;
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_req *req;
        struct rpcrdma_rep *rep;

        cancel_delayed_work_sync(&buf->rb_refresh_worker);

        /* This is similar to rpcrdma_ep_destroy, but:
         * - Don't cancel the connect worker.
         * - Don't call rpcrdma_ep_disconnect, which waits
         *   for another conn upcall, which will deadlock.
         * - rdma_disconnect is unneeded, the underlying
         *   connection is already gone.
         */
        if (ia->ri_id->qp) {
                ib_drain_qp(ia->ri_id->qp);
                rdma_destroy_qp(ia->ri_id);
                ia->ri_id->qp = NULL;
        }
        ib_free_cq(ep->rep_attr.recv_cq);
        ep->rep_attr.recv_cq = NULL;
        ib_free_cq(ep->rep_attr.send_cq);
        ep->rep_attr.send_cq = NULL;

        /* The ULP is responsible for ensuring all DMA
         * mappings and MRs are gone.
         */
        list_for_each_entry(rep, &buf->rb_recv_bufs, rr_list)
                rpcrdma_dma_unmap_regbuf(rep->rr_rdmabuf);
        list_for_each_entry(req, &buf->rb_allreqs, rl_all) {
                rpcrdma_dma_unmap_regbuf(req->rl_rdmabuf);
                rpcrdma_dma_unmap_regbuf(req->rl_sendbuf);
                rpcrdma_dma_unmap_regbuf(req->rl_recvbuf);
        }
        rpcrdma_mrs_destroy(buf);
        ib_dealloc_pd(ia->ri_pd);

        /* Allow waiters to continue */
        complete(&ia->ri_remove_done);

        trace_xprtrdma_remove(r_xprt);
}
/**
 * rpcrdma_ia_close - Clean up/close an IA.
 * @ia: interface adapter to close
 *
 */
void
rpcrdma_ia_close(struct rpcrdma_ia *ia)
{
        if (ia->ri_id != NULL && !IS_ERR(ia->ri_id)) {
                if (ia->ri_id->qp)
                        rdma_destroy_qp(ia->ri_id);
                rdma_destroy_id(ia->ri_id);
        }
        ia->ri_id = NULL;
        ia->ri_device = NULL;

        /* If the pd is still busy, xprtrdma missed freeing a resource */
        if (ia->ri_pd && !IS_ERR(ia->ri_pd))
                ib_dealloc_pd(ia->ri_pd);
        ia->ri_pd = NULL;
}
/*
 * Create unconnected endpoint.
 */
int
rpcrdma_ep_create(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia,
                  struct rpcrdma_create_data_internal *cdata)
{
        struct rpcrdma_connect_private *pmsg = &ep->rep_cm_private;
        struct ib_cq *sendcq, *recvcq;
        unsigned int max_sge;
        int rc;

        max_sge = min_t(unsigned int, ia->ri_device->attrs.max_send_sge,
                        RPCRDMA_MAX_SEND_SGES);
        if (max_sge < RPCRDMA_MIN_SEND_SGES) {
                pr_warn("rpcrdma: HCA provides only %d send SGEs\n", max_sge);
                return -ENOMEM;
        }
        ia->ri_max_send_sges = max_sge;

        rc = ia->ri_ops->ro_open(ia, ep, cdata);
        if (rc)
                return rc;

        ep->rep_attr.event_handler = rpcrdma_qp_async_error_upcall;
        ep->rep_attr.qp_context = ep;
        ep->rep_attr.srq = NULL;
        ep->rep_attr.cap.max_send_sge = max_sge;
        ep->rep_attr.cap.max_recv_sge = 1;
        ep->rep_attr.cap.max_inline_data = 0;
        ep->rep_attr.sq_sig_type = IB_SIGNAL_REQ_WR;
        ep->rep_attr.qp_type = IB_QPT_RC;
        ep->rep_attr.port_num = ~0;

        dprintk("RPC: %s: requested max: dtos: send %d recv %d; "
                "iovs: send %d recv %d\n",
                __func__,
                ep->rep_attr.cap.max_send_wr,
                ep->rep_attr.cap.max_recv_wr,
                ep->rep_attr.cap.max_send_sge,
                ep->rep_attr.cap.max_recv_sge);

        /* set trigger for requesting send completion */
        ep->rep_send_batch = min_t(unsigned int, RPCRDMA_MAX_SEND_BATCH,
                                   cdata->max_requests >> 2);
        ep->rep_send_count = ep->rep_send_batch;
        init_waitqueue_head(&ep->rep_connect_wait);
        INIT_DELAYED_WORK(&ep->rep_connect_worker, rpcrdma_connect_worker);

        sendcq = ib_alloc_cq(ia->ri_device, NULL,
                             ep->rep_attr.cap.max_send_wr + 1,
                             ia->ri_device->num_comp_vectors > 1 ? 1 : 0,
                             IB_POLL_WORKQUEUE);
        if (IS_ERR(sendcq)) {
                rc = PTR_ERR(sendcq);
                dprintk("RPC: %s: failed to create send CQ: %i\n",
                        __func__, rc);
                goto out1;
        }

        recvcq = ib_alloc_cq(ia->ri_device, NULL,
                             ep->rep_attr.cap.max_recv_wr + 1,
                             0, IB_POLL_WORKQUEUE);
        if (IS_ERR(recvcq)) {
                rc = PTR_ERR(recvcq);
                dprintk("RPC: %s: failed to create recv CQ: %i\n",
                        __func__, rc);
                goto out2;
        }

        ep->rep_attr.send_cq = sendcq;
        ep->rep_attr.recv_cq = recvcq;

        /* Initialize cma parameters */
        memset(&ep->rep_remote_cma, 0, sizeof(ep->rep_remote_cma));

        /* Prepare RDMA-CM private message */
        pmsg->cp_magic = rpcrdma_cmp_magic;
        pmsg->cp_version = RPCRDMA_CMP_VERSION;
        pmsg->cp_flags |= ia->ri_ops->ro_send_w_inv_ok;
        pmsg->cp_send_size = rpcrdma_encode_buffer_size(cdata->inline_wsize);
        pmsg->cp_recv_size = rpcrdma_encode_buffer_size(cdata->inline_rsize);
        ep->rep_remote_cma.private_data = pmsg;
        ep->rep_remote_cma.private_data_len = sizeof(*pmsg);

        /* Client offers RDMA Read but does not initiate */
        ep->rep_remote_cma.initiator_depth = 0;
        ep->rep_remote_cma.responder_resources =
                min_t(int, U8_MAX, ia->ri_device->attrs.max_qp_rd_atom);

        /* Limit transport retries so client can detect server
         * GID changes quickly. RPC layer handles re-establishing
         * transport connection and retransmission.
         */
        ep->rep_remote_cma.retry_count = 6;

        /* RPC-over-RDMA handles its own flow control. In addition,
         * make all RNR NAKs visible so we know that RPC-over-RDMA
         * flow control is working correctly (no NAKs should be seen).
         */
        ep->rep_remote_cma.flow_control = 0;
        ep->rep_remote_cma.rnr_retry_count = 0;

        return 0;

out2:
        ib_free_cq(sendcq);
out1:
        return rc;
}
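/* Note on rep_send_batch, with an assumed (illustrative) value of
 * cdata->max_requests = 128: the batch becomes
 * min(RPCRDMA_MAX_SEND_BATCH, 128 >> 2) = min(RPCRDMA_MAX_SEND_BATCH, 32),
 * so roughly one Send per batch carries IB_SEND_SIGNALED; requests
 * flagged RPCRDMA_REQ_F_TX_RESOURCES are also signaled (see
 * rpcrdma_ep_post). The figure of 128 is an example, not a default.
 */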
/* Disconnect and destroy endpoint. After this, the only
 * valid operations on the ep are to free it (if dynamically
 * allocated) or re-create it.
 */
void
rpcrdma_ep_destroy(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
        cancel_delayed_work_sync(&ep->rep_connect_worker);

        if (ia->ri_id && ia->ri_id->qp) {
                rpcrdma_ep_disconnect(ep, ia);
                rdma_destroy_qp(ia->ri_id);
                ia->ri_id->qp = NULL;
        }

        if (ep->rep_attr.recv_cq)
                ib_free_cq(ep->rep_attr.recv_cq);
        if (ep->rep_attr.send_cq)
                ib_free_cq(ep->rep_attr.send_cq);
}
/* Re-establish a connection after a device removal event.
 * Unlike a normal reconnection, a fresh PD and a new set
 * of MRs and buffers is needed.
 */
static int
rpcrdma_ep_recreate_xprt(struct rpcrdma_xprt *r_xprt,
                         struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
        int rc, err;

        trace_xprtrdma_reinsert(r_xprt);

        rc = -EHOSTUNREACH;
        if (rpcrdma_ia_open(r_xprt))
                goto out1;

        rc = -ENOMEM;
        err = rpcrdma_ep_create(ep, ia, &r_xprt->rx_data);
        if (err) {
                pr_err("rpcrdma: rpcrdma_ep_create returned %d\n", err);
                goto out2;
        }

        rc = -ENETUNREACH;
        err = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
        if (err) {
                pr_err("rpcrdma: rdma_create_qp returned %d\n", err);
                goto out3;
        }

        rpcrdma_mrs_create(r_xprt);
        return 0;

out3:
        rpcrdma_ep_destroy(ep, ia);
out2:
        rpcrdma_ia_close(ia);
out1:
        return rc;
}
static int
rpcrdma_ep_reconnect(struct rpcrdma_xprt *r_xprt, struct rpcrdma_ep *ep,
                     struct rpcrdma_ia *ia)
{
        struct rdma_cm_id *id, *old;
        int err, rc;

        trace_xprtrdma_reconnect(r_xprt);

        rpcrdma_ep_disconnect(ep, ia);

        rc = -EHOSTUNREACH;
        id = rpcrdma_create_id(r_xprt, ia);
        if (IS_ERR(id))
                goto out;

        /* As long as the new ID points to the same device as the
         * old ID, we can reuse the transport's existing PD and all
         * previously allocated MRs. Also, the same device means
         * the transport's previous DMA mappings are still valid.
         *
         * This is a sanity check only. There should be no way these
         * point to two different devices here.
         */
        old = id;
        rc = -ENETUNREACH;
        if (ia->ri_device != id->device) {
                pr_err("rpcrdma: can't reconnect on different device!\n");
                goto out_destroy;
        }

        err = rdma_create_qp(id, ia->ri_pd, &ep->rep_attr);
        if (err) {
                dprintk("RPC: %s: rdma_create_qp returned %d\n",
                        __func__, err);
                goto out_destroy;
        }

        /* Atomically replace the transport's ID and QP. */
        rc = 0;
        old = ia->ri_id;
        ia->ri_id = id;
        rdma_destroy_qp(old);

out_destroy:
        rdma_destroy_id(old);
out:
        return rc;
}
/*
 * Connect unconnected endpoint.
 */
int
rpcrdma_ep_connect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
        struct rpcrdma_xprt *r_xprt = container_of(ia, struct rpcrdma_xprt,
                                                   rx_ia);
        int rc;

retry:
        switch (ep->rep_connected) {
        case 0:
                dprintk("RPC: %s: connecting...\n", __func__);
                rc = rdma_create_qp(ia->ri_id, ia->ri_pd, &ep->rep_attr);
                if (rc) {
                        dprintk("RPC: %s: rdma_create_qp failed %i\n",
                                __func__, rc);
                        rc = -ENETUNREACH;
                        goto out_noupdate;
                }
                break;
        case -ENODEV:
                rc = rpcrdma_ep_recreate_xprt(r_xprt, ep, ia);
                if (rc)
                        goto out_noupdate;
                break;
        default:
                rc = rpcrdma_ep_reconnect(r_xprt, ep, ia);
                if (rc)
                        goto out;
        }

        ep->rep_connected = 0;
        rpcrdma_post_recvs(r_xprt, true);

        rc = rdma_connect(ia->ri_id, &ep->rep_remote_cma);
        if (rc) {
                dprintk("RPC: %s: rdma_connect() failed with %i\n",
                        __func__, rc);
                goto out;
        }

        wait_event_interruptible(ep->rep_connect_wait, ep->rep_connected != 0);
        if (ep->rep_connected <= 0) {
                if (ep->rep_connected == -EAGAIN)
                        goto retry;
                rc = ep->rep_connected;
                goto out;
        }

        dprintk("RPC: %s: connected\n", __func__);

out:
        if (rc)
                ep->rep_connected = rc;

out_noupdate:
        return rc;
}
/*
 * rpcrdma_ep_disconnect
 *
 * This is separate from destroy to facilitate the ability
 * to reconnect without recreating the endpoint.
 *
 * This call is not reentrant, and must not be made in parallel
 * on the same endpoint.
 */
void
rpcrdma_ep_disconnect(struct rpcrdma_ep *ep, struct rpcrdma_ia *ia)
{
        int rc;

        rc = rdma_disconnect(ia->ri_id);
        if (!rc)
                /* returns without wait if not connected */
                wait_event_interruptible(ep->rep_connect_wait,
                                         ep->rep_connected != 1);
        else
                ep->rep_connected = rc;
        trace_xprtrdma_disconnect(container_of(ep, struct rpcrdma_xprt,
                                               rx_ep), rc);

        ib_drain_qp(ia->ri_id->qp);
}
/* Fixed-size circular FIFO queue. This implementation is wait-free and
 * lock-free.
 *
 * Consumer is the code path that posts Sends. This path dequeues a
 * sendctx for use by a Send operation. Multiple consumer threads
 * are serialized by the RPC transport lock, which allows only one
 * ->send_request call at a time.
 *
 * Producer is the code path that handles Send completions. This path
 * enqueues a sendctx that has been completed. Multiple producer
 * threads are serialized by the ib_poll_cq() function.
 */

/* rpcrdma_sendctxs_destroy() assumes caller has already quiesced
 * queue activity, and ib_drain_qp has flushed all remaining Send
 * requests.
 */
static void rpcrdma_sendctxs_destroy(struct rpcrdma_buffer *buf)
{
        unsigned long i;

        for (i = 0; i <= buf->rb_sc_last; i++)
                kfree(buf->rb_sc_ctxs[i]);
        kfree(buf->rb_sc_ctxs);
}
static struct rpcrdma_sendctx *rpcrdma_sendctx_create(struct rpcrdma_ia *ia)
{
        struct rpcrdma_sendctx *sc;

        sc = kzalloc(sizeof(*sc) +
                     ia->ri_max_send_sges * sizeof(struct ib_sge),
                     GFP_KERNEL);
        if (!sc)
                return NULL;

        sc->sc_wr.wr_cqe = &sc->sc_cqe;
        sc->sc_wr.sg_list = sc->sc_sges;
        sc->sc_wr.opcode = IB_WR_SEND;
        sc->sc_cqe.done = rpcrdma_wc_send;
        return sc;
}
static int rpcrdma_sendctxs_create(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_sendctx *sc;
        unsigned long i;

        /* Maximum number of concurrent outstanding Send WRs. Capping
         * the circular queue size stops Send Queue overflow by causing
         * the ->send_request call to fail temporarily before too many
         * Sends are posted.
         */
        i = buf->rb_max_requests + RPCRDMA_MAX_BC_REQUESTS;
        dprintk("RPC: %s: allocating %lu send_ctxs\n", __func__, i);
        buf->rb_sc_ctxs = kcalloc(i, sizeof(sc), GFP_KERNEL);
        if (!buf->rb_sc_ctxs)
                return -ENOMEM;

        buf->rb_sc_last = i - 1;
        for (i = 0; i <= buf->rb_sc_last; i++) {
                sc = rpcrdma_sendctx_create(&r_xprt->rx_ia);
                if (!sc)
                        goto out_destroy;

                sc->sc_xprt = r_xprt;
                buf->rb_sc_ctxs[i] = sc;
        }

        return 0;

out_destroy:
        rpcrdma_sendctxs_destroy(buf);
        return -ENOMEM;
}
/* The sendctx queue is not guaranteed to have a size that is a
 * power of two, thus the helpers in circ_buf.h cannot be used.
 * The other option is to use modulus (%), which can be expensive.
 */
static unsigned long rpcrdma_sendctx_next(struct rpcrdma_buffer *buf,
                                          unsigned long item)
{
        return likely(item < buf->rb_sc_last) ? item + 1 : 0;
}
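/* For illustration: with rb_sc_last == 4 (a five-slot ring, an arbitrary
 * example size), successive calls advance an index 0 -> 1 -> 2 -> 3 -> 4
 * -> 0, giving the same result as (item + 1) % 5 but using a compare
 * instead of a divide.
 */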
/**
 * rpcrdma_sendctx_get_locked - Acquire a send context
 * @buf: transport buffers from which to acquire an unused context
 *
 * Returns pointer to a free send completion context; or NULL if
 * the queue is empty.
 *
 * Usage: Called to acquire an SGE array before preparing a Send WR.
 *
 * The caller serializes calls to this function (per rpcrdma_buffer),
 * and provides an effective memory barrier that flushes the new value
 * of rb_sc_head.
 */
struct rpcrdma_sendctx *rpcrdma_sendctx_get_locked(struct rpcrdma_buffer *buf)
{
        struct rpcrdma_xprt *r_xprt;
        struct rpcrdma_sendctx *sc;
        unsigned long next_head;

        next_head = rpcrdma_sendctx_next(buf, buf->rb_sc_head);

        if (next_head == READ_ONCE(buf->rb_sc_tail))
                goto out_emptyq;

        /* ORDER: item must be accessed _before_ head is updated */
        sc = buf->rb_sc_ctxs[next_head];

        /* Releasing the lock in the caller acts as a memory
         * barrier that flushes rb_sc_head.
         */
        buf->rb_sc_head = next_head;

        return sc;

out_emptyq:
        /* The queue is "empty" if there have not been enough Send
         * completions recently. This is a sign the Send Queue is
         * backing up. Cause the caller to pause and try again.
         */
        set_bit(RPCRDMA_BUF_F_EMPTY_SCQ, &buf->rb_flags);
        r_xprt = container_of(buf, struct rpcrdma_xprt, rx_buf);
        r_xprt->rx_stats.empty_sendctx_q++;
        return NULL;
}
/**
 * rpcrdma_sendctx_put_locked - Release a send context
 * @sc: send context to release
 *
 * Usage: Called from Send completion to return a sendctxt
 * to the queue.
 *
 * The caller serializes calls to this function (per rpcrdma_buffer).
 */
static void
rpcrdma_sendctx_put_locked(struct rpcrdma_sendctx *sc)
{
        struct rpcrdma_buffer *buf = &sc->sc_xprt->rx_buf;
        unsigned long next_tail;

        /* Unmap SGEs of previously completed but unsignaled
         * Sends by walking up the queue until @sc is found.
         */
        next_tail = buf->rb_sc_tail;
        do {
                next_tail = rpcrdma_sendctx_next(buf, next_tail);

                /* ORDER: item must be accessed _before_ tail is updated */
                rpcrdma_unmap_sendctx(buf->rb_sc_ctxs[next_tail]);

        } while (buf->rb_sc_ctxs[next_tail] != sc);

        /* Paired with READ_ONCE */
        smp_store_release(&buf->rb_sc_tail, next_tail);

        if (test_and_clear_bit(RPCRDMA_BUF_F_EMPTY_SCQ, &buf->rb_flags)) {
                smp_mb__after_atomic();
                xprt_write_space(&sc->sc_xprt->rx_xprt);
        }
}
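/* Worked example of the tail walk above (slot numbers are illustrative):
 * suppose rb_sc_tail is 2 and the Sends in slots 3, 4, and 5 were posted
 * unsignaled, with the Send in slot 6 signaled. When slot 6 completes,
 * the do/while loop unmaps slots 3, 4, 5, and 6 in order, then
 * smp_store_release() publishes rb_sc_tail = 6 so the consumer side
 * (rpcrdma_sendctx_get_locked) sees all four slots as free again.
 */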
static void
rpcrdma_mr_recovery_worker(struct work_struct *work)
{
        struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
                                                  rb_recovery_worker.work);
        struct rpcrdma_mr *mr;

        spin_lock(&buf->rb_recovery_lock);
        while (!list_empty(&buf->rb_stale_mrs)) {
                mr = rpcrdma_mr_pop(&buf->rb_stale_mrs);
                spin_unlock(&buf->rb_recovery_lock);

                trace_xprtrdma_recover_mr(mr);
                mr->mr_xprt->rx_ia.ri_ops->ro_recover_mr(mr);

                spin_lock(&buf->rb_recovery_lock);
        }
        spin_unlock(&buf->rb_recovery_lock);
}
void
rpcrdma_mr_defer_recovery(struct rpcrdma_mr *mr)
{
        struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;

        spin_lock(&buf->rb_recovery_lock);
        rpcrdma_mr_push(mr, &buf->rb_stale_mrs);
        spin_unlock(&buf->rb_recovery_lock);

        schedule_delayed_work(&buf->rb_recovery_worker, 0);
}
static void
rpcrdma_mrs_create(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        unsigned int count;
        LIST_HEAD(free);
        LIST_HEAD(all);

        for (count = 0; count < 3; count++) {
                struct rpcrdma_mr *mr;
                int rc;

                mr = kzalloc(sizeof(*mr), GFP_KERNEL);
                if (!mr)
                        break;

                rc = ia->ri_ops->ro_init_mr(ia, mr);
                if (rc) {
                        kfree(mr);
                        break;
                }

                mr->mr_xprt = r_xprt;

                list_add(&mr->mr_list, &free);
                list_add(&mr->mr_all, &all);
        }

        spin_lock(&buf->rb_mrlock);
        list_splice(&free, &buf->rb_mrs);
        list_splice(&all, &buf->rb_all);
        r_xprt->rx_stats.mrs_allocated += count;
        spin_unlock(&buf->rb_mrlock);
        trace_xprtrdma_createmrs(r_xprt, count);

        xprt_write_space(&r_xprt->rx_xprt);
}
static void
rpcrdma_mr_refresh_worker(struct work_struct *work)
{
        struct rpcrdma_buffer *buf = container_of(work, struct rpcrdma_buffer,
                                                  rb_refresh_worker.work);
        struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
                                                   rx_buf);

        rpcrdma_mrs_create(r_xprt);
}
struct rpcrdma_req *
rpcrdma_create_req(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buffer = &r_xprt->rx_buf;
        struct rpcrdma_regbuf *rb;
        struct rpcrdma_req *req;

        req = kzalloc(sizeof(*req), GFP_KERNEL);
        if (req == NULL)
                return ERR_PTR(-ENOMEM);

        rb = rpcrdma_alloc_regbuf(RPCRDMA_HDRBUF_SIZE,
                                  DMA_TO_DEVICE, GFP_KERNEL);
        if (IS_ERR(rb)) {
                kfree(req);
                return ERR_PTR(-ENOMEM);
        }
        req->rl_rdmabuf = rb;
        xdr_buf_init(&req->rl_hdrbuf, rb->rg_base, rdmab_length(rb));
        req->rl_buffer = buffer;
        INIT_LIST_HEAD(&req->rl_registered);

        spin_lock(&buffer->rb_reqslock);
        list_add(&req->rl_all, &buffer->rb_allreqs);
        spin_unlock(&buffer->rb_reqslock);
        return req;
}
static int
rpcrdma_create_rep(struct rpcrdma_xprt *r_xprt, bool temp)
{
        struct rpcrdma_create_data_internal *cdata = &r_xprt->rx_data;
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_rep *rep;
        int rc;

        rc = -ENOMEM;
        rep = kzalloc(sizeof(*rep), GFP_KERNEL);
        if (rep == NULL)
                goto out;

        rep->rr_rdmabuf = rpcrdma_alloc_regbuf(cdata->inline_rsize,
                                               DMA_FROM_DEVICE, GFP_KERNEL);
        if (IS_ERR(rep->rr_rdmabuf)) {
                rc = PTR_ERR(rep->rr_rdmabuf);
                goto out_free;
        }
        xdr_buf_init(&rep->rr_hdrbuf, rep->rr_rdmabuf->rg_base,
                     rdmab_length(rep->rr_rdmabuf));

        rep->rr_cqe.done = rpcrdma_wc_receive;
        rep->rr_rxprt = r_xprt;
        INIT_WORK(&rep->rr_work, rpcrdma_deferred_completion);
        rep->rr_recv_wr.next = NULL;
        rep->rr_recv_wr.wr_cqe = &rep->rr_cqe;
        rep->rr_recv_wr.sg_list = &rep->rr_rdmabuf->rg_iov;
        rep->rr_recv_wr.num_sge = 1;
        rep->rr_temp = temp;

        spin_lock(&buf->rb_lock);
        list_add(&rep->rr_list, &buf->rb_recv_bufs);
        spin_unlock(&buf->rb_lock);
        return 0;

out_free:
        kfree(rep);
out:
        dprintk("RPC: %s: reply buffer %d alloc failed\n",
                __func__, rc);
        return rc;
}
int
rpcrdma_buffer_create(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        int i, rc;

        buf->rb_max_requests = r_xprt->rx_data.max_requests;
        buf->rb_bc_srv_max_requests = 0;
        spin_lock_init(&buf->rb_mrlock);
        spin_lock_init(&buf->rb_lock);
        spin_lock_init(&buf->rb_recovery_lock);
        INIT_LIST_HEAD(&buf->rb_mrs);
        INIT_LIST_HEAD(&buf->rb_all);
        INIT_LIST_HEAD(&buf->rb_stale_mrs);
        INIT_DELAYED_WORK(&buf->rb_refresh_worker,
                          rpcrdma_mr_refresh_worker);
        INIT_DELAYED_WORK(&buf->rb_recovery_worker,
                          rpcrdma_mr_recovery_worker);

        rpcrdma_mrs_create(r_xprt);

        INIT_LIST_HEAD(&buf->rb_send_bufs);
        INIT_LIST_HEAD(&buf->rb_allreqs);
        spin_lock_init(&buf->rb_reqslock);
        for (i = 0; i < buf->rb_max_requests; i++) {
                struct rpcrdma_req *req;

                req = rpcrdma_create_req(r_xprt);
                if (IS_ERR(req)) {
                        dprintk("RPC: %s: request buffer %d alloc"
                                " failed\n", __func__, i);
                        rc = PTR_ERR(req);
                        goto out;
                }
                list_add(&req->rl_list, &buf->rb_send_bufs);
        }

        buf->rb_credits = 1;
        buf->rb_posted_receives = 0;
        INIT_LIST_HEAD(&buf->rb_recv_bufs);

        rc = rpcrdma_sendctxs_create(r_xprt);
        if (rc)
                goto out;

        return 0;
out:
        rpcrdma_buffer_destroy(buf);
        return rc;
}
static void
rpcrdma_destroy_rep(struct rpcrdma_rep *rep)
{
        rpcrdma_free_regbuf(rep->rr_rdmabuf);
        kfree(rep);
}

void
rpcrdma_destroy_req(struct rpcrdma_req *req)
{
        rpcrdma_free_regbuf(req->rl_recvbuf);
        rpcrdma_free_regbuf(req->rl_sendbuf);
        rpcrdma_free_regbuf(req->rl_rdmabuf);
        kfree(req);
}
static void
rpcrdma_mrs_destroy(struct rpcrdma_buffer *buf)
{
        struct rpcrdma_xprt *r_xprt = container_of(buf, struct rpcrdma_xprt,
                                                   rx_buf);
        struct rpcrdma_ia *ia = rdmab_to_ia(buf);
        struct rpcrdma_mr *mr;
        unsigned int count;

        count = 0;
        spin_lock(&buf->rb_mrlock);
        while (!list_empty(&buf->rb_all)) {
                mr = list_entry(buf->rb_all.next, struct rpcrdma_mr, mr_all);
                list_del(&mr->mr_all);

                spin_unlock(&buf->rb_mrlock);

                /* Ensure MW is not on any rl_registered list */
                if (!list_empty(&mr->mr_list))
                        list_del(&mr->mr_list);

                ia->ri_ops->ro_release_mr(mr);
                count++;
                spin_lock(&buf->rb_mrlock);
        }
        spin_unlock(&buf->rb_mrlock);
        r_xprt->rx_stats.mrs_allocated = 0;

        dprintk("RPC: %s: released %u MRs\n", __func__, count);
}
void
rpcrdma_buffer_destroy(struct rpcrdma_buffer *buf)
{
        cancel_delayed_work_sync(&buf->rb_recovery_worker);
        cancel_delayed_work_sync(&buf->rb_refresh_worker);

        rpcrdma_sendctxs_destroy(buf);

        while (!list_empty(&buf->rb_recv_bufs)) {
                struct rpcrdma_rep *rep;

                rep = list_first_entry(&buf->rb_recv_bufs,
                                       struct rpcrdma_rep, rr_list);
                list_del(&rep->rr_list);
                rpcrdma_destroy_rep(rep);
        }

        spin_lock(&buf->rb_reqslock);
        while (!list_empty(&buf->rb_allreqs)) {
                struct rpcrdma_req *req;

                req = list_first_entry(&buf->rb_allreqs,
                                       struct rpcrdma_req, rl_all);
                list_del(&req->rl_all);

                spin_unlock(&buf->rb_reqslock);
                rpcrdma_destroy_req(req);
                spin_lock(&buf->rb_reqslock);
        }
        spin_unlock(&buf->rb_reqslock);

        rpcrdma_mrs_destroy(buf);
}
/**
 * rpcrdma_mr_get - Allocate an rpcrdma_mr object
 * @r_xprt: controlling transport
 *
 * Returns an initialized rpcrdma_mr or NULL if no free
 * rpcrdma_mr objects are available.
 */
struct rpcrdma_mr *
rpcrdma_mr_get(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct rpcrdma_mr *mr = NULL;

        spin_lock(&buf->rb_mrlock);
        if (!list_empty(&buf->rb_mrs))
                mr = rpcrdma_mr_pop(&buf->rb_mrs);
        spin_unlock(&buf->rb_mrlock);

        if (!mr)
                goto out_nomrs;
        return mr;

out_nomrs:
        trace_xprtrdma_nomrs(r_xprt);
        if (r_xprt->rx_ep.rep_connected != -ENODEV)
                schedule_delayed_work(&buf->rb_refresh_worker, 0);

        /* Allow the reply handler and refresh worker to run */
        cond_resched();

        return NULL;
}
static void
__rpcrdma_mr_put(struct rpcrdma_buffer *buf, struct rpcrdma_mr *mr)
{
        spin_lock(&buf->rb_mrlock);
        rpcrdma_mr_push(mr, &buf->rb_mrs);
        spin_unlock(&buf->rb_mrlock);
}

/**
 * rpcrdma_mr_put - Release an rpcrdma_mr object
 * @mr: object to release
 *
 */
void
rpcrdma_mr_put(struct rpcrdma_mr *mr)
{
        __rpcrdma_mr_put(&mr->mr_xprt->rx_buf, mr);
}
/**
 * rpcrdma_mr_unmap_and_put - DMA unmap an MR and release it
 * @mr: object to release
 *
 */
void
rpcrdma_mr_unmap_and_put(struct rpcrdma_mr *mr)
{
        struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

        trace_xprtrdma_dma_unmap(mr);
        ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
                        mr->mr_sg, mr->mr_nents, mr->mr_dir);
        __rpcrdma_mr_put(&r_xprt->rx_buf, mr);
}
/**
 * rpcrdma_buffer_get - Get a request buffer
 * @buffers: Buffer pool from which to obtain a buffer
 *
 * Returns a fresh rpcrdma_req, or NULL if none are available.
 */
struct rpcrdma_req *
rpcrdma_buffer_get(struct rpcrdma_buffer *buffers)
{
        struct rpcrdma_req *req;

        spin_lock(&buffers->rb_lock);
        req = list_first_entry_or_null(&buffers->rb_send_bufs,
                                       struct rpcrdma_req, rl_list);
        if (req)
                list_del_init(&req->rl_list);
        spin_unlock(&buffers->rb_lock);
        return req;
}
/**
 * rpcrdma_buffer_put - Put request/reply buffers back into pool
 * @req: object to return
 *
 */
void
rpcrdma_buffer_put(struct rpcrdma_req *req)
{
        struct rpcrdma_buffer *buffers = req->rl_buffer;
        struct rpcrdma_rep *rep = req->rl_reply;

        req->rl_reply = NULL;

        spin_lock(&buffers->rb_lock);
        list_add(&req->rl_list, &buffers->rb_send_bufs);
        if (rep) {
                if (!rep->rr_temp) {
                        list_add(&rep->rr_list, &buffers->rb_recv_bufs);
                        rep = NULL;
                }
        }
        spin_unlock(&buffers->rb_lock);
        if (rep)
                rpcrdma_destroy_rep(rep);
}
/*
 * Put reply buffers back into pool when not attached to
 * request. This happens in error conditions.
 */
void
rpcrdma_recv_buffer_put(struct rpcrdma_rep *rep)
{
        struct rpcrdma_buffer *buffers = &rep->rr_rxprt->rx_buf;

        if (!rep->rr_temp) {
                spin_lock(&buffers->rb_lock);
                list_add(&rep->rr_list, &buffers->rb_recv_bufs);
                spin_unlock(&buffers->rb_lock);
        } else {
                rpcrdma_destroy_rep(rep);
        }
}
/**
 * rpcrdma_alloc_regbuf - allocate and DMA-map memory for SEND/RECV buffers
 * @size: size of buffer to be allocated, in bytes
 * @direction: direction of data movement
 * @flags: GFP flags
 *
 * Returns an ERR_PTR, or a pointer to a regbuf, a buffer that
 * can be persistently DMA-mapped for I/O.
 *
 * xprtrdma uses a regbuf for posting an outgoing RDMA SEND, or for
 * receiving the payload of RDMA RECV operations. During Long Calls
 * or Replies they may be registered externally via ro_map.
 */
struct rpcrdma_regbuf *
rpcrdma_alloc_regbuf(size_t size, enum dma_data_direction direction,
                     gfp_t flags)
{
        struct rpcrdma_regbuf *rb;

        rb = kmalloc(sizeof(*rb) + size, flags);
        if (rb == NULL)
                return ERR_PTR(-ENOMEM);

        rb->rg_device = NULL;
        rb->rg_direction = direction;
        rb->rg_iov.length = size;

        return rb;
}
/**
 * __rpcrdma_map_regbuf - DMA-map a regbuf
 * @ia: controlling rpcrdma_ia
 * @rb: regbuf to be mapped
 */
bool
__rpcrdma_dma_map_regbuf(struct rpcrdma_ia *ia, struct rpcrdma_regbuf *rb)
{
        struct ib_device *device = ia->ri_device;

        if (rb->rg_direction == DMA_NONE)
                return false;

        rb->rg_iov.addr = ib_dma_map_single(device,
                                            (void *)rb->rg_base,
                                            rdmab_length(rb),
                                            rb->rg_direction);
        if (ib_dma_mapping_error(device, rdmab_addr(rb)))
                return false;

        rb->rg_device = device;
        rb->rg_iov.lkey = ia->ri_pd->local_dma_lkey;
        return true;
}
static void
rpcrdma_dma_unmap_regbuf(struct rpcrdma_regbuf *rb)
{
        if (!rb)
                return;

        if (!rpcrdma_regbuf_is_mapped(rb))
                return;

        ib_dma_unmap_single(rb->rg_device, rdmab_addr(rb),
                            rdmab_length(rb), rb->rg_direction);
        rb->rg_device = NULL;
}

/**
 * rpcrdma_free_regbuf - deregister and free registered buffer
 * @rb: regbuf to be deregistered and freed
 */
void
rpcrdma_free_regbuf(struct rpcrdma_regbuf *rb)
{
        rpcrdma_dma_unmap_regbuf(rb);
        kfree(rb);
}
/*
 * Prepost any receive buffer, then post send.
 *
 * Receive buffer is donated to hardware, reclaimed upon recv completion.
 */
int
rpcrdma_ep_post(struct rpcrdma_ia *ia,
                struct rpcrdma_ep *ep,
                struct rpcrdma_req *req)
{
        struct ib_send_wr *send_wr = &req->rl_sendctx->sc_wr;
        int rc;

        if (!ep->rep_send_count ||
            test_bit(RPCRDMA_REQ_F_TX_RESOURCES, &req->rl_flags)) {
                send_wr->send_flags |= IB_SEND_SIGNALED;
                ep->rep_send_count = ep->rep_send_batch;
        } else {
                send_wr->send_flags &= ~IB_SEND_SIGNALED;
                --ep->rep_send_count;
        }

        rc = ia->ri_ops->ro_send(ia, req);
        trace_xprtrdma_post_send(req, rc);
        if (rc)
                return -ENOTCONN;
        return 0;
}
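/* Signaling policy, for illustration: Sends are left unsignaled until
 * rep_send_count reaches zero, at which point one Send carries
 * IB_SEND_SIGNALED and the counter resets to rep_send_batch. Requests
 * flagged RPCRDMA_REQ_F_TX_RESOURCES are always signaled. The Send
 * completion for a signaled WR then retires every unsignaled sendctx
 * queued before it (see rpcrdma_sendctx_put_locked).
 */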
/**
 * rpcrdma_post_recvs - Maybe post some Receive buffers
 * @r_xprt: controlling transport
 * @temp: when true, allocate temp rpcrdma_rep objects
 *
 */
void
rpcrdma_post_recvs(struct rpcrdma_xprt *r_xprt, bool temp)
{
        struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
        struct ib_recv_wr *wr, *bad_wr;
        int needed, count, rc;

        needed = buf->rb_credits + (buf->rb_bc_srv_max_requests << 1);
        if (buf->rb_posted_receives > needed)
                return;
        needed -= buf->rb_posted_receives;

        count = 0;
        wr = NULL;
        while (needed) {
                struct rpcrdma_regbuf *rb;
                struct rpcrdma_rep *rep;

                spin_lock(&buf->rb_lock);
                rep = list_first_entry_or_null(&buf->rb_recv_bufs,
                                               struct rpcrdma_rep, rr_list);
                if (likely(rep))
                        list_del(&rep->rr_list);
                spin_unlock(&buf->rb_lock);
                if (!rep) {
                        if (rpcrdma_create_rep(r_xprt, temp))
                                break;
                        continue;
                }

                rb = rep->rr_rdmabuf;
                if (!rpcrdma_regbuf_is_mapped(rb)) {
                        if (!__rpcrdma_dma_map_regbuf(&r_xprt->rx_ia, rb)) {
                                rpcrdma_recv_buffer_put(rep);
                                break;
                        }
                }

                trace_xprtrdma_post_recv(rep->rr_recv_wr.wr_cqe);
                rep->rr_recv_wr.next = wr;
                wr = &rep->rr_recv_wr;
                ++count;
                --needed;
        }
        if (!count)
                return;

        rc = ib_post_recv(r_xprt->rx_ia.ri_id->qp, wr,
                          (const struct ib_recv_wr **)&bad_wr);
        if (rc) {
                for (wr = bad_wr; wr;) {
                        struct rpcrdma_rep *rep;

                        rep = container_of(wr, struct rpcrdma_rep, rr_recv_wr);
                        wr = wr->next;
                        rpcrdma_recv_buffer_put(rep);
                        --count;
                }
        }
        buf->rb_posted_receives += count;
        trace_xprtrdma_post_recvs(r_xprt, count, rc);
}
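/* Accounting sketch for the "needed" calculation above, using made-up
 * numbers: if the server has granted rb_credits = 32 and backchannel
 * support reserves rb_bc_srv_max_requests = 2, the target is
 * 32 + (2 << 1) = 36 posted Receives. With rb_posted_receives = 30,
 * this call chains up to 6 Receive WRs and posts them with a single
 * ib_post_recv() call.
 */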