// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, 2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR).
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
 * Work Request (frwr_op_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_op_unmap_sync).
 *
 * Typically these Work Requests are not signaled, and neither are RDMA
 * SEND Work Requests (with the exception of signaling occasionally to
 * prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 *
 * As an optimization, frwr_op_unmap marks MRs INVALID before the
 * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
 * rb_mrs immediately so that no work (like managing a linked list
 * under a spinlock) is needed in the completion upcall.
 *
 * But this means that frwr_op_map() can occasionally encounter an MR
 * that is INVALID but the LOCAL_INV WR has not completed. Work Queue
 * ordering prevents a subsequent FAST_REG WR from executing against
 * that MR while it is still being invalidated.
 */

/* Transport recovery
 *
 * ->op_map and the transport connect worker cannot run at the same
 * time, but ->op_unmap can fire while the transport connect worker
 * is running. Thus MR recovery is handled in ->op_map, to guarantee
 * that recovered MRs are owned by a sending RPC, and not one where
 * ->op_unmap could fire at the same time transport reconnect is
 * being done.
 *
 * When the underlying transport disconnects, MRs are left in one of
 * four states:
 *
 * INVALID:	The MR was not in use before the QP entered ERROR state.
 *
 * VALID:	The MR was registered before the QP entered ERROR state.
 *
 * FLUSHED_FR:	The MR was being registered when the QP entered ERROR
 *		state, and the pending WR was flushed.
 *
 * FLUSHED_LI:	The MR was being invalidated when the QP entered ERROR
 *		state, and the pending WR was flushed.
 *
 * When frwr_op_map encounters FLUSHED and VALID MRs, they are recovered
 * with ib_dereg_mr and then are re-initialized. Because MR recovery
 * allocates fresh resources, it is deferred to a workqueue, and the
 * recovered MRs are placed back on the rb_mrs list when recovery is
 * complete. frwr_op_map allocates another MR for the current RPC while
 * the broken MR is reset.
 *
 * To ensure that frwr_op_map doesn't encounter an MR that is marked
 * INVALID but that is about to be flushed due to a previous transport
 * disconnect, the transport connect worker attempts to drain all
 * pending send queue WRs before the transport is reconnected.
 */
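
/* A reader's sketch of the recovery state machine described above
 * (informal summary added for clarity, not authoritative; state names
 * follow enum rpcrdma_frwr_state):
 *
 *   FRWR_IS_INVALID --(FAST_REG posted by frwr_op_map)---> FRWR_IS_VALID
 *   FRWR_IS_VALID   --(LOCAL_INV posted by ->op_unmap)---> FRWR_IS_INVALID
 *   in-flight WR    --(flushed by QP entering ERROR)-----> FRWR_FLUSHED_FR or
 *                                                          FRWR_FLUSHED_LI
 *
 * VALID, FLUSHED_FR, and FLUSHED_LI MRs found by frwr_op_map are handed
 * to frwr_op_recover_mr, which replaces the underlying ib_mr and puts
 * the MR back in the INVALID state.
 */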
#include <linux/sunrpc/rpc_rdma.h>
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif
bool
frwr_is_supported(struct rpcrdma_ia *ia)
{
        struct ib_device_attr *attrs = &ia->ri_device->attrs;

        if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
                goto out_not_supported;
        if (attrs->max_fast_reg_page_list_len == 0)
                goto out_not_supported;
        return true;

out_not_supported:
        pr_info("rpcrdma: 'frwr' mode is not supported by device %s\n",
                ia->ri_device->name);
        return false;
}
static int
frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
{
        unsigned int depth = ia->ri_max_frwr_depth;
        struct rpcrdma_frwr *frwr = &mr->frwr;
        int rc;

        frwr->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
        if (IS_ERR(frwr->fr_mr))
                goto out_mr_err;

        mr->mr_sg = kcalloc(depth, sizeof(*mr->mr_sg), GFP_KERNEL);
        if (!mr->mr_sg)
                goto out_list_err;

        INIT_LIST_HEAD(&mr->mr_list);
        sg_init_table(mr->mr_sg, depth);
        init_completion(&frwr->fr_linv_done);
        return 0;

out_mr_err:
        rc = PTR_ERR(frwr->fr_mr);
        dprintk("RPC: %s: ib_alloc_mr status %i\n",
                __func__, rc);
        return rc;

out_list_err:
        rc = -ENOMEM;
        dprintk("RPC: %s: sg allocation failure\n",
                __func__);
        ib_dereg_mr(frwr->fr_mr);
        return rc;
}
static void
frwr_op_release_mr(struct rpcrdma_mr *mr)
{
        int rc;

        rc = ib_dereg_mr(mr->frwr.fr_mr);
        if (rc)
                pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n",
                       mr, rc);
        kfree(mr->mr_sg);
        kfree(mr);
}
static int
__frwr_mr_reset(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
{
        struct rpcrdma_frwr *frwr = &mr->frwr;
        int rc;

        rc = ib_dereg_mr(frwr->fr_mr);
        if (rc) {
                pr_warn("rpcrdma: ib_dereg_mr status %d, frwr %p orphaned\n",
                        rc, mr);
                return rc;
        }

        frwr->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype,
                                  ia->ri_max_frwr_depth);
        if (IS_ERR(frwr->fr_mr)) {
                pr_warn("rpcrdma: ib_alloc_mr status %ld, frwr %p orphaned\n",
                        PTR_ERR(frwr->fr_mr), mr);
                return PTR_ERR(frwr->fr_mr);
        }

        dprintk("RPC: %s: recovered FRWR %p\n", __func__, frwr);
        frwr->fr_state = FRWR_IS_INVALID;
        return 0;
}
/* Reset of a single FRWR. Generate a fresh rkey by replacing the MR.
 */
static void
frwr_op_recover_mr(struct rpcrdma_mr *mr)
{
        enum rpcrdma_frwr_state state = mr->frwr.fr_state;
        struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        int rc;

        rc = __frwr_mr_reset(ia, mr);
        if (state != FRWR_FLUSHED_LI) {
                trace_xprtrdma_dma_unmap(mr);
                ib_dma_unmap_sg(ia->ri_device,
                                mr->mr_sg, mr->mr_nents, mr->mr_dir);
        }
        if (rc)
                goto out_release;

        rpcrdma_mr_put(mr);
        r_xprt->rx_stats.mrs_recovered++;
        return;

out_release:
        pr_err("rpcrdma: FRWR reset failed %d, %p released\n", rc, mr);
        r_xprt->rx_stats.mrs_orphaned++;

        spin_lock(&r_xprt->rx_buf.rb_mrlock);
        list_del(&mr->mr_all);
        spin_unlock(&r_xprt->rx_buf.rb_mrlock);

        frwr_op_release_mr(mr);
}
/* On success, sets:
 *	ep->rep_attr.cap.max_send_wr
 *	ep->rep_attr.cap.max_recv_wr
 *	cdata->max_requests
 *	ia->ri_max_segs
 *
 * And these FRWR-related fields:
 *	ia->ri_max_frwr_depth
 *	ia->ri_mrtype
 */
static int
frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
             struct rpcrdma_create_data_internal *cdata)
{
        struct ib_device_attr *attrs = &ia->ri_device->attrs;
        int max_qp_wr, depth, delta;

        ia->ri_mrtype = IB_MR_TYPE_MEM_REG;
        if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
                ia->ri_mrtype = IB_MR_TYPE_SG_GAPS;

        ia->ri_max_frwr_depth =
                        min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
                              attrs->max_fast_reg_page_list_len);
        dprintk("RPC: %s: device's max FR page list len = %u\n",
                __func__, ia->ri_max_frwr_depth);

        /* Add room for frwr register and invalidate WRs.
         * 1. FRWR reg WR for head
         * 2. FRWR invalidate WR for head
         * 3. N FRWR reg WRs for pagelist
         * 4. N FRWR invalidate WRs for pagelist
         * 5. FRWR reg WR for tail
         * 6. FRWR invalidate WR for tail
         * 7. The RDMA_SEND WR
         */
        depth = 7;

        /* Calculate N if the device max FRWR depth is smaller than
         * RPCRDMA_MAX_DATA_SEGS.
         */
        if (ia->ri_max_frwr_depth < RPCRDMA_MAX_DATA_SEGS) {
                delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frwr_depth;
                do {
                        depth += 2; /* FRWR reg + invalidate */
                        delta -= ia->ri_max_frwr_depth;
                } while (delta > 0);
        }
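
        /* Illustrative arithmetic (example values, not from any particular
         * device): if RPCRDMA_MAX_DATA_SEGS were 64 and the device's FRWR
         * depth were 16, delta would start at 48 and the loop above would
         * run three times, leaving depth == 7 + 3 * 2 == 13 Work Requests
         * budgeted per RPC.
         */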
        max_qp_wr = ia->ri_device->attrs.max_qp_wr;
        max_qp_wr -= RPCRDMA_BACKWARD_WRS;
        max_qp_wr -= 1;
        if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
                return -ENOMEM;
        if (cdata->max_requests > max_qp_wr)
                cdata->max_requests = max_qp_wr;
        ep->rep_attr.cap.max_send_wr = cdata->max_requests * depth;
        if (ep->rep_attr.cap.max_send_wr > max_qp_wr) {
                cdata->max_requests = max_qp_wr / depth;
                if (!cdata->max_requests)
                        return -EINVAL;
                ep->rep_attr.cap.max_send_wr = cdata->max_requests *
                                               depth;
        }
        ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
        ep->rep_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
        ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
        ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
        ep->rep_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */

        ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
                                ia->ri_max_frwr_depth);
        return 0;
}
/* FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
static size_t
frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;

        return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
                     RPCRDMA_MAX_HDR_SEGS * ia->ri_max_frwr_depth);
}
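
/* Worked example (illustrative values only, added for clarity): if
 * ri_max_frwr_depth were 16 and RPCRDMA_MAX_HDR_SEGS were 8, the
 * function above would report min(RPCRDMA_MAX_DATA_SEGS, 128) pages.
 */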
static void
__frwr_sendcompletion_flush(struct ib_wc *wc, const char *wr)
{
        if (wc->status != IB_WC_WR_FLUSH_ERR)
                pr_err("rpcrdma: %s: %s (%u/0x%x)\n",
                       wr, ib_wc_status_msg(wc->status),
                       wc->status, wc->vendor_err);
}
/**
 * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct rpcrdma_frwr *frwr =
                        container_of(cqe, struct rpcrdma_frwr, fr_cqe);

        /* WARNING: Only wr_cqe and status are reliable at this point */
        if (wc->status != IB_WC_SUCCESS) {
                frwr->fr_state = FRWR_FLUSHED_FR;
                __frwr_sendcompletion_flush(wc, "fastreg");
        }
        trace_xprtrdma_wc_fastreg(wc, frwr);
}
/**
 * frwr_wc_localinv - Invoked by RDMA provider for a flushed LocalInv WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct rpcrdma_frwr *frwr = container_of(cqe, struct rpcrdma_frwr,
                                                 fr_cqe);

        /* WARNING: Only wr_cqe and status are reliable at this point */
        if (wc->status != IB_WC_SUCCESS) {
                frwr->fr_state = FRWR_FLUSHED_LI;
                __frwr_sendcompletion_flush(wc, "localinv");
        }
        trace_xprtrdma_wc_li(wc, frwr);
}
/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for a signaled LocalInv WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void
frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
        struct ib_cqe *cqe = wc->wr_cqe;
        struct rpcrdma_frwr *frwr = container_of(cqe, struct rpcrdma_frwr,
                                                 fr_cqe);

        /* WARNING: Only wr_cqe and status are reliable at this point */
        if (wc->status != IB_WC_SUCCESS) {
                frwr->fr_state = FRWR_FLUSHED_LI;
                __frwr_sendcompletion_flush(wc, "localinv");
        }
        complete(&frwr->fr_linv_done);
        trace_xprtrdma_wc_li_wake(wc, frwr);
}
/* Post a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
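/* Outline of the registration path below (descriptive note added for
 * readability; the function body is authoritative):
 *
 *   1. Take an MR in the INVALID state from the buffer pool, deferring
 *      recovery of any broken MRs encountered along the way.
 *   2. Build a scatterlist covering up to ri_max_frwr_depth segments,
 *      stopping early at a page discontinuity unless the device
 *      supports IB_MR_TYPE_SG_GAPS.
 *   3. DMA-map the scatterlist and pass it to ib_map_mr_sg().
 *   4. Bump the rkey's 8-bit key portion and fill in the REG_MR WR,
 *      which frwr_op_send later chains ahead of the RPC Send WR.
 */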
static struct rpcrdma_mr_seg *
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
            int nsegs, bool writing, struct rpcrdma_mr **out)
{
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        bool holes_ok = ia->ri_mrtype == IB_MR_TYPE_SG_GAPS;
        struct rpcrdma_frwr *frwr;
        struct rpcrdma_mr *mr;
        struct ib_mr *ibmr;
        struct ib_reg_wr *reg_wr;
        int i, n;
        u8 key;

        mr = NULL;
        do {
                if (mr)
                        rpcrdma_mr_defer_recovery(mr);
                mr = rpcrdma_mr_get(r_xprt);
                if (!mr)
                        return ERR_PTR(-EAGAIN);
        } while (mr->frwr.fr_state != FRWR_IS_INVALID);
        frwr = &mr->frwr;
        frwr->fr_state = FRWR_IS_VALID;

        if (nsegs > ia->ri_max_frwr_depth)
                nsegs = ia->ri_max_frwr_depth;
        for (i = 0; i < nsegs;) {
                if (seg->mr_page)
                        sg_set_page(&mr->mr_sg[i],
                                    seg->mr_page,
                                    seg->mr_len,
                                    offset_in_page(seg->mr_offset));
                else
                        sg_set_buf(&mr->mr_sg[i], seg->mr_offset,
                                   seg->mr_len);

                ++seg;
                ++i;
                if (holes_ok)
                        continue;
                if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
                    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
                        break;
        }
        mr->mr_dir = rpcrdma_data_dir(writing);

        mr->mr_nents = ib_dma_map_sg(ia->ri_device, mr->mr_sg, i, mr->mr_dir);
        if (!mr->mr_nents)
                goto out_dmamap_err;
        trace_xprtrdma_dma_map(mr);

        ibmr = frwr->fr_mr;
        n = ib_map_mr_sg(ibmr, mr->mr_sg, mr->mr_nents, NULL, PAGE_SIZE);
        if (unlikely(n != mr->mr_nents))
                goto out_mapmr_err;

        key = (u8)(ibmr->rkey & 0x000000FF);
        ib_update_fast_reg_key(ibmr, ++key);
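
        /* Note: ib_update_fast_reg_key() replaces only the low-order eight
         * bits of the MR's rkey. Incrementing that key portion before each
         * registration means a stale rkey left over from this MR's previous
         * registration will no longer be honored by the device.
         */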
        reg_wr = &frwr->fr_regwr;
        reg_wr->mr = ibmr;
        reg_wr->key = ibmr->rkey;
        reg_wr->access = writing ?
                         IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
                         IB_ACCESS_REMOTE_READ;

        mr->mr_handle = ibmr->rkey;
        mr->mr_length = ibmr->length;
        mr->mr_offset = ibmr->iova;

        *out = mr;
        return seg;

out_dmamap_err:
        pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
               mr->mr_sg, i);
        frwr->fr_state = FRWR_IS_INVALID;
        rpcrdma_mr_put(mr);
        return ERR_PTR(-EIO);

out_mapmr_err:
        pr_err("rpcrdma: failed to map mr %p (%d/%d)\n",
               frwr->fr_mr, n, mr->mr_nents);
        rpcrdma_mr_defer_recovery(mr);
        return ERR_PTR(-EIO);
}
/* Post Send WR containing the RPC Call message.
 *
 * For FRWR, chain any FastReg WRs to the Send WR. Only a
 * single ib_post_send call is needed to register memory
 * and then post the Send WR.
 */
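/* Informal picture of the WR chain constructed below (added note): each
 * registered MR contributes one REG_MR WR, linked ahead of the Send WR,
 *
 *   REG_MR -> REG_MR -> ... -> RDMA SEND
 *
 * so send queue ordering guarantees every registration is executed
 * before the Send that depends on it.
 */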
static int
frwr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
        struct ib_send_wr *post_wr;
        struct rpcrdma_mr *mr;

        post_wr = &req->rl_sendctx->sc_wr;
        list_for_each_entry(mr, &req->rl_registered, mr_list) {
                struct rpcrdma_frwr *frwr;

                frwr = &mr->frwr;

                frwr->fr_cqe.done = frwr_wc_fastreg;
                frwr->fr_regwr.wr.next = post_wr;
                frwr->fr_regwr.wr.wr_cqe = &frwr->fr_cqe;
                frwr->fr_regwr.wr.num_sge = 0;
                frwr->fr_regwr.wr.opcode = IB_WR_REG_MR;
                frwr->fr_regwr.wr.send_flags = 0;

                post_wr = &frwr->fr_regwr.wr;
        }

        /* If ib_post_send fails, the next ->send_request for
         * @req will queue these MRs for recovery.
         */
        return ib_post_send(ia->ri_id->qp, post_wr, NULL);
}
/* Handle a remotely invalidated MR on the @mrs list
 */
static void
frwr_op_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
{
        struct rpcrdma_mr *mr;

        list_for_each_entry(mr, mrs, mr_list)
                if (mr->mr_handle == rep->rr_inv_rkey) {
                        list_del_init(&mr->mr_list);
                        trace_xprtrdma_remoteinv(mr);
                        mr->frwr.fr_state = FRWR_IS_INVALID;
                        rpcrdma_mr_unmap_and_put(mr);
                        break; /* only one invalidated MR per RPC */
                }
}
/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 *
 * Caller ensures that @mrs is not empty before the call. This
 * function empties the list.
 */
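/* Outline of the function below (descriptive note): a LOCAL_INV WR is
 * built for every MR on @mrs and the WRs are chained and posted with a
 * single ib_post_send(); only the last WR is signaled, and the caller
 * sleeps on fr_linv_done until that completion fires. The MRs are then
 * DMA-unmapped and returned to the free list. If the post fails, the
 * MRs whose WRs were never posted are reset with __frwr_mr_reset().
 */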
static void
frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
{
        struct ib_send_wr *first, **prev, *last;
        const struct ib_send_wr *bad_wr;
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;
        struct rpcrdma_frwr *frwr;
        struct rpcrdma_mr *mr;
        int rc;

        /* ORDER: Invalidate all of the MRs first
         *
         * Chain the LOCAL_INV Work Requests and post them with
         * a single ib_post_send() call.
         */
        frwr = NULL;
        prev = &first;
        list_for_each_entry(mr, mrs, mr_list) {
                mr->frwr.fr_state = FRWR_IS_INVALID;

                frwr = &mr->frwr;
                trace_xprtrdma_localinv(mr);

                frwr->fr_cqe.done = frwr_wc_localinv;
                last = &frwr->fr_invwr;
                memset(last, 0, sizeof(*last));
                last->wr_cqe = &frwr->fr_cqe;
                last->opcode = IB_WR_LOCAL_INV;
                last->ex.invalidate_rkey = mr->mr_handle;

                *prev = last;
                prev = &last->next;
        }
        if (!frwr)
                goto unmap;

        /* Strong send queue ordering guarantees that when the
         * last WR in the chain completes, all WRs in the chain
         * are complete.
         */
        last->send_flags = IB_SEND_SIGNALED;
        frwr->fr_cqe.done = frwr_wc_localinv_wake;
        reinit_completion(&frwr->fr_linv_done);

        /* Transport disconnect drains the receive CQ before it
         * replaces the QP. The RPC reply handler won't call us
         * unless ri_id->qp is a valid pointer.
         */
        r_xprt->rx_stats.local_inv_needed++;
        bad_wr = NULL;
        rc = ib_post_send(ia->ri_id->qp, first, &bad_wr);
        if (bad_wr != first)
                wait_for_completion(&frwr->fr_linv_done);
        if (rc)
                goto out_release;

        /* ORDER: Now DMA unmap all of the MRs, and return
         * them to the free MR list.
         */
unmap:
        while (!list_empty(mrs)) {
                mr = rpcrdma_mr_pop(mrs);
                rpcrdma_mr_unmap_and_put(mr);
        }
        return;

out_release:
        pr_err("rpcrdma: FRWR invalidate ib_post_send returned %i\n", rc);

        /* Find and reset the MRs in the LOCAL_INV WRs that did not
         * get posted.
         */
        while (bad_wr) {
                frwr = container_of(bad_wr, struct rpcrdma_frwr,
                                    fr_invwr);
                mr = container_of(frwr, struct rpcrdma_mr, frwr);

                __frwr_mr_reset(ia, mr);

                bad_wr = bad_wr->next;
        }
}
const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
        .ro_map                         = frwr_op_map,
        .ro_send                        = frwr_op_send,
        .ro_reminv                      = frwr_op_reminv,
        .ro_unmap_sync                  = frwr_op_unmap_sync,
        .ro_recover_mr                  = frwr_op_recover_mr,
        .ro_open                        = frwr_op_open,
        .ro_maxpages                    = frwr_op_maxpages,
        .ro_init_mr                     = frwr_op_init_mr,
        .ro_release_mr                  = frwr_op_release_mr,
        .ro_displayname                 = "frwr",
        .ro_send_w_inv_ok               = RPCRDMA_CMP_F_SND_W_INV_OK,
};
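
/* Descriptive note: this table is the FRWR entry in xprtrdma's memory
 * registration method switch. In this generation of the code, the
 * connection setup path selects it once frwr_is_supported() reports
 * that the device implements FRWR, and the generic transport code then
 * invokes these methods indirectly through the ia's memreg ops pointer.
 */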