// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, 2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */
/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR).
 *
 * FRWR features ordered asynchronous registration and deregistration
 * of arbitrarily sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */
/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using a FAST_REG
 * Work Request (frwr_op_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_op_unmap_sync).
 *
 * Typically these Work Requests are not signaled, and neither are RDMA
 * SEND Work Requests (with the exception of signaling occasionally to
 * prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 *
 * As an optimization, frwr_op_unmap marks MRs INVALID before the
 * LOCAL_INV WR is posted. If posting succeeds, the MR is placed on
 * rb_mrs immediately so that no work (like managing a linked list
 * under a spinlock) is needed in the completion upcall.
 *
 * But this means that frwr_op_map() can occasionally encounter an MR
 * that is INVALID but the LOCAL_INV WR has not completed. Work Queue
 * ordering prevents a subsequent FAST_REG WR from executing against
 * that MR while it is still being invalidated.
 */
/* Transport recovery
 *
 * ->op_map and the transport connect worker cannot run at the same
 * time, but ->op_unmap can fire while the transport connect worker
 * is running. Thus MR recovery is handled in ->op_map, to guarantee
 * that recovered MRs are owned by a sending RPC, and not one where
 * ->op_unmap could fire at the same time transport reconnect is
 * being done.
 *
 * When the underlying transport disconnects, MRs are left in one of
 * four states:
 *
 * INVALID:	The MR was not in use before the QP entered ERROR state.
 *
 * VALID:	The MR was registered before the QP entered ERROR state.
 *
 * FLUSHED_FR:	The MR was being registered when the QP entered ERROR
 *		state, and the pending WR was flushed.
 *
 * FLUSHED_LI:	The MR was being invalidated when the QP entered ERROR
 *		state, and the pending WR was flushed.
 *
 * When frwr_op_map encounters FLUSHED and VALID MRs, they are recovered
 * with ib_dereg_mr and then are re-initialized. Because MR recovery
 * allocates fresh resources, it is deferred to a workqueue, and the
 * recovered MRs are placed back on the rb_mrs list when recovery is
 * complete. frwr_op_map allocates another MR for the current RPC while
 * the broken MR is reset.
 *
 * To ensure that frwr_op_map doesn't encounter an MR that is marked
 * INVALID but that is about to be flushed due to a previous transport
 * disconnect, the transport connect worker attempts to drain all
 * pending send queue WRs before the transport is reconnected.
 */
#include <linux/sunrpc/rpc_rdma.h>

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif
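
/* frwr_is_supported - Check whether a device can provide FRWR.
 *
 * FRWR requires the device's memory management extensions and a
 * non-zero fast registration page list depth.
 */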
bool
frwr_is_supported(struct rpcrdma_ia *ia)
{
	struct ib_device_attr *attrs = &ia->ri_device->attrs;

	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		goto out_not_supported;
	if (attrs->max_fast_reg_page_list_len == 0)
		goto out_not_supported;
	return true;

out_not_supported:
	pr_info("rpcrdma: 'frwr' mode is not supported by device %s\n",
		ia->ri_device->name);
	return false;
}
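
/* Allocate the ib_mr and the scatterlist backing one rpcrdma_mr, and
 * initialize the completion used to wait for LOCAL_INV to finish.
 */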
static int
frwr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
{
	unsigned int depth = ia->ri_max_frwr_depth;
	struct rpcrdma_frwr *frwr = &mr->frwr;
	int rc;

	frwr->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype, depth);
	if (IS_ERR(frwr->fr_mr))
		goto out_mr_err;

	mr->mr_sg = kcalloc(depth, sizeof(*mr->mr_sg), GFP_KERNEL);
	if (!mr->mr_sg)
		goto out_list_err;

	sg_init_table(mr->mr_sg, depth);
	init_completion(&frwr->fr_linv_done);
	return 0;

out_mr_err:
	rc = PTR_ERR(frwr->fr_mr);
	dprintk("RPC: %s: ib_alloc_mr status %i\n",
		__func__, rc);
	return rc;

out_list_err:
	rc = -ENOMEM;
	dprintk("RPC: %s: sg allocation failure\n",
		__func__);
	ib_dereg_mr(frwr->fr_mr);
	return rc;
}
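
/* Release all resources associated with one rpcrdma_mr: unlink it,
 * deregister the ib_mr, and free the scatterlist and the MR itself.
 */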
static void
frwr_op_release_mr(struct rpcrdma_mr *mr)
{
	int rc;

	/* Ensure MR is not on any rl_registered list */
	if (!list_empty(&mr->mr_list))
		list_del(&mr->mr_list);

	rc = ib_dereg_mr(mr->frwr.fr_mr);
	if (rc)
		pr_err("rpcrdma: final ib_dereg_mr for %p returned %i\n",
		       mr, rc);
	kfree(mr->mr_sg);
	kfree(mr);
}
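
/* Replace the underlying ib_mr of a broken FRWR with a freshly
 * allocated one, and mark the FRWR INVALID so it can be reused.
 */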
static int
__frwr_mr_reset(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
{
	struct rpcrdma_frwr *frwr = &mr->frwr;
	int rc;

	rc = ib_dereg_mr(frwr->fr_mr);
	if (rc) {
		pr_warn("rpcrdma: ib_dereg_mr status %d, frwr %p orphaned\n",
			rc, mr);
		return rc;
	}

	frwr->fr_mr = ib_alloc_mr(ia->ri_pd, ia->ri_mrtype,
				  ia->ri_max_frwr_depth);
	if (IS_ERR(frwr->fr_mr)) {
		pr_warn("rpcrdma: ib_alloc_mr status %ld, frwr %p orphaned\n",
			PTR_ERR(frwr->fr_mr), mr);
		return PTR_ERR(frwr->fr_mr);
	}

	dprintk("RPC: %s: recovered FRWR %p\n", __func__, frwr);
	frwr->fr_state = FRWR_IS_INVALID;
	return 0;
}
/* Reset of a single FRWR. Generate a fresh rkey by replacing the MR.
 */
static void
frwr_op_recover_mr(struct rpcrdma_mr *mr)
{
	enum rpcrdma_frwr_state state = mr->frwr.fr_state;
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	int rc;

	rc = __frwr_mr_reset(ia, mr);
	if (state != FRWR_FLUSHED_LI) {
		trace_xprtrdma_dma_unmap(mr);
		ib_dma_unmap_sg(ia->ri_device,
				mr->mr_sg, mr->mr_nents, mr->mr_dir);
	}
	if (rc)
		goto out_release;

	rpcrdma_mr_put(mr);
	r_xprt->rx_stats.mrs_recovered++;
	return;

out_release:
	pr_err("rpcrdma: FRWR reset failed %d, %p release\n", rc, mr);
	r_xprt->rx_stats.mrs_orphaned++;

	spin_lock(&r_xprt->rx_buf.rb_mrlock);
	list_del(&mr->mr_all);
	spin_unlock(&r_xprt->rx_buf.rb_mrlock);

	frwr_op_release_mr(mr);
}
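
/* Size the send queue: each RPC may need FRWR registration and
 * invalidation WRs for its head, pagelist, and tail, plus the SEND
 * itself, so max_send_wr is scaled by the computed depth and capped
 * at the device's max_qp_wr.
 */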
static int
frwr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	     struct rpcrdma_create_data_internal *cdata)
{
	struct ib_device_attr *attrs = &ia->ri_device->attrs;
	int depth, delta;

	ia->ri_mrtype = IB_MR_TYPE_MEM_REG;
	if (attrs->device_cap_flags & IB_DEVICE_SG_GAPS_REG)
		ia->ri_mrtype = IB_MR_TYPE_SG_GAPS;

	ia->ri_max_frwr_depth =
			min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
			      attrs->max_fast_reg_page_list_len);
	dprintk("RPC: %s: device's max FR page list len = %u\n",
		__func__, ia->ri_max_frwr_depth);

	/* Add room for frwr register and invalidate WRs.
	 * 1. FRWR reg WR for head
	 * 2. FRWR invalidate WR for head
	 * 3. N FRWR reg WRs for pagelist
	 * 4. N FRWR invalidate WRs for pagelist
	 * 5. FRWR reg WR for tail
	 * 6. FRWR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRWR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ia->ri_max_frwr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ia->ri_max_frwr_depth;
		do {
			depth += 2; /* FRWR reg + invalidate */
			delta -= ia->ri_max_frwr_depth;
		} while (delta > 0);
	}

	ep->rep_attr.cap.max_send_wr *= depth;
	if (ep->rep_attr.cap.max_send_wr > attrs->max_qp_wr) {
		cdata->max_requests = attrs->max_qp_wr / depth;
		if (!cdata->max_requests)
			return -EINVAL;
		ep->rep_attr.cap.max_send_wr = cdata->max_requests *
					       depth;
	}

	ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
				ia->ri_max_frwr_depth);
	return 0;
}
/* FRWR mode conveys a list of pages per chunk segment. The
 * maximum length of that list is the FRWR page list depth.
 */
static size_t
frwr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;

	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     RPCRDMA_MAX_HDR_SEGS * ia->ri_max_frwr_depth);
}
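
/* Report send completion errors, except for flush errors, which are
 * expected when a transport disconnects.
 */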
static void
__frwr_sendcompletion_flush(struct ib_wc *wc, const char *wr)
{
	if (wc->status != IB_WC_WR_FLUSH_ERR)
		pr_err("rpcrdma: %s: %s (%u/0x%x)\n",
		       wr, ib_wc_status_msg(wc->status),
		       wc->status, wc->vendor_err);
}
/**
 * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr =
			container_of(cqe, struct rpcrdma_frwr, fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		frwr->fr_state = FRWR_FLUSHED_FR;
		__frwr_sendcompletion_flush(wc, "fastreg");
	}
	trace_xprtrdma_wc_fastreg(wc, frwr);
}
/**
 * frwr_wc_localinv - Invoked by RDMA provider for a flushed LocalInv WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 */
static void
frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr = container_of(cqe, struct rpcrdma_frwr,
						 fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		frwr->fr_state = FRWR_FLUSHED_LI;
		__frwr_sendcompletion_flush(wc, "localinv");
	}
	trace_xprtrdma_wc_li(wc, frwr);
}
/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for a signaled LocalInv WC
 * @cq:	completion queue (ignored)
 * @wc:	completed WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void
frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_frwr *frwr = container_of(cqe, struct rpcrdma_frwr,
						 fr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	if (wc->status != IB_WC_SUCCESS) {
		frwr->fr_state = FRWR_FLUSHED_LI;
		__frwr_sendcompletion_flush(wc, "localinv");
	}
	complete(&frwr->fr_linv_done);
	trace_xprtrdma_wc_li_wake(wc, frwr);
}
/* Post a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static struct rpcrdma_mr_seg *
frwr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	    int nsegs, bool writing, struct rpcrdma_mr **out)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	bool holes_ok = ia->ri_mrtype == IB_MR_TYPE_SG_GAPS;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	struct ib_mr *ibmr;
	struct ib_reg_wr *reg_wr;
	struct ib_send_wr *bad_wr;
	int rc, i, n;
	u8 key;

	mr = NULL;
	do {
		if (mr)
			rpcrdma_mr_defer_recovery(mr);
		mr = rpcrdma_mr_get(r_xprt);
		if (!mr)
			return ERR_PTR(-ENOBUFS);
	} while (mr->frwr.fr_state != FRWR_IS_INVALID);
	frwr = &mr->frwr;
	frwr->fr_state = FRWR_IS_VALID;

	if (nsegs > ia->ri_max_frwr_depth)
		nsegs = ia->ri_max_frwr_depth;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mr->mr_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mr->mr_sg[i], seg->mr_offset,
				   seg->mr_len);

		++seg;
		++i;
		if (holes_ok)
			continue;
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mr->mr_dir = rpcrdma_data_dir(writing);

	mr->mr_nents = ib_dma_map_sg(ia->ri_device, mr->mr_sg, i, mr->mr_dir);
	if (!mr->mr_nents)
		goto out_dmamap_err;

	ibmr = frwr->fr_mr;
	n = ib_map_mr_sg(ibmr, mr->mr_sg, mr->mr_nents, NULL, PAGE_SIZE);
	if (unlikely(n != mr->mr_nents))
		goto out_mapmr_err;

	key = (u8)(ibmr->rkey & 0x000000FF);
	ib_update_fast_reg_key(ibmr, ++key);

	reg_wr = &frwr->fr_regwr;
	reg_wr->wr.next = NULL;
	reg_wr->wr.opcode = IB_WR_REG_MR;
	frwr->fr_cqe.done = frwr_wc_fastreg;
	reg_wr->wr.wr_cqe = &frwr->fr_cqe;
	reg_wr->wr.num_sge = 0;
	reg_wr->wr.send_flags = 0;
	reg_wr->mr = ibmr;
	reg_wr->key = ibmr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	rc = ib_post_send(ia->ri_id->qp, &reg_wr->wr, &bad_wr);
	if (rc)
		goto out_senderr;

	mr->mr_handle = ibmr->rkey;
	mr->mr_length = ibmr->length;
	mr->mr_offset = ibmr->iova;

	*out = mr;
	return seg;

out_dmamap_err:
	pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
	       mr->mr_sg, i);
	frwr->fr_state = FRWR_IS_INVALID;
	rpcrdma_mr_put(mr);
	return ERR_PTR(-EIO);

out_mapmr_err:
	pr_err("rpcrdma: failed to map mr %p (%d/%d)\n",
	       frwr->fr_mr, n, mr->mr_nents);
	rpcrdma_mr_defer_recovery(mr);
	return ERR_PTR(-EIO);

out_senderr:
	pr_err("rpcrdma: FRWR registration ib_post_send returned %i\n", rc);
	rpcrdma_mr_defer_recovery(mr);
	return ERR_PTR(-ENOTCONN);
}
/* Handle a remotely invalidated mr on the @mrs list
 */
static void
frwr_op_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
{
	struct rpcrdma_mr *mr;

	list_for_each_entry(mr, mrs, mr_list)
		if (mr->mr_handle == rep->rr_inv_rkey) {
			list_del(&mr->mr_list);
			trace_xprtrdma_remoteinv(mr);
			mr->frwr.fr_state = FRWR_IS_INVALID;
			rpcrdma_mr_unmap_and_put(mr);
			break;	/* only one invalidated MR per RPC */
		}
}
/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 *
 * Caller ensures that @mrs is not empty before the call. This
 * function empties the list.
 */
static void
frwr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
{
	struct ib_send_wr *first, **prev, *last, *bad_wr;
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_frwr *frwr;
	struct rpcrdma_mr *mr;
	int rc;

	/* ORDER: Invalidate all of the MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	frwr = NULL;
	prev = &first;
	list_for_each_entry(mr, mrs, mr_list) {
		mr->frwr.fr_state = FRWR_IS_INVALID;

		frwr = &mr->frwr;
		trace_xprtrdma_localinv(mr);

		frwr->fr_cqe.done = frwr_wc_localinv;
		last = &frwr->fr_invwr;
		memset(last, 0, sizeof(*last));
		last->wr_cqe = &frwr->fr_cqe;
		last->opcode = IB_WR_LOCAL_INV;
		last->ex.invalidate_rkey = mr->mr_handle;

		*prev = last;
		prev = &last->next;
	}
	if (!frwr)
		goto unmap;

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	last->send_flags = IB_SEND_SIGNALED;
	frwr->fr_cqe.done = frwr_wc_localinv_wake;
	reinit_completion(&frwr->fr_linv_done);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless ri_id->qp is a valid pointer.
	 */
	r_xprt->rx_stats.local_inv_needed++;
	bad_wr = NULL;
	rc = ib_post_send(ia->ri_id->qp, first, &bad_wr);
	if (bad_wr != first)
		wait_for_completion(&frwr->fr_linv_done);
	if (rc)
		goto out_release;

	/* ORDER: Now DMA unmap all of the MRs, and return
	 * them to the free MR list.
	 */
unmap:
	while (!list_empty(mrs)) {
		mr = rpcrdma_mr_pop(mrs);
		rpcrdma_mr_unmap_and_put(mr);
	}
	return;

out_release:
	pr_err("rpcrdma: FRWR invalidate ib_post_send returned %i\n", rc);

	/* Find and reset the MRs in the LOCAL_INV WRs that did not
	 * get posted.
	 */
	while (bad_wr) {
		frwr = container_of(bad_wr, struct rpcrdma_frwr,
				    fr_invwr);
		mr = container_of(frwr, struct rpcrdma_mr, frwr);

		__frwr_mr_reset(ia, mr);

		bad_wr = bad_wr->next;
	}
	goto unmap;
}
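
/* FRWR memory registration method table. The generic xprtrdma code
 * reaches these entry points through the rpcrdma_memreg_ops pointer
 * selected at transport setup (presumably when frwr_is_supported
 * reports that the device can do FRWR); see xprt_rdma.h for the
 * rpcrdma_memreg_ops definition.
 */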
const struct rpcrdma_memreg_ops rpcrdma_frwr_memreg_ops = {
	.ro_map				= frwr_op_map,
	.ro_reminv			= frwr_op_reminv,
	.ro_unmap_sync			= frwr_op_unmap_sync,
	.ro_recover_mr			= frwr_op_recover_mr,
	.ro_open			= frwr_op_open,
	.ro_maxpages			= frwr_op_maxpages,
	.ro_init_mr			= frwr_op_init_mr,
	.ro_release_mr			= frwr_op_release_mr,
	.ro_displayname			= "frwr",
	.ro_send_w_inv_ok		= RPCRDMA_CMP_F_SND_W_INV_OK,
};