// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, 2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Registration Work
 * Requests (FRWR).
 *
 * FRWR features ordered asynchronous registration and invalidation
 * of arbitrarily-sized memory regions. This is the fastest and safest
 * but most complex memory registration mode.
 */

/* Normal operation
 *
 * A Memory Region is prepared for RDMA Read or Write using a FAST_REG
 * Work Request (frwr_map). When the RDMA operation is finished, this
 * Memory Region is invalidated using a LOCAL_INV Work Request
 * (frwr_unmap_async and frwr_unmap_sync).
 *
 * Typically FAST_REG Work Requests are not signaled, and neither are
 * RDMA Send Work Requests (with the exception of signaling occasionally
 * to prevent provider work queue overflows). This greatly reduces HCA
 * interrupt workload.
 */

/* Transport recovery
 *
 * frwr_map and frwr_unmap_* cannot run at the same time the transport
 * connect worker is running. The connect worker holds the transport
 * send lock, just as ->send_request does. This prevents frwr_map and
 * the connect worker from running concurrently. When a connection is
 * closed, the Receive completion queue is drained before allowing
 * the connect worker to get control. This prevents frwr_unmap and the
 * connect worker from running concurrently.
 *
 * When the underlying transport disconnects, MRs that are in flight
 * are flushed and are likely unusable. Thus all MRs are destroyed.
 * New MRs are created on demand.
 */

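/* Per-RPC MR life cycle, in outline:
 *
 *   frwr_map()          - build a REG_MR WR that registers the RPC's
 *                         data buffers with the device
 *   frwr_send()         - chain the REG_MR WR(s) ahead of the Send WR
 *                         and post the whole chain with one ib_post_send
 *   frwr_reminv()       - if the server remotely invalidated an MR,
 *                         simply return it to the request's free list
 *   frwr_unmap_sync() /
 *   frwr_unmap_async()  - otherwise post LOCAL_INV WRs so the MRs are
 *                         fenced before the RPC consumer touches the data
 */
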
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

static void frwr_cid_init(struct rpcrdma_ep *ep,
			  struct rpcrdma_mr *mr)
{
	struct rpc_rdma_cid *cid = &mr->mr_cid;

	cid->ci_queue_id = ep->re_attr.send_cq->res.id;
	cid->ci_completion_id = mr->mr_ibmr->res.id;
}

static void frwr_mr_unmap(struct rpcrdma_mr *mr)
{
	if (mr->mr_device) {
		trace_xprtrdma_mr_unmap(mr);
		ib_dma_unmap_sg(mr->mr_device, mr->mr_sg, mr->mr_nents,
				mr->mr_dir);
		mr->mr_device = NULL;
	}
}

/**
 * frwr_mr_release - Destroy one MR
 * @mr: MR allocated by frwr_mr_init
 *
 */
void frwr_mr_release(struct rpcrdma_mr *mr)
{
	int rc;

	frwr_mr_unmap(mr);

	rc = ib_dereg_mr(mr->mr_ibmr);
	if (rc)
		trace_xprtrdma_frwr_dereg(mr, rc);
	kfree(mr->mr_sg);
	kfree(mr);
}

static void frwr_mr_put(struct rpcrdma_mr *mr)
{
	frwr_mr_unmap(mr);

	/* The MR is returned to the req's MR free list instead
	 * of to the xprt's MR free list. No spinlock is needed.
	 */
	rpcrdma_mr_push(mr, &mr->mr_req->rl_free_mrs);
}

/**
 * frwr_reset - Place MRs back on @req's free list
 * @req: request to reset
 *
 * Used after a failed marshal. For FRWR, this means the MRs
 * don't have to be fully released and recreated.
 *
 * NB: This is safe only as long as none of @req's MRs are
 * involved with an ongoing asynchronous FAST_REG or LOCAL_INV
 * Work Request.
 */
void frwr_reset(struct rpcrdma_req *req)
{
	struct rpcrdma_mr *mr;

	while ((mr = rpcrdma_mr_pop(&req->rl_registered)))
		frwr_mr_put(mr);
}

/**
 * frwr_mr_init - Initialize one MR
 * @r_xprt: controlling transport instance
 * @mr: generic MR to prepare for FRWR
 *
 * Returns zero if successful. Otherwise a negative errno
 * is returned.
 */
int frwr_mr_init(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr *mr)
{
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	unsigned int depth = ep->re_max_fr_depth;
	struct scatterlist *sg;
	struct ib_mr *frmr;

	sg = kcalloc_node(depth, sizeof(*sg), XPRTRDMA_GFP_FLAGS,
			  ibdev_to_node(ep->re_id->device));
	if (!sg)
		return -ENOMEM;

	frmr = ib_alloc_mr(ep->re_pd, ep->re_mrtype, depth);
	if (IS_ERR(frmr))
		goto out_mr_err;

	mr->mr_xprt = r_xprt;
	mr->mr_ibmr = frmr;
	mr->mr_device = NULL;
	INIT_LIST_HEAD(&mr->mr_list);
	init_completion(&mr->mr_linv_done);
	frwr_cid_init(ep, mr);

	sg_init_table(sg, depth);
	mr->mr_sg = sg;
	return 0;

out_mr_err:
	kfree(sg);
	trace_xprtrdma_frwr_alloc(mr, PTR_ERR(frmr));
	return PTR_ERR(frmr);
}

/**
 * frwr_query_device - Prepare a transport for use with FRWR
 * @ep: endpoint to fill in
 * @device: RDMA device to query
 *
 * On success, sets:
 *	ep->re_attr
 *	ep->re_max_requests
 *	ep->re_max_rdma_segs
 *	ep->re_max_fr_depth
 *	ep->re_mrtype
 *
 * Return values:
 *   On success, returns zero.
 *   %-EINVAL - the device does not support FRWR memory registration
 *   %-ENOMEM - the device is not sufficiently capable for NFS/RDMA
 */
int frwr_query_device(struct rpcrdma_ep *ep, const struct ib_device *device)
{
	const struct ib_device_attr *attrs = &device->attrs;
	int max_qp_wr, depth, delta;
	unsigned int max_sge;

	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) ||
	    attrs->max_fast_reg_page_list_len == 0) {
		pr_err("rpcrdma: 'frwr' mode is not supported by device %s\n",
		       device->name);
		return -EINVAL;
	}

	max_sge = min_t(unsigned int, attrs->max_send_sge,
			RPCRDMA_MAX_SEND_SGES);
	if (max_sge < RPCRDMA_MIN_SEND_SGES) {
		pr_err("rpcrdma: HCA provides only %u send SGEs\n", max_sge);
		return -ENOMEM;
	}
	ep->re_attr.cap.max_send_sge = max_sge;
	ep->re_attr.cap.max_recv_sge = 1;

	ep->re_mrtype = IB_MR_TYPE_MEM_REG;
	if (attrs->kernel_cap_flags & IBK_SG_GAPS_REG)
		ep->re_mrtype = IB_MR_TYPE_SG_GAPS;

	/* Quirk: Some devices advertise a large max_fast_reg_page_list_len
	 * capability, but perform optimally when the MRs are not larger
	 * than a page.
	 */
	if (attrs->max_sge_rd > RPCRDMA_MAX_HDR_SEGS)
		ep->re_max_fr_depth = attrs->max_sge_rd;
	else
		ep->re_max_fr_depth = attrs->max_fast_reg_page_list_len;
	if (ep->re_max_fr_depth > RPCRDMA_MAX_DATA_SEGS)
		ep->re_max_fr_depth = RPCRDMA_MAX_DATA_SEGS;

	/* Add room for frwr register and invalidate WRs.
	 * 1. FRWR reg WR for head
	 * 2. FRWR invalidate WR for head
	 * 3. N FRWR reg WRs for pagelist
	 * 4. N FRWR invalidate WRs for pagelist
	 * 5. FRWR reg WR for tail
	 * 6. FRWR invalidate WR for tail
	 * 7. The RDMA_SEND WR
	 */
	depth = 7;

	/* Calculate N if the device max FRWR depth is smaller than
	 * RPCRDMA_MAX_DATA_SEGS.
	 */
	if (ep->re_max_fr_depth < RPCRDMA_MAX_DATA_SEGS) {
		delta = RPCRDMA_MAX_DATA_SEGS - ep->re_max_fr_depth;
		do {
			depth += 2; /* FRWR reg + invalidate */
			delta -= ep->re_max_fr_depth;
		} while (delta > 0);
	}

	max_qp_wr = attrs->max_qp_wr;
	max_qp_wr -= RPCRDMA_BACKWARD_WRS;
	max_qp_wr -= 1;
	if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
		return -ENOMEM;
	if (ep->re_max_requests > max_qp_wr)
		ep->re_max_requests = max_qp_wr;
	ep->re_attr.cap.max_send_wr = ep->re_max_requests * depth;
	if (ep->re_attr.cap.max_send_wr > max_qp_wr) {
		ep->re_max_requests = max_qp_wr / depth;
		if (!ep->re_max_requests)
			return -ENOMEM;
		ep->re_attr.cap.max_send_wr = ep->re_max_requests * depth;
	}
	ep->re_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
	ep->re_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
	ep->re_attr.cap.max_recv_wr = ep->re_max_requests;
	ep->re_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
	ep->re_attr.cap.max_recv_wr += RPCRDMA_MAX_RECV_BATCH;
	ep->re_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */

	ep->re_max_rdma_segs =
		DIV_ROUND_UP(RPCRDMA_MAX_DATA_SEGS, ep->re_max_fr_depth);
	/* Reply chunks require segments for head and tail buffers */
	ep->re_max_rdma_segs += 2;
	if (ep->re_max_rdma_segs > RPCRDMA_MAX_HDR_SEGS)
		ep->re_max_rdma_segs = RPCRDMA_MAX_HDR_SEGS;

	/* Ensure the underlying device is capable of conveying the
	 * largest r/wsize NFS will ask for. This guarantees that
	 * failing over from one RDMA device to another will not
	 * break NFS I/O.
	 */
	if ((ep->re_max_rdma_segs * ep->re_max_fr_depth) < RPCRDMA_MAX_SEGS)
		return -ENOMEM;

	return 0;
}

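/* Worked example for the Send Queue accounting above (values are
 * illustrative only): with RPCRDMA_MAX_DATA_SEGS of 64 and a device
 * FRWR depth of 16, a maximally-sized payload needs four MRs, so the
 * do/while loop adds three extra reg/invalidate pairs and depth ends
 * up as 7 + 2 * 3 = 13 Send Queue entries per RPC credit. max_send_wr
 * is then re_max_requests * 13, plus the backchannel and drain slots
 * added afterward.
 */
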
/**
 * frwr_map - Register a memory region
 * @r_xprt: controlling transport
 * @seg: memory region co-ordinates
 * @nsegs: number of segments remaining
 * @writing: true when RDMA Write will be used
 * @xid: XID of RPC using the registered memory
 * @mr: MR to fill in
 *
 * Prepare a REG_MR Work Request to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 *
 * Returns the next segment or a negative errno pointer.
 * On success, @mr is filled in.
 */
struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
				struct rpcrdma_mr_seg *seg,
				int nsegs, bool writing, __be32 xid,
				struct rpcrdma_mr *mr)
{
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	struct ib_reg_wr *reg_wr;
	int i, n, dma_nents;
	struct ib_mr *ibmr;
	u8 key;

	if (nsegs > ep->re_max_fr_depth)
		nsegs = ep->re_max_fr_depth;
	for (i = 0; i < nsegs;) {
		sg_set_page(&mr->mr_sg[i], seg->mr_page,
			    seg->mr_len, seg->mr_offset);

		++seg;
		++i;
		if (ep->re_mrtype == IB_MR_TYPE_SG_GAPS)
			continue;
		if ((i < nsegs && seg->mr_offset) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mr->mr_dir = rpcrdma_data_dir(writing);
	mr->mr_nents = i;

	dma_nents = ib_dma_map_sg(ep->re_id->device, mr->mr_sg, mr->mr_nents,
				  mr->mr_dir);
	if (!dma_nents)
		goto out_dmamap_err;
	mr->mr_device = ep->re_id->device;

	ibmr = mr->mr_ibmr;
	n = ib_map_mr_sg(ibmr, mr->mr_sg, dma_nents, NULL, PAGE_SIZE);
	if (n != dma_nents)
		goto out_mapmr_err;

	ibmr->iova &= 0x00000000ffffffff;
	ibmr->iova |= ((u64)be32_to_cpu(xid)) << 32;
	key = (u8)(ibmr->rkey & 0x000000FF);
	ib_update_fast_reg_key(ibmr, ++key);

	reg_wr = &mr->mr_regwr;
	reg_wr->mr = ibmr;
	reg_wr->key = ibmr->rkey;
	reg_wr->access = writing ?
			 IB_ACCESS_REMOTE_WRITE | IB_ACCESS_LOCAL_WRITE :
			 IB_ACCESS_REMOTE_READ;

	mr->mr_handle = ibmr->rkey;
	mr->mr_length = ibmr->length;
	mr->mr_offset = ibmr->iova;
	trace_xprtrdma_mr_map(mr);

	return seg;

out_dmamap_err:
	trace_xprtrdma_frwr_sgerr(mr, i);
	return ERR_PTR(-EIO);

out_mapmr_err:
	trace_xprtrdma_frwr_maperr(mr, n);
	return ERR_PTR(-EIO);
}

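/* Notes on frwr_map() above: the RPC's XID is planted in the upper 32
 * bits of the MR's iova so that RDMA offsets captured on the wire can
 * be matched to the RPC that registered them, and the low-order byte
 * of the rkey is advanced before each registration so that a peer
 * access using a stale rkey from a previous registration is rejected
 * by the HCA.
 */
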
/**
 * frwr_wc_fastreg - Invoked by RDMA provider for a flushed FastReg WC
 * @cq: completion queue
 * @wc: WCE for a completed FastReg WR
 *
 * Each flushed MR gets destroyed after the QP has drained.
 */
static void frwr_wc_fastreg(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_fastreg(wc, &mr->mr_cid);

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_send - post Send WRs containing the RPC Call message
 * @r_xprt: controlling transport instance
 * @req: prepared RPC Call
 *
 * For FRWR, chain any FastReg WRs to the Send WR. Only a
 * single ib_post_send call is needed to register memory
 * and then post the Send WR.
 *
 * Returns the return code from ib_post_send.
 *
 * Caller must hold the transport send lock to ensure that the
 * pointers to the transport's rdma_cm_id and QP are stable.
 */
int frwr_send(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *post_wr, *send_wr = &req->rl_wr;
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	struct rpcrdma_mr *mr;
	unsigned int num_wrs;
	int ret;

	num_wrs = 1;
	post_wr = send_wr;
	list_for_each_entry(mr, &req->rl_registered, mr_list) {
		trace_xprtrdma_mr_fastreg(mr);

		mr->mr_cqe.done = frwr_wc_fastreg;
		mr->mr_regwr.wr.next = post_wr;
		mr->mr_regwr.wr.wr_cqe = &mr->mr_cqe;
		mr->mr_regwr.wr.num_sge = 0;
		mr->mr_regwr.wr.opcode = IB_WR_REG_MR;
		mr->mr_regwr.wr.send_flags = 0;
		post_wr = &mr->mr_regwr.wr;
		++num_wrs;
	}

	if ((kref_read(&req->rl_kref) > 1) || num_wrs > ep->re_send_count) {
		send_wr->send_flags |= IB_SEND_SIGNALED;
		ep->re_send_count = min_t(unsigned int, ep->re_send_batch,
					  num_wrs - ep->re_send_count);
	} else {
		send_wr->send_flags &= ~IB_SEND_SIGNALED;
		ep->re_send_count -= num_wrs;
	}

	trace_xprtrdma_post_send(req);
	ret = ib_post_send(ep->re_id->qp, post_wr, NULL);
	if (ret)
		trace_xprtrdma_post_send_err(r_xprt, req, ret);
	return ret;
}

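/* The signaling logic in frwr_send() above implements the batching
 * described at the top of this file: Send WRs remain unsignaled until
 * the unsignaled budget in ep->re_send_count would be exhausted, or
 * the request holds an extra reference that only a Send completion
 * can put. At that point one signaled Send flushes the batch and the
 * budget is replenished, capped at ep->re_send_batch.
 */
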
/**
 * frwr_reminv - handle a remotely invalidated mr on the @mrs list
 * @rep: Received reply
 * @mrs: list of MRs to check
 *
 */
void frwr_reminv(struct rpcrdma_rep *rep, struct list_head *mrs)
{
	struct rpcrdma_mr *mr;

	list_for_each_entry(mr, mrs, mr_list)
		if (mr->mr_handle == rep->rr_inv_rkey) {
			list_del_init(&mr->mr_list);
			trace_xprtrdma_mr_reminv(mr);
			frwr_mr_put(mr);
			break;	/* only one invalidated MR per RPC */
		}
}

static void frwr_mr_done(struct ib_wc *wc, struct rpcrdma_mr *mr)
{
	if (likely(wc->status == IB_WC_SUCCESS))
		frwr_mr_put(mr);
}

/**
 * frwr_wc_localinv - Invoked by RDMA provider for a LOCAL_INV WC
 * @cq: completion queue
 * @wc: WCE for a completed LocalInv WR
 *
 */
static void frwr_wc_localinv(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li(wc, &mr->mr_cid);
	frwr_mr_done(wc, mr);

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_wc_localinv_wake - Invoked by RDMA provider for a LOCAL_INV WC
 * @cq: completion queue
 * @wc: WCE for a completed LocalInv WR
 *
 * Awaken anyone waiting for an MR to finish being fenced.
 */
static void frwr_wc_localinv_wake(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li_wake(wc, &mr->mr_cid);
	frwr_mr_done(wc, mr);
	complete(&mr->mr_linv_done);

	rpcrdma_flush_disconnect(cq->cq_context, wc);
}

/**
 * frwr_unmap_sync - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * Sleeps until it is safe for the host CPU to access the previously mapped
 * memory regions. This guarantees that registered MRs are properly fenced
 * from the server before the RPC consumer accesses the data in them. It
 * also ensures proper Send flow control: waking the next RPC waits until
 * this RPC has relinquished all its Send Queue entries.
 */
void frwr_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *first, **prev, *last;
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	const struct ib_send_wr *bad_wr;
	struct rpcrdma_mr *mr;
	int rc;

	/* ORDER: Invalidate all of the MRs first
	 *
	 * Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	prev = &first;
	mr = rpcrdma_mr_pop(&req->rl_registered);
	do {
		trace_xprtrdma_mr_localinv(mr);
		r_xprt->rx_stats.local_inv_needed++;

		last = &mr->mr_invwr;
		last->next = NULL;
		last->wr_cqe = &mr->mr_cqe;
		last->sg_list = NULL;
		last->num_sge = 0;
		last->opcode = IB_WR_LOCAL_INV;
		last->send_flags = IB_SEND_SIGNALED;
		last->ex.invalidate_rkey = mr->mr_handle;

		last->wr_cqe->done = frwr_wc_localinv;

		*prev = last;
		prev = &last->next;
	} while ((mr = rpcrdma_mr_pop(&req->rl_registered)));

	mr = container_of(last, struct rpcrdma_mr, mr_invwr);

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete.
	 */
	last->wr_cqe->done = frwr_wc_localinv_wake;
	reinit_completion(&mr->mr_linv_done);

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless re_id->qp is a valid pointer.
	 */
	bad_wr = NULL;
	rc = ib_post_send(ep->re_id->qp, first, &bad_wr);

	/* The final LOCAL_INV WR in the chain is supposed to
	 * do the wake. If it was never posted, the wake will
	 * not happen, so don't wait in that case.
	 */
	if (bad_wr != first)
		wait_for_completion(&mr->mr_linv_done);
	if (!rc)
		return;

	/* On error, the MRs get destroyed once the QP has drained. */
	trace_xprtrdma_post_linv_err(req, rc);

	/* Force a connection loss to ensure complete recovery.
	 */
	rpcrdma_force_disconnect(ep);
}

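/* frwr_unmap_sync() above and frwr_unmap_async() below fence MRs the
 * same way; they differ only in how the waiter learns about it. The
 * sync variant parks the caller on mr_linv_done until the final
 * LOCAL_INV completes, while the async variant lets
 * frwr_wc_localinv_done() finish the RPC from the completion handler
 * without blocking.
 */
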
/**
 * frwr_wc_localinv_done - Invoked by RDMA provider for a signaled LOCAL_INV WC
 * @cq: completion queue
 * @wc: WCE for a completed LocalInv WR
 *
 */
static void frwr_wc_localinv_done(struct ib_cq *cq, struct ib_wc *wc)
{
	struct ib_cqe *cqe = wc->wr_cqe;
	struct rpcrdma_mr *mr = container_of(cqe, struct rpcrdma_mr, mr_cqe);
	struct rpcrdma_rep *rep;

	/* WARNING: Only wr_cqe and status are reliable at this point */
	trace_xprtrdma_wc_li_done(wc, &mr->mr_cid);

	/* Ensure that @rep is generated before the MR is released */
	rep = mr->mr_req->rl_reply;
	smp_rmb();

	if (wc->status != IB_WC_SUCCESS) {
		if (rep)
			rpcrdma_unpin_rqst(rep);
		rpcrdma_flush_disconnect(cq->cq_context, wc);
		return;
	}
	frwr_mr_put(mr);
	rpcrdma_complete_rqst(rep);
}

/**
 * frwr_unmap_async - invalidate memory regions that were registered for @req
 * @r_xprt: controlling transport instance
 * @req: rpcrdma_req with a non-empty list of MRs to process
 *
 * This guarantees that registered MRs are properly fenced from the
 * server before the RPC consumer accesses the data in them. It also
 * ensures proper Send flow control: waking the next RPC waits until
 * this RPC has relinquished all its Send Queue entries.
 */
void frwr_unmap_async(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
	struct ib_send_wr *first, *last, **prev;
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	struct rpcrdma_mr *mr;
	int rc;

	/* Chain the LOCAL_INV Work Requests and post them with
	 * a single ib_post_send() call.
	 */
	prev = &first;
	mr = rpcrdma_mr_pop(&req->rl_registered);
	do {
		trace_xprtrdma_mr_localinv(mr);
		r_xprt->rx_stats.local_inv_needed++;

		last = &mr->mr_invwr;
		last->next = NULL;
		last->wr_cqe = &mr->mr_cqe;
		last->sg_list = NULL;
		last->num_sge = 0;
		last->opcode = IB_WR_LOCAL_INV;
		last->send_flags = IB_SEND_SIGNALED;
		last->ex.invalidate_rkey = mr->mr_handle;

		last->wr_cqe->done = frwr_wc_localinv;

		*prev = last;
		prev = &last->next;
	} while ((mr = rpcrdma_mr_pop(&req->rl_registered)));

	/* Strong send queue ordering guarantees that when the
	 * last WR in the chain completes, all WRs in the chain
	 * are complete. The last completion will wake up the
	 * RPC waiter.
	 */
	last->wr_cqe->done = frwr_wc_localinv_done;

	/* Transport disconnect drains the receive CQ before it
	 * replaces the QP. The RPC reply handler won't call us
	 * unless re_id->qp is a valid pointer.
	 */
	rc = ib_post_send(ep->re_id->qp, first, NULL);
	if (!rc)
		return;

	/* On error, the MRs get destroyed once the QP has drained. */
	trace_xprtrdma_post_linv_err(req, rc);

	/* The final LOCAL_INV WR in the chain is supposed to
	 * do the wake. If it was never posted, the wake does
	 * not happen. Unpin the rqst in preparation for its
	 * retransmission.
	 */
	rpcrdma_unpin_rqst(req->rl_reply);

	/* Force a connection loss to ensure complete recovery.
	 */
	rpcrdma_force_disconnect(ep);
}

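/* frwr_wp_create() below registers ep->re_write_pad, a small scratch
 * buffer of XDR_UNIT bytes, once per connection. Appending this MR to
 * an unaligned Write chunk gives the server somewhere to place XDR
 * roundup padding without touching the RPC's own receive buffers.
 */
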
/**
 * frwr_wp_create - Create an MR for padding Write chunks
 * @r_xprt: transport resources to use
 *
 * Return 0 on success, negative errno on failure.
 */
int frwr_wp_create(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_ep *ep = r_xprt->rx_ep;
	struct rpcrdma_mr_seg seg;
	struct rpcrdma_mr *mr;

	mr = rpcrdma_mr_get(r_xprt);
	if (!mr)
		return -EAGAIN;
	mr->mr_req = NULL;
	ep->re_write_pad_mr = mr;

	seg.mr_len = XDR_UNIT;
	seg.mr_page = virt_to_page(ep->re_write_pad);
	seg.mr_offset = offset_in_page(ep->re_write_pad);
	if (IS_ERR(frwr_map(r_xprt, &seg, 1, true, xdr_zero, mr)))
		return -EIO;
	trace_xprtrdma_mr_fastreg(mr);

	mr->mr_cqe.done = frwr_wc_fastreg;
	mr->mr_regwr.wr.next = NULL;
	mr->mr_regwr.wr.wr_cqe = &mr->mr_cqe;
	mr->mr_regwr.wr.num_sge = 0;
	mr->mr_regwr.wr.opcode = IB_WR_REG_MR;
	mr->mr_regwr.wr.send_flags = 0;

	return ib_post_send(ep->re_id->qp, &mr->mr_regwr.wr, NULL);
}