// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, 2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* Lightweight memory registration using Fast Memory Regions (FMR).
 * Referred to sometimes as MTHCAFMR mode.
 *
 * FMR uses synchronous memory registration and deregistration.
 * FMR registration is known to be fast, but FMR deregistration
 * can take tens of usecs to complete.
 */

/*
 * A Memory Region is prepared for RDMA READ or WRITE using the
 * ib_map_phys_fmr verb (fmr_op_map). When the RDMA operation is
 * finished, the Memory Region is unmapped using the ib_unmap_fmr
 * verb (fmr_op_unmap).
 */
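
/* Typical verb sequence for one RPC, as implemented below (sketch):
 *
 *	fmr_op_map()        -> ib_map_phys_fmr()
 *	  ... remote RDMA READ/WRITE against the registered region ...
 *	fmr_op_unmap_sync() -> ib_unmap_fmr()
 */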

#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif

/* Maximum scatter/gather per FMR */
#define RPCRDMA_MAX_FMR_SGES	(64)

/* Access mode of externally registered pages */
enum {
	RPCRDMA_FMR_ACCESS_FLAGS	= IB_ACCESS_REMOTE_WRITE |
					  IB_ACCESS_REMOTE_READ,
};
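
/* fmr_is_supported - check whether a device can use FMR mode
 *
 * Returns true only if the underlying ib_device implements the
 * ->alloc_fmr verb, which this registration mode requires.
 */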
bool
fmr_is_supported(struct rpcrdma_ia *ia)
{
	if (!ia->ri_device->alloc_fmr) {
		pr_info("rpcrdma: 'fmr' mode is not supported by device %s\n",
			ia->ri_device->name);
		return false;
	}
	return true;
}
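
/* __fmr_unmap - unmap a single FMR
 *
 * ib_unmap_fmr() operates on a list of FMRs, so place this one
 * on a temporary list for the duration of the call.
 */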
static int
__fmr_unmap(struct rpcrdma_mr *mr)
{
	LIST_HEAD(l);
	int rc;

	list_add(&mr->fmr.fm_mr->list, &l);
	rc = ib_unmap_fmr(&l);
	list_del(&mr->fmr.fm_mr->list);
	if (rc)
		pr_err("rpcrdma: final ib_unmap_fmr for %p failed %i\n",
		       mr, rc);
	return rc;
}
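
/* Release an MR: free its scatterlist and physical address array,
 * deallocate the underlying ib_fmr, then free the MR itself.
 */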
static void
fmr_op_release_mr(struct rpcrdma_mr *mr)
{
	int rc;

	kfree(mr->fmr.fm_physaddrs);
	kfree(mr->mr_sg);

	/* In case this one was left mapped, try to unmap it
	 * to prevent dealloc_fmr from failing with EBUSY
	 */
	__fmr_unmap(mr);

	rc = ib_dealloc_fmr(mr->fmr.fm_mr);
	if (rc)
		pr_err("rpcrdma: final ib_dealloc_fmr for %p returned %i\n",
		       mr, rc);

	kfree(mr);
}

/* MRs are dynamically allocated, so simply clean up and release the MR.
 * A replacement MR will subsequently be allocated on demand.
 */
static void
fmr_mr_recycle_worker(struct work_struct *work)
{
	struct rpcrdma_mr *mr = container_of(work, struct rpcrdma_mr, mr_recycle);
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;

	trace_xprtrdma_mr_recycle(mr);

	trace_xprtrdma_mr_unmap(mr);
	ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
			mr->mr_sg, mr->mr_nents, mr->mr_dir);

	spin_lock(&r_xprt->rx_buf.rb_mrlock);
	list_del(&mr->mr_all);
	r_xprt->rx_stats.mrs_recycled++;
	spin_unlock(&r_xprt->rx_buf.rb_mrlock);
	fmr_op_release_mr(mr);
}
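
/* fmr_op_init_mr - allocate the per-MR resources needed for FMR mode
 *
 * Allocates the physical address array and scatterlist for one MR,
 * then allocates the ib_fmr itself.
 */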
static int
fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
{
	static struct ib_fmr_attr fmr_attr = {
		.max_pages	= RPCRDMA_MAX_FMR_SGES,
		.max_maps	= 1,
		.page_shift	= PAGE_SHIFT
	};

	mr->fmr.fm_physaddrs = kcalloc(RPCRDMA_MAX_FMR_SGES,
				       sizeof(u64), GFP_KERNEL);
	if (!mr->fmr.fm_physaddrs)
		goto out_free;

	mr->mr_sg = kcalloc(RPCRDMA_MAX_FMR_SGES,
			    sizeof(*mr->mr_sg), GFP_KERNEL);
	if (!mr->mr_sg)
		goto out_free;

	sg_init_table(mr->mr_sg, RPCRDMA_MAX_FMR_SGES);

	mr->fmr.fm_mr = ib_alloc_fmr(ia->ri_pd, RPCRDMA_FMR_ACCESS_FLAGS,
				     &fmr_attr);
	if (IS_ERR(mr->fmr.fm_mr))
		goto out_fmr_err;

	INIT_LIST_HEAD(&mr->mr_list);
	INIT_WORK(&mr->mr_recycle, fmr_mr_recycle_worker);
	return 0;

out_fmr_err:
	dprintk("RPC: %s: ib_alloc_fmr returned %ld\n", __func__,
		PTR_ERR(mr->fmr.fm_mr));

out_free:
	kfree(mr->mr_sg);
	kfree(mr->fmr.fm_physaddrs);
	return -ENOMEM;
}

/* On success, sets:
 *	ep->rep_attr.cap.max_send_wr
 *	ep->rep_attr.cap.max_recv_wr
 *	cdata->max_requests
 *	ia->ri_max_segs
 */
static int
fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	    struct rpcrdma_create_data_internal *cdata)
{
	int max_qp_wr;

	max_qp_wr = ia->ri_device->attrs.max_qp_wr;
	max_qp_wr -= RPCRDMA_BACKWARD_WRS;
	max_qp_wr -= 1;
	if (max_qp_wr < RPCRDMA_MIN_SLOT_TABLE)
		return -ENOMEM;
	if (cdata->max_requests > max_qp_wr)
		cdata->max_requests = max_qp_wr;
	ep->rep_attr.cap.max_send_wr = cdata->max_requests;
	ep->rep_attr.cap.max_send_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_send_wr += 1; /* for ib_drain_sq */
	ep->rep_attr.cap.max_recv_wr = cdata->max_requests;
	ep->rep_attr.cap.max_recv_wr += RPCRDMA_BACKWARD_WRS;
	ep->rep_attr.cap.max_recv_wr += 1; /* for ib_drain_rq */

	ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
				RPCRDMA_MAX_FMR_SGES);
	ia->ri_max_segs += 2;	/* segments for head and tail buffers */
	return 0;
}

/* FMR mode conveys up to 64 pages of payload per chunk segment.
 */
static size_t
fmr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     RPCRDMA_MAX_HDR_SEGS * RPCRDMA_MAX_FMR_SGES);
}

/* Use the ib_map_phys_fmr() verb to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static struct rpcrdma_mr_seg *
fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	   int nsegs, bool writing, struct rpcrdma_mr **out)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	int len, pageoff, i, rc;
	struct rpcrdma_mr *mr;
	u64 *dma_pages;

	mr = rpcrdma_mr_get(r_xprt);
	if (!mr)
		return ERR_PTR(-EAGAIN);

	pageoff = offset_in_page(seg1->mr_offset);
	seg1->mr_offset -= pageoff;	/* start of page */
	seg1->mr_len += pageoff;
	len = -pageoff;
	if (nsegs > RPCRDMA_MAX_FMR_SGES)
		nsegs = RPCRDMA_MAX_FMR_SGES;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mr->mr_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mr->mr_sg[i], seg->mr_offset,
				   seg->mr_len);
		len += seg->mr_len;
		++seg;
		++i;
		/* Check for holes */
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg - 1)->mr_offset + (seg - 1)->mr_len))
			break;
	}
	mr->mr_dir = rpcrdma_data_dir(writing);

	mr->mr_nents = ib_dma_map_sg(r_xprt->rx_ia.ri_device,
				     mr->mr_sg, i, mr->mr_dir);
	if (!mr->mr_nents)
		goto out_dmamap_err;
	trace_xprtrdma_mr_map(mr);

	for (i = 0, dma_pages = mr->fmr.fm_physaddrs; i < mr->mr_nents; i++)
		dma_pages[i] = sg_dma_address(&mr->mr_sg[i]);
	rc = ib_map_phys_fmr(mr->fmr.fm_mr, dma_pages, mr->mr_nents,
			     dma_pages[0]);
	if (rc)
		goto out_maperr;

	mr->mr_handle = mr->fmr.fm_mr->rkey;
	mr->mr_length = len;
	mr->mr_offset = dma_pages[0] + pageoff;

	*out = mr;
	return seg;

out_dmamap_err:
	pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
	       mr->mr_sg, i);
	rpcrdma_mr_put(mr);
	return ERR_PTR(-EIO);

out_maperr:
	pr_err("rpcrdma: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n",
	       len, (unsigned long long)dma_pages[0],
	       pageoff, mr->mr_nents, rc);
	rpcrdma_mr_unmap_and_put(mr);
	return ERR_PTR(-EIO);
}

/* Post Send WR containing the RPC Call message.
 */
static int
fmr_op_send(struct rpcrdma_ia *ia, struct rpcrdma_req *req)
{
	return ib_post_send(ia->ri_id->qp, &req->rl_sendctx->sc_wr, NULL);
}

/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 *
 * Caller ensures that @mrs is not empty before the call. This
 * function empties the list.
 */
static void
fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
{
	struct rpcrdma_mr *mr;
	LIST_HEAD(unmap_list);
	int rc;

	/* ORDER: Invalidate all of the req's MRs first
	 *
	 * ib_unmap_fmr() is slow, so use a single call instead
	 * of one call per mapped FMR.
	 */
	list_for_each_entry(mr, mrs, mr_list) {
		dprintk("RPC: %s: unmapping fmr %p\n",
			__func__, &mr->fmr);
		trace_xprtrdma_mr_localinv(mr);
		list_add_tail(&mr->fmr.fm_mr->list, &unmap_list);
	}
	r_xprt->rx_stats.local_inv_needed++;
	rc = ib_unmap_fmr(&unmap_list);
	if (rc)
		goto out_release;

	/* ORDER: Now DMA unmap all of the req's MRs, and return
	 * them to the free MW list.
	 */
	while (!list_empty(mrs)) {
		mr = rpcrdma_mr_pop(mrs);
		list_del(&mr->fmr.fm_mr->list);
		rpcrdma_mr_unmap_and_put(mr);
	}

	return;

out_release:
	pr_err("rpcrdma: ib_unmap_fmr failed (%i)\n", rc);

	while (!list_empty(mrs)) {
		mr = rpcrdma_mr_pop(mrs);
		list_del(&mr->fmr.fm_mr->list);
		rpcrdma_mr_recycle(mr);
	}
}
const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
	.ro_map				= fmr_op_map,
	.ro_send			= fmr_op_send,
	.ro_unmap_sync			= fmr_op_unmap_sync,
	.ro_open			= fmr_op_open,
	.ro_maxpages			= fmr_op_maxpages,
	.ro_init_mr			= fmr_op_init_mr,
	.ro_release_mr			= fmr_op_release_mr,
	.ro_displayname			= "fmr",
	.ro_send_w_inv_ok		= 0,
};