// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2015, 2017 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */
/* Lightweight memory registration using Fast Memory Regions (FMR).
 * Referred to sometimes as MTHCAFMR mode.
 *
 * FMR uses synchronous memory registration and deregistration.
 * FMR registration is known to be fast, but FMR deregistration
 * can take tens of usecs to complete.
 */
/* A Memory Region is prepared for RDMA READ or WRITE using the
 * ib_map_phys_fmr verb (fmr_op_map). When the RDMA operation is
 * finished, the Memory Region is unmapped using the ib_unmap_fmr
 * verb (fmr_op_unmap).
 */
#include <linux/sunrpc/svc_rdma.h>

#include "xprt_rdma.h"
#include <trace/events/rpcrdma.h>
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif
/* Maximum scatter/gather per FMR */
#define RPCRDMA_MAX_FMR_SGES	(64)
/* Access mode of externally registered pages */
enum {
	RPCRDMA_FMR_ACCESS_FLAGS	= IB_ACCESS_REMOTE_WRITE |
					  IB_ACCESS_REMOTE_READ,
};
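
/* fmr_is_supported() reports whether the underlying ib_device
 * implements the alloc_fmr verb; if it does not, this registration
 * mode cannot be used on that device.
 */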
static bool
fmr_is_supported(struct rpcrdma_ia *ia)
{
	if (!ia->ri_device->alloc_fmr) {
		pr_info("rpcrdma: 'fmr' mode is not supported by device %s\n",
			ia->ri_device->name);
		return false;
	}
	return true;
}
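
/* fmr_op_init_mr() sets up the per-MR resources used by this mode:
 * an array of DMA addresses for ib_map_phys_fmr(), a scatterlist,
 * and the ib_fmr object itself.
 */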
static int
fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mr *mr)
{
	static struct ib_fmr_attr fmr_attr = {
		.max_pages	= RPCRDMA_MAX_FMR_SGES,
		.max_maps	= 1,
		.page_shift	= PAGE_SHIFT
	};

	mr->fmr.fm_physaddrs = kcalloc(RPCRDMA_MAX_FMR_SGES,
				       sizeof(u64), GFP_KERNEL);
	if (!mr->fmr.fm_physaddrs)
		goto out_free;

	mr->mr_sg = kcalloc(RPCRDMA_MAX_FMR_SGES,
			    sizeof(*mr->mr_sg), GFP_KERNEL);
	if (!mr->mr_sg)
		goto out_free;

	sg_init_table(mr->mr_sg, RPCRDMA_MAX_FMR_SGES);

	mr->fmr.fm_mr = ib_alloc_fmr(ia->ri_pd, RPCRDMA_FMR_ACCESS_FLAGS,
				     &fmr_attr);
	if (IS_ERR(mr->fmr.fm_mr))
		goto out_fmr_err;

	INIT_LIST_HEAD(&mr->mr_list);
	return 0;

out_fmr_err:
	dprintk("RPC: %s: ib_alloc_fmr returned %ld\n", __func__,
		PTR_ERR(mr->fmr.fm_mr));

out_free:
	kfree(mr->mr_sg);
	kfree(mr->fmr.fm_physaddrs);
	return -ENOMEM;
}
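
/* __fmr_unmap() invalidates one FMR. ib_unmap_fmr() operates on a
 * list, so the FMR is placed on a temporary single-entry list for
 * the duration of the call.
 */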
static int
__fmr_unmap(struct rpcrdma_mr *mr)
{
	LIST_HEAD(l);
	int rc;

	list_add(&mr->fmr.fm_mr->list, &l);
	rc = ib_unmap_fmr(&l);
	list_del(&mr->fmr.fm_mr->list);
	return rc;
}
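
/* fmr_op_release_mr() tears down one rpcrdma_mr: any remaining FMR
 * mapping is undone, the FMR is deallocated, and the supporting
 * arrays and the MR structure itself are freed.
 */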
static void
fmr_op_release_mr(struct rpcrdma_mr *mr)
{
	LIST_HEAD(unmap_list);
	int rc;

	/* Ensure MW is not on any rl_registered list */
	if (!list_empty(&mr->mr_list))
		list_del(&mr->mr_list);

	kfree(mr->fmr.fm_physaddrs);
	kfree(mr->mr_sg);

	/* In case this one was left mapped, try to unmap it
	 * to prevent dealloc_fmr from failing with EBUSY
	 */
	rc = __fmr_unmap(mr);
	if (rc)
		pr_err("rpcrdma: final ib_unmap_fmr for %p failed %i\n",
		       mr, rc);

	rc = ib_dealloc_fmr(mr->fmr.fm_mr);
	if (rc)
		pr_err("rpcrdma: final ib_dealloc_fmr for %p returned %i\n",
		       mr, rc);

	kfree(mr);
}
/* Reset of a single FMR.
 */
static void
fmr_op_recover_mr(struct rpcrdma_mr *mr)
{
	struct rpcrdma_xprt *r_xprt = mr->mr_xprt;
	int rc;

	/* ORDER: invalidate first */
	rc = __fmr_unmap(mr);
	if (rc)
		goto out_release;

	/* ORDER: then DMA unmap */
	rpcrdma_mr_unmap_and_put(mr);

	r_xprt->rx_stats.mrs_recovered++;
	return;

out_release:
	pr_err("rpcrdma: FMR reset failed (%d), %p released\n", rc, mr);
	r_xprt->rx_stats.mrs_orphaned++;

	trace_xprtrdma_dma_unmap(mr);
	ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
			mr->mr_sg, mr->mr_nents, mr->mr_dir);

	spin_lock(&r_xprt->rx_buf.rb_mrlock);
	list_del(&mr->mr_all);
	spin_unlock(&r_xprt->rx_buf.rb_mrlock);

	fmr_op_release_mr(mr);
}
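
/* fmr_op_open() sets ia->ri_max_segs based on how many pages a
 * single FMR can cover (RPCRDMA_MAX_FMR_SGES).
 */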
static int
fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	    struct rpcrdma_create_data_internal *cdata)
{
	ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
				RPCRDMA_MAX_FMR_SGES);
	return 0;
}
/* FMR mode conveys up to 64 pages of payload per chunk segment.
 */
static size_t
fmr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     RPCRDMA_MAX_HDR_SEGS * RPCRDMA_MAX_FMR_SGES);
}
/* Use the ib_map_phys_fmr() verb to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static struct rpcrdma_mr_seg *
fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	   int nsegs, bool writing, struct rpcrdma_mr **out)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	int len, pageoff, i, rc;
	struct rpcrdma_mr *mr;
	u64 *dma_pages;

	mr = rpcrdma_mr_get(r_xprt);
	if (!mr)
		return ERR_PTR(-ENOBUFS);
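
	/* The first segment is adjusted to start on a page boundary;
	 * the byte offset within that page is carried in pageoff and
	 * added back into mr->mr_offset once the FMR is mapped.
	 */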
	pageoff = offset_in_page(seg1->mr_offset);
	seg1->mr_offset -= pageoff;	/* start of page */
	seg1->mr_len += pageoff;
	len = -pageoff;
	if (nsegs > RPCRDMA_MAX_FMR_SGES)
		nsegs = RPCRDMA_MAX_FMR_SGES;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mr->mr_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mr->mr_sg[i], seg->mr_offset,
				   seg->mr_len);
		len += seg->mr_len;
		++seg;
		++i;
		/* Check for holes */
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mr->mr_dir = rpcrdma_data_dir(writing);
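
	/* DMA map the scatterlist so the device can reach every page
	 * that will be covered by this FMR.
	 */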
	mr->mr_nents = ib_dma_map_sg(r_xprt->rx_ia.ri_device,
				     mr->mr_sg, i, mr->mr_dir);
	if (!mr->mr_nents)
		goto out_dmamap_err;
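
	/* ib_map_phys_fmr() takes a plain array of bus addresses, so
	 * the DMA addresses of the mapped scatterlist entries are
	 * copied into the MR's fm_physaddrs array before the verb
	 * is invoked.
	 */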
	for (i = 0, dma_pages = mr->fmr.fm_physaddrs; i < mr->mr_nents; i++)
		dma_pages[i] = sg_dma_address(&mr->mr_sg[i]);
	rc = ib_map_phys_fmr(mr->fmr.fm_mr, dma_pages, mr->mr_nents,
			     dma_pages[0]);
	if (rc)
		goto out_maperr;

	mr->mr_handle = mr->fmr.fm_mr->rkey;
	mr->mr_length = len;
	mr->mr_offset = dma_pages[0] + pageoff;

	*out = mr;
	return seg;
out_dmamap_err:
	pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
	       mr->mr_sg, i);
	rpcrdma_mr_put(mr);
	return ERR_PTR(-EIO);

out_maperr:
	pr_err("rpcrdma: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n",
	       len, (unsigned long long)dma_pages[0],
	       pageoff, mr->mr_nents, rc);
	rpcrdma_mr_unmap_and_put(mr);
	return ERR_PTR(-EIO);
}
/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 *
 * Caller ensures that @mrs is not empty before the call. This
 * function empties the list.
 */
static void
fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mrs)
{
	struct rpcrdma_mr *mr;
	LIST_HEAD(unmap_list);
	int rc;

	/* ORDER: Invalidate all of the req's MRs first
	 *
	 * ib_unmap_fmr() is slow, so use a single call instead
	 * of one call per mapped FMR.
	 */
	list_for_each_entry(mr, mrs, mr_list) {
		dprintk("RPC: %s: unmapping fmr %p\n",
			__func__, &mr->fmr);
		trace_xprtrdma_localinv(mr);
		list_add_tail(&mr->fmr.fm_mr->list, &unmap_list);
	}
	r_xprt->rx_stats.local_inv_needed++;
	rc = ib_unmap_fmr(&unmap_list);
	if (rc)
		goto out_reset;

	/* ORDER: Now DMA unmap all of the req's MRs, and return
	 * them to the free MW list.
	 */
	while (!list_empty(mrs)) {
		mr = rpcrdma_mr_pop(mrs);
		list_del(&mr->fmr.fm_mr->list);
		rpcrdma_mr_unmap_and_put(mr);
	}

	return;

out_reset:
	pr_err("rpcrdma: ib_unmap_fmr failed (%i)\n", rc);

	while (!list_empty(mrs)) {
		mr = rpcrdma_mr_pop(mrs);
		list_del(&mr->fmr.fm_mr->list);
		fmr_op_recover_mr(mr);
	}
}
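
/* Registration method table for FMR mode; the generic xprtrdma code
 * invokes these operations through the ro_* hooks when this memory
 * registration strategy is selected.
 */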
const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
	.ro_map				= fmr_op_map,
	.ro_unmap_sync			= fmr_op_unmap_sync,
	.ro_recover_mr			= fmr_op_recover_mr,
	.ro_open			= fmr_op_open,
	.ro_maxpages			= fmr_op_maxpages,
	.ro_init_mr			= fmr_op_init_mr,
	.ro_release_mr			= fmr_op_release_mr,
	.ro_displayname			= "fmr",
	.ro_send_w_inv_ok		= 0,
};