/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */
/* Lightweight memory registration using Fast Memory Regions (FMR).
 * Referred to sometimes as MTHCAFMR mode.
 *
 * FMR uses synchronous memory registration and deregistration.
 * FMR registration is known to be fast, but FMR deregistration
 * can take tens of usecs to complete.
 */
/* A Memory Region is prepared for RDMA READ or WRITE using the
 * ib_map_phys_fmr verb (fmr_op_map). When the RDMA operation is
 * finished, the Memory Region is unmapped using the ib_unmap_fmr
 * verb (fmr_op_unmap).
 */
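
/* The functions in this file are not called directly; they are invoked
 * through the rpcrdma_fmr_memreg_ops vector defined at the bottom of
 * this file, which the generic xprtrdma code uses when FMR registration
 * is selected.
 */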
#include "xprt_rdma.h"
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif
/* Maximum scatter/gather per FMR */
#define RPCRDMA_MAX_FMR_SGES	(64)
/* Access mode of externally registered pages */
enum {
	RPCRDMA_FMR_ACCESS_FLAGS	= IB_ACCESS_REMOTE_WRITE |
					  IB_ACCESS_REMOTE_READ,
};
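
/* fmr_is_supported() - report whether the device can use FMR mode.
 * The FMR verbs are optional; a device that does not implement
 * ->alloc_fmr cannot support this registration mode.
 */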
bool
fmr_is_supported(struct rpcrdma_ia *ia)
{
	if (!ia->ri_device->alloc_fmr) {
		pr_info("rpcrdma: 'fmr' mode is not supported by device %s\n",
			ia->ri_device->name);
		return false;
	}
	return true;
}
static int
fmr_op_init_mr(struct rpcrdma_ia *ia, struct rpcrdma_mw *mw)
{
	static struct ib_fmr_attr fmr_attr = {
		.max_pages	= RPCRDMA_MAX_FMR_SGES,
		.max_maps	= 1,
		.page_shift	= PAGE_SHIFT
	};

	mw->fmr.fm_physaddrs = kcalloc(RPCRDMA_MAX_FMR_SGES,
				       sizeof(u64), GFP_KERNEL);
	if (!mw->fmr.fm_physaddrs)
		goto out_free;

	mw->mw_sg = kcalloc(RPCRDMA_MAX_FMR_SGES,
			    sizeof(*mw->mw_sg), GFP_KERNEL);
	if (!mw->mw_sg)
		goto out_free;

	sg_init_table(mw->mw_sg, RPCRDMA_MAX_FMR_SGES);

	mw->fmr.fm_mr = ib_alloc_fmr(ia->ri_pd, RPCRDMA_FMR_ACCESS_FLAGS,
				     &fmr_attr);
	if (IS_ERR(mw->fmr.fm_mr))
		goto out_fmr_err;

	return 0;

out_fmr_err:
	dprintk("RPC: %s: ib_alloc_fmr returned %ld\n", __func__,
		PTR_ERR(mw->fmr.fm_mr));

out_free:
	kfree(mw->mw_sg);
	kfree(mw->fmr.fm_physaddrs);
	return -ENOMEM;
}
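
/* __fmr_unmap() - unmap a single FMR. ib_unmap_fmr() takes a list,
 * so the FMR is placed on a temporary list for the call and removed
 * again afterward.
 */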
static int
__fmr_unmap(struct rpcrdma_mw *mw)
{
	LIST_HEAD(l);
	int rc;

	list_add(&mw->fmr.fm_mr->list, &l);
	rc = ib_unmap_fmr(&l);
	list_del(&mw->fmr.fm_mr->list);
	return rc;
}
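
/* fmr_op_release_mr() - release all resources held by one rpcrdma_mw:
 * unmap and deallocate its FMR, then free its arrays and the MW itself.
 */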
static void
fmr_op_release_mr(struct rpcrdma_mw *r)
{
	LIST_HEAD(unmap_list);
	int rc;

	/* Ensure MW is not on any rl_registered list */
	if (!list_empty(&r->mw_list))
		list_del(&r->mw_list);

	kfree(r->fmr.fm_physaddrs);
	kfree(r->mw_sg);

	/* In case this one was left mapped, try to unmap it
	 * to prevent dealloc_fmr from failing with EBUSY
	 */
	rc = __fmr_unmap(r);
	if (rc)
		pr_err("rpcrdma: final ib_unmap_fmr for %p failed %i\n",
		       r, rc);

	rc = ib_dealloc_fmr(r->fmr.fm_mr);
	if (rc)
		pr_err("rpcrdma: final ib_dealloc_fmr for %p returned %i\n",
		       r, rc);

	kfree(r);
}
/* Reset of a single FMR.
 */
static void
fmr_op_recover_mr(struct rpcrdma_mw *mw)
{
	struct rpcrdma_xprt *r_xprt = mw->mw_xprt;
	int rc;

	/* ORDER: invalidate first */
	rc = __fmr_unmap(mw);

	/* ORDER: then DMA unmap */
	ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
			mw->mw_sg, mw->mw_nents, mw->mw_dir);
	if (rc)
		goto out_release;

	rpcrdma_put_mw(r_xprt, mw);
	r_xprt->rx_stats.mrs_recovered++;
	return;

out_release:
	pr_err("rpcrdma: FMR reset failed (%d), %p released\n", rc, mw);
	r_xprt->rx_stats.mrs_orphaned++;

	spin_lock(&r_xprt->rx_buf.rb_mwlock);
	list_del(&mw->mw_all);
	spin_unlock(&r_xprt->rx_buf.rb_mwlock);

	fmr_op_release_mr(mw);
}
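
/* fmr_op_open() - per-transport setup for FMR mode; derives the
 * maximum number of chunk segments per RPC from the FMR page limit.
 */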
static int
fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	    struct rpcrdma_create_data_internal *cdata)
{
	ia->ri_max_segs = max_t(unsigned int, 1, RPCRDMA_MAX_DATA_SEGS /
				RPCRDMA_MAX_FMR_SGES);
	return 0;
}
/* FMR mode conveys up to 64 pages of payload per chunk segment.
 */
static size_t
fmr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     RPCRDMA_MAX_HDR_SEGS * RPCRDMA_MAX_FMR_SGES);
}
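
/* Sizing note: assuming 4KB pages (PAGE_SHIFT == 12), the limit of
 * RPCRDMA_MAX_FMR_SGES pages per segment means one mapped segment can
 * carry up to 64 * 4KB = 256KB of payload.
 */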
/* Use the ib_map_phys_fmr() verb to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
static struct rpcrdma_mr_seg *
fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	   int nsegs, bool writing, struct rpcrdma_mw **out)
{
	struct rpcrdma_mr_seg *seg1 = seg;
	int len, pageoff, i, rc;
	struct rpcrdma_mw *mw;
	u64 *dma_pages;

	mw = rpcrdma_get_mw(r_xprt);
	if (!mw)
		return ERR_PTR(-ENOBUFS);

	pageoff = offset_in_page(seg1->mr_offset);
	seg1->mr_offset -= pageoff;	/* start of page */
	seg1->mr_len += pageoff;
	len = -pageoff;
	if (nsegs > RPCRDMA_MAX_FMR_SGES)
		nsegs = RPCRDMA_MAX_FMR_SGES;
	for (i = 0; i < nsegs;) {
		if (seg->mr_page)
			sg_set_page(&mw->mw_sg[i],
				    seg->mr_page,
				    seg->mr_len,
				    offset_in_page(seg->mr_offset));
		else
			sg_set_buf(&mw->mw_sg[i], seg->mr_offset,
				   seg->mr_len);
		len += seg->mr_len;
		++seg;
		++i;
		/* Check for holes */
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}
	mw->mw_dir = rpcrdma_data_dir(writing);

	mw->mw_nents = ib_dma_map_sg(r_xprt->rx_ia.ri_device,
				     mw->mw_sg, i, mw->mw_dir);
	if (!mw->mw_nents)
		goto out_dmamap_err;

	/* Gather the DMA address of each mapped page for ib_map_phys_fmr() */
	for (i = 0, dma_pages = mw->fmr.fm_physaddrs; i < mw->mw_nents; i++)
		dma_pages[i] = sg_dma_address(&mw->mw_sg[i]);
	rc = ib_map_phys_fmr(mw->fmr.fm_mr, dma_pages, mw->mw_nents,
			     dma_pages[0]);
	if (rc)
		goto out_maperr;

	mw->mw_handle = mw->fmr.fm_mr->rkey;
	mw->mw_length = len;
	mw->mw_offset = dma_pages[0] + pageoff;

	*out = mw;
	return seg;

out_dmamap_err:
	pr_err("rpcrdma: failed to DMA map sg %p sg_nents %d\n",
	       mw->mw_sg, i);
	rpcrdma_put_mw(r_xprt, mw);
	return ERR_PTR(-EIO);

out_maperr:
	pr_err("rpcrdma: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n",
	       len, (unsigned long long)dma_pages[0],
	       pageoff, mw->mw_nents, rc);
	ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
			mw->mw_sg, mw->mw_nents, mw->mw_dir);
	rpcrdma_put_mw(r_xprt, mw);
	return ERR_PTR(-EIO);
}
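
/* Note on fmr_op_map(): on success it returns a pointer to the first
 * segment it did not map, so the caller can continue marshaling from
 * there; on failure it returns an ERR_PTR after the MW has been
 * returned to the free list.
 */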
/* Invalidate all memory regions that were registered for "req".
 *
 * Sleeps until it is safe for the host CPU to access the
 * previously mapped memory regions.
 *
 * Caller ensures that @mws is not empty before the call. This
 * function empties the list.
 */
static void
fmr_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct list_head *mws)
{
	struct rpcrdma_mw *mw;
	LIST_HEAD(unmap_list);
	int rc;

	/* ORDER: Invalidate all of the req's MRs first
	 *
	 * ib_unmap_fmr() is slow, so use a single call instead
	 * of one call per mapped FMR.
	 */
	list_for_each_entry(mw, mws, mw_list) {
		dprintk("RPC: %s: unmapping fmr %p\n",
			__func__, &mw->fmr);
		list_add_tail(&mw->fmr.fm_mr->list, &unmap_list);
	}
	r_xprt->rx_stats.local_inv_needed++;
	rc = ib_unmap_fmr(&unmap_list);
	if (rc)
		goto out_reset;

	/* ORDER: Now DMA unmap all of the req's MRs, and return
	 * them to the free MW list.
	 */
	while (!list_empty(mws)) {
		mw = rpcrdma_pop_mw(mws);
		dprintk("RPC: %s: DMA unmapping fmr %p\n",
			__func__, &mw->fmr);
		list_del(&mw->fmr.fm_mr->list);
		ib_dma_unmap_sg(r_xprt->rx_ia.ri_device,
				mw->mw_sg, mw->mw_nents, mw->mw_dir);
		rpcrdma_put_mw(r_xprt, mw);
	}

	return;

out_reset:
	pr_err("rpcrdma: ib_unmap_fmr failed (%i)\n", rc);

	while (!list_empty(mws)) {
		mw = rpcrdma_pop_mw(mws);
		list_del(&mw->fmr.fm_mr->list);
		fmr_op_recover_mr(mw);
	}
}
/* Use a slow, safe mechanism to invalidate all memory regions
 * that were registered for "req".
 */
static void
fmr_op_unmap_safe(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req,
		  bool sync)
{
	struct rpcrdma_mw *mw;

	while (!list_empty(&req->rl_registered)) {
		mw = rpcrdma_pop_mw(&req->rl_registered);
		if (sync)
			fmr_op_recover_mr(mw);
		else
			rpcrdma_defer_mr_recovery(mw);
	}
}
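
/* Operations vector through which the generic xprtrdma code invokes
 * this file's FMR registration mode.
 */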
const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
	.ro_map			= fmr_op_map,
	.ro_unmap_sync		= fmr_op_unmap_sync,
	.ro_unmap_safe		= fmr_op_unmap_safe,
	.ro_recover_mr		= fmr_op_recover_mr,
	.ro_open		= fmr_op_open,
	.ro_maxpages		= fmr_op_maxpages,
	.ro_init_mr		= fmr_op_init_mr,
	.ro_release_mr		= fmr_op_release_mr,
	.ro_displayname		= "fmr",
	.ro_send_w_inv_ok	= 0,
};