/*
 * Copyright (c) 2015 Oracle. All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */
/* Lightweight memory registration using Fast Memory Regions (FMR).
 * Referred to sometimes as MTHCAFMR mode.
 *
 * FMR uses synchronous memory registration and deregistration.
 * FMR registration is known to be fast, but FMR deregistration
 * can take tens of usecs to complete.
 */
/* Normal operation
 *
 * A Memory Region is prepared for RDMA READ or WRITE using the
 * ib_map_phys_fmr verb (fmr_op_map). When the RDMA operation is
 * finished, the Memory Region is unmapped using the ib_unmap_fmr
 * verb (fmr_op_unmap).
 */
/* Transport recovery
 *
 * After a transport reconnect, fmr_op_map re-uses the MR already
 * allocated for the RPC, but generates a fresh rkey then maps the
 * MR again. This process is synchronous.
 */
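/* A rough sketch of the verb sequence for a single registration,
 * assuming one chunk segment per RPC:
 *
 *	ib_alloc_fmr()		- at transport setup (fmr_op_init)
 *	ib_map_phys_fmr()	- per RPC, before the transfer (fmr_op_map)
 *	ib_unmap_fmr()		- per RPC, after the reply (fmr_op_unmap)
 *	ib_dealloc_fmr()	- at transport teardown (fmr_op_destroy)
 */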
#include "xprt_rdma.h"
#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY	RPCDBG_TRANS
#endif
/* Maximum scatter/gather per FMR */
#define RPCRDMA_MAX_FMR_SGES	(64)
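/* FMR mode needs no per-connection resources, so connect-time
 * setup is a no-op.
 */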
static int
fmr_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
	    struct rpcrdma_create_data_internal *cdata)
{
	return 0;
}
/* FMR mode conveys up to 64 pages of payload per chunk segment.
 */
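/* For example, with a 4KB PAGE_SIZE, one FMR can register up to
 * 64 * 4KB = 256KB of payload for each chunk segment.
 */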
static size_t
fmr_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
	return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
		     rpcrdma_max_segments(r_xprt) * RPCRDMA_MAX_FMR_SGES);
}
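/* Pre-allocate enough FMRs to service every RPC slot: for each
 * slot, one FMR per RPCRDMA_MAX_FMR_SGES-page chunk of the largest
 * possible payload, plus one each for the head and tail.
 */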
static int
fmr_op_init(struct rpcrdma_xprt *r_xprt)
{
	struct rpcrdma_buffer *buf = &r_xprt->rx_buf;
	int mr_access_flags = IB_ACCESS_REMOTE_WRITE | IB_ACCESS_REMOTE_READ;
	struct ib_fmr_attr fmr_attr = {
		.max_pages	= RPCRDMA_MAX_FMR_SGES,
		.max_maps	= 1,
		.page_shift	= PAGE_SHIFT
	};
	struct ib_pd *pd = r_xprt->rx_ia.ri_pd;
	struct rpcrdma_mw *r;
	int i, rc;

	spin_lock_init(&buf->rb_mwlock);
	INIT_LIST_HEAD(&buf->rb_mws);
	INIT_LIST_HEAD(&buf->rb_all);

	i = max_t(int, RPCRDMA_MAX_DATA_SEGS / RPCRDMA_MAX_FMR_SGES, 1);
	i += 2;				/* head + tail */
	i *= buf->rb_max_requests;	/* one set for each RPC slot */
	dprintk("RPC:       %s: initializing %d FMRs\n", __func__, i);

	while (i--) {
		r = kzalloc(sizeof(*r), GFP_KERNEL);
		if (!r)
			return -ENOMEM;

		r->r.fmr.physaddrs = kmalloc(RPCRDMA_MAX_FMR_SGES *
					     sizeof(u64), GFP_KERNEL);
		if (!r->r.fmr.physaddrs)
			goto out_free;

		r->r.fmr.fmr = ib_alloc_fmr(pd, mr_access_flags, &fmr_attr);
		if (IS_ERR(r->r.fmr.fmr))
			goto out_fmr_err;

		list_add(&r->mw_list, &buf->rb_mws);
		list_add(&r->mw_all, &buf->rb_all);
	}
	return 0;

out_fmr_err:
	rc = PTR_ERR(r->r.fmr.fmr);
	dprintk("RPC:       %s: ib_alloc_fmr status %i\n", __func__, rc);
	kfree(r->r.fmr.physaddrs);
out_free:
	kfree(r);
	return rc;
}
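/* Invalidate the FMR's rkey by unmapping it. ib_unmap_fmr() takes a
 * list, so the single FMR is placed on a local list head first.
 */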
static int
__fmr_unmap(struct rpcrdma_mw *r)
{
	LIST_HEAD(l);

	list_add(&r->r.fmr.fmr->list, &l);
	return ib_unmap_fmr(&l);
}
/* Use the ib_map_phys_fmr() verb to register a memory region
 * for remote access via RDMA READ or RDMA WRITE.
 */
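/* The segment array is coalesced page by page. A hole (a non-zero
 * page offset after the first segment, or a short segment in the
 * middle) ends coalescing early, since one FMR must describe a
 * single virtually contiguous region. Returns the number of
 * segments mapped, or a negative errno.
 */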
static int
fmr_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
	   int nsegs, bool writing)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct ib_device *device = ia->ri_device;
	enum dma_data_direction direction = rpcrdma_data_dir(writing);
	struct rpcrdma_mr_seg *seg1 = seg;
	int len, pageoff, i, rc;
	struct rpcrdma_mw *mw;

	mw = seg1->rl_mw;
	seg1->rl_mw = NULL;
	if (!mw) {
		mw = rpcrdma_get_mw(r_xprt);
		if (!mw)
			return -ENOMEM;
	} else {
		/* this is a retransmit; generate a fresh rkey */
		rc = __fmr_unmap(mw);
		if (rc)
			return rc;
	}

	pageoff = offset_in_page(seg1->mr_offset);
	seg1->mr_offset -= pageoff;	/* start of page */
	seg1->mr_len += pageoff;
	len = -pageoff;
	if (nsegs > RPCRDMA_MAX_FMR_SGES)
		nsegs = RPCRDMA_MAX_FMR_SGES;
	for (i = 0; i < nsegs;) {
		rpcrdma_map_one(device, seg, direction);
		mw->r.fmr.physaddrs[i] = seg->mr_dma;
		len += seg->mr_len;
		++seg;
		++i;
		/* Check for holes */
		if ((i < nsegs && offset_in_page(seg->mr_offset)) ||
		    offset_in_page((seg-1)->mr_offset + (seg-1)->mr_len))
			break;
	}

	rc = ib_map_phys_fmr(mw->r.fmr.fmr, mw->r.fmr.physaddrs,
			     i, seg1->mr_dma);
	if (rc)
		goto out_maperr;

	seg1->rl_mw = mw;
	seg1->mr_rkey = mw->r.fmr.fmr->rkey;
	seg1->mr_base = seg1->mr_dma + pageoff;
	seg1->mr_nsegs = i;
	seg1->mr_len = len;
	return i;

out_maperr:
	dprintk("RPC:       %s: ib_map_phys_fmr %u@0x%llx+%i (%d) status %i\n",
		__func__, len, (unsigned long long)seg1->mr_dma,
		pageoff, i, rc);
	while (i--)
		rpcrdma_unmap_one(device, --seg);
	return rc;
}
/* Use the ib_unmap_fmr() verb to prevent further remote
 * access via RDMA READ or RDMA WRITE.
 */
static int
fmr_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
{
	struct rpcrdma_ia *ia = &r_xprt->rx_ia;
	struct rpcrdma_mr_seg *seg1 = seg;
	struct rpcrdma_mw *mw = seg1->rl_mw;
	int rc, nsegs = seg->mr_nsegs;

	dprintk("RPC:       %s: FMR %p\n", __func__, mw);

	seg1->rl_mw = NULL;
	while (seg1->mr_nsegs--)
		rpcrdma_unmap_one(ia->ri_device, seg++);
	rc = __fmr_unmap(mw);
	if (rc)
		goto out_err;
	rpcrdma_put_mw(r_xprt, mw);
	return nsegs;

out_err:
	/* The FMR is abandoned, but remains in rb_all. fmr_op_destroy
	 * will attempt to release it when the transport is destroyed.
	 */
	dprintk("RPC:       %s: ib_unmap_fmr status %i\n", __func__, rc);
	return nsegs;
}
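/* Release every FMR still on rb_all at transport teardown, including
 * any that fmr_op_unmap abandoned after a failed ib_unmap_fmr().
 */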
static void
fmr_op_destroy(struct rpcrdma_buffer *buf)
{
	struct rpcrdma_mw *r;
	int rc;

	while (!list_empty(&buf->rb_all)) {
		r = list_entry(buf->rb_all.next, struct rpcrdma_mw, mw_all);
		list_del(&r->mw_all);
		kfree(r->r.fmr.physaddrs);

		rc = ib_dealloc_fmr(r->r.fmr.fmr);
		if (rc)
			dprintk("RPC:       %s: ib_dealloc_fmr failed %i\n",
				__func__, rc);

		kfree(r);
	}
}
const struct rpcrdma_memreg_ops rpcrdma_fmr_memreg_ops = {
	.ro_map				= fmr_op_map,
	.ro_unmap			= fmr_op_unmap,
	.ro_open			= fmr_op_open,
	.ro_maxpages			= fmr_op_maxpages,
	.ro_init			= fmr_op_init,
	.ro_destroy			= fmr_op_destroy,
	.ro_displayname			= "fmr",
};