/*
 * Copyright (c) 2016 Oracle. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ib_mr.h"
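
/* Allocate an FRMR-backed MR. The size of the message (npages) decides
 * whether the 8K or the 1M MR pool is used; an MR is taken from the pool
 * when one is available, otherwise a new rds_ib_mr and its ib_mr are
 * allocated.
 */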
static struct rds_ib_mr *rds_ib_alloc_frmr(struct rds_ib_device *rds_ibdev,
					   int npages)
{
	struct rds_ib_mr_pool *pool;
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_frmr *frmr;
	int err = 0;

	if (npages <= RDS_MR_8K_MSG_SIZE)
		pool = rds_ibdev->mr_8k_pool;
	else
		pool = rds_ibdev->mr_1m_pool;

	ibmr = rds_ib_try_reuse_ibmr(pool);
	if (ibmr)
		return ibmr;

	ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL,
			    rdsibdev_to_node(rds_ibdev));
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	frmr = &ibmr->u.frmr;
	frmr->mr = ib_alloc_mr(rds_ibdev->pd, IB_MR_TYPE_MEM_REG,
			       pool->fmr_attr.max_pages);
	if (IS_ERR(frmr->mr)) {
		pr_warn("RDS/IB: %s failed to allocate MR", __func__);
		err = PTR_ERR(frmr->mr);
		goto out_no_cigar;
	}

	ibmr->pool = pool;
	if (pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_alloc);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_alloc);

	if (atomic_read(&pool->item_count) > pool->max_items_soft)
		pool->max_items_soft = pool->max_items;

	frmr->fr_state = FRMR_IS_FREE;
	return ibmr;

out_no_cigar:
	kfree(ibmr);
	atomic_dec(&pool->item_count);
	return ERR_PTR(err);
}
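
/* Hand an MR back to its pool. MRs flagged for dropping go on the
 * pool's drop_list, reusable ones on its free_list. Once too many pages
 * are pinned, or a fifth of the pool is dirty, a pool flush is
 * scheduled on rds_ib_mr_wq.
 */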
static void rds_ib_free_frmr(struct rds_ib_mr *ibmr, bool drop)
{
	struct rds_ib_mr_pool *pool = ibmr->pool;

	if (drop)
		llist_add(&ibmr->llnode, &pool->drop_list);
	else
		llist_add(&ibmr->llnode, &pool->free_list);
	atomic_add(ibmr->sg_len, &pool->free_pinned);
	atomic_inc(&pool->dirty_count);

	/* If we've pinned too many pages, request a flush */
	if (atomic_read(&pool->free_pinned) >= pool->max_free_pinned ||
	    atomic_read(&pool->dirty_count) >= pool->max_items / 5)
		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);
}
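
/* Post an IB_WR_REG_MR work request registering the MR's DMA-mapped
 * scatterlist with the HCA. i_fastreg_wrs acts as a credit counter so
 * registration WRs cannot overrun the send queue; a credit is handed
 * back either on error below or from the completion handler.
 */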
static int rds_ib_post_reg_frmr(struct rds_ib_mr *ibmr)
{
	struct rds_ib_frmr *frmr = &ibmr->u.frmr;
	struct ib_reg_wr reg_wr;
	unsigned int off = 0;
	int ret;

	while (atomic_dec_return(&ibmr->ic->i_fastreg_wrs) <= 0) {
		atomic_inc(&ibmr->ic->i_fastreg_wrs);
		cpu_relax();
	}

	ret = ib_map_mr_sg_zbva(frmr->mr, ibmr->sg, ibmr->sg_len,
				&off, PAGE_SIZE);
	if (unlikely(ret != ibmr->sg_len))
		return ret < 0 ? ret : -EINVAL;

	/* Perform a WR for the fast_reg_mr. Each individual page
	 * in the sg list is added to the fast reg page list and placed
	 * inside the fast_reg_mr WR. The key used is a rolling 8bit
	 * counter, which should guarantee uniqueness.
	 */
	ib_update_fast_reg_key(frmr->mr, ibmr->remap_count++);
	frmr->fr_state = FRMR_IS_INUSE;

	memset(&reg_wr, 0, sizeof(reg_wr));
	reg_wr.wr.wr_id = (unsigned long)(void *)ibmr;
	reg_wr.wr.opcode = IB_WR_REG_MR;
	reg_wr.wr.num_sge = 0;
	reg_wr.mr = frmr->mr;
	reg_wr.key = frmr->mr->rkey;
	reg_wr.access = IB_ACCESS_LOCAL_WRITE |
			IB_ACCESS_REMOTE_READ |
			IB_ACCESS_REMOTE_WRITE;
	reg_wr.wr.send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(ibmr->ic->i_cm_id->qp, &reg_wr.wr, NULL);
	if (unlikely(ret)) {
		/* Failure here can be because of -ENOMEM as well */
		frmr->fr_state = FRMR_IS_STALE;
		atomic_inc(&ibmr->ic->i_fastreg_wrs);
		if (printk_ratelimit())
			pr_warn("RDS/IB: %s returned error(%d)\n",
				__func__, ret);
	}
	return ret;
}
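
/* DMA-map a scatterlist into an MR and register it. Because the HCA
 * addresses the region as a list of pages, only the first sg entry may
 * start off a page boundary and only the last may end off one; any
 * other misalignment fails the mapping and unmaps the list.
 */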
static int rds_ib_map_frmr(struct rds_ib_device *rds_ibdev,
			   struct rds_ib_mr_pool *pool,
			   struct rds_ib_mr *ibmr,
			   struct scatterlist *sg, unsigned int sg_len)
{
	struct ib_device *dev = rds_ibdev->dev;
	struct rds_ib_frmr *frmr = &ibmr->u.frmr;
	int ret = 0;
	int i;
	u32 len;

	/* We want to teardown old ibmr values here and fill it up with
	 * new sg values
	 */
	rds_ib_teardown_mr(ibmr);

	ibmr->sg = sg;
	ibmr->sg_len = sg_len;
	ibmr->sg_dma_len = 0;
	frmr->sg_byte_len = 0;
	WARN_ON(ibmr->sg_dma_len);
	ibmr->sg_dma_len = ib_dma_map_sg(dev, ibmr->sg, ibmr->sg_len,
					 DMA_BIDIRECTIONAL);
	if (unlikely(!ibmr->sg_dma_len)) {
		pr_warn("RDS/IB: %s failed!\n", __func__);
		return -EBUSY;
	}

	frmr->sg_byte_len = 0;
	frmr->dma_npages = 0;
	len = 0;

	ret = -EINVAL;
	for (i = 0; i < ibmr->sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &ibmr->sg[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &ibmr->sg[i]);

		frmr->sg_byte_len += dma_len;
		if (dma_addr & ~PAGE_MASK) {
			if (i > 0)
				goto out_unmap;
			else
				++frmr->dma_npages;
		}

		if ((dma_addr + dma_len) & ~PAGE_MASK) {
			if (i < ibmr->sg_dma_len - 1)
				goto out_unmap;
			else
				++frmr->dma_npages;
		}

		len += dma_len;
	}
	frmr->dma_npages += len >> PAGE_SHIFT;

	if (frmr->dma_npages > ibmr->pool->fmr_attr.max_pages) {
		ret = -EMSGSIZE;
		goto out_unmap;
	}

	ret = rds_ib_post_reg_frmr(ibmr);
	if (ret)
		goto out_unmap;

	if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_used);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_used);

	return ret;

out_unmap:
	ib_dma_unmap_sg(rds_ibdev->dev, ibmr->sg, ibmr->sg_len,
			DMA_BIDIRECTIONAL);
	ibmr->sg_dma_len = 0;
	return ret;
}
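
/* Post an IB_WR_LOCAL_INV work request invalidating the MR's rkey so
 * the HCA no longer honors remote access through it. i_fastunreg_wrs
 * is the credit counter for invalidation WRs, mirroring i_fastreg_wrs
 * on the registration side.
 */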
static int rds_ib_post_inv(struct rds_ib_mr *ibmr)
{
	struct ib_send_wr *s_wr;
	struct rds_ib_frmr *frmr = &ibmr->u.frmr;
	struct rdma_cm_id *i_cm_id = ibmr->ic->i_cm_id;
	int ret = -EINVAL;

	if (!i_cm_id || !i_cm_id->qp || !frmr->mr)
		goto out;

	if (frmr->fr_state != FRMR_IS_INUSE)
		goto out;

	while (atomic_dec_return(&ibmr->ic->i_fastunreg_wrs) <= 0) {
		atomic_inc(&ibmr->ic->i_fastunreg_wrs);
		cpu_relax();
	}

	frmr->fr_inv = true;
	s_wr = &frmr->fr_wr;

	memset(s_wr, 0, sizeof(*s_wr));
	s_wr->wr_id = (unsigned long)(void *)ibmr;
	s_wr->opcode = IB_WR_LOCAL_INV;
	s_wr->ex.invalidate_rkey = frmr->mr->rkey;
	s_wr->send_flags = IB_SEND_SIGNALED;

	ret = ib_post_send(i_cm_id->qp, s_wr, NULL);
	if (unlikely(ret)) {
		frmr->fr_state = FRMR_IS_STALE;
		frmr->fr_inv = false;
		atomic_inc(&ibmr->ic->i_fastunreg_wrs);
		pr_err("RDS/IB: %s returned error(%d)\n", __func__, ret);
		goto out;
	}
out:
	return ret;
}
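
/* Completion handler shared by registration and invalidation WRs; the
 * wr_id carries the rds_ib_mr. A failed completion marks the MR stale
 * and, if the connection is still up, forces a reconnect. A WR credit
 * is returned: to i_fastreg_wrs when an invalidate was in flight,
 * otherwise to i_fastunreg_wrs.
 */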
void rds_ib_mr_cqe_handler(struct rds_ib_connection *ic, struct ib_wc *wc)
{
	struct rds_ib_mr *ibmr = (void *)(unsigned long)wc->wr_id;
	struct rds_ib_frmr *frmr = &ibmr->u.frmr;

	if (wc->status != IB_WC_SUCCESS) {
		frmr->fr_state = FRMR_IS_STALE;
		if (rds_conn_up(ic->conn))
			rds_ib_conn_error(ic->conn,
					  "frmr completion <%pI4,%pI4> status %u(%s), vendor_err 0x%x, disconnecting and reconnecting\n",
					  &ic->conn->c_laddr,
					  &ic->conn->c_faddr,
					  wc->status,
					  ib_wc_status_msg(wc->status),
					  wc->vendor_err);
	}

	if (frmr->fr_inv) {
		frmr->fr_state = FRMR_IS_FREE;
		frmr->fr_inv = false;
		atomic_inc(&ic->i_fastreg_wrs);
	} else {
		atomic_inc(&ic->i_fastunreg_wrs);
	}
}
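
/* Flush-side teardown of a list of MRs (presumably driven by the pool
 * flush worker). Invalidations are posted for every MR that is still
 * DMA-mapped, then mappings are destroyed, pages unpinned, and MRs
 * freed until the freeing goal is met; stale MRs are always freed.
 */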
void rds_ib_unreg_frmr(struct list_head *list, unsigned int *nfreed,
		       unsigned long *unpinned, unsigned int goal)
{
	struct rds_ib_mr *ibmr, *next;
	struct rds_ib_frmr *frmr;
	int ret = 0;
	unsigned int freed = *nfreed;

	/* Post a LOCAL_INV WR for every MR that still has a DMA mapping */
	list_for_each_entry(ibmr, list, unmap_list) {
		if (ibmr->sg_dma_len)
			ret |= rds_ib_post_inv(ibmr);
	}
	if (ret)
		pr_warn("RDS/IB: %s failed (err=%d)\n", __func__, ret);

	/* Now we can destroy the DMA mapping and unpin any pages */
	list_for_each_entry_safe(ibmr, next, list, unmap_list) {
		*unpinned += ibmr->sg_len;
		frmr = &ibmr->u.frmr;
		__rds_ib_teardown_mr(ibmr);
		if (freed < goal || frmr->fr_state == FRMR_IS_STALE) {
			/* Don't de-allocate if the MR is not free yet */
			if (frmr->fr_state == FRMR_IS_INUSE)
				continue;

			if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
				rds_ib_stats_inc(s_ib_rdma_mr_8k_free);
			else
				rds_ib_stats_inc(s_ib_rdma_mr_1m_free);
			list_del(&ibmr->unmap_list);
			if (frmr->mr)
				ib_dereg_mr(frmr->mr);
			kfree(ibmr);
			freed++;
		}
	}
	*nfreed = freed;
}
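
/* Register an FRMR for a scatterlist and return its rkey via *key.
 * Allocation loops until an MR in the FREE state is obtained; MRs that
 * come back from the pool in any other state are pushed onto the drop
 * list and another one is tried.
 */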
struct rds_ib_mr *rds_ib_reg_frmr(struct rds_ib_device *rds_ibdev,
				  struct rds_ib_connection *ic,
				  struct scatterlist *sg,
				  unsigned long nents, u32 *key)
{
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_frmr *frmr;
	int ret;

	if (!ic) {
		/* TODO: Add FRWR support for RDS_GET_MR using proxy qp*/
		return ERR_PTR(-EOPNOTSUPP);
	}

	do {
		if (ibmr)
			rds_ib_free_frmr(ibmr, true);
		ibmr = rds_ib_alloc_frmr(rds_ibdev, nents);
		if (IS_ERR(ibmr))
			return ibmr;
		frmr = &ibmr->u.frmr;
	} while (frmr->fr_state != FRMR_IS_FREE);

	ibmr->ic = ic;
	ibmr->device = rds_ibdev;
	ret = rds_ib_map_frmr(rds_ibdev, ibmr->pool, ibmr, sg, nents);
	if (ret == 0)
		*key = frmr->mr->rkey;
	else
		rds_ib_free_frmr(ibmr, false);

	return ibmr;
}
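
/* Queue an MR for lazy freeing: stale MRs go on the pool's drop_list,
 * reusable ones on its free_list. The actual teardown happens later,
 * presumably via the flush path above (rds_ib_unreg_frmr).
 */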
void rds_ib_free_frmr_list(struct rds_ib_mr *ibmr)
{
	struct rds_ib_mr_pool *pool = ibmr->pool;
	struct rds_ib_frmr *frmr = &ibmr->u.frmr;

	if (frmr->fr_state == FRMR_IS_STALE)
		llist_add(&ibmr->llnode, &pool->drop_list);
	else
		llist_add(&ibmr->llnode, &pool->free_list);
}