/*
 * Copyright (c) 2016 Oracle.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "ib_mr.h"
struct rds_ib_mr *rds_ib_alloc_fmr(struct rds_ib_device *rds_ibdev, int npages)
{
	struct rds_ib_mr_pool *pool;
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_fmr *fmr;
	int err = 0;

	if (npages <= RDS_MR_8K_MSG_SIZE)
		pool = rds_ibdev->mr_8k_pool;
	else
		pool = rds_ibdev->mr_1m_pool;

	if (atomic_read(&pool->dirty_count) >= pool->max_items / 10)
		queue_delayed_work(rds_ib_mr_wq, &pool->flush_worker, 10);

	/* Switch pools if one of the pools is reaching its upper limit */
	if (atomic_read(&pool->dirty_count) >= pool->max_items * 9 / 10) {
		if (pool->pool_type == RDS_IB_MR_8K_POOL)
			pool = rds_ibdev->mr_1m_pool;
		else
			pool = rds_ibdev->mr_8k_pool;
	}

	ibmr = rds_ib_try_reuse_ibmr(pool);
	if (ibmr)
		return ibmr;

	ibmr = kzalloc_node(sizeof(*ibmr), GFP_KERNEL,
			    rdsibdev_to_node(rds_ibdev));
	if (!ibmr) {
		err = -ENOMEM;
		goto out_no_cigar;
	}

	fmr = &ibmr->u.fmr;
	fmr->fmr = ib_alloc_fmr(rds_ibdev->pd,
			(IB_ACCESS_LOCAL_WRITE |
			 IB_ACCESS_REMOTE_READ |
			 IB_ACCESS_REMOTE_WRITE |
			 IB_ACCESS_REMOTE_ATOMIC),
			&pool->fmr_attr);
	if (IS_ERR(fmr->fmr)) {
		err = PTR_ERR(fmr->fmr);
		fmr->fmr = NULL;
		pr_warn("RDS/IB: %s failed (err=%d)\n", __func__, err);
		goto out_no_cigar;
	}

	ibmr->pool = pool;
	if (pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_alloc);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_alloc);

	return ibmr;

out_no_cigar:
	if (ibmr) {
		if (fmr->fmr)
			ib_dealloc_fmr(fmr->fmr);
		kfree(ibmr);
	}
	atomic_dec(&pool->item_count);
	return ERR_PTR(err);
}
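/*
 * A worked sketch of the thresholds above, assuming an illustrative
 * max_items of 1024 (not a value taken from this file): the flush
 * worker is kicked once dirty_count reaches 102 (max_items / 10), and
 * allocations overflow into the other pool once dirty_count reaches
 * 921 (max_items * 9 / 10). An 8K request can therefore be satisfied
 * from the 1M pool, and vice versa, when its preferred pool is nearly
 * all dirty.
 */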
int rds_ib_map_fmr(struct rds_ib_device *rds_ibdev, struct rds_ib_mr *ibmr,
		   struct scatterlist *sg, unsigned int nents)
{
	struct ib_device *dev = rds_ibdev->dev;
	struct rds_ib_fmr *fmr = &ibmr->u.fmr;
	struct scatterlist *scat = sg;
	u64 io_addr = 0;
	u64 *dma_pages;
	u32 len;
	int page_cnt, sg_dma_len;
	int i, j;
	int ret;

	sg_dma_len = ib_dma_map_sg(dev, sg, nents, DMA_BIDIRECTIONAL);
	if (unlikely(!sg_dma_len)) {
		pr_warn("RDS/IB: %s failed!\n", __func__);
		return -EBUSY;
	}

	len = 0;
	page_cnt = 0;

	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		if (dma_addr & ~PAGE_MASK) {
			if (i > 0)
				return -EINVAL;
			else
				++page_cnt;
		}
		if ((dma_addr + dma_len) & ~PAGE_MASK) {
			if (i < sg_dma_len - 1)
				return -EINVAL;
			else
				++page_cnt;
		}

		len += dma_len;
	}

	page_cnt += len >> PAGE_SHIFT;
	if (page_cnt > ibmr->pool->fmr_attr.max_pages)
		return -EINVAL;

	dma_pages = kmalloc_node(sizeof(u64) * page_cnt, GFP_ATOMIC,
				 rdsibdev_to_node(rds_ibdev));
	if (!dma_pages)
		return -ENOMEM;

	page_cnt = 0;
	for (i = 0; i < sg_dma_len; ++i) {
		unsigned int dma_len = ib_sg_dma_len(dev, &scat[i]);
		u64 dma_addr = ib_sg_dma_address(dev, &scat[i]);

		for (j = 0; j < dma_len; j += PAGE_SIZE)
			dma_pages[page_cnt++] =
				(dma_addr & PAGE_MASK) + j;
	}

	ret = ib_map_phys_fmr(fmr->fmr, dma_pages, page_cnt, io_addr);
	if (ret)
		goto out;

	/* The remap succeeded, so we can safely tear down the
	 * old mapping.
	 */
	rds_ib_teardown_mr(ibmr);

	ibmr->sg = scat;
	ibmr->sg_len = nents;
	ibmr->sg_dma_len = sg_dma_len;
	ibmr->remap_count++;

	if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
		rds_ib_stats_inc(s_ib_rdma_mr_8k_used);
	else
		rds_ib_stats_inc(s_ib_rdma_mr_1m_used);
	ret = 0;

out:
	kfree(dma_pages);

	return ret;
}
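/*
 * A worked example of the page-list construction above, using
 * hypothetical addresses: one scatterlist entry with dma_addr 0x10000
 * and dma_len of 3 * PAGE_SIZE on a 4K-page system produces
 *
 *	dma_pages[0] = 0x10000;
 *	dma_pages[1] = 0x11000;
 *	dma_pages[2] = 0x12000;
 *
 * Only the first entry may begin, and only the last may end, off a
 * page boundary; any other misalignment fails the -EINVAL checks.
 */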
struct rds_ib_mr *rds_ib_reg_fmr(struct rds_ib_device *rds_ibdev,
				 struct scatterlist *sg,
				 unsigned long nents,
				 u32 *key)
{
	struct rds_ib_mr *ibmr = NULL;
	struct rds_ib_fmr *fmr;
	int ret;

	ibmr = rds_ib_alloc_fmr(rds_ibdev, nents);
	if (IS_ERR(ibmr))
		return ibmr;

	ibmr->device = rds_ibdev;
	fmr = &ibmr->u.fmr;
	ret = rds_ib_map_fmr(rds_ibdev, ibmr, sg, nents);
	if (ret == 0)
		*key = fmr->fmr->rkey;
	else
		rds_ib_free_mr(ibmr, 0);

	return ibmr;
}
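/*
 * Usage sketch, assuming a caller that already holds an rds_ib_device
 * and a populated scatterlist (hypothetical snippet, modeled on the
 * rds_ib_get_mr() path):
 *
 *	u32 key;
 *	struct rds_ib_mr *ibmr;
 *
 *	ibmr = rds_ib_reg_fmr(rds_ibdev, sg, nents, &key);
 *	if (IS_ERR(ibmr))
 *		return PTR_ERR(ibmr);
 *
 * On success, "key" holds the rkey a peer uses to address this MR.
 * Note that on a mapping failure the MR is handed back to its pool via
 * rds_ib_free_mr(), yet ibmr is still returned to the caller.
 */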
void rds_ib_unreg_fmr(struct list_head *list, unsigned int *nfreed,
		      unsigned long *unpinned, unsigned int goal)
{
	struct rds_ib_mr *ibmr, *next;
	struct rds_ib_fmr *fmr;
	LIST_HEAD(fmr_list);
	int ret = 0;
	unsigned int freed = *nfreed;

	/* String all ib_mr's onto one list and hand them to ib_unmap_fmr */
	list_for_each_entry(ibmr, list, unmap_list) {
		fmr = &ibmr->u.fmr;
		list_add(&fmr->fmr->list, &fmr_list);
	}

	ret = ib_unmap_fmr(&fmr_list);
	if (ret)
		pr_warn("RDS/IB: FMR invalidation failed (err=%d)\n", ret);

	/* Now we can destroy the DMA mapping and unpin any pages */
	list_for_each_entry_safe(ibmr, next, list, unmap_list) {
		fmr = &ibmr->u.fmr;
		*unpinned += ibmr->sg_len;
		__rds_ib_teardown_mr(ibmr);
		if (freed < goal ||
		    ibmr->remap_count >= ibmr->pool->fmr_attr.max_maps) {
			if (ibmr->pool->pool_type == RDS_IB_MR_8K_POOL)
				rds_ib_stats_inc(s_ib_rdma_mr_8k_free);
			else
				rds_ib_stats_inc(s_ib_rdma_mr_1m_free);
			list_del(&ibmr->unmap_list);
			ib_dealloc_fmr(fmr->fmr);
			kfree(ibmr);
			freed++;
		}
	}
	*nfreed = freed;
}
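/*
 * Freeing-policy sketch, assuming goal = 2 and three MRs on the unmap
 * list (illustrative numbers only): the first two are destroyed just
 * to meet the goal; the third is destroyed only if its remap_count has
 * reached fmr_attr.max_maps, and otherwise stays on the list for
 * reuse.
 */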
void rds_ib_free_fmr_list(struct rds_ib_mr *ibmr)
{
	struct rds_ib_mr_pool *pool = ibmr->pool;

	if (ibmr->remap_count >= pool->fmr_attr.max_maps)
		llist_add(&ibmr->llnode, &pool->drop_list);
	else
		llist_add(&ibmr->llnode, &pool->free_list);
}
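/*
 * This is, in effect, the put-path counterpart of
 * rds_ib_try_reuse_ibmr() above: MRs that can still be remapped are
 * parked on the pool's free_list, while MRs that have hit
 * fmr_attr.max_maps go on the drop_list for the flush worker to
 * destroy.
 */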