// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd. */

#include <linux/vhost_types.h>
#include <linux/vdpa.h>
#include <linux/gcd.h>
#include <linux/string.h>
#include <linux/mlx5/qp.h>

#include "mlx5_vdpa.h"
/* DIV_ROUND_UP where the divider is a power of 2 given by its log base 2 value */
#define MLX5_DIV_ROUND_UP_POW2(_n, _s) \
({ \
	u64 __s = _s; \
	u64 _res; \
	_res = (((_n) + (1 << (__s)) - 1) >> (__s)); \
	_res; \
})
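/* For example, MLX5_DIV_ROUND_UP_POW2(4097, 12) evaluates to 2: a 4097 byte
 * range needs two 4 KiB (1 << 12) pages.
 */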
static int get_octo_len(u64 len, int page_shift)
{
	u64 page_size = 1ULL << page_shift;
	int npages;

	npages = ALIGN(len, page_size) >> page_shift;
	return (npages + 1) / 2;
}
static void mlx5_set_access_mode(void *mkc, int mode)
{
	MLX5_SET(mkc, mkc, access_mode_1_0, mode & 0x3);
	MLX5_SET(mkc, mkc, access_mode_4_2, mode >> 2);
}
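/* Walk the DMA-mapped scatterlist and emit one MTT entry per 2^log_size chunk
 * of each DMA segment.
 */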
static void populate_mtts(struct mlx5_vdpa_direct_mr *mr, __be64 *mtt)
{
	struct scatterlist *sg;
	int nsg = mr->nsg;
	u64 dma_addr;
	u64 dma_len;
	int j = 0;
	int i;

	for_each_sg(mr->sg_head.sgl, sg, mr->nent, i) {
		for (dma_addr = sg_dma_address(sg), dma_len = sg_dma_len(sg);
		     nsg && dma_len;
		     nsg--, dma_addr += BIT(mr->log_size), dma_len -= BIT(mr->log_size))
			mtt[j++] = cpu_to_be64(dma_addr);
	}
}
struct mlx5_create_mkey_mem {
	u8 out[MLX5_ST_SZ_BYTES(create_mkey_out)];
	u8 in[MLX5_ST_SZ_BYTES(create_mkey_in)];
	__be64 mtt[];
};
struct mlx5_destroy_mkey_mem {
	u8 out[MLX5_ST_SZ_BYTES(destroy_mkey_out)];
	u8 in[MLX5_ST_SZ_BYTES(destroy_mkey_in)];
};
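/* Fill a CREATE_MKEY command that registers one direct (MTT-based) mkey
 * covering a single contiguous range of the guest address space.
 */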
static void fill_create_direct_mr(struct mlx5_vdpa_dev *mvdev,
				  struct mlx5_vdpa_direct_mr *mr,
				  struct mlx5_create_mkey_mem *mem)
{
	void *in = &mem->in;
	void *mkc;

	MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, lw, !!(mr->perm & VHOST_MAP_WO));
	MLX5_SET(mkc, mkc, lr, !!(mr->perm & VHOST_MAP_RO));
	mlx5_set_access_mode(mkc, MLX5_MKC_ACCESS_MODE_MTT);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, mvdev->res.pdn);
	MLX5_SET64(mkc, mkc, start_addr, mr->offset);
	MLX5_SET64(mkc, mkc, len, mr->end - mr->start);
	MLX5_SET(mkc, mkc, log_page_size, mr->log_size);
	MLX5_SET(mkc, mkc, translations_octword_size,
		 get_octo_len(mr->end - mr->start, mr->log_size));
	MLX5_SET(create_mkey_in, in, translations_octword_actual_size,
		 get_octo_len(mr->end - mr->start, mr->log_size));
	populate_mtts(mr, MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt));

	MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
	MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
}
static void create_direct_mr_end(struct mlx5_vdpa_dev *mvdev,
				 struct mlx5_vdpa_direct_mr *mr,
				 struct mlx5_create_mkey_mem *mem)
{
	u32 mkey_index = MLX5_GET(create_mkey_out, mem->out, mkey_index);

	mr->mr = mlx5_idx_to_mkey(mkey_index);
}
static void fill_destroy_direct_mr(struct mlx5_vdpa_dev *mvdev,
				   struct mlx5_vdpa_direct_mr *mr,
				   struct mlx5_destroy_mkey_mem *mem)
{
	void *in = &mem->in;

	MLX5_SET(destroy_mkey_in, in, uid, mvdev->res.uid);
	MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
	MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mr->mr));
}
static void destroy_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
{
	if (!mr->mr)
		return;

	mlx5_vdpa_destroy_mkey(mvdev, mr->mr);
}
static u64 map_start(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
{
	return max_t(u64, map->start, mr->start);
}

static u64 map_end(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
{
	return min_t(u64, map->last + 1, mr->end);
}

static u64 maplen(struct vhost_iotlb_map *map, struct mlx5_vdpa_direct_mr *mr)
{
	return map_end(map, mr) - map_start(map, mr);
}
#define MLX5_VDPA_INVALID_START_ADDR ((u64)-1)
#define MLX5_VDPA_INVALID_LEN ((u64)-1)
static u64 indir_start_addr(struct mlx5_vdpa_mr *mkey)
{
	struct mlx5_vdpa_direct_mr *s;

	s = list_first_entry_or_null(&mkey->head, struct mlx5_vdpa_direct_mr, list);
	if (!s)
		return MLX5_VDPA_INVALID_START_ADDR;

	return s->start;
}
static u64 indir_len(struct mlx5_vdpa_mr *mkey)
{
	struct mlx5_vdpa_direct_mr *s;
	struct mlx5_vdpa_direct_mr *e;

	s = list_first_entry_or_null(&mkey->head, struct mlx5_vdpa_direct_mr, list);
	if (!s)
		return MLX5_VDPA_INVALID_LEN;

	e = list_last_entry(&mkey->head, struct mlx5_vdpa_direct_mr, list);

	return e->end - s->start;
}
#define LOG_MAX_KLM_SIZE 30
#define MAX_KLM_SIZE BIT(LOG_MAX_KLM_SIZE)
static u32 klm_bcount(u64 size)
{
	return (u32)size;
}
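/* Build the KLM array of the indirect mkey: each direct mkey becomes one KLM
 * entry, and holes between consecutive ranges are covered with the null mkey.
 */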
static void fill_indir(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mkey, void *in)
{
	struct mlx5_vdpa_direct_mr *dmr;
	struct mlx5_klm *klmarr;
	struct mlx5_klm *klm;
	bool first = true;
	u64 preve;
	int i;

	klmarr = MLX5_ADDR_OF(create_mkey_in, in, klm_pas_mtt);
	i = 0;
	list_for_each_entry(dmr, &mkey->head, list) {
again:
		klm = &klmarr[i++];
		if (first) {
			preve = dmr->start;
			first = false;
		}

		if (preve == dmr->start) {
			klm->key = cpu_to_be32(dmr->mr);
			klm->bcount = cpu_to_be32(klm_bcount(dmr->end - dmr->start));
			preve = dmr->end;
		} else {
			klm->key = cpu_to_be32(mvdev->res.null_mkey);
			klm->bcount = cpu_to_be32(klm_bcount(dmr->start - preve));
			preve = dmr->start;
			goto again;
		}
	}
}
static int klm_byte_size(int nklms)
{
	return 16 * ALIGN(nklms, 4);
}
#define MLX5_VDPA_MTT_ALIGN 16
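/* Issue the CREATE_MKEY commands for all direct mkeys of this memory region
 * asynchronously and collect the resulting mkey indexes.
 */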
static int create_direct_keys(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
	struct mlx5_vdpa_async_cmd *cmds;
	struct mlx5_vdpa_direct_mr *dmr;
	int err = 0;
	int i = 0;

	cmds = kvcalloc(mr->num_directs, sizeof(*cmds), GFP_KERNEL);
	if (!cmds)
		return -ENOMEM;

	list_for_each_entry(dmr, &mr->head, list) {
		struct mlx5_create_mkey_mem *cmd_mem;
		int mttlen, mttcount;

		mttlen = roundup(MLX5_ST_SZ_BYTES(mtt) * dmr->nsg, MLX5_VDPA_MTT_ALIGN);
		mttcount = mttlen / sizeof(cmd_mem->mtt[0]);
		cmd_mem = kvcalloc(1, struct_size(cmd_mem, mtt, mttcount), GFP_KERNEL);
		if (!cmd_mem) {
			err = -ENOMEM;
			goto done;
		}

		cmds[i].out = cmd_mem->out;
		cmds[i].outlen = sizeof(cmd_mem->out);
		cmds[i].in = cmd_mem->in;
		cmds[i].inlen = struct_size(cmd_mem, mtt, mttcount);

		fill_create_direct_mr(mvdev, dmr, cmd_mem);

		i++;
	}

	err = mlx5_vdpa_exec_async_cmds(mvdev, cmds, mr->num_directs);
	if (err) {
		mlx5_vdpa_err(mvdev, "error issuing MTT mkey creation for direct mrs: %d\n", err);
		goto done;
	}

	i = 0;
	list_for_each_entry(dmr, &mr->head, list) {
		struct mlx5_vdpa_async_cmd *cmd = &cmds[i++];
		struct mlx5_create_mkey_mem *cmd_mem;

		cmd_mem = container_of(cmd->out, struct mlx5_create_mkey_mem, out);

		if (!cmd->err) {
			create_direct_mr_end(mvdev, dmr, cmd_mem);
		} else {
			err = err ? err : cmd->err;
			mlx5_vdpa_err(mvdev, "error creating MTT mkey [0x%llx, 0x%llx]: %d\n",
				      dmr->start, dmr->end, cmd->err);
		}
	}

done:
	for (i = i - 1; i >= 0; i--) {
		struct mlx5_create_mkey_mem *cmd_mem;

		cmd_mem = container_of(cmds[i].out, struct mlx5_create_mkey_mem, out);
		kvfree(cmd_mem);
	}

	kvfree(cmds);
	return err;
}
DEFINE_FREE(free_cmds, struct mlx5_vdpa_async_cmd *, kvfree(_T))
DEFINE_FREE(free_cmd_mem, struct mlx5_destroy_mkey_mem *, kvfree(_T))
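/* Issue DESTROY_MKEY commands for all direct mkeys of this memory region
 * asynchronously.
 */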
static int destroy_direct_keys(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
	struct mlx5_destroy_mkey_mem *cmd_mem __free(free_cmd_mem) = NULL;
	struct mlx5_vdpa_async_cmd *cmds __free(free_cmds) = NULL;
	struct mlx5_vdpa_direct_mr *dmr;
	int err = 0;
	int i = 0;

	cmds = kvcalloc(mr->num_directs, sizeof(*cmds), GFP_KERNEL);
	cmd_mem = kvcalloc(mr->num_directs, sizeof(*cmd_mem), GFP_KERNEL);
	if (!cmds || !cmd_mem)
		return -ENOMEM;

	list_for_each_entry(dmr, &mr->head, list) {
		cmds[i].out = cmd_mem[i].out;
		cmds[i].outlen = sizeof(cmd_mem[i].out);
		cmds[i].in = cmd_mem[i].in;
		cmds[i].inlen = sizeof(cmd_mem[i].in);
		fill_destroy_direct_mr(mvdev, dmr, &cmd_mem[i]);
		i++;
	}

	err = mlx5_vdpa_exec_async_cmds(mvdev, cmds, mr->num_directs);
	if (err) {
		mlx5_vdpa_err(mvdev, "error issuing MTT mkey deletion for direct mrs: %d\n", err);
		return err;
	}

	i = 0;
	list_for_each_entry(dmr, &mr->head, list) {
		struct mlx5_vdpa_async_cmd *cmd = &cmds[i++];

		if (cmd->err) {
			err = err ? err : cmd->err;
			mlx5_vdpa_err(mvdev, "error deleting MTT mkey [0x%llx, 0x%llx]: %d\n",
				      dmr->start, dmr->end, cmd->err);
		}
	}

	return err;
}
static int create_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
	int inlen;
	void *mkc;
	void *in;
	int err;
	u64 start;
	u64 len;

	start = indir_start_addr(mr);
	len = indir_len(mr);
	if (start == MLX5_VDPA_INVALID_START_ADDR || len == MLX5_VDPA_INVALID_LEN)
		return -EINVAL;

	inlen = MLX5_ST_SZ_BYTES(create_mkey_in) + klm_byte_size(mr->num_klms);
	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	mlx5_set_access_mode(mkc, MLX5_MKC_ACCESS_MODE_KLMS);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);
	MLX5_SET(mkc, mkc, pd, mvdev->res.pdn);
	MLX5_SET64(mkc, mkc, start_addr, start);
	MLX5_SET64(mkc, mkc, len, len);
	MLX5_SET(mkc, mkc, translations_octword_size, klm_byte_size(mr->num_klms) / 16);
	MLX5_SET(create_mkey_in, in, translations_octword_actual_size, mr->num_klms);
	fill_indir(mvdev, mr, in);
	err = mlx5_vdpa_create_mkey(mvdev, &mr->mkey, in, inlen);
	kfree(in);
	return err;
}
static void destroy_indirect_key(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mkey)
{
	mlx5_vdpa_destroy_mkey(mvdev, mkey->mkey);
}
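/* Build and DMA-map a scatterlist for one direct region. The entry size is the
 * GCD of all map lengths in the range, so every IOTLB map can be expressed as
 * a whole number of equally sized MTT entries.
 */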
static int map_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr,
			 struct vhost_iotlb *iotlb)
{
	struct vhost_iotlb_map *map;
	unsigned long lgcd = 0;
	int log_entity_size;
	unsigned long size;
	int err;
	struct page *pg;
	unsigned int nsg;
	int sglen;
	u64 pa, offset;
	u64 paend;
	struct scatterlist *sg;
	struct device *dma = mvdev->vdev.dma_dev;

	for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
	     map; map = vhost_iotlb_itree_next(map, mr->start, mr->end - 1)) {
		size = maplen(map, mr);
		lgcd = gcd(lgcd, size);
	}
	log_entity_size = ilog2(lgcd);

	sglen = 1 << log_entity_size;
	nsg = MLX5_DIV_ROUND_UP_POW2(mr->end - mr->start, log_entity_size);

	err = sg_alloc_table(&mr->sg_head, nsg, GFP_KERNEL);
	if (err)
		return err;

	sg = mr->sg_head.sgl;
	for (map = vhost_iotlb_itree_first(iotlb, mr->start, mr->end - 1);
	     map; map = vhost_iotlb_itree_next(map, mr->start, mr->end - 1)) {
		offset = mr->start > map->start ? mr->start - map->start : 0;
		pa = map->addr + offset;
		paend = map->addr + offset + maplen(map, mr);
		for (; pa < paend; pa += sglen) {
			pg = pfn_to_page(__phys_to_pfn(pa));
			if (!sg) {
				mlx5_vdpa_warn(mvdev, "sg null. start 0x%llx, end 0x%llx\n",
					       map->start, map->last + 1);
				err = -ENOMEM;
				goto err_map;
			}
			sg_set_page(sg, pg, sglen, 0);
			sg = sg_next(sg);
		}
	}

	mr->log_size = log_entity_size;
	mr->nsg = nsg;
	mr->nent = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
	if (!mr->nent) {
		err = -ENOMEM;
		goto err_map;
	}

	return 0;

err_map:
	sg_free_table(&mr->sg_head);
	return err;
}
static void unmap_direct_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_direct_mr *mr)
{
	struct device *dma = mvdev->vdev.dma_dev;

	destroy_direct_mr(mvdev, mr);
	dma_unmap_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);
	sg_free_table(&mr->sg_head);
}
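/* Split a contiguous range with uniform permissions into direct MRs of at most
 * MAX_KLM_SIZE bytes each and map them.
 */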
static int add_direct_chain(struct mlx5_vdpa_dev *mvdev,
			    struct mlx5_vdpa_mr *mr,
			    u64 start,
			    u64 size,
			    u8 perm,
			    struct vhost_iotlb *iotlb)
{
	struct mlx5_vdpa_direct_mr *dmr;
	struct mlx5_vdpa_direct_mr *n;
	LIST_HEAD(tmp);
	u64 st;
	u64 sz;
	int err;

	st = start;
	while (size) {
		sz = (u32)min_t(u64, MAX_KLM_SIZE, size);
		dmr = kzalloc(sizeof(*dmr), GFP_KERNEL);
		if (!dmr) {
			err = -ENOMEM;
			goto err_alloc;
		}

		dmr->start = st;
		dmr->end = st + sz;
		dmr->perm = perm;
		err = map_direct_mr(mvdev, dmr, iotlb);
		if (err) {
			kfree(dmr);
			goto err_alloc;
		}

		list_add_tail(&dmr->list, &tmp);
		size -= sz;
		mr->num_directs++;
		mr->num_klms++;
		st += sz;
	}
	list_splice_tail(&tmp, &mr->head);
	return 0;

err_alloc:
	list_for_each_entry_safe(dmr, n, &mr->head, list) {
		list_del_init(&dmr->list);
		unmap_direct_mr(mvdev, dmr);
	}

	return err;
}
/* The iotlb pointer contains a list of maps. Go over the maps, possibly
 * merging mergeable maps, and create direct memory keys that provide the
 * device access to memory. The direct mkeys are then referred to by the
 * indirect memory key that provides access to the entire address space given
 * by iotlb.
 */
static int create_user_mr(struct mlx5_vdpa_dev *mvdev,
			  struct mlx5_vdpa_mr *mr,
			  struct vhost_iotlb *iotlb)
{
	struct mlx5_vdpa_direct_mr *dmr;
	struct mlx5_vdpa_direct_mr *n;
	struct vhost_iotlb_map *map;
	u32 pperm = U16_MAX;
	u64 last = U64_MAX;
	u64 ps = U64_MAX;
	u64 pe = U64_MAX;
	u64 start = 0;
	int err = 0;
	int nnuls;

	INIT_LIST_HEAD(&mr->head);
	for (map = vhost_iotlb_itree_first(iotlb, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		start = map->start;
		if (pe == map->start && pperm == map->perm) {
			pe = map->last + 1;
		} else {
			if (ps != U64_MAX) {
				if (pe < map->start) {
					/* We have a hole in the map. Check how
					 * many null keys are required to fill it.
					 */
					nnuls = MLX5_DIV_ROUND_UP_POW2(map->start - pe,
								       LOG_MAX_KLM_SIZE);
					mr->num_klms += nnuls;
				}
				err = add_direct_chain(mvdev, mr, ps, pe - ps, pperm, iotlb);
				if (err)
					goto err_chain;
			}
			ps = map->start;
			pe = map->last + 1;
			pperm = map->perm;
		}
	}
	err = add_direct_chain(mvdev, mr, ps, pe - ps, pperm, iotlb);
	if (err)
		goto err_chain;

	err = create_direct_keys(mvdev, mr);
	if (err)
		goto err_chain;

	/* Create the memory key that defines the guest's address space. This
	 * memory key refers to the direct keys that contain the MTT
	 * translations.
	 */
	err = create_indirect_key(mvdev, mr);
	if (err)
		goto err_chain;

	mr->user_mr = true;

	return 0;

err_chain:
	list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) {
		list_del_init(&dmr->list);
		unmap_direct_mr(mvdev, dmr);
	}
	return err;
}
static int create_dma_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
	int inlen = MLX5_ST_SZ_BYTES(create_mkey_in);
	void *mkc;
	u32 *in;
	int err;

	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	MLX5_SET(mkc, mkc, access_mode_1_0, MLX5_MKC_ACCESS_MODE_PA);
	MLX5_SET(mkc, mkc, length64, 1);
	MLX5_SET(mkc, mkc, lw, 1);
	MLX5_SET(mkc, mkc, lr, 1);
	MLX5_SET(mkc, mkc, pd, mvdev->res.pdn);
	MLX5_SET(mkc, mkc, qpn, 0xffffff);

	err = mlx5_vdpa_create_mkey(mvdev, &mr->mkey, in, inlen);
	kfree(in);
	return err;
}
static void destroy_dma_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
	mlx5_vdpa_destroy_mkey(mvdev, mr->mkey);
}
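/* Copy all ranges from src into dst. A NULL src means an identity mapping over
 * the whole address space.
 */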
static int dup_iotlb(struct vhost_iotlb *dst, struct vhost_iotlb *src)
{
	struct vhost_iotlb_map *map;
	u64 start = 0, last = ULLONG_MAX;
	int err;

	if (dst == src)
		return -EINVAL;

	if (!src) {
		err = vhost_iotlb_add_range(dst, start, last, start, VHOST_ACCESS_RW);
		return err;
	}

	for (map = vhost_iotlb_itree_first(src, start, last); map;
	     map = vhost_iotlb_itree_next(map, start, last)) {
		err = vhost_iotlb_add_range(dst, map->start, map->last,
					    map->addr, map->perm);
		if (err)
			return err;
	}
	return 0;
}
static void prune_iotlb(struct vhost_iotlb *iotlb)
{
	vhost_iotlb_del_range(iotlb, 0, ULLONG_MAX);
}
static void destroy_user_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
	struct mlx5_vdpa_direct_mr *dmr;
	struct mlx5_vdpa_direct_mr *n;

	destroy_indirect_key(mvdev, mr);
	destroy_direct_keys(mvdev, mr);
	list_for_each_entry_safe_reverse(dmr, n, &mr->head, list) {
		list_del_init(&dmr->list);
		unmap_direct_mr(mvdev, dmr);
	}
}
static void _mlx5_vdpa_destroy_mr(struct mlx5_vdpa_dev *mvdev, struct mlx5_vdpa_mr *mr)
{
	if (mr->user_mr)
		destroy_user_mr(mvdev, mr);
	else
		destroy_dma_mr(mvdev, mr);

	vhost_iotlb_free(mr->iotlb);

	list_del(&mr->mr_list);

	kfree(mr);
}
/* There can be multiple .set_map() operations in quick succession.
 * This large delay is a simple way to prevent the MR cleanup from blocking
 * .set_map() MR creation in this scenario.
 */
#define MLX5_VDPA_MR_GC_TRIGGER_MS 2000
static void mlx5_vdpa_mr_gc_handler(struct work_struct *work)
{
	struct mlx5_vdpa_mr_resources *mres;
	struct mlx5_vdpa_mr *mr, *tmp;
	struct mlx5_vdpa_dev *mvdev;

	mres = container_of(work, struct mlx5_vdpa_mr_resources, gc_dwork_ent.work);

	if (atomic_read(&mres->shutdown)) {
		mutex_lock(&mres->lock);
	} else if (!mutex_trylock(&mres->lock)) {
		queue_delayed_work(mres->wq_gc, &mres->gc_dwork_ent,
				   msecs_to_jiffies(MLX5_VDPA_MR_GC_TRIGGER_MS));
		return;
	}

	mvdev = container_of(mres, struct mlx5_vdpa_dev, mres);

	list_for_each_entry_safe(mr, tmp, &mres->mr_gc_list_head, mr_list) {
		_mlx5_vdpa_destroy_mr(mvdev, mr);
	}

	mutex_unlock(&mres->lock);
}
static void _mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
			      struct mlx5_vdpa_mr *mr)
{
	struct mlx5_vdpa_mr_resources *mres = &mvdev->mres;

	if (!mr)
		return;

	if (refcount_dec_and_test(&mr->refcount)) {
		list_move_tail(&mr->mr_list, &mres->mr_gc_list_head);
		queue_delayed_work(mres->wq_gc, &mres->gc_dwork_ent,
				   msecs_to_jiffies(MLX5_VDPA_MR_GC_TRIGGER_MS));
	}
}
void mlx5_vdpa_put_mr(struct mlx5_vdpa_dev *mvdev,
		      struct mlx5_vdpa_mr *mr)
{
	mutex_lock(&mvdev->mres.lock);
	_mlx5_vdpa_put_mr(mvdev, mr);
	mutex_unlock(&mvdev->mres.lock);
}
static void _mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
			      struct mlx5_vdpa_mr *mr)
{
	if (!mr)
		return;

	refcount_inc(&mr->refcount);
}
void mlx5_vdpa_get_mr(struct mlx5_vdpa_dev *mvdev,
		      struct mlx5_vdpa_mr *mr)
{
	mutex_lock(&mvdev->mres.lock);
	_mlx5_vdpa_get_mr(mvdev, mr);
	mutex_unlock(&mvdev->mres.lock);
}
void mlx5_vdpa_update_mr(struct mlx5_vdpa_dev *mvdev,
			 struct mlx5_vdpa_mr *new_mr,
			 unsigned int asid)
{
	struct mlx5_vdpa_mr *old_mr = mvdev->mres.mr[asid];

	mutex_lock(&mvdev->mres.lock);

	_mlx5_vdpa_put_mr(mvdev, old_mr);
	mvdev->mres.mr[asid] = new_mr;

	mutex_unlock(&mvdev->mres.lock);
}
static void mlx5_vdpa_show_mr_leaks(struct mlx5_vdpa_dev *mvdev)
{
	struct mlx5_vdpa_mr *mr;

	mutex_lock(&mvdev->mres.lock);

	list_for_each_entry(mr, &mvdev->mres.mr_list_head, mr_list) {

		mlx5_vdpa_warn(mvdev, "mkey still alive after resource delete: "
				      "mr: %p, mkey: 0x%x, refcount: %u\n",
				      mr, mr->mkey, refcount_read(&mr->refcount));
	}

	mutex_unlock(&mvdev->mres.lock);
}
void mlx5_vdpa_clean_mrs(struct mlx5_vdpa_dev *mvdev)
{
	if (!mvdev->res.valid)
		return;

	for (int i = 0; i < MLX5_VDPA_NUM_AS; i++)
		mlx5_vdpa_update_mr(mvdev, NULL, i);

	prune_iotlb(mvdev->cvq.iotlb);

	mlx5_vdpa_show_mr_leaks(mvdev);
}
static int _mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
				struct mlx5_vdpa_mr *mr,
				struct vhost_iotlb *iotlb)
{
	int err;

	if (iotlb)
		err = create_user_mr(mvdev, mr, iotlb);
	else
		err = create_dma_mr(mvdev, mr);

	if (err)
		return err;

	mr->iotlb = vhost_iotlb_alloc(0, 0);
	if (!mr->iotlb) {
		err = -ENOMEM;
		goto err_mr;
	}

	err = dup_iotlb(mr->iotlb, iotlb);
	if (err)
		goto err_iotlb;

	list_add_tail(&mr->mr_list, &mvdev->mres.mr_list_head);

	return 0;

err_iotlb:
	vhost_iotlb_free(mr->iotlb);

err_mr:
	if (iotlb)
		destroy_user_mr(mvdev, mr);
	else
		destroy_dma_mr(mvdev, mr);

	return err;
}
struct mlx5_vdpa_mr *mlx5_vdpa_create_mr(struct mlx5_vdpa_dev *mvdev,
					 struct vhost_iotlb *iotlb)
{
	struct mlx5_vdpa_mr *mr;
	int err;

	mr = kzalloc(sizeof(*mr), GFP_KERNEL);
	if (!mr)
		return ERR_PTR(-ENOMEM);

	mutex_lock(&mvdev->mres.lock);
	err = _mlx5_vdpa_create_mr(mvdev, mr, iotlb);
	mutex_unlock(&mvdev->mres.lock);

	if (err)
		goto out_err;

	refcount_set(&mr->refcount, 1);

	return mr;

out_err:
	kfree(mr);
	return ERR_PTR(err);
}
int mlx5_vdpa_update_cvq_iotlb(struct mlx5_vdpa_dev *mvdev,
			       struct vhost_iotlb *iotlb,
			       unsigned int asid)
{
	int err;

	if (mvdev->mres.group2asid[MLX5_VDPA_CVQ_GROUP] != asid)
		return 0;

	spin_lock(&mvdev->cvq.iommu_lock);

	prune_iotlb(mvdev->cvq.iotlb);
	err = dup_iotlb(mvdev->cvq.iotlb, iotlb);

	spin_unlock(&mvdev->cvq.iommu_lock);

	return err;
}
int mlx5_vdpa_create_dma_mr(struct mlx5_vdpa_dev *mvdev)
{
	struct mlx5_vdpa_mr *mr;

	mr = mlx5_vdpa_create_mr(mvdev, NULL);
	if (IS_ERR(mr))
		return PTR_ERR(mr);

	mlx5_vdpa_update_mr(mvdev, mr, 0);

	return mlx5_vdpa_update_cvq_iotlb(mvdev, NULL, 0);
}
int mlx5_vdpa_reset_mr(struct mlx5_vdpa_dev *mvdev, unsigned int asid)
{
	if (asid >= MLX5_VDPA_NUM_AS)
		return -EINVAL;

	mlx5_vdpa_update_mr(mvdev, NULL, asid);

	if (asid == 0 && MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) {
		if (mlx5_vdpa_create_dma_mr(mvdev))
			mlx5_vdpa_warn(mvdev, "create DMA MR failed\n");
	} else {
		mlx5_vdpa_update_cvq_iotlb(mvdev, NULL, asid);
	}

	return 0;
}
int mlx5_vdpa_init_mr_resources(struct mlx5_vdpa_dev *mvdev)
{
	struct mlx5_vdpa_mr_resources *mres = &mvdev->mres;

	mres->wq_gc = create_singlethread_workqueue("mlx5_vdpa_mr_gc");
	if (!mres->wq_gc)
		return -ENOMEM;

	INIT_DELAYED_WORK(&mres->gc_dwork_ent, mlx5_vdpa_mr_gc_handler);

	mutex_init(&mres->lock);

	INIT_LIST_HEAD(&mres->mr_list_head);
	INIT_LIST_HEAD(&mres->mr_gc_list_head);

	return 0;
}
void mlx5_vdpa_destroy_mr_resources(struct mlx5_vdpa_dev *mvdev)
{
	struct mlx5_vdpa_mr_resources *mres = &mvdev->mres;

	atomic_set(&mres->shutdown, 1);

	flush_delayed_work(&mres->gc_dwork_ent);
	destroy_workqueue(mres->wq_gc);

	mutex_destroy(&mres->lock);
}