// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/* Copyright (c) 2020 Mellanox Technologies Ltd. */

#include <linux/iova.h>
#include <linux/mlx5/driver.h>
#include "mlx5_vdpa.h"

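/*
 * Device resources for the mlx5 vDPA device: a user context (UCTX/uid),
 * protection domain, UAR page, doorbell mapping, transport domain and the
 * TIS/TIR/RQT/mkey objects hanging off them. Every firmware command below
 * follows the same pattern: fill the input mailbox with MLX5_SET(), run it
 * through an mlx5_cmd_exec*() helper, and read results back with MLX5_GET().
 */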
static int alloc_pd(struct mlx5_vdpa_dev *dev, u32 *pdn, u16 uid)
{
	struct mlx5_core_dev *mdev = dev->mdev;

	u32 out[MLX5_ST_SZ_DW(alloc_pd_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_pd_in)] = {};
	int err;

	MLX5_SET(alloc_pd_in, in, opcode, MLX5_CMD_OP_ALLOC_PD);
	MLX5_SET(alloc_pd_in, in, uid, uid);

	err = mlx5_cmd_exec_inout(mdev, alloc_pd, in, out);
	if (!err)
		*pdn = MLX5_GET(alloc_pd_out, out, pd);

	return err;
}

static int dealloc_pd(struct mlx5_vdpa_dev *dev, u32 pdn, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_pd_in)] = {};
	struct mlx5_core_dev *mdev = dev->mdev;

	MLX5_SET(dealloc_pd_in, in, opcode, MLX5_CMD_OP_DEALLOC_PD);
	MLX5_SET(dealloc_pd_in, in, pd, pdn);
	MLX5_SET(dealloc_pd_in, in, uid, uid);
	return mlx5_cmd_exec_in(mdev, dealloc_pd, in);
}

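/*
 * The null mkey is a special memory key exposed by firmware via the
 * QUERY_SPECIAL_CONTEXTS command; it is fetched once during resource
 * setup and cached in res->null_mkey for later use.
 */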
static int get_null_mkey(struct mlx5_vdpa_dev *dev, u32 *null_mkey)
{
	u32 out[MLX5_ST_SZ_DW(query_special_contexts_out)] = {};
	u32 in[MLX5_ST_SZ_DW(query_special_contexts_in)] = {};
	struct mlx5_core_dev *mdev = dev->mdev;
	int err;

	MLX5_SET(query_special_contexts_in, in, opcode, MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS);
	err = mlx5_cmd_exec_inout(mdev, query_special_contexts, in, out);
	if (!err)
		*null_mkey = MLX5_GET(query_special_contexts_out, out, null_mkey);
	return err;
}

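/*
 * All objects created on behalf of this vDPA device are stamped with the
 * uid of a dedicated user context, letting firmware account for them as a
 * group. When the umem_uid_0 capability is set, uid 0 can be used directly:
 * create_uctx() returns early and destroy_uctx() becomes a no-op.
 */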
static int create_uctx(struct mlx5_vdpa_dev *mvdev, u16 *uid)
{
	u32 out[MLX5_ST_SZ_DW(create_uctx_out)] = {};
	int inlen;
	void *in;
	int err;

	if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0))
		return 0;

	/* 0 means not supported */
	if (!MLX5_CAP_GEN(mvdev->mdev, log_max_uctx))
		return -EOPNOTSUPP;

	inlen = MLX5_ST_SZ_BYTES(create_uctx_in);
	in = kzalloc(inlen, GFP_KERNEL);
	if (!in)
		return -ENOMEM;

	MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
	MLX5_SET(create_uctx_in, in, uctx.cap, MLX5_UCTX_CAP_RAW_TX);

	err = mlx5_cmd_exec(mvdev->mdev, in, inlen, out, sizeof(out));
	kfree(in);
	if (!err)
		*uid = MLX5_GET(create_uctx_out, out, uid);

	return err;
}

static void destroy_uctx(struct mlx5_vdpa_dev *mvdev, u32 uid)
{
	u32 out[MLX5_ST_SZ_DW(destroy_uctx_out)] = {};
	u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {};

	if (!uid)
		return;

	MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
	MLX5_SET(destroy_uctx_in, in, uid, uid);

	mlx5_cmd_exec(mvdev->mdev, in, sizeof(in), out, sizeof(out));
}

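/*
 * Exported create/modify/destroy helpers for NIC transport objects. Each
 * one stamps the command with mvdev->res.uid so the object is owned by
 * this device's user context.
 */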
int mlx5_vdpa_create_tis(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tisn)
{
	u32 out[MLX5_ST_SZ_DW(create_tis_out)] = {};
	int err;

	MLX5_SET(create_tis_in, in, opcode, MLX5_CMD_OP_CREATE_TIS);
	MLX5_SET(create_tis_in, in, uid, mvdev->res.uid);
	err = mlx5_cmd_exec_inout(mvdev->mdev, create_tis, in, out);
	if (!err)
		*tisn = MLX5_GET(create_tis_out, out, tisn);

	return err;
}

void mlx5_vdpa_destroy_tis(struct mlx5_vdpa_dev *mvdev, u32 tisn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_tis_in)] = {};

	MLX5_SET(destroy_tis_in, in, opcode, MLX5_CMD_OP_DESTROY_TIS);
	MLX5_SET(destroy_tis_in, in, uid, mvdev->res.uid);
	MLX5_SET(destroy_tis_in, in, tisn, tisn);
	mlx5_cmd_exec_in(mvdev->mdev, destroy_tis, in);
}

int mlx5_vdpa_create_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 *rqtn)
{
	u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {};
	int err;

	MLX5_SET(create_rqt_in, in, opcode, MLX5_CMD_OP_CREATE_RQT);
	err = mlx5_cmd_exec(mvdev->mdev, in, inlen, out, sizeof(out));
	if (!err)
		*rqtn = MLX5_GET(create_rqt_out, out, rqtn);

	return err;
}

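/*
 * Note: the modify helper below reuses the create_rqt_out layout for its
 * output mailbox; presumably this is interchangeable with modify_rqt_out
 * since both carry only the generic status/syndrome header.
 */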
int mlx5_vdpa_modify_rqt(struct mlx5_vdpa_dev *mvdev, void *in, int inlen, u32 rqtn)
{
	u32 out[MLX5_ST_SZ_DW(create_rqt_out)] = {};

	MLX5_SET(modify_rqt_in, in, uid, mvdev->res.uid);
	MLX5_SET(modify_rqt_in, in, rqtn, rqtn);
	MLX5_SET(modify_rqt_in, in, opcode, MLX5_CMD_OP_MODIFY_RQT);
	return mlx5_cmd_exec(mvdev->mdev, in, inlen, out, sizeof(out));
}

void mlx5_vdpa_destroy_rqt(struct mlx5_vdpa_dev *mvdev, u32 rqtn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_rqt_in)] = {};

	MLX5_SET(destroy_rqt_in, in, opcode, MLX5_CMD_OP_DESTROY_RQT);
	MLX5_SET(destroy_rqt_in, in, uid, mvdev->res.uid);
	MLX5_SET(destroy_rqt_in, in, rqtn, rqtn);
	mlx5_cmd_exec_in(mvdev->mdev, destroy_rqt, in);
}

int mlx5_vdpa_create_tir(struct mlx5_vdpa_dev *mvdev, void *in, u32 *tirn)
{
	u32 out[MLX5_ST_SZ_DW(create_tir_out)] = {};
	int err;

	MLX5_SET(create_tir_in, in, opcode, MLX5_CMD_OP_CREATE_TIR);
	err = mlx5_cmd_exec_inout(mvdev->mdev, create_tir, in, out);
	if (!err)
		*tirn = MLX5_GET(create_tir_out, out, tirn);

	return err;
}

void mlx5_vdpa_destroy_tir(struct mlx5_vdpa_dev *mvdev, u32 tirn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_tir_in)] = {};

	MLX5_SET(destroy_tir_in, in, opcode, MLX5_CMD_OP_DESTROY_TIR);
	MLX5_SET(destroy_tir_in, in, uid, mvdev->res.uid);
	MLX5_SET(destroy_tir_in, in, tirn, tirn);
	mlx5_cmd_exec_in(mvdev->mdev, destroy_tir, in);
}

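/*
 * The transport domain groups the NIC transport objects (TIS/TIR) created
 * for this device; the caller stores the returned tdn and passes it when
 * building those objects.
 */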
int mlx5_vdpa_alloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 *tdn)
{
	u32 out[MLX5_ST_SZ_DW(alloc_transport_domain_out)] = {};
	u32 in[MLX5_ST_SZ_DW(alloc_transport_domain_in)] = {};
	int err;

	MLX5_SET(alloc_transport_domain_in, in, opcode, MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN);
	MLX5_SET(alloc_transport_domain_in, in, uid, mvdev->res.uid);

	err = mlx5_cmd_exec_inout(mvdev->mdev, alloc_transport_domain, in, out);
	if (!err)
		*tdn = MLX5_GET(alloc_transport_domain_out, out, transport_domain);

	return err;
}

void mlx5_vdpa_dealloc_transport_domain(struct mlx5_vdpa_dev *mvdev, u32 tdn)
{
	u32 in[MLX5_ST_SZ_DW(dealloc_transport_domain_in)] = {};

	MLX5_SET(dealloc_transport_domain_in, in, opcode, MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
	MLX5_SET(dealloc_transport_domain_in, in, uid, mvdev->res.uid);
	MLX5_SET(dealloc_transport_domain_in, in, transport_domain, tdn);
	mlx5_cmd_exec_in(mvdev->mdev, dealloc_transport_domain, in);
}

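/*
 * Firmware reports a created mkey by its 24-bit index; mlx5_idx_to_mkey()
 * folds that index into the 32-bit key handle used everywhere else (the
 * low byte holds the key's variant part).
 */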
int mlx5_vdpa_create_mkey(struct mlx5_vdpa_dev *mvdev, u32 *mkey, u32 *in,
			  int inlen)
{
	u32 lout[MLX5_ST_SZ_DW(create_mkey_out)] = {};
	u32 mkey_index;
	int err;

	MLX5_SET(create_mkey_in, in, opcode, MLX5_CMD_OP_CREATE_MKEY);
	MLX5_SET(create_mkey_in, in, uid, mvdev->res.uid);

	err = mlx5_cmd_exec(mvdev->mdev, in, inlen, lout, sizeof(lout));
	if (err)
		return err;

	mkey_index = MLX5_GET(create_mkey_out, lout, mkey_index);
	*mkey = mlx5_idx_to_mkey(mkey_index);
	return 0;
}

int mlx5_vdpa_destroy_mkey(struct mlx5_vdpa_dev *mvdev, u32 mkey)
{
	u32 in[MLX5_ST_SZ_DW(destroy_mkey_in)] = {};

	MLX5_SET(destroy_mkey_in, in, uid, mvdev->res.uid);
	MLX5_SET(destroy_mkey_in, in, opcode, MLX5_CMD_OP_DESTROY_MKEY);
	MLX5_SET(destroy_mkey_in, in, mkey_index, mlx5_mkey_to_idx(mkey));
	return mlx5_cmd_exec_in(mvdev->mdev, destroy_mkey, in);
}

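/*
 * The control virtqueue is driven in software through vringh; give it its
 * own vhost IOTLB (and the lock protecting it) so guest addresses can be
 * translated when the CVQ is processed.
 */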
static int init_ctrl_vq(struct mlx5_vdpa_dev *mvdev)
{
	mvdev->cvq.iotlb = vhost_iotlb_alloc(0, 0);
	if (!mvdev->cvq.iotlb)
		return -ENOMEM;

	spin_lock_init(&mvdev->cvq.iommu_lock);
	vringh_set_iotlb(&mvdev->cvq.vring, mvdev->cvq.iotlb, &mvdev->cvq.iommu_lock);

	return 0;
}

static void cleanup_ctrl_vq(struct mlx5_vdpa_dev *mvdev)
{
	vhost_iotlb_free(mvdev->cvq.iotlb);
}

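/*
 * One-time resource setup: UAR page, user context, PD, null mkey, the
 * ioremapped doorbell ("kick") page and the control VQ state, undone in
 * reverse order on failure via the error labels below.
 */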
int mlx5_vdpa_alloc_resources(struct mlx5_vdpa_dev *mvdev)
{
	u64 offset = MLX5_CAP64_DEV_VDPA_EMULATION(mvdev->mdev, doorbell_bar_offset);
	struct mlx5_vdpa_resources *res = &mvdev->res;
	struct mlx5_core_dev *mdev = mvdev->mdev;
	u64 kick_addr;
	int err;

	if (res->valid) {
		mlx5_vdpa_warn(mvdev, "resources already allocated\n");
		return -EINVAL;
	}
	res->uar = mlx5_get_uars_page(mdev);
	if (IS_ERR(res->uar)) {
		err = PTR_ERR(res->uar);
		goto err_uars;
	}

	err = create_uctx(mvdev, &res->uid);
	if (err)
		goto err_uctx;

	err = alloc_pd(mvdev, &res->pdn, res->uid);
	if (err)
		goto err_pd;

	err = get_null_mkey(mvdev, &res->null_mkey);
	if (err)
		goto err_mkey;

	kick_addr = mdev->bar_addr + offset;
	res->phys_kick_addr = kick_addr;

	res->kick_addr = ioremap(kick_addr, PAGE_SIZE);
	if (!res->kick_addr) {
		err = -ENOMEM;
		goto err_mkey;
	}

	err = init_ctrl_vq(mvdev);
	if (err)
		goto err_ctrl;

	res->valid = true;

	return 0;

err_ctrl:
	iounmap(res->kick_addr);
err_mkey:
	dealloc_pd(mvdev, res->pdn, res->uid);
err_pd:
	destroy_uctx(mvdev, res->uid);
err_uctx:
	mlx5_put_uars_page(mdev, res->uar);
err_uars:
	return err;
}

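/* Teardown mirrors mlx5_vdpa_alloc_resources() in reverse order. */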
void mlx5_vdpa_free_resources(struct mlx5_vdpa_dev *mvdev)
{
	struct mlx5_vdpa_resources *res = &mvdev->res;

	if (!res->valid)
		return;

	cleanup_ctrl_vq(mvdev);
	iounmap(res->kick_addr);
	res->kick_addr = NULL;
	dealloc_pd(mvdev, res->pdn, res->uid);
	destroy_uctx(mvdev, res->uid);
	mlx5_put_uars_page(mvdev->mdev, res->uar);
	res->valid = false;
}

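/*
 * Async firmware command batching. Commands are posted through the
 * device's async command context; each completion fires
 * virtqueue_cmd_callback(), which records the per-command status and
 * signals the command's completion object.
 */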
static void virtqueue_cmd_callback(int status, struct mlx5_async_work *context)
{
	struct mlx5_vdpa_async_cmd *cmd =
		container_of(context, struct mlx5_vdpa_async_cmd, cb_work);

	cmd->err = mlx5_cmd_check(context->ctx->dev, status, cmd->in, cmd->out);
	complete(&cmd->cmd_done);
}

static int issue_async_cmd(struct mlx5_vdpa_dev *mvdev,
			   struct mlx5_vdpa_async_cmd *cmds,
			   int issued,
			   int *completed)
{
	struct mlx5_vdpa_async_cmd *cmd = &cmds[issued];
	int err;

retry:
	err = mlx5_cmd_exec_cb(&mvdev->async_ctx,
			       cmd->in, cmd->inlen,
			       cmd->out, cmd->outlen,
			       virtqueue_cmd_callback,
			       &cmd->cb_work);
	if (err == -EBUSY) {
		if (*completed < issued) {
			/* Throttled by own commands: wait for oldest completion. */
			wait_for_completion(&cmds[*completed].cmd_done);
			(*completed)++;

			goto retry;
		} else {
			/* Throttled by external commands: switch to sync api. */
			err = mlx5_cmd_exec(mvdev->mdev,
					    cmd->in, cmd->inlen,
					    cmd->out, cmd->outlen);
			if (!err)
				(*completed)++;
		}
	}

	return err;
}

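/*
 * Issue an array of commands, waiting for stragglers at the end. If
 * issuing fails midway, already-issued commands are still waited for
 * before the error is returned; callers should check each cmds[i].err
 * for the per-command status.
 */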
int mlx5_vdpa_exec_async_cmds(struct mlx5_vdpa_dev *mvdev,
			      struct mlx5_vdpa_async_cmd *cmds,
			      int num_cmds)
{
	int completed = 0;
	int issued = 0;
	int err = 0;

	for (int i = 0; i < num_cmds; i++)
		init_completion(&cmds[i].cmd_done);

	while (issued < num_cmds) {
		err = issue_async_cmd(mvdev, cmds, issued, &completed);
		if (err) {
			mlx5_vdpa_err(mvdev, "error issuing command %d of %d: %d\n",
				      issued, num_cmds, err);
			break;
		}

		issued++;
	}

	while (completed < issued)
		wait_for_completion(&cmds[completed++].cmd_done);

	return err;
}
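
/*
 * Usage sketch (illustrative only; NUM_CMDS and the buffer setup are
 * hypothetical, not taken from this file):
 *
 *	struct mlx5_vdpa_async_cmd cmds[NUM_CMDS] = {};
 *
 *	// point each cmds[i].in/inlen and cmds[i].out/outlen at
 *	// preallocated command mailboxes, then:
 *	err = mlx5_vdpa_exec_async_cmds(mvdev, cmds, NUM_CMDS);
 *	for (int i = 0; i < NUM_CMDS; i++)
 *		if (cmds[i].err)
 *			; // handle per-command failure
 *
 * On return every issued command has completed, so the out mailboxes and
 * cmds[i].err are safe to inspect.
 */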