// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018, Mellanox Technologies inc. All rights reserved.
 */

#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_std_types.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include "mlx5_ib.h"
#include <linux/xarray.h>

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>

static void dispatch_event_fd(struct list_head *fd_list, const void *data);

enum devx_obj_flags {
	DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0,
	DEVX_OBJ_FLAGS_DCT = 1 << 1,
	DEVX_OBJ_FLAGS_CQ = 1 << 2,
};

struct devx_async_data {
	struct mlx5_ib_dev *mdev;
	struct list_head list;
	struct devx_async_cmd_event_file *ev_file;
	struct mlx5_async_work cb_work;
	u16 cmd_out_len;
	/* must be last field in this structure */
	struct mlx5_ib_uapi_devx_async_cmd_hdr hdr;
};

struct devx_async_event_data {
	struct list_head list; /* headed in ev_file->event_list */
	struct mlx5_ib_uapi_devx_async_event_hdr hdr;
};

/* first level XA value data structure */
struct devx_event {
	struct xarray object_ids; /* second XA level, Key = object id */
	struct list_head unaffiliated_list;
};

/* second level XA value data structure */
struct devx_obj_event {
	struct rcu_head rcu;
	struct list_head obj_sub_list;
};

struct devx_event_subscription {
	struct list_head file_list; /* headed in ev_file->
				     * subscribed_events_list
				     */
	struct list_head xa_list; /* headed in devx_event->unaffiliated_list or
				   * devx_obj_event->obj_sub_list
				   */
	struct list_head obj_list; /* headed in devx_object */
	struct list_head event_list; /* headed in ev_file->event_list or in
				      * temp list via subscription
				      */

	u8 is_cleaned:1;
	u32 xa_key_level1;
	u32 xa_key_level2;
	struct rcu_head rcu;
	u64 cookie;
	struct devx_async_event_file *ev_file;
	struct eventfd_ctx *eventfd;
};

struct devx_async_event_file {
	struct ib_uobject uobj;
	/* Head of events that are subscribed to this FD */
	struct list_head subscribed_events_list;
	spinlock_t lock;
	wait_queue_head_t poll_wait;
	struct list_head event_list;
	struct mlx5_ib_dev *dev;
	u8 omit_data:1;
	u8 is_overflow_err:1;
	u8 is_destroyed:1;
};

#define MLX5_MAX_DESTROY_INBOX_SIZE_DW MLX5_ST_SZ_DW(delete_fte_in)
struct devx_obj {
	struct mlx5_ib_dev *ib_dev;
	u64 obj_id;
	u32 dinlen; /* destroy inbox length */
	u32 dinbox[MLX5_MAX_DESTROY_INBOX_SIZE_DW];
	u32 flags;
	union {
		struct mlx5_ib_devx_mr devx_mr;
		struct mlx5_core_dct core_dct;
		struct mlx5_core_cq core_cq;
		u32 flow_counter_bulk_size;
	};
	struct list_head event_sub; /* holds devx_event_subscription entries */
};

struct devx_umem {
	struct mlx5_core_dev *mdev;
	struct ib_umem *umem;
	u32 page_offset;
	int page_shift;
	int ncont;
	u32 dinlen;
	u32 dinbox[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)];
};

struct devx_umem_reg_cmd {
	void *in;
	u32 inlen;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
};

static struct mlx5_ib_ucontext *
devx_ufile2uctx(const struct uverbs_attr_bundle *attrs)
{
	return to_mucontext(ib_uverbs_get_ucontext(attrs));
}

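/*
 * Allocate a firmware user context (UCTX) on behalf of this verbs context
 * and return its uid. DEVX commands issued through the context are tagged
 * with this uid so firmware can enforce ownership of the objects it covers.
 */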
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
{
	u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	void *uctx;
	int err;
	u16 uid;
	u32 cap = 0;

	/* 0 means not supported */
	if (!MLX5_CAP_GEN(dev->mdev, log_max_uctx))
		return -EINVAL;

	uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);
	if (is_user && capable(CAP_NET_RAW) &&
	    (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX))
		cap |= MLX5_UCTX_CAP_RAW_TX;
	if (is_user && capable(CAP_SYS_RAWIO) &&
	    (MLX5_CAP_GEN(dev->mdev, uctx_cap) &
	     MLX5_UCTX_CAP_INTERNAL_DEV_RES))
		cap |= MLX5_UCTX_CAP_INTERNAL_DEV_RES;

	MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
	MLX5_SET(uctx, uctx, cap, cap);

	err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return uid;
}

void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};

	MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
	MLX5_SET(destroy_uctx_in, in, uid, uid);

	mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}

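/*
 * Decode a DEVX object's saved destroy mailbox to decide whether it can
 * serve as a flow steering destination (a TIR or a flow table) and, if so,
 * report the destination id and type to the caller.
 */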
bool mlx5_ib_devx_is_flow_dest(void *obj, int *dest_id, int *dest_type)
{
	struct devx_obj *devx_obj = obj;
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_DESTROY_TIR:
		*dest_type = MLX5_FLOW_DESTINATION_TYPE_TIR;
		*dest_id = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox,
				    obj_id);
		return true;

	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
		*dest_type = MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE;
		*dest_id = MLX5_GET(destroy_flow_table_in, devx_obj->dinbox,
				    table_id);
		return true;
	default:
		return false;
	}
}

bool mlx5_ib_devx_is_flow_counter(void *obj, u32 offset, u32 *counter_id)
{
	struct devx_obj *devx_obj = obj;
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, devx_obj->dinbox, opcode);

	if (opcode == MLX5_CMD_OP_DEALLOC_FLOW_COUNTER) {

		if (offset && offset >= devx_obj->flow_counter_bulk_size)
			return false;

		*counter_id = MLX5_GET(dealloc_flow_counter_in,
				       devx_obj->dinbox,
				       flow_counter_id);
		*counter_id += offset;
		return true;
	}

	return false;
}

static bool is_legacy_unaffiliated_event_num(u16 event_num)
{
	switch (event_num) {
	case MLX5_EVENT_TYPE_PORT_CHANGE:
		return true;
	default:
		return false;
	}
}

static bool is_legacy_obj_event_num(u16 event_num)
{
	switch (event_num) {
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
	case MLX5_EVENT_TYPE_CQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_DCT_DRAINED:
	case MLX5_EVENT_TYPE_COMP:
	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
	case MLX5_EVENT_TYPE_XRQ_ERROR:
		return true;
	default:
		return false;
	}
}

static u16 get_legacy_obj_type(u16 opcode)
{
	switch (opcode) {
	case MLX5_CMD_OP_CREATE_RQ:
		return MLX5_EVENT_QUEUE_TYPE_RQ;
	case MLX5_CMD_OP_CREATE_QP:
		return MLX5_EVENT_QUEUE_TYPE_QP;
	case MLX5_CMD_OP_CREATE_SQ:
		return MLX5_EVENT_QUEUE_TYPE_SQ;
	case MLX5_CMD_OP_CREATE_DCT:
		return MLX5_EVENT_QUEUE_TYPE_DCT;
	default:
		return 0;
	}
}

static u16 get_dec_obj_type(struct devx_obj *obj, u16 event_num)
{
	u16 opcode;

	opcode = (obj->obj_id >> 32) & 0xffff;

	if (is_legacy_obj_event_num(event_num))
		return get_legacy_obj_type(opcode);

	switch (opcode) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
		return (obj->obj_id >> 48);
	case MLX5_CMD_OP_CREATE_RQ:
		return MLX5_OBJ_TYPE_RQ;
	case MLX5_CMD_OP_CREATE_QP:
		return MLX5_OBJ_TYPE_QP;
	case MLX5_CMD_OP_CREATE_SQ:
		return MLX5_OBJ_TYPE_SQ;
	case MLX5_CMD_OP_CREATE_DCT:
		return MLX5_OBJ_TYPE_DCT;
	case MLX5_CMD_OP_CREATE_TIR:
		return MLX5_OBJ_TYPE_TIR;
	case MLX5_CMD_OP_CREATE_TIS:
		return MLX5_OBJ_TYPE_TIS;
	case MLX5_CMD_OP_CREATE_PSV:
		return MLX5_OBJ_TYPE_PSV;
	case MLX5_CMD_OP_CREATE_MKEY:
		return MLX5_OBJ_TYPE_MKEY;
	case MLX5_CMD_OP_CREATE_RMP:
		return MLX5_OBJ_TYPE_RMP;
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		return MLX5_OBJ_TYPE_XRC_SRQ;
	case MLX5_CMD_OP_CREATE_XRQ:
		return MLX5_OBJ_TYPE_XRQ;
	case MLX5_CMD_OP_CREATE_RQT:
		return MLX5_OBJ_TYPE_RQT;
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
		return MLX5_OBJ_TYPE_FLOW_COUNTER;
	case MLX5_CMD_OP_CREATE_CQ:
		return MLX5_OBJ_TYPE_CQ;
	default:
		return 0;
	}
}

static u16 get_event_obj_type(unsigned long event_type, struct mlx5_eqe *eqe)
{
	switch (event_type) {
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
		return eqe->data.qp_srq.type;
	case MLX5_EVENT_TYPE_CQ_ERROR:
	case MLX5_EVENT_TYPE_XRQ_ERROR:
		return 0;
	case MLX5_EVENT_TYPE_DCT_DRAINED:
	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
		return MLX5_EVENT_QUEUE_TYPE_DCT;
	default:
		return MLX5_GET(affiliated_event_header, &eqe->data, obj_type);
	}
}

static u32 get_dec_obj_id(u64 obj_id)
{
	return (obj_id & 0xffffffff);
}

/*
 * As the obj_id in the firmware is not globally unique, the object type
 * must be considered when checking for a valid object id.
 * For that, the opcode of the creator command is encoded as part of the
 * obj_id.
 */
static u64 get_enc_obj_id(u32 opcode, u32 obj_id)
{
	return ((u64)opcode << 32) | obj_id;
}

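/*
 * Example: a TIR created with tirn 0x15 is tracked as
 * ((u64)MLX5_CMD_OP_CREATE_TIR << 32) | 0x15. A later QUERY_TIR on the
 * same tirn re-derives the identical value in devx_get_obj_id() below,
 * so the comparison in devx_is_valid_obj_id() can only match an object
 * of the same type.
 */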
static u64 devx_get_obj_id(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
	u64 obj_id;

	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_GENERAL_OBJECT |
					MLX5_GET(general_obj_in_cmd_hdr, in,
						 obj_type) << 16,
					MLX5_GET(general_obj_in_cmd_hdr, in,
						 obj_id));
		break;
	case MLX5_CMD_OP_QUERY_MKEY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_MKEY,
					MLX5_GET(query_mkey_in, in,
						 mkey_index));
		break;
	case MLX5_CMD_OP_QUERY_CQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
					MLX5_GET(query_cq_in, in, cqn));
		break;
	case MLX5_CMD_OP_MODIFY_CQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
					MLX5_GET(modify_cq_in, in, cqn));
		break;
	case MLX5_CMD_OP_QUERY_SQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					MLX5_GET(query_sq_in, in, sqn));
		break;
	case MLX5_CMD_OP_MODIFY_SQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					MLX5_GET(modify_sq_in, in, sqn));
		break;
	case MLX5_CMD_OP_QUERY_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(query_rq_in, in, rqn));
		break;
	case MLX5_CMD_OP_MODIFY_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(modify_rq_in, in, rqn));
		break;
	case MLX5_CMD_OP_QUERY_RMP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
					MLX5_GET(query_rmp_in, in, rmpn));
		break;
	case MLX5_CMD_OP_MODIFY_RMP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
					MLX5_GET(modify_rmp_in, in, rmpn));
		break;
	case MLX5_CMD_OP_QUERY_RQT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
					MLX5_GET(query_rqt_in, in, rqtn));
		break;
	case MLX5_CMD_OP_MODIFY_RQT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
					MLX5_GET(modify_rqt_in, in, rqtn));
		break;
	case MLX5_CMD_OP_QUERY_TIR:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					MLX5_GET(query_tir_in, in, tirn));
		break;
	case MLX5_CMD_OP_MODIFY_TIR:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					MLX5_GET(modify_tir_in, in, tirn));
		break;
	case MLX5_CMD_OP_QUERY_TIS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					MLX5_GET(query_tis_in, in, tisn));
		break;
	case MLX5_CMD_OP_MODIFY_TIS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					MLX5_GET(modify_tis_in, in, tisn));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
					MLX5_GET(query_flow_table_in, in,
						 table_id));
		break;
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
					MLX5_GET(modify_flow_table_in, in,
						 table_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_GROUP,
					MLX5_GET(query_flow_group_in, in,
						 group_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
					MLX5_GET(query_fte_in, in,
						 flow_index));
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
					MLX5_GET(set_fte_in, in, flow_index));
		break;
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_Q_COUNTER,
					MLX5_GET(query_q_counter_in, in,
						 counter_set_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_FLOW_COUNTER,
					MLX5_GET(query_flow_counter_in, in,
						 flow_counter_id));
		break;
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT,
					MLX5_GET(general_obj_in_cmd_hdr, in,
						 obj_id));
		break;
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
					MLX5_GET(query_scheduling_element_in,
						 in, scheduling_element_id));
		break;
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
					MLX5_GET(modify_scheduling_element_in,
						 in, scheduling_element_id));
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT,
					MLX5_GET(add_vxlan_udp_dport_in, in,
						 vxlan_udp_port));
		break;
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
					MLX5_GET(query_l2_table_entry_in, in,
						 table_index));
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
					MLX5_GET(set_l2_table_entry_in, in,
						 table_index));
		break;
	case MLX5_CMD_OP_QUERY_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(query_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RST2INIT_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rst2init_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(init2rtr_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rtr2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rts2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_SQERR2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(sqerr2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_2ERR_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(qp_2err_in, in, qpn));
		break;
	case MLX5_CMD_OP_2RST_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(qp_2rst_in, in, qpn));
		break;
	case MLX5_CMD_OP_QUERY_DCT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					MLX5_GET(query_dct_in, in, dctn));
		break;
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
					MLX5_GET(query_xrq_in, in, xrqn));
		break;
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
					MLX5_GET(query_xrc_srq_in, in,
						 xrc_srqn));
		break;
	case MLX5_CMD_OP_ARM_XRC_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
					MLX5_GET(arm_xrc_srq_in, in, xrc_srqn));
		break;
	case MLX5_CMD_OP_QUERY_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SRQ,
					MLX5_GET(query_srq_in, in, srqn));
		break;
	case MLX5_CMD_OP_ARM_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(arm_rq_in, in, srq_number));
		break;
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					MLX5_GET(drain_dct_in, in, dctn));
		break;
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
	case MLX5_CMD_OP_MODIFY_XRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
					MLX5_GET(arm_xrq_in, in, xrqn));
		break;
	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
		obj_id = get_enc_obj_id
				(MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT,
				 MLX5_GET(query_packet_reformat_context_in,
					  in, packet_reformat_id));
		break;
	default:
		obj_id = 0;
	}

	return obj_id;
}

static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
				 struct ib_uobject *uobj, const void *in)
{
	struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
	u64 obj_id = devx_get_obj_id(in);

	if (!obj_id)
		return false;

	switch (uobj_get_object_id(uobj)) {
	case UVERBS_OBJECT_CQ:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
				      to_mcq(uobj->object)->mcq.cqn) ==
				      obj_id;

	case UVERBS_OBJECT_SRQ:
	{
		struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq);
		u16 opcode;

		switch (srq->common.res) {
		case MLX5_RES_XSRQ:
			opcode = MLX5_CMD_OP_CREATE_XRC_SRQ;
			break;
		case MLX5_RES_XRQ:
			opcode = MLX5_CMD_OP_CREATE_XRQ;
			break;
		default:
			if (!dev->mdev->issi)
				opcode = MLX5_CMD_OP_CREATE_SRQ;
			else
				opcode = MLX5_CMD_OP_CREATE_RMP;
		}

		return get_enc_obj_id(opcode,
				      to_msrq(uobj->object)->msrq.srqn) ==
				      obj_id;
	}

	case UVERBS_OBJECT_QP:
	{
		struct mlx5_ib_qp *qp = to_mqp(uobj->object);
		enum ib_qp_type qp_type = qp->ibqp.qp_type;

		if (qp_type == IB_QPT_RAW_PACKET ||
		    (qp->flags & MLX5_IB_QP_UNDERLAY)) {
			struct mlx5_ib_raw_packet_qp *raw_packet_qp =
				&qp->raw_packet_qp;
			struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
			struct mlx5_ib_sq *sq = &raw_packet_qp->sq;

			return (get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					       rq->base.mqp.qpn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					       sq->base.mqp.qpn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					       rq->tirn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					       sq->tisn) == obj_id);
		}

		if (qp_type == MLX5_IB_QPT_DCT)
			return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					      qp->dct.mdct.mqp.qpn) == obj_id;

		return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
				      qp->ibqp.qp_num) == obj_id;
	}

	case UVERBS_OBJECT_WQ:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
				      to_mrwq(uobj->object)->core_qp.qpn) ==
				      obj_id;

	case UVERBS_OBJECT_RWQ_IND_TBL:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
				      to_mrwq_ind_table(uobj->object)->rqtn) ==
				      obj_id;

	case MLX5_IB_OBJECT_DEVX_OBJ:
		return ((struct devx_obj *)uobj->object)->obj_id == obj_id;

	default:
		return false;
	}
}

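/*
 * Mark the user-memory handles in a create/modify mailbox as valid so
 * firmware will accept the umem ids supplied from user space for the
 * object's buffers and doorbell records.
 */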
static void devx_set_umem_valid(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_CREATE_MKEY:
		MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
		break;
	case MLX5_CMD_OP_CREATE_CQ:
	{
		void *cqc;

		MLX5_SET(create_cq_in, in, cq_umem_valid, 1);
		cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
		MLX5_SET(cqc, cqc, dbr_umem_valid, 1);
		break;
	}
	case MLX5_CMD_OP_CREATE_QP:
	{
		void *qpc;

		qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
		MLX5_SET(qpc, qpc, dbr_umem_valid, 1);
		MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_RQ:
	{
		void *rqc, *wq;

		rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
		wq = MLX5_ADDR_OF(rqc, rqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_SQ:
	{
		void *sqc, *wq;

		sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
		wq = MLX5_ADDR_OF(sqc, sqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_MODIFY_CQ:
		MLX5_SET(modify_cq_in, in, cq_umem_valid, 1);
		break;

	case MLX5_CMD_OP_CREATE_RMP:
	{
		void *rmpc, *wq;

		rmpc = MLX5_ADDR_OF(create_rmp_in, in, ctx);
		wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_XRQ:
	{
		void *xrqc, *wq;

		xrqc = MLX5_ADDR_OF(create_xrq_in, in, xrq_context);
		wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}

	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	{
		void *xrc_srqc;

		MLX5_SET(create_xrc_srq_in, in, xrc_srq_umem_valid, 1);
		xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, in,
					xrc_srq_context_entry);
		MLX5_SET(xrc_srqc, xrc_srqc, dbr_umem_valid, 1);
		break;
	}

	default:
		return;
	}
}

static bool devx_is_obj_create_cmd(const void *in, u16 *opcode)
{
	*opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (*opcode) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_CREATE_XRQ:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_ALLOC_XRCD:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

		if (op_mod == 0)
			return true;
		return false;
	}
	case MLX5_CMD_OP_CREATE_PSV:
	{
		u8 num_psv = MLX5_GET(create_psv_in, in, num_psv);

		if (num_psv == 1)
			return true;
		return false;
	}
	default:
		return false;
	}
}

static bool devx_is_obj_modify_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
	case MLX5_CMD_OP_MODIFY_XRQ:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

		if (op_mod == 1)
			return true;
		return false;
	}
	default:
		return false;
	}
}

static bool devx_is_obj_query_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
		return true;
	default:
		return false;
	}
}

static bool devx_is_whitelist_cmd(void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
		return true;
	default:
		return false;
	}
}

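/*
 * Resolve the uid to tag on a command: whitelisted query commands may fall
 * back to the device-wide whitelist uid when the context has none of its
 * own; all other commands require the context's devx uid.
 */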
static int devx_get_uid(struct mlx5_ib_ucontext *c, void *cmd_in)
{
	if (devx_is_whitelist_cmd(cmd_in)) {
		struct mlx5_ib_dev *dev;

		if (c->devx_uid)
			return c->devx_uid;

		dev = to_mdev(c->ibucontext.device);
		if (dev->devx_whitelist_uid)
			return dev->devx_whitelist_uid;

		return -EOPNOTSUPP;
	}

	if (!c->devx_uid)
		return -EINVAL;

	return c->devx_uid;
}

static bool devx_is_general_cmd(void *in, struct mlx5_ib_dev *dev)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	/* Pass all cmds for vhca_tunnel as general; tracking is done in FW */
	if ((MLX5_CAP_GEN_64(dev->mdev, vhca_tunnel_commands) &&
	     MLX5_GET(general_obj_in_cmd_hdr, in, vhca_tunnel_id)) ||
	    (opcode >= MLX5_CMD_OP_GENERAL_START &&
	     opcode < MLX5_CMD_OP_GENERAL_END))
		return true;

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_VNIC_ENV:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_QUERY_LAG:
		return true;
	default:
		return false;
	}
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	int user_vector;
	int dev_eqn;
	unsigned int irqn;
	int err;

	if (uverbs_copy_from(&user_vector, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
		return -EFAULT;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
	if (err < 0)
		return err;

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			   &dev_eqn, sizeof(dev_eqn)))
		return -EFAULT;

	return 0;
}

/*
 * Security note:
 * The hardware protection mechanism works like this: Each device object that
 * is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called uar_page in
 * the device specification manual) upon its creation. Then upon doorbell,
 * hardware fetches the object context for which the doorbell was rung, and
 * validates that the UAR through which the DB was rung matches the UAR ID
 * of the object.
 * If there is no match, the doorbell is silently ignored by the hardware. Of
 * course, the user cannot ring a doorbell on a UAR that was not mapped to it.
 * Now in devx, as the devx kernel does not manipulate the QP/SQ/CQ command
 * mailboxes (except tagging them with UID), we expose to the user its UAR
 * ID, so it can embed it in these objects in the expected specification
 * format. So the only thing the user can do is hurt itself by creating a
 * QP/SQ/CQ with a UAR ID other than its own, in which case other users
 * may ring a doorbell on its objects.
 * The consequence of that will be that another user can schedule a QP/SQ
 * of the buggy user for execution (just insert it into the hardware schedule
 * queue or arm its CQ for event generation); no further harm is expected.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	u32 user_idx;
	s32 dev_idx;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	if (uverbs_copy_from(&user_idx, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
		return -EFAULT;

	dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true);
	if (dev_idx < 0)
		return dev_idx;

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			   &dev_idx, sizeof(dev_idx)))
		return -EFAULT;

	return 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	void *cmd_in = uverbs_attr_get_alloced_ptr(
		attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
	void *cmd_out;
	int err;
	int uid;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	/* Only a whitelist of general HCA commands is allowed for this method. */
	if (!devx_is_general_cmd(cmd_in, dev))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec(dev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
			      cmd_out_len);
}

static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
				       u32 *dinlen,
				       u32 *obj_id)
{
	u16 obj_type = MLX5_GET(general_obj_in_cmd_hdr, in, obj_type);
	u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);

	*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	*dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);

	MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
	MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);

	switch (MLX5_GET(general_obj_in_cmd_hdr, in, opcode)) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
		MLX5_SET(general_obj_in_cmd_hdr, din, obj_type, obj_type);
		break;

	case MLX5_CMD_OP_CREATE_UMEM:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_UMEM);
		break;
	case MLX5_CMD_OP_CREATE_MKEY:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_MKEY);
		break;
	case MLX5_CMD_OP_CREATE_CQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
		break;
	case MLX5_CMD_OP_ALLOC_PD:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
		break;
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
		break;
	case MLX5_CMD_OP_CREATE_RMP:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
		break;
	case MLX5_CMD_OP_CREATE_SQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
		break;
	case MLX5_CMD_OP_CREATE_RQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
		break;
	case MLX5_CMD_OP_CREATE_RQT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
		break;
	case MLX5_CMD_OP_CREATE_TIR:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
		break;
	case MLX5_CMD_OP_CREATE_TIS:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
		break;
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
		*obj_id = MLX5_GET(create_flow_table_out, out, table_id);
		MLX5_SET(destroy_flow_table_in, din, other_vport,
			 MLX5_GET(create_flow_table_in, in, other_vport));
		MLX5_SET(destroy_flow_table_in, din, vport_number,
			 MLX5_GET(create_flow_table_in, in, vport_number));
		MLX5_SET(destroy_flow_table_in, din, table_type,
			 MLX5_GET(create_flow_table_in, in, table_type));
		MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
		*obj_id = MLX5_GET(create_flow_group_out, out, group_id);
		MLX5_SET(destroy_flow_group_in, din, other_vport,
			 MLX5_GET(create_flow_group_in, in, other_vport));
		MLX5_SET(destroy_flow_group_in, din, vport_number,
			 MLX5_GET(create_flow_group_in, in, vport_number));
		MLX5_SET(destroy_flow_group_in, din, table_type,
			 MLX5_GET(create_flow_group_in, in, table_type));
		MLX5_SET(destroy_flow_group_in, din, table_id,
			 MLX5_GET(create_flow_group_in, in, table_id));
		MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
		*obj_id = MLX5_GET(set_fte_in, in, flow_index);
		MLX5_SET(delete_fte_in, din, other_vport,
			 MLX5_GET(set_fte_in, in, other_vport));
		MLX5_SET(delete_fte_in, din, vport_number,
			 MLX5_GET(set_fte_in, in, vport_number));
		MLX5_SET(delete_fte_in, din, table_type,
			 MLX5_GET(set_fte_in, in, table_type));
		MLX5_SET(delete_fte_in, din, table_id,
			 MLX5_GET(set_fte_in, in, table_id));
		MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
		break;
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
		break;
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
		break;
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
		*obj_id = MLX5_GET(create_scheduling_element_out, out,
				   scheduling_element_id);
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_hierarchy,
			 MLX5_GET(create_scheduling_element_in, in,
				  scheduling_hierarchy));
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_element_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		*dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
		*obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
		MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
		*obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
		MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_CREATE_QP:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_QP);
		break;
	case MLX5_CMD_OP_CREATE_SRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
		break;
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_XRC_SRQ);
		break;
	case MLX5_CMD_OP_CREATE_DCT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
		break;
	case MLX5_CMD_OP_CREATE_XRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
		break;
	case MLX5_CMD_OP_ATTACH_TO_MCG:
		*dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
		MLX5_SET(detach_from_mcg_in, din, qpn,
			 MLX5_GET(attach_to_mcg_in, in, qpn));
		memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
		       MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
		       MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
		break;
	case MLX5_CMD_OP_ALLOC_XRCD:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
		break;
	case MLX5_CMD_OP_CREATE_PSV:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_PSV);
		MLX5_SET(destroy_psv_in, din, psvn,
			 MLX5_GET(create_psv_out, out, psv0_index));
		break;
	default:
		/* The entry must correspond to one of the commands accepted
		 * by devx_is_obj_create_cmd()
		 */
		WARN_ON(true);
		break;
	}
}

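/*
 * Record an indirect (KLM/KSM) mkey in the odp_mkeys xarray so the ODP
 * page-fault handler can resolve faults that reference it.
 */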
static int devx_handle_mkey_indirect(struct devx_obj *obj,
				     struct mlx5_ib_dev *dev,
				     void *in, void *out)
{
	struct mlx5_ib_devx_mr *devx_mr = &obj->devx_mr;
	struct mlx5_core_mkey *mkey;
	void *mkc;
	u8 key;

	mkey = &devx_mr->mmkey;
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	key = MLX5_GET(mkc, mkc, mkey_7_0);
	mkey->key = mlx5_idx_to_mkey(
			MLX5_GET(create_mkey_out, out, mkey_index)) | key;
	mkey->type = MLX5_MKEY_INDIRECT_DEVX;
	mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
	mkey->size = MLX5_GET64(mkc, mkc, len);
	mkey->pd = MLX5_GET(mkc, mkc, pd);
	devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);

	return xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mkey->key), mkey,
			       GFP_KERNEL));
}

static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
				   struct devx_obj *obj,
				   void *in, int in_len)
{
	int min_len = MLX5_BYTE_OFF(create_mkey_in, memory_key_mkey_entry) +
			MLX5_FLD_SZ_BYTES(create_mkey_in,
					  memory_key_mkey_entry);
	void *mkc;
	u8 access_mode;

	if (in_len < min_len)
		return -EINVAL;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	access_mode = MLX5_GET(mkc, mkc, access_mode_1_0);
	access_mode |= MLX5_GET(mkc, mkc, access_mode_4_2) << 2;

	if (access_mode == MLX5_MKC_ACCESS_MODE_KLMS ||
	    access_mode == MLX5_MKC_ACCESS_MODE_KSM) {
		if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
			obj->flags |= DEVX_OBJ_FLAGS_INDIRECT_MKEY;
		return 0;
	}

	MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
	return 0;
}

static void devx_cleanup_subscription(struct mlx5_ib_dev *dev,
				      struct devx_event_subscription *sub)
{
	struct devx_event *event;
	struct devx_obj_event *xa_val_level2;

	if (sub->is_cleaned)
		return;

	sub->is_cleaned = 1;
	list_del_rcu(&sub->xa_list);

	if (list_empty(&sub->obj_list))
		return;

	list_del_rcu(&sub->obj_list);
	/* check whether key level 1 for this obj_sub_list is empty */
	event = xa_load(&dev->devx_event_table.event_xa,
			sub->xa_key_level1);
	WARN_ON(!event);

	xa_val_level2 = xa_load(&event->object_ids, sub->xa_key_level2);
	if (list_empty(&xa_val_level2->obj_sub_list)) {
		xa_erase(&event->object_ids,
			 sub->xa_key_level2);
		kfree_rcu(xa_val_level2, rcu);
	}
}

static int devx_obj_cleanup(struct ib_uobject *uobject,
			    enum rdma_remove_reason why,
			    struct uverbs_attr_bundle *attrs)
{
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	struct mlx5_devx_event_table *devx_event_table;
	struct devx_obj *obj = uobject->object;
	struct devx_event_subscription *sub_entry, *tmp;
	struct mlx5_ib_dev *dev;
	int ret;

	dev = mlx5_udata_to_mdev(&attrs->driver_udata);
	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
		/*
		 * The pagefault_single_data_segment() does commands against
		 * the mmkey, so we must wait for that to stop before freeing
		 * the mkey, as another allocation could get the same mkey #.
		 */
		xa_erase(&obj->ib_dev->odp_mkeys,
			 mlx5_base_mkey(obj->devx_mr.mmkey.key));
		synchronize_srcu(&dev->odp_srcu);
	}

	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
		ret = mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
	else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
		ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
	else
		ret = mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox,
				    obj->dinlen, out, sizeof(out));
	if (ib_is_destroy_retryable(ret, why, uobject))
		return ret;

	devx_event_table = &dev->devx_event_table;

	mutex_lock(&devx_event_table->event_xa_lock);
	list_for_each_entry_safe(sub_entry, tmp, &obj->event_sub, obj_list)
		devx_cleanup_subscription(dev, sub_entry);
	mutex_unlock(&devx_event_table->event_xa_lock);

	kfree(obj);
	return ret;
}

static void devx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
{
	struct devx_obj *obj = container_of(mcq, struct devx_obj, core_cq);
	struct mlx5_devx_event_table *table;
	struct devx_event *event;
	struct devx_obj_event *obj_event;
	u32 obj_id = mcq->cqn;

	table = &obj->ib_dev->devx_event_table;
	rcu_read_lock();
	event = xa_load(&table->event_xa, MLX5_EVENT_TYPE_COMP);
	if (!event)
		goto out;

	obj_event = xa_load(&event->object_ids, obj_id);
	if (!obj_event)
		goto out;

	dispatch_event_fd(&obj_event->obj_sub_list, eqe);
out:
	rcu_read_unlock();
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
	int cmd_in_len = uverbs_attr_get_len(attrs,
					     MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	void *cmd_out;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	struct devx_obj *obj;
	u16 obj_type = 0;
	int err;
	int uid;
	u32 obj_id;
	u16 opcode;

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_create_cmd(cmd_in, &opcode))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	obj = kzalloc(sizeof(struct devx_obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	if (opcode == MLX5_CMD_OP_CREATE_MKEY) {
		err = devx_handle_mkey_create(dev, obj, cmd_in, cmd_in_len);
		if (err)
			goto obj_free;
	} else {
		devx_set_umem_valid(cmd_in);
	}

	if (opcode == MLX5_CMD_OP_CREATE_DCT) {
		obj->flags |= DEVX_OBJ_FLAGS_DCT;
		err = mlx5_core_create_dct(dev->mdev, &obj->core_dct,
					   cmd_in, cmd_in_len,
					   cmd_out, cmd_out_len);
	} else if (opcode == MLX5_CMD_OP_CREATE_CQ) {
		obj->flags |= DEVX_OBJ_FLAGS_CQ;
		obj->core_cq.comp = devx_cq_comp;
		err = mlx5_core_create_cq(dev->mdev, &obj->core_cq,
					  cmd_in, cmd_in_len, cmd_out,
					  cmd_out_len);
	} else {
		err = mlx5_cmd_exec(dev->mdev, cmd_in,
				    cmd_in_len,
				    cmd_out, cmd_out_len);
	}

	if (err)
		goto obj_free;

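	/*
	 * Flow counters are allocated in bulk; the flow_counter_bulk field
	 * counts allocation units of 128 counters (hence the 128UL
	 * multiplier below). The total is recorded so that the offset passed
	 * to mlx5_ib_devx_is_flow_counter() can be range-checked later.
	 */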
	if (opcode == MLX5_CMD_OP_ALLOC_FLOW_COUNTER) {
		u8 bulk = MLX5_GET(alloc_flow_counter_in,
				   cmd_in,
				   flow_counter_bulk);
		obj->flow_counter_bulk_size = 128UL * bulk;
	}

	uobj->object = obj;
	INIT_LIST_HEAD(&obj->event_sub);
	obj->ib_dev = dev;
	devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen,
				   &obj_id);
	WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
	if (err)
		goto obj_destroy;

	if (opcode == MLX5_CMD_OP_CREATE_GENERAL_OBJECT)
		obj_type = MLX5_GET(general_obj_in_cmd_hdr, cmd_in, obj_type);
	obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id);

	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
		err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out);
		if (err)
			goto obj_destroy;
	}
	return 0;

obj_destroy:
	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
		mlx5_core_destroy_dct(obj->ib_dev->mdev, &obj->core_dct);
	else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
		mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
	else
		mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox, obj->dinlen, out,
			      sizeof(out));
obj_free:
	kfree(obj);
	return err;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
							  MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
	void *cmd_out;
	int err;
	int uid;

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_modify_cmd(cmd_in))
		return -EINVAL;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	devx_set_umem_valid(cmd_in);

	err = mlx5_cmd_exec(mdev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
			      cmd_out, cmd_out_len);
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
							  MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	void *cmd_out;
	int err;
	int uid;
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_query_cmd(cmd_in))
		return -EINVAL;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec(mdev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
			      cmd_out, cmd_out_len);
}

struct devx_async_event_queue {
	spinlock_t lock;
	wait_queue_head_t poll_wait;
	struct list_head event_list;
	atomic_t bytes_in_use;
	u8 is_destroyed:1;
};

struct devx_async_cmd_event_file {
	struct ib_uobject uobj;
	struct devx_async_event_queue ev_queue;
	struct mlx5_async_ctx async_ctx;
};

static void devx_init_event_queue(struct devx_async_event_queue *ev_queue)
{
	spin_lock_init(&ev_queue->lock);
	INIT_LIST_HEAD(&ev_queue->event_list);
	init_waitqueue_head(&ev_queue->poll_wait);
	atomic_set(&ev_queue->bytes_in_use, 0);
	ev_queue->is_destroyed = 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{
	struct devx_async_cmd_event_file *ev_file;

	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE);
	struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);

	ev_file = container_of(uobj, struct devx_async_cmd_event_file,
			       uobj);
	devx_init_event_queue(&ev_file->ev_queue);
	mlx5_cmd_init_async_ctx(mdev->mdev, &ev_file->async_ctx);
	return 0;
}

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE);
	struct devx_async_event_file *ev_file;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	u32 flags;
	int err;

	err = uverbs_get_flags32(&flags, attrs,
		MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
		MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA);

	if (err)
		return err;

	ev_file = container_of(uobj, struct devx_async_event_file,
			       uobj);
	spin_lock_init(&ev_file->lock);
	INIT_LIST_HEAD(&ev_file->event_list);
	init_waitqueue_head(&ev_file->poll_wait);
	if (flags & MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA)
		ev_file->omit_data = 1;
	INIT_LIST_HEAD(&ev_file->subscribed_events_list);
	ev_file->dev = dev;
	get_device(&dev->ib_dev.dev);
	return 0;
}

static void devx_query_callback(int status, struct mlx5_async_work *context)
{
	struct devx_async_data *async_data =
		container_of(context, struct devx_async_data, cb_work);
	struct devx_async_cmd_event_file *ev_file = async_data->ev_file;
	struct devx_async_event_queue *ev_queue = &ev_file->ev_queue;
	unsigned long flags;

	/*
	 * Note that if the struct devx_async_cmd_event_file uobj begins to be
	 * destroyed it will block at mlx5_cmd_cleanup_async_ctx() until this
	 * routine returns, ensuring that it always remains valid here.
	 */
	spin_lock_irqsave(&ev_queue->lock, flags);
	list_add_tail(&async_data->list, &ev_queue->event_list);
	spin_unlock_irqrestore(&ev_queue->lock, flags);

	wake_up_interruptible(&ev_queue->poll_wait);
}

#define MAX_ASYNC_BYTES_IN_USE (1024 * 1024) /* 1MB */

static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
				attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_HANDLE);
	u16 cmd_out_len;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct ib_uobject *fd_uobj;
	int err;
	int uid;
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
	struct devx_async_cmd_event_file *ev_file;
	struct devx_async_data *async_data;

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_query_cmd(cmd_in))
		return -EINVAL;

	err = uverbs_get_const(&cmd_out_len, attrs,
			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN);
	if (err)
		return err;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	fd_uobj = uverbs_attr_get_uobject(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD);
	if (IS_ERR(fd_uobj))
		return PTR_ERR(fd_uobj);

	ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
			       uobj);

	if (atomic_add_return(cmd_out_len, &ev_file->ev_queue.bytes_in_use) >
			MAX_ASYNC_BYTES_IN_USE) {
		atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
		return -EAGAIN;
	}

	async_data = kvzalloc(struct_size(async_data, hdr.out_data,
					  cmd_out_len), GFP_KERNEL);
	if (!async_data) {
		err = -ENOMEM;
		goto sub_bytes;
	}

	err = uverbs_copy_from(&async_data->hdr.wr_id, attrs,
			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID);
	if (err)
		goto free_async;

	async_data->cmd_out_len = cmd_out_len;
	async_data->mdev = mdev;
	async_data->ev_file = ev_file;

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in,
			       uverbs_attr_get_len(attrs,
					MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN),
			       async_data->hdr.out_data,
			       async_data->cmd_out_len,
			       devx_query_callback, &async_data->cb_work);
	if (err)
		goto free_async;

	return 0;

free_async:
	kvfree(async_data);
sub_bytes:
	atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
	return err;
}

static void
subscribe_event_xa_dealloc(struct mlx5_devx_event_table *devx_event_table,
			   u32 key_level1,
			   bool is_level2,
			   u32 key_level2)
{
	struct devx_event *event;
	struct devx_obj_event *xa_val_level2;

	/* Level 1 is valid for future use, no need to free */
	if (!is_level2)
		return;

	event = xa_load(&devx_event_table->event_xa, key_level1);
	WARN_ON(!event);

	xa_val_level2 = xa_load(&event->object_ids,
				key_level2);
	if (list_empty(&xa_val_level2->obj_sub_list)) {
		xa_erase(&event->object_ids,
			 key_level2);
		kfree_rcu(xa_val_level2, rcu);
	}
}

static int
subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table,
			 u32 key_level1,
			 bool is_level2,
			 u32 key_level2)
{
	struct devx_obj_event *obj_event;
	struct devx_event *event;
	int err;

	event = xa_load(&devx_event_table->event_xa, key_level1);
	if (!event) {
		event = kzalloc(sizeof(*event), GFP_KERNEL);
		if (!event)
			return -ENOMEM;

		INIT_LIST_HEAD(&event->unaffiliated_list);
		xa_init(&event->object_ids);

		err = xa_insert(&devx_event_table->event_xa,
				key_level1,
				event,
				GFP_KERNEL);
		if (err) {
			kfree(event);
			return err;
		}
	}

	if (!is_level2)
		return 0;

	obj_event = xa_load(&event->object_ids, key_level2);
	if (!obj_event) {
		obj_event = kzalloc(sizeof(*obj_event), GFP_KERNEL);
		if (!obj_event)
			/* Level1 is valid for future use, no need to free */
			return -ENOMEM;

		err = xa_insert(&event->object_ids,
				key_level2,
				obj_event,
				GFP_KERNEL);
		if (err)
			return err;
		INIT_LIST_HEAD(&obj_event->obj_sub_list);
	}

	return 0;
}

static bool is_valid_events_legacy(int num_events, u16 *event_type_num_list,
				   struct devx_obj *obj)
{
	int i;

	for (i = 0; i < num_events; i++) {
		if (obj) {
			if (!is_legacy_obj_event_num(event_type_num_list[i]))
				return false;
		} else if (!is_legacy_unaffiliated_event_num(
				event_type_num_list[i])) {
			return false;
		}
	}

	return true;
}

#define MAX_SUPP_EVENT_NUM 255

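/*
 * Validate the requested event numbers against the firmware capability
 * masks (user_affiliated_events / user_unaffiliated_events); devices
 * without event_cap fall back to the fixed legacy lists above.
 */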
1875 static bool is_valid_events(struct mlx5_core_dev *dev,
1876 int num_events, u16 *event_type_num_list,
1877 struct devx_obj *obj)
1879 __be64 *aff_events;
1880 __be64 *unaff_events;
1881 int mask_entry;
1882 int mask_bit;
1883 int i;
1885 if (MLX5_CAP_GEN(dev, event_cap)) {
1886 aff_events = MLX5_CAP_DEV_EVENT(dev,
1887 user_affiliated_events);
1888 unaff_events = MLX5_CAP_DEV_EVENT(dev,
1889 user_unaffiliated_events);
1890 } else {
1891 return is_valid_events_legacy(num_events, event_type_num_list,
1892 obj);
1895 for (i = 0; i < num_events; i++) {
1896 if (event_type_num_list[i] > MAX_SUPP_EVENT_NUM)
1897 return false;
1899 mask_entry = event_type_num_list[i] / 64;
1900 mask_bit = event_type_num_list[i] % 64;
1902 if (obj) {
1903 /* CQ completion */
1904 if (event_type_num_list[i] == 0)
1905 continue;
1907 if (!(be64_to_cpu(aff_events[mask_entry]) &
1908 (1ull << mask_bit)))
1909 return false;
1911 continue;
1914 if (!(be64_to_cpu(unaff_events[mask_entry]) &
1915 (1ull << mask_bit)))
1916 return false;
1919 return true;
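/*
 * Handler for MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT: attach an event FD (or
 * redirect to an eventfd) to one or more device events, optionally scoped
 * to a single devx object. Subscriptions are keyed in the event XArray as
 *
 *	key_level1 = event_type_num | (obj_type << 16)
 *	key_level2 = obj_id
 *
 * which keeps unaffiliated and per-object consumers separately addressable.
 */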
1922 #define MAX_NUM_EVENTS 16
1923 static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
1924 struct uverbs_attr_bundle *attrs)
1926 struct ib_uobject *devx_uobj = uverbs_attr_get_uobject(
1927 attrs,
1928 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE);
1929 struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
1930 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
1931 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
1932 struct ib_uobject *fd_uobj;
1933 struct devx_obj *obj = NULL;
1934 struct devx_async_event_file *ev_file;
1935 struct mlx5_devx_event_table *devx_event_table = &dev->devx_event_table;
1936 u16 *event_type_num_list;
1937 struct devx_event_subscription *event_sub, *tmp_sub;
1938 struct list_head sub_list;
1939 int redirect_fd;
1940 bool use_eventfd = false;
1941 int num_events;
1942 int num_alloc_xa_entries = 0;
1943 u16 obj_type = 0;
1944 u64 cookie = 0;
1945 u32 obj_id = 0;
1946 int err;
1947 int i;
1949 if (!c->devx_uid)
1950 return -EINVAL;
1952 if (!IS_ERR(devx_uobj)) {
1953 obj = (struct devx_obj *)devx_uobj->object;
1954 if (obj)
1955 obj_id = get_dec_obj_id(obj->obj_id);
1958 fd_uobj = uverbs_attr_get_uobject(attrs,
1959 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE);
1960 if (IS_ERR(fd_uobj))
1961 return PTR_ERR(fd_uobj);
1963 ev_file = container_of(fd_uobj, struct devx_async_event_file,
1964 uobj);
1966 if (uverbs_attr_is_valid(attrs,
1967 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM)) {
1968 err = uverbs_copy_from(&redirect_fd, attrs,
1969 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM);
1970 if (err)
1971 return err;
1973 use_eventfd = true;
1976 if (uverbs_attr_is_valid(attrs,
1977 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE)) {
1978 if (use_eventfd)
1979 return -EINVAL;
1981 err = uverbs_copy_from(&cookie, attrs,
1982 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE);
1983 if (err)
1984 return err;
1987 num_events = uverbs_attr_ptr_get_array_size(
1988 attrs, MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
1989 sizeof(u16));
1991 if (num_events < 0)
1992 return num_events;
1994 if (num_events > MAX_NUM_EVENTS)
1995 return -EINVAL;
1997 event_type_num_list = uverbs_attr_get_alloced_ptr(attrs,
1998 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST);
2000 if (!is_valid_events(dev->mdev, num_events, event_type_num_list, obj))
2001 return -EINVAL;
2003 INIT_LIST_HEAD(&sub_list);
2005 /* Protect from concurrent subscriptions to same XA entries to allow
 * both to succeed
 */
2008 mutex_lock(&devx_event_table->event_xa_lock);
2009 for (i = 0; i < num_events; i++) {
2010 u32 key_level1;
2012 if (obj)
2013 obj_type = get_dec_obj_type(obj,
2014 event_type_num_list[i]);
2015 key_level1 = event_type_num_list[i] | obj_type << 16;
2017 err = subscribe_event_xa_alloc(devx_event_table,
2018 key_level1,
2019 obj,
2020 obj_id);
2021 if (err)
2022 goto err;
2024 num_alloc_xa_entries++;
2025 event_sub = kzalloc(sizeof(*event_sub), GFP_KERNEL);
		if (!event_sub) {
			err = -ENOMEM;
			goto err;
		}
2029 list_add_tail(&event_sub->event_list, &sub_list);
2030 uverbs_uobject_get(&ev_file->uobj);
2031 if (use_eventfd) {
2032 event_sub->eventfd =
2033 eventfd_ctx_fdget(redirect_fd);
2035 if (IS_ERR(event_sub->eventfd)) {
2036 err = PTR_ERR(event_sub->eventfd);
2037 event_sub->eventfd = NULL;
2038 goto err;
2042 event_sub->cookie = cookie;
2043 event_sub->ev_file = ev_file;
		/* May be needed when cleaning up the devx object/subscription */
2045 event_sub->xa_key_level1 = key_level1;
2046 event_sub->xa_key_level2 = obj_id;
2047 INIT_LIST_HEAD(&event_sub->obj_list);
2050 /* Once all the allocations and the XA data insertions were done we
2051 * can go ahead and add all the subscriptions to the relevant lists
 * without concern of a failure.
 */
2054 list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
2055 struct devx_event *event;
2056 struct devx_obj_event *obj_event;
2058 list_del_init(&event_sub->event_list);
2060 spin_lock_irq(&ev_file->lock);
2061 list_add_tail_rcu(&event_sub->file_list,
2062 &ev_file->subscribed_events_list);
2063 spin_unlock_irq(&ev_file->lock);
2065 event = xa_load(&devx_event_table->event_xa,
2066 event_sub->xa_key_level1);
2067 WARN_ON(!event);
2069 if (!obj) {
2070 list_add_tail_rcu(&event_sub->xa_list,
2071 &event->unaffiliated_list);
2072 continue;
2075 obj_event = xa_load(&event->object_ids, obj_id);
2076 WARN_ON(!obj_event);
2077 list_add_tail_rcu(&event_sub->xa_list,
2078 &obj_event->obj_sub_list);
2079 list_add_tail_rcu(&event_sub->obj_list,
2080 &obj->event_sub);
2083 mutex_unlock(&devx_event_table->event_xa_lock);
2084 return 0;
2086 err:
2087 list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
2088 list_del(&event_sub->event_list);
2090 subscribe_event_xa_dealloc(devx_event_table,
2091 event_sub->xa_key_level1,
2092 obj,
2093 obj_id);
2095 if (event_sub->eventfd)
2096 eventfd_ctx_put(event_sub->eventfd);
2097 uverbs_uobject_put(&event_sub->ev_file->uobj);
2098 kfree(event_sub);
2101 mutex_unlock(&devx_event_table->event_xa_lock);
2102 return err;
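/*
 * Pin the user buffer described by the ADDR/LEN/ACCESS attributes and
 * derive the page shift, contiguous page count and in-page offset that the
 * firmware UMEM object is built from.
 */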
2105 static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
2106 struct uverbs_attr_bundle *attrs,
2107 struct devx_umem *obj)
2109 u64 addr;
2110 size_t size;
2111 u32 access;
2112 int npages;
2113 int err;
2114 u32 page_mask;
2116 if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
2117 uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
2118 return -EFAULT;
2120 err = uverbs_get_flags32(&access, attrs,
2121 MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
2122 IB_ACCESS_LOCAL_WRITE |
2123 IB_ACCESS_REMOTE_WRITE |
2124 IB_ACCESS_REMOTE_READ);
2125 if (err)
2126 return err;
2128 err = ib_check_mr_access(access);
2129 if (err)
2130 return err;
2132 obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access);
2133 if (IS_ERR(obj->umem))
2134 return PTR_ERR(obj->umem);
2136 mlx5_ib_cont_pages(obj->umem, obj->umem->address,
2137 MLX5_MKEY_PAGE_SHIFT_MASK, &npages,
2138 &obj->page_shift, &obj->ncont, NULL);
2140 if (!npages) {
2141 ib_umem_release(obj->umem);
2142 return -EINVAL;
2145 page_mask = (1 << obj->page_shift) - 1;
2146 obj->page_offset = obj->umem->address & page_mask;
2148 return 0;
2151 static int devx_umem_reg_cmd_alloc(struct uverbs_attr_bundle *attrs,
2152 struct devx_umem *obj,
2153 struct devx_umem_reg_cmd *cmd)
2155 cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
2156 (MLX5_ST_SZ_BYTES(mtt) * obj->ncont);
2157 cmd->in = uverbs_zalloc(attrs, cmd->inlen);
2158 return PTR_ERR_OR_ZERO(cmd->in);
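/*
 * Build the CREATE_UMEM command payload: one MTT entry per page, the log
 * page size expressed relative to MLX5_ADAPTER_PAGE_SHIFT, and write access
 * granted only when the underlying umem is writable.
 */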
2161 static void devx_umem_reg_cmd_build(struct mlx5_ib_dev *dev,
2162 struct devx_umem *obj,
2163 struct devx_umem_reg_cmd *cmd)
2165 void *umem;
2166 __be64 *mtt;
2168 umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
2169 mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);
2171 MLX5_SET(create_umem_in, cmd->in, opcode, MLX5_CMD_OP_CREATE_UMEM);
2172 MLX5_SET64(umem, umem, num_of_mtt, obj->ncont);
2173 MLX5_SET(umem, umem, log_page_size, obj->page_shift -
2174 MLX5_ADAPTER_PAGE_SHIFT);
2175 MLX5_SET(umem, umem, page_offset, obj->page_offset);
2176 mlx5_ib_populate_pas(dev, obj->umem, obj->page_shift, mtt,
2177 (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
2178 MLX5_IB_MTT_READ);
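/*
 * UMEM_REG method handler: pin the memory, execute CREATE_UMEM and return
 * the firmware object id to userspace. The destroy inbox is pre-built at
 * creation time so that deregistration only has to replay it.
 */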
2181 static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
2182 struct uverbs_attr_bundle *attrs)
2184 struct devx_umem_reg_cmd cmd;
2185 struct devx_umem *obj;
2186 struct ib_uobject *uobj = uverbs_attr_get_uobject(
2187 attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
2188 u32 obj_id;
2189 struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
2190 &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
2191 struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
2192 int err;
2194 if (!c->devx_uid)
2195 return -EINVAL;
2197 obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
2198 if (!obj)
2199 return -ENOMEM;
2201 err = devx_umem_get(dev, &c->ibucontext, attrs, obj);
2202 if (err)
2203 goto err_obj_free;
2205 err = devx_umem_reg_cmd_alloc(attrs, obj, &cmd);
2206 if (err)
2207 goto err_umem_release;
2209 devx_umem_reg_cmd_build(dev, obj, &cmd);
2211 MLX5_SET(create_umem_in, cmd.in, uid, c->devx_uid);
2212 err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
2213 sizeof(cmd.out));
2214 if (err)
2215 goto err_umem_release;
2217 obj->mdev = dev->mdev;
2218 uobj->object = obj;
2219 devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
2220 err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id, sizeof(obj_id));
2221 if (err)
2222 goto err_umem_destroy;
2224 return 0;
2226 err_umem_destroy:
2227 mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, cmd.out, sizeof(cmd.out));
2228 err_umem_release:
2229 ib_umem_release(obj->umem);
2230 err_obj_free:
2231 kfree(obj);
2232 return err;
2235 static int devx_umem_cleanup(struct ib_uobject *uobject,
2236 enum rdma_remove_reason why,
2237 struct uverbs_attr_bundle *attrs)
2239 struct devx_umem *obj = uobject->object;
2240 u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
2241 int err;
2243 err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
2244 if (ib_is_destroy_retryable(err, why, uobject))
2245 return err;
2247 ib_umem_release(obj->umem);
2248 kfree(obj);
2249 return 0;
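/*
 * An event number is "unaffiliated" when it is not bound to a specific
 * object instance. Devices with event_cap report this through the
 * user_unaffiliated_events bitmask; older devices rely on the fixed legacy
 * list.
 */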
2252 static bool is_unaffiliated_event(struct mlx5_core_dev *dev,
2253 unsigned long event_type)
2255 __be64 *unaff_events;
2256 int mask_entry;
2257 int mask_bit;
2259 if (!MLX5_CAP_GEN(dev, event_cap))
2260 return is_legacy_unaffiliated_event_num(event_type);
2262 unaff_events = MLX5_CAP_DEV_EVENT(dev,
2263 user_unaffiliated_events);
2264 WARN_ON(event_type > MAX_SUPP_EVENT_NUM);
2266 mask_entry = event_type / 64;
2267 mask_bit = event_type % 64;
2269 if (!(be64_to_cpu(unaff_events[mask_entry]) & (1ull << mask_bit)))
2270 return false;
2272 return true;
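/*
 * Recover the object id carried inside an EQE. Legacy event types keep it
 * in type-specific fields (QP/SRQ number, XRQ number, DCT number, CQ
 * number); anything else uses the generic affiliated event header.
 */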
2275 static u32 devx_get_obj_id_from_event(unsigned long event_type, void *data)
2277 struct mlx5_eqe *eqe = data;
2278 u32 obj_id = 0;
2280 switch (event_type) {
2281 case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
2282 case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
2283 case MLX5_EVENT_TYPE_PATH_MIG:
2284 case MLX5_EVENT_TYPE_COMM_EST:
2285 case MLX5_EVENT_TYPE_SQ_DRAINED:
2286 case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
2287 case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
2288 case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
2289 case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
2290 case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
2291 obj_id = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
2292 break;
2293 case MLX5_EVENT_TYPE_XRQ_ERROR:
2294 obj_id = be32_to_cpu(eqe->data.xrq_err.type_xrqn) & 0xffffff;
2295 break;
2296 case MLX5_EVENT_TYPE_DCT_DRAINED:
2297 case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
2298 obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
2299 break;
2300 case MLX5_EVENT_TYPE_CQ_ERROR:
2301 obj_id = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
2302 break;
2303 default:
2304 obj_id = MLX5_GET(affiliated_event_header, &eqe->data, obj_id);
2305 break;
2308 return obj_id;
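/*
 * Queue one event on the subscriber's FD. In omit_data mode only the
 * subscription cookie is reported and a subscription is queued at most
 * once; otherwise a copy of the EQE is allocated with GFP_ATOMIC (this
 * runs from the EQ notifier) and an allocation failure is latched as an
 * overflow error for the next read().
 */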
2311 static int deliver_event(struct devx_event_subscription *event_sub,
2312 const void *data)
2314 struct devx_async_event_file *ev_file;
2315 struct devx_async_event_data *event_data;
2316 unsigned long flags;
2318 ev_file = event_sub->ev_file;
2320 if (ev_file->omit_data) {
2321 spin_lock_irqsave(&ev_file->lock, flags);
2322 if (!list_empty(&event_sub->event_list)) {
2323 spin_unlock_irqrestore(&ev_file->lock, flags);
2324 return 0;
2327 /* is_destroyed is ignored here because we don't have any memory
 * allocation to clean up for the omit_data case
 */
2330 list_add_tail(&event_sub->event_list, &ev_file->event_list);
2331 spin_unlock_irqrestore(&ev_file->lock, flags);
2332 wake_up_interruptible(&ev_file->poll_wait);
2333 return 0;
2336 event_data = kzalloc(sizeof(*event_data) + sizeof(struct mlx5_eqe),
2337 GFP_ATOMIC);
2338 if (!event_data) {
2339 spin_lock_irqsave(&ev_file->lock, flags);
2340 ev_file->is_overflow_err = 1;
2341 spin_unlock_irqrestore(&ev_file->lock, flags);
2342 return -ENOMEM;
2345 event_data->hdr.cookie = event_sub->cookie;
2346 memcpy(event_data->hdr.out_data, data, sizeof(struct mlx5_eqe));
2348 spin_lock_irqsave(&ev_file->lock, flags);
2349 if (!ev_file->is_destroyed)
2350 list_add_tail(&event_data->list, &ev_file->event_list);
2351 else
2352 kfree(event_data);
2353 spin_unlock_irqrestore(&ev_file->lock, flags);
2354 wake_up_interruptible(&ev_file->poll_wait);
2356 return 0;
2359 static void dispatch_event_fd(struct list_head *fd_list,
2360 const void *data)
2362 struct devx_event_subscription *item;
2364 list_for_each_entry_rcu(item, fd_list, xa_list) {
2365 if (item->eventfd)
2366 eventfd_signal(item->eventfd, 1);
2367 else
2368 deliver_event(item, data);
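/*
 * EQ notifier callback: map the event type (plus, for affiliated events,
 * the object type and id) to its XArray entry and dispatch to every
 * subscriber. Lookup and dispatch run under rcu_read_lock() against the
 * RCU-protected subscription lists.
 */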
2372 static int devx_event_notifier(struct notifier_block *nb,
2373 unsigned long event_type, void *data)
2375 struct mlx5_devx_event_table *table;
2376 struct mlx5_ib_dev *dev;
2377 struct devx_event *event;
2378 struct devx_obj_event *obj_event;
2379 u16 obj_type = 0;
2380 bool is_unaffiliated;
2381 u32 obj_id;
	/* Explicitly filter out kernel events which may occur frequently */
2384 if (event_type == MLX5_EVENT_TYPE_CMD ||
2385 event_type == MLX5_EVENT_TYPE_PAGE_REQUEST)
2386 return NOTIFY_OK;
2388 table = container_of(nb, struct mlx5_devx_event_table, devx_nb.nb);
2389 dev = container_of(table, struct mlx5_ib_dev, devx_event_table);
2390 is_unaffiliated = is_unaffiliated_event(dev->mdev, event_type);
2392 if (!is_unaffiliated)
2393 obj_type = get_event_obj_type(event_type, data);
2395 rcu_read_lock();
2396 event = xa_load(&table->event_xa, event_type | (obj_type << 16));
2397 if (!event) {
2398 rcu_read_unlock();
2399 return NOTIFY_DONE;
2402 if (is_unaffiliated) {
2403 dispatch_event_fd(&event->unaffiliated_list, data);
2404 rcu_read_unlock();
2405 return NOTIFY_OK;
2408 obj_id = devx_get_obj_id_from_event(event_type, data);
2409 obj_event = xa_load(&event->object_ids, obj_id);
2410 if (!obj_event) {
2411 rcu_read_unlock();
2412 return NOTIFY_DONE;
2415 dispatch_event_fd(&obj_event->obj_sub_list, data);
2417 rcu_read_unlock();
2418 return NOTIFY_OK;
2421 void mlx5_ib_devx_init_event_table(struct mlx5_ib_dev *dev)
2423 struct mlx5_devx_event_table *table = &dev->devx_event_table;
2425 xa_init(&table->event_xa);
2426 mutex_init(&table->event_xa_lock);
2427 MLX5_NB_INIT(&table->devx_nb, devx_event_notifier, NOTIFY_ANY);
2428 mlx5_eq_notifier_register(dev->mdev, &table->devx_nb);
2431 void mlx5_ib_devx_cleanup_event_table(struct mlx5_ib_dev *dev)
2433 struct mlx5_devx_event_table *table = &dev->devx_event_table;
2434 struct devx_event_subscription *sub, *tmp;
2435 struct devx_event *event;
2436 void *entry;
2437 unsigned long id;
2439 mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb);
2440 mutex_lock(&dev->devx_event_table.event_xa_lock);
2441 xa_for_each(&table->event_xa, id, entry) {
2442 event = entry;
2443 list_for_each_entry_safe(sub, tmp, &event->unaffiliated_list,
2444 xa_list)
2445 devx_cleanup_subscription(dev, sub);
2446 kfree(entry);
2448 mutex_unlock(&dev->devx_event_table.event_xa_lock);
2449 xa_destroy(&table->event_xa);
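/*
 * read() on the async command FD: wait (unless O_NONBLOCK) for a completed
 * command, then deliver the wr_id header and the command output as one
 * event. The buffer must fit the whole event or -ENOSPC is returned.
 */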
2452 static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
2453 size_t count, loff_t *pos)
2455 struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
2456 struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
2457 struct devx_async_data *event;
2458 int ret = 0;
2459 size_t eventsz;
2461 spin_lock_irq(&ev_queue->lock);
2463 while (list_empty(&ev_queue->event_list)) {
2464 spin_unlock_irq(&ev_queue->lock);
2466 if (filp->f_flags & O_NONBLOCK)
2467 return -EAGAIN;
2469 if (wait_event_interruptible(
2470 ev_queue->poll_wait,
2471 (!list_empty(&ev_queue->event_list) ||
2472 ev_queue->is_destroyed))) {
2473 return -ERESTARTSYS;
2476 if (list_empty(&ev_queue->event_list) &&
2477 ev_queue->is_destroyed)
2478 return -EIO;
2480 spin_lock_irq(&ev_queue->lock);
2483 event = list_entry(ev_queue->event_list.next,
2484 struct devx_async_data, list);
2485 eventsz = event->cmd_out_len +
2486 sizeof(struct mlx5_ib_uapi_devx_async_cmd_hdr);
2488 if (eventsz > count) {
2489 spin_unlock_irq(&ev_queue->lock);
2490 return -ENOSPC;
2493 list_del(ev_queue->event_list.next);
2494 spin_unlock_irq(&ev_queue->lock);
2496 if (copy_to_user(buf, &event->hdr, eventsz))
2497 ret = -EFAULT;
2498 else
2499 ret = eventsz;
2501 atomic_sub(event->cmd_out_len, &ev_queue->bytes_in_use);
2502 kvfree(event);
2503 return ret;
2506 static __poll_t devx_async_cmd_event_poll(struct file *filp,
2507 struct poll_table_struct *wait)
2509 struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
2510 struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
2511 __poll_t pollflags = 0;
2513 poll_wait(filp, &ev_queue->poll_wait, wait);
2515 spin_lock_irq(&ev_queue->lock);
2516 if (ev_queue->is_destroyed)
2517 pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
2518 else if (!list_empty(&ev_queue->event_list))
2519 pollflags = EPOLLIN | EPOLLRDNORM;
2520 spin_unlock_irq(&ev_queue->lock);
2522 return pollflags;
2525 static const struct file_operations devx_async_cmd_event_fops = {
2526 .owner = THIS_MODULE,
2527 .read = devx_async_cmd_event_read,
2528 .poll = devx_async_cmd_event_poll,
2529 .release = uverbs_uobject_fd_release,
2530 .llseek = no_llseek,
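/*
 * read() on the async event FD. In omit_data mode the queue links the
 * subscriptions themselves and only the 8-byte cookie is copied out; in
 * full mode each queued element carries a header followed by a complete
 * EQE.
 */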
2533 static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
2534 size_t count, loff_t *pos)
2536 struct devx_async_event_file *ev_file = filp->private_data;
2537 struct devx_event_subscription *event_sub;
2538 struct devx_async_event_data *uninitialized_var(event);
2539 int ret = 0;
2540 size_t eventsz;
2541 bool omit_data;
2542 void *event_data;
2544 omit_data = ev_file->omit_data;
2546 spin_lock_irq(&ev_file->lock);
2548 if (ev_file->is_overflow_err) {
2549 ev_file->is_overflow_err = 0;
2550 spin_unlock_irq(&ev_file->lock);
2551 return -EOVERFLOW;
2554 if (ev_file->is_destroyed) {
2555 spin_unlock_irq(&ev_file->lock);
2556 return -EIO;
2559 while (list_empty(&ev_file->event_list)) {
2560 spin_unlock_irq(&ev_file->lock);
2562 if (filp->f_flags & O_NONBLOCK)
2563 return -EAGAIN;
2565 if (wait_event_interruptible(ev_file->poll_wait,
2566 (!list_empty(&ev_file->event_list) ||
2567 ev_file->is_destroyed))) {
2568 return -ERESTARTSYS;
2571 spin_lock_irq(&ev_file->lock);
2572 if (ev_file->is_destroyed) {
2573 spin_unlock_irq(&ev_file->lock);
2574 return -EIO;
2578 if (omit_data) {
2579 event_sub = list_first_entry(&ev_file->event_list,
2580 struct devx_event_subscription,
2581 event_list);
2582 eventsz = sizeof(event_sub->cookie);
2583 event_data = &event_sub->cookie;
2584 } else {
2585 event = list_first_entry(&ev_file->event_list,
2586 struct devx_async_event_data, list);
2587 eventsz = sizeof(struct mlx5_eqe) +
2588 sizeof(struct mlx5_ib_uapi_devx_async_event_hdr);
2589 event_data = &event->hdr;
2592 if (eventsz > count) {
2593 spin_unlock_irq(&ev_file->lock);
2594 return -EINVAL;
2597 if (omit_data)
2598 list_del_init(&event_sub->event_list);
2599 else
2600 list_del(&event->list);
2602 spin_unlock_irq(&ev_file->lock);
2604 if (copy_to_user(buf, event_data, eventsz))
2605 /* This points to an application issue, not a kernel concern */
2606 ret = -EFAULT;
2607 else
2608 ret = eventsz;
2610 if (!omit_data)
2611 kfree(event);
2612 return ret;
2615 static __poll_t devx_async_event_poll(struct file *filp,
2616 struct poll_table_struct *wait)
2618 struct devx_async_event_file *ev_file = filp->private_data;
2619 __poll_t pollflags = 0;
2621 poll_wait(filp, &ev_file->poll_wait, wait);
2623 spin_lock_irq(&ev_file->lock);
2624 if (ev_file->is_destroyed)
2625 pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
2626 else if (!list_empty(&ev_file->event_list))
2627 pollflags = EPOLLIN | EPOLLRDNORM;
2628 spin_unlock_irq(&ev_file->lock);
2630 return pollflags;
2633 static void devx_free_subscription(struct rcu_head *rcu)
2635 struct devx_event_subscription *event_sub =
2636 container_of(rcu, struct devx_event_subscription, rcu);
2638 if (event_sub->eventfd)
2639 eventfd_ctx_put(event_sub->eventfd);
2640 uverbs_uobject_put(&event_sub->ev_file->uobj);
2641 kfree(event_sub);
2644 static const struct file_operations devx_async_event_fops = {
2645 .owner = THIS_MODULE,
2646 .read = devx_async_event_read,
2647 .poll = devx_async_event_poll,
2648 .release = uverbs_uobject_fd_release,
2649 .llseek = no_llseek,
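/*
 * uobject teardown for the two FD types: mark the file destroyed, wake any
 * pollers, then free whatever the read side can no longer consume. The
 * event FD variant also detaches every subscription tied to the file and
 * defers the actual free to an RCU grace period.
 */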
2652 static int devx_async_cmd_event_destroy_uobj(struct ib_uobject *uobj,
2653 enum rdma_remove_reason why)
2655 struct devx_async_cmd_event_file *comp_ev_file =
2656 container_of(uobj, struct devx_async_cmd_event_file,
2657 uobj);
2658 struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
2659 struct devx_async_data *entry, *tmp;
2661 spin_lock_irq(&ev_queue->lock);
2662 ev_queue->is_destroyed = 1;
2663 spin_unlock_irq(&ev_queue->lock);
2664 wake_up_interruptible(&ev_queue->poll_wait);
2666 mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx);
2668 spin_lock_irq(&comp_ev_file->ev_queue.lock);
2669 list_for_each_entry_safe(entry, tmp,
2670 &comp_ev_file->ev_queue.event_list, list)
2671 kvfree(entry);
2672 spin_unlock_irq(&comp_ev_file->ev_queue.lock);
2673 return 0;
2676 static int devx_async_event_destroy_uobj(struct ib_uobject *uobj,
2677 enum rdma_remove_reason why)
2679 struct devx_async_event_file *ev_file =
2680 container_of(uobj, struct devx_async_event_file,
2681 uobj);
2682 struct devx_event_subscription *event_sub, *event_sub_tmp;
2683 struct devx_async_event_data *entry, *tmp;
2684 struct mlx5_ib_dev *dev = ev_file->dev;
2686 spin_lock_irq(&ev_file->lock);
2687 ev_file->is_destroyed = 1;
2688 spin_unlock_irq(&ev_file->lock);
2689 wake_up_interruptible(&ev_file->poll_wait);
2691 mutex_lock(&dev->devx_event_table.event_xa_lock);
2692 /* delete the subscriptions which are related to this FD */
2693 list_for_each_entry_safe(event_sub, event_sub_tmp,
2694 &ev_file->subscribed_events_list, file_list) {
2695 devx_cleanup_subscription(dev, event_sub);
2696 list_del_rcu(&event_sub->file_list);
2697 /* subscription may not be used by the read API any more */
2698 call_rcu(&event_sub->rcu, devx_free_subscription);
2700 mutex_unlock(&dev->devx_event_table.event_xa_lock);
2702 /* free the pending events allocation */
2703 if (!ev_file->omit_data) {
2704 spin_lock_irq(&ev_file->lock);
2705 list_for_each_entry_safe(entry, tmp,
2706 &ev_file->event_list, list)
2707 kfree(entry); /* read can't come any more */
2708 spin_unlock_irq(&ev_file->lock);
2711 put_device(&dev->ib_dev.dev);
2712 return 0;
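/*
 * Everything below is uverbs method/object metadata: per-method attribute
 * specs, the FD object types backing the two event channels, and the uapi
 * definition chain that is only exposed when the device reports
 * log_max_uctx, i.e. DEVX support.
 */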
2715 DECLARE_UVERBS_NAMED_METHOD(
2716 MLX5_IB_METHOD_DEVX_UMEM_REG,
2717 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
2718 MLX5_IB_OBJECT_DEVX_UMEM,
2719 UVERBS_ACCESS_NEW,
2720 UA_MANDATORY),
2721 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
2722 UVERBS_ATTR_TYPE(u64),
2723 UA_MANDATORY),
2724 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
2725 UVERBS_ATTR_TYPE(u64),
2726 UA_MANDATORY),
2727 UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
2728 enum ib_access_flags),
2729 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
2730 UVERBS_ATTR_TYPE(u32),
2731 UA_MANDATORY));
2733 DECLARE_UVERBS_NAMED_METHOD_DESTROY(
2734 MLX5_IB_METHOD_DEVX_UMEM_DEREG,
2735 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
2736 MLX5_IB_OBJECT_DEVX_UMEM,
2737 UVERBS_ACCESS_DESTROY,
2738 UA_MANDATORY));
2740 DECLARE_UVERBS_NAMED_METHOD(
2741 MLX5_IB_METHOD_DEVX_QUERY_EQN,
2742 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC,
2743 UVERBS_ATTR_TYPE(u32),
2744 UA_MANDATORY),
2745 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
2746 UVERBS_ATTR_TYPE(u32),
2747 UA_MANDATORY));
2749 DECLARE_UVERBS_NAMED_METHOD(
2750 MLX5_IB_METHOD_DEVX_QUERY_UAR,
2751 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
2752 UVERBS_ATTR_TYPE(u32),
2753 UA_MANDATORY),
2754 UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
2755 UVERBS_ATTR_TYPE(u32),
2756 UA_MANDATORY));
2758 DECLARE_UVERBS_NAMED_METHOD(
2759 MLX5_IB_METHOD_DEVX_OTHER,
2760 UVERBS_ATTR_PTR_IN(
2761 MLX5_IB_ATTR_DEVX_OTHER_CMD_IN,
2762 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2763 UA_MANDATORY,
2764 UA_ALLOC_AND_COPY),
2765 UVERBS_ATTR_PTR_OUT(
2766 MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
2767 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2768 UA_MANDATORY));
2770 DECLARE_UVERBS_NAMED_METHOD(
2771 MLX5_IB_METHOD_DEVX_OBJ_CREATE,
2772 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE,
2773 MLX5_IB_OBJECT_DEVX_OBJ,
2774 UVERBS_ACCESS_NEW,
2775 UA_MANDATORY),
2776 UVERBS_ATTR_PTR_IN(
2777 MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
2778 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2779 UA_MANDATORY,
2780 UA_ALLOC_AND_COPY),
2781 UVERBS_ATTR_PTR_OUT(
2782 MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
2783 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2784 UA_MANDATORY));
2786 DECLARE_UVERBS_NAMED_METHOD_DESTROY(
2787 MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
2788 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE,
2789 MLX5_IB_OBJECT_DEVX_OBJ,
2790 UVERBS_ACCESS_DESTROY,
2791 UA_MANDATORY));
2793 DECLARE_UVERBS_NAMED_METHOD(
2794 MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
2795 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
2796 UVERBS_IDR_ANY_OBJECT,
2797 UVERBS_ACCESS_WRITE,
2798 UA_MANDATORY),
2799 UVERBS_ATTR_PTR_IN(
2800 MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
2801 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2802 UA_MANDATORY,
2803 UA_ALLOC_AND_COPY),
2804 UVERBS_ATTR_PTR_OUT(
2805 MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
2806 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2807 UA_MANDATORY));
2809 DECLARE_UVERBS_NAMED_METHOD(
2810 MLX5_IB_METHOD_DEVX_OBJ_QUERY,
2811 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
2812 UVERBS_IDR_ANY_OBJECT,
2813 UVERBS_ACCESS_READ,
2814 UA_MANDATORY),
2815 UVERBS_ATTR_PTR_IN(
2816 MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
2817 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2818 UA_MANDATORY,
2819 UA_ALLOC_AND_COPY),
2820 UVERBS_ATTR_PTR_OUT(
2821 MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
2822 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
2823 UA_MANDATORY));
2825 DECLARE_UVERBS_NAMED_METHOD(
2826 MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY,
2827 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
2828 UVERBS_IDR_ANY_OBJECT,
2829 UVERBS_ACCESS_READ,
2830 UA_MANDATORY),
2831 UVERBS_ATTR_PTR_IN(
2832 MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
2833 UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
2834 UA_MANDATORY,
2835 UA_ALLOC_AND_COPY),
2836 UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN,
2837 u16, UA_MANDATORY),
2838 UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD,
2839 MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
2840 UVERBS_ACCESS_READ,
2841 UA_MANDATORY),
2842 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID,
2843 UVERBS_ATTR_TYPE(u64),
2844 UA_MANDATORY));
2846 DECLARE_UVERBS_NAMED_METHOD(
2847 MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT,
2848 UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE,
2849 MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
2850 UVERBS_ACCESS_READ,
2851 UA_MANDATORY),
2852 UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE,
2853 MLX5_IB_OBJECT_DEVX_OBJ,
2854 UVERBS_ACCESS_READ,
2855 UA_OPTIONAL),
2856 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
2857 UVERBS_ATTR_MIN_SIZE(sizeof(u16)),
2858 UA_MANDATORY,
2859 UA_ALLOC_AND_COPY),
2860 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE,
2861 UVERBS_ATTR_TYPE(u64),
2862 UA_OPTIONAL),
2863 UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM,
2864 UVERBS_ATTR_TYPE(u32),
2865 UA_OPTIONAL));
2867 DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
2868 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
2869 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
2870 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN),
2871 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT));
2873 DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
2874 UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
2875 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
2876 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
2877 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
2878 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY),
2879 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY));
2881 DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
2882 UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
2883 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
2884 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));
2887 DECLARE_UVERBS_NAMED_METHOD(
2888 MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC,
2889 UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE,
2890 MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
2891 UVERBS_ACCESS_NEW,
2892 UA_MANDATORY));
2894 DECLARE_UVERBS_NAMED_OBJECT(
2895 MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
2896 UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_cmd_event_file),
2897 devx_async_cmd_event_destroy_uobj,
2898 &devx_async_cmd_event_fops, "[devx_async_cmd]",
2899 O_RDONLY),
2900 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC));
2902 DECLARE_UVERBS_NAMED_METHOD(
2903 MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC,
2904 UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE,
2905 MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
2906 UVERBS_ACCESS_NEW,
2907 UA_MANDATORY),
2908 UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
2909 enum mlx5_ib_uapi_devx_create_event_channel_flags,
2910 UA_MANDATORY));
2912 DECLARE_UVERBS_NAMED_OBJECT(
2913 MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
2914 UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_event_file),
2915 devx_async_event_destroy_uobj,
2916 &devx_async_event_fops, "[devx_async_event]",
2917 O_RDONLY),
2918 &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC));
2920 static bool devx_is_supported(struct ib_device *device)
2922 struct mlx5_ib_dev *dev = to_mdev(device);
2924 return MLX5_CAP_GEN(dev->mdev, log_max_uctx);
2927 const struct uapi_definition mlx5_ib_devx_defs[] = {
2928 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
2929 MLX5_IB_OBJECT_DEVX,
2930 UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
2931 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
2932 MLX5_IB_OBJECT_DEVX_OBJ,
2933 UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
2934 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
2935 MLX5_IB_OBJECT_DEVX_UMEM,
2936 UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
2937 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
2938 MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
2939 UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
2940 UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
2941 MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
2942 UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),