// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2018, Mellanox Technologies inc.  All rights reserved.
 */
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_verbs.h>
#include <rdma/uverbs_types.h>
#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <rdma/ib_umem.h>
#include <rdma/uverbs_std_types.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/fs.h>
#include <linux/xarray.h>

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>
static void dispatch_event_fd(struct list_head *fd_list, const void *data);
enum devx_obj_flags {
	DEVX_OBJ_FLAGS_INDIRECT_MKEY = 1 << 0,
	DEVX_OBJ_FLAGS_DCT = 1 << 1,
	DEVX_OBJ_FLAGS_CQ = 1 << 2,
};
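
/*
 * Note (added for clarity): these flags mark DEVX objects that need a
 * special teardown path. DCTs and CQs are destroyed through the mlx5 core
 * helpers rather than the prebuilt raw destroy command, and indirect mkeys
 * must be removed from the ODP mkey xarray (with an SRCU grace period)
 * before the object is freed; see devx_obj_cleanup() below.
 */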
struct devx_async_data {
	struct mlx5_ib_dev *mdev;
	struct list_head list;
	struct devx_async_cmd_event_file *ev_file;
	struct mlx5_async_work cb_work;
	u16 cmd_out_len;
	/* must be last field in this structure */
	struct mlx5_ib_uapi_devx_async_cmd_hdr hdr;
};
struct devx_async_event_data {
	struct list_head list; /* headed in ev_file->event_list */
	struct mlx5_ib_uapi_devx_async_event_hdr hdr;
};
/* first level XA value data structure */
struct devx_event {
	struct xarray object_ids; /* second XA level, Key = object id */
	struct list_head unaffiliated_list;
};
/* second level XA value data structure */
struct devx_obj_event {
	struct rcu_head rcu;
	struct list_head obj_sub_list;
};
struct devx_event_subscription {
	struct list_head file_list; /* headed in ev_file->
				     * subscribed_events_list
				     */
	struct list_head xa_list; /* headed in devx_event->unaffiliated_list or
				   * devx_obj_event->obj_sub_list
				   */
	struct list_head obj_list; /* headed in devx_object */
	struct list_head event_list; /* headed in ev_file->event_list or in
				      * temp list via subscription
				      */

	u8 is_cleaned:1;
	u32 xa_key_level1;
	u32 xa_key_level2;
	struct rcu_head rcu;
	u64 cookie;
	struct devx_async_event_file *ev_file;
	struct eventfd_ctx *eventfd;
};
struct devx_async_event_file {
	struct ib_uobject uobj;
	/* Head of events that are subscribed to this FD */
	struct list_head subscribed_events_list;
	spinlock_t lock;
	wait_queue_head_t poll_wait;
	struct list_head event_list;
	struct mlx5_ib_dev *dev;
	u8 omit_data:1;
	u8 is_overflow_err:1;
	u8 is_destroyed:1;
};
struct devx_umem {
	struct mlx5_core_dev *mdev;
	struct ib_umem *umem;
	u32 dinlen;
	u32 dinbox[MLX5_ST_SZ_DW(general_obj_in_cmd_hdr)];
};
struct devx_umem_reg_cmd {
	void *in;
	int inlen;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
};
static struct mlx5_ib_ucontext *
devx_ufile2uctx(const struct uverbs_attr_bundle *attrs)
{
	return to_mucontext(ib_uverbs_get_ucontext(attrs));
}
int mlx5_ib_devx_create(struct mlx5_ib_dev *dev, bool is_user)
{
	u32 in[MLX5_ST_SZ_DW(create_uctx_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};
	void *uctx;
	int err;
	u16 uid;
	u32 cap = 0;

	/* 0 means not supported */
	if (!MLX5_CAP_GEN(dev->mdev, log_max_uctx))
		return -EINVAL;

	uctx = MLX5_ADDR_OF(create_uctx_in, in, uctx);
	if (is_user && capable(CAP_NET_RAW) &&
	    (MLX5_CAP_GEN(dev->mdev, uctx_cap) & MLX5_UCTX_CAP_RAW_TX))
		cap |= MLX5_UCTX_CAP_RAW_TX;
	if (is_user && capable(CAP_SYS_RAWIO) &&
	    (MLX5_CAP_GEN(dev->mdev, uctx_cap) &
	     MLX5_UCTX_CAP_INTERNAL_DEV_RES))
		cap |= MLX5_UCTX_CAP_INTERNAL_DEV_RES;

	MLX5_SET(create_uctx_in, in, opcode, MLX5_CMD_OP_CREATE_UCTX);
	MLX5_SET(uctx, uctx, cap, cap);

	err = mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	uid = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	return uid;
}
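
/*
 * Note (added for clarity): the uid returned above is the firmware handle
 * for this user context; the raw DEVX commands issued later are stamped
 * with it (see the MLX5_SET(general_obj_in_cmd_hdr, ..., uid, uid) calls
 * below) so the device can attribute and scope them to their owner.
 */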
void mlx5_ib_devx_destroy(struct mlx5_ib_dev *dev, u16 uid)
{
	u32 in[MLX5_ST_SZ_DW(destroy_uctx_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)] = {0};

	MLX5_SET(destroy_uctx_in, in, opcode, MLX5_CMD_OP_DESTROY_UCTX);
	MLX5_SET(destroy_uctx_in, in, uid, uid);

	mlx5_cmd_exec(dev->mdev, in, sizeof(in), out, sizeof(out));
}
static bool is_legacy_unaffiliated_event_num(u16 event_num)
{
	switch (event_num) {
	case MLX5_EVENT_TYPE_PORT_CHANGE:
		return true;
	default:
		return false;
	}
}
static bool is_legacy_obj_event_num(u16 event_num)
{
	switch (event_num) {
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
	case MLX5_EVENT_TYPE_CQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_DCT_DRAINED:
	case MLX5_EVENT_TYPE_COMP:
	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
	case MLX5_EVENT_TYPE_XRQ_ERROR:
		return true;
	default:
		return false;
	}
}
static u16 get_legacy_obj_type(u16 opcode)
{
	switch (opcode) {
	case MLX5_CMD_OP_CREATE_RQ:
		return MLX5_EVENT_QUEUE_TYPE_RQ;
	case MLX5_CMD_OP_CREATE_QP:
		return MLX5_EVENT_QUEUE_TYPE_QP;
	case MLX5_CMD_OP_CREATE_SQ:
		return MLX5_EVENT_QUEUE_TYPE_SQ;
	case MLX5_CMD_OP_CREATE_DCT:
		return MLX5_EVENT_QUEUE_TYPE_DCT;
	default:
		return 0;
	}
}
static u16 get_dec_obj_type(struct devx_obj *obj, u16 event_num)
{
	u16 opcode;

	opcode = (obj->obj_id >> 32) & 0xffff;

	if (is_legacy_obj_event_num(event_num))
		return get_legacy_obj_type(opcode);

	switch (opcode) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
		return (obj->obj_id >> 48);
	case MLX5_CMD_OP_CREATE_RQ:
		return MLX5_OBJ_TYPE_RQ;
	case MLX5_CMD_OP_CREATE_QP:
		return MLX5_OBJ_TYPE_QP;
	case MLX5_CMD_OP_CREATE_SQ:
		return MLX5_OBJ_TYPE_SQ;
	case MLX5_CMD_OP_CREATE_DCT:
		return MLX5_OBJ_TYPE_DCT;
	case MLX5_CMD_OP_CREATE_TIR:
		return MLX5_OBJ_TYPE_TIR;
	case MLX5_CMD_OP_CREATE_TIS:
		return MLX5_OBJ_TYPE_TIS;
	case MLX5_CMD_OP_CREATE_PSV:
		return MLX5_OBJ_TYPE_PSV;
	case MLX5_OBJ_TYPE_MKEY:
		return MLX5_OBJ_TYPE_MKEY;
	case MLX5_CMD_OP_CREATE_RMP:
		return MLX5_OBJ_TYPE_RMP;
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		return MLX5_OBJ_TYPE_XRC_SRQ;
	case MLX5_CMD_OP_CREATE_XRQ:
		return MLX5_OBJ_TYPE_XRQ;
	case MLX5_CMD_OP_CREATE_RQT:
		return MLX5_OBJ_TYPE_RQT;
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
		return MLX5_OBJ_TYPE_FLOW_COUNTER;
	case MLX5_CMD_OP_CREATE_CQ:
		return MLX5_OBJ_TYPE_CQ;
	default:
		return 0;
	}
}
static u16 get_event_obj_type(unsigned long event_type, struct mlx5_eqe *eqe)
{
	switch (event_type) {
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
		return eqe->data.qp_srq.type;
	case MLX5_EVENT_TYPE_CQ_ERROR:
	case MLX5_EVENT_TYPE_XRQ_ERROR:
		return 0;
	case MLX5_EVENT_TYPE_DCT_DRAINED:
	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
		return MLX5_EVENT_QUEUE_TYPE_DCT;
	default:
		return MLX5_GET(affiliated_event_header, &eqe->data, obj_type);
	}
}
static u32 get_dec_obj_id(u64 obj_id)
{
	return (obj_id & 0xffffffff);
}
/*
 * As the obj_id in the firmware is not globally unique the object type
 * must be considered upon checking for a valid object id.
 * For that the opcode of the creator command is encoded as part of the obj_id.
 */
static u64 get_enc_obj_id(u32 opcode, u32 obj_id)
{
	return ((u64)opcode << 32) | obj_id;
}
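
/*
 * Example (added for clarity): a CQ is keyed as
 * get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ, cqn), i.e.
 * ((u64)MLX5_CMD_OP_CREATE_CQ << 32) | cqn, so a CQ and an SQ that happen
 * to share the same 32-bit firmware id still map to distinct keys.
 */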
static u64 devx_get_obj_id(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);
	u64 obj_id;

	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_GENERAL_OBJECT |
					MLX5_GET(general_obj_in_cmd_hdr, in, obj_type) << 16,
					MLX5_GET(general_obj_in_cmd_hdr, in, obj_id));
		break;
	case MLX5_CMD_OP_QUERY_MKEY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_MKEY,
					MLX5_GET(query_mkey_in, in, mkey_index));
		break;
	case MLX5_CMD_OP_QUERY_CQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
					MLX5_GET(query_cq_in, in, cqn));
		break;
	case MLX5_CMD_OP_MODIFY_CQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
					MLX5_GET(modify_cq_in, in, cqn));
		break;
	case MLX5_CMD_OP_QUERY_SQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					MLX5_GET(query_sq_in, in, sqn));
		break;
	case MLX5_CMD_OP_MODIFY_SQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					MLX5_GET(modify_sq_in, in, sqn));
		break;
	case MLX5_CMD_OP_QUERY_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(query_rq_in, in, rqn));
		break;
	case MLX5_CMD_OP_MODIFY_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(modify_rq_in, in, rqn));
		break;
	case MLX5_CMD_OP_QUERY_RMP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
					MLX5_GET(query_rmp_in, in, rmpn));
		break;
	case MLX5_CMD_OP_MODIFY_RMP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RMP,
					MLX5_GET(modify_rmp_in, in, rmpn));
		break;
	case MLX5_CMD_OP_QUERY_RQT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
					MLX5_GET(query_rqt_in, in, rqtn));
		break;
	case MLX5_CMD_OP_MODIFY_RQT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
					MLX5_GET(modify_rqt_in, in, rqtn));
		break;
	case MLX5_CMD_OP_QUERY_TIR:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					MLX5_GET(query_tir_in, in, tirn));
		break;
	case MLX5_CMD_OP_MODIFY_TIR:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					MLX5_GET(modify_tir_in, in, tirn));
		break;
	case MLX5_CMD_OP_QUERY_TIS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					MLX5_GET(query_tis_in, in, tisn));
		break;
	case MLX5_CMD_OP_MODIFY_TIS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					MLX5_GET(modify_tis_in, in, tisn));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
					MLX5_GET(query_flow_table_in, in, table_id));
		break;
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_TABLE,
					MLX5_GET(modify_flow_table_in, in, table_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_FLOW_GROUP,
					MLX5_GET(query_flow_group_in, in, group_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
					MLX5_GET(query_fte_in, in, flow_index));
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY,
					MLX5_GET(set_fte_in, in, flow_index));
		break;
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_Q_COUNTER,
					MLX5_GET(query_q_counter_in, in, counter_set_id));
		break;
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_FLOW_COUNTER,
					MLX5_GET(query_flow_counter_in, in, flow_counter_id));
		break;
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT,
					MLX5_GET(general_obj_in_cmd_hdr, in, obj_id));
		break;
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
					MLX5_GET(query_scheduling_element_in,
						 in, scheduling_element_id));
		break;
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT,
					MLX5_GET(modify_scheduling_element_in,
						 in, scheduling_element_id));
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT,
					MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port));
		break;
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
					MLX5_GET(query_l2_table_entry_in, in, table_index));
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_SET_L2_TABLE_ENTRY,
					MLX5_GET(set_l2_table_entry_in, in, table_index));
		break;
	case MLX5_CMD_OP_QUERY_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(query_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RST2INIT_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rst2init_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_INIT2INIT_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(init2init_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_INIT2RTR_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(init2rtr_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RTR2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rtr2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_RTS2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(rts2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_SQERR2RTS_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(sqerr2rts_qp_in, in, qpn));
		break;
	case MLX5_CMD_OP_2ERR_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(qp_2err_in, in, qpn));
		break;
	case MLX5_CMD_OP_2RST_QP:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
					MLX5_GET(qp_2rst_in, in, qpn));
		break;
	case MLX5_CMD_OP_QUERY_DCT:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					MLX5_GET(query_dct_in, in, dctn));
		break;
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
					MLX5_GET(query_xrq_in, in, xrqn));
		break;
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
					MLX5_GET(query_xrc_srq_in, in, xrc_srqn));
		break;
	case MLX5_CMD_OP_ARM_XRC_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRC_SRQ,
					MLX5_GET(arm_xrc_srq_in, in, xrc_srqn));
		break;
	case MLX5_CMD_OP_QUERY_SRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_SRQ,
					MLX5_GET(query_srq_in, in, srqn));
		break;
	case MLX5_CMD_OP_ARM_RQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					MLX5_GET(arm_rq_in, in, srq_number));
		break;
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					MLX5_GET(drain_dct_in, in, dctn));
		break;
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
	case MLX5_CMD_OP_MODIFY_XRQ:
		obj_id = get_enc_obj_id(MLX5_CMD_OP_CREATE_XRQ,
					MLX5_GET(arm_xrq_in, in, xrqn));
		break;
	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
		obj_id = get_enc_obj_id
			(MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT,
			 MLX5_GET(query_packet_reformat_context_in,
				  in, packet_reformat_id));
		break;
	default:
		obj_id = 0;
	}

	return obj_id;
}
static bool devx_is_valid_obj_id(struct uverbs_attr_bundle *attrs,
				 struct ib_uobject *uobj, const void *in)
{
	struct mlx5_ib_dev *dev = mlx5_udata_to_mdev(&attrs->driver_udata);
	u64 obj_id = devx_get_obj_id(in);

	if (!obj_id)
		return false;

	switch (uobj_get_object_id(uobj)) {
	case UVERBS_OBJECT_CQ:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_CQ,
				      to_mcq(uobj->object)->mcq.cqn) ==
				      obj_id;

	case UVERBS_OBJECT_SRQ:
	{
		struct mlx5_core_srq *srq = &(to_msrq(uobj->object)->msrq);
		u16 opcode;

		switch (srq->common.res) {
		case MLX5_RES_XSRQ:
			opcode = MLX5_CMD_OP_CREATE_XRC_SRQ;
			break;
		case MLX5_RES_XRQ:
			opcode = MLX5_CMD_OP_CREATE_XRQ;
			break;
		default:
			if (!dev->mdev->issi)
				opcode = MLX5_CMD_OP_CREATE_SRQ;
			else
				opcode = MLX5_CMD_OP_CREATE_RMP;
		}

		return get_enc_obj_id(opcode,
				      to_msrq(uobj->object)->msrq.srqn) ==
				      obj_id;
	}

	case UVERBS_OBJECT_QP:
	{
		struct mlx5_ib_qp *qp = to_mqp(uobj->object);
		enum ib_qp_type qp_type = qp->ibqp.qp_type;

		if (qp_type == IB_QPT_RAW_PACKET ||
		    (qp->flags & IB_QP_CREATE_SOURCE_QPN)) {
			struct mlx5_ib_raw_packet_qp *raw_packet_qp =
							 &qp->raw_packet_qp;
			struct mlx5_ib_rq *rq = &raw_packet_qp->rq;
			struct mlx5_ib_sq *sq = &raw_packet_qp->sq;

			return (get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
					       rq->base.mqp.qpn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_SQ,
					       sq->base.mqp.qpn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIR,
					       rq->tirn) == obj_id ||
				get_enc_obj_id(MLX5_CMD_OP_CREATE_TIS,
					       sq->tisn) == obj_id);
		}

		if (qp_type == MLX5_IB_QPT_DCT)
			return get_enc_obj_id(MLX5_CMD_OP_CREATE_DCT,
					      qp->dct.mdct.mqp.qpn) == obj_id;

		return get_enc_obj_id(MLX5_CMD_OP_CREATE_QP,
				      qp->ibqp.qp_num) == obj_id;
	}

	case UVERBS_OBJECT_WQ:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQ,
				      to_mrwq(uobj->object)->core_qp.qpn) ==
				      obj_id;

	case UVERBS_OBJECT_RWQ_IND_TBL:
		return get_enc_obj_id(MLX5_CMD_OP_CREATE_RQT,
				      to_mrwq_ind_table(uobj->object)->rqtn) ==
				      obj_id;

	case MLX5_IB_OBJECT_DEVX_OBJ:
		return ((struct devx_obj *)uobj->object)->obj_id == obj_id;

	default:
		return false;
	}
}
static void devx_set_umem_valid(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_CREATE_MKEY:
		MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
		break;
	case MLX5_CMD_OP_CREATE_CQ:
	{
		void *cqc;

		MLX5_SET(create_cq_in, in, cq_umem_valid, 1);
		cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);
		MLX5_SET(cqc, cqc, dbr_umem_valid, 1);
		break;
	}
	case MLX5_CMD_OP_CREATE_QP:
	{
		void *qpc;

		qpc = MLX5_ADDR_OF(create_qp_in, in, qpc);
		MLX5_SET(qpc, qpc, dbr_umem_valid, 1);
		MLX5_SET(create_qp_in, in, wq_umem_valid, 1);
		break;
	}
	case MLX5_CMD_OP_CREATE_RQ:
	{
		void *rqc, *wq;

		rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
		wq  = MLX5_ADDR_OF(rqc, rqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}
	case MLX5_CMD_OP_CREATE_SQ:
	{
		void *sqc, *wq;

		sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
		wq = MLX5_ADDR_OF(sqc, sqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}
	case MLX5_CMD_OP_MODIFY_CQ:
		MLX5_SET(modify_cq_in, in, cq_umem_valid, 1);
		break;
	case MLX5_CMD_OP_CREATE_RMP:
	{
		void *rmpc, *wq;

		rmpc = MLX5_ADDR_OF(create_rmp_in, in, ctx);
		wq = MLX5_ADDR_OF(rmpc, rmpc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}
	case MLX5_CMD_OP_CREATE_XRQ:
	{
		void *xrqc, *wq;

		xrqc = MLX5_ADDR_OF(create_xrq_in, in, xrq_context);
		wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
		MLX5_SET(wq, wq, dbr_umem_valid, 1);
		MLX5_SET(wq, wq, wq_umem_valid, 1);
		break;
	}
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	{
		void *xrc_srqc;

		MLX5_SET(create_xrc_srq_in, in, xrc_srq_umem_valid, 1);
		xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, in,
					xrc_srq_context_entry);
		MLX5_SET(xrc_srqc, xrc_srqc, dbr_umem_valid, 1);
		break;
	}
	default:
		return;
	}
}
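
/*
 * Note (added for clarity): the *_umem_valid bits set above tell the
 * firmware that the umem / doorbell-record handles carried in the command
 * refer to umem objects registered through MLX5_IB_METHOD_DEVX_UMEM_REG
 * below, so it resolves them as umems rather than raw addresses.
 */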
static bool devx_is_obj_create_cmd(const void *in, u16 *opcode)
{
	*opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (*opcode) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_CREATE_XRQ:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_ALLOC_XRCD:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

		if (op_mod == 0)
			return true;
		return false;
	}
	case MLX5_CMD_OP_CREATE_PSV:
	{
		u8 num_psv = MLX5_GET(create_psv_in, in, num_psv);

		if (num_psv == 1)
			return true;
		return false;
	}
	default:
		return false;
	}
}
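
/*
 * Note (added for clarity): devx_is_obj_create_cmd() and the modify/query
 * predicates that follow decide which raw command opcodes may be issued
 * through the DEVX object create/modify/query methods; anything not listed
 * is rejected before it reaches the firmware.
 */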
static bool devx_is_obj_modify_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_MODIFY_FLOW_TABLE:
	case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_ARM_XRQ:
	case MLX5_CMD_OP_SET_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_RELEASE_XRQ_ERROR:
	case MLX5_CMD_OP_MODIFY_XRQ:
		return true;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	{
		u16 op_mod = MLX5_GET(set_fte_in, in, op_mod);

		if (op_mod == 1)
			return true;
		return false;
	}
	default:
		return false;
	}
}
static bool devx_is_obj_query_cmd(const void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_GENERAL_OBJECT:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_FLOW_COUNTER:
	case MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT:
	case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_QUERY_XRQ:
	case MLX5_CMD_OP_QUERY_XRQ_DC_PARAMS_ENTRY:
	case MLX5_CMD_OP_QUERY_XRQ_ERROR_PARAMS:
	case MLX5_CMD_OP_QUERY_PACKET_REFORMAT_CONTEXT:
		return true;
	default:
		return false;
	}
}
static bool devx_is_whitelist_cmd(void *in)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
		return true;
	default:
		return false;
	}
}
static int devx_get_uid(struct mlx5_ib_ucontext *c, void *cmd_in)
{
	if (devx_is_whitelist_cmd(cmd_in)) {
		struct mlx5_ib_dev *dev;

		if (c->devx_uid)
			return c->devx_uid;

		dev = to_mdev(c->ibucontext.device);
		if (dev->devx_whitelist_uid)
			return dev->devx_whitelist_uid;

		return -EOPNOTSUPP;
	}

	if (!c->devx_uid)
		return -EINVAL;

	return c->devx_uid;
}
static bool devx_is_general_cmd(void *in, struct mlx5_ib_dev *dev)
{
	u16 opcode = MLX5_GET(general_obj_in_cmd_hdr, in, opcode);

	/* Pass all cmds for vhca_tunnel as general, tracking is done in FW */
	if ((MLX5_CAP_GEN_64(dev->mdev, vhca_tunnel_commands) &&
	     MLX5_GET(general_obj_in_cmd_hdr, in, vhca_tunnel_id)) ||
	    (opcode >= MLX5_CMD_OP_GENERAL_START &&
	     opcode < MLX5_CMD_OP_GENERAL_END))
		return true;

	switch (opcode) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_VNIC_ENV:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_QUERY_LAG:
		return true;
	default:
		return false;
	}
}
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_EQN)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	int user_vector;
	int dev_eqn;
	unsigned int irqn;
	int err;

	if (uverbs_copy_from(&user_vector, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC))
		return -EFAULT;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	err = mlx5_vector2eqn(dev->mdev, user_vector, &dev_eqn, &irqn);
	if (err < 0)
		return err;

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			   &dev_eqn, sizeof(dev_eqn)))
		return -EFAULT;

	return 0;
}
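
/*
 * Note (added for clarity): the translated dev_eqn returned above is what
 * userspace places in a CQ context created through DEVX so that completion
 * events land on the EQ backing the requested completion vector.
 */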
/*
 * The hardware protection mechanism works like this: Each device object that
 * is subject to UAR doorbells (QP/SQ/CQ) gets a UAR ID (called uar_page in
 * the device specification manual) upon its creation. Then upon doorbell,
 * hardware fetches the object context for which the doorbell was rung, and
 * validates that the UAR through which the DB was rung matches the UAR ID
 * of the object context.
 * If there is no match, the doorbell is silently ignored by the hardware. Of
 * course, the user cannot ring a doorbell on a UAR that was not mapped to it.
 * Now in devx, as the devx kernel does not manipulate the QP/SQ/CQ command
 * mailboxes (except tagging them with UID), we expose to the user its UAR
 * ID, so it can embed it in these objects in the expected specification
 * format. So the only thing the user can do is hurt itself by creating a
 * QP/SQ/CQ with a UAR ID other than its own, and then in this case other
 * users may ring a doorbell on its objects.
 * The consequence of that will be that another user can schedule a QP/SQ
 * of the buggy user for execution (just insert it to the hardware schedule
 * queue or arm its CQ for event generation), no further harm is expected.
 */
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_QUERY_UAR)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	u32 user_idx;
	s32 dev_idx;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	if (uverbs_copy_from(&user_idx, attrs,
			     MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX))
		return -EFAULT;

	dev_idx = bfregn_to_uar_index(dev, &c->bfregi, user_idx, true);
	if (dev_idx < 0)
		return dev_idx;

	if (uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			   &dev_idx, sizeof(dev_idx)))
		return -EFAULT;

	return 0;
}
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OTHER)(
	struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_dev *dev;
	void *cmd_in = uverbs_attr_get_alloced_ptr(
		attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT);
	void *cmd_out;
	int err;
	int uid;

	c = devx_ufile2uctx(attrs);
	if (IS_ERR(c))
		return PTR_ERR(c);
	dev = to_mdev(c->ibucontext.device);

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	/* Only whitelisted general HCA commands are allowed for this method. */
	if (!devx_is_general_cmd(cmd_in, dev))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec(dev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT, cmd_out,
			      cmd_out_len);
}
static void devx_obj_build_destroy_cmd(void *in, void *out, void *din,
				       u32 *dinlen, u32 *obj_id)
{
	u16 obj_type = MLX5_GET(general_obj_in_cmd_hdr, in, obj_type);
	u16 uid = MLX5_GET(general_obj_in_cmd_hdr, in, uid);

	*obj_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id);
	*dinlen = MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr);

	MLX5_SET(general_obj_in_cmd_hdr, din, obj_id, *obj_id);
	MLX5_SET(general_obj_in_cmd_hdr, din, uid, uid);

	switch (MLX5_GET(general_obj_in_cmd_hdr, in, opcode)) {
	case MLX5_CMD_OP_CREATE_GENERAL_OBJECT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_GENERAL_OBJECT);
		MLX5_SET(general_obj_in_cmd_hdr, din, obj_type, obj_type);
		break;
	case MLX5_CMD_OP_CREATE_UMEM:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_UMEM);
		break;
	case MLX5_CMD_OP_CREATE_MKEY:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_MKEY);
		break;
	case MLX5_CMD_OP_CREATE_CQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_CQ);
		break;
	case MLX5_CMD_OP_ALLOC_PD:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_PD);
		break;
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN);
		break;
	case MLX5_CMD_OP_CREATE_RMP:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RMP);
		break;
	case MLX5_CMD_OP_CREATE_SQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SQ);
		break;
	case MLX5_CMD_OP_CREATE_RQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQ);
		break;
	case MLX5_CMD_OP_CREATE_RQT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_RQT);
		break;
	case MLX5_CMD_OP_CREATE_TIR:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIR);
		break;
	case MLX5_CMD_OP_CREATE_TIS:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_TIS);
		break;
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_Q_COUNTER);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_table_in);
		*obj_id = MLX5_GET(create_flow_table_out, out, table_id);
		MLX5_SET(destroy_flow_table_in, din, other_vport,
			 MLX5_GET(create_flow_table_in, in, other_vport));
		MLX5_SET(destroy_flow_table_in, din, vport_number,
			 MLX5_GET(create_flow_table_in, in, vport_number));
		MLX5_SET(destroy_flow_table_in, din, table_type,
			 MLX5_GET(create_flow_table_in, in, table_type));
		MLX5_SET(destroy_flow_table_in, din, table_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_TABLE);
		break;
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_flow_group_in);
		*obj_id = MLX5_GET(create_flow_group_out, out, group_id);
		MLX5_SET(destroy_flow_group_in, din, other_vport,
			 MLX5_GET(create_flow_group_in, in, other_vport));
		MLX5_SET(destroy_flow_group_in, din, vport_number,
			 MLX5_GET(create_flow_group_in, in, vport_number));
		MLX5_SET(destroy_flow_group_in, din, table_type,
			 MLX5_GET(create_flow_group_in, in, table_type));
		MLX5_SET(destroy_flow_group_in, din, table_id,
			 MLX5_GET(create_flow_group_in, in, table_id));
		MLX5_SET(destroy_flow_group_in, din, group_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_FLOW_GROUP);
		break;
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_fte_in);
		*obj_id = MLX5_GET(set_fte_in, in, flow_index);
		MLX5_SET(delete_fte_in, din, other_vport,
			 MLX5_GET(set_fte_in, in, other_vport));
		MLX5_SET(delete_fte_in, din, vport_number,
			 MLX5_GET(set_fte_in, in, vport_number));
		MLX5_SET(delete_fte_in, din, table_type,
			 MLX5_GET(set_fte_in, in, table_type));
		MLX5_SET(delete_fte_in, din, table_id,
			 MLX5_GET(set_fte_in, in, table_id));
		MLX5_SET(delete_fte_in, din, flow_index, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_ALLOC_FLOW_COUNTER:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_FLOW_COUNTER);
		break;
	case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT);
		break;
	case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT);
		break;
	case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT:
		*dinlen = MLX5_ST_SZ_BYTES(destroy_scheduling_element_in);
		*obj_id = MLX5_GET(create_scheduling_element_out, out,
				   scheduling_element_id);
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_hierarchy,
			 MLX5_GET(create_scheduling_element_in, in,
				  scheduling_hierarchy));
		MLX5_SET(destroy_scheduling_element_in, din,
			 scheduling_element_id, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT);
		break;
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
		*dinlen = MLX5_ST_SZ_BYTES(delete_vxlan_udp_dport_in);
		*obj_id = MLX5_GET(add_vxlan_udp_dport_in, in, vxlan_udp_port);
		MLX5_SET(delete_vxlan_udp_dport_in, din, vxlan_udp_port, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT);
		break;
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
		*dinlen = MLX5_ST_SZ_BYTES(delete_l2_table_entry_in);
		*obj_id = MLX5_GET(set_l2_table_entry_in, in, table_index);
		MLX5_SET(delete_l2_table_entry_in, din, table_index, *obj_id);
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY);
		break;
	case MLX5_CMD_OP_CREATE_QP:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_QP);
		break;
	case MLX5_CMD_OP_CREATE_SRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_SRQ);
		break;
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_XRC_SRQ);
		break;
	case MLX5_CMD_OP_CREATE_DCT:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_DCT);
		break;
	case MLX5_CMD_OP_CREATE_XRQ:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DESTROY_XRQ);
		break;
	case MLX5_CMD_OP_ATTACH_TO_MCG:
		*dinlen = MLX5_ST_SZ_BYTES(detach_from_mcg_in);
		MLX5_SET(detach_from_mcg_in, din, qpn,
			 MLX5_GET(attach_to_mcg_in, in, qpn));
		memcpy(MLX5_ADDR_OF(detach_from_mcg_in, din, multicast_gid),
		       MLX5_ADDR_OF(attach_to_mcg_in, in, multicast_gid),
		       MLX5_FLD_SZ_BYTES(attach_to_mcg_in, multicast_gid));
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DETACH_FROM_MCG);
		break;
	case MLX5_CMD_OP_ALLOC_XRCD:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode, MLX5_CMD_OP_DEALLOC_XRCD);
		break;
	case MLX5_CMD_OP_CREATE_PSV:
		MLX5_SET(general_obj_in_cmd_hdr, din, opcode,
			 MLX5_CMD_OP_DESTROY_PSV);
		MLX5_SET(destroy_psv_in, din, psvn,
			 MLX5_GET(create_psv_out, out, psv0_index));
		break;
	default:
		/* The entry must match to one of the devx_is_obj_create_cmd */
		break;
	}
}
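
/*
 * Note (added for clarity): the destroy command built here is stored in
 * obj->dinbox at creation time, so devx_obj_cleanup() can tear the object
 * down later without having to re-parse (or trust) user-provided input.
 */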
static int devx_handle_mkey_indirect(struct devx_obj *obj,
				     struct mlx5_ib_dev *dev,
				     void *in, void *out)
{
	struct mlx5_ib_devx_mr *devx_mr = &obj->devx_mr;
	struct mlx5_core_mkey *mkey;
	void *mkc;
	u8 key;

	mkey = &devx_mr->mmkey;
	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);
	key = MLX5_GET(mkc, mkc, mkey_7_0);
	mkey->key = mlx5_idx_to_mkey(
			MLX5_GET(create_mkey_out, out, mkey_index)) | key;
	mkey->type = MLX5_MKEY_INDIRECT_DEVX;
	mkey->iova = MLX5_GET64(mkc, mkc, start_addr);
	mkey->size = MLX5_GET64(mkc, mkc, len);
	mkey->pd = MLX5_GET(mkc, mkc, pd);
	devx_mr->ndescs = MLX5_GET(mkc, mkc, translations_octword_size);

	return xa_err(xa_store(&dev->odp_mkeys, mlx5_base_mkey(mkey->key), mkey,
			       GFP_KERNEL));
}
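
/*
 * Note (added for clarity): indirect (KLM/KSM) mkeys are published in
 * dev->odp_mkeys so the ODP page-fault path can resolve faults that
 * reference them; devx_obj_cleanup() removes the entry and waits for an
 * SRCU grace period before the mkey number can be reused.
 */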
static int devx_handle_mkey_create(struct mlx5_ib_dev *dev,
				   struct devx_obj *obj,
				   void *in, int in_len)
{
	int min_len = MLX5_BYTE_OFF(create_mkey_in, memory_key_mkey_entry) +
			MLX5_FLD_SZ_BYTES(create_mkey_in,
			memory_key_mkey_entry);
	void *mkc;
	u8 access_mode;

	if (in_len < min_len)
		return -EINVAL;

	mkc = MLX5_ADDR_OF(create_mkey_in, in, memory_key_mkey_entry);

	access_mode = MLX5_GET(mkc, mkc, access_mode_1_0);
	access_mode |= MLX5_GET(mkc, mkc, access_mode_4_2) << 2;

	if (access_mode == MLX5_MKC_ACCESS_MODE_KLMS ||
	    access_mode == MLX5_MKC_ACCESS_MODE_KSM) {
		if (IS_ENABLED(CONFIG_INFINIBAND_ON_DEMAND_PAGING))
			obj->flags |= DEVX_OBJ_FLAGS_INDIRECT_MKEY;
		return 0;
	}

	MLX5_SET(create_mkey_in, in, mkey_umem_valid, 1);
	return 0;
}
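
/*
 * Note (added for clarity): KLM/KSM access modes describe mkeys that point
 * at other mkeys rather than at a umem, so no umem_valid bit is set for
 * them; instead the object is flagged as an indirect mkey when on-demand
 * paging support is built in.
 */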
static void devx_cleanup_subscription(struct mlx5_ib_dev *dev,
				      struct devx_event_subscription *sub)
{
	struct devx_event *event;
	struct devx_obj_event *xa_val_level2;

	if (sub->is_cleaned)
		return;

	sub->is_cleaned = 1;
	list_del_rcu(&sub->xa_list);

	if (list_empty(&sub->obj_list))
		return;

	list_del_rcu(&sub->obj_list);
	/* check whether key level 1 for this obj_sub_list is empty */
	event = xa_load(&dev->devx_event_table.event_xa,
			sub->xa_key_level1);
	WARN_ON(!event);

	xa_val_level2 = xa_load(&event->object_ids, sub->xa_key_level2);
	if (list_empty(&xa_val_level2->obj_sub_list)) {
		xa_erase(&event->object_ids,
			 sub->xa_key_level2);
		kfree_rcu(xa_val_level2, rcu);
	}
}
static int devx_obj_cleanup(struct ib_uobject *uobject,
			    enum rdma_remove_reason why,
			    struct uverbs_attr_bundle *attrs)
{
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	struct mlx5_devx_event_table *devx_event_table;
	struct devx_obj *obj = uobject->object;
	struct devx_event_subscription *sub_entry, *tmp;
	struct mlx5_ib_dev *dev;
	int ret;

	dev = mlx5_udata_to_mdev(&attrs->driver_udata);
	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
		/*
		 * The pagefault_single_data_segment() does commands against
		 * the mmkey, we must wait for that to stop before freeing the
		 * mkey, as another allocation could get the same mkey #.
		 */
		xa_erase(&obj->ib_dev->odp_mkeys,
			 mlx5_base_mkey(obj->devx_mr.mmkey.key));
		synchronize_srcu(&dev->odp_srcu);
	}

	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
		ret = mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
	else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
		ret = mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
	else
		ret = mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox,
				    obj->dinlen, out, sizeof(out));
	if (ib_is_destroy_retryable(ret, why, uobject))
		return ret;

	devx_event_table = &dev->devx_event_table;

	mutex_lock(&devx_event_table->event_xa_lock);
	list_for_each_entry_safe(sub_entry, tmp, &obj->event_sub, obj_list)
		devx_cleanup_subscription(dev, sub_entry);
	mutex_unlock(&devx_event_table->event_xa_lock);

	kfree(obj);
	return ret;
}
static void devx_cq_comp(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe)
{
	struct devx_obj *obj = container_of(mcq, struct devx_obj, core_cq);
	struct mlx5_devx_event_table *table;
	struct devx_event *event;
	struct devx_obj_event *obj_event;
	u32 obj_id = mcq->cqn;

	table = &obj->ib_dev->devx_event_table;
	rcu_read_lock();
	event = xa_load(&table->event_xa, MLX5_EVENT_TYPE_COMP);
	if (!event)
		goto out;

	obj_event = xa_load(&event->object_ids, obj_id);
	if (!obj_event)
		goto out;

	dispatch_event_fd(&obj_event->obj_sub_list, eqe);
out:
	rcu_read_unlock();
}
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_CREATE)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT);
	int cmd_in_len = uverbs_attr_get_len(attrs,
					     MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN);
	void *cmd_out;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	struct devx_obj *obj;
	u16 obj_type = 0;
	int err;
	int uid;
	u32 obj_id;
	u16 opcode;

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_create_cmd(cmd_in, &opcode))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	obj = kzalloc(sizeof(struct devx_obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	if (opcode == MLX5_CMD_OP_CREATE_MKEY) {
		err = devx_handle_mkey_create(dev, obj, cmd_in, cmd_in_len);
		if (err)
			goto obj_free;
	} else {
		devx_set_umem_valid(cmd_in);
	}

	if (opcode == MLX5_CMD_OP_CREATE_DCT) {
		obj->flags |= DEVX_OBJ_FLAGS_DCT;
		err = mlx5_core_create_dct(dev, &obj->core_dct, cmd_in,
					   cmd_in_len, cmd_out, cmd_out_len);
	} else if (opcode == MLX5_CMD_OP_CREATE_CQ) {
		obj->flags |= DEVX_OBJ_FLAGS_CQ;
		obj->core_cq.comp = devx_cq_comp;
		err = mlx5_core_create_cq(dev->mdev, &obj->core_cq,
					  cmd_in, cmd_in_len, cmd_out,
					  cmd_out_len);
	} else {
		err = mlx5_cmd_exec(dev->mdev, cmd_in,
				    cmd_in_len,
				    cmd_out, cmd_out_len);
	}

	if (err)
		goto obj_free;

	if (opcode == MLX5_CMD_OP_ALLOC_FLOW_COUNTER) {
		u8 bulk = MLX5_GET(alloc_flow_counter_in,
				   cmd_in,
				   flow_counter_bulk);
		obj->flow_counter_bulk_size = 128UL * bulk;
	}

	uobj->object = obj;
	INIT_LIST_HEAD(&obj->event_sub);
	obj->ib_dev = dev;
	devx_obj_build_destroy_cmd(cmd_in, cmd_out, obj->dinbox, &obj->dinlen,
				   &obj_id);
	WARN_ON(obj->dinlen > MLX5_MAX_DESTROY_INBOX_SIZE_DW * sizeof(u32));

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT, cmd_out, cmd_out_len);
	if (err)
		goto obj_destroy;

	if (opcode == MLX5_CMD_OP_CREATE_GENERAL_OBJECT)
		obj_type = MLX5_GET(general_obj_in_cmd_hdr, cmd_in, obj_type);
	obj->obj_id = get_enc_obj_id(opcode | obj_type << 16, obj_id);

	if (obj->flags & DEVX_OBJ_FLAGS_INDIRECT_MKEY) {
		err = devx_handle_mkey_indirect(obj, dev, cmd_in, cmd_out);
		if (err)
			goto obj_destroy;
	}
	return 0;

obj_destroy:
	if (obj->flags & DEVX_OBJ_FLAGS_DCT)
		mlx5_core_destroy_dct(obj->ib_dev, &obj->core_dct);
	else if (obj->flags & DEVX_OBJ_FLAGS_CQ)
		mlx5_core_destroy_cq(obj->ib_dev->mdev, &obj->core_cq);
	else
		mlx5_cmd_exec(obj->ib_dev->mdev, obj->dinbox, obj->dinlen, out,
			      sizeof(out));
obj_free:
	kfree(obj);
	return err;
}
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_MODIFY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
	void *cmd_out;
	int err;
	int uid;

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_modify_cmd(cmd_in))
		return -EINVAL;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	devx_set_umem_valid(cmd_in);

	err = mlx5_cmd_exec(mdev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
			      cmd_out, cmd_out_len);
}
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN);
	int cmd_out_len = uverbs_attr_get_len(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
					      MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	void *cmd_out;
	int err;
	int uid;
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_query_cmd(cmd_in))
		return -EINVAL;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	cmd_out = uverbs_zalloc(attrs, cmd_out_len);
	if (IS_ERR(cmd_out))
		return PTR_ERR(cmd_out);

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec(mdev->mdev, cmd_in,
			    uverbs_attr_get_len(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN),
			    cmd_out, cmd_out_len);
	if (err)
		return err;

	return uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
			      cmd_out, cmd_out_len);
}
struct devx_async_event_queue {
	spinlock_t lock;
	wait_queue_head_t poll_wait;
	struct list_head event_list;
	atomic_t bytes_in_use;
	u8 is_destroyed:1;
};
struct devx_async_cmd_event_file {
	struct ib_uobject uobj;
	struct devx_async_event_queue ev_queue;
	struct mlx5_async_ctx async_ctx;
	struct mlx5_ib_dev *mdev;
};
static void devx_init_event_queue(struct devx_async_event_queue *ev_queue)
{
	spin_lock_init(&ev_queue->lock);
	INIT_LIST_HEAD(&ev_queue->event_list);
	init_waitqueue_head(&ev_queue->poll_wait);
	atomic_set(&ev_queue->bytes_in_use, 0);
	ev_queue->is_destroyed = 0;
}
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{
	struct devx_async_cmd_event_file *ev_file;

	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE);
	struct mlx5_ib_dev *mdev = mlx5_udata_to_mdev(&attrs->driver_udata);

	ev_file = container_of(uobj, struct devx_async_cmd_event_file,
			       uobj);
	devx_init_event_queue(&ev_file->ev_queue);
	mlx5_cmd_init_async_ctx(mdev->mdev, &ev_file->async_ctx);
	return 0;
}
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE);
	struct devx_async_event_file *ev_file;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	u32 flags;
	int err;

	err = uverbs_get_flags32(&flags, attrs,
		MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
		MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA);
	if (err)
		return err;

	ev_file = container_of(uobj, struct devx_async_event_file,
			       uobj);
	spin_lock_init(&ev_file->lock);
	INIT_LIST_HEAD(&ev_file->event_list);
	init_waitqueue_head(&ev_file->poll_wait);
	if (flags & MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA)
		ev_file->omit_data = 1;
	INIT_LIST_HEAD(&ev_file->subscribed_events_list);
	ev_file->dev = dev;
	get_device(&dev->ib_dev.dev);
	return 0;
}
static void devx_query_callback(int status, struct mlx5_async_work *context)
{
	struct devx_async_data *async_data =
		container_of(context, struct devx_async_data, cb_work);
	struct devx_async_cmd_event_file *ev_file = async_data->ev_file;
	struct devx_async_event_queue *ev_queue = &ev_file->ev_queue;
	unsigned long flags;

	/*
	 * Note that if the struct devx_async_cmd_event_file uobj begins to be
	 * destroyed it will block at mlx5_cmd_cleanup_async_ctx() until this
	 * routine returns, ensuring that it always remains valid here.
	 */
	spin_lock_irqsave(&ev_queue->lock, flags);
	list_add_tail(&async_data->list, &ev_queue->event_list);
	spin_unlock_irqrestore(&ev_queue->lock, flags);

	wake_up_interruptible(&ev_queue->poll_wait);
}
#define MAX_ASYNC_BYTES_IN_USE (1024 * 1024) /* 1MB */
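
/*
 * Note (added for clarity): each pending async query reserves cmd_out_len
 * bytes from this per-FD budget (see the atomic_add_return() below), and
 * the reservation is dropped again if submission fails, so a single channel
 * cannot pin more than 1MB of outstanding response buffers.
 */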
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY)(
	struct uverbs_attr_bundle *attrs)
{
	void *cmd_in = uverbs_attr_get_alloced_ptr(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN);
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
				attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_HANDLE);
	u16 cmd_out_len;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct ib_uobject *fd_uobj;
	int err;
	int uid;
	struct mlx5_ib_dev *mdev = to_mdev(c->ibucontext.device);
	struct devx_async_cmd_event_file *ev_file;
	struct devx_async_data *async_data;

	if (MLX5_GET(general_obj_in_cmd_hdr, cmd_in, vhca_tunnel_id))
		return -EINVAL;

	uid = devx_get_uid(c, cmd_in);
	if (uid < 0)
		return uid;

	if (!devx_is_obj_query_cmd(cmd_in))
		return -EINVAL;

	err = uverbs_get_const(&cmd_out_len, attrs,
			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN);
	if (err)
		return err;

	if (!devx_is_valid_obj_id(attrs, uobj, cmd_in))
		return -EINVAL;

	fd_uobj = uverbs_attr_get_uobject(attrs,
				MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD);
	if (IS_ERR(fd_uobj))
		return PTR_ERR(fd_uobj);

	ev_file = container_of(fd_uobj, struct devx_async_cmd_event_file,
			       uobj);

	if (atomic_add_return(cmd_out_len, &ev_file->ev_queue.bytes_in_use) >
	    MAX_ASYNC_BYTES_IN_USE) {
		atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
		return -EAGAIN;
	}

	async_data = kvzalloc(struct_size(async_data, hdr.out_data,
					  cmd_out_len), GFP_KERNEL);
	if (!async_data) {
		err = -ENOMEM;
		goto sub_bytes;
	}

	err = uverbs_copy_from(&async_data->hdr.wr_id, attrs,
			       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID);
	if (err)
		goto free_async;

	async_data->cmd_out_len = cmd_out_len;
	async_data->mdev = mdev;
	async_data->ev_file = ev_file;

	MLX5_SET(general_obj_in_cmd_hdr, cmd_in, uid, uid);
	err = mlx5_cmd_exec_cb(&ev_file->async_ctx, cmd_in,
			       uverbs_attr_get_len(attrs,
				       MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_CMD_IN),
			       async_data->hdr.out_data,
			       async_data->cmd_out_len,
			       devx_query_callback, &async_data->cb_work);
	if (err)
		goto free_async;

	return 0;

free_async:
	kvfree(async_data);
sub_bytes:
	atomic_sub(cmd_out_len, &ev_file->ev_queue.bytes_in_use);
	return err;
}
static void
subscribe_event_xa_dealloc(struct mlx5_devx_event_table *devx_event_table,
			   u32 key_level1,
			   bool is_level2,
			   u32 key_level2)
{
	struct devx_event *event;
	struct devx_obj_event *xa_val_level2;

	/* Level 1 is valid for future use, no need to free */
	if (!is_level2)
		return;

	event = xa_load(&devx_event_table->event_xa, key_level1);
	WARN_ON(!event);

	xa_val_level2 = xa_load(&event->object_ids,
				key_level2);
	if (list_empty(&xa_val_level2->obj_sub_list)) {
		xa_erase(&event->object_ids,
			 key_level2);
		kfree_rcu(xa_val_level2, rcu);
	}
}
static int
subscribe_event_xa_alloc(struct mlx5_devx_event_table *devx_event_table,
			 u32 key_level1,
			 bool is_level2,
			 u32 key_level2)
{
	struct devx_obj_event *obj_event;
	struct devx_event *event;
	int err;

	event = xa_load(&devx_event_table->event_xa, key_level1);
	if (!event) {
		event = kzalloc(sizeof(*event), GFP_KERNEL);
		if (!event)
			return -ENOMEM;

		INIT_LIST_HEAD(&event->unaffiliated_list);
		xa_init(&event->object_ids);

		err = xa_insert(&devx_event_table->event_xa,
				key_level1, event, GFP_KERNEL);
		if (err) {
			kfree(event);
			return err;
		}
	}

	if (!is_level2)
		return 0;

	obj_event = xa_load(&event->object_ids, key_level2);
	if (!obj_event) {
		obj_event = kzalloc(sizeof(*obj_event), GFP_KERNEL);
		if (!obj_event)
			/* Level1 is valid for future use, no need to free */
			return -ENOMEM;

		err = xa_insert(&event->object_ids,
				key_level2, obj_event, GFP_KERNEL);
		if (err) {
			kfree(obj_event);
			return err;
		}
		INIT_LIST_HEAD(&obj_event->obj_sub_list);
	}

	return 0;
}
static bool is_valid_events_legacy(int num_events, u16 *event_type_num_list,
				   struct devx_obj *obj)
{
	int i;

	for (i = 0; i < num_events; i++) {
		if (obj) {
			if (!is_legacy_obj_event_num(event_type_num_list[i]))
				return false;
		} else if (!is_legacy_unaffiliated_event_num(
				event_type_num_list[i])) {
			return false;
		}
	}

	return true;
}
#define MAX_SUPP_EVENT_NUM 255
static bool is_valid_events(struct mlx5_core_dev *dev,
			    int num_events, u16 *event_type_num_list,
			    struct devx_obj *obj)
{
	__be64 *aff_events;
	__be64 *unaff_events;
	int mask_entry;
	int mask_bit;
	int i;

	if (MLX5_CAP_GEN(dev, event_cap)) {
		aff_events = MLX5_CAP_DEV_EVENT(dev,
						user_affiliated_events);
		unaff_events = MLX5_CAP_DEV_EVENT(dev,
						  user_unaffiliated_events);
	} else {
		return is_valid_events_legacy(num_events, event_type_num_list,
					      obj);
	}

	for (i = 0; i < num_events; i++) {
		if (event_type_num_list[i] > MAX_SUPP_EVENT_NUM)
			return false;

		mask_entry = event_type_num_list[i] / 64;
		mask_bit = event_type_num_list[i] % 64;

		if (obj) {
			/* CQ completion */
			if (event_type_num_list[i] == 0)
				continue;

			if (!(be64_to_cpu(aff_events[mask_entry]) &
					(1ull << mask_bit)))
				return false;

			continue;
		}

		if (!(be64_to_cpu(unaff_events[mask_entry]) &
				(1ull << mask_bit)))
			return false;
	}

	return true;
}
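
/*
 * Note (added for clarity): newer devices advertise affiliated and
 * unaffiliated event bitmasks in the device event capabilities; each
 * requested event number is checked against the mask that matches the
 * subscription kind, while older devices fall back to the fixed legacy
 * lists above.
 */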
#define MAX_NUM_EVENTS 16
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT)(
	struct uverbs_attr_bundle *attrs)
{
	struct ib_uobject *devx_uobj = uverbs_attr_get_uobject(
				attrs,
				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE);
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	struct ib_uobject *fd_uobj;
	struct devx_obj *obj = NULL;
	struct devx_async_event_file *ev_file;
	struct mlx5_devx_event_table *devx_event_table = &dev->devx_event_table;
	u16 *event_type_num_list;
	struct devx_event_subscription *event_sub, *tmp_sub;
	struct list_head sub_list;
	int redirect_fd;
	bool use_eventfd = false;
	int num_events;
	int num_alloc_xa_entries = 0;
	u16 obj_type = 0;
	u64 cookie = 0;
	u32 obj_id = 0;
	int err;
	int i;

	if (!c->devx_uid)
		return -EINVAL;

	if (!IS_ERR(devx_uobj)) {
		obj = (struct devx_obj *)devx_uobj->object;
		if (obj)
			obj_id = get_dec_obj_id(obj->obj_id);
	}

	fd_uobj = uverbs_attr_get_uobject(attrs,
				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE);
	if (IS_ERR(fd_uobj))
		return PTR_ERR(fd_uobj);

	ev_file = container_of(fd_uobj, struct devx_async_event_file,
			       uobj);

	if (uverbs_attr_is_valid(attrs,
				 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM)) {
		err = uverbs_copy_from(&redirect_fd, attrs,
			MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM);
		if (err)
			return err;

		use_eventfd = true;
	}

	if (uverbs_attr_is_valid(attrs,
				 MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE)) {
		if (use_eventfd)
			return -EINVAL;

		err = uverbs_copy_from(&cookie, attrs,
				MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE);
		if (err)
			return err;
	}

	num_events = uverbs_attr_ptr_get_array_size(
		attrs, MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
		sizeof(u16));

	if (num_events < 0)
		return num_events;

	if (num_events > MAX_NUM_EVENTS)
		return -EINVAL;

	event_type_num_list = uverbs_attr_get_alloced_ptr(attrs,
			MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST);

	if (!is_valid_events(dev->mdev, num_events, event_type_num_list, obj))
		return -EINVAL;

	INIT_LIST_HEAD(&sub_list);

	/* Protect from concurrent subscriptions to same XA entries to allow
	 * both to succeed
	 */
	mutex_lock(&devx_event_table->event_xa_lock);
	for (i = 0; i < num_events; i++) {
		u32 key_level1;

		if (obj)
			obj_type = get_dec_obj_type(obj,
						    event_type_num_list[i]);
		key_level1 = event_type_num_list[i] | obj_type << 16;

		err = subscribe_event_xa_alloc(devx_event_table,
					       key_level1,
					       obj,
					       obj_id);
		if (err)
			goto err;

		num_alloc_xa_entries++;
		event_sub = kzalloc(sizeof(*event_sub), GFP_KERNEL);
		if (!event_sub) {
			err = -ENOMEM;
			goto err;
		}

		list_add_tail(&event_sub->event_list, &sub_list);
		uverbs_uobject_get(&ev_file->uobj);
		if (use_eventfd) {
			event_sub->eventfd =
				eventfd_ctx_fdget(redirect_fd);

			if (IS_ERR(event_sub->eventfd)) {
				err = PTR_ERR(event_sub->eventfd);
				event_sub->eventfd = NULL;
				goto err;
			}
		}

		event_sub->cookie = cookie;
		event_sub->ev_file = ev_file;
		/* May be needed upon cleanup the devx object/subscription */
		event_sub->xa_key_level1 = key_level1;
		event_sub->xa_key_level2 = obj_id;
		INIT_LIST_HEAD(&event_sub->obj_list);
	}

	/* Once all the allocations and the XA data insertions were done we
	 * can go ahead and add all the subscriptions to the relevant lists
	 * without concern of a failure.
	 */
	list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
		struct devx_event *event;
		struct devx_obj_event *obj_event;

		list_del_init(&event_sub->event_list);

		spin_lock_irq(&ev_file->lock);
		list_add_tail_rcu(&event_sub->file_list,
				  &ev_file->subscribed_events_list);
		spin_unlock_irq(&ev_file->lock);

		event = xa_load(&devx_event_table->event_xa,
				event_sub->xa_key_level1);
		WARN_ON(!event);

		if (!obj) {
			list_add_tail_rcu(&event_sub->xa_list,
					  &event->unaffiliated_list);
			continue;
		}

		obj_event = xa_load(&event->object_ids, obj_id);
		WARN_ON(!obj_event);
		list_add_tail_rcu(&event_sub->xa_list,
				  &obj_event->obj_sub_list);
		list_add_tail_rcu(&event_sub->obj_list,
				  &obj->event_sub);
	}

	mutex_unlock(&devx_event_table->event_xa_lock);
	return 0;

err:
	list_for_each_entry_safe(event_sub, tmp_sub, &sub_list, event_list) {
		list_del(&event_sub->event_list);

		subscribe_event_xa_dealloc(devx_event_table,
					   event_sub->xa_key_level1,
					   obj,
					   obj_id);

		if (event_sub->eventfd)
			eventfd_ctx_put(event_sub->eventfd);
		uverbs_uobject_put(&event_sub->ev_file->uobj);
		kfree(event_sub);
	}

	mutex_unlock(&devx_event_table->event_xa_lock);
	return err;
}
static int devx_umem_get(struct mlx5_ib_dev *dev, struct ib_ucontext *ucontext,
			 struct uverbs_attr_bundle *attrs,
			 struct devx_umem *obj)
{
	u64 addr;
	size_t size;
	u32 access;
	int err;

	if (uverbs_copy_from(&addr, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR) ||
	    uverbs_copy_from(&size, attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_LEN))
		return -EFAULT;

	err = uverbs_get_flags32(&access, attrs,
				 MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
				 IB_ACCESS_LOCAL_WRITE |
				 IB_ACCESS_REMOTE_WRITE |
				 IB_ACCESS_REMOTE_READ);
	if (err)
		return err;

	err = ib_check_mr_access(&dev->ib_dev, access);
	if (err)
		return err;

	obj->umem = ib_umem_get(&dev->ib_dev, addr, size, access);
	if (IS_ERR(obj->umem))
		return PTR_ERR(obj->umem);
	return 0;
}
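
/*
 * Note (added for clarity): devx_umem_reg_cmd_alloc() below sizes the
 * CREATE_UMEM inbox for one MTT entry per PAGE_SIZE block of the umem and
 * fills the MTT list through mlx5_ib_populate_pas(), in line with the
 * PAGE_SIZE-only policy described in the comment inside it.
 */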
static int devx_umem_reg_cmd_alloc(struct mlx5_ib_dev *dev,
				   struct uverbs_attr_bundle *attrs,
				   struct devx_umem *obj,
				   struct devx_umem_reg_cmd *cmd)
{
	unsigned int page_size;
	__be64 *mtt;
	void *umem;

	/*
	 * We don't know what the user intends to use this umem for, but the HW
	 * restrictions must be met. MR, doorbell records, QP, WQ and CQ all
	 * have different requirements. Since we have no idea how to sort this
	 * out, only support PAGE_SIZE with the expectation that userspace will
	 * provide the necessary alignments inside the known PAGE_SIZE and that
	 * FW will check everything.
	 */
	page_size = ib_umem_find_best_pgoff(
		obj->umem, PAGE_SIZE,
		__mlx5_page_offset_to_bitmask(__mlx5_bit_sz(umem, page_offset),
					      0));
	if (!page_size)
		return -EINVAL;

	cmd->inlen = MLX5_ST_SZ_BYTES(create_umem_in) +
		     (MLX5_ST_SZ_BYTES(mtt) *
		      ib_umem_num_dma_blocks(obj->umem, page_size));
	cmd->in = uverbs_zalloc(attrs, cmd->inlen);
	if (IS_ERR(cmd->in))
		return PTR_ERR(cmd->in);

	umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);
	mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);

	MLX5_SET(create_umem_in, cmd->in, opcode, MLX5_CMD_OP_CREATE_UMEM);
	MLX5_SET64(umem, umem, num_of_mtt,
		   ib_umem_num_dma_blocks(obj->umem, page_size));
	MLX5_SET(umem, umem, log_page_size,
		 order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET(umem, umem, page_offset,
		 ib_umem_dma_offset(obj->umem, page_size));

	mlx5_ib_populate_pas(obj->umem, page_size, mtt,
			     (obj->umem->writable ? MLX5_IB_MTT_WRITE : 0) |
				     MLX5_IB_MTT_READ);
	return 0;
}

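/*
 * MLX5_IB_METHOD_DEVX_UMEM_REG: pin the user buffer, execute CREATE_UMEM with
 * the caller's devx uid, save the matching destroy command in the object and
 * return the firmware umem id to userspace.
 */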
static int UVERBS_HANDLER(MLX5_IB_METHOD_DEVX_UMEM_REG)(
	struct uverbs_attr_bundle *attrs)
{
	struct devx_umem_reg_cmd cmd;
	struct devx_umem *obj;
	struct ib_uobject *uobj = uverbs_attr_get_uobject(
		attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
	u32 obj_id;
	struct mlx5_ib_ucontext *c = rdma_udata_to_drv_context(
		&attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
	struct mlx5_ib_dev *dev = to_mdev(c->ibucontext.device);
	int err;

	if (!c->devx_uid)
		return -EINVAL;

	obj = kzalloc(sizeof(struct devx_umem), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	err = devx_umem_get(dev, &c->ibucontext, attrs, obj);
	if (err)
		goto err_obj_free;

	err = devx_umem_reg_cmd_alloc(dev, attrs, obj, &cmd);
	if (err)
		goto err_umem_release;

	MLX5_SET(create_umem_in, cmd.in, uid, c->devx_uid);
	err = mlx5_cmd_exec(dev->mdev, cmd.in, cmd.inlen, cmd.out,
			    sizeof(cmd.out));
	if (err)
		goto err_umem_release;

	obj->mdev = dev->mdev;
	uobj->object = obj;
	devx_obj_build_destroy_cmd(cmd.in, cmd.out, obj->dinbox, &obj->dinlen, &obj_id);
	uverbs_finalize_uobj_create(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID, &obj_id,
			     sizeof(obj_id));
	return err;

err_umem_release:
	ib_umem_release(obj->umem);
err_obj_free:
	kfree(obj);
	return err;
}

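/* Execute the saved destroy command and release the pinned user memory. */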
static int devx_umem_cleanup(struct ib_uobject *uobject,
			     enum rdma_remove_reason why,
			     struct uverbs_attr_bundle *attrs)
{
	struct devx_umem *obj = uobject->object;
	u32 out[MLX5_ST_SZ_DW(general_obj_out_cmd_hdr)];
	int err;

	err = mlx5_cmd_exec(obj->mdev, obj->dinbox, obj->dinlen, out, sizeof(out));
	if (err)
		return err;

	ib_umem_release(obj->umem);
	kfree(obj);
	return 0;
}

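/*
 * An unaffiliated event is not tied to a specific object id. When the device
 * exposes event_cap, consult the user_unaffiliated_events bitmask; otherwise
 * fall back to the legacy list of unaffiliated event numbers.
 */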
static bool is_unaffiliated_event(struct mlx5_core_dev *dev,
				  unsigned long event_type)
{
	__be64 *unaff_events;
	int mask_entry;
	int mask_bit;

	if (!MLX5_CAP_GEN(dev, event_cap))
		return is_legacy_unaffiliated_event_num(event_type);

	unaff_events = MLX5_CAP_DEV_EVENT(dev,
					  user_unaffiliated_events);
	WARN_ON(event_type > MAX_SUPP_EVENT_NUM);

	mask_entry = event_type / 64;
	mask_bit = event_type % 64;

	if (!(be64_to_cpu(unaff_events[mask_entry]) & (1ull << mask_bit)))
		return false;

	return true;
}

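/* Extract the 24-bit object id carried by the EQE for affiliated events. */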
static u32 devx_get_obj_id_from_event(unsigned long event_type, void *data)
{
	struct mlx5_eqe *eqe = data;
	u32 obj_id = 0;

	switch (event_type) {
	case MLX5_EVENT_TYPE_SRQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_SRQ_RQ_LIMIT:
	case MLX5_EVENT_TYPE_PATH_MIG:
	case MLX5_EVENT_TYPE_COMM_EST:
	case MLX5_EVENT_TYPE_SQ_DRAINED:
	case MLX5_EVENT_TYPE_SRQ_LAST_WQE:
	case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
	case MLX5_EVENT_TYPE_PATH_MIG_FAILED:
	case MLX5_EVENT_TYPE_WQ_INVAL_REQ_ERROR:
	case MLX5_EVENT_TYPE_WQ_ACCESS_ERROR:
		obj_id = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;
		break;
	case MLX5_EVENT_TYPE_XRQ_ERROR:
		obj_id = be32_to_cpu(eqe->data.xrq_err.type_xrqn) & 0xffffff;
		break;
	case MLX5_EVENT_TYPE_DCT_DRAINED:
	case MLX5_EVENT_TYPE_DCT_KEY_VIOLATION:
		obj_id = be32_to_cpu(eqe->data.dct.dctn) & 0xffffff;
		break;
	case MLX5_EVENT_TYPE_CQ_ERROR:
		obj_id = be32_to_cpu(eqe->data.cq_err.cqn) & 0xffffff;
		break;
	default:
		obj_id = MLX5_GET(affiliated_event_header, &eqe->data, obj_id);
		break;
	}

	return obj_id;
}

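/*
 * Queue one event on the subscription's event FD. In omit_data mode only the
 * subscription cookie is queued (and never more than once); otherwise the
 * whole EQE is copied into a freshly allocated entry. Readers are woken up in
 * both cases.
 */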
static int deliver_event(struct devx_event_subscription *event_sub,
			 const void *data)
{
	struct devx_async_event_file *ev_file;
	struct devx_async_event_data *event_data;
	unsigned long flags;

	ev_file = event_sub->ev_file;

	if (ev_file->omit_data) {
		spin_lock_irqsave(&ev_file->lock, flags);
		if (!list_empty(&event_sub->event_list) ||
		    ev_file->is_destroyed) {
			spin_unlock_irqrestore(&ev_file->lock, flags);
			return 0;
		}

		list_add_tail(&event_sub->event_list, &ev_file->event_list);
		spin_unlock_irqrestore(&ev_file->lock, flags);
		wake_up_interruptible(&ev_file->poll_wait);
		return 0;
	}

	event_data = kzalloc(sizeof(*event_data) + sizeof(struct mlx5_eqe),
			     GFP_ATOMIC);
	if (!event_data) {
		spin_lock_irqsave(&ev_file->lock, flags);
		ev_file->is_overflow_err = 1;
		spin_unlock_irqrestore(&ev_file->lock, flags);
		return -ENOMEM;
	}

	event_data->hdr.cookie = event_sub->cookie;
	memcpy(event_data->hdr.out_data, data, sizeof(struct mlx5_eqe));

	spin_lock_irqsave(&ev_file->lock, flags);
	if (!ev_file->is_destroyed)
		list_add_tail(&event_data->list, &ev_file->event_list);
	else
		kfree(event_data);
	spin_unlock_irqrestore(&ev_file->lock, flags);
	wake_up_interruptible(&ev_file->poll_wait);

	return 0;
}

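/*
 * Walk the RCU-protected subscription list: signal the redirect eventfd when
 * one was provided, otherwise queue the event for the FD's read().
 */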
static void dispatch_event_fd(struct list_head *fd_list,
			      const void *data)
{
	struct devx_event_subscription *item;

	list_for_each_entry_rcu(item, fd_list, xa_list) {
		if (item->eventfd)
			eventfd_signal(item->eventfd, 1);
		else
			deliver_event(item, data);
	}
}

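/*
 * EQ notifier entry point. Frequent kernel-only events are filtered out, the
 * (event type, object type) key is looked up in the event XArray under RCU
 * and the event is dispatched to the unaffiliated list or to the per-object
 * subscriber list.
 */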
static int devx_event_notifier(struct notifier_block *nb,
			       unsigned long event_type, void *data)
{
	struct mlx5_devx_event_table *table;
	struct mlx5_ib_dev *dev;
	struct devx_event *event;
	struct devx_obj_event *obj_event;
	u16 obj_type = 0;
	bool is_unaffiliated;
	u32 obj_id;

	/* Explicit filtering to kernel events which may occur frequently */
	if (event_type == MLX5_EVENT_TYPE_CMD ||
	    event_type == MLX5_EVENT_TYPE_PAGE_REQUEST)
		return NOTIFY_OK;

	table = container_of(nb, struct mlx5_devx_event_table, devx_nb.nb);
	dev = container_of(table, struct mlx5_ib_dev, devx_event_table);
	is_unaffiliated = is_unaffiliated_event(dev->mdev, event_type);

	if (!is_unaffiliated)
		obj_type = get_event_obj_type(event_type, data);

	rcu_read_lock();
	event = xa_load(&table->event_xa, event_type | (obj_type << 16));
	if (!event) {
		rcu_read_unlock();
		return NOTIFY_DONE;
	}

	if (is_unaffiliated) {
		dispatch_event_fd(&event->unaffiliated_list, data);
		rcu_read_unlock();
		return NOTIFY_OK;
	}

	obj_id = devx_get_obj_id_from_event(event_type, data);
	obj_event = xa_load(&event->object_ids, obj_id);
	if (!obj_event) {
		rcu_read_unlock();
		return NOTIFY_DONE;
	}

	dispatch_event_fd(&obj_event->obj_sub_list, data);

	rcu_read_unlock();
	return NOTIFY_OK;
}

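/*
 * Allocate the kernel-owned ("whitelist") uid and, when it is available,
 * initialize the event table and register the EQ notifier.
 */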
int mlx5_ib_devx_init(struct mlx5_ib_dev *dev)
{
	struct mlx5_devx_event_table *table = &dev->devx_event_table;
	int uid;

	uid = mlx5_ib_devx_create(dev, false);
	if (uid > 0) {
		dev->devx_whitelist_uid = uid;
		xa_init(&table->event_xa);
		mutex_init(&table->event_xa_lock);
		MLX5_NB_INIT(&table->devx_nb, devx_event_notifier, NOTIFY_ANY);
		mlx5_eq_notifier_register(dev->mdev, &table->devx_nb);
	}

	return 0;
}

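/*
 * Teardown path of mlx5_ib_devx_init(): unregister the notifier, drop any
 * remaining unaffiliated subscriptions, destroy the event XArray and release
 * the whitelist uid.
 */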
void mlx5_ib_devx_cleanup(struct mlx5_ib_dev *dev)
{
	struct mlx5_devx_event_table *table = &dev->devx_event_table;
	struct devx_event_subscription *sub, *tmp;
	struct devx_event *event;
	void *entry;
	unsigned long id;

	if (dev->devx_whitelist_uid) {
		mlx5_eq_notifier_unregister(dev->mdev, &table->devx_nb);
		mutex_lock(&dev->devx_event_table.event_xa_lock);
		xa_for_each(&table->event_xa, id, entry) {
			event = entry;
			list_for_each_entry_safe(
				sub, tmp, &event->unaffiliated_list, xa_list)
				devx_cleanup_subscription(dev, sub);
			kfree(entry);
		}
		mutex_unlock(&dev->devx_event_table.event_xa_lock);
		xa_destroy(&table->event_xa);

		mlx5_ib_devx_destroy(dev, dev->devx_whitelist_uid);
	}
}

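/*
 * read() on the async command FD: block (unless O_NONBLOCK) until a completed
 * async query is queued, then copy the whole hdr + command output to the user
 * buffer and return the accounted bytes to the queue quota.
 */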
static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
					 size_t count, loff_t *pos)
{
	struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
	struct devx_async_data *event;
	int ret = 0;
	size_t eventsz;

	spin_lock_irq(&ev_queue->lock);

	while (list_empty(&ev_queue->event_list)) {
		spin_unlock_irq(&ev_queue->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(
			    ev_queue->poll_wait,
			    (!list_empty(&ev_queue->event_list) ||
			     ev_queue->is_destroyed))) {
			return -ERESTARTSYS;
		}

		spin_lock_irq(&ev_queue->lock);
		if (ev_queue->is_destroyed) {
			spin_unlock_irq(&ev_queue->lock);
			return -EIO;
		}
	}

	event = list_entry(ev_queue->event_list.next,
			   struct devx_async_data, list);
	eventsz = event->cmd_out_len +
		  sizeof(struct mlx5_ib_uapi_devx_async_cmd_hdr);

	if (eventsz > count) {
		spin_unlock_irq(&ev_queue->lock);
		return -ENOSPC;
	}

	list_del(ev_queue->event_list.next);
	spin_unlock_irq(&ev_queue->lock);

	if (copy_to_user(buf, &event->hdr, eventsz))
		ret = -EFAULT;
	else
		ret = eventsz;

	atomic_sub(event->cmd_out_len, &ev_queue->bytes_in_use);
	kvfree(event);
	return ret;
}

static __poll_t devx_async_cmd_event_poll(struct file *filp,
					  struct poll_table_struct *wait)
{
	struct devx_async_cmd_event_file *comp_ev_file = filp->private_data;
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
	__poll_t pollflags = 0;

	poll_wait(filp, &ev_queue->poll_wait, wait);

	spin_lock_irq(&ev_queue->lock);
	if (ev_queue->is_destroyed)
		pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
	else if (!list_empty(&ev_queue->event_list))
		pollflags = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irq(&ev_queue->lock);

	return pollflags;
}

static const struct file_operations devx_async_cmd_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = devx_async_cmd_event_read,
	.poll    = devx_async_cmd_event_poll,
	.release = uverbs_uobject_fd_release,
	.llseek	 = no_llseek,
};

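/*
 * read() on the async event FD: returns either the 8-byte subscription cookie
 * (omit_data mode) or a full event header followed by the raw EQE.
 */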
static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
				     size_t count, loff_t *pos)
{
	struct devx_async_event_file *ev_file = filp->private_data;
	struct devx_event_subscription *event_sub;
	struct devx_async_event_data *event;
	int ret = 0;
	size_t eventsz;
	bool omit_data;
	void *event_data;

	omit_data = ev_file->omit_data;

	spin_lock_irq(&ev_file->lock);

	if (ev_file->is_overflow_err) {
		ev_file->is_overflow_err = 0;
		spin_unlock_irq(&ev_file->lock);
		return -EOVERFLOW;
	}

	while (list_empty(&ev_file->event_list)) {
		spin_unlock_irq(&ev_file->lock);

		if (filp->f_flags & O_NONBLOCK)
			return -EAGAIN;

		if (wait_event_interruptible(ev_file->poll_wait,
					     (!list_empty(&ev_file->event_list) ||
					      ev_file->is_destroyed))) {
			return -ERESTARTSYS;
		}

		spin_lock_irq(&ev_file->lock);
		if (ev_file->is_destroyed) {
			spin_unlock_irq(&ev_file->lock);
			return -EIO;
		}
	}

	if (omit_data) {
		event_sub = list_first_entry(&ev_file->event_list,
					     struct devx_event_subscription,
					     event_list);
		eventsz = sizeof(event_sub->cookie);
		event_data = &event_sub->cookie;
	} else {
		event = list_first_entry(&ev_file->event_list,
					 struct devx_async_event_data, list);
		eventsz = sizeof(struct mlx5_eqe) +
			  sizeof(struct mlx5_ib_uapi_devx_async_event_hdr);
		event_data = &event->hdr;
	}

	if (eventsz > count) {
		spin_unlock_irq(&ev_file->lock);
		return -EINVAL;
	}

	if (omit_data)
		list_del_init(&event_sub->event_list);
	else
		list_del(&event->list);

	spin_unlock_irq(&ev_file->lock);

	if (copy_to_user(buf, event_data, eventsz))
		/* This points to an application issue, not a kernel concern */
		ret = -EFAULT;
	else
		ret = eventsz;

	if (!omit_data)
		kfree(event);
	return ret;
}

static __poll_t devx_async_event_poll(struct file *filp,
				      struct poll_table_struct *wait)
{
	struct devx_async_event_file *ev_file = filp->private_data;
	__poll_t pollflags = 0;

	poll_wait(filp, &ev_file->poll_wait, wait);

	spin_lock_irq(&ev_file->lock);
	if (ev_file->is_destroyed)
		pollflags = EPOLLIN | EPOLLRDNORM | EPOLLRDHUP;
	else if (!list_empty(&ev_file->event_list))
		pollflags = EPOLLIN | EPOLLRDNORM;
	spin_unlock_irq(&ev_file->lock);

	return pollflags;
}

static void devx_free_subscription(struct rcu_head *rcu)
{
	struct devx_event_subscription *event_sub =
		container_of(rcu, struct devx_event_subscription, rcu);

	if (event_sub->eventfd)
		eventfd_ctx_put(event_sub->eventfd);
	uverbs_uobject_put(&event_sub->ev_file->uobj);
	kfree(event_sub);
}

static const struct file_operations devx_async_event_fops = {
	.owner	 = THIS_MODULE,
	.read	 = devx_async_event_read,
	.poll    = devx_async_event_poll,
	.release = uverbs_uobject_fd_release,
	.llseek	 = no_llseek,
};

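/*
 * FD teardown for the async command channel: mark the queue destroyed, wake
 * readers, flush outstanding async commands and free any queued completions.
 */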
static void devx_async_cmd_event_destroy_uobj(struct ib_uobject *uobj,
					      enum rdma_remove_reason why)
{
	struct devx_async_cmd_event_file *comp_ev_file =
		container_of(uobj, struct devx_async_cmd_event_file,
			     uobj);
	struct devx_async_event_queue *ev_queue = &comp_ev_file->ev_queue;
	struct devx_async_data *entry, *tmp;

	spin_lock_irq(&ev_queue->lock);
	ev_queue->is_destroyed = 1;
	spin_unlock_irq(&ev_queue->lock);
	wake_up_interruptible(&ev_queue->poll_wait);

	mlx5_cmd_cleanup_async_ctx(&comp_ev_file->async_ctx);

	spin_lock_irq(&comp_ev_file->ev_queue.lock);
	list_for_each_entry_safe(entry, tmp,
				 &comp_ev_file->ev_queue.event_list, list) {
		list_del(&entry->list);
		kvfree(entry);
	}
	spin_unlock_irq(&comp_ev_file->ev_queue.lock);
}

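/*
 * FD teardown for the async event channel: drop pending events, wake readers
 * and detach every subscription that was created on this FD, freeing each one
 * after an RCU grace period.
 */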
static void devx_async_event_destroy_uobj(struct ib_uobject *uobj,
					  enum rdma_remove_reason why)
{
	struct devx_async_event_file *ev_file =
		container_of(uobj, struct devx_async_event_file,
			     uobj);
	struct devx_event_subscription *event_sub, *event_sub_tmp;
	struct mlx5_ib_dev *dev = ev_file->dev;

	spin_lock_irq(&ev_file->lock);
	ev_file->is_destroyed = 1;

	/* free the pending events allocation */
	if (ev_file->omit_data) {
		struct devx_event_subscription *event_sub, *tmp;

		list_for_each_entry_safe(event_sub, tmp, &ev_file->event_list,
					 event_list)
			list_del_init(&event_sub->event_list);

	} else {
		struct devx_async_event_data *entry, *tmp;

		list_for_each_entry_safe(entry, tmp, &ev_file->event_list,
					 list) {
			list_del(&entry->list);
			kfree(entry);
		}
	}

	spin_unlock_irq(&ev_file->lock);
	wake_up_interruptible(&ev_file->poll_wait);

	mutex_lock(&dev->devx_event_table.event_xa_lock);
	/* delete the subscriptions which are related to this FD */
	list_for_each_entry_safe(event_sub, event_sub_tmp,
				 &ev_file->subscribed_events_list, file_list) {
		devx_cleanup_subscription(dev, event_sub);
		list_del_rcu(&event_sub->file_list);
		/* subscription may not be used by the read API any more */
		call_rcu(&event_sub->rcu, devx_free_subscription);
	}
	mutex_unlock(&dev->devx_event_table.event_xa_lock);

	put_device(&dev->ib_dev.dev);
}

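/* uverbs ioctl() method and object declarations for the DEVX uAPI. */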
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_UMEM_REG,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE,
			MLX5_IB_OBJECT_DEVX_UMEM,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
			     enum ib_access_flags),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DEVX_UMEM_DEREG,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE,
			MLX5_IB_OBJECT_DEVX_UMEM,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_QUERY_EQN,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_QUERY_UAR,
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX,
			   UVERBS_ATTR_TYPE(u32),
			   UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
			    UVERBS_ATTR_TYPE(u32),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OTHER,
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OTHER_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_CREATE,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_WRITE,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_OUT(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_out_cmd_hdr)),
		UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE,
			UVERBS_IDR_ANY_OBJECT,
			UVERBS_ACCESS_READ,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
		UVERBS_ATTR_MIN_SIZE(MLX5_ST_SZ_BYTES(general_obj_in_cmd_hdr)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_OUT_LEN,
			     u16, UA_MANDATORY),
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_FD,
		       MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
		       UVERBS_ACCESS_READ,
		       UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_OBJ_QUERY_ASYNC_WR_ID,
			   UVERBS_ATTR_TYPE(u64),
			   UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_HANDLE,
		       MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
		       UVERBS_ACCESS_READ,
		       UA_MANDATORY),
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_OBJ_HANDLE,
			MLX5_IB_OBJECT_DEVX_OBJ,
			UVERBS_ACCESS_READ,
			UA_OPTIONAL),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_TYPE_NUM_LIST,
			   UVERBS_ATTR_MIN_SIZE(sizeof(u16)),
			   UA_MANDATORY,
			   UA_ALLOC_AND_COPY),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_COOKIE,
			   UVERBS_ATTR_TYPE(u64),
			   UA_OPTIONAL),
	UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DEVX_SUBSCRIBE_EVENT_FD_NUM,
			   UVERBS_ATTR_TYPE(u32),
			   UA_OPTIONAL));

DECLARE_UVERBS_GLOBAL_METHODS(MLX5_IB_OBJECT_DEVX,
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OTHER),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_UAR),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_QUERY_EQN),
			      &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_SUBSCRIBE_EVENT));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_OBJ,
			    UVERBS_TYPE_ALLOC_IDR(devx_obj_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_CREATE),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_DESTROY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_MODIFY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_QUERY),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_DEVX_UMEM,
			    UVERBS_TYPE_ALLOC_IDR(devx_umem_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_REG),
			    &UVERBS_METHOD(MLX5_IB_METHOD_DEVX_UMEM_DEREG));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_CMD_FD_ALLOC_HANDLE,
		       MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
		       UVERBS_ACCESS_NEW,
		       UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(
	MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_cmd_event_file),
			     devx_async_cmd_event_destroy_uobj,
			     &devx_async_cmd_event_fops, "[devx_async_cmd]",
			     O_RDONLY),
	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_CMD_FD_ALLOC));

DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC,
	UVERBS_ATTR_FD(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_HANDLE,
		       MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
		       UVERBS_ACCESS_NEW,
		       UA_MANDATORY),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_DEVX_ASYNC_EVENT_FD_ALLOC_FLAGS,
			     enum mlx5_ib_uapi_devx_create_event_channel_flags,
			     UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(
	MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
	UVERBS_TYPE_ALLOC_FD(sizeof(struct devx_async_event_file),
			     devx_async_event_destroy_uobj,
			     &devx_async_event_fops, "[devx_async_event]",
			     O_RDONLY),
	&UVERBS_METHOD(MLX5_IB_METHOD_DEVX_ASYNC_EVENT_FD_ALLOC));

static bool devx_is_supported(struct ib_device *device)
{
	struct mlx5_ib_dev *dev = to_mdev(device);

	return MLX5_CAP_GEN(dev->mdev, log_max_uctx);
}

const struct uapi_definition mlx5_ib_devx_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_OBJ,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_UMEM,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
		UAPI_DEF_IS_OBJ_SUPPORTED(devx_is_supported)),