// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2021, Mellanox Technologies inc. All rights reserved.
 */

#include <rdma/uverbs_std_types.h>
#include "dm.h"

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>
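
/*
 * MEMIC allocation: scan the memic_alloc_pages bitmap under dm->lock for a
 * free range of device pages, reserve it, then ask firmware to allocate the
 * range with ALLOC_MEMIC. On failure the reservation is rolled back; -EAGAIN
 * from firmware means the range is busy, so the search continues from the
 * next page.
 */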
static int mlx5_cmd_alloc_memic(struct mlx5_dm *dm, phys_addr_t *addr,
                                u64 length, u32 alignment)
{
        struct mlx5_core_dev *dev = dm->dev;
        u64 num_memic_hw_pages = MLX5_CAP_DEV_MEM(dev, memic_bar_size)
                                        >> PAGE_SHIFT;
        u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
        u32 max_alignment = MLX5_CAP_DEV_MEM(dev, log_max_memic_addr_alignment);
        u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
        u32 out[MLX5_ST_SZ_DW(alloc_memic_out)] = {};
        u32 in[MLX5_ST_SZ_DW(alloc_memic_in)] = {};
        u32 mlx5_alignment;
        u64 page_idx = 0;
        int ret = 0;

        if (!length || (length & MLX5_MEMIC_ALLOC_SIZE_MASK))
                return -EINVAL;

        /* mlx5 device sets alignment as 64*2^driver_value
         * so normalizing is needed.
         */
        mlx5_alignment = (alignment < MLX5_MEMIC_BASE_ALIGN) ? 0 :
                         alignment - MLX5_MEMIC_BASE_ALIGN;
        if (mlx5_alignment > max_alignment)
                return -EINVAL;

        MLX5_SET(alloc_memic_in, in, opcode, MLX5_CMD_OP_ALLOC_MEMIC);
        MLX5_SET(alloc_memic_in, in, range_size, num_pages * PAGE_SIZE);
        MLX5_SET(alloc_memic_in, in, memic_size, length);
        MLX5_SET(alloc_memic_in, in, log_memic_addr_alignment,
                 mlx5_alignment);

        while (page_idx < num_memic_hw_pages) {
                spin_lock(&dm->lock);
                page_idx = bitmap_find_next_zero_area(dm->memic_alloc_pages,
                                                      num_memic_hw_pages,
                                                      page_idx,
                                                      num_pages, 0);

                if (page_idx < num_memic_hw_pages)
                        bitmap_set(dm->memic_alloc_pages,
                                   page_idx, num_pages);

                spin_unlock(&dm->lock);

                if (page_idx >= num_memic_hw_pages)
                        break;

                MLX5_SET64(alloc_memic_in, in, range_start_addr,
                           hw_start_addr + (page_idx * PAGE_SIZE));

                ret = mlx5_cmd_exec_inout(dev, alloc_memic, in, out);
                if (ret) {
                        spin_lock(&dm->lock);
                        bitmap_clear(dm->memic_alloc_pages,
                                     page_idx, num_pages);
                        spin_unlock(&dm->lock);

                        if (ret == -EAGAIN) {
                                page_idx++;
                                continue;
                        }

                        return ret;
                }

                *addr = dev->bar_addr +
                        MLX5_GET64(alloc_memic_out, out, memic_start_addr);

                return 0;
        }

        return -ENOMEM;
}
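
/*
 * Free a MEMIC range: convert the BAR-relative address back to a page index,
 * issue DEALLOC_MEMIC, and clear the pages in the allocation bitmap only if
 * firmware accepted the command.
 */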
void mlx5_cmd_dealloc_memic(struct mlx5_dm *dm, phys_addr_t addr,
                            u64 length)
{
        struct mlx5_core_dev *dev = dm->dev;
        u64 hw_start_addr = MLX5_CAP64_DEV_MEM(dev, memic_bar_start_addr);
        u32 num_pages = DIV_ROUND_UP(length, PAGE_SIZE);
        u32 in[MLX5_ST_SZ_DW(dealloc_memic_in)] = {};
        u64 start_page_idx;
        int err;

        addr -= dev->bar_addr;
        start_page_idx = (addr - hw_start_addr) >> PAGE_SHIFT;

        MLX5_SET(dealloc_memic_in, in, opcode, MLX5_CMD_OP_DEALLOC_MEMIC);
        MLX5_SET64(dealloc_memic_in, in, memic_start_addr, addr);
        MLX5_SET(dealloc_memic_in, in, memic_size, length);

        err = mlx5_cmd_exec_in(dev, dealloc_memic, in);
        if (err)
                return;

        spin_lock(&dm->lock);
        bitmap_clear(dm->memic_alloc_pages,
                     start_page_idx, num_pages);
        spin_unlock(&dm->lock);
}
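
/*
 * MEMIC operation addresses are handed out and returned through the
 * MODIFY_MEMIC command: op_mod selects alloc or dealloc and, on alloc,
 * firmware returns a dedicated BAR address for the requested operation type.
 */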
void mlx5_cmd_dealloc_memic_op(struct mlx5_dm *dm, phys_addr_t addr,
                               u8 operation)
{
        u32 in[MLX5_ST_SZ_DW(modify_memic_in)] = {};
        struct mlx5_core_dev *dev = dm->dev;

        MLX5_SET(modify_memic_in, in, opcode, MLX5_CMD_OP_MODIFY_MEMIC);
        MLX5_SET(modify_memic_in, in, op_mod, MLX5_MODIFY_MEMIC_OP_MOD_DEALLOC);
        MLX5_SET(modify_memic_in, in, memic_operation_type, operation);
        MLX5_SET64(modify_memic_in, in, memic_start_addr, addr - dev->bar_addr);

        mlx5_cmd_exec_in(dev, modify_memic, in);
}
static int mlx5_cmd_alloc_memic_op(struct mlx5_dm *dm, phys_addr_t addr,
                                   u8 operation, phys_addr_t *op_addr)
{
        u32 out[MLX5_ST_SZ_DW(modify_memic_out)] = {};
        u32 in[MLX5_ST_SZ_DW(modify_memic_in)] = {};
        struct mlx5_core_dev *dev = dm->dev;
        int err;

        MLX5_SET(modify_memic_in, in, opcode, MLX5_CMD_OP_MODIFY_MEMIC);
        MLX5_SET(modify_memic_in, in, op_mod, MLX5_MODIFY_MEMIC_OP_MOD_ALLOC);
        MLX5_SET(modify_memic_in, in, memic_operation_type, operation);
        MLX5_SET64(modify_memic_in, in, memic_start_addr, addr - dev->bar_addr);

        err = mlx5_cmd_exec_inout(dev, modify_memic, in, out);
        if (err)
                return err;

        *op_addr = dev->bar_addr +
                   MLX5_GET64(modify_memic_out, out, memic_operation_addr);
        return 0;
}
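
/*
 * DM mmap entries are inserted into the pgoff range reserved for
 * MLX5_IB_MMAP_DEVICE_MEM (keys MLX5_IB_MMAP_DEVICE_MEM << 16 up to the next
 * 64K boundary); the low 16 bits of the resulting start_pgoff are what gets
 * reported back to userspace as the page index.
 */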
static int add_dm_mmap_entry(struct ib_ucontext *context,
                             struct mlx5_user_mmap_entry *mentry, u8 mmap_flag,
                             size_t size, u64 address)
{
        mentry->mmap_flag = mmap_flag;
        mentry->address = address;

        return rdma_user_mmap_entry_insert_range(
                context, &mentry->rdma_entry, size,
                MLX5_IB_MMAP_DEVICE_MEM << 16,
                (MLX5_IB_MMAP_DEVICE_MEM << 16) + (1UL << 16) - 1);
}
static void mlx5_ib_dm_memic_free(struct kref *kref)
{
        struct mlx5_ib_dm_memic *dm =
                container_of(kref, struct mlx5_ib_dm_memic, ref);
        struct mlx5_ib_dev *dev = to_mdev(dm->base.ibdm.device);

        mlx5_cmd_dealloc_memic(&dev->dm, dm->base.dev_addr, dm->base.size);
        kfree(dm);
}
static int copy_op_to_user(struct mlx5_ib_dm_op_entry *op_entry,
                           struct uverbs_attr_bundle *attrs)
{
        u64 start_offset;
        u16 page_idx;
        int err;

        page_idx = op_entry->mentry.rdma_entry.start_pgoff & 0xFFFF;
        start_offset = op_entry->op_addr & ~PAGE_MASK;
        err = uverbs_copy_to(attrs, MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_PAGE_INDEX,
                             &page_idx, sizeof(page_idx));
        if (err)
                return err;

        return uverbs_copy_to(attrs,
                              MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_START_OFFSET,
                              &start_offset, sizeof(start_offset));
}
static int map_existing_op(struct mlx5_ib_dm_memic *dm, u8 op,
                           struct uverbs_attr_bundle *attrs)
{
        struct mlx5_ib_dm_op_entry *op_entry;

        op_entry = xa_load(&dm->ops, op);
        if (!op_entry)
                return -ENOENT;

        return copy_op_to_user(op_entry, attrs);
}
static int UVERBS_HANDLER(MLX5_IB_METHOD_DM_MAP_OP_ADDR)(
        struct uverbs_attr_bundle *attrs)
{
        struct ib_uobject *uobj = uverbs_attr_get_uobject(
                attrs, MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_HANDLE);
        struct mlx5_ib_dev *dev = to_mdev(uobj->context->device);
        struct ib_dm *ibdm = uobj->object;
        struct mlx5_ib_dm_memic *dm = to_memic(ibdm);
        struct mlx5_ib_dm_op_entry *op_entry;
        int err;
        u8 op;

        err = uverbs_copy_from(&op, attrs, MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_OP);
        if (err)
                return err;

        if (op >= BITS_PER_TYPE(u32))
                return -EOPNOTSUPP;

        if (!(MLX5_CAP_DEV_MEM(dev->mdev, memic_operations) & BIT(op)))
                return -EOPNOTSUPP;

        mutex_lock(&dm->ops_xa_lock);
        err = map_existing_op(dm, op, attrs);
        if (!err || err != -ENOENT)
                goto err_unlock;

        op_entry = kzalloc(sizeof(*op_entry), GFP_KERNEL);
        if (!op_entry) {
                err = -ENOMEM;
                goto err_unlock;
        }

        err = mlx5_cmd_alloc_memic_op(&dev->dm, dm->base.dev_addr, op,
                                      &op_entry->op_addr);
        if (err) {
                kfree(op_entry);
                goto err_unlock;
        }
        op_entry->op = op;
        op_entry->dm = dm;

        err = add_dm_mmap_entry(uobj->context, &op_entry->mentry,
                                MLX5_IB_MMAP_TYPE_MEMIC_OP, dm->base.size,
                                op_entry->op_addr & PAGE_MASK);
        if (err) {
                mlx5_cmd_dealloc_memic_op(&dev->dm, dm->base.dev_addr, op);
                kfree(op_entry);
                goto err_unlock;
        }
        /* From this point, entry will be freed by mmap_free */
        kref_get(&dm->ref);

        err = copy_op_to_user(op_entry, attrs);
        if (err)
                goto err_remove;

        err = xa_insert(&dm->ops, op, op_entry, GFP_KERNEL);
        if (err)
                goto err_remove;

        mutex_unlock(&dm->ops_xa_lock);

        return 0;

err_remove:
        rdma_user_mmap_entry_remove(&op_entry->mentry.rdma_entry);
err_unlock:
        mutex_unlock(&dm->ops_xa_lock);

        return err;
}
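
/*
 * MEMIC path of the DM_ALLOC verb: round the requested length up to
 * MLX5_MEMIC_BASE_SIZE, allocate the range, create the user mmap entry and
 * report the page index and the offset of the buffer within its first page.
 */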
static struct ib_dm *handle_alloc_dm_memic(struct ib_ucontext *ctx,
                                           struct ib_dm_alloc_attr *attr,
                                           struct uverbs_attr_bundle *attrs)
{
        struct mlx5_dm *dm_db = &to_mdev(ctx->device)->dm;
        struct mlx5_ib_dm_memic *dm;
        u64 start_offset;
        u16 page_idx;
        int err;
        u64 address;

        if (!MLX5_CAP_DEV_MEM(dm_db->dev, memic))
                return ERR_PTR(-EOPNOTSUPP);

        dm = kzalloc(sizeof(*dm), GFP_KERNEL);
        if (!dm)
                return ERR_PTR(-ENOMEM);

        dm->base.type = MLX5_IB_UAPI_DM_TYPE_MEMIC;
        dm->base.size = roundup(attr->length, MLX5_MEMIC_BASE_SIZE);
        dm->base.ibdm.device = ctx->device;

        kref_init(&dm->ref);
        xa_init(&dm->ops);
        mutex_init(&dm->ops_xa_lock);
        dm->req_length = attr->length;

        err = mlx5_cmd_alloc_memic(dm_db, &dm->base.dev_addr,
                                   dm->base.size, attr->alignment);
        if (err) {
                kfree(dm);
                return ERR_PTR(err);
        }

        address = dm->base.dev_addr & PAGE_MASK;
        err = add_dm_mmap_entry(ctx, &dm->mentry, MLX5_IB_MMAP_TYPE_MEMIC,
                                dm->base.size, address);
        if (err) {
                mlx5_cmd_dealloc_memic(dm_db, dm->base.dev_addr, dm->base.size);
                kfree(dm);
                return ERR_PTR(err);
        }

        page_idx = dm->mentry.rdma_entry.start_pgoff & 0xFFFF;
        err = uverbs_copy_to(attrs, MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
                             &page_idx, sizeof(page_idx));
        if (err)
                goto err_copy;

        start_offset = dm->base.dev_addr & ~PAGE_MASK;
        err = uverbs_copy_to(attrs,
                             MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
                             &start_offset, sizeof(start_offset));
        if (err)
                goto err_copy;

        return &dm->base.ibdm;

err_copy:
        rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
        return ERR_PTR(err);
}
static enum mlx5_sw_icm_type get_icm_type(int uapi_type)
{
        switch (uapi_type) {
        case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
                return MLX5_SW_ICM_TYPE_HEADER_MODIFY;
        case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
                return MLX5_SW_ICM_TYPE_HEADER_MODIFY_PATTERN;
        case MLX5_IB_UAPI_DM_TYPE_ENCAP_SW_ICM:
                return MLX5_SW_ICM_TYPE_SW_ENCAP;
        case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
        default:
                return MLX5_SW_ICM_TYPE_STEERING;
        }
}
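
/*
 * SW ICM allocation requires CAP_SYS_RAWIO and CAP_NET_RAW and a device with
 * SW ownership of the relevant flow tables. The requested length is rounded
 * up to the SW ICM block size and then to a power of two before the
 * allocation is done on behalf of the context's DEVX uid.
 */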
static struct ib_dm *handle_alloc_dm_sw_icm(struct ib_ucontext *ctx,
                                            struct ib_dm_alloc_attr *attr,
                                            struct uverbs_attr_bundle *attrs,
                                            int type)
{
        struct mlx5_core_dev *dev = to_mdev(ctx->device)->mdev;
        enum mlx5_sw_icm_type icm_type;
        struct mlx5_ib_dm_icm *dm;
        u64 act_size;
        int err;

        if (!capable(CAP_SYS_RAWIO) || !capable(CAP_NET_RAW))
                return ERR_PTR(-EPERM);

        switch (type) {
        case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
        case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
        case MLX5_IB_UAPI_DM_TYPE_ENCAP_SW_ICM:
                if (!(MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner) ||
                      MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner) ||
                      MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner_v2) ||
                      MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner_v2)))
                        return ERR_PTR(-EOPNOTSUPP);
                break;
        case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
                if (!MLX5_CAP_FLOWTABLE_NIC_RX(dev, sw_owner_v2) ||
                    !MLX5_CAP_FLOWTABLE_NIC_TX(dev, sw_owner_v2))
                        return ERR_PTR(-EOPNOTSUPP);
                break;
        default:
                return ERR_PTR(-EOPNOTSUPP);
        }

        dm = kzalloc(sizeof(*dm), GFP_KERNEL);
        if (!dm)
                return ERR_PTR(-ENOMEM);

        dm->base.type = type;
        dm->base.ibdm.device = ctx->device;

        /* Allocation size must be a multiple of the basic block size
         * and a power of 2.
         */
        act_size = round_up(attr->length, MLX5_SW_ICM_BLOCK_SIZE(dev));
        act_size = roundup_pow_of_two(act_size);

        dm->base.size = act_size;
        icm_type = get_icm_type(type);

        err = mlx5_dm_sw_icm_alloc(dev, icm_type, act_size, attr->alignment,
                                   to_mucontext(ctx)->devx_uid,
                                   &dm->base.dev_addr, &dm->obj_id);
        if (err)
                goto free;

        err = uverbs_copy_to(attrs, MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
                             &dm->base.dev_addr, sizeof(dm->base.dev_addr));
        if (err) {
                mlx5_dm_sw_icm_dealloc(dev, icm_type, dm->base.size,
                                       to_mucontext(ctx)->devx_uid,
                                       dm->base.dev_addr, dm->obj_id);
                goto free;
        }

        return &dm->base.ibdm;

free:
        kfree(dm);
        return ERR_PTR(err);
}
struct ib_dm *mlx5_ib_alloc_dm(struct ib_device *ibdev,
                               struct ib_ucontext *context,
                               struct ib_dm_alloc_attr *attr,
                               struct uverbs_attr_bundle *attrs)
{
        enum mlx5_ib_uapi_dm_type type;
        int err;

        err = uverbs_get_const_default(&type, attrs,
                                       MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
                                       MLX5_IB_UAPI_DM_TYPE_MEMIC);
        if (err)
                return ERR_PTR(err);

        mlx5_ib_dbg(to_mdev(ibdev), "alloc_dm req: dm_type=%d user_length=0x%llx log_alignment=%d\n",
                    type, attr->length, attr->alignment);

        switch (type) {
        case MLX5_IB_UAPI_DM_TYPE_MEMIC:
                return handle_alloc_dm_memic(context, attr, attrs);
        case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
        case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
        case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
        case MLX5_IB_UAPI_DM_TYPE_ENCAP_SW_ICM:
                return handle_alloc_dm_sw_icm(context, attr, attrs, type);
        default:
                return ERR_PTR(-EOPNOTSUPP);
        }
}
static void dm_memic_remove_ops(struct mlx5_ib_dm_memic *dm)
{
        struct mlx5_ib_dm_op_entry *entry;
        unsigned long idx;

        mutex_lock(&dm->ops_xa_lock);
        xa_for_each(&dm->ops, idx, entry) {
                xa_erase(&dm->ops, idx);
                rdma_user_mmap_entry_remove(&entry->mentry.rdma_entry);
        }
        mutex_unlock(&dm->ops_xa_lock);
}
static void mlx5_dm_memic_dealloc(struct mlx5_ib_dm_memic *dm)
{
        dm_memic_remove_ops(dm);
        rdma_user_mmap_entry_remove(&dm->mentry.rdma_entry);
}
static int mlx5_dm_icm_dealloc(struct mlx5_ib_ucontext *ctx,
                               struct mlx5_ib_dm_icm *dm)
{
        enum mlx5_sw_icm_type type = get_icm_type(dm->base.type);
        struct mlx5_core_dev *dev = to_mdev(dm->base.ibdm.device)->mdev;
        int err;

        err = mlx5_dm_sw_icm_dealloc(dev, type, dm->base.size, ctx->devx_uid,
                                     dm->base.dev_addr, dm->obj_id);
        if (!err)
                kfree(dm);

        return err;
}
static int mlx5_ib_dealloc_dm(struct ib_dm *ibdm,
                              struct uverbs_attr_bundle *attrs)
{
        struct mlx5_ib_ucontext *ctx = rdma_udata_to_drv_context(
                &attrs->driver_udata, struct mlx5_ib_ucontext, ibucontext);
        struct mlx5_ib_dm *dm = to_mdm(ibdm);

        switch (dm->type) {
        case MLX5_IB_UAPI_DM_TYPE_MEMIC:
                mlx5_dm_memic_dealloc(to_memic(ibdm));
                return 0;
        case MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM:
        case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM:
        case MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM:
        case MLX5_IB_UAPI_DM_TYPE_ENCAP_SW_ICM:
                return mlx5_dm_icm_dealloc(ctx, to_icm(ibdm));
        default:
                return -EOPNOTSUPP;
        }
}
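
/*
 * DM_QUERY is only supported for MEMIC regions; the response carries the page
 * index, the offset within the first page and the originally requested
 * length.
 */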
static int UVERBS_HANDLER(MLX5_IB_METHOD_DM_QUERY)(
        struct uverbs_attr_bundle *attrs)
{
        struct ib_dm *ibdm =
                uverbs_attr_get_obj(attrs, MLX5_IB_ATTR_QUERY_DM_REQ_HANDLE);
        struct mlx5_ib_dm *dm = to_mdm(ibdm);
        struct mlx5_ib_dm_memic *memic;
        u64 start_offset;
        u16 page_idx;
        int err;

        if (dm->type != MLX5_IB_UAPI_DM_TYPE_MEMIC)
                return -EOPNOTSUPP;

        memic = to_memic(ibdm);
        page_idx = memic->mentry.rdma_entry.start_pgoff & 0xFFFF;
        err = uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_DM_RESP_PAGE_INDEX,
                             &page_idx, sizeof(page_idx));
        if (err)
                return err;

        start_offset = memic->base.dev_addr & ~PAGE_MASK;
        err = uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_DM_RESP_START_OFFSET,
                             &start_offset, sizeof(start_offset));
        if (err)
                return err;

        return uverbs_copy_to(attrs, MLX5_IB_ATTR_QUERY_DM_RESP_LENGTH,
                              &memic->req_length,
                              sizeof(memic->req_length));
}
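
/*
 * mmap_free callback: every DM mmap entry holds a reference on its MEMIC
 * region, so the region (and its firmware allocation) is released through
 * mlx5_ib_dm_memic_free only when the last entry is gone. Operation entries
 * additionally return their MODIFY_MEMIC allocation here.
 */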
void mlx5_ib_dm_mmap_free(struct mlx5_ib_dev *dev,
                          struct mlx5_user_mmap_entry *mentry)
{
        struct mlx5_ib_dm_op_entry *op_entry;
        struct mlx5_ib_dm_memic *mdm;

        switch (mentry->mmap_flag) {
        case MLX5_IB_MMAP_TYPE_MEMIC:
                mdm = container_of(mentry, struct mlx5_ib_dm_memic, mentry);
                kref_put(&mdm->ref, mlx5_ib_dm_memic_free);
                break;
        case MLX5_IB_MMAP_TYPE_MEMIC_OP:
                op_entry = container_of(mentry, struct mlx5_ib_dm_op_entry,
                                        mentry);
                mdm = op_entry->dm;
                mlx5_cmd_dealloc_memic_op(&dev->dm, mdm->base.dev_addr,
                                          op_entry->op);
                kfree(op_entry);
                kref_put(&mdm->ref, mlx5_ib_dm_memic_free);
                break;
        default:
                WARN_ON(true);
        }
}
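
/* uverbs method and attribute trees exposing the DM object to userspace. */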
DECLARE_UVERBS_NAMED_METHOD(
        MLX5_IB_METHOD_DM_QUERY,
        UVERBS_ATTR_IDR(MLX5_IB_ATTR_QUERY_DM_REQ_HANDLE, UVERBS_OBJECT_DM,
                        UVERBS_ACCESS_READ, UA_MANDATORY),
        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_DM_RESP_START_OFFSET,
                            UVERBS_ATTR_TYPE(u64), UA_MANDATORY),
        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_DM_RESP_PAGE_INDEX,
                            UVERBS_ATTR_TYPE(u16), UA_MANDATORY),
        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_QUERY_DM_RESP_LENGTH,
                            UVERBS_ATTR_TYPE(u64), UA_MANDATORY));
ADD_UVERBS_ATTRIBUTES_SIMPLE(
        mlx5_ib_dm, UVERBS_OBJECT_DM, UVERBS_METHOD_DM_ALLOC,
        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_START_OFFSET,
                            UVERBS_ATTR_TYPE(u64), UA_MANDATORY),
        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
                            UVERBS_ATTR_TYPE(u16), UA_OPTIONAL),
        UVERBS_ATTR_CONST_IN(MLX5_IB_ATTR_ALLOC_DM_REQ_TYPE,
                             enum mlx5_ib_uapi_dm_type, UA_OPTIONAL));
DECLARE_UVERBS_NAMED_METHOD(
        MLX5_IB_METHOD_DM_MAP_OP_ADDR,
        UVERBS_ATTR_IDR(MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_HANDLE,
                        UVERBS_OBJECT_DM,
                        UVERBS_ACCESS_READ,
                        UA_MANDATORY),
        UVERBS_ATTR_PTR_IN(MLX5_IB_ATTR_DM_MAP_OP_ADDR_REQ_OP,
                           UVERBS_ATTR_TYPE(u8),
                           UA_MANDATORY),
        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_START_OFFSET,
                            UVERBS_ATTR_TYPE(u64),
                            UA_MANDATORY),
        UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_DM_MAP_OP_ADDR_RESP_PAGE_INDEX,
                            UVERBS_ATTR_TYPE(u16),
                            UA_OPTIONAL));
DECLARE_UVERBS_GLOBAL_METHODS(UVERBS_OBJECT_DM,
                              &UVERBS_METHOD(MLX5_IB_METHOD_DM_MAP_OP_ADDR),
                              &UVERBS_METHOD(MLX5_IB_METHOD_DM_QUERY));
const struct uapi_definition mlx5_ib_dm_defs[] = {
        UAPI_DEF_CHAIN_OBJ_TREE(UVERBS_OBJECT_DM, &mlx5_ib_dm),
        UAPI_DEF_CHAIN_OBJ_TREE_NAMED(UVERBS_OBJECT_DM),
        {},
};
const struct ib_device_ops mlx5_ib_dev_dm_ops = {
        .alloc_dm = mlx5_ib_alloc_dm,
        .dealloc_dm = mlx5_ib_dealloc_dm,
        .reg_dm_mr = mlx5_ib_reg_dm_mr,
};