drivers/infiniband/hw/mlx5/qos.c

// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
 */

#include <rdma/uverbs_ioctl.h>
#include <rdma/mlx5_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_verbs.h>
#include <linux/mlx5/driver.h>
#include "mlx5_ib.h"

#define UVERBS_MODULE_NAME mlx5_ib
#include <rdma/uverbs_named_ioctl.h>
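
/*
 * Packet pacing objects are exposed only when the device reports the
 * generic QoS capability plus packet pacing with per-uid rate indexes.
 */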
static bool pp_is_supported(struct ib_device *device)
{
	struct mlx5_ib_dev *dev = to_mdev(device);

	return (MLX5_CAP_GEN(dev->mdev, qos) &&
		MLX5_CAP_QOS(dev->mdev, packet_pacing) &&
		MLX5_CAP_QOS(dev->mdev, packet_pacing_uid));
}
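
/*
 * Allocate a packet pacing entry: copy the caller's (possibly partial)
 * set_pp_rate_limit_context into a zeroed full-size buffer, register the
 * rate with mlx5 core and return the resulting pacing index to userspace.
 */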
static int UVERBS_HANDLER(MLX5_IB_METHOD_PP_OBJ_ALLOC)(
	struct uverbs_attr_bundle *attrs)
{
	u8 rl_raw[MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)] = {};
	struct ib_uobject *uobj = uverbs_attr_get_uobject(attrs,
		MLX5_IB_ATTR_PP_OBJ_ALLOC_HANDLE);
	struct mlx5_ib_dev *dev;
	struct mlx5_ib_ucontext *c;
	struct mlx5_ib_pp *pp_entry;
	void *in_ctx;
	u16 uid;
	int inlen;
	u32 flags;
	int err;

	c = to_mucontext(ib_uverbs_get_ucontext(attrs));
	if (IS_ERR(c))
		return PTR_ERR(c);

	/* The allocated entry can be used only by a DEVX context */
	if (!c->devx_uid)
		return -EINVAL;

	dev = to_mdev(c->ibucontext.device);
	pp_entry = kzalloc(sizeof(*pp_entry), GFP_KERNEL);
	if (!pp_entry)
		return -ENOMEM;

	in_ctx = uverbs_attr_get_alloced_ptr(attrs,
					     MLX5_IB_ATTR_PP_OBJ_ALLOC_CTX);
	inlen = uverbs_attr_get_len(attrs,
				    MLX5_IB_ATTR_PP_OBJ_ALLOC_CTX);
	memcpy(rl_raw, in_ctx, inlen);
	err = uverbs_get_flags32(&flags, attrs,
		MLX5_IB_ATTR_PP_OBJ_ALLOC_FLAGS,
		MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX);
	if (err)
		goto err;
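
	/*
	 * A dedicated index is owned by this DEVX context; otherwise the
	 * rate entry is registered under the shared-resource uid and may
	 * be shared with other users of the same rate.
	 */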
	uid = (flags & MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX) ?
		c->devx_uid : MLX5_SHARED_RESOURCE_UID;

	err = mlx5_rl_add_rate_raw(dev->mdev, rl_raw, uid,
		(flags & MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX),
		&pp_entry->index);
	if (err)
		goto err;

	err = uverbs_copy_to(attrs, MLX5_IB_ATTR_PP_OBJ_ALLOC_INDEX,
			     &pp_entry->index, sizeof(pp_entry->index));
	if (err)
		goto clean;

	pp_entry->mdev = dev->mdev;
	uobj->object = pp_entry;
	return 0;

clean:
	mlx5_rl_remove_rate_raw(dev->mdev, pp_entry->index);
err:
	kfree(pp_entry);
	return err;
}
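
/* Drop the rate entry when the uobject is destroyed or the context exits. */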
static int pp_obj_cleanup(struct ib_uobject *uobject,
			  enum rdma_remove_reason why,
			  struct uverbs_attr_bundle *attrs)
{
	struct mlx5_ib_pp *pp_entry = uobject->object;

	mlx5_rl_remove_rate_raw(pp_entry->mdev, pp_entry->index);
	kfree(pp_entry);
	return 0;
}
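
/*
 * Method/attribute metadata: ALLOC takes a new IDR handle, the raw
 * rate-limit context (from 1 byte up to the full structure, allocated
 * and copied by the uverbs core), the allocation flags, and returns the
 * u16 pacing index.
 */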
DECLARE_UVERBS_NAMED_METHOD(
	MLX5_IB_METHOD_PP_OBJ_ALLOC,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_PP_OBJ_ALLOC_HANDLE,
			MLX5_IB_OBJECT_PP,
			UVERBS_ACCESS_NEW,
			UA_MANDATORY),
	UVERBS_ATTR_PTR_IN(
		MLX5_IB_ATTR_PP_OBJ_ALLOC_CTX,
		UVERBS_ATTR_SIZE(1,
			MLX5_ST_SZ_BYTES(set_pp_rate_limit_context)),
		UA_MANDATORY,
		UA_ALLOC_AND_COPY),
	UVERBS_ATTR_FLAGS_IN(MLX5_IB_ATTR_PP_OBJ_ALLOC_FLAGS,
			     enum mlx5_ib_uapi_pp_alloc_flags,
			     UA_MANDATORY),
	UVERBS_ATTR_PTR_OUT(MLX5_IB_ATTR_PP_OBJ_ALLOC_INDEX,
			    UVERBS_ATTR_TYPE(u16),
			    UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
	MLX5_IB_METHOD_PP_OBJ_DESTROY,
	UVERBS_ATTR_IDR(MLX5_IB_ATTR_PP_OBJ_DESTROY_HANDLE,
			MLX5_IB_OBJECT_PP,
			UVERBS_ACCESS_DESTROY,
			UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(MLX5_IB_OBJECT_PP,
			    UVERBS_TYPE_ALLOC_IDR(pp_obj_cleanup),
			    &UVERBS_METHOD(MLX5_IB_METHOD_PP_OBJ_ALLOC),
			    &UVERBS_METHOD(MLX5_IB_METHOD_PP_OBJ_DESTROY));

const struct uapi_definition mlx5_ib_qos_defs[] = {
	UAPI_DEF_CHAIN_OBJ_TREE_NAMED(
		MLX5_IB_OBJECT_PP,
		UAPI_DEF_IS_OBJ_SUPPORTED(pp_is_supported)),
	{},
};
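
/*
 * Userspace counterpart (illustrative sketch, not part of this file):
 * rdma-core exposes these methods through mlx5dv_pp_alloc()/mlx5dv_pp_free().
 * The device must be opened as a DEVX context, matching the c->devx_uid
 * check in the handler above. pp_alloc_example() is a hypothetical helper;
 * rate_ctx stands in for a caller-built, device-format
 * set_pp_rate_limit_context and is left zeroed as a placeholder. Any length
 * from 1 byte up to the full structure size is accepted per the
 * UVERBS_ATTR_SIZE() declaration above.
 */
#include <errno.h>
#include <stdint.h>
#include <infiniband/mlx5dv.h>

static int pp_alloc_example(struct ibv_device *ibdev)
{
	struct mlx5dv_context_attr dv_attr = {
		.flags = MLX5DV_CONTEXT_FLAGS_DEVX,	/* required for PP */
	};
	uint8_t rate_ctx[16] = {};	/* placeholder rate-limit context */
	struct ibv_context *ctx;
	struct mlx5dv_pp *pp;

	ctx = mlx5dv_open_device(ibdev, &dv_attr);
	if (!ctx)
		return -errno;

	/* Ask for an index not shared with other users of the same rate */
	pp = mlx5dv_pp_alloc(ctx, sizeof(rate_ctx), rate_ctx,
			     MLX5DV_PP_ALLOC_FLAGS_DEDICATED_INDEX);
	if (!pp) {
		ibv_close_device(ctx);
		return -errno;
	}

	/* pp->index is the u16 pacing index copied back by the handler */
	mlx5dv_pp_free(pp);
	ibv_close_device(ctx);
	return 0;
}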