// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2013-2018, Mellanox Technologies inc.  All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
12 static int get_pas_size(struct mlx5_srq_attr
*in
)
14 u32 log_page_size
= in
->log_page_size
+ 12;
15 u32 log_srq_size
= in
->log_size
;
16 u32 log_rq_stride
= in
->wqe_shift
;
17 u32 page_offset
= in
->page_offset
;
18 u32 po_quanta
= 1 << (log_page_size
- 6);
19 u32 rq_sz
= 1 << (log_srq_size
+ 4 + log_rq_stride
);
20 u32 page_size
= 1 << log_page_size
;
21 u32 rq_sz_po
= rq_sz
+ (page_offset
* po_quanta
);
22 u32 rq_num_pas
= DIV_ROUND_UP(rq_sz_po
, page_size
);
24 return rq_num_pas
* sizeof(u64
);
27 static void set_wq(void *wq
, struct mlx5_srq_attr
*in
)
29 MLX5_SET(wq
, wq
, wq_signature
, !!(in
->flags
30 & MLX5_SRQ_FLAG_WQ_SIG
));
31 MLX5_SET(wq
, wq
, log_wq_pg_sz
, in
->log_page_size
);
32 MLX5_SET(wq
, wq
, log_wq_stride
, in
->wqe_shift
+ 4);
33 MLX5_SET(wq
, wq
, log_wq_sz
, in
->log_size
);
34 MLX5_SET(wq
, wq
, page_offset
, in
->page_offset
);
35 MLX5_SET(wq
, wq
, lwm
, in
->lwm
);
36 MLX5_SET(wq
, wq
, pd
, in
->pd
);
37 MLX5_SET64(wq
, wq
, dbr_addr
, in
->db_record
);
40 static void set_srqc(void *srqc
, struct mlx5_srq_attr
*in
)
42 MLX5_SET(srqc
, srqc
, wq_signature
, !!(in
->flags
43 & MLX5_SRQ_FLAG_WQ_SIG
));
44 MLX5_SET(srqc
, srqc
, log_page_size
, in
->log_page_size
);
45 MLX5_SET(srqc
, srqc
, log_rq_stride
, in
->wqe_shift
);
46 MLX5_SET(srqc
, srqc
, log_srq_size
, in
->log_size
);
47 MLX5_SET(srqc
, srqc
, page_offset
, in
->page_offset
);
48 MLX5_SET(srqc
, srqc
, lwm
, in
->lwm
);
49 MLX5_SET(srqc
, srqc
, pd
, in
->pd
);
50 MLX5_SET64(srqc
, srqc
, dbr_addr
, in
->db_record
);
51 MLX5_SET(srqc
, srqc
, xrcd
, in
->xrcd
);
52 MLX5_SET(srqc
, srqc
, cqn
, in
->cqn
);
55 static void get_wq(void *wq
, struct mlx5_srq_attr
*in
)
57 if (MLX5_GET(wq
, wq
, wq_signature
))
58 in
->flags
&= MLX5_SRQ_FLAG_WQ_SIG
;
59 in
->log_page_size
= MLX5_GET(wq
, wq
, log_wq_pg_sz
);
60 in
->wqe_shift
= MLX5_GET(wq
, wq
, log_wq_stride
) - 4;
61 in
->log_size
= MLX5_GET(wq
, wq
, log_wq_sz
);
62 in
->page_offset
= MLX5_GET(wq
, wq
, page_offset
);
63 in
->lwm
= MLX5_GET(wq
, wq
, lwm
);
64 in
->pd
= MLX5_GET(wq
, wq
, pd
);
65 in
->db_record
= MLX5_GET64(wq
, wq
, dbr_addr
);
68 static void get_srqc(void *srqc
, struct mlx5_srq_attr
*in
)
70 if (MLX5_GET(srqc
, srqc
, wq_signature
))
71 in
->flags
&= MLX5_SRQ_FLAG_WQ_SIG
;
72 in
->log_page_size
= MLX5_GET(srqc
, srqc
, log_page_size
);
73 in
->wqe_shift
= MLX5_GET(srqc
, srqc
, log_rq_stride
);
74 in
->log_size
= MLX5_GET(srqc
, srqc
, log_srq_size
);
75 in
->page_offset
= MLX5_GET(srqc
, srqc
, page_offset
);
76 in
->lwm
= MLX5_GET(srqc
, srqc
, lwm
);
77 in
->pd
= MLX5_GET(srqc
, srqc
, pd
);
78 in
->db_record
= MLX5_GET64(srqc
, srqc
, dbr_addr
);
81 struct mlx5_core_srq
*mlx5_cmd_get_srq(struct mlx5_ib_dev
*dev
, u32 srqn
)
83 struct mlx5_srq_table
*table
= &dev
->srq_table
;
84 struct mlx5_core_srq
*srq
;
86 xa_lock(&table
->array
);
87 srq
= xa_load(&table
->array
, srqn
);
89 refcount_inc(&srq
->common
.refcount
);
90 xa_unlock(&table
->array
);
95 static int create_srq_cmd(struct mlx5_ib_dev
*dev
, struct mlx5_core_srq
*srq
,
96 struct mlx5_srq_attr
*in
)
98 u32 create_out
[MLX5_ST_SZ_DW(create_srq_out
)] = {0};
106 pas_size
= get_pas_size(in
);
107 inlen
= MLX5_ST_SZ_BYTES(create_srq_in
) + pas_size
;
108 create_in
= kvzalloc(inlen
, GFP_KERNEL
);
112 MLX5_SET(create_srq_in
, create_in
, uid
, in
->uid
);
113 srqc
= MLX5_ADDR_OF(create_srq_in
, create_in
, srq_context_entry
);
114 pas
= MLX5_ADDR_OF(create_srq_in
, create_in
, pas
);
117 memcpy(pas
, in
->pas
, pas_size
);
119 MLX5_SET(create_srq_in
, create_in
, opcode
,
120 MLX5_CMD_OP_CREATE_SRQ
);
122 err
= mlx5_cmd_exec(dev
->mdev
, create_in
, inlen
, create_out
,
126 srq
->srqn
= MLX5_GET(create_srq_out
, create_out
, srqn
);
133 static int destroy_srq_cmd(struct mlx5_ib_dev
*dev
, struct mlx5_core_srq
*srq
)
135 u32 srq_in
[MLX5_ST_SZ_DW(destroy_srq_in
)] = {0};
136 u32 srq_out
[MLX5_ST_SZ_DW(destroy_srq_out
)] = {0};
138 MLX5_SET(destroy_srq_in
, srq_in
, opcode
,
139 MLX5_CMD_OP_DESTROY_SRQ
);
140 MLX5_SET(destroy_srq_in
, srq_in
, srqn
, srq
->srqn
);
141 MLX5_SET(destroy_srq_in
, srq_in
, uid
, srq
->uid
);
143 return mlx5_cmd_exec(dev
->mdev
, srq_in
, sizeof(srq_in
), srq_out
,
147 static int arm_srq_cmd(struct mlx5_ib_dev
*dev
, struct mlx5_core_srq
*srq
,
150 u32 srq_in
[MLX5_ST_SZ_DW(arm_rq_in
)] = {0};
151 u32 srq_out
[MLX5_ST_SZ_DW(arm_rq_out
)] = {0};
153 MLX5_SET(arm_rq_in
, srq_in
, opcode
, MLX5_CMD_OP_ARM_RQ
);
154 MLX5_SET(arm_rq_in
, srq_in
, op_mod
, MLX5_ARM_RQ_IN_OP_MOD_SRQ
);
155 MLX5_SET(arm_rq_in
, srq_in
, srq_number
, srq
->srqn
);
156 MLX5_SET(arm_rq_in
, srq_in
, lwm
, lwm
);
157 MLX5_SET(arm_rq_in
, srq_in
, uid
, srq
->uid
);
159 return mlx5_cmd_exec(dev
->mdev
, srq_in
, sizeof(srq_in
), srq_out
,
163 static int query_srq_cmd(struct mlx5_ib_dev
*dev
, struct mlx5_core_srq
*srq
,
164 struct mlx5_srq_attr
*out
)
166 u32 srq_in
[MLX5_ST_SZ_DW(query_srq_in
)] = {0};
171 srq_out
= kvzalloc(MLX5_ST_SZ_BYTES(query_srq_out
), GFP_KERNEL
);
175 MLX5_SET(query_srq_in
, srq_in
, opcode
,
176 MLX5_CMD_OP_QUERY_SRQ
);
177 MLX5_SET(query_srq_in
, srq_in
, srqn
, srq
->srqn
);
178 err
= mlx5_cmd_exec(dev
->mdev
, srq_in
, sizeof(srq_in
), srq_out
,
179 MLX5_ST_SZ_BYTES(query_srq_out
));
183 srqc
= MLX5_ADDR_OF(query_srq_out
, srq_out
, srq_context_entry
);
185 if (MLX5_GET(srqc
, srqc
, state
) != MLX5_SRQC_STATE_GOOD
)
186 out
->flags
|= MLX5_SRQ_FLAG_ERR
;
192 static int create_xrc_srq_cmd(struct mlx5_ib_dev
*dev
,
193 struct mlx5_core_srq
*srq
,
194 struct mlx5_srq_attr
*in
)
196 u32 create_out
[MLX5_ST_SZ_DW(create_xrc_srq_out
)];
204 pas_size
= get_pas_size(in
);
205 inlen
= MLX5_ST_SZ_BYTES(create_xrc_srq_in
) + pas_size
;
206 create_in
= kvzalloc(inlen
, GFP_KERNEL
);
210 MLX5_SET(create_xrc_srq_in
, create_in
, uid
, in
->uid
);
211 xrc_srqc
= MLX5_ADDR_OF(create_xrc_srq_in
, create_in
,
212 xrc_srq_context_entry
);
213 pas
= MLX5_ADDR_OF(create_xrc_srq_in
, create_in
, pas
);
215 set_srqc(xrc_srqc
, in
);
216 MLX5_SET(xrc_srqc
, xrc_srqc
, user_index
, in
->user_index
);
217 memcpy(pas
, in
->pas
, pas_size
);
218 MLX5_SET(create_xrc_srq_in
, create_in
, opcode
,
219 MLX5_CMD_OP_CREATE_XRC_SRQ
);
221 memset(create_out
, 0, sizeof(create_out
));
222 err
= mlx5_cmd_exec(dev
->mdev
, create_in
, inlen
, create_out
,
227 srq
->srqn
= MLX5_GET(create_xrc_srq_out
, create_out
, xrc_srqn
);
234 static int destroy_xrc_srq_cmd(struct mlx5_ib_dev
*dev
,
235 struct mlx5_core_srq
*srq
)
237 u32 xrcsrq_in
[MLX5_ST_SZ_DW(destroy_xrc_srq_in
)] = {0};
238 u32 xrcsrq_out
[MLX5_ST_SZ_DW(destroy_xrc_srq_out
)] = {0};
240 MLX5_SET(destroy_xrc_srq_in
, xrcsrq_in
, opcode
,
241 MLX5_CMD_OP_DESTROY_XRC_SRQ
);
242 MLX5_SET(destroy_xrc_srq_in
, xrcsrq_in
, xrc_srqn
, srq
->srqn
);
243 MLX5_SET(destroy_xrc_srq_in
, xrcsrq_in
, uid
, srq
->uid
);
245 return mlx5_cmd_exec(dev
->mdev
, xrcsrq_in
, sizeof(xrcsrq_in
),
246 xrcsrq_out
, sizeof(xrcsrq_out
));
249 static int arm_xrc_srq_cmd(struct mlx5_ib_dev
*dev
, struct mlx5_core_srq
*srq
,
252 u32 xrcsrq_in
[MLX5_ST_SZ_DW(arm_xrc_srq_in
)] = {0};
253 u32 xrcsrq_out
[MLX5_ST_SZ_DW(arm_xrc_srq_out
)] = {0};
255 MLX5_SET(arm_xrc_srq_in
, xrcsrq_in
, opcode
, MLX5_CMD_OP_ARM_XRC_SRQ
);
256 MLX5_SET(arm_xrc_srq_in
, xrcsrq_in
, op_mod
, MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ
);
257 MLX5_SET(arm_xrc_srq_in
, xrcsrq_in
, xrc_srqn
, srq
->srqn
);
258 MLX5_SET(arm_xrc_srq_in
, xrcsrq_in
, lwm
, lwm
);
259 MLX5_SET(arm_xrc_srq_in
, xrcsrq_in
, uid
, srq
->uid
);
261 return mlx5_cmd_exec(dev
->mdev
, xrcsrq_in
, sizeof(xrcsrq_in
),
262 xrcsrq_out
, sizeof(xrcsrq_out
));
265 static int query_xrc_srq_cmd(struct mlx5_ib_dev
*dev
,
266 struct mlx5_core_srq
*srq
,
267 struct mlx5_srq_attr
*out
)
269 u32 xrcsrq_in
[MLX5_ST_SZ_DW(query_xrc_srq_in
)];
274 xrcsrq_out
= kvzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out
), GFP_KERNEL
);
277 memset(xrcsrq_in
, 0, sizeof(xrcsrq_in
));
279 MLX5_SET(query_xrc_srq_in
, xrcsrq_in
, opcode
,
280 MLX5_CMD_OP_QUERY_XRC_SRQ
);
281 MLX5_SET(query_xrc_srq_in
, xrcsrq_in
, xrc_srqn
, srq
->srqn
);
283 err
= mlx5_cmd_exec(dev
->mdev
, xrcsrq_in
, sizeof(xrcsrq_in
),
284 xrcsrq_out
, MLX5_ST_SZ_BYTES(query_xrc_srq_out
));
288 xrc_srqc
= MLX5_ADDR_OF(query_xrc_srq_out
, xrcsrq_out
,
289 xrc_srq_context_entry
);
290 get_srqc(xrc_srqc
, out
);
291 if (MLX5_GET(xrc_srqc
, xrc_srqc
, state
) != MLX5_XRC_SRQC_STATE_GOOD
)
292 out
->flags
|= MLX5_SRQ_FLAG_ERR
;
299 static int create_rmp_cmd(struct mlx5_ib_dev
*dev
, struct mlx5_core_srq
*srq
,
300 struct mlx5_srq_attr
*in
)
302 void *create_out
= NULL
;
303 void *create_in
= NULL
;
311 pas_size
= get_pas_size(in
);
312 inlen
= MLX5_ST_SZ_BYTES(create_rmp_in
) + pas_size
;
313 outlen
= MLX5_ST_SZ_BYTES(create_rmp_out
);
314 create_in
= kvzalloc(inlen
, GFP_KERNEL
);
315 create_out
= kvzalloc(outlen
, GFP_KERNEL
);
316 if (!create_in
|| !create_out
) {
321 rmpc
= MLX5_ADDR_OF(create_rmp_in
, create_in
, ctx
);
322 wq
= MLX5_ADDR_OF(rmpc
, rmpc
, wq
);
324 MLX5_SET(rmpc
, rmpc
, state
, MLX5_RMPC_STATE_RDY
);
325 MLX5_SET(create_rmp_in
, create_in
, uid
, in
->uid
);
327 memcpy(MLX5_ADDR_OF(rmpc
, rmpc
, wq
.pas
), in
->pas
, pas_size
);
329 MLX5_SET(create_rmp_in
, create_in
, opcode
, MLX5_CMD_OP_CREATE_RMP
);
330 err
= mlx5_cmd_exec(dev
->mdev
, create_in
, inlen
, create_out
, outlen
);
332 srq
->srqn
= MLX5_GET(create_rmp_out
, create_out
, rmpn
);
342 static int destroy_rmp_cmd(struct mlx5_ib_dev
*dev
, struct mlx5_core_srq
*srq
)
344 u32 in
[MLX5_ST_SZ_DW(destroy_rmp_in
)] = {};
345 u32 out
[MLX5_ST_SZ_DW(destroy_rmp_out
)] = {};
347 MLX5_SET(destroy_rmp_in
, in
, opcode
, MLX5_CMD_OP_DESTROY_RMP
);
348 MLX5_SET(destroy_rmp_in
, in
, rmpn
, srq
->srqn
);
349 MLX5_SET(destroy_rmp_in
, in
, uid
, srq
->uid
);
350 return mlx5_cmd_exec(dev
->mdev
, in
, sizeof(in
), out
, sizeof(out
));
353 static int arm_rmp_cmd(struct mlx5_ib_dev
*dev
, struct mlx5_core_srq
*srq
,
365 inlen
= MLX5_ST_SZ_BYTES(modify_rmp_in
);
366 outlen
= MLX5_ST_SZ_BYTES(modify_rmp_out
);
368 in
= kvzalloc(inlen
, GFP_KERNEL
);
369 out
= kvzalloc(outlen
, GFP_KERNEL
);
375 rmpc
= MLX5_ADDR_OF(modify_rmp_in
, in
, ctx
);
376 bitmask
= MLX5_ADDR_OF(modify_rmp_in
, in
, bitmask
);
377 wq
= MLX5_ADDR_OF(rmpc
, rmpc
, wq
);
379 MLX5_SET(modify_rmp_in
, in
, rmp_state
, MLX5_RMPC_STATE_RDY
);
380 MLX5_SET(modify_rmp_in
, in
, rmpn
, srq
->srqn
);
381 MLX5_SET(modify_rmp_in
, in
, uid
, srq
->uid
);
382 MLX5_SET(wq
, wq
, lwm
, lwm
);
383 MLX5_SET(rmp_bitmask
, bitmask
, lwm
, 1);
384 MLX5_SET(rmpc
, rmpc
, state
, MLX5_RMPC_STATE_RDY
);
385 MLX5_SET(modify_rmp_in
, in
, opcode
, MLX5_CMD_OP_MODIFY_RMP
);
387 err
= mlx5_cmd_exec(dev
->mdev
, in
, inlen
, out
, outlen
);
395 static int query_rmp_cmd(struct mlx5_ib_dev
*dev
, struct mlx5_core_srq
*srq
,
396 struct mlx5_srq_attr
*out
)
405 outlen
= MLX5_ST_SZ_BYTES(query_rmp_out
);
406 inlen
= MLX5_ST_SZ_BYTES(query_rmp_in
);
408 rmp_out
= kvzalloc(outlen
, GFP_KERNEL
);
409 rmp_in
= kvzalloc(inlen
, GFP_KERNEL
);
410 if (!rmp_out
|| !rmp_in
) {
415 MLX5_SET(query_rmp_in
, rmp_in
, opcode
, MLX5_CMD_OP_QUERY_RMP
);
416 MLX5_SET(query_rmp_in
, rmp_in
, rmpn
, srq
->srqn
);
417 err
= mlx5_cmd_exec(dev
->mdev
, rmp_in
, inlen
, rmp_out
, outlen
);
421 rmpc
= MLX5_ADDR_OF(query_rmp_out
, rmp_out
, rmp_context
);
422 get_wq(MLX5_ADDR_OF(rmpc
, rmpc
, wq
), out
);
423 if (MLX5_GET(rmpc
, rmpc
, state
) != MLX5_RMPC_STATE_RDY
)
424 out
->flags
|= MLX5_SRQ_FLAG_ERR
;
432 static int create_xrq_cmd(struct mlx5_ib_dev
*dev
, struct mlx5_core_srq
*srq
,
433 struct mlx5_srq_attr
*in
)
435 u32 create_out
[MLX5_ST_SZ_DW(create_xrq_out
)] = {0};
443 pas_size
= get_pas_size(in
);
444 inlen
= MLX5_ST_SZ_BYTES(create_xrq_in
) + pas_size
;
445 create_in
= kvzalloc(inlen
, GFP_KERNEL
);
449 xrqc
= MLX5_ADDR_OF(create_xrq_in
, create_in
, xrq_context
);
450 wq
= MLX5_ADDR_OF(xrqc
, xrqc
, wq
);
453 memcpy(MLX5_ADDR_OF(xrqc
, xrqc
, wq
.pas
), in
->pas
, pas_size
);
455 if (in
->type
== IB_SRQT_TM
) {
456 MLX5_SET(xrqc
, xrqc
, topology
, MLX5_XRQC_TOPOLOGY_TAG_MATCHING
);
457 if (in
->flags
& MLX5_SRQ_FLAG_RNDV
)
458 MLX5_SET(xrqc
, xrqc
, offload
, MLX5_XRQC_OFFLOAD_RNDV
);
460 tag_matching_topology_context
.log_matching_list_sz
,
461 in
->tm_log_list_size
);
463 MLX5_SET(xrqc
, xrqc
, user_index
, in
->user_index
);
464 MLX5_SET(xrqc
, xrqc
, cqn
, in
->cqn
);
465 MLX5_SET(create_xrq_in
, create_in
, opcode
, MLX5_CMD_OP_CREATE_XRQ
);
466 MLX5_SET(create_xrq_in
, create_in
, uid
, in
->uid
);
467 err
= mlx5_cmd_exec(dev
->mdev
, create_in
, inlen
, create_out
,
471 srq
->srqn
= MLX5_GET(create_xrq_out
, create_out
, xrqn
);
478 static int destroy_xrq_cmd(struct mlx5_ib_dev
*dev
, struct mlx5_core_srq
*srq
)
480 u32 in
[MLX5_ST_SZ_DW(destroy_xrq_in
)] = {0};
481 u32 out
[MLX5_ST_SZ_DW(destroy_xrq_out
)] = {0};
483 MLX5_SET(destroy_xrq_in
, in
, opcode
, MLX5_CMD_OP_DESTROY_XRQ
);
484 MLX5_SET(destroy_xrq_in
, in
, xrqn
, srq
->srqn
);
485 MLX5_SET(destroy_xrq_in
, in
, uid
, srq
->uid
);
487 return mlx5_cmd_exec(dev
->mdev
, in
, sizeof(in
), out
, sizeof(out
));
490 static int arm_xrq_cmd(struct mlx5_ib_dev
*dev
,
491 struct mlx5_core_srq
*srq
,
494 u32 out
[MLX5_ST_SZ_DW(arm_rq_out
)] = {0};
495 u32 in
[MLX5_ST_SZ_DW(arm_rq_in
)] = {0};
497 MLX5_SET(arm_rq_in
, in
, opcode
, MLX5_CMD_OP_ARM_RQ
);
498 MLX5_SET(arm_rq_in
, in
, op_mod
, MLX5_ARM_RQ_IN_OP_MOD_XRQ
);
499 MLX5_SET(arm_rq_in
, in
, srq_number
, srq
->srqn
);
500 MLX5_SET(arm_rq_in
, in
, lwm
, lwm
);
501 MLX5_SET(arm_rq_in
, in
, uid
, srq
->uid
);
503 return mlx5_cmd_exec(dev
->mdev
, in
, sizeof(in
), out
, sizeof(out
));
506 static int query_xrq_cmd(struct mlx5_ib_dev
*dev
, struct mlx5_core_srq
*srq
,
507 struct mlx5_srq_attr
*out
)
509 u32 in
[MLX5_ST_SZ_DW(query_xrq_in
)] = {0};
511 int outlen
= MLX5_ST_SZ_BYTES(query_xrq_out
);
515 xrq_out
= kvzalloc(outlen
, GFP_KERNEL
);
519 MLX5_SET(query_xrq_in
, in
, opcode
, MLX5_CMD_OP_QUERY_XRQ
);
520 MLX5_SET(query_xrq_in
, in
, xrqn
, srq
->srqn
);
522 err
= mlx5_cmd_exec(dev
->mdev
, in
, sizeof(in
), xrq_out
, outlen
);
526 xrqc
= MLX5_ADDR_OF(query_xrq_out
, xrq_out
, xrq_context
);
527 get_wq(MLX5_ADDR_OF(xrqc
, xrqc
, wq
), out
);
528 if (MLX5_GET(xrqc
, xrqc
, state
) != MLX5_XRQC_STATE_GOOD
)
529 out
->flags
|= MLX5_SRQ_FLAG_ERR
;
532 tag_matching_topology_context
.append_next_index
);
533 out
->tm_hw_phase_cnt
=
535 tag_matching_topology_context
.hw_phase_cnt
);
536 out
->tm_sw_phase_cnt
=
538 tag_matching_topology_context
.sw_phase_cnt
);
545 static int create_srq_split(struct mlx5_ib_dev
*dev
, struct mlx5_core_srq
*srq
,
546 struct mlx5_srq_attr
*in
)
548 if (!dev
->mdev
->issi
)
549 return create_srq_cmd(dev
, srq
, in
);
550 switch (srq
->common
.res
) {
552 return create_xrc_srq_cmd(dev
, srq
, in
);
554 return create_xrq_cmd(dev
, srq
, in
);
556 return create_rmp_cmd(dev
, srq
, in
);
560 static int destroy_srq_split(struct mlx5_ib_dev
*dev
, struct mlx5_core_srq
*srq
)
562 if (!dev
->mdev
->issi
)
563 return destroy_srq_cmd(dev
, srq
);
564 switch (srq
->common
.res
) {
566 return destroy_xrc_srq_cmd(dev
, srq
);
568 return destroy_xrq_cmd(dev
, srq
);
570 return destroy_rmp_cmd(dev
, srq
);
574 int mlx5_cmd_create_srq(struct mlx5_ib_dev
*dev
, struct mlx5_core_srq
*srq
,
575 struct mlx5_srq_attr
*in
)
577 struct mlx5_srq_table
*table
= &dev
->srq_table
;
582 srq
->common
.res
= MLX5_RES_XSRQ
;
585 srq
->common
.res
= MLX5_RES_XRQ
;
588 srq
->common
.res
= MLX5_RES_SRQ
;
591 err
= create_srq_split(dev
, srq
, in
);
595 refcount_set(&srq
->common
.refcount
, 1);
596 init_completion(&srq
->common
.free
);
598 err
= xa_err(xa_store_irq(&table
->array
, srq
->srqn
, srq
, GFP_KERNEL
));
600 goto err_destroy_srq_split
;
604 err_destroy_srq_split
:
605 destroy_srq_split(dev
, srq
);
610 void mlx5_cmd_destroy_srq(struct mlx5_ib_dev
*dev
, struct mlx5_core_srq
*srq
)
612 struct mlx5_srq_table
*table
= &dev
->srq_table
;
613 struct mlx5_core_srq
*tmp
;
616 tmp
= xa_erase_irq(&table
->array
, srq
->srqn
);
617 if (!tmp
|| tmp
!= srq
)
620 err
= destroy_srq_split(dev
, srq
);
624 mlx5_core_res_put(&srq
->common
);
625 wait_for_completion(&srq
->common
.free
);
628 int mlx5_cmd_query_srq(struct mlx5_ib_dev
*dev
, struct mlx5_core_srq
*srq
,
629 struct mlx5_srq_attr
*out
)
631 if (!dev
->mdev
->issi
)
632 return query_srq_cmd(dev
, srq
, out
);
633 switch (srq
->common
.res
) {
635 return query_xrc_srq_cmd(dev
, srq
, out
);
637 return query_xrq_cmd(dev
, srq
, out
);
639 return query_rmp_cmd(dev
, srq
, out
);
643 int mlx5_cmd_arm_srq(struct mlx5_ib_dev
*dev
, struct mlx5_core_srq
*srq
,
646 if (!dev
->mdev
->issi
)
647 return arm_srq_cmd(dev
, srq
, lwm
, is_srq
);
648 switch (srq
->common
.res
) {
650 return arm_xrc_srq_cmd(dev
, srq
, lwm
);
652 return arm_xrq_cmd(dev
, srq
, lwm
);
654 return arm_rmp_cmd(dev
, srq
, lwm
);
658 static int srq_event_notifier(struct notifier_block
*nb
,
659 unsigned long type
, void *data
)
661 struct mlx5_srq_table
*table
;
662 struct mlx5_core_srq
*srq
;
663 struct mlx5_eqe
*eqe
;
666 if (type
!= MLX5_EVENT_TYPE_SRQ_CATAS_ERROR
&&
667 type
!= MLX5_EVENT_TYPE_SRQ_RQ_LIMIT
)
670 table
= container_of(nb
, struct mlx5_srq_table
, nb
);
673 srqn
= be32_to_cpu(eqe
->data
.qp_srq
.qp_srq_n
) & 0xffffff;
675 xa_lock(&table
->array
);
676 srq
= xa_load(&table
->array
, srqn
);
678 refcount_inc(&srq
->common
.refcount
);
679 xa_unlock(&table
->array
);
684 srq
->event(srq
, eqe
->type
);
686 mlx5_core_res_put(&srq
->common
);
691 int mlx5_init_srq_table(struct mlx5_ib_dev
*dev
)
693 struct mlx5_srq_table
*table
= &dev
->srq_table
;
695 memset(table
, 0, sizeof(*table
));
696 xa_init_flags(&table
->array
, XA_FLAGS_LOCK_IRQ
);
698 table
->nb
.notifier_call
= srq_event_notifier
;
699 mlx5_notifier_register(dev
->mdev
, &table
->nb
);
704 void mlx5_cleanup_srq_table(struct mlx5_ib_dev
*dev
)
706 struct mlx5_srq_table
*table
= &dev
->srq_table
;
708 mlx5_notifier_unregister(dev
->mdev
, &table
->nb
);