// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2013-2018, Mellanox Technologies inc. All rights reserved.
 */

#include <linux/kernel.h>
#include <linux/mlx5/driver.h>
#include "mlx5_ib.h"
#include "srq.h"

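/*
 * Size in bytes of the PAS (physical address) array for an SRQ: the work
 * queue footprint, plus the quantized page offset, rounded up to whole
 * pages, at one 64-bit address per page.
 */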
static int get_pas_size(struct mlx5_srq_attr *in)
{
	u32 log_page_size = in->log_page_size + 12;
	u32 log_srq_size  = in->log_size;
	u32 log_rq_stride = in->wqe_shift;
	u32 page_offset   = in->page_offset;
	u32 po_quanta	  = 1 << (log_page_size - 6);
	u32 rq_sz	  = 1 << (log_srq_size + 4 + log_rq_stride);
	u32 page_size	  = 1 << log_page_size;
	u32 rq_sz_po      = rq_sz + (page_offset * po_quanta);
	u32 rq_num_pas    = DIV_ROUND_UP(rq_sz_po, page_size);

	return rq_num_pas * sizeof(u64);
}

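/*
 * Translate driver-level SRQ attributes into the device's WQ and SRQ
 * context layouts; MLX5_SET/MLX5_SET64 write the named field of the given
 * firmware structure.
 */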
static void set_wq(void *wq, struct mlx5_srq_attr *in)
{
	MLX5_SET(wq,   wq, wq_signature,  !!(in->flags
		 & MLX5_SRQ_FLAG_WQ_SIG));
	MLX5_SET(wq,   wq, log_wq_pg_sz,  in->log_page_size);
	MLX5_SET(wq,   wq, log_wq_stride, in->wqe_shift + 4);
	MLX5_SET(wq,   wq, log_wq_sz,     in->log_size);
	MLX5_SET(wq,   wq, page_offset,   in->page_offset);
	MLX5_SET(wq,   wq, lwm,           in->lwm);
	MLX5_SET(wq,   wq, pd,            in->pd);
	MLX5_SET64(wq, wq, dbr_addr,      in->db_record);
}

static void set_srqc(void *srqc, struct mlx5_srq_attr *in)
{
	MLX5_SET(srqc,   srqc, wq_signature,  !!(in->flags
		 & MLX5_SRQ_FLAG_WQ_SIG));
	MLX5_SET(srqc,   srqc, log_page_size, in->log_page_size);
	MLX5_SET(srqc,   srqc, log_rq_stride, in->wqe_shift);
	MLX5_SET(srqc,   srqc, log_srq_size,  in->log_size);
	MLX5_SET(srqc,   srqc, page_offset,   in->page_offset);
	MLX5_SET(srqc,   srqc, lwm,           in->lwm);
	MLX5_SET(srqc,   srqc, pd,            in->pd);
	MLX5_SET64(srqc, srqc, dbr_addr,      in->db_record);
	MLX5_SET(srqc,   srqc, xrcd,          in->xrcd);
	MLX5_SET(srqc,   srqc, cqn,           in->cqn);
}

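/*
 * get_wq()/get_srqc() are the inverses of the setters above: they read a
 * firmware context back into a struct mlx5_srq_attr.
 */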
static void get_wq(void *wq, struct mlx5_srq_attr *in)
{
	if (MLX5_GET(wq, wq, wq_signature))
		in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
	in->log_page_size = MLX5_GET(wq,   wq, log_wq_pg_sz);
	in->wqe_shift	  = MLX5_GET(wq,   wq, log_wq_stride) - 4;
	in->log_size	  = MLX5_GET(wq,   wq, log_wq_sz);
	in->page_offset   = MLX5_GET(wq,   wq, page_offset);
	in->lwm		  = MLX5_GET(wq,   wq, lwm);
	in->pd		  = MLX5_GET(wq,   wq, pd);
	in->db_record	  = MLX5_GET64(wq, wq, dbr_addr);
}

static void get_srqc(void *srqc, struct mlx5_srq_attr *in)
{
	if (MLX5_GET(srqc, srqc, wq_signature))
		in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
	in->log_page_size = MLX5_GET(srqc,   srqc, log_page_size);
	in->wqe_shift	  = MLX5_GET(srqc,   srqc, log_rq_stride);
	in->log_size	  = MLX5_GET(srqc,   srqc, log_srq_size);
	in->page_offset   = MLX5_GET(srqc,   srqc, page_offset);
	in->lwm		  = MLX5_GET(srqc,   srqc, lwm);
	in->pd		  = MLX5_GET(srqc,   srqc, pd);
	in->db_record	  = MLX5_GET64(srqc, srqc, dbr_addr);
}

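/*
 * Look up an SRQ by number and take a reference under the xarray lock so
 * the entry cannot be released while the caller is using it.
 */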
struct mlx5_core_srq *mlx5_cmd_get_srq(struct mlx5_ib_dev *dev, u32 srqn)
{
	struct mlx5_srq_table *table = &dev->srq_table;
	struct mlx5_core_srq *srq;

	xa_lock_irq(&table->array);
	srq = xa_load(&table->array, srqn);
	if (srq)
		refcount_inc(&srq->common.refcount);
	xa_unlock_irq(&table->array);

	return srq;
}

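/*
 * Derive log_page_size from the best page size found for the umem and
 * sanity-check that the resulting PAS size matches the number of DMA
 * blocks in the umem.
 */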
static int __set_srq_page_size(struct mlx5_srq_attr *in,
			       unsigned long page_size)
{
	if (!page_size)
		return -EINVAL;
	in->log_page_size = order_base_2(page_size) - MLX5_ADAPTER_PAGE_SHIFT;

	if (WARN_ON(get_pas_size(in) !=
		    ib_umem_num_dma_blocks(in->umem, page_size) * sizeof(u64)))
		return -EINVAL;
	return 0;
}

#define set_srq_page_size(in, typ, log_pgsz_fld)                              \
	__set_srq_page_size(in, mlx5_umem_find_best_quantized_pgoff(          \
					(in)->umem, typ, log_pgsz_fld,        \
					MLX5_ADAPTER_PAGE_SHIFT, page_offset, \
					64, &(in)->page_offset))

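/*
 * Firmware command wrappers for plain SRQs; the split helpers below select
 * these when dev->mdev->issi is 0.
 */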
static int create_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			  struct mlx5_srq_attr *in)
{
	u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0};
	void *create_in;
	void *srqc;
	void *pas;
	int pas_size;
	int inlen;
	int err;

	if (in->umem) {
		err = set_srq_page_size(in, srqc, log_page_size);
		if (err)
			return err;
	}

	pas_size  = get_pas_size(in);
	inlen	  = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size;
	create_in = kvzalloc(inlen, GFP_KERNEL);
	if (!create_in)
		return -ENOMEM;

	MLX5_SET(create_srq_in, create_in, uid, in->uid);
	srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry);
	pas = MLX5_ADDR_OF(create_srq_in, create_in, pas);

	set_srqc(srqc, in);
	if (in->umem)
		mlx5_ib_populate_pas(
			in->umem,
			1UL << (in->log_page_size + MLX5_ADAPTER_PAGE_SHIFT),
			pas, 0);
	else
		memcpy(pas, in->pas, pas_size);

	MLX5_SET(create_srq_in, create_in, opcode,
		 MLX5_CMD_OP_CREATE_SRQ);

	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
			    sizeof(create_out));
	kvfree(create_in);
	if (!err) {
		srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);
		srq->uid = in->uid;
	}

	return err;
}

static int destroy_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
	u32 in[MLX5_ST_SZ_DW(destroy_srq_in)] = {};

	MLX5_SET(destroy_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_SRQ);
	MLX5_SET(destroy_srq_in, in, srqn, srq->srqn);
	MLX5_SET(destroy_srq_in, in, uid, srq->uid);

	return mlx5_cmd_exec_in(dev->mdev, destroy_srq, in);
}

static int arm_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
		       u16 lwm, int is_srq)
{
	u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {};

	MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
	MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ);
	MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
	MLX5_SET(arm_rq_in, in, lwm, lwm);
	MLX5_SET(arm_rq_in, in, uid, srq->uid);

	return mlx5_cmd_exec_in(dev->mdev, arm_rq, in);
}

static int query_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *out)
{
	u32 in[MLX5_ST_SZ_DW(query_srq_in)] = {};
	u32 *srq_out;
	void *srqc;
	int err;

	srq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_srq_out), GFP_KERNEL);
	if (!srq_out)
		return -ENOMEM;

	MLX5_SET(query_srq_in, in, opcode, MLX5_CMD_OP_QUERY_SRQ);
	MLX5_SET(query_srq_in, in, srqn, srq->srqn);
	err = mlx5_cmd_exec_inout(dev->mdev, query_srq, in, srq_out);
	if (err)
		goto out;

	srqc = MLX5_ADDR_OF(query_srq_out, srq_out, srq_context_entry);
	get_srqc(srqc, out);
	if (MLX5_GET(srqc, srqc, state) != MLX5_SRQC_STATE_GOOD)
		out->flags |= MLX5_SRQ_FLAG_ERR;

out:
	kvfree(srq_out);
	return err;
}

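/* XRC SRQ flavors of the create/destroy/arm/query commands. */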
static int create_xrc_srq_cmd(struct mlx5_ib_dev *dev,
			      struct mlx5_core_srq *srq,
			      struct mlx5_srq_attr *in)
{
	u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
	void *create_in;
	void *xrc_srqc;
	void *pas;
	int pas_size;
	int inlen;
	int err;

	if (in->umem) {
		err = set_srq_page_size(in, xrc_srqc, log_page_size);
		if (err)
			return err;
	}

	pas_size  = get_pas_size(in);
	inlen	  = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
	create_in = kvzalloc(inlen, GFP_KERNEL);
	if (!create_in)
		return -ENOMEM;

	MLX5_SET(create_xrc_srq_in, create_in, uid, in->uid);
	xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in,
				xrc_srq_context_entry);
	pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);

	set_srqc(xrc_srqc, in);
	MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index);
	if (in->umem)
		mlx5_ib_populate_pas(
			in->umem,
			1UL << (in->log_page_size + MLX5_ADAPTER_PAGE_SHIFT),
			pas, 0);
	else
		memcpy(pas, in->pas, pas_size);
	MLX5_SET(create_xrc_srq_in, create_in, opcode,
		 MLX5_CMD_OP_CREATE_XRC_SRQ);

	memset(create_out, 0, sizeof(create_out));
	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
			    sizeof(create_out));
	if (err)
		goto out;

	srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn);
	srq->uid = in->uid;

out:
	kvfree(create_in);
	return err;
}

static int destroy_xrc_srq_cmd(struct mlx5_ib_dev *dev,
			       struct mlx5_core_srq *srq)
{
	u32 in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {};

	MLX5_SET(destroy_xrc_srq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRC_SRQ);
	MLX5_SET(destroy_xrc_srq_in, in, xrc_srqn, srq->srqn);
	MLX5_SET(destroy_xrc_srq_in, in, uid, srq->uid);

	return mlx5_cmd_exec_in(dev->mdev, destroy_xrc_srq, in);
}

static int arm_xrc_srq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			   u16 lwm)
{
	u32 in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {};

	MLX5_SET(arm_xrc_srq_in, in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
	MLX5_SET(arm_xrc_srq_in, in, op_mod,
		 MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
	MLX5_SET(arm_xrc_srq_in, in, xrc_srqn, srq->srqn);
	MLX5_SET(arm_xrc_srq_in, in, lwm, lwm);
	MLX5_SET(arm_xrc_srq_in, in, uid, srq->uid);

	return mlx5_cmd_exec_in(dev->mdev, arm_xrc_srq, in);
}

static int query_xrc_srq_cmd(struct mlx5_ib_dev *dev,
			     struct mlx5_core_srq *srq,
			     struct mlx5_srq_attr *out)
{
	u32 in[MLX5_ST_SZ_DW(query_xrc_srq_in)] = {};
	u32 *xrcsrq_out;
	void *xrc_srqc;
	int err;

	xrcsrq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out), GFP_KERNEL);
	if (!xrcsrq_out)
		return -ENOMEM;

	MLX5_SET(query_xrc_srq_in, in, opcode, MLX5_CMD_OP_QUERY_XRC_SRQ);
	MLX5_SET(query_xrc_srq_in, in, xrc_srqn, srq->srqn);

	err = mlx5_cmd_exec_inout(dev->mdev, query_xrc_srq, in, xrcsrq_out);
	if (err)
		goto out;

	xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
				xrc_srq_context_entry);
	get_srqc(xrc_srqc, out);
	if (MLX5_GET(xrc_srqc, xrc_srqc, state) != MLX5_XRC_SRQC_STATE_GOOD)
		out->flags |= MLX5_SRQ_FLAG_ERR;

out:
	kvfree(xrcsrq_out);
	return err;
}

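/* RMP (receive memory pool) flavors, backing basic SRQs when issi != 0. */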
static int create_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			  struct mlx5_srq_attr *in)
{
	void *create_out = NULL;
	void *create_in = NULL;
	void *rmpc;
	void *wq;
	void *pas;
	int pas_size;
	int outlen;
	int inlen;
	int err;

	if (in->umem) {
		err = set_srq_page_size(in, wq, log_wq_pg_sz);
		if (err)
			return err;
	}

	pas_size = get_pas_size(in);
	inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
	outlen = MLX5_ST_SZ_BYTES(create_rmp_out);
	create_in = kvzalloc(inlen, GFP_KERNEL);
	create_out = kvzalloc(outlen, GFP_KERNEL);
	if (!create_in || !create_out) {
		err = -ENOMEM;
		goto out;
	}

	rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
	wq = MLX5_ADDR_OF(rmpc, rmpc, wq);

	MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
	MLX5_SET(create_rmp_in, create_in, uid, in->uid);
	pas = MLX5_ADDR_OF(rmpc, rmpc, wq.pas);

	set_wq(wq, in);
	if (in->umem)
		mlx5_ib_populate_pas(
			in->umem,
			1UL << (in->log_page_size + MLX5_ADAPTER_PAGE_SHIFT),
			pas, 0);
	else
		memcpy(pas, in->pas, pas_size);

	MLX5_SET(create_rmp_in, create_in, opcode, MLX5_CMD_OP_CREATE_RMP);
	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out, outlen);
	if (!err) {
		srq->srqn = MLX5_GET(create_rmp_out, create_out, rmpn);
		srq->uid = in->uid;
	}

out:
	kvfree(create_in);
	kvfree(create_out);
	return err;
}

static int destroy_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
	u32 in[MLX5_ST_SZ_DW(destroy_rmp_in)] = {};

	MLX5_SET(destroy_rmp_in, in, opcode, MLX5_CMD_OP_DESTROY_RMP);
	MLX5_SET(destroy_rmp_in, in, rmpn, srq->srqn);
	MLX5_SET(destroy_rmp_in, in, uid, srq->uid);
	return mlx5_cmd_exec_in(dev->mdev, destroy_rmp, in);
}

static int arm_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
		       u16 lwm)
{
	void *out = NULL;
	void *in = NULL;
	void *rmpc;
	void *wq;
	void *bitmask;
	int outlen;
	int inlen;
	int err;

	inlen = MLX5_ST_SZ_BYTES(modify_rmp_in);
	outlen = MLX5_ST_SZ_BYTES(modify_rmp_out);

	in = kvzalloc(inlen, GFP_KERNEL);
	out = kvzalloc(outlen, GFP_KERNEL);
	if (!in || !out) {
		err = -ENOMEM;
		goto out;
	}

	rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx);
	bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask);
	wq = MLX5_ADDR_OF(rmpc, rmpc, wq);

	MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY);
	MLX5_SET(modify_rmp_in, in, rmpn, srq->srqn);
	MLX5_SET(modify_rmp_in, in, uid, srq->uid);
	MLX5_SET(wq, wq, lwm, lwm);
	MLX5_SET(rmp_bitmask, bitmask, lwm, 1);
	MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
	MLX5_SET(modify_rmp_in, in, opcode, MLX5_CMD_OP_MODIFY_RMP);

	err = mlx5_cmd_exec_inout(dev->mdev, modify_rmp, in, out);

out:
	kvfree(in);
	kvfree(out);
	return err;
}

static int query_rmp_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *out)
{
	u32 *rmp_out = NULL;
	u32 *rmp_in = NULL;
	void *rmpc;
	int outlen;
	int inlen;
	int err;

	outlen = MLX5_ST_SZ_BYTES(query_rmp_out);
	inlen = MLX5_ST_SZ_BYTES(query_rmp_in);

	rmp_out = kvzalloc(outlen, GFP_KERNEL);
	rmp_in = kvzalloc(inlen, GFP_KERNEL);
	if (!rmp_out || !rmp_in) {
		err = -ENOMEM;
		goto out;
	}

	MLX5_SET(query_rmp_in, rmp_in, opcode, MLX5_CMD_OP_QUERY_RMP);
	MLX5_SET(query_rmp_in, rmp_in, rmpn, srq->srqn);
	err = mlx5_cmd_exec_inout(dev->mdev, query_rmp, rmp_in, rmp_out);
	if (err)
		goto out;

	rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
	get_wq(MLX5_ADDR_OF(rmpc, rmpc, wq), out);
	if (MLX5_GET(rmpc, rmpc, state) != MLX5_RMPC_STATE_RDY)
		out->flags |= MLX5_SRQ_FLAG_ERR;

out:
	kvfree(rmp_out);
	kvfree(rmp_in);
	return err;
}

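/* XRQ flavors, backing tag-matching (IB_SRQT_TM) SRQs. */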
static int create_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			  struct mlx5_srq_attr *in)
{
	u32 create_out[MLX5_ST_SZ_DW(create_xrq_out)] = {0};
	void *create_in;
	void *xrqc;
	void *wq;
	void *pas;
	int pas_size;
	int inlen;
	int err;

	if (in->umem) {
		err = set_srq_page_size(in, wq, log_wq_pg_sz);
		if (err)
			return err;
	}

	pas_size = get_pas_size(in);
	inlen = MLX5_ST_SZ_BYTES(create_xrq_in) + pas_size;
	create_in = kvzalloc(inlen, GFP_KERNEL);
	if (!create_in)
		return -ENOMEM;

	xrqc = MLX5_ADDR_OF(create_xrq_in, create_in, xrq_context);
	wq = MLX5_ADDR_OF(xrqc, xrqc, wq);
	pas = MLX5_ADDR_OF(xrqc, xrqc, wq.pas);

	set_wq(wq, in);
	if (in->umem)
		mlx5_ib_populate_pas(
			in->umem,
			1UL << (in->log_page_size + MLX5_ADAPTER_PAGE_SHIFT),
			pas, 0);
	else
		memcpy(pas, in->pas, pas_size);

	if (in->type == IB_SRQT_TM) {
		MLX5_SET(xrqc, xrqc, topology, MLX5_XRQC_TOPOLOGY_TAG_MATCHING);
		if (in->flags & MLX5_SRQ_FLAG_RNDV)
			MLX5_SET(xrqc, xrqc, offload, MLX5_XRQC_OFFLOAD_RNDV);
		MLX5_SET(xrqc, xrqc,
			 tag_matching_topology_context.log_matching_list_sz,
			 in->tm_log_list_size);
	}
	MLX5_SET(xrqc, xrqc, user_index, in->user_index);
	MLX5_SET(xrqc, xrqc, cqn, in->cqn);
	MLX5_SET(create_xrq_in, create_in, opcode, MLX5_CMD_OP_CREATE_XRQ);
	MLX5_SET(create_xrq_in, create_in, uid, in->uid);
	err = mlx5_cmd_exec(dev->mdev, create_in, inlen, create_out,
			    sizeof(create_out));
	kvfree(create_in);
	if (!err) {
		srq->srqn = MLX5_GET(create_xrq_out, create_out, xrqn);
		srq->uid = in->uid;
	}

	return err;
}

static int destroy_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
	u32 in[MLX5_ST_SZ_DW(destroy_xrq_in)] = {};

	MLX5_SET(destroy_xrq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRQ);
	MLX5_SET(destroy_xrq_in, in, xrqn, srq->srqn);
	MLX5_SET(destroy_xrq_in, in, uid, srq->uid);

	return mlx5_cmd_exec_in(dev->mdev, destroy_xrq, in);
}

static int arm_xrq_cmd(struct mlx5_ib_dev *dev,
		       struct mlx5_core_srq *srq,
		       u16 lwm)
{
	u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {};

	MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
	MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_XRQ);
	MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
	MLX5_SET(arm_rq_in, in, lwm, lwm);
	MLX5_SET(arm_rq_in, in, uid, srq->uid);

	return mlx5_cmd_exec_in(dev->mdev, arm_rq, in);
}

static int query_xrq_cmd(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			 struct mlx5_srq_attr *out)
{
	u32 in[MLX5_ST_SZ_DW(query_xrq_in)] = {};
	u32 *xrq_out;
	int outlen = MLX5_ST_SZ_BYTES(query_xrq_out);
	void *xrqc;
	int err;

	xrq_out = kvzalloc(outlen, GFP_KERNEL);
	if (!xrq_out)
		return -ENOMEM;

	MLX5_SET(query_xrq_in, in, opcode, MLX5_CMD_OP_QUERY_XRQ);
	MLX5_SET(query_xrq_in, in, xrqn, srq->srqn);

	err = mlx5_cmd_exec_inout(dev->mdev, query_xrq, in, xrq_out);
	if (err)
		goto out;

	xrqc = MLX5_ADDR_OF(query_xrq_out, xrq_out, xrq_context);
	get_wq(MLX5_ADDR_OF(xrqc, xrqc, wq), out);
	if (MLX5_GET(xrqc, xrqc, state) != MLX5_XRQC_STATE_GOOD)
		out->flags |= MLX5_SRQ_FLAG_ERR;

	out->tm_next_tag =
		MLX5_GET(xrqc, xrqc,
			 tag_matching_topology_context.append_next_index);
	out->tm_hw_phase_cnt =
		MLX5_GET(xrqc, xrqc,
			 tag_matching_topology_context.hw_phase_cnt);
	out->tm_sw_phase_cnt =
		MLX5_GET(xrqc, xrqc,
			 tag_matching_topology_context.sw_phase_cnt);

out:
	kvfree(xrq_out);
	return err;
}

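/*
 * Dispatch helpers: legacy SRQ commands when dev->mdev->issi is 0,
 * otherwise the XRC SRQ, XRQ or RMP command set according to the resource
 * type chosen at creation.
 */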
static int create_srq_split(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			    struct mlx5_srq_attr *in)
{
	if (!dev->mdev->issi)
		return create_srq_cmd(dev, srq, in);
	switch (srq->common.res) {
	case MLX5_RES_XSRQ:
		return create_xrc_srq_cmd(dev, srq, in);
	case MLX5_RES_XRQ:
		return create_xrq_cmd(dev, srq, in);
	default:
		return create_rmp_cmd(dev, srq, in);
	}
}

static int destroy_srq_split(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
	if (!dev->mdev->issi)
		return destroy_srq_cmd(dev, srq);
	switch (srq->common.res) {
	case MLX5_RES_XSRQ:
		return destroy_xrc_srq_cmd(dev, srq);
	case MLX5_RES_XRQ:
		return destroy_xrq_cmd(dev, srq);
	default:
		return destroy_rmp_cmd(dev, srq);
	}
}

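/*
 * Public entry points. Creation maps the verbs SRQ type onto an mlx5
 * resource type, creates the firmware object and publishes it in the SRQ
 * table xarray.
 */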
int mlx5_cmd_create_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
			struct mlx5_srq_attr *in)
{
	struct mlx5_srq_table *table = &dev->srq_table;
	int err;

	switch (in->type) {
	case IB_SRQT_XRC:
		srq->common.res = MLX5_RES_XSRQ;
		break;
	case IB_SRQT_TM:
		srq->common.res = MLX5_RES_XRQ;
		break;
	default:
		srq->common.res = MLX5_RES_SRQ;
	}

	err = create_srq_split(dev, srq, in);
	if (err)
		return err;

	refcount_set(&srq->common.refcount, 1);
	init_completion(&srq->common.free);

	err = xa_err(xa_store_irq(&table->array, srq->srqn, srq, GFP_KERNEL));
	if (err)
		goto err_destroy_srq_split;

	return 0;

err_destroy_srq_split:
	destroy_srq_split(dev, srq);

	return err;
}

int mlx5_cmd_destroy_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq)
{
	struct mlx5_srq_table *table = &dev->srq_table;
	struct mlx5_core_srq *tmp;
	int err;

	/* Delete entry, but leave index occupied */
	tmp = xa_cmpxchg_irq(&table->array, srq->srqn, srq, XA_ZERO_ENTRY, 0);
	if (WARN_ON(tmp != srq))
		return xa_err(tmp) ?: -EINVAL;

	err = destroy_srq_split(dev, srq);
	if (err) {
		/*
		 * We don't need to check the returned result for an error,
		 * because we are storing into a pre-allocated xarray entry
		 * and it can't fail at this stage.
		 */
		xa_cmpxchg_irq(&table->array, srq->srqn, XA_ZERO_ENTRY, srq, 0);
		return err;
	}
	xa_erase_irq(&table->array, srq->srqn);

	mlx5_core_res_put(&srq->common);
	wait_for_completion(&srq->common.free);
	return 0;
}

int mlx5_cmd_query_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
		       struct mlx5_srq_attr *out)
{
	if (!dev->mdev->issi)
		return query_srq_cmd(dev, srq, out);
	switch (srq->common.res) {
	case MLX5_RES_XSRQ:
		return query_xrc_srq_cmd(dev, srq, out);
	case MLX5_RES_XRQ:
		return query_xrq_cmd(dev, srq, out);
	default:
		return query_rmp_cmd(dev, srq, out);
	}
}

int mlx5_cmd_arm_srq(struct mlx5_ib_dev *dev, struct mlx5_core_srq *srq,
		     u16 lwm, int is_srq)
{
	if (!dev->mdev->issi)
		return arm_srq_cmd(dev, srq, lwm, is_srq);
	switch (srq->common.res) {
	case MLX5_RES_XSRQ:
		return arm_xrc_srq_cmd(dev, srq, lwm);
	case MLX5_RES_XRQ:
		return arm_xrq_cmd(dev, srq, lwm);
	default:
		return arm_rmp_cmd(dev, srq, lwm);
	}
}

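/*
 * Notifier for SRQ hardware events (catastrophic error and RQ limit):
 * resolve the SRQ number from the EQE, take a reference and forward the
 * event to the owner's ->event() callback.
 */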
static int srq_event_notifier(struct notifier_block *nb,
			      unsigned long type, void *data)
{
	struct mlx5_srq_table *table;
	struct mlx5_core_srq *srq;
	struct mlx5_eqe *eqe;
	u32 srqn;

	if (type != MLX5_EVENT_TYPE_SRQ_CATAS_ERROR &&
	    type != MLX5_EVENT_TYPE_SRQ_RQ_LIMIT)
		return NOTIFY_DONE;

	table = container_of(nb, struct mlx5_srq_table, nb);

	eqe = data;
	srqn = be32_to_cpu(eqe->data.qp_srq.qp_srq_n) & 0xffffff;

	xa_lock(&table->array);
	srq = xa_load(&table->array, srqn);
	if (srq)
		refcount_inc(&srq->common.refcount);
	xa_unlock(&table->array);

	if (!srq)
		return NOTIFY_OK;

	srq->event(srq, eqe->type);

	mlx5_core_res_put(&srq->common);

	return NOTIFY_OK;
}

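/* Set up the SRQ table xarray and register for SRQ hardware events. */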
int mlx5_init_srq_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_srq_table *table = &dev->srq_table;

	memset(table, 0, sizeof(*table));
	xa_init_flags(&table->array, XA_FLAGS_LOCK_IRQ);

	table->nb.notifier_call = srq_event_notifier;
	mlx5_notifier_register(dev->mdev, &table->nb);

	return 0;
}

void mlx5_cleanup_srq_table(struct mlx5_ib_dev *dev)
{
	struct mlx5_srq_table *table = &dev->srq_table;

	mlx5_notifier_unregister(dev->mdev, &table->nb);
}