/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include <linux/mlx5/srq.h>
#include <rdma/ib_verbs.h>
#include "mlx5_core.h"
#include <linux/mlx5/transobj.h>

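/*
 * Deliver an asynchronous event to the SRQ's event handler.  The
 * reference taken under the table lock keeps the SRQ alive while the
 * handler runs; dropping the last reference completes srq->free so
 * mlx5_core_destroy_srq() can finish.
 */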
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
{
        struct mlx5_srq_table *table = &dev->priv.srq_table;
        struct mlx5_core_srq *srq;

        spin_lock(&table->lock);

        srq = radix_tree_lookup(&table->tree, srqn);
        if (srq)
                atomic_inc(&srq->refcount);

        spin_unlock(&table->lock);

        if (!srq) {
                mlx5_core_warn(dev, "Async event for bogus SRQ 0x%08x\n", srqn);
                return;
        }

        srq->event(srq, event_type);

        if (atomic_dec_and_test(&srq->refcount))
                complete(&srq->free);
}

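/*
 * Size in bytes of the physical address list (PAS) appended to the
 * create commands: the receive queue, shifted by its offset within the
 * first page, rounded up to whole pages, one u64 address per page.
 */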
static int get_pas_size(struct mlx5_srq_attr *in)
{
        u32 log_page_size = in->log_page_size + 12;
        u32 log_srq_size  = in->log_size;
        u32 log_rq_stride = in->wqe_shift;
        u32 page_offset   = in->page_offset;
        u32 po_quanta     = 1 << (log_page_size - 6);
        u32 rq_sz         = 1 << (log_srq_size + 4 + log_rq_stride);
        u32 page_size     = 1 << log_page_size;
        u32 rq_sz_po      = rq_sz + (page_offset * po_quanta);
        u32 rq_num_pas    = (rq_sz_po + page_size - 1) / page_size;

        return rq_num_pas * sizeof(u64);
}

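/*
 * set_wq()/set_srqc() translate the generic mlx5_srq_attr into the two
 * hardware context layouts: the common work-queue context used by RMPs
 * and XRQs, and the SRQ/XRC-SRQ context.  Note the stride conventions
 * differ: log_wq_stride is biased by 4 relative to wqe_shift.
 */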
static void set_wq(void *wq, struct mlx5_srq_attr *in)
{
        MLX5_SET(wq,   wq, wq_signature,
                 !!(in->flags & MLX5_SRQ_FLAG_WQ_SIG));
        MLX5_SET(wq,   wq, log_wq_pg_sz,  in->log_page_size);
        MLX5_SET(wq,   wq, log_wq_stride, in->wqe_shift + 4);
        MLX5_SET(wq,   wq, log_wq_sz,     in->log_size);
        MLX5_SET(wq,   wq, page_offset,   in->page_offset);
        MLX5_SET(wq,   wq, lwm,           in->lwm);
        MLX5_SET(wq,   wq, pd,            in->pd);
        MLX5_SET64(wq, wq, dbr_addr,      in->db_record);
}

static void set_srqc(void *srqc, struct mlx5_srq_attr *in)
{
        MLX5_SET(srqc,   srqc, wq_signature,
                 !!(in->flags & MLX5_SRQ_FLAG_WQ_SIG));
        MLX5_SET(srqc,   srqc, log_page_size, in->log_page_size);
        MLX5_SET(srqc,   srqc, log_rq_stride, in->wqe_shift);
        MLX5_SET(srqc,   srqc, log_srq_size,  in->log_size);
        MLX5_SET(srqc,   srqc, page_offset,   in->page_offset);
        MLX5_SET(srqc,   srqc, lwm,           in->lwm);
        MLX5_SET(srqc,   srqc, pd,            in->pd);
        MLX5_SET64(srqc, srqc, dbr_addr,      in->db_record);
        MLX5_SET(srqc,   srqc, xrcd,          in->xrcd);
        MLX5_SET(srqc,   srqc, cqn,           in->cqn);
}

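/* The get_*() helpers perform the inverse translation on query. */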
static void get_wq(void *wq, struct mlx5_srq_attr *in)
{
        if (MLX5_GET(wq, wq, wq_signature))
                in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
        in->log_page_size = MLX5_GET(wq,   wq, log_wq_pg_sz);
        in->wqe_shift     = MLX5_GET(wq,   wq, log_wq_stride) - 4;
        in->log_size      = MLX5_GET(wq,   wq, log_wq_sz);
        in->page_offset   = MLX5_GET(wq,   wq, page_offset);
        in->lwm           = MLX5_GET(wq,   wq, lwm);
        in->pd            = MLX5_GET(wq,   wq, pd);
        in->db_record     = MLX5_GET64(wq, wq, dbr_addr);
}

static void get_srqc(void *srqc, struct mlx5_srq_attr *in)
{
        if (MLX5_GET(srqc, srqc, wq_signature))
                in->flags &= MLX5_SRQ_FLAG_WQ_SIG;
        in->log_page_size = MLX5_GET(srqc,   srqc, log_page_size);
        in->wqe_shift     = MLX5_GET(srqc,   srqc, log_rq_stride);
        in->log_size      = MLX5_GET(srqc,   srqc, log_srq_size);
        in->page_offset   = MLX5_GET(srqc,   srqc, page_offset);
        in->lwm           = MLX5_GET(srqc,   srqc, lwm);
        in->pd            = MLX5_GET(srqc,   srqc, pd);
        in->db_record     = MLX5_GET64(srqc, srqc, dbr_addr);
}

struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
{
        struct mlx5_srq_table *table = &dev->priv.srq_table;
        struct mlx5_core_srq *srq;

        spin_lock(&table->lock);

        srq = radix_tree_lookup(&table->tree, srqn);
        if (srq)
                atomic_inc(&srq->refcount);

        spin_unlock(&table->lock);

        return srq;
}
EXPORT_SYMBOL(mlx5_core_get_srq);

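/*
 * The firmware command wrappers below all follow the same pattern: a
 * fixed-size input layout (MLX5_ST_SZ_BYTES) optionally extended by a
 * variable-length PAS array, filled in with MLX5_SET/MLX5_ADDR_OF and
 * executed either directly via mlx5_cmd_exec() or through the transobj
 * helpers.
 */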
static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                          struct mlx5_srq_attr *in)
{
        u32 create_out[MLX5_ST_SZ_DW(create_srq_out)] = {0};
        void *create_in;
        void *srqc;
        void *pas;
        int pas_size;
        int inlen;
        int err;

        pas_size  = get_pas_size(in);
        inlen     = MLX5_ST_SZ_BYTES(create_srq_in) + pas_size;
        create_in = kvzalloc(inlen, GFP_KERNEL);
        if (!create_in)
                return -ENOMEM;

        srqc = MLX5_ADDR_OF(create_srq_in, create_in, srq_context_entry);
        pas = MLX5_ADDR_OF(create_srq_in, create_in, pas);

        set_srqc(srqc, in);
        memcpy(pas, in->pas, pas_size);

        MLX5_SET(create_srq_in, create_in, opcode,
                 MLX5_CMD_OP_CREATE_SRQ);

        err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
                            sizeof(create_out));
        kvfree(create_in);
        if (!err)
                srq->srqn = MLX5_GET(create_srq_out, create_out, srqn);

        return err;
}

static int destroy_srq_cmd(struct mlx5_core_dev *dev,
                           struct mlx5_core_srq *srq)
{
        u32 srq_in[MLX5_ST_SZ_DW(destroy_srq_in)] = {0};
        u32 srq_out[MLX5_ST_SZ_DW(destroy_srq_out)] = {0};

        MLX5_SET(destroy_srq_in, srq_in, opcode,
                 MLX5_CMD_OP_DESTROY_SRQ);
        MLX5_SET(destroy_srq_in, srq_in, srqn, srq->srqn);

        return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
                             srq_out, sizeof(srq_out));
}

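/*
 * Arming sets the limit watermark (lwm); the device raises an SRQ
 * limit event once the number of posted receive WQEs drops below it.
 */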
static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                       u16 lwm, int is_srq)
{
        u32 srq_in[MLX5_ST_SZ_DW(arm_rq_in)] = {0};
        u32 srq_out[MLX5_ST_SZ_DW(arm_rq_out)] = {0};

        MLX5_SET(arm_rq_in, srq_in, opcode, MLX5_CMD_OP_ARM_RQ);
        MLX5_SET(arm_rq_in, srq_in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_SRQ);
        MLX5_SET(arm_rq_in, srq_in, srq_number, srq->srqn);
        MLX5_SET(arm_rq_in, srq_in, lwm, lwm);

        return mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
                             srq_out, sizeof(srq_out));
}

static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                         struct mlx5_srq_attr *out)
{
        u32 srq_in[MLX5_ST_SZ_DW(query_srq_in)] = {0};
        u32 *srq_out;
        void *srqc;
        int err;

        srq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_srq_out), GFP_KERNEL);
        if (!srq_out)
                return -ENOMEM;

        MLX5_SET(query_srq_in, srq_in, opcode,
                 MLX5_CMD_OP_QUERY_SRQ);
        MLX5_SET(query_srq_in, srq_in, srqn, srq->srqn);
        err = mlx5_cmd_exec(dev, srq_in, sizeof(srq_in),
                            srq_out, MLX5_ST_SZ_BYTES(query_srq_out));
        if (err)
                goto out;

        srqc = MLX5_ADDR_OF(query_srq_out, srq_out, srq_context_entry);
        get_srqc(srqc, out);
        if (MLX5_GET(srqc, srqc, state) != MLX5_SRQC_STATE_GOOD)
                out->flags |= MLX5_SRQ_FLAG_ERR;
out:
        kvfree(srq_out);
        return err;
}

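/* XRC SRQ variants of the same create/destroy/arm/query commands. */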
static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
                              struct mlx5_core_srq *srq,
                              struct mlx5_srq_attr *in)
{
        u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
        void *create_in;
        void *xrc_srqc;
        void *pas;
        int pas_size;
        int inlen;
        int err;

        pas_size  = get_pas_size(in);
        inlen     = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
        create_in = kvzalloc(inlen, GFP_KERNEL);
        if (!create_in)
                return -ENOMEM;

        xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in,
                                xrc_srq_context_entry);
        pas      = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);

        set_srqc(xrc_srqc, in);
        MLX5_SET(xrc_srqc, xrc_srqc, user_index, in->user_index);
        memcpy(pas, in->pas, pas_size);
        MLX5_SET(create_xrc_srq_in, create_in, opcode,
                 MLX5_CMD_OP_CREATE_XRC_SRQ);

        memset(create_out, 0, sizeof(create_out));
        err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
                            sizeof(create_out));
        if (err)
                goto out;

        srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn);
out:
        kvfree(create_in);
        return err;
}

static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
                               struct mlx5_core_srq *srq)
{
        u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)] = {0};
        u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)] = {0};

        MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode,
                 MLX5_CMD_OP_DESTROY_XRC_SRQ);
        MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);

        return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in),
                             xrcsrq_out, sizeof(xrcsrq_out));
}

static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
                           struct mlx5_core_srq *srq, u16 lwm)
{
        u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)] = {0};
        u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)] = {0};

        MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
        MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod,
                 MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
        MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
        MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm);

        return mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in),
                             xrcsrq_out, sizeof(xrcsrq_out));
}

static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
                             struct mlx5_core_srq *srq,
                             struct mlx5_srq_attr *out)
{
        u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
        u32 *xrcsrq_out;
        void *xrc_srqc;
        int err;

        xrcsrq_out = kvzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out), GFP_KERNEL);
        if (!xrcsrq_out)
                return -ENOMEM;
        memset(xrcsrq_in, 0, sizeof(xrcsrq_in));

        MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode,
                 MLX5_CMD_OP_QUERY_XRC_SRQ);
        MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);

        err = mlx5_cmd_exec(dev, xrcsrq_in, sizeof(xrcsrq_in), xrcsrq_out,
                            MLX5_ST_SZ_BYTES(query_xrc_srq_out));
        if (err)
                goto out;

        xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
                                xrc_srq_context_entry);
        get_srqc(xrc_srqc, out);
        if (MLX5_GET(xrc_srqc, xrc_srqc, state) != MLX5_XRC_SRQC_STATE_GOOD)
                out->flags |= MLX5_SRQ_FLAG_ERR;

out:
        kvfree(xrcsrq_out);
        return err;
}

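/*
 * On ISSI >= 1 devices a plain SRQ is implemented as a receive memory
 * pool (RMP), created and modified through the transport-object
 * helpers rather than through dedicated SRQ commands.
 */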
static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                          struct mlx5_srq_attr *in)
{
        void *create_in;
        void *rmpc;
        void *wq;
        int pas_size;
        int inlen;
        int err;

        pas_size = get_pas_size(in);
        inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
        create_in = kvzalloc(inlen, GFP_KERNEL);
        if (!create_in)
                return -ENOMEM;

        rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);
        wq = MLX5_ADDR_OF(rmpc, rmpc, wq);

        MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
        set_wq(wq, in);
        memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);

        err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);

        kvfree(create_in);
        return err;
}

static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
                           struct mlx5_core_srq *srq)
{
        return mlx5_core_destroy_rmp(dev, srq->srqn);
}

static int arm_rmp_cmd(struct mlx5_core_dev *dev,
                       struct mlx5_core_srq *srq,
                       u16 lwm)
{
        void *in;
        void *rmpc;
        void *wq;
        void *bitmask;
        int err;

        in = kvzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in), GFP_KERNEL);
        if (!in)
                return -ENOMEM;

        rmpc    = MLX5_ADDR_OF(modify_rmp_in, in, ctx);
        bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask);
        wq      = MLX5_ADDR_OF(rmpc, rmpc, wq);

        MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY);
        MLX5_SET(modify_rmp_in, in, rmpn, srq->srqn);
        MLX5_SET(wq, wq, lwm, lwm);
        MLX5_SET(rmp_bitmask, bitmask, lwm, 1);
        MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);

        err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in));

        kvfree(in);
        return err;
}

static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                         struct mlx5_srq_attr *out)
{
        u32 *rmp_out;
        void *rmpc;
        int err;

        rmp_out = kvzalloc(MLX5_ST_SZ_BYTES(query_rmp_out), GFP_KERNEL);
        if (!rmp_out)
                return -ENOMEM;

        err = mlx5_core_query_rmp(dev, srq->srqn, rmp_out);
        if (err)
                goto out;

        rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
        get_wq(MLX5_ADDR_OF(rmpc, rmpc, wq), out);
        if (MLX5_GET(rmpc, rmpc, state) != MLX5_RMPC_STATE_RDY)
                out->flags |= MLX5_SRQ_FLAG_ERR;

out:
        kvfree(rmp_out);
        return err;
}

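/*
 * XRQs back tag-matching (IB_SRQT_TM) SRQs.  Beyond the usual
 * work-queue attributes they carry a tag-matching topology context and
 * an optional rendezvous offload.
 */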
static int create_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                          struct mlx5_srq_attr *in)
{
        u32 create_out[MLX5_ST_SZ_DW(create_xrq_out)] = {0};
        void *create_in;
        void *xrqc;
        void *wq;
        int pas_size;
        int inlen;
        int err;

        pas_size = get_pas_size(in);
        inlen = MLX5_ST_SZ_BYTES(create_xrq_in) + pas_size;
        create_in = kvzalloc(inlen, GFP_KERNEL);
        if (!create_in)
                return -ENOMEM;

        xrqc = MLX5_ADDR_OF(create_xrq_in, create_in, xrq_context);
        wq = MLX5_ADDR_OF(xrqc, xrqc, wq);

        set_wq(wq, in);
        memcpy(MLX5_ADDR_OF(xrqc, xrqc, wq.pas), in->pas, pas_size);

        if (in->type == IB_SRQT_TM) {
                MLX5_SET(xrqc, xrqc, topology, MLX5_XRQC_TOPOLOGY_TAG_MATCHING);
                if (in->flags & MLX5_SRQ_FLAG_RNDV)
                        MLX5_SET(xrqc, xrqc, offload, MLX5_XRQC_OFFLOAD_RNDV);
                MLX5_SET(xrqc, xrqc,
                         tag_matching_topology_context.log_matching_list_sz,
                         in->tm_log_list_size);
        }
        MLX5_SET(xrqc, xrqc, user_index, in->user_index);
        MLX5_SET(xrqc, xrqc, cqn, in->cqn);
        MLX5_SET(create_xrq_in, create_in, opcode, MLX5_CMD_OP_CREATE_XRQ);
        err = mlx5_cmd_exec(dev, create_in, inlen, create_out,
                            sizeof(create_out));
        kvfree(create_in);
        if (!err)
                srq->srqn = MLX5_GET(create_xrq_out, create_out, xrqn);

        return err;
}

static int destroy_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
{
        u32 in[MLX5_ST_SZ_DW(destroy_xrq_in)] = {0};
        u32 out[MLX5_ST_SZ_DW(destroy_xrq_out)] = {0};

        MLX5_SET(destroy_xrq_in, in, opcode, MLX5_CMD_OP_DESTROY_XRQ);
        MLX5_SET(destroy_xrq_in, in, xrqn, srq->srqn);

        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static int arm_xrq_cmd(struct mlx5_core_dev *dev,
                       struct mlx5_core_srq *srq,
                       u16 lwm)
{
        u32 out[MLX5_ST_SZ_DW(arm_rq_out)] = {0};
        u32 in[MLX5_ST_SZ_DW(arm_rq_in)] = {0};

        MLX5_SET(arm_rq_in, in, opcode, MLX5_CMD_OP_ARM_RQ);
        MLX5_SET(arm_rq_in, in, op_mod, MLX5_ARM_RQ_IN_OP_MOD_XRQ);
        MLX5_SET(arm_rq_in, in, srq_number, srq->srqn);
        MLX5_SET(arm_rq_in, in, lwm, lwm);

        return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}

static int query_xrq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                         struct mlx5_srq_attr *out)
{
        u32 in[MLX5_ST_SZ_DW(query_xrq_in)] = {0};
        u32 *xrq_out;
        int outlen = MLX5_ST_SZ_BYTES(query_xrq_out);
        void *xrqc;
        int err;

        xrq_out = kvzalloc(outlen, GFP_KERNEL);
        if (!xrq_out)
                return -ENOMEM;

        MLX5_SET(query_xrq_in, in, opcode, MLX5_CMD_OP_QUERY_XRQ);
        MLX5_SET(query_xrq_in, in, xrqn, srq->srqn);

        err = mlx5_cmd_exec(dev, in, sizeof(in), xrq_out, outlen);
        if (err)
                goto out;

        xrqc = MLX5_ADDR_OF(query_xrq_out, xrq_out, xrq_context);
        get_wq(MLX5_ADDR_OF(xrqc, xrqc, wq), out);
        if (MLX5_GET(xrqc, xrqc, state) != MLX5_XRQC_STATE_GOOD)
                out->flags |= MLX5_SRQ_FLAG_ERR;

        out->tm_next_tag =
                MLX5_GET(xrqc, xrqc,
                         tag_matching_topology_context.append_next_index);
        out->tm_hw_phase_cnt =
                MLX5_GET(xrqc, xrqc,
                         tag_matching_topology_context.hw_phase_cnt);
        out->tm_sw_phase_cnt =
                MLX5_GET(xrqc, xrqc,
                         tag_matching_topology_context.sw_phase_cnt);

out:
        kvfree(xrq_out);
        return err;
}

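/*
 * Dispatch helpers: pre-ISSI devices use the native SRQ commands;
 * otherwise the object type recorded in srq->common.res selects the
 * XRC SRQ, XRQ or RMP implementation.
 */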
static int create_srq_split(struct mlx5_core_dev *dev,
                            struct mlx5_core_srq *srq,
                            struct mlx5_srq_attr *in)
{
        if (!dev->issi)
                return create_srq_cmd(dev, srq, in);
        switch (srq->common.res) {
        case MLX5_RES_XSRQ:
                return create_xrc_srq_cmd(dev, srq, in);
        case MLX5_RES_XRQ:
                return create_xrq_cmd(dev, srq, in);
        default:
                return create_rmp_cmd(dev, srq, in);
        }
}

static int destroy_srq_split(struct mlx5_core_dev *dev,
                             struct mlx5_core_srq *srq)
{
        if (!dev->issi)
                return destroy_srq_cmd(dev, srq);
        switch (srq->common.res) {
        case MLX5_RES_XSRQ:
                return destroy_xrc_srq_cmd(dev, srq);
        case MLX5_RES_XRQ:
                return destroy_xrq_cmd(dev, srq);
        default:
                return destroy_rmp_cmd(dev, srq);
        }
}

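/*
 * Create the backing object, then publish the SRQ in the radix tree so
 * mlx5_srq_event() and mlx5_core_get_srq() can find it by number.  The
 * initial reference is dropped by mlx5_core_destroy_srq().
 */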
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                         struct mlx5_srq_attr *in)
{
        int err;
        struct mlx5_srq_table *table = &dev->priv.srq_table;

        switch (in->type) {
        case IB_SRQT_XRC:
                srq->common.res = MLX5_RES_XSRQ;
                break;
        case IB_SRQT_TM:
                srq->common.res = MLX5_RES_XRQ;
                break;
        default:
                srq->common.res = MLX5_RES_SRQ;
        }

        err = create_srq_split(dev, srq, in);
        if (err)
                return err;

        atomic_set(&srq->refcount, 1);
        init_completion(&srq->free);

        spin_lock_irq(&table->lock);
        err = radix_tree_insert(&table->tree, srq->srqn, srq);
        spin_unlock_irq(&table->lock);
        if (err) {
                mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);
                goto err_destroy_srq_split;
        }

        return 0;

err_destroy_srq_split:
        destroy_srq_split(dev, srq);

        return err;
}
EXPORT_SYMBOL(mlx5_core_create_srq);

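/*
 * Unpublish the SRQ first so no new references can be taken, then wait
 * for any in-flight event handlers to drop theirs before returning.
 */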
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
{
        struct mlx5_srq_table *table = &dev->priv.srq_table;
        struct mlx5_core_srq *tmp;
        int err;

        spin_lock_irq(&table->lock);
        tmp = radix_tree_delete(&table->tree, srq->srqn);
        spin_unlock_irq(&table->lock);
        if (!tmp) {
                mlx5_core_warn(dev, "srq 0x%x not found in tree\n", srq->srqn);
                return -EINVAL;
        }
        if (tmp != srq) {
                mlx5_core_warn(dev, "corruption on srqn 0x%x\n", srq->srqn);
                return -EINVAL;
        }

        err = destroy_srq_split(dev, srq);
        if (err)
                return err;

        if (atomic_dec_and_test(&srq->refcount))
                complete(&srq->free);
        wait_for_completion(&srq->free);

        return 0;
}
EXPORT_SYMBOL(mlx5_core_destroy_srq);

int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                        struct mlx5_srq_attr *out)
{
        if (!dev->issi)
                return query_srq_cmd(dev, srq, out);
        switch (srq->common.res) {
        case MLX5_RES_XSRQ:
                return query_xrc_srq_cmd(dev, srq, out);
        case MLX5_RES_XRQ:
                return query_xrq_cmd(dev, srq, out);
        default:
                return query_rmp_cmd(dev, srq, out);
        }
}
EXPORT_SYMBOL(mlx5_core_query_srq);

int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                      u16 lwm, int is_srq)
{
        if (!dev->issi)
                return arm_srq_cmd(dev, srq, lwm, is_srq);
        switch (srq->common.res) {
        case MLX5_RES_XSRQ:
                return arm_xrc_srq_cmd(dev, srq, lwm);
        case MLX5_RES_XRQ:
                return arm_xrq_cmd(dev, srq, lwm);
        default:
                return arm_rmp_cmd(dev, srq, lwm);
        }
}
EXPORT_SYMBOL(mlx5_core_arm_srq);

void mlx5_init_srq_table(struct mlx5_core_dev *dev)
{
        struct mlx5_srq_table *table = &dev->priv.srq_table;

        memset(table, 0, sizeof(*table));
        spin_lock_init(&table->lock);
        INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
}

void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev)
{
        /* nothing */
}