/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include <linux/mlx5/srq.h>
#include <rdma/ib_verbs.h>
#include "mlx5_core.h"
#include "transobj.h"	/* mlx5_core_{create,destroy,modify,query}_rmp() */
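/*
 * SRQ bookkeeping for mlx5_core: each device keeps its SRQs in a radix tree
 * (dev->priv.srq_table) keyed by SRQ number, and the exported entry points
 * below wrap the firmware commands for the supported SRQ flavors (legacy
 * SRQ, XRC SRQ, RMP-backed SRQ).
 */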
void mlx5_srq_event(struct mlx5_core_dev *dev, u32 srqn, int event_type)
{
        struct mlx5_srq_table *table = &dev->priv.srq_table;
        struct mlx5_core_srq *srq;

        spin_lock(&table->lock);

        srq = radix_tree_lookup(&table->tree, srqn);
        if (srq)
                atomic_inc(&srq->refcount);

        spin_unlock(&table->lock);

        if (!srq) {
                mlx5_core_warn(dev, "Async event for bogus SRQ 0x%08x\n", srqn);
                return;
        }

        srq->event(srq, event_type);

        if (atomic_dec_and_test(&srq->refcount))
                complete(&srq->free);
}
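/*
 * Number of bytes needed for the PAS (physical address) array that trails
 * the context in the create commands.  In the SRQ context, log_page_size is
 * relative to 4KB (hence the +12), log_rq_stride is relative to 16 bytes
 * (hence the +4), and page_offset is counted in 64ths of a page.
 */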
static int get_pas_size(void *srqc)
{
        u32 log_page_size = MLX5_GET(srqc, srqc, log_page_size) + 12;
        u32 log_srq_size  = MLX5_GET(srqc, srqc, log_srq_size);
        u32 log_rq_stride = MLX5_GET(srqc, srqc, log_rq_stride);
        u32 page_offset   = MLX5_GET(srqc, srqc, page_offset);
        u32 po_quanta     = 1 << (log_page_size - 6);
        u32 rq_sz         = 1 << (log_srq_size + 4 + log_rq_stride);
        u32 page_size     = 1 << log_page_size;
        u32 rq_sz_po      = rq_sz + (page_offset * po_quanta);
        u32 rq_num_pas    = (rq_sz_po + page_size - 1) / page_size;

        return rq_num_pas * sizeof(u64);
}
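/*
 * Convert between the SRQ context layout (srqc) used by the legacy SRQ
 * commands and the RMP context layout (rmpc) used by the RMP commands.
 * With srqc_to_rmpc set, the srqc fields are written into rmpc and its
 * embedded wq; otherwise the rmpc/wq fields are copied back into srqc
 * (query path).
 */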
static void rmpc_srqc_reformat(void *srqc, void *rmpc, bool srqc_to_rmpc)
{
        void *wq = MLX5_ADDR_OF(rmpc, rmpc, wq);

        if (srqc_to_rmpc) {
                switch (MLX5_GET(srqc, srqc, state)) {
                case MLX5_SRQC_STATE_GOOD:
                        MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);
                        break;
                case MLX5_SRQC_STATE_ERROR:
                        MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_ERR);
                        break;
                default:
                        pr_warn("%s: %d: Unknown srq state = 0x%x\n", __func__,
                                __LINE__, MLX5_GET(srqc, srqc, state));
                        MLX5_SET(rmpc, rmpc, state, MLX5_GET(srqc, srqc, state));
                }

                MLX5_SET(wq, wq, wq_signature,  MLX5_GET(srqc, srqc, wq_signature));
                MLX5_SET(wq, wq, log_wq_pg_sz,  MLX5_GET(srqc, srqc, log_page_size));
                MLX5_SET(wq, wq, log_wq_stride, MLX5_GET(srqc, srqc, log_rq_stride) + 4);
                MLX5_SET(wq, wq, log_wq_sz,     MLX5_GET(srqc, srqc, log_srq_size));
                MLX5_SET(wq, wq, page_offset,   MLX5_GET(srqc, srqc, page_offset));
                MLX5_SET(wq, wq, lwm,           MLX5_GET(srqc, srqc, lwm));
                MLX5_SET(wq, wq, pd,            MLX5_GET(srqc, srqc, pd));
                MLX5_SET64(wq, wq, dbr_addr,    MLX5_GET64(srqc, srqc, dbr_addr));
        } else {
                switch (MLX5_GET(rmpc, rmpc, state)) {
                case MLX5_RMPC_STATE_RDY:
                        MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_GOOD);
                        break;
                case MLX5_RMPC_STATE_ERR:
                        MLX5_SET(srqc, srqc, state, MLX5_SRQC_STATE_ERROR);
                        break;
                default:
                        pr_warn("%s: %d: Unknown rmp state = 0x%x\n",
                                __func__, __LINE__,
                                MLX5_GET(rmpc, rmpc, state));
                        MLX5_SET(srqc, srqc, state,
                                 MLX5_GET(rmpc, rmpc, state));
                }

                MLX5_SET(srqc, srqc, wq_signature,  MLX5_GET(wq, wq, wq_signature));
                MLX5_SET(srqc, srqc, log_page_size, MLX5_GET(wq, wq, log_wq_pg_sz));
                MLX5_SET(srqc, srqc, log_rq_stride, MLX5_GET(wq, wq, log_wq_stride) - 4);
                MLX5_SET(srqc, srqc, log_srq_size,  MLX5_GET(wq, wq, log_wq_sz));
                MLX5_SET(srqc, srqc, page_offset,   MLX5_GET(wq, wq, page_offset));
                MLX5_SET(srqc, srqc, lwm,           MLX5_GET(wq, wq, lwm));
                MLX5_SET(srqc, srqc, pd,            MLX5_GET(wq, wq, pd));
                MLX5_SET64(srqc, srqc, dbr_addr,    MLX5_GET64(wq, wq, dbr_addr));
        }
}
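/*
 * Look up an SRQ by number under the table lock and take a reference on it.
 * Returns NULL when the SRQ is not in the tree; callers are expected to drop
 * the reference they were handed.
 */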
struct mlx5_core_srq *mlx5_core_get_srq(struct mlx5_core_dev *dev, u32 srqn)
{
        struct mlx5_srq_table *table = &dev->priv.srq_table;
        struct mlx5_core_srq *srq;

        spin_lock(&table->lock);

        srq = radix_tree_lookup(&table->tree, srqn);
        if (srq)
                atomic_inc(&srq->refcount);

        spin_unlock(&table->lock);

        return srq;
}
EXPORT_SYMBOL(mlx5_core_get_srq);
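/*
 * Command wrappers for devices running with ISSI 0: the original
 * CREATE/DESTROY/ARM/QUERY_SRQ mailbox commands.
 */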
static int create_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                          struct mlx5_create_srq_mbox_in *in, int inlen)
{
        struct mlx5_create_srq_mbox_out out;
        int err;

        memset(&out, 0, sizeof(out));

        in->hdr.opcode = cpu_to_be16(MLX5_CMD_OP_CREATE_SRQ);

        err = mlx5_cmd_exec_check_status(dev, (u32 *)in, inlen, (u32 *)(&out),
                                         sizeof(out));

        srq->srqn = be32_to_cpu(out.srqn) & 0xffffff;

        return err;
}
static int destroy_srq_cmd(struct mlx5_core_dev *dev,
                           struct mlx5_core_srq *srq)
{
        struct mlx5_destroy_srq_mbox_in in;
        struct mlx5_destroy_srq_mbox_out out;

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_SRQ);
        in.srqn = cpu_to_be32(srq->srqn);

        return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in),
                                          (u32 *)(&out), sizeof(out));
}
static int arm_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                       u16 lwm, int is_srq)
{
        struct mlx5_arm_srq_mbox_in in;
        struct mlx5_arm_srq_mbox_out out;

        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));

        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ARM_RQ);
        in.hdr.opmod = cpu_to_be16(!!is_srq);
        in.srqn = cpu_to_be32(srq->srqn);
        in.lwm = cpu_to_be16(lwm);

        return mlx5_cmd_exec_check_status(dev, (u32 *)(&in),
                                          sizeof(in), (u32 *)(&out),
                                          sizeof(out));
}
static int query_srq_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                         struct mlx5_query_srq_mbox_out *out)
{
        struct mlx5_query_srq_mbox_in in;

        memset(&in, 0, sizeof(in));

        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_QUERY_SRQ);
        in.srqn = cpu_to_be32(srq->srqn);

        return mlx5_cmd_exec_check_status(dev, (u32 *)(&in), sizeof(in),
                                          (u32 *)out, sizeof(*out));
}
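/*
 * Command wrappers used when ISSI >= 1 and the SRQ is an XRC SRQ: the
 * caller-provided SRQ context is repacked into the xrc_srqc layout and the
 * *_XRC_SRQ commands are used instead.
 */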
static int create_xrc_srq_cmd(struct mlx5_core_dev *dev,
                              struct mlx5_core_srq *srq,
                              struct mlx5_create_srq_mbox_in *in,
                              int srq_inlen)
{
        u32 create_out[MLX5_ST_SZ_DW(create_xrc_srq_out)];
        void *create_in;
        void *srqc;
        void *xrc_srqc;
        void *pas;
        int pas_size;
        int inlen;
        int err;

        srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
        pas_size = get_pas_size(srqc);
        inlen = MLX5_ST_SZ_BYTES(create_xrc_srq_in) + pas_size;
        create_in = mlx5_vzalloc(inlen);
        if (!create_in)
                return -ENOMEM;

        xrc_srqc = MLX5_ADDR_OF(create_xrc_srq_in, create_in,
                                xrc_srq_context_entry);
        pas = MLX5_ADDR_OF(create_xrc_srq_in, create_in, pas);

        memcpy(xrc_srqc, srqc, MLX5_ST_SZ_BYTES(srqc));
        memcpy(pas, in->pas, pas_size);
        /* 0xffffff means we ask to work with cqe version 0 */
        MLX5_SET(xrc_srqc, xrc_srqc, user_index, 0xffffff);
        MLX5_SET(create_xrc_srq_in, create_in, opcode,
                 MLX5_CMD_OP_CREATE_XRC_SRQ);

        memset(create_out, 0, sizeof(create_out));
        err = mlx5_cmd_exec_check_status(dev, create_in, inlen, create_out,
                                         sizeof(create_out));
        if (err)
                goto out;

        srq->srqn = MLX5_GET(create_xrc_srq_out, create_out, xrc_srqn);

out:
        kvfree(create_in);
        return err;
}
static int destroy_xrc_srq_cmd(struct mlx5_core_dev *dev,
                               struct mlx5_core_srq *srq)
{
        u32 xrcsrq_in[MLX5_ST_SZ_DW(destroy_xrc_srq_in)];
        u32 xrcsrq_out[MLX5_ST_SZ_DW(destroy_xrc_srq_out)];

        memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
        memset(xrcsrq_out, 0, sizeof(xrcsrq_out));

        MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, opcode,
                 MLX5_CMD_OP_DESTROY_XRC_SRQ);
        MLX5_SET(destroy_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);

        return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
                                          xrcsrq_out, sizeof(xrcsrq_out));
}
static int arm_xrc_srq_cmd(struct mlx5_core_dev *dev,
                           struct mlx5_core_srq *srq, u16 lwm)
{
        u32 xrcsrq_in[MLX5_ST_SZ_DW(arm_xrc_srq_in)];
        u32 xrcsrq_out[MLX5_ST_SZ_DW(arm_xrc_srq_out)];

        memset(xrcsrq_in, 0, sizeof(xrcsrq_in));
        memset(xrcsrq_out, 0, sizeof(xrcsrq_out));

        MLX5_SET(arm_xrc_srq_in, xrcsrq_in, opcode, MLX5_CMD_OP_ARM_XRC_SRQ);
        MLX5_SET(arm_xrc_srq_in, xrcsrq_in, op_mod,
                 MLX5_ARM_XRC_SRQ_IN_OP_MOD_XRC_SRQ);
        MLX5_SET(arm_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
        MLX5_SET(arm_xrc_srq_in, xrcsrq_in, lwm, lwm);

        return mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
                                          xrcsrq_out, sizeof(xrcsrq_out));
}
static int query_xrc_srq_cmd(struct mlx5_core_dev *dev,
                             struct mlx5_core_srq *srq,
                             struct mlx5_query_srq_mbox_out *out)
{
        u32 xrcsrq_in[MLX5_ST_SZ_DW(query_xrc_srq_in)];
        u32 *xrcsrq_out;
        void *srqc;
        void *xrc_srqc;
        int err;

        xrcsrq_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_xrc_srq_out));
        if (!xrcsrq_out)
                return -ENOMEM;

        memset(xrcsrq_in, 0, sizeof(xrcsrq_in));

        MLX5_SET(query_xrc_srq_in, xrcsrq_in, opcode,
                 MLX5_CMD_OP_QUERY_XRC_SRQ);
        MLX5_SET(query_xrc_srq_in, xrcsrq_in, xrc_srqn, srq->srqn);
        err = mlx5_cmd_exec_check_status(dev, xrcsrq_in, sizeof(xrcsrq_in),
                                         xrcsrq_out,
                                         MLX5_ST_SZ_BYTES(query_xrc_srq_out));
        if (err)
                goto out;

        xrc_srqc = MLX5_ADDR_OF(query_xrc_srq_out, xrcsrq_out,
                                xrc_srq_context_entry);
        srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
        memcpy(srqc, xrc_srqc, MLX5_ST_SZ_BYTES(srqc));

out:
        kvfree(xrcsrq_out);
        return err;
}
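/*
 * Command wrappers used when ISSI >= 1 for non-XRC SRQs, which are backed
 * by RMP (Receive Memory Pool) objects; the context is translated with
 * rmpc_srqc_reformat() and the RMP transport-object helpers issue the
 * commands.
 */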
static int create_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                          struct mlx5_create_srq_mbox_in *in, int srq_inlen)
{
        void *create_in;
        void *rmpc;
        void *srqc;
        int pas_size;
        int inlen;
        int err;

        srqc = MLX5_ADDR_OF(create_srq_in, in, srq_context_entry);
        pas_size = get_pas_size(srqc);
        inlen = MLX5_ST_SZ_BYTES(create_rmp_in) + pas_size;
        create_in = mlx5_vzalloc(inlen);
        if (!create_in)
                return -ENOMEM;

        rmpc = MLX5_ADDR_OF(create_rmp_in, create_in, ctx);

        memcpy(MLX5_ADDR_OF(rmpc, rmpc, wq.pas), in->pas, pas_size);
        rmpc_srqc_reformat(srqc, rmpc, true);

        err = mlx5_core_create_rmp(dev, create_in, inlen, &srq->srqn);

        kvfree(create_in);
        return err;
}
static int destroy_rmp_cmd(struct mlx5_core_dev *dev,
                           struct mlx5_core_srq *srq)
{
        return mlx5_core_destroy_rmp(dev, srq->srqn);
}
static int arm_rmp_cmd(struct mlx5_core_dev *dev,
                       struct mlx5_core_srq *srq,
                       u16 lwm)
{
        void *in;
        void *rmpc;
        void *wq;
        void *bitmask;
        int err;

        in = mlx5_vzalloc(MLX5_ST_SZ_BYTES(modify_rmp_in));
        if (!in)
                return -ENOMEM;

        rmpc = MLX5_ADDR_OF(modify_rmp_in, in, ctx);
        bitmask = MLX5_ADDR_OF(modify_rmp_in, in, bitmask);
        wq = MLX5_ADDR_OF(rmpc, rmpc, wq);

        MLX5_SET(modify_rmp_in, in, rmp_state, MLX5_RMPC_STATE_RDY);
        MLX5_SET(modify_rmp_in, in, rmpn, srq->srqn);
        MLX5_SET(wq, wq, lwm, lwm);
        MLX5_SET(rmp_bitmask, bitmask, lwm, 1);
        MLX5_SET(rmpc, rmpc, state, MLX5_RMPC_STATE_RDY);

        err = mlx5_core_modify_rmp(dev, in, MLX5_ST_SZ_BYTES(modify_rmp_in));

        kvfree(in);
        return err;
}
static int query_rmp_cmd(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                         struct mlx5_query_srq_mbox_out *out)
{
        u32 *rmp_out;
        void *rmpc;
        void *srqc;
        int err;

        rmp_out = mlx5_vzalloc(MLX5_ST_SZ_BYTES(query_rmp_out));
        if (!rmp_out)
                return -ENOMEM;

        err = mlx5_core_query_rmp(dev, srq->srqn, rmp_out);
        if (err)
                goto out;

        srqc = MLX5_ADDR_OF(query_srq_out, out, srq_context_entry);
        rmpc = MLX5_ADDR_OF(query_rmp_out, rmp_out, rmp_context);
        rmpc_srqc_reformat(srqc, rmpc, false);

out:
        kvfree(rmp_out);
        return err;
}
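/*
 * Dispatch to the right command set: legacy SRQ commands when the device
 * reports ISSI 0, otherwise XRC SRQ or RMP commands depending on the
 * resource type stored in srq->common.res.
 */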
static int create_srq_split(struct mlx5_core_dev *dev,
                            struct mlx5_core_srq *srq,
                            struct mlx5_create_srq_mbox_in *in,
                            int inlen, int is_xrc)
{
        if (!dev->issi)
                return create_srq_cmd(dev, srq, in, inlen);
        else if (srq->common.res == MLX5_RES_XSRQ)
                return create_xrc_srq_cmd(dev, srq, in, inlen);
        else
                return create_rmp_cmd(dev, srq, in, inlen);
}
static int destroy_srq_split(struct mlx5_core_dev *dev,
                             struct mlx5_core_srq *srq)
{
        if (!dev->issi)
                return destroy_srq_cmd(dev, srq);
        else if (srq->common.res == MLX5_RES_XSRQ)
                return destroy_xrc_srq_cmd(dev, srq);
        else
                return destroy_rmp_cmd(dev, srq);
}
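/*
 * Create the SRQ object in firmware and publish it in the radix tree so
 * mlx5_srq_event() and mlx5_core_get_srq() can find it.  If the tree insert
 * fails, the firmware object is torn down again.
 */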
int mlx5_core_create_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                         struct mlx5_create_srq_mbox_in *in, int inlen,
                         int is_xrc)
{
        struct mlx5_srq_table *table = &dev->priv.srq_table;
        int err;

        srq->common.res = is_xrc ? MLX5_RES_XSRQ : MLX5_RES_SRQ;

        err = create_srq_split(dev, srq, in, inlen, is_xrc);
        if (err)
                return err;

        atomic_set(&srq->refcount, 1);
        init_completion(&srq->free);

        spin_lock_irq(&table->lock);
        err = radix_tree_insert(&table->tree, srq->srqn, srq);
        spin_unlock_irq(&table->lock);
        if (err) {
                mlx5_core_warn(dev, "err %d, srqn 0x%x\n", err, srq->srqn);
                goto err_destroy_srq_split;
        }

        return 0;

err_destroy_srq_split:
        destroy_srq_split(dev, srq);

        return err;
}
EXPORT_SYMBOL(mlx5_core_create_srq);
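/*
 * Unpublish the SRQ first so no new references can be taken, destroy the
 * firmware object, then wait until the last reference (e.g. an in-flight
 * event) is dropped.
 */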
int mlx5_core_destroy_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq)
{
        struct mlx5_srq_table *table = &dev->priv.srq_table;
        struct mlx5_core_srq *tmp;
        int err;

        spin_lock_irq(&table->lock);
        tmp = radix_tree_delete(&table->tree, srq->srqn);
        spin_unlock_irq(&table->lock);
        if (!tmp) {
                mlx5_core_warn(dev, "srq 0x%x not found in tree\n", srq->srqn);
                return -EINVAL;
        }
        if (tmp != srq) {
                mlx5_core_warn(dev, "corruption on srqn 0x%x\n", srq->srqn);
                return -EINVAL;
        }

        err = destroy_srq_split(dev, srq);
        if (err)
                return err;

        if (atomic_dec_and_test(&srq->refcount))
                complete(&srq->free);
        wait_for_completion(&srq->free);

        return 0;
}
EXPORT_SYMBOL(mlx5_core_destroy_srq);
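/* Query and arm dispatch per SRQ flavor, mirroring create/destroy above. */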
int mlx5_core_query_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                        struct mlx5_query_srq_mbox_out *out)
{
        if (!dev->issi)
                return query_srq_cmd(dev, srq, out);
        else if (srq->common.res == MLX5_RES_XSRQ)
                return query_xrc_srq_cmd(dev, srq, out);
        else
                return query_rmp_cmd(dev, srq, out);
}
EXPORT_SYMBOL(mlx5_core_query_srq);
int mlx5_core_arm_srq(struct mlx5_core_dev *dev, struct mlx5_core_srq *srq,
                      u16 lwm, int is_srq)
{
        if (!dev->issi)
                return arm_srq_cmd(dev, srq, lwm, is_srq);
        else if (srq->common.res == MLX5_RES_XSRQ)
                return arm_xrc_srq_cmd(dev, srq, lwm);
        else
                return arm_rmp_cmd(dev, srq, lwm);
}
EXPORT_SYMBOL(mlx5_core_arm_srq);
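/* Per-device SRQ table setup and teardown. */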
void mlx5_init_srq_table(struct mlx5_core_dev *dev)
{
        struct mlx5_srq_table *table = &dev->priv.srq_table;

        memset(table, 0, sizeof(*table));
        spin_lock_init(&table->lock);
        INIT_RADIX_TREE(&table->tree, GFP_ATOMIC);
}
void mlx5_cleanup_srq_table(struct mlx5_core_dev *dev)
{
        /* nothing to clean up */
}