/*
 * Copyright (c) 2013-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/eq.h>
#include <linux/debugfs.h>

#include "mlx5_core.h"
enum {
	MLX5_CMD_DELIVERY_STAT_OK			= 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR		= 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR			= 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR		= 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR	= 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR		= 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR			= 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR		= 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR		= 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR	= 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR		= 0x10,
};
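/* Note (added for readability): each in-flight command is tracked by a
 * mlx5_cmd_work_ent.  The entry is reference counted because it can be
 * touched from several contexts - the issuer, the completion handler and
 * the timeout work - and the last reference to drop frees the entry and
 * returns its command-queue slot.
 */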
static struct mlx5_cmd_work_ent *
cmd_alloc_ent(struct mlx5_cmd *cmd, struct mlx5_cmd_msg *in,
	      struct mlx5_cmd_msg *out, void *uout, int uout_size,
	      mlx5_cmd_cbk_t cbk, void *context, int page_queue)
{
	gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
	struct mlx5_cmd_work_ent *ent;

	ent = kzalloc(sizeof(*ent), alloc_flags);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->in		= in;
	ent->out	= out;
	ent->uout	= uout;
	ent->uout_size	= uout_size;
	ent->callback	= cbk;
	ent->context	= context;
	ent->cmd	= cmd;
	ent->page_queue	= page_queue;
	refcount_set(&ent->refcnt, 1);

	return ent;
}

static void cmd_free_ent(struct mlx5_cmd_work_ent *ent)
{
	kfree(ent);
}
static u8 alloc_token(struct mlx5_cmd *cmd)
{
	u8 token;

	spin_lock(&cmd->token_lock);
	cmd->token++;
	if (cmd->token == 0)
		cmd->token++;
	token = cmd->token;
	spin_unlock(&cmd->token_lock);

	return token;
}
static int cmd_alloc_index(struct mlx5_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
	if (ret < cmd->max_reg_cmds)
		clear_bit(ret, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);

	return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
}
static void cmd_free_index(struct mlx5_cmd *cmd, int idx)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	set_bit(idx, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}
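/* Added note: a set bit in cmd->bitmask means the corresponding command
 * queue slot is free; allocation clears the bit, freeing sets it again.
 * Both operations run under alloc_lock so the bitmap and ent_arr[] stay
 * consistent with respect to the completion path.
 */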
static void cmd_ent_get(struct mlx5_cmd_work_ent *ent)
{
	refcount_inc(&ent->refcnt);
}

static void cmd_ent_put(struct mlx5_cmd_work_ent *ent)
{
	if (!refcount_dec_and_test(&ent->refcnt))
		return;

	cmd_free_index(ent->cmd, ent->idx);
	cmd_free_ent(ent);
}
static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
	return cmd->cmd_buf + (idx << cmd->log_stride);
}

static int mlx5_calc_cmd_blocks(struct mlx5_cmd_msg *msg)
{
	int size = msg->len;
	int blen = size - min_t(int, sizeof(msg->first.data), size);

	return DIV_ROUND_UP(blen, MLX5_CMD_DATA_BLOCK_SIZE);
}
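/* Added worked example (assuming the usual 16-byte inline area in
 * msg->first.data and MLX5_CMD_DATA_BLOCK_SIZE == 512): a 1040-byte
 * message keeps 16 bytes inline and needs
 * DIV_ROUND_UP(1040 - 16, 512) = 2 chained mailbox blocks.
 */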
static u8 xor8_buf(void *buf, size_t offset, int len)
{
	u8 *ptr = buf;
	u8 sum = 0;
	int end = len + offset;
	int i;

	for (i = offset; i < end; i++)
		sum ^= ptr[i];

	return sum;
}
static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);
	int xor_len = sizeof(*block) - sizeof(block->data) - 1;

	if (xor8_buf(block, rsvd0_off, xor_len) != 0xff)
		return -EINVAL;

	if (xor8_buf(block, 0, sizeof(*block)) != 0xff)
		return -EINVAL;

	return 0;
}
static void calc_block_sig(struct mlx5_cmd_prot_block *block)
{
	int ctrl_xor_len = sizeof(*block) - sizeof(block->data) - 2;
	size_t rsvd0_off = offsetof(struct mlx5_cmd_prot_block, rsvd0);

	block->ctrl_sig = ~xor8_buf(block, rsvd0_off, ctrl_xor_len);
	block->sig = ~xor8_buf(block, 0, sizeof(*block) - 1);
}
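/* Added note on the checksum scheme: each signature byte is the bitwise
 * complement of the XOR over the covered region, so once the signature is
 * written the XOR over the region *including* the signature byte comes out
 * as 0xff.  verify_block_sig() above checks exactly that invariant.
 */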
static void calc_chain_sig(struct mlx5_cmd_msg *msg)
{
	struct mlx5_cmd_mailbox *next = msg->next;
	int n = mlx5_calc_cmd_blocks(msg);
	int i;

	for (i = 0; i < n && next; i++) {
		calc_block_sig(next->buf);
		next = next->next;
	}
}
static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
	ent->lay->sig = ~xor8_buf(ent->lay, 0, sizeof(*ent->lay));
	if (csum) {
		calc_chain_sig(ent->in);
		calc_chain_sig(ent->out);
	}
}
static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
	unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
	u8 own;

	do {
		own = READ_ONCE(ent->lay->status_own);
		if (!(own & CMD_OWNER_HW)) {
			ent->ret = 0;
			return;
		}
		cond_resched();
	} while (time_before(jiffies, poll_end));

	ent->ret = -ETIMEDOUT;
}
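/* Added note: ownership of a command descriptor is handed to firmware by
 * setting CMD_OWNER_HW in status_own and ringing the doorbell; firmware
 * clears the bit when it is done.  In polling mode the driver simply spins
 * on that bit here instead of waiting for a completion EQE.
 */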
static int verify_signature(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd_mailbox *next = ent->out->next;
	int n = mlx5_calc_cmd_blocks(ent->out);
	int err;
	u8 sig;
	int i;

	sig = xor8_buf(ent->lay, 0, sizeof(*ent->lay));
	if (sig != 0xff)
		return -EINVAL;

	for (i = 0; i < n && next; i++) {
		err = verify_block_sig(next->buf);
		if (err)
			return err;

		next = next->next;
	}

	return 0;
}
static void dump_buf(void *buf, int size, int data_only, int offset)
{
	__be32 *p = buf;
	int i;

	for (i = 0; i < size; i += 16) {
		pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
			 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
			 be32_to_cpu(p[3]));
		p += 4;
		offset += 16;
	}
	if (!data_only)
		pr_debug("\n");
}
static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
				       u32 *synd, u8 *status)
{
	switch (op) {
289 case MLX5_CMD_OP_TEARDOWN_HCA
:
290 case MLX5_CMD_OP_DISABLE_HCA
:
291 case MLX5_CMD_OP_MANAGE_PAGES
:
292 case MLX5_CMD_OP_DESTROY_MKEY
:
293 case MLX5_CMD_OP_DESTROY_EQ
:
294 case MLX5_CMD_OP_DESTROY_CQ
:
295 case MLX5_CMD_OP_DESTROY_QP
:
296 case MLX5_CMD_OP_DESTROY_PSV
:
297 case MLX5_CMD_OP_DESTROY_SRQ
:
298 case MLX5_CMD_OP_DESTROY_XRC_SRQ
:
299 case MLX5_CMD_OP_DESTROY_XRQ
:
300 case MLX5_CMD_OP_DESTROY_DCT
:
301 case MLX5_CMD_OP_DEALLOC_Q_COUNTER
:
302 case MLX5_CMD_OP_DESTROY_SCHEDULING_ELEMENT
:
303 case MLX5_CMD_OP_DESTROY_QOS_PARA_VPORT
:
304 case MLX5_CMD_OP_DEALLOC_PD
:
305 case MLX5_CMD_OP_DEALLOC_UAR
:
306 case MLX5_CMD_OP_DETACH_FROM_MCG
:
307 case MLX5_CMD_OP_DEALLOC_XRCD
:
308 case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN
:
309 case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT
:
310 case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY
:
311 case MLX5_CMD_OP_DESTROY_LAG
:
312 case MLX5_CMD_OP_DESTROY_VPORT_LAG
:
313 case MLX5_CMD_OP_DESTROY_TIR
:
314 case MLX5_CMD_OP_DESTROY_SQ
:
315 case MLX5_CMD_OP_DESTROY_RQ
:
316 case MLX5_CMD_OP_DESTROY_RMP
:
317 case MLX5_CMD_OP_DESTROY_TIS
:
318 case MLX5_CMD_OP_DESTROY_RQT
:
319 case MLX5_CMD_OP_DESTROY_FLOW_TABLE
:
320 case MLX5_CMD_OP_DESTROY_FLOW_GROUP
:
321 case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY
:
322 case MLX5_CMD_OP_DEALLOC_FLOW_COUNTER
:
323 case MLX5_CMD_OP_2ERR_QP
:
324 case MLX5_CMD_OP_2RST_QP
:
325 case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT
:
326 case MLX5_CMD_OP_MODIFY_FLOW_TABLE
:
327 case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY
:
328 case MLX5_CMD_OP_SET_FLOW_TABLE_ROOT
:
329 case MLX5_CMD_OP_DEALLOC_PACKET_REFORMAT_CONTEXT
:
330 case MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT
:
331 case MLX5_CMD_OP_FPGA_DESTROY_QP
:
332 case MLX5_CMD_OP_DESTROY_GENERAL_OBJECT
:
333 case MLX5_CMD_OP_DEALLOC_MEMIC
:
334 case MLX5_CMD_OP_PAGE_FAULT_RESUME
:
335 case MLX5_CMD_OP_QUERY_ESW_FUNCTIONS
:
336 return MLX5_CMD_STAT_OK
;
338 case MLX5_CMD_OP_QUERY_HCA_CAP
:
339 case MLX5_CMD_OP_QUERY_ADAPTER
:
340 case MLX5_CMD_OP_INIT_HCA
:
341 case MLX5_CMD_OP_ENABLE_HCA
:
342 case MLX5_CMD_OP_QUERY_PAGES
:
343 case MLX5_CMD_OP_SET_HCA_CAP
:
344 case MLX5_CMD_OP_QUERY_ISSI
:
345 case MLX5_CMD_OP_SET_ISSI
:
346 case MLX5_CMD_OP_CREATE_MKEY
:
347 case MLX5_CMD_OP_QUERY_MKEY
:
348 case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS
:
349 case MLX5_CMD_OP_CREATE_EQ
:
350 case MLX5_CMD_OP_QUERY_EQ
:
351 case MLX5_CMD_OP_GEN_EQE
:
352 case MLX5_CMD_OP_CREATE_CQ
:
353 case MLX5_CMD_OP_QUERY_CQ
:
354 case MLX5_CMD_OP_MODIFY_CQ
:
355 case MLX5_CMD_OP_CREATE_QP
:
356 case MLX5_CMD_OP_RST2INIT_QP
:
357 case MLX5_CMD_OP_INIT2RTR_QP
:
358 case MLX5_CMD_OP_RTR2RTS_QP
:
359 case MLX5_CMD_OP_RTS2RTS_QP
:
360 case MLX5_CMD_OP_SQERR2RTS_QP
:
361 case MLX5_CMD_OP_QUERY_QP
:
362 case MLX5_CMD_OP_SQD_RTS_QP
:
363 case MLX5_CMD_OP_INIT2INIT_QP
:
364 case MLX5_CMD_OP_CREATE_PSV
:
365 case MLX5_CMD_OP_CREATE_SRQ
:
366 case MLX5_CMD_OP_QUERY_SRQ
:
367 case MLX5_CMD_OP_ARM_RQ
:
368 case MLX5_CMD_OP_CREATE_XRC_SRQ
:
369 case MLX5_CMD_OP_QUERY_XRC_SRQ
:
370 case MLX5_CMD_OP_ARM_XRC_SRQ
:
371 case MLX5_CMD_OP_CREATE_XRQ
:
372 case MLX5_CMD_OP_QUERY_XRQ
:
373 case MLX5_CMD_OP_ARM_XRQ
:
374 case MLX5_CMD_OP_CREATE_DCT
:
375 case MLX5_CMD_OP_DRAIN_DCT
:
376 case MLX5_CMD_OP_QUERY_DCT
:
377 case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION
:
378 case MLX5_CMD_OP_QUERY_VPORT_STATE
:
379 case MLX5_CMD_OP_MODIFY_VPORT_STATE
:
380 case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT
:
381 case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT
:
382 case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT
:
383 case MLX5_CMD_OP_QUERY_ROCE_ADDRESS
:
384 case MLX5_CMD_OP_SET_ROCE_ADDRESS
:
385 case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT
:
386 case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT
:
387 case MLX5_CMD_OP_QUERY_HCA_VPORT_GID
:
388 case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY
:
389 case MLX5_CMD_OP_QUERY_VNIC_ENV
:
390 case MLX5_CMD_OP_QUERY_VPORT_COUNTER
:
391 case MLX5_CMD_OP_ALLOC_Q_COUNTER
:
392 case MLX5_CMD_OP_QUERY_Q_COUNTER
:
393 case MLX5_CMD_OP_SET_MONITOR_COUNTER
:
394 case MLX5_CMD_OP_ARM_MONITOR_COUNTER
:
395 case MLX5_CMD_OP_SET_PP_RATE_LIMIT
:
396 case MLX5_CMD_OP_QUERY_RATE_LIMIT
:
397 case MLX5_CMD_OP_CREATE_SCHEDULING_ELEMENT
:
398 case MLX5_CMD_OP_QUERY_SCHEDULING_ELEMENT
:
399 case MLX5_CMD_OP_MODIFY_SCHEDULING_ELEMENT
:
400 case MLX5_CMD_OP_CREATE_QOS_PARA_VPORT
:
401 case MLX5_CMD_OP_ALLOC_PD
:
402 case MLX5_CMD_OP_ALLOC_UAR
:
403 case MLX5_CMD_OP_CONFIG_INT_MODERATION
:
404 case MLX5_CMD_OP_ACCESS_REG
:
405 case MLX5_CMD_OP_ATTACH_TO_MCG
:
406 case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG
:
407 case MLX5_CMD_OP_MAD_IFC
:
408 case MLX5_CMD_OP_QUERY_MAD_DEMUX
:
409 case MLX5_CMD_OP_SET_MAD_DEMUX
:
410 case MLX5_CMD_OP_NOP
:
411 case MLX5_CMD_OP_ALLOC_XRCD
:
412 case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN
:
413 case MLX5_CMD_OP_QUERY_CONG_STATUS
:
414 case MLX5_CMD_OP_MODIFY_CONG_STATUS
:
415 case MLX5_CMD_OP_QUERY_CONG_PARAMS
:
416 case MLX5_CMD_OP_MODIFY_CONG_PARAMS
:
417 case MLX5_CMD_OP_QUERY_CONG_STATISTICS
:
418 case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT
:
419 case MLX5_CMD_OP_SET_L2_TABLE_ENTRY
:
420 case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY
:
421 case MLX5_CMD_OP_CREATE_LAG
:
422 case MLX5_CMD_OP_MODIFY_LAG
:
423 case MLX5_CMD_OP_QUERY_LAG
:
424 case MLX5_CMD_OP_CREATE_VPORT_LAG
:
425 case MLX5_CMD_OP_CREATE_TIR
:
426 case MLX5_CMD_OP_MODIFY_TIR
:
427 case MLX5_CMD_OP_QUERY_TIR
:
428 case MLX5_CMD_OP_CREATE_SQ
:
429 case MLX5_CMD_OP_MODIFY_SQ
:
430 case MLX5_CMD_OP_QUERY_SQ
:
431 case MLX5_CMD_OP_CREATE_RQ
:
432 case MLX5_CMD_OP_MODIFY_RQ
:
433 case MLX5_CMD_OP_QUERY_RQ
:
434 case MLX5_CMD_OP_CREATE_RMP
:
435 case MLX5_CMD_OP_MODIFY_RMP
:
436 case MLX5_CMD_OP_QUERY_RMP
:
437 case MLX5_CMD_OP_CREATE_TIS
:
438 case MLX5_CMD_OP_MODIFY_TIS
:
439 case MLX5_CMD_OP_QUERY_TIS
:
440 case MLX5_CMD_OP_CREATE_RQT
:
441 case MLX5_CMD_OP_MODIFY_RQT
:
442 case MLX5_CMD_OP_QUERY_RQT
:
444 case MLX5_CMD_OP_CREATE_FLOW_TABLE
:
445 case MLX5_CMD_OP_QUERY_FLOW_TABLE
:
446 case MLX5_CMD_OP_CREATE_FLOW_GROUP
:
447 case MLX5_CMD_OP_QUERY_FLOW_GROUP
:
448 case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY
:
449 case MLX5_CMD_OP_ALLOC_FLOW_COUNTER
:
450 case MLX5_CMD_OP_QUERY_FLOW_COUNTER
:
451 case MLX5_CMD_OP_ALLOC_PACKET_REFORMAT_CONTEXT
:
452 case MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT
:
453 case MLX5_CMD_OP_FPGA_CREATE_QP
:
454 case MLX5_CMD_OP_FPGA_MODIFY_QP
:
455 case MLX5_CMD_OP_FPGA_QUERY_QP
:
456 case MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS
:
457 case MLX5_CMD_OP_CREATE_GENERAL_OBJECT
:
458 case MLX5_CMD_OP_MODIFY_GENERAL_OBJECT
:
459 case MLX5_CMD_OP_QUERY_GENERAL_OBJECT
:
460 case MLX5_CMD_OP_CREATE_UCTX
:
461 case MLX5_CMD_OP_DESTROY_UCTX
:
462 case MLX5_CMD_OP_CREATE_UMEM
:
463 case MLX5_CMD_OP_DESTROY_UMEM
:
464 case MLX5_CMD_OP_ALLOC_MEMIC
:
465 case MLX5_CMD_OP_MODIFY_XRQ
:
466 case MLX5_CMD_OP_RELEASE_XRQ_ERROR
:
		*status = MLX5_DRIVER_STATUS_ABORTED;
		*synd = MLX5_DRIVER_SYND;
		return -ENOLINK;
	default:
		mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
		return -EINVAL;
	}
}
const char *mlx5_command_str(int command)
{
#define MLX5_COMMAND_STR_CASE(__cmd) case MLX5_CMD_OP_ ## __cmd: return #__cmd

	switch (command) {
481 MLX5_COMMAND_STR_CASE(QUERY_HCA_CAP
);
482 MLX5_COMMAND_STR_CASE(QUERY_ADAPTER
);
483 MLX5_COMMAND_STR_CASE(INIT_HCA
);
484 MLX5_COMMAND_STR_CASE(TEARDOWN_HCA
);
485 MLX5_COMMAND_STR_CASE(ENABLE_HCA
);
486 MLX5_COMMAND_STR_CASE(DISABLE_HCA
);
487 MLX5_COMMAND_STR_CASE(QUERY_PAGES
);
488 MLX5_COMMAND_STR_CASE(MANAGE_PAGES
);
489 MLX5_COMMAND_STR_CASE(SET_HCA_CAP
);
490 MLX5_COMMAND_STR_CASE(QUERY_ISSI
);
491 MLX5_COMMAND_STR_CASE(SET_ISSI
);
492 MLX5_COMMAND_STR_CASE(SET_DRIVER_VERSION
);
493 MLX5_COMMAND_STR_CASE(CREATE_MKEY
);
494 MLX5_COMMAND_STR_CASE(QUERY_MKEY
);
495 MLX5_COMMAND_STR_CASE(DESTROY_MKEY
);
496 MLX5_COMMAND_STR_CASE(QUERY_SPECIAL_CONTEXTS
);
497 MLX5_COMMAND_STR_CASE(PAGE_FAULT_RESUME
);
498 MLX5_COMMAND_STR_CASE(CREATE_EQ
);
499 MLX5_COMMAND_STR_CASE(DESTROY_EQ
);
500 MLX5_COMMAND_STR_CASE(QUERY_EQ
);
501 MLX5_COMMAND_STR_CASE(GEN_EQE
);
502 MLX5_COMMAND_STR_CASE(CREATE_CQ
);
503 MLX5_COMMAND_STR_CASE(DESTROY_CQ
);
504 MLX5_COMMAND_STR_CASE(QUERY_CQ
);
505 MLX5_COMMAND_STR_CASE(MODIFY_CQ
);
506 MLX5_COMMAND_STR_CASE(CREATE_QP
);
507 MLX5_COMMAND_STR_CASE(DESTROY_QP
);
508 MLX5_COMMAND_STR_CASE(RST2INIT_QP
);
509 MLX5_COMMAND_STR_CASE(INIT2RTR_QP
);
510 MLX5_COMMAND_STR_CASE(RTR2RTS_QP
);
511 MLX5_COMMAND_STR_CASE(RTS2RTS_QP
);
512 MLX5_COMMAND_STR_CASE(SQERR2RTS_QP
);
513 MLX5_COMMAND_STR_CASE(2ERR_QP
);
514 MLX5_COMMAND_STR_CASE(2RST_QP
);
515 MLX5_COMMAND_STR_CASE(QUERY_QP
);
516 MLX5_COMMAND_STR_CASE(SQD_RTS_QP
);
517 MLX5_COMMAND_STR_CASE(INIT2INIT_QP
);
518 MLX5_COMMAND_STR_CASE(CREATE_PSV
);
519 MLX5_COMMAND_STR_CASE(DESTROY_PSV
);
520 MLX5_COMMAND_STR_CASE(CREATE_SRQ
);
521 MLX5_COMMAND_STR_CASE(DESTROY_SRQ
);
522 MLX5_COMMAND_STR_CASE(QUERY_SRQ
);
523 MLX5_COMMAND_STR_CASE(ARM_RQ
);
524 MLX5_COMMAND_STR_CASE(CREATE_XRC_SRQ
);
525 MLX5_COMMAND_STR_CASE(DESTROY_XRC_SRQ
);
526 MLX5_COMMAND_STR_CASE(QUERY_XRC_SRQ
);
527 MLX5_COMMAND_STR_CASE(ARM_XRC_SRQ
);
528 MLX5_COMMAND_STR_CASE(CREATE_DCT
);
529 MLX5_COMMAND_STR_CASE(DESTROY_DCT
);
530 MLX5_COMMAND_STR_CASE(DRAIN_DCT
);
531 MLX5_COMMAND_STR_CASE(QUERY_DCT
);
532 MLX5_COMMAND_STR_CASE(ARM_DCT_FOR_KEY_VIOLATION
);
533 MLX5_COMMAND_STR_CASE(QUERY_VPORT_STATE
);
534 MLX5_COMMAND_STR_CASE(MODIFY_VPORT_STATE
);
535 MLX5_COMMAND_STR_CASE(QUERY_ESW_VPORT_CONTEXT
);
536 MLX5_COMMAND_STR_CASE(MODIFY_ESW_VPORT_CONTEXT
);
537 MLX5_COMMAND_STR_CASE(QUERY_NIC_VPORT_CONTEXT
);
538 MLX5_COMMAND_STR_CASE(MODIFY_NIC_VPORT_CONTEXT
);
539 MLX5_COMMAND_STR_CASE(QUERY_ROCE_ADDRESS
);
540 MLX5_COMMAND_STR_CASE(SET_ROCE_ADDRESS
);
541 MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_CONTEXT
);
542 MLX5_COMMAND_STR_CASE(MODIFY_HCA_VPORT_CONTEXT
);
543 MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_GID
);
544 MLX5_COMMAND_STR_CASE(QUERY_HCA_VPORT_PKEY
);
545 MLX5_COMMAND_STR_CASE(QUERY_VNIC_ENV
);
546 MLX5_COMMAND_STR_CASE(QUERY_VPORT_COUNTER
);
547 MLX5_COMMAND_STR_CASE(ALLOC_Q_COUNTER
);
548 MLX5_COMMAND_STR_CASE(DEALLOC_Q_COUNTER
);
549 MLX5_COMMAND_STR_CASE(QUERY_Q_COUNTER
);
550 MLX5_COMMAND_STR_CASE(SET_MONITOR_COUNTER
);
551 MLX5_COMMAND_STR_CASE(ARM_MONITOR_COUNTER
);
552 MLX5_COMMAND_STR_CASE(SET_PP_RATE_LIMIT
);
553 MLX5_COMMAND_STR_CASE(QUERY_RATE_LIMIT
);
554 MLX5_COMMAND_STR_CASE(CREATE_SCHEDULING_ELEMENT
);
555 MLX5_COMMAND_STR_CASE(DESTROY_SCHEDULING_ELEMENT
);
556 MLX5_COMMAND_STR_CASE(QUERY_SCHEDULING_ELEMENT
);
557 MLX5_COMMAND_STR_CASE(MODIFY_SCHEDULING_ELEMENT
);
558 MLX5_COMMAND_STR_CASE(CREATE_QOS_PARA_VPORT
);
559 MLX5_COMMAND_STR_CASE(DESTROY_QOS_PARA_VPORT
);
560 MLX5_COMMAND_STR_CASE(ALLOC_PD
);
561 MLX5_COMMAND_STR_CASE(DEALLOC_PD
);
562 MLX5_COMMAND_STR_CASE(ALLOC_UAR
);
563 MLX5_COMMAND_STR_CASE(DEALLOC_UAR
);
564 MLX5_COMMAND_STR_CASE(CONFIG_INT_MODERATION
);
565 MLX5_COMMAND_STR_CASE(ACCESS_REG
);
566 MLX5_COMMAND_STR_CASE(ATTACH_TO_MCG
);
567 MLX5_COMMAND_STR_CASE(DETACH_FROM_MCG
);
568 MLX5_COMMAND_STR_CASE(GET_DROPPED_PACKET_LOG
);
569 MLX5_COMMAND_STR_CASE(MAD_IFC
);
570 MLX5_COMMAND_STR_CASE(QUERY_MAD_DEMUX
);
571 MLX5_COMMAND_STR_CASE(SET_MAD_DEMUX
);
572 MLX5_COMMAND_STR_CASE(NOP
);
573 MLX5_COMMAND_STR_CASE(ALLOC_XRCD
);
574 MLX5_COMMAND_STR_CASE(DEALLOC_XRCD
);
575 MLX5_COMMAND_STR_CASE(ALLOC_TRANSPORT_DOMAIN
);
576 MLX5_COMMAND_STR_CASE(DEALLOC_TRANSPORT_DOMAIN
);
577 MLX5_COMMAND_STR_CASE(QUERY_CONG_STATUS
);
578 MLX5_COMMAND_STR_CASE(MODIFY_CONG_STATUS
);
579 MLX5_COMMAND_STR_CASE(QUERY_CONG_PARAMS
);
580 MLX5_COMMAND_STR_CASE(MODIFY_CONG_PARAMS
);
581 MLX5_COMMAND_STR_CASE(QUERY_CONG_STATISTICS
);
582 MLX5_COMMAND_STR_CASE(ADD_VXLAN_UDP_DPORT
);
583 MLX5_COMMAND_STR_CASE(DELETE_VXLAN_UDP_DPORT
);
584 MLX5_COMMAND_STR_CASE(SET_L2_TABLE_ENTRY
);
585 MLX5_COMMAND_STR_CASE(QUERY_L2_TABLE_ENTRY
);
586 MLX5_COMMAND_STR_CASE(DELETE_L2_TABLE_ENTRY
);
587 MLX5_COMMAND_STR_CASE(SET_WOL_ROL
);
588 MLX5_COMMAND_STR_CASE(QUERY_WOL_ROL
);
589 MLX5_COMMAND_STR_CASE(CREATE_LAG
);
590 MLX5_COMMAND_STR_CASE(MODIFY_LAG
);
591 MLX5_COMMAND_STR_CASE(QUERY_LAG
);
592 MLX5_COMMAND_STR_CASE(DESTROY_LAG
);
593 MLX5_COMMAND_STR_CASE(CREATE_VPORT_LAG
);
594 MLX5_COMMAND_STR_CASE(DESTROY_VPORT_LAG
);
595 MLX5_COMMAND_STR_CASE(CREATE_TIR
);
596 MLX5_COMMAND_STR_CASE(MODIFY_TIR
);
597 MLX5_COMMAND_STR_CASE(DESTROY_TIR
);
598 MLX5_COMMAND_STR_CASE(QUERY_TIR
);
599 MLX5_COMMAND_STR_CASE(CREATE_SQ
);
600 MLX5_COMMAND_STR_CASE(MODIFY_SQ
);
601 MLX5_COMMAND_STR_CASE(DESTROY_SQ
);
602 MLX5_COMMAND_STR_CASE(QUERY_SQ
);
603 MLX5_COMMAND_STR_CASE(CREATE_RQ
);
604 MLX5_COMMAND_STR_CASE(MODIFY_RQ
);
605 MLX5_COMMAND_STR_CASE(DESTROY_RQ
);
606 MLX5_COMMAND_STR_CASE(QUERY_RQ
);
607 MLX5_COMMAND_STR_CASE(CREATE_RMP
);
608 MLX5_COMMAND_STR_CASE(MODIFY_RMP
);
609 MLX5_COMMAND_STR_CASE(DESTROY_RMP
);
610 MLX5_COMMAND_STR_CASE(QUERY_RMP
);
611 MLX5_COMMAND_STR_CASE(CREATE_TIS
);
612 MLX5_COMMAND_STR_CASE(MODIFY_TIS
);
613 MLX5_COMMAND_STR_CASE(DESTROY_TIS
);
614 MLX5_COMMAND_STR_CASE(QUERY_TIS
);
615 MLX5_COMMAND_STR_CASE(CREATE_RQT
);
616 MLX5_COMMAND_STR_CASE(MODIFY_RQT
);
617 MLX5_COMMAND_STR_CASE(DESTROY_RQT
);
618 MLX5_COMMAND_STR_CASE(QUERY_RQT
);
619 MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ROOT
);
620 MLX5_COMMAND_STR_CASE(CREATE_FLOW_TABLE
);
621 MLX5_COMMAND_STR_CASE(DESTROY_FLOW_TABLE
);
622 MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE
);
623 MLX5_COMMAND_STR_CASE(CREATE_FLOW_GROUP
);
624 MLX5_COMMAND_STR_CASE(DESTROY_FLOW_GROUP
);
625 MLX5_COMMAND_STR_CASE(QUERY_FLOW_GROUP
);
626 MLX5_COMMAND_STR_CASE(SET_FLOW_TABLE_ENTRY
);
627 MLX5_COMMAND_STR_CASE(QUERY_FLOW_TABLE_ENTRY
);
628 MLX5_COMMAND_STR_CASE(DELETE_FLOW_TABLE_ENTRY
);
629 MLX5_COMMAND_STR_CASE(ALLOC_FLOW_COUNTER
);
630 MLX5_COMMAND_STR_CASE(DEALLOC_FLOW_COUNTER
);
631 MLX5_COMMAND_STR_CASE(QUERY_FLOW_COUNTER
);
632 MLX5_COMMAND_STR_CASE(MODIFY_FLOW_TABLE
);
633 MLX5_COMMAND_STR_CASE(ALLOC_PACKET_REFORMAT_CONTEXT
);
634 MLX5_COMMAND_STR_CASE(DEALLOC_PACKET_REFORMAT_CONTEXT
);
635 MLX5_COMMAND_STR_CASE(ALLOC_MODIFY_HEADER_CONTEXT
);
636 MLX5_COMMAND_STR_CASE(DEALLOC_MODIFY_HEADER_CONTEXT
);
637 MLX5_COMMAND_STR_CASE(FPGA_CREATE_QP
);
638 MLX5_COMMAND_STR_CASE(FPGA_MODIFY_QP
);
639 MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP
);
640 MLX5_COMMAND_STR_CASE(FPGA_QUERY_QP_COUNTERS
);
641 MLX5_COMMAND_STR_CASE(FPGA_DESTROY_QP
);
642 MLX5_COMMAND_STR_CASE(CREATE_XRQ
);
643 MLX5_COMMAND_STR_CASE(DESTROY_XRQ
);
644 MLX5_COMMAND_STR_CASE(QUERY_XRQ
);
645 MLX5_COMMAND_STR_CASE(ARM_XRQ
);
646 MLX5_COMMAND_STR_CASE(CREATE_GENERAL_OBJECT
);
647 MLX5_COMMAND_STR_CASE(DESTROY_GENERAL_OBJECT
);
648 MLX5_COMMAND_STR_CASE(MODIFY_GENERAL_OBJECT
);
649 MLX5_COMMAND_STR_CASE(QUERY_GENERAL_OBJECT
);
650 MLX5_COMMAND_STR_CASE(QUERY_MODIFY_HEADER_CONTEXT
);
651 MLX5_COMMAND_STR_CASE(ALLOC_MEMIC
);
652 MLX5_COMMAND_STR_CASE(DEALLOC_MEMIC
);
653 MLX5_COMMAND_STR_CASE(QUERY_ESW_FUNCTIONS
);
654 MLX5_COMMAND_STR_CASE(CREATE_UCTX
);
655 MLX5_COMMAND_STR_CASE(DESTROY_UCTX
);
656 MLX5_COMMAND_STR_CASE(CREATE_UMEM
);
657 MLX5_COMMAND_STR_CASE(DESTROY_UMEM
);
658 MLX5_COMMAND_STR_CASE(RELEASE_XRQ_ERROR
);
659 MLX5_COMMAND_STR_CASE(MODIFY_XRQ
);
	default: return "unknown command opcode";
	}
}
static const char *cmd_status_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:
		return "OK";
	case MLX5_CMD_STAT_INT_ERR:
		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:
		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:
		return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
		return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:
		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:
		return "resource busy";
	case MLX5_CMD_STAT_LIM_ERR:
		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
		return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:
		return "bad index";
	case MLX5_CMD_STAT_NO_RES_ERR:
		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
		return "bad input length";
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
		return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
		return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:
		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
		return "bad size too many outstanding CQEs";
	default:
		return "unknown status";
	}
}
static int cmd_status_to_err(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:				return 0;
	case MLX5_CMD_STAT_INT_ERR:			return -EIO;
	case MLX5_CMD_STAT_BAD_OP_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_PARAM_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_RES_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_RES_BUSY:			return -EBUSY;
	case MLX5_CMD_STAT_LIM_ERR:			return -ENOMEM;
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_IX_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_NO_RES_ERR:			return -EAGAIN;
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_PKT_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:	return -EINVAL;
	default:					return -EIO;
	}
}
struct mlx5_ifc_mbox_out_bits {
	u8	   status[0x8];
	u8	   reserved_at_8[0x18];

	u8	   syndrome[0x20];

	u8	   reserved_at_40[0x40];
};

struct mlx5_ifc_mbox_in_bits {
	u8	   opcode[0x10];
	u8	   uid[0x10];

	u8	   reserved_at_20[0x10];
	u8	   op_mod[0x10];

	u8	   reserved_at_40[0x40];
};
void mlx5_cmd_mbox_status(void *out, u8 *status, u32 *syndrome)
{
	*status = MLX5_GET(mbox_out, out, status);
	*syndrome = MLX5_GET(mbox_out, out, syndrome);
}
static int mlx5_cmd_check(struct mlx5_core_dev *dev, void *in, void *out)
{
	u32 syndrome;
	u8  status;
	u16 opcode;
	u16 op_mod;
	u16 uid;

	mlx5_cmd_mbox_status(out, &status, &syndrome);
	if (!status)
		return 0;

	opcode = MLX5_GET(mbox_in, in, opcode);
	op_mod = MLX5_GET(mbox_in, in, op_mod);
	uid    = MLX5_GET(mbox_in, in, uid);

	if (!uid && opcode != MLX5_CMD_OP_DESTROY_MKEY)
		mlx5_core_err_rl(dev,
			"%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
			mlx5_command_str(opcode), opcode, op_mod,
			cmd_status_str(status), status, syndrome);
	else
		mlx5_core_dbg(dev,
			"%s(0x%x) op_mod(0x%x) failed, status %s(0x%x), syndrome (0x%x)\n",
			mlx5_command_str(opcode),
			opcode, op_mod,
			cmd_status_str(status),
			status,
			syndrome);

	return cmd_status_to_err(status);
}
static void dump_command(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_work_ent *ent, int input)
{
	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
	u16 op = MLX5_GET(mbox_in, ent->lay->in, opcode);
	struct mlx5_cmd_mailbox *next = msg->next;
	int n = mlx5_calc_cmd_blocks(msg);
	int data_only;
	u32 offset = 0;
	int dump_len;
	int i;

	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

	if (data_only)
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
				   "dump command data %s(0x%x) %s\n",
				   mlx5_command_str(op), op,
				   input ? "INPUT" : "OUTPUT");
	else
		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
			      mlx5_command_str(op), op,
			      input ? "INPUT" : "OUTPUT");

	if (data_only) {
		if (input) {
			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
			offset += sizeof(ent->lay->in);
		} else {
			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
			offset += sizeof(ent->lay->out);
		}
	} else {
		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
		offset += sizeof(*ent->lay);
	}

	for (i = 0; i < n && next; i++) {
		if (data_only) {
			dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
			dump_buf(next->buf, dump_len, 1, offset);
			offset += MLX5_CMD_DATA_BLOCK_SIZE;
		} else {
			mlx5_core_dbg(dev, "command block:\n");
			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
			offset += sizeof(struct mlx5_cmd_prot_block);
		}
		next = next->next;
	}

	if (data_only)
		pr_debug("\n");
}
static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
	return MLX5_GET(mbox_in, in->first.data, opcode);
}
static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced);
static void cb_timeout_handler(struct work_struct *work)
{
	struct delayed_work *dwork = container_of(work, struct delayed_work,
						  work);
	struct mlx5_cmd_work_ent *ent = container_of(dwork,
						     struct mlx5_cmd_work_ent,
						     cb_timeout_work);
	struct mlx5_core_dev *dev = container_of(ent->cmd, struct mlx5_core_dev,
						 cmd);

	mlx5_cmd_eq_recover(dev);

	/* Maybe got handled by eq recover ? */
	if (!test_bit(MLX5_CMD_ENT_STATE_PENDING_COMP, &ent->state)) {
		mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, recovered after timeout\n", ent->idx,
			       mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
		goto out; /* phew, already handled */
	}

	ent->ret = -ETIMEDOUT;
	mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) Async, timeout. Will cause a leak of a command resource\n",
		       ent->idx, mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
	mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);

out:
	cmd_ent_put(ent); /* for the cmd_ent_get() took on schedule delayed work */
}
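/* Added note: for callback (async) commands an extra entry reference is
 * taken when the timeout work is scheduled; it is dropped either here,
 * after the work ran, or in the completion handler when the work is
 * cancelled.  This keeps the entry alive for whichever path runs last.
 */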
static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg);
static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg);
static bool opcode_allowed(struct mlx5_cmd *cmd, u16 opcode)
{
	if (cmd->allowed_opcode == CMD_ALLOWED_OPCODE_ALL)
		return true;

	return cmd->allowed_opcode == opcode;
}
static int cmd_alloc_index_retry(struct mlx5_cmd *cmd)
{
	unsigned long alloc_end = jiffies + msecs_to_jiffies(1000);
	int idx;

retry:
	idx = cmd_alloc_index(cmd);
	if (idx < 0 && time_before(jiffies, alloc_end)) {
		/* Index allocation can fail on heavy load of commands. This is a temporary
		 * situation as the current command already holds the semaphore, meaning that
		 * another command completion is being handled and it is expected to release
		 * the entry index soon.
		 */
		cpu_relax();
		goto retry;
	}
	return idx;
}
bool mlx5_cmd_is_down(struct mlx5_core_dev *dev)
{
	return pci_channel_offline(dev->pdev) ||
	       dev->cmd.state != MLX5_CMDIF_STATE_UP ||
	       dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR;
}
*work
)
914 struct mlx5_cmd_work_ent
*ent
= container_of(work
, struct mlx5_cmd_work_ent
, work
);
915 struct mlx5_cmd
*cmd
= ent
->cmd
;
916 struct mlx5_core_dev
*dev
= container_of(cmd
, struct mlx5_core_dev
, cmd
);
917 unsigned long cb_timeout
= msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC
);
918 struct mlx5_cmd_layout
*lay
;
919 struct semaphore
*sem
;
921 bool poll_cmd
= ent
->polling
;
925 complete(&ent
->handling
);
926 sem
= ent
->page_queue
? &cmd
->pages_sem
: &cmd
->sem
;
928 if (!ent
->page_queue
) {
929 alloc_ret
= cmd_alloc_index_retry(cmd
);
931 mlx5_core_err_rl(dev
, "failed to allocate command entry\n");
933 ent
->callback(-EAGAIN
, ent
->context
);
934 mlx5_free_cmd_msg(dev
, ent
->out
);
935 free_msg(dev
, ent
->in
);
939 complete(&ent
->done
);
944 ent
->idx
= alloc_ret
;
946 ent
->idx
= cmd
->max_reg_cmds
;
947 spin_lock_irqsave(&cmd
->alloc_lock
, flags
);
948 clear_bit(ent
->idx
, &cmd
->bitmask
);
949 spin_unlock_irqrestore(&cmd
->alloc_lock
, flags
);
952 cmd
->ent_arr
[ent
->idx
] = ent
;
953 lay
= get_inst(cmd
, ent
->idx
);
955 memset(lay
, 0, sizeof(*lay
));
956 memcpy(lay
->in
, ent
->in
->first
.data
, sizeof(lay
->in
));
957 ent
->op
= be32_to_cpu(lay
->in
[0]) >> 16;
959 lay
->in_ptr
= cpu_to_be64(ent
->in
->next
->dma
);
960 lay
->inlen
= cpu_to_be32(ent
->in
->len
);
962 lay
->out_ptr
= cpu_to_be64(ent
->out
->next
->dma
);
963 lay
->outlen
= cpu_to_be32(ent
->out
->len
);
964 lay
->type
= MLX5_PCI_CMD_XPORT
;
965 lay
->token
= ent
->token
;
966 lay
->status_own
= CMD_OWNER_HW
;
967 set_signature(ent
, !cmd
->checksum_disabled
);
968 dump_command(dev
, ent
, 1);
969 ent
->ts1
= ktime_get_ns();
970 cmd_mode
= cmd
->mode
;
972 if (ent
->callback
&& schedule_delayed_work(&ent
->cb_timeout_work
, cb_timeout
))
974 set_bit(MLX5_CMD_ENT_STATE_PENDING_COMP
, &ent
->state
);
976 /* Skip sending command to fw if internal error */
977 if (mlx5_cmd_is_down(dev
) || !opcode_allowed(&dev
->cmd
, ent
->op
)) {
981 ent
->ret
= mlx5_internal_err_ret_value(dev
, msg_to_opcode(ent
->in
), &drv_synd
, &status
);
982 MLX5_SET(mbox_out
, ent
->out
, status
, status
);
983 MLX5_SET(mbox_out
, ent
->out
, syndrome
, drv_synd
);
985 mlx5_cmd_comp_handler(dev
, 1UL << ent
->idx
, true);
989 cmd_ent_get(ent
); /* for the _real_ FW event on completion */
990 /* ring doorbell after the descriptor is valid */
991 mlx5_core_dbg(dev
, "writing 0x%x to command doorbell\n", 1 << ent
->idx
);
993 iowrite32be(1 << ent
->idx
, &dev
->iseg
->cmd_dbell
);
994 /* if not in polling don't use ent after this point */
995 if (cmd_mode
== CMD_MODE_POLLING
|| poll_cmd
) {
997 /* make sure we read the descriptor after ownership is SW */
999 mlx5_cmd_comp_handler(dev
, 1UL << ent
->idx
, (ent
->ret
== -ETIMEDOUT
));
static const char *deliv_status_to_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
		return "no errors";
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
		return "signature error";
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return "token error";
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
		return "bad block number";
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
		return "output pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return "input pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return "firmware internal error";
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
		return "command input length error";
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
		return "command output length error";
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return "reserved fields not cleared";
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
		return "bad command descriptor type";
	default:
		return "unknown status code";
	}
}
enum {
	MLX5_CMD_TIMEOUT_RECOVER_MSEC   = 5 * 1000,
};
static void wait_func_handle_exec_timeout(struct mlx5_core_dev *dev,
					  struct mlx5_cmd_work_ent *ent)
{
	unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_RECOVER_MSEC);

	mlx5_cmd_eq_recover(dev);

	/* Re-wait on the ent->done after executing the recovery flow. If the
	 * recovery flow (or any other recovery flow running simultaneously)
	 * has recovered an EQE, it should cause the entry to be completed by
	 * the command interface.
	 */
	if (wait_for_completion_timeout(&ent->done, timeout)) {
		mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) recovered after timeout\n", ent->idx,
			       mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));
		return;
	}

	mlx5_core_warn(dev, "cmd[%d]: %s(0x%x) No done completion\n", ent->idx,
		       mlx5_command_str(msg_to_opcode(ent->in)), msg_to_opcode(ent->in));

	ent->ret = -ETIMEDOUT;
	mlx5_cmd_comp_handler(dev, 1UL << ent->idx, true);
}
static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
	unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd *cmd = &dev->cmd;
	int err;

	if (!wait_for_completion_timeout(&ent->handling, timeout) &&
	    cancel_work_sync(&ent->work)) {
		ent->ret = -ECANCELED;
		goto out_err;
	}
	if (cmd->mode == CMD_MODE_POLLING || ent->polling)
		wait_for_completion(&ent->done);
	else if (!wait_for_completion_timeout(&ent->done, timeout))
		wait_func_handle_exec_timeout(dev, ent);

out_err:
	err = ent->ret;

	if (err == -ETIMEDOUT) {
		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
			       mlx5_command_str(msg_to_opcode(ent->in)),
			       msg_to_opcode(ent->in));
	} else if (err == -ECANCELED) {
		mlx5_core_warn(dev, "%s(0x%x) canceled on out of queue timeout.\n",
			       mlx5_command_str(msg_to_opcode(ent->in)),
			       msg_to_opcode(ent->in));
	}
	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
		      err, deliv_status_to_str(ent->status), ent->status);

	return err;
}
/*  Notes:
 *	1. Callback functions may not sleep
 *	2. Page queue commands do not support asynchronous completion
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
			   mlx5_cmd_cbk_t callback,
			   void *context, int page_queue, u8 *status,
			   u8 token, bool force_polling)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	struct mlx5_cmd_stats *stats;
	int err = 0;
	s64 ds;
	u16 op;

	if (callback && page_queue)
		return -EINVAL;

	ent = cmd_alloc_ent(cmd, in, out, uout, uout_size,
			    callback, context, page_queue);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	/* put for this ent is when consumed, depending on the use case
	 * 1) (!callback) blocking flow: by caller after wait_func completes
	 * 2) (callback) flow: by mlx5_cmd_comp_handler() when ent is handled
	 */

	ent->token = token;
	ent->polling = force_polling;

	init_completion(&ent->handling);
	if (!callback)
		init_completion(&ent->done);

	INIT_DELAYED_WORK(&ent->cb_timeout_work, cb_timeout_handler);
	INIT_WORK(&ent->work, cmd_work_handler);
	if (page_queue) {
		cmd_work_handler(&ent->work);
	} else if (!queue_work(cmd->wq, &ent->work)) {
		mlx5_core_warn(dev, "failed to queue work\n");
		err = -ENOMEM;
		goto out_free;
	}

	if (callback)
		goto out; /* mlx5_cmd_comp_handler() will put(ent) */

	err = wait_func(dev, ent);
	if (err == -ETIMEDOUT || err == -ECANCELED)
		goto out_free;

	ds = ent->ts2 - ent->ts1;
	op = MLX5_GET(mbox_in, in->first.data, opcode);
	if (op < MLX5_CMD_OP_MAX) {
		stats = &cmd->stats[op];
		spin_lock_irq(&stats->lock);
		stats->sum += ds;
		++stats->n;
		spin_unlock_irq(&stats->lock);
	}
	mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
			   "fw exec time for %s is %lld nsec\n",
			   mlx5_command_str(op), ds);
	*status = ent->status;

out_free:
	cmd_ent_put(ent);
out:
	return err;
}
static ssize_t dbg_write(struct file *filp, const char __user *buf,
			 size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char lbuf[3];
	int err;

	if (!dbg->in_msg || !dbg->out_msg)
		return -ENOMEM;

	if (count < sizeof(lbuf) - 1)
		return -EINVAL;

	if (copy_from_user(lbuf, buf, sizeof(lbuf) - 1))
		return -EFAULT;

	lbuf[sizeof(lbuf) - 1] = 0;

	if (strcmp(lbuf, "go"))
		return -EINVAL;

	err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);

	return err ? err : count;
}
static const struct file_operations fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= dbg_write,
};
static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size,
			    u8 token)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(to->first.data));
	memcpy(to->first.data, from, copy);
	size -= copy;
	from += copy;

	next = to->next;
	while (size) {
		if (!next) {
			/* this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;
		memcpy(block->data, from, copy);
		from += copy;
		size -= copy;
		block->token = token;
		next = next->next;
	}

	return 0;
}
static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(from->first.data));
	memcpy(to, from->first.data, copy);
	size -= copy;
	to += copy;

	next = from->next;
	while (size) {
		if (!next) {
			/* this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;

		memcpy(to, block->data, copy);
		to += copy;
		size -= copy;
		next = next->next;
	}

	return 0;
}
static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
					      gfp_t flags)
{
	struct mlx5_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), flags);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = dma_pool_zalloc(dev->cmd.pool, flags,
				       &mailbox->dma);
	if (!mailbox->buf) {
		mlx5_core_dbg(dev, "failed allocation\n");
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}
	mailbox->next = NULL;

	return mailbox;
}
static void free_cmd_box(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_mailbox *mailbox)
{
	dma_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}
static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
					       gfp_t flags, int size,
					       u8 token)
{
	struct mlx5_cmd_mailbox *tmp, *head = NULL;
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_msg *msg;
	int err;
	int n;
	int i;

	msg = kzalloc(sizeof(*msg), flags);
	if (!msg)
		return ERR_PTR(-ENOMEM);

	msg->len = size;
	n = mlx5_calc_cmd_blocks(msg);

	for (i = 0; i < n; i++) {
		tmp = alloc_cmd_box(dev, flags);
		if (IS_ERR(tmp)) {
			mlx5_core_warn(dev, "failed allocating block\n");
			err = PTR_ERR(tmp);
			goto err_alloc;
		}

		block = tmp->buf;
		tmp->next = head;
		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
		block->block_num = cpu_to_be32(n - i - 1);
		block->token = token;
		head = tmp;
	}
	msg->next = head;
	return msg;

err_alloc:
	while (head) {
		tmp = head->next;
		free_cmd_box(dev, head);
		head = tmp;
	}
	kfree(msg);

	return ERR_PTR(err);
}
static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg)
{
	struct mlx5_cmd_mailbox *head = msg->next;
	struct mlx5_cmd_mailbox *next;

	while (head) {
		next = head->next;
		free_cmd_box(dev, head);
		head = next;
	}
	kfree(msg);
}
static ssize_t data_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	void *ptr;

	if (*pos != 0)
		return -EINVAL;

	kfree(dbg->in_msg);
	dbg->in_msg = NULL;
	dbg->inlen = 0;
	ptr = memdup_user(buf, count);
	if (IS_ERR(ptr))
		return PTR_ERR(ptr);
	dbg->in_msg = ptr;
	dbg->inlen = count;

	*pos = count;

	return count;
}
static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	if (!dbg->out_msg)
		return -ENOMEM;

	return simple_read_from_buffer(buf, count, pos, dbg->out_msg,
				       dbg->outlen);
}
= {
1397 .owner
= THIS_MODULE
,
1398 .open
= simple_open
,
1399 .write
= data_write
,
1403 static ssize_t
outlen_read(struct file
*filp
, char __user
*buf
, size_t count
,
1406 struct mlx5_core_dev
*dev
= filp
->private_data
;
1407 struct mlx5_cmd_debug
*dbg
= &dev
->cmd
.dbg
;
1411 err
= snprintf(outlen
, sizeof(outlen
), "%d", dbg
->outlen
);
1415 return simple_read_from_buffer(buf
, count
, pos
, outlen
, err
);
static ssize_t outlen_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen_str[8] = {0};
	int outlen;
	void *ptr;
	int err;

	if (*pos != 0 || count > 6)
		return -EINVAL;

	kfree(dbg->out_msg);
	dbg->out_msg = NULL;
	dbg->outlen = 0;

	if (copy_from_user(outlen_str, buf, count))
		return -EFAULT;

	err = sscanf(outlen_str, "%d", &outlen);
	if (err != 1)
		return -EINVAL;

	ptr = kzalloc(outlen, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	dbg->out_msg = ptr;
	dbg->outlen = outlen;

	*pos = count;

	return count;
}
= {
1455 .owner
= THIS_MODULE
,
1456 .open
= simple_open
,
1457 .write
= outlen_write
,
1458 .read
= outlen_read
,
static void set_wqname(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
		 dev_name(dev->device));
}
static void clean_debug_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	if (!mlx5_debugfs_root)
		return;

	mlx5_cmdif_debugfs_cleanup(dev);
	debugfs_remove_recursive(dbg->dbg_root);
}
static void create_debugfs_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);

	debugfs_create_file("in", 0400, dbg->dbg_root, dev, &dfops);
	debugfs_create_file("out", 0200, dbg->dbg_root, dev, &dfops);
	debugfs_create_file("out_len", 0600, dbg->dbg_root, dev, &olfops);
	debugfs_create_u8("status", 0600, dbg->dbg_root, &dbg->status);
	debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);

	mlx5_cmdif_debugfs_init(dev);
}
void mlx5_cmd_allowed_opcode(struct mlx5_core_dev *dev, u16 opcode)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);
	down(&cmd->pages_sem);

	cmd->allowed_opcode = opcode;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}
*dev
, int mode
)
1513 struct mlx5_cmd
*cmd
= &dev
->cmd
;
1516 for (i
= 0; i
< cmd
->max_reg_cmds
; i
++)
1518 down(&cmd
->pages_sem
);
1522 up(&cmd
->pages_sem
);
1523 for (i
= 0; i
< cmd
->max_reg_cmds
; i
++)
1527 static int cmd_comp_notifier(struct notifier_block
*nb
,
1528 unsigned long type
, void *data
)
1530 struct mlx5_core_dev
*dev
;
1531 struct mlx5_cmd
*cmd
;
1532 struct mlx5_eqe
*eqe
;
1534 cmd
= mlx5_nb_cof(nb
, struct mlx5_cmd
, nb
);
1535 dev
= container_of(cmd
, struct mlx5_core_dev
, cmd
);
1538 mlx5_cmd_comp_handler(dev
, be32_to_cpu(eqe
->data
.cmd
.vector
), false);
void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
	MLX5_NB_INIT(&dev->cmd.nb, cmd_comp_notifier, CMD);
	mlx5_eq_notifier_register(dev, &dev->cmd.nb);
	mlx5_cmd_change_mod(dev, CMD_MODE_EVENTS);
}

void mlx5_cmd_use_polling(struct mlx5_core_dev *dev)
{
	mlx5_cmd_change_mod(dev, CMD_MODE_POLLING);
	mlx5_eq_notifier_unregister(dev, &dev->cmd.nb);
}
static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{
	unsigned long flags;

	if (msg->parent) {
		spin_lock_irqsave(&msg->parent->lock, flags);
		list_add_tail(&msg->list, &msg->parent->head);
		spin_unlock_irqrestore(&msg->parent->lock, flags);
	} else {
		mlx5_free_cmd_msg(dev, msg);
	}
}
static void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec, bool forced)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	mlx5_cmd_cbk_t callback;
	void *context;
	int err;
	int i;
	s64 ds;
	struct mlx5_cmd_stats *stats;
	unsigned long flags;
	unsigned long vector;

	/* there can be at most 32 command queues */
	vector = vec & 0xffffffff;
	for (i = 0; i < (1 << cmd->log_sz); i++) {
		if (test_bit(i, &vector)) {
			struct semaphore *sem;

			ent = cmd->ent_arr[i];

			/* if we already completed the command, ignore it */
			if (!test_and_clear_bit(MLX5_CMD_ENT_STATE_PENDING_COMP,
						&ent->state)) {
				/* only real completion can free the cmd slot */
				if (!forced) {
					mlx5_core_err(dev, "Command completion arrived after timeout (entry idx = %d).\n",
						      ent->idx);
					cmd_ent_put(ent);
				}
				continue;
			}

			if (ent->callback && cancel_delayed_work(&ent->cb_timeout_work))
				cmd_ent_put(ent); /* timeout work was canceled */

			if (!forced || /* Real FW completion */
			    pci_channel_offline(dev->pdev) || /* FW is inaccessible */
			    dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR)
				cmd_ent_put(ent);

			if (ent->page_queue)
				sem = &cmd->pages_sem;
			else
				sem = &cmd->sem;
			ent->ts2 = ktime_get_ns();
			memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
			dump_command(dev, ent, 0);
			if (!ent->ret) {
				if (!cmd->checksum_disabled)
					ent->ret = verify_signature(ent);
				if (vec & MLX5_TRIGGERED_CMD_COMP)
					ent->status = MLX5_DRIVER_STATUS_ABORTED;
				else
					ent->status = ent->lay->status_own >> 1;

				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
					      ent->ret, deliv_status_to_str(ent->status), ent->status);
			}

			if (ent->callback) {
				ds = ent->ts2 - ent->ts1;
				if (ent->op < MLX5_CMD_OP_MAX) {
					stats = &cmd->stats[ent->op];
					spin_lock_irqsave(&stats->lock, flags);
					stats->sum += ds;
					++stats->n;
					spin_unlock_irqrestore(&stats->lock, flags);
				}

				callback = ent->callback;
				context = ent->context;
				err = ent->ret;
				if (!err) {
					err = mlx5_copy_from_msg(ent->uout,
								 ent->out,
								 ent->uout_size);

					err = err ? err : mlx5_cmd_check(dev,
									 ent->in->first.data,
									 ent->uout);
				}

				mlx5_free_cmd_msg(dev, ent->out);
				free_msg(dev, ent->in);

				err = err ? err : ent->status;
				/* final consumer is done, release ent */
				cmd_ent_put(ent);
				callback(err, context);
			} else {
				/* release wait_func() so mlx5_cmd_invoke()
				 * can make the final ent_put()
				 */
				complete(&ent->done);
			}
			up(sem);
		}
	}
}
void mlx5_cmd_trigger_completions(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	unsigned long bitmask;
	unsigned long flags;
	u64 vector;
	int i;

	/* wait for pending handlers to complete */
	mlx5_eq_synchronize_cmd_irq(dev);
	spin_lock_irqsave(&dev->cmd.alloc_lock, flags);
	vector = ~dev->cmd.bitmask & ((1ul << (1 << dev->cmd.log_sz)) - 1);
	if (!vector)
		goto no_trig;

	bitmask = vector;
	/* we must increment the allocated entries refcount before triggering the completions
	 * to guarantee pending commands will not get freed in the meanwhile.
	 * For that reason, it also has to be done inside the alloc_lock.
	 */
	for_each_set_bit(i, &bitmask, (1 << cmd->log_sz))
		cmd_ent_get(cmd->ent_arr[i]);
	vector |= MLX5_TRIGGERED_CMD_COMP;
	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);

	mlx5_core_dbg(dev, "vector 0x%llx\n", vector);
	mlx5_cmd_comp_handler(dev, vector, true);
	for_each_set_bit(i, &bitmask, (1 << cmd->log_sz))
		cmd_ent_put(cmd->ent_arr[i]);
	return;

no_trig:
	spin_unlock_irqrestore(&dev->cmd.alloc_lock, flags);
}
void mlx5_cmd_flush(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		while (down_trylock(&cmd->sem))
			mlx5_cmd_trigger_completions(dev);

	while (down_trylock(&cmd->pages_sem))
		mlx5_cmd_trigger_completions(dev);

	/* Unlock cmdif */
	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}
static int status_to_err(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
	case MLX5_DRIVER_STATUS_ABORTED:
		return 0;
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return -EBADR;
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return -EFAULT; /* Bad address */
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return -ENOMSG;
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return -EIO;
	default:
		return -EINVAL;
	}
}
*alloc_msg(struct mlx5_core_dev
*dev
, int in_size
,
1752 struct mlx5_cmd_msg
*msg
= ERR_PTR(-ENOMEM
);
1753 struct cmd_msg_cache
*ch
= NULL
;
1754 struct mlx5_cmd
*cmd
= &dev
->cmd
;
1760 for (i
= 0; i
< MLX5_NUM_COMMAND_CACHES
; i
++) {
1761 ch
= &cmd
->cache
[i
];
1762 if (in_size
> ch
->max_inbox_size
)
1764 spin_lock_irq(&ch
->lock
);
1765 if (list_empty(&ch
->head
)) {
1766 spin_unlock_irq(&ch
->lock
);
1769 msg
= list_entry(ch
->head
.next
, typeof(*msg
), list
);
1770 /* For cached lists, we must explicitly state what is
1774 list_del(&msg
->list
);
1775 spin_unlock_irq(&ch
->lock
);
1783 msg
= mlx5_alloc_cmd_msg(dev
, gfp
, in_size
, 0);
1787 static int is_manage_pages(void *in
)
1789 return MLX5_GET(mbox_in
, in
, opcode
) == MLX5_CMD_OP_MANAGE_PAGES
;
static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		    int out_size, mlx5_cmd_cbk_t callback, void *context,
		    bool force_polling)
{
	struct mlx5_cmd_msg *inb;
	struct mlx5_cmd_msg *outb;
	int pages_queue;
	gfp_t gfp;
	int err;
	u8 status = 0;
	u32 drv_synd;
	u16 opcode;
	u8 token;

	opcode = MLX5_GET(mbox_in, in, opcode);
	if (mlx5_cmd_is_down(dev) || !opcode_allowed(&dev->cmd, opcode)) {
		err = mlx5_internal_err_ret_value(dev, opcode, &drv_synd, &status);
		MLX5_SET(mbox_out, out, status, status);
		MLX5_SET(mbox_out, out, syndrome, drv_synd);
		return err;
	}

	pages_queue = is_manage_pages(in);
	gfp = callback ? GFP_ATOMIC : GFP_KERNEL;

	inb = alloc_msg(dev, in_size, gfp);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		return err;
	}

	token = alloc_token(&dev->cmd);

	err = mlx5_copy_to_msg(inb, in, in_size, token);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size, token);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
			      pages_queue, &status, token, force_polling);
	if (err)
		goto out_out;

	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
	if (status) {
		err = status_to_err(status);
		goto out_out;
	}

	if (!callback)
		err = mlx5_copy_from_msg(out, outb, out_size);

out_out:
	if (!callback)
		mlx5_free_cmd_msg(dev, outb);

out_in:
	if (!callback)
		free_msg(dev, inb);
	return err;
}
*dev
, void *in
, int in_size
, void *out
,
1866 err
= cmd_exec(dev
, in
, in_size
, out
, out_size
, NULL
, NULL
, false);
1867 return err
? : mlx5_cmd_check(dev
, in
, out
);
1869 EXPORT_SYMBOL(mlx5_cmd_exec
);
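/* Added illustrative sketch of how callers elsewhere in the driver use this
 * entry point (variable names here are hypothetical, not from this file):
 *
 *	u32 in[MLX5_ST_SZ_DW(enable_hca_in)] = {};
 *	u32 out[MLX5_ST_SZ_DW(enable_hca_out)] = {};
 *	int err;
 *
 *	MLX5_SET(enable_hca_in, in, opcode, MLX5_CMD_OP_ENABLE_HCA);
 *	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
 *
 * cmd_exec() copies "in" into a command message, blocks until firmware
 * completes, copies the response back into "out" and mlx5_cmd_check()
 * translates a non-zero firmware status into an errno.
 */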
void mlx5_cmd_init_async_ctx(struct mlx5_core_dev *dev,
			     struct mlx5_async_ctx *ctx)
{
	ctx->dev = dev;
	/* Starts at 1 to avoid doing wake_up if we are not cleaning up */
	atomic_set(&ctx->num_inflight, 1);
	init_waitqueue_head(&ctx->wait);
}
EXPORT_SYMBOL(mlx5_cmd_init_async_ctx);

/**
 * mlx5_cmd_cleanup_async_ctx - Clean up an async_ctx
 * @ctx: The ctx to clean
 *
 * Upon return all callbacks given to mlx5_cmd_exec_cb() have been called. The
 * caller must ensure that mlx5_cmd_exec_cb() is not called during or after
 * the call to mlx5_cmd_cleanup_async_ctx().
 */
void mlx5_cmd_cleanup_async_ctx(struct mlx5_async_ctx *ctx)
{
	atomic_dec(&ctx->num_inflight);
	wait_event(ctx->wait, atomic_read(&ctx->num_inflight) == 0);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup_async_ctx);
static void mlx5_cmd_exec_cb_handler(int status, void *_work)
{
	struct mlx5_async_work *work = _work;
	struct mlx5_async_ctx *ctx = work->ctx;

	work->user_callback(status, work);
	if (atomic_dec_and_test(&ctx->num_inflight))
		wake_up(&ctx->wait);
}

int mlx5_cmd_exec_cb(struct mlx5_async_ctx *ctx, void *in, int in_size,
		     void *out, int out_size, mlx5_async_cbk_t callback,
		     struct mlx5_async_work *work)
{
	int ret;

	work->ctx = ctx;
	work->user_callback = callback;
	if (WARN_ON(!atomic_inc_not_zero(&ctx->num_inflight)))
		return -EIO;
	ret = cmd_exec(ctx->dev, in, in_size, out, out_size,
		       mlx5_cmd_exec_cb_handler, work, false);
	if (ret && atomic_dec_and_test(&ctx->num_inflight))
		wake_up(&ctx->wait);

	return ret;
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);
int mlx5_cmd_exec_polling(struct mlx5_core_dev *dev, void *in, int in_size,
			  void *out, int out_size)
{
	int err;

	err = cmd_exec(dev, in, in_size, out, out_size, NULL, NULL, true);

	return err ? : mlx5_cmd_check(dev, in, out);
}
EXPORT_SYMBOL(mlx5_cmd_exec_polling);
static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
	struct cmd_msg_cache *ch;
	struct mlx5_cmd_msg *msg;
	struct mlx5_cmd_msg *n;
	int i;

	for (i = 0; i < MLX5_NUM_COMMAND_CACHES; i++) {
		ch = &dev->cmd.cache[i];
		list_for_each_entry_safe(msg, n, &ch->head, list) {
			list_del(&msg->list);
			mlx5_free_cmd_msg(dev, msg);
		}
	}
}
static unsigned cmd_cache_num_ent[MLX5_NUM_COMMAND_CACHES] = {
	512, 32, 16, 8, 2
};

static unsigned cmd_cache_ent_size[MLX5_NUM_COMMAND_CACHES] = {
	16 + MLX5_CMD_DATA_BLOCK_SIZE,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 2,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 16,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 256,
	16 + MLX5_CMD_DATA_BLOCK_SIZE * 512,
};
static void create_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cmd_msg_cache *ch;
	struct mlx5_cmd_msg *msg;
	int i;
	int k;

	/* Initialize and fill the caches with initial entries */
	for (k = 0; k < MLX5_NUM_COMMAND_CACHES; k++) {
		ch = &cmd->cache[k];
		spin_lock_init(&ch->lock);
		INIT_LIST_HEAD(&ch->head);
		ch->num_ent = cmd_cache_num_ent[k];
		ch->max_inbox_size = cmd_cache_ent_size[k];
		for (i = 0; i < ch->num_ent; i++) {
			msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL | __GFP_NOWARN,
						 ch->max_inbox_size, 0);
			if (IS_ERR(msg))
				break;
			msg->parent = ch;
			list_add_tail(&msg->list, &ch->head);
		}
	}
}
*dev
, struct mlx5_cmd
*cmd
)
1992 cmd
->cmd_alloc_buf
= dma_alloc_coherent(mlx5_core_dma_dev(dev
), MLX5_ADAPTER_PAGE_SIZE
,
1993 &cmd
->alloc_dma
, GFP_KERNEL
);
1994 if (!cmd
->cmd_alloc_buf
)
1997 /* make sure it is aligned to 4K */
1998 if (!((uintptr_t)cmd
->cmd_alloc_buf
& (MLX5_ADAPTER_PAGE_SIZE
- 1))) {
1999 cmd
->cmd_buf
= cmd
->cmd_alloc_buf
;
2000 cmd
->dma
= cmd
->alloc_dma
;
2001 cmd
->alloc_size
= MLX5_ADAPTER_PAGE_SIZE
;
2005 dma_free_coherent(mlx5_core_dma_dev(dev
), MLX5_ADAPTER_PAGE_SIZE
, cmd
->cmd_alloc_buf
,
2007 cmd
->cmd_alloc_buf
= dma_alloc_coherent(mlx5_core_dma_dev(dev
),
2008 2 * MLX5_ADAPTER_PAGE_SIZE
- 1,
2009 &cmd
->alloc_dma
, GFP_KERNEL
);
2010 if (!cmd
->cmd_alloc_buf
)
2013 cmd
->cmd_buf
= PTR_ALIGN(cmd
->cmd_alloc_buf
, MLX5_ADAPTER_PAGE_SIZE
);
2014 cmd
->dma
= ALIGN(cmd
->alloc_dma
, MLX5_ADAPTER_PAGE_SIZE
);
2015 cmd
->alloc_size
= 2 * MLX5_ADAPTER_PAGE_SIZE
- 1;
static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	dma_free_coherent(mlx5_core_dma_dev(dev), cmd->alloc_size, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
}
static u16 cmdif_rev(struct mlx5_core_dev *dev)
{
	return ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
}
int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
	int size = sizeof(struct mlx5_cmd_prot_block);
	int align = roundup_pow_of_two(size);
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;
	u16 cmd_if_rev;
	int err;
	int i;

	memset(cmd, 0, sizeof(*cmd));
	cmd_if_rev = cmdif_rev(dev);
	if (cmd_if_rev != CMD_IF_REV) {
		mlx5_core_err(dev,
			      "Driver cmdif rev(%d) differs from firmware's(%d)\n",
			      CMD_IF_REV, cmd_if_rev);
		return -EINVAL;
	}

	cmd->stats = kvzalloc(MLX5_CMD_OP_MAX * sizeof(*cmd->stats), GFP_KERNEL);
	if (!cmd->stats)
		return -ENOMEM;

	cmd->pool = dma_pool_create("mlx5_cmd", mlx5_core_dma_dev(dev), size, align, 0);
	if (!cmd->pool) {
		err = -ENOMEM;
		goto dma_pool_err;
	}

	err = alloc_cmd_page(dev, cmd);
	if (err)
		goto err_free_pool;

	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->log_sz = cmd_l >> 4 & 0xf;
	cmd->log_stride = cmd_l & 0xf;
	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
		mlx5_core_err(dev, "firmware reports too many outstanding commands %d\n",
			      1 << cmd->log_sz);
		err = -EINVAL;
		goto err_free_page;
	}

	if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
		mlx5_core_err(dev, "command queue size overflow\n");
		err = -EINVAL;
		goto err_free_page;
	}

	cmd->state = MLX5_CMDIF_STATE_DOWN;
	cmd->checksum_disabled = 1;
	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
	cmd->bitmask = (1UL << cmd->max_reg_cmds) - 1;

	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
	if (cmd->cmdif_rev > CMD_IF_REV) {
		mlx5_core_err(dev, "driver does not support command interface version. driver %d, firmware %d\n",
			      CMD_IF_REV, cmd->cmdif_rev);
		err = -EOPNOTSUPP;
		goto err_free_page;
	}

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);
	for (i = 0; i < MLX5_CMD_OP_MAX; i++)
		spin_lock_init(&cmd->stats[i].lock);

	sema_init(&cmd->sem, cmd->max_reg_cmds);
	sema_init(&cmd->pages_sem, 1);

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	if (cmd_l & 0xfff) {
		mlx5_core_err(dev, "invalid command queue address\n");
		err = -ENOMEM;
		goto err_free_page;
	}

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = CMD_MODE_POLLING;
	cmd->allowed_opcode = CMD_ALLOWED_OPCODE_ALL;

	create_msg_cache(dev);

	set_wqname(dev);
	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
	if (!cmd->wq) {
		mlx5_core_err(dev, "failed to create command workqueue\n");
		err = -ENOMEM;
		goto err_cache;
	}

	create_debugfs_files(dev);

	return 0;

err_cache:
	destroy_msg_cache(dev);

err_free_page:
	free_cmd_page(dev, cmd);

err_free_pool:
	dma_pool_destroy(cmd->pool);
dma_pool_err:
	kvfree(cmd->stats);
	return err;
}
*dev
)
2148 struct mlx5_cmd
*cmd
= &dev
->cmd
;
2150 clean_debug_files(dev
);
2151 destroy_workqueue(cmd
->wq
);
2152 destroy_msg_cache(dev
);
2153 free_cmd_page(dev
, cmd
);
2154 dma_pool_destroy(cmd
->pool
);
void mlx5_cmd_set_state(struct mlx5_core_dev *dev,
			enum mlx5_cmdif_state cmdif_state)
{
	dev->cmd.state = cmdif_state;
}
;