/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/random.h>
#include <linux/io-mapping.h>
#include <linux/mlx5/driver.h>
#include <linux/debugfs.h>

#include "mlx5_core.h"

enum {
	NUM_LONG_LISTS	= 2,
	NUM_MED_LISTS	= 64,
	LONG_LIST_SIZE	= (2ULL * 1024 * 1024 * 1024 / PAGE_SIZE) * 8 + 16 +
			  MLX5_CMD_DATA_BLOCK_SIZE,
	MED_LIST_SIZE	= 16 + MLX5_CMD_DATA_BLOCK_SIZE,
};

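/* Sizing note (editorial, assuming 4 KiB pages): a MANAGE_PAGES command
 * covering 2 GiB of memory must carry 2 GiB / 4 KiB = 524288 page addresses
 * of 8 bytes each, i.e. 4 MiB of pointers, plus the 16 inline bytes of the
 * descriptor; the extra MLX5_CMD_DATA_BLOCK_SIZE rounds the mailbox chain up
 * to whole blocks. MED_LIST_SIZE covers the common small commands: 16 inline
 * bytes plus a single data block.
 */
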
enum {
	MLX5_CMD_DELIVERY_STAT_OK			= 0x0,
	MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR		= 0x1,
	MLX5_CMD_DELIVERY_STAT_TOK_ERR			= 0x2,
	MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR		= 0x3,
	MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR	= 0x4,
	MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR		= 0x5,
	MLX5_CMD_DELIVERY_STAT_FW_ERR			= 0x6,
	MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR		= 0x7,
	MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR		= 0x8,
	MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR	= 0x9,
	MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR		= 0x10,
};

static struct mlx5_cmd_work_ent *alloc_cmd(struct mlx5_cmd *cmd,
					   struct mlx5_cmd_msg *in,
					   struct mlx5_cmd_msg *out,
					   void *uout, int uout_size,
					   mlx5_cmd_cbk_t cbk,
					   void *context, int page_queue)
{
	gfp_t alloc_flags = cbk ? GFP_ATOMIC : GFP_KERNEL;
	struct mlx5_cmd_work_ent *ent;

	ent = kzalloc(sizeof(*ent), alloc_flags);
	if (!ent)
		return ERR_PTR(-ENOMEM);

	ent->in		= in;
	ent->out	= out;
	ent->uout	= uout;
	ent->uout_size	= uout_size;
	ent->callback	= cbk;
	ent->context	= context;
	ent->cmd	= cmd;
	ent->page_queue	= page_queue;

	return ent;
}

static u8 alloc_token(struct mlx5_cmd *cmd)
{
	u8 token;

	spin_lock(&cmd->token_lock);
	cmd->token++;
	if (cmd->token == 0)
		cmd->token++;
	token = cmd->token;
	spin_unlock(&cmd->token_lock);

	return token;
}

static int alloc_ent(struct mlx5_cmd *cmd)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	ret = find_first_bit(&cmd->bitmask, cmd->max_reg_cmds);
	if (ret < cmd->max_reg_cmds)
		clear_bit(ret, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);

	return ret < cmd->max_reg_cmds ? ret : -ENOMEM;
}

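/* Note (editorial): in cmd->bitmask a set bit means "entry free". alloc_ent()
 * claims the lowest free slot by clearing its bit; free_ent() below returns a
 * slot by setting the bit again. With max_reg_cmds capped below 32, the whole
 * free map fits in a single unsigned long.
 */
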
static void free_ent(struct mlx5_cmd *cmd, int idx)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->alloc_lock, flags);
	set_bit(idx, &cmd->bitmask);
	spin_unlock_irqrestore(&cmd->alloc_lock, flags);
}

static struct mlx5_cmd_layout *get_inst(struct mlx5_cmd *cmd, int idx)
{
	return cmd->cmd_buf + (idx << cmd->log_stride);
}

static u8 xor8_buf(void *buf, int len)
{
	u8 *ptr = buf;
	u8 sum = 0;
	int i;

	for (i = 0; i < len; i++)
		sum ^= ptr[i];

	return sum;
}

static int verify_block_sig(struct mlx5_cmd_prot_block *block)
{
	if (xor8_buf(block->rsvd0, sizeof(*block) - sizeof(block->data) - 1) != 0xff)
		return -EINVAL;

	if (xor8_buf(block, sizeof(*block)) != 0xff)
		return -EINVAL;

	return 0;
}

static void calc_block_sig(struct mlx5_cmd_prot_block *block, u8 token,
			   int csum)
{
	block->token = token;
	if (csum) {
		block->ctrl_sig = ~xor8_buf(block->rsvd0, sizeof(*block) -
					    sizeof(block->data) - 2);
		block->sig = ~xor8_buf(block, sizeof(*block) - 1);
	}
}

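/* Signature invariant (editorial): each sig byte is written as the bitwise
 * NOT of the XOR of all other covered bytes, so XOR-ing the covered region
 * *including* the sig byte must yield 0xff - exactly what verify_block_sig()
 * checks. Illustrative two-byte example:
 *
 *	u8 data = 0x5a;
 *	u8 sig  = ~data;	// 0xa5
 *	// data ^ sig == 0xff, so verification passes
 */
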
static void calc_chain_sig(struct mlx5_cmd_msg *msg, u8 token, int csum)
{
	struct mlx5_cmd_mailbox *next = msg->next;

	while (next) {
		calc_block_sig(next->buf, token, csum);
		next = next->next;
	}
}

static void set_signature(struct mlx5_cmd_work_ent *ent, int csum)
{
	ent->lay->sig = ~xor8_buf(ent->lay, sizeof(*ent->lay));
	calc_chain_sig(ent->in, ent->token, csum);
	calc_chain_sig(ent->out, ent->token, csum);
}

static void poll_timeout(struct mlx5_cmd_work_ent *ent)
{
	unsigned long poll_end = jiffies + msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC + 1000);
	u8 own;

	do {
		own = ent->lay->status_own;
		if (!(own & CMD_OWNER_HW)) {
			ent->ret = 0;
			return;
		}
		usleep_range(5000, 10000);
	} while (time_before(jiffies, poll_end));

	ent->ret = -ETIMEDOUT;
}

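/* Polling note (editorial): the ownership bit in status_own is set by the
 * driver when the command is posted and cleared by the device on completion,
 * so polling is just re-reading that byte until CMD_OWNER_HW drops. The extra
 * second beyond MLX5_CMD_TIMEOUT_MSEC gives firmware a grace period before
 * the command is declared timed out.
 */
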
static void free_cmd(struct mlx5_cmd_work_ent *ent)
{
	kfree(ent);
}

static int verify_signature(struct mlx5_cmd_work_ent *ent)
{
	struct mlx5_cmd_mailbox *next = ent->out->next;
	int err;
	u8 sig;

	sig = xor8_buf(ent->lay, sizeof(*ent->lay));
	if (sig != 0xff)
		return -EINVAL;

	while (next) {
		err = verify_block_sig(next->buf);
		if (err)
			return err;

		next = next->next;
	}

	return 0;
}

static void dump_buf(void *buf, int size, int data_only, int offset)
{
	__be32 *p = buf;
	int i;

	for (i = 0; i < size; i += 16) {
		pr_debug("%03x: %08x %08x %08x %08x\n", offset, be32_to_cpu(p[0]),
			 be32_to_cpu(p[1]), be32_to_cpu(p[2]),
			 be32_to_cpu(p[3]));
		p += 4;
		offset += 16;
	}
	if (!data_only)
		pr_debug("\n");
}

enum {
	MLX5_DRIVER_STATUS_ABORTED = 0xfe,
	MLX5_DRIVER_SYND = 0xbadd00de,
};

static int mlx5_internal_err_ret_value(struct mlx5_core_dev *dev, u16 op,
				       u32 *synd, u8 *status)
{
	*synd = 0;
	*status = 0;

	switch (op) {
	case MLX5_CMD_OP_TEARDOWN_HCA:
	case MLX5_CMD_OP_DISABLE_HCA:
	case MLX5_CMD_OP_MANAGE_PAGES:
	case MLX5_CMD_OP_DESTROY_MKEY:
	case MLX5_CMD_OP_DESTROY_EQ:
	case MLX5_CMD_OP_DESTROY_CQ:
	case MLX5_CMD_OP_DESTROY_QP:
	case MLX5_CMD_OP_DESTROY_PSV:
	case MLX5_CMD_OP_DESTROY_SRQ:
	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
	case MLX5_CMD_OP_DESTROY_DCT:
	case MLX5_CMD_OP_DEALLOC_Q_COUNTER:
	case MLX5_CMD_OP_DEALLOC_PD:
	case MLX5_CMD_OP_DEALLOC_UAR:
	case MLX5_CMD_OP_DETTACH_FROM_MCG:
	case MLX5_CMD_OP_DEALLOC_XRCD:
	case MLX5_CMD_OP_DEALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_DELETE_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_DELETE_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_DESTROY_TIR:
	case MLX5_CMD_OP_DESTROY_SQ:
	case MLX5_CMD_OP_DESTROY_RQ:
	case MLX5_CMD_OP_DESTROY_RMP:
	case MLX5_CMD_OP_DESTROY_TIS:
	case MLX5_CMD_OP_DESTROY_RQT:
	case MLX5_CMD_OP_DESTROY_FLOW_TABLE:
	case MLX5_CMD_OP_DESTROY_FLOW_GROUP:
	case MLX5_CMD_OP_DELETE_FLOW_TABLE_ENTRY:
		return MLX5_CMD_STAT_OK;

	case MLX5_CMD_OP_QUERY_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ADAPTER:
	case MLX5_CMD_OP_INIT_HCA:
	case MLX5_CMD_OP_ENABLE_HCA:
	case MLX5_CMD_OP_QUERY_PAGES:
	case MLX5_CMD_OP_SET_HCA_CAP:
	case MLX5_CMD_OP_QUERY_ISSI:
	case MLX5_CMD_OP_SET_ISSI:
	case MLX5_CMD_OP_CREATE_MKEY:
	case MLX5_CMD_OP_QUERY_MKEY:
	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
	case MLX5_CMD_OP_PAGE_FAULT_RESUME:
	case MLX5_CMD_OP_CREATE_EQ:
	case MLX5_CMD_OP_QUERY_EQ:
	case MLX5_CMD_OP_GEN_EQE:
	case MLX5_CMD_OP_CREATE_CQ:
	case MLX5_CMD_OP_QUERY_CQ:
	case MLX5_CMD_OP_MODIFY_CQ:
	case MLX5_CMD_OP_CREATE_QP:
	case MLX5_CMD_OP_RST2INIT_QP:
	case MLX5_CMD_OP_INIT2RTR_QP:
	case MLX5_CMD_OP_RTR2RTS_QP:
	case MLX5_CMD_OP_RTS2RTS_QP:
	case MLX5_CMD_OP_SQERR2RTS_QP:
	case MLX5_CMD_OP_2ERR_QP:
	case MLX5_CMD_OP_2RST_QP:
	case MLX5_CMD_OP_QUERY_QP:
	case MLX5_CMD_OP_SQD_RTS_QP:
	case MLX5_CMD_OP_INIT2INIT_QP:
	case MLX5_CMD_OP_CREATE_PSV:
	case MLX5_CMD_OP_CREATE_SRQ:
	case MLX5_CMD_OP_QUERY_SRQ:
	case MLX5_CMD_OP_ARM_RQ:
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
	case MLX5_CMD_OP_ARM_XRC_SRQ:
	case MLX5_CMD_OP_CREATE_DCT:
	case MLX5_CMD_OP_DRAIN_DCT:
	case MLX5_CMD_OP_QUERY_DCT:
	case MLX5_CMD_OP_ARM_DCT_FOR_KEY_VIOLATION:
	case MLX5_CMD_OP_QUERY_VPORT_STATE:
	case MLX5_CMD_OP_MODIFY_VPORT_STATE:
	case MLX5_CMD_OP_QUERY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_ESW_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_NIC_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_ROCE_ADDRESS:
	case MLX5_CMD_OP_SET_ROCE_ADDRESS:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_MODIFY_HCA_VPORT_CONTEXT:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_GID:
	case MLX5_CMD_OP_QUERY_HCA_VPORT_PKEY:
	case MLX5_CMD_OP_QUERY_VPORT_COUNTER:
	case MLX5_CMD_OP_ALLOC_Q_COUNTER:
	case MLX5_CMD_OP_QUERY_Q_COUNTER:
	case MLX5_CMD_OP_ALLOC_PD:
	case MLX5_CMD_OP_ALLOC_UAR:
	case MLX5_CMD_OP_CONFIG_INT_MODERATION:
	case MLX5_CMD_OP_ACCESS_REG:
	case MLX5_CMD_OP_ATTACH_TO_MCG:
	case MLX5_CMD_OP_GET_DROPPED_PACKET_LOG:
	case MLX5_CMD_OP_MAD_IFC:
	case MLX5_CMD_OP_QUERY_MAD_DEMUX:
	case MLX5_CMD_OP_SET_MAD_DEMUX:
	case MLX5_CMD_OP_NOP:
	case MLX5_CMD_OP_ALLOC_XRCD:
	case MLX5_CMD_OP_ALLOC_TRANSPORT_DOMAIN:
	case MLX5_CMD_OP_QUERY_CONG_STATUS:
	case MLX5_CMD_OP_MODIFY_CONG_STATUS:
	case MLX5_CMD_OP_QUERY_CONG_PARAMS:
	case MLX5_CMD_OP_MODIFY_CONG_PARAMS:
	case MLX5_CMD_OP_QUERY_CONG_STATISTICS:
	case MLX5_CMD_OP_ADD_VXLAN_UDP_DPORT:
	case MLX5_CMD_OP_SET_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_L2_TABLE_ENTRY:
	case MLX5_CMD_OP_CREATE_TIR:
	case MLX5_CMD_OP_MODIFY_TIR:
	case MLX5_CMD_OP_QUERY_TIR:
	case MLX5_CMD_OP_CREATE_SQ:
	case MLX5_CMD_OP_MODIFY_SQ:
	case MLX5_CMD_OP_QUERY_SQ:
	case MLX5_CMD_OP_CREATE_RQ:
	case MLX5_CMD_OP_MODIFY_RQ:
	case MLX5_CMD_OP_QUERY_RQ:
	case MLX5_CMD_OP_CREATE_RMP:
	case MLX5_CMD_OP_MODIFY_RMP:
	case MLX5_CMD_OP_QUERY_RMP:
	case MLX5_CMD_OP_CREATE_TIS:
	case MLX5_CMD_OP_MODIFY_TIS:
	case MLX5_CMD_OP_QUERY_TIS:
	case MLX5_CMD_OP_CREATE_RQT:
	case MLX5_CMD_OP_MODIFY_RQT:
	case MLX5_CMD_OP_QUERY_RQT:
	case MLX5_CMD_OP_CREATE_FLOW_TABLE:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE:
	case MLX5_CMD_OP_CREATE_FLOW_GROUP:
	case MLX5_CMD_OP_QUERY_FLOW_GROUP:
	case MLX5_CMD_OP_SET_FLOW_TABLE_ENTRY:
	case MLX5_CMD_OP_QUERY_FLOW_TABLE_ENTRY:
		*status = MLX5_DRIVER_STATUS_ABORTED;
		*synd = MLX5_DRIVER_SYND;
		return -EIO;
	default:
		mlx5_core_err(dev, "Unknown FW command (%d)\n", op);
		return -EINVAL;
	}
}

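/* Rationale (editorial): when the device is in an internal error state,
 * teardown-type commands (DESTROY_*, DEALLOC_*, DELETE_*, ...) are reported
 * as successful so that driver cleanup can run to completion without talking
 * to dead firmware, while everything else fails with a driver-generated
 * syndrome (0xbadd00de) that is easy to recognize in logs.
 */
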
const char *mlx5_command_str(int command)
{
	switch (command) {
	case MLX5_CMD_OP_QUERY_HCA_CAP:
		return "QUERY_HCA_CAP";
	case MLX5_CMD_OP_SET_HCA_CAP:
		return "SET_HCA_CAP";
	case MLX5_CMD_OP_QUERY_ADAPTER:
		return "QUERY_ADAPTER";
	case MLX5_CMD_OP_INIT_HCA:
		return "INIT_HCA";
	case MLX5_CMD_OP_TEARDOWN_HCA:
		return "TEARDOWN_HCA";
	case MLX5_CMD_OP_ENABLE_HCA:
		return "ENABLE_HCA";
	case MLX5_CMD_OP_DISABLE_HCA:
		return "DISABLE_HCA";
	case MLX5_CMD_OP_QUERY_PAGES:
		return "QUERY_PAGES";
	case MLX5_CMD_OP_MANAGE_PAGES:
		return "MANAGE_PAGES";
	case MLX5_CMD_OP_CREATE_MKEY:
		return "CREATE_MKEY";
	case MLX5_CMD_OP_QUERY_MKEY:
		return "QUERY_MKEY";
	case MLX5_CMD_OP_DESTROY_MKEY:
		return "DESTROY_MKEY";
	case MLX5_CMD_OP_QUERY_SPECIAL_CONTEXTS:
		return "QUERY_SPECIAL_CONTEXTS";
	case MLX5_CMD_OP_CREATE_EQ:
		return "CREATE_EQ";
	case MLX5_CMD_OP_DESTROY_EQ:
		return "DESTROY_EQ";
	case MLX5_CMD_OP_QUERY_EQ:
		return "QUERY_EQ";
	case MLX5_CMD_OP_CREATE_CQ:
		return "CREATE_CQ";
	case MLX5_CMD_OP_DESTROY_CQ:
		return "DESTROY_CQ";
	case MLX5_CMD_OP_QUERY_CQ:
		return "QUERY_CQ";
	case MLX5_CMD_OP_MODIFY_CQ:
		return "MODIFY_CQ";
	case MLX5_CMD_OP_CREATE_QP:
		return "CREATE_QP";
	case MLX5_CMD_OP_DESTROY_QP:
		return "DESTROY_QP";
	case MLX5_CMD_OP_RST2INIT_QP:
		return "RST2INIT_QP";
	case MLX5_CMD_OP_INIT2RTR_QP:
		return "INIT2RTR_QP";
	case MLX5_CMD_OP_RTR2RTS_QP:
		return "RTR2RTS_QP";
	case MLX5_CMD_OP_RTS2RTS_QP:
		return "RTS2RTS_QP";
	case MLX5_CMD_OP_SQERR2RTS_QP:
		return "SQERR2RTS_QP";
	case MLX5_CMD_OP_2ERR_QP:
		return "2ERR_QP";
	case MLX5_CMD_OP_2RST_QP:
		return "2RST_QP";
	case MLX5_CMD_OP_QUERY_QP:
		return "QUERY_QP";
	case MLX5_CMD_OP_MAD_IFC:
		return "MAD_IFC";
	case MLX5_CMD_OP_INIT2INIT_QP:
		return "INIT2INIT_QP";
	case MLX5_CMD_OP_CREATE_PSV:
		return "CREATE_PSV";
	case MLX5_CMD_OP_DESTROY_PSV:
		return "DESTROY_PSV";
	case MLX5_CMD_OP_CREATE_SRQ:
		return "CREATE_SRQ";
	case MLX5_CMD_OP_DESTROY_SRQ:
		return "DESTROY_SRQ";
	case MLX5_CMD_OP_QUERY_SRQ:
		return "QUERY_SRQ";
	case MLX5_CMD_OP_ARM_RQ:
		return "ARM_RQ";
	case MLX5_CMD_OP_CREATE_XRC_SRQ:
		return "CREATE_XRC_SRQ";
	case MLX5_CMD_OP_DESTROY_XRC_SRQ:
		return "DESTROY_XRC_SRQ";
	case MLX5_CMD_OP_QUERY_XRC_SRQ:
		return "QUERY_XRC_SRQ";
	case MLX5_CMD_OP_ARM_XRC_SRQ:
		return "ARM_XRC_SRQ";
	case MLX5_CMD_OP_ALLOC_PD:
		return "ALLOC_PD";
	case MLX5_CMD_OP_DEALLOC_PD:
		return "DEALLOC_PD";
	case MLX5_CMD_OP_ALLOC_UAR:
		return "ALLOC_UAR";
	case MLX5_CMD_OP_DEALLOC_UAR:
		return "DEALLOC_UAR";
	case MLX5_CMD_OP_ATTACH_TO_MCG:
		return "ATTACH_TO_MCG";
	case MLX5_CMD_OP_DETTACH_FROM_MCG:
		return "DETTACH_FROM_MCG";
	case MLX5_CMD_OP_ALLOC_XRCD:
		return "ALLOC_XRCD";
	case MLX5_CMD_OP_DEALLOC_XRCD:
		return "DEALLOC_XRCD";
	case MLX5_CMD_OP_ACCESS_REG:
		return "ACCESS_REG";
	default: return "unknown command opcode";
	}
}

static void dump_command(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_work_ent *ent, int input)
{
	u16 op = be16_to_cpu(((struct mlx5_inbox_hdr *)(ent->lay->in))->opcode);
	struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
	struct mlx5_cmd_mailbox *next = msg->next;
	int data_only;
	u32 offset = 0;
	int dump_len;

	data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));

	if (data_only)
		mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_DATA,
				   "dump command data %s(0x%x) %s\n",
				   mlx5_command_str(op), op,
				   input ? "INPUT" : "OUTPUT");
	else
		mlx5_core_dbg(dev, "dump command %s(0x%x) %s\n",
			      mlx5_command_str(op), op,
			      input ? "INPUT" : "OUTPUT");

	if (data_only) {
		if (input) {
			dump_buf(ent->lay->in, sizeof(ent->lay->in), 1, offset);
			offset += sizeof(ent->lay->in);
		} else {
			dump_buf(ent->lay->out, sizeof(ent->lay->out), 1, offset);
			offset += sizeof(ent->lay->out);
		}
	} else {
		dump_buf(ent->lay, sizeof(*ent->lay), 0, offset);
		offset += sizeof(*ent->lay);
	}

	while (next && offset < msg->len) {
		if (data_only) {
			dump_len = min_t(int, MLX5_CMD_DATA_BLOCK_SIZE, msg->len - offset);
			dump_buf(next->buf, dump_len, 1, offset);
			offset += MLX5_CMD_DATA_BLOCK_SIZE;
		} else {
			mlx5_core_dbg(dev, "command block:\n");
			dump_buf(next->buf, sizeof(struct mlx5_cmd_prot_block), 0, offset);
			offset += sizeof(struct mlx5_cmd_prot_block);
		}
		next = next->next;
	}

	if (data_only)
		pr_debug("\n");
}

static void cmd_work_handler(struct work_struct *work)
{
	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
	struct mlx5_cmd *cmd = ent->cmd;
	struct mlx5_core_dev *dev = container_of(cmd, struct mlx5_core_dev, cmd);
	struct mlx5_cmd_layout *lay;
	struct semaphore *sem;
	unsigned long flags;

	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
	down(sem);
	if (!ent->page_queue) {
		ent->idx = alloc_ent(cmd);
		if (ent->idx < 0) {
			mlx5_core_err(dev, "failed to allocate command entry\n");
			up(sem);
			return;
		}
	} else {
		ent->idx = cmd->max_reg_cmds;
		spin_lock_irqsave(&cmd->alloc_lock, flags);
		clear_bit(ent->idx, &cmd->bitmask);
		spin_unlock_irqrestore(&cmd->alloc_lock, flags);
	}

	ent->token = alloc_token(cmd);
	cmd->ent_arr[ent->idx] = ent;
	lay = get_inst(cmd, ent->idx);
	ent->lay = lay;
	memset(lay, 0, sizeof(*lay));
	memcpy(lay->in, ent->in->first.data, sizeof(lay->in));
	ent->op = be32_to_cpu(lay->in[0]) >> 16;
	if (ent->in->next)
		lay->in_ptr = cpu_to_be64(ent->in->next->dma);
	lay->inlen = cpu_to_be32(ent->in->len);
	if (ent->out->next)
		lay->out_ptr = cpu_to_be64(ent->out->next->dma);
	lay->outlen = cpu_to_be32(ent->out->len);
	lay->type = MLX5_PCI_CMD_XPORT;
	lay->token = ent->token;
	lay->status_own = CMD_OWNER_HW;
	set_signature(ent, !cmd->checksum_disabled);
	dump_command(dev, ent, 1);
	ent->ts1 = ktime_get_ns();

	/* ring doorbell after the descriptor is valid */
	mlx5_core_dbg(dev, "writing 0x%x to command doorbell\n", 1 << ent->idx);
	wmb();
	iowrite32be(1 << ent->idx, &dev->iseg->cmd_dbell);
	mmiowb();
	/* if not in polling don't use ent after this point */
	if (cmd->mode == CMD_MODE_POLLING) {
		poll_timeout(ent);
		/* make sure we read the descriptor after ownership is SW */
		rmb();
		mlx5_cmd_comp_handler(dev, 1UL << ent->idx);
	}
}

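/* Ordering note (editorial): the write barrier before the doorbell write
 * ensures the descriptor, including status_own = CMD_OWNER_HW, is globally
 * visible before the device is told to fetch it, and the read barrier after
 * polling ensures the completion data is read only after ownership has
 * returned to software.
 */
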
static const char *deliv_status_to_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_DELIVERY_STAT_OK:
		return "no errors";
	case MLX5_CMD_DELIVERY_STAT_SIGNAT_ERR:
		return "signature error";
	case MLX5_CMD_DELIVERY_STAT_TOK_ERR:
		return "token error";
	case MLX5_CMD_DELIVERY_STAT_BAD_BLK_NUM_ERR:
		return "bad block number";
	case MLX5_CMD_DELIVERY_STAT_OUT_PTR_ALIGN_ERR:
		return "output pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_IN_PTR_ALIGN_ERR:
		return "input pointer not aligned to block size";
	case MLX5_CMD_DELIVERY_STAT_FW_ERR:
		return "firmware internal error";
	case MLX5_CMD_DELIVERY_STAT_IN_LENGTH_ERR:
		return "command input length error";
	case MLX5_CMD_DELIVERY_STAT_OUT_LENGTH_ERR:
		return "command output length error";
	case MLX5_CMD_DELIVERY_STAT_RES_FLD_NOT_CLR_ERR:
		return "reserved fields not cleared";
	case MLX5_CMD_DELIVERY_STAT_CMD_DESCR_ERR:
		return "bad command descriptor type";
	default:
		return "unknown status code";
	}
}

static u16 msg_to_opcode(struct mlx5_cmd_msg *in)
{
	struct mlx5_inbox_hdr *hdr = (struct mlx5_inbox_hdr *)(in->first.data);

	return be16_to_cpu(hdr->opcode);
}

static int wait_func(struct mlx5_core_dev *dev, struct mlx5_cmd_work_ent *ent)
{
	unsigned long timeout = msecs_to_jiffies(MLX5_CMD_TIMEOUT_MSEC);
	struct mlx5_cmd *cmd = &dev->cmd;
	int err;

	if (cmd->mode == CMD_MODE_POLLING) {
		wait_for_completion(&ent->done);
		err = ent->ret;
	} else {
		if (!wait_for_completion_timeout(&ent->done, timeout))
			err = -ETIMEDOUT;
		else
			err = 0;
	}
	if (err == -ETIMEDOUT) {
		mlx5_core_warn(dev, "%s(0x%x) timeout. Will cause a leak of a command resource\n",
			       mlx5_command_str(msg_to_opcode(ent->in)),
			       msg_to_opcode(ent->in));
	}
	mlx5_core_dbg(dev, "err %d, delivery status %s(%d)\n",
		      err, deliv_status_to_str(ent->status), ent->status);

	return err;
}

static __be32 *get_synd_ptr(struct mlx5_outbox_hdr *out)
{
	return &out->syndrome;
}

static u8 *get_status_ptr(struct mlx5_outbox_hdr *out)
{
	return &out->status;
}

/* Notes:
 *	1. Callback functions may not sleep
 *	2. Page queue commands do not support asynchronous completion
 */
static int mlx5_cmd_invoke(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *in,
			   struct mlx5_cmd_msg *out, void *uout, int uout_size,
			   mlx5_cmd_cbk_t callback,
			   void *context, int page_queue, u8 *status)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	struct mlx5_cmd_stats *stats;
	int err = 0;
	s64 ds;
	u16 op;

	if (callback && page_queue)
		return -EINVAL;

	ent = alloc_cmd(cmd, in, out, uout, uout_size, callback, context,
			page_queue);
	if (IS_ERR(ent))
		return PTR_ERR(ent);

	if (!callback)
		init_completion(&ent->done);

	INIT_WORK(&ent->work, cmd_work_handler);
	if (page_queue) {
		cmd_work_handler(&ent->work);
	} else if (!queue_work(cmd->wq, &ent->work)) {
		mlx5_core_warn(dev, "failed to queue work\n");
		err = -ENOMEM;
		goto out_free;
	}

	if (callback)
		goto out;

	err = wait_func(dev, ent);
	if (err == -ETIMEDOUT)
		goto out;

	ds = ent->ts2 - ent->ts1;
	op = be16_to_cpu(((struct mlx5_inbox_hdr *)in->first.data)->opcode);
	if (op < ARRAY_SIZE(cmd->stats)) {
		stats = &cmd->stats[op];
		spin_lock_irq(&stats->lock);
		stats->sum += ds;
		stats->n++;
		spin_unlock_irq(&stats->lock);
	}
	mlx5_core_dbg_mask(dev, 1 << MLX5_CMD_TIME,
			   "fw exec time for %s is %lld nsec\n",
			   mlx5_command_str(op), ds);
	*status = ent->status;

out_free:
	free_cmd(ent);
out:
	return err;
}

static ssize_t dbg_write(struct file *filp, const char __user *buf,
			 size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char lbuf[3];
	int err;

	if (!dbg->in_msg || !dbg->out_msg)
		return -ENOMEM;

	if (copy_from_user(lbuf, buf, sizeof(lbuf)))
		return -EFAULT;

	lbuf[sizeof(lbuf) - 1] = 0;

	if (strcmp(lbuf, "go"))
		return -EINVAL;

	err = mlx5_cmd_exec(dev, dbg->in_msg, dbg->inlen, dbg->out_msg, dbg->outlen);

	return err ? err : count;
}

static const struct file_operations fops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= dbg_write,
};

static int mlx5_copy_to_msg(struct mlx5_cmd_msg *to, void *from, int size)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(to->first.data));
	memcpy(to->first.data, from, copy);
	size -= copy;
	from += copy;

	next = to->next;
	while (size) {
		if (!next) {
			/* this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;
		memcpy(block->data, from, copy);
		from += copy;
		size -= copy;
		next = next->next;
	}

	return 0;
}

static int mlx5_copy_from_msg(void *to, struct mlx5_cmd_msg *from, int size)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_mailbox *next;
	int copy;

	if (!to || !from)
		return -ENOMEM;

	copy = min_t(int, size, sizeof(from->first.data));
	memcpy(to, from->first.data, copy);
	size -= copy;
	to += copy;

	next = from->next;
	while (size) {
		if (!next) {
			/* this is a BUG */
			return -ENOMEM;
		}

		copy = min_t(int, size, MLX5_CMD_DATA_BLOCK_SIZE);
		block = next->buf;

		memcpy(to, block->data, copy);
		to += copy;
		size -= copy;
		next = next->next;
	}

	return 0;
}

static struct mlx5_cmd_mailbox *alloc_cmd_box(struct mlx5_core_dev *dev,
					      gfp_t flags)
{
	struct mlx5_cmd_mailbox *mailbox;

	mailbox = kmalloc(sizeof(*mailbox), flags);
	if (!mailbox)
		return ERR_PTR(-ENOMEM);

	mailbox->buf = pci_pool_alloc(dev->cmd.pool, flags,
				      &mailbox->dma);
	if (!mailbox->buf) {
		mlx5_core_dbg(dev, "failed allocation\n");
		kfree(mailbox);
		return ERR_PTR(-ENOMEM);
	}
	memset(mailbox->buf, 0, sizeof(struct mlx5_cmd_prot_block));
	mailbox->next = NULL;

	return mailbox;
}

static void free_cmd_box(struct mlx5_core_dev *dev,
			 struct mlx5_cmd_mailbox *mailbox)
{
	pci_pool_free(dev->cmd.pool, mailbox->buf, mailbox->dma);
	kfree(mailbox);
}

static struct mlx5_cmd_msg *mlx5_alloc_cmd_msg(struct mlx5_core_dev *dev,
					       gfp_t flags, int size)
{
	struct mlx5_cmd_mailbox *tmp, *head = NULL;
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_msg *msg;
	int blen;
	int err;
	int n;
	int i;

	msg = kzalloc(sizeof(*msg), flags);
	if (!msg)
		return ERR_PTR(-ENOMEM);

	blen = size - min_t(int, sizeof(msg->first.data), size);
	n = (blen + MLX5_CMD_DATA_BLOCK_SIZE - 1) / MLX5_CMD_DATA_BLOCK_SIZE;

	for (i = 0; i < n; i++) {
		tmp = alloc_cmd_box(dev, flags);
		if (IS_ERR(tmp)) {
			mlx5_core_warn(dev, "failed allocating block\n");
			err = PTR_ERR(tmp);
			goto err_alloc;
		}

		block = tmp->buf;
		tmp->next = head;
		block->next = cpu_to_be64(tmp->next ? tmp->next->dma : 0);
		block->block_num = cpu_to_be32(n - i - 1);
		head = tmp;
	}
	msg->next = head;
	msg->len = size;
	return msg;

err_alloc:
	while (head) {
		tmp = head->next;
		free_cmd_box(dev, head);
		head = tmp;
	}
	kfree(msg);

	return ERR_PTR(err);
}

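/* Layout note (editorial): blocks are prepended to the list, so the loop
 * builds the chain tail-first. Iteration i creates the block whose block_num
 * is n - i - 1, and block->next holds the DMA address of the block that
 * follows it in the message, or 0 for the last one. For n = 3 the final chain
 * is block 0 -> block 1 -> block 2 -> (next = 0).
 */
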
static void mlx5_free_cmd_msg(struct mlx5_core_dev *dev,
			      struct mlx5_cmd_msg *msg)
{
	struct mlx5_cmd_mailbox *head = msg->next;
	struct mlx5_cmd_mailbox *next;

	while (head) {
		next = head->next;
		free_cmd_box(dev, head);
		head = next;
	}
	kfree(msg);
}

static ssize_t data_write(struct file *filp, const char __user *buf,
			  size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	void *ptr;
	int err;

	if (*pos != 0)
		return -EINVAL;

	kfree(dbg->in_msg);
	dbg->in_msg = NULL;
	dbg->inlen = 0;

	ptr = kzalloc(count, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	if (copy_from_user(ptr, buf, count)) {
		err = -EFAULT;
		goto out;
	}
	dbg->in_msg = ptr;
	dbg->inlen = count;

	*pos = count;

	return count;

out:
	kfree(ptr);
	return err;
}

static ssize_t data_read(struct file *filp, char __user *buf, size_t count,
			 loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int copy;

	if (*pos)
		return 0;

	if (!dbg->out_msg)
		return -ENOMEM;

	copy = min_t(int, count, dbg->outlen);
	if (copy_to_user(buf, dbg->out_msg, copy))
		return -EFAULT;

	*pos += copy;

	return copy;
}

static const struct file_operations dfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= data_write,
	.read	= data_read,
};

static ssize_t outlen_read(struct file *filp, char __user *buf, size_t count,
			   loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen[8];
	int err;

	if (*pos)
		return 0;

	err = snprintf(outlen, sizeof(outlen), "%d", dbg->outlen);
	if (err < 0)
		return err;

	if (copy_to_user(buf, &outlen, err))
		return -EFAULT;

	*pos += err;

	return err;
}

static ssize_t outlen_write(struct file *filp, const char __user *buf,
			    size_t count, loff_t *pos)
{
	struct mlx5_core_dev *dev = filp->private_data;
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	char outlen_str[8];
	int outlen;
	void *ptr;
	int err;

	if (*pos != 0 || count > 6)
		return -EINVAL;

	kfree(dbg->out_msg);
	dbg->out_msg = NULL;
	dbg->outlen = 0;

	if (copy_from_user(outlen_str, buf, count))
		return -EFAULT;

	outlen_str[7] = 0;

	err = sscanf(outlen_str, "%d", &outlen);
	if (err < 0)
		return err;

	ptr = kzalloc(outlen, GFP_KERNEL);
	if (!ptr)
		return -ENOMEM;

	dbg->out_msg = ptr;
	dbg->outlen = outlen;

	*pos = count;

	return count;
}

static const struct file_operations olfops = {
	.owner	= THIS_MODULE,
	.open	= simple_open,
	.write	= outlen_write,
	.read	= outlen_read,
};

static void set_wqname(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	snprintf(cmd->wq_name, sizeof(cmd->wq_name), "mlx5_cmd_%s",
		 dev_name(&dev->pdev->dev));
}

static void clean_debug_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;

	if (!mlx5_debugfs_root)
		return;

	mlx5_cmdif_debugfs_cleanup(dev);
	debugfs_remove_recursive(dbg->dbg_root);
}

static int create_debugfs_files(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd_debug *dbg = &dev->cmd.dbg;
	int err = -ENOMEM;

	if (!mlx5_debugfs_root)
		return 0;

	dbg->dbg_root = debugfs_create_dir("cmd", dev->priv.dbg_root);
	if (!dbg->dbg_root)
		return err;

	dbg->dbg_in = debugfs_create_file("in", 0400, dbg->dbg_root,
					  dev, &dfops);
	if (!dbg->dbg_in)
		goto err_dbg;

	dbg->dbg_out = debugfs_create_file("out", 0200, dbg->dbg_root,
					   dev, &dfops);
	if (!dbg->dbg_out)
		goto err_dbg;

	dbg->dbg_outlen = debugfs_create_file("out_len", 0600, dbg->dbg_root,
					      dev, &olfops);
	if (!dbg->dbg_outlen)
		goto err_dbg;

	dbg->dbg_status = debugfs_create_u8("status", 0600, dbg->dbg_root,
					    &dbg->status);
	if (!dbg->dbg_status)
		goto err_dbg;

	dbg->dbg_run = debugfs_create_file("run", 0200, dbg->dbg_root, dev, &fops);
	if (!dbg->dbg_run)
		goto err_dbg;

	mlx5_cmdif_debugfs_init(dev);

	return 0;

err_dbg:
	clean_debug_files(dev);
	return err;
}

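/* Usage sketch (editorial; assumes debugfs is mounted at /sys/kernel/debug
 * and the per-device directory comes from dev->priv.dbg_root, created outside
 * this file):
 *
 *	cd /sys/kernel/debug/mlx5/<pci-dev>/cmd
 *	echo 16 > out_len	# size the output buffer (outlen_write)
 *	cat raw_cmd_in > in	# raw command input     (data_write)
 *	echo go > run		# execute the command   (dbg_write)
 *	cat out			# read raw output       (data_read)
 *	cat status		# last command status
 */
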
void mlx5_cmd_use_events(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	int i;

	for (i = 0; i < cmd->max_reg_cmds; i++)
		down(&cmd->sem);

	down(&cmd->pages_sem);

	flush_workqueue(cmd->wq);

	cmd->mode = CMD_MODE_EVENTS;

	up(&cmd->pages_sem);
	for (i = 0; i < cmd->max_reg_cmds; i++)
		up(&cmd->sem);
}

*dev
)
1219 struct mlx5_cmd
*cmd
= &dev
->cmd
;
1222 for (i
= 0; i
< cmd
->max_reg_cmds
; i
++)
1225 down(&cmd
->pages_sem
);
1227 flush_workqueue(cmd
->wq
);
1228 cmd
->mode
= CMD_MODE_POLLING
;
1230 up(&cmd
->pages_sem
);
1231 for (i
= 0; i
< cmd
->max_reg_cmds
; i
++)
static void free_msg(struct mlx5_core_dev *dev, struct mlx5_cmd_msg *msg)
{
	unsigned long flags;

	if (msg->cache) {
		spin_lock_irqsave(&msg->cache->lock, flags);
		list_add_tail(&msg->list, &msg->cache->head);
		spin_unlock_irqrestore(&msg->cache->lock, flags);
	} else {
		mlx5_free_cmd_msg(dev, msg);
	}
}

void mlx5_cmd_comp_handler(struct mlx5_core_dev *dev, u64 vec)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_work_ent *ent;
	mlx5_cmd_cbk_t callback;
	void *context;
	int err;
	int i;
	s64 ds;
	struct mlx5_cmd_stats *stats;
	unsigned long flags;
	unsigned long vector;

	/* there can be at most 32 pending commands */
	vector = vec & 0xffffffff;
	for (i = 0; i < (1 << cmd->log_sz); i++) {
		if (test_bit(i, &vector)) {
			struct semaphore *sem;

			ent = cmd->ent_arr[i];
			if (ent->page_queue)
				sem = &cmd->pages_sem;
			else
				sem = &cmd->sem;
			ent->ts2 = ktime_get_ns();
			memcpy(ent->out->first.data, ent->lay->out, sizeof(ent->lay->out));
			dump_command(dev, ent, 0);
			if (!ent->ret) {
				if (!cmd->checksum_disabled)
					ent->ret = verify_signature(ent);
				else
					ent->ret = 0;
				if (vec & MLX5_TRIGGERED_CMD_COMP)
					ent->status = MLX5_DRIVER_STATUS_ABORTED;
				else
					ent->status = ent->lay->status_own >> 1;

				mlx5_core_dbg(dev, "command completed. ret 0x%x, delivery status %s(0x%x)\n",
					      ent->ret, deliv_status_to_str(ent->status), ent->status);
			}
			free_ent(cmd, ent->idx);

			if (ent->callback) {
				ds = ent->ts2 - ent->ts1;
				if (ent->op < ARRAY_SIZE(cmd->stats)) {
					stats = &cmd->stats[ent->op];
					spin_lock_irqsave(&stats->lock, flags);
					stats->sum += ds;
					stats->n++;
					spin_unlock_irqrestore(&stats->lock, flags);
				}

				callback = ent->callback;
				context = ent->context;
				err = ent->ret;
				if (!err)
					err = mlx5_copy_from_msg(ent->uout,
								 ent->out,
								 ent->uout_size);

				mlx5_free_cmd_msg(dev, ent->out);
				free_msg(dev, ent->in);

				err = err ? err : ent->status;
				free_cmd(ent);
				callback(err, context);
			} else {
				complete(&ent->done);
			}
			up(sem);
		}
	}
}
EXPORT_SYMBOL(mlx5_cmd_comp_handler);

static int status_to_err(u8 status)
{
	return status ? -1 : 0; /* TBD more meaningful codes */
}

static struct mlx5_cmd_msg *alloc_msg(struct mlx5_core_dev *dev, int in_size,
				      gfp_t gfp)
{
	struct mlx5_cmd_msg *msg = ERR_PTR(-ENOMEM);
	struct mlx5_cmd *cmd = &dev->cmd;
	struct cache_ent *ent = NULL;

	if (in_size > MED_LIST_SIZE && in_size <= LONG_LIST_SIZE)
		ent = &cmd->cache.large;
	else if (in_size > 16 && in_size <= MED_LIST_SIZE)
		ent = &cmd->cache.med;

	if (ent) {
		spin_lock_irq(&ent->lock);
		if (!list_empty(&ent->head)) {
			msg = list_entry(ent->head.next, typeof(*msg), list);
			/* For cached lists, we must explicitly state what is
			 * the real size
			 */
			msg->len = in_size;
			list_del(&msg->list);
		}
		spin_unlock_irq(&ent->lock);
	}

	if (IS_ERR(msg))
		msg = mlx5_alloc_cmd_msg(dev, gfp, in_size);

	return msg;
}

static u16 opcode_from_in(struct mlx5_inbox_hdr *in)
{
	return be16_to_cpu(in->opcode);
}

static int is_manage_pages(struct mlx5_inbox_hdr *in)
{
	return be16_to_cpu(in->opcode) == MLX5_CMD_OP_MANAGE_PAGES;
}

static int cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		    int out_size, mlx5_cmd_cbk_t callback, void *context)
{
	struct mlx5_cmd_msg *inb;
	struct mlx5_cmd_msg *outb;
	int pages_queue;
	gfp_t gfp;
	int err;
	u8 status = 0;
	u32 drv_synd;

	if (pci_channel_offline(dev->pdev) ||
	    dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		err = mlx5_internal_err_ret_value(dev, opcode_from_in(in), &drv_synd, &status);
		*get_synd_ptr(out) = cpu_to_be32(drv_synd);
		*get_status_ptr(out) = status;
		return err;
	}

	pages_queue = is_manage_pages(in);
	gfp = callback ? GFP_ATOMIC : GFP_KERNEL;

	inb = alloc_msg(dev, in_size, gfp);
	if (IS_ERR(inb)) {
		err = PTR_ERR(inb);
		return err;
	}

	err = mlx5_copy_to_msg(inb, in, in_size);
	if (err) {
		mlx5_core_warn(dev, "err %d\n", err);
		goto out_in;
	}

	outb = mlx5_alloc_cmd_msg(dev, gfp, out_size);
	if (IS_ERR(outb)) {
		err = PTR_ERR(outb);
		goto out_in;
	}

	err = mlx5_cmd_invoke(dev, inb, outb, out, out_size, callback, context,
			      pages_queue, &status);
	if (err)
		goto out_out;

	mlx5_core_dbg(dev, "err %d, status %d\n", err, status);
	if (status) {
		err = status_to_err(status);
		goto out_out;
	}

	if (!callback)
		err = mlx5_copy_from_msg(out, outb, out_size);

out_out:
	if (!callback)
		mlx5_free_cmd_msg(dev, outb);

out_in:
	if (!callback)
		free_msg(dev, inb);
	return err;
}

int mlx5_cmd_exec(struct mlx5_core_dev *dev, void *in, int in_size, void *out,
		  int out_size)
{
	return cmd_exec(dev, in, in_size, out, out_size, NULL, NULL);
}
EXPORT_SYMBOL(mlx5_cmd_exec);

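/* Caller sketch (editorial, illustrative only; the mailbox structs are
 * defined in the mlx5 headers, not in this file). A typical synchronous
 * caller builds an inbox, fires the command, then checks the outbox header:
 *
 *	struct mlx5_enable_hca_mbox_in  in;
 *	struct mlx5_enable_hca_mbox_out out;
 *	int err;
 *
 *	memset(&in, 0, sizeof(in));
 *	memset(&out, 0, sizeof(out));
 *	in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_ENABLE_HCA);
 *	err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
 *	if (err)
 *		return err;
 *	return mlx5_cmd_status_to_err(&out.hdr);
 */
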
int mlx5_cmd_exec_cb(struct mlx5_core_dev *dev, void *in, int in_size,
		     void *out, int out_size, mlx5_cmd_cbk_t callback,
		     void *context)
{
	return cmd_exec(dev, in, in_size, out, out_size, callback, context);
}
EXPORT_SYMBOL(mlx5_cmd_exec_cb);

static void destroy_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	struct mlx5_cmd_msg *n;

	list_for_each_entry_safe(msg, n, &cmd->cache.large.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}

	list_for_each_entry_safe(msg, n, &cmd->cache.med.head, list) {
		list_del(&msg->list);
		mlx5_free_cmd_msg(dev, msg);
	}
}

static int create_msg_cache(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;
	struct mlx5_cmd_msg *msg;
	int err;
	int i;

	spin_lock_init(&cmd->cache.large.lock);
	INIT_LIST_HEAD(&cmd->cache.large.head);
	spin_lock_init(&cmd->cache.med.lock);
	INIT_LIST_HEAD(&cmd->cache.med.head);

	for (i = 0; i < NUM_LONG_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, LONG_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.large;
		list_add_tail(&msg->list, &cmd->cache.large.head);
	}

	for (i = 0; i < NUM_MED_LISTS; i++) {
		msg = mlx5_alloc_cmd_msg(dev, GFP_KERNEL, MED_LIST_SIZE);
		if (IS_ERR(msg)) {
			err = PTR_ERR(msg);
			goto ex_err;
		}
		msg->cache = &cmd->cache.med;
		list_add_tail(&msg->list, &cmd->cache.med.head);
	}

	return 0;

ex_err:
	destroy_msg_cache(dev);
	return err;
}

static int alloc_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	struct device *ddev = &dev->pdev->dev;

	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE,
						 &cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	/* make sure it is aligned to 4K */
	if (!((uintptr_t)cmd->cmd_alloc_buf & (MLX5_ADAPTER_PAGE_SIZE - 1))) {
		cmd->cmd_buf = cmd->cmd_alloc_buf;
		cmd->dma = cmd->alloc_dma;
		cmd->alloc_size = MLX5_ADAPTER_PAGE_SIZE;
		return 0;
	}

	dma_free_coherent(ddev, MLX5_ADAPTER_PAGE_SIZE, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
	cmd->cmd_alloc_buf = dma_zalloc_coherent(ddev,
						 2 * MLX5_ADAPTER_PAGE_SIZE - 1,
						 &cmd->alloc_dma, GFP_KERNEL);
	if (!cmd->cmd_alloc_buf)
		return -ENOMEM;

	cmd->cmd_buf = PTR_ALIGN(cmd->cmd_alloc_buf, MLX5_ADAPTER_PAGE_SIZE);
	cmd->dma = ALIGN(cmd->alloc_dma, MLX5_ADAPTER_PAGE_SIZE);
	cmd->alloc_size = 2 * MLX5_ADAPTER_PAGE_SIZE - 1;
	return 0;
}

static void free_cmd_page(struct mlx5_core_dev *dev, struct mlx5_cmd *cmd)
{
	struct device *ddev = &dev->pdev->dev;

	dma_free_coherent(ddev, cmd->alloc_size, cmd->cmd_alloc_buf,
			  cmd->alloc_dma);
}

int mlx5_cmd_init(struct mlx5_core_dev *dev)
{
	int size = sizeof(struct mlx5_cmd_prot_block);
	int align = roundup_pow_of_two(size);
	struct mlx5_cmd *cmd = &dev->cmd;
	u32 cmd_h, cmd_l;
	u16 cmd_if_rev;
	int err;
	int i;

	memset(cmd, 0, sizeof(*cmd));
	cmd_if_rev = cmdif_rev(dev);
	if (cmd_if_rev != CMD_IF_REV) {
		dev_err(&dev->pdev->dev,
			"Driver cmdif rev(%d) differs from firmware's(%d)\n",
			CMD_IF_REV, cmd_if_rev);
		return -EINVAL;
	}

	cmd->pool = pci_pool_create("mlx5_cmd", dev->pdev, size, align, 0);
	if (!cmd->pool)
		return -ENOMEM;

	err = alloc_cmd_page(dev, cmd);
	if (err)
		goto err_free_pool;

	cmd_l = ioread32be(&dev->iseg->cmdq_addr_l_sz) & 0xff;
	cmd->log_sz = cmd_l >> 4 & 0xf;
	cmd->log_stride = cmd_l & 0xf;
	if (1 << cmd->log_sz > MLX5_MAX_COMMANDS) {
		dev_err(&dev->pdev->dev, "firmware reports too many outstanding commands %d\n",
			1 << cmd->log_sz);
		err = -EINVAL;
		goto err_free_page;
	}

	if (cmd->log_sz + cmd->log_stride > MLX5_ADAPTER_PAGE_SHIFT) {
		dev_err(&dev->pdev->dev, "command queue size overflow\n");
		err = -EINVAL;
		goto err_free_page;
	}

	cmd->checksum_disabled = 1;
	cmd->max_reg_cmds = (1 << cmd->log_sz) - 1;
	cmd->bitmask = (1 << cmd->max_reg_cmds) - 1;

	cmd->cmdif_rev = ioread32be(&dev->iseg->cmdif_rev_fw_sub) >> 16;
	if (cmd->cmdif_rev > CMD_IF_REV) {
		dev_err(&dev->pdev->dev, "driver does not support command interface version. driver %d, firmware %d\n",
			CMD_IF_REV, cmd->cmdif_rev);
		err = -ENOTSUPP;
		goto err_free_page;
	}

	spin_lock_init(&cmd->alloc_lock);
	spin_lock_init(&cmd->token_lock);
	for (i = 0; i < ARRAY_SIZE(cmd->stats); i++)
		spin_lock_init(&cmd->stats[i].lock);

	sema_init(&cmd->sem, cmd->max_reg_cmds);
	sema_init(&cmd->pages_sem, 1);

	cmd_h = (u32)((u64)(cmd->dma) >> 32);
	cmd_l = (u32)(cmd->dma);
	if (cmd_l & 0xfff) {
		dev_err(&dev->pdev->dev, "invalid command queue address\n");
		err = -ENOMEM;
		goto err_free_page;
	}

	iowrite32be(cmd_h, &dev->iseg->cmdq_addr_h);
	iowrite32be(cmd_l, &dev->iseg->cmdq_addr_l_sz);

	/* Make sure firmware sees the complete address before we proceed */
	wmb();

	mlx5_core_dbg(dev, "descriptor at dma 0x%llx\n", (unsigned long long)(cmd->dma));

	cmd->mode = CMD_MODE_POLLING;

	err = create_msg_cache(dev);
	if (err) {
		dev_err(&dev->pdev->dev, "failed to create command cache\n");
		goto err_free_page;
	}

	set_wqname(dev);
	cmd->wq = create_singlethread_workqueue(cmd->wq_name);
	if (!cmd->wq) {
		dev_err(&dev->pdev->dev, "failed to create command workqueue\n");
		err = -ENOMEM;
		goto err_cache;
	}

	err = create_debugfs_files(dev);
	if (err) {
		err = -ENOMEM;
		goto err_wq;
	}

	return 0;

err_wq:
	destroy_workqueue(cmd->wq);

err_cache:
	destroy_msg_cache(dev);

err_free_page:
	free_cmd_page(dev, cmd);

err_free_pool:
	pci_pool_destroy(cmd->pool);

	return err;
}
EXPORT_SYMBOL(mlx5_cmd_init);

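/* Decode example (editorial, hypothetical register value): if the low byte of
 * cmdq_addr_l_sz reads 0x56, then log_sz = 5 (32 entries) and log_stride = 6
 * (64-byte entries), so the queue needs 32 * 64 = 2048 bytes and fits in the
 * single 4 KiB command page; the log_sz + log_stride > MLX5_ADAPTER_PAGE_SHIFT
 * check above rejects any combination that would not.
 */
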
void mlx5_cmd_cleanup(struct mlx5_core_dev *dev)
{
	struct mlx5_cmd *cmd = &dev->cmd;

	clean_debug_files(dev);
	destroy_workqueue(cmd->wq);
	destroy_msg_cache(dev);
	free_cmd_page(dev, cmd);
	pci_pool_destroy(cmd->pool);
}
EXPORT_SYMBOL(mlx5_cmd_cleanup);

static const char *cmd_status_str(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:
		return "OK";
	case MLX5_CMD_STAT_INT_ERR:
		return "internal error";
	case MLX5_CMD_STAT_BAD_OP_ERR:
		return "bad operation";
	case MLX5_CMD_STAT_BAD_PARAM_ERR:
		return "bad parameter";
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:
		return "bad system state";
	case MLX5_CMD_STAT_BAD_RES_ERR:
		return "bad resource";
	case MLX5_CMD_STAT_RES_BUSY:
		return "resource busy";
	case MLX5_CMD_STAT_LIM_ERR:
		return "limits exceeded";
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:
		return "bad resource state";
	case MLX5_CMD_STAT_IX_ERR:
		return "bad index";
	case MLX5_CMD_STAT_NO_RES_ERR:
		return "no resources";
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:
		return "bad input length";
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:
		return "bad output length";
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:
		return "bad QP state";
	case MLX5_CMD_STAT_BAD_PKT_ERR:
		return "bad packet (discarded)";
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:
		return "bad size too many outstanding CQEs";
	default:
		return "unknown status";
	}
}

static int cmd_status_to_err(u8 status)
{
	switch (status) {
	case MLX5_CMD_STAT_OK:				return 0;
	case MLX5_CMD_STAT_INT_ERR:			return -EIO;
	case MLX5_CMD_STAT_BAD_OP_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_PARAM_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_SYS_STATE_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_RES_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_RES_BUSY:			return -EBUSY;
	case MLX5_CMD_STAT_LIM_ERR:			return -ENOMEM;
	case MLX5_CMD_STAT_BAD_RES_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_IX_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_NO_RES_ERR:			return -EAGAIN;
	case MLX5_CMD_STAT_BAD_INP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_OUTP_LEN_ERR:		return -EIO;
	case MLX5_CMD_STAT_BAD_QP_STATE_ERR:		return -EINVAL;
	case MLX5_CMD_STAT_BAD_PKT_ERR:			return -EINVAL;
	case MLX5_CMD_STAT_BAD_SIZE_OUTS_CQES_ERR:	return -EINVAL;
	default:					return -EIO;
	}
}

/* this will be available till all the commands use set/get macros */
int mlx5_cmd_status_to_err(struct mlx5_outbox_hdr *hdr)
{
	if (!hdr->status)
		return 0;

	pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
		cmd_status_str(hdr->status), hdr->status,
		be32_to_cpu(hdr->syndrome));

	return cmd_status_to_err(hdr->status);
}

int mlx5_cmd_status_to_err_v2(void *ptr)
{
	u32 syndrome;
	u8  status;

	status = be32_to_cpu(*(__be32 *)ptr) >> 24;
	if (!status)
		return 0;

	syndrome = be32_to_cpu(*(__be32 *)(ptr + 4));

	pr_warn("command failed, status %s(0x%x), syndrome 0x%x\n",
		cmd_status_str(status), status, syndrome);

	return cmd_status_to_err(status);
}