// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/etherdevice.h>
#include "qed.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"

#define GRCBASE_MCP	0xe00000

#define QED_MCP_RESP_ITER_US	10

#define QED_DRV_MB_MAX_RETRIES	(500 * 1000)	/* Account for 5 sec */
#define QED_MCP_RESET_RETRIES	(50 * 1000)	/* Account for 500 msec */

#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val)	     \
	qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
	       _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val)  \
	DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field)	   \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field))

#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
		  DRV_ID_PDA_COMP_VER_SHIFT)

#define MCP_BYTES_PER_MBIT_SHIFT 17

bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
		return false;

	return true;
}

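/* Note: the MFW "public" shared memory is organized as a table of sections
 * (global, path, port, func, drv_mb, mfw_mb). SECTION_OFFSIZE_ADDR() yields
 * the address of a section's offsize word, and SECTION_ADDR() turns that
 * offsize plus an index (port or PF id) into the section's base address.
 */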
void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}

void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	u32 tmp, i;

	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		tmp = qed_rd(p_hwfn, p_ptt,
			     p_hwfn->mcp_info->mfw_mb_addr +
			     (i << 2) + sizeof(u32));

		/* The MB data is actually BE; Need to force it to cpu */
		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
			be32_to_cpu((__force __be32)tmp);
	}
}

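/* A driver->MFW mailbox command in flight. At most one command is pending at
 * any given time; elements live on mcp_info->cmd_list and are matched to MFW
 * responses by their expected sequence number (see
 * qed_mcp_update_pending_cmd()).
 */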
struct qed_mcp_cmd_elem {
	struct list_head list;
	struct qed_mcp_mb_params *p_mb_params;
	u16 expected_seq_num;
	bool b_is_completed;
};

/* Must be called while cmd_lock is acquired */
static struct qed_mcp_cmd_elem *
qed_mcp_cmd_add_elem(struct qed_hwfn *p_hwfn,
		     struct qed_mcp_mb_params *p_mb_params,
		     u16 expected_seq_num)
{
	struct qed_mcp_cmd_elem *p_cmd_elem = NULL;

	p_cmd_elem = kzalloc(sizeof(*p_cmd_elem), GFP_ATOMIC);
	if (!p_cmd_elem)
		goto out;

	p_cmd_elem->p_mb_params = p_mb_params;
	p_cmd_elem->expected_seq_num = expected_seq_num;
	list_add(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
out:
	return p_cmd_elem;
}

/* Must be called while cmd_lock is acquired */
static void qed_mcp_cmd_del_elem(struct qed_hwfn *p_hwfn,
				 struct qed_mcp_cmd_elem *p_cmd_elem)
{
	list_del(&p_cmd_elem->list);
	kfree(p_cmd_elem);
}

/* Must be called while cmd_lock is acquired */
static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn,
						     u16 seq_num)
{
	struct qed_mcp_cmd_elem *p_cmd_elem = NULL;

	list_for_each_entry(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list) {
		if (p_cmd_elem->expected_seq_num == seq_num)
			return p_cmd_elem;
	}

	return NULL;
}

int qed_mcp_free(struct qed_hwfn *p_hwfn)
{
	if (p_hwfn->mcp_info) {
		struct qed_mcp_cmd_elem *p_cmd_elem, *p_tmp;

		kfree(p_hwfn->mcp_info->mfw_mb_cur);
		kfree(p_hwfn->mcp_info->mfw_mb_shadow);

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
		list_for_each_entry_safe(p_cmd_elem,
					 p_tmp,
					 &p_hwfn->mcp_info->cmd_list, list) {
			qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		}
		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
	}

	kfree(p_hwfn->mcp_info);
	p_hwfn->mcp_info = NULL;

	return 0;
}

/* Maximum of 1 sec to wait for the SHMEM ready indication */
#define QED_MCP_SHMEM_RDY_MAX_RETRIES	20
#define QED_MCP_SHMEM_RDY_ITER_MS	50

static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info = p_hwfn->mcp_info;
	u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES;
	u8 msec = QED_MCP_SHMEM_RDY_ITER_MS;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

	p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base) {
		DP_NOTICE(p_hwfn,
			  "The address of the MCP scratch-pad is not configured\n");
		return -EINVAL;
	}

	p_info->public_base |= GRCBASE_MCP;

	/* Get the MFW MB address and number of supported messages */
	mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt,
					    p_info->mfw_mb_addr +
					    offsetof(struct public_mfw_mb,
						     sup_msgs));

	/* The driver can notify that there was an MCP reset, and might read the
	 * SHMEM values before the MFW has completed initializing them.
	 * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a
	 * data ready indication.
	 */
	while (!p_info->mfw_mb_length && --cnt) {
		msleep(msec);
		p_info->mfw_mb_length =
			(u16)qed_rd(p_hwfn, p_ptt,
				    p_info->mfw_mb_addr +
				    offsetof(struct public_mfw_mb, sup_msgs));
	}

	if (!cnt) {
		DP_NOTICE(p_hwfn,
			  "Failed to get the SHMEM ready notification after %d msec\n",
			  QED_MCP_SHMEM_RDY_MAX_RETRIES * msec);
		return -EBUSY;
	}

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			     DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK;

	p_info->mcp_hist = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	return 0;
}

int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info;
	u32 size;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL);
	if (!p_hwfn->mcp_info)
		goto err;
	p_info = p_hwfn->mcp_info;

	/* Initialize the MFW spinlocks */
	spin_lock_init(&p_info->cmd_lock);
	spin_lock_init(&p_info->link_lock);

	INIT_LIST_HEAD(&p_info->cmd_list);

	if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
		DP_NOTICE(p_hwfn, "MCP is not initialized\n");
		/* Do not free mcp_info here, since public_base indicates that
		 * the MCP is not initialized
		 */
		return 0;
	}

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
	p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
		goto err;

	return 0;

err:
	qed_mcp_free(p_hwfn);
	return -ENOMEM;
}

static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 generic_por_0 = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Use MCP history register to check if MCP reset occurred between init
	 * time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SP,
			   "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
			   p_hwfn->mcp_info->mcp_hist, generic_por_0);

		qed_load_mcp_offsets(p_hwfn, p_ptt);
		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
}

int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0;
	int rc = 0;

	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn,
			  "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
		return -EBUSY;
	}

	/* Ensure that only a single thread is accessing the mailbox */
	spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

	org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Set drv command along with the updated sequence */
	qed_mcp_reread_offsets(p_hwfn, p_ptt);
	seq = ++p_hwfn->mcp_info->drv_mb_seq;
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		udelay(delay);
		/* Give the MFW up to 500 msec (50 * 1000 * 10 usec) */
	} while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
					      MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < QED_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = -EAGAIN;
	}

	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

	return rc;
}

/* Must be called while cmd_lock is acquired */
static bool qed_mcp_has_pending_cmd(struct qed_hwfn *p_hwfn)
{
	struct qed_mcp_cmd_elem *p_cmd_elem;

	/* There is at most one pending command at a certain time, and if it
	 * exists - it is placed at the HEAD of the list.
	 */
	if (!list_empty(&p_hwfn->mcp_info->cmd_list)) {
		p_cmd_elem = list_first_entry(&p_hwfn->mcp_info->cmd_list,
					      struct qed_mcp_cmd_elem, list);
		return !p_cmd_elem->b_is_completed;
	}

	return false;
}

/* Must be called while cmd_lock is acquired */
static int
qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_mb_params *p_mb_params;
	struct qed_mcp_cmd_elem *p_cmd_elem;
	u32 mcp_resp;
	u16 seq_num;

	mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
	seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);

	/* Return if no new non-handled response has been received */
	if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
		return -EAGAIN;

	p_cmd_elem = qed_mcp_cmd_get_elem(p_hwfn, seq_num);
	if (!p_cmd_elem) {
		DP_ERR(p_hwfn,
		       "Failed to find a pending mailbox cmd that expects sequence number %d\n",
		       seq_num);
		return -EINVAL;
	}

	p_mb_params = p_cmd_elem->p_mb_params;

	/* Get the MFW response along with the sequence number */
	p_mb_params->mcp_resp = mcp_resp;

	/* Get the MFW param */
	p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);

	/* Get the union data */
	if (p_mb_params->p_data_dst != NULL && p_mb_params->data_dst_size) {
		u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
				      offsetof(struct public_drv_mb,
					       union_data);
		qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				union_data_addr, p_mb_params->data_dst_size);
	}

	p_cmd_elem->b_is_completed = true;

	return 0;
}

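/* Program a command into the driver mailbox: the union data and drv_mb_param
 * are written first, and the header (command | sequence number) is written
 * last, since that is what triggers the MFW to process the request.
 */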
/* Must be called while cmd_lock is acquired */
static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_mcp_mb_params *p_mb_params,
				    u16 seq_num)
{
	union drv_union_data union_data;
	u32 union_data_addr;

	/* Set the union data */
	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  offsetof(struct public_drv_mb, union_data);
	memset(&union_data, 0, sizeof(union_data));
	if (p_mb_params->p_data_src != NULL && p_mb_params->data_src_size)
		memcpy(&union_data, p_mb_params->p_data_src,
		       p_mb_params->data_src_size);
	qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
		      sizeof(union_data));

	/* Set the drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);

	/* Set the drv command along with the sequence number */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "MFW mailbox: command 0x%08x param 0x%08x\n",
		   (p_mb_params->cmd | seq_num), p_mb_params->param);
}

static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd)
{
	p_hwfn->mcp_info->b_block_cmd = block_cmd;

	DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
		block_cmd ? "Block" : "Unblock");
}

static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
	u32 delay = QED_MCP_RESP_ITER_US;

	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
	cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	udelay(delay);
	cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	udelay(delay);
	cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);

	DP_NOTICE(p_hwfn,
		  "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
		  cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
}

static int
_qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       struct qed_mcp_mb_params *p_mb_params,
		       u32 max_retries, u32 usecs)
{
	u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000);
	struct qed_mcp_cmd_elem *p_cmd_elem;
	u16 seq_num;
	int rc = 0;

	/* Wait until the mailbox is non-occupied */
	do {
		/* Exit the loop if there is no pending command, or if the
		 * pending command is completed during this iteration.
		 * The spinlock stays locked until the command is sent.
		 */

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (!qed_mcp_has_pending_cmd(p_hwfn))
			break;

		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (!rc)
			break;
		else if (rc != -EAGAIN)
			goto err;

		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
			msleep(msecs);
		else
			udelay(usecs);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn,
			  "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return -EAGAIN;
	}

	/* Send the mailbox command */
	qed_mcp_reread_offsets(p_hwfn, p_ptt);
	seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
	p_cmd_elem = qed_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
	if (!p_cmd_elem) {
		rc = -ENOMEM;
		goto err;
	}

	__qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

	/* Wait for the MFW response */
	do {
		/* Exit the loop if the command is already completed, or if the
		 * command is completed during this iteration.
		 * The spinlock stays locked until the list element is removed.
		 */

		if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
			msleep(msecs);
		else
			udelay(usecs);

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (p_cmd_elem->b_is_completed)
			break;

		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (!rc)
			break;
		else if (rc != -EAGAIN)
			goto err;

		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn,
			  "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		qed_mcp_print_cpu_info(p_hwfn, p_ptt);

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
		qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
			qed_mcp_cmd_set_blocking(p_hwfn, true);

		qed_hw_err_notify(p_hwfn, p_ptt,
				  QED_HW_ERR_MFW_RESP_FAIL, NULL);
		return -EAGAIN;
	}

	qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
		   p_mb_params->mcp_resp,
		   p_mb_params->mcp_param,
		   (cnt * usecs) / 1000, (cnt * usecs) % 1000);

	/* Clear the sequence number from the MFW response */
	p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;

	return 0;

err:
	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
	return rc;
}

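/* Top-level mailbox entry point: validates the mailbox state and buffer
 * sizes, and scales the polling interval from microseconds to milliseconds
 * when the caller allows sleeping (CAN_SLEEP), keeping the overall timeout
 * roughly the same.
 */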
static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_mcp_mb_params *p_mb_params)
{
	size_t union_data_size = sizeof(union drv_union_data);
	u32 max_retries = QED_DRV_MB_MAX_RETRIES;
	u32 usecs = QED_MCP_RESP_ITER_US;

	/* MCP not initialized */
	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn,
			  "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return -EBUSY;
	}

	if (p_mb_params->data_src_size > union_data_size ||
	    p_mb_params->data_dst_size > union_data_size) {
		DP_ERR(p_hwfn,
		       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
		       p_mb_params->data_src_size,
		       p_mb_params->data_dst_size, union_data_size);
		return -EINVAL;
	}

	if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
		max_retries = DIV_ROUND_UP(max_retries, 1000);
		usecs *= 1000;
	}

	return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
				      usecs);
}

int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
		struct qed_ptt *p_ptt,
		u32 cmd,
		u32 param,
		u32 *o_mcp_resp,
		u32 *o_mcp_param)
{
	struct qed_mcp_mb_params mb_params;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return 0;
}

int
qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt,
		   u32 cmd,
		   u32 param,
		   u32 *o_mcp_resp,
		   u32 *o_mcp_param, u32 i_txn_size, u32 *i_buf)
{
	struct qed_mcp_mb_params mb_params;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_src = i_buf;
	mb_params.data_src_size = (u8)i_txn_size;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	/* nvm_info needs to be updated */
	p_hwfn->nvm_info.valid = false;

	return 0;
}

int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u32 cmd,
		       u32 param,
		       u32 *o_mcp_resp,
		       u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
{
	struct qed_mcp_mb_params mb_params;
	u8 raw_data[MCP_DRV_NVM_BUF_LEN];
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_dst = raw_data;

	/* Use the maximal value since the actual one is part of the response */
	mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	*o_txn_size = *o_mcp_param;
	memcpy(o_buf, raw_data, *o_txn_size);

	return 0;
}

static bool
qed_mcp_can_force_load(u8 drv_role,
		       u8 exist_drv_role,
		       enum qed_override_force_load override_force_load)
{
	bool can_force_load = false;

	switch (override_force_load) {
	case QED_OVERRIDE_FORCE_LOAD_ALWAYS:
		can_force_load = true;
		break;
	case QED_OVERRIDE_FORCE_LOAD_NEVER:
		can_force_load = false;
		break;
	default:
		can_force_load = (drv_role == DRV_ROLE_OS &&
				  exist_drv_role == DRV_ROLE_PREBOOT) ||
				 (drv_role == DRV_ROLE_KDUMP &&
				  exist_drv_role == DRV_ROLE_OS);
		break;
	}

	return can_force_load;
}

static int qed_mcp_cancel_load_req(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
			 &resp, &param);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to send cancel load request, rc = %d\n", rc);

	return rc;
}

#define CONFIG_QEDE_BITMAP_IDX		BIT(0)
#define CONFIG_QED_SRIOV_BITMAP_IDX	BIT(1)
#define CONFIG_QEDR_BITMAP_IDX		BIT(2)
#define CONFIG_QEDF_BITMAP_IDX		BIT(4)
#define CONFIG_QEDI_BITMAP_IDX		BIT(5)
#define CONFIG_QED_LL2_BITMAP_IDX	BIT(6)

static u32 qed_get_config_bitmap(void)
{
	u32 config_bitmap = 0x0;

	if (IS_ENABLED(CONFIG_QEDE))
		config_bitmap |= CONFIG_QEDE_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_SRIOV))
		config_bitmap |= CONFIG_QED_SRIOV_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_RDMA))
		config_bitmap |= CONFIG_QEDR_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_FCOE))
		config_bitmap |= CONFIG_QEDF_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_ISCSI))
		config_bitmap |= CONFIG_QEDI_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_LL2))
		config_bitmap |= CONFIG_QED_LL2_BITMAP_IDX;

	return config_bitmap;
}

struct qed_load_req_in_params {
	u8 hsi_ver;
#define QED_LOAD_REQ_HSI_VER_DEFAULT	0
#define QED_LOAD_REQ_HSI_VER_1		1
	u32 drv_ver_0;
	u32 drv_ver_1;
	u32 fw_ver;
	u8 drv_role;
	u8 timeout_val;
	u8 force_cmd;
	bool avoid_eng_reset;
};

struct qed_load_req_out_params {
	u32 load_code;
	u32 exist_drv_ver_0;
	u32 exist_drv_ver_1;
	u32 exist_fw_ver;
	u8 exist_drv_role;
	u8 mfw_hsi_ver;
	bool drv_exists;
};

static int
__qed_mcp_load_req(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt,
		   struct qed_load_req_in_params *p_in_params,
		   struct qed_load_req_out_params *p_out_params)
{
	struct qed_mcp_mb_params mb_params;
	struct load_req_stc load_req;
	struct load_rsp_stc load_rsp;
	u32 hsi_ver;
	int rc;

	memset(&load_req, 0, sizeof(load_req));
	load_req.drv_ver_0 = p_in_params->drv_ver_0;
	load_req.drv_ver_1 = p_in_params->drv_ver_1;
	load_req.fw_ver = p_in_params->fw_ver;
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
			  p_in_params->timeout_val);
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
			  p_in_params->force_cmd);
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
			  p_in_params->avoid_eng_reset);

	hsi_ver = (p_in_params->hsi_ver == QED_LOAD_REQ_HSI_VER_DEFAULT) ?
		  DRV_ID_MCP_HSI_VER_CURRENT :
		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->cdev->drv_type;
	mb_params.p_data_src = &load_req;
	mb_params.data_src_size = sizeof(load_req);
	mb_params.p_data_dst = &load_rsp;
	mb_params.data_dst_size = sizeof(load_rsp);
	mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
		   mb_params.param,
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));

	if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
			   load_req.drv_ver_0,
			   load_req.drv_ver_1,
			   load_req.fw_ver,
			   load_req.misc0,
			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
			   QED_MFW_GET_FIELD(load_req.misc0,
					     LOAD_REQ_LOCK_TO),
			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
	}

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to send load request, rc = %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
	p_out_params->load_code = mb_params.mcp_resp;

	if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SP,
			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
			   load_rsp.drv_ver_0,
			   load_rsp.drv_ver_1,
			   load_rsp.fw_ver,
			   load_rsp.misc0,
			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));

		p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
		p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
		p_out_params->exist_fw_ver = load_rsp.fw_ver;
		p_out_params->exist_drv_role =
			QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
		p_out_params->mfw_hsi_ver =
			QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
		p_out_params->drv_exists =
			QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
			LOAD_RSP_FLAGS0_DRV_EXISTS;
	}

	return 0;
}

static int eocre_get_mfw_drv_role(struct qed_hwfn *p_hwfn,
				  enum qed_drv_role drv_role,
				  u8 *p_mfw_drv_role)
{
	switch (drv_role) {
	case QED_DRV_ROLE_OS:
		*p_mfw_drv_role = DRV_ROLE_OS;
		break;
	case QED_DRV_ROLE_KDUMP:
		*p_mfw_drv_role = DRV_ROLE_KDUMP;
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
		return -EINVAL;
	}

	return 0;
}

enum qed_load_req_force {
	QED_LOAD_REQ_FORCE_NONE,
	QED_LOAD_REQ_FORCE_PF,
	QED_LOAD_REQ_FORCE_ALL,
};

static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn,
				  enum qed_load_req_force force_cmd,
				  u8 *p_mfw_force_cmd)
{
	switch (force_cmd) {
	case QED_LOAD_REQ_FORCE_NONE:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
		break;
	case QED_LOAD_REQ_FORCE_PF:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
		break;
	case QED_LOAD_REQ_FORCE_ALL:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
		break;
	}
}

int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt,
		     struct qed_load_req_params *p_params)
{
	struct qed_load_req_out_params out_params;
	struct qed_load_req_in_params in_params;
	u8 mfw_drv_role, mfw_force_cmd;
	int rc;

	memset(&in_params, 0, sizeof(in_params));
	in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT;
	in_params.drv_ver_0 = QED_VERSION;
	in_params.drv_ver_1 = qed_get_config_bitmap();
	in_params.fw_ver = STORM_FW_VERSION;
	rc = eocre_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
	if (rc)
		return rc;

	in_params.drv_role = mfw_drv_role;
	in_params.timeout_val = p_params->timeout_val;
	qed_get_mfw_force_cmd(p_hwfn,
			      QED_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);

	in_params.force_cmd = mfw_force_cmd;
	in_params.avoid_eng_reset = p_params->avoid_eng_reset;

	memset(&out_params, 0, sizeof(out_params));
	rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
	if (rc)
		return rc;

	/* First handle cases where another load request should/might be sent:
	 * - MFW expects the old interface [HSI version = 1]
	 * - MFW responds that a force load request is required
	 */
	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_INFO(p_hwfn,
			"MFW refused a load request due to HSI > 1. Resending with HSI = 1\n");

		in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_1;
		memset(&out_params, 0, sizeof(out_params));
		rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
		if (rc)
			return rc;
	} else if (out_params.load_code ==
		   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
		if (qed_mcp_can_force_load(in_params.drv_role,
					   out_params.exist_drv_role,
					   p_params->override_force_load)) {
			DP_INFO(p_hwfn,
				"A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
				in_params.drv_role, in_params.fw_ver,
				in_params.drv_ver_0, in_params.drv_ver_1,
				out_params.exist_drv_role,
				out_params.exist_fw_ver,
				out_params.exist_drv_ver_0,
				out_params.exist_drv_ver_1);

			qed_get_mfw_force_cmd(p_hwfn,
					      QED_LOAD_REQ_FORCE_ALL,
					      &mfw_force_cmd);

			in_params.force_cmd = mfw_force_cmd;
			memset(&out_params, 0, sizeof(out_params));
			rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params,
						&out_params);
			if (rc)
				return rc;
		} else {
			DP_NOTICE(p_hwfn,
				  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, 0x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
				  in_params.drv_role, in_params.fw_ver,
				  in_params.drv_ver_0, in_params.drv_ver_1,
				  out_params.exist_drv_role,
				  out_params.exist_fw_ver,
				  out_params.exist_drv_ver_0,
				  out_params.exist_drv_ver_1);
			DP_NOTICE(p_hwfn,
				  "Avoid sending a force load request to prevent disruption of active PFs\n");

			qed_mcp_cancel_load_req(p_hwfn, p_ptt);
			return -EBUSY;
		}
	}

	/* Now handle the other types of responses.
	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
	 * expected here after the additional revised load requests were sent.
	 */
	switch (out_params.load_code) {
	case FW_MSG_CODE_DRV_LOAD_ENGINE:
	case FW_MSG_CODE_DRV_LOAD_PORT:
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		if (out_params.mfw_hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
		    out_params.drv_exists) {
			/* The role and fw/driver version match, but the PF is
			 * already loaded and has not been unloaded gracefully.
			 */
			DP_NOTICE(p_hwfn, "PF is already loaded\n");
			return -EINVAL;
		}
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		return -EBUSY;
	}

	p_params->load_code = out_params.load_code;

	return 0;
}

int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
			 &param);
	if (rc) {
		DP_NOTICE(p_hwfn,
			  "Failed to send a LOAD_DONE command, rc = %d\n", rc);
		return rc;
	}

	/* Check if there is a DID mismatch between nvm-cfg/efuse */
	if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
		DP_NOTICE(p_hwfn,
			  "warning: device configuration is not supported on this board type. The device may not function as expected.\n");

	return 0;
}

int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_mb_params mb_params;
	u32 wol_param;

	switch (p_hwfn->cdev->wol_config) {
	case QED_OV_WOL_DISABLED:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
		break;
	case QED_OV_WOL_ENABLED:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unknown WoL configuration %02x\n",
			  p_hwfn->cdev->wol_config);
		fallthrough;
	case QED_OV_WOL_DEFAULT:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ;
	mb_params.param = wol_param;
	mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;

	return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}

int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_mb_params mb_params;
	struct mcp_mac wol_mac;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;

	/* Set the primary MAC if WoL is enabled */
	if (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED) {
		u8 *p_mac = p_hwfn->cdev->wol_mac;

		memset(&wol_mac, 0, sizeof(wol_mac));
		wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
		wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
				    p_mac[4] << 8 | p_mac[5];

		DP_VERBOSE(p_hwfn,
			   (QED_MSG_SP | NETIF_MSG_IFDOWN),
			   "Setting WoL MAC: %pM --> [%08x,%08x]\n",
			   p_mac, wol_mac.mac_upper, wol_mac.mac_lower);

		mb_params.p_data_src = &wol_mac;
		mb_params.data_src_size = sizeof(wol_mac);
	}

	return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}

static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     QED_PATH_ID(p_hwfn));
	u32 disabled_vfs[VF_MAX_STATIC / 32];
	int i;

	DP_VERBOSE(p_hwfn,
		   (QED_MSG_SP | QED_MSG_IOV),
		   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
		disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
					 path_addr +
					 offsetof(struct public_path,
						  mcp_vf_disabled) +
					 sizeof(u32) * i);
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
}

int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt, u32 *vfs_to_ack)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
				     MCP_PF_ID(p_hwfn));
	struct qed_mcp_mb_params mb_params;
	int rc;
	int i;

	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	mb_params.p_data_src = vfs_to_ack;
	mb_params.data_src_size = VF_MAX_STATIC / 8;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
		return -EBUSY;
	}

	/* Clear the ACK bits */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		qed_wr(p_hwfn, p_ptt,
		       func_addr +
		       offsetof(struct public_func, drv_ack_vf_disabled) +
		       i * sizeof(u32), 0);

	return rc;
}

static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
					      struct qed_ptt *p_ptt)
{
	u32 transceiver_state;

	transceiver_state = qed_rd(p_hwfn, p_ptt,
				   p_hwfn->mcp_info->port_addr +
				   offsetof(struct public_port,
					    transceiver_data));

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_HW | QED_MSG_SP),
		   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
		   transceiver_state,
		   (u32)(p_hwfn->mcp_info->port_addr +
			 offsetof(struct public_port, transceiver_data)));

	transceiver_state = GET_FIELD(transceiver_state,
				      ETH_TRANSCEIVER_STATE);

	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		DP_NOTICE(p_hwfn, "Transceiver is present.\n");
	else
		DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
}

static void qed_mcp_read_eee_config(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_mcp_link_state *p_link)
{
	u32 eee_status, val;

	p_link->eee_adv_caps = 0;
	p_link->eee_lp_adv_caps = 0;
	eee_status = qed_rd(p_hwfn,
			    p_ptt,
			    p_hwfn->mcp_info->port_addr +
			    offsetof(struct public_port, eee_status));
	p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
	val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_adv_caps |= QED_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_adv_caps |= QED_EEE_10G_ADV;
	val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_lp_adv_caps |= QED_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_lp_adv_caps |= QED_EEE_10G_ADV;
}

static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct public_func *p_data, int pfid)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 func_addr;
	u32 i, size;

	func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
	memset(p_data, 0, sizeof(*p_data));

	size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
	for (i = 0; i < size / sizeof(u32); i++)
		((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
					    func_addr + (i << 2));
	return size;
}

static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
				  struct public_func *p_shmem_info)
{
	struct qed_mcp_function_info *p_info;

	p_info = &p_hwfn->mcp_info->func_info;

	p_info->bandwidth_min = QED_MFW_GET_FIELD(p_shmem_info->config,
						  FUNC_MF_CFG_MIN_BW);
	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
		DP_INFO(p_hwfn,
			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
			p_info->bandwidth_min);
		p_info->bandwidth_min = 1;
	}

	p_info->bandwidth_max = QED_MFW_GET_FIELD(p_shmem_info->config,
						  FUNC_MF_CFG_MAX_BW);
	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
		DP_INFO(p_hwfn,
			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
			p_info->bandwidth_max);
		p_info->bandwidth_max = 100;
	}
}

static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, bool b_reset)
{
	struct qed_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	/* Prevent SW/attentions from doing this at the same time */
	spin_lock_bh(&p_hwfn->mcp_info->link_lock);

	p_link = &p_hwfn->mcp_info->link_output;
	memset(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = qed_rd(p_hwfn, p_ptt,
				p_hwfn->mcp_info->port_addr +
				offsetof(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
			   status,
			   (u32)(p_hwfn->mcp_info->port_addr +
				 offsetof(struct public_port, link_status)));
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link indications\n");
		goto out;
	}

	if (p_hwfn->b_drv_link_init) {
		/* Link indication with modern MFW arrives as per-PF
		 * indication.
		 */
		if (p_hwfn->mcp_info->capabilities &
		    FW_MB_PARAM_FEATURE_SUPPORT_VLINK) {
			struct public_func shmem_info;

			qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
					       MCP_PF_ID(p_hwfn));
			p_link->link_up = !!(shmem_info.status &
					     FUNC_STATUS_VIRTUAL_LINK_UP);
			qed_read_pf_bandwidth(p_hwfn, &shmem_info);
			DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
				   "Virtual link_up = %d\n", p_link->link_up);
		} else {
			p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
			DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
				   "Physical link_up = %d\n", p_link->link_up);
		}
	} else {
		p_link->link_up = false;
	}

	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
		fallthrough;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
		p_link->link_up = 0;
	}

	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);

	/* Min bandwidth configuration */
	__qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
	qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_ptt,
					    p_link->min_pf_rate);

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status &
				 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
		qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link);

	if (p_hwfn->mcp_info->capabilities &
	    FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) {
		switch (status & LINK_STATUS_FEC_MODE_MASK) {
		case LINK_STATUS_FEC_MODE_NONE:
			p_link->fec_active = QED_FEC_MODE_NONE;
			break;
		case LINK_STATUS_FEC_MODE_FIRECODE_CL74:
			p_link->fec_active = QED_FEC_MODE_FIRECODE;
			break;
		case LINK_STATUS_FEC_MODE_RS_CL91:
			p_link->fec_active = QED_FEC_MODE_RS;
			break;
		default:
			p_link->fec_active = QED_FEC_MODE_AUTO;
		}
	} else {
		p_link->fec_active = QED_FEC_MODE_UNSUPPORTED;
	}

	qed_link_update(p_hwfn, p_ptt);
out:
	spin_unlock_bh(&p_hwfn->mcp_info->link_lock);
}

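/* Translate the driver's link parameters into an eth_phy_cfg structure and
 * send it through the mailbox (INIT_PHY on bring-up, LINK_RESET otherwise).
 * EEE, FEC and extended-speed fields are only populated when the MFW has
 * advertised the corresponding capability bits.
 */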
int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
{
	struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	struct qed_mcp_mb_params mb_params;
	struct eth_phy_cfg phy_cfg;
	u32 cmd, fec_bit = 0;
	u32 val, ext_speed;
	int rc = 0;

	/* Set the shmem configuration according to params */
	memset(&phy_cfg, 0, sizeof(phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	if (!params->speed.autoneg)
		phy_cfg.speed = params->speed.forced_speed;
	phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
	phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
	phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
	phy_cfg.adv_speed = params->speed.advertised_speeds;
	phy_cfg.loopback_mode = params->loopback_mode;

	/* There are MFWs that share this capability regardless of whether
	 * this is feasible or not. And given that at the very least adv_caps
	 * would be set internally by qed, we want to make sure LFA would
	 * still work.
	 */
	if ((p_hwfn->mcp_info->capabilities &
	     FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) {
		phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
		if (params->eee.tx_lpi_enable)
			phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
		if (params->eee.adv_caps & QED_EEE_1G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
		if (params->eee.adv_caps & QED_EEE_10G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
		phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
				    EEE_TX_TIMER_USEC_OFFSET) &
				   EEE_TX_TIMER_USEC_MASK;
	}

	if (p_hwfn->mcp_info->capabilities &
	    FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL) {
		if (params->fec & QED_FEC_MODE_NONE)
			fec_bit |= FEC_FORCE_MODE_NONE;
		else if (params->fec & QED_FEC_MODE_FIRECODE)
			fec_bit |= FEC_FORCE_MODE_FIRECODE;
		else if (params->fec & QED_FEC_MODE_RS)
			fec_bit |= FEC_FORCE_MODE_RS;
		else if (params->fec & QED_FEC_MODE_AUTO)
			fec_bit |= FEC_FORCE_MODE_AUTO;

		SET_MFW_FIELD(phy_cfg.fec_mode, FEC_FORCE_MODE, fec_bit);
	}

	if (p_hwfn->mcp_info->capabilities &
	    FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL) {
		ext_speed = 0;
		if (params->ext_speed.autoneg)
			ext_speed |= ETH_EXT_SPEED_AN;

		val = params->ext_speed.forced_speed;
		if (val & QED_EXT_SPEED_1G)
			ext_speed |= ETH_EXT_SPEED_1G;
		if (val & QED_EXT_SPEED_10G)
			ext_speed |= ETH_EXT_SPEED_10G;
		if (val & QED_EXT_SPEED_20G)
			ext_speed |= ETH_EXT_SPEED_20G;
		if (val & QED_EXT_SPEED_25G)
			ext_speed |= ETH_EXT_SPEED_25G;
		if (val & QED_EXT_SPEED_40G)
			ext_speed |= ETH_EXT_SPEED_40G;
		if (val & QED_EXT_SPEED_50G_R)
			ext_speed |= ETH_EXT_SPEED_50G_BASE_R;
		if (val & QED_EXT_SPEED_50G_R2)
			ext_speed |= ETH_EXT_SPEED_50G_BASE_R2;
		if (val & QED_EXT_SPEED_100G_R2)
			ext_speed |= ETH_EXT_SPEED_100G_BASE_R2;
		if (val & QED_EXT_SPEED_100G_R4)
			ext_speed |= ETH_EXT_SPEED_100G_BASE_R4;
		if (val & QED_EXT_SPEED_100G_P4)
			ext_speed |= ETH_EXT_SPEED_100G_BASE_P4;

		SET_MFW_FIELD(phy_cfg.extended_speed, ETH_EXT_SPEED,
			      ext_speed);

		ext_speed = 0;

		val = params->ext_speed.advertised_speeds;
		if (val & QED_EXT_SPEED_MASK_1G)
			ext_speed |= ETH_EXT_ADV_SPEED_1G;
		if (val & QED_EXT_SPEED_MASK_10G)
			ext_speed |= ETH_EXT_ADV_SPEED_10G;
		if (val & QED_EXT_SPEED_MASK_20G)
			ext_speed |= ETH_EXT_ADV_SPEED_20G;
		if (val & QED_EXT_SPEED_MASK_25G)
			ext_speed |= ETH_EXT_ADV_SPEED_25G;
		if (val & QED_EXT_SPEED_MASK_40G)
			ext_speed |= ETH_EXT_ADV_SPEED_40G;
		if (val & QED_EXT_SPEED_MASK_50G_R)
			ext_speed |= ETH_EXT_ADV_SPEED_50G_BASE_R;
		if (val & QED_EXT_SPEED_MASK_50G_R2)
			ext_speed |= ETH_EXT_ADV_SPEED_50G_BASE_R2;
		if (val & QED_EXT_SPEED_MASK_100G_R2)
			ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_R2;
		if (val & QED_EXT_SPEED_MASK_100G_R4)
			ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_R4;
		if (val & QED_EXT_SPEED_MASK_100G_P4)
			ext_speed |= ETH_EXT_ADV_SPEED_100G_BASE_P4;

		phy_cfg.extended_speed |= ext_speed;

		SET_MFW_FIELD(phy_cfg.fec_mode, FEC_EXTENDED_MODE,
			      params->ext_fec_mode);
	}

	p_hwfn->b_drv_link_init = b_up;

	if (b_up)
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, Adv. Speed 0x%08x, Loopback 0x%08x, FEC 0x%08x, Ext. Speed 0x%08x\n",
			   phy_cfg.speed, phy_cfg.pause, phy_cfg.adv_speed,
			   phy_cfg.loopback_mode, phy_cfg.fec_mode,
			   phy_cfg.extended_speed);
	else
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK, "Resetting link\n");

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.p_data_src = &phy_cfg;
	mb_params.data_src_size = sizeof(phy_cfg);
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Mimic link-change attention, done for several reasons:
	 * - On reset, there's no guarantee MFW would trigger
	 *   an attention.
	 * - On initialization, older MFWs might not indicate link change
	 *   during LFA, so we'll never get an UP indication.
	 */
	qed_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);

	return 0;
}

u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
						 PUBLIC_PATH);
	path_offsize = qed_rd(p_hwfn, p_ptt, path_offsize_addr);
	path_addr = SECTION_ADDR(path_offsize, QED_PATH_ID(p_hwfn));

	proc_kill_cnt = qed_rd(p_hwfn, p_ptt,
			       path_addr +
			       offsetof(struct public_path, process_kill)) &
			PROCESS_KILL_COUNTER_MASK;

	return proc_kill_cnt;
}

static void qed_mcp_handle_process_kill(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 proc_kill_cnt;

	/* Prevent possible attentions/interrupts during the recovery handling
	 * and till its load phase, during which they will be re-enabled.
	 */
	qed_int_igu_disable_int(p_hwfn, p_ptt);

	DP_NOTICE(p_hwfn, "Received a process kill indication\n");

	/* The following operations should be done once, and thus in CMT mode
	 * are carried out by only the first HW function.
	 */
	if (p_hwfn != QED_LEADING_HWFN(cdev))
		return;

	if (cdev->recov_in_prog) {
		DP_NOTICE(p_hwfn,
			  "Ignoring the indication since a recovery process is already in progress\n");
		return;
	}

	cdev->recov_in_prog = true;

	proc_kill_cnt = qed_get_process_kill_counter(p_hwfn, p_ptt);
	DP_NOTICE(p_hwfn, "Process kill counter: %d\n", proc_kill_cnt);

	qed_schedule_recovery_handler(p_hwfn);
}

static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					enum MFW_DRV_MSG_TYPE type)
{
	enum qed_mcp_protocol_type stats_type;
	union qed_mcp_protocol_stats stats;
	struct qed_mcp_mb_params mb_params;
	u32 hsi_param;

	switch (type) {
	case MFW_DRV_MSG_GET_LAN_STATS:
		stats_type = QED_MCP_LAN_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
		break;
	case MFW_DRV_MSG_GET_FCOE_STATS:
		stats_type = QED_MCP_FCOE_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
		break;
	case MFW_DRV_MSG_GET_ISCSI_STATS:
		stats_type = QED_MCP_ISCSI_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
		break;
	case MFW_DRV_MSG_GET_RDMA_STATS:
		stats_type = QED_MCP_RDMA_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type);
		return;
	}

	qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
	mb_params.param = hsi_param;
	mb_params.p_data_src = &stats;
	mb_params.data_src_size = sizeof(stats);
	qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}

static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *p_info;
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	qed_read_pf_bandwidth(p_hwfn, &shmem_info);

	p_info = &p_hwfn->mcp_info->func_info;

	qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
	qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);

	/* Acknowledge the MFW */
	qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
		    &param);
}

static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
					    FUNC_MF_CFG_OV_STAG_MASK;
	p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
	if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) {
		if (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET) {
			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
			       p_hwfn->hw_info.ovlan);
			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1);

			/* Configure DB to add external vlan to EDPM packets */
			qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
			qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2,
			       p_hwfn->hw_info.ovlan);
		} else {
			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0);
			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0);
			qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0);
			qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0);
		}

		qed_sp_pf_update_stag(p_hwfn);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
		   p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);

	/* Acknowledge the MFW */
	qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
		    &resp, &param);
}

static void qed_mcp_handle_fan_failure(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt)
{
	/* A single notification should be sent to upper driver in CMT mode */
	if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev))
		return;

	qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_FAN_FAIL,
			  "Fan failure was detected on the network interface card and it's going to be shut down.\n");
}

struct qed_mdump_cmd_params {
	u32 cmd;
	void *p_data_src;
	u8 data_src_size;
	void *p_data_dst;
	u8 data_dst_size;
	u32 mcp_resp;
};

static int
qed_mcp_mdump_cmd(struct qed_hwfn *p_hwfn,
		  struct qed_ptt *p_ptt,
		  struct qed_mdump_cmd_params *p_mdump_cmd_params)
{
	struct qed_mcp_mb_params mb_params;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_MDUMP_CMD;
	mb_params.param = p_mdump_cmd_params->cmd;
	mb_params.p_data_src = p_mdump_cmd_params->p_data_src;
	mb_params.data_src_size = p_mdump_cmd_params->data_src_size;
	mb_params.p_data_dst = p_mdump_cmd_params->p_data_dst;
	mb_params.data_dst_size = p_mdump_cmd_params->data_dst_size;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	p_mdump_cmd_params->mcp_resp = mb_params.mcp_resp;

	if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_MDUMP_INVALID_CMD) {
		DP_INFO(p_hwfn,
			"The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
			p_mdump_cmd_params->cmd);
		rc = -EOPNOTSUPP;
	} else if (p_mdump_cmd_params->mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The mdump command is not supported by the MFW\n");
		rc = -EOPNOTSUPP;
	}

	return rc;
}

static int qed_mcp_mdump_ack(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mdump_cmd_params mdump_cmd_params;

	memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;

	return qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}

*p_hwfn
,
1842 struct qed_ptt
*p_ptt
,
1843 struct mdump_retain_data_stc
*p_mdump_retain
)
1845 struct qed_mdump_cmd_params mdump_cmd_params
;
1848 memset(&mdump_cmd_params
, 0, sizeof(mdump_cmd_params
));
1849 mdump_cmd_params
.cmd
= DRV_MSG_CODE_MDUMP_GET_RETAIN
;
1850 mdump_cmd_params
.p_data_dst
= p_mdump_retain
;
1851 mdump_cmd_params
.data_dst_size
= sizeof(*p_mdump_retain
);
1853 rc
= qed_mcp_mdump_cmd(p_hwfn
, p_ptt
, &mdump_cmd_params
);
1857 if (mdump_cmd_params
.mcp_resp
!= FW_MSG_CODE_OK
) {
1859 "Failed to get the mdump retained data [mcp_resp 0x%x]\n",
1860 mdump_cmd_params
.mcp_resp
);
static void qed_mcp_handle_critical_error(struct qed_hwfn *p_hwfn,
					  struct qed_ptt *p_ptt)
{
	struct mdump_retain_data_stc mdump_retain;
	int rc;

	/* In CMT mode - no need for more than a single acknowledgment to the
	 * MFW, and no more than a single notification to the upper driver.
	 */
	if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev))
		return;

	rc = qed_mcp_mdump_get_retain(p_hwfn, p_ptt, &mdump_retain);
	if (rc == 0 && mdump_retain.valid)
		DP_NOTICE(p_hwfn,
			  "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
			  mdump_retain.epoch,
			  mdump_retain.pf, mdump_retain.status);
	else
		DP_NOTICE(p_hwfn,
			  "The MFW notified that a critical error occurred in the device\n");

	DP_NOTICE(p_hwfn,
		  "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
	qed_mcp_mdump_ack(p_hwfn, p_ptt);

	qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_HW_ATTN, NULL);
}

void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct public_func shmem_info;
	u32 port_cfg, val;

	if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
		return;

	memset(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
	port_cfg = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
			  offsetof(struct public_port, oem_cfg_port));
	val = (port_cfg & OEM_CFG_CHANNEL_TYPE_MASK) >>
	      OEM_CFG_CHANNEL_TYPE_OFFSET;
	if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
		DP_NOTICE(p_hwfn,
			  "Incorrect UFP Channel type %d port_id 0x%02x\n",
			  val, MFW_PORT(p_hwfn));

	val = (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >> OEM_CFG_SCHED_TYPE_OFFSET;
	if (val == OEM_CFG_SCHED_TYPE_ETS) {
		p_hwfn->ufp_info.mode = QED_UFP_MODE_ETS;
	} else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW) {
		p_hwfn->ufp_info.mode = QED_UFP_MODE_VNIC_BW;
	} else {
		p_hwfn->ufp_info.mode = QED_UFP_MODE_UNKNOWN;
		DP_NOTICE(p_hwfn,
			  "Unknown UFP scheduling mode %d port_id 0x%02x\n",
			  val, MFW_PORT(p_hwfn));
	}

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
	val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_TC_MASK) >>
	      OEM_CFG_FUNC_TC_OFFSET;
	p_hwfn->ufp_info.tc = (u8)val;
	val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_HOST_PRI_CTRL_MASK) >>
	      OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET;
	if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) {
		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_VNIC;
	} else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS) {
		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_OS;
	} else {
		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_UNKNOWN;
		DP_NOTICE(p_hwfn,
			  "Unknown Host priority control %d port_id 0x%02x\n",
			  val, MFW_PORT(p_hwfn));
	}

	DP_NOTICE(p_hwfn,
		  "UFP shmem config: mode = %d tc = %d pri_type = %d port_id 0x%02x\n",
		  p_hwfn->ufp_info.mode, p_hwfn->ufp_info.tc,
		  p_hwfn->ufp_info.pri_type, MFW_PORT(p_hwfn));
}

static int
qed_mcp_handle_ufp_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	qed_mcp_read_ufp_config(p_hwfn, p_ptt);

	if (p_hwfn->ufp_info.mode == QED_UFP_MODE_VNIC_BW) {
		p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
		qed_hw_info_set_offload_tc(&p_hwfn->hw_info,
					   p_hwfn->ufp_info.tc);

		qed_qm_reconf(p_hwfn, p_ptt);
	} else if (p_hwfn->ufp_info.mode == QED_UFP_MODE_ETS) {
		/* Merge UFP TC with the dcbx TC data */
		qed_dcbx_mib_update_event(p_hwfn, p_ptt,
					  QED_DCBX_OPERATIONAL_MIB);
	} else {
		DP_ERR(p_hwfn, "Invalid sched type, discard the UFP config\n");
		return -EINVAL;
	}

	/* update storm FW with negotiation results */
	qed_sp_pf_update_ufp(p_hwfn);

	/* update stag pcp value */
	qed_sp_pf_update_stag(p_hwfn);

	return 0;
}

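/* Poll the per-PF MFW mailbox: compare the freshly read message dwords against
 * the shadow copy, dispatch a handler for every command that changed, and then
 * write the acknowledgment dwords back (in big-endian, as the MFW expects).
 */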
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *info = p_hwfn->mcp_info;
	int rc = 0;
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");

	/* Read Messages from MFW */
	qed_mcp_read_mb(p_hwfn, p_ptt);

	/* Compare current messages to old ones */
	for (i = 0; i < info->mfw_mb_length; i++) {
		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
			continue;

		found = true;

		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

		switch (i) {
		case MFW_DRV_MSG_LINK_CHANGE:
			qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
			break;
		case MFW_DRV_MSG_VF_DISABLED:
			qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_LLDP_MIB);
			break;
		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_MIB);
			break;
		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_OPERATIONAL_MIB);
			break;
		case MFW_DRV_MSG_OEM_CFG_UPDATE:
			qed_mcp_handle_ufp_event(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
			qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_ERROR_RECOVERY:
			qed_mcp_handle_process_kill(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_LAN_STATS:
		case MFW_DRV_MSG_GET_FCOE_STATS:
		case MFW_DRV_MSG_GET_ISCSI_STATS:
		case MFW_DRV_MSG_GET_RDMA_STATS:
			qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
			break;
		case MFW_DRV_MSG_BW_UPDATE:
			qed_mcp_update_bw(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_S_TAG_UPDATE:
			qed_mcp_update_stag(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_FAILURE_DETECTED:
			qed_mcp_handle_fan_failure(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
			qed_mcp_handle_critical_error(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_TLV_REQ:
			qed_mfw_tlv_req(p_hwfn);
			break;
		default:
			DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
			rc = -EINVAL;
		}
	}

	/* ACK everything */
	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
		__be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);

		/* MFW expect answer in BE, so we force write in that format */
		qed_wr(p_hwfn, p_ptt,
		       info->mfw_mb_addr + sizeof(u32) +
		       MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
		       sizeof(u32) + i * sizeof(u32), (__force u32)val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Received an MFW message indication but no new message!\n");
		rc = -EINVAL;
	}

	/* Copy the new mfw messages into the shadow */
	memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}
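/* For a VF the MFW version is taken from the cached ACQUIRE response; for a
 * PF it is read directly from the PUBLIC_GLOBAL shmem section, optionally
 * together with the running MBI bundle id.
 */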
int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
			u32 *p_mfw_ver, u32 *p_running_bundle_id)
{
	u32 global_offsize;

	if (IS_VF(p_hwfn->cdev)) {
		if (p_hwfn->vf_iov_info) {
			struct pfvf_acquire_resp_tlv *p_resp;

			p_resp = &p_hwfn->vf_iov_info->acquire_resp;
			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
			return 0;
		} else {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF requested MFW version prior to ACQUIRE\n");
			return -EINVAL;
		}
	}

	global_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_hwfn->
						     mcp_info->public_base,
						     PUBLIC_GLOBAL));
	*p_mfw_ver =
	    qed_rd(p_hwfn, p_ptt,
		   SECTION_ADDR(global_offsize,
				0) + offsetof(struct public_global, mfw_ver));

	if (p_running_bundle_id != NULL) {
		*p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
					      SECTION_ADDR(global_offsize, 0) +
					      offsetof(struct public_global,
						       running_bundle_id));
	}

	return 0;
}
int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt, u32 *p_mbi_ver)
{
	u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	/* Read the address of the nvm_cfg */
	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
	if (!nvm_cfg_addr) {
		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
		return -EINVAL;
	}

	/* Read the offset of nvm_cfg1 */
	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);

	mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
		       offsetof(struct nvm_cfg1, glob) +
		       offsetof(struct nvm_cfg1_glob, mbi_version);
	*p_mbi_ver = qed_rd(p_hwfn, p_ptt,
			    mbi_ver_addr) &
		     (NVM_CFG1_GLOB_MBI_VERSION_0_MASK |
		      NVM_CFG1_GLOB_MBI_VERSION_1_MASK |
		      NVM_CFG1_GLOB_MBI_VERSION_2_MASK);

	return 0;
}
int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u32 *p_media_type)
{
	*p_media_type = MEDIA_UNSPECIFIED;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	if (!p_ptt) {
		*p_media_type = MEDIA_UNSPECIFIED;
		return -EINVAL;
	}

	*p_media_type = qed_rd(p_hwfn, p_ptt,
			       p_hwfn->mcp_info->port_addr +
			       offsetof(struct public_port, media_type));

	return 0;
}
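/* The transceiver state and type are read from the transceiver_data field of
 * the port shmem section; the type is only meaningful while the module is
 * reported as present and not updating.
 */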
int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 u32 *p_transceiver_state,
				 u32 *p_transceiver_type)
{
	u32 transceiver_info;

	*p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
	*p_transceiver_state = ETH_TRANSCEIVER_STATE_UPDATING;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	transceiver_info = qed_rd(p_hwfn, p_ptt,
				  p_hwfn->mcp_info->port_addr +
				  offsetof(struct public_port,
					   transceiver_data));

	*p_transceiver_state = (transceiver_info &
				ETH_TRANSCEIVER_STATE_MASK) >>
				ETH_TRANSCEIVER_STATE_OFFSET;

	if (*p_transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		*p_transceiver_type = (transceiver_info &
				       ETH_TRANSCEIVER_TYPE_MASK) >>
				       ETH_TRANSCEIVER_TYPE_OFFSET;
	else
		*p_transceiver_type = ETH_TRANSCEIVER_TYPE_UNKNOWN;

	return 0;
}

static bool qed_is_transceiver_ready(u32 transceiver_state,
				     u32 transceiver_type)
{
	if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) &&
	    ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) &&
	    (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE))
		return true;

	return false;
}
2224 int qed_mcp_trans_speed_mask(struct qed_hwfn
*p_hwfn
,
2225 struct qed_ptt
*p_ptt
, u32
*p_speed_mask
)
2227 u32 transceiver_type
, transceiver_state
;
2230 ret
= qed_mcp_get_transceiver_data(p_hwfn
, p_ptt
, &transceiver_state
,
2235 if (qed_is_transceiver_ready(transceiver_state
, transceiver_type
) ==
2239 switch (transceiver_type
) {
2240 case ETH_TRANSCEIVER_TYPE_1G_LX
:
2241 case ETH_TRANSCEIVER_TYPE_1G_SX
:
2242 case ETH_TRANSCEIVER_TYPE_1G_PCC
:
2243 case ETH_TRANSCEIVER_TYPE_1G_ACC
:
2244 case ETH_TRANSCEIVER_TYPE_1000BASET
:
2245 *p_speed_mask
= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G
;
2247 case ETH_TRANSCEIVER_TYPE_10G_SR
:
2248 case ETH_TRANSCEIVER_TYPE_10G_LR
:
2249 case ETH_TRANSCEIVER_TYPE_10G_LRM
:
2250 case ETH_TRANSCEIVER_TYPE_10G_ER
:
2251 case ETH_TRANSCEIVER_TYPE_10G_PCC
:
2252 case ETH_TRANSCEIVER_TYPE_10G_ACC
:
2253 case ETH_TRANSCEIVER_TYPE_4x10G
:
2254 *p_speed_mask
= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G
;
2256 case ETH_TRANSCEIVER_TYPE_40G_LR4
:
2257 case ETH_TRANSCEIVER_TYPE_40G_SR4
:
2258 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR
:
2259 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR
:
2260 *p_speed_mask
= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G
|
2261 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G
;
2263 case ETH_TRANSCEIVER_TYPE_100G_AOC
:
2264 case ETH_TRANSCEIVER_TYPE_100G_SR4
:
2265 case ETH_TRANSCEIVER_TYPE_100G_LR4
:
2266 case ETH_TRANSCEIVER_TYPE_100G_ER4
:
2267 case ETH_TRANSCEIVER_TYPE_100G_ACC
:
2269 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G
|
2270 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G
;
2272 case ETH_TRANSCEIVER_TYPE_25G_SR
:
2273 case ETH_TRANSCEIVER_TYPE_25G_LR
:
2274 case ETH_TRANSCEIVER_TYPE_25G_AOC
:
2275 case ETH_TRANSCEIVER_TYPE_25G_ACC_S
:
2276 case ETH_TRANSCEIVER_TYPE_25G_ACC_M
:
2277 case ETH_TRANSCEIVER_TYPE_25G_ACC_L
:
2278 *p_speed_mask
= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G
;
2280 case ETH_TRANSCEIVER_TYPE_25G_CA_N
:
2281 case ETH_TRANSCEIVER_TYPE_25G_CA_S
:
2282 case ETH_TRANSCEIVER_TYPE_25G_CA_L
:
2283 case ETH_TRANSCEIVER_TYPE_4x25G_CR
:
2284 *p_speed_mask
= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G
|
2285 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G
|
2286 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G
;
2288 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR
:
2289 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR
:
2290 *p_speed_mask
= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G
|
2291 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G
;
2293 case ETH_TRANSCEIVER_TYPE_40G_CR4
:
2294 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR
:
2295 *p_speed_mask
= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G
|
2296 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G
|
2297 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G
;
2299 case ETH_TRANSCEIVER_TYPE_100G_CR4
:
2300 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR
:
2302 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G
|
2303 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G
|
2304 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G
|
2305 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G
|
2306 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G
|
2307 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G
|
2308 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G
;
2310 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR
:
2311 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR
:
2312 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC
:
2314 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G
|
2315 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G
|
2316 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G
|
2317 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G
;
2319 case ETH_TRANSCEIVER_TYPE_XLPPI
:
2320 *p_speed_mask
= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G
;
2322 case ETH_TRANSCEIVER_TYPE_10G_BASET
:
2323 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR
:
2324 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR
:
2325 *p_speed_mask
= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G
|
2326 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G
;
2329 DP_INFO(p_hwfn
, "Unknown transceiver type 0x%x\n",
2331 *p_speed_mask
= 0xff;
2338 int qed_mcp_get_board_config(struct qed_hwfn
*p_hwfn
,
2339 struct qed_ptt
*p_ptt
, u32
*p_board_config
)
2341 u32 nvm_cfg_addr
, nvm_cfg1_offset
, port_cfg_addr
;
2343 if (IS_VF(p_hwfn
->cdev
))
2346 if (!qed_mcp_is_init(p_hwfn
)) {
2347 DP_NOTICE(p_hwfn
, "MFW is not initialized!\n");
2351 *p_board_config
= NVM_CFG1_PORT_PORT_TYPE_UNDEFINED
;
2355 nvm_cfg_addr
= qed_rd(p_hwfn
, p_ptt
, MISC_REG_GEN_PURP_CR0
);
2356 nvm_cfg1_offset
= qed_rd(p_hwfn
, p_ptt
, nvm_cfg_addr
+ 4);
2357 port_cfg_addr
= MCP_REG_SCRATCH
+ nvm_cfg1_offset
+
2358 offsetof(struct nvm_cfg1
, port
[MFW_PORT(p_hwfn
)]);
2359 *p_board_config
= qed_rd(p_hwfn
, p_ptt
,
2361 offsetof(struct nvm_cfg1_port
,
2367 /* Old MFW has a global configuration for all PFs regarding RDMA support */
2369 qed_mcp_get_shmem_proto_legacy(struct qed_hwfn
*p_hwfn
,
2370 enum qed_pci_personality
*p_proto
)
2372 /* There wasn't ever a legacy MFW that published iwarp.
2373 * So at this point, this is either plain l2 or RoCE.
2375 if (test_bit(QED_DEV_CAP_ROCE
, &p_hwfn
->hw_info
.device_capabilities
))
2376 *p_proto
= QED_PCI_ETH_ROCE
;
2378 *p_proto
= QED_PCI_ETH
;
2380 DP_VERBOSE(p_hwfn
, NETIF_MSG_IFUP
,
2381 "According to Legacy capabilities, L2 personality is %08x\n",
2386 qed_mcp_get_shmem_proto_mfw(struct qed_hwfn
*p_hwfn
,
2387 struct qed_ptt
*p_ptt
,
2388 enum qed_pci_personality
*p_proto
)
2390 u32 resp
= 0, param
= 0;
2393 rc
= qed_mcp_cmd(p_hwfn
, p_ptt
,
2394 DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL
, 0, &resp
, ¶m
);
2397 if (resp
!= FW_MSG_CODE_OK
) {
2398 DP_VERBOSE(p_hwfn
, NETIF_MSG_IFUP
,
2399 "MFW lacks support for command; Returns %08x\n",
2405 case FW_MB_PARAM_GET_PF_RDMA_NONE
:
2406 *p_proto
= QED_PCI_ETH
;
2408 case FW_MB_PARAM_GET_PF_RDMA_ROCE
:
2409 *p_proto
= QED_PCI_ETH_ROCE
;
2411 case FW_MB_PARAM_GET_PF_RDMA_IWARP
:
2412 *p_proto
= QED_PCI_ETH_IWARP
;
2414 case FW_MB_PARAM_GET_PF_RDMA_BOTH
:
2415 *p_proto
= QED_PCI_ETH_RDMA
;
2419 "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
2426 "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
2427 (u32
) *p_proto
, resp
, param
);
2432 qed_mcp_get_shmem_proto(struct qed_hwfn
*p_hwfn
,
2433 struct public_func
*p_info
,
2434 struct qed_ptt
*p_ptt
,
2435 enum qed_pci_personality
*p_proto
)
2439 switch (p_info
->config
& FUNC_MF_CFG_PROTOCOL_MASK
) {
2440 case FUNC_MF_CFG_PROTOCOL_ETHERNET
:
2441 if (!IS_ENABLED(CONFIG_QED_RDMA
))
2442 *p_proto
= QED_PCI_ETH
;
2443 else if (qed_mcp_get_shmem_proto_mfw(p_hwfn
, p_ptt
, p_proto
))
2444 qed_mcp_get_shmem_proto_legacy(p_hwfn
, p_proto
);
2446 case FUNC_MF_CFG_PROTOCOL_ISCSI
:
2447 *p_proto
= QED_PCI_ISCSI
;
2449 case FUNC_MF_CFG_PROTOCOL_FCOE
:
2450 *p_proto
= QED_PCI_FCOE
;
2452 case FUNC_MF_CFG_PROTOCOL_ROCE
:
2453 DP_NOTICE(p_hwfn
, "RoCE personality is not a valid value!\n");
2462 int qed_mcp_fill_shmem_func_info(struct qed_hwfn
*p_hwfn
,
2463 struct qed_ptt
*p_ptt
)
2465 struct qed_mcp_function_info
*info
;
2466 struct public_func shmem_info
;
2468 qed_mcp_get_shmem_func(p_hwfn
, p_ptt
, &shmem_info
, MCP_PF_ID(p_hwfn
));
2469 info
= &p_hwfn
->mcp_info
->func_info
;
2471 info
->pause_on_host
= (shmem_info
.config
&
2472 FUNC_MF_CFG_PAUSE_ON_HOST_RING
) ? 1 : 0;
2474 if (qed_mcp_get_shmem_proto(p_hwfn
, &shmem_info
, p_ptt
,
2476 DP_ERR(p_hwfn
, "Unknown personality %08x\n",
2477 (u32
)(shmem_info
.config
& FUNC_MF_CFG_PROTOCOL_MASK
));
2481 qed_read_pf_bandwidth(p_hwfn
, &shmem_info
);
2483 if (shmem_info
.mac_upper
|| shmem_info
.mac_lower
) {
2484 info
->mac
[0] = (u8
)(shmem_info
.mac_upper
>> 8);
2485 info
->mac
[1] = (u8
)(shmem_info
.mac_upper
);
2486 info
->mac
[2] = (u8
)(shmem_info
.mac_lower
>> 24);
2487 info
->mac
[3] = (u8
)(shmem_info
.mac_lower
>> 16);
2488 info
->mac
[4] = (u8
)(shmem_info
.mac_lower
>> 8);
2489 info
->mac
[5] = (u8
)(shmem_info
.mac_lower
);
2491 /* Store primary MAC for later possible WoL */
2492 memcpy(&p_hwfn
->cdev
->wol_mac
, info
->mac
, ETH_ALEN
);
2494 DP_NOTICE(p_hwfn
, "MAC is 0 in shmem\n");
2497 info
->wwn_port
= (u64
)shmem_info
.fcoe_wwn_port_name_lower
|
2498 (((u64
)shmem_info
.fcoe_wwn_port_name_upper
) << 32);
2499 info
->wwn_node
= (u64
)shmem_info
.fcoe_wwn_node_name_lower
|
2500 (((u64
)shmem_info
.fcoe_wwn_node_name_upper
) << 32);
2502 info
->ovlan
= (u16
)(shmem_info
.ovlan_stag
& FUNC_MF_CFG_OV_STAG_MASK
);
2504 info
->mtu
= (u16
)shmem_info
.mtu_size
;
2506 p_hwfn
->hw_info
.b_wol_support
= QED_WOL_SUPPORT_NONE
;
2507 p_hwfn
->cdev
->wol_config
= (u8
)QED_OV_WOL_DEFAULT
;
2508 if (qed_mcp_is_init(p_hwfn
)) {
2509 u32 resp
= 0, param
= 0;
2512 rc
= qed_mcp_cmd(p_hwfn
, p_ptt
,
2513 DRV_MSG_CODE_OS_WOL
, 0, &resp
, ¶m
);
2516 if (resp
== FW_MSG_CODE_OS_WOL_SUPPORTED
)
2517 p_hwfn
->hw_info
.b_wol_support
= QED_WOL_SUPPORT_PME
;
2520 DP_VERBOSE(p_hwfn
, (QED_MSG_SP
| NETIF_MSG_IFUP
),
2521 "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %pM wwn port %llx node %llx ovlan %04x wol %02x\n",
2522 info
->pause_on_host
, info
->protocol
,
2523 info
->bandwidth_min
, info
->bandwidth_max
,
2525 info
->wwn_port
, info
->wwn_node
,
2526 info
->ovlan
, (u8
)p_hwfn
->hw_info
.b_wol_support
);
struct qed_mcp_link_params
*qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_input;
}

struct qed_mcp_link_state
*qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_output;
}

struct qed_mcp_link_capabilities
*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_capabilities;
}

int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt,
			 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);

	/* Wait for the drain to complete before returning */
	msleep(1020);

	return rc;
}
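/* MCP_REG_NVM_CFG4 holds the flash size as a power-of-two exponent in Mbit
 * units; adding MCP_BYTES_PER_MBIT_SHIFT to the exponent converts the result
 * to bytes.
 */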
int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u32 *p_flash_size)
{
	u32 flash_size;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
		      MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
	flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));

	*p_flash_size = flash_size;

	return 0;
}
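/* A recovery is requested by asserting MISC_REG_AEU_GENERAL_ATTN_35, which
 * signals the MFW to start the recovery flow; the prolog below then gives
 * outstanding PCIe transactions time to complete and clears the PF's
 * FID_enable in the PXP before the recovery proceeds.
 */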
int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_dev *cdev = p_hwfn->cdev;

	if (cdev->recov_in_prog) {
		DP_NOTICE(p_hwfn,
			  "Avoid triggering a recovery since such a process is already in progress\n");
		return -EAGAIN;
	}

	DP_NOTICE(p_hwfn, "Triggering a recovery process\n");
	qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);

	return 0;
}

#define QED_RECOVERY_PROLOG_SLEEP_MS    100

int qed_recovery_prolog(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
	int rc;

	/* Allow ongoing PCIe transactions to complete */
	msleep(QED_RECOVERY_PROLOG_SLEEP_MS);

	/* Clear the PF's internal FID_enable in the PXP */
	rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "qed_pglueb_set_pfid_enable() failed. rc = %d.\n",
			  rc);

	return rc;
}
2625 qed_mcp_config_vf_msix_bb(struct qed_hwfn
*p_hwfn
,
2626 struct qed_ptt
*p_ptt
, u8 vf_id
, u8 num
)
2628 u32 resp
= 0, param
= 0, rc_param
= 0;
2631 /* Only Leader can configure MSIX, and need to take CMT into account */
2632 if (!IS_LEAD_HWFN(p_hwfn
))
2634 num
*= p_hwfn
->cdev
->num_hwfns
;
2636 param
|= (vf_id
<< DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT
) &
2637 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK
;
2638 param
|= (num
<< DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT
) &
2639 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK
;
2641 rc
= qed_mcp_cmd(p_hwfn
, p_ptt
, DRV_MSG_CODE_CFG_VF_MSIX
, param
,
2644 if (resp
!= FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE
) {
2645 DP_NOTICE(p_hwfn
, "VF[%d]: MFW failed to set MSI-X\n", vf_id
);
2648 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
2649 "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
2657 qed_mcp_config_vf_msix_ah(struct qed_hwfn
*p_hwfn
,
2658 struct qed_ptt
*p_ptt
, u8 num
)
2660 u32 resp
= 0, param
= num
, rc_param
= 0;
2663 rc
= qed_mcp_cmd(p_hwfn
, p_ptt
, DRV_MSG_CODE_CFG_PF_VFS_MSIX
,
2664 param
, &resp
, &rc_param
);
2666 if (resp
!= FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE
) {
2667 DP_NOTICE(p_hwfn
, "MFW failed to set MSI-X for VFs\n");
2670 DP_VERBOSE(p_hwfn
, QED_MSG_IOV
,
2671 "Requested 0x%02x MSI-x interrupts for VFs\n", num
);
2677 int qed_mcp_config_vf_msix(struct qed_hwfn
*p_hwfn
,
2678 struct qed_ptt
*p_ptt
, u8 vf_id
, u8 num
)
2680 if (QED_IS_BB(p_hwfn
->cdev
))
2681 return qed_mcp_config_vf_msix_bb(p_hwfn
, p_ptt
, vf_id
, num
);
2683 return qed_mcp_config_vf_msix_ah(p_hwfn
, p_ptt
, num
);
2687 qed_mcp_send_drv_version(struct qed_hwfn
*p_hwfn
,
2688 struct qed_ptt
*p_ptt
,
2689 struct qed_mcp_drv_version
*p_ver
)
2691 struct qed_mcp_mb_params mb_params
;
2692 struct drv_version_stc drv_version
;
2697 memset(&drv_version
, 0, sizeof(drv_version
));
2698 drv_version
.version
= p_ver
->version
;
2699 for (i
= 0; i
< (MCP_DRV_VER_STR_SIZE
- 4) / sizeof(u32
); i
++) {
2700 val
= cpu_to_be32(*((u32
*)&p_ver
->name
[i
* sizeof(u32
)]));
2701 *(__be32
*)&drv_version
.name
[i
* sizeof(u32
)] = val
;
2704 memset(&mb_params
, 0, sizeof(mb_params
));
2705 mb_params
.cmd
= DRV_MSG_CODE_SET_VERSION
;
2706 mb_params
.p_data_src
= &drv_version
;
2707 mb_params
.data_src_size
= sizeof(drv_version
);
2708 rc
= qed_mcp_cmd_and_union(p_hwfn
, p_ptt
, &mb_params
);
2710 DP_ERR(p_hwfn
, "MCP response failure, aborting\n");
2715 /* A maximal 100 msec waiting time for the MCP to halt */
2716 #define QED_MCP_HALT_SLEEP_MS 10
2717 #define QED_MCP_HALT_MAX_RETRIES 10
2719 int qed_mcp_halt(struct qed_hwfn
*p_hwfn
, struct qed_ptt
*p_ptt
)
2721 u32 resp
= 0, param
= 0, cpu_state
, cnt
= 0;
2724 rc
= qed_mcp_cmd(p_hwfn
, p_ptt
, DRV_MSG_CODE_MCP_HALT
, 0, &resp
,
2727 DP_ERR(p_hwfn
, "MCP response failure, aborting\n");
2732 msleep(QED_MCP_HALT_SLEEP_MS
);
2733 cpu_state
= qed_rd(p_hwfn
, p_ptt
, MCP_REG_CPU_STATE
);
2734 if (cpu_state
& MCP_REG_CPU_STATE_SOFT_HALTED
)
2736 } while (++cnt
< QED_MCP_HALT_MAX_RETRIES
);
2738 if (cnt
== QED_MCP_HALT_MAX_RETRIES
) {
2740 "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2741 qed_rd(p_hwfn
, p_ptt
, MCP_REG_CPU_MODE
), cpu_state
);
2745 qed_mcp_cmd_set_blocking(p_hwfn
, true);
2750 #define QED_MCP_RESUME_SLEEP_MS 10
2752 int qed_mcp_resume(struct qed_hwfn
*p_hwfn
, struct qed_ptt
*p_ptt
)
2754 u32 cpu_mode
, cpu_state
;
2756 qed_wr(p_hwfn
, p_ptt
, MCP_REG_CPU_STATE
, 0xffffffff);
2758 cpu_mode
= qed_rd(p_hwfn
, p_ptt
, MCP_REG_CPU_MODE
);
2759 cpu_mode
&= ~MCP_REG_CPU_MODE_SOFT_HALT
;
2760 qed_wr(p_hwfn
, p_ptt
, MCP_REG_CPU_MODE
, cpu_mode
);
2761 msleep(QED_MCP_RESUME_SLEEP_MS
);
2762 cpu_state
= qed_rd(p_hwfn
, p_ptt
, MCP_REG_CPU_STATE
);
2764 if (cpu_state
& MCP_REG_CPU_STATE_SOFT_HALTED
) {
2766 "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
2767 cpu_mode
, cpu_state
);
2771 qed_mcp_cmd_set_blocking(p_hwfn
, false);
2776 int qed_mcp_ov_update_current_config(struct qed_hwfn
*p_hwfn
,
2777 struct qed_ptt
*p_ptt
,
2778 enum qed_ov_client client
)
2780 u32 resp
= 0, param
= 0;
2785 case QED_OV_CLIENT_DRV
:
2786 drv_mb_param
= DRV_MB_PARAM_OV_CURR_CFG_OS
;
2788 case QED_OV_CLIENT_USER
:
2789 drv_mb_param
= DRV_MB_PARAM_OV_CURR_CFG_OTHER
;
2791 case QED_OV_CLIENT_VENDOR_SPEC
:
2792 drv_mb_param
= DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC
;
2795 DP_NOTICE(p_hwfn
, "Invalid client type %d\n", client
);
2799 rc
= qed_mcp_cmd(p_hwfn
, p_ptt
, DRV_MSG_CODE_OV_UPDATE_CURR_CFG
,
2800 drv_mb_param
, &resp
, ¶m
);
2802 DP_ERR(p_hwfn
, "MCP response failure, aborting\n");
2807 int qed_mcp_ov_update_driver_state(struct qed_hwfn
*p_hwfn
,
2808 struct qed_ptt
*p_ptt
,
2809 enum qed_ov_driver_state drv_state
)
2811 u32 resp
= 0, param
= 0;
2815 switch (drv_state
) {
2816 case QED_OV_DRIVER_STATE_NOT_LOADED
:
2817 drv_mb_param
= DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED
;
2819 case QED_OV_DRIVER_STATE_DISABLED
:
2820 drv_mb_param
= DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED
;
2822 case QED_OV_DRIVER_STATE_ACTIVE
:
2823 drv_mb_param
= DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE
;
2826 DP_NOTICE(p_hwfn
, "Invalid driver state %d\n", drv_state
);
2830 rc
= qed_mcp_cmd(p_hwfn
, p_ptt
, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE
,
2831 drv_mb_param
, &resp
, ¶m
);
2833 DP_ERR(p_hwfn
, "Failed to send driver state\n");
2838 int qed_mcp_ov_update_mtu(struct qed_hwfn
*p_hwfn
,
2839 struct qed_ptt
*p_ptt
, u16 mtu
)
2841 u32 resp
= 0, param
= 0;
2845 drv_mb_param
= (u32
)mtu
<< DRV_MB_PARAM_OV_MTU_SIZE_SHIFT
;
2846 rc
= qed_mcp_cmd(p_hwfn
, p_ptt
, DRV_MSG_CODE_OV_UPDATE_MTU
,
2847 drv_mb_param
, &resp
, ¶m
);
2849 DP_ERR(p_hwfn
, "Failed to send mtu value, rc = %d\n", rc
);
int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u8 *mac)
{
	struct qed_mcp_mb_params mb_params;
	u32 mfw_mac[2];
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
	mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
			  DRV_MSG_CODE_VMAC_TYPE_SHIFT;
	mb_params.param |= MCP_PF_ID(p_hwfn);

	/* MCP is BE, and on LE platforms PCI would swap access to SHMEM
	 * in 32-bit granularity.
	 * So the MAC has to be set in native order [and not byte order],
	 * otherwise it would be read incorrectly by MFW after swap.
	 */
	mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
	mfw_mac[1] = mac[4] << 24 | mac[5] << 16;

	mb_params.p_data_src = (u8 *)mfw_mac;
	mb_params.data_src_size = 8;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);

	/* Store primary MAC for later possible WoL */
	memcpy(p_hwfn->cdev->wol_mac, mac, ETH_ALEN);

	return rc;
}
2887 int qed_mcp_ov_update_wol(struct qed_hwfn
*p_hwfn
,
2888 struct qed_ptt
*p_ptt
, enum qed_ov_wol wol
)
2890 u32 resp
= 0, param
= 0;
2894 if (p_hwfn
->hw_info
.b_wol_support
== QED_WOL_SUPPORT_NONE
) {
2895 DP_VERBOSE(p_hwfn
, QED_MSG_SP
,
2896 "Can't change WoL configuration when WoL isn't supported\n");
2901 case QED_OV_WOL_DEFAULT
:
2902 drv_mb_param
= DRV_MB_PARAM_WOL_DEFAULT
;
2904 case QED_OV_WOL_DISABLED
:
2905 drv_mb_param
= DRV_MB_PARAM_WOL_DISABLED
;
2907 case QED_OV_WOL_ENABLED
:
2908 drv_mb_param
= DRV_MB_PARAM_WOL_ENABLED
;
2911 DP_ERR(p_hwfn
, "Invalid wol state %d\n", wol
);
2915 rc
= qed_mcp_cmd(p_hwfn
, p_ptt
, DRV_MSG_CODE_OV_UPDATE_WOL
,
2916 drv_mb_param
, &resp
, ¶m
);
2918 DP_ERR(p_hwfn
, "Failed to send wol mode, rc = %d\n", rc
);
2920 /* Store the WoL update for a future unload */
2921 p_hwfn
->cdev
->wol_config
= (u8
)wol
;
2926 int qed_mcp_ov_update_eswitch(struct qed_hwfn
*p_hwfn
,
2927 struct qed_ptt
*p_ptt
,
2928 enum qed_ov_eswitch eswitch
)
2930 u32 resp
= 0, param
= 0;
2935 case QED_OV_ESWITCH_NONE
:
2936 drv_mb_param
= DRV_MB_PARAM_ESWITCH_MODE_NONE
;
2938 case QED_OV_ESWITCH_VEB
:
2939 drv_mb_param
= DRV_MB_PARAM_ESWITCH_MODE_VEB
;
2941 case QED_OV_ESWITCH_VEPA
:
2942 drv_mb_param
= DRV_MB_PARAM_ESWITCH_MODE_VEPA
;
2945 DP_ERR(p_hwfn
, "Invalid eswitch mode %d\n", eswitch
);
2949 rc
= qed_mcp_cmd(p_hwfn
, p_ptt
, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE
,
2950 drv_mb_param
, &resp
, ¶m
);
2952 DP_ERR(p_hwfn
, "Failed to send eswitch mode, rc = %d\n", rc
);
2957 int qed_mcp_set_led(struct qed_hwfn
*p_hwfn
,
2958 struct qed_ptt
*p_ptt
, enum qed_led_mode mode
)
2960 u32 resp
= 0, param
= 0, drv_mb_param
;
2964 case QED_LED_MODE_ON
:
2965 drv_mb_param
= DRV_MB_PARAM_SET_LED_MODE_ON
;
2967 case QED_LED_MODE_OFF
:
2968 drv_mb_param
= DRV_MB_PARAM_SET_LED_MODE_OFF
;
2970 case QED_LED_MODE_RESTORE
:
2971 drv_mb_param
= DRV_MB_PARAM_SET_LED_MODE_OPER
;
2974 DP_NOTICE(p_hwfn
, "Invalid LED mode %d\n", mode
);
2978 rc
= qed_mcp_cmd(p_hwfn
, p_ptt
, DRV_MSG_CODE_SET_LED_MODE
,
2979 drv_mb_param
, &resp
, ¶m
);
2984 int qed_mcp_mask_parities(struct qed_hwfn
*p_hwfn
,
2985 struct qed_ptt
*p_ptt
, u32 mask_parities
)
2987 u32 resp
= 0, param
= 0;
2990 rc
= qed_mcp_cmd(p_hwfn
, p_ptt
, DRV_MSG_CODE_MASK_PARITIES
,
2991 mask_parities
, &resp
, ¶m
);
2995 "MCP response failure for mask parities, aborting\n");
2996 } else if (resp
!= FW_MSG_CODE_OK
) {
2998 "MCP did not acknowledge mask parity request. Old MFW?\n");
3005 int qed_mcp_nvm_read(struct qed_dev
*cdev
, u32 addr
, u8
*p_buf
, u32 len
)
3007 u32 bytes_left
= len
, offset
= 0, bytes_to_copy
, read_len
= 0;
3008 struct qed_hwfn
*p_hwfn
= QED_LEADING_HWFN(cdev
);
3009 u32 resp
= 0, resp_param
= 0;
3010 struct qed_ptt
*p_ptt
;
3013 p_ptt
= qed_ptt_acquire(p_hwfn
);
3017 while (bytes_left
> 0) {
3018 bytes_to_copy
= min_t(u32
, bytes_left
, MCP_DRV_NVM_BUF_LEN
);
3020 rc
= qed_mcp_nvm_rd_cmd(p_hwfn
, p_ptt
,
3021 DRV_MSG_CODE_NVM_READ_NVRAM
,
3024 DRV_MB_PARAM_NVM_LEN_OFFSET
),
3027 (u32
*)(p_buf
+ offset
));
3029 if (rc
|| (resp
!= FW_MSG_CODE_NVM_OK
)) {
3030 DP_NOTICE(cdev
, "MCP command rc = %d\n", rc
);
3034 /* This can be a lengthy process, and it's possible scheduler
3035 * isn't preemptable. Sleep a bit to prevent CPU hogging.
3037 if (bytes_left
% 0x1000 <
3038 (bytes_left
- read_len
) % 0x1000)
3039 usleep_range(1000, 2000);
3042 bytes_left
-= read_len
;
3045 cdev
->mcp_nvm_resp
= resp
;
3046 qed_ptt_release(p_hwfn
, p_ptt
);
3051 int qed_mcp_nvm_resp(struct qed_dev
*cdev
, u8
*p_buf
)
3053 struct qed_hwfn
*p_hwfn
= QED_LEADING_HWFN(cdev
);
3054 struct qed_ptt
*p_ptt
;
3056 p_ptt
= qed_ptt_acquire(p_hwfn
);
3060 memcpy(p_buf
, &cdev
->mcp_nvm_resp
, sizeof(cdev
->mcp_nvm_resp
));
3061 qed_ptt_release(p_hwfn
, p_ptt
);
3066 int qed_mcp_nvm_write(struct qed_dev
*cdev
,
3067 u32 cmd
, u32 addr
, u8
*p_buf
, u32 len
)
3069 u32 buf_idx
= 0, buf_size
, nvm_cmd
, nvm_offset
, resp
= 0, param
;
3070 struct qed_hwfn
*p_hwfn
= QED_LEADING_HWFN(cdev
);
3071 struct qed_ptt
*p_ptt
;
3074 p_ptt
= qed_ptt_acquire(p_hwfn
);
3079 case QED_PUT_FILE_BEGIN
:
3080 nvm_cmd
= DRV_MSG_CODE_NVM_PUT_FILE_BEGIN
;
3082 case QED_PUT_FILE_DATA
:
3083 nvm_cmd
= DRV_MSG_CODE_NVM_PUT_FILE_DATA
;
3085 case QED_NVM_WRITE_NVRAM
:
3086 nvm_cmd
= DRV_MSG_CODE_NVM_WRITE_NVRAM
;
3089 DP_NOTICE(p_hwfn
, "Invalid nvm write command 0x%x\n", cmd
);
3094 buf_size
= min_t(u32
, (len
- buf_idx
), MCP_DRV_NVM_BUF_LEN
);
3095 while (buf_idx
< len
) {
3096 if (cmd
== QED_PUT_FILE_BEGIN
)
3099 nvm_offset
= ((buf_size
<<
3100 DRV_MB_PARAM_NVM_LEN_OFFSET
) | addr
) +
3102 rc
= qed_mcp_nvm_wr_cmd(p_hwfn
, p_ptt
, nvm_cmd
, nvm_offset
,
3103 &resp
, ¶m
, buf_size
,
3104 (u32
*)&p_buf
[buf_idx
]);
3106 DP_NOTICE(cdev
, "nvm write failed, rc = %d\n", rc
);
3107 resp
= FW_MSG_CODE_ERROR
;
3111 if (resp
!= FW_MSG_CODE_OK
&&
3112 resp
!= FW_MSG_CODE_NVM_OK
&&
3113 resp
!= FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK
) {
3115 "nvm write failed, resp = 0x%08x\n", resp
);
3120 /* This can be a lengthy process, and it's possible scheduler
3121 * isn't pre-emptable. Sleep a bit to prevent CPU hogging.
3123 if (buf_idx
% 0x1000 > (buf_idx
+ buf_size
) % 0x1000)
3124 usleep_range(1000, 2000);
3126 /* For MBI upgrade, MFW response includes the next buffer offset
3127 * to be delivered to MFW.
3129 if (param
&& cmd
== QED_PUT_FILE_DATA
) {
3130 buf_idx
= QED_MFW_GET_FIELD(param
,
3131 FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET
);
3132 buf_size
= QED_MFW_GET_FIELD(param
,
3133 FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE
);
3135 buf_idx
+= buf_size
;
3136 buf_size
= min_t(u32
, (len
- buf_idx
),
3137 MCP_DRV_NVM_BUF_LEN
);
3141 cdev
->mcp_nvm_resp
= resp
;
3143 qed_ptt_release(p_hwfn
, p_ptt
);
3148 int qed_mcp_phy_sfp_read(struct qed_hwfn
*p_hwfn
, struct qed_ptt
*p_ptt
,
3149 u32 port
, u32 addr
, u32 offset
, u32 len
, u8
*p_buf
)
3151 u32 bytes_left
, bytes_to_copy
, buf_size
, nvm_offset
= 0;
3155 nvm_offset
|= (port
<< DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET
) &
3156 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK
;
3157 nvm_offset
|= (addr
<< DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET
) &
3158 DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK
;
3163 while (bytes_left
> 0) {
3164 bytes_to_copy
= min_t(u32
, bytes_left
,
3165 MAX_I2C_TRANSACTION_SIZE
);
3166 nvm_offset
&= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK
|
3167 DRV_MB_PARAM_TRANSCEIVER_PORT_MASK
);
3168 nvm_offset
|= ((addr
+ offset
) <<
3169 DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET
) &
3170 DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK
;
3171 nvm_offset
|= (bytes_to_copy
<<
3172 DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET
) &
3173 DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK
;
3174 rc
= qed_mcp_nvm_rd_cmd(p_hwfn
, p_ptt
,
3175 DRV_MSG_CODE_TRANSCEIVER_READ
,
3176 nvm_offset
, &resp
, ¶m
, &buf_size
,
3177 (u32
*)(p_buf
+ offset
));
3180 "Failed to send a transceiver read command to the MFW. rc = %d.\n",
3185 if (resp
== FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT
)
3187 else if (resp
!= FW_MSG_CODE_TRANSCEIVER_DIAG_OK
)
3191 bytes_left
-= buf_size
;
3197 int qed_mcp_bist_register_test(struct qed_hwfn
*p_hwfn
, struct qed_ptt
*p_ptt
)
3199 u32 drv_mb_param
= 0, rsp
, param
;
3202 drv_mb_param
= (DRV_MB_PARAM_BIST_REGISTER_TEST
<<
3203 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT
);
3205 rc
= qed_mcp_cmd(p_hwfn
, p_ptt
, DRV_MSG_CODE_BIST_TEST
,
3206 drv_mb_param
, &rsp
, ¶m
);
3211 if (((rsp
& FW_MSG_CODE_MASK
) != FW_MSG_CODE_OK
) ||
3212 (param
!= DRV_MB_PARAM_BIST_RC_PASSED
))
3218 int qed_mcp_bist_clock_test(struct qed_hwfn
*p_hwfn
, struct qed_ptt
*p_ptt
)
3220 u32 drv_mb_param
, rsp
, param
;
3223 drv_mb_param
= (DRV_MB_PARAM_BIST_CLOCK_TEST
<<
3224 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT
);
3226 rc
= qed_mcp_cmd(p_hwfn
, p_ptt
, DRV_MSG_CODE_BIST_TEST
,
3227 drv_mb_param
, &rsp
, ¶m
);
3232 if (((rsp
& FW_MSG_CODE_MASK
) != FW_MSG_CODE_OK
) ||
3233 (param
!= DRV_MB_PARAM_BIST_RC_PASSED
))
3239 int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn
*p_hwfn
,
3240 struct qed_ptt
*p_ptt
,
3243 u32 drv_mb_param
= 0, rsp
;
3246 drv_mb_param
= (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES
<<
3247 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT
);
3249 rc
= qed_mcp_cmd(p_hwfn
, p_ptt
, DRV_MSG_CODE_BIST_TEST
,
3250 drv_mb_param
, &rsp
, num_images
);
3254 if (((rsp
& FW_MSG_CODE_MASK
) != FW_MSG_CODE_OK
))
3260 int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn
*p_hwfn
,
3261 struct qed_ptt
*p_ptt
,
3262 struct bist_nvm_image_att
*p_image_att
,
3265 u32 buf_size
= 0, param
, resp
= 0, resp_param
= 0;
3268 param
= DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX
<<
3269 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT
;
3270 param
|= image_index
<< DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT
;
3272 rc
= qed_mcp_nvm_rd_cmd(p_hwfn
, p_ptt
,
3273 DRV_MSG_CODE_BIST_TEST
, param
,
3276 (u32
*)p_image_att
);
3280 if (((resp
& FW_MSG_CODE_MASK
) != FW_MSG_CODE_OK
) ||
3281 (p_image_att
->return_code
!= 1))
3287 int qed_mcp_nvm_info_populate(struct qed_hwfn
*p_hwfn
)
3289 struct qed_nvm_image_info nvm_info
;
3290 struct qed_ptt
*p_ptt
;
3294 if (p_hwfn
->nvm_info
.valid
)
3297 p_ptt
= qed_ptt_acquire(p_hwfn
);
3299 DP_ERR(p_hwfn
, "failed to acquire ptt\n");
3303 /* Acquire from MFW the amount of available images */
3304 nvm_info
.num_images
= 0;
3305 rc
= qed_mcp_bist_nvm_get_num_images(p_hwfn
,
3306 p_ptt
, &nvm_info
.num_images
);
3307 if (rc
== -EOPNOTSUPP
) {
3308 DP_INFO(p_hwfn
, "DRV_MSG_CODE_BIST_TEST is not supported\n");
3310 } else if (rc
|| !nvm_info
.num_images
) {
3311 DP_ERR(p_hwfn
, "Failed getting number of images\n");
3315 nvm_info
.image_att
= kmalloc_array(nvm_info
.num_images
,
3316 sizeof(struct bist_nvm_image_att
),
3318 if (!nvm_info
.image_att
) {
3323 /* Iterate over images and get their attributes */
3324 for (i
= 0; i
< nvm_info
.num_images
; i
++) {
3325 rc
= qed_mcp_bist_nvm_get_image_att(p_hwfn
, p_ptt
,
3326 &nvm_info
.image_att
[i
], i
);
3329 "Failed getting image index %d attributes\n", i
);
3333 DP_VERBOSE(p_hwfn
, QED_MSG_SP
, "image index %d, size %x\n", i
,
3334 nvm_info
.image_att
[i
].len
);
3337 /* Update hwfn's nvm_info */
3338 if (nvm_info
.num_images
) {
3339 p_hwfn
->nvm_info
.num_images
= nvm_info
.num_images
;
3340 kfree(p_hwfn
->nvm_info
.image_att
);
3341 p_hwfn
->nvm_info
.image_att
= nvm_info
.image_att
;
3342 p_hwfn
->nvm_info
.valid
= true;
3345 qed_ptt_release(p_hwfn
, p_ptt
);
3349 kfree(nvm_info
.image_att
);
3351 qed_ptt_release(p_hwfn
, p_ptt
);
3355 void qed_mcp_nvm_info_free(struct qed_hwfn
*p_hwfn
)
3357 kfree(p_hwfn
->nvm_info
.image_att
);
3358 p_hwfn
->nvm_info
.image_att
= NULL
;
3359 p_hwfn
->nvm_info
.valid
= false;
3363 qed_mcp_get_nvm_image_att(struct qed_hwfn
*p_hwfn
,
3364 enum qed_nvm_images image_id
,
3365 struct qed_nvm_image_att
*p_image_att
)
3367 enum nvm_image_type type
;
3370 /* Translate image_id into MFW definitions */
3372 case QED_NVM_IMAGE_ISCSI_CFG
:
3373 type
= NVM_TYPE_ISCSI_CFG
;
3375 case QED_NVM_IMAGE_FCOE_CFG
:
3376 type
= NVM_TYPE_FCOE_CFG
;
3378 case QED_NVM_IMAGE_MDUMP
:
3379 type
= NVM_TYPE_MDUMP
;
3381 case QED_NVM_IMAGE_NVM_CFG1
:
3382 type
= NVM_TYPE_NVM_CFG1
;
3384 case QED_NVM_IMAGE_DEFAULT_CFG
:
3385 type
= NVM_TYPE_DEFAULT_CFG
;
3387 case QED_NVM_IMAGE_NVM_META
:
3388 type
= NVM_TYPE_META
;
3391 DP_NOTICE(p_hwfn
, "Unknown request of image_id %08x\n",
3396 qed_mcp_nvm_info_populate(p_hwfn
);
3397 for (i
= 0; i
< p_hwfn
->nvm_info
.num_images
; i
++)
3398 if (type
== p_hwfn
->nvm_info
.image_att
[i
].image_type
)
3400 if (i
== p_hwfn
->nvm_info
.num_images
) {
3401 DP_VERBOSE(p_hwfn
, QED_MSG_STORAGE
,
3402 "Failed to find nvram image of type %08x\n",
3407 p_image_att
->start_addr
= p_hwfn
->nvm_info
.image_att
[i
].nvm_start_addr
;
3408 p_image_att
->length
= p_hwfn
->nvm_info
.image_att
[i
].len
;
3413 int qed_mcp_get_nvm_image(struct qed_hwfn
*p_hwfn
,
3414 enum qed_nvm_images image_id
,
3415 u8
*p_buffer
, u32 buffer_len
)
3417 struct qed_nvm_image_att image_att
;
3420 memset(p_buffer
, 0, buffer_len
);
3422 rc
= qed_mcp_get_nvm_image_att(p_hwfn
, image_id
, &image_att
);
3426 /* Validate sizes - both the image's and the supplied buffer's */
3427 if (image_att
.length
<= 4) {
3428 DP_VERBOSE(p_hwfn
, QED_MSG_STORAGE
,
3429 "Image [%d] is too small - only %d bytes\n",
3430 image_id
, image_att
.length
);
3434 if (image_att
.length
> buffer_len
) {
3437 "Image [%d] is too big - %08x bytes where only %08x are available\n",
3438 image_id
, image_att
.length
, buffer_len
);
3442 return qed_mcp_nvm_read(p_hwfn
->cdev
, image_att
.start_addr
,
3443 p_buffer
, image_att
.length
);
3446 static enum resource_id_enum
qed_mcp_get_mfw_res_id(enum qed_resources res_id
)
3448 enum resource_id_enum mfw_res_id
= RESOURCE_NUM_INVALID
;
3452 mfw_res_id
= RESOURCE_NUM_SB_E
;
3455 mfw_res_id
= RESOURCE_NUM_L2_QUEUE_E
;
3458 mfw_res_id
= RESOURCE_NUM_VPORT_E
;
3461 mfw_res_id
= RESOURCE_NUM_RSS_ENGINES_E
;
3464 mfw_res_id
= RESOURCE_NUM_PQ_E
;
3467 mfw_res_id
= RESOURCE_NUM_RL_E
;
3471 /* Each VFC resource can accommodate both a MAC and a VLAN */
3472 mfw_res_id
= RESOURCE_VFC_FILTER_E
;
3475 mfw_res_id
= RESOURCE_ILT_E
;
3477 case QED_LL2_RAM_QUEUE
:
3478 mfw_res_id
= RESOURCE_LL2_QUEUE_E
;
3480 case QED_LL2_CTX_QUEUE
:
3481 mfw_res_id
= RESOURCE_LL2_CQS_E
;
3483 case QED_RDMA_CNQ_RAM
:
3485 /* CNQ/CMDQS are the same resource */
3486 mfw_res_id
= RESOURCE_CQS_E
;
3488 case QED_RDMA_STATS_QUEUE
:
3489 mfw_res_id
= RESOURCE_RDMA_STATS_QUEUE_E
;
3492 mfw_res_id
= RESOURCE_BDQ_E
;
3501 #define QED_RESC_ALLOC_VERSION_MAJOR 2
3502 #define QED_RESC_ALLOC_VERSION_MINOR 0
3503 #define QED_RESC_ALLOC_VERSION \
3504 ((QED_RESC_ALLOC_VERSION_MAJOR << \
3505 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
3506 (QED_RESC_ALLOC_VERSION_MINOR << \
3507 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))
3509 struct qed_resc_alloc_in_params
{
3511 enum qed_resources res_id
;
3515 struct qed_resc_alloc_out_params
{
3526 qed_mcp_resc_allocation_msg(struct qed_hwfn
*p_hwfn
,
3527 struct qed_ptt
*p_ptt
,
3528 struct qed_resc_alloc_in_params
*p_in_params
,
3529 struct qed_resc_alloc_out_params
*p_out_params
)
3531 struct qed_mcp_mb_params mb_params
;
3532 struct resource_info mfw_resc_info
;
3535 memset(&mfw_resc_info
, 0, sizeof(mfw_resc_info
));
3537 mfw_resc_info
.res_id
= qed_mcp_get_mfw_res_id(p_in_params
->res_id
);
3538 if (mfw_resc_info
.res_id
== RESOURCE_NUM_INVALID
) {
3540 "Failed to match resource %d [%s] with the MFW resources\n",
3541 p_in_params
->res_id
,
3542 qed_hw_get_resc_name(p_in_params
->res_id
));
3546 switch (p_in_params
->cmd
) {
3547 case DRV_MSG_SET_RESOURCE_VALUE_MSG
:
3548 mfw_resc_info
.size
= p_in_params
->resc_max_val
;
3550 case DRV_MSG_GET_RESOURCE_ALLOC_MSG
:
3553 DP_ERR(p_hwfn
, "Unexpected resource alloc command [0x%08x]\n",
3558 memset(&mb_params
, 0, sizeof(mb_params
));
3559 mb_params
.cmd
= p_in_params
->cmd
;
3560 mb_params
.param
= QED_RESC_ALLOC_VERSION
;
3561 mb_params
.p_data_src
= &mfw_resc_info
;
3562 mb_params
.data_src_size
= sizeof(mfw_resc_info
);
3563 mb_params
.p_data_dst
= mb_params
.p_data_src
;
3564 mb_params
.data_dst_size
= mb_params
.data_src_size
;
3568 "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
3570 p_in_params
->res_id
,
3571 qed_hw_get_resc_name(p_in_params
->res_id
),
3572 QED_MFW_GET_FIELD(mb_params
.param
,
3573 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR
),
3574 QED_MFW_GET_FIELD(mb_params
.param
,
3575 DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR
),
3576 p_in_params
->resc_max_val
);
3578 rc
= qed_mcp_cmd_and_union(p_hwfn
, p_ptt
, &mb_params
);
3582 p_out_params
->mcp_resp
= mb_params
.mcp_resp
;
3583 p_out_params
->mcp_param
= mb_params
.mcp_param
;
3584 p_out_params
->resc_num
= mfw_resc_info
.size
;
3585 p_out_params
->resc_start
= mfw_resc_info
.offset
;
3586 p_out_params
->vf_resc_num
= mfw_resc_info
.vf_size
;
3587 p_out_params
->vf_resc_start
= mfw_resc_info
.vf_offset
;
3588 p_out_params
->flags
= mfw_resc_info
.flags
;
3592 "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
3593 QED_MFW_GET_FIELD(p_out_params
->mcp_param
,
3594 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR
),
3595 QED_MFW_GET_FIELD(p_out_params
->mcp_param
,
3596 FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR
),
3597 p_out_params
->resc_num
,
3598 p_out_params
->resc_start
,
3599 p_out_params
->vf_resc_num
,
3600 p_out_params
->vf_resc_start
, p_out_params
->flags
);
3606 qed_mcp_set_resc_max_val(struct qed_hwfn
*p_hwfn
,
3607 struct qed_ptt
*p_ptt
,
3608 enum qed_resources res_id
,
3609 u32 resc_max_val
, u32
*p_mcp_resp
)
3611 struct qed_resc_alloc_out_params out_params
;
3612 struct qed_resc_alloc_in_params in_params
;
3615 memset(&in_params
, 0, sizeof(in_params
));
3616 in_params
.cmd
= DRV_MSG_SET_RESOURCE_VALUE_MSG
;
3617 in_params
.res_id
= res_id
;
3618 in_params
.resc_max_val
= resc_max_val
;
3619 memset(&out_params
, 0, sizeof(out_params
));
3620 rc
= qed_mcp_resc_allocation_msg(p_hwfn
, p_ptt
, &in_params
,
3625 *p_mcp_resp
= out_params
.mcp_resp
;
3631 qed_mcp_get_resc_info(struct qed_hwfn
*p_hwfn
,
3632 struct qed_ptt
*p_ptt
,
3633 enum qed_resources res_id
,
3634 u32
*p_mcp_resp
, u32
*p_resc_num
, u32
*p_resc_start
)
3636 struct qed_resc_alloc_out_params out_params
;
3637 struct qed_resc_alloc_in_params in_params
;
3640 memset(&in_params
, 0, sizeof(in_params
));
3641 in_params
.cmd
= DRV_MSG_GET_RESOURCE_ALLOC_MSG
;
3642 in_params
.res_id
= res_id
;
3643 memset(&out_params
, 0, sizeof(out_params
));
3644 rc
= qed_mcp_resc_allocation_msg(p_hwfn
, p_ptt
, &in_params
,
3649 *p_mcp_resp
= out_params
.mcp_resp
;
3651 if (*p_mcp_resp
== FW_MSG_CODE_RESOURCE_ALLOC_OK
) {
3652 *p_resc_num
= out_params
.resc_num
;
3653 *p_resc_start
= out_params
.resc_start
;
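/* Ask the MFW to perform a PF FLR on our behalf. The generic RESOURCE_CMD
 * wrapper that follows reports commands the MFW does not support or does not
 * recognize.
 */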
int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 mcp_resp, mcp_param;

	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
			   &mcp_resp, &mcp_param);
}

static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				u32 param, u32 *p_mcp_resp, u32 *p_mcp_param)
{
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
			 p_mcp_resp, p_mcp_param);
	if (rc)
		return rc;

	if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The resource command is unsupported by the MFW\n");
		return -EINVAL;
	}

	if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
		u8 opcode = QED_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);

		DP_NOTICE(p_hwfn,
			  "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
			  param, opcode);
		rc = -EINVAL;
	}

	return rc;
}
3697 __qed_mcp_resc_lock(struct qed_hwfn
*p_hwfn
,
3698 struct qed_ptt
*p_ptt
,
3699 struct qed_resc_lock_params
*p_params
)
3701 u32 param
= 0, mcp_resp
, mcp_param
;
3705 switch (p_params
->timeout
) {
3706 case QED_MCP_RESC_LOCK_TO_DEFAULT
:
3707 opcode
= RESOURCE_OPCODE_REQ
;
3708 p_params
->timeout
= 0;
3710 case QED_MCP_RESC_LOCK_TO_NONE
:
3711 opcode
= RESOURCE_OPCODE_REQ_WO_AGING
;
3712 p_params
->timeout
= 0;
3715 opcode
= RESOURCE_OPCODE_REQ_W_AGING
;
3719 QED_MFW_SET_FIELD(param
, RESOURCE_CMD_REQ_RESC
, p_params
->resource
);
3720 QED_MFW_SET_FIELD(param
, RESOURCE_CMD_REQ_OPCODE
, opcode
);
3721 QED_MFW_SET_FIELD(param
, RESOURCE_CMD_REQ_AGE
, p_params
->timeout
);
3725 "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
3726 param
, p_params
->timeout
, opcode
, p_params
->resource
);
3728 /* Attempt to acquire the resource */
3729 rc
= qed_mcp_resource_cmd(p_hwfn
, p_ptt
, param
, &mcp_resp
, &mcp_param
);
3733 /* Analyze the response */
3734 p_params
->owner
= QED_MFW_GET_FIELD(mcp_param
, RESOURCE_CMD_RSP_OWNER
);
3735 opcode
= QED_MFW_GET_FIELD(mcp_param
, RESOURCE_CMD_RSP_OPCODE
);
3739 "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
3740 mcp_param
, opcode
, p_params
->owner
);
3743 case RESOURCE_OPCODE_GNT
:
3744 p_params
->b_granted
= true;
3746 case RESOURCE_OPCODE_BUSY
:
3747 p_params
->b_granted
= false;
3751 "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
3760 qed_mcp_resc_lock(struct qed_hwfn
*p_hwfn
,
3761 struct qed_ptt
*p_ptt
, struct qed_resc_lock_params
*p_params
)
3767 /* No need for an interval before the first iteration */
3769 if (p_params
->sleep_b4_retry
) {
3770 u16 retry_interval_in_ms
=
3771 DIV_ROUND_UP(p_params
->retry_interval
,
3774 msleep(retry_interval_in_ms
);
3776 udelay(p_params
->retry_interval
);
3780 rc
= __qed_mcp_resc_lock(p_hwfn
, p_ptt
, p_params
);
3784 if (p_params
->b_granted
)
3786 } while (retry_cnt
++ < p_params
->retry_num
);
3792 qed_mcp_resc_unlock(struct qed_hwfn
*p_hwfn
,
3793 struct qed_ptt
*p_ptt
,
3794 struct qed_resc_unlock_params
*p_params
)
3796 u32 param
= 0, mcp_resp
, mcp_param
;
3800 opcode
= p_params
->b_force
? RESOURCE_OPCODE_FORCE_RELEASE
3801 : RESOURCE_OPCODE_RELEASE
;
3802 QED_MFW_SET_FIELD(param
, RESOURCE_CMD_REQ_RESC
, p_params
->resource
);
3803 QED_MFW_SET_FIELD(param
, RESOURCE_CMD_REQ_OPCODE
, opcode
);
3805 DP_VERBOSE(p_hwfn
, QED_MSG_SP
,
3806 "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
3807 param
, opcode
, p_params
->resource
);
3809 /* Attempt to release the resource */
3810 rc
= qed_mcp_resource_cmd(p_hwfn
, p_ptt
, param
, &mcp_resp
, &mcp_param
);
3814 /* Analyze the response */
3815 opcode
= QED_MFW_GET_FIELD(mcp_param
, RESOURCE_CMD_RSP_OPCODE
);
3817 DP_VERBOSE(p_hwfn
, QED_MSG_SP
,
3818 "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
3822 case RESOURCE_OPCODE_RELEASED_PREVIOUS
:
3824 "Resource unlock request for an already released resource [%d]\n",
3825 p_params
->resource
);
3827 case RESOURCE_OPCODE_RELEASED
:
3828 p_params
->b_released
= true;
3830 case RESOURCE_OPCODE_WRONG_OWNER
:
3831 p_params
->b_released
= false;
3835 "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
3843 void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params
*p_lock
,
3844 struct qed_resc_unlock_params
*p_unlock
,
3846 resource
, bool b_is_permanent
)
3849 memset(p_lock
, 0, sizeof(*p_lock
));
3851 /* Permanent resources don't require aging, and there's no
3852 * point in trying to acquire them more than once since it's
3853 * unexpected another entity would release them.
3855 if (b_is_permanent
) {
3856 p_lock
->timeout
= QED_MCP_RESC_LOCK_TO_NONE
;
3858 p_lock
->retry_num
= QED_MCP_RESC_LOCK_RETRY_CNT_DFLT
;
3859 p_lock
->retry_interval
=
3860 QED_MCP_RESC_LOCK_RETRY_VAL_DFLT
;
3861 p_lock
->sleep_b4_retry
= true;
3864 p_lock
->resource
= resource
;
3868 memset(p_unlock
, 0, sizeof(*p_unlock
));
3869 p_unlock
->resource
= resource
;
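/* Feature-capability handshake with the MFW: the driver reads the set of
 * features the MFW supports and advertises the features it supports itself
 * (EEE, virtual link, FEC control, and extended-speed FEC control on E5
 * devices).
 */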
bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn)
{
	return !!(p_hwfn->mcp_info->capabilities &
		  FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
}

int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 mcp_resp;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
			 0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
	if (!rc)
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_PROBE),
			   "MFW supported features: %08x\n",
			   p_hwfn->mcp_info->capabilities);

	return rc;
}

int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 mcp_resp, mcp_param, features;

	features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
		   DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK |
		   DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL;

	if (QED_IS_E5(p_hwfn->cdev))
		features |=
		    DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EXT_SPEED_FEC_CONTROL;

	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
			   features, &mcp_resp, &mcp_param);
}
3910 int qed_mcp_get_engine_config(struct qed_hwfn
*p_hwfn
, struct qed_ptt
*p_ptt
)
3912 struct qed_mcp_mb_params mb_params
= {0};
3913 struct qed_dev
*cdev
= p_hwfn
->cdev
;
3914 u8 fir_valid
, l2_valid
;
3917 mb_params
.cmd
= DRV_MSG_CODE_GET_ENGINE_CONFIG
;
3918 rc
= qed_mcp_cmd_and_union(p_hwfn
, p_ptt
, &mb_params
);
3922 if (mb_params
.mcp_resp
== FW_MSG_CODE_UNSUPPORTED
) {
3924 "The get_engine_config command is unsupported by the MFW\n");
3928 fir_valid
= QED_MFW_GET_FIELD(mb_params
.mcp_param
,
3929 FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID
);
3932 QED_MFW_GET_FIELD(mb_params
.mcp_param
,
3933 FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE
);
3935 l2_valid
= QED_MFW_GET_FIELD(mb_params
.mcp_param
,
3936 FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID
);
3938 cdev
->l2_affin_hint
=
3939 QED_MFW_GET_FIELD(mb_params
.mcp_param
,
3940 FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE
);
3943 "Engine affinity config: FIR={valid %hhd, value %hhd}, L2_hint={valid %hhd, value %hhd}\n",
3944 fir_valid
, cdev
->fir_affin
, l2_valid
, cdev
->l2_affin_hint
);
3949 int qed_mcp_get_ppfid_bitmap(struct qed_hwfn
*p_hwfn
, struct qed_ptt
*p_ptt
)
3951 struct qed_mcp_mb_params mb_params
= {0};
3952 struct qed_dev
*cdev
= p_hwfn
->cdev
;
3955 mb_params
.cmd
= DRV_MSG_CODE_GET_PPFID_BITMAP
;
3956 rc
= qed_mcp_cmd_and_union(p_hwfn
, p_ptt
, &mb_params
);
3960 if (mb_params
.mcp_resp
== FW_MSG_CODE_UNSUPPORTED
) {
3962 "The get_ppfid_bitmap command is unsupported by the MFW\n");
3966 cdev
->ppfid_bitmap
= QED_MFW_GET_FIELD(mb_params
.mcp_param
,
3967 FW_MB_PARAM_PPFID_BITMAP
);
3969 DP_VERBOSE(p_hwfn
, QED_MSG_SP
, "PPFID bitmap 0x%hhx\n",
3970 cdev
->ppfid_bitmap
);
3975 int qed_mcp_nvm_get_cfg(struct qed_hwfn
*p_hwfn
, struct qed_ptt
*p_ptt
,
3976 u16 option_id
, u8 entity_id
, u16 flags
, u8
*p_buf
,
3979 u32 mb_param
= 0, resp
, param
;
3982 QED_MFW_SET_FIELD(mb_param
, DRV_MB_PARAM_NVM_CFG_OPTION_ID
, option_id
);
3983 if (flags
& QED_NVM_CFG_OPTION_INIT
)
3984 QED_MFW_SET_FIELD(mb_param
,
3985 DRV_MB_PARAM_NVM_CFG_OPTION_INIT
, 1);
3986 if (flags
& QED_NVM_CFG_OPTION_FREE
)
3987 QED_MFW_SET_FIELD(mb_param
,
3988 DRV_MB_PARAM_NVM_CFG_OPTION_FREE
, 1);
3989 if (flags
& QED_NVM_CFG_OPTION_ENTITY_SEL
) {
3990 QED_MFW_SET_FIELD(mb_param
,
3991 DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL
, 1);
3992 QED_MFW_SET_FIELD(mb_param
,
3993 DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID
,
3997 rc
= qed_mcp_nvm_rd_cmd(p_hwfn
, p_ptt
,
3998 DRV_MSG_CODE_GET_NVM_CFG_OPTION
,
3999 mb_param
, &resp
, ¶m
, p_len
, (u32
*)p_buf
);
4004 int qed_mcp_nvm_set_cfg(struct qed_hwfn
*p_hwfn
, struct qed_ptt
*p_ptt
,
4005 u16 option_id
, u8 entity_id
, u16 flags
, u8
*p_buf
,
4008 u32 mb_param
= 0, resp
, param
;
4010 QED_MFW_SET_FIELD(mb_param
, DRV_MB_PARAM_NVM_CFG_OPTION_ID
, option_id
);
4011 if (flags
& QED_NVM_CFG_OPTION_ALL
)
4012 QED_MFW_SET_FIELD(mb_param
,
4013 DRV_MB_PARAM_NVM_CFG_OPTION_ALL
, 1);
4014 if (flags
& QED_NVM_CFG_OPTION_INIT
)
4015 QED_MFW_SET_FIELD(mb_param
,
4016 DRV_MB_PARAM_NVM_CFG_OPTION_INIT
, 1);
4017 if (flags
& QED_NVM_CFG_OPTION_COMMIT
)
4018 QED_MFW_SET_FIELD(mb_param
,
4019 DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT
, 1);
4020 if (flags
& QED_NVM_CFG_OPTION_FREE
)
4021 QED_MFW_SET_FIELD(mb_param
,
4022 DRV_MB_PARAM_NVM_CFG_OPTION_FREE
, 1);
4023 if (flags
& QED_NVM_CFG_OPTION_ENTITY_SEL
) {
4024 QED_MFW_SET_FIELD(mb_param
,
4025 DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL
, 1);
4026 QED_MFW_SET_FIELD(mb_param
,
4027 DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID
,
4031 return qed_mcp_nvm_wr_cmd(p_hwfn
, p_ptt
,
4032 DRV_MSG_CODE_SET_NVM_CFG_OPTION
,
4033 mb_param
, &resp
, ¶m
, len
, (u32
*)p_buf
);
4036 #define QED_MCP_DBG_DATA_MAX_SIZE MCP_DRV_NVM_BUF_LEN
4037 #define QED_MCP_DBG_DATA_MAX_HEADER_SIZE sizeof(u32)
4038 #define QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE \
4039 (QED_MCP_DBG_DATA_MAX_SIZE - QED_MCP_DBG_DATA_MAX_HEADER_SIZE)
4042 __qed_mcp_send_debug_data(struct qed_hwfn
*p_hwfn
,
4043 struct qed_ptt
*p_ptt
, u8
*p_buf
, u8 size
)
4045 struct qed_mcp_mb_params mb_params
;
4048 if (size
> QED_MCP_DBG_DATA_MAX_SIZE
) {
4050 "Debug data size is %d while it should not exceed %d\n",
4051 size
, QED_MCP_DBG_DATA_MAX_SIZE
);
4055 memset(&mb_params
, 0, sizeof(mb_params
));
4056 mb_params
.cmd
= DRV_MSG_CODE_DEBUG_DATA_SEND
;
4057 SET_MFW_FIELD(mb_params
.param
, DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE
, size
);
4058 mb_params
.p_data_src
= p_buf
;
4059 mb_params
.data_src_size
= size
;
4060 rc
= qed_mcp_cmd_and_union(p_hwfn
, p_ptt
, &mb_params
);
4064 if (mb_params
.mcp_resp
== FW_MSG_CODE_UNSUPPORTED
) {
4066 "The DEBUG_DATA_SEND command is unsupported by the MFW\n");
4068 } else if (mb_params
.mcp_resp
== (u32
)FW_MSG_CODE_DEBUG_NOT_ENABLED
) {
4069 DP_INFO(p_hwfn
, "The DEBUG_DATA_SEND command is not enabled\n");
4071 } else if (mb_params
.mcp_resp
!= (u32
)FW_MSG_CODE_DEBUG_DATA_SEND_OK
) {
4073 "Failed to send debug data to the MFW [resp 0x%08x]\n",
4074 mb_params
.mcp_resp
);
4081 enum qed_mcp_dbg_data_type
{
4082 QED_MCP_DBG_DATA_TYPE_RAW
,
4085 /* Header format: [31:28] PFID, [27:20] flags, [19:12] type, [11:0] S/N */
4086 #define QED_MCP_DBG_DATA_HDR_SN_OFFSET 0
4087 #define QED_MCP_DBG_DATA_HDR_SN_MASK 0x00000fff
4088 #define QED_MCP_DBG_DATA_HDR_TYPE_OFFSET 12
4089 #define QED_MCP_DBG_DATA_HDR_TYPE_MASK 0x000ff000
4090 #define QED_MCP_DBG_DATA_HDR_FLAGS_OFFSET 20
4091 #define QED_MCP_DBG_DATA_HDR_FLAGS_MASK 0x0ff00000
4092 #define QED_MCP_DBG_DATA_HDR_PF_OFFSET 28
4093 #define QED_MCP_DBG_DATA_HDR_PF_MASK 0xf0000000
4095 #define QED_MCP_DBG_DATA_HDR_FLAGS_FIRST 0x1
4096 #define QED_MCP_DBG_DATA_HDR_FLAGS_LAST 0x2
4099 qed_mcp_send_debug_data(struct qed_hwfn
*p_hwfn
,
4100 struct qed_ptt
*p_ptt
,
4101 enum qed_mcp_dbg_data_type type
, u8
*p_buf
, u32 size
)
4103 u8 raw_data
[QED_MCP_DBG_DATA_MAX_SIZE
], *p_tmp_buf
= p_buf
;
4104 u32 tmp_size
= size
, *p_header
, *p_payload
;
4109 p_header
= (u32
*)raw_data
;
4110 p_payload
= (u32
*)(raw_data
+ QED_MCP_DBG_DATA_MAX_HEADER_SIZE
);
4112 seq
= (u16
)atomic_inc_return(&p_hwfn
->mcp_info
->dbg_data_seq
);
4114 /* First chunk is marked as 'first' */
4115 flags
|= QED_MCP_DBG_DATA_HDR_FLAGS_FIRST
;
4118 SET_MFW_FIELD(*p_header
, QED_MCP_DBG_DATA_HDR_SN
, seq
);
4119 SET_MFW_FIELD(*p_header
, QED_MCP_DBG_DATA_HDR_TYPE
, type
);
4120 SET_MFW_FIELD(*p_header
, QED_MCP_DBG_DATA_HDR_FLAGS
, flags
);
4121 SET_MFW_FIELD(*p_header
, QED_MCP_DBG_DATA_HDR_PF
, p_hwfn
->abs_pf_id
);
4123 while (tmp_size
> QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE
) {
4124 memcpy(p_payload
, p_tmp_buf
, QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE
);
4125 rc
= __qed_mcp_send_debug_data(p_hwfn
, p_ptt
, raw_data
,
4126 QED_MCP_DBG_DATA_MAX_SIZE
);
4130 /* Clear the 'first' marking after sending the first chunk */
4131 if (p_tmp_buf
== p_buf
) {
4132 flags
&= ~QED_MCP_DBG_DATA_HDR_FLAGS_FIRST
;
4133 SET_MFW_FIELD(*p_header
, QED_MCP_DBG_DATA_HDR_FLAGS
,
4137 p_tmp_buf
+= QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE
;
4138 tmp_size
-= QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE
;
4141 /* Last chunk is marked as 'last' */
4142 flags
|= QED_MCP_DBG_DATA_HDR_FLAGS_LAST
;
4143 SET_MFW_FIELD(*p_header
, QED_MCP_DBG_DATA_HDR_FLAGS
, flags
);
4144 memcpy(p_payload
, p_tmp_buf
, tmp_size
);
4146 /* Casting the left size to u8 is ok since at this point it is <= 32 */
4147 return __qed_mcp_send_debug_data(p_hwfn
, p_ptt
, raw_data
,
4148 (u8
)(QED_MCP_DBG_DATA_MAX_HEADER_SIZE
+
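/* Convenience wrapper that sends an opaque buffer to the MFW using the RAW
 * debug-data type.
 */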
int
qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt, u8 *p_buf, u32 size)
{
	return qed_mcp_send_debug_data(p_hwfn, p_ptt,
				       QED_MCP_DBG_DATA_TYPE_RAW, p_buf, size);
}