// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/etherdevice.h>

#include "qed_mfw_hsi.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"
#define GRCBASE_MCP	0xe00000

#define QED_MCP_RESP_ITER_US	10

#define QED_DRV_MB_MAX_RETRIES	(500 * 1000)	/* Account for 5 sec */
#define QED_MCP_RESET_RETRIES	(50 * 1000)	/* Account for 500 msec */

#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val)	     \
	qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + (_offset)), \
	       _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + (_offset)))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val)  \
	DRV_INNER_WR(p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field)	   \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field))

#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
		  DRV_ID_PDA_COMP_VER_SHIFT)

#define MCP_BYTES_PER_MBIT_SHIFT 17
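
/* Driver <-> MFW communication goes through a mailbox in the MCP scratch-pad:
 * DRV_MB_WR()/DRV_MB_RD() access struct public_drv_mb fields at the per-PF
 * drv_mb_addr, and every command carries a sequence number that is later
 * matched against the response read back from fw_mb_header.
 */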
bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
		return false;

	return true;
}
void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}
void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	u32 tmp, i;

	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		tmp = qed_rd(p_hwfn, p_ptt,
			     p_hwfn->mcp_info->mfw_mb_addr +
			     (i << 2) + sizeof(u32));

		/* The MB data is actually BE; Need to force it to cpu */
		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
			be32_to_cpu((__force __be32)tmp);
	}
}
struct qed_mcp_cmd_elem {
	struct list_head list;
	struct qed_mcp_mb_params *p_mb_params;
	u16 expected_seq_num;
	bool b_is_completed;
};
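
/* Pending mailbox commands are tracked on mcp_info->cmd_list: an element is
 * added under cmd_lock when a command is sent, matched by its expected
 * sequence number when the MFW response arrives, and deleted once the caller
 * has consumed the response.
 */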
/* Must be called while cmd_lock is acquired */
static struct qed_mcp_cmd_elem *
qed_mcp_cmd_add_elem(struct qed_hwfn *p_hwfn,
		     struct qed_mcp_mb_params *p_mb_params,
		     u16 expected_seq_num)
{
	struct qed_mcp_cmd_elem *p_cmd_elem = NULL;

	p_cmd_elem = kzalloc(sizeof(*p_cmd_elem), GFP_ATOMIC);
	if (!p_cmd_elem)
		goto out;

	p_cmd_elem->p_mb_params = p_mb_params;
	p_cmd_elem->expected_seq_num = expected_seq_num;
	list_add(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
out:
	return p_cmd_elem;
}
/* Must be called while cmd_lock is acquired */
static void qed_mcp_cmd_del_elem(struct qed_hwfn *p_hwfn,
				 struct qed_mcp_cmd_elem *p_cmd_elem)
{
	list_del(&p_cmd_elem->list);
	kfree(p_cmd_elem);
}
/* Must be called while cmd_lock is acquired */
static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn,
						     u16 seq_num)
{
	struct qed_mcp_cmd_elem *p_cmd_elem = NULL;

	list_for_each_entry(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list) {
		if (p_cmd_elem->expected_seq_num == seq_num)
			return p_cmd_elem;
	}

	return NULL;
}
int qed_mcp_free(struct qed_hwfn *p_hwfn)
{
	if (p_hwfn->mcp_info) {
		struct qed_mcp_cmd_elem *p_cmd_elem = NULL, *p_tmp;

		kfree(p_hwfn->mcp_info->mfw_mb_cur);
		kfree(p_hwfn->mcp_info->mfw_mb_shadow);

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
		list_for_each_entry_safe(p_cmd_elem,
					 p_tmp,
					 &p_hwfn->mcp_info->cmd_list, list) {
			qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		}
		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
	}

	kfree(p_hwfn->mcp_info);
	p_hwfn->mcp_info = NULL;

	return 0;
}
/* Maximum of 1 sec to wait for the SHMEM ready indication */
#define QED_MCP_SHMEM_RDY_MAX_RETRIES	20
#define QED_MCP_SHMEM_RDY_ITER_MS	50

static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info = p_hwfn->mcp_info;
	u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES;
	u8 msec = QED_MCP_SHMEM_RDY_ITER_MS;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

	p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base) {
		DP_NOTICE(p_hwfn,
			  "The address of the MCP scratch-pad is not configured\n");
		return -EINVAL;
	}

	p_info->public_base |= GRCBASE_MCP;

	/* Get the MFW MB address and number of supported messages */
	mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt,
					    p_info->mfw_mb_addr +
					    offsetof(struct public_mfw_mb,
						     sup_msgs));

	/* The driver can notify that there was an MCP reset, and might read the
	 * SHMEM values before the MFW has completed initializing them.
	 * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a
	 * data ready indication.
	 */
	while (!p_info->mfw_mb_length && --cnt) {
		msleep(msec);
		p_info->mfw_mb_length =
			(u16)qed_rd(p_hwfn, p_ptt,
				    p_info->mfw_mb_addr +
				    offsetof(struct public_mfw_mb, sup_msgs));
	}

	if (!cnt) {
		DP_NOTICE(p_hwfn,
			  "Failed to get the SHMEM ready notification after %d msec\n",
			  QED_MCP_SHMEM_RDY_MAX_RETRIES * msec);
		return -EBUSY;
	}

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			     DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK;

	p_info->mcp_hist = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	return 0;
}
int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info;
	u32 size;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL);
	if (!p_hwfn->mcp_info)
		return -ENOMEM;
	p_info = p_hwfn->mcp_info;

	/* Initialize the MFW spinlocks */
	spin_lock_init(&p_info->cmd_lock);
	spin_lock_init(&p_info->link_lock);
	spin_lock_init(&p_info->unload_lock);

	INIT_LIST_HEAD(&p_info->cmd_list);

	if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
		DP_NOTICE(p_hwfn, "MCP is not initialized\n");
		/* Do not free mcp_info here, since public_base indicate that
		 * the MCP is not initialized
		 */
		return 0;
	}

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
	p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
		goto err;

	return 0;

err:
	qed_mcp_free(p_hwfn);
	return -ENOMEM;
}
static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 generic_por_0 = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Use MCP history register to check if MCP reset occurred between init
	 * time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
			   p_hwfn->mcp_info->mcp_hist, generic_por_0);

		qed_load_mcp_offsets(p_hwfn, p_ptt);
		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
}
int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0;
	int rc = 0;

	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn,
			  "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
		return -EBUSY;
	}

	/* Ensure that only a single thread is accessing the mailbox */
	spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

	org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Set drv command along with the updated sequence */
	qed_mcp_reread_offsets(p_hwfn, p_ptt);
	seq = ++p_hwfn->mcp_info->drv_mb_seq;
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		udelay(delay);
		/* Give the FW up to 500 msec (50*1000*10usec) */
	} while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
					      MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < QED_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = -EAGAIN;
	}

	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

	return rc;
}
/* Must be called while cmd_lock is acquired */
static bool qed_mcp_has_pending_cmd(struct qed_hwfn *p_hwfn)
{
	struct qed_mcp_cmd_elem *p_cmd_elem;

	/* There is at most one pending command at a certain time, and if it
	 * exists - it is placed at the HEAD of the list.
	 */
	if (!list_empty(&p_hwfn->mcp_info->cmd_list)) {
		p_cmd_elem = list_first_entry(&p_hwfn->mcp_info->cmd_list,
					      struct qed_mcp_cmd_elem, list);
		return !p_cmd_elem->b_is_completed;
	}

	return false;
}
/* Must be called while cmd_lock is acquired */
static int
qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_mb_params *p_mb_params;
	struct qed_mcp_cmd_elem *p_cmd_elem;
	u32 mcp_resp;
	u16 seq_num;

	mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
	seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);

	/* Return if no new non-handled response has been received */
	if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
		return -EAGAIN;

	p_cmd_elem = qed_mcp_cmd_get_elem(p_hwfn, seq_num);
	if (!p_cmd_elem) {
		DP_ERR(p_hwfn,
		       "Failed to find a pending mailbox cmd that expects sequence number %d\n",
		       seq_num);
		return -EINVAL;
	}

	p_mb_params = p_cmd_elem->p_mb_params;

	/* Get the MFW response along with the sequence number */
	p_mb_params->mcp_resp = mcp_resp;

	/* Get the MFW param */
	p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);

	/* Get the union data */
	if (p_mb_params->p_data_dst && p_mb_params->data_dst_size) {
		u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
				      offsetof(struct public_drv_mb,
					       union_data);
		qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				union_data_addr, p_mb_params->data_dst_size);
	}

	p_cmd_elem->b_is_completed = true;

	return 0;
}
/* Must be called while cmd_lock is acquired */
static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_mcp_mb_params *p_mb_params,
				    u16 seq_num)
{
	union drv_union_data union_data;
	u32 union_data_addr;

	/* Set the union data */
	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  offsetof(struct public_drv_mb, union_data);
	memset(&union_data, 0, sizeof(union_data));
	if (p_mb_params->p_data_src && p_mb_params->data_src_size)
		memcpy(&union_data, p_mb_params->p_data_src,
		       p_mb_params->data_src_size);
	qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
		      sizeof(union_data));

	/* Set the drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);

	/* Set the drv command along with the sequence number */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "MFW mailbox: command 0x%08x param 0x%08x\n",
		   (p_mb_params->cmd | seq_num), p_mb_params->param);
}
static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd)
{
	p_hwfn->mcp_info->b_block_cmd = block_cmd;

	DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
		block_cmd ? "Block" : "Unblock");
}
static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
	u32 delay = QED_MCP_RESP_ITER_US;

	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
	cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	udelay(delay);
	cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	udelay(delay);
	cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);

	DP_NOTICE(p_hwfn,
		  "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
		  cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
}
static int
_qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       struct qed_mcp_mb_params *p_mb_params)
{
	struct qed_mcp_cmd_elem *p_cmd_elem;
	u16 seq_num;
	u32 cnt = 0;
	int rc = 0;

	/* Wait until the mailbox is non-occupied */
	do {
		/* Exit the loop if there is no pending command, or if the
		 * pending command is completed during this iteration.
		 * The spinlock stays locked until the command is sent.
		 */

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (!qed_mcp_has_pending_cmd(p_hwfn))
			break;

		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (!rc)
			break;
		else if (rc != -EAGAIN)
			goto err;

		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
			usleep_range(QED_MCP_RESP_ITER_US,
				     QED_MCP_RESP_ITER_US * 2);
		else
			udelay(QED_MCP_RESP_ITER_US);
	} while (++cnt < QED_DRV_MB_MAX_RETRIES);

	if (cnt >= QED_DRV_MB_MAX_RETRIES) {
		DP_NOTICE(p_hwfn,
			  "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return -EAGAIN;
	}

	/* Send the mailbox command */
	qed_mcp_reread_offsets(p_hwfn, p_ptt);
	seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
	p_cmd_elem = qed_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
	if (!p_cmd_elem) {
		rc = -ENOMEM;
		goto err;
	}

	__qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

	/* Wait for the MFW response */
	do {
		/* Exit the loop if the command is already completed, or if the
		 * command is completed during this iteration.
		 * The spinlock stays locked until the list element is removed.
		 */

		if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
			usleep_range(QED_MCP_RESP_ITER_US,
				     QED_MCP_RESP_ITER_US * 2);
		else
			udelay(QED_MCP_RESP_ITER_US);

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (p_cmd_elem->b_is_completed)
			break;

		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (!rc)
			break;
		else if (rc != -EAGAIN)
			goto err;

		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
	} while (++cnt < QED_DRV_MB_MAX_RETRIES);

	if (cnt >= QED_DRV_MB_MAX_RETRIES) {
		DP_NOTICE(p_hwfn,
			  "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		qed_mcp_print_cpu_info(p_hwfn, p_ptt);

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
		qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
			qed_mcp_cmd_set_blocking(p_hwfn, true);

		qed_hw_err_notify(p_hwfn, p_ptt,
				  QED_HW_ERR_MFW_RESP_FAIL, NULL);
		return -EAGAIN;
	}

	qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
		   p_mb_params->mcp_resp,
		   p_mb_params->mcp_param,
		   (cnt * QED_MCP_RESP_ITER_US) / 1000,
		   (cnt * QED_MCP_RESP_ITER_US) % 1000);

	/* Clear the sequence number from the MFW response */
	p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;

	return 0;

err:
	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
	return rc;
}
static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_mcp_mb_params *p_mb_params)
{
	size_t union_data_size = sizeof(union drv_union_data);

	/* MCP not initialized */
	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn,
			  "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return -EBUSY;
	}

	if (p_mb_params->data_src_size > union_data_size ||
	    p_mb_params->data_dst_size > union_data_size) {
		DP_ERR(p_hwfn,
		       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
		       p_mb_params->data_src_size,
		       p_mb_params->data_dst_size, union_data_size);
		return -EINVAL;
	}

	return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params);
}
static int _qed_mcp_cmd(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
			u32 cmd,
			u32 param,
			u32 *o_mcp_resp,
			u32 *o_mcp_param, bool can_sleep)
{
	struct qed_mcp_mb_params mb_params;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.flags = can_sleep ? QED_MB_FLAG_CAN_SLEEP : 0;

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return 0;
}

int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
		struct qed_ptt *p_ptt,
		u32 cmd,
		u32 param, u32 *o_mcp_resp, u32 *o_mcp_param)
{
	return (_qed_mcp_cmd(p_hwfn, p_ptt, cmd, param,
			     o_mcp_resp, o_mcp_param, true));
}

int qed_mcp_cmd_nosleep(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
			u32 cmd,
			u32 param, u32 *o_mcp_resp, u32 *o_mcp_param)
{
	return (_qed_mcp_cmd(p_hwfn, p_ptt, cmd, param,
			     o_mcp_resp, o_mcp_param, false));
}
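
/* qed_mcp_cmd() lets the response polling loop sleep (usleep_range() via the
 * CAN_SLEEP flag), while qed_mcp_cmd_nosleep() busy-waits with udelay() for
 * callers that must not sleep.
 */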
static int
qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt,
		   u32 cmd,
		   u32 param,
		   u32 *o_mcp_resp,
		   u32 *o_mcp_param, u32 i_txn_size, u32 *i_buf)
{
	struct qed_mcp_mb_params mb_params;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_src = i_buf;
	mb_params.data_src_size = (u8)i_txn_size;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	/* nvm_info needs to be updated */
	p_hwfn->nvm_info.valid = false;

	return 0;
}
int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u32 cmd,
		       u32 param,
		       u32 *o_mcp_resp,
		       u32 *o_mcp_param,
		       u32 *o_txn_size, u32 *o_buf, bool b_can_sleep)
{
	struct qed_mcp_mb_params mb_params;
	u8 raw_data[MCP_DRV_NVM_BUF_LEN];
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_dst = raw_data;

	/* Use the maximal value since the actual one is part of the response */
	mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;

	if (b_can_sleep)
		mb_params.flags = QED_MB_FLAG_CAN_SLEEP;

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	*o_txn_size = *o_mcp_param;
	memcpy(o_buf, raw_data, *o_txn_size);

	return 0;
}
static bool
qed_mcp_can_force_load(u8 drv_role,
		       u8 exist_drv_role,
		       enum qed_override_force_load override_force_load)
{
	bool can_force_load = false;

	switch (override_force_load) {
	case QED_OVERRIDE_FORCE_LOAD_ALWAYS:
		can_force_load = true;
		break;
	case QED_OVERRIDE_FORCE_LOAD_NEVER:
		can_force_load = false;
		break;
	default:
		can_force_load = (drv_role == DRV_ROLE_OS &&
				  exist_drv_role == DRV_ROLE_PREBOOT) ||
				 (drv_role == DRV_ROLE_KDUMP &&
				  exist_drv_role == DRV_ROLE_OS);
		break;
	}

	return can_force_load;
}
static int qed_mcp_cancel_load_req(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
			 &resp, &param);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to send cancel load request, rc = %d\n", rc);

	return rc;
}
#define BITMAP_IDX_FOR_CONFIG_QEDE	BIT(0)
#define BITMAP_IDX_FOR_CONFIG_QED_SRIOV	BIT(1)
#define BITMAP_IDX_FOR_CONFIG_QEDR	BIT(2)
#define BITMAP_IDX_FOR_CONFIG_QEDF	BIT(4)
#define BITMAP_IDX_FOR_CONFIG_QEDI	BIT(5)
#define BITMAP_IDX_FOR_CONFIG_QED_LL2	BIT(6)

static u32 qed_get_config_bitmap(void)
{
	u32 config_bitmap = 0x0;

	if (IS_ENABLED(CONFIG_QEDE))
		config_bitmap |= BITMAP_IDX_FOR_CONFIG_QEDE;

	if (IS_ENABLED(CONFIG_QED_SRIOV))
		config_bitmap |= BITMAP_IDX_FOR_CONFIG_QED_SRIOV;

	if (IS_ENABLED(CONFIG_QED_RDMA))
		config_bitmap |= BITMAP_IDX_FOR_CONFIG_QEDR;

	if (IS_ENABLED(CONFIG_QED_FCOE))
		config_bitmap |= BITMAP_IDX_FOR_CONFIG_QEDF;

	if (IS_ENABLED(CONFIG_QED_ISCSI))
		config_bitmap |= BITMAP_IDX_FOR_CONFIG_QEDI;

	if (IS_ENABLED(CONFIG_QED_LL2))
		config_bitmap |= BITMAP_IDX_FOR_CONFIG_QED_LL2;

	return config_bitmap;
}
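
/* The resulting bitmap is passed to the MFW as drv_ver_1 of the load request
 * (see qed_mcp_load_req() below), reporting which qed sub-modules were built
 * into this kernel.
 */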
struct qed_load_req_in_params {
	u8 hsi_ver;
#define QED_LOAD_REQ_HSI_VER_DEFAULT	0
#define QED_LOAD_REQ_HSI_VER_1		1
	u32 drv_ver_0;
	u32 drv_ver_1;
	u32 fw_ver;
	u8 drv_role;
	u8 timeout_val;
	u8 force_cmd;
	bool avoid_eng_reset;
};

struct qed_load_req_out_params {
	u32 load_code;
	u32 exist_drv_ver_0;
	u32 exist_drv_ver_1;
	u32 exist_fw_ver;
	u8 exist_drv_role;
	u8 mfw_hsi_ver;
	bool drv_exists;
};
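
/* LOAD_REQ handshake: the driver describes itself (role, FW/driver versions,
 * HSI version) and the MFW answers with a load level (engine/port/function)
 * or a refusal; refusals are handled in qed_mcp_load_req() by retrying with
 * HSI version 1 or with a force command where that is permitted.
 */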
static int
__qed_mcp_load_req(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt,
		   struct qed_load_req_in_params *p_in_params,
		   struct qed_load_req_out_params *p_out_params)
{
	struct qed_mcp_mb_params mb_params;
	struct load_req_stc load_req;
	struct load_rsp_stc load_rsp;
	u32 hsi_ver;
	int rc;

	memset(&load_req, 0, sizeof(load_req));
	load_req.drv_ver_0 = p_in_params->drv_ver_0;
	load_req.drv_ver_1 = p_in_params->drv_ver_1;
	load_req.fw_ver = p_in_params->fw_ver;
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
			  p_in_params->timeout_val);
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
			  p_in_params->force_cmd);
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
			  p_in_params->avoid_eng_reset);

	hsi_ver = (p_in_params->hsi_ver == QED_LOAD_REQ_HSI_VER_DEFAULT) ?
		  DRV_ID_MCP_HSI_VER_CURRENT :
		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->cdev->drv_type;
	mb_params.p_data_src = &load_req;
	mb_params.data_src_size = sizeof(load_req);
	mb_params.p_data_dst = &load_rsp;
	mb_params.data_dst_size = sizeof(load_rsp);
	mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
		   mb_params.param,
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));

	if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
			   load_req.drv_ver_0,
			   load_req.drv_ver_1,
			   load_req.fw_ver,
			   load_req.misc0,
			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
			   QED_MFW_GET_FIELD(load_req.misc0,
					     LOAD_REQ_LOCK_TO),
			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
	}

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to send load request, rc = %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
	p_out_params->load_code = mb_params.mcp_resp;

	if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
			   load_rsp.drv_ver_0,
			   load_rsp.drv_ver_1,
			   load_rsp.fw_ver,
			   load_rsp.misc0,
			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));

		p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
		p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
		p_out_params->exist_fw_ver = load_rsp.fw_ver;
		p_out_params->exist_drv_role =
			QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
		p_out_params->mfw_hsi_ver =
			QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
		p_out_params->drv_exists =
			QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
			LOAD_RSP_FLAGS0_DRV_EXISTS;
	}

	return 0;
}
static int eocre_get_mfw_drv_role(struct qed_hwfn *p_hwfn,
				  enum qed_drv_role drv_role,
				  u8 *p_mfw_drv_role)
{
	switch (drv_role) {
	case QED_DRV_ROLE_OS:
		*p_mfw_drv_role = DRV_ROLE_OS;
		break;
	case QED_DRV_ROLE_KDUMP:
		*p_mfw_drv_role = DRV_ROLE_KDUMP;
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
		return -EINVAL;
	}

	return 0;
}
enum qed_load_req_force {
	QED_LOAD_REQ_FORCE_NONE,
	QED_LOAD_REQ_FORCE_PF,
	QED_LOAD_REQ_FORCE_ALL,
};

static void qed_get_mfw_force_cmd(struct qed_hwfn *p_hwfn,
				  enum qed_load_req_force force_cmd,
				  u8 *p_mfw_force_cmd)
{
	switch (force_cmd) {
	case QED_LOAD_REQ_FORCE_NONE:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_NONE;
		break;
	case QED_LOAD_REQ_FORCE_PF:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_PF;
		break;
	case QED_LOAD_REQ_FORCE_ALL:
		*p_mfw_force_cmd = LOAD_REQ_FORCE_ALL;
		break;
	}
}
int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt,
		     struct qed_load_req_params *p_params)
{
	struct qed_load_req_out_params out_params;
	struct qed_load_req_in_params in_params;
	u8 mfw_drv_role, mfw_force_cmd;
	int rc;

	memset(&in_params, 0, sizeof(in_params));
	in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_DEFAULT;
	in_params.drv_ver_1 = qed_get_config_bitmap();
	in_params.fw_ver = STORM_FW_VERSION;
	rc = eocre_get_mfw_drv_role(p_hwfn, p_params->drv_role, &mfw_drv_role);
	if (rc)
		return rc;

	in_params.drv_role = mfw_drv_role;
	in_params.timeout_val = p_params->timeout_val;
	qed_get_mfw_force_cmd(p_hwfn,
			      QED_LOAD_REQ_FORCE_NONE, &mfw_force_cmd);

	in_params.force_cmd = mfw_force_cmd;
	in_params.avoid_eng_reset = p_params->avoid_eng_reset;

	memset(&out_params, 0, sizeof(out_params));
	rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
	if (rc)
		return rc;

	/* First handle cases where another load request should/might be sent:
	 * - MFW expects the old interface [HSI version = 1]
	 * - MFW responds that a force load request is required
	 */
	if (out_params.load_code == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_INFO(p_hwfn,
			"MFW refused a load request due to HSI > 1. Resending with HSI = 1\n");

		in_params.hsi_ver = QED_LOAD_REQ_HSI_VER_1;
		memset(&out_params, 0, sizeof(out_params));
		rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params, &out_params);
		if (rc)
			return rc;
	} else if (out_params.load_code ==
		   FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE) {
		if (qed_mcp_can_force_load(in_params.drv_role,
					   out_params.exist_drv_role,
					   p_params->override_force_load)) {
			DP_INFO(p_hwfn,
				"A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
				in_params.drv_role, in_params.fw_ver,
				in_params.drv_ver_0, in_params.drv_ver_1,
				out_params.exist_drv_role,
				out_params.exist_fw_ver,
				out_params.exist_drv_ver_0,
				out_params.exist_drv_ver_1);

			qed_get_mfw_force_cmd(p_hwfn,
					      QED_LOAD_REQ_FORCE_ALL,
					      &mfw_force_cmd);

			in_params.force_cmd = mfw_force_cmd;
			memset(&out_params, 0, sizeof(out_params));
			rc = __qed_mcp_load_req(p_hwfn, p_ptt, &in_params,
						&out_params);
			if (rc)
				return rc;
		} else {
			DP_NOTICE(p_hwfn,
				  "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
				  in_params.drv_role, in_params.fw_ver,
				  in_params.drv_ver_0, in_params.drv_ver_1,
				  out_params.exist_drv_role,
				  out_params.exist_fw_ver,
				  out_params.exist_drv_ver_0,
				  out_params.exist_drv_ver_1);
			DP_NOTICE(p_hwfn,
				  "Avoid sending a force load request to prevent disruption of active PFs\n");

			qed_mcp_cancel_load_req(p_hwfn, p_ptt);
			return -EBUSY;
		}
	}

	/* Now handle the other types of responses.
	 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
	 * expected here after the additional revised load requests were sent.
	 */
	switch (out_params.load_code) {
	case FW_MSG_CODE_DRV_LOAD_ENGINE:
	case FW_MSG_CODE_DRV_LOAD_PORT:
	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		if (out_params.mfw_hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
		    out_params.drv_exists) {
			/* The role and fw/driver version match, but the PF is
			 * already loaded and has not been unloaded gracefully.
			 */
			DP_NOTICE(p_hwfn, "PF is already loaded\n");
			return -EINVAL;
		}
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
			  out_params.load_code);
		return -EBUSY;
	}

	p_params->load_code = out_params.load_code;

	return 0;
}
int qed_mcp_load_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_DONE, 0, &resp,
			 &param);
	if (rc) {
		DP_NOTICE(p_hwfn,
			  "Failed to send a LOAD_DONE command, rc = %d\n", rc);
		return rc;
	}

	/* Check if there is a DID mismatch between nvm-cfg/efuse */
	if (param & FW_MB_PARAM_LOAD_DONE_DID_EFUSE_ERROR)
		DP_NOTICE(p_hwfn,
			  "warning: device configuration is not supported on this board type. The device may not function as expected.\n");

	return 0;
}
#define MFW_COMPLETION_MAX_ITER 5000
#define MFW_COMPLETION_INTERVAL_MS 1

int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_mb_params mb_params;
	u32 cnt = MFW_COMPLETION_MAX_ITER;
	u32 wol_param;
	int rc;

	switch (p_hwfn->cdev->wol_config) {
	case QED_OV_WOL_DISABLED:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
		break;
	case QED_OV_WOL_ENABLED:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unknown WoL configuration %02x\n",
			  p_hwfn->cdev->wol_config);
		fallthrough;
	case QED_OV_WOL_DEFAULT:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ;
	mb_params.param = wol_param;
	mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;

	spin_lock_bh(&p_hwfn->mcp_info->unload_lock);
	set_bit(QED_MCP_BYPASS_PROC_BIT,
		&p_hwfn->mcp_info->mcp_handling_status);
	spin_unlock_bh(&p_hwfn->mcp_info->unload_lock);

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	while (test_bit(QED_MCP_IN_PROCESSING_BIT,
			&p_hwfn->mcp_info->mcp_handling_status) && --cnt)
		msleep(MFW_COMPLETION_INTERVAL_MS);

	if (!cnt)
		DP_NOTICE(p_hwfn,
			  "Failed to wait MFW event completion after %d msec\n",
			  MFW_COMPLETION_MAX_ITER * MFW_COMPLETION_INTERVAL_MS);

	return rc;
}
int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_mb_params mb_params;
	struct mcp_mac wol_mac;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;

	/* Set the primary MAC if WoL is enabled */
	if (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED) {
		u8 *p_mac = p_hwfn->cdev->wol_mac;

		memset(&wol_mac, 0, sizeof(wol_mac));
		wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
		wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
				    p_mac[4] << 8 | p_mac[5];

		DP_VERBOSE(p_hwfn,
			   (QED_MSG_SP | NETIF_MSG_IFDOWN),
			   "Setting WoL MAC: %pM --> [%08x,%08x]\n",
			   p_mac, wol_mac.mac_upper, wol_mac.mac_lower);

		mb_params.p_data_src = &wol_mac;
		mb_params.data_src_size = sizeof(wol_mac);
	}

	return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}
static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     QED_PATH_ID(p_hwfn));
	u32 disabled_vfs[VF_MAX_STATIC / 32];
	int i;

	DP_VERBOSE(p_hwfn,
		   (QED_MSG_SP | QED_MSG_IOV),
		   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
		disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
					 path_addr +
					 offsetof(struct public_path,
						  mcp_vf_disabled) +
					 sizeof(u32) * i);
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
}
int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt, u32 *vfs_to_ack)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
				     MCP_PF_ID(p_hwfn));
	struct qed_mcp_mb_params mb_params;
	int rc;
	int i;

	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	mb_params.p_data_src = vfs_to_ack;
	mb_params.data_src_size = VF_MAX_STATIC / 8;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
		return -EBUSY;
	}

	/* Clear the ACK bits */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		qed_wr(p_hwfn, p_ptt,
		       func_addr +
		       offsetof(struct public_func, drv_ack_vf_disabled) +
		       i * sizeof(u32), 0);

	return rc;
}
static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
					      struct qed_ptt *p_ptt)
{
	u32 transceiver_state;

	transceiver_state = qed_rd(p_hwfn, p_ptt,
				   p_hwfn->mcp_info->port_addr +
				   offsetof(struct public_port,
					    transceiver_data));

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_HW | QED_MSG_SP),
		   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
		   transceiver_state,
		   (u32)(p_hwfn->mcp_info->port_addr +
			 offsetof(struct public_port, transceiver_data)));

	transceiver_state = GET_FIELD(transceiver_state,
				      ETH_TRANSCEIVER_STATE);

	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		DP_NOTICE(p_hwfn, "Transceiver is present.\n");
	else
		DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
}
static void qed_mcp_read_eee_config(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_mcp_link_state *p_link)
{
	u32 eee_status, val;

	p_link->eee_adv_caps = 0;
	p_link->eee_lp_adv_caps = 0;
	eee_status = qed_rd(p_hwfn,
			    p_ptt,
			    p_hwfn->mcp_info->port_addr +
			    offsetof(struct public_port, eee_status));
	p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
	val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_adv_caps |= QED_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_adv_caps |= QED_EEE_10G_ADV;
	val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_lp_adv_caps |= QED_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_lp_adv_caps |= QED_EEE_10G_ADV;
}
static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct public_func *p_data, int pfid)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 func_addr;
	u32 i, size;

	func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
	memset(p_data, 0, sizeof(*p_data));

	size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
	for (i = 0; i < size / sizeof(u32); i++)
		((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
					    func_addr + (i << 2));
	return size;
}
static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
				  struct public_func *p_shmem_info)
{
	struct qed_mcp_function_info *p_info;

	p_info = &p_hwfn->mcp_info->func_info;

	p_info->bandwidth_min = QED_MFW_GET_FIELD(p_shmem_info->config,
						  FUNC_MF_CFG_MIN_BW);
	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
		DP_INFO(p_hwfn,
			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
			p_info->bandwidth_min);
		p_info->bandwidth_min = 1;
	}

	p_info->bandwidth_max = QED_MFW_GET_FIELD(p_shmem_info->config,
						  FUNC_MF_CFG_MAX_BW);
	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
		DP_INFO(p_hwfn,
			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
			p_info->bandwidth_max);
		p_info->bandwidth_max = 100;
	}
}
1337 static void qed_mcp_handle_link_change(struct qed_hwfn
*p_hwfn
,
1338 struct qed_ptt
*p_ptt
, bool b_reset
)
1340 struct qed_mcp_link_state
*p_link
;
1344 /* Prevent SW/attentions from doing this at the same time */
1345 spin_lock_bh(&p_hwfn
->mcp_info
->link_lock
);
1347 p_link
= &p_hwfn
->mcp_info
->link_output
;
1348 memset(p_link
, 0, sizeof(*p_link
));
1350 status
= qed_rd(p_hwfn
, p_ptt
,
1351 p_hwfn
->mcp_info
->port_addr
+
1352 offsetof(struct public_port
, link_status
));
1353 DP_VERBOSE(p_hwfn
, (NETIF_MSG_LINK
| QED_MSG_SP
),
1354 "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
1356 (u32
)(p_hwfn
->mcp_info
->port_addr
+
1357 offsetof(struct public_port
, link_status
)));
1359 DP_VERBOSE(p_hwfn
, NETIF_MSG_LINK
,
1360 "Resetting link indications\n");
1364 if (p_hwfn
->b_drv_link_init
) {
1365 /* Link indication with modern MFW arrives as per-PF
1368 if (p_hwfn
->mcp_info
->capabilities
&
1369 FW_MB_PARAM_FEATURE_SUPPORT_VLINK
) {
1370 struct public_func shmem_info
;
1372 qed_mcp_get_shmem_func(p_hwfn
, p_ptt
, &shmem_info
,
1374 p_link
->link_up
= !!(shmem_info
.status
&
1375 FUNC_STATUS_VIRTUAL_LINK_UP
);
1376 qed_read_pf_bandwidth(p_hwfn
, &shmem_info
);
1377 DP_VERBOSE(p_hwfn
, NETIF_MSG_LINK
,
1378 "Virtual link_up = %d\n", p_link
->link_up
);
1380 p_link
->link_up
= !!(status
& LINK_STATUS_LINK_UP
);
1381 DP_VERBOSE(p_hwfn
, NETIF_MSG_LINK
,
1382 "Physical link_up = %d\n", p_link
->link_up
);
1385 p_link
->link_up
= false;
1388 p_link
->full_duplex
= true;
1389 switch ((status
& LINK_STATUS_SPEED_AND_DUPLEX_MASK
)) {
1390 case LINK_STATUS_SPEED_AND_DUPLEX_100G
:
1391 p_link
->speed
= 100000;
1393 case LINK_STATUS_SPEED_AND_DUPLEX_50G
:
1394 p_link
->speed
= 50000;
1396 case LINK_STATUS_SPEED_AND_DUPLEX_40G
:
1397 p_link
->speed
= 40000;
1399 case LINK_STATUS_SPEED_AND_DUPLEX_25G
:
1400 p_link
->speed
= 25000;
1402 case LINK_STATUS_SPEED_AND_DUPLEX_20G
:
1403 p_link
->speed
= 20000;
1405 case LINK_STATUS_SPEED_AND_DUPLEX_10G
:
1406 p_link
->speed
= 10000;
1408 case LINK_STATUS_SPEED_AND_DUPLEX_1000THD
:
1409 p_link
->full_duplex
= false;
1411 case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD
:
1412 p_link
->speed
= 1000;
1416 p_link
->link_up
= 0;
1419 if (p_link
->link_up
&& p_link
->speed
)
1420 p_link
->line_speed
= p_link
->speed
;
1422 p_link
->line_speed
= 0;
1424 max_bw
= p_hwfn
->mcp_info
->func_info
.bandwidth_max
;
1425 min_bw
= p_hwfn
->mcp_info
->func_info
.bandwidth_min
;
1427 /* Max bandwidth configuration */
1428 __qed_configure_pf_max_bandwidth(p_hwfn
, p_ptt
, p_link
, max_bw
);
1430 /* Min bandwidth configuration */
1431 __qed_configure_pf_min_bandwidth(p_hwfn
, p_ptt
, p_link
, min_bw
);
1432 qed_configure_vp_wfq_on_link_change(p_hwfn
->cdev
, p_ptt
,
1433 p_link
->min_pf_rate
);
1435 p_link
->an
= !!(status
& LINK_STATUS_AUTO_NEGOTIATE_ENABLED
);
1436 p_link
->an_complete
= !!(status
&
1437 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE
);
1438 p_link
->parallel_detection
= !!(status
&
1439 LINK_STATUS_PARALLEL_DETECTION_USED
);
1440 p_link
->pfc_enabled
= !!(status
& LINK_STATUS_PFC_ENABLED
);
1442 p_link
->partner_adv_speed
|=
1443 (status
& LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE
) ?
1444 QED_LINK_PARTNER_SPEED_1G_FD
: 0;
1445 p_link
->partner_adv_speed
|=
1446 (status
& LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE
) ?
1447 QED_LINK_PARTNER_SPEED_1G_HD
: 0;
1448 p_link
->partner_adv_speed
|=
1449 (status
& LINK_STATUS_LINK_PARTNER_10G_CAPABLE
) ?
1450 QED_LINK_PARTNER_SPEED_10G
: 0;
1451 p_link
->partner_adv_speed
|=
1452 (status
& LINK_STATUS_LINK_PARTNER_20G_CAPABLE
) ?
1453 QED_LINK_PARTNER_SPEED_20G
: 0;
1454 p_link
->partner_adv_speed
|=
1455 (status
& LINK_STATUS_LINK_PARTNER_25G_CAPABLE
) ?
1456 QED_LINK_PARTNER_SPEED_25G
: 0;
1457 p_link
->partner_adv_speed
|=
1458 (status
& LINK_STATUS_LINK_PARTNER_40G_CAPABLE
) ?
1459 QED_LINK_PARTNER_SPEED_40G
: 0;
1460 p_link
->partner_adv_speed
|=
1461 (status
& LINK_STATUS_LINK_PARTNER_50G_CAPABLE
) ?
1462 QED_LINK_PARTNER_SPEED_50G
: 0;
1463 p_link
->partner_adv_speed
|=
1464 (status
& LINK_STATUS_LINK_PARTNER_100G_CAPABLE
) ?
1465 QED_LINK_PARTNER_SPEED_100G
: 0;
1467 p_link
->partner_tx_flow_ctrl_en
=
1468 !!(status
& LINK_STATUS_TX_FLOW_CONTROL_ENABLED
);
1469 p_link
->partner_rx_flow_ctrl_en
=
1470 !!(status
& LINK_STATUS_RX_FLOW_CONTROL_ENABLED
);
1472 switch (status
& LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK
) {
1473 case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE
:
1474 p_link
->partner_adv_pause
= QED_LINK_PARTNER_SYMMETRIC_PAUSE
;
1476 case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE
:
1477 p_link
->partner_adv_pause
= QED_LINK_PARTNER_ASYMMETRIC_PAUSE
;
1479 case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE
:
1480 p_link
->partner_adv_pause
= QED_LINK_PARTNER_BOTH_PAUSE
;
1483 p_link
->partner_adv_pause
= 0;
1486 p_link
->sfp_tx_fault
= !!(status
& LINK_STATUS_SFP_TX_FAULT
);
1488 if (p_hwfn
->mcp_info
->capabilities
& FW_MB_PARAM_FEATURE_SUPPORT_EEE
)
1489 qed_mcp_read_eee_config(p_hwfn
, p_ptt
, p_link
);
1491 if (p_hwfn
->mcp_info
->capabilities
&
1492 FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL
) {
1493 switch (status
& LINK_STATUS_FEC_MODE_MASK
) {
1494 case LINK_STATUS_FEC_MODE_NONE
:
1495 p_link
->fec_active
= QED_FEC_MODE_NONE
;
1497 case LINK_STATUS_FEC_MODE_FIRECODE_CL74
:
1498 p_link
->fec_active
= QED_FEC_MODE_FIRECODE
;
1500 case LINK_STATUS_FEC_MODE_RS_CL91
:
1501 p_link
->fec_active
= QED_FEC_MODE_RS
;
1504 p_link
->fec_active
= QED_FEC_MODE_AUTO
;
1507 p_link
->fec_active
= QED_FEC_MODE_UNSUPPORTED
;
1510 qed_link_update(p_hwfn
, p_ptt
);
1512 spin_unlock_bh(&p_hwfn
->mcp_info
->link_lock
);
1515 int qed_mcp_set_link(struct qed_hwfn
*p_hwfn
, struct qed_ptt
*p_ptt
, bool b_up
)
1517 struct qed_mcp_link_params
*params
= &p_hwfn
->mcp_info
->link_input
;
1518 struct qed_mcp_mb_params mb_params
;
1519 struct eth_phy_cfg phy_cfg
;
1520 u32 cmd
, fec_bit
= 0;
1524 /* Set the shmem configuration according to params */
1525 memset(&phy_cfg
, 0, sizeof(phy_cfg
));
1526 cmd
= b_up
? DRV_MSG_CODE_INIT_PHY
: DRV_MSG_CODE_LINK_RESET
;
1527 if (!params
->speed
.autoneg
)
1528 phy_cfg
.speed
= params
->speed
.forced_speed
;
1529 phy_cfg
.pause
|= (params
->pause
.autoneg
) ? ETH_PAUSE_AUTONEG
: 0;
1530 phy_cfg
.pause
|= (params
->pause
.forced_rx
) ? ETH_PAUSE_RX
: 0;
1531 phy_cfg
.pause
|= (params
->pause
.forced_tx
) ? ETH_PAUSE_TX
: 0;
1532 phy_cfg
.adv_speed
= params
->speed
.advertised_speeds
;
1533 phy_cfg
.loopback_mode
= params
->loopback_mode
;
1535 /* There are MFWs that share this capability regardless of whether
1536 * this is feasible or not. And given that at the very least adv_caps
1537 * would be set internally by qed, we want to make sure LFA would
1540 if ((p_hwfn
->mcp_info
->capabilities
&
1541 FW_MB_PARAM_FEATURE_SUPPORT_EEE
) && params
->eee
.enable
) {
1542 phy_cfg
.eee_cfg
|= EEE_CFG_EEE_ENABLED
;
1543 if (params
->eee
.tx_lpi_enable
)
1544 phy_cfg
.eee_cfg
|= EEE_CFG_TX_LPI
;
1545 if (params
->eee
.adv_caps
& QED_EEE_1G_ADV
)
1546 phy_cfg
.eee_cfg
|= EEE_CFG_ADV_SPEED_1G
;
1547 if (params
->eee
.adv_caps
& QED_EEE_10G_ADV
)
1548 phy_cfg
.eee_cfg
|= EEE_CFG_ADV_SPEED_10G
;
1549 phy_cfg
.eee_cfg
|= (params
->eee
.tx_lpi_timer
<<
1550 EEE_TX_TIMER_USEC_OFFSET
) &
1551 EEE_TX_TIMER_USEC_MASK
;
1554 if (p_hwfn
->mcp_info
->capabilities
&
1555 FW_MB_PARAM_FEATURE_SUPPORT_FEC_CONTROL
) {
1556 if (params
->fec
& QED_FEC_MODE_NONE
)
1557 fec_bit
|= FEC_FORCE_MODE_NONE
;
1558 else if (params
->fec
& QED_FEC_MODE_FIRECODE
)
1559 fec_bit
|= FEC_FORCE_MODE_FIRECODE
;
1560 else if (params
->fec
& QED_FEC_MODE_RS
)
1561 fec_bit
|= FEC_FORCE_MODE_RS
;
1562 else if (params
->fec
& QED_FEC_MODE_AUTO
)
1563 fec_bit
|= FEC_FORCE_MODE_AUTO
;
1565 SET_MFW_FIELD(phy_cfg
.fec_mode
, FEC_FORCE_MODE
, fec_bit
);
1568 if (p_hwfn
->mcp_info
->capabilities
&
1569 FW_MB_PARAM_FEATURE_SUPPORT_EXT_SPEED_FEC_CONTROL
) {
1571 if (params
->ext_speed
.autoneg
)
1572 ext_speed
|= ETH_EXT_SPEED_NONE
;
1574 val
= params
->ext_speed
.forced_speed
;
1575 if (val
& QED_EXT_SPEED_1G
)
1576 ext_speed
|= ETH_EXT_SPEED_1G
;
1577 if (val
& QED_EXT_SPEED_10G
)
1578 ext_speed
|= ETH_EXT_SPEED_10G
;
1579 if (val
& QED_EXT_SPEED_25G
)
1580 ext_speed
|= ETH_EXT_SPEED_25G
;
1581 if (val
& QED_EXT_SPEED_40G
)
1582 ext_speed
|= ETH_EXT_SPEED_40G
;
1583 if (val
& QED_EXT_SPEED_50G_R
)
1584 ext_speed
|= ETH_EXT_SPEED_50G_BASE_R
;
1585 if (val
& QED_EXT_SPEED_50G_R2
)
1586 ext_speed
|= ETH_EXT_SPEED_50G_BASE_R2
;
1587 if (val
& QED_EXT_SPEED_100G_R2
)
1588 ext_speed
|= ETH_EXT_SPEED_100G_BASE_R2
;
1589 if (val
& QED_EXT_SPEED_100G_R4
)
1590 ext_speed
|= ETH_EXT_SPEED_100G_BASE_R4
;
1591 if (val
& QED_EXT_SPEED_100G_P4
)
1592 ext_speed
|= ETH_EXT_SPEED_100G_BASE_P4
;
1594 SET_MFW_FIELD(phy_cfg
.extended_speed
, ETH_EXT_SPEED
,
1599 val
= params
->ext_speed
.advertised_speeds
;
1600 if (val
& QED_EXT_SPEED_MASK_1G
)
1601 ext_speed
|= ETH_EXT_ADV_SPEED_1G
;
1602 if (val
& QED_EXT_SPEED_MASK_10G
)
1603 ext_speed
|= ETH_EXT_ADV_SPEED_10G
;
1604 if (val
& QED_EXT_SPEED_MASK_25G
)
1605 ext_speed
|= ETH_EXT_ADV_SPEED_25G
;
1606 if (val
& QED_EXT_SPEED_MASK_40G
)
1607 ext_speed
|= ETH_EXT_ADV_SPEED_40G
;
1608 if (val
& QED_EXT_SPEED_MASK_50G_R
)
1609 ext_speed
|= ETH_EXT_ADV_SPEED_50G_BASE_R
;
1610 if (val
& QED_EXT_SPEED_MASK_50G_R2
)
1611 ext_speed
|= ETH_EXT_ADV_SPEED_50G_BASE_R2
;
1612 if (val
& QED_EXT_SPEED_MASK_100G_R2
)
1613 ext_speed
|= ETH_EXT_ADV_SPEED_100G_BASE_R2
;
1614 if (val
& QED_EXT_SPEED_MASK_100G_R4
)
1615 ext_speed
|= ETH_EXT_ADV_SPEED_100G_BASE_R4
;
1616 if (val
& QED_EXT_SPEED_MASK_100G_P4
)
1617 ext_speed
|= ETH_EXT_ADV_SPEED_100G_BASE_P4
;
1619 phy_cfg
.extended_speed
|= ext_speed
;
1621 SET_MFW_FIELD(phy_cfg
.fec_mode
, FEC_EXTENDED_MODE
,
1622 params
->ext_fec_mode
);
1625 p_hwfn
->b_drv_link_init
= b_up
;
1628 DP_VERBOSE(p_hwfn
, NETIF_MSG_LINK
,
1629 "Configuring Link: Speed 0x%08x, Pause 0x%08x, Adv. Speed 0x%08x, Loopback 0x%08x, FEC 0x%08x, Ext. Speed 0x%08x\n",
1630 phy_cfg
.speed
, phy_cfg
.pause
, phy_cfg
.adv_speed
,
1631 phy_cfg
.loopback_mode
, phy_cfg
.fec_mode
,
1632 phy_cfg
.extended_speed
);
1634 DP_VERBOSE(p_hwfn
, NETIF_MSG_LINK
, "Resetting link\n");
1637 memset(&mb_params
, 0, sizeof(mb_params
));
1638 mb_params
.cmd
= cmd
;
1639 mb_params
.p_data_src
= &phy_cfg
;
1640 mb_params
.data_src_size
= sizeof(phy_cfg
);
1641 rc
= qed_mcp_cmd_and_union(p_hwfn
, p_ptt
, &mb_params
);
1643 /* if mcp fails to respond we must abort */
1645 DP_ERR(p_hwfn
, "MCP response failure, aborting\n");
1649 /* Mimic link-change attention, done for several reasons:
1650 * - On reset, there's no guarantee MFW would trigger
1652 * - On initialization, older MFWs might not indicate link change
1653 * during LFA, so we'll never get an UP indication.
1655 qed_mcp_handle_link_change(p_hwfn
, p_ptt
, !b_up
);
u32 qed_get_process_kill_counter(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	u32 path_offsize_addr, path_offsize, path_addr, proc_kill_cnt;

	if (IS_VF(p_hwfn->cdev))
		return 0;

	path_offsize_addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
						 PUBLIC_PATH);
	path_offsize = qed_rd(p_hwfn, p_ptt, path_offsize_addr);
	path_addr = SECTION_ADDR(path_offsize, QED_PATH_ID(p_hwfn));

	proc_kill_cnt = qed_rd(p_hwfn, p_ptt,
			       path_addr +
			       offsetof(struct public_path, process_kill)) &
			PROCESS_KILL_COUNTER_MASK;

	return proc_kill_cnt;
}
static void qed_mcp_handle_process_kill(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 proc_kill_cnt;

	/* Prevent possible attentions/interrupts during the recovery handling
	 * and till its load phase, during which they will be re-enabled.
	 */
	qed_int_igu_disable_int(p_hwfn, p_ptt);

	DP_NOTICE(p_hwfn, "Received a process kill indication\n");

	/* The following operations should be done once, and thus in CMT mode
	 * are carried out by only the first HW function.
	 */
	if (p_hwfn != QED_LEADING_HWFN(cdev))
		return;

	if (cdev->recov_in_prog) {
		DP_NOTICE(p_hwfn,
			  "Ignoring the indication since a recovery process is already in progress\n");
		return;
	}

	cdev->recov_in_prog = true;

	proc_kill_cnt = qed_get_process_kill_counter(p_hwfn, p_ptt);
	DP_NOTICE(p_hwfn, "Process kill counter: %d\n", proc_kill_cnt);

	qed_schedule_recovery_handler(p_hwfn);
}
1714 static void qed_mcp_send_protocol_stats(struct qed_hwfn
*p_hwfn
,
1715 struct qed_ptt
*p_ptt
,
1716 enum MFW_DRV_MSG_TYPE type
)
1718 enum qed_mcp_protocol_type stats_type
;
1719 union qed_mcp_protocol_stats stats
;
1720 struct qed_mcp_mb_params mb_params
;
1724 case MFW_DRV_MSG_GET_LAN_STATS
:
1725 stats_type
= QED_MCP_LAN_STATS
;
1726 hsi_param
= DRV_MSG_CODE_STATS_TYPE_LAN
;
1728 case MFW_DRV_MSG_GET_FCOE_STATS
:
1729 stats_type
= QED_MCP_FCOE_STATS
;
1730 hsi_param
= DRV_MSG_CODE_STATS_TYPE_FCOE
;
1732 case MFW_DRV_MSG_GET_ISCSI_STATS
:
1733 stats_type
= QED_MCP_ISCSI_STATS
;
1734 hsi_param
= DRV_MSG_CODE_STATS_TYPE_ISCSI
;
1736 case MFW_DRV_MSG_GET_RDMA_STATS
:
1737 stats_type
= QED_MCP_RDMA_STATS
;
1738 hsi_param
= DRV_MSG_CODE_STATS_TYPE_RDMA
;
1741 DP_NOTICE(p_hwfn
, "Invalid protocol type %d\n", type
);
1745 qed_get_protocol_stats(p_hwfn
->cdev
, stats_type
, &stats
);
1747 memset(&mb_params
, 0, sizeof(mb_params
));
1748 mb_params
.cmd
= DRV_MSG_CODE_GET_STATS
;
1749 mb_params
.param
= hsi_param
;
1750 mb_params
.p_data_src
= &stats
;
1751 mb_params
.data_src_size
= sizeof(stats
);
1752 qed_mcp_cmd_and_union(p_hwfn
, p_ptt
, &mb_params
);
static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *p_info;
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	qed_read_pf_bandwidth(p_hwfn, &shmem_info);

	p_info = &p_hwfn->mcp_info->func_info;

	qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
	qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);

	/* Acknowledge the MFW */
	qed_mcp_cmd_nosleep(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
			    &param);
}
1775 static void qed_mcp_update_stag(struct qed_hwfn
*p_hwfn
, struct qed_ptt
*p_ptt
)
1777 struct public_func shmem_info
;
1778 u32 resp
= 0, param
= 0;
1780 qed_mcp_get_shmem_func(p_hwfn
, p_ptt
, &shmem_info
, MCP_PF_ID(p_hwfn
));
1782 p_hwfn
->mcp_info
->func_info
.ovlan
= (u16
)shmem_info
.ovlan_stag
&
1783 FUNC_MF_CFG_OV_STAG_MASK
;
1784 p_hwfn
->hw_info
.ovlan
= p_hwfn
->mcp_info
->func_info
.ovlan
;
1785 if (test_bit(QED_MF_OVLAN_CLSS
, &p_hwfn
->cdev
->mf_bits
)) {
1786 if (p_hwfn
->hw_info
.ovlan
!= QED_MCP_VLAN_UNSET
) {
1787 qed_wr(p_hwfn
, p_ptt
, NIG_REG_LLH_FUNC_TAG_VALUE
,
1788 p_hwfn
->hw_info
.ovlan
);
1789 qed_wr(p_hwfn
, p_ptt
, NIG_REG_LLH_FUNC_TAG_EN
, 1);
1791 /* Configure DB to add external vlan to EDPM packets */
1792 qed_wr(p_hwfn
, p_ptt
, DORQ_REG_TAG1_OVRD_MODE
, 1);
1793 qed_wr(p_hwfn
, p_ptt
, DORQ_REG_PF_EXT_VID_BB_K2
,
1794 p_hwfn
->hw_info
.ovlan
);
1796 qed_wr(p_hwfn
, p_ptt
, NIG_REG_LLH_FUNC_TAG_EN
, 0);
1797 qed_wr(p_hwfn
, p_ptt
, NIG_REG_LLH_FUNC_TAG_VALUE
, 0);
1798 qed_wr(p_hwfn
, p_ptt
, DORQ_REG_TAG1_OVRD_MODE
, 0);
1799 qed_wr(p_hwfn
, p_ptt
, DORQ_REG_PF_EXT_VID_BB_K2
, 0);
1802 qed_sp_pf_update_stag(p_hwfn
);
1805 DP_VERBOSE(p_hwfn
, QED_MSG_SP
, "ovlan = %d hw_mode = 0x%x\n",
1806 p_hwfn
->mcp_info
->func_info
.ovlan
, p_hwfn
->hw_info
.hw_mode
);
1808 /* Acknowledge the MFW */
1809 qed_mcp_cmd_nosleep(p_hwfn
, p_ptt
, DRV_MSG_CODE_S_TAG_UPDATE_ACK
, 0,
static void qed_mcp_handle_fan_failure(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt)
{
	/* A single notification should be sent to upper driver in CMT mode */
	if (p_hwfn != QED_LEADING_HWFN(p_hwfn->cdev))
		return;

	qed_hw_err_notify(p_hwfn, p_ptt, QED_HW_ERR_FAN_FAIL,
			  "Fan failure was detected on the network interface card and it's going to be shut down.\n");
}
1824 struct qed_mdump_cmd_params
{
1834 qed_mcp_mdump_cmd(struct qed_hwfn
*p_hwfn
,
1835 struct qed_ptt
*p_ptt
,
1836 struct qed_mdump_cmd_params
*p_mdump_cmd_params
)
1838 struct qed_mcp_mb_params mb_params
;
1841 memset(&mb_params
, 0, sizeof(mb_params
));
1842 mb_params
.cmd
= DRV_MSG_CODE_MDUMP_CMD
;
1843 mb_params
.param
= p_mdump_cmd_params
->cmd
;
1844 mb_params
.p_data_src
= p_mdump_cmd_params
->p_data_src
;
1845 mb_params
.data_src_size
= p_mdump_cmd_params
->data_src_size
;
1846 mb_params
.p_data_dst
= p_mdump_cmd_params
->p_data_dst
;
1847 mb_params
.data_dst_size
= p_mdump_cmd_params
->data_dst_size
;
1848 rc
= qed_mcp_cmd_and_union(p_hwfn
, p_ptt
, &mb_params
);
1852 p_mdump_cmd_params
->mcp_resp
= mb_params
.mcp_resp
;
1854 if (p_mdump_cmd_params
->mcp_resp
== FW_MSG_CODE_MDUMP_INVALID_CMD
) {
1856 "The mdump sub command is unsupported by the MFW [mdump_cmd 0x%x]\n",
1857 p_mdump_cmd_params
->cmd
);
1859 } else if (p_mdump_cmd_params
->mcp_resp
== FW_MSG_CODE_UNSUPPORTED
) {
1861 "The mdump command is not supported by the MFW\n");
static int qed_mcp_mdump_ack(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mdump_cmd_params mdump_cmd_params;

	memset(&mdump_cmd_params, 0, sizeof(mdump_cmd_params));
	mdump_cmd_params.cmd = DRV_MSG_CODE_MDUMP_ACK;

	return qed_mcp_mdump_cmd(p_hwfn, p_ptt, &mdump_cmd_params);
}
1879 qed_mcp_mdump_get_retain(struct qed_hwfn
*p_hwfn
,
1880 struct qed_ptt
*p_ptt
,
1881 struct mdump_retain_data_stc
*p_mdump_retain
)
1883 struct qed_mdump_cmd_params mdump_cmd_params
;
1886 memset(&mdump_cmd_params
, 0, sizeof(mdump_cmd_params
));
1887 mdump_cmd_params
.cmd
= DRV_MSG_CODE_MDUMP_GET_RETAIN
;
1888 mdump_cmd_params
.p_data_dst
= p_mdump_retain
;
1889 mdump_cmd_params
.data_dst_size
= sizeof(*p_mdump_retain
);
1891 rc
= qed_mcp_mdump_cmd(p_hwfn
, p_ptt
, &mdump_cmd_params
);
1895 if (mdump_cmd_params
.mcp_resp
!= FW_MSG_CODE_OK
) {
1897 "Failed to get the mdump retained data [mcp_resp 0x%x]\n",
1898 mdump_cmd_params
.mcp_resp
);
1905 static void qed_mcp_handle_critical_error(struct qed_hwfn
*p_hwfn
,
1906 struct qed_ptt
*p_ptt
)
1908 struct mdump_retain_data_stc mdump_retain
;
1911 /* In CMT mode - no need for more than a single acknowledgment to the
1912 * MFW, and no more than a single notification to the upper driver.
1914 if (p_hwfn
!= QED_LEADING_HWFN(p_hwfn
->cdev
))
1917 rc
= qed_mcp_mdump_get_retain(p_hwfn
, p_ptt
, &mdump_retain
);
1918 if (rc
== 0 && mdump_retain
.valid
)
1920 "The MFW notified that a critical error occurred in the device [epoch 0x%08x, pf 0x%x, status 0x%08x]\n",
1922 mdump_retain
.pf
, mdump_retain
.status
);
1925 "The MFW notified that a critical error occurred in the device\n");
1928 "Acknowledging the notification to not allow the MFW crash dump [driver debug data collection is preferable]\n");
1929 qed_mcp_mdump_ack(p_hwfn
, p_ptt
);
1931 qed_hw_err_notify(p_hwfn
, p_ptt
, QED_HW_ERR_HW_ATTN
, NULL
);
1934 void qed_mcp_read_ufp_config(struct qed_hwfn
*p_hwfn
, struct qed_ptt
*p_ptt
)
1936 struct public_func shmem_info
;
1939 if (!test_bit(QED_MF_UFP_SPECIFIC
, &p_hwfn
->cdev
->mf_bits
))
1942 memset(&p_hwfn
->ufp_info
, 0, sizeof(p_hwfn
->ufp_info
));
1943 port_cfg
= qed_rd(p_hwfn
, p_ptt
, p_hwfn
->mcp_info
->port_addr
+
1944 offsetof(struct public_port
, oem_cfg_port
));
1945 val
= (port_cfg
& OEM_CFG_CHANNEL_TYPE_MASK
) >>
1946 OEM_CFG_CHANNEL_TYPE_OFFSET
;
1947 if (val
!= OEM_CFG_CHANNEL_TYPE_STAGGED
)
1949 "Incorrect UFP Channel type %d port_id 0x%02x\n",
1950 val
, MFW_PORT(p_hwfn
));
1952 val
= (port_cfg
& OEM_CFG_SCHED_TYPE_MASK
) >> OEM_CFG_SCHED_TYPE_OFFSET
;
1953 if (val
== OEM_CFG_SCHED_TYPE_ETS
) {
1954 p_hwfn
->ufp_info
.mode
= QED_UFP_MODE_ETS
;
1955 } else if (val
== OEM_CFG_SCHED_TYPE_VNIC_BW
) {
1956 p_hwfn
->ufp_info
.mode
= QED_UFP_MODE_VNIC_BW
;
1958 p_hwfn
->ufp_info
.mode
= QED_UFP_MODE_UNKNOWN
;
1960 "Unknown UFP scheduling mode %d port_id 0x%02x\n",
1961 val
, MFW_PORT(p_hwfn
));
1964 qed_mcp_get_shmem_func(p_hwfn
, p_ptt
, &shmem_info
, MCP_PF_ID(p_hwfn
));
1965 val
= (shmem_info
.oem_cfg_func
& OEM_CFG_FUNC_TC_MASK
) >>
1966 OEM_CFG_FUNC_TC_OFFSET
;
1967 p_hwfn
->ufp_info
.tc
= (u8
)val
;
1968 val
= (shmem_info
.oem_cfg_func
& OEM_CFG_FUNC_HOST_PRI_CTRL_MASK
) >>
1969 OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET
;
1970 if (val
== OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC
) {
1971 p_hwfn
->ufp_info
.pri_type
= QED_UFP_PRI_VNIC
;
1972 } else if (val
== OEM_CFG_FUNC_HOST_PRI_CTRL_OS
) {
1973 p_hwfn
->ufp_info
.pri_type
= QED_UFP_PRI_OS
;
1975 p_hwfn
->ufp_info
.pri_type
= QED_UFP_PRI_UNKNOWN
;
1977 "Unknown Host priority control %d port_id 0x%02x\n",
1978 val
, MFW_PORT(p_hwfn
));
1982 "UFP shmem config: mode = %d tc = %d pri_type = %d port_id 0x%02x\n",
1983 p_hwfn
->ufp_info
.mode
, p_hwfn
->ufp_info
.tc
,
1984 p_hwfn
->ufp_info
.pri_type
, MFW_PORT(p_hwfn
));
static int
qed_mcp_handle_ufp_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	qed_mcp_read_ufp_config(p_hwfn, p_ptt);

	if (p_hwfn->ufp_info.mode == QED_UFP_MODE_VNIC_BW) {
		p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
		qed_hw_info_set_offload_tc(&p_hwfn->hw_info,
					   p_hwfn->ufp_info.tc);

		qed_qm_reconf(p_hwfn, p_ptt);
	} else if (p_hwfn->ufp_info.mode == QED_UFP_MODE_ETS) {
		/* Merge UFP TC with the dcbx TC data */
		qed_dcbx_mib_update_event(p_hwfn, p_ptt,
					  QED_DCBX_OPERATIONAL_MIB);
	} else {
		DP_ERR(p_hwfn, "Invalid sched type, discard the UFP config\n");
		return -EINVAL;
	}

	/* update storm FW with negotiation results */
	qed_sp_pf_update_ufp(p_hwfn);

	/* update stag pcp value */
	qed_sp_pf_update_stag(p_hwfn);

	return 0;
}
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *info = p_hwfn->mcp_info;
	int rc = 0;
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");

	/* Read Messages from MFW */
	qed_mcp_read_mb(p_hwfn, p_ptt);

	/* Compare current messages to old ones */
	for (i = 0; i < info->mfw_mb_length; i++) {
		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
			continue;

		found = true;

		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

		spin_lock_bh(&p_hwfn->mcp_info->unload_lock);
		if (test_bit(QED_MCP_BYPASS_PROC_BIT,
			     &p_hwfn->mcp_info->mcp_handling_status)) {
			spin_unlock_bh(&p_hwfn->mcp_info->unload_lock);
			DP_INFO(p_hwfn,
				"Msg [%d] is bypassed on unload flow\n", i);
			continue;
		}

		set_bit(QED_MCP_IN_PROCESSING_BIT,
			&p_hwfn->mcp_info->mcp_handling_status);
		spin_unlock_bh(&p_hwfn->mcp_info->unload_lock);

		switch (i) {
		case MFW_DRV_MSG_LINK_CHANGE:
			qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
			break;
		case MFW_DRV_MSG_VF_DISABLED:
			qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_LLDP_MIB);
			break;
		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_MIB);
			break;
		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_OPERATIONAL_MIB);
			break;
		case MFW_DRV_MSG_OEM_CFG_UPDATE:
			qed_mcp_handle_ufp_event(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
			qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_ERROR_RECOVERY:
			qed_mcp_handle_process_kill(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_LAN_STATS:
		case MFW_DRV_MSG_GET_FCOE_STATS:
		case MFW_DRV_MSG_GET_ISCSI_STATS:
		case MFW_DRV_MSG_GET_RDMA_STATS:
			qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
			break;
		case MFW_DRV_MSG_BW_UPDATE:
			qed_mcp_update_bw(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_S_TAG_UPDATE:
			qed_mcp_update_stag(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_FAILURE_DETECTED:
			qed_mcp_handle_fan_failure(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_CRITICAL_ERROR_OCCURRED:
			qed_mcp_handle_critical_error(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_TLV_REQ:
			qed_mfw_tlv_req(p_hwfn);
			break;
		default:
			DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
			rc = -EINVAL;
		}

		clear_bit(QED_MCP_IN_PROCESSING_BIT,
			  &p_hwfn->mcp_info->mcp_handling_status);
	}

	/* ACK everything */
	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
		__be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);

		/* MFW expect answer in BE, so we force write in that format */
		qed_wr(p_hwfn, p_ptt,
		       info->mfw_mb_addr + sizeof(u32) +
		       MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
		       sizeof(u32) + i * sizeof(u32),
		       (__force u32)val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Received an MFW message indication but no new message!\n");
		rc = -EINVAL;
	}

	/* Copy the new mfw messages into the shadow */
	memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}
int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
			u32 *p_mfw_ver, u32 *p_running_bundle_id)
{
	u32 global_offsize, public_base;

	if (IS_VF(p_hwfn->cdev)) {
		if (p_hwfn->vf_iov_info) {
			struct pfvf_acquire_resp_tlv *p_resp;

			p_resp = &p_hwfn->vf_iov_info->acquire_resp;
			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
			return 0;
		} else {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF requested MFW version prior to ACQUIRE\n");
			return -EINVAL;
		}
	}

	public_base = p_hwfn->mcp_info->public_base;
	global_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(public_base,
						     PUBLIC_GLOBAL));
	*p_mfw_ver =
	    qed_rd(p_hwfn, p_ptt,
		   SECTION_ADDR(global_offsize,
				0) + offsetof(struct public_global, mfw_ver));

	if (p_running_bundle_id) {
		*p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
					      SECTION_ADDR(global_offsize, 0) +
					      offsetof(struct public_global,
						       running_bundle_id));
	}

	return 0;
}
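/* Illustrative note (added comment, not from the original source): the raw
 * 32-bit value read above is conventionally unpacked a byte per version
 * component, most significant byte first. For example, a hypothetical raw
 * value of 0x08251010 would correspond to MFW version 8.37.16.16.
 */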
int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt, u32 *p_mbi_ver)
{
	u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	/* Read the address of the nvm_cfg */
	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
	if (!nvm_cfg_addr) {
		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
		return -EINVAL;
	}

	/* Read the offset of nvm_cfg1 */
	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);

	mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
		       offsetof(struct nvm_cfg1, glob) +
		       offsetof(struct nvm_cfg1_glob, mbi_version);
	*p_mbi_ver = qed_rd(p_hwfn, p_ptt,
			    mbi_ver_addr) &
		     (NVM_CFG1_GLOB_MBI_VERSION_0_MASK |
		      NVM_CFG1_GLOB_MBI_VERSION_1_MASK |
		      NVM_CFG1_GLOB_MBI_VERSION_2_MASK);

	return 0;
}
int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u32 *p_media_type)
{
	*p_media_type = MEDIA_UNSPECIFIED;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	if (!p_ptt) {
		*p_media_type = MEDIA_UNSPECIFIED;
		return -EINVAL;
	}

	*p_media_type = qed_rd(p_hwfn, p_ptt,
			       p_hwfn->mcp_info->port_addr +
			       offsetof(struct public_port,
					media_type));

	return 0;
}
int qed_mcp_get_transceiver_data(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 u32 *p_transceiver_state,
				 u32 *p_transceiver_type)
{
	u32 transceiver_info;

	*p_transceiver_type = ETH_TRANSCEIVER_TYPE_NONE;
	*p_transceiver_state = ETH_TRANSCEIVER_STATE_UPDATING;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	transceiver_info = qed_rd(p_hwfn, p_ptt,
				  p_hwfn->mcp_info->port_addr +
				  offsetof(struct public_port,
					   transceiver_data));

	*p_transceiver_state = (transceiver_info &
				ETH_TRANSCEIVER_STATE_MASK) >>
			       ETH_TRANSCEIVER_STATE_OFFSET;

	if (*p_transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		*p_transceiver_type = (transceiver_info &
				       ETH_TRANSCEIVER_TYPE_MASK) >>
				      ETH_TRANSCEIVER_TYPE_OFFSET;
	else
		*p_transceiver_type = ETH_TRANSCEIVER_TYPE_UNKNOWN;

	return 0;
}
static bool qed_is_transceiver_ready(u32 transceiver_state,
				     u32 transceiver_type)
{
	if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) &&
	    ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) == 0x0) &&
	    (transceiver_type != ETH_TRANSCEIVER_TYPE_NONE))
		return true;

	return false;
}
int qed_mcp_trans_speed_mask(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt, u32 *p_speed_mask)
{
	u32 transceiver_type, transceiver_state;
	int ret;

	ret = qed_mcp_get_transceiver_data(p_hwfn, p_ptt, &transceiver_state,
					   &transceiver_type);
	if (ret)
		return ret;

	if (qed_is_transceiver_ready(transceiver_state, transceiver_type) ==
				     false)
		return -EINVAL;

	switch (transceiver_type) {
	case ETH_TRANSCEIVER_TYPE_1G_LX:
	case ETH_TRANSCEIVER_TYPE_1G_SX:
	case ETH_TRANSCEIVER_TYPE_1G_PCC:
	case ETH_TRANSCEIVER_TYPE_1G_ACC:
	case ETH_TRANSCEIVER_TYPE_1000BASET:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		break;
	case ETH_TRANSCEIVER_TYPE_10G_SR:
	case ETH_TRANSCEIVER_TYPE_10G_LR:
	case ETH_TRANSCEIVER_TYPE_10G_LRM:
	case ETH_TRANSCEIVER_TYPE_10G_ER:
	case ETH_TRANSCEIVER_TYPE_10G_PCC:
	case ETH_TRANSCEIVER_TYPE_10G_ACC:
	case ETH_TRANSCEIVER_TYPE_4x10G:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		break;
	case ETH_TRANSCEIVER_TYPE_40G_LR4:
	case ETH_TRANSCEIVER_TYPE_40G_SR4:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		break;
	case ETH_TRANSCEIVER_TYPE_100G_AOC:
	case ETH_TRANSCEIVER_TYPE_100G_SR4:
	case ETH_TRANSCEIVER_TYPE_100G_LR4:
	case ETH_TRANSCEIVER_TYPE_100G_ER4:
	case ETH_TRANSCEIVER_TYPE_100G_ACC:
		*p_speed_mask =
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
		break;
	case ETH_TRANSCEIVER_TYPE_25G_SR:
	case ETH_TRANSCEIVER_TYPE_25G_LR:
	case ETH_TRANSCEIVER_TYPE_25G_AOC:
	case ETH_TRANSCEIVER_TYPE_25G_ACC_S:
	case ETH_TRANSCEIVER_TYPE_25G_ACC_M:
	case ETH_TRANSCEIVER_TYPE_25G_ACC_L:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
		break;
	case ETH_TRANSCEIVER_TYPE_25G_CA_N:
	case ETH_TRANSCEIVER_TYPE_25G_CA_S:
	case ETH_TRANSCEIVER_TYPE_25G_CA_L:
	case ETH_TRANSCEIVER_TYPE_4x25G_CR:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		break;
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		break;
	case ETH_TRANSCEIVER_TYPE_40G_CR4:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		break;
	case ETH_TRANSCEIVER_TYPE_100G_CR4:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
		*p_speed_mask =
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		break;
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_AOC:
		*p_speed_mask =
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		break;
	case ETH_TRANSCEIVER_TYPE_XLPPI:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
		break;
	case ETH_TRANSCEIVER_TYPE_10G_BASET:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
	case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
		*p_speed_mask = NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G |
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		break;
	default:
		DP_INFO(p_hwfn, "Unknown transceiver type 0x%x\n",
			transceiver_type);
		*p_speed_mask = 0xff;
		break;
	}

	return 0;
}
int qed_mcp_get_board_config(struct qed_hwfn *p_hwfn,
			     struct qed_ptt *p_ptt, u32 *p_board_config)
{
	u32 nvm_cfg_addr, nvm_cfg1_offset, port_cfg_addr;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	if (!p_ptt) {
		*p_board_config = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;
		return -EINVAL;
	}

	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);
	port_cfg_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
			offsetof(struct nvm_cfg1, port[MFW_PORT(p_hwfn)]);
	*p_board_config = qed_rd(p_hwfn, p_ptt,
				 port_cfg_addr +
				 offsetof(struct nvm_cfg1_port,
					  board_cfg));

	return 0;
}
/* Old MFW has a global configuration for all PFs regarding RDMA support */
static void
qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn,
			       enum qed_pci_personality *p_proto)
{
	/* There wasn't ever a legacy MFW that published iwarp.
	 * So at this point, this is either plain l2 or RoCE.
	 */
	if (test_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities))
		*p_proto = QED_PCI_ETH_ROCE;
	else
		*p_proto = QED_PCI_ETH;

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "According to Legacy capabilities, L2 personality is %08x\n",
		   (u32)*p_proto);
}

static int
qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    enum qed_pci_personality *p_proto)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt,
			 DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param);
	if (rc)
		return rc;
	if (resp != FW_MSG_CODE_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
			   "MFW lacks support for command; Returns %08x\n",
			   resp);
		return -EINVAL;
	}

	switch (param) {
	case FW_MB_PARAM_GET_PF_RDMA_NONE:
		*p_proto = QED_PCI_ETH;
		break;
	case FW_MB_PARAM_GET_PF_RDMA_ROCE:
		*p_proto = QED_PCI_ETH_ROCE;
		break;
	case FW_MB_PARAM_GET_PF_RDMA_IWARP:
		*p_proto = QED_PCI_ETH_IWARP;
		break;
	case FW_MB_PARAM_GET_PF_RDMA_BOTH:
		*p_proto = QED_PCI_ETH_RDMA;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
			  param);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_IFUP,
		   "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
		   (u32)*p_proto, resp, param);
	return 0;
}
static int
qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
			struct public_func *p_info,
			struct qed_ptt *p_ptt,
			enum qed_pci_personality *p_proto)
{
	int rc = 0;

	switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
	case FUNC_MF_CFG_PROTOCOL_ETHERNET:
		if (!IS_ENABLED(CONFIG_QED_RDMA))
			*p_proto = QED_PCI_ETH;
		else if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto))
			qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
		break;
	case FUNC_MF_CFG_PROTOCOL_ISCSI:
		*p_proto = QED_PCI_ISCSI;
		break;
	case FUNC_MF_CFG_PROTOCOL_FCOE:
		*p_proto = QED_PCI_FCOE;
		break;
	case FUNC_MF_CFG_PROTOCOL_ROCE:
		DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
		fallthrough;
	default:
		rc = -EINVAL;
	}

	return rc;
}
int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *info;
	struct public_func shmem_info;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
	info = &p_hwfn->mcp_info->func_info;

	info->pause_on_host = (shmem_info.config &
			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;

	if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
				    &info->protocol)) {
		DP_ERR(p_hwfn, "Unknown personality %08x\n",
		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
		return -EINVAL;
	}

	qed_read_pf_bandwidth(p_hwfn, &shmem_info);

	if (shmem_info.mac_upper || shmem_info.mac_lower) {
		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
		info->mac[1] = (u8)(shmem_info.mac_upper);
		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
		info->mac[5] = (u8)(shmem_info.mac_lower);

		/* Store primary MAC for later possible WoL */
		memcpy(&p_hwfn->cdev->wol_mac, info->mac, ETH_ALEN);
	} else {
		DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
	}

	info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_lower |
			 (((u64)shmem_info.fcoe_wwn_port_name_upper) << 32);
	info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_lower |
			 (((u64)shmem_info.fcoe_wwn_node_name_upper) << 32);

	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);

	info->mtu = (u16)shmem_info.mtu_size;

	p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_NONE;
	p_hwfn->cdev->wol_config = (u8)QED_OV_WOL_DEFAULT;
	if (qed_mcp_is_init(p_hwfn)) {
		u32 resp = 0, param = 0;
		int rc;

		rc = qed_mcp_cmd(p_hwfn, p_ptt,
				 DRV_MSG_CODE_OS_WOL, 0, &resp, &param);
		if (rc)
			return rc;
		if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
			p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_PME;
	}

	DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
		   "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %pM wwn port %llx node %llx ovlan %04x wol %02x\n",
		   info->pause_on_host, info->protocol,
		   info->bandwidth_min, info->bandwidth_max,
		   info->mac,
		   info->wwn_port, info->wwn_node,
		   info->ovlan, (u8)p_hwfn->hw_info.b_wol_support);

	return 0;
}
struct qed_mcp_link_params
*qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_input;
}

struct qed_mcp_link_state
*qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_output;
}

struct qed_mcp_link_capabilities
*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_capabilities;
}

int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt,
			 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);

	/* Wait for the drain to complete before returning */
	msleep(1020);

	return rc;
}
int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u32 *p_flash_size)
{
	u32 flash_size;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
		     MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
	flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));

	*p_flash_size = flash_size;

	return 0;
}
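/* Illustrative example (added comment, not from the original source): the
 * NVM_CFG4 field encodes the flash size in Mbit as a power of two, and
 * MCP_BYTES_PER_MBIT_SHIFT (17) converts that to bytes. A field value of 3,
 * for instance, yields 1 << (3 + 17) = 0x100000 bytes, i.e. 1 MiB (8 Mbit).
 */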
int qed_start_recovery_process(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_dev *cdev = p_hwfn->cdev;

	if (cdev->recov_in_prog) {
		DP_NOTICE(p_hwfn,
			  "Avoid triggering a recovery since such a process is already in progress\n");
		return -EAGAIN;
	}

	DP_NOTICE(p_hwfn, "Triggering a recovery process\n");
	qed_wr(p_hwfn, p_ptt, MISC_REG_AEU_GENERAL_ATTN_35, 0x1);

	return 0;
}

#define QED_RECOVERY_PROLOG_SLEEP_MS	100

int qed_recovery_prolog(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt = p_hwfn->p_main_ptt;
	int rc;

	/* Allow ongoing PCIe transactions to complete */
	msleep(QED_RECOVERY_PROLOG_SLEEP_MS);

	/* Clear the PF's internal FID_enable in the PXP */
	rc = qed_pglueb_set_pfid_enable(p_hwfn, p_ptt, false);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "qed_pglueb_set_pfid_enable() failed. rc = %d.\n",
			  rc);

	return rc;
}
static int
qed_mcp_config_vf_msix_bb(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u8 vf_id, u8 num)
{
	u32 resp = 0, param = 0, rc_param = 0;
	int rc;

	/* Only Leader can configure MSIX, and need to take CMT into account */
	if (!IS_LEAD_HWFN(p_hwfn))
		return 0;

	num *= p_hwfn->cdev->num_hwfns;

	param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
		 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
	param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
		 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
			 &resp, &rc_param);

	if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
		DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
		rc = -EINVAL;
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
			   num, vf_id);
	}

	return rc;
}

static int
qed_mcp_config_vf_msix_ah(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u8 num)
{
	u32 resp = 0, param = num, rc_param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
			 param, &resp, &rc_param);

	if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
		DP_NOTICE(p_hwfn, "MFW failed to set MSI-X for VFs\n");
		rc = -EINVAL;
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Requested 0x%02x MSI-x interrupts for VFs\n", num);
	}

	return rc;
}
int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u8 vf_id, u8 num)
{
	if (QED_IS_BB(p_hwfn->cdev))
		return qed_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
	else
		return qed_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
}
int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct qed_mcp_drv_version *p_ver)
{
	struct qed_mcp_mb_params mb_params;
	struct drv_version_stc drv_version;
	__be32 val;
	u32 i;
	int rc;

	memset(&drv_version, 0, sizeof(drv_version));
	drv_version.version = p_ver->version;
	for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
		val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
		*(__be32 *)&drv_version.name[i * sizeof(u32)] = val;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
	mb_params.p_data_src = &drv_version;
	mb_params.data_src_size = sizeof(drv_version);
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}
/* A maximal 100 msec waiting time for the MCP to halt */
#define QED_MCP_HALT_SLEEP_MS		10
#define QED_MCP_HALT_MAX_RETRIES	10

int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0, cpu_state, cnt = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
			 &param);
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	do {
		msleep(QED_MCP_HALT_SLEEP_MS);
		cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
		if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
			break;
	} while (++cnt < QED_MCP_HALT_MAX_RETRIES);

	if (cnt == QED_MCP_HALT_MAX_RETRIES) {
		DP_NOTICE(p_hwfn,
			  "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
			  qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
		return -EBUSY;
	}

	qed_mcp_cmd_set_blocking(p_hwfn, true);

	return 0;
}

#define QED_MCP_RESUME_SLEEP_MS	10

int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 cpu_mode, cpu_state;

	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);

	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
	msleep(QED_MCP_RESUME_SLEEP_MS);
	cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);

	if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
		DP_NOTICE(p_hwfn,
			  "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
			  cpu_mode, cpu_state);
		return -EBUSY;
	}

	qed_mcp_cmd_set_blocking(p_hwfn, false);

	return 0;
}
int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     enum qed_ov_client client)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	switch (client) {
	case QED_OV_CLIENT_DRV:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
		break;
	case QED_OV_CLIENT_USER:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
		break;
	case QED_OV_CLIENT_VENDOR_SPEC:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid client type %d\n", client);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   enum qed_ov_driver_state drv_state)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	switch (drv_state) {
	case QED_OV_DRIVER_STATE_NOT_LOADED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
		break;
	case QED_OV_DRIVER_STATE_DISABLED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
		break;
	case QED_OV_DRIVER_STATE_ACTIVE:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid driver state %d\n", drv_state);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send driver state\n");

	return rc;
}
int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u16 mtu)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT;
	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);

	return rc;
}
int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, const u8 *mac)
{
	struct qed_mcp_mb_params mb_params;
	u32 mfw_mac[2];
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
	mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
			  DRV_MSG_CODE_VMAC_TYPE_SHIFT;
	mb_params.param |= MCP_PF_ID(p_hwfn);

	/* MCP is BE, and on LE platforms PCI would swap access to SHMEM
	 * in 32-bit granularity.
	 * So the MAC has to be set in native order [and not byte order],
	 * otherwise it would be read incorrectly by MFW after swap.
	 */
	mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
	mfw_mac[1] = mac[4] << 24 | mac[5] << 16;

	mb_params.p_data_src = (u8 *)mfw_mac;
	mb_params.data_src_size = 8;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);

	/* Store primary MAC for later possible WoL */
	memcpy(p_hwfn->cdev->wol_mac, mac, ETH_ALEN);

	return rc;
}
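/* Illustrative example (added comment, not from the original source): with
 * the packing above, a MAC of 02:11:22:33:44:55 becomes
 * mfw_mac[0] = 0x02112233 and mfw_mac[1] = 0x44550000, so the bytes keep
 * their wire order when the MFW reads SHMEM in 32-bit words.
 */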
int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, enum qed_ov_wol wol)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	if (p_hwfn->hw_info.b_wol_support == QED_WOL_SUPPORT_NONE) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Can't change WoL configuration when WoL isn't supported\n");
		return -EINVAL;
	}

	switch (wol) {
	case QED_OV_WOL_DEFAULT:
		drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
		break;
	case QED_OV_WOL_DISABLED:
		drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
		break;
	case QED_OV_WOL_ENABLED:
		drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);

	/* Store the WoL update for a future unload */
	p_hwfn->cdev->wol_config = (u8)wol;

	return rc;
}

int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      enum qed_ov_eswitch eswitch)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	switch (eswitch) {
	case QED_OV_ESWITCH_NONE:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
		break;
	case QED_OV_ESWITCH_VEB:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
		break;
	case QED_OV_ESWITCH_VEPA:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);

	return rc;
}
int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, enum qed_led_mode mode)
{
	u32 resp = 0, param = 0, drv_mb_param;
	int rc;

	switch (mode) {
	case QED_LED_MODE_ON:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
		break;
	case QED_LED_MODE_OFF:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
		break;
	case QED_LED_MODE_RESTORE:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
			 drv_mb_param, &resp, &param);

	return rc;
}

int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u32 mask_parities)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
			 mask_parities, &resp, &param);

	if (rc) {
		DP_ERR(p_hwfn,
		       "MCP response failure for mask parities, aborting\n");
	} else if (resp != FW_MSG_CODE_OK) {
		DP_ERR(p_hwfn,
		       "MCP did not acknowledge mask parity request. Old MFW?\n");
		rc = -EINVAL;
	}

	return rc;
}
int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
{
	u32 bytes_left = len, offset = 0, bytes_to_copy, read_len = 0;
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	u32 resp = 0, resp_param = 0;
	struct qed_ptt *p_ptt;
	int rc = 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;

	while (bytes_left > 0) {
		bytes_to_copy = min_t(u32, bytes_left, MCP_DRV_NVM_BUF_LEN);

		rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
					DRV_MSG_CODE_NVM_READ_NVRAM,
					addr + offset +
					(bytes_to_copy <<
					 DRV_MB_PARAM_NVM_LEN_OFFSET),
					&resp, &resp_param,
					&read_len,
					(u32 *)(p_buf + offset), true);

		if (rc || (resp != FW_MSG_CODE_NVM_OK)) {
			DP_NOTICE(cdev, "MCP command rc = %d\n", rc);
			break;
		}

		offset += read_len;
		bytes_left -= read_len;
	}

	cdev->mcp_nvm_resp = resp;
	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}

int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;

	memcpy(p_buf, &cdev->mcp_nvm_resp, sizeof(cdev->mcp_nvm_resp));
	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}
int qed_mcp_nvm_write(struct qed_dev *cdev,
		      u32 cmd, u32 addr, u8 *p_buf, u32 len)
{
	u32 buf_idx = 0, buf_size, nvm_cmd, nvm_offset, resp = 0, param;
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;

	switch (cmd) {
	case QED_PUT_FILE_BEGIN:
		nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN;
		break;
	case QED_PUT_FILE_DATA:
		nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
		break;
	case QED_NVM_WRITE_NVRAM:
		nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid nvm write command 0x%x\n", cmd);
		rc = -EINVAL;
		goto out;
	}

	buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN);
	while (buf_idx < len) {
		if (cmd == QED_PUT_FILE_BEGIN)
			nvm_offset = addr;
		else
			nvm_offset = ((buf_size <<
				       DRV_MB_PARAM_NVM_LEN_OFFSET) | addr) +
				      buf_idx;
		rc = qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
					&resp, &param, buf_size,
					(u32 *)&p_buf[buf_idx]);
		if (rc) {
			DP_NOTICE(cdev, "nvm write failed, rc = %d\n", rc);
			resp = FW_MSG_CODE_ERROR;
			break;
		}

		if (resp != FW_MSG_CODE_OK &&
		    resp != FW_MSG_CODE_NVM_OK &&
		    resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
			DP_NOTICE(cdev,
				  "nvm write failed, resp = 0x%08x\n", resp);
			rc = -EINVAL;
			break;
		}

		/* This can be a lengthy process, and it's possible scheduler
		 * isn't pre-emptable. Sleep a bit to prevent CPU hogging.
		 */
		if (buf_idx % 0x1000 > (buf_idx + buf_size) % 0x1000)
			usleep_range(1000, 2000);

		/* For MBI upgrade, MFW response includes the next buffer offset
		 * to be delivered to MFW.
		 */
		if (param && cmd == QED_PUT_FILE_DATA) {
			buf_idx =
			QED_MFW_GET_FIELD(param,
					  FW_MB_PARAM_NVM_PUT_FILE_REQ_OFFSET);
			buf_size =
			QED_MFW_GET_FIELD(param,
					  FW_MB_PARAM_NVM_PUT_FILE_REQ_SIZE);
		} else {
			buf_idx += buf_size;
			buf_size = min_t(u32, (len - buf_idx),
					 MCP_DRV_NVM_BUF_LEN);
		}
	}

	cdev->mcp_nvm_resp = resp;
out:
	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}
int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			 u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf)
{
	u32 bytes_left, bytes_to_copy, buf_size, nvm_offset = 0;
	u32 resp, param;
	int rc;

	nvm_offset |= (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) &
		       DRV_MB_PARAM_TRANSCEIVER_PORT_MASK;
	nvm_offset |= (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET) &
		       DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK;

	addr = offset;
	offset = 0;
	bytes_left = len;
	while (bytes_left > 0) {
		bytes_to_copy = min_t(u32, bytes_left,
				      MAX_I2C_TRANSACTION_SIZE);
		nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
			       DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
		nvm_offset |= ((addr + offset) <<
			       DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET) &
			       DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK;
		nvm_offset |= (bytes_to_copy <<
			       DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET) &
			       DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK;
		rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
					DRV_MSG_CODE_TRANSCEIVER_READ,
					nvm_offset, &resp, &param, &buf_size,
					(u32 *)(p_buf + offset), true);
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to send a transceiver read command to the MFW. rc = %d.\n",
				  rc);
			return rc;
		}

		if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
			return -ENODEV;
		else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
			return -EINVAL;

		offset += buf_size;
		bytes_left -= buf_size;
	}

	return 0;
}
*p_hwfn
, struct qed_ptt
*p_ptt
)
3249 u32 drv_mb_param
= 0, rsp
, param
;
3252 drv_mb_param
= (DRV_MB_PARAM_BIST_REGISTER_TEST
<<
3253 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT
);
3255 rc
= qed_mcp_cmd(p_hwfn
, p_ptt
, DRV_MSG_CODE_BIST_TEST
,
3256 drv_mb_param
, &rsp
, ¶m
);
3261 if (((rsp
& FW_MSG_CODE_MASK
) != FW_MSG_CODE_OK
) ||
3262 (param
!= DRV_MB_PARAM_BIST_RC_PASSED
))
3268 int qed_mcp_bist_clock_test(struct qed_hwfn
*p_hwfn
, struct qed_ptt
*p_ptt
)
3270 u32 drv_mb_param
, rsp
, param
;
3273 drv_mb_param
= (DRV_MB_PARAM_BIST_CLOCK_TEST
<<
3274 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT
);
3276 rc
= qed_mcp_cmd(p_hwfn
, p_ptt
, DRV_MSG_CODE_BIST_TEST
,
3277 drv_mb_param
, &rsp
, ¶m
);
3282 if (((rsp
& FW_MSG_CODE_MASK
) != FW_MSG_CODE_OK
) ||
3283 (param
!= DRV_MB_PARAM_BIST_RC_PASSED
))
3289 int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn
*p_hwfn
,
3290 struct qed_ptt
*p_ptt
,
3293 u32 drv_mb_param
= 0, rsp
;
3296 drv_mb_param
= (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES
<<
3297 DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT
);
3299 rc
= qed_mcp_cmd(p_hwfn
, p_ptt
, DRV_MSG_CODE_BIST_TEST
,
3300 drv_mb_param
, &rsp
, num_images
);
3304 if (((rsp
& FW_MSG_CODE_MASK
) != FW_MSG_CODE_OK
))
int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct bist_nvm_image_att *p_image_att,
				   u32 image_index)
{
	u32 buf_size = 0, param, resp = 0, resp_param = 0;
	int rc;

	param = DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
		DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT;
	param |= image_index << DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT;

	rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
				DRV_MSG_CODE_BIST_TEST, param,
				&resp, &resp_param,
				&buf_size,
				(u32 *)p_image_att, false);
	if (rc)
		return rc;

	if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (p_image_att->return_code != 1))
		rc = -EIO;

	return rc;
}
int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
{
	struct qed_nvm_image_info nvm_info;
	struct qed_ptt *p_ptt;
	int rc;
	u32 i;

	if (p_hwfn->nvm_info.valid)
		return 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_ERR(p_hwfn, "failed to acquire ptt\n");
		return -EBUSY;
	}

	/* Acquire from MFW the amount of available images */
	nvm_info.num_images = 0;
	rc = qed_mcp_bist_nvm_get_num_images(p_hwfn,
					     p_ptt, &nvm_info.num_images);
	if (rc == -EOPNOTSUPP) {
		DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n");
		goto out;
	} else if (rc || !nvm_info.num_images) {
		DP_ERR(p_hwfn, "Failed getting number of images\n");
		goto err0;
	}

	nvm_info.image_att = kmalloc_array(nvm_info.num_images,
					   sizeof(struct bist_nvm_image_att),
					   GFP_KERNEL);
	if (!nvm_info.image_att) {
		rc = -ENOMEM;
		goto err0;
	}

	/* Iterate over images and get their attributes */
	for (i = 0; i < nvm_info.num_images; i++) {
		rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt,
						    &nvm_info.image_att[i], i);
		if (rc) {
			DP_ERR(p_hwfn,
			       "Failed getting image index %d attributes\n", i);
			goto err1;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", i,
			   nvm_info.image_att[i].len);
	}
out:
	/* Update hwfn's nvm_info */
	if (nvm_info.num_images) {
		p_hwfn->nvm_info.num_images = nvm_info.num_images;
		kfree(p_hwfn->nvm_info.image_att);
		p_hwfn->nvm_info.image_att = nvm_info.image_att;
		p_hwfn->nvm_info.valid = true;
	}

	qed_ptt_release(p_hwfn, p_ptt);
	return 0;

err1:
	kfree(nvm_info.image_att);
err0:
	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}
void qed_mcp_nvm_info_free(struct qed_hwfn *p_hwfn)
{
	kfree(p_hwfn->nvm_info.image_att);
	p_hwfn->nvm_info.image_att = NULL;
	p_hwfn->nvm_info.valid = false;
}
int
qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
			  enum qed_nvm_images image_id,
			  struct qed_nvm_image_att *p_image_att)
{
	enum nvm_image_type type;
	int rc;
	u32 i;

	/* Translate image_id into MFW definitions */
	switch (image_id) {
	case QED_NVM_IMAGE_ISCSI_CFG:
		type = NVM_TYPE_ISCSI_CFG;
		break;
	case QED_NVM_IMAGE_FCOE_CFG:
		type = NVM_TYPE_FCOE_CFG;
		break;
	case QED_NVM_IMAGE_MDUMP:
		type = NVM_TYPE_MDUMP;
		break;
	case QED_NVM_IMAGE_NVM_CFG1:
		type = NVM_TYPE_NVM_CFG1;
		break;
	case QED_NVM_IMAGE_DEFAULT_CFG:
		type = NVM_TYPE_DEFAULT_CFG;
		break;
	case QED_NVM_IMAGE_NVM_META:
		type = NVM_TYPE_NVM_META;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n",
			  image_id);
		return -EINVAL;
	}

	rc = qed_mcp_nvm_info_populate(p_hwfn);
	if (rc)
		return rc;

	for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
		if (type == p_hwfn->nvm_info.image_att[i].image_type)
			break;
	if (i == p_hwfn->nvm_info.num_images) {
		DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
			   "Failed to find nvram image of type %08x\n",
			   image_id);
		return -ENOENT;
	}

	p_image_att->start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
	p_image_att->length = p_hwfn->nvm_info.image_att[i].len;

	return 0;
}
int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
			  enum qed_nvm_images image_id,
			  u8 *p_buffer, u32 buffer_len)
{
	struct qed_nvm_image_att image_att;
	int rc;

	memset(p_buffer, 0, buffer_len);

	rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
	if (rc)
		return rc;

	/* Validate sizes - both the image's and the supplied buffer's */
	if (image_att.length <= 4) {
		DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
			   "Image [%d] is too small - only %d bytes\n",
			   image_id, image_att.length);
		return -EINVAL;
	}

	if (image_att.length > buffer_len) {
		DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
			   "Image [%d] is too big - %08x bytes where only %08x are available\n",
			   image_id, image_att.length, buffer_len);
		return -ENOMEM;
	}

	return qed_mcp_nvm_read(p_hwfn->cdev, image_att.start_addr,
				p_buffer, image_att.length);
}
static enum resource_id_enum
qed_mcp_get_mfw_res_id(enum qed_resources res_id)
{
	enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;

	switch (res_id) {
	case QED_SB:
		mfw_res_id = RESOURCE_NUM_SB_E;
		break;
	case QED_L2_QUEUE:
		mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
		break;
	case QED_VPORT:
		mfw_res_id = RESOURCE_NUM_VPORT_E;
		break;
	case QED_RSS_ENG:
		mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
		break;
	case QED_PQ:
		mfw_res_id = RESOURCE_NUM_PQ_E;
		break;
	case QED_RL:
		mfw_res_id = RESOURCE_NUM_RL_E;
		break;
	case QED_MAC:
	case QED_VLAN:
		/* Each VFC resource can accommodate both a MAC and a VLAN */
		mfw_res_id = RESOURCE_VFC_FILTER_E;
		break;
	case QED_ILT:
		mfw_res_id = RESOURCE_ILT_E;
		break;
	case QED_LL2_RAM_QUEUE:
		mfw_res_id = RESOURCE_LL2_QUEUE_E;
		break;
	case QED_LL2_CTX_QUEUE:
		mfw_res_id = RESOURCE_LL2_CQS_E;
		break;
	case QED_RDMA_CNQ_RAM:
	case QED_CMDQS_CQS:
		/* CNQ/CMDQS are the same resource */
		mfw_res_id = RESOURCE_CQS_E;
		break;
	case QED_RDMA_STATS_QUEUE:
		mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
		break;
	case QED_BDQ:
		mfw_res_id = RESOURCE_BDQ_E;
		break;
	default:
		break;
	}

	return mfw_res_id;
}
#define QED_RESC_ALLOC_VERSION_MAJOR	2
#define QED_RESC_ALLOC_VERSION_MINOR	0
#define QED_RESC_ALLOC_VERSION				     \
	((QED_RESC_ALLOC_VERSION_MAJOR <<		     \
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
	 (QED_RESC_ALLOC_VERSION_MINOR <<		     \
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))

struct qed_resc_alloc_in_params {
	u32 cmd;
	enum qed_resources res_id;
	u32 resc_max_val;
};

struct qed_resc_alloc_out_params {
	u32 mcp_resp;
	u32 mcp_param;
	u32 resc_num;
	u32 resc_start;
	u32 vf_resc_num;
	u32 vf_resc_start;
	u32 flags;
};
static int
qed_mcp_resc_allocation_msg(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    struct qed_resc_alloc_in_params *p_in_params,
			    struct qed_resc_alloc_out_params *p_out_params)
{
	struct qed_mcp_mb_params mb_params;
	struct resource_info mfw_resc_info;
	int rc;

	memset(&mfw_resc_info, 0, sizeof(mfw_resc_info));

	mfw_resc_info.res_id = qed_mcp_get_mfw_res_id(p_in_params->res_id);
	if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
		DP_ERR(p_hwfn,
		       "Failed to match resource %d [%s] with the MFW resources\n",
		       p_in_params->res_id,
		       qed_hw_get_resc_name(p_in_params->res_id));
		return -EINVAL;
	}

	switch (p_in_params->cmd) {
	case DRV_MSG_SET_RESOURCE_VALUE_MSG:
		mfw_resc_info.size = p_in_params->resc_max_val;
		break;
	case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
		       p_in_params->cmd);
		return -EINVAL;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = p_in_params->cmd;
	mb_params.param = QED_RESC_ALLOC_VERSION;
	mb_params.p_data_src = &mfw_resc_info;
	mb_params.data_src_size = sizeof(mfw_resc_info);
	mb_params.p_data_dst = mb_params.p_data_src;
	mb_params.data_dst_size = mb_params.data_src_size;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
		   p_in_params->cmd,
		   p_in_params->res_id,
		   qed_hw_get_resc_name(p_in_params->res_id),
		   QED_MFW_GET_FIELD(mb_params.param,
				     DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
		   QED_MFW_GET_FIELD(mb_params.param,
				     DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
		   p_in_params->resc_max_val);

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	p_out_params->mcp_resp = mb_params.mcp_resp;
	p_out_params->mcp_param = mb_params.mcp_param;
	p_out_params->resc_num = mfw_resc_info.size;
	p_out_params->resc_start = mfw_resc_info.offset;
	p_out_params->vf_resc_num = mfw_resc_info.vf_size;
	p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
	p_out_params->flags = mfw_resc_info.flags;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
		   QED_MFW_GET_FIELD(p_out_params->mcp_param,
				     FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
		   QED_MFW_GET_FIELD(p_out_params->mcp_param,
				     FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
		   p_out_params->resc_num,
		   p_out_params->resc_start,
		   p_out_params->vf_resc_num,
		   p_out_params->vf_resc_start, p_out_params->flags);

	return 0;
}
int
qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 enum qed_resources res_id,
			 u32 resc_max_val, u32 *p_mcp_resp)
{
	struct qed_resc_alloc_out_params out_params;
	struct qed_resc_alloc_in_params in_params;
	int rc;

	memset(&in_params, 0, sizeof(in_params));
	in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
	in_params.res_id = res_id;
	in_params.resc_max_val = resc_max_val;
	memset(&out_params, 0, sizeof(out_params));
	rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
					 &out_params);
	if (rc)
		return rc;

	*p_mcp_resp = out_params.mcp_resp;

	return 0;
}

int
qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      enum qed_resources res_id,
		      u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start)
{
	struct qed_resc_alloc_out_params out_params;
	struct qed_resc_alloc_in_params in_params;
	int rc;

	memset(&in_params, 0, sizeof(in_params));
	in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
	in_params.res_id = res_id;
	memset(&out_params, 0, sizeof(out_params));
	rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
					 &out_params);
	if (rc)
		return rc;

	*p_mcp_resp = out_params.mcp_resp;

	if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
		*p_resc_num = out_params.resc_num;
		*p_resc_start = out_params.resc_start;
	}

	return 0;
}
int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 mcp_resp, mcp_param;

	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
			   &mcp_resp, &mcp_param);
}

static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				u32 param, u32 *p_mcp_resp, u32 *p_mcp_param)
{
	int rc;

	rc = qed_mcp_cmd_nosleep(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD,
				 param, p_mcp_resp, p_mcp_param);
	if (rc)
		return rc;

	if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The resource command is unsupported by the MFW\n");
		return -EINVAL;
	}

	if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
		u8 opcode = QED_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);

		DP_NOTICE(p_hwfn,
			  "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
			  param, opcode);
		return -EINVAL;
	}

	return rc;
}
static int
__qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_resc_lock_params *p_params)
{
	u32 param = 0, mcp_resp, mcp_param;
	u8 opcode;
	int rc;

	switch (p_params->timeout) {
	case QED_MCP_RESC_LOCK_TO_DEFAULT:
		opcode = RESOURCE_OPCODE_REQ;
		p_params->timeout = 0;
		break;
	case QED_MCP_RESC_LOCK_TO_NONE:
		opcode = RESOURCE_OPCODE_REQ_WO_AGING;
		p_params->timeout = 0;
		break;
	default:
		opcode = RESOURCE_OPCODE_REQ_W_AGING;
		break;
	}

	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
		   param, p_params->timeout, opcode, p_params->resource);

	/* Attempt to acquire the resource */
	rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
	if (rc)
		return rc;

	/* Analyze the response */
	p_params->owner = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
	opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
		   mcp_param, opcode, p_params->owner);

	switch (opcode) {
	case RESOURCE_OPCODE_GNT:
		p_params->b_granted = true;
		break;
	case RESOURCE_OPCODE_BUSY:
		p_params->b_granted = false;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
			  mcp_param, opcode);
		return -EINVAL;
	}

	return 0;
}
int
qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
		  struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params)
{
	u32 retry_cnt = 0;
	int rc;

	do {
		/* No need for an interval before the first iteration */
		if (retry_cnt) {
			if (p_params->sleep_b4_retry) {
				u16 retry_interval_in_ms =
				    DIV_ROUND_UP(p_params->retry_interval,
						 1000);

				msleep(retry_interval_in_ms);
			} else {
				udelay(p_params->retry_interval);
			}
		}

		rc = __qed_mcp_resc_lock(p_hwfn, p_ptt, p_params);
		if (rc)
			return rc;

		if (p_params->b_granted)
			break;
	} while (retry_cnt++ < p_params->retry_num);

	return 0;
}
int
qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_resc_unlock_params *p_params)
{
	u32 param = 0, mcp_resp, mcp_param;
	u8 opcode;
	int rc;

	opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
				   : RESOURCE_OPCODE_RELEASE;
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
		   param, opcode, p_params->resource);

	/* Attempt to release the resource */
	rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
	if (rc)
		return rc;

	/* Analyze the response */
	opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
		   mcp_param, opcode);

	switch (opcode) {
	case RESOURCE_OPCODE_RELEASED_PREVIOUS:
		DP_INFO(p_hwfn,
			"Resource unlock request for an already released resource [%d]\n",
			p_params->resource);
		fallthrough;
	case RESOURCE_OPCODE_RELEASED:
		p_params->b_released = true;
		break;
	case RESOURCE_OPCODE_WRONG_OWNER:
		p_params->b_released = false;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
			  mcp_param, opcode);
		return -EINVAL;
	}

	return 0;
}
void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
				    struct qed_resc_unlock_params *p_unlock,
				    enum qed_resc_lock
				    resource, bool b_is_permanent)
{
	if (p_lock) {
		memset(p_lock, 0, sizeof(*p_lock));

		/* Permanent resources don't require aging, and there's no
		 * point in trying to acquire them more than once since it's
		 * unexpected another entity would release them.
		 */
		if (b_is_permanent) {
			p_lock->timeout = QED_MCP_RESC_LOCK_TO_NONE;
		} else {
			p_lock->retry_num = QED_MCP_RESC_LOCK_RETRY_CNT_DFLT;
			p_lock->retry_interval =
			    QED_MCP_RESC_LOCK_RETRY_VAL_DFLT;
			p_lock->sleep_b4_retry = true;
		}

		p_lock->resource = resource;
	}

	if (p_unlock) {
		memset(p_unlock, 0, sizeof(*p_unlock));
		p_unlock->resource = resource;
	}
}
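/* Illustrative usage sketch (added comment, not from the original source): a
 * typical caller pairs the helpers above roughly as follows, where
 * QED_RESC_LOCK_DBG_DUMP stands in for whatever lock identifier the caller
 * actually needs:
 *
 *	struct qed_resc_unlock_params unlock_params;
 *	struct qed_resc_lock_params lock_params;
 *
 *	qed_mcp_resc_lock_default_init(&lock_params, &unlock_params,
 *				       QED_RESC_LOCK_DBG_DUMP, false);
 *	if (!qed_mcp_resc_lock(p_hwfn, p_ptt, &lock_params) &&
 *	    lock_params.b_granted) {
 *		// ... use the MFW-arbitrated resource ...
 *		qed_mcp_resc_unlock(p_hwfn, p_ptt, &unlock_params);
 *	}
 */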
bool qed_mcp_is_smart_an_supported(struct qed_hwfn *p_hwfn)
{
	return !!(p_hwfn->mcp_info->capabilities &
		  FW_MB_PARAM_FEATURE_SUPPORT_SMARTLINQ);
}

int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 mcp_resp;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
			 0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
	if (!rc)
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_PROBE),
			   "MFW supported features: %08x\n",
			   p_hwfn->mcp_info->capabilities);

	return rc;
}
int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 mcp_resp, mcp_param, features;

	features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE |
		   DRV_MB_PARAM_FEATURE_SUPPORT_FUNC_VLINK |
		   DRV_MB_PARAM_FEATURE_SUPPORT_PORT_FEC_CONTROL;

	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
			   features, &mcp_resp, &mcp_param);
}
int qed_mcp_get_engine_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_mb_params mb_params = {0};
	struct qed_dev *cdev = p_hwfn->cdev;
	u8 fir_valid, l2_valid;
	int rc;

	mb_params.cmd = DRV_MSG_CODE_GET_ENGINE_CONFIG;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The get_engine_config command is unsupported by the MFW\n");
		return -EOPNOTSUPP;
	}

	fir_valid = QED_MFW_GET_FIELD(mb_params.mcp_param,
				      FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALID);
	if (fir_valid)
		cdev->fir_affin =
		    QED_MFW_GET_FIELD(mb_params.mcp_param,
				      FW_MB_PARAM_ENG_CFG_FIR_AFFIN_VALUE);

	l2_valid = QED_MFW_GET_FIELD(mb_params.mcp_param,
				     FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALID);
	if (l2_valid)
		cdev->l2_affin_hint =
		    QED_MFW_GET_FIELD(mb_params.mcp_param,
				      FW_MB_PARAM_ENG_CFG_L2_AFFIN_VALUE);

	DP_INFO(p_hwfn,
		"Engine affinity config: FIR={valid %hhd, value %hhd}, L2_hint={valid %hhd, value %hhd}\n",
		fir_valid, cdev->fir_affin, l2_valid, cdev->l2_affin_hint);

	return 0;
}
int qed_mcp_get_ppfid_bitmap(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_mb_params mb_params = {0};
	struct qed_dev *cdev = p_hwfn->cdev;
	int rc;

	mb_params.cmd = DRV_MSG_CODE_GET_PPFID_BITMAP;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The get_ppfid_bitmap command is unsupported by the MFW\n");
		return -EOPNOTSUPP;
	}

	cdev->ppfid_bitmap = QED_MFW_GET_FIELD(mb_params.mcp_param,
					       FW_MB_PARAM_PPFID_BITMAP);

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "PPFID bitmap 0x%hhx\n",
		   cdev->ppfid_bitmap);

	return 0;
}
int qed_mcp_nvm_get_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
			u32 *p_len)
{
	u32 mb_param = 0, resp, param;
	int rc;

	QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id);
	if (flags & QED_NVM_CFG_OPTION_INIT)
		QED_MFW_SET_FIELD(mb_param,
				  DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1);
	if (flags & QED_NVM_CFG_OPTION_FREE)
		QED_MFW_SET_FIELD(mb_param,
				  DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1);
	if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) {
		QED_MFW_SET_FIELD(mb_param,
				  DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1);
		QED_MFW_SET_FIELD(mb_param,
				  DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID,
				  entity_id);
	}

	rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
				DRV_MSG_CODE_GET_NVM_CFG_OPTION,
				mb_param, &resp, &param, p_len,
				(u32 *)p_buf, false);

	return rc;
}
int qed_mcp_nvm_set_cfg(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			u16 option_id, u8 entity_id, u16 flags, u8 *p_buf,
			u32 len)
{
	u32 mb_param = 0, resp, param;

	QED_MFW_SET_FIELD(mb_param, DRV_MB_PARAM_NVM_CFG_OPTION_ID, option_id);
	if (flags & QED_NVM_CFG_OPTION_ALL)
		QED_MFW_SET_FIELD(mb_param,
				  DRV_MB_PARAM_NVM_CFG_OPTION_ALL, 1);
	if (flags & QED_NVM_CFG_OPTION_INIT)
		QED_MFW_SET_FIELD(mb_param,
				  DRV_MB_PARAM_NVM_CFG_OPTION_INIT, 1);
	if (flags & QED_NVM_CFG_OPTION_COMMIT)
		QED_MFW_SET_FIELD(mb_param,
				  DRV_MB_PARAM_NVM_CFG_OPTION_COMMIT, 1);
	if (flags & QED_NVM_CFG_OPTION_FREE)
		QED_MFW_SET_FIELD(mb_param,
				  DRV_MB_PARAM_NVM_CFG_OPTION_FREE, 1);
	if (flags & QED_NVM_CFG_OPTION_ENTITY_SEL) {
		QED_MFW_SET_FIELD(mb_param,
				  DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_SEL, 1);
		QED_MFW_SET_FIELD(mb_param,
				  DRV_MB_PARAM_NVM_CFG_OPTION_ENTITY_ID,
				  entity_id);
	}

	return qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt,
				  DRV_MSG_CODE_SET_NVM_CFG_OPTION,
				  mb_param, &resp, &param, len, (u32 *)p_buf);
}
#define QED_MCP_DBG_DATA_MAX_SIZE		MCP_DRV_NVM_BUF_LEN
#define QED_MCP_DBG_DATA_MAX_HEADER_SIZE	sizeof(u32)
#define QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE \
	(QED_MCP_DBG_DATA_MAX_SIZE - QED_MCP_DBG_DATA_MAX_HEADER_SIZE)

static int
__qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u8 *p_buf, u8 size)
{
	struct qed_mcp_mb_params mb_params;
	int rc;

	if (size > QED_MCP_DBG_DATA_MAX_SIZE) {
		DP_ERR(p_hwfn,
		       "Debug data size is %d while it should not exceed %d\n",
		       size, QED_MCP_DBG_DATA_MAX_SIZE);
		return -EINVAL;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_DEBUG_DATA_SEND;
	SET_MFW_FIELD(mb_params.param, DRV_MSG_CODE_DEBUG_DATA_SEND_SIZE, size);
	mb_params.p_data_src = p_buf;
	mb_params.data_src_size = size;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	if (mb_params.mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The DEBUG_DATA_SEND command is unsupported by the MFW\n");
		return -EOPNOTSUPP;
	} else if (mb_params.mcp_resp == (u32)FW_MSG_CODE_DEBUG_NOT_ENABLED) {
		DP_INFO(p_hwfn, "The DEBUG_DATA_SEND command is not enabled\n");
		return -EBUSY;
	} else if (mb_params.mcp_resp != (u32)FW_MSG_CODE_DEBUG_DATA_SEND_OK) {
		DP_NOTICE(p_hwfn,
			  "Failed to send debug data to the MFW [resp 0x%08x]\n",
			  mb_params.mcp_resp);
		return -EINVAL;
	}

	return 0;
}
enum qed_mcp_dbg_data_type {
	QED_MCP_DBG_DATA_TYPE_RAW,
};

/* Header format: [31:28] PFID, [27:20] flags, [19:12] type, [11:0] S/N */
#define QED_MCP_DBG_DATA_HDR_SN_OFFSET		0
#define QED_MCP_DBG_DATA_HDR_SN_MASK		0x00000fff
#define QED_MCP_DBG_DATA_HDR_TYPE_OFFSET	12
#define QED_MCP_DBG_DATA_HDR_TYPE_MASK		0x000ff000
#define QED_MCP_DBG_DATA_HDR_FLAGS_OFFSET	20
#define QED_MCP_DBG_DATA_HDR_FLAGS_MASK		0x0ff00000
#define QED_MCP_DBG_DATA_HDR_PF_OFFSET		28
#define QED_MCP_DBG_DATA_HDR_PF_MASK		0xf0000000

#define QED_MCP_DBG_DATA_HDR_FLAGS_FIRST	0x1
#define QED_MCP_DBG_DATA_HDR_FLAGS_LAST		0x2
static int
qed_mcp_send_debug_data(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
			enum qed_mcp_dbg_data_type type, u8 *p_buf, u32 size)
{
	u8 raw_data[QED_MCP_DBG_DATA_MAX_SIZE], *p_tmp_buf = p_buf;
	u32 tmp_size = size, *p_header, *p_payload;
	u32 flags = 0;
	u16 seq;
	int rc;

	p_header = (u32 *)raw_data;
	p_payload = (u32 *)(raw_data + QED_MCP_DBG_DATA_MAX_HEADER_SIZE);

	seq = (u16)atomic_inc_return(&p_hwfn->mcp_info->dbg_data_seq);

	/* First chunk is marked as 'first' */
	flags |= QED_MCP_DBG_DATA_HDR_FLAGS_FIRST;

	*p_header = 0;
	SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_SN, seq);
	SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_TYPE, type);
	SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags);
	SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_PF, p_hwfn->abs_pf_id);

	while (tmp_size > QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE) {
		memcpy(p_payload, p_tmp_buf, QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE);
		rc = __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data,
					       QED_MCP_DBG_DATA_MAX_SIZE);
		if (rc)
			return rc;

		/* Clear the 'first' marking after sending the first chunk */
		if (p_tmp_buf == p_buf) {
			flags &= ~QED_MCP_DBG_DATA_HDR_FLAGS_FIRST;
			SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS,
				      flags);
		}

		p_tmp_buf += QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE;
		tmp_size -= QED_MCP_DBG_DATA_MAX_PAYLOAD_SIZE;
	}

	/* Last chunk is marked as 'last' */
	flags |= QED_MCP_DBG_DATA_HDR_FLAGS_LAST;
	SET_MFW_FIELD(*p_header, QED_MCP_DBG_DATA_HDR_FLAGS, flags);
	memcpy(p_payload, p_tmp_buf, tmp_size);

	/* Casting the left size to u8 is ok since at this point it is <= 32 */
	return __qed_mcp_send_debug_data(p_hwfn, p_ptt, raw_data,
					 (u8)(QED_MCP_DBG_DATA_MAX_HEADER_SIZE +
					      tmp_size));
}
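/* Illustrative example (added comment, not from the original source): with
 * the header layout documented above, a chunk sent by PF 2 with flags
 * FIRST | LAST (0x3), type QED_MCP_DBG_DATA_TYPE_RAW (0) and sequence
 * number 5 carries the header value 0x20300005.
 */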
int
qed_mcp_send_raw_debug_data(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt, u8 *p_buf, u32 size)
{
	return qed_mcp_send_debug_data(p_hwfn, p_ptt,
				       QED_MCP_DBG_DATA_TYPE_RAW, p_buf, size);
}

bool qed_mcp_is_esl_supported(struct qed_hwfn *p_hwfn)
{
	return !!(p_hwfn->mcp_info->capabilities &
		  FW_MB_PARAM_FEATURE_SUPPORT_ENHANCED_SYS_LCK);
}
int qed_mcp_get_esl_status(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool *active)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MANAGEMENT_STATUS, 0, &resp, &param);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to send ESL command, rc = %d\n", rc);
		return rc;
	}

	*active = !!(param & FW_MB_PARAM_MANAGEMENT_STATUS_LOCKDOWN_ENABLED);