/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/etherdevice.h>
#include "qed.h"
#include "qed_dcbx.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_sriov.h"
#define QED_MCP_RESP_ITER_US	10

#define QED_DRV_MB_MAX_RETRIES	(500 * 1000)	/* Account for 5 sec */
#define QED_MCP_RESET_RETRIES	(50 * 1000)	/* Account for 500 msec */

#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val)	     \
	qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
	       _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field))

#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
		  DRV_ID_PDA_COMP_VER_SHIFT)

#define MCP_BYTES_PER_MBIT_SHIFT 17
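/* The DRV_MB_RD/WR helpers access fields of the driver mailbox
 * (struct public_drv_mb) that lives in MCP shared memory at
 * mcp_info->drv_mb_addr. The retry budgets above are counted in
 * QED_MCP_RESP_ITER_US (10 usec) polling steps:
 * 500 * 1000 * 10 usec = 5 sec for a regular command and
 * 50 * 1000 * 10 usec = 500 msec for an MCP reset.
 */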
bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
		return false;

	return true;
}
void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}
void qed_mcp_read_mb(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	u32 tmp, i;

	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		tmp = qed_rd(p_hwfn, p_ptt,
			     p_hwfn->mcp_info->mfw_mb_addr +
			     (i << 2) + sizeof(u32));

		/* The MB data is actually BE; Need to force it to cpu */
		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
			be32_to_cpu((__force __be32)tmp);
	}
}
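/* Each mailbox command that is still in flight is tracked by a
 * qed_mcp_cmd_elem on mcp_info->cmd_list, keyed by the sequence number that
 * was written into the drv_mb_header. The list is protected by
 * mcp_info->cmd_lock.
 */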
struct qed_mcp_cmd_elem {
	struct list_head list;
	struct qed_mcp_mb_params *p_mb_params;
	u16 expected_seq_num;
	bool b_is_completed;
};
/* Must be called while cmd_lock is acquired */
static struct qed_mcp_cmd_elem *
qed_mcp_cmd_add_elem(struct qed_hwfn *p_hwfn,
		     struct qed_mcp_mb_params *p_mb_params,
		     u16 expected_seq_num)
{
	struct qed_mcp_cmd_elem *p_cmd_elem = NULL;

	p_cmd_elem = kzalloc(sizeof(*p_cmd_elem), GFP_ATOMIC);
	if (!p_cmd_elem)
		goto out;

	p_cmd_elem->p_mb_params = p_mb_params;
	p_cmd_elem->expected_seq_num = expected_seq_num;
	list_add(&p_cmd_elem->list, &p_hwfn->mcp_info->cmd_list);
out:
	return p_cmd_elem;
}
/* Must be called while cmd_lock is acquired */
static void qed_mcp_cmd_del_elem(struct qed_hwfn *p_hwfn,
				 struct qed_mcp_cmd_elem *p_cmd_elem)
{
	list_del(&p_cmd_elem->list);
	kfree(p_cmd_elem);
}
/* Must be called while cmd_lock is acquired */
static struct qed_mcp_cmd_elem *qed_mcp_cmd_get_elem(struct qed_hwfn *p_hwfn,
						     u16 seq_num)
{
	struct qed_mcp_cmd_elem *p_cmd_elem = NULL;

	list_for_each_entry(p_cmd_elem, &p_hwfn->mcp_info->cmd_list, list) {
		if (p_cmd_elem->expected_seq_num == seq_num)
			return p_cmd_elem;
	}

	return NULL;
}
int qed_mcp_free(struct qed_hwfn *p_hwfn)
{
	if (p_hwfn->mcp_info) {
		struct qed_mcp_cmd_elem *p_cmd_elem, *p_tmp;

		kfree(p_hwfn->mcp_info->mfw_mb_cur);
		kfree(p_hwfn->mcp_info->mfw_mb_shadow);

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
		list_for_each_entry_safe(p_cmd_elem,
					 p_tmp,
					 &p_hwfn->mcp_info->cmd_list, list) {
			qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		}
		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
	}

	kfree(p_hwfn->mcp_info);
	p_hwfn->mcp_info = NULL;

	return 0;
}
/* Maximum of 1 sec to wait for the SHMEM ready indication */
#define QED_MCP_SHMEM_RDY_MAX_RETRIES	20
#define QED_MCP_SHMEM_RDY_ITER_MS	50
static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info = p_hwfn->mcp_info;
	u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES;
	u8 msec = QED_MCP_SHMEM_RDY_ITER_MS;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

	p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base) {
		DP_NOTICE(p_hwfn,
			  "The address of the MCP scratch-pad is not configured\n");
		return -EINVAL;
	}

	p_info->public_base |= GRCBASE_MCP;

	/* Get the MFW MB address and number of supported messages */
	mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt,
					    p_info->mfw_mb_addr +
					    offsetof(struct public_mfw_mb,
						     sup_msgs));

	/* The driver can notify that there was an MCP reset, and might read the
	 * SHMEM values before the MFW has completed initializing them.
	 * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a
	 * data ready indication.
	 */
	while (!p_info->mfw_mb_length && --cnt) {
		msleep(msec);
		p_info->mfw_mb_length =
			(u16)qed_rd(p_hwfn, p_ptt,
				    p_info->mfw_mb_addr +
				    offsetof(struct public_mfw_mb, sup_msgs));
	}

	if (!cnt) {
		DP_NOTICE(p_hwfn,
			  "Failed to get the SHMEM ready notification after %d msec\n",
			  QED_MCP_SHMEM_RDY_MAX_RETRIES * msec);
		return -EBUSY;
	}

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			     DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK;

	p_info->mcp_hist = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	return 0;
}
int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info;
	u32 size;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_KERNEL);
	if (!p_hwfn->mcp_info)
		return -ENOMEM;
	p_info = p_hwfn->mcp_info;

	/* Initialize the MFW spinlock */
	spin_lock_init(&p_info->cmd_lock);
	spin_lock_init(&p_info->link_lock);

	INIT_LIST_HEAD(&p_info->cmd_list);

	if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
		DP_NOTICE(p_hwfn, "MCP is not initialized\n");
		/* Do not free mcp_info here, since public_base indicate that
		 * the MCP is not initialized
		 */
		return 0;
	}

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = kzalloc(size, GFP_KERNEL);
	p_info->mfw_mb_shadow = kzalloc(size, GFP_KERNEL);
	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
		goto err;

	return 0;

err:
	qed_mcp_free(p_hwfn);
	return -ENOMEM;
}
static void qed_mcp_reread_offsets(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 generic_por_0 = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Use MCP history register to check if MCP reset occurred between init
	 * time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist != generic_por_0) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SP,
			   "Rereading MCP offsets [mcp_hist 0x%08x, generic_por_0 0x%08x]\n",
			   p_hwfn->mcp_info->mcp_hist, generic_por_0);

		qed_load_mcp_offsets(p_hwfn, p_ptt);
		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
}
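/* MISCS_REG_GENERIC_POR_0 is modified by the MCP on every reset, so the
 * reset request below is considered acknowledged once the register value
 * differs from the one sampled before DRV_MSG_CODE_MCP_RESET was sent.
 */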
int qed_mcp_reset(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 org_mcp_reset_seq, seq, delay = QED_MCP_RESP_ITER_US, cnt = 0;
	int rc = 0;

	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn,
			  "The MFW is not responsive. Avoid sending MCP_RESET mailbox command.\n");
		return -EBUSY;
	}

	/* Ensure that only a single thread is accessing the mailbox */
	spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

	org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	/* Set drv command along with the updated sequence */
	qed_mcp_reread_offsets(p_hwfn, p_ptt);
	seq = ++p_hwfn->mcp_info->drv_mb_seq;
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		udelay(delay);
		/* Give the FW up to 500 msec (50*1000*10usec) */
	} while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
					      MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < QED_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = -EAGAIN;
	}

	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

	return rc;
}
/* Must be called while cmd_lock is acquired */
static bool qed_mcp_has_pending_cmd(struct qed_hwfn *p_hwfn)
{
	struct qed_mcp_cmd_elem *p_cmd_elem;

	/* There is at most one pending command at a certain time, and if it
	 * exists - it is placed at the HEAD of the list.
	 */
	if (!list_empty(&p_hwfn->mcp_info->cmd_list)) {
		p_cmd_elem = list_first_entry(&p_hwfn->mcp_info->cmd_list,
					      struct qed_mcp_cmd_elem, list);
		return !p_cmd_elem->b_is_completed;
	}

	return false;
}
/* Must be called while cmd_lock is acquired */
static int
qed_mcp_update_pending_cmd(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_mb_params *p_mb_params;
	struct qed_mcp_cmd_elem *p_cmd_elem;
	u32 mcp_resp;
	u16 seq_num;

	mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);
	seq_num = (u16)(mcp_resp & FW_MSG_SEQ_NUMBER_MASK);

	/* Return if no new non-handled response has been received */
	if (seq_num != p_hwfn->mcp_info->drv_mb_seq)
		return -EAGAIN;

	p_cmd_elem = qed_mcp_cmd_get_elem(p_hwfn, seq_num);
	if (!p_cmd_elem) {
		DP_ERR(p_hwfn,
		       "Failed to find a pending mailbox cmd that expects sequence number %d\n",
		       seq_num);
		return -EINVAL;
	}

	p_mb_params = p_cmd_elem->p_mb_params;

	/* Get the MFW response along with the sequence number */
	p_mb_params->mcp_resp = mcp_resp;

	/* Get the MFW param */
	p_mb_params->mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);

	/* Get the union data */
	if (p_mb_params->p_data_dst != NULL && p_mb_params->data_dst_size) {
		u32 union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
				      offsetof(struct public_drv_mb,
					       union_data);
		qed_memcpy_from(p_hwfn, p_ptt, p_mb_params->p_data_dst,
				union_data_addr, p_mb_params->data_dst_size);
	}

	p_cmd_elem->b_is_completed = true;

	return 0;
}
/* Must be called while cmd_lock is acquired */
static void __qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_mcp_mb_params *p_mb_params,
				    u16 seq_num)
{
	union drv_union_data union_data;
	u32 union_data_addr;

	/* Set the union data */
	union_data_addr = p_hwfn->mcp_info->drv_mb_addr +
			  offsetof(struct public_drv_mb, union_data);
	memset(&union_data, 0, sizeof(union_data));
	if (p_mb_params->p_data_src != NULL && p_mb_params->data_src_size)
		memcpy(&union_data, p_mb_params->p_data_src,
		       p_mb_params->data_src_size);
	qed_memcpy_to(p_hwfn, p_ptt, union_data_addr, &union_data,
		      sizeof(union_data));

	/* Set the drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, p_mb_params->param);

	/* Set the drv command along with the sequence number */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (p_mb_params->cmd | seq_num));

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "MFW mailbox: command 0x%08x param 0x%08x\n",
		   (p_mb_params->cmd | seq_num), p_mb_params->param);
}
static void qed_mcp_cmd_set_blocking(struct qed_hwfn *p_hwfn, bool block_cmd)
{
	p_hwfn->mcp_info->b_block_cmd = block_cmd;

	DP_INFO(p_hwfn, "%s sending of mailbox commands to the MFW\n",
		block_cmd ? "Block" : "Unblock");
}
static void qed_mcp_print_cpu_info(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2;
	u32 delay = QED_MCP_RESP_ITER_US;

	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
	cpu_pc_0 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	udelay(delay);
	cpu_pc_1 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);
	udelay(delay);
	cpu_pc_2 = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_PROGRAM_COUNTER);

	DP_NOTICE(p_hwfn,
		  "MCP CPU info: mode 0x%08x, state 0x%08x, pc {0x%08x, 0x%08x, 0x%08x}\n",
		  cpu_mode, cpu_state, cpu_pc_0, cpu_pc_1, cpu_pc_2);
}
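/* Send a single mailbox command and wait for its completion:
 * poll until no other command occupies the mailbox, write the command while
 * holding cmd_lock, then poll for the MFW response. Polling sleeps when the
 * caller set QED_MB_FLAG_CAN_SLEEP and busy-waits otherwise. If the MFW does
 * not answer within the retry budget, further commands are blocked unless
 * QED_MB_FLAG_AVOID_BLOCK was set.
 */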
static int
_qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       struct qed_mcp_mb_params *p_mb_params,
		       u32 max_retries, u32 usecs)
{
	u32 cnt = 0, msecs = DIV_ROUND_UP(usecs, 1000);
	struct qed_mcp_cmd_elem *p_cmd_elem;
	u16 seq_num;
	int rc = 0;

	/* Wait until the mailbox is non-occupied */
	do {
		/* Exit the loop if there is no pending command, or if the
		 * pending command is completed during this iteration.
		 * The spinlock stays locked until the command is sent.
		 */

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (!qed_mcp_has_pending_cmd(p_hwfn))
			break;

		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (!rc)
			break;
		else if (rc != -EAGAIN)
			goto err;

		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
			msleep(msecs);
		else
			udelay(usecs);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn,
			  "The MFW mailbox is occupied by an uncompleted command. Failed to send command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return -EAGAIN;
	}

	/* Send the mailbox command */
	qed_mcp_reread_offsets(p_hwfn, p_ptt);
	seq_num = ++p_hwfn->mcp_info->drv_mb_seq;
	p_cmd_elem = qed_mcp_cmd_add_elem(p_hwfn, p_mb_params, seq_num);
	if (!p_cmd_elem) {
		rc = -ENOMEM;
		goto err;
	}

	__qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, seq_num);
	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

	/* Wait for the MFW response */
	do {
		/* Exit the loop if the command is already completed, or if the
		 * command is completed during this iteration.
		 * The spinlock stays locked until the list element is removed.
		 */

		if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP))
			msleep(msecs);
		else
			udelay(usecs);

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (p_cmd_elem->b_is_completed)
			break;

		rc = qed_mcp_update_pending_cmd(p_hwfn, p_ptt);
		if (!rc)
			break;
		else if (rc != -EAGAIN)
			goto err;

		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
	} while (++cnt < max_retries);

	if (cnt >= max_retries) {
		DP_NOTICE(p_hwfn,
			  "The MFW failed to respond to command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		qed_mcp_print_cpu_info(p_hwfn, p_ptt);

		spin_lock_bh(&p_hwfn->mcp_info->cmd_lock);
		qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
		spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

		if (!QED_MB_FLAGS_IS_SET(p_mb_params, AVOID_BLOCK))
			qed_mcp_cmd_set_blocking(p_hwfn, true);

		return -EAGAIN;
	}

	qed_mcp_cmd_del_elem(p_hwfn, p_cmd_elem);
	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "MFW mailbox: response 0x%08x param 0x%08x [after %d.%03d ms]\n",
		   p_mb_params->mcp_resp,
		   p_mb_params->mcp_param,
		   (cnt * usecs) / 1000, (cnt * usecs) % 1000);

	/* Clear the sequence number from the MFW response */
	p_mb_params->mcp_resp &= FW_MSG_CODE_MASK;

	return 0;

err:
	spin_unlock_bh(&p_hwfn->mcp_info->cmd_lock);
	return rc;
}
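/* Validate the request against the size of union drv_union_data and, for
 * sleeping callers, convert the 10 usec polling budget into 1 msec steps
 * before handing off to _qed_mcp_cmd_and_union().
 */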
static int qed_mcp_cmd_and_union(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt,
				 struct qed_mcp_mb_params *p_mb_params)
{
	size_t union_data_size = sizeof(union drv_union_data);
	u32 max_retries = QED_DRV_MB_MAX_RETRIES;
	u32 usecs = QED_MCP_RESP_ITER_US;

	/* MCP not initialized */
	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	if (p_hwfn->mcp_info->b_block_cmd) {
		DP_NOTICE(p_hwfn,
			  "The MFW is not responsive. Avoid sending mailbox command 0x%08x [param 0x%08x].\n",
			  p_mb_params->cmd, p_mb_params->param);
		return -EBUSY;
	}

	if (p_mb_params->data_src_size > union_data_size ||
	    p_mb_params->data_dst_size > union_data_size) {
		DP_ERR(p_hwfn,
		       "The provided size is larger than the union data size [src_size %u, dst_size %u, union_data_size %zu]\n",
		       p_mb_params->data_src_size,
		       p_mb_params->data_dst_size, union_data_size);
		return -EINVAL;
	}

	if (QED_MB_FLAGS_IS_SET(p_mb_params, CAN_SLEEP)) {
		max_retries = DIV_ROUND_UP(max_retries, 1000);
		usecs *= 1000;
	}

	return _qed_mcp_cmd_and_union(p_hwfn, p_ptt, p_mb_params, max_retries,
				      usecs);
}
int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
		struct qed_ptt *p_ptt,
		u32 cmd,
		u32 param,
		u32 *o_mcp_resp,
		u32 *o_mcp_param)
{
	struct qed_mcp_mb_params mb_params;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	return 0;
}
int
qed_mcp_nvm_wr_cmd(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt,
		   u32 cmd,
		   u32 param,
		   u32 *o_mcp_resp,
		   u32 *o_mcp_param, u32 i_txn_size, u32 *i_buf)
{
	struct qed_mcp_mb_params mb_params;
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_src = i_buf;
	mb_params.data_src_size = (u8)i_txn_size;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	/* nvm_info needs to be updated */
	p_hwfn->nvm_info.valid = false;

	return 0;
}
int qed_mcp_nvm_rd_cmd(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt,
		       u32 cmd,
		       u32 param,
		       u32 *o_mcp_resp,
		       u32 *o_mcp_param, u32 *o_txn_size, u32 *o_buf)
{
	struct qed_mcp_mb_params mb_params;
	u8 raw_data[MCP_DRV_NVM_BUF_LEN];
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.param = param;
	mb_params.p_data_dst = raw_data;

	/* Use the maximal value since the actual one is part of the response */
	mb_params.data_dst_size = MCP_DRV_NVM_BUF_LEN;

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	*o_mcp_resp = mb_params.mcp_resp;
	*o_mcp_param = mb_params.mcp_param;

	*o_txn_size = *o_mcp_param;
	memcpy(o_buf, raw_data, *o_txn_size);

	return 0;
}
static bool
qed_mcp_can_force_load(u8 drv_role,
		       u8 exist_drv_role,
		       enum qed_override_force_load override_force_load)
{
	bool can_force_load = false;

	switch (override_force_load) {
	case QED_OVERRIDE_FORCE_LOAD_ALWAYS:
		can_force_load = true;
		break;
	case QED_OVERRIDE_FORCE_LOAD_NEVER:
		can_force_load = false;
		break;
	default:
		can_force_load = (drv_role == DRV_ROLE_OS &&
				  exist_drv_role == DRV_ROLE_PREBOOT) ||
				 (drv_role == DRV_ROLE_KDUMP &&
				  exist_drv_role == DRV_ROLE_OS);
		break;
	}

	return can_force_load;
}
static int qed_mcp_cancel_load_req(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CANCEL_LOAD_REQ, 0,
			 &resp, &param);
	if (rc)
		DP_NOTICE(p_hwfn,
			  "Failed to send cancel load request, rc = %d\n", rc);

	return rc;
}
#define CONFIG_QEDE_BITMAP_IDX		BIT(0)
#define CONFIG_QED_SRIOV_BITMAP_IDX	BIT(1)
#define CONFIG_QEDR_BITMAP_IDX		BIT(2)
#define CONFIG_QEDF_BITMAP_IDX		BIT(4)
#define CONFIG_QEDI_BITMAP_IDX		BIT(5)
#define CONFIG_QED_LL2_BITMAP_IDX	BIT(6)
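/* The bitmap built below is reported to the MFW as the drv_ver_1 field of
 * the load request, so the management FW can tell which qed sub-modules were
 * enabled in this kernel build.
 */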
static u32 qed_get_config_bitmap(void)
{
	u32 config_bitmap = 0x0;

	if (IS_ENABLED(CONFIG_QEDE))
		config_bitmap |= CONFIG_QEDE_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_SRIOV))
		config_bitmap |= CONFIG_QED_SRIOV_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_RDMA))
		config_bitmap |= CONFIG_QEDR_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_FCOE))
		config_bitmap |= CONFIG_QEDF_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_ISCSI))
		config_bitmap |= CONFIG_QEDI_BITMAP_IDX;

	if (IS_ENABLED(CONFIG_QED_LL2))
		config_bitmap |= CONFIG_QED_LL2_BITMAP_IDX;

	return config_bitmap;
}
struct qed_load_req_in_params {
	u8 hsi_ver;
#define QED_LOAD_REQ_HSI_VER_DEFAULT	0
#define QED_LOAD_REQ_HSI_VER_1		1
	u32 drv_ver_0;
	u32 drv_ver_1;
	u32 fw_ver;
	u8 drv_role;
	u8 timeout_val;
	u8 force_cmd;
	bool avoid_eng_reset;
};

struct qed_load_req_out_params {
	u32 load_code;
	u32 exist_drv_ver_0;
	u32 exist_drv_ver_1;
	u32 exist_fw_ver;
	u8 exist_drv_role;
	u8 mfw_hsi_ver;
	bool drv_exists;
};
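/* A single LOAD_REQ exchange with the MFW: the driver role, version and
 * force/flags fields are packed into a struct load_req_stc and sent through
 * the union data area; the load_rsp_stc that comes back carries the existing
 * driver's role/version, the MFW HSI level and the DRV_EXISTS flag.
 */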
static int
__qed_mcp_load_req(struct qed_hwfn *p_hwfn,
		   struct qed_ptt *p_ptt,
		   struct qed_load_req_in_params *p_in_params,
		   struct qed_load_req_out_params *p_out_params)
{
	struct qed_mcp_mb_params mb_params;
	struct load_req_stc load_req;
	struct load_rsp_stc load_rsp;
	u32 hsi_ver;
	int rc;

	memset(&load_req, 0, sizeof(load_req));
	load_req.drv_ver_0 = p_in_params->drv_ver_0;
	load_req.drv_ver_1 = p_in_params->drv_ver_1;
	load_req.fw_ver = p_in_params->fw_ver;
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_ROLE, p_in_params->drv_role);
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_LOCK_TO,
			  p_in_params->timeout_val);
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FORCE,
			  p_in_params->force_cmd);
	QED_MFW_SET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0,
			  p_in_params->avoid_eng_reset);

	hsi_ver = (p_in_params->hsi_ver == QED_LOAD_REQ_HSI_VER_DEFAULT) ?
		  DRV_ID_MCP_HSI_VER_CURRENT :
		  (p_in_params->hsi_ver << DRV_ID_MCP_HSI_VER_SHIFT);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_LOAD_REQ;
	mb_params.param = PDA_COMP | hsi_ver | p_hwfn->cdev->drv_type;
	mb_params.p_data_src = &load_req;
	mb_params.data_src_size = sizeof(load_req);
	mb_params.p_data_dst = &load_rsp;
	mb_params.data_dst_size = sizeof(load_rsp);
	mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Load Request: param 0x%08x [init_hw %d, drv_type %d, hsi_ver %d, pda 0x%04x]\n",
		   mb_params.param,
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_INIT_HW),
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_DRV_TYPE),
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_MCP_HSI_VER),
		   QED_MFW_GET_FIELD(mb_params.param, DRV_ID_PDA_COMP_VER));

	if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Load Request: drv_ver 0x%08x_0x%08x, fw_ver 0x%08x, misc0 0x%08x [role %d, timeout %d, force %d, flags0 0x%x]\n",
			   load_req.drv_ver_0,
			   load_req.drv_ver_1,
			   load_req.fw_ver,
			   load_req.misc0,
			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_ROLE),
			   QED_MFW_GET_FIELD(load_req.misc0,
					     LOAD_REQ_LOCK_TO),
			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FORCE),
			   QED_MFW_GET_FIELD(load_req.misc0, LOAD_REQ_FLAGS0));
	}

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to send load request, rc = %d\n", rc);
		return rc;
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Load Response: resp 0x%08x\n", mb_params.mcp_resp);
	p_out_params->load_code = mb_params.mcp_resp;

	if (p_in_params->hsi_ver != QED_LOAD_REQ_HSI_VER_1 &&
	    p_out_params->load_code != FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1) {
		DP_VERBOSE(p_hwfn,
			   QED_MSG_SP,
			   "Load Response: exist_drv_ver 0x%08x_0x%08x, exist_fw_ver 0x%08x, misc0 0x%08x [exist_role %d, mfw_hsi %d, flags0 0x%x]\n",
			   load_rsp.drv_ver_0,
			   load_rsp.drv_ver_1,
			   load_rsp.fw_ver,
			   load_rsp.misc0,
			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE),
			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI),
			   QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0));

		p_out_params->exist_drv_ver_0 = load_rsp.drv_ver_0;
		p_out_params->exist_drv_ver_1 = load_rsp.drv_ver_1;
		p_out_params->exist_fw_ver = load_rsp.fw_ver;
		p_out_params->exist_drv_role =
		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_ROLE);
		p_out_params->mfw_hsi_ver =
		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_HSI);
		p_out_params->drv_exists =
		    QED_MFW_GET_FIELD(load_rsp.misc0, LOAD_RSP_FLAGS0) &
		    LOAD_RSP_FLAGS0_DRV_EXISTS;
	}

	return 0;
}
static int eocre_get_mfw_drv_role(struct qed_hwfn *p_hwfn,
				  enum qed_drv_role drv_role,
				  u8 *p_mfw_drv_role)
{
	switch (drv_role) {
	case QED_DRV_ROLE_OS:
		*p_mfw_drv_role = DRV_ROLE_OS;
		break;
	case QED_DRV_ROLE_KDUMP:
		*p_mfw_drv_role = DRV_ROLE_KDUMP;
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected driver role %d\n", drv_role);
		return -EINVAL;
	}

	return 0;
}
{
935 QED_LOAD_REQ_FORCE_NONE
,
936 QED_LOAD_REQ_FORCE_PF
,
937 QED_LOAD_REQ_FORCE_ALL
,
940 static void qed_get_mfw_force_cmd(struct qed_hwfn
*p_hwfn
,
942 enum qed_load_req_force force_cmd
,
946 case QED_LOAD_REQ_FORCE_NONE
:
947 *p_mfw_force_cmd
= LOAD_REQ_FORCE_NONE
;
949 case QED_LOAD_REQ_FORCE_PF
:
950 *p_mfw_force_cmd
= LOAD_REQ_FORCE_PF
;
952 case QED_LOAD_REQ_FORCE_ALL
:
953 *p_mfw_force_cmd
= LOAD_REQ_FORCE_ALL
;
958 int qed_mcp_load_req(struct qed_hwfn
*p_hwfn
,
959 struct qed_ptt
*p_ptt
,
960 struct qed_load_req_params
*p_params
)
962 struct qed_load_req_out_params out_params
;
963 struct qed_load_req_in_params in_params
;
964 u8 mfw_drv_role
, mfw_force_cmd
;
967 memset(&in_params
, 0, sizeof(in_params
));
968 in_params
.hsi_ver
= QED_LOAD_REQ_HSI_VER_DEFAULT
;
969 in_params
.drv_ver_0
= QED_VERSION
;
970 in_params
.drv_ver_1
= qed_get_config_bitmap();
971 in_params
.fw_ver
= STORM_FW_VERSION
;
972 rc
= eocre_get_mfw_drv_role(p_hwfn
, p_params
->drv_role
, &mfw_drv_role
);
976 in_params
.drv_role
= mfw_drv_role
;
977 in_params
.timeout_val
= p_params
->timeout_val
;
978 qed_get_mfw_force_cmd(p_hwfn
,
979 QED_LOAD_REQ_FORCE_NONE
, &mfw_force_cmd
);
981 in_params
.force_cmd
= mfw_force_cmd
;
982 in_params
.avoid_eng_reset
= p_params
->avoid_eng_reset
;
984 memset(&out_params
, 0, sizeof(out_params
));
985 rc
= __qed_mcp_load_req(p_hwfn
, p_ptt
, &in_params
, &out_params
);
989 /* First handle cases where another load request should/might be sent:
990 * - MFW expects the old interface [HSI version = 1]
991 * - MFW responds that a force load request is required
993 if (out_params
.load_code
== FW_MSG_CODE_DRV_LOAD_REFUSED_HSI_1
) {
995 "MFW refused a load request due to HSI > 1. Resending with HSI = 1\n");
997 in_params
.hsi_ver
= QED_LOAD_REQ_HSI_VER_1
;
998 memset(&out_params
, 0, sizeof(out_params
));
999 rc
= __qed_mcp_load_req(p_hwfn
, p_ptt
, &in_params
, &out_params
);
1002 } else if (out_params
.load_code
==
1003 FW_MSG_CODE_DRV_LOAD_REFUSED_REQUIRES_FORCE
) {
1004 if (qed_mcp_can_force_load(in_params
.drv_role
,
1005 out_params
.exist_drv_role
,
1006 p_params
->override_force_load
)) {
1008 "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}]\n",
1009 in_params
.drv_role
, in_params
.fw_ver
,
1010 in_params
.drv_ver_0
, in_params
.drv_ver_1
,
1011 out_params
.exist_drv_role
,
1012 out_params
.exist_fw_ver
,
1013 out_params
.exist_drv_ver_0
,
1014 out_params
.exist_drv_ver_1
);
1016 qed_get_mfw_force_cmd(p_hwfn
,
1017 QED_LOAD_REQ_FORCE_ALL
,
1020 in_params
.force_cmd
= mfw_force_cmd
;
1021 memset(&out_params
, 0, sizeof(out_params
));
1022 rc
= __qed_mcp_load_req(p_hwfn
, p_ptt
, &in_params
,
1028 "A force load is required [{role, fw_ver, drv_ver}: loading={%d, 0x%08x, x%08x_0x%08x}, existing={%d, 0x%08x, 0x%08x_0x%08x}] - Avoid\n",
1029 in_params
.drv_role
, in_params
.fw_ver
,
1030 in_params
.drv_ver_0
, in_params
.drv_ver_1
,
1031 out_params
.exist_drv_role
,
1032 out_params
.exist_fw_ver
,
1033 out_params
.exist_drv_ver_0
,
1034 out_params
.exist_drv_ver_1
);
1036 "Avoid sending a force load request to prevent disruption of active PFs\n");
1038 qed_mcp_cancel_load_req(p_hwfn
, p_ptt
);
1043 /* Now handle the other types of responses.
1044 * The "REFUSED_HSI_1" and "REFUSED_REQUIRES_FORCE" responses are not
1045 * expected here after the additional revised load requests were sent.
1047 switch (out_params
.load_code
) {
1048 case FW_MSG_CODE_DRV_LOAD_ENGINE
:
1049 case FW_MSG_CODE_DRV_LOAD_PORT
:
1050 case FW_MSG_CODE_DRV_LOAD_FUNCTION
:
1051 if (out_params
.mfw_hsi_ver
!= QED_LOAD_REQ_HSI_VER_1
&&
1052 out_params
.drv_exists
) {
1053 /* The role and fw/driver version match, but the PF is
1054 * already loaded and has not been unloaded gracefully.
1057 "PF is already loaded\n");
1063 "Unexpected refusal to load request [resp 0x%08x]. Aborting.\n",
1064 out_params
.load_code
);
1068 p_params
->load_code
= out_params
.load_code
;
int qed_mcp_unload_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_mb_params mb_params;
	u32 wol_param;

	switch (p_hwfn->cdev->wol_config) {
	case QED_OV_WOL_DISABLED:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_DISABLED;
		break;
	case QED_OV_WOL_ENABLED:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_ENABLED;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unknown WoL configuration %02x\n",
			  p_hwfn->cdev->wol_config);
		/* Fall through - leave the decision to the MCP */
	case QED_OV_WOL_DEFAULT:
		wol_param = DRV_MB_PARAM_UNLOAD_WOL_MCP;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_UNLOAD_REQ;
	mb_params.param = wol_param;
	mb_params.flags = QED_MB_FLAG_CAN_SLEEP | QED_MB_FLAG_AVOID_BLOCK;

	return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}
int qed_mcp_unload_done(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_mb_params mb_params;
	struct mcp_mac wol_mac;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_UNLOAD_DONE;

	/* Set the primary MAC if WoL is enabled */
	if (p_hwfn->cdev->wol_config == QED_OV_WOL_ENABLED) {
		u8 *p_mac = p_hwfn->cdev->wol_mac;

		memset(&wol_mac, 0, sizeof(wol_mac));
		wol_mac.mac_upper = p_mac[0] << 8 | p_mac[1];
		wol_mac.mac_lower = p_mac[2] << 24 | p_mac[3] << 16 |
				    p_mac[4] << 8 | p_mac[5];

		DP_VERBOSE(p_hwfn,
			   (QED_MSG_SP | NETIF_MSG_IFDOWN),
			   "Setting WoL MAC: %pM --> [%08x,%08x]\n",
			   p_mac, wol_mac.mac_upper, wol_mac.mac_lower);

		mb_params.p_data_src = &wol_mac;
		mb_params.data_src_size = sizeof(wol_mac);
	}

	return qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}
static void qed_mcp_handle_vf_flr(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PATH);
	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 path_addr = SECTION_ADDR(mfw_path_offsize,
				     QED_PATH_ID(p_hwfn));
	u32 disabled_vfs[VF_MAX_STATIC / 32];
	int i;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Reading Disabled VF information from [offset %08x], path_addr %08x\n",
		   mfw_path_offsize, path_addr);

	for (i = 0; i < (VF_MAX_STATIC / 32); i++) {
		disabled_vfs[i] = qed_rd(p_hwfn, p_ptt,
					 path_addr +
					 offsetof(struct public_path,
						  mcp_vf_disabled) +
					 sizeof(u32) * i);
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
			   "FLR-ed VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, disabled_vfs[i]);
	}

	if (qed_iov_mark_vf_flr(p_hwfn, disabled_vfs))
		qed_schedule_iov(p_hwfn, QED_IOV_WQ_FLR_FLAG);
}
int qed_mcp_ack_vf_flr(struct qed_hwfn *p_hwfn,
		       struct qed_ptt *p_ptt, u32 *vfs_to_ack)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_func_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_func_offsize,
				     MCP_PF_ID(p_hwfn));
	struct qed_mcp_mb_params mb_params;
	int rc;
	int i;

	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | QED_MSG_IOV),
			   "Acking VFs [%08x,...,%08x] - %08x\n",
			   i * 32, (i + 1) * 32 - 1, vfs_to_ack[i]);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_VF_DISABLED_DONE;
	mb_params.p_data_src = vfs_to_ack;
	mb_params.data_src_size = VF_MAX_STATIC / 8;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc) {
		DP_NOTICE(p_hwfn, "Failed to pass ACK for VF flr to MFW\n");
		return -EBUSY;
	}

	/* Clear the ACK bits */
	for (i = 0; i < (VF_MAX_STATIC / 32); i++)
		qed_wr(p_hwfn, p_ptt,
		       func_addr +
		       offsetof(struct public_func, drv_ack_vf_disabled) +
		       i * sizeof(u32), 0);

	return rc;
}
static void qed_mcp_handle_transceiver_change(struct qed_hwfn *p_hwfn,
					      struct qed_ptt *p_ptt)
{
	u32 transceiver_state;

	transceiver_state = qed_rd(p_hwfn, p_ptt,
				   p_hwfn->mcp_info->port_addr +
				   offsetof(struct public_port,
					    transceiver_data));

	DP_VERBOSE(p_hwfn,
		   (NETIF_MSG_HW | QED_MSG_SP),
		   "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
		   transceiver_state,
		   (u32)(p_hwfn->mcp_info->port_addr +
			 offsetof(struct public_port, transceiver_data)));

	transceiver_state = GET_FIELD(transceiver_state,
				      ETH_TRANSCEIVER_STATE);

	if (transceiver_state == ETH_TRANSCEIVER_STATE_PRESENT)
		DP_NOTICE(p_hwfn, "Transceiver is present.\n");
	else
		DP_NOTICE(p_hwfn, "Transceiver is unplugged.\n");
}
static void qed_mcp_read_eee_config(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    struct qed_mcp_link_state *p_link)
{
	u32 eee_status, val;

	p_link->eee_adv_caps = 0;
	p_link->eee_lp_adv_caps = 0;
	eee_status = qed_rd(p_hwfn,
			    p_ptt,
			    p_hwfn->mcp_info->port_addr +
			    offsetof(struct public_port, eee_status));
	p_link->eee_active = !!(eee_status & EEE_ACTIVE_BIT);
	val = (eee_status & EEE_LD_ADV_STATUS_MASK) >> EEE_LD_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_adv_caps |= QED_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_adv_caps |= QED_EEE_10G_ADV;
	val = (eee_status & EEE_LP_ADV_STATUS_MASK) >> EEE_LP_ADV_STATUS_OFFSET;
	if (val & EEE_1G_ADV)
		p_link->eee_lp_adv_caps |= QED_EEE_1G_ADV;
	if (val & EEE_10G_ADV)
		p_link->eee_lp_adv_caps |= QED_EEE_10G_ADV;
}
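/* Handle a link-change notification from the MFW (or mimic one on reset):
 * snapshot the link_status word from the port section of shared memory and
 * translate it into mcp_info->link_output while holding link_lock.
 */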
static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt, bool b_reset)
{
	struct qed_mcp_link_state *p_link;
	u8 max_bw, min_bw;
	u32 status = 0;

	/* Prevent SW/attentions from doing this at the same time */
	spin_lock_bh(&p_hwfn->mcp_info->link_lock);

	p_link = &p_hwfn->mcp_info->link_output;
	memset(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = qed_rd(p_hwfn, p_ptt,
				p_hwfn->mcp_info->port_addr +
				offsetof(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
			   status,
			   (u32)(p_hwfn->mcp_info->port_addr +
				 offsetof(struct public_port, link_status)));
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link indications\n");
		goto out;
	}

	if (p_hwfn->b_drv_link_init)
		p_link->link_up = !!(status & LINK_STATUS_LINK_UP);
	else
		p_link->link_up = false;

	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
		/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
		p_link->link_up = 0;
	}

	if (p_link->link_up && p_link->speed)
		p_link->line_speed = p_link->speed;
	else
		p_link->line_speed = 0;

	max_bw = p_hwfn->mcp_info->func_info.bandwidth_max;
	min_bw = p_hwfn->mcp_info->func_info.bandwidth_min;

	/* Max bandwidth configuration */
	__qed_configure_pf_max_bandwidth(p_hwfn, p_ptt, p_link, max_bw);

	/* Min bandwidth configuration */
	__qed_configure_pf_min_bandwidth(p_hwfn, p_ptt, p_link, min_bw);
	qed_configure_vp_wfq_on_link_change(p_hwfn->cdev, p_ptt,
					    p_link->min_pf_rate);

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status &
				 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_25G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_25G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	if (p_hwfn->mcp_info->capabilities & FW_MB_PARAM_FEATURE_SUPPORT_EEE)
		qed_mcp_read_eee_config(p_hwfn, p_ptt, p_link);

	qed_link_update(p_hwfn, p_ptt);
out:
	spin_unlock_bh(&p_hwfn->mcp_info->link_lock);
}
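/* Build an eth_phy_cfg from mcp_info->link_input and send it with
 * DRV_MSG_CODE_INIT_PHY (link up) or DRV_MSG_CODE_LINK_RESET (link down).
 */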
int qed_mcp_set_link(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, bool b_up)
{
	struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	struct qed_mcp_mb_params mb_params;
	struct eth_phy_cfg phy_cfg;
	int rc = 0;
	u32 cmd;

	/* Set the shmem configuration according to params */
	memset(&phy_cfg, 0, sizeof(phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	if (!params->speed.autoneg)
		phy_cfg.speed = params->speed.forced_speed;
	phy_cfg.pause |= (params->pause.autoneg) ? ETH_PAUSE_AUTONEG : 0;
	phy_cfg.pause |= (params->pause.forced_rx) ? ETH_PAUSE_RX : 0;
	phy_cfg.pause |= (params->pause.forced_tx) ? ETH_PAUSE_TX : 0;
	phy_cfg.adv_speed = params->speed.advertised_speeds;
	phy_cfg.loopback_mode = params->loopback_mode;

	/* There are MFWs that share this capability regardless of whether
	 * this is feasible or not. And given that at the very least adv_caps
	 * would be set internally by qed, we want to make sure LFA would
	 * still work.
	 */
	if ((p_hwfn->mcp_info->capabilities &
	     FW_MB_PARAM_FEATURE_SUPPORT_EEE) && params->eee.enable) {
		phy_cfg.eee_cfg |= EEE_CFG_EEE_ENABLED;
		if (params->eee.tx_lpi_enable)
			phy_cfg.eee_cfg |= EEE_CFG_TX_LPI;
		if (params->eee.adv_caps & QED_EEE_1G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_1G;
		if (params->eee.adv_caps & QED_EEE_10G_ADV)
			phy_cfg.eee_cfg |= EEE_CFG_ADV_SPEED_10G;
		phy_cfg.eee_cfg |= (params->eee.tx_lpi_timer <<
				    EEE_TX_TIMER_USEC_OFFSET) &
				   EEE_TX_TIMER_USEC_MASK;
	}

	p_hwfn->b_drv_link_init = b_up;

	if (b_up) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
			   phy_cfg.speed,
			   phy_cfg.pause,
			   phy_cfg.adv_speed,
			   phy_cfg.loopback_mode,
			   phy_cfg.feature_config_flags);
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link\n");
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = cmd;
	mb_params.p_data_src = &phy_cfg;
	mb_params.data_src_size = sizeof(phy_cfg);
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);

	/* if mcp fails to respond we must abort */
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Mimic link-change attention, done for several reasons:
	 * - On reset, there's no guarantee MFW would trigger
	 *   an attention.
	 * - On initialization, older MFWs might not indicate link change
	 *   during LFA, so we'll never get an UP indication.
	 */
	qed_mcp_handle_link_change(p_hwfn, p_ptt, !b_up);

	return 0;
}
static void qed_mcp_send_protocol_stats(struct qed_hwfn *p_hwfn,
					struct qed_ptt *p_ptt,
					enum MFW_DRV_MSG_TYPE type)
{
	enum qed_mcp_protocol_type stats_type;
	union qed_mcp_protocol_stats stats;
	struct qed_mcp_mb_params mb_params;
	u32 hsi_param;

	switch (type) {
	case MFW_DRV_MSG_GET_LAN_STATS:
		stats_type = QED_MCP_LAN_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_LAN;
		break;
	case MFW_DRV_MSG_GET_FCOE_STATS:
		stats_type = QED_MCP_FCOE_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_FCOE;
		break;
	case MFW_DRV_MSG_GET_ISCSI_STATS:
		stats_type = QED_MCP_ISCSI_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_ISCSI;
		break;
	case MFW_DRV_MSG_GET_RDMA_STATS:
		stats_type = QED_MCP_RDMA_STATS;
		hsi_param = DRV_MSG_CODE_STATS_TYPE_RDMA;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid protocol type %d\n", type);
		return;
	}

	qed_get_protocol_stats(p_hwfn->cdev, stats_type, &stats);

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_GET_STATS;
	mb_params.param = hsi_param;
	mb_params.p_data_src = &stats;
	mb_params.data_src_size = sizeof(stats);
	qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
}
static void qed_read_pf_bandwidth(struct qed_hwfn *p_hwfn,
				  struct public_func *p_shmem_info)
{
	struct qed_mcp_function_info *p_info;

	p_info = &p_hwfn->mcp_info->func_info;

	p_info->bandwidth_min = (p_shmem_info->config &
				 FUNC_MF_CFG_MIN_BW_MASK) >>
					FUNC_MF_CFG_MIN_BW_SHIFT;
	if (p_info->bandwidth_min < 1 || p_info->bandwidth_min > 100) {
		DP_INFO(p_hwfn,
			"bandwidth minimum out of bounds [%02x]. Set to 1\n",
			p_info->bandwidth_min);
		p_info->bandwidth_min = 1;
	}

	p_info->bandwidth_max = (p_shmem_info->config &
				 FUNC_MF_CFG_MAX_BW_MASK) >>
					FUNC_MF_CFG_MAX_BW_SHIFT;
	if (p_info->bandwidth_max < 1 || p_info->bandwidth_max > 100) {
		DP_INFO(p_hwfn,
			"bandwidth maximum out of bounds [%02x]. Set to 100\n",
			p_info->bandwidth_max);
		p_info->bandwidth_max = 100;
	}
}
static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct public_func *p_data, int pfid)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
	u32 i, size;

	memset(p_data, 0, sizeof(*p_data));

	size = min_t(u32, sizeof(*p_data), QED_SECTION_SIZE(mfw_path_offsize));
	for (i = 0; i < size / sizeof(u32); i++)
		((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
					    func_addr + (i << 2));
	return size;
}
static void qed_mcp_update_bw(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *p_info;
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	qed_read_pf_bandwidth(p_hwfn, &shmem_info);

	p_info = &p_hwfn->mcp_info->func_info;

	qed_configure_pf_min_bandwidth(p_hwfn->cdev, p_info->bandwidth_min);
	qed_configure_pf_max_bandwidth(p_hwfn->cdev, p_info->bandwidth_max);

	/* Acknowledge the MFW */
	qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BW_UPDATE_ACK, 0, &resp,
		    &param);
}
static void qed_mcp_update_stag(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct public_func shmem_info;
	u32 resp = 0, param = 0;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));

	p_hwfn->mcp_info->func_info.ovlan = (u16)shmem_info.ovlan_stag &
					    FUNC_MF_CFG_OV_STAG_MASK;
	p_hwfn->hw_info.ovlan = p_hwfn->mcp_info->func_info.ovlan;
	if (test_bit(QED_MF_OVLAN_CLSS, &p_hwfn->cdev->mf_bits)) {
		if (p_hwfn->hw_info.ovlan != QED_MCP_VLAN_UNSET) {
			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE,
			       p_hwfn->hw_info.ovlan);
			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 1);

			/* Configure DB to add external vlan to EDPM packets */
			qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 1);
			qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2,
			       p_hwfn->hw_info.ovlan);
		} else {
			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_EN, 0);
			qed_wr(p_hwfn, p_ptt, NIG_REG_LLH_FUNC_TAG_VALUE, 0);
			qed_wr(p_hwfn, p_ptt, DORQ_REG_TAG1_OVRD_MODE, 0);
			qed_wr(p_hwfn, p_ptt, DORQ_REG_PF_EXT_VID_BB_K2, 0);
		}

		qed_sp_pf_update_stag(p_hwfn);
	}

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "ovlan = %d hw_mode = 0x%x\n",
		   p_hwfn->mcp_info->func_info.ovlan, p_hwfn->hw_info.hw_mode);

	/* Acknowledge the MFW */
	qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_S_TAG_UPDATE_ACK, 0,
		    &resp, &param);
}
void qed_mcp_read_ufp_config(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	struct public_func shmem_info;
	u32 port_cfg, val;

	if (!test_bit(QED_MF_UFP_SPECIFIC, &p_hwfn->cdev->mf_bits))
		return;

	memset(&p_hwfn->ufp_info, 0, sizeof(p_hwfn->ufp_info));
	port_cfg = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
			  offsetof(struct public_port, oem_cfg_port));
	val = (port_cfg & OEM_CFG_CHANNEL_TYPE_MASK) >>
		OEM_CFG_CHANNEL_TYPE_OFFSET;
	if (val != OEM_CFG_CHANNEL_TYPE_STAGGED)
		DP_NOTICE(p_hwfn, "Incorrect UFP Channel type %d\n", val);

	val = (port_cfg & OEM_CFG_SCHED_TYPE_MASK) >> OEM_CFG_SCHED_TYPE_OFFSET;
	if (val == OEM_CFG_SCHED_TYPE_ETS) {
		p_hwfn->ufp_info.mode = QED_UFP_MODE_ETS;
	} else if (val == OEM_CFG_SCHED_TYPE_VNIC_BW) {
		p_hwfn->ufp_info.mode = QED_UFP_MODE_VNIC_BW;
	} else {
		p_hwfn->ufp_info.mode = QED_UFP_MODE_UNKNOWN;
		DP_NOTICE(p_hwfn, "Unknown UFP scheduling mode %d\n", val);
	}

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
	val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_TC_MASK) >>
		OEM_CFG_FUNC_TC_OFFSET;
	p_hwfn->ufp_info.tc = (u8)val;
	val = (shmem_info.oem_cfg_func & OEM_CFG_FUNC_HOST_PRI_CTRL_MASK) >>
		OEM_CFG_FUNC_HOST_PRI_CTRL_OFFSET;
	if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_VNIC) {
		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_VNIC;
	} else if (val == OEM_CFG_FUNC_HOST_PRI_CTRL_OS) {
		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_OS;
	} else {
		p_hwfn->ufp_info.pri_type = QED_UFP_PRI_UNKNOWN;
		DP_NOTICE(p_hwfn, "Unknown Host priority control %d\n", val);
	}

	DP_NOTICE(p_hwfn,
		  "UFP shmem config: mode = %d tc = %d pri_type = %d\n",
		  p_hwfn->ufp_info.mode,
		  p_hwfn->ufp_info.tc, p_hwfn->ufp_info.pri_type);
}
static int
qed_mcp_handle_ufp_event(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	qed_mcp_read_ufp_config(p_hwfn, p_ptt);

	if (p_hwfn->ufp_info.mode == QED_UFP_MODE_VNIC_BW) {
		p_hwfn->qm_info.ooo_tc = p_hwfn->ufp_info.tc;
		qed_hw_info_set_offload_tc(&p_hwfn->hw_info,
					   p_hwfn->ufp_info.tc);

		qed_qm_reconf(p_hwfn, p_ptt);
	} else if (p_hwfn->ufp_info.mode == QED_UFP_MODE_ETS) {
		/* Merge UFP TC with the dcbx TC data */
		qed_dcbx_mib_update_event(p_hwfn, p_ptt,
					  QED_DCBX_OPERATIONAL_MIB);
	} else {
		DP_ERR(p_hwfn, "Invalid sched type, discard the UFP config\n");
		return -EINVAL;
	}

	/* update storm FW with negotiation results */
	qed_sp_pf_update_ufp(p_hwfn);

	/* update stag pcp value */
	qed_sp_pf_update_stag(p_hwfn);

	return 0;
}
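/* Process the MFW message mailbox: read the current message block, compare
 * it against the shadow copy to find new messages, dispatch each one, ACK
 * everything back to the MFW in big-endian form and finally update the
 * shadow copy.
 */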
int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *info = p_hwfn->mcp_info;
	int rc = 0;
	bool found = false;
	u16 i;

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");

	/* Read Messages from MFW */
	qed_mcp_read_mb(p_hwfn, p_ptt);

	/* Compare current messages to old ones */
	for (i = 0; i < info->mfw_mb_length; i++) {
		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
			continue;

		found = true;

		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

		switch (i) {
		case MFW_DRV_MSG_LINK_CHANGE:
			qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
			break;
		case MFW_DRV_MSG_VF_DISABLED:
			qed_mcp_handle_vf_flr(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_LLDP_DATA_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_LLDP_MIB);
			break;
		case MFW_DRV_MSG_DCBX_REMOTE_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_REMOTE_MIB);
			break;
		case MFW_DRV_MSG_DCBX_OPERATIONAL_MIB_UPDATED:
			qed_dcbx_mib_update_event(p_hwfn, p_ptt,
						  QED_DCBX_OPERATIONAL_MIB);
			break;
		case MFW_DRV_MSG_OEM_CFG_UPDATE:
			qed_mcp_handle_ufp_event(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE:
			qed_mcp_handle_transceiver_change(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_LAN_STATS:
		case MFW_DRV_MSG_GET_FCOE_STATS:
		case MFW_DRV_MSG_GET_ISCSI_STATS:
		case MFW_DRV_MSG_GET_RDMA_STATS:
			qed_mcp_send_protocol_stats(p_hwfn, p_ptt, i);
			break;
		case MFW_DRV_MSG_BW_UPDATE:
			qed_mcp_update_bw(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_S_TAG_UPDATE:
			qed_mcp_update_stag(p_hwfn, p_ptt);
			break;
		case MFW_DRV_MSG_GET_TLV_REQ:
			qed_mfw_tlv_req(p_hwfn);
			break;
		default:
			DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
			rc = -EINVAL;
		}
	}

	/* ACK everything */
	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
		__be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);

		/* MFW expect answer in BE, so we force write in that format */
		qed_wr(p_hwfn, p_ptt,
		       info->mfw_mb_addr + sizeof(u32) +
		       MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
		       sizeof(u32) + i * sizeof(u32),
		       (__force u32)val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Received an MFW message indication but no new message!\n");
		rc = -EINVAL;
	}

	/* Copy the new mfw messages into the shadow */
	memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}
int qed_mcp_get_mfw_ver(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt,
			u32 *p_mfw_ver, u32 *p_running_bundle_id)
{
	u32 global_offsize;

	if (IS_VF(p_hwfn->cdev)) {
		if (p_hwfn->vf_iov_info) {
			struct pfvf_acquire_resp_tlv *p_resp;

			p_resp = &p_hwfn->vf_iov_info->acquire_resp;
			*p_mfw_ver = p_resp->pfdev_info.mfw_ver;
			return 0;
		} else {
			DP_VERBOSE(p_hwfn,
				   QED_MSG_IOV,
				   "VF requested MFW version prior to ACQUIRE\n");
			return -EINVAL;
		}
	}

	global_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_hwfn->
						     mcp_info->public_base,
						     PUBLIC_GLOBAL));
	*p_mfw_ver =
	    qed_rd(p_hwfn, p_ptt,
		   SECTION_ADDR(global_offsize,
				0) + offsetof(struct public_global, mfw_ver));

	if (p_running_bundle_id != NULL) {
		*p_running_bundle_id = qed_rd(p_hwfn, p_ptt,
					      SECTION_ADDR(global_offsize, 0) +
					      offsetof(struct public_global,
						       running_bundle_id));
	}

	return 0;
}
int qed_mcp_get_mbi_ver(struct qed_hwfn *p_hwfn,
			struct qed_ptt *p_ptt, u32 *p_mbi_ver)
{
	u32 nvm_cfg_addr, nvm_cfg1_offset, mbi_ver_addr;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	/* Read the address of the nvm_cfg */
	nvm_cfg_addr = qed_rd(p_hwfn, p_ptt, MISC_REG_GEN_PURP_CR0);
	if (!nvm_cfg_addr) {
		DP_NOTICE(p_hwfn, "Shared memory not initialized\n");
		return -EINVAL;
	}

	/* Read the offset of nvm_cfg1 */
	nvm_cfg1_offset = qed_rd(p_hwfn, p_ptt, nvm_cfg_addr + 4);

	mbi_ver_addr = MCP_REG_SCRATCH + nvm_cfg1_offset +
		       offsetof(struct nvm_cfg1, glob) +
		       offsetof(struct nvm_cfg1_glob, mbi_version);
	*p_mbi_ver = qed_rd(p_hwfn, p_ptt,
			    mbi_ver_addr) &
		     (NVM_CFG1_GLOB_MBI_VERSION_0_MASK |
		      NVM_CFG1_GLOB_MBI_VERSION_1_MASK |
		      NVM_CFG1_GLOB_MBI_VERSION_2_MASK);

	return 0;
}
int qed_mcp_get_media_type(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u32 *p_media_type)
{
	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized!\n");
		return -EBUSY;
	}

	if (!p_ptt) {
		*p_media_type = MEDIA_UNSPECIFIED;
		return -EINVAL;
	}

	*p_media_type = qed_rd(p_hwfn, p_ptt,
			       p_hwfn->mcp_info->port_addr +
			       offsetof(struct public_port,
					media_type));

	return 0;
}
/* Old MFW has a global configuration for all PFs regarding RDMA support */
static void
qed_mcp_get_shmem_proto_legacy(struct qed_hwfn *p_hwfn,
			       enum qed_pci_personality *p_proto)
{
	/* There wasn't ever a legacy MFW that published iwarp.
	 * So at this point, this is either plain l2 or RoCE.
	 */
	if (test_bit(QED_DEV_CAP_ROCE, &p_hwfn->hw_info.device_capabilities))
		*p_proto = QED_PCI_ETH_ROCE;
	else
		*p_proto = QED_PCI_ETH;

	DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
		   "According to Legacy capabilities, L2 personality is %08x\n",
		   (u32)*p_proto);
}
static int
qed_mcp_get_shmem_proto_mfw(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    enum qed_pci_personality *p_proto)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt,
			 DRV_MSG_CODE_GET_PF_RDMA_PROTOCOL, 0, &resp, &param);
	if (rc)
		return rc;
	if (resp != FW_MSG_CODE_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_IFUP,
			   "MFW lacks support for command; Returns %08x\n",
			   resp);
		return -EINVAL;
	}

	switch (param) {
	case FW_MB_PARAM_GET_PF_RDMA_NONE:
		*p_proto = QED_PCI_ETH;
		break;
	case FW_MB_PARAM_GET_PF_RDMA_ROCE:
		*p_proto = QED_PCI_ETH_ROCE;
		break;
	case FW_MB_PARAM_GET_PF_RDMA_IWARP:
		*p_proto = QED_PCI_ETH_IWARP;
		break;
	case FW_MB_PARAM_GET_PF_RDMA_BOTH:
		*p_proto = QED_PCI_ETH_RDMA;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "MFW answers GET_PF_RDMA_PROTOCOL but param is %08x\n",
			  param);
		return -EINVAL;
	}

	DP_VERBOSE(p_hwfn,
		   NETIF_MSG_IFUP,
		   "According to capabilities, L2 personality is %08x [resp %08x param %08x]\n",
		   (u32)*p_proto, resp, param);

	return 0;
}
static int
qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
			struct public_func *p_info,
			struct qed_ptt *p_ptt,
			enum qed_pci_personality *p_proto)
{
	int rc = 0;

	switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
	case FUNC_MF_CFG_PROTOCOL_ETHERNET:
		if (!IS_ENABLED(CONFIG_QED_RDMA))
			*p_proto = QED_PCI_ETH;
		else if (qed_mcp_get_shmem_proto_mfw(p_hwfn, p_ptt, p_proto))
			qed_mcp_get_shmem_proto_legacy(p_hwfn, p_proto);
		break;
	case FUNC_MF_CFG_PROTOCOL_ISCSI:
		*p_proto = QED_PCI_ISCSI;
		break;
	case FUNC_MF_CFG_PROTOCOL_FCOE:
		*p_proto = QED_PCI_FCOE;
		break;
	case FUNC_MF_CFG_PROTOCOL_ROCE:
		DP_NOTICE(p_hwfn, "RoCE personality is not a valid value!\n");
		/* Fall through */
	default:
		rc = -EINVAL;
	}

	return rc;
}

int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *info;
	struct public_func shmem_info;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info, MCP_PF_ID(p_hwfn));
	info = &p_hwfn->mcp_info->func_info;

	info->pause_on_host = (shmem_info.config &
			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;

	if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info, p_ptt,
				    &info->protocol)) {
		DP_ERR(p_hwfn, "Unknown personality %08x\n",
		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
		return -EINVAL;
	}

	qed_read_pf_bandwidth(p_hwfn, &shmem_info);

	if (shmem_info.mac_upper || shmem_info.mac_lower) {
		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
		info->mac[1] = (u8)(shmem_info.mac_upper);
		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
		info->mac[5] = (u8)(shmem_info.mac_lower);

		/* Store primary MAC for later possible WoL */
		memcpy(&p_hwfn->cdev->wol_mac, info->mac, ETH_ALEN);
	} else {
		DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
	}

	info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_lower |
			 (((u64)shmem_info.fcoe_wwn_port_name_upper) << 32);
	info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_lower |
			 (((u64)shmem_info.fcoe_wwn_node_name_upper) << 32);

	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);

	info->mtu = (u16)shmem_info.mtu_size;

	p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_NONE;
	p_hwfn->cdev->wol_config = (u8)QED_OV_WOL_DEFAULT;
	if (qed_mcp_is_init(p_hwfn)) {
		u32 resp = 0, param = 0;
		int rc;

		rc = qed_mcp_cmd(p_hwfn, p_ptt,
				 DRV_MSG_CODE_OS_WOL, 0, &resp, &param);
		if (rc)
			return rc;
		if (resp == FW_MSG_CODE_OS_WOL_SUPPORTED)
			p_hwfn->hw_info.b_wol_support = QED_WOL_SUPPORT_PME;
	}

	DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
		   "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x wol %02x\n",
		   info->pause_on_host, info->protocol,
		   info->bandwidth_min, info->bandwidth_max,
		   info->mac[0], info->mac[1], info->mac[2],
		   info->mac[3], info->mac[4], info->mac[5],
		   info->wwn_port, info->wwn_node,
		   info->ovlan, (u8)p_hwfn->hw_info.b_wol_support);

	return 0;
}
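
/* Worked example of the shmem MAC layout decoded above (illustrative note,
 * not taken from firmware documentation): for a primary MAC of
 * aa:bb:cc:dd:ee:ff, shmem would hold mac_upper = 0x0000aabb and
 * mac_lower = 0xccddeeff, so the shifts above rebuild the six bytes in
 * transmission order.
 */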

struct qed_mcp_link_params
*qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_input;
}

struct qed_mcp_link_state
*qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_output;
}

struct qed_mcp_link_capabilities
*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_capabilities;
}

int qed_mcp_drain(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt,
			 DRV_MSG_CODE_NIG_DRAIN, 1000, &resp, &param);

	/* Wait for the drain to complete before returning */
	msleep(1020);

	return rc;
}

int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u32 *p_flash_size)
{
	u32 flash_size;

	if (IS_VF(p_hwfn->cdev))
		return -EINVAL;

	flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
		     MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
	flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));

	*p_flash_size = flash_size;

	return 0;
}
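
/* Illustrative note on the encoding above: MCP_REG_NVM_CFG4 stores the flash
 * size as a power-of-two number of megabits, so with
 * MCP_BYTES_PER_MBIT_SHIFT == 17 a field value of 3 yields
 * 1 << (3 + 17) = 8 Mbit = 1 MiB. (The field value here is a made-up
 * example.)
 */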

static int
qed_mcp_config_vf_msix_bb(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u8 vf_id, u8 num)
{
	u32 resp = 0, param = 0, rc_param = 0;
	int rc;

	/* Only Leader can configure MSIX, and need to take CMT into account */
	if (!IS_LEAD_HWFN(p_hwfn))
		return 0;
	num *= p_hwfn->cdev->num_hwfns;

	param |= (vf_id << DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_SHIFT) &
		 DRV_MB_PARAM_CFG_VF_MSIX_VF_ID_MASK;
	param |= (num << DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_SHIFT) &
		 DRV_MB_PARAM_CFG_VF_MSIX_SB_NUM_MASK;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_VF_MSIX, param,
			 &resp, &rc_param);

	if (resp != FW_MSG_CODE_DRV_CFG_VF_MSIX_DONE) {
		DP_NOTICE(p_hwfn, "VF[%d]: MFW failed to set MSI-X\n", vf_id);
		rc = -EINVAL;
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Requested 0x%02x MSI-x interrupts from VF 0x%02x\n",
			   num, vf_id);
	}

	return rc;
}

static int
qed_mcp_config_vf_msix_ah(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u8 num)
{
	u32 resp = 0, param = num, rc_param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_CFG_PF_VFS_MSIX,
			 param, &resp, &rc_param);

	if (resp != FW_MSG_CODE_DRV_CFG_PF_VFS_MSIX_DONE) {
		DP_NOTICE(p_hwfn, "MFW failed to set MSI-X for VFs\n");
		rc = -EINVAL;
	} else {
		DP_VERBOSE(p_hwfn, QED_MSG_IOV,
			   "Requested 0x%02x MSI-x interrupts for VFs\n", num);
	}

	return rc;
}

int qed_mcp_config_vf_msix(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt, u8 vf_id, u8 num)
{
	if (QED_IS_BB(p_hwfn->cdev))
		return qed_mcp_config_vf_msix_bb(p_hwfn, p_ptt, vf_id, num);
	else
		return qed_mcp_config_vf_msix_ah(p_hwfn, p_ptt, num);
}

int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct qed_mcp_drv_version *p_ver)
{
	struct qed_mcp_mb_params mb_params;
	struct drv_version_stc drv_version;
	__be32 val;
	u32 i;
	int rc;

	memset(&drv_version, 0, sizeof(drv_version));
	drv_version.version = p_ver->version;
	for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / sizeof(u32); i++) {
		val = cpu_to_be32(*((u32 *)&p_ver->name[i * sizeof(u32)]));
		*(__be32 *)&drv_version.name[i * sizeof(u32)] = val;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VERSION;
	mb_params.p_data_src = &drv_version;
	mb_params.data_src_size = sizeof(drv_version);
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

/* A maximal 100 msec waiting time for the MCP to halt */
#define QED_MCP_HALT_SLEEP_MS		10
#define QED_MCP_HALT_MAX_RETRIES	10

int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0, cpu_state, cnt = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
			 &param);
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	do {
		msleep(QED_MCP_HALT_SLEEP_MS);
		cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
		if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
			break;
	} while (++cnt < QED_MCP_HALT_MAX_RETRIES);

	if (cnt == QED_MCP_HALT_MAX_RETRIES) {
		DP_NOTICE(p_hwfn,
			  "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
			  qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
		return -EBUSY;
	}

	qed_mcp_cmd_set_blocking(p_hwfn, true);

	return 0;
}

#define QED_MCP_RESUME_SLEEP_MS	10

int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 cpu_mode, cpu_state;

	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);

	cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
	cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
	qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
	msleep(QED_MCP_RESUME_SLEEP_MS);
	cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);

	if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
		DP_NOTICE(p_hwfn,
			  "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
			  cpu_mode, cpu_state);
		return -EBUSY;
	}

	qed_mcp_cmd_set_blocking(p_hwfn, false);

	return 0;
}
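
/* Usage sketch (illustrative only, not a call site in this file): the two
 * helpers above are meant to be used as a pair around work that needs the
 * management CPU stopped, e.g.:
 *
 *	if (!qed_mcp_halt(p_hwfn, p_ptt)) {
 *		... access MCP-owned state ...
 *		qed_mcp_resume(p_hwfn, p_ptt);
 *	}
 */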

int qed_mcp_ov_update_current_config(struct qed_hwfn *p_hwfn,
				     struct qed_ptt *p_ptt,
				     enum qed_ov_client client)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	switch (client) {
	case QED_OV_CLIENT_DRV:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OS;
		break;
	case QED_OV_CLIENT_USER:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_OTHER;
		break;
	case QED_OV_CLIENT_VENDOR_SPEC:
		drv_mb_param = DRV_MB_PARAM_OV_CURR_CFG_VENDOR_SPEC;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid client type %d\n", client);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_CURR_CFG,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");

	return rc;
}

int qed_mcp_ov_update_driver_state(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   enum qed_ov_driver_state drv_state)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	switch (drv_state) {
	case QED_OV_DRIVER_STATE_NOT_LOADED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_NOT_LOADED;
		break;
	case QED_OV_DRIVER_STATE_DISABLED:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_DISABLED;
		break;
	case QED_OV_DRIVER_STATE_ACTIVE:
		drv_mb_param = DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE_ACTIVE;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid driver state %d\n", drv_state);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_DRIVER_STATE,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send driver state\n");

	return rc;
}

int qed_mcp_ov_update_mtu(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u16 mtu)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	drv_mb_param = (u32)mtu << DRV_MB_PARAM_OV_MTU_SIZE_SHIFT;
	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_MTU,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send mtu value, rc = %d\n", rc);

	return rc;
}

int qed_mcp_ov_update_mac(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u8 *mac)
{
	struct qed_mcp_mb_params mb_params;
	u32 mfw_mac[2];
	int rc;

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = DRV_MSG_CODE_SET_VMAC;
	mb_params.param = DRV_MSG_CODE_VMAC_TYPE_MAC <<
			  DRV_MSG_CODE_VMAC_TYPE_SHIFT;
	mb_params.param |= MCP_PF_ID(p_hwfn);

	/* MCP is BE, and on LE platforms PCI would swap access to SHMEM
	 * in 32-bit granularity.
	 * So the MAC has to be set in native order [and not byte order],
	 * otherwise it would be read incorrectly by MFW after swap.
	 */
	mfw_mac[0] = mac[0] << 24 | mac[1] << 16 | mac[2] << 8 | mac[3];
	mfw_mac[1] = mac[4] << 24 | mac[5] << 16;

	mb_params.p_data_src = (u8 *)mfw_mac;
	mb_params.data_src_size = 8;
	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send mac address, rc = %d\n", rc);

	/* Store primary MAC for later possible WoL */
	memcpy(p_hwfn->cdev->wol_mac, mac, ETH_ALEN);

	return rc;
}
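
/* Worked example of the native-order packing above (illustrative): for a MAC
 * of aa:bb:cc:dd:ee:ff, mfw_mac[0] becomes 0xaabbccdd and mfw_mac[1] becomes
 * 0xeeff0000, i.e. the address is carried in host-native u32s rather than as
 * a byte array, so the MFW reads it back correctly after the 32-bit swap.
 */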

int qed_mcp_ov_update_wol(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, enum qed_ov_wol wol)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	if (p_hwfn->hw_info.b_wol_support == QED_WOL_SUPPORT_NONE) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "Can't change WoL configuration when WoL isn't supported\n");
		return -EINVAL;
	}

	switch (wol) {
	case QED_OV_WOL_DEFAULT:
		drv_mb_param = DRV_MB_PARAM_WOL_DEFAULT;
		break;
	case QED_OV_WOL_DISABLED:
		drv_mb_param = DRV_MB_PARAM_WOL_DISABLED;
		break;
	case QED_OV_WOL_ENABLED:
		drv_mb_param = DRV_MB_PARAM_WOL_ENABLED;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid wol state %d\n", wol);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_WOL,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send wol mode, rc = %d\n", rc);

	/* Store the WoL update for a future unload */
	p_hwfn->cdev->wol_config = (u8)wol;

	return rc;
}

int qed_mcp_ov_update_eswitch(struct qed_hwfn *p_hwfn,
			      struct qed_ptt *p_ptt,
			      enum qed_ov_eswitch eswitch)
{
	u32 resp = 0, param = 0;
	u32 drv_mb_param;
	int rc;

	switch (eswitch) {
	case QED_OV_ESWITCH_NONE:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_NONE;
		break;
	case QED_OV_ESWITCH_VEB:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEB;
		break;
	case QED_OV_ESWITCH_VEPA:
		drv_mb_param = DRV_MB_PARAM_ESWITCH_MODE_VEPA;
		break;
	default:
		DP_ERR(p_hwfn, "Invalid eswitch mode %d\n", eswitch);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_OV_UPDATE_ESWITCH_MODE,
			 drv_mb_param, &resp, &param);
	if (rc)
		DP_ERR(p_hwfn, "Failed to send eswitch mode, rc = %d\n", rc);

	return rc;
}

int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt, enum qed_led_mode mode)
{
	u32 resp = 0, param = 0, drv_mb_param;
	int rc;

	switch (mode) {
	case QED_LED_MODE_ON:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_ON;
		break;
	case QED_LED_MODE_OFF:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OFF;
		break;
	case QED_LED_MODE_RESTORE:
		drv_mb_param = DRV_MB_PARAM_SET_LED_MODE_OPER;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid LED mode %d\n", mode);
		return -EINVAL;
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_LED_MODE,
			 drv_mb_param, &resp, &param);

	return rc;
}

int qed_mcp_mask_parities(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt, u32 mask_parities)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MASK_PARITIES,
			 mask_parities, &resp, &param);

	if (rc) {
		DP_ERR(p_hwfn,
		       "MCP response failure for mask parities, aborting\n");
	} else if (resp != FW_MSG_CODE_OK) {
		DP_ERR(p_hwfn,
		       "MCP did not acknowledge mask parity request. Old MFW?\n");
		rc = -EINVAL;
	}

	return rc;
}

int qed_mcp_nvm_read(struct qed_dev *cdev, u32 addr, u8 *p_buf, u32 len)
{
	u32 bytes_left = len, offset = 0, bytes_to_copy, read_len = 0;
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	u32 resp = 0, resp_param = 0;
	struct qed_ptt *p_ptt;
	int rc = 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;

	while (bytes_left > 0) {
		bytes_to_copy = min_t(u32, bytes_left, MCP_DRV_NVM_BUF_LEN);

		rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
					DRV_MSG_CODE_NVM_READ_NVRAM,
					addr + offset +
					(bytes_to_copy <<
					 DRV_MB_PARAM_NVM_LEN_OFFSET),
					&resp, &resp_param,
					&read_len,
					(u32 *)(p_buf + offset));

		if (rc || (resp != FW_MSG_CODE_NVM_OK)) {
			DP_NOTICE(cdev, "MCP command rc = %d\n", rc);
			break;
		}

		/* This can be a lengthy process, and it's possible scheduler
		 * isn't preemptable. Sleep a bit to prevent CPU hogging.
		 */
		if (bytes_left % 0x1000 <
		    (bytes_left - read_len) % 0x1000)
			usleep_range(1000, 2000);

		offset += read_len;
		bytes_left -= read_len;
	}

	cdev->mcp_nvm_resp = resp;
	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}
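
/* Usage sketch (illustrative only; error handling and buffer ownership are
 * the caller's problem): the whole flash could be dumped by pairing
 * qed_mcp_get_flash_size() with qed_mcp_nvm_read():
 *
 *	u32 size;
 *	u8 *buf;
 *
 *	if (!qed_mcp_get_flash_size(p_hwfn, p_ptt, &size)) {
 *		buf = vzalloc(size);
 *		if (buf)
 *			qed_mcp_nvm_read(p_hwfn->cdev, 0, buf, size);
 *	}
 */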

int qed_mcp_nvm_resp(struct qed_dev *cdev, u8 *p_buf)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;

	memcpy(p_buf, &cdev->mcp_nvm_resp, sizeof(cdev->mcp_nvm_resp));
	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}

int qed_mcp_nvm_put_file_begin(struct qed_dev *cdev, u32 addr)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;
	u32 resp, param;
	int rc;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;
	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NVM_PUT_FILE_BEGIN, addr,
			 &resp, &param);
	cdev->mcp_nvm_resp = resp;
	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}

int qed_mcp_nvm_write(struct qed_dev *cdev,
		      u32 cmd, u32 addr, u8 *p_buf, u32 len)
{
	u32 buf_idx = 0, buf_size, nvm_cmd, nvm_offset, resp = 0, param;
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;

	switch (cmd) {
	case QED_PUT_FILE_DATA:
		nvm_cmd = DRV_MSG_CODE_NVM_PUT_FILE_DATA;
		break;
	case QED_NVM_WRITE_NVRAM:
		nvm_cmd = DRV_MSG_CODE_NVM_WRITE_NVRAM;
		break;
	default:
		DP_NOTICE(p_hwfn, "Invalid nvm write command 0x%x\n", cmd);
		rc = -EINVAL;
		goto out;
	}

	while (buf_idx < len) {
		buf_size = min_t(u32, (len - buf_idx), MCP_DRV_NVM_BUF_LEN);
		nvm_offset = ((buf_size << DRV_MB_PARAM_NVM_LEN_OFFSET) |
			      addr) + buf_idx;
		rc = qed_mcp_nvm_wr_cmd(p_hwfn, p_ptt, nvm_cmd, nvm_offset,
					&resp, &param, buf_size,
					(u32 *)&p_buf[buf_idx]);
		if (rc) {
			DP_NOTICE(cdev, "nvm write failed, rc = %d\n", rc);
			resp = FW_MSG_CODE_ERROR;
			break;
		}

		if (resp != FW_MSG_CODE_OK &&
		    resp != FW_MSG_CODE_NVM_OK &&
		    resp != FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK) {
			DP_NOTICE(cdev,
				  "nvm write failed, resp = 0x%08x\n", resp);
			rc = -EINVAL;
			break;
		}

		/* This can be a lengthy process, and it's possible scheduler
		 * isn't pre-emptable. Sleep a bit to prevent CPU hogging.
		 */
		if (buf_idx % 0x1000 > (buf_idx + buf_size) % 0x1000)
			usleep_range(1000, 2000);

		buf_idx += buf_size;
	}

	cdev->mcp_nvm_resp = resp;
out:
	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}
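
/* Usage sketch (illustrative only; the exact flow is dictated by the caller
 * and the image being flashed): writing a file-type image is typically a
 * two-step sequence - announce the target address, then stream the payload:
 *
 *	qed_mcp_nvm_put_file_begin(cdev, addr);
 *	qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, addr, p_buf, len);
 *
 * The last MFW response is latched in cdev->mcp_nvm_resp and can be fetched
 * afterwards with qed_mcp_nvm_resp().
 */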

int qed_mcp_phy_sfp_read(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt,
			 u32 port, u32 addr, u32 offset, u32 len, u8 *p_buf)
{
	u32 bytes_left, bytes_to_copy, buf_size, nvm_offset = 0;
	u32 resp, param;
	int rc;

	nvm_offset |= (port << DRV_MB_PARAM_TRANSCEIVER_PORT_OFFSET) &
		      DRV_MB_PARAM_TRANSCEIVER_PORT_MASK;
	nvm_offset |= (addr << DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_OFFSET) &
		      DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK;

	addr = offset;
	offset = 0;
	bytes_left = len;
	while (bytes_left > 0) {
		bytes_to_copy = min_t(u32, bytes_left,
				      MAX_I2C_TRANSACTION_SIZE);
		nvm_offset &= (DRV_MB_PARAM_TRANSCEIVER_I2C_ADDRESS_MASK |
			       DRV_MB_PARAM_TRANSCEIVER_PORT_MASK);
		nvm_offset |= ((addr + offset) <<
			       DRV_MB_PARAM_TRANSCEIVER_OFFSET_OFFSET) &
			       DRV_MB_PARAM_TRANSCEIVER_OFFSET_MASK;
		nvm_offset |= (bytes_to_copy <<
			       DRV_MB_PARAM_TRANSCEIVER_SIZE_OFFSET) &
			       DRV_MB_PARAM_TRANSCEIVER_SIZE_MASK;
		rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
					DRV_MSG_CODE_TRANSCEIVER_READ,
					nvm_offset, &resp, &param, &buf_size,
					(u32 *)(p_buf + offset));
		if (rc) {
			DP_NOTICE(p_hwfn,
				  "Failed to send a transceiver read command to the MFW. rc = %d.\n",
				  rc);
			return rc;
		}

		if (resp == FW_MSG_CODE_TRANSCEIVER_NOT_PRESENT)
			return -ENODEV;
		else if (resp != FW_MSG_CODE_TRANSCEIVER_DIAG_OK)
			return -EINVAL;

		offset += buf_size;
		bytes_left -= buf_size;
	}

	return 0;
}

int qed_mcp_bist_register_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 drv_mb_param = 0, rsp, param;
	int rc = 0;

	drv_mb_param = (DRV_MB_PARAM_BIST_REGISTER_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, &param);
	if (rc)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = -EAGAIN;

	return rc;
}

int qed_mcp_bist_clock_test(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 drv_mb_param, rsp, param;
	int rc = 0;

	drv_mb_param = (DRV_MB_PARAM_BIST_CLOCK_TEST <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, &param);
	if (rc)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (param != DRV_MB_PARAM_BIST_RC_PASSED))
		rc = -EAGAIN;

	return rc;
}

int qed_mcp_bist_nvm_get_num_images(struct qed_hwfn *p_hwfn,
				    struct qed_ptt *p_ptt,
				    u32 *num_images)
{
	u32 drv_mb_param = 0, rsp;
	int rc = 0;

	drv_mb_param = (DRV_MB_PARAM_BIST_NVM_TEST_NUM_IMAGES <<
			DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_BIST_TEST,
			 drv_mb_param, &rsp, num_images);
	if (rc)
		return rc;

	if (((rsp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK))
		rc = -EINVAL;

	return rc;
}

int qed_mcp_bist_nvm_get_image_att(struct qed_hwfn *p_hwfn,
				   struct qed_ptt *p_ptt,
				   struct bist_nvm_image_att *p_image_att,
				   u32 image_index)
{
	u32 buf_size = 0, param, resp = 0, resp_param = 0;
	int rc;

	param = DRV_MB_PARAM_BIST_NVM_TEST_IMAGE_BY_INDEX <<
		DRV_MB_PARAM_BIST_TEST_INDEX_SHIFT;
	param |= image_index << DRV_MB_PARAM_BIST_TEST_IMAGE_INDEX_SHIFT;

	rc = qed_mcp_nvm_rd_cmd(p_hwfn, p_ptt,
				DRV_MSG_CODE_BIST_TEST, param,
				&resp, &resp_param,
				&buf_size,
				(u32 *)p_image_att);
	if (rc)
		return rc;

	if (((resp & FW_MSG_CODE_MASK) != FW_MSG_CODE_OK) ||
	    (p_image_att->return_code != 1))
		rc = -EINVAL;

	return rc;
}

int qed_mcp_nvm_info_populate(struct qed_hwfn *p_hwfn)
{
	struct qed_nvm_image_info nvm_info;
	struct qed_ptt *p_ptt;
	int rc;
	u32 i;

	if (p_hwfn->nvm_info.valid)
		return 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt) {
		DP_ERR(p_hwfn, "failed to acquire ptt\n");
		return -EBUSY;
	}

	/* Acquire from MFW the amount of available images */
	nvm_info.num_images = 0;
	rc = qed_mcp_bist_nvm_get_num_images(p_hwfn,
					     p_ptt, &nvm_info.num_images);
	if (rc == -EOPNOTSUPP) {
		DP_INFO(p_hwfn, "DRV_MSG_CODE_BIST_TEST is not supported\n");
		goto out;
	} else if (rc || !nvm_info.num_images) {
		DP_ERR(p_hwfn, "Failed getting number of images\n");
		goto err0;
	}

	nvm_info.image_att = kmalloc_array(nvm_info.num_images,
					   sizeof(struct bist_nvm_image_att),
					   GFP_KERNEL);
	if (!nvm_info.image_att) {
		rc = -ENOMEM;
		goto err0;
	}

	/* Iterate over images and get their attributes */
	for (i = 0; i < nvm_info.num_images; i++) {
		rc = qed_mcp_bist_nvm_get_image_att(p_hwfn, p_ptt,
						    &nvm_info.image_att[i], i);
		if (rc) {
			DP_ERR(p_hwfn,
			       "Failed getting image index %d attributes\n", i);
			goto err1;
		}

		DP_VERBOSE(p_hwfn, QED_MSG_SP, "image index %d, size %x\n", i,
			   nvm_info.image_att[i].len);
	}
out:
	/* Update hwfn's nvm_info */
	if (nvm_info.num_images) {
		p_hwfn->nvm_info.num_images = nvm_info.num_images;
		kfree(p_hwfn->nvm_info.image_att);
		p_hwfn->nvm_info.image_att = nvm_info.image_att;
		p_hwfn->nvm_info.valid = true;
	}

	qed_ptt_release(p_hwfn, p_ptt);
	return 0;

err1:
	kfree(nvm_info.image_att);
err0:
	qed_ptt_release(p_hwfn, p_ptt);
	return rc;
}

int
qed_mcp_get_nvm_image_att(struct qed_hwfn *p_hwfn,
			  enum qed_nvm_images image_id,
			  struct qed_nvm_image_att *p_image_att)
{
	enum nvm_image_type type;
	u32 i;

	/* Translate image_id into MFW definitions */
	switch (image_id) {
	case QED_NVM_IMAGE_ISCSI_CFG:
		type = NVM_TYPE_ISCSI_CFG;
		break;
	case QED_NVM_IMAGE_FCOE_CFG:
		type = NVM_TYPE_FCOE_CFG;
		break;
	case QED_NVM_IMAGE_NVM_CFG1:
		type = NVM_TYPE_NVM_CFG1;
		break;
	case QED_NVM_IMAGE_DEFAULT_CFG:
		type = NVM_TYPE_DEFAULT_CFG;
		break;
	case QED_NVM_IMAGE_NVM_META:
		type = NVM_TYPE_META;
		break;
	default:
		DP_NOTICE(p_hwfn, "Unknown request of image_id %08x\n",
			  image_id);
		return -EINVAL;
	}

	qed_mcp_nvm_info_populate(p_hwfn);
	for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
		if (type == p_hwfn->nvm_info.image_att[i].image_type)
			break;
	if (i == p_hwfn->nvm_info.num_images) {
		DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
			   "Failed to find nvram image of type %08x\n",
			   image_id);
		return -ENOENT;
	}

	p_image_att->start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
	p_image_att->length = p_hwfn->nvm_info.image_att[i].len;

	return 0;
}

int qed_mcp_get_nvm_image(struct qed_hwfn *p_hwfn,
			  enum qed_nvm_images image_id,
			  u8 *p_buffer, u32 buffer_len)
{
	struct qed_nvm_image_att image_att;
	int rc;

	memset(p_buffer, 0, buffer_len);

	rc = qed_mcp_get_nvm_image_att(p_hwfn, image_id, &image_att);
	if (rc)
		return rc;

	/* Validate sizes - both the image's and the supplied buffer's */
	if (image_att.length <= 4) {
		DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
			   "Image [%d] is too small - only %d bytes\n",
			   image_id, image_att.length);
		return -EINVAL;
	}

	if (image_att.length > buffer_len) {
		DP_VERBOSE(p_hwfn, QED_MSG_STORAGE,
			   "Image [%d] is too big - %08x bytes where only %08x are available\n",
			   image_id, image_att.length, buffer_len);
		return -ENOMEM;
	}

	return qed_mcp_nvm_read(p_hwfn->cdev, image_att.start_addr,
				p_buffer, image_att.length);
}
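
/* Usage sketch (illustrative only; the chosen image id and buffer handling
 * are examples, not a call site in this file): callers typically size the
 * buffer from the attributes first:
 *
 *	struct qed_nvm_image_att att;
 *	u8 *buf;
 *
 *	if (!qed_mcp_get_nvm_image_att(p_hwfn, QED_NVM_IMAGE_NVM_META, &att)) {
 *		buf = kzalloc(att.length, GFP_KERNEL);
 *		if (buf)
 *			qed_mcp_get_nvm_image(p_hwfn, QED_NVM_IMAGE_NVM_META,
 *					      buf, att.length);
 *	}
 */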

static enum resource_id_enum
qed_mcp_get_mfw_res_id(enum qed_resources res_id)
{
	enum resource_id_enum mfw_res_id = RESOURCE_NUM_INVALID;

	switch (res_id) {
	case QED_SB:
		mfw_res_id = RESOURCE_NUM_SB_E;
		break;
	case QED_L2_QUEUE:
		mfw_res_id = RESOURCE_NUM_L2_QUEUE_E;
		break;
	case QED_VPORT:
		mfw_res_id = RESOURCE_NUM_VPORT_E;
		break;
	case QED_RSS_ENG:
		mfw_res_id = RESOURCE_NUM_RSS_ENGINES_E;
		break;
	case QED_PQ:
		mfw_res_id = RESOURCE_NUM_PQ_E;
		break;
	case QED_RL:
		mfw_res_id = RESOURCE_NUM_RL_E;
		break;
	case QED_MAC:
	case QED_VLAN:
		/* Each VFC resource can accommodate both a MAC and a VLAN */
		mfw_res_id = RESOURCE_VFC_FILTER_E;
		break;
	case QED_ILT:
		mfw_res_id = RESOURCE_ILT_E;
		break;
	case QED_LL2_QUEUE:
		mfw_res_id = RESOURCE_LL2_QUEUE_E;
		break;
	case QED_RDMA_CNQ_RAM:
	case QED_CMDQS_CQS:
		/* CNQ/CMDQS are the same resource */
		mfw_res_id = RESOURCE_CQS_E;
		break;
	case QED_RDMA_STATS_QUEUE:
		mfw_res_id = RESOURCE_RDMA_STATS_QUEUE_E;
		break;
	case QED_BDQ:
		mfw_res_id = RESOURCE_BDQ_E;
		break;
	default:
		break;
	}

	return mfw_res_id;
}

#define QED_RESC_ALLOC_VERSION_MAJOR	2
#define QED_RESC_ALLOC_VERSION_MINOR	0
#define QED_RESC_ALLOC_VERSION				     \
	((QED_RESC_ALLOC_VERSION_MAJOR <<		     \
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR_SHIFT) | \
	 (QED_RESC_ALLOC_VERSION_MINOR <<		     \
	  DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR_SHIFT))

struct qed_resc_alloc_in_params {
	u32 cmd;
	enum qed_resources res_id;
	u32 resc_max_val;
};

struct qed_resc_alloc_out_params {
	u32 mcp_resp;
	u32 mcp_param;
	u32 resc_num;
	u32 resc_start;
	u32 vf_resc_num;
	u32 vf_resc_start;
	u32 flags;
};

static int
qed_mcp_resc_allocation_msg(struct qed_hwfn *p_hwfn,
			    struct qed_ptt *p_ptt,
			    struct qed_resc_alloc_in_params *p_in_params,
			    struct qed_resc_alloc_out_params *p_out_params)
{
	struct qed_mcp_mb_params mb_params;
	struct resource_info mfw_resc_info;
	int rc;

	memset(&mfw_resc_info, 0, sizeof(mfw_resc_info));

	mfw_resc_info.res_id = qed_mcp_get_mfw_res_id(p_in_params->res_id);
	if (mfw_resc_info.res_id == RESOURCE_NUM_INVALID) {
		DP_ERR(p_hwfn,
		       "Failed to match resource %d [%s] with the MFW resources\n",
		       p_in_params->res_id,
		       qed_hw_get_resc_name(p_in_params->res_id));
		return -EINVAL;
	}

	switch (p_in_params->cmd) {
	case DRV_MSG_SET_RESOURCE_VALUE_MSG:
		mfw_resc_info.size = p_in_params->resc_max_val;
		break;
	case DRV_MSG_GET_RESOURCE_ALLOC_MSG:
		break;
	default:
		DP_ERR(p_hwfn, "Unexpected resource alloc command [0x%08x]\n",
		       p_in_params->cmd);
		return -EINVAL;
	}

	memset(&mb_params, 0, sizeof(mb_params));
	mb_params.cmd = p_in_params->cmd;
	mb_params.param = QED_RESC_ALLOC_VERSION;
	mb_params.p_data_src = &mfw_resc_info;
	mb_params.data_src_size = sizeof(mfw_resc_info);
	mb_params.p_data_dst = mb_params.p_data_src;
	mb_params.data_dst_size = mb_params.data_src_size;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Resource message request: cmd 0x%08x, res_id %d [%s], hsi_version %d.%d, val 0x%x\n",
		   p_in_params->cmd,
		   p_in_params->res_id,
		   qed_hw_get_resc_name(p_in_params->res_id),
		   QED_MFW_GET_FIELD(mb_params.param,
				     DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
		   QED_MFW_GET_FIELD(mb_params.param,
				     DRV_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
		   p_in_params->resc_max_val);

	rc = qed_mcp_cmd_and_union(p_hwfn, p_ptt, &mb_params);
	if (rc)
		return rc;

	p_out_params->mcp_resp = mb_params.mcp_resp;
	p_out_params->mcp_param = mb_params.mcp_param;
	p_out_params->resc_num = mfw_resc_info.size;
	p_out_params->resc_start = mfw_resc_info.offset;
	p_out_params->vf_resc_num = mfw_resc_info.vf_size;
	p_out_params->vf_resc_start = mfw_resc_info.vf_offset;
	p_out_params->flags = mfw_resc_info.flags;

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Resource message response: mfw_hsi_version %d.%d, num 0x%x, start 0x%x, vf_num 0x%x, vf_start 0x%x, flags 0x%08x\n",
		   QED_MFW_GET_FIELD(p_out_params->mcp_param,
				     FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MAJOR),
		   QED_MFW_GET_FIELD(p_out_params->mcp_param,
				     FW_MB_PARAM_RESOURCE_ALLOC_VERSION_MINOR),
		   p_out_params->resc_num,
		   p_out_params->resc_start,
		   p_out_params->vf_resc_num,
		   p_out_params->vf_resc_start, p_out_params->flags);

	return 0;
}

int
qed_mcp_set_resc_max_val(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 enum qed_resources res_id,
			 u32 resc_max_val, u32 *p_mcp_resp)
{
	struct qed_resc_alloc_out_params out_params;
	struct qed_resc_alloc_in_params in_params;
	int rc;

	memset(&in_params, 0, sizeof(in_params));
	in_params.cmd = DRV_MSG_SET_RESOURCE_VALUE_MSG;
	in_params.res_id = res_id;
	in_params.resc_max_val = resc_max_val;
	memset(&out_params, 0, sizeof(out_params));
	rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
					 &out_params);
	if (rc)
		return rc;

	*p_mcp_resp = out_params.mcp_resp;

	return 0;
}

int
qed_mcp_get_resc_info(struct qed_hwfn *p_hwfn,
		      struct qed_ptt *p_ptt,
		      enum qed_resources res_id,
		      u32 *p_mcp_resp, u32 *p_resc_num, u32 *p_resc_start)
{
	struct qed_resc_alloc_out_params out_params;
	struct qed_resc_alloc_in_params in_params;
	int rc;

	memset(&in_params, 0, sizeof(in_params));
	in_params.cmd = DRV_MSG_GET_RESOURCE_ALLOC_MSG;
	in_params.res_id = res_id;
	memset(&out_params, 0, sizeof(out_params));
	rc = qed_mcp_resc_allocation_msg(p_hwfn, p_ptt, &in_params,
					 &out_params);
	if (rc)
		return rc;

	*p_mcp_resp = out_params.mcp_resp;

	if (*p_mcp_resp == FW_MSG_CODE_RESOURCE_ALLOC_OK) {
		*p_resc_num = out_params.resc_num;
		*p_resc_start = out_params.resc_start;
	}

	return 0;
}

int qed_mcp_initiate_pf_flr(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 mcp_resp, mcp_param;

	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_INITIATE_PF_FLR, 0,
			   &mcp_resp, &mcp_param);
}

static int qed_mcp_resource_cmd(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt,
				u32 param, u32 *p_mcp_resp, u32 *p_mcp_param)
{
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_RESOURCE_CMD, param,
			 p_mcp_resp, p_mcp_param);
	if (rc)
		return rc;

	if (*p_mcp_resp == FW_MSG_CODE_UNSUPPORTED) {
		DP_INFO(p_hwfn,
			"The resource command is unsupported by the MFW\n");
		return -EINVAL;
	}

	if (*p_mcp_param == RESOURCE_OPCODE_UNKNOWN_CMD) {
		u8 opcode = QED_MFW_GET_FIELD(param, RESOURCE_CMD_REQ_OPCODE);

		DP_NOTICE(p_hwfn,
			  "The resource command is unknown to the MFW [param 0x%08x, opcode %d]\n",
			  param, opcode);
		return -EINVAL;
	}

	return rc;
}

static int
__qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_resc_lock_params *p_params)
{
	u32 param = 0, mcp_resp, mcp_param;
	u8 opcode;
	int rc;

	switch (p_params->timeout) {
	case QED_MCP_RESC_LOCK_TO_DEFAULT:
		opcode = RESOURCE_OPCODE_REQ;
		p_params->timeout = 0;
		break;
	case QED_MCP_RESC_LOCK_TO_NONE:
		opcode = RESOURCE_OPCODE_REQ_WO_AGING;
		p_params->timeout = 0;
		break;
	default:
		opcode = RESOURCE_OPCODE_REQ_W_AGING;
		break;
	}

	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_AGE, p_params->timeout);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Resource lock request: param 0x%08x [age %d, opcode %d, resource %d]\n",
		   param, p_params->timeout, opcode, p_params->resource);

	/* Attempt to acquire the resource */
	rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
	if (rc)
		return rc;

	/* Analyze the response */
	p_params->owner = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OWNER);
	opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);

	DP_VERBOSE(p_hwfn,
		   QED_MSG_SP,
		   "Resource lock response: mcp_param 0x%08x [opcode %d, owner %d]\n",
		   mcp_param, opcode, p_params->owner);

	switch (opcode) {
	case RESOURCE_OPCODE_GNT:
		p_params->b_granted = true;
		break;
	case RESOURCE_OPCODE_BUSY:
		p_params->b_granted = false;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unexpected opcode in resource lock response [mcp_param 0x%08x, opcode %d]\n",
			  mcp_param, opcode);
		return -EINVAL;
	}

	return 0;
}

int
qed_mcp_resc_lock(struct qed_hwfn *p_hwfn,
		  struct qed_ptt *p_ptt, struct qed_resc_lock_params *p_params)
{
	u32 retry_cnt = 0;
	int rc;

	do {
		/* No need for an interval before the first iteration */
		if (retry_cnt) {
			if (p_params->sleep_b4_retry) {
				u16 retry_interval_in_ms =
				    DIV_ROUND_UP(p_params->retry_interval,
						 1000);

				msleep(retry_interval_in_ms);
			} else {
				udelay(p_params->retry_interval);
			}
		}

		rc = __qed_mcp_resc_lock(p_hwfn, p_ptt, p_params);
		if (rc)
			return rc;

		if (p_params->b_granted)
			break;
	} while (retry_cnt++ < p_params->retry_num);

	return 0;
}

int
qed_mcp_resc_unlock(struct qed_hwfn *p_hwfn,
		    struct qed_ptt *p_ptt,
		    struct qed_resc_unlock_params *p_params)
{
	u32 param = 0, mcp_resp, mcp_param;
	u8 opcode;
	int rc;

	opcode = p_params->b_force ? RESOURCE_OPCODE_FORCE_RELEASE
				   : RESOURCE_OPCODE_RELEASE;
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_RESC, p_params->resource);
	QED_MFW_SET_FIELD(param, RESOURCE_CMD_REQ_OPCODE, opcode);

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Resource unlock request: param 0x%08x [opcode %d, resource %d]\n",
		   param, opcode, p_params->resource);

	/* Attempt to release the resource */
	rc = qed_mcp_resource_cmd(p_hwfn, p_ptt, param, &mcp_resp, &mcp_param);
	if (rc)
		return rc;

	/* Analyze the response */
	opcode = QED_MFW_GET_FIELD(mcp_param, RESOURCE_CMD_RSP_OPCODE);

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "Resource unlock response: mcp_param 0x%08x [opcode %d]\n",
		   mcp_param, opcode);

	switch (opcode) {
	case RESOURCE_OPCODE_RELEASED_PREVIOUS:
		DP_INFO(p_hwfn,
			"Resource unlock request for an already released resource [%d]\n",
			p_params->resource);
		/* Fall through */
	case RESOURCE_OPCODE_RELEASED:
		p_params->b_released = true;
		break;
	case RESOURCE_OPCODE_WRONG_OWNER:
		p_params->b_released = false;
		break;
	default:
		DP_NOTICE(p_hwfn,
			  "Unexpected opcode in resource unlock response [mcp_param 0x%08x, opcode %d]\n",
			  mcp_param, opcode);
		return -EINVAL;
	}

	return 0;
}

void qed_mcp_resc_lock_default_init(struct qed_resc_lock_params *p_lock,
				    struct qed_resc_unlock_params *p_unlock,
				    enum qed_resc_lock
				    resource, bool b_is_permanent)
{
	if (p_lock) {
		memset(p_lock, 0, sizeof(*p_lock));

		/* Permanent resources don't require aging, and there's no
		 * point in trying to acquire them more than once since it's
		 * unexpected another entity would release them.
		 */
		if (b_is_permanent) {
			p_lock->timeout = QED_MCP_RESC_LOCK_TO_NONE;
		} else {
			p_lock->retry_num = QED_MCP_RESC_LOCK_RETRY_CNT_DFLT;
			p_lock->retry_interval =
			    QED_MCP_RESC_LOCK_RETRY_VAL_DFLT;
			p_lock->sleep_b4_retry = true;
		}

		p_lock->resource = resource;
	}

	if (p_unlock) {
		memset(p_unlock, 0, sizeof(*p_unlock));
		p_unlock->resource = resource;
	}
}

int qed_mcp_get_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 mcp_resp;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_MFW_FEATURE_SUPPORT,
			 0, &mcp_resp, &p_hwfn->mcp_info->capabilities);
	if (!rc)
		DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_PROBE),
			   "MFW supported features: %08x\n",
			   p_hwfn->mcp_info->capabilities);

	return rc;
}

int qed_mcp_set_capabilities(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
	u32 mcp_resp, mcp_param, features;

	features = DRV_MB_PARAM_FEATURE_SUPPORT_PORT_EEE;

	return qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_FEATURE_SUPPORT,
			   features, &mcp_resp, &mcp_param);
}