1 /* QLogic qed NIC Driver
2 * Copyright (c) 2015 QLogic Corporation
4 * This software is available under the terms of the GNU General Public License
5 * (GPL) Version 2, available from the file COPYING in the main directory of
9 #include <linux/types.h>
10 #include <asm/byteorder.h>
11 #include <linux/delay.h>
12 #include <linux/errno.h>
13 #include <linux/kernel.h>
14 #include <linux/slab.h>
15 #include <linux/spinlock.h>
16 #include <linux/string.h>
21 #include "qed_reg_addr.h"
/* Polling granularity, in usec, when waiting for an MFW response */
#define CHIP_MCP_RESP_ITER_US	10

#define QED_DRV_MB_MAX_RETRIES	(500 * 1000)	/* Account for 5 sec */
#define QED_MCP_RESET_RETRIES	(50 * 1000)	/* Account for 500 msec */

/* Read/write a field of the driver mailbox that lives in device shmem,
 * addressed relative to mcp_info's cached mailbox base.
 */
#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val)	     \
	qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
	       _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

/* Use the macro parameter (_p_hwfn) rather than relying on the caller
 * having a local named 'p_hwfn' in scope.
 */
#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val) \
	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field) \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field))

/* Driver's compatibility version, as reported to the MFW on LOAD_REQ */
#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
		  DRV_ID_PDA_COMP_VER_SHIFT)

/* Flash size register holds size in Mbit; shift converts to bytes */
#define MCP_BYTES_PER_MBIT_SHIFT 17
47 bool qed_mcp_is_init(struct qed_hwfn
*p_hwfn
)
49 if (!p_hwfn
->mcp_info
|| !p_hwfn
->mcp_info
->public_base
)
54 void qed_mcp_cmd_port_init(struct qed_hwfn
*p_hwfn
,
55 struct qed_ptt
*p_ptt
)
57 u32 addr
= SECTION_OFFSIZE_ADDR(p_hwfn
->mcp_info
->public_base
,
59 u32 mfw_mb_offsize
= qed_rd(p_hwfn
, p_ptt
, addr
);
61 p_hwfn
->mcp_info
->port_addr
= SECTION_ADDR(mfw_mb_offsize
,
63 DP_VERBOSE(p_hwfn
, QED_MSG_SP
,
64 "port_addr = 0x%x, port_id 0x%02x\n",
65 p_hwfn
->mcp_info
->port_addr
, MFW_PORT(p_hwfn
));
68 void qed_mcp_read_mb(struct qed_hwfn
*p_hwfn
,
69 struct qed_ptt
*p_ptt
)
71 u32 length
= MFW_DRV_MSG_MAX_DWORDS(p_hwfn
->mcp_info
->mfw_mb_length
);
74 if (!p_hwfn
->mcp_info
->public_base
)
77 for (i
= 0; i
< length
; i
++) {
78 tmp
= qed_rd(p_hwfn
, p_ptt
,
79 p_hwfn
->mcp_info
->mfw_mb_addr
+
80 (i
<< 2) + sizeof(u32
));
82 /* The MB data is actually BE; Need to force it to cpu */
83 ((u32
*)p_hwfn
->mcp_info
->mfw_mb_cur
)[i
] =
84 be32_to_cpu((__force __be32
)tmp
);
88 int qed_mcp_free(struct qed_hwfn
*p_hwfn
)
90 if (p_hwfn
->mcp_info
) {
91 kfree(p_hwfn
->mcp_info
->mfw_mb_cur
);
92 kfree(p_hwfn
->mcp_info
->mfw_mb_shadow
);
94 kfree(p_hwfn
->mcp_info
);
99 static int qed_load_mcp_offsets(struct qed_hwfn
*p_hwfn
,
100 struct qed_ptt
*p_ptt
)
102 struct qed_mcp_info
*p_info
= p_hwfn
->mcp_info
;
103 u32 drv_mb_offsize
, mfw_mb_offsize
;
104 u32 mcp_pf_id
= MCP_PF_ID(p_hwfn
);
106 p_info
->public_base
= qed_rd(p_hwfn
, p_ptt
, MISC_REG_SHARED_MEM_ADDR
);
107 if (!p_info
->public_base
)
110 p_info
->public_base
|= GRCBASE_MCP
;
112 /* Calculate the driver and MFW mailbox address */
113 drv_mb_offsize
= qed_rd(p_hwfn
, p_ptt
,
114 SECTION_OFFSIZE_ADDR(p_info
->public_base
,
116 p_info
->drv_mb_addr
= SECTION_ADDR(drv_mb_offsize
, mcp_pf_id
);
117 DP_VERBOSE(p_hwfn
, QED_MSG_SP
,
118 "drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
119 drv_mb_offsize
, p_info
->drv_mb_addr
, mcp_pf_id
);
121 /* Set the MFW MB address */
122 mfw_mb_offsize
= qed_rd(p_hwfn
, p_ptt
,
123 SECTION_OFFSIZE_ADDR(p_info
->public_base
,
125 p_info
->mfw_mb_addr
= SECTION_ADDR(mfw_mb_offsize
, mcp_pf_id
);
126 p_info
->mfw_mb_length
= (u16
)qed_rd(p_hwfn
, p_ptt
, p_info
->mfw_mb_addr
);
128 /* Get the current driver mailbox sequence before sending
131 p_info
->drv_mb_seq
= DRV_MB_RD(p_hwfn
, p_ptt
, drv_mb_header
) &
132 DRV_MSG_SEQ_NUMBER_MASK
;
134 /* Get current FW pulse sequence */
135 p_info
->drv_pulse_seq
= DRV_MB_RD(p_hwfn
, p_ptt
, drv_pulse_mb
) &
138 p_info
->mcp_hist
= (u16
)qed_rd(p_hwfn
, p_ptt
, MISCS_REG_GENERIC_POR_0
);
143 int qed_mcp_cmd_init(struct qed_hwfn
*p_hwfn
,
144 struct qed_ptt
*p_ptt
)
146 struct qed_mcp_info
*p_info
;
149 /* Allocate mcp_info structure */
150 p_hwfn
->mcp_info
= kzalloc(sizeof(*p_hwfn
->mcp_info
), GFP_KERNEL
);
151 if (!p_hwfn
->mcp_info
)
153 p_info
= p_hwfn
->mcp_info
;
155 if (qed_load_mcp_offsets(p_hwfn
, p_ptt
) != 0) {
156 DP_NOTICE(p_hwfn
, "MCP is not initialized\n");
157 /* Do not free mcp_info here, since public_base indicate that
158 * the MCP is not initialized
163 size
= MFW_DRV_MSG_MAX_DWORDS(p_info
->mfw_mb_length
) * sizeof(u32
);
164 p_info
->mfw_mb_cur
= kzalloc(size
, GFP_KERNEL
);
165 p_info
->mfw_mb_shadow
=
166 kzalloc(sizeof(u32
) * MFW_DRV_MSG_MAX_DWORDS(
167 p_info
->mfw_mb_length
), GFP_KERNEL
);
168 if (!p_info
->mfw_mb_shadow
|| !p_info
->mfw_mb_addr
)
171 /* Initialize the MFW spinlock */
172 spin_lock_init(&p_info
->lock
);
177 DP_NOTICE(p_hwfn
, "Failed to allocate mcp memory\n");
178 qed_mcp_free(p_hwfn
);
182 /* Locks the MFW mailbox of a PF to ensure a single access.
183 * The lock is achieved in most cases by holding a spinlock, causing other
184 * threads to wait till a previous access is done.
185 * In some cases (currently when a [UN]LOAD_REQ commands are sent), the single
186 * access is achieved by setting a blocking flag, which will fail other
187 * competing contexts to send their mailboxes.
189 static int qed_mcp_mb_lock(struct qed_hwfn
*p_hwfn
,
192 spin_lock_bh(&p_hwfn
->mcp_info
->lock
);
194 /* The spinlock shouldn't be acquired when the mailbox command is
195 * [UN]LOAD_REQ, since the engine is locked by the MFW, and a parallel
196 * pending [UN]LOAD_REQ command of another PF together with a spinlock
197 * (i.e. interrupts are disabled) - can lead to a deadlock.
198 * It is assumed that for a single PF, no other mailbox commands can be
199 * sent from another context while sending LOAD_REQ, and that any
200 * parallel commands to UNLOAD_REQ can be cancelled.
202 if (cmd
== DRV_MSG_CODE_LOAD_DONE
|| cmd
== DRV_MSG_CODE_UNLOAD_DONE
)
203 p_hwfn
->mcp_info
->block_mb_sending
= false;
205 if (p_hwfn
->mcp_info
->block_mb_sending
) {
207 "Trying to send a MFW mailbox command [0x%x] in parallel to [UN]LOAD_REQ. Aborting.\n",
209 spin_unlock_bh(&p_hwfn
->mcp_info
->lock
);
213 if (cmd
== DRV_MSG_CODE_LOAD_REQ
|| cmd
== DRV_MSG_CODE_UNLOAD_REQ
) {
214 p_hwfn
->mcp_info
->block_mb_sending
= true;
215 spin_unlock_bh(&p_hwfn
->mcp_info
->lock
);
221 static void qed_mcp_mb_unlock(struct qed_hwfn
*p_hwfn
,
224 if (cmd
!= DRV_MSG_CODE_LOAD_REQ
&& cmd
!= DRV_MSG_CODE_UNLOAD_REQ
)
225 spin_unlock_bh(&p_hwfn
->mcp_info
->lock
);
228 int qed_mcp_reset(struct qed_hwfn
*p_hwfn
,
229 struct qed_ptt
*p_ptt
)
231 u32 seq
= ++p_hwfn
->mcp_info
->drv_mb_seq
;
232 u8 delay
= CHIP_MCP_RESP_ITER_US
;
233 u32 org_mcp_reset_seq
, cnt
= 0;
236 /* Ensure that only a single thread is accessing the mailbox at a
239 rc
= qed_mcp_mb_lock(p_hwfn
, DRV_MSG_CODE_MCP_RESET
);
243 /* Set drv command along with the updated sequence */
244 org_mcp_reset_seq
= qed_rd(p_hwfn
, p_ptt
, MISCS_REG_GENERIC_POR_0
);
245 DRV_MB_WR(p_hwfn
, p_ptt
, drv_mb_header
,
246 (DRV_MSG_CODE_MCP_RESET
| seq
));
249 /* Wait for MFW response */
251 /* Give the FW up to 500 second (50*1000*10usec) */
252 } while ((org_mcp_reset_seq
== qed_rd(p_hwfn
, p_ptt
,
253 MISCS_REG_GENERIC_POR_0
)) &&
254 (cnt
++ < QED_MCP_RESET_RETRIES
));
256 if (org_mcp_reset_seq
!=
257 qed_rd(p_hwfn
, p_ptt
, MISCS_REG_GENERIC_POR_0
)) {
258 DP_VERBOSE(p_hwfn
, QED_MSG_SP
,
259 "MCP was reset after %d usec\n", cnt
* delay
);
261 DP_ERR(p_hwfn
, "Failed to reset MCP\n");
265 qed_mcp_mb_unlock(p_hwfn
, DRV_MSG_CODE_MCP_RESET
);
270 static int qed_do_mcp_cmd(struct qed_hwfn
*p_hwfn
,
271 struct qed_ptt
*p_ptt
,
277 u8 delay
= CHIP_MCP_RESP_ITER_US
;
278 u32 seq
, cnt
= 1, actual_mb_seq
;
281 /* Get actual driver mailbox sequence */
282 actual_mb_seq
= DRV_MB_RD(p_hwfn
, p_ptt
, drv_mb_header
) &
283 DRV_MSG_SEQ_NUMBER_MASK
;
285 /* Use MCP history register to check if MCP reset occurred between
288 if (p_hwfn
->mcp_info
->mcp_hist
!=
289 qed_rd(p_hwfn
, p_ptt
, MISCS_REG_GENERIC_POR_0
)) {
290 DP_VERBOSE(p_hwfn
, QED_MSG_SP
, "Rereading MCP offsets\n");
291 qed_load_mcp_offsets(p_hwfn
, p_ptt
);
292 qed_mcp_cmd_port_init(p_hwfn
, p_ptt
);
294 seq
= ++p_hwfn
->mcp_info
->drv_mb_seq
;
297 DRV_MB_WR(p_hwfn
, p_ptt
, drv_mb_param
, param
);
299 /* Set drv command along with the updated sequence */
300 DRV_MB_WR(p_hwfn
, p_ptt
, drv_mb_header
, (cmd
| seq
));
302 DP_VERBOSE(p_hwfn
, QED_MSG_SP
,
303 "wrote command (%x) to MFW MB param 0x%08x\n",
307 /* Wait for MFW response */
309 *o_mcp_resp
= DRV_MB_RD(p_hwfn
, p_ptt
, fw_mb_header
);
311 /* Give the FW up to 5 second (500*10ms) */
312 } while ((seq
!= (*o_mcp_resp
& FW_MSG_SEQ_NUMBER_MASK
)) &&
313 (cnt
++ < QED_DRV_MB_MAX_RETRIES
));
315 DP_VERBOSE(p_hwfn
, QED_MSG_SP
,
316 "[after %d ms] read (%x) seq is (%x) from FW MB\n",
317 cnt
* delay
, *o_mcp_resp
, seq
);
319 /* Is this a reply to our command? */
320 if (seq
== (*o_mcp_resp
& FW_MSG_SEQ_NUMBER_MASK
)) {
321 *o_mcp_resp
&= FW_MSG_CODE_MASK
;
322 /* Get the MCP param */
323 *o_mcp_param
= DRV_MB_RD(p_hwfn
, p_ptt
, fw_mb_param
);
326 DP_ERR(p_hwfn
, "MFW failed to respond!\n");
333 static int qed_mcp_cmd_and_union(struct qed_hwfn
*p_hwfn
,
334 struct qed_ptt
*p_ptt
,
335 struct qed_mcp_mb_params
*p_mb_params
)
340 /* MCP not initialized */
341 if (!qed_mcp_is_init(p_hwfn
)) {
342 DP_NOTICE(p_hwfn
, "MFW is not initialized !\n");
346 union_data_addr
= p_hwfn
->mcp_info
->drv_mb_addr
+
347 offsetof(struct public_drv_mb
, union_data
);
349 /* Ensure that only a single thread is accessing the mailbox at a
352 rc
= qed_mcp_mb_lock(p_hwfn
, p_mb_params
->cmd
);
356 if (p_mb_params
->p_data_src
!= NULL
)
357 qed_memcpy_to(p_hwfn
, p_ptt
, union_data_addr
,
358 p_mb_params
->p_data_src
,
359 sizeof(*p_mb_params
->p_data_src
));
361 rc
= qed_do_mcp_cmd(p_hwfn
, p_ptt
, p_mb_params
->cmd
,
362 p_mb_params
->param
, &p_mb_params
->mcp_resp
,
363 &p_mb_params
->mcp_param
);
365 if (p_mb_params
->p_data_dst
!= NULL
)
366 qed_memcpy_from(p_hwfn
, p_ptt
, p_mb_params
->p_data_dst
,
368 sizeof(*p_mb_params
->p_data_dst
));
370 qed_mcp_mb_unlock(p_hwfn
, p_mb_params
->cmd
);
375 int qed_mcp_cmd(struct qed_hwfn
*p_hwfn
,
376 struct qed_ptt
*p_ptt
,
382 struct qed_mcp_mb_params mb_params
;
385 memset(&mb_params
, 0, sizeof(mb_params
));
387 mb_params
.param
= param
;
388 rc
= qed_mcp_cmd_and_union(p_hwfn
, p_ptt
, &mb_params
);
392 *o_mcp_resp
= mb_params
.mcp_resp
;
393 *o_mcp_param
= mb_params
.mcp_param
;
398 int qed_mcp_load_req(struct qed_hwfn
*p_hwfn
,
399 struct qed_ptt
*p_ptt
,
402 struct qed_dev
*cdev
= p_hwfn
->cdev
;
403 struct qed_mcp_mb_params mb_params
;
404 union drv_union_data union_data
;
407 memset(&mb_params
, 0, sizeof(mb_params
));
409 mb_params
.cmd
= DRV_MSG_CODE_LOAD_REQ
;
410 mb_params
.param
= PDA_COMP
| DRV_ID_MCP_HSI_VER_CURRENT
|
412 memcpy(&union_data
.ver_str
, cdev
->ver_str
, MCP_DRV_VER_STR_SIZE
);
413 mb_params
.p_data_src
= &union_data
;
414 rc
= qed_mcp_cmd_and_union(p_hwfn
, p_ptt
, &mb_params
);
416 /* if mcp fails to respond we must abort */
418 DP_ERR(p_hwfn
, "MCP response failure, aborting\n");
422 *p_load_code
= mb_params
.mcp_resp
;
424 /* If MFW refused (e.g. other port is in diagnostic mode) we
425 * must abort. This can happen in the following cases:
426 * - Other port is in diagnostic mode
427 * - Previously loaded function on the engine is not compliant with
429 * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
432 if (!(*p_load_code
) ||
433 ((*p_load_code
) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI
) ||
434 ((*p_load_code
) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA
) ||
435 ((*p_load_code
) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG
)) {
436 DP_ERR(p_hwfn
, "MCP refused load request, aborting\n");
443 static void qed_mcp_handle_transceiver_change(struct qed_hwfn
*p_hwfn
,
444 struct qed_ptt
*p_ptt
)
446 u32 transceiver_state
;
448 transceiver_state
= qed_rd(p_hwfn
, p_ptt
,
449 p_hwfn
->mcp_info
->port_addr
+
450 offsetof(struct public_port
,
454 (NETIF_MSG_HW
| QED_MSG_SP
),
455 "Received transceiver state update [0x%08x] from mfw [Addr 0x%x]\n",
457 (u32
)(p_hwfn
->mcp_info
->port_addr
+
458 offsetof(struct public_port
,
461 transceiver_state
= GET_FIELD(transceiver_state
,
462 PMM_TRANSCEIVER_STATE
);
464 if (transceiver_state
== PMM_TRANSCEIVER_STATE_PRESENT
)
465 DP_NOTICE(p_hwfn
, "Transceiver is present.\n");
467 DP_NOTICE(p_hwfn
, "Transceiver is unplugged.\n");
470 static void qed_mcp_handle_link_change(struct qed_hwfn
*p_hwfn
,
471 struct qed_ptt
*p_ptt
,
474 struct qed_mcp_link_state
*p_link
;
477 p_link
= &p_hwfn
->mcp_info
->link_output
;
478 memset(p_link
, 0, sizeof(*p_link
));
480 status
= qed_rd(p_hwfn
, p_ptt
,
481 p_hwfn
->mcp_info
->port_addr
+
482 offsetof(struct public_port
, link_status
));
483 DP_VERBOSE(p_hwfn
, (NETIF_MSG_LINK
| QED_MSG_SP
),
484 "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
486 (u32
)(p_hwfn
->mcp_info
->port_addr
+
487 offsetof(struct public_port
,
490 DP_VERBOSE(p_hwfn
, NETIF_MSG_LINK
,
491 "Resetting link indications\n");
495 if (p_hwfn
->b_drv_link_init
)
496 p_link
->link_up
= !!(status
& LINK_STATUS_LINK_UP
);
498 p_link
->link_up
= false;
500 p_link
->full_duplex
= true;
501 switch ((status
& LINK_STATUS_SPEED_AND_DUPLEX_MASK
)) {
502 case LINK_STATUS_SPEED_AND_DUPLEX_100G
:
503 p_link
->speed
= 100000;
505 case LINK_STATUS_SPEED_AND_DUPLEX_50G
:
506 p_link
->speed
= 50000;
508 case LINK_STATUS_SPEED_AND_DUPLEX_40G
:
509 p_link
->speed
= 40000;
511 case LINK_STATUS_SPEED_AND_DUPLEX_25G
:
512 p_link
->speed
= 25000;
514 case LINK_STATUS_SPEED_AND_DUPLEX_20G
:
515 p_link
->speed
= 20000;
517 case LINK_STATUS_SPEED_AND_DUPLEX_10G
:
518 p_link
->speed
= 10000;
520 case LINK_STATUS_SPEED_AND_DUPLEX_1000THD
:
521 p_link
->full_duplex
= false;
523 case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD
:
524 p_link
->speed
= 1000;
530 /* Correct speed according to bandwidth allocation */
531 if (p_hwfn
->mcp_info
->func_info
.bandwidth_max
&& p_link
->speed
) {
532 p_link
->speed
= p_link
->speed
*
533 p_hwfn
->mcp_info
->func_info
.bandwidth_max
/
535 qed_init_pf_rl(p_hwfn
, p_ptt
, p_hwfn
->rel_pf_id
,
537 DP_VERBOSE(p_hwfn
, NETIF_MSG_LINK
,
538 "Configured MAX bandwidth to be %08x Mb/sec\n",
542 p_link
->an
= !!(status
& LINK_STATUS_AUTO_NEGOTIATE_ENABLED
);
543 p_link
->an_complete
= !!(status
&
544 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE
);
545 p_link
->parallel_detection
= !!(status
&
546 LINK_STATUS_PARALLEL_DETECTION_USED
);
547 p_link
->pfc_enabled
= !!(status
& LINK_STATUS_PFC_ENABLED
);
549 p_link
->partner_adv_speed
|=
550 (status
& LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE
) ?
551 QED_LINK_PARTNER_SPEED_1G_FD
: 0;
552 p_link
->partner_adv_speed
|=
553 (status
& LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE
) ?
554 QED_LINK_PARTNER_SPEED_1G_HD
: 0;
555 p_link
->partner_adv_speed
|=
556 (status
& LINK_STATUS_LINK_PARTNER_10G_CAPABLE
) ?
557 QED_LINK_PARTNER_SPEED_10G
: 0;
558 p_link
->partner_adv_speed
|=
559 (status
& LINK_STATUS_LINK_PARTNER_20G_CAPABLE
) ?
560 QED_LINK_PARTNER_SPEED_20G
: 0;
561 p_link
->partner_adv_speed
|=
562 (status
& LINK_STATUS_LINK_PARTNER_40G_CAPABLE
) ?
563 QED_LINK_PARTNER_SPEED_40G
: 0;
564 p_link
->partner_adv_speed
|=
565 (status
& LINK_STATUS_LINK_PARTNER_50G_CAPABLE
) ?
566 QED_LINK_PARTNER_SPEED_50G
: 0;
567 p_link
->partner_adv_speed
|=
568 (status
& LINK_STATUS_LINK_PARTNER_100G_CAPABLE
) ?
569 QED_LINK_PARTNER_SPEED_100G
: 0;
571 p_link
->partner_tx_flow_ctrl_en
=
572 !!(status
& LINK_STATUS_TX_FLOW_CONTROL_ENABLED
);
573 p_link
->partner_rx_flow_ctrl_en
=
574 !!(status
& LINK_STATUS_RX_FLOW_CONTROL_ENABLED
);
576 switch (status
& LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK
) {
577 case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE
:
578 p_link
->partner_adv_pause
= QED_LINK_PARTNER_SYMMETRIC_PAUSE
;
580 case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE
:
581 p_link
->partner_adv_pause
= QED_LINK_PARTNER_ASYMMETRIC_PAUSE
;
583 case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE
:
584 p_link
->partner_adv_pause
= QED_LINK_PARTNER_BOTH_PAUSE
;
587 p_link
->partner_adv_pause
= 0;
590 p_link
->sfp_tx_fault
= !!(status
& LINK_STATUS_SFP_TX_FAULT
);
592 qed_link_update(p_hwfn
);
595 int qed_mcp_set_link(struct qed_hwfn
*p_hwfn
,
596 struct qed_ptt
*p_ptt
,
599 struct qed_mcp_link_params
*params
= &p_hwfn
->mcp_info
->link_input
;
600 struct qed_mcp_mb_params mb_params
;
601 union drv_union_data union_data
;
602 struct pmm_phy_cfg
*phy_cfg
;
606 /* Set the shmem configuration according to params */
607 phy_cfg
= &union_data
.drv_phy_cfg
;
608 memset(phy_cfg
, 0, sizeof(*phy_cfg
));
609 cmd
= b_up
? DRV_MSG_CODE_INIT_PHY
: DRV_MSG_CODE_LINK_RESET
;
610 if (!params
->speed
.autoneg
)
611 phy_cfg
->speed
= params
->speed
.forced_speed
;
612 phy_cfg
->pause
|= (params
->pause
.autoneg
) ? PMM_PAUSE_AUTONEG
: 0;
613 phy_cfg
->pause
|= (params
->pause
.forced_rx
) ? PMM_PAUSE_RX
: 0;
614 phy_cfg
->pause
|= (params
->pause
.forced_tx
) ? PMM_PAUSE_TX
: 0;
615 phy_cfg
->adv_speed
= params
->speed
.advertised_speeds
;
616 phy_cfg
->loopback_mode
= params
->loopback_mode
;
618 p_hwfn
->b_drv_link_init
= b_up
;
621 DP_VERBOSE(p_hwfn
, NETIF_MSG_LINK
,
622 "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
626 phy_cfg
->loopback_mode
,
627 phy_cfg
->feature_config_flags
);
629 DP_VERBOSE(p_hwfn
, NETIF_MSG_LINK
,
633 memset(&mb_params
, 0, sizeof(mb_params
));
635 mb_params
.p_data_src
= &union_data
;
636 rc
= qed_mcp_cmd_and_union(p_hwfn
, p_ptt
, &mb_params
);
638 /* if mcp fails to respond we must abort */
640 DP_ERR(p_hwfn
, "MCP response failure, aborting\n");
644 /* Reset the link status if needed */
646 qed_mcp_handle_link_change(p_hwfn
, p_ptt
, true);
651 int qed_mcp_handle_events(struct qed_hwfn
*p_hwfn
,
652 struct qed_ptt
*p_ptt
)
654 struct qed_mcp_info
*info
= p_hwfn
->mcp_info
;
659 DP_VERBOSE(p_hwfn
, QED_MSG_SP
, "Received message from MFW\n");
661 /* Read Messages from MFW */
662 qed_mcp_read_mb(p_hwfn
, p_ptt
);
664 /* Compare current messages to old ones */
665 for (i
= 0; i
< info
->mfw_mb_length
; i
++) {
666 if (info
->mfw_mb_cur
[i
] == info
->mfw_mb_shadow
[i
])
671 DP_VERBOSE(p_hwfn
, NETIF_MSG_LINK
,
672 "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
673 i
, info
->mfw_mb_shadow
[i
], info
->mfw_mb_cur
[i
]);
676 case MFW_DRV_MSG_LINK_CHANGE
:
677 qed_mcp_handle_link_change(p_hwfn
, p_ptt
, false);
679 case MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE
:
680 qed_mcp_handle_transceiver_change(p_hwfn
, p_ptt
);
683 DP_NOTICE(p_hwfn
, "Unimplemented MFW message %d\n", i
);
689 for (i
= 0; i
< MFW_DRV_MSG_MAX_DWORDS(info
->mfw_mb_length
); i
++) {
690 __be32 val
= cpu_to_be32(((u32
*)info
->mfw_mb_cur
)[i
]);
692 /* MFW expect answer in BE, so we force write in that format */
693 qed_wr(p_hwfn
, p_ptt
,
694 info
->mfw_mb_addr
+ sizeof(u32
) +
695 MFW_DRV_MSG_MAX_DWORDS(info
->mfw_mb_length
) *
696 sizeof(u32
) + i
* sizeof(u32
),
702 "Received an MFW message indication but no new message!\n");
706 /* Copy the new mfw messages into the shadow */
707 memcpy(info
->mfw_mb_shadow
, info
->mfw_mb_cur
, info
->mfw_mb_length
);
712 int qed_mcp_get_mfw_ver(struct qed_dev
*cdev
,
715 struct qed_hwfn
*p_hwfn
= &cdev
->hwfns
[0];
716 struct qed_ptt
*p_ptt
;
719 p_ptt
= qed_ptt_acquire(p_hwfn
);
723 global_offsize
= qed_rd(p_hwfn
, p_ptt
,
724 SECTION_OFFSIZE_ADDR(p_hwfn
->mcp_info
->
727 *p_mfw_ver
= qed_rd(p_hwfn
, p_ptt
,
728 SECTION_ADDR(global_offsize
, 0) +
729 offsetof(struct public_global
, mfw_ver
));
731 qed_ptt_release(p_hwfn
, p_ptt
);
736 int qed_mcp_get_media_type(struct qed_dev
*cdev
,
739 struct qed_hwfn
*p_hwfn
= &cdev
->hwfns
[0];
740 struct qed_ptt
*p_ptt
;
742 if (!qed_mcp_is_init(p_hwfn
)) {
743 DP_NOTICE(p_hwfn
, "MFW is not initialized !\n");
747 *p_media_type
= MEDIA_UNSPECIFIED
;
749 p_ptt
= qed_ptt_acquire(p_hwfn
);
753 *p_media_type
= qed_rd(p_hwfn
, p_ptt
, p_hwfn
->mcp_info
->port_addr
+
754 offsetof(struct public_port
, media_type
));
756 qed_ptt_release(p_hwfn
, p_ptt
);
761 static u32
qed_mcp_get_shmem_func(struct qed_hwfn
*p_hwfn
,
762 struct qed_ptt
*p_ptt
,
763 struct public_func
*p_data
,
766 u32 addr
= SECTION_OFFSIZE_ADDR(p_hwfn
->mcp_info
->public_base
,
768 u32 mfw_path_offsize
= qed_rd(p_hwfn
, p_ptt
, addr
);
769 u32 func_addr
= SECTION_ADDR(mfw_path_offsize
, pfid
);
772 memset(p_data
, 0, sizeof(*p_data
));
774 size
= min_t(u32
, sizeof(*p_data
),
775 QED_SECTION_SIZE(mfw_path_offsize
));
776 for (i
= 0; i
< size
/ sizeof(u32
); i
++)
777 ((u32
*)p_data
)[i
] = qed_rd(p_hwfn
, p_ptt
,
778 func_addr
+ (i
<< 2));
784 qed_mcp_get_shmem_proto(struct qed_hwfn
*p_hwfn
,
785 struct public_func
*p_info
,
786 enum qed_pci_personality
*p_proto
)
790 switch (p_info
->config
& FUNC_MF_CFG_PROTOCOL_MASK
) {
791 case FUNC_MF_CFG_PROTOCOL_ETHERNET
:
792 *p_proto
= QED_PCI_ETH
;
801 int qed_mcp_fill_shmem_func_info(struct qed_hwfn
*p_hwfn
,
802 struct qed_ptt
*p_ptt
)
804 struct qed_mcp_function_info
*info
;
805 struct public_func shmem_info
;
807 qed_mcp_get_shmem_func(p_hwfn
, p_ptt
, &shmem_info
,
809 info
= &p_hwfn
->mcp_info
->func_info
;
811 info
->pause_on_host
= (shmem_info
.config
&
812 FUNC_MF_CFG_PAUSE_ON_HOST_RING
) ? 1 : 0;
814 if (qed_mcp_get_shmem_proto(p_hwfn
, &shmem_info
,
816 DP_ERR(p_hwfn
, "Unknown personality %08x\n",
817 (u32
)(shmem_info
.config
& FUNC_MF_CFG_PROTOCOL_MASK
));
822 info
->bandwidth_min
= (shmem_info
.config
&
823 FUNC_MF_CFG_MIN_BW_MASK
) >>
824 FUNC_MF_CFG_MIN_BW_SHIFT
;
825 if (info
->bandwidth_min
< 1 || info
->bandwidth_min
> 100) {
827 "bandwidth minimum out of bounds [%02x]. Set to 1\n",
828 info
->bandwidth_min
);
829 info
->bandwidth_min
= 1;
832 info
->bandwidth_max
= (shmem_info
.config
&
833 FUNC_MF_CFG_MAX_BW_MASK
) >>
834 FUNC_MF_CFG_MAX_BW_SHIFT
;
835 if (info
->bandwidth_max
< 1 || info
->bandwidth_max
> 100) {
837 "bandwidth maximum out of bounds [%02x]. Set to 100\n",
838 info
->bandwidth_max
);
839 info
->bandwidth_max
= 100;
842 if (shmem_info
.mac_upper
|| shmem_info
.mac_lower
) {
843 info
->mac
[0] = (u8
)(shmem_info
.mac_upper
>> 8);
844 info
->mac
[1] = (u8
)(shmem_info
.mac_upper
);
845 info
->mac
[2] = (u8
)(shmem_info
.mac_lower
>> 24);
846 info
->mac
[3] = (u8
)(shmem_info
.mac_lower
>> 16);
847 info
->mac
[4] = (u8
)(shmem_info
.mac_lower
>> 8);
848 info
->mac
[5] = (u8
)(shmem_info
.mac_lower
);
850 DP_NOTICE(p_hwfn
, "MAC is 0 in shmem\n");
853 info
->wwn_port
= (u64
)shmem_info
.fcoe_wwn_port_name_upper
|
854 (((u64
)shmem_info
.fcoe_wwn_port_name_lower
) << 32);
855 info
->wwn_node
= (u64
)shmem_info
.fcoe_wwn_node_name_upper
|
856 (((u64
)shmem_info
.fcoe_wwn_node_name_lower
) << 32);
858 info
->ovlan
= (u16
)(shmem_info
.ovlan_stag
& FUNC_MF_CFG_OV_STAG_MASK
);
860 DP_VERBOSE(p_hwfn
, (QED_MSG_SP
| NETIF_MSG_IFUP
),
861 "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x\n",
862 info
->pause_on_host
, info
->protocol
,
863 info
->bandwidth_min
, info
->bandwidth_max
,
864 info
->mac
[0], info
->mac
[1], info
->mac
[2],
865 info
->mac
[3], info
->mac
[4], info
->mac
[5],
866 info
->wwn_port
, info
->wwn_node
, info
->ovlan
);
871 struct qed_mcp_link_params
872 *qed_mcp_get_link_params(struct qed_hwfn
*p_hwfn
)
874 if (!p_hwfn
|| !p_hwfn
->mcp_info
)
876 return &p_hwfn
->mcp_info
->link_input
;
879 struct qed_mcp_link_state
880 *qed_mcp_get_link_state(struct qed_hwfn
*p_hwfn
)
882 if (!p_hwfn
|| !p_hwfn
->mcp_info
)
884 return &p_hwfn
->mcp_info
->link_output
;
887 struct qed_mcp_link_capabilities
888 *qed_mcp_get_link_capabilities(struct qed_hwfn
*p_hwfn
)
890 if (!p_hwfn
|| !p_hwfn
->mcp_info
)
892 return &p_hwfn
->mcp_info
->link_capabilities
;
895 int qed_mcp_drain(struct qed_hwfn
*p_hwfn
,
896 struct qed_ptt
*p_ptt
)
898 u32 resp
= 0, param
= 0;
901 rc
= qed_mcp_cmd(p_hwfn
, p_ptt
,
902 DRV_MSG_CODE_NIG_DRAIN
, 1000,
905 /* Wait for the drain to complete before returning */
911 int qed_mcp_get_flash_size(struct qed_hwfn
*p_hwfn
,
912 struct qed_ptt
*p_ptt
,
917 flash_size
= qed_rd(p_hwfn
, p_ptt
, MCP_REG_NVM_CFG4
);
918 flash_size
= (flash_size
& MCP_REG_NVM_CFG4_FLASH_SIZE
) >>
919 MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT
;
920 flash_size
= (1 << (flash_size
+ MCP_BYTES_PER_MBIT_SHIFT
));
922 *p_flash_size
= flash_size
;
928 qed_mcp_send_drv_version(struct qed_hwfn
*p_hwfn
,
929 struct qed_ptt
*p_ptt
,
930 struct qed_mcp_drv_version
*p_ver
)
932 struct drv_version_stc
*p_drv_version
;
933 struct qed_mcp_mb_params mb_params
;
934 union drv_union_data union_data
;
939 p_drv_version
= &union_data
.drv_version
;
940 p_drv_version
->version
= p_ver
->version
;
941 for (i
= 0; i
< MCP_DRV_VER_STR_SIZE
- 1; i
+= 4) {
942 val
= cpu_to_be32(p_ver
->name
[i
]);
943 *(u32
*)&p_drv_version
->name
[i
* sizeof(u32
)] = val
;
946 memset(&mb_params
, 0, sizeof(mb_params
));
947 mb_params
.cmd
= DRV_MSG_CODE_SET_VERSION
;
948 mb_params
.p_data_src
= &union_data
;
949 rc
= qed_mcp_cmd_and_union(p_hwfn
, p_ptt
, &mb_params
);
951 DP_ERR(p_hwfn
, "MCP response failure, aborting\n");
956 int qed_mcp_set_led(struct qed_hwfn
*p_hwfn
, struct qed_ptt
*p_ptt
,
957 enum qed_led_mode mode
)
959 u32 resp
= 0, param
= 0, drv_mb_param
;
963 case QED_LED_MODE_ON
:
964 drv_mb_param
= DRV_MB_PARAM_SET_LED_MODE_ON
;
966 case QED_LED_MODE_OFF
:
967 drv_mb_param
= DRV_MB_PARAM_SET_LED_MODE_OFF
;
969 case QED_LED_MODE_RESTORE
:
970 drv_mb_param
= DRV_MB_PARAM_SET_LED_MODE_OPER
;
973 DP_NOTICE(p_hwfn
, "Invalid LED mode %d\n", mode
);
977 rc
= qed_mcp_cmd(p_hwfn
, p_ptt
, DRV_MSG_CODE_SET_LED_MODE
,
978 drv_mb_param
, &resp
, ¶m
);