/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/types.h>
#include <asm/byteorder.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/string.h>
#include "qed.h"
#include "qed_hsi.h"
#include "qed_hw.h"
#include "qed_mcp.h"
#include "qed_reg_addr.h"

#define CHIP_MCP_RESP_ITER_US	10

#define QED_DRV_MB_MAX_RETRIES	(500 * 1000)	/* Account for 5 sec */
#define QED_MCP_RESET_RETRIES	(50 * 1000)	/* Account for 500 msec */

#define DRV_INNER_WR(_p_hwfn, _p_ptt, _ptr, _offset, _val)	     \
	qed_wr(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset), \
	       _val)

#define DRV_INNER_RD(_p_hwfn, _p_ptt, _ptr, _offset) \
	qed_rd(_p_hwfn, _p_ptt, (_p_hwfn->mcp_info->_ptr + _offset))

#define DRV_MB_WR(_p_hwfn, _p_ptt, _field, _val)   \
	DRV_INNER_WR(_p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field), _val)

#define DRV_MB_RD(_p_hwfn, _p_ptt, _field)	   \
	DRV_INNER_RD(_p_hwfn, _p_ptt, drv_mb_addr, \
		     offsetof(struct public_drv_mb, _field))

#define PDA_COMP (((FW_MAJOR_VERSION) + (FW_MINOR_VERSION << 8)) << \
		  DRV_ID_PDA_COMP_VER_SHIFT)

#define MCP_BYTES_PER_MBIT_SHIFT 17

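/* The driver mailbox lives in device shared memory (shmem); its base address
 * is cached in mcp_info->drv_mb_addr. DRV_MB_WR()/DRV_MB_RD() access
 * individual fields of struct public_drv_mb relative to that base -
 * drv_mb_header, drv_mb_param, fw_mb_header, fw_mb_param, union_data - which
 * together implement the driver <-> management FW (MFW) command/response
 * handshake used throughout this file.
 */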
bool qed_mcp_is_init(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn->mcp_info || !p_hwfn->mcp_info->public_base)
		return false;
	return true;
}

void qed_mcp_cmd_port_init(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_PORT);
	u32 mfw_mb_offsize = qed_rd(p_hwfn, p_ptt, addr);

	p_hwfn->mcp_info->port_addr = SECTION_ADDR(mfw_mb_offsize,
						   MFW_PORT(p_hwfn));
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "port_addr = 0x%x, port_id 0x%02x\n",
		   p_hwfn->mcp_info->port_addr, MFW_PORT(p_hwfn));
}

void qed_mcp_read_mb(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt)
{
	u32 length = MFW_DRV_MSG_MAX_DWORDS(p_hwfn->mcp_info->mfw_mb_length);
	u32 tmp, i;

	if (!p_hwfn->mcp_info->public_base)
		return;

	for (i = 0; i < length; i++) {
		tmp = qed_rd(p_hwfn, p_ptt,
			     p_hwfn->mcp_info->mfw_mb_addr +
			     (i << 2) + sizeof(u32));

		/* The MB data is actually BE; Need to force it to cpu */
		((u32 *)p_hwfn->mcp_info->mfw_mb_cur)[i] =
			be32_to_cpu((__force __be32)tmp);
	}
}

int qed_mcp_free(struct qed_hwfn *p_hwfn)
{
	if (p_hwfn->mcp_info) {
		kfree(p_hwfn->mcp_info->mfw_mb_cur);
		kfree(p_hwfn->mcp_info->mfw_mb_shadow);
	}
	kfree(p_hwfn->mcp_info);

	return 0;
}

static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info = p_hwfn->mcp_info;
	u32 drv_mb_offsize, mfw_mb_offsize;
	u32 mcp_pf_id = MCP_PF_ID(p_hwfn);

	p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
	if (!p_info->public_base)
		return 0;

	p_info->public_base |= GRCBASE_MCP;

	/* Calculate the driver and MFW mailbox address */
	drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_DRV_MB));
	p_info->drv_mb_addr = SECTION_ADDR(drv_mb_offsize, mcp_pf_id);
	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "drv_mb_offsize = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
		   drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);

	/* Set the MFW MB address */
	mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_info->public_base,
						     PUBLIC_MFW_MB));
	p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
	p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);

	/* Get the current driver mailbox sequence before sending
	 * the first command
	 */
	p_info->drv_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			     DRV_MSG_SEQ_NUMBER_MASK;

	/* Get current FW pulse sequence */
	p_info->drv_pulse_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_pulse_mb) &
				DRV_PULSE_SEQ_MASK;

	p_info->mcp_hist = (u16)qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);

	return 0;
}

int qed_mcp_cmd_init(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *p_info;
	u32 size;

	/* Allocate mcp_info structure */
	p_hwfn->mcp_info = kzalloc(sizeof(*p_hwfn->mcp_info), GFP_ATOMIC);
	if (!p_hwfn->mcp_info)
		goto err;
	p_info = p_hwfn->mcp_info;

	if (qed_load_mcp_offsets(p_hwfn, p_ptt) != 0) {
		DP_NOTICE(p_hwfn, "MCP is not initialized\n");
		/* Do not free mcp_info here, since public_base indicates that
		 * the MCP is not initialized
		 */
		return 0;
	}

	size = MFW_DRV_MSG_MAX_DWORDS(p_info->mfw_mb_length) * sizeof(u32);
	p_info->mfw_mb_cur = kzalloc(size, GFP_ATOMIC);
	p_info->mfw_mb_shadow =
		kzalloc(sizeof(u32) * MFW_DRV_MSG_MAX_DWORDS(
				p_info->mfw_mb_length), GFP_ATOMIC);
	if (!p_info->mfw_mb_cur || !p_info->mfw_mb_shadow)
		goto err;

	/* Initialize the MFW mutex */
	mutex_init(&p_info->mutex);

	return 0;

err:
	DP_NOTICE(p_hwfn, "Failed to allocate mcp memory\n");
	qed_mcp_free(p_hwfn);
	return -ENOMEM;
}

int qed_mcp_reset(struct qed_hwfn *p_hwfn,
		  struct qed_ptt *p_ptt)
{
	u32 seq = ++p_hwfn->mcp_info->drv_mb_seq;
	u8 delay = CHIP_MCP_RESP_ITER_US;
	u32 org_mcp_reset_seq, cnt = 0;
	int rc = 0;

	/* Set drv command along with the updated sequence */
	org_mcp_reset_seq = qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0);
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header,
		  (DRV_MSG_CODE_MCP_RESET | seq));

	do {
		/* Wait for MFW response */
		udelay(delay);
		/* Give the FW up to 500 msec (50 * 1000 * 10 usec) */
	} while ((org_mcp_reset_seq == qed_rd(p_hwfn, p_ptt,
					      MISCS_REG_GENERIC_POR_0)) &&
		 (cnt++ < QED_MCP_RESET_RETRIES));

	if (org_mcp_reset_seq !=
	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP,
			   "MCP was reset after %d usec\n", cnt * delay);
	} else {
		DP_ERR(p_hwfn, "Failed to reset MCP\n");
		rc = -EAGAIN;
	}

	return rc;
}

static int qed_do_mcp_cmd(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt,
			  u32 cmd,
			  u32 param,
			  u32 *o_mcp_resp,
			  u32 *o_mcp_param)
{
	u8 delay = CHIP_MCP_RESP_ITER_US;
	u32 seq, cnt = 1, actual_mb_seq;
	int rc = 0;

	/* Get actual driver mailbox sequence */
	actual_mb_seq = DRV_MB_RD(p_hwfn, p_ptt, drv_mb_header) &
			DRV_MSG_SEQ_NUMBER_MASK;

	/* Use MCP history register to check if MCP reset occurred between
	 * init time and now.
	 */
	if (p_hwfn->mcp_info->mcp_hist !=
	    qed_rd(p_hwfn, p_ptt, MISCS_REG_GENERIC_POR_0)) {
		DP_VERBOSE(p_hwfn, QED_MSG_SP, "Rereading MCP offsets\n");
		qed_load_mcp_offsets(p_hwfn, p_ptt);
		qed_mcp_cmd_port_init(p_hwfn, p_ptt);
	}
	seq = ++p_hwfn->mcp_info->drv_mb_seq;

	/* Set drv param */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_param, param);

	/* Set drv command along with the updated sequence */
	DRV_MB_WR(p_hwfn, p_ptt, drv_mb_header, (cmd | seq));

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "wrote command (%x) to MFW MB param 0x%08x\n",
		   (cmd | seq), param);

	do {
		/* Wait for MFW response */
		udelay(delay);
		*o_mcp_resp = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_header);

		/* Give the FW up to 5 seconds (500 * 1000 * 10 usec) */
	} while ((seq != (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) &&
		 (cnt++ < QED_DRV_MB_MAX_RETRIES));

	DP_VERBOSE(p_hwfn, QED_MSG_SP,
		   "[after %d usec] read (%x) seq is (%x) from FW MB\n",
		   cnt * delay, *o_mcp_resp, seq);

	/* Is this a reply to our command? */
	if (seq == (*o_mcp_resp & FW_MSG_SEQ_NUMBER_MASK)) {
		*o_mcp_resp &= FW_MSG_CODE_MASK;
		/* Get the MCP param */
		*o_mcp_param = DRV_MB_RD(p_hwfn, p_ptt, fw_mb_param);
	} else {
		DP_ERR(p_hwfn, "MFW failed to respond!\n");
		*o_mcp_resp = 0;
		rc = -EAGAIN;
	}

	return rc;
}

int qed_mcp_cmd(struct qed_hwfn *p_hwfn,
		struct qed_ptt *p_ptt,
		u32 cmd,
		u32 param,
		u32 *o_mcp_resp,
		u32 *o_mcp_param)
{
	int rc = 0;

	/* MCP not initialized */
	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
		return -EBUSY;
	}

	/* Lock Mutex to ensure only single thread is
	 * accessing the MCP at one time
	 */
	mutex_lock(&p_hwfn->mcp_info->mutex);
	rc = qed_do_mcp_cmd(p_hwfn, p_ptt, cmd, param,
			    o_mcp_resp, o_mcp_param);
	mutex_unlock(&p_hwfn->mcp_info->mutex);

	return rc;
}

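/* Usage sketch (illustrative only; it mirrors qed_mcp_drain() further below
 * rather than adding a new call site): a caller that already owns a PTT
 * window issues a command and checks both the return code and the response:
 *
 *	u32 resp = 0, param = 0;
 *	int rc;
 *
 *	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_NIG_DRAIN, 100,
 *			 &resp, &param);
 *	if (rc)
 *		return rc;	// MFW failed to respond
 */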
static void qed_mcp_set_drv_ver(struct qed_dev *cdev,
				struct qed_hwfn *p_hwfn,
				struct qed_ptt *p_ptt)
{
	u32 i;

	/* Copy version string to MCP */
	for (i = 0; i < MCP_DRV_VER_STR_SIZE_DWORD; i++)
		DRV_MB_WR(p_hwfn, p_ptt, union_data.ver_str[i],
			  *(u32 *)&cdev->ver_str[i * sizeof(u32)]);
}

int qed_mcp_load_req(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt,
		     u32 *p_load_code)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u32 param;
	int rc;

	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
		return -EBUSY;
	}

	/* Save driver's version to shmem */
	qed_mcp_set_drv_ver(cdev, p_hwfn, p_ptt);

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n",
		   p_hwfn->mcp_info->drv_mb_seq,
		   p_hwfn->mcp_info->drv_pulse_seq);

	/* Load Request */
	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_LOAD_REQ,
			 (PDA_COMP | DRV_ID_MCP_HSI_VER_CURRENT |
			  cdev->drv_type),
			 p_load_code, &param);

	/* if mcp fails to respond we must abort */
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* If MFW refused (e.g. other port is in diagnostic mode) we
	 * must abort. This can happen in the following cases:
	 * - Other port is in diagnostic mode
	 * - Previously loaded function on the engine is not compliant with
	 *   the requester.
	 * - MFW cannot cope with the requester's DRV_MFW_HSI_VERSION.
	 */
	if (!(*p_load_code) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_HSI) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_PDA) ||
	    ((*p_load_code) == FW_MSG_CODE_DRV_LOAD_REFUSED_DIAG)) {
		DP_ERR(p_hwfn, "MCP refused load request, aborting\n");
		return -EBUSY;
	}

	return 0;
}

static void qed_mcp_handle_link_change(struct qed_hwfn *p_hwfn,
				       struct qed_ptt *p_ptt,
				       bool b_reset)
{
	struct qed_mcp_link_state *p_link;
	u32 status = 0;

	p_link = &p_hwfn->mcp_info->link_output;
	memset(p_link, 0, sizeof(*p_link));
	if (!b_reset) {
		status = qed_rd(p_hwfn, p_ptt,
				p_hwfn->mcp_info->port_addr +
				offsetof(struct public_port, link_status));
		DP_VERBOSE(p_hwfn, (NETIF_MSG_LINK | QED_MSG_SP),
			   "Received link update [0x%08x] from mfw [Addr 0x%x]\n",
			   status,
			   (u32)(p_hwfn->mcp_info->port_addr +
				 offsetof(struct public_port,
					  link_status)));
	} else {
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link indications\n");
		return;
	}

	p_link->link_up = !!(status & LINK_STATUS_LINK_UP);

	p_link->full_duplex = true;
	switch ((status & LINK_STATUS_SPEED_AND_DUPLEX_MASK)) {
	case LINK_STATUS_SPEED_AND_DUPLEX_100G:
		p_link->speed = 100000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_50G:
		p_link->speed = 50000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_40G:
		p_link->speed = 40000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_25G:
		p_link->speed = 25000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_20G:
		p_link->speed = 20000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_10G:
		p_link->speed = 10000;
		break;
	case LINK_STATUS_SPEED_AND_DUPLEX_1000THD:
		p_link->full_duplex = false;
		/* Fall-through */
	case LINK_STATUS_SPEED_AND_DUPLEX_1000TFD:
		p_link->speed = 1000;
		break;
	default:
		p_link->speed = 0;
	}

	/* Correct speed according to bandwidth allocation */
	if (p_hwfn->mcp_info->func_info.bandwidth_max && p_link->speed) {
		p_link->speed = p_link->speed *
				p_hwfn->mcp_info->func_info.bandwidth_max /
				100;
		qed_init_pf_rl(p_hwfn, p_ptt, p_hwfn->rel_pf_id,
			       p_link->speed);
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Configured MAX bandwidth to be %08x Mb/sec\n",
			   p_link->speed);
	}

	p_link->an = !!(status & LINK_STATUS_AUTO_NEGOTIATE_ENABLED);
	p_link->an_complete = !!(status &
				 LINK_STATUS_AUTO_NEGOTIATE_COMPLETE);
	p_link->parallel_detection = !!(status &
					LINK_STATUS_PARALLEL_DETECTION_USED);
	p_link->pfc_enabled = !!(status & LINK_STATUS_PFC_ENABLED);

	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000TFD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_FD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_1000THD_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_1G_HD : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_10G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_10G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_20G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_20G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_40G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_40G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_50G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_50G : 0;
	p_link->partner_adv_speed |=
		(status & LINK_STATUS_LINK_PARTNER_100G_CAPABLE) ?
		QED_LINK_PARTNER_SPEED_100G : 0;

	p_link->partner_tx_flow_ctrl_en =
		!!(status & LINK_STATUS_TX_FLOW_CONTROL_ENABLED);
	p_link->partner_rx_flow_ctrl_en =
		!!(status & LINK_STATUS_RX_FLOW_CONTROL_ENABLED);

	switch (status & LINK_STATUS_LINK_PARTNER_FLOW_CONTROL_MASK) {
	case LINK_STATUS_LINK_PARTNER_SYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_SYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_ASYMMETRIC_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_ASYMMETRIC_PAUSE;
		break;
	case LINK_STATUS_LINK_PARTNER_BOTH_PAUSE:
		p_link->partner_adv_pause = QED_LINK_PARTNER_BOTH_PAUSE;
		break;
	default:
		p_link->partner_adv_pause = 0;
	}

	p_link->sfp_tx_fault = !!(status & LINK_STATUS_SFP_TX_FAULT);

	qed_link_update(p_hwfn);
}

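/* qed_mcp_set_link() below builds a pmm_phy_cfg from the cached link_input
 * parameters, copies it dword by dword into the union_data area of the
 * driver mailbox, and then issues either DRV_MSG_CODE_INIT_PHY or
 * DRV_MSG_CODE_LINK_RESET depending on whether the link is being brought up.
 */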
int qed_mcp_set_link(struct qed_hwfn *p_hwfn,
		     struct qed_ptt *p_ptt,
		     bool b_up)
{
	struct qed_mcp_link_params *params = &p_hwfn->mcp_info->link_input;
	u32 param = 0, reply = 0, cmd;
	struct pmm_phy_cfg phy_cfg;
	int rc = 0;
	u32 i;

	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
		return -EBUSY;
	}

	/* Set the shmem configuration according to params */
	memset(&phy_cfg, 0, sizeof(phy_cfg));
	cmd = b_up ? DRV_MSG_CODE_INIT_PHY : DRV_MSG_CODE_LINK_RESET;
	if (!params->speed.autoneg)
		phy_cfg.speed = params->speed.forced_speed;
	phy_cfg.pause |= (params->pause.autoneg) ? PMM_PAUSE_AUTONEG : 0;
	phy_cfg.pause |= (params->pause.forced_rx) ? PMM_PAUSE_RX : 0;
	phy_cfg.pause |= (params->pause.forced_tx) ? PMM_PAUSE_TX : 0;
	phy_cfg.adv_speed = params->speed.advertised_speeds;
	phy_cfg.loopback_mode = params->loopback_mode;

	/* Write the requested configuration to shmem */
	for (i = 0; i < sizeof(phy_cfg); i += 4)
		qed_wr(p_hwfn, p_ptt,
		       p_hwfn->mcp_info->drv_mb_addr +
		       offsetof(struct public_drv_mb, union_data) + i,
		       ((u32 *)&phy_cfg)[i >> 2]);

	if (b_up)
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Configuring Link: Speed 0x%08x, Pause 0x%08x, adv_speed 0x%08x, loopback 0x%08x, features 0x%08x\n",
			   phy_cfg.speed,
			   phy_cfg.pause,
			   phy_cfg.adv_speed,
			   phy_cfg.loopback_mode,
			   phy_cfg.feature_config_flags);
	else
		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Resetting link\n");

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "fw_seq 0x%08x, drv_pulse 0x%x\n",
		   p_hwfn->mcp_info->drv_mb_seq,
		   p_hwfn->mcp_info->drv_pulse_seq);

	rc = qed_mcp_cmd(p_hwfn, p_ptt, cmd, 0, &reply, &param);

	/* if mcp fails to respond we must abort */
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	/* Reset the link status if needed */
	if (!b_up)
		qed_mcp_handle_link_change(p_hwfn, p_ptt, true);

	return 0;
}

int qed_mcp_handle_events(struct qed_hwfn *p_hwfn,
			  struct qed_ptt *p_ptt)
{
	struct qed_mcp_info *info = p_hwfn->mcp_info;
	bool found = false;
	int rc = 0;
	u16 i;

	DP_VERBOSE(p_hwfn, QED_MSG_SP, "Received message from MFW\n");

	/* Read Messages from MFW */
	qed_mcp_read_mb(p_hwfn, p_ptt);

	/* Compare current messages to old ones */
	for (i = 0; i < info->mfw_mb_length; i++) {
		if (info->mfw_mb_cur[i] == info->mfw_mb_shadow[i])
			continue;

		found = true;

		DP_VERBOSE(p_hwfn, NETIF_MSG_LINK,
			   "Msg [%d] - old CMD 0x%02x, new CMD 0x%02x\n",
			   i, info->mfw_mb_shadow[i], info->mfw_mb_cur[i]);

		switch (i) {
		case MFW_DRV_MSG_LINK_CHANGE:
			qed_mcp_handle_link_change(p_hwfn, p_ptt, false);
			break;
		default:
			DP_NOTICE(p_hwfn, "Unimplemented MFW message %d\n", i);
			rc = -EINVAL;
		}
	}

	/* ACK everything */
	for (i = 0; i < MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length); i++) {
		__be32 val = cpu_to_be32(((u32 *)info->mfw_mb_cur)[i]);

		/* MFW expects answer in BE, so we force write in that format */
		qed_wr(p_hwfn, p_ptt,
		       info->mfw_mb_addr + sizeof(u32) +
		       MFW_DRV_MSG_MAX_DWORDS(info->mfw_mb_length) *
		       sizeof(u32) + i * sizeof(u32),
		       (__force u32)val);
	}

	if (!found) {
		DP_NOTICE(p_hwfn,
			  "Received an MFW message indication but no new message!\n");
		rc = -EINVAL;
	}

	/* Copy the new mfw messages into the shadow */
	memcpy(info->mfw_mb_shadow, info->mfw_mb_cur, info->mfw_mb_length);

	return rc;
}

int qed_mcp_get_mfw_ver(struct qed_dev *cdev,
			u32 *p_mfw_ver)
{
	struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
	struct qed_ptt *p_ptt;
	u32 global_offsize;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;

	global_offsize = qed_rd(p_hwfn, p_ptt,
				SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->
						     public_base,
						     PUBLIC_GLOBAL));
	*p_mfw_ver = qed_rd(p_hwfn, p_ptt,
			    SECTION_ADDR(global_offsize, 0) +
			    offsetof(struct public_global, mfw_ver));

	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}

int qed_mcp_get_media_type(struct qed_dev *cdev,
			   u32 *p_media_type)
{
	struct qed_hwfn *p_hwfn = &cdev->hwfns[0];
	struct qed_ptt *p_ptt;

	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
		return -EBUSY;
	}

	*p_media_type = MEDIA_UNSPECIFIED;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EBUSY;

	*p_media_type = qed_rd(p_hwfn, p_ptt, p_hwfn->mcp_info->port_addr +
			       offsetof(struct public_port, media_type));

	qed_ptt_release(p_hwfn, p_ptt);

	return 0;
}

static u32 qed_mcp_get_shmem_func(struct qed_hwfn *p_hwfn,
				  struct qed_ptt *p_ptt,
				  struct public_func *p_data,
				  int pfid)
{
	u32 addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
					PUBLIC_FUNC);
	u32 mfw_path_offsize = qed_rd(p_hwfn, p_ptt, addr);
	u32 func_addr = SECTION_ADDR(mfw_path_offsize, pfid);
	u32 i, size;

	memset(p_data, 0, sizeof(*p_data));

	size = min_t(u32, sizeof(*p_data),
		     QED_SECTION_SIZE(mfw_path_offsize));
	for (i = 0; i < size / sizeof(u32); i++)
		((u32 *)p_data)[i] = qed_rd(p_hwfn, p_ptt,
					    func_addr + (i << 2));

	return size;
}

static int
qed_mcp_get_shmem_proto(struct qed_hwfn *p_hwfn,
			struct public_func *p_info,
			enum qed_pci_personality *p_proto)
{
	int rc = 0;

	switch (p_info->config & FUNC_MF_CFG_PROTOCOL_MASK) {
	case FUNC_MF_CFG_PROTOCOL_ETHERNET:
		*p_proto = QED_PCI_ETH;
		break;
	default:
		rc = -EINVAL;
	}

	return rc;
}

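/* The MFW publishes the permanent MAC as two shmem words: mac_upper carries
 * bytes 0-1 and mac_lower carries bytes 2-5, most significant byte first.
 * For example (illustrative values only), mac_upper = 0x0000aabb and
 * mac_lower = 0xccddeeff decode to aa:bb:cc:dd:ee:ff, which is exactly the
 * shifting performed in qed_mcp_fill_shmem_func_info() below.
 */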
int qed_mcp_fill_shmem_func_info(struct qed_hwfn *p_hwfn,
				 struct qed_ptt *p_ptt)
{
	struct qed_mcp_function_info *info;
	struct public_func shmem_info;

	qed_mcp_get_shmem_func(p_hwfn, p_ptt, &shmem_info,
			       MCP_PF_ID(p_hwfn));
	info = &p_hwfn->mcp_info->func_info;

	info->pause_on_host = (shmem_info.config &
			       FUNC_MF_CFG_PAUSE_ON_HOST_RING) ? 1 : 0;

	if (qed_mcp_get_shmem_proto(p_hwfn, &shmem_info,
				    &info->protocol)) {
		DP_ERR(p_hwfn, "Unknown personality %08x\n",
		       (u32)(shmem_info.config & FUNC_MF_CFG_PROTOCOL_MASK));
		return -EINVAL;
	}

	if (p_hwfn->cdev->mf_mode != SF) {
		info->bandwidth_min = (shmem_info.config &
				       FUNC_MF_CFG_MIN_BW_MASK) >>
				      FUNC_MF_CFG_MIN_BW_SHIFT;
		if (info->bandwidth_min < 1 || info->bandwidth_min > 100) {
			DP_INFO(p_hwfn,
				"bandwidth minimum out of bounds [%02x]. Set to 1\n",
				info->bandwidth_min);
			info->bandwidth_min = 1;
		}

		info->bandwidth_max = (shmem_info.config &
				       FUNC_MF_CFG_MAX_BW_MASK) >>
				      FUNC_MF_CFG_MAX_BW_SHIFT;
		if (info->bandwidth_max < 1 || info->bandwidth_max > 100) {
			DP_INFO(p_hwfn,
				"bandwidth maximum out of bounds [%02x]. Set to 100\n",
				info->bandwidth_max);
			info->bandwidth_max = 100;
		}
	}

	if (shmem_info.mac_upper || shmem_info.mac_lower) {
		info->mac[0] = (u8)(shmem_info.mac_upper >> 8);
		info->mac[1] = (u8)(shmem_info.mac_upper);
		info->mac[2] = (u8)(shmem_info.mac_lower >> 24);
		info->mac[3] = (u8)(shmem_info.mac_lower >> 16);
		info->mac[4] = (u8)(shmem_info.mac_lower >> 8);
		info->mac[5] = (u8)(shmem_info.mac_lower);
	} else {
		DP_NOTICE(p_hwfn, "MAC is 0 in shmem\n");
	}

	info->wwn_port = (u64)shmem_info.fcoe_wwn_port_name_upper |
			 (((u64)shmem_info.fcoe_wwn_port_name_lower) << 32);
	info->wwn_node = (u64)shmem_info.fcoe_wwn_node_name_upper |
			 (((u64)shmem_info.fcoe_wwn_node_name_lower) << 32);

	info->ovlan = (u16)(shmem_info.ovlan_stag & FUNC_MF_CFG_OV_STAG_MASK);

	DP_VERBOSE(p_hwfn, (QED_MSG_SP | NETIF_MSG_IFUP),
		   "Read configuration from shmem: pause_on_host %02x protocol %02x BW [%02x - %02x] MAC %02x:%02x:%02x:%02x:%02x:%02x wwn port %llx node %llx ovlan %04x\n",
		   info->pause_on_host, info->protocol,
		   info->bandwidth_min, info->bandwidth_max,
		   info->mac[0], info->mac[1], info->mac[2],
		   info->mac[3], info->mac[4], info->mac[5],
		   info->wwn_port, info->wwn_node, info->ovlan);

	return 0;
}

struct qed_mcp_link_params
*qed_mcp_get_link_params(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_input;
}

struct qed_mcp_link_state
*qed_mcp_get_link_state(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_output;
}

struct qed_mcp_link_capabilities
*qed_mcp_get_link_capabilities(struct qed_hwfn *p_hwfn)
{
	if (!p_hwfn || !p_hwfn->mcp_info)
		return NULL;
	return &p_hwfn->mcp_info->link_capabilities;
}

int qed_mcp_drain(struct qed_hwfn *p_hwfn,
		  struct qed_ptt *p_ptt)
{
	u32 resp = 0, param = 0;
	int rc;

	rc = qed_mcp_cmd(p_hwfn, p_ptt,
			 DRV_MSG_CODE_NIG_DRAIN, 100,
			 &resp, &param);

	/* Wait for the drain to complete before returning */
	msleep(120);

	return rc;
}

int qed_mcp_get_flash_size(struct qed_hwfn *p_hwfn,
			   struct qed_ptt *p_ptt,
			   u32 *p_flash_size)
{
	u32 flash_size;

	flash_size = qed_rd(p_hwfn, p_ptt, MCP_REG_NVM_CFG4);
	flash_size = (flash_size & MCP_REG_NVM_CFG4_FLASH_SIZE) >>
		     MCP_REG_NVM_CFG4_FLASH_SIZE_SHIFT;
	flash_size = (1 << (flash_size + MCP_BYTES_PER_MBIT_SHIFT));

	*p_flash_size = flash_size;

	return 0;
}

int
qed_mcp_send_drv_version(struct qed_hwfn *p_hwfn,
			 struct qed_ptt *p_ptt,
			 struct qed_mcp_drv_version *p_ver)
{
	u32 param = 0, reply = 0, i;
	int rc = 0;

	if (!qed_mcp_is_init(p_hwfn)) {
		DP_NOTICE(p_hwfn, "MFW is not initialized !\n");
		return -EBUSY;
	}

	DRV_MB_WR(p_hwfn, p_ptt, union_data.drv_version.version,
		  p_ver->version);
	/* Copy version string to shmem */
	for (i = 0; i < (MCP_DRV_VER_STR_SIZE - 4) / 4; i++) {
		DRV_MB_WR(p_hwfn, p_ptt,
			  union_data.drv_version.name[i * sizeof(u32)],
			  *(u32 *)&p_ver->name[i * sizeof(u32)]);
	}

	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_SET_VERSION, 0, &reply,
			 &param);
	if (rc) {
		DP_ERR(p_hwfn, "MCP response failure, aborting\n");
		return rc;
	}

	return 0;
}