// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/crc32.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
#include <net/devlink.h>
#include <linux/phylink.h>
#include "qed_sriov.h"
#include "qed_dev_api.h"
#include "qed_iscsi.h"
#include "qed_reg_addr.h"
#include "qed_selftest.h"
#include "qed_debug.h"
#include "qed_devlink.h"
#define QED_ROCE_QPS			(8192)
#define QED_ROCE_DPIS			(8)
#define QED_RDMA_SRQS			QED_ROCE_QPS
#define QED_NVM_CFG_GET_FLAGS		0xA
#define QED_NVM_CFG_GET_PF_FLAGS	0x1A
#define QED_NVM_CFG_MAX_ATTRS		50
static char version[] =
	"QLogic FastLinQ 4xxxx Core Module qed\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");
#define FW_FILE_VERSION				\
	__stringify(FW_MAJOR_VERSION) "."	\
	__stringify(FW_MINOR_VERSION) "."	\
	__stringify(FW_REVISION_VERSION) "."	\
	__stringify(FW_ENGINEERING_VERSION)

#define QED_FW_FILE_NAME	\
	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"

MODULE_FIRMWARE(QED_FW_FILE_NAME);
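
/* With the version macros above, the requested firmware file name takes the
 * form "qed/qed_init_values_zipped-<major>.<minor>.<rev>.<eng>.bin" under the
 * firmware search path (e.g. /lib/firmware); the actual numbers come from the
 * FW_*_VERSION defines.
 */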

/* MFW speed capabilities maps */

struct qed_mfw_speed_map {
	u32		mfw_val;
	__ETHTOOL_DECLARE_LINK_MODE_MASK(caps);

	const u32	*cap_arr;
	u32		arr_size;
};

#define QED_MFW_SPEED_MAP(type, arr)		\
{						\
	.mfw_val	= (type),		\
	.cap_arr	= (arr),		\
	.arr_size	= ARRAY_SIZE(arr),	\
}
static const u32 qed_mfw_ext_1g[] __initconst = {
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
};

static const u32 qed_mfw_ext_10g[] __initconst = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
};

static const u32 qed_mfw_ext_25g[] __initconst = {
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

static const u32 qed_mfw_ext_40g[] __initconst = {
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
};

static const u32 qed_mfw_ext_50g_base_r[] __initconst = {
	ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
};

static const u32 qed_mfw_ext_50g_base_r2[] __initconst = {
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

static const u32 qed_mfw_ext_100g_base_r2[] __initconst = {
	ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
};

static const u32 qed_mfw_ext_100g_base_r4[] __initconst = {
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};
static struct qed_mfw_speed_map qed_mfw_ext_maps[] __ro_after_init = {
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_1G, qed_mfw_ext_1g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_10G, qed_mfw_ext_10g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_25G, qed_mfw_ext_25g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_40G, qed_mfw_ext_40g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R,
			  qed_mfw_ext_50g_base_r),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R2,
			  qed_mfw_ext_50g_base_r2),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R2,
			  qed_mfw_ext_100g_base_r2),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R4,
			  qed_mfw_ext_100g_base_r4),
};
static const u32 qed_mfw_legacy_1g[] __initconst = {
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
};

static const u32 qed_mfw_legacy_10g[] __initconst = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
};

static const u32 qed_mfw_legacy_20g[] __initconst = {
	ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
};

static const u32 qed_mfw_legacy_25g[] __initconst = {
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

static const u32 qed_mfw_legacy_40g[] __initconst = {
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
};

static const u32 qed_mfw_legacy_50g[] __initconst = {
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

static const u32 qed_mfw_legacy_bb_100g[] __initconst = {
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};
static struct qed_mfw_speed_map qed_mfw_legacy_maps[] __ro_after_init = {
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G,
			  qed_mfw_legacy_1g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G,
			  qed_mfw_legacy_10g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G,
			  qed_mfw_legacy_20g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G,
			  qed_mfw_legacy_25g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G,
			  qed_mfw_legacy_40g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G,
			  qed_mfw_legacy_50g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G,
			  qed_mfw_legacy_bb_100g),
};
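
/* The arrays above list ethtool link-mode bit numbers; they are expanded into
 * the linkmode bitmaps (struct qed_mfw_speed_map::caps) once at module init
 * by qed_mfw_speed_maps_init() below.
 */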
static void __init qed_mfw_speed_map_populate(struct qed_mfw_speed_map *map)
{
	linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps);
}

static void __init qed_mfw_speed_maps_init(void)
{
	u32 i;

	for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++)
		qed_mfw_speed_map_populate(qed_mfw_ext_maps + i);

	for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++)
		qed_mfw_speed_map_populate(qed_mfw_legacy_maps + i);
}
static int __init qed_init(void)
{
	pr_info("%s", version);

	qed_mfw_speed_maps_init();

	return 0;
}
module_init(qed_init);

static void __exit qed_exit(void)
{
	/* To prevent marking this module as "permanent" */
}
module_exit(qed_exit);
static void qed_free_pci(struct qed_dev *cdev)
{
	struct pci_dev *pdev = cdev->pdev;

	if (cdev->doorbells && cdev->db_size)
		iounmap(cdev->doorbells);
	iounmap(cdev->regview);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
}
#define PCI_REVISION_ID_ERROR_VAL	0xff

/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
	u8 rev_id;
	int rc;

	cdev->pdev = pdev;

	rc = pci_enable_device(pdev);
	if (rc) {
		DP_NOTICE(cdev, "Cannot enable PCI device\n");
		goto err0;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #0\n");
		rc = -EIO;
		goto err1;
	}

	if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #2\n");
		rc = -EIO;
		goto err1;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, "qed");
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to request PCI memory resources\n");
			goto err1;
		}
		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
	if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
		DP_NOTICE(cdev,
			  "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
			  rev_id);
		rc = -ENODEV;
		goto err2;
	}
	if (!pci_is_pcie(pdev)) {
		DP_NOTICE(cdev, "The bus is not PCI Express\n");
		rc = -EIO;
		goto err2;
	}

	if (IS_PF(cdev) && !pdev->pm_cap)
		DP_NOTICE(cdev, "Cannot find power management capability\n");

	rc = dma_set_mask_and_coherent(&cdev->pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		DP_NOTICE(cdev, "Can't request DMA addresses\n");
		rc = -EIO;
		goto err2;
	}

	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
	cdev->pci_params.irq = pdev->irq;

	cdev->regview = pci_ioremap_bar(pdev, 0);
	if (!cdev->regview) {
		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err2;
	}

	cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
	cdev->db_size = pci_resource_len(cdev->pdev, 2);
	if (!cdev->db_size) {
		if (IS_PF(cdev)) {
			DP_NOTICE(cdev, "No Doorbell bar available\n");
			return -EINVAL;
		}
	} else {
		cdev->doorbells = ioremap_wc(cdev->db_phys_addr,
					     cdev->db_size);

		if (!cdev->doorbells) {
			DP_NOTICE(cdev, "Cannot map doorbell space\n");
			return -ENOMEM;
		}
	}

	return 0;

err2:
	pci_release_regions(pdev);
err1:
	pci_disable_device(pdev);
err0:
	return rc;
}
int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_hw_info *hw_info = &p_hwfn->hw_info;
	struct qed_tunnel_info *tun = &cdev->tunnel;
	struct qed_ptt *ptt;

	memset(dev_info, 0, sizeof(struct qed_dev_info));

	if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->vxlan.b_mode_enabled)
		dev_info->vxlan_enable = true;

	if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
	    tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->gre_enable = true;

	if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
	    tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->geneve_enable = true;

	dev_info->num_hwfns = cdev->num_hwfns;
	dev_info->pci_mem_start = cdev->pci_params.mem_start;
	dev_info->pci_mem_end = cdev->pci_params.mem_end;
	dev_info->pci_irq = cdev->pci_params.irq;
	dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
	dev_info->dev_type = cdev->type;
	ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);

	if (IS_PF(cdev)) {
		dev_info->fw_major = FW_MAJOR_VERSION;
		dev_info->fw_minor = FW_MINOR_VERSION;
		dev_info->fw_rev = FW_REVISION_VERSION;
		dev_info->fw_eng = FW_ENGINEERING_VERSION;
		dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
						       &cdev->mf_bits);
		if (!test_bit(QED_MF_DISABLE_ARFS, &cdev->mf_bits))
			dev_info->b_arfs_capable = true;
		dev_info->tx_switching = true;

		if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
			dev_info->wol_support = true;

		dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn);
		dev_info->esl = qed_mcp_is_esl_supported(p_hwfn);
		dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
	} else {
		qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
				      &dev_info->fw_minor, &dev_info->fw_rev,
				      &dev_info->fw_eng);
	}

	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
		if (ptt) {
			qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mfw_rev, NULL);

			qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mbi_version);

			qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
					       &dev_info->flash_size);

			qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
		}
	} else {
		qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
				    &dev_info->mfw_rev, NULL);
	}

	dev_info->mtu = hw_info->mtu;
	cdev->common_dev_info = *dev_info;

	return 0;
}
static void qed_free_cdev(struct qed_dev *cdev)
{
	kfree((void *)cdev);
}

static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
{
	struct qed_dev *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return cdev;

	qed_init_struct(cdev);

	return cdev;
}
/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
{
	if (!cdev)
		return -ENODEV;

	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
	return 0;
}
static struct qed_dev *qed_probe(struct pci_dev *pdev,
				 struct qed_probe_params *params)
{
	struct qed_dev *cdev;
	int rc;

	cdev = qed_alloc_cdev(pdev);
	if (!cdev)
		goto err0;

	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
	cdev->protocol = params->protocol;

	if (params->is_vf)
		cdev->b_is_vf = true;

	qed_init_dp(cdev, params->dp_module, params->dp_level);

	cdev->recov_in_prog = params->recov_in_prog;

	rc = qed_init_pci(cdev, pdev);
	if (rc) {
		DP_ERR(cdev, "init pci failed\n");
		goto err1;
	}
	DP_INFO(cdev, "PCI init completed successfully\n");

	rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
	if (rc) {
		DP_ERR(cdev, "hw prepare failed\n");
		goto err2;
	}

	DP_INFO(cdev, "%s completed successfully\n", __func__);

	return cdev;

err2:
	qed_free_pci(cdev);
err1:
	qed_free_cdev(cdev);
err0:
	return NULL;
}
static void qed_remove(struct qed_dev *cdev)
{
	if (!cdev)
		return;

	qed_hw_remove(cdev);

	qed_free_pci(cdev);

	qed_set_power_state(cdev, PCI_D3hot);

	qed_free_cdev(cdev);
}
static void qed_disable_msix(struct qed_dev *cdev)
{
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		pci_disable_msix(cdev->pdev);
		kfree(cdev->int_params.msix_table);
	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
		pci_disable_msi(cdev->pdev);
	}

	memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
}
static int qed_enable_msix(struct qed_dev *cdev,
			   struct qed_int_params *int_params)
{
	int i, rc, cnt;

	cnt = int_params->in.num_vectors;

	for (i = 0; i < cnt; i++)
		int_params->msix_table[i].entry = i;

	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
				   int_params->in.min_msix_cnt, cnt);
	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
	    (rc % cdev->num_hwfns)) {
		pci_disable_msix(cdev->pdev);

		/* If fastpath is initialized, we need at least one interrupt
		 * per hwfn [and the slow path interrupts]. New requested number
		 * should be a multiple of the number of hwfns.
		 */
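		/* For example: on a two-hwfn (CMT) device where 9 vectors were
		 * granted, the retry below requests 8 (a multiple of 2).
		 */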
		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
		DP_NOTICE(cdev,
			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
			  cnt, int_params->in.num_vectors);
		rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
					   cnt);
		if (!rc)
			rc = cnt;
	}

	/* For VFs, we should return with an error in case we didn't get the
	 * exact number of msix vectors as we requested.
	 * Not doing that will lead to a crash when starting queues for
	 * this VF.
	 */
	if ((IS_PF(cdev) && rc > 0) || (IS_VF(cdev) && rc == cnt)) {
		/* MSI-x configuration was achieved */
		int_params->out.int_mode = QED_INT_MODE_MSIX;
		int_params->out.num_vectors = rc;
		rc = 0;
	} else {
		DP_NOTICE(cdev,
			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
			  cnt, rc);
	}

	return rc;
}
/* This function outputs the int mode and the number of enabled msix vector */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
	struct qed_int_params *int_params = &cdev->int_params;
	struct msix_entry *tbl;
	int rc = 0, cnt;

	switch (int_params->in.int_mode) {
	case QED_INT_MODE_MSIX:
		/* Allocate MSIX table */
		cnt = int_params->in.num_vectors;
		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
		if (!int_params->msix_table) {
			rc = -ENOMEM;
			goto out;
		}

		rc = qed_enable_msix(cdev, int_params);
		if (!rc)
			goto out;

		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
		kfree(int_params->msix_table);
		if (force_mode)
			goto out;
		fallthrough;

	case QED_INT_MODE_MSI:
		if (cdev->num_hwfns == 1) {
			rc = pci_enable_msi(cdev->pdev);
			if (!rc) {
				int_params->out.int_mode = QED_INT_MODE_MSI;
				goto out;
			}

			DP_NOTICE(cdev, "Failed to enable MSI\n");
			if (force_mode)
				goto out;
		}
		fallthrough;

	case QED_INT_MODE_INTA:
		int_params->out.int_mode = QED_INT_MODE_INTA;
		rc = 0;
		goto out;
	default:
		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
			  int_params->in.int_mode);
		rc = -EINVAL;
	}

out:
	if (!rc)
		DP_INFO(cdev, "Using %s interrupts\n",
			int_params->out.int_mode == QED_INT_MODE_INTA ?
			"INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
			"MSI" : "MSI-X");
	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

	return rc;
}
static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
				    int index, void (*handler)(void *))
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	hwfn->simd_proto_handler[relative_idx].func = handler;
	hwfn->simd_proto_handler[relative_idx].token = token;
}

static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	memset(&hwfn->simd_proto_handler[relative_idx], 0,
	       sizeof(struct qed_simd_fp_handler));
}
static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
	tasklet_schedule((struct tasklet_struct *)tasklet);
	return IRQ_HANDLED;
}

static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
	struct qed_hwfn *hwfn;
	irqreturn_t rc = IRQ_NONE;
	u64 status;
	int i, j;

	for (i = 0; i < cdev->num_hwfns; i++) {
		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

		if (!status)
			continue;

		hwfn = &cdev->hwfns[i];

		/* Slowpath interrupt */
		if (unlikely(status & 0x1)) {
			tasklet_schedule(&hwfn->sp_dpc);
			status &= ~0x1;
			rc = IRQ_HANDLED;
		}

		/* Fastpath interrupts */
		for (j = 0; j < 64; j++) {
			if ((0x2ULL << j) & status) {
				struct qed_simd_fp_handler *p_handler =
					&hwfn->simd_proto_handler[j];

				if (p_handler->func)
					p_handler->func(p_handler->token);
				else
					DP_NOTICE(hwfn,
						  "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
						  j, status);

				status &= ~(0x2ULL << j);
				rc = IRQ_HANDLED;
			}
		}

		if (unlikely(status))
			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
				   "got an unknown interrupt status 0x%llx\n",
				   status);
	}

	return rc;
}
int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
	struct qed_dev *cdev = hwfn->cdev;
	u32 int_mode;
	int rc = 0;
	u8 id;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX) {
		id = hwfn->my_id;
		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
			 id, cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
		rc = request_irq(cdev->int_params.msix_table[id].vector,
				 qed_msix_sp_int, 0, hwfn->name, &hwfn->sp_dpc);
	} else {
		unsigned long flags = 0;

		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
			 PCI_FUNC(cdev->pdev->devfn));

		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
			flags |= IRQF_SHARED;

		rc = request_irq(cdev->pdev->irq, qed_single_int,
				 flags, cdev->name, cdev);
	}

	if (rc)
		DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
	else
		DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
			   "Requested slowpath %s\n",
			   (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");

	return rc;
}
static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
{
	/* Calling the disable function will make sure that any
	 * currently-running function is completed. The following call to the
	 * enable function makes this sequence a flush-like operation.
	 */
	if (p_hwfn->b_sp_dpc_enabled) {
		tasklet_disable(&p_hwfn->sp_dpc);
		tasklet_enable(&p_hwfn->sp_dpc);
	}
}

void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u8 id = p_hwfn->my_id;
	u32 int_mode;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX)
		synchronize_irq(cdev->int_params.msix_table[id].vector);
	else
		synchronize_irq(cdev->pdev->irq);

	qed_slowpath_tasklet_flush(p_hwfn);
}
static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
	int i;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		for_each_hwfn(cdev, i) {
			if (!cdev->hwfns[i].b_int_requested)
				break;
			free_irq(cdev->int_params.msix_table[i].vector,
				 &cdev->hwfns[i].sp_dpc);
		}
	} else {
		if (QED_LEADING_HWFN(cdev)->b_int_requested)
			free_irq(cdev->pdev->irq, cdev);
	}
	qed_int_disable_post_isr_release(cdev);
}
static int qed_nic_stop(struct qed_dev *cdev)
{
	int i, rc;

	rc = qed_hw_stop(cdev);

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (p_hwfn->b_sp_dpc_enabled) {
			tasklet_disable(&p_hwfn->sp_dpc);
			p_hwfn->b_sp_dpc_enabled = false;
			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
				   "Disabled sp tasklet [hwfn %d] at %p\n",
				   i, &p_hwfn->sp_dpc);
		}
	}

	qed_dbg_pf_exit(cdev);

	return rc;
}
static int qed_nic_setup(struct qed_dev *cdev)
{
	int rc, i;

	/* Determine if interface is going to require LL2 */
	if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
		for (i = 0; i < cdev->num_hwfns; i++) {
			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

			p_hwfn->using_ll2 = true;
		}
	}

	rc = qed_resc_alloc(cdev);
	if (rc)
		return rc;

	DP_INFO(cdev, "Allocated qed resources\n");

	qed_resc_setup(cdev);

	return rc;
}
static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
		limit = cdev->num_hwfns * 63;
	else if (cdev->int_params.fp_msix_cnt)
		limit = cdev->int_params.fp_msix_cnt;

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}
static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(struct qed_int_info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	/* Need to expose only MSI-X information; Single IRQ is handled solely
	 * by qed.
	 */
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.fp_msix_base;

		info->msix_cnt = cdev->int_params.fp_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];
	}

	return 0;
}
static int qed_slowpath_setup_int(struct qed_dev *cdev,
				  enum qed_int_mode int_mode)
{
	struct qed_sb_cnt_info sb_cnt_info;
	int num_l2_queues = 0;
	int rc;
	int i;

	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
		return -EINVAL;
	}

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = int_mode;
	for_each_hwfn(cdev, i) {
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
		cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
		cdev->int_params.in.num_vectors++; /* slowpath */
	}

	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

	if (is_kdump_kernel()) {
		DP_INFO(cdev,
			"Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
			cdev->int_params.in.min_msix_cnt);
		cdev->int_params.in.num_vectors =
			cdev->int_params.in.min_msix_cnt;
	}

	rc = qed_set_int_mode(cdev, false);
	if (rc) {
		DP_ERR(cdev, "%s ERR\n", __func__);
		return rc;
	}

	cdev->int_params.fp_msix_base = cdev->num_hwfns;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
				       cdev->num_hwfns;

	if (!IS_ENABLED(CONFIG_QED_RDMA) ||
	    !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
		return 0;

	for_each_hwfn(cdev, i)
		num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);

	DP_VERBOSE(cdev, QED_MSG_RDMA,
		   "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
		   cdev->int_params.fp_msix_cnt, num_l2_queues);

	if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
		cdev->int_params.rdma_msix_cnt =
			(cdev->int_params.fp_msix_cnt - num_l2_queues)
			/ cdev->num_hwfns;
		cdev->int_params.rdma_msix_base =
			cdev->int_params.fp_msix_base + num_l2_queues;
		cdev->int_params.fp_msix_cnt = num_l2_queues;
	} else {
		cdev->int_params.rdma_msix_cnt = 0;
	}

	DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
		   cdev->int_params.rdma_msix_cnt,
		   cdev->int_params.rdma_msix_base);

	return 0;
}
static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
{
	int rc;

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;

	qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
			    &cdev->int_params.in.num_vectors);
	if (cdev->num_hwfns > 1) {
		u8 vectors = 0;

		qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
		cdev->int_params.in.num_vectors += vectors;
	}

	/* We want a minimum of one fastpath vector per vf hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;

	rc = qed_set_int_mode(cdev, true);
	if (rc)
		return rc;

	cdev->int_params.fp_msix_base = 0;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;

	return 0;
}
u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int rc;

	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);

	if (rc != Z_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
			   rc);
		return 0;
	}

	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
	zlib_inflateEnd(p_hwfn->stream);

	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
			   p_hwfn->stream->msg, rc);
		return 0;
	}
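
	/* Note: the value returned below is a count of dwords (32-bit words)
	 * written to 'unzip_buf', not a byte count.
	 */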
	return p_hwfn->stream->total_out / 4;
}
static int qed_alloc_stream_mem(struct qed_dev *cdev)
{
	int i;
	void *workspace;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
		if (!p_hwfn->stream)
			return -ENOMEM;

		workspace = vzalloc(zlib_inflate_workspacesize());
		if (!workspace)
			return -ENOMEM;
		p_hwfn->stream->workspace = workspace;
	}

	return 0;
}

static void qed_free_stream_mem(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!p_hwfn->stream)
			return;

		vfree(p_hwfn->stream->workspace);
		kfree(p_hwfn->stream);
	}
}
static void qed_update_pf_params(struct qed_dev *cdev,
				 struct qed_pf_params *params)
{
	int i;

	if (IS_ENABLED(CONFIG_QED_RDMA)) {
		params->rdma_pf_params.num_qps = QED_ROCE_QPS;
		params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
		params->rdma_pf_params.num_srqs = QED_RDMA_SRQS;
		/* divide by 3 the MRs to avoid MF ILT overflow */
		params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
	}

	if (cdev->num_hwfns > 1 || IS_VF(cdev))
		params->eth_pf_params.num_arfs_filters = 0;

	/* In case we might support RDMA, don't allow qede to be greedy
	 * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp]
	 * per hwfn.
	 */
	if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
		u16 *num_cons;

		num_cons = &params->eth_pf_params.num_cons;
		*num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS);
	}

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->pf_params = *params;
	}
}
#define QED_PERIODIC_DB_REC_COUNT		10
#define QED_PERIODIC_DB_REC_INTERVAL_MS		100
#define QED_PERIODIC_DB_REC_INTERVAL \
	msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)
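
/* With the values above, one armed recovery cycle re-runs the doorbell
 * recovery handler every 100 ms for up to 10 iterations (roughly one second).
 */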
static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn,
				     enum qed_slowpath_wq_flag wq_flag,
				     unsigned long delay)
{
	if (!hwfn->slowpath_wq_active)
		return -EINVAL;

	/* Memory barrier for setting atomic bit */
	smp_mb__before_atomic();
	set_bit(wq_flag, &hwfn->slowpath_task_flags);
	/* Memory barrier after setting atomic bit */
	smp_mb__after_atomic();
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay);

	return 0;
}

void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn)
{
	/* Reset periodic Doorbell Recovery counter */
	p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT;

	/* Don't schedule periodic Doorbell Recovery if already scheduled */
	if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
		     &p_hwfn->slowpath_task_flags))
		return;

	qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC,
				  QED_PERIODIC_DB_REC_INTERVAL);
}
static void qed_slowpath_wq_stop(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		if (!cdev->hwfns[i].slowpath_wq)
			continue;

		/* Stop queuing new delayed works */
		cdev->hwfns[i].slowpath_wq_active = false;

		cancel_delayed_work(&cdev->hwfns[i].slowpath_task);
		destroy_workqueue(cdev->hwfns[i].slowpath_wq);
	}
}
static void qed_slowpath_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     slowpath_task.work);
	struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

	if (!ptt) {
		if (hwfn->slowpath_wq_active)
			queue_delayed_work(hwfn->slowpath_wq,
					   &hwfn->slowpath_task, 0);

		return;
	}

	if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
			       &hwfn->slowpath_task_flags))
		qed_mfw_process_tlv_req(hwfn, ptt);

	if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC,
			       &hwfn->slowpath_task_flags)) {
		/* skip qed_db_rec_handler during recovery/unload */
		if (hwfn->cdev->recov_in_prog || !hwfn->slowpath_wq_active)
			goto out;

		qed_db_rec_handler(hwfn, ptt);
		if (hwfn->periodic_db_rec_count--)
			qed_slowpath_delayed_work(hwfn,
						  QED_SLOWPATH_PERIODIC_DB_REC,
						  QED_PERIODIC_DB_REC_INTERVAL);
	}

out:
	qed_ptt_release(hwfn, ptt);
}
static int qed_slowpath_wq_start(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	int i;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];

		hwfn->slowpath_wq = alloc_workqueue("slowpath-%02x:%02x.%02x",
						    0, 0, cdev->pdev->bus->number,
						    PCI_SLOT(cdev->pdev->devfn),
						    hwfn->abs_pf_id);

		if (!hwfn->slowpath_wq) {
			DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
			return -ENOMEM;
		}

		INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
		hwfn->slowpath_wq_active = true;
	}

	return 0;
}
static int qed_slowpath_start(struct qed_dev *cdev,
			      struct qed_slowpath_params *params)
{
	struct qed_drv_load_params drv_load_params;
	struct qed_hw_init_params hw_init_params;
	struct qed_mcp_drv_version drv_version;
	struct qed_tunnel_info tunn_info;
	const u8 *data = NULL;
	struct qed_hwfn *hwfn;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;

	if (qed_iov_wq_start(cdev))
		goto err;

	if (qed_slowpath_wq_start(cdev))
		goto err;

	if (IS_PF(cdev)) {
		rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
				      &cdev->pdev->dev);
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to find fw file - /lib/firmware/%s\n",
				  QED_FW_FILE_NAME);
			goto err;
		}

		if (cdev->num_hwfns == 1) {
			p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
			if (p_ptt) {
				QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
			} else {
				DP_NOTICE(cdev,
					  "Failed to acquire PTT for aRFS\n");
				rc = -EINVAL;
				goto err;
			}
		}
	}

	cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
	rc = qed_nic_setup(cdev);
	if (rc)
		goto err;

	if (IS_PF(cdev))
		rc = qed_slowpath_setup_int(cdev, params->int_mode);
	else
		rc = qed_slowpath_vf_setup_int(cdev);
	if (rc)
		goto err1;

	if (IS_PF(cdev)) {
		/* Allocate stream for unzipping */
		rc = qed_alloc_stream_mem(cdev);
		if (rc)
			goto err2;

		/* First Dword used to differentiate between various sources */
		data = cdev->firmware->data + sizeof(u32);

		qed_dbg_pf_init(cdev);
	}

	/* Start the slowpath */
	memset(&hw_init_params, 0, sizeof(hw_init_params));
	memset(&tunn_info, 0, sizeof(tunn_info));
	tunn_info.vxlan.b_mode_enabled = true;
	tunn_info.l2_gre.b_mode_enabled = true;
	tunn_info.ip_gre.b_mode_enabled = true;
	tunn_info.l2_geneve.b_mode_enabled = true;
	tunn_info.ip_geneve.b_mode_enabled = true;
	tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	hw_init_params.p_tunn = &tunn_info;
	hw_init_params.b_hw_start = true;
	hw_init_params.int_mode = cdev->int_params.out.int_mode;
	hw_init_params.allow_npar_tx_switch = true;
	hw_init_params.bin_fw_data = data;

	memset(&drv_load_params, 0, sizeof(drv_load_params));
	drv_load_params.is_crash_kernel = is_kdump_kernel();
	drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
	drv_load_params.avoid_eng_reset = false;
	drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
	hw_init_params.p_drv_load_params = &drv_load_params;

	rc = qed_hw_init(cdev, &hw_init_params);
	if (rc)
		goto err2;

	DP_INFO(cdev,
		"HW initialization and function start completed successfully\n");

	if (IS_PF(cdev)) {
		cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
					   BIT(QED_MODE_L2GENEVE_TUNN) |
					   BIT(QED_MODE_IPGENEVE_TUNN) |
					   BIT(QED_MODE_L2GRE_TUNN) |
					   BIT(QED_MODE_IPGRE_TUNN));
	}

	/* Allocate LL2 interface if needed */
	if (QED_LEADING_HWFN(cdev)->using_ll2) {
		rc = qed_ll2_alloc_if(cdev);
		if (rc)
			goto err3;
	}
	if (IS_PF(cdev)) {
		hwfn = QED_LEADING_HWFN(cdev);
		drv_version.version = (params->drv_major << 24) |
				      (params->drv_minor << 16) |
				      (params->drv_rev << 8) |
				      (params->drv_eng);
		strscpy(drv_version.name, params->name,
			sizeof(drv_version.name));
		rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
					      &drv_version);
		if (rc) {
			DP_NOTICE(cdev, "Failed sending drv version command\n");
			goto err4;
		}
	}

	qed_reset_vport_stats(cdev);

	return 0;

err4:
	qed_ll2_dealloc_if(cdev);
err3:
	qed_hw_stop(cdev);
err2:
	qed_hw_timers_stop_all(cdev);
	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);
	qed_free_stream_mem(cdev);
	qed_disable_msix(cdev);
err1:
	qed_resc_free(cdev);
err:
	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
	    QED_LEADING_HWFN(cdev)->p_arfs_ptt)
		qed_ptt_release(QED_LEADING_HWFN(cdev),
				QED_LEADING_HWFN(cdev)->p_arfs_ptt);

	qed_iov_wq_stop(cdev, false);

	qed_slowpath_wq_stop(cdev);

	return rc;
}
static int qed_slowpath_stop(struct qed_dev *cdev)
{
	if (!cdev)
		return -ENODEV;

	qed_slowpath_wq_stop(cdev);

	qed_ll2_dealloc_if(cdev);

	if (IS_PF(cdev)) {
		if (cdev->num_hwfns == 1)
			qed_ptt_release(QED_LEADING_HWFN(cdev),
					QED_LEADING_HWFN(cdev)->p_arfs_ptt);
		qed_free_stream_mem(cdev);
		if (IS_QED_ETH_IF(cdev))
			qed_sriov_disable(cdev, true);
	}

	qed_nic_stop(cdev);

	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);

	qed_disable_msix(cdev);

	qed_resc_free(cdev);

	qed_iov_wq_stop(cdev, true);

	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	return 0;
}
static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
{
	int i;

	memcpy(cdev->name, name, NAME_SIZE);
	for_each_hwfn(cdev, i)
		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
}
static u32 qed_sb_init(struct qed_dev *cdev,
		       struct qed_sb_info *sb_info,
		       void *sb_virt_addr,
		       dma_addr_t sb_phy_addr, u16 sb_id,
		       enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	struct qed_ptt *p_ptt;
	u16 rel_sb_id;
	u32 rc;

	/* RoCE/Storage use a single engine in CMT mode while L2 uses both */
	if (type == QED_SB_TYPE_L2_QUEUE) {
		p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
		rel_sb_id = sb_id / cdev->num_hwfns;
	} else {
		p_hwfn = QED_AFFIN_HWFN(cdev);
		rel_sb_id = sb_id;
	}

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);

	if (IS_PF(p_hwfn->cdev)) {
		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
		qed_ptt_release(p_hwfn, p_ptt);
	} else {
		rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
	}

	return rc;
}
static u32 qed_sb_release(struct qed_dev *cdev,
			  struct qed_sb_info *sb_info,
			  u16 sb_id,
			  enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	u16 rel_sb_id;
	u32 rc;

	/* RoCE/Storage use a single engine in CMT mode while L2 uses both */
	if (type == QED_SB_TYPE_L2_QUEUE) {
		p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
		rel_sb_id = sb_id / cdev->num_hwfns;
	} else {
		p_hwfn = QED_AFFIN_HWFN(cdev);
		rel_sb_id = sb_id;
	}

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);

	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);

	return rc;
}

static bool qed_can_link_change(struct qed_dev *cdev)
{
	return true;
}
static void qed_set_ext_speed_params(struct qed_mcp_link_params *link_params,
				     const struct qed_link_params *params)
{
	struct qed_mcp_link_speed_params *ext_speed = &link_params->ext_speed;
	const struct qed_mfw_speed_map *map;
	u32 i;

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		ext_speed->autoneg = !!params->autoneg;

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		ext_speed->advertised_speeds = 0;

		for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++) {
			map = qed_mfw_ext_maps + i;

			if (linkmode_intersects(params->adv_speeds, map->caps))
				ext_speed->advertised_speeds |= map->mfw_val;
		}
	}

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED) {
		switch (params->forced_speed) {
		case SPEED_1000:
			ext_speed->forced_speed = QED_EXT_SPEED_1G;
			break;
		case SPEED_10000:
			ext_speed->forced_speed = QED_EXT_SPEED_10G;
			break;
		case SPEED_20000:
			ext_speed->forced_speed = QED_EXT_SPEED_20G;
			break;
		case SPEED_25000:
			ext_speed->forced_speed = QED_EXT_SPEED_25G;
			break;
		case SPEED_40000:
			ext_speed->forced_speed = QED_EXT_SPEED_40G;
			break;
		case SPEED_50000:
			ext_speed->forced_speed = QED_EXT_SPEED_50G_R |
						  QED_EXT_SPEED_50G_R2;
			break;
		case SPEED_100000:
			ext_speed->forced_speed = QED_EXT_SPEED_100G_R2 |
						  QED_EXT_SPEED_100G_R4 |
						  QED_EXT_SPEED_100G_P4;
			break;
		default:
			break;
		}
	}

	if (!(params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG))
		return;

	switch (params->forced_speed) {
	case SPEED_25000:
		switch (params->fec) {
		case FEC_FORCE_MODE_NONE:
			link_params->ext_fec_mode = ETH_EXT_FEC_25G_NONE;
			break;
		case FEC_FORCE_MODE_FIRECODE:
			link_params->ext_fec_mode = ETH_EXT_FEC_25G_BASE_R;
			break;
		case FEC_FORCE_MODE_RS:
			link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528;
			break;
		case FEC_FORCE_MODE_AUTO:
			link_params->ext_fec_mode = ETH_EXT_FEC_25G_RS528 |
						    ETH_EXT_FEC_25G_BASE_R |
						    ETH_EXT_FEC_25G_NONE;
			break;
		default:
			break;
		}

		break;
	case SPEED_40000:
		switch (params->fec) {
		case FEC_FORCE_MODE_NONE:
			link_params->ext_fec_mode = ETH_EXT_FEC_40G_NONE;
			break;
		case FEC_FORCE_MODE_FIRECODE:
			link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R;
			break;
		case FEC_FORCE_MODE_AUTO:
			link_params->ext_fec_mode = ETH_EXT_FEC_40G_BASE_R |
						    ETH_EXT_FEC_40G_NONE;
			break;
		default:
			break;
		}

		break;
	case SPEED_50000:
		switch (params->fec) {
		case FEC_FORCE_MODE_NONE:
			link_params->ext_fec_mode = ETH_EXT_FEC_50G_NONE;
			break;
		case FEC_FORCE_MODE_FIRECODE:
			link_params->ext_fec_mode = ETH_EXT_FEC_50G_BASE_R;
			break;
		case FEC_FORCE_MODE_RS:
			link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528;
			break;
		case FEC_FORCE_MODE_AUTO:
			link_params->ext_fec_mode = ETH_EXT_FEC_50G_RS528 |
						    ETH_EXT_FEC_50G_BASE_R |
						    ETH_EXT_FEC_50G_NONE;
			break;
		default:
			break;
		}

		break;
	case SPEED_100000:
		switch (params->fec) {
		case FEC_FORCE_MODE_NONE:
			link_params->ext_fec_mode = ETH_EXT_FEC_100G_NONE;
			break;
		case FEC_FORCE_MODE_FIRECODE:
			link_params->ext_fec_mode = ETH_EXT_FEC_100G_BASE_R;
			break;
		case FEC_FORCE_MODE_RS:
			link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528;
			break;
		case FEC_FORCE_MODE_AUTO:
			link_params->ext_fec_mode = ETH_EXT_FEC_100G_RS528 |
						    ETH_EXT_FEC_100G_BASE_R |
						    ETH_EXT_FEC_100G_NONE;
			break;
		default:
			break;
		}

		break;
	default:
		break;
	}
}
static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
{
	struct qed_mcp_link_params *link_params;
	struct qed_mcp_link_speed_params *speed;
	const struct qed_mfw_speed_map *map;
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int rc;
	u32 i;

	if (!cdev)
		return -ENODEV;

	/* The link should be set only once per PF */
	hwfn = &cdev->hwfns[0];

	/* When VF wants to set link, force it to read the bulletin instead.
	 * This mimics the PF behavior, where a notification [both immediate
	 * and possible later] would be generated when changing properties.
	 */
	if (IS_VF(cdev)) {
		qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
		return 0;
	}

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EBUSY;

	link_params = qed_mcp_get_link_params(hwfn);
	if (!link_params)
		return -ENODATA;

	speed = &link_params->speed;

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		speed->autoneg = !!params->autoneg;

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		speed->advertised_speeds = 0;

		for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++) {
			map = qed_mfw_legacy_maps + i;

			if (linkmode_intersects(params->adv_speeds, map->caps))
				speed->advertised_speeds |= map->mfw_val;
		}
	}

	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
		speed->forced_speed = params->forced_speed;

	if (qed_mcp_is_ext_speed_supported(hwfn))
		qed_set_ext_speed_params(link_params, params);

	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
		if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
			link_params->pause.autoneg = true;
		else
			link_params->pause.autoneg = false;
		if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
			link_params->pause.forced_rx = true;
		else
			link_params->pause.forced_rx = false;
		if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
			link_params->pause.forced_tx = true;
		else
			link_params->pause.forced_tx = false;
	}

	if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
		switch (params->loopback_mode) {
		case QED_LINK_LOOPBACK_INT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT:
			link_params->loopback_mode = ETH_LOOPBACK_EXT;
			break;
		case QED_LINK_LOOPBACK_MAC:
			link_params->loopback_mode = ETH_LOOPBACK_MAC;
			break;
		case QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123:
			link_params->loopback_mode =
				ETH_LOOPBACK_CNIG_AH_ONLY_0123;
			break;
		case QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301:
			link_params->loopback_mode =
				ETH_LOOPBACK_CNIG_AH_ONLY_2301;
			break;
		case QED_LINK_LOOPBACK_PCS_AH_ONLY:
			link_params->loopback_mode = ETH_LOOPBACK_PCS_AH_ONLY;
			break;
		case QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY:
			link_params->loopback_mode =
				ETH_LOOPBACK_REVERSE_MAC_AH_ONLY;
			break;
		case QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY:
			link_params->loopback_mode =
				ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY;
			break;
		default:
			link_params->loopback_mode = ETH_LOOPBACK_NONE;
			break;
		}
	}

	if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG)
		memcpy(&link_params->eee, &params->eee,
		       sizeof(link_params->eee));

	if (params->override_flags & QED_LINK_OVERRIDE_FEC_CONFIG)
		link_params->fec = params->fec;

	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);

	qed_ptt_release(hwfn, ptt);

	return rc;
}
static int qed_get_port_type(u32 media_type)
{
	int port_type;

	switch (media_type) {
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
		port_type = PORT_FIBRE;
		break;
	case MEDIA_DA_TWINAX:
		port_type = PORT_DA;
		break;
	case MEDIA_BASE_T:
		port_type = PORT_TP;
		break;
	case MEDIA_KR:
	case MEDIA_NOT_PRESENT:
		port_type = PORT_NONE;
		break;
	case MEDIA_UNSPECIFIED:
	default:
		port_type = PORT_OTHER;
		break;
	}
	return port_type;
}
static int qed_get_link_data(struct qed_hwfn *hwfn,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *link_caps)
{
	void *p;

	if (!IS_PF(hwfn->cdev)) {
		qed_vf_get_link_params(hwfn, params);
		qed_vf_get_link_state(hwfn, link);
		qed_vf_get_link_caps(hwfn, link_caps);

		return 0;
	}

	p = qed_mcp_get_link_params(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(params, p, sizeof(*params));

	p = qed_mcp_get_link_state(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link, p, sizeof(*link));

	p = qed_mcp_get_link_capabilities(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link_caps, p, sizeof(*link_caps));

	return 0;
}
static void qed_fill_link_capability(struct qed_hwfn *hwfn,
				     struct qed_ptt *ptt, u32 capability,
				     unsigned long *if_caps)
{
	u32 media_type, tcvr_state, tcvr_type;
	u32 speed_mask, board_cfg;

	if (qed_mcp_get_media_type(hwfn, ptt, &media_type))
		media_type = MEDIA_UNSPECIFIED;

	if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type))
		tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED;

	if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask))
		speed_mask = 0xFFFFFFFF;

	if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg))
		board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;

	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n",
		   media_type, tcvr_state, tcvr_type, speed_mask, board_cfg);

	switch (media_type) {
	case MEDIA_DA_TWINAX:
		phylink_set(if_caps, FIBRE);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			phylink_set(if_caps, 20000baseKR2_Full);

		/* For DAC media multiple speed capabilities are supported */
		capability |= speed_mask;

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			phylink_set(if_caps, 1000baseKX_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			phylink_set(if_caps, 10000baseCR_Full);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_40G_CR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
				phylink_set(if_caps, 40000baseCR4_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			phylink_set(if_caps, 25000baseCR_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			phylink_set(if_caps, 50000baseCR2_Full);

		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_100G_CR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR:
				phylink_set(if_caps, 100000baseCR4_Full);
				break;
			default:
				break;
			}

		break;
	case MEDIA_BASE_T:
		phylink_set(if_caps, TP);

		if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) {
			if (capability &
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
				phylink_set(if_caps, 1000baseT_Full);
			if (capability &
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
				phylink_set(if_caps, 10000baseT_Full);
		}

		if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) {
			phylink_set(if_caps, FIBRE);

			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_1000BASET:
				phylink_set(if_caps, 1000baseT_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_BASET:
				phylink_set(if_caps, 10000baseT_Full);
				break;
			default:
				break;
			}
		}

		break;
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
		phylink_set(if_caps, FIBRE);
		capability |= speed_mask;

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_1G_LX:
			case ETH_TRANSCEIVER_TYPE_1G_SX:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
				phylink_set(if_caps, 1000baseKX_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_10G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR:
				phylink_set(if_caps, 10000baseSR_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR:
				phylink_set(if_caps, 10000baseLR_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_LRM:
				phylink_set(if_caps, 10000baseLRM_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_10G_ER:
				phylink_set(if_caps, 10000baseR_FEC);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			phylink_set(if_caps, 20000baseKR2_Full);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_25G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR:
				phylink_set(if_caps, 25000baseSR_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_40G_LR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
				phylink_set(if_caps, 40000baseLR4_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_40G_SR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR:
				phylink_set(if_caps, 40000baseSR4_Full);
				break;
			default:
				break;
			}

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			phylink_set(if_caps, 50000baseKR2_Full);

		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			switch (tcvr_type) {
			case ETH_TRANSCEIVER_TYPE_100G_SR4:
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR:
				phylink_set(if_caps, 100000baseSR4_Full);
				break;
			case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR:
				phylink_set(if_caps, 100000baseLR4_ER4_Full);
				break;
			default:
				break;
			}

		break;
	case MEDIA_KR:
		phylink_set(if_caps, Backplane);

		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			phylink_set(if_caps, 20000baseKR2_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			phylink_set(if_caps, 1000baseKX_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			phylink_set(if_caps, 10000baseKR_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			phylink_set(if_caps, 25000baseKR_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			phylink_set(if_caps, 40000baseKR4_Full);
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			phylink_set(if_caps, 50000baseKR2_Full);
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			phylink_set(if_caps, 100000baseKR4_Full);

		break;
	case MEDIA_UNSPECIFIED:
	case MEDIA_NOT_PRESENT:
	default:
		DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG,
			   "Unknown media and transceiver type;\n");
		break;
	}
}
static void qed_lp_caps_to_speed_mask(u32 caps, u32 *speed_mask)
{
	*speed_mask = 0;

	if (caps &
	    (QED_LINK_PARTNER_SPEED_1G_FD | QED_LINK_PARTNER_SPEED_1G_HD))
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
	if (caps & QED_LINK_PARTNER_SPEED_10G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
	if (caps & QED_LINK_PARTNER_SPEED_20G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G;
	if (caps & QED_LINK_PARTNER_SPEED_25G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
	if (caps & QED_LINK_PARTNER_SPEED_40G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
	if (caps & QED_LINK_PARTNER_SPEED_50G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
	if (caps & QED_LINK_PARTNER_SPEED_100G)
		*speed_mask |= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
}
static void qed_fill_link(struct qed_hwfn *hwfn,
			  struct qed_ptt *ptt,
			  struct qed_link_output *if_link)
{
	struct qed_mcp_link_capabilities link_caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	u32 media_type, speed_mask;

	memset(if_link, 0, sizeof(*if_link));

	/* Prepare source inputs */
	if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
		dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
		return;
	}

	/* Set the link parameters to pass to protocol driver */
	if (link.link_up)
		if_link->link_up = true;

	if (IS_PF(hwfn->cdev) && qed_mcp_is_ext_speed_supported(hwfn)) {
		if (link_caps.default_ext_autoneg)
			phylink_set(if_link->supported_caps, Autoneg);

		linkmode_copy(if_link->advertised_caps, if_link->supported_caps);

		if (params.ext_speed.autoneg)
			phylink_set(if_link->advertised_caps, Autoneg);
		else
			phylink_clear(if_link->advertised_caps, Autoneg);

		qed_fill_link_capability(hwfn, ptt,
					 params.ext_speed.advertised_speeds,
					 if_link->advertised_caps);
	} else {
		if (link_caps.default_speed_autoneg)
			phylink_set(if_link->supported_caps, Autoneg);

		linkmode_copy(if_link->advertised_caps, if_link->supported_caps);

		if (params.speed.autoneg)
			phylink_set(if_link->advertised_caps, Autoneg);
		else
			phylink_clear(if_link->advertised_caps, Autoneg);
	}

	if (params.pause.autoneg ||
	    (params.pause.forced_rx && params.pause.forced_tx))
		phylink_set(if_link->supported_caps, Asym_Pause);
	if (params.pause.autoneg || params.pause.forced_rx ||
	    params.pause.forced_tx)
		phylink_set(if_link->supported_caps, Pause);

	if_link->sup_fec = link_caps.fec_default;
	if_link->active_fec = params.fec;

	/* Fill link advertised capability */
	qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds,
				 if_link->advertised_caps);

	/* Fill link supported capability */
	qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities,
				 if_link->supported_caps);

	/* Fill partner advertised capability */
	qed_lp_caps_to_speed_mask(link.partner_adv_speed, &speed_mask);
	qed_fill_link_capability(hwfn, ptt, speed_mask, if_link->lp_caps);

	if (link.link_up)
		if_link->speed = link.speed;

	/* TODO - fill duplex properly */
	if_link->duplex = DUPLEX_FULL;
	qed_mcp_get_media_type(hwfn, ptt, &media_type);
	if_link->port = qed_get_port_type(media_type);

	if_link->autoneg = params.speed.autoneg;

	if (params.pause.autoneg)
		if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	if (params.pause.forced_rx)
		if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
	if (params.pause.forced_tx)
		if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;

	if (link.an_complete)
		phylink_set(if_link->lp_caps, Autoneg);
	if (link.partner_adv_pause)
		phylink_set(if_link->lp_caps, Pause);
	if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
	    link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
		phylink_set(if_link->lp_caps, Asym_Pause);

	if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) {
		if_link->eee_supported = false;
	} else {
		if_link->eee_supported = true;
		if_link->eee_active = link.eee_active;
		if_link->sup_caps = link_caps.eee_speed_caps;
		/* MFW clears adv_caps on eee disable; use configured value */
		if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps :
					params.eee.adv_caps;
		if_link->eee.lp_adv_caps = link.eee_lp_adv_caps;
		if_link->eee.enable = params.eee.enable;
		if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable;
		if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer;
	}
}
static void qed_get_current_link(struct qed_dev *cdev,
				 struct qed_link_output *if_link)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i;

	hwfn = &cdev->hwfns[0];
	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(hwfn);
		if (ptt) {
			qed_fill_link(hwfn, ptt, if_link);
			qed_ptt_release(hwfn, ptt);
		} else {
			DP_NOTICE(hwfn, "Failed to fill link; No PTT\n");
		}
	} else {
		qed_fill_link(hwfn, NULL, if_link);
	}

	for_each_hwfn(cdev, i)
		qed_inform_vf_link_state(&cdev->hwfns[i]);
}
void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
{
	void *cookie = hwfn->cdev->ops_cookie;
	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
	struct qed_link_output if_link;

	qed_fill_link(hwfn, ptt, &if_link);
	qed_inform_vf_link_state(hwfn);

	if (IS_LEAD_HWFN(hwfn) && cookie)
		op->link_update(cookie, &if_link);
}

void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
{
	void *cookie = hwfn->cdev->ops_cookie;
	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;

	if (IS_LEAD_HWFN(hwfn) && cookie && op && op->bw_update)
		op->bw_update(cookie);
}
static int qed_drain(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i, rc;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];
		ptt = qed_ptt_acquire(hwfn);
		if (!ptt) {
			DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
			return -EBUSY;
		}
		rc = qed_mcp_drain(hwfn, ptt);
		qed_ptt_release(hwfn, ptt);
		if (rc)
			return rc;
	}

	return 0;
}
static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev,
					  struct qed_nvm_image_att *nvm_image,
					  u32 *crc)
{
	u8 *buf = NULL;
	int rc;

	/* Allocate a buffer for holding the nvram image */
	buf = kzalloc(nvm_image->length, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Read image into buffer */
	rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr,
			      buf, nvm_image->length);
	if (rc) {
		DP_ERR(cdev, "Failed reading image from nvm\n");
		goto out;
	}

	/* Convert the buffer into big-endian format (excluding the
	 * closing 4 bytes of CRC).
	 */
	cpu_to_be32_array((__force __be32 *)buf, (const u32 *)buf,
			  DIV_ROUND_UP(nvm_image->length - 4, 4));

	/* Calc CRC for the "actual" image buffer, i.e. not including
	 * the last 4 CRC bytes.
	 */
	*crc = ~crc32(~0U, buf, nvm_image->length - 4);
	*crc = (__force u32)cpu_to_be32p(crc);

out:
	kfree(buf);

	return rc;
}
/* Binary file format -
 * /----------------------------------------------------------------------\
 * 0B  |                       0x4 [command index]                         |
 * 4B  | image_type     | Options        |  Number of register settings    |
 * 8B  |                       Value                                       |
 * 12B |                       Mask                                        |
 * 16B |                       Offset                                      |
 * \----------------------------------------------------------------------/
 * There can be several Value-Mask-Offset sets as specified by 'Number of...'.
 * Options - 0'b - Calculate & Update CRC for image
 */
static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data,
				      bool *check_resp)
{
	struct qed_nvm_image_att nvm_image;
	struct qed_hwfn *p_hwfn;
	bool is_crc = false;
	u32 image_type;
	int rc = 0, i;
	u16 len;

	*data += 4;
	image_type = **data;
	p_hwfn = QED_LEADING_HWFN(cdev);
	for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
		if (image_type == p_hwfn->nvm_info.image_att[i].image_type)
			break;
	if (i == p_hwfn->nvm_info.num_images) {
		DP_ERR(cdev, "Failed to find nvram image of type %08x\n",
		       image_type);
		return -ENOENT;
	}

	nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
	nvm_image.length = p_hwfn->nvm_info.image_att[i].len;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n",
		   **data, image_type, nvm_image.start_addr,
		   nvm_image.start_addr + nvm_image.length - 1);
	(*data)++;
	is_crc = !!(**data & BIT(0));
	(*data)++;
	len = *((u16 *)*data);
	*data += 2;
	*check_resp = !!(**data & BIT(0));
	*data += 4;

	if (is_crc) {
		u32 crc = 0;

		rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc);
		if (rc) {
			DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc);
			goto exit;
		}

		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
				       (nvm_image.start_addr +
					nvm_image.length - 4), (u8 *)&crc, 4);
		if (rc)
			DP_ERR(cdev, "Failed writing to %08x, rc = %d\n",
			       nvm_image.start_addr + nvm_image.length - 4, rc);
		goto exit;
	}

	/* Iterate over the values for setting */
	while (len) {
		u32 offset, mask, value, cur_value;
		u8 buf[4];

		value = *((u32 *)*data);
		*data += 4;
		mask = *((u32 *)*data);
		*data += 4;
		offset = *((u32 *)*data);
		*data += 4;

		rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf,
				      4);
		if (rc) {
			DP_ERR(cdev, "Failed reading from %08x\n",
			       nvm_image.start_addr + offset);
			goto exit;
		}

		cur_value = le32_to_cpu(*((__le32 *)buf));
		DP_VERBOSE(cdev, NETIF_MSG_DRV,
			   "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n",
			   nvm_image.start_addr + offset, cur_value,
			   (cur_value & ~mask) | (value & mask), value, mask);
		value = (value & mask) | (cur_value & ~mask);
		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
				       nvm_image.start_addr + offset,
				       (u8 *)&value, 4);
		if (rc) {
			DP_ERR(cdev, "Failed writing to %08x\n",
			       nvm_image.start_addr + offset);
			goto exit;
		}

		len--;
	}

exit:
	return rc;
}

/* Binary file format -
 * /----------------------------------------------------------------------\
 * 0B  |                       0x3 [command index]                         |
 * 4B  | b'0: check_response?   | b'1-31  reserved                         |
 * 8B  | File-type               |                   reserved              |
 * 12B |                       Image length in bytes                       |
 * \----------------------------------------------------------------------/
 * Start a new file of the provided type
 */
static int qed_nvm_flash_image_file_start(struct qed_dev *cdev,
					  const u8 **data, bool *check_resp)
{
	u32 file_type, file_size = 0;
	int rc;

	*data += 4;
	*check_resp = !!(**data & BIT(0));
	*data += 4;
	file_type = **data;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "About to start a new file of type %02x\n", file_type);
	if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) {
		*data += 4;
		file_size = *((u32 *)(*data));
	}

	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type,
			       (u8 *)(&file_size), 4);
	*data += 4;

	return rc;
}

/* Binary file format -
 * /----------------------------------------------------------------------\
 * 0B  |                       0x2 [command index]                         |
 * 4B  |                       Length in bytes                             |
 * 8B  | b'0: check_response?   | b'1-31  reserved                         |
 * 12B |                       Offset in bytes                             |
 * 16B |                       Data                                        |
 * \----------------------------------------------------------------------/
 * Write data as part of a file that was previously started. Data should be
 * of length equal to that provided in the message
 */
static int qed_nvm_flash_image_file_data(struct qed_dev *cdev,
					 const u8 **data, bool *check_resp)
{
	u32 offset, len;
	int rc;

	*data += 4;
	len = *((u32 *)(*data));
	*data += 4;
	*check_resp = !!(**data & BIT(0));
	*data += 4;
	offset = *((u32 *)(*data));
	*data += 4;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "About to write File-data: %08x bytes to offset %08x\n",
		   len, offset);

	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset,
			       (char *)(*data), len);
	*data += len;

	return rc;
}

/* Binary file format [General header] -
 * /----------------------------------------------------------------------\
 * 0B  |                       QED_NVM_SIGNATURE                           |
 * 4B  |                       Length in bytes                             |
 * 8B  | Highest command in this batchfile |          Reserved             |
 * \----------------------------------------------------------------------/
 */
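/* Sanity-check the batch-file header: minimum size, signature, internal
 * length and the highest command index used by the file.
 */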
static int qed_nvm_flash_image_validate(struct qed_dev *cdev,
					const struct firmware *image,
					const u8 **data)
{
	u32 signature, len;

	/* Check minimum size */
	if (image->size < 12) {
		DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size);
		return -EINVAL;
	}

	/* Check signature */
	signature = *((u32 *)(*data));
	if (signature != QED_NVM_SIGNATURE) {
		DP_ERR(cdev, "Wrong signature '%08x'\n", signature);
		return -EINVAL;
	}

	*data += 4;
	/* Validate internal size equals the image-size */
	len = *((u32 *)(*data));
	if (len != image->size) {
		DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n",
		       len, (u32)image->size);
		return -EINVAL;
	}

	*data += 4;
	/* Make sure driver familiar with all commands necessary for this */
	if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) {
		DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n",
		       *(u16 *)(*data));
		return -EINVAL;
	}

	*data += 4;

	return 0;
}

/* Binary file format -
 * /----------------------------------------------------------------------\
 * 0B  |                       0x5 [command index]                         |
 * 4B  | Number of config attributes     |            Reserved             |
 * 4B  | Config ID                       | Entity ID      | Length          |
 * 4B  |                       Value                                        |
 * \----------------------------------------------------------------------/
 * There can be several cfg_id-entity_id-Length-Value sets as specified by
 * 'Number of config attributes'.
 *
 * The API parses config attributes from the user provided buffer and flashes
 * them to the respective NVM path using Management FW interface.
 */
static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 entity_id, len, buf[32];
	bool need_nvm_init = true;
	struct qed_ptt *ptt;
	u16 cfg_id, count;
	int rc = 0, i;
	u32 flags;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	/* NVM CFG ID attribute header */
	*data += 4;
	count = *((u16 *)*data);
	*data += 4;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read config ids: num_attrs = %0d\n", count);
	/* NVM CFG ID attributes. Start loop index from 1 to avoid additional
	 * arithmetic operations in the implementation.
	 */
	for (i = 1; i <= count; i++) {
		cfg_id = *((u16 *)*data);
		*data += 2;
		entity_id = **data;
		(*data)++;
		len = **data;
		(*data)++;
		memcpy(buf, *data, len);
		*data += len;

		flags = 0;
		if (need_nvm_init) {
			flags |= QED_NVM_CFG_OPTION_INIT;
			need_nvm_init = false;
		}

		/* Commit to flash and free the resources */
		if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) {
			flags |= QED_NVM_CFG_OPTION_COMMIT |
				 QED_NVM_CFG_OPTION_FREE;
			need_nvm_init = true;
		}

		if (entity_id)
			flags |= QED_NVM_CFG_OPTION_ENTITY_SEL;

		DP_VERBOSE(cdev, NETIF_MSG_DRV,
			   "cfg_id = %d entity = %d len = %d\n", cfg_id,
			   entity_id, len);
		rc = qed_mcp_nvm_set_cfg(hwfn, ptt, cfg_id, entity_id, flags,
					 buf, len);
		if (rc) {
			DP_ERR(cdev, "Error %d configuring %d\n", rc, cfg_id);
			break;
		}
	}

	qed_ptt_release(hwfn, ptt);

	return rc;
}

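/* Query the length of an NVM config attribute; fall back to the maximal
 * buffer length if the attribute cannot be read.
 */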
#define QED_MAX_NVM_BUF_LEN 32
static int qed_nvm_flash_cfg_len(struct qed_dev *cdev, u32 cmd)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 buf[QED_MAX_NVM_BUF_LEN];
	struct qed_ptt *ptt;
	u32 len;
	int rc;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return QED_MAX_NVM_BUF_LEN;

	rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, 0, QED_NVM_CFG_GET_FLAGS, buf,
				 &len);
	if (rc || !len) {
		DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);
		len = QED_MAX_NVM_BUF_LEN;
	}

	qed_ptt_release(hwfn, ptt);

	return len;
}

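/* Read a single NVM config attribute into the caller-provided buffer. */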
static int qed_nvm_flash_cfg_read(struct qed_dev *cdev, u8 **data,
				  u32 cmd, u32 entity_id)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	u32 flags, len;
	int rc = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read config cmd = %d entity id %d\n", cmd, entity_id);
	flags = entity_id ? QED_NVM_CFG_GET_PF_FLAGS : QED_NVM_CFG_GET_FLAGS;
	rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, entity_id, flags, *data, &len);
	if (rc)
		DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

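/* Entry point for NVM flashing: fetch the batch file via the firmware
 * loader, validate its header and execute the contained commands one by
 * one, optionally checking the MFW response after each of them.
 */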
static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
{
	const struct firmware *image;
	const u8 *data, *data_end;
	u32 cmd_type;
	int rc;

	rc = request_firmware(&image, name, &cdev->pdev->dev);
	if (rc) {
		DP_ERR(cdev, "Failed to find '%s'\n", name);
		return rc;
	}

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Flashing '%s' - firmware's data at %p, size is %08x\n",
		   name, image->data, (u32)image->size);
	data = image->data;
	data_end = data + image->size;

	rc = qed_nvm_flash_image_validate(cdev, image, &data);
	if (rc)
		goto exit;

	while (data < data_end) {
		bool check_resp = false;

		/* Parse the actual command */
		cmd_type = *((u32 *)data);
		switch (cmd_type) {
		case QED_NVM_FLASH_CMD_FILE_DATA:
			rc = qed_nvm_flash_image_file_data(cdev, &data,
							   &check_resp);
			break;
		case QED_NVM_FLASH_CMD_FILE_START:
			rc = qed_nvm_flash_image_file_start(cdev, &data,
							    &check_resp);
			break;
		case QED_NVM_FLASH_CMD_NVM_CHANGE:
			rc = qed_nvm_flash_image_access(cdev, &data,
							&check_resp);
			break;
		case QED_NVM_FLASH_CMD_NVM_CFG_ID:
			rc = qed_nvm_flash_cfg_write(cdev, &data);
			break;
		default:
			DP_ERR(cdev, "Unknown command %08x\n", cmd_type);
			rc = -EINVAL;
			goto exit;
		}

		if (rc) {
			DP_ERR(cdev, "Command %08x failed\n", cmd_type);
			goto exit;
		}

		/* Check response if needed */
		if (check_resp) {
			u32 mcp_response = 0;

			if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) {
				DP_ERR(cdev, "Failed getting MCP response\n");
				rc = -EINVAL;
				goto exit;
			}

			switch (mcp_response & FW_MSG_CODE_MASK) {
			case FW_MSG_CODE_OK:
			case FW_MSG_CODE_NVM_OK:
			case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK:
			case FW_MSG_CODE_PHY_OK:
				break;
			default:
				DP_ERR(cdev, "MFW returns error: %08x\n",
				       mcp_response);
				rc = -EINVAL;
				goto exit;
			}
		}
	}

exit:
	release_firmware(image);

	return rc;
}

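/* Retrieve a stored NVM image through the management FW of the leading hwfn. */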
static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
			     u8 *buf, u16 len)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);

	return qed_mcp_get_nvm_image(hwfn, type, buf, len);
}

void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn)
{
	struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
	void *cookie = p_hwfn->cdev->ops_cookie;

	if (ops && ops->schedule_recovery_handler)
		ops->schedule_recovery_handler(cookie);
}

static const char * const qed_hw_err_type_descr[] = {
	[QED_HW_ERR_FAN_FAIL] = "Fan Failure",
	[QED_HW_ERR_MFW_RESP_FAIL] = "MFW Response Failure",
	[QED_HW_ERR_HW_ATTN] = "HW Attention",
	[QED_HW_ERR_DMAE_FAIL] = "DMAE Failure",
	[QED_HW_ERR_RAMROD_FAIL] = "Ramrod Failure",
	[QED_HW_ERR_FW_ASSERT] = "FW Assertion",
	[QED_HW_ERR_LAST] = "Unknown",
};

void qed_hw_error_occurred(struct qed_hwfn *p_hwfn,
			   enum qed_hw_err_type err_type)
{
	struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
	void *cookie = p_hwfn->cdev->ops_cookie;
	const char *err_str;

	if (err_type > QED_HW_ERR_LAST)
		err_type = QED_HW_ERR_LAST;
	err_str = qed_hw_err_type_descr[err_type];

	DP_NOTICE(p_hwfn, "HW error occurred [%s]\n", err_str);

	/* Call the HW error handler of the protocol driver.
	 * If it is not available - perform a minimal handling of preventing
	 * HW attentions from being reasserted.
	 */
	if (ops && ops->schedule_hw_err_handler)
		ops->schedule_hw_err_handler(cookie, err_type);
	else
		qed_int_attn_clr_enable(p_hwfn->cdev, true);
}

static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
			    void *handle)
{
	return qed_set_queue_coalesce(rx_coal, tx_coal, handle);
}

static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_set_led(hwfn, ptt, mode);

	qed_ptt_release(hwfn, ptt);

	return status;
}

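/* Trigger the MFW-assisted recovery process from the leading hwfn. */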
int qed_recovery_process(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;
	int rc = 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	rc = qed_start_recovery_process(p_hwfn, p_ptt);

	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}

static int qed_update_wol(struct qed_dev *cdev, bool enabled)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
				   : QED_OV_WOL_DISABLED);
	if (rc)
		goto out;

	rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return rc;
}

static int qed_update_drv_state(struct qed_dev *cdev, bool active)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
						QED_OV_DRIVER_STATE_ACTIVE :
						QED_OV_DRIVER_STATE_DISABLED);

	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_update_mac(struct qed_dev *cdev, const u8 *mac)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}

static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}

static int
qed_get_sb_info(struct qed_dev *cdev, struct qed_sb_info *sb,
		u16 qid, struct qed_sb_info_dbg *sb_dbg)
{
	struct qed_hwfn *hwfn = &cdev->hwfns[qid % cdev->num_hwfns];
	struct qed_ptt *ptt;
	int rc;

	if (IS_VF(cdev))
		return -EINVAL;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt) {
		DP_NOTICE(hwfn, "Can't acquire PTT\n");
		return -EAGAIN;
	}

	memset(sb_dbg, 0, sizeof(*sb_dbg));
	rc = qed_int_get_sb_dbg(hwfn, ptt, sb, sb_dbg);

	qed_ptt_release(hwfn, ptt);
	return rc;
}

static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
				  u8 dev_addr, u32 offset, u32 len)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr,
				  offset, len, buf);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_dbg_grc_config(hwfn, cfg_id, val);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

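/* Format a message and forward it to the MFW as raw debug data (PF only). */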
static __printf(2, 3) void qed_mfw_report(struct qed_dev *cdev, char *fmt, ...)
{
	char buf[QED_MFW_REPORT_STR_SIZE];
	struct qed_hwfn *p_hwfn;
	struct qed_ptt *p_ptt;
	va_list vl;

	va_start(vl, fmt);
	vsnprintf(buf, QED_MFW_REPORT_STR_SIZE, fmt, vl);
	va_end(vl);

	if (IS_PF(cdev)) {
		p_hwfn = QED_LEADING_HWFN(cdev);
		p_ptt = qed_ptt_acquire(p_hwfn);
		if (p_ptt) {
			qed_mcp_send_raw_debug_data(p_hwfn, p_ptt, buf,
						    strlen(buf));
			qed_ptt_release(p_hwfn, p_ptt);
		}
	}
}

static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev)
{
	return QED_AFFIN_HWFN_IDX(cdev);
}

static int qed_get_esl_status(struct qed_dev *cdev, bool *esl_active)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	*esl_active = false;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_get_esl_status(hwfn, ptt, esl_active);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static struct qed_selftest_ops qed_selftest_ops_pass = {
	.selftest_memory = &qed_selftest_memory,
	.selftest_interrupt = &qed_selftest_interrupt,
	.selftest_register = &qed_selftest_register,
	.selftest_clock = &qed_selftest_clock,
	.selftest_nvram = &qed_selftest_nvram,
};

const struct qed_common_ops qed_common_ops_pass = {
	.selftest = &qed_selftest_ops_pass,
	.probe = &qed_probe,
	.remove = &qed_remove,
	.set_power_state = &qed_set_power_state,
	.set_name = &qed_set_name,
	.update_pf_params = &qed_update_pf_params,
	.slowpath_start = &qed_slowpath_start,
	.slowpath_stop = &qed_slowpath_stop,
	.set_fp_int = &qed_set_int_fp,
	.get_fp_int = &qed_get_int_fp,
	.sb_init = &qed_sb_init,
	.sb_release = &qed_sb_release,
	.simd_handler_config = &qed_simd_handler_config,
	.simd_handler_clean = &qed_simd_handler_clean,
	.dbg_grc = &qed_dbg_grc,
	.dbg_grc_size = &qed_dbg_grc_size,
	.can_link_change = &qed_can_link_change,
	.set_link = &qed_set_link,
	.get_link = &qed_get_current_link,
	.drain = &qed_drain,
	.update_msglvl = &qed_init_dp,
	.devlink_register = qed_devlink_register,
	.devlink_unregister = qed_devlink_unregister,
	.report_fatal_error = qed_report_fatal_error,
	.dbg_all_data = &qed_dbg_all_data,
	.dbg_all_data_size = &qed_dbg_all_data_size,
	.chain_alloc = &qed_chain_alloc,
	.chain_free = &qed_chain_free,
	.nvm_flash = &qed_nvm_flash,
	.nvm_get_image = &qed_nvm_get_image,
	.set_coalesce = &qed_set_coalesce,
	.set_led = &qed_set_led,
	.recovery_process = &qed_recovery_process,
	.recovery_prolog = &qed_recovery_prolog,
	.attn_clr_enable = &qed_int_attn_clr_enable,
	.update_drv_state = &qed_update_drv_state,
	.update_mac = &qed_update_mac,
	.update_mtu = &qed_update_mtu,
	.update_wol = &qed_update_wol,
	.db_recovery_add = &qed_db_recovery_add,
	.db_recovery_del = &qed_db_recovery_del,
	.read_module_eeprom = &qed_read_module_eeprom,
	.get_affin_hwfn_idx = &qed_get_affin_hwfn_idx,
	.read_nvm_cfg = &qed_nvm_flash_cfg_read,
	.read_nvm_cfg_len = &qed_nvm_flash_cfg_len,
	.set_grc_config = &qed_set_grc_config,
	.mfw_report = &qed_mfw_report,
	.get_sb_info = &qed_get_sb_info,
	.get_esl_status = &qed_get_esl_status,
};

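/* Collect per-protocol statistics on behalf of the management FW. */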
void qed_get_protocol_stats(struct qed_dev *cdev,
			    enum qed_mcp_protocol_type type,
			    union qed_mcp_protocol_stats *stats)
{
	struct qed_eth_stats eth_stats;

	memset(stats, 0, sizeof(*stats));

	switch (type) {
	case QED_MCP_LAN_STATS:
		qed_get_vport_stats_context(cdev, &eth_stats, true);
		stats->lan_stats.ucast_rx_pkts =
					eth_stats.common.rx_ucast_pkts;
		stats->lan_stats.ucast_tx_pkts =
					eth_stats.common.tx_ucast_pkts;
		stats->lan_stats.fcs_err = -1;
		break;
	case QED_MCP_FCOE_STATS:
		qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats, true);
		break;
	case QED_MCP_ISCSI_STATS:
		qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats, true);
		break;
	default:
		DP_VERBOSE(cdev, QED_MSG_SP,
			   "Invalid protocol type = %d\n", type);
		return;
	}
}

int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
{
	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Scheduling slowpath task [Flag: %d]\n",
		   QED_SLOWPATH_MFW_TLV_REQ);
	/* Memory barrier for setting atomic bit */
	smp_mb__before_atomic();
	set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
	/* Memory barrier after setting atomic bit */
	smp_mb__after_atomic();
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);

	return 0;
}

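/* Build the generic TLV block from the protocol driver's feature flags,
 * MAC addresses and vport statistics.
 */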
static void
qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
{
	struct qed_common_cb_ops *op = cdev->protocol_ops.common;
	struct qed_eth_stats_common *p_common;
	struct qed_generic_tlvs gen_tlvs;
	struct qed_eth_stats stats;
	int i;

	memset(&gen_tlvs, 0, sizeof(gen_tlvs));
	op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);

	if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
		tlv->flags.ipv4_csum_offload = true;
	if (gen_tlvs.feat_flags & QED_TLV_LSO)
		tlv->flags.lso_supported = true;
	tlv->flags.b_set = true;

	for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
		if (is_valid_ether_addr(gen_tlvs.mac[i])) {
			ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
			tlv->mac_set[i] = true;
		}
	}

	qed_get_vport_stats(cdev, &stats);
	p_common = &stats.common;
	tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
			 p_common->rx_bcast_pkts;
	tlv->rx_frames_set = true;
	tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
			p_common->rx_bcast_bytes;
	tlv->rx_bytes_set = true;
	tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
			 p_common->tx_bcast_pkts;
	tlv->tx_frames_set = true;
	tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
			p_common->tx_bcast_bytes;
	tlv->tx_bytes_set = true;
}

int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
			  union qed_mfw_tlv_data *tlv_buf)
{
	struct qed_dev *cdev = hwfn->cdev;
	struct qed_common_cb_ops *ops;

	ops = cdev->protocol_ops.common;
	if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
		DP_NOTICE(hwfn, "Can't collect TLV management info\n");
		return -EINVAL;
	}

	switch (type) {
	case QED_MFW_TLV_GENERIC:
		qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
		break;
	case QED_MFW_TLV_ETH:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
		break;
	case QED_MFW_TLV_FCOE:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
		break;
	case QED_MFW_TLV_ISCSI:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);
		break;
	default:
		break;
	}

	return 0;
}

unsigned long qed_get_epoch_time(void)
{
	return ktime_get_real_seconds();
}