// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#include <linux/stddef.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/crc32.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
#include <net/devlink.h>
#include <linux/aer.h>
#include <linux/phylink.h>

#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_ll2.h"
#include "qed_fcoe.h"
#include "qed_iscsi.h"

#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_hw.h"
#include "qed_selftest.h"
#include "qed_debug.h"
#include "qed_devlink.h"

#define QED_ROCE_QPS			(8192)
#define QED_ROCE_DPIS			(8)
#define QED_RDMA_SRQS			QED_ROCE_QPS
#define QED_NVM_CFG_GET_FLAGS		0xA
#define QED_NVM_CFG_GET_PF_FLAGS	0x1A
#define QED_NVM_CFG_MAX_ATTRS		50

static char version[] =
	"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define FW_FILE_VERSION				\
	__stringify(FW_MAJOR_VERSION) "."	\
	__stringify(FW_MINOR_VERSION) "."	\
	__stringify(FW_REVISION_VERSION) "."	\
	__stringify(FW_ENGINEERING_VERSION)

#define QED_FW_FILE_NAME	\
	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"

MODULE_FIRMWARE(QED_FW_FILE_NAME);
/* MFW speed capabilities maps */

struct qed_mfw_speed_map {
	u32		mfw_val;
	__ETHTOOL_DECLARE_LINK_MODE_MASK(caps);

	const u32	*cap_arr;
	u32		arr_size;
};

#define QED_MFW_SPEED_MAP(type, arr)		\
{						\
	.mfw_val	= (type),		\
	.cap_arr	= (arr),		\
	.arr_size	= ARRAY_SIZE(arr),	\
}
static const u32 qed_mfw_ext_1g[] __initconst = {
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
};

static const u32 qed_mfw_ext_10g[] __initconst = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
};

static const u32 qed_mfw_ext_20g[] __initconst = {
	ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
};

static const u32 qed_mfw_ext_25g[] __initconst = {
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

static const u32 qed_mfw_ext_40g[] __initconst = {
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
};

static const u32 qed_mfw_ext_50g_base_r[] __initconst = {
	ETHTOOL_LINK_MODE_50000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseDR_Full_BIT,
};

static const u32 qed_mfw_ext_50g_base_r2[] __initconst = {
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

static const u32 qed_mfw_ext_100g_base_r2[] __initconst = {
	ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseDR2_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT,
};

static const u32 qed_mfw_ext_100g_base_r4[] __initconst = {
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};

static struct qed_mfw_speed_map qed_mfw_ext_maps[] __ro_after_init = {
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_1G, qed_mfw_ext_1g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_10G, qed_mfw_ext_10g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_20G, qed_mfw_ext_20g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_25G, qed_mfw_ext_25g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_40G, qed_mfw_ext_40g),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R,
			  qed_mfw_ext_50g_base_r),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_50G_BASE_R2,
			  qed_mfw_ext_50g_base_r2),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R2,
			  qed_mfw_ext_100g_base_r2),
	QED_MFW_SPEED_MAP(ETH_EXT_ADV_SPEED_100G_BASE_R4,
			  qed_mfw_ext_100g_base_r4),
};
static const u32 qed_mfw_legacy_1g[] __initconst = {
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
};

static const u32 qed_mfw_legacy_10g[] __initconst = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
};

static const u32 qed_mfw_legacy_20g[] __initconst = {
	ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
};

static const u32 qed_mfw_legacy_25g[] __initconst = {
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

static const u32 qed_mfw_legacy_40g[] __initconst = {
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
};

static const u32 qed_mfw_legacy_50g[] __initconst = {
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

static const u32 qed_mfw_legacy_bb_100g[] __initconst = {
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};

static struct qed_mfw_speed_map qed_mfw_legacy_maps[] __ro_after_init = {
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G,
			  qed_mfw_legacy_1g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G,
			  qed_mfw_legacy_10g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G,
			  qed_mfw_legacy_20g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G,
			  qed_mfw_legacy_25g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G,
			  qed_mfw_legacy_40g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G,
			  qed_mfw_legacy_50g),
	QED_MFW_SPEED_MAP(NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G,
			  qed_mfw_legacy_bb_100g),
};
static void __init qed_mfw_speed_map_populate(struct qed_mfw_speed_map *map)
{
	linkmode_set_bit_array(map->cap_arr, map->arr_size, map->caps);

	map->cap_arr = NULL;
	map->arr_size = 0;
}

static void __init qed_mfw_speed_maps_init(void)
{
	u32 i;

	for (i = 0; i < ARRAY_SIZE(qed_mfw_ext_maps); i++)
		qed_mfw_speed_map_populate(qed_mfw_ext_maps + i);

	for (i = 0; i < ARRAY_SIZE(qed_mfw_legacy_maps); i++)
		qed_mfw_speed_map_populate(qed_mfw_legacy_maps + i);
}
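
/* Once populated, the maps translate between ethtool link-mode masks and MFW
 * speed bits; the pattern used later (see qed_set_ext_speed_params()) is:
 *
 *	if (linkmode_intersects(params->adv_speeds, map->caps))
 *		ext_speed->advertised_speeds |= map->mfw_val;
 */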
static int __init qed_init(void)
{
	pr_info("%s", version);

	qed_mfw_speed_maps_init();

	return 0;
}
module_init(qed_init);

static void __exit qed_exit(void)
{
	/* To prevent marking this module as "permanent" */
}
module_exit(qed_exit);
/* Check if the DMA controller on the machine can properly handle the DMA
 * addressing required by the device.
 */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
	struct device *dev = &cdev->pdev->dev;

	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
			DP_NOTICE(cdev,
				  "Can't request 64-bit consistent allocations\n");
			return -EIO;
		}
	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
		DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
		return -EIO;
	}

	return 0;
}
static void qed_free_pci(struct qed_dev *cdev)
{
	struct pci_dev *pdev = cdev->pdev;

	pci_disable_pcie_error_reporting(pdev);

	if (cdev->doorbells && cdev->db_size)
		iounmap(cdev->doorbells);
	if (cdev->regview)
		iounmap(cdev->regview);
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
}
#define PCI_REVISION_ID_ERROR_VAL	0xff

/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
	u8 rev_id;
	int rc;

	cdev->pdev = pdev;

	rc = pci_enable_device(pdev);
	if (rc) {
		DP_NOTICE(cdev, "Cannot enable PCI device\n");
		goto err0;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #0\n");
		rc = -EIO;
		goto err1;
	}

	if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #2\n");
		rc = -EIO;
		goto err1;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, "qed");
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to request PCI memory resources\n");
			goto err1;
		}
		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
	if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
		DP_NOTICE(cdev,
			  "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
			  rev_id);
		rc = -ENODEV;
		goto err2;
	}
	if (!pci_is_pcie(pdev)) {
		DP_NOTICE(cdev, "The bus is not PCI Express\n");
		rc = -EIO;
		goto err2;
	}

	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
		DP_NOTICE(cdev, "Cannot find power management capability\n");

	rc = qed_set_coherency_mask(cdev);
	if (rc)
		goto err2;

	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
	cdev->pci_params.irq = pdev->irq;

	cdev->regview = pci_ioremap_bar(pdev, 0);
	if (!cdev->regview) {
		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err2;
	}

	cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
	cdev->db_size = pci_resource_len(cdev->pdev, 2);
	if (!cdev->db_size) {
		if (IS_PF(cdev)) {
			DP_NOTICE(cdev, "No Doorbell bar available\n");
			return -EINVAL;
		} else {
			return 0;
		}
	}

	cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);

	if (!cdev->doorbells) {
		DP_NOTICE(cdev, "Cannot map doorbell space\n");
		return -ENOMEM;
	}

	/* AER (Advanced Error reporting) configuration */
	rc = pci_enable_pcie_error_reporting(pdev);
	if (rc)
		DP_VERBOSE(cdev, NETIF_MSG_DRV,
			   "Failed to configure PCIe AER [%d]\n", rc);

	return 0;

err2:
	pci_release_regions(pdev);
err1:
	pci_disable_device(pdev);
err0:
	return rc;
}
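
/* Fill a qed_dev_info snapshot for the protocol driver: tunnel offload state,
 * BAR/IRQ parameters, firmware version, MFW/MBI versions and flash size
 * (queried over the MFW mailbox on a PF, or taken from the VF view), plus the
 * MTU. The result is also cached in cdev->common_dev_info.
 */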
int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_hw_info *hw_info = &p_hwfn->hw_info;
	struct qed_tunnel_info *tun = &cdev->tunnel;
	struct qed_ptt *ptt;

	memset(dev_info, 0, sizeof(struct qed_dev_info));

	if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->vxlan.b_mode_enabled)
		dev_info->vxlan_enable = true;

	if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
	    tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->gre_enable = true;

	if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
	    tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->geneve_enable = true;

	dev_info->num_hwfns = cdev->num_hwfns;
	dev_info->pci_mem_start = cdev->pci_params.mem_start;
	dev_info->pci_mem_end = cdev->pci_params.mem_end;
	dev_info->pci_irq = cdev->pci_params.irq;
	dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
	dev_info->dev_type = cdev->type;
	ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);

	if (IS_PF(cdev)) {
		dev_info->fw_major = FW_MAJOR_VERSION;
		dev_info->fw_minor = FW_MINOR_VERSION;
		dev_info->fw_rev = FW_REVISION_VERSION;
		dev_info->fw_eng = FW_ENGINEERING_VERSION;
		dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
						       &cdev->mf_bits);
		if (!test_bit(QED_MF_DISABLE_ARFS, &cdev->mf_bits))
			dev_info->b_arfs_capable = true;
		dev_info->tx_switching = true;

		if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
			dev_info->wol_support = true;

		dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn);

		dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
	} else {
		qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
				      &dev_info->fw_minor, &dev_info->fw_rev,
				      &dev_info->fw_eng);
	}

	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
		if (ptt) {
			qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mfw_rev, NULL);

			qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mbi_version);

			qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
					       &dev_info->flash_size);

			qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
		}
	} else {
		qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
				    &dev_info->mfw_rev, NULL);
	}

	dev_info->mtu = hw_info->mtu;
	cdev->common_dev_info = *dev_info;

	return 0;
}
static void qed_free_cdev(struct qed_dev *cdev)
{
	kfree((void *)cdev);
}

static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
{
	struct qed_dev *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return cdev;

	qed_init_struct(cdev);

	return cdev;
}
/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
{
	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
	return 0;
}
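
/* Core probe flow: allocate the qed_dev, set up debug printing, bring up the
 * PCI resources and run qed_hw_prepare(). On any failure the partially
 * initialized device is torn down and NULL is returned to the caller.
 */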
static struct qed_dev *qed_probe(struct pci_dev *pdev,
				 struct qed_probe_params *params)
{
	struct qed_dev *cdev;
	int rc;

	cdev = qed_alloc_cdev(pdev);
	if (!cdev)
		goto err0;

	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
	cdev->protocol = params->protocol;

	if (params->is_vf)
		cdev->b_is_vf = true;

	qed_init_dp(cdev, params->dp_module, params->dp_level);

	cdev->recov_in_prog = params->recov_in_prog;

	rc = qed_init_pci(cdev, pdev);
	if (rc) {
		DP_ERR(cdev, "init pci failed\n");
		goto err1;
	}
	DP_INFO(cdev, "PCI init completed successfully\n");

	rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
	if (rc) {
		DP_ERR(cdev, "hw prepare failed\n");
		goto err2;
	}

	DP_INFO(cdev, "qed_probe completed successfully\n");

	return cdev;

err2:
	qed_free_pci(cdev);
err1:
	qed_free_cdev(cdev);
err0:
	return NULL;
}

static void qed_remove(struct qed_dev *cdev)
{
	if (!cdev)
		return;

	qed_hw_remove(cdev);

	qed_free_pci(cdev);

	qed_set_power_state(cdev, PCI_D3hot);

	qed_free_cdev(cdev);
}
static void qed_disable_msix(struct qed_dev *cdev)
{
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		pci_disable_msix(cdev->pdev);
		kfree(cdev->int_params.msix_table);
	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
		pci_disable_msi(cdev->pdev);
	}

	memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
}
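
/* Request MSI-X vectors for the device. pci_enable_msix_range() may grant
 * fewer vectors than requested; because the fastpath expects an equal share
 * per hwfn, a partial grant that is not a multiple of the number of hwfns is
 * released and re-requested via pci_enable_msix_exact(), rounded down to
 * (rc / num_hwfns) * num_hwfns.
 */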
static int qed_enable_msix(struct qed_dev *cdev,
			   struct qed_int_params *int_params)
{
	int i, rc, cnt;

	cnt = int_params->in.num_vectors;

	for (i = 0; i < cnt; i++)
		int_params->msix_table[i].entry = i;

	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
				   int_params->in.min_msix_cnt, cnt);
	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
	    (rc % cdev->num_hwfns)) {
		pci_disable_msix(cdev->pdev);

		/* If fastpath is initialized, we need at least one interrupt
		 * per hwfn [and the slow path interrupts]. New requested number
		 * should be a multiple of the number of hwfns.
		 */
		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
		DP_NOTICE(cdev,
			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
			  cnt, int_params->in.num_vectors);
		rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
					   cnt);
		if (!rc)
			rc = cnt;
	}

	if (rc > 0) {
		/* MSI-x configuration was achieved */
		int_params->out.int_mode = QED_INT_MODE_MSIX;
		int_params->out.num_vectors = rc;
		rc = 0;
	} else {
		DP_NOTICE(cdev,
			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
			  cnt, rc);
	}

	return rc;
}
/* This function outputs the int mode and the number of enabled msix vector */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
	struct qed_int_params *int_params = &cdev->int_params;
	struct msix_entry *tbl;
	int rc = 0, cnt;

	switch (int_params->in.int_mode) {
	case QED_INT_MODE_MSIX:
		/* Allocate MSIX table */
		cnt = int_params->in.num_vectors;
		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
		if (!int_params->msix_table) {
			rc = -ENOMEM;
			goto out;
		}

		/* Enable MSIX */
		rc = qed_enable_msix(cdev, int_params);
		if (!rc)
			goto out;

		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
		kfree(int_params->msix_table);
		if (force_mode)
			goto out;
		fallthrough;

	case QED_INT_MODE_MSI:
		if (cdev->num_hwfns == 1) {
			rc = pci_enable_msi(cdev->pdev);
			if (!rc) {
				int_params->out.int_mode = QED_INT_MODE_MSI;
				goto out;
			}

			DP_NOTICE(cdev, "Failed to enable MSI\n");
			if (force_mode)
				goto out;
		}
		fallthrough;

	case QED_INT_MODE_INTA:
		int_params->out.int_mode = QED_INT_MODE_INTA;
		rc = 0;
		goto out;
	default:
		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
			  int_params->in.int_mode);
		rc = -EINVAL;
	}

out:
	if (!rc)
		DP_INFO(cdev, "Using %s interrupts\n",
			int_params->out.int_mode == QED_INT_MODE_INTA ?
			"INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
			"MSI" : "MSI-X");
	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

	return rc;
}
static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
				    int index, void (*handler)(void *))
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	hwfn->simd_proto_handler[relative_idx].func = handler;
	hwfn->simd_proto_handler[relative_idx].token = token;
}

static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	memset(&hwfn->simd_proto_handler[relative_idx], 0,
	       sizeof(struct qed_simd_fp_handler));
}
static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
	tasklet_schedule((struct tasklet_struct *)tasklet);

	return IRQ_HANDLED;
}
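
/* Single-IRQ (INTa/MSI) handler: bit 0 of the per-hwfn SISR status is the
 * slowpath indication, while the remaining bits index the per-protocol
 * fastpath SIMD handlers registered via qed_simd_handler_config().
 */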
static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
	struct qed_hwfn *hwfn;
	irqreturn_t rc = IRQ_NONE;
	u64 status;
	int i, j;

	for (i = 0; i < cdev->num_hwfns; i++) {
		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

		if (!status)
			continue;

		hwfn = &cdev->hwfns[i];

		/* Slowpath interrupt */
		if (unlikely(status & 0x1)) {
			tasklet_schedule(&hwfn->sp_dpc);
			status &= ~0x1;
			rc = IRQ_HANDLED;
		}

		/* Fastpath interrupts */
		for (j = 0; j < 64; j++) {
			if ((0x2ULL << j) & status) {
				struct qed_simd_fp_handler *p_handler =
					&hwfn->simd_proto_handler[j];

				if (p_handler->func)
					p_handler->func(p_handler->token);
				else
					DP_NOTICE(hwfn,
						  "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
						  j, status);

				status &= ~(0x2ULL << j);
				rc = IRQ_HANDLED;
			}
		}

		if (unlikely(status))
			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
				   "got an unknown interrupt status 0x%llx\n",
				   status);
	}

	return rc;
}
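
/* Request the slowpath interrupt for a hwfn: a dedicated MSI-X vector per
 * hwfn when MSI-X is active, otherwise the single shared device IRQ serviced
 * by qed_single_int().
 */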
int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
	struct qed_dev *cdev = hwfn->cdev;
	u32 int_mode;
	int rc = 0;
	u8 id;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX) {
		id = hwfn->my_id;
		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
			 id, cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
		rc = request_irq(cdev->int_params.msix_table[id].vector,
				 qed_msix_sp_int, 0, hwfn->name, &hwfn->sp_dpc);
	} else {
		unsigned long flags = 0;

		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
			 PCI_FUNC(cdev->pdev->devfn));

		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
			flags |= IRQF_SHARED;

		rc = request_irq(cdev->pdev->irq, qed_single_int,
				 flags, cdev->name, cdev);
	}

	if (rc)
		DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
	else
		DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
			   "Requested slowpath %s\n",
			   (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");

	return rc;
}
static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
{
	/* Calling the disable function will make sure that any
	 * currently-running function is completed. The following call to the
	 * enable function makes this sequence a flush-like operation.
	 */
	if (p_hwfn->b_sp_dpc_enabled) {
		tasklet_disable(&p_hwfn->sp_dpc);
		tasklet_enable(&p_hwfn->sp_dpc);
	}
}

void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u8 id = p_hwfn->my_id;
	u32 int_mode;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX)
		synchronize_irq(cdev->int_params.msix_table[id].vector);
	else
		synchronize_irq(cdev->pdev->irq);

	qed_slowpath_tasklet_flush(p_hwfn);
}
static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
	int i;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		for_each_hwfn(cdev, i) {
			if (!cdev->hwfns[i].b_int_requested)
				break;
			synchronize_irq(cdev->int_params.msix_table[i].vector);
			free_irq(cdev->int_params.msix_table[i].vector,
				 &cdev->hwfns[i].sp_dpc);
		}
	} else {
		if (QED_LEADING_HWFN(cdev)->b_int_requested)
			free_irq(cdev->pdev->irq, cdev);
	}
	qed_int_disable_post_isr_release(cdev);
}
static int qed_nic_stop(struct qed_dev *cdev)
{
	int i, rc;

	rc = qed_hw_stop(cdev);

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (p_hwfn->b_sp_dpc_enabled) {
			tasklet_disable(&p_hwfn->sp_dpc);
			p_hwfn->b_sp_dpc_enabled = false;
			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
				   "Disabled sp tasklet [hwfn %d] at %p\n",
				   i, &p_hwfn->sp_dpc);
		}
	}

	qed_dbg_pf_exit(cdev);

	return rc;
}
static int qed_nic_setup(struct qed_dev *cdev)
{
	int rc, i;

	/* Determine if interface is going to require LL2 */
	if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
		for (i = 0; i < cdev->num_hwfns; i++) {
			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

			p_hwfn->using_ll2 = true;
		}
	}

	rc = qed_resc_alloc(cdev);
	if (rc)
		return rc;

	DP_INFO(cdev, "Allocated qed resources\n");

	qed_resc_setup(cdev);

	return rc;
}
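
/* Fastpath vector bookkeeping: qed_set_int_fp() caps the requested fastpath
 * count to what the interrupt mode can provide (63 status blocks per hwfn for
 * INTa/MSI, or the reserved MSI-X fastpath vectors), and qed_get_int_fp()
 * exposes the MSI-X slice that protocol drivers may use.
 */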
static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
		limit = cdev->num_hwfns * 63;
	else if (cdev->int_params.fp_msix_cnt)
		limit = cdev->int_params.fp_msix_cnt;

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}

static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(struct qed_int_info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	/* Need to expose only MSI-X information; Single IRQ is handled solely
	 * by qed.
	 */
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.fp_msix_base;

		info->msix_cnt = cdev->int_params.fp_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];
	}

	return 0;
}
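
/* Compute the PF interrupt budget: one slowpath vector plus the per-hwfn
 * status-block count are requested per hwfn, MSI is rejected on CMT
 * (multi-hwfn) devices, and any MSI-X vectors left over beyond the L2 queue
 * needs are handed to RDMA.
 */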
static int qed_slowpath_setup_int(struct qed_dev *cdev,
				  enum qed_int_mode int_mode)
{
	struct qed_sb_cnt_info sb_cnt_info;
	int num_l2_queues = 0;
	int rc;
	int i;

	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
		return -EINVAL;
	}

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = int_mode;
	for_each_hwfn(cdev, i) {
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
		cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
		cdev->int_params.in.num_vectors++; /* slowpath */
	}

	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

	if (is_kdump_kernel()) {
		DP_INFO(cdev,
			"Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
			cdev->int_params.in.min_msix_cnt);
		cdev->int_params.in.num_vectors =
			cdev->int_params.in.min_msix_cnt;
	}

	rc = qed_set_int_mode(cdev, false);
	if (rc) {
		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
		return rc;
	}

	cdev->int_params.fp_msix_base = cdev->num_hwfns;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
				       cdev->num_hwfns;

	if (!IS_ENABLED(CONFIG_QED_RDMA) ||
	    !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
		return 0;

	for_each_hwfn(cdev, i)
		num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);

	DP_VERBOSE(cdev, QED_MSG_RDMA,
		   "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
		   cdev->int_params.fp_msix_cnt, num_l2_queues);

	if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
		cdev->int_params.rdma_msix_cnt =
			(cdev->int_params.fp_msix_cnt - num_l2_queues)
			/ cdev->num_hwfns;
		cdev->int_params.rdma_msix_base =
			cdev->int_params.fp_msix_base + num_l2_queues;
		cdev->int_params.fp_msix_cnt = num_l2_queues;
	} else {
		cdev->int_params.rdma_msix_cnt = 0;
	}

	DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
		   cdev->int_params.rdma_msix_cnt,
		   cdev->int_params.rdma_msix_base);

	return 0;
}
static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
{
	int rc;

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;

	qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
			    &cdev->int_params.in.num_vectors);
	if (cdev->num_hwfns > 1) {
		u8 vectors = 0;

		qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
		cdev->int_params.in.num_vectors += vectors;
	}

	/* We want a minimum of one fastpath vector per vf hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;

	rc = qed_set_int_mode(cdev, true);
	if (rc)
		return rc;

	cdev->int_params.fp_msix_base = 0;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;

	return 0;
}
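
/* Inflate a zlib-compressed chunk of the firmware init-values image into
 * unzip_buf using the per-hwfn zlib stream. Returns the number of dwords
 * written on success, or 0 on any zlib error.
 */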
u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int rc;

	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);

	if (rc != Z_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
			   rc);
		return 0;
	}

	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
	zlib_inflateEnd(p_hwfn->stream);

	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
			   p_hwfn->stream->msg, rc);
		return 0;
	}

	return p_hwfn->stream->total_out / 4;
}
static int qed_alloc_stream_mem(struct qed_dev *cdev)
{
	int i;
	void *workspace;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
		if (!p_hwfn->stream)
			return -ENOMEM;

		workspace = vzalloc(zlib_inflate_workspacesize());
		if (!workspace)
			return -ENOMEM;
		p_hwfn->stream->workspace = workspace;
	}

	return 0;
}

static void qed_free_stream_mem(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!p_hwfn->stream)
			return;

		vfree(p_hwfn->stream->workspace);
		kfree(p_hwfn->stream);
	}
}
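
/* Apply driver-wide defaults on top of the protocol driver's PF parameters:
 * RDMA QP/SRQ/DPI sizing when CONFIG_QED_RDMA is enabled, disabling aRFS
 * filters on CMT devices and VFs, and capping L2 connections when an RDMA
 * personality may also need ILT resources.
 */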
static void qed_update_pf_params(struct qed_dev *cdev,
				 struct qed_pf_params *params)
{
	int i;

	if (IS_ENABLED(CONFIG_QED_RDMA)) {
		params->rdma_pf_params.num_qps = QED_ROCE_QPS;
		params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
		params->rdma_pf_params.num_srqs = QED_RDMA_SRQS;
		/* divide by 3 the MRs to avoid MF ILT overflow */
		params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
	}

	if (cdev->num_hwfns > 1 || IS_VF(cdev))
		params->eth_pf_params.num_arfs_filters = 0;

	/* In case we might support RDMA, don't allow qede to be greedy
	 * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp]
	 * per hwfn.
	 */
	if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
		u16 *num_cons;

		num_cons = &params->eth_pf_params.num_cons;
		*num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS);
	}

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->pf_params = *params;
	}
}
1145 #define QED_PERIODIC_DB_REC_COUNT 10
1146 #define QED_PERIODIC_DB_REC_INTERVAL_MS 100
1147 #define QED_PERIODIC_DB_REC_INTERVAL \
1148 msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)
1150 static int qed_slowpath_delayed_work(struct qed_hwfn
*hwfn
,
1151 enum qed_slowpath_wq_flag wq_flag
,
1152 unsigned long delay
)
1154 if (!hwfn
->slowpath_wq_active
)
1157 /* Memory barrier for setting atomic bit */
1158 smp_mb__before_atomic();
1159 set_bit(wq_flag
, &hwfn
->slowpath_task_flags
);
1160 smp_mb__after_atomic();
1161 queue_delayed_work(hwfn
->slowpath_wq
, &hwfn
->slowpath_task
, delay
);
1166 void qed_periodic_db_rec_start(struct qed_hwfn
*p_hwfn
)
1168 /* Reset periodic Doorbell Recovery counter */
1169 p_hwfn
->periodic_db_rec_count
= QED_PERIODIC_DB_REC_COUNT
;
1171 /* Don't schedule periodic Doorbell Recovery if already scheduled */
1172 if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC
,
1173 &p_hwfn
->slowpath_task_flags
))
1176 qed_slowpath_delayed_work(p_hwfn
, QED_SLOWPATH_PERIODIC_DB_REC
,
1177 QED_PERIODIC_DB_REC_INTERVAL
);
1180 static void qed_slowpath_wq_stop(struct qed_dev
*cdev
)
1187 for_each_hwfn(cdev
, i
) {
1188 if (!cdev
->hwfns
[i
].slowpath_wq
)
1191 /* Stop queuing new delayed works */
1192 cdev
->hwfns
[i
].slowpath_wq_active
= false;
1194 cancel_delayed_work(&cdev
->hwfns
[i
].slowpath_task
);
1195 destroy_workqueue(cdev
->hwfns
[i
].slowpath_wq
);
1199 static void qed_slowpath_task(struct work_struct
*work
)
1201 struct qed_hwfn
*hwfn
= container_of(work
, struct qed_hwfn
,
1202 slowpath_task
.work
);
1203 struct qed_ptt
*ptt
= qed_ptt_acquire(hwfn
);
1206 if (hwfn
->slowpath_wq_active
)
1207 queue_delayed_work(hwfn
->slowpath_wq
,
1208 &hwfn
->slowpath_task
, 0);
1213 if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ
,
1214 &hwfn
->slowpath_task_flags
))
1215 qed_mfw_process_tlv_req(hwfn
, ptt
);
1217 if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC
,
1218 &hwfn
->slowpath_task_flags
)) {
1219 qed_db_rec_handler(hwfn
, ptt
);
1220 if (hwfn
->periodic_db_rec_count
--)
1221 qed_slowpath_delayed_work(hwfn
,
1222 QED_SLOWPATH_PERIODIC_DB_REC
,
1223 QED_PERIODIC_DB_REC_INTERVAL
);
1226 qed_ptt_release(hwfn
, ptt
);
1229 static int qed_slowpath_wq_start(struct qed_dev
*cdev
)
1231 struct qed_hwfn
*hwfn
;
1232 char name
[NAME_SIZE
];
1238 for_each_hwfn(cdev
, i
) {
1239 hwfn
= &cdev
->hwfns
[i
];
1241 snprintf(name
, NAME_SIZE
, "slowpath-%02x:%02x.%02x",
1242 cdev
->pdev
->bus
->number
,
1243 PCI_SLOT(cdev
->pdev
->devfn
), hwfn
->abs_pf_id
);
1245 hwfn
->slowpath_wq
= alloc_workqueue(name
, 0, 0);
1246 if (!hwfn
->slowpath_wq
) {
1247 DP_NOTICE(hwfn
, "Cannot create slowpath workqueue\n");
1251 INIT_DELAYED_WORK(&hwfn
->slowpath_task
, qed_slowpath_task
);
1252 hwfn
->slowpath_wq_active
= true;
1258 static int qed_slowpath_start(struct qed_dev
*cdev
,
1259 struct qed_slowpath_params
*params
)
1261 struct qed_drv_load_params drv_load_params
;
1262 struct qed_hw_init_params hw_init_params
;
1263 struct qed_mcp_drv_version drv_version
;
1264 struct qed_tunnel_info tunn_info
;
1265 const u8
*data
= NULL
;
1266 struct qed_hwfn
*hwfn
;
1267 struct qed_ptt
*p_ptt
;
1270 if (qed_iov_wq_start(cdev
))
1273 if (qed_slowpath_wq_start(cdev
))
1277 rc
= request_firmware(&cdev
->firmware
, QED_FW_FILE_NAME
,
1281 "Failed to find fw file - /lib/firmware/%s\n",
1286 if (cdev
->num_hwfns
== 1) {
1287 p_ptt
= qed_ptt_acquire(QED_LEADING_HWFN(cdev
));
1289 QED_LEADING_HWFN(cdev
)->p_arfs_ptt
= p_ptt
;
1292 "Failed to acquire PTT for aRFS\n");
1298 cdev
->rx_coalesce_usecs
= QED_DEFAULT_RX_USECS
;
1299 rc
= qed_nic_setup(cdev
);
1304 rc
= qed_slowpath_setup_int(cdev
, params
->int_mode
);
1306 rc
= qed_slowpath_vf_setup_int(cdev
);
1311 /* Allocate stream for unzipping */
1312 rc
= qed_alloc_stream_mem(cdev
);
1316 /* First Dword used to differentiate between various sources */
1317 data
= cdev
->firmware
->data
+ sizeof(u32
);
1319 qed_dbg_pf_init(cdev
);
1322 /* Start the slowpath */
1323 memset(&hw_init_params
, 0, sizeof(hw_init_params
));
1324 memset(&tunn_info
, 0, sizeof(tunn_info
));
1325 tunn_info
.vxlan
.b_mode_enabled
= true;
1326 tunn_info
.l2_gre
.b_mode_enabled
= true;
1327 tunn_info
.ip_gre
.b_mode_enabled
= true;
1328 tunn_info
.l2_geneve
.b_mode_enabled
= true;
1329 tunn_info
.ip_geneve
.b_mode_enabled
= true;
1330 tunn_info
.vxlan
.tun_cls
= QED_TUNN_CLSS_MAC_VLAN
;
1331 tunn_info
.l2_gre
.tun_cls
= QED_TUNN_CLSS_MAC_VLAN
;
1332 tunn_info
.ip_gre
.tun_cls
= QED_TUNN_CLSS_MAC_VLAN
;
1333 tunn_info
.l2_geneve
.tun_cls
= QED_TUNN_CLSS_MAC_VLAN
;
1334 tunn_info
.ip_geneve
.tun_cls
= QED_TUNN_CLSS_MAC_VLAN
;
1335 hw_init_params
.p_tunn
= &tunn_info
;
1336 hw_init_params
.b_hw_start
= true;
1337 hw_init_params
.int_mode
= cdev
->int_params
.out
.int_mode
;
1338 hw_init_params
.allow_npar_tx_switch
= true;
1339 hw_init_params
.bin_fw_data
= data
;
1341 memset(&drv_load_params
, 0, sizeof(drv_load_params
));
1342 drv_load_params
.is_crash_kernel
= is_kdump_kernel();
1343 drv_load_params
.mfw_timeout_val
= QED_LOAD_REQ_LOCK_TO_DEFAULT
;
1344 drv_load_params
.avoid_eng_reset
= false;
1345 drv_load_params
.override_force_load
= QED_OVERRIDE_FORCE_LOAD_NONE
;
1346 hw_init_params
.p_drv_load_params
= &drv_load_params
;
1348 rc
= qed_hw_init(cdev
, &hw_init_params
);
1353 "HW initialization and function start completed successfully\n");
1356 cdev
->tunn_feature_mask
= (BIT(QED_MODE_VXLAN_TUNN
) |
1357 BIT(QED_MODE_L2GENEVE_TUNN
) |
1358 BIT(QED_MODE_IPGENEVE_TUNN
) |
1359 BIT(QED_MODE_L2GRE_TUNN
) |
1360 BIT(QED_MODE_IPGRE_TUNN
));
1363 /* Allocate LL2 interface if needed */
1364 if (QED_LEADING_HWFN(cdev
)->using_ll2
) {
1365 rc
= qed_ll2_alloc_if(cdev
);
1370 hwfn
= QED_LEADING_HWFN(cdev
);
1371 drv_version
.version
= (params
->drv_major
<< 24) |
1372 (params
->drv_minor
<< 16) |
1373 (params
->drv_rev
<< 8) |
1375 strlcpy(drv_version
.name
, params
->name
,
1376 MCP_DRV_VER_STR_SIZE
- 4);
1377 rc
= qed_mcp_send_drv_version(hwfn
, hwfn
->p_main_ptt
,
1380 DP_NOTICE(cdev
, "Failed sending drv version command\n");
1385 qed_reset_vport_stats(cdev
);
1390 qed_ll2_dealloc_if(cdev
);
1394 qed_hw_timers_stop_all(cdev
);
1396 qed_slowpath_irq_free(cdev
);
1397 qed_free_stream_mem(cdev
);
1398 qed_disable_msix(cdev
);
1400 qed_resc_free(cdev
);
1403 release_firmware(cdev
->firmware
);
1405 if (IS_PF(cdev
) && (cdev
->num_hwfns
== 1) &&
1406 QED_LEADING_HWFN(cdev
)->p_arfs_ptt
)
1407 qed_ptt_release(QED_LEADING_HWFN(cdev
),
1408 QED_LEADING_HWFN(cdev
)->p_arfs_ptt
);
1410 qed_iov_wq_stop(cdev
, false);
1412 qed_slowpath_wq_stop(cdev
);
1417 static int qed_slowpath_stop(struct qed_dev
*cdev
)
1422 qed_slowpath_wq_stop(cdev
);
1424 qed_ll2_dealloc_if(cdev
);
1427 if (cdev
->num_hwfns
== 1)
1428 qed_ptt_release(QED_LEADING_HWFN(cdev
),
1429 QED_LEADING_HWFN(cdev
)->p_arfs_ptt
);
1430 qed_free_stream_mem(cdev
);
1431 if (IS_QED_ETH_IF(cdev
))
1432 qed_sriov_disable(cdev
, true);
1438 qed_slowpath_irq_free(cdev
);
1440 qed_disable_msix(cdev
);
1442 qed_resc_free(cdev
);
1444 qed_iov_wq_stop(cdev
, true);
1447 release_firmware(cdev
->firmware
);
1452 static void qed_set_name(struct qed_dev
*cdev
, char name
[NAME_SIZE
])
1456 memcpy(cdev
->name
, name
, NAME_SIZE
);
1457 for_each_hwfn(cdev
, i
)
1458 snprintf(cdev
->hwfns
[i
].name
, NAME_SIZE
, "%s-%d", name
, i
);
1461 static u32
qed_sb_init(struct qed_dev
*cdev
,
1462 struct qed_sb_info
*sb_info
,
1464 dma_addr_t sb_phy_addr
, u16 sb_id
,
1465 enum qed_sb_type type
)
1467 struct qed_hwfn
*p_hwfn
;
1468 struct qed_ptt
*p_ptt
;
1472 /* RoCE/Storage use a single engine in CMT mode while L2 uses both */
1473 if (type
== QED_SB_TYPE_L2_QUEUE
) {
1474 p_hwfn
= &cdev
->hwfns
[sb_id
% cdev
->num_hwfns
];
1475 rel_sb_id
= sb_id
/ cdev
->num_hwfns
;
1477 p_hwfn
= QED_AFFIN_HWFN(cdev
);
1481 DP_VERBOSE(cdev
, NETIF_MSG_INTR
,
1482 "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
1483 IS_LEAD_HWFN(p_hwfn
) ? 0 : 1, rel_sb_id
, sb_id
);
1485 if (IS_PF(p_hwfn
->cdev
)) {
1486 p_ptt
= qed_ptt_acquire(p_hwfn
);
1490 rc
= qed_int_sb_init(p_hwfn
, p_ptt
, sb_info
, sb_virt_addr
,
1491 sb_phy_addr
, rel_sb_id
);
1492 qed_ptt_release(p_hwfn
, p_ptt
);
1494 rc
= qed_int_sb_init(p_hwfn
, NULL
, sb_info
, sb_virt_addr
,
1495 sb_phy_addr
, rel_sb_id
);
1501 static u32
qed_sb_release(struct qed_dev
*cdev
,
1502 struct qed_sb_info
*sb_info
,
1504 enum qed_sb_type type
)
1506 struct qed_hwfn
*p_hwfn
;
1510 /* RoCE/Storage use a single engine in CMT mode while L2 uses both */
1511 if (type
== QED_SB_TYPE_L2_QUEUE
) {
1512 p_hwfn
= &cdev
->hwfns
[sb_id
% cdev
->num_hwfns
];
1513 rel_sb_id
= sb_id
/ cdev
->num_hwfns
;
1515 p_hwfn
= QED_AFFIN_HWFN(cdev
);
1519 DP_VERBOSE(cdev
, NETIF_MSG_INTR
,
1520 "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
1521 IS_LEAD_HWFN(p_hwfn
) ? 0 : 1, rel_sb_id
, sb_id
);
1523 rc
= qed_int_sb_release(p_hwfn
, sb_info
, rel_sb_id
);
1528 static bool qed_can_link_change(struct qed_dev
*cdev
)
1533 static void qed_set_ext_speed_params(struct qed_mcp_link_params
*link_params
,
1534 const struct qed_link_params
*params
)
1536 struct qed_mcp_link_speed_params
*ext_speed
= &link_params
->ext_speed
;
1537 const struct qed_mfw_speed_map
*map
;
1540 if (params
->override_flags
& QED_LINK_OVERRIDE_SPEED_AUTONEG
)
1541 ext_speed
->autoneg
= !!params
->autoneg
;
1543 if (params
->override_flags
& QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS
) {
1544 ext_speed
->advertised_speeds
= 0;
1546 for (i
= 0; i
< ARRAY_SIZE(qed_mfw_ext_maps
); i
++) {
1547 map
= qed_mfw_ext_maps
+ i
;
1549 if (linkmode_intersects(params
->adv_speeds
, map
->caps
))
1550 ext_speed
->advertised_speeds
|= map
->mfw_val
;
1554 if (params
->override_flags
& QED_LINK_OVERRIDE_SPEED_FORCED_SPEED
) {
1555 switch (params
->forced_speed
) {
1557 ext_speed
->forced_speed
= QED_EXT_SPEED_1G
;
1560 ext_speed
->forced_speed
= QED_EXT_SPEED_10G
;
1563 ext_speed
->forced_speed
= QED_EXT_SPEED_20G
;
1566 ext_speed
->forced_speed
= QED_EXT_SPEED_25G
;
1569 ext_speed
->forced_speed
= QED_EXT_SPEED_40G
;
1572 ext_speed
->forced_speed
= QED_EXT_SPEED_50G_R
|
1573 QED_EXT_SPEED_50G_R2
;
1576 ext_speed
->forced_speed
= QED_EXT_SPEED_100G_R2
|
1577 QED_EXT_SPEED_100G_R4
|
1578 QED_EXT_SPEED_100G_P4
;
1585 if (!(params
->override_flags
& QED_LINK_OVERRIDE_FEC_CONFIG
))
1588 switch (params
->forced_speed
) {
1590 switch (params
->fec
) {
1591 case FEC_FORCE_MODE_NONE
:
1592 link_params
->ext_fec_mode
= ETH_EXT_FEC_25G_NONE
;
1594 case FEC_FORCE_MODE_FIRECODE
:
1595 link_params
->ext_fec_mode
= ETH_EXT_FEC_25G_BASE_R
;
1597 case FEC_FORCE_MODE_RS
:
1598 link_params
->ext_fec_mode
= ETH_EXT_FEC_25G_RS528
;
1600 case FEC_FORCE_MODE_AUTO
:
1601 link_params
->ext_fec_mode
= ETH_EXT_FEC_25G_RS528
|
1602 ETH_EXT_FEC_25G_BASE_R
|
1603 ETH_EXT_FEC_25G_NONE
;
1611 switch (params
->fec
) {
1612 case FEC_FORCE_MODE_NONE
:
1613 link_params
->ext_fec_mode
= ETH_EXT_FEC_40G_NONE
;
1615 case FEC_FORCE_MODE_FIRECODE
:
1616 link_params
->ext_fec_mode
= ETH_EXT_FEC_40G_BASE_R
;
1618 case FEC_FORCE_MODE_AUTO
:
1619 link_params
->ext_fec_mode
= ETH_EXT_FEC_40G_BASE_R
|
1620 ETH_EXT_FEC_40G_NONE
;
1628 switch (params
->fec
) {
1629 case FEC_FORCE_MODE_NONE
:
1630 link_params
->ext_fec_mode
= ETH_EXT_FEC_50G_NONE
;
1632 case FEC_FORCE_MODE_FIRECODE
:
1633 link_params
->ext_fec_mode
= ETH_EXT_FEC_50G_BASE_R
;
1635 case FEC_FORCE_MODE_RS
:
1636 link_params
->ext_fec_mode
= ETH_EXT_FEC_50G_RS528
;
1638 case FEC_FORCE_MODE_AUTO
:
1639 link_params
->ext_fec_mode
= ETH_EXT_FEC_50G_RS528
|
1640 ETH_EXT_FEC_50G_BASE_R
|
1641 ETH_EXT_FEC_50G_NONE
;
1649 switch (params
->fec
) {
1650 case FEC_FORCE_MODE_NONE
:
1651 link_params
->ext_fec_mode
= ETH_EXT_FEC_100G_NONE
;
1653 case FEC_FORCE_MODE_FIRECODE
:
1654 link_params
->ext_fec_mode
= ETH_EXT_FEC_100G_BASE_R
;
1656 case FEC_FORCE_MODE_RS
:
1657 link_params
->ext_fec_mode
= ETH_EXT_FEC_100G_RS528
;
1659 case FEC_FORCE_MODE_AUTO
:
1660 link_params
->ext_fec_mode
= ETH_EXT_FEC_100G_RS528
|
1661 ETH_EXT_FEC_100G_BASE_R
|
1662 ETH_EXT_FEC_100G_NONE
;
1674 static int qed_set_link(struct qed_dev
*cdev
, struct qed_link_params
*params
)
1676 struct qed_mcp_link_params
*link_params
;
1677 struct qed_mcp_link_speed_params
*speed
;
1678 const struct qed_mfw_speed_map
*map
;
1679 struct qed_hwfn
*hwfn
;
1680 struct qed_ptt
*ptt
;
1687 /* The link should be set only once per PF */
1688 hwfn
= &cdev
->hwfns
[0];
1690 /* When VF wants to set link, force it to read the bulletin instead.
1691 * This mimics the PF behavior, where a noitification [both immediate
1692 * and possible later] would be generated when changing properties.
1695 qed_schedule_iov(hwfn
, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG
);
1699 ptt
= qed_ptt_acquire(hwfn
);
1703 link_params
= qed_mcp_get_link_params(hwfn
);
1707 speed
= &link_params
->speed
;
1709 if (params
->override_flags
& QED_LINK_OVERRIDE_SPEED_AUTONEG
)
1710 speed
->autoneg
= !!params
->autoneg
;
1712 if (params
->override_flags
& QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS
) {
1713 speed
->advertised_speeds
= 0;
1715 for (i
= 0; i
< ARRAY_SIZE(qed_mfw_legacy_maps
); i
++) {
1716 map
= qed_mfw_legacy_maps
+ i
;
1718 if (linkmode_intersects(params
->adv_speeds
, map
->caps
))
1719 speed
->advertised_speeds
|= map
->mfw_val
;
1723 if (params
->override_flags
& QED_LINK_OVERRIDE_SPEED_FORCED_SPEED
)
1724 speed
->forced_speed
= params
->forced_speed
;
1726 if (qed_mcp_is_ext_speed_supported(hwfn
))
1727 qed_set_ext_speed_params(link_params
, params
);
1729 if (params
->override_flags
& QED_LINK_OVERRIDE_PAUSE_CONFIG
) {
1730 if (params
->pause_config
& QED_LINK_PAUSE_AUTONEG_ENABLE
)
1731 link_params
->pause
.autoneg
= true;
1733 link_params
->pause
.autoneg
= false;
1734 if (params
->pause_config
& QED_LINK_PAUSE_RX_ENABLE
)
1735 link_params
->pause
.forced_rx
= true;
1737 link_params
->pause
.forced_rx
= false;
1738 if (params
->pause_config
& QED_LINK_PAUSE_TX_ENABLE
)
1739 link_params
->pause
.forced_tx
= true;
1741 link_params
->pause
.forced_tx
= false;
1744 if (params
->override_flags
& QED_LINK_OVERRIDE_LOOPBACK_MODE
) {
1745 switch (params
->loopback_mode
) {
1746 case QED_LINK_LOOPBACK_INT_PHY
:
1747 link_params
->loopback_mode
= ETH_LOOPBACK_INT_PHY
;
1749 case QED_LINK_LOOPBACK_EXT_PHY
:
1750 link_params
->loopback_mode
= ETH_LOOPBACK_EXT_PHY
;
1752 case QED_LINK_LOOPBACK_EXT
:
1753 link_params
->loopback_mode
= ETH_LOOPBACK_EXT
;
1755 case QED_LINK_LOOPBACK_MAC
:
1756 link_params
->loopback_mode
= ETH_LOOPBACK_MAC
;
1758 case QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123
:
1759 link_params
->loopback_mode
=
1760 ETH_LOOPBACK_CNIG_AH_ONLY_0123
;
1762 case QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301
:
1763 link_params
->loopback_mode
=
1764 ETH_LOOPBACK_CNIG_AH_ONLY_2301
;
1766 case QED_LINK_LOOPBACK_PCS_AH_ONLY
:
1767 link_params
->loopback_mode
= ETH_LOOPBACK_PCS_AH_ONLY
;
1769 case QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY
:
1770 link_params
->loopback_mode
=
1771 ETH_LOOPBACK_REVERSE_MAC_AH_ONLY
;
1773 case QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY
:
1774 link_params
->loopback_mode
=
1775 ETH_LOOPBACK_INT_PHY_FEA_AH_ONLY
;
1778 link_params
->loopback_mode
= ETH_LOOPBACK_NONE
;
1783 if (params
->override_flags
& QED_LINK_OVERRIDE_EEE_CONFIG
)
1784 memcpy(&link_params
->eee
, ¶ms
->eee
,
1785 sizeof(link_params
->eee
));
1787 if (params
->override_flags
& QED_LINK_OVERRIDE_FEC_CONFIG
)
1788 link_params
->fec
= params
->fec
;
1790 rc
= qed_mcp_set_link(hwfn
, ptt
, params
->link_up
);
1792 qed_ptt_release(hwfn
, ptt
);
1797 static int qed_get_port_type(u32 media_type
)
1801 switch (media_type
) {
1802 case MEDIA_SFPP_10G_FIBER
:
1803 case MEDIA_SFP_1G_FIBER
:
1804 case MEDIA_XFP_FIBER
:
1805 case MEDIA_MODULE_FIBER
:
1806 port_type
= PORT_FIBRE
;
1808 case MEDIA_DA_TWINAX
:
1809 port_type
= PORT_DA
;
1812 port_type
= PORT_TP
;
1815 case MEDIA_NOT_PRESENT
:
1816 port_type
= PORT_NONE
;
1818 case MEDIA_UNSPECIFIED
:
1820 port_type
= PORT_OTHER
;
1826 static int qed_get_link_data(struct qed_hwfn
*hwfn
,
1827 struct qed_mcp_link_params
*params
,
1828 struct qed_mcp_link_state
*link
,
1829 struct qed_mcp_link_capabilities
*link_caps
)
1833 if (!IS_PF(hwfn
->cdev
)) {
1834 qed_vf_get_link_params(hwfn
, params
);
1835 qed_vf_get_link_state(hwfn
, link
);
1836 qed_vf_get_link_caps(hwfn
, link_caps
);
1841 p
= qed_mcp_get_link_params(hwfn
);
1844 memcpy(params
, p
, sizeof(*params
));
1846 p
= qed_mcp_get_link_state(hwfn
);
1849 memcpy(link
, p
, sizeof(*link
));
1851 p
= qed_mcp_get_link_capabilities(hwfn
);
1854 memcpy(link_caps
, p
, sizeof(*link_caps
));
1859 static void qed_fill_link_capability(struct qed_hwfn
*hwfn
,
1860 struct qed_ptt
*ptt
, u32 capability
,
1861 unsigned long *if_caps
)
1863 u32 media_type
, tcvr_state
, tcvr_type
;
1864 u32 speed_mask
, board_cfg
;
1866 if (qed_mcp_get_media_type(hwfn
, ptt
, &media_type
))
1867 media_type
= MEDIA_UNSPECIFIED
;
1869 if (qed_mcp_get_transceiver_data(hwfn
, ptt
, &tcvr_state
, &tcvr_type
))
1870 tcvr_type
= ETH_TRANSCEIVER_STATE_UNPLUGGED
;
1872 if (qed_mcp_trans_speed_mask(hwfn
, ptt
, &speed_mask
))
1873 speed_mask
= 0xFFFFFFFF;
1875 if (qed_mcp_get_board_config(hwfn
, ptt
, &board_cfg
))
1876 board_cfg
= NVM_CFG1_PORT_PORT_TYPE_UNDEFINED
;
1878 DP_VERBOSE(hwfn
->cdev
, NETIF_MSG_DRV
,
1879 "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n",
1880 media_type
, tcvr_state
, tcvr_type
, speed_mask
, board_cfg
);
1882 switch (media_type
) {
1883 case MEDIA_DA_TWINAX
:
1884 phylink_set(if_caps
, FIBRE
);
1886 if (capability
& NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G
)
1887 phylink_set(if_caps
, 20000baseKR2_Full
);
1889 /* For DAC media multiple speed capabilities are supported */
1890 capability
|= speed_mask
;
1892 if (capability
& NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G
)
1893 phylink_set(if_caps
, 1000baseKX_Full
);
1894 if (capability
& NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G
)
1895 phylink_set(if_caps
, 10000baseCR_Full
);
1897 if (capability
& NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G
)
1898 switch (tcvr_type
) {
1899 case ETH_TRANSCEIVER_TYPE_40G_CR4
:
1900 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_CR
:
1901 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR
:
1902 phylink_set(if_caps
, 40000baseCR4_Full
);
1908 if (capability
& NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G
)
1909 phylink_set(if_caps
, 25000baseCR_Full
);
1910 if (capability
& NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G
)
1911 phylink_set(if_caps
, 50000baseCR2_Full
);
1914 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G
)
1915 switch (tcvr_type
) {
1916 case ETH_TRANSCEIVER_TYPE_100G_CR4
:
1917 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_CR
:
1918 phylink_set(if_caps
, 100000baseCR4_Full
);
1926 phylink_set(if_caps
, TP
);
1928 if (board_cfg
& NVM_CFG1_PORT_PORT_TYPE_EXT_PHY
) {
1930 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G
)
1931 phylink_set(if_caps
, 1000baseT_Full
);
1933 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G
)
1934 phylink_set(if_caps
, 10000baseT_Full
);
1937 if (board_cfg
& NVM_CFG1_PORT_PORT_TYPE_MODULE
) {
1938 phylink_set(if_caps
, FIBRE
);
1940 switch (tcvr_type
) {
1941 case ETH_TRANSCEIVER_TYPE_1000BASET
:
1942 phylink_set(if_caps
, 1000baseT_Full
);
1944 case ETH_TRANSCEIVER_TYPE_10G_BASET
:
1945 phylink_set(if_caps
, 10000baseT_Full
);
1953 case MEDIA_SFP_1G_FIBER
:
1954 case MEDIA_SFPP_10G_FIBER
:
1955 case MEDIA_XFP_FIBER
:
1956 case MEDIA_MODULE_FIBER
:
1957 phylink_set(if_caps
, FIBRE
);
1958 capability
|= speed_mask
;
1960 if (capability
& NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G
)
1961 switch (tcvr_type
) {
1962 case ETH_TRANSCEIVER_TYPE_1G_LX
:
1963 case ETH_TRANSCEIVER_TYPE_1G_SX
:
1964 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR
:
1965 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR
:
1966 phylink_set(if_caps
, 1000baseKX_Full
);
1972 if (capability
& NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G
)
1973 switch (tcvr_type
) {
1974 case ETH_TRANSCEIVER_TYPE_10G_SR
:
1975 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR
:
1976 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR
:
1977 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_SR
:
1978 phylink_set(if_caps
, 10000baseSR_Full
);
1980 case ETH_TRANSCEIVER_TYPE_10G_LR
:
1981 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR
:
1982 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_LR
:
1983 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_1G_10G_LR
:
1984 phylink_set(if_caps
, 10000baseLR_Full
);
1986 case ETH_TRANSCEIVER_TYPE_10G_LRM
:
1987 phylink_set(if_caps
, 10000baseLRM_Full
);
1989 case ETH_TRANSCEIVER_TYPE_10G_ER
:
1990 phylink_set(if_caps
, 10000baseR_FEC
);
1996 if (capability
& NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G
)
1997 phylink_set(if_caps
, 20000baseKR2_Full
);
1999 if (capability
& NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G
)
2000 switch (tcvr_type
) {
2001 case ETH_TRANSCEIVER_TYPE_25G_SR
:
2002 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_25G_SR
:
2003 phylink_set(if_caps
, 25000baseSR_Full
);
2009 if (capability
& NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G
)
2010 switch (tcvr_type
) {
2011 case ETH_TRANSCEIVER_TYPE_40G_LR4
:
2012 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_LR
:
2013 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR
:
2014 phylink_set(if_caps
, 40000baseLR4_Full
);
2016 case ETH_TRANSCEIVER_TYPE_40G_SR4
:
2017 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR
:
2018 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_10G_40G_SR
:
2019 phylink_set(if_caps
, 40000baseSR4_Full
);
2025 if (capability
& NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G
)
2026 phylink_set(if_caps
, 50000baseKR2_Full
);
2029 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G
)
2030 switch (tcvr_type
) {
2031 case ETH_TRANSCEIVER_TYPE_100G_SR4
:
2032 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_SR
:
2033 phylink_set(if_caps
, 100000baseSR4_Full
);
2035 case ETH_TRANSCEIVER_TYPE_MULTI_RATE_40G_100G_LR
:
2036 phylink_set(if_caps
, 100000baseLR4_ER4_Full
);
2044 phylink_set(if_caps
, Backplane
);
2046 if (capability
& NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G
)
2047 phylink_set(if_caps
, 20000baseKR2_Full
);
2048 if (capability
& NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G
)
2049 phylink_set(if_caps
, 1000baseKX_Full
);
2050 if (capability
& NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G
)
2051 phylink_set(if_caps
, 10000baseKR_Full
);
2052 if (capability
& NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G
)
2053 phylink_set(if_caps
, 25000baseKR_Full
);
2054 if (capability
& NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G
)
2055 phylink_set(if_caps
, 40000baseKR4_Full
);
2056 if (capability
& NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G
)
2057 phylink_set(if_caps
, 50000baseKR2_Full
);
2059 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G
)
2060 phylink_set(if_caps
, 100000baseKR4_Full
);
2063 case MEDIA_UNSPECIFIED
:
2064 case MEDIA_NOT_PRESENT
:
2066 DP_VERBOSE(hwfn
->cdev
, QED_MSG_DEBUG
,
2067 "Unknown media and transceiver type;\n");
2072 static void qed_lp_caps_to_speed_mask(u32 caps
, u32
*speed_mask
)
2077 (QED_LINK_PARTNER_SPEED_1G_FD
| QED_LINK_PARTNER_SPEED_1G_HD
))
2078 *speed_mask
|= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G
;
2079 if (caps
& QED_LINK_PARTNER_SPEED_10G
)
2080 *speed_mask
|= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G
;
2081 if (caps
& QED_LINK_PARTNER_SPEED_20G
)
2082 *speed_mask
|= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G
;
2083 if (caps
& QED_LINK_PARTNER_SPEED_25G
)
2084 *speed_mask
|= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G
;
2085 if (caps
& QED_LINK_PARTNER_SPEED_40G
)
2086 *speed_mask
|= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G
;
2087 if (caps
& QED_LINK_PARTNER_SPEED_50G
)
2088 *speed_mask
|= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G
;
2089 if (caps
& QED_LINK_PARTNER_SPEED_100G
)
2090 *speed_mask
|= NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G
;
2093 static void qed_fill_link(struct qed_hwfn
*hwfn
,
2094 struct qed_ptt
*ptt
,
2095 struct qed_link_output
*if_link
)
2097 struct qed_mcp_link_capabilities link_caps
;
2098 struct qed_mcp_link_params params
;
2099 struct qed_mcp_link_state link
;
2100 u32 media_type
, speed_mask
;
2102 memset(if_link
, 0, sizeof(*if_link
));
2104 /* Prepare source inputs */
2105 if (qed_get_link_data(hwfn
, ¶ms
, &link
, &link_caps
)) {
2106 dev_warn(&hwfn
->cdev
->pdev
->dev
, "no link data available\n");
2110 /* Set the link parameters to pass to protocol driver */
2112 if_link
->link_up
= true;
2114 if (IS_PF(hwfn
->cdev
) && qed_mcp_is_ext_speed_supported(hwfn
)) {
2115 if (link_caps
.default_ext_autoneg
)
2116 phylink_set(if_link
->supported_caps
, Autoneg
);
2118 linkmode_copy(if_link
->advertised_caps
, if_link
->supported_caps
);
2120 if (params
.ext_speed
.autoneg
)
2121 phylink_set(if_link
->advertised_caps
, Autoneg
);
2123 phylink_clear(if_link
->advertised_caps
, Autoneg
);
2125 qed_fill_link_capability(hwfn
, ptt
,
2126 params
.ext_speed
.advertised_speeds
,
2127 if_link
->advertised_caps
);
2129 if (link_caps
.default_speed_autoneg
)
2130 phylink_set(if_link
->supported_caps
, Autoneg
);
2132 linkmode_copy(if_link
->advertised_caps
, if_link
->supported_caps
);
2134 if (params
.speed
.autoneg
)
2135 phylink_set(if_link
->advertised_caps
, Autoneg
);
2137 phylink_clear(if_link
->advertised_caps
, Autoneg
);
2140 if (params
.pause
.autoneg
||
2141 (params
.pause
.forced_rx
&& params
.pause
.forced_tx
))
2142 phylink_set(if_link
->supported_caps
, Asym_Pause
);
2143 if (params
.pause
.autoneg
|| params
.pause
.forced_rx
||
2144 params
.pause
.forced_tx
)
2145 phylink_set(if_link
->supported_caps
, Pause
);
2147 if_link
->sup_fec
= link_caps
.fec_default
;
2148 if_link
->active_fec
= params
.fec
;
2150 /* Fill link advertised capability */
2151 qed_fill_link_capability(hwfn
, ptt
, params
.speed
.advertised_speeds
,
2152 if_link
->advertised_caps
);
2154 /* Fill link supported capability */
2155 qed_fill_link_capability(hwfn
, ptt
, link_caps
.speed_capabilities
,
2156 if_link
->supported_caps
);
2158 /* Fill partner advertised capability */
2159 qed_lp_caps_to_speed_mask(link
.partner_adv_speed
, &speed_mask
);
2160 qed_fill_link_capability(hwfn
, ptt
, speed_mask
, if_link
->lp_caps
);
2163 if_link
->speed
= link
.speed
;
2165 /* TODO - fill duplex properly */
2166 if_link
->duplex
= DUPLEX_FULL
;
2167 qed_mcp_get_media_type(hwfn
, ptt
, &media_type
);
2168 if_link
->port
= qed_get_port_type(media_type
);
2170 if_link
->autoneg
= params
.speed
.autoneg
;
2172 if (params
.pause
.autoneg
)
2173 if_link
->pause_config
|= QED_LINK_PAUSE_AUTONEG_ENABLE
;
2174 if (params
.pause
.forced_rx
)
2175 if_link
->pause_config
|= QED_LINK_PAUSE_RX_ENABLE
;
2176 if (params
.pause
.forced_tx
)
2177 if_link
->pause_config
|= QED_LINK_PAUSE_TX_ENABLE
;
2179 if (link
.an_complete
)
2180 phylink_set(if_link
->lp_caps
, Autoneg
);
2181 if (link
.partner_adv_pause
)
2182 phylink_set(if_link
->lp_caps
, Pause
);
2183 if (link
.partner_adv_pause
== QED_LINK_PARTNER_ASYMMETRIC_PAUSE
||
2184 link
.partner_adv_pause
== QED_LINK_PARTNER_BOTH_PAUSE
)
2185 phylink_set(if_link
->lp_caps
, Asym_Pause
);
2187 if (link_caps
.default_eee
== QED_MCP_EEE_UNSUPPORTED
) {
2188 if_link
->eee_supported
= false;
2190 if_link
->eee_supported
= true;
2191 if_link
->eee_active
= link
.eee_active
;
2192 if_link
->sup_caps
= link_caps
.eee_speed_caps
;
2193 /* MFW clears adv_caps on eee disable; use configured value */
2194 if_link
->eee
.adv_caps
= link
.eee_adv_caps
? link
.eee_adv_caps
:
2195 params
.eee
.adv_caps
;
2196 if_link
->eee
.lp_adv_caps
= link
.eee_lp_adv_caps
;
2197 if_link
->eee
.enable
= params
.eee
.enable
;
2198 if_link
->eee
.tx_lpi_enable
= params
.eee
.tx_lpi_enable
;
2199 if_link
->eee
.tx_lpi_timer
= params
.eee
.tx_lpi_timer
;
2203 static void qed_get_current_link(struct qed_dev
*cdev
,
2204 struct qed_link_output
*if_link
)
2206 struct qed_hwfn
*hwfn
;
2207 struct qed_ptt
*ptt
;
2210 hwfn
= &cdev
->hwfns
[0];
2212 ptt
= qed_ptt_acquire(hwfn
);
2214 qed_fill_link(hwfn
, ptt
, if_link
);
2215 qed_ptt_release(hwfn
, ptt
);
2217 DP_NOTICE(hwfn
, "Failed to fill link; No PTT\n");
2220 qed_fill_link(hwfn
, NULL
, if_link
);
2223 for_each_hwfn(cdev
, i
)
2224 qed_inform_vf_link_state(&cdev
->hwfns
[i
]);
2227 void qed_link_update(struct qed_hwfn
*hwfn
, struct qed_ptt
*ptt
)
2229 void *cookie
= hwfn
->cdev
->ops_cookie
;
2230 struct qed_common_cb_ops
*op
= hwfn
->cdev
->protocol_ops
.common
;
2231 struct qed_link_output if_link
;
2233 qed_fill_link(hwfn
, ptt
, &if_link
);
2234 qed_inform_vf_link_state(hwfn
);
2236 if (IS_LEAD_HWFN(hwfn
) && cookie
)
2237 op
->link_update(cookie
, &if_link
);
2240 void qed_bw_update(struct qed_hwfn
*hwfn
, struct qed_ptt
*ptt
)
2242 void *cookie
= hwfn
->cdev
->ops_cookie
;
2243 struct qed_common_cb_ops
*op
= hwfn
->cdev
->protocol_ops
.common
;
2245 if (IS_LEAD_HWFN(hwfn
) && cookie
&& op
&& op
->bw_update
)
2246 op
->bw_update(cookie
);
static int qed_drain(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i, rc;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];
		ptt = qed_ptt_acquire(hwfn);
		if (!ptt) {
			DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
			return -EBUSY;
		}
		rc = qed_mcp_drain(hwfn, ptt);
		qed_ptt_release(hwfn, ptt);
		if (rc)
			return rc;
	}

	return 0;
}
static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev,
					  struct qed_nvm_image_att *nvm_image,
					  u32 *crc)
{
	u8 *buf = NULL;
	int rc;

	/* Allocate a buffer for holding the nvram image */
	buf = kzalloc(nvm_image->length, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Read image into buffer */
	rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr,
			      buf, nvm_image->length);
	if (rc) {
		DP_ERR(cdev, "Failed reading image from nvm\n");
		goto out;
	}

	/* Convert the buffer into big-endian format (excluding the
	 * closing 4 bytes of CRC).
	 */
	cpu_to_be32_array((__force __be32 *)buf, (const u32 *)buf,
			  DIV_ROUND_UP(nvm_image->length - 4, 4));

	/* Calc CRC for the "actual" image buffer, i.e. not including
	 * the last 4 CRC bytes.
	 */
	*crc = ~crc32(~0U, buf, nvm_image->length - 4);
	*crc = (__force u32)cpu_to_be32p(crc);

out:
	kfree(buf);

	return rc;
}
/* Binary file format -
 *     /----------------------------------------------------------------------\
 * 0B  |                       0x4 [command index]                            |
 * 4B  | image_type     | Options        |  Number of register settings       |
 * 8B  |                       Value                                           |
 * 12B |                       Mask                                            |
 * 16B |                       Offset                                          |
 *     \----------------------------------------------------------------------/
 * There can be several Value-Mask-Offset sets as specified by 'Number of...'.
 * Options - 0'b - Calculate & Update CRC for image
 */
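/* For illustration only (not used by the parser below): the fixed part of
 * this command could be modelled as a packed little-endian record, followed
 * by 'Number of register settings' Value/Mask/Offset triplets:
 *
 *	struct qed_nvm_change_hdr {		// hypothetical helper type
 *		__le32 cmd_index;		// 0x4
 *		u8 image_type;
 *		u8 options;			// bit 0: calculate & update CRC
 *		__le16 num_reg_settings;
 *	};
 */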
static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data,
				      bool *check_resp)
{
	struct qed_nvm_image_att nvm_image;
	struct qed_hwfn *p_hwfn;
	bool is_crc = false;
	u32 image_type;
	int rc = 0, i;
	u16 len;

	*data += 4;
	image_type = **data;
	p_hwfn = QED_LEADING_HWFN(cdev);
	for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
		if (image_type == p_hwfn->nvm_info.image_att[i].image_type)
			break;
	if (i == p_hwfn->nvm_info.num_images) {
		DP_ERR(cdev, "Failed to find nvram image of type %08x\n",
		       image_type);
		return -ENOENT;
	}

	nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
	nvm_image.length = p_hwfn->nvm_info.image_att[i].len;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n",
		   **data, image_type, nvm_image.start_addr,
		   nvm_image.start_addr + nvm_image.length - 1);
	(*data)++;
	is_crc = !!(**data & BIT(0));
	(*data)++;
	len = *((u16 *)*data);
	*data += 2;
	*check_resp = !!(**data & BIT(0));
	*data += 4;

	if (is_crc) {
		u32 crc = 0;

		rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc);
		if (rc) {
			DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc);
			goto exit;
		}

		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
				       (nvm_image.start_addr +
					nvm_image.length - 4), (u8 *)&crc, 4);
		if (rc)
			DP_ERR(cdev, "Failed writing to %08x, rc = %d\n",
			       nvm_image.start_addr + nvm_image.length - 4, rc);
		goto exit;
	}

	/* Iterate over the values for setting */
	while (len) {
		u32 offset, mask, value, cur_value;
		u8 buf[4];

		value = *((u32 *)*data);
		*data += 4;
		mask = *((u32 *)*data);
		*data += 4;
		offset = *((u32 *)*data);
		*data += 4;

		rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf,
				      4);
		if (rc) {
			DP_ERR(cdev, "Failed reading from %08x\n",
			       nvm_image.start_addr + offset);
			goto exit;
		}

		cur_value = le32_to_cpu(*((__le32 *)buf));
		DP_VERBOSE(cdev, NETIF_MSG_DRV,
			   "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n",
			   nvm_image.start_addr + offset, cur_value,
			   (cur_value & ~mask) | (value & mask), value, mask);
		value = (value & mask) | (cur_value & ~mask);
		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
				       nvm_image.start_addr + offset,
				       (u8 *)&value, 4);
		if (rc) {
			DP_ERR(cdev, "Failed writing to %08x\n",
			       nvm_image.start_addr + offset);
			goto exit;
		}

		len--;
	}

exit:
	return rc;
}
/* Binary file format -
 *     /----------------------------------------------------------------------\
 * 0B  |                       0x3 [command index]                            |
 * 4B  | b'0: check_response?   | b'1-31  reserved                            |
 * 8B  | File-type |                   reserved                               |
 * 12B |                    Image length in bytes                             |
 *     \----------------------------------------------------------------------/
 *     Start a new file of the provided type
 */
static int qed_nvm_flash_image_file_start(struct qed_dev *cdev,
					  const u8 **data, bool *check_resp)
{
	u32 file_type, file_size = 0;
	int rc;

	*data += 4;
	*check_resp = !!(**data & BIT(0));
	*data += 4;
	file_type = **data;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "About to start a new file of type %02x\n", file_type);
	if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) {
		*data += 4;
		file_size = *((u32 *)(*data));
	}

	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type,
			       (u8 *)(&file_size), 4);
	*data += 4;

	return rc;
}
/* Binary file format -
 *     /----------------------------------------------------------------------\
 * 0B  |                       0x2 [command index]                            |
 * 4B  |                       Length in bytes                                |
 * 8B  | b'0: check_response?   | b'1-31  reserved                            |
 * 12B |                       Offset in bytes                                |
 * 16B |                       Data ...                                       |
 *     \----------------------------------------------------------------------/
 *     Write data as part of a file that was previously started. Data should be
 *     of length equal to that provided in the message
 */
static int qed_nvm_flash_image_file_data(struct qed_dev *cdev,
					 const u8 **data, bool *check_resp)
{
	u32 offset, len;
	int rc;

	*data += 4;
	len = *((u32 *)(*data));
	*data += 4;
	*check_resp = !!(**data & BIT(0));
	*data += 4;
	offset = *((u32 *)(*data));
	*data += 4;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "About to write File-data: %08x bytes to offset %08x\n",
		   len, offset);

	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset,
			       (char *)(*data), len);
	*data += len;

	return rc;
}
/* Binary file format [General header] -
 *     /----------------------------------------------------------------------\
 * 0B  |                       QED_NVM_SIGNATURE                              |
 * 4B  |                       Length in bytes                                |
 * 8B  | Highest command in this batchfile |          Reserved                |
 *     \----------------------------------------------------------------------/
 */
static int qed_nvm_flash_image_validate(struct qed_dev *cdev,
					const struct firmware *image,
					const u8 **data)
{
	u32 signature, len;

	/* Check minimum size */
	if (image->size < 12) {
		DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size);
		return -EINVAL;
	}

	/* Check signature */
	signature = *((u32 *)(*data));
	if (signature != QED_NVM_SIGNATURE) {
		DP_ERR(cdev, "Wrong signature '%08x'\n", signature);
		return -EINVAL;
	}

	*data += 4;
	/* Validate internal size equals the image-size */
	len = *((u32 *)(*data));
	if (len != image->size) {
		DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n",
		       len, (u32)image->size);
		return -EINVAL;
	}

	*data += 4;
	/* Make sure driver familiar with all commands necessary for this */
	if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) {
		DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n",
		       *(u16 *)(*data));
		return -EINVAL;
	}

	*data += 4;

	return 0;
}
/* Binary file format -
 *     /----------------------------------------------------------------------\
 * 0B  |                       0x5 [command index]                            |
 * 4B  | Number of config attributes     |          Reserved                  |
 * 4B  | Config ID                       | Entity ID      | Length            |
 * 4B  | Value                                                                |
 *     \----------------------------------------------------------------------/
 * There can be several cfg_id-entity_id-Length-Value sets as specified by
 * 'Number of config attributes'.
 *
 * The API parses config attributes from the user provided buffer and flashes
 * them to the respective NVM path using Management FW interface.
 */
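/* For illustration only: each attribute parsed below consists of a 16-bit
 * config ID, an 8-bit entity ID, an 8-bit length and 'length' bytes of value
 * data, matching the record layout sketched above.
 */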
static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 entity_id, len, buf[32];
	bool need_nvm_init = true;
	struct qed_ptt *ptt;
	u16 cfg_id, count;
	int rc = 0, i;
	u32 flags;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	/* NVM CFG ID attribute header */
	*data += 4;
	count = *((u16 *)*data);
	*data += 4;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read config ids: num_attrs = %0d\n", count);
	/* NVM CFG ID attributes. Start loop index from 1 to avoid additional
	 * arithmetic operations in the implementation.
	 */
	for (i = 1; i <= count; i++) {
		cfg_id = *((u16 *)*data);
		*data += 2;
		entity_id = **data;
		(*data)++;
		len = **data;
		(*data)++;
		memcpy(buf, *data, len);
		*data += len;

		flags = 0;
		if (need_nvm_init) {
			flags |= QED_NVM_CFG_OPTION_INIT;
			need_nvm_init = false;
		}

		/* Commit to flash and free the resources */
		if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) {
			flags |= QED_NVM_CFG_OPTION_COMMIT |
				 QED_NVM_CFG_OPTION_FREE;
			need_nvm_init = true;
		}

		if (entity_id)
			flags |= QED_NVM_CFG_OPTION_ENTITY_SEL;

		DP_VERBOSE(cdev, NETIF_MSG_DRV,
			   "cfg_id = %d entity = %d len = %d\n", cfg_id,
			   entity_id, len);
		rc = qed_mcp_nvm_set_cfg(hwfn, ptt, cfg_id, entity_id, flags,
					 buf, len);
		if (rc) {
			DP_ERR(cdev, "Error %d configuring %d\n", rc, cfg_id);
			break;
		}
	}

	qed_ptt_release(hwfn, ptt);

	return rc;
}
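/* Report the buffer length needed to read NVM config attribute 'cmd';
 * falls back to QED_MAX_NVM_BUF_LEN if the query fails.
 */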
#define QED_MAX_NVM_BUF_LEN 32
static int qed_nvm_flash_cfg_len(struct qed_dev *cdev, u32 cmd)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 buf[QED_MAX_NVM_BUF_LEN];
	struct qed_ptt *ptt;
	u32 len;
	int rc;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return QED_MAX_NVM_BUF_LEN;

	rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, 0, QED_NVM_CFG_GET_FLAGS, buf,
				 &len);
	if (rc || !len) {
		DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);
		len = QED_MAX_NVM_BUF_LEN;
	}

	qed_ptt_release(hwfn, ptt);

	return len;
}
static int qed_nvm_flash_cfg_read(struct qed_dev *cdev, u8 **data,
				  u32 cmd, u32 entity_id)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	u32 flags, len;
	int rc = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read config cmd = %d entity id %d\n", cmd, entity_id);
	flags = entity_id ? QED_NVM_CFG_GET_PF_FLAGS : QED_NVM_CFG_GET_FLAGS;
	rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, entity_id, flags, *data, &len);
	if (rc)
		DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);

	qed_ptt_release(hwfn, ptt);

	return rc;
}
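/* Top-level NVM flash handler, used by the protocol driver's ethtool flash
 * path: validate the batch-file header, then walk its commands and dispatch
 * each one to the helpers above, optionally checking the MCP/MFW response
 * after every command that requests it.
 */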
static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
{
	const struct firmware *image;
	const u8 *data, *data_end;
	u32 cmd_type;
	int rc;

	rc = request_firmware(&image, name, &cdev->pdev->dev);
	if (rc) {
		DP_ERR(cdev, "Failed to find '%s'\n", name);
		return rc;
	}

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Flashing '%s' - firmware's data at %p, size is %08x\n",
		   name, image->data, (u32)image->size);
	data = image->data;
	data_end = data + image->size;

	rc = qed_nvm_flash_image_validate(cdev, image, &data);
	if (rc)
		goto exit;

	while (data < data_end) {
		bool check_resp = false;

		/* Parse the actual command */
		cmd_type = *((u32 *)data);
		switch (cmd_type) {
		case QED_NVM_FLASH_CMD_FILE_DATA:
			rc = qed_nvm_flash_image_file_data(cdev, &data,
							   &check_resp);
			break;
		case QED_NVM_FLASH_CMD_FILE_START:
			rc = qed_nvm_flash_image_file_start(cdev, &data,
							    &check_resp);
			break;
		case QED_NVM_FLASH_CMD_NVM_CHANGE:
			rc = qed_nvm_flash_image_access(cdev, &data,
							&check_resp);
			break;
		case QED_NVM_FLASH_CMD_NVM_CFG_ID:
			rc = qed_nvm_flash_cfg_write(cdev, &data);
			break;
		default:
			DP_ERR(cdev, "Unknown command %08x\n", cmd_type);
			rc = -EINVAL;
			goto exit;
		}

		if (rc) {
			DP_ERR(cdev, "Command %08x failed\n", cmd_type);
			goto exit;
		}

		/* Check response if needed */
		if (check_resp) {
			u32 mcp_response = 0;

			if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) {
				DP_ERR(cdev, "Failed getting MCP response\n");
				rc = -EINVAL;
				goto exit;
			}

			switch (mcp_response & FW_MSG_CODE_MASK) {
			case FW_MSG_CODE_OK:
			case FW_MSG_CODE_NVM_OK:
			case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK:
			case FW_MSG_CODE_PHY_OK:
				break;
			default:
				DP_ERR(cdev, "MFW returns error: %08x\n",
				       mcp_response);
				rc = -EINVAL;
				goto exit;
			}
		}
	}

exit:
	release_firmware(image);

	return rc;
}
static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
			     u8 *buf, u16 len)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);

	return qed_mcp_get_nvm_image(hwfn, type, buf, len);
}
void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn)
{
	struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
	void *cookie = p_hwfn->cdev->ops_cookie;

	if (ops && ops->schedule_recovery_handler)
		ops->schedule_recovery_handler(cookie);
}
static const char * const qed_hw_err_type_descr[] = {
	[QED_HW_ERR_FAN_FAIL]		= "Fan Failure",
	[QED_HW_ERR_MFW_RESP_FAIL]	= "MFW Response Failure",
	[QED_HW_ERR_HW_ATTN]		= "HW Attention",
	[QED_HW_ERR_DMAE_FAIL]		= "DMAE Failure",
	[QED_HW_ERR_RAMROD_FAIL]	= "Ramrod Failure",
	[QED_HW_ERR_FW_ASSERT]		= "FW Assertion",
	[QED_HW_ERR_LAST]		= "Unknown",
};
void qed_hw_error_occurred(struct qed_hwfn *p_hwfn,
			   enum qed_hw_err_type err_type)
{
	struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
	void *cookie = p_hwfn->cdev->ops_cookie;
	const char *err_str;

	if (err_type > QED_HW_ERR_LAST)
		err_type = QED_HW_ERR_LAST;
	err_str = qed_hw_err_type_descr[err_type];

	DP_NOTICE(p_hwfn, "HW error occurred [%s]\n", err_str);

	/* Call the HW error handler of the protocol driver.
	 * If it is not available - perform a minimal handling of preventing
	 * HW attentions from being reasserted.
	 */
	if (ops && ops->schedule_hw_err_handler)
		ops->schedule_hw_err_handler(cookie, err_type);
	else
		qed_int_attn_clr_enable(p_hwfn->cdev, true);
}
static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
			    void *handle)
{
	return qed_set_queue_coalesce(rx_coal, tx_coal, handle);
}
static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_set_led(hwfn, ptt, mode);

	qed_ptt_release(hwfn, ptt);

	return status;
}
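/* Kick off the MFW-driven recovery (process-kill) flow on the leading hwfn. */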
int qed_recovery_process(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;
	int rc = 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	rc = qed_start_recovery_process(p_hwfn, p_ptt);

	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}
static int qed_update_wol(struct qed_dev *cdev, bool enabled)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
				   : QED_OV_WOL_DISABLED);
	if (rc)
		goto out;
	rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return rc;
}
static int qed_update_drv_state(struct qed_dev *cdev, bool active)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
						QED_OV_DRIVER_STATE_ACTIVE :
						QED_OV_DRIVER_STATE_DISABLED);

	qed_ptt_release(hwfn, ptt);

	return status;
}
static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}
static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}
static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
				  u8 dev_addr, u32 offset, u32 len)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr,
				  offset, len, buf);

	qed_ptt_release(hwfn, ptt);

	return rc;
}
static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_dbg_grc_config(hwfn, cfg_id, val);

	qed_ptt_release(hwfn, ptt);

	return rc;
}
static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev)
{
	return QED_AFFIN_HWFN_IDX(cdev);
}
static struct qed_selftest_ops qed_selftest_ops_pass = {
	.selftest_memory = &qed_selftest_memory,
	.selftest_interrupt = &qed_selftest_interrupt,
	.selftest_register = &qed_selftest_register,
	.selftest_clock = &qed_selftest_clock,
	.selftest_nvram = &qed_selftest_nvram,
};
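/* Common callbacks handed to the protocol drivers (qede/qedf/qedi/qedr)
 * together with their protocol-specific ops tables.
 */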
const struct qed_common_ops qed_common_ops_pass = {
	.selftest = &qed_selftest_ops_pass,
	.probe = &qed_probe,
	.remove = &qed_remove,
	.set_power_state = &qed_set_power_state,
	.set_name = &qed_set_name,
	.update_pf_params = &qed_update_pf_params,
	.slowpath_start = &qed_slowpath_start,
	.slowpath_stop = &qed_slowpath_stop,
	.set_fp_int = &qed_set_int_fp,
	.get_fp_int = &qed_get_int_fp,
	.sb_init = &qed_sb_init,
	.sb_release = &qed_sb_release,
	.simd_handler_config = &qed_simd_handler_config,
	.simd_handler_clean = &qed_simd_handler_clean,
	.dbg_grc = &qed_dbg_grc,
	.dbg_grc_size = &qed_dbg_grc_size,
	.can_link_change = &qed_can_link_change,
	.set_link = &qed_set_link,
	.get_link = &qed_get_current_link,
	.drain = &qed_drain,
	.update_msglvl = &qed_init_dp,
	.devlink_register = qed_devlink_register,
	.devlink_unregister = qed_devlink_unregister,
	.report_fatal_error = qed_report_fatal_error,
	.dbg_all_data = &qed_dbg_all_data,
	.dbg_all_data_size = &qed_dbg_all_data_size,
	.chain_alloc = &qed_chain_alloc,
	.chain_free = &qed_chain_free,
	.nvm_flash = &qed_nvm_flash,
	.nvm_get_image = &qed_nvm_get_image,
	.set_coalesce = &qed_set_coalesce,
	.set_led = &qed_set_led,
	.recovery_process = &qed_recovery_process,
	.recovery_prolog = &qed_recovery_prolog,
	.attn_clr_enable = &qed_int_attn_clr_enable,
	.update_drv_state = &qed_update_drv_state,
	.update_mac = &qed_update_mac,
	.update_mtu = &qed_update_mtu,
	.update_wol = &qed_update_wol,
	.db_recovery_add = &qed_db_recovery_add,
	.db_recovery_del = &qed_db_recovery_del,
	.read_module_eeprom = &qed_read_module_eeprom,
	.get_affin_hwfn_idx = &qed_get_affin_hwfn_idx,
	.read_nvm_cfg = &qed_nvm_flash_cfg_read,
	.read_nvm_cfg_len = &qed_nvm_flash_cfg_len,
	.set_grc_config = &qed_set_grc_config,
};
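/* Collect per-protocol statistics on behalf of the MFW; unknown protocol
 * types are only logged.
 */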
void qed_get_protocol_stats(struct qed_dev *cdev,
			    enum qed_mcp_protocol_type type,
			    union qed_mcp_protocol_stats *stats)
{
	struct qed_eth_stats eth_stats;

	memset(stats, 0, sizeof(*stats));

	switch (type) {
	case QED_MCP_LAN_STATS:
		qed_get_vport_stats(cdev, &eth_stats);
		stats->lan_stats.ucast_rx_pkts =
					eth_stats.common.rx_ucast_pkts;
		stats->lan_stats.ucast_tx_pkts =
					eth_stats.common.tx_ucast_pkts;
		stats->lan_stats.fcs_err = -1;
		break;
	case QED_MCP_FCOE_STATS:
		qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
		break;
	case QED_MCP_ISCSI_STATS:
		qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
		break;
	default:
		DP_VERBOSE(cdev, QED_MSG_SP,
			   "Invalid protocol type = %d\n", type);
		return;
	}
}
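/* MFW requested a TLV update; defer the actual collection to the slowpath
 * workqueue.
 */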
int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
{
	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Scheduling slowpath task [Flag: %d]\n",
		   QED_SLOWPATH_MFW_TLV_REQ);
	smp_mb__before_atomic();
	set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
	smp_mb__after_atomic();
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);

	return 0;
}
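/* Build the generic TLV block: feature flags and MAC addresses come from the
 * protocol driver's get_generic_tlv_data() callback, frame/byte counters from
 * the vport statistics.
 */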
static void
qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
{
	struct qed_common_cb_ops *op = cdev->protocol_ops.common;
	struct qed_eth_stats_common *p_common;
	struct qed_generic_tlvs gen_tlvs;
	struct qed_eth_stats stats;
	int i;

	memset(&gen_tlvs, 0, sizeof(gen_tlvs));
	op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);

	if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
		tlv->flags.ipv4_csum_offload = true;
	if (gen_tlvs.feat_flags & QED_TLV_LSO)
		tlv->flags.lso_supported = true;
	tlv->flags.b_set = true;

	for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
		if (is_valid_ether_addr(gen_tlvs.mac[i])) {
			ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
			tlv->mac_set[i] = true;
		}
	}

	qed_get_vport_stats(cdev, &stats);
	p_common = &stats.common;
	tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
			 p_common->rx_bcast_pkts;
	tlv->rx_frames_set = true;
	tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
			p_common->rx_bcast_bytes;
	tlv->rx_bytes_set = true;
	tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
			 p_common->tx_bcast_pkts;
	tlv->tx_frames_set = true;
	tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
			p_common->tx_bcast_bytes;
	tlv->tx_bytes_set = true;
}
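/* Dispatch an MFW TLV request to the matching fill routine based on the
 * requested TLV type.
 */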
int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
			  union qed_mfw_tlv_data *tlv_buf)
{
	struct qed_dev *cdev = hwfn->cdev;
	struct qed_common_cb_ops *ops;

	ops = cdev->protocol_ops.common;
	if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
		DP_NOTICE(hwfn, "Can't collect TLV management info\n");
		return -EINVAL;
	}

	switch (type) {
	case QED_MFW_TLV_GENERIC:
		qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
		break;
	case QED_MFW_TLV_ETH:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
		break;
	case QED_MFW_TLV_FCOE:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
		break;
	case QED_MFW_TLV_ISCSI:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi