/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/stddef.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/crc32.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>
#include <net/devlink.h>

#include "qed.h"
#include "qed_sriov.h"
#include "qed_sp.h"
#include "qed_dev_api.h"
#include "qed_ll2.h"
#include "qed_fcoe.h"
#include "qed_iscsi.h"

#include "qed_mcp.h"
#include "qed_reg_addr.h"
#include "qed_hw.h"
#include "qed_selftest.h"
#include "qed_debug.h"

#define QED_ROCE_QPS			(8192)
#define QED_ROCE_DPIS			(8)
#define QED_RDMA_SRQS			QED_ROCE_QPS
#define QED_NVM_CFG_GET_FLAGS		0xA
#define QED_NVM_CFG_GET_PF_FLAGS	0x1A
#define QED_NVM_CFG_MAX_ATTRS		50

static char version[] =
	"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define FW_FILE_VERSION				\
	__stringify(FW_MAJOR_VERSION) "."	\
	__stringify(FW_MINOR_VERSION) "."	\
	__stringify(FW_REVISION_VERSION) "."	\
	__stringify(FW_ENGINEERING_VERSION)

#define QED_FW_FILE_NAME	\
	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"

MODULE_FIRMWARE(QED_FW_FILE_NAME);

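/* Illustrative only: if the firmware headers defined FW_MAJOR_VERSION,
 * FW_MINOR_VERSION, FW_REVISION_VERSION and FW_ENGINEERING_VERSION as, say,
 * 8, 37, 7 and 0 (hypothetical values, they are not set in this file),
 * FW_FILE_VERSION would expand to "8.37.7.0" and QED_FW_FILE_NAME to
 * "qed/qed_init_values_zipped-8.37.7.0.bin", which is the path
 * request_firmware() looks up under /lib/firmware in qed_slowpath_start()
 * further below.
 */
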
static int __init qed_init(void)
{
	pr_info("%s", version);

	return 0;
}

static void __exit qed_cleanup(void)
{
	pr_notice("qed_cleanup called\n");
}

module_init(qed_init);
module_exit(qed_cleanup);

/* Check if the DMA controller on the machine can properly handle the DMA
 * addressing required by the device.
 */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
	struct device *dev = &cdev->pdev->dev;

	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
			DP_NOTICE(cdev,
				  "Can't request 64-bit consistent allocations\n");
			return -EIO;
		}
	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
		DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
		return -EIO;
	}

	return 0;
}

static void qed_free_pci(struct qed_dev *cdev)
{
	struct pci_dev *pdev = cdev->pdev;

	if (cdev->doorbells && cdev->db_size)
		iounmap(cdev->doorbells);

	iounmap(cdev->regview);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
}

#define PCI_REVISION_ID_ERROR_VAL	0xff

/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
	u8 rev_id;
	int rc;

	cdev->pdev = pdev;

	rc = pci_enable_device(pdev);
	if (rc) {
		DP_NOTICE(cdev, "Cannot enable PCI device\n");
		goto err0;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #0\n");
		rc = -EIO;
		goto err1;
	}

	if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #2\n");
		rc = -EIO;
		goto err1;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, "qed");
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to request PCI memory resources\n");
			goto err1;
		}
		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
	if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
		DP_NOTICE(cdev,
			  "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
			  rev_id);
		rc = -ENODEV;
		goto err2;
	}

	if (!pci_is_pcie(pdev)) {
		DP_NOTICE(cdev, "The bus is not PCI Express\n");
		rc = -EIO;
		goto err2;
	}

	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
		DP_NOTICE(cdev, "Cannot find power management capability\n");

	rc = qed_set_coherency_mask(cdev);
	if (rc)
		goto err2;

	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
	cdev->pci_params.irq = pdev->irq;

	cdev->regview = pci_ioremap_bar(pdev, 0);
	if (!cdev->regview) {
		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err2;
	}

	cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
	cdev->db_size = pci_resource_len(cdev->pdev, 2);
	if (!cdev->db_size) {
		if (IS_PF(cdev)) {
			DP_NOTICE(cdev, "No Doorbell bar available\n");
			return -EINVAL;
		} else {
			return 0;
		}
	}

	cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);

	if (!cdev->doorbells) {
		DP_NOTICE(cdev, "Cannot map doorbell space\n");
		return -ENOMEM;
	}

	return 0;

err2:
	pci_release_regions(pdev);
err1:
	pci_disable_device(pdev);
err0:
	return rc;
}

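/* Fill the capability and version information that qed exports to the
 * protocol drivers (qede, qedr, ...) through the qed_dev_info structure.
 */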
int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_hw_info *hw_info = &p_hwfn->hw_info;
	struct qed_tunnel_info *tun = &cdev->tunnel;
	struct qed_ptt *ptt;

	memset(dev_info, 0, sizeof(struct qed_dev_info));

	if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->vxlan.b_mode_enabled)
		dev_info->vxlan_enable = true;

	if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
	    tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->gre_enable = true;

	if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
	    tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->geneve_enable = true;

	dev_info->num_hwfns = cdev->num_hwfns;
	dev_info->pci_mem_start = cdev->pci_params.mem_start;
	dev_info->pci_mem_end = cdev->pci_params.mem_end;
	dev_info->pci_irq = cdev->pci_params.irq;
	dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
	dev_info->dev_type = cdev->type;
	ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);

	if (IS_PF(cdev)) {
		dev_info->fw_major = FW_MAJOR_VERSION;
		dev_info->fw_minor = FW_MINOR_VERSION;
		dev_info->fw_rev = FW_REVISION_VERSION;
		dev_info->fw_eng = FW_ENGINEERING_VERSION;
		dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
						       &cdev->mf_bits);
		dev_info->tx_switching = true;

		if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
			dev_info->wol_support = true;

		dev_info->smart_an = qed_mcp_is_smart_an_supported(p_hwfn);

		dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
	} else {
		qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
				      &dev_info->fw_minor, &dev_info->fw_rev,
				      &dev_info->fw_eng);
	}

	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
		if (ptt) {
			qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mfw_rev, NULL);

			qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mbi_version);

			qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
					       &dev_info->flash_size);

			qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
		}
	} else {
		qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
				    &dev_info->mfw_rev, NULL);
	}

	dev_info->mtu = hw_info->mtu;

	return 0;
}

static void qed_free_cdev(struct qed_dev *cdev)
{
	kfree((void *)cdev);
}

static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
{
	struct qed_dev *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return cdev;

	qed_init_struct(cdev);

	return cdev;
}

/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
{
	if (!cdev)
		return -ENODEV;

	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
	return 0;
}

struct qed_devlink {
	struct qed_dev *cdev;
};

enum qed_devlink_param_id {
	QED_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	QED_DEVLINK_PARAM_ID_IWARP_CMT,
};

static int qed_dl_param_get(struct devlink *dl, u32 id,
			    struct devlink_param_gset_ctx *ctx)
{
	struct qed_devlink *qed_dl;
	struct qed_dev *cdev;

	qed_dl = devlink_priv(dl);
	cdev = qed_dl->cdev;
	ctx->val.vbool = cdev->iwarp_cmt;

	return 0;
}

static int qed_dl_param_set(struct devlink *dl, u32 id,
			    struct devlink_param_gset_ctx *ctx)
{
	struct qed_devlink *qed_dl;
	struct qed_dev *cdev;

	qed_dl = devlink_priv(dl);
	cdev = qed_dl->cdev;
	cdev->iwarp_cmt = ctx->val.vbool;

	return 0;
}

static const struct devlink_param qed_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(QED_DEVLINK_PARAM_ID_IWARP_CMT,
			     "iwarp_cmt", DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     qed_dl_param_get, qed_dl_param_set, NULL),
};

static const struct devlink_ops qed_dl_ops;

static int qed_devlink_register(struct qed_dev *cdev)
{
	union devlink_param_value value;
	struct qed_devlink *qed_dl;
	struct devlink *dl;
	int rc;

	dl = devlink_alloc(&qed_dl_ops, sizeof(*qed_dl));
	if (!dl)
		return -ENOMEM;

	qed_dl = devlink_priv(dl);

	cdev->dl = dl;
	qed_dl->cdev = cdev;

	rc = devlink_register(dl, &cdev->pdev->dev);
	if (rc)
		goto err_free;

	rc = devlink_params_register(dl, qed_devlink_params,
				     ARRAY_SIZE(qed_devlink_params));
	if (rc)
		goto err_unregister;

	value.vbool = false;
	devlink_param_driverinit_value_set(dl,
					   QED_DEVLINK_PARAM_ID_IWARP_CMT,
					   value);

	devlink_params_publish(dl);
	cdev->iwarp_cmt = false;

	return 0;

err_unregister:
	devlink_unregister(dl);

err_free:
	cdev->dl = NULL;
	devlink_free(dl);

	return rc;
}

static void qed_devlink_unregister(struct qed_dev *cdev)
{
	if (!cdev->dl)
		return;

	devlink_params_unregister(cdev->dl, qed_devlink_params,
				  ARRAY_SIZE(qed_devlink_params));

	devlink_unregister(cdev->dl);
	devlink_free(cdev->dl);
}

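/* Illustrative only (not part of the driver): once registered, the runtime
 * parameter declared above can be toggled from userspace with the devlink
 * tool, e.g. on a hypothetical device address:
 *
 *   devlink dev param set pci/0000:04:00.0 name iwarp_cmt value true cmode runtime
 *
 * which ends up in qed_dl_param_set() and simply latches cdev->iwarp_cmt.
 */
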
static struct qed_dev *qed_probe(struct pci_dev *pdev,
				 struct qed_probe_params *params)
{
	struct qed_dev *cdev;
	int rc;

	cdev = qed_alloc_cdev(pdev);
	if (!cdev)
		goto err0;

	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
	cdev->protocol = params->protocol;

	if (params->is_vf)
		cdev->b_is_vf = true;

	qed_init_dp(cdev, params->dp_module, params->dp_level);

	cdev->recov_in_prog = params->recov_in_prog;

	rc = qed_init_pci(cdev, pdev);
	if (rc) {
		DP_ERR(cdev, "init pci failed\n");
		goto err1;
	}
	DP_INFO(cdev, "PCI init completed successfully\n");

	rc = qed_devlink_register(cdev);
	if (rc) {
		DP_INFO(cdev, "Failed to register devlink.\n");
		goto err2;
	}

	rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
	if (rc) {
		DP_ERR(cdev, "hw prepare failed\n");
		goto err2;
	}

	DP_INFO(cdev, "qed_probe completed successfully\n");

	return cdev;

err2:
	qed_free_pci(cdev);
err1:
	qed_free_cdev(cdev);
err0:
	return NULL;
}

static void qed_remove(struct qed_dev *cdev)
{
	if (!cdev)
		return;

	qed_hw_remove(cdev);

	qed_free_pci(cdev);

	qed_set_power_state(cdev, PCI_D3hot);

	qed_devlink_unregister(cdev);

	qed_free_cdev(cdev);
}

static void qed_disable_msix(struct qed_dev *cdev)
{
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		pci_disable_msix(cdev->pdev);
		kfree(cdev->int_params.msix_table);
	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
		pci_disable_msi(cdev->pdev);
	}

	memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
}

static int qed_enable_msix(struct qed_dev *cdev,
			   struct qed_int_params *int_params)
{
	int i, rc, cnt;

	cnt = int_params->in.num_vectors;

	for (i = 0; i < cnt; i++)
		int_params->msix_table[i].entry = i;

	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
				   int_params->in.min_msix_cnt, cnt);
	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
	    (rc % cdev->num_hwfns)) {
		pci_disable_msix(cdev->pdev);

		/* If fastpath is initialized, we need at least one interrupt
		 * per hwfn [and the slow path interrupts]. New requested number
		 * should be a multiple of the number of hwfns.
		 */
		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
		DP_NOTICE(cdev,
			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
			  cnt, int_params->in.num_vectors);
		rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
					   cnt);
		if (!rc)
			rc = cnt;
	}

	if (rc > 0) {
		/* MSI-x configuration was achieved */
		int_params->out.int_mode = QED_INT_MODE_MSIX;
		int_params->out.num_vectors = rc;
		rc = 0;
	} else {
		DP_NOTICE(cdev,
			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
			  cnt, rc);
	}

	return rc;
}

/* This function outputs the int mode and the number of enabled msix vector */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
	struct qed_int_params *int_params = &cdev->int_params;
	struct msix_entry *tbl;
	int rc = 0, cnt;

	switch (int_params->in.int_mode) {
	case QED_INT_MODE_MSIX:
		/* Allocate MSIX table */
		cnt = int_params->in.num_vectors;
		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
		if (!int_params->msix_table) {
			rc = -ENOMEM;
			goto out;
		}

		/* Enable MSIX */
		rc = qed_enable_msix(cdev, int_params);
		if (!rc)
			goto out;

		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
		kfree(int_params->msix_table);
		if (force_mode)
			goto out;
		/* Fallthrough */

	case QED_INT_MODE_MSI:
		if (cdev->num_hwfns == 1) {
			rc = pci_enable_msi(cdev->pdev);
			if (!rc) {
				int_params->out.int_mode = QED_INT_MODE_MSI;
				goto out;
			}

			DP_NOTICE(cdev, "Failed to enable MSI\n");
			if (force_mode)
				goto out;
		}
		/* Fallthrough */

	case QED_INT_MODE_INTA:
		int_params->out.int_mode = QED_INT_MODE_INTA;
		rc = 0;
		goto out;
	default:
		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
			  int_params->in.int_mode);
		rc = -EINVAL;
	}

out:
	if (!rc)
		DP_INFO(cdev, "Using %s interrupts\n",
			int_params->out.int_mode == QED_INT_MODE_INTA ?
			"INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
			"MSI" : "MSI-X");
	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

	return rc;
}

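/* Register a protocol fastpath handler for a given SIMD index (invoked from
 * qed_single_int() in single-IRQ mode). In CMT devices the global index is
 * spread across engines: index % num_hwfns selects the hwfn and
 * index / num_hwfns is the slot within that hwfn.
 */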
static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
				    int index, void (*handler)(void *))
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	hwfn->simd_proto_handler[relative_idx].func = handler;
	hwfn->simd_proto_handler[relative_idx].token = token;
}

static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	memset(&hwfn->simd_proto_handler[relative_idx], 0,
	       sizeof(struct qed_simd_fp_handler));
}

static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
	tasklet_schedule((struct tasklet_struct *)tasklet);

	return IRQ_HANDLED;
}

static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
	struct qed_hwfn *hwfn;
	irqreturn_t rc = IRQ_NONE;
	u64 status;
	int i, j;

	for (i = 0; i < cdev->num_hwfns; i++) {
		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

		if (!status)
			continue;

		hwfn = &cdev->hwfns[i];

		/* Slowpath interrupt */
		if (unlikely(status & 0x1)) {
			tasklet_schedule(hwfn->sp_dpc);

			if (!(status &= ~0x1))
				rc = IRQ_HANDLED;
		}

		/* Fastpath interrupts */
		for (j = 0; j < 64; j++) {
			if ((0x2ULL << j) & status) {
				struct qed_simd_fp_handler *p_handler =
					&hwfn->simd_proto_handler[j];

				if (p_handler->func)
					p_handler->func(p_handler->token);
				else
					DP_NOTICE(hwfn,
						  "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
						  j, status);

				status &= ~(0x2ULL << j);
				rc = IRQ_HANDLED;
			}
		}

		if (unlikely(status))
			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
				   "got an unknown interrupt status 0x%llx\n",
				   status);
	}

	return rc;
}

int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
	struct qed_dev *cdev = hwfn->cdev;
	u32 int_mode;
	int rc = 0;
	u8 id;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX) {
		id = hwfn->my_id;
		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
			 id, cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
		rc = request_irq(cdev->int_params.msix_table[id].vector,
				 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
	} else {
		unsigned long flags = 0;

		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
			 PCI_FUNC(cdev->pdev->devfn));

		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
			flags |= IRQF_SHARED;

		rc = request_irq(cdev->pdev->irq, qed_single_int,
				 flags, cdev->name, cdev);
	}

	if (rc)
		DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
	else
		DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
			   "Requested slowpath %s\n",
			   (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");

	return rc;
}

static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
{
	/* Calling the disable function will make sure that any
	 * currently-running function is completed. The following call to the
	 * enable function makes this sequence a flush-like operation.
	 */
	if (p_hwfn->b_sp_dpc_enabled) {
		tasklet_disable(p_hwfn->sp_dpc);
		tasklet_enable(p_hwfn->sp_dpc);
	}
}

void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u8 id = p_hwfn->my_id;
	u32 int_mode;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX)
		synchronize_irq(cdev->int_params.msix_table[id].vector);
	else
		synchronize_irq(cdev->pdev->irq);

	qed_slowpath_tasklet_flush(p_hwfn);
}

static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
	int i;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		for_each_hwfn(cdev, i) {
			if (!cdev->hwfns[i].b_int_requested)
				break;
			synchronize_irq(cdev->int_params.msix_table[i].vector);
			free_irq(cdev->int_params.msix_table[i].vector,
				 cdev->hwfns[i].sp_dpc);
		}
	} else {
		if (QED_LEADING_HWFN(cdev)->b_int_requested)
			free_irq(cdev->pdev->irq, cdev);
	}
	qed_int_disable_post_isr_release(cdev);
}

static int qed_nic_stop(struct qed_dev *cdev)
{
	int i, rc;

	rc = qed_hw_stop(cdev);

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (p_hwfn->b_sp_dpc_enabled) {
			tasklet_disable(p_hwfn->sp_dpc);
			p_hwfn->b_sp_dpc_enabled = false;
			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
				   "Disabled sp tasklet [hwfn %d] at %p\n",
				   i, p_hwfn->sp_dpc);
		}
	}

	qed_dbg_pf_exit(cdev);

	return rc;
}

static int qed_nic_setup(struct qed_dev *cdev)
{
	int rc, i;

	/* Determine if interface is going to require LL2 */
	if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
		for (i = 0; i < cdev->num_hwfns; i++) {
			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

			p_hwfn->using_ll2 = true;
		}
	}

	rc = qed_resc_alloc(cdev);
	if (rc)
		return rc;

	DP_INFO(cdev, "Allocated qed resources\n");

	qed_resc_setup(cdev);

	return rc;
}

static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
		limit = cdev->num_hwfns * 63;
	else if (cdev->int_params.fp_msix_cnt)
		limit = cdev->int_params.fp_msix_cnt;

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}

static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(struct qed_int_info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	/* Need to expose only MSI-X information; Single IRQ is handled solely
	 * by qed.
	 */
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.fp_msix_base;

		info->msix_cnt = cdev->int_params.fp_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];
	}

	return 0;
}

static int qed_slowpath_setup_int(struct qed_dev *cdev,
				  enum qed_int_mode int_mode)
{
	struct qed_sb_cnt_info sb_cnt_info;
	int num_l2_queues = 0;
	int rc;
	int i;

	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
		return -EINVAL;
	}

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = int_mode;
	for_each_hwfn(cdev, i) {
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
		cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
		cdev->int_params.in.num_vectors++; /* slowpath */
	}

	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

	if (is_kdump_kernel()) {
		DP_INFO(cdev,
			"Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
			cdev->int_params.in.min_msix_cnt);
		cdev->int_params.in.num_vectors =
			cdev->int_params.in.min_msix_cnt;
	}

	rc = qed_set_int_mode(cdev, false);
	if (rc) {
		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
		return rc;
	}

	cdev->int_params.fp_msix_base = cdev->num_hwfns;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
				       cdev->num_hwfns;

	if (!IS_ENABLED(CONFIG_QED_RDMA) ||
	    !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
		return 0;

	for_each_hwfn(cdev, i)
		num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);

	DP_VERBOSE(cdev, QED_MSG_RDMA,
		   "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
		   cdev->int_params.fp_msix_cnt, num_l2_queues);

	if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
		cdev->int_params.rdma_msix_cnt =
			(cdev->int_params.fp_msix_cnt - num_l2_queues)
			/ cdev->num_hwfns;
		cdev->int_params.rdma_msix_base =
			cdev->int_params.fp_msix_base + num_l2_queues;
		cdev->int_params.fp_msix_cnt = num_l2_queues;
	} else {
		cdev->int_params.rdma_msix_cnt = 0;
	}

	DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
		   cdev->int_params.rdma_msix_cnt,
		   cdev->int_params.rdma_msix_base);

	return 0;
}

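/* Worked example (illustrative numbers): a 2-hwfn RDMA-capable device that
 * got 32 MSI-X vectors reserves the first 2 for slowpath, leaving
 * fp_msix_cnt = 30 at fp_msix_base = 2. With 16 L2 queues in total, the
 * surplus (30 - 16) / 2 = 7 vectors per hwfn go to RDMA starting at
 * rdma_msix_base = 2 + 16 = 18, and the L2 fastpath keeps 16.
 */
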
static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
{
	int rc;

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;

	qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
			    &cdev->int_params.in.num_vectors);
	if (cdev->num_hwfns > 1) {
		u8 vectors = 0;

		qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
		cdev->int_params.in.num_vectors += vectors;
	}

	/* We want a minimum of one fastpath vector per vf hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;

	rc = qed_set_int_mode(cdev, true);
	if (rc)
		return rc;

	cdev->int_params.fp_msix_base = 0;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;

	return 0;
}

u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int rc;

	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);

	if (rc != Z_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
			   rc);
		return 0;
	}

	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
	zlib_inflateEnd(p_hwfn->stream);

	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
			   p_hwfn->stream->msg, rc);
		return 0;
	}

	return p_hwfn->stream->total_out / 4;
}

static int qed_alloc_stream_mem(struct qed_dev *cdev)
{
	int i;
	void *workspace;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
		if (!p_hwfn->stream)
			return -ENOMEM;

		workspace = vzalloc(zlib_inflate_workspacesize());
		if (!workspace)
			return -ENOMEM;
		p_hwfn->stream->workspace = workspace;
	}

	return 0;
}

static void qed_free_stream_mem(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!p_hwfn->stream)
			return;

		vfree(p_hwfn->stream->workspace);
		kfree(p_hwfn->stream);
	}
}

static void qed_update_pf_params(struct qed_dev *cdev,
				 struct qed_pf_params *params)
{
	int i;

	if (IS_ENABLED(CONFIG_QED_RDMA)) {
		params->rdma_pf_params.num_qps = QED_ROCE_QPS;
		params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
		params->rdma_pf_params.num_srqs = QED_RDMA_SRQS;
		/* divide by 3 the MRs to avoid MF ILT overflow */
		params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
	}

	if (cdev->num_hwfns > 1 || IS_VF(cdev))
		params->eth_pf_params.num_arfs_filters = 0;

	/* In case we might support RDMA, don't allow qede to be greedy
	 * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp]
	 * per hwfn.
	 */
	if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
		u16 *num_cons;

		num_cons = &params->eth_pf_params.num_cons;
		*num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS);
	}

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->pf_params = *params;
	}
}

#define QED_PERIODIC_DB_REC_COUNT		10
#define QED_PERIODIC_DB_REC_INTERVAL_MS		100
#define QED_PERIODIC_DB_REC_INTERVAL \
	msecs_to_jiffies(QED_PERIODIC_DB_REC_INTERVAL_MS)
#define QED_PERIODIC_DB_REC_WAIT_COUNT		10
#define QED_PERIODIC_DB_REC_WAIT_INTERVAL \
	(QED_PERIODIC_DB_REC_INTERVAL_MS / QED_PERIODIC_DB_REC_WAIT_COUNT)

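/* With the values above, doorbell recovery reschedules itself every 100 ms
 * for up to 10 rounds after it is kicked, and teardown polls for a pending
 * run every 100 / 10 = 10 ms while waiting for the last one to finish.
 */
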
static int qed_slowpath_delayed_work(struct qed_hwfn *hwfn,
				     enum qed_slowpath_wq_flag wq_flag,
				     unsigned long delay)
{
	if (!hwfn->slowpath_wq_active)
		return -EINVAL;

	/* Memory barrier for setting atomic bit */
	smp_mb__before_atomic();
	set_bit(wq_flag, &hwfn->slowpath_task_flags);
	smp_mb__after_atomic();
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, delay);

	return 0;
}

void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn)
{
	/* Reset periodic Doorbell Recovery counter */
	p_hwfn->periodic_db_rec_count = QED_PERIODIC_DB_REC_COUNT;

	/* Don't schedule periodic Doorbell Recovery if already scheduled */
	if (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
		     &p_hwfn->slowpath_task_flags))
		return;

	qed_slowpath_delayed_work(p_hwfn, QED_SLOWPATH_PERIODIC_DB_REC,
				  QED_PERIODIC_DB_REC_INTERVAL);
}

static void qed_slowpath_wq_stop(struct qed_dev *cdev)
{
	int i, sleep_count = QED_PERIODIC_DB_REC_WAIT_COUNT;

	if (IS_VF(cdev))
		return;

	for_each_hwfn(cdev, i) {
		if (!cdev->hwfns[i].slowpath_wq)
			continue;

		/* Stop queuing new delayed works */
		cdev->hwfns[i].slowpath_wq_active = false;

		/* Wait until the last periodic doorbell recovery is executed */
		while (test_bit(QED_SLOWPATH_PERIODIC_DB_REC,
				&cdev->hwfns[i].slowpath_task_flags) &&
		       sleep_count--)
			msleep(QED_PERIODIC_DB_REC_WAIT_INTERVAL);

		flush_workqueue(cdev->hwfns[i].slowpath_wq);
		destroy_workqueue(cdev->hwfns[i].slowpath_wq);
	}
}

static void qed_slowpath_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     slowpath_task.work);
	struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

	if (!ptt) {
		if (hwfn->slowpath_wq_active)
			queue_delayed_work(hwfn->slowpath_wq,
					   &hwfn->slowpath_task, 0);

		return;
	}

	if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
			       &hwfn->slowpath_task_flags))
		qed_mfw_process_tlv_req(hwfn, ptt);

	if (test_and_clear_bit(QED_SLOWPATH_PERIODIC_DB_REC,
			       &hwfn->slowpath_task_flags)) {
		qed_db_rec_handler(hwfn, ptt);
		if (hwfn->periodic_db_rec_count--)
			qed_slowpath_delayed_work(hwfn,
						  QED_SLOWPATH_PERIODIC_DB_REC,
						  QED_PERIODIC_DB_REC_INTERVAL);
	}

	qed_ptt_release(hwfn, ptt);
}

static int qed_slowpath_wq_start(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	char name[NAME_SIZE];
	int i;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];

		snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
			 cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);

		hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
		if (!hwfn->slowpath_wq) {
			DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
			return -ENOMEM;
		}

		INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
		hwfn->slowpath_wq_active = true;
	}

	return 0;
}

static int qed_slowpath_start(struct qed_dev *cdev,
			      struct qed_slowpath_params *params)
{
	struct qed_drv_load_params drv_load_params;
	struct qed_hw_init_params hw_init_params;
	struct qed_mcp_drv_version drv_version;
	struct qed_tunnel_info tunn_info;
	const u8 *data = NULL;
	struct qed_hwfn *hwfn;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;

	if (qed_iov_wq_start(cdev))
		goto err;

	if (qed_slowpath_wq_start(cdev))
		goto err;

	if (IS_PF(cdev)) {
		rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
				      &cdev->pdev->dev);
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to find fw file - /lib/firmware/%s\n",
				  QED_FW_FILE_NAME);
			goto err;
		}

		if (cdev->num_hwfns == 1) {
			p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
			if (p_ptt) {
				QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
			} else {
				DP_NOTICE(cdev,
					  "Failed to acquire PTT for aRFS\n");
				goto err;
			}
		}
	}

	cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
	rc = qed_nic_setup(cdev);
	if (rc)
		goto err;

	if (IS_PF(cdev))
		rc = qed_slowpath_setup_int(cdev, params->int_mode);
	else
		rc = qed_slowpath_vf_setup_int(cdev);
	if (rc)
		goto err1;

	if (IS_PF(cdev)) {
		/* Allocate stream for unzipping */
		rc = qed_alloc_stream_mem(cdev);
		if (rc)
			goto err2;

		/* First Dword used to differentiate between various sources */
		data = cdev->firmware->data + sizeof(u32);

		qed_dbg_pf_init(cdev);
	}

	/* Start the slowpath */
	memset(&hw_init_params, 0, sizeof(hw_init_params));
	memset(&tunn_info, 0, sizeof(tunn_info));
	tunn_info.vxlan.b_mode_enabled = true;
	tunn_info.l2_gre.b_mode_enabled = true;
	tunn_info.ip_gre.b_mode_enabled = true;
	tunn_info.l2_geneve.b_mode_enabled = true;
	tunn_info.ip_geneve.b_mode_enabled = true;
	tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	hw_init_params.p_tunn = &tunn_info;
	hw_init_params.b_hw_start = true;
	hw_init_params.int_mode = cdev->int_params.out.int_mode;
	hw_init_params.allow_npar_tx_switch = true;
	hw_init_params.bin_fw_data = data;

	memset(&drv_load_params, 0, sizeof(drv_load_params));
	drv_load_params.is_crash_kernel = is_kdump_kernel();
	drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
	drv_load_params.avoid_eng_reset = false;
	drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
	hw_init_params.p_drv_load_params = &drv_load_params;

	rc = qed_hw_init(cdev, &hw_init_params);
	if (rc)
		goto err2;

	DP_INFO(cdev,
		"HW initialization and function start completed successfully\n");

	if (IS_PF(cdev)) {
		cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
					   BIT(QED_MODE_L2GENEVE_TUNN) |
					   BIT(QED_MODE_IPGENEVE_TUNN) |
					   BIT(QED_MODE_L2GRE_TUNN) |
					   BIT(QED_MODE_IPGRE_TUNN));
	}

	/* Allocate LL2 interface if needed */
	if (QED_LEADING_HWFN(cdev)->using_ll2) {
		rc = qed_ll2_alloc_if(cdev);
		if (rc)
			goto err3;
	}
	if (IS_PF(cdev)) {
		hwfn = QED_LEADING_HWFN(cdev);
		drv_version.version = (params->drv_major << 24) |
				      (params->drv_minor << 16) |
				      (params->drv_rev << 8) |
				      (params->drv_eng);
		strlcpy(drv_version.name, params->name,
			MCP_DRV_VER_STR_SIZE - 4);
		rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
					      &drv_version);
		if (rc) {
			DP_NOTICE(cdev, "Failed sending drv version command\n");
			goto err4;
		}
	}

	qed_reset_vport_stats(cdev);

	return 0;

err4:
	qed_ll2_dealloc_if(cdev);
err3:
	qed_hw_stop(cdev);
err2:
	qed_hw_timers_stop_all(cdev);
	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);
	qed_free_stream_mem(cdev);
	qed_disable_msix(cdev);
err1:
	qed_resc_free(cdev);
err:
	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
	    QED_LEADING_HWFN(cdev)->p_arfs_ptt)
		qed_ptt_release(QED_LEADING_HWFN(cdev),
				QED_LEADING_HWFN(cdev)->p_arfs_ptt);

	qed_iov_wq_stop(cdev, false);

	qed_slowpath_wq_stop(cdev);

	return rc;
}

static int qed_slowpath_stop(struct qed_dev *cdev)
{
	if (!cdev)
		return -ENODEV;

	qed_slowpath_wq_stop(cdev);

	qed_ll2_dealloc_if(cdev);

	if (IS_PF(cdev)) {
		if (cdev->num_hwfns == 1)
			qed_ptt_release(QED_LEADING_HWFN(cdev),
					QED_LEADING_HWFN(cdev)->p_arfs_ptt);
		qed_free_stream_mem(cdev);
		if (IS_QED_ETH_IF(cdev))
			qed_sriov_disable(cdev, true);
	}

	qed_nic_stop(cdev);

	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);

	qed_disable_msix(cdev);

	qed_resc_free(cdev);

	qed_iov_wq_stop(cdev, true);

	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	return 0;
}

static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
{
	int i;

	memcpy(cdev->name, name, NAME_SIZE);
	for_each_hwfn(cdev, i)
		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
}

static u32 qed_sb_init(struct qed_dev *cdev,
		       struct qed_sb_info *sb_info,
		       void *sb_virt_addr,
		       dma_addr_t sb_phy_addr, u16 sb_id,
		       enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	struct qed_ptt *p_ptt;
	u16 rel_sb_id;
	u32 rc;

	/* RoCE/Storage use a single engine in CMT mode while L2 uses both */
	if (type == QED_SB_TYPE_L2_QUEUE) {
		p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
		rel_sb_id = sb_id / cdev->num_hwfns;
	} else {
		p_hwfn = QED_AFFIN_HWFN(cdev);
		rel_sb_id = sb_id;
	}

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);

	if (IS_PF(p_hwfn->cdev)) {
		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
		qed_ptt_release(p_hwfn, p_ptt);
	} else {
		rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
	}

	return rc;
}

static u32 qed_sb_release(struct qed_dev *cdev,
			  struct qed_sb_info *sb_info,
			  u16 sb_id,
			  enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	u16 rel_sb_id;
	u32 rc;

	/* RoCE/Storage use a single engine in CMT mode while L2 uses both */
	if (type == QED_SB_TYPE_L2_QUEUE) {
		p_hwfn = &cdev->hwfns[sb_id % cdev->num_hwfns];
		rel_sb_id = sb_id / cdev->num_hwfns;
	} else {
		p_hwfn = QED_AFFIN_HWFN(cdev);
		rel_sb_id = sb_id;
	}

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   IS_LEAD_HWFN(p_hwfn) ? 0 : 1, rel_sb_id, sb_id);

	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);

	return rc;
}

static bool qed_can_link_change(struct qed_dev *cdev)
{
	return true;
}

static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
{
	struct qed_hwfn *hwfn;
	struct qed_mcp_link_params *link_params;
	struct qed_ptt *ptt;
	u32 sup_caps;
	int rc;

	if (!cdev)
		return -ENODEV;

	/* The link should be set only once per PF */
	hwfn = &cdev->hwfns[0];

	/* When VF wants to set link, force it to read the bulletin instead.
	 * This mimics the PF behavior, where a notification [both immediate
	 * and possible later] would be generated when changing properties.
	 */
	if (IS_VF(cdev)) {
		qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
		return 0;
	}

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EBUSY;

	link_params = qed_mcp_get_link_params(hwfn);
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		link_params->speed.autoneg = params->autoneg;
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		link_params->speed.advertised_speeds = 0;
		sup_caps = QED_LM_1000baseT_Full_BIT |
			   QED_LM_1000baseKX_Full_BIT |
			   QED_LM_1000baseX_Full_BIT;
		if (params->adv_speeds & sup_caps)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		sup_caps = QED_LM_10000baseT_Full_BIT |
			   QED_LM_10000baseKR_Full_BIT |
			   QED_LM_10000baseKX4_Full_BIT |
			   QED_LM_10000baseR_FEC_BIT |
			   QED_LM_10000baseCR_Full_BIT |
			   QED_LM_10000baseSR_Full_BIT |
			   QED_LM_10000baseLR_Full_BIT |
			   QED_LM_10000baseLRM_Full_BIT;
		if (params->adv_speeds & sup_caps)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		if (params->adv_speeds & QED_LM_20000baseKR2_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G;
		sup_caps = QED_LM_25000baseKR_Full_BIT |
			   QED_LM_25000baseCR_Full_BIT |
			   QED_LM_25000baseSR_Full_BIT;
		if (params->adv_speeds & sup_caps)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
		sup_caps = QED_LM_40000baseLR4_Full_BIT |
			   QED_LM_40000baseKR4_Full_BIT |
			   QED_LM_40000baseCR4_Full_BIT |
			   QED_LM_40000baseSR4_Full_BIT;
		if (params->adv_speeds & sup_caps)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
		sup_caps = QED_LM_50000baseKR2_Full_BIT |
			   QED_LM_50000baseCR2_Full_BIT |
			   QED_LM_50000baseSR2_Full_BIT;
		if (params->adv_speeds & sup_caps)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
		sup_caps = QED_LM_100000baseKR4_Full_BIT |
			   QED_LM_100000baseSR4_Full_BIT |
			   QED_LM_100000baseCR4_Full_BIT |
			   QED_LM_100000baseLR4_ER4_Full_BIT;
		if (params->adv_speeds & sup_caps)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
		link_params->speed.forced_speed = params->forced_speed;
	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
		if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
			link_params->pause.autoneg = true;
		else
			link_params->pause.autoneg = false;
		if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
			link_params->pause.forced_rx = true;
		else
			link_params->pause.forced_rx = false;
		if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
			link_params->pause.forced_tx = true;
		else
			link_params->pause.forced_tx = false;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
		switch (params->loopback_mode) {
		case QED_LINK_LOOPBACK_INT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT:
			link_params->loopback_mode = ETH_LOOPBACK_EXT;
			break;
		case QED_LINK_LOOPBACK_MAC:
			link_params->loopback_mode = ETH_LOOPBACK_MAC;
			break;
		default:
			link_params->loopback_mode = ETH_LOOPBACK_NONE;
			break;
		}
	}

	if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG)
		memcpy(&link_params->eee, &params->eee,
		       sizeof(link_params->eee));

	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static int qed_get_port_type(u32 media_type)
{
	int port_type;

	switch (media_type) {
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
	case MEDIA_KR:
		port_type = PORT_FIBRE;
		break;
	case MEDIA_DA_TWINAX:
		port_type = PORT_DA;
		break;
	case MEDIA_BASE_T:
		port_type = PORT_TP;
		break;
	case MEDIA_NOT_PRESENT:
		port_type = PORT_NONE;
		break;
	case MEDIA_UNSPECIFIED:
	default:
		port_type = PORT_OTHER;
		break;
	}
	return port_type;
}

static int qed_get_link_data(struct qed_hwfn *hwfn,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *link_caps)
{
	void *p;

	if (!IS_PF(hwfn->cdev)) {
		qed_vf_get_link_params(hwfn, params);
		qed_vf_get_link_state(hwfn, link);
		qed_vf_get_link_caps(hwfn, link_caps);

		return 0;
	}

	p = qed_mcp_get_link_params(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(params, p, sizeof(*params));

	p = qed_mcp_get_link_state(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link, p, sizeof(*link));

	p = qed_mcp_get_link_capabilities(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link_caps, p, sizeof(*link_caps));

	return 0;
}

static void qed_fill_link_capability(struct qed_hwfn *hwfn,
				     struct qed_ptt *ptt, u32 capability,
				     u32 *if_capability)
{
	u32 media_type, tcvr_state, tcvr_type;
	u32 speed_mask, board_cfg;

	if (qed_mcp_get_media_type(hwfn, ptt, &media_type))
		media_type = MEDIA_UNSPECIFIED;

	if (qed_mcp_get_transceiver_data(hwfn, ptt, &tcvr_state, &tcvr_type))
		tcvr_type = ETH_TRANSCEIVER_STATE_UNPLUGGED;

	if (qed_mcp_trans_speed_mask(hwfn, ptt, &speed_mask))
		speed_mask = 0xFFFFFFFF;

	if (qed_mcp_get_board_config(hwfn, ptt, &board_cfg))
		board_cfg = NVM_CFG1_PORT_PORT_TYPE_UNDEFINED;

	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Media_type = 0x%x tcvr_state = 0x%x tcvr_type = 0x%x speed_mask = 0x%x board_cfg = 0x%x\n",
		   media_type, tcvr_state, tcvr_type, speed_mask, board_cfg);

	switch (media_type) {
	case MEDIA_DA_TWINAX:
		*if_capability |= QED_LM_FIBRE_BIT;
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			*if_capability |= QED_LM_20000baseKR2_Full_BIT;
		/* For DAC media multiple speed capabilities are supported*/
		capability = capability & speed_mask;
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			*if_capability |= QED_LM_1000baseKX_Full_BIT;
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			*if_capability |= QED_LM_10000baseCR_Full_BIT;
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			*if_capability |= QED_LM_40000baseCR4_Full_BIT;
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			*if_capability |= QED_LM_25000baseCR_Full_BIT;
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			*if_capability |= QED_LM_50000baseCR2_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			*if_capability |= QED_LM_100000baseCR4_Full_BIT;
		break;
	case MEDIA_BASE_T:
		*if_capability |= QED_LM_TP_BIT;
		if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_EXT_PHY) {
			if (capability &
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
				*if_capability |= QED_LM_1000baseT_Full_BIT;
			}
			if (capability &
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
				*if_capability |= QED_LM_10000baseT_Full_BIT;
			}
		}
		if (board_cfg & NVM_CFG1_PORT_PORT_TYPE_MODULE) {
			*if_capability |= QED_LM_FIBRE_BIT;
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_1000BASET)
				*if_capability |= QED_LM_1000baseT_Full_BIT;
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_BASET)
				*if_capability |= QED_LM_10000baseT_Full_BIT;
		}
		break;
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
		*if_capability |= QED_LM_FIBRE_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
			if ((tcvr_type == ETH_TRANSCEIVER_TYPE_1G_LX) ||
			    (tcvr_type == ETH_TRANSCEIVER_TYPE_1G_SX))
				*if_capability |= QED_LM_1000baseKX_Full_BIT;
		}
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_SR)
				*if_capability |= QED_LM_10000baseSR_Full_BIT;
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LR)
				*if_capability |= QED_LM_10000baseLR_Full_BIT;
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_LRM)
				*if_capability |= QED_LM_10000baseLRM_Full_BIT;
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_10G_ER)
				*if_capability |= QED_LM_10000baseR_FEC_BIT;
		}
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			*if_capability |= QED_LM_20000baseKR2_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G) {
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_25G_SR)
				*if_capability |= QED_LM_25000baseSR_Full_BIT;
		}
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G) {
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_LR4)
				*if_capability |= QED_LM_40000baseLR4_Full_BIT;
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_40G_SR4)
				*if_capability |= QED_LM_40000baseSR4_Full_BIT;
		}
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			*if_capability |= QED_LM_50000baseKR2_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G) {
			if (tcvr_type == ETH_TRANSCEIVER_TYPE_100G_SR4)
				*if_capability |= QED_LM_100000baseSR4_Full_BIT;
		}
		break;
	case MEDIA_KR:
		*if_capability |= QED_LM_Backplane_BIT;
		if (capability & NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_20G)
			*if_capability |= QED_LM_20000baseKR2_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
			*if_capability |= QED_LM_1000baseKX_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
			*if_capability |= QED_LM_10000baseKR_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
			*if_capability |= QED_LM_25000baseKR_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
			*if_capability |= QED_LM_40000baseKR4_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
			*if_capability |= QED_LM_50000baseKR2_Full_BIT;
		if (capability &
		    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
			*if_capability |= QED_LM_100000baseKR4_Full_BIT;
		break;
	case MEDIA_UNSPECIFIED:
	case MEDIA_NOT_PRESENT:
		DP_VERBOSE(hwfn->cdev, QED_MSG_DEBUG,
			   "Unknown media and transceiver type;\n");
		break;
	}
}

static void qed_fill_link(struct qed_hwfn *hwfn,
			  struct qed_ptt *ptt,
			  struct qed_link_output *if_link)
{
	struct qed_mcp_link_capabilities link_caps;
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	u32 media_type;

	memset(if_link, 0, sizeof(*if_link));

	/* Prepare source inputs */
	if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
		dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
		return;
	}

	/* Set the link parameters to pass to protocol driver */
	if (link.link_up)
		if_link->link_up = true;

	/* TODO - at the moment assume supported and advertised speed equal */
	if (link_caps.default_speed_autoneg)
		if_link->supported_caps |= QED_LM_Autoneg_BIT;
	if (params.pause.autoneg ||
	    (params.pause.forced_rx && params.pause.forced_tx))
		if_link->supported_caps |= QED_LM_Asym_Pause_BIT;
	if (params.pause.autoneg || params.pause.forced_rx ||
	    params.pause.forced_tx)
		if_link->supported_caps |= QED_LM_Pause_BIT;

	if_link->advertised_caps = if_link->supported_caps;
	if (params.speed.autoneg)
		if_link->advertised_caps |= QED_LM_Autoneg_BIT;
	else
		if_link->advertised_caps &= ~QED_LM_Autoneg_BIT;

	/* Fill link advertised capability*/
	qed_fill_link_capability(hwfn, ptt, params.speed.advertised_speeds,
				 &if_link->advertised_caps);
	/* Fill link supported capability*/
	qed_fill_link_capability(hwfn, ptt, link_caps.speed_capabilities,
				 &if_link->supported_caps);

	if (link.link_up)
		if_link->speed = link.speed;

	/* TODO - fill duplex properly */
	if_link->duplex = DUPLEX_FULL;
	qed_mcp_get_media_type(hwfn, ptt, &media_type);
	if_link->port = qed_get_port_type(media_type);

	if_link->autoneg = params.speed.autoneg;

	if (params.pause.autoneg)
		if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	if (params.pause.forced_rx)
		if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
	if (params.pause.forced_tx)
		if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;

	/* Link partner capabilities */
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_FD)
		if_link->lp_caps |= QED_LM_1000baseT_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
		if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_20G)
		if_link->lp_caps |= QED_LM_20000baseKR2_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G)
		if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
		if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G)
		if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G)
		if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link.an_complete)
		if_link->lp_caps |= QED_LM_Autoneg_BIT;

	if (link.partner_adv_pause)
		if_link->lp_caps |= QED_LM_Pause_BIT;
	if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
	    link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
		if_link->lp_caps |= QED_LM_Asym_Pause_BIT;

	if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) {
		if_link->eee_supported = false;
	} else {
		if_link->eee_supported = true;
		if_link->eee_active = link.eee_active;
		if_link->sup_caps = link_caps.eee_speed_caps;
		/* MFW clears adv_caps on eee disable; use configured value */
		if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps :
					params.eee.adv_caps;
		if_link->eee.lp_adv_caps = link.eee_lp_adv_caps;
		if_link->eee.enable = params.eee.enable;
		if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable;
		if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer;
	}
}

static void qed_get_current_link(struct qed_dev *cdev,
				 struct qed_link_output *if_link)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i;

	hwfn = &cdev->hwfns[0];
	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(hwfn);
		if (ptt) {
			qed_fill_link(hwfn, ptt, if_link);
			qed_ptt_release(hwfn, ptt);
		} else {
			DP_NOTICE(hwfn, "Failed to fill link; No PTT\n");
		}
	} else {
		qed_fill_link(hwfn, NULL, if_link);
	}

	for_each_hwfn(cdev, i)
		qed_inform_vf_link_state(&cdev->hwfns[i]);
}

void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
{
	void *cookie = hwfn->cdev->ops_cookie;
	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
	struct qed_link_output if_link;

	qed_fill_link(hwfn, ptt, &if_link);
	qed_inform_vf_link_state(hwfn);

	if (IS_LEAD_HWFN(hwfn) && cookie)
		op->link_update(cookie, &if_link);
}

static int qed_drain(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i, rc;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];
		ptt = qed_ptt_acquire(hwfn);
		if (!ptt) {
			DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
			return -EBUSY;
		}
		rc = qed_mcp_drain(hwfn, ptt);
		qed_ptt_release(hwfn, ptt);
		if (rc)
			return rc;
	}

	return 0;
}

static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev,
					  struct qed_nvm_image_att *nvm_image,
					  u32 *crc)
{
	u8 *buf = NULL;
	int rc;
	u32 val;
	u32 j;

	/* Allocate a buffer for holding the nvram image */
	buf = kzalloc(nvm_image->length, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Read image into buffer */
	rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr,
			      buf, nvm_image->length);
	if (rc) {
		DP_ERR(cdev, "Failed reading image from nvm\n");
		goto out;
	}

	/* Convert the buffer into big-endian format (excluding the
	 * closing 4 bytes of CRC).
	 */
	for (j = 0; j < nvm_image->length - 4; j += 4) {
		val = cpu_to_be32(*(u32 *)&buf[j]);
		*(u32 *)&buf[j] = val;
	}

	/* Calc CRC for the "actual" image buffer, i.e. not including
	 * the last 4 CRC bytes.
	 */
	*crc = (~cpu_to_be32(crc32(0xffffffff, buf, nvm_image->length - 4)));

out:
	kfree(buf);

	return rc;
}

/* Binary file format -
 *     /----------------------------------------------------------------------\
 * 0B  |                       0x4 [command index]                            |
 * 4B  | image_type     | Options        |  Number of register settings       |
 * 8B  |                       Value                                          |
 * 12B |                       Mask                                           |
 * 16B |                       Offset                                         |
 *     \----------------------------------------------------------------------/
 * There can be several Value-Mask-Offset sets as specified by 'Number of...'.
 * Options - 0'b - Calculate & Update CRC for image
 */
static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data,
				      bool *check_resp)
{
	struct qed_nvm_image_att nvm_image;
	struct qed_hwfn *p_hwfn;
	bool is_crc = false;
	u32 image_type;
	int rc = 0, i;
	u16 len;

	*data += 4;
	image_type = **data;
	p_hwfn = QED_LEADING_HWFN(cdev);
	for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
		if (image_type == p_hwfn->nvm_info.image_att[i].image_type)
			break;
	if (i == p_hwfn->nvm_info.num_images) {
		DP_ERR(cdev, "Failed to find nvram image of type %08x\n",
		       image_type);
		return -ENOENT;
	}

	nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
	nvm_image.length = p_hwfn->nvm_info.image_att[i].len;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n",
		   **data, image_type, nvm_image.start_addr,
		   nvm_image.start_addr + nvm_image.length - 1);
	(*data)++;
	is_crc = !!(**data & BIT(0));
	(*data)++;
	len = *((u16 *)*data);
	*data += 2;
	if (is_crc) {
		u32 crc = 0;

		rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc);
		if (rc) {
			DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc);
			goto exit;
		}

		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
				       (nvm_image.start_addr +
					nvm_image.length - 4), (u8 *)&crc, 4);
		if (rc)
			DP_ERR(cdev, "Failed writing to %08x, rc = %d\n",
			       nvm_image.start_addr + nvm_image.length - 4, rc);
		goto exit;
	}

	/* Iterate over the values for setting */
	while (len) {
		u32 offset, mask, value, cur_value;
		u8 buf[4];

		value = *((u32 *)*data);
		*data += 4;
		mask = *((u32 *)*data);
		*data += 4;
		offset = *((u32 *)*data);
		*data += 4;

		rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf,
				      4);
		if (rc) {
			DP_ERR(cdev, "Failed reading from %08x\n",
			       nvm_image.start_addr + offset);
			goto exit;
		}

		cur_value = le32_to_cpu(*((__le32 *)buf));
		DP_VERBOSE(cdev, NETIF_MSG_DRV,
			   "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n",
			   nvm_image.start_addr + offset, cur_value,
			   (cur_value & ~mask) | (value & mask), value, mask);
		value = (value & mask) | (cur_value & ~mask);
		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
				       nvm_image.start_addr + offset,
				       (u8 *)&value, 4);
		if (rc) {
			DP_ERR(cdev, "Failed writing to %08x\n",
			       nvm_image.start_addr + offset);
			goto exit;
		}

		len--;
	}

exit:
	return rc;
}

/* Binary file format -
 *     /----------------------------------------------------------------------\
 * 0B  |                       0x3 [command index]                            |
 * 4B  | b'0: check_response?   | b'1-31  reserved                            |
 * 8B  | File-type |                   reserved                               |
 * 12B |                    Image length in bytes                             |
 *     \----------------------------------------------------------------------/
 *     Start a new file of the provided type
 */
static int qed_nvm_flash_image_file_start(struct qed_dev *cdev,
					  const u8 **data, bool *check_resp)
{
	u32 file_type, file_size = 0;
	int rc;

	*data += 4;
	*check_resp = !!(**data & BIT(0));
	*data += 4;
	file_type = **data;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "About to start a new file of type %02x\n", file_type);
	if (file_type == DRV_MB_PARAM_NVM_PUT_FILE_BEGIN_MBI) {
		*data += 4;
		file_size = *((u32 *)(*data));
	}

	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_BEGIN, file_type,
			       (u8 *)(&file_size), 4);
	*data += 4;

	return rc;
}

/* Binary file format -
 *     /----------------------------------------------------------------------\
 * 0B  |                       0x2 [command index]                            |
 * 4B  |                       Length in bytes                                |
 * 8B  | b'0: check_response?   | b'1-31  reserved                            |
 * 12B |                       Offset in bytes                                |
 *     \----------------------------------------------------------------------/
 * Write data as part of a file that was previously started. Data should be
 * of length equal to that provided in the message
 */
static int qed_nvm_flash_image_file_data(struct qed_dev *cdev,
					 const u8 **data, bool *check_resp)
{
	u32 offset, len;
	int rc;

	*data += 4;
	len = *((u32 *)(*data));
	*data += 4;
	*check_resp = !!(**data & BIT(0));
	*data += 4;
	offset = *((u32 *)(*data));
	*data += 4;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "About to write File-data: %08x bytes to offset %08x\n",
		   len, offset);

	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset,
			       (char *)(*data), len);
	*data += len;

	return rc;
}

/* Binary file format [General header] -
 *     /----------------------------------------------------------------------\
 * 0B  |                       QED_NVM_SIGNATURE                              |
 * 4B  |                       Length in bytes                                |
 * 8B  | Highest command in this batchfile |          Reserved                |
 *     \----------------------------------------------------------------------/
 */
static int qed_nvm_flash_image_validate(struct qed_dev *cdev,
					const struct firmware *image,
					const u8 **data)
{
	u32 signature, len;

	/* Check minimum size */
	if (image->size < 12) {
		DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size);
		return -EINVAL;
	}

	/* Check signature */
	signature = *((u32 *)(*data));
	if (signature != QED_NVM_SIGNATURE) {
		DP_ERR(cdev, "Wrong signature '%08x'\n", signature);
		return -EINVAL;
	}

	*data += 4;
	/* Validate internal size equals the image-size */
	len = *((u32 *)(*data));
	if (len != image->size) {
		DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n",
		       len, (u32)image->size);
		return -EINVAL;
	}

	*data += 4;
	/* Make sure driver familiar with all commands necessary for this */
	if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) {
		DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n",
		       *(u16 *)(*data));
		return -EINVAL;
	}

	*data += 4;

	return 0;
}


/* Binary file format -
 *     /----------------------------------------------------------------------\
 * 0B  |                       0x5 [command index]                             |
 * 4B  | Number of config attributes     |          Reserved                   |
 * 4B  | Config ID                       | Entity ID      | Length             |
 * 4B  | Value                                                                  |
 *     |                                                                        |
 *     \----------------------------------------------------------------------/
 * There can be several cfg_id-entity_id-Length-Value sets as specified by
 * 'Number of config attributes'.
 *
 * The API parses config attributes from the user-provided buffer and flashes
 * them to the respective NVM path using the Management FW interface.
 */
static int qed_nvm_flash_cfg_write(struct qed_dev *cdev, const u8 **data)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 entity_id, len, buf[32];
	bool need_nvm_init = true;
	struct qed_ptt *ptt;
	u16 cfg_id, count;
	int rc = 0, i;
	u32 flags;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	/* NVM CFG ID attribute header */
	*data += 4;
	count = *((u16 *)*data);
	*data += 4;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read config ids: num_attrs = %0d\n", count);
	/* NVM CFG ID attributes. Start loop index from 1 to avoid additional
	 * arithmetic operations in the implementation.
	 */
	for (i = 1; i <= count; i++) {
		cfg_id = *((u16 *)*data);
		*data += 2;
		entity_id = **data;
		(*data)++;
		len = **data;
		(*data)++;
		memcpy(buf, *data, len);
		*data += len;

		flags = 0;
		if (need_nvm_init) {
			flags |= QED_NVM_CFG_OPTION_INIT;
			need_nvm_init = false;
		}

		/* Commit to flash and free the resources */
		if (!(i % QED_NVM_CFG_MAX_ATTRS) || i == count) {
			flags |= QED_NVM_CFG_OPTION_COMMIT |
				 QED_NVM_CFG_OPTION_FREE;
			need_nvm_init = true;
		}

		if (entity_id)
			flags |= QED_NVM_CFG_OPTION_ENTITY_SEL;

		DP_VERBOSE(cdev, NETIF_MSG_DRV,
			   "cfg_id = %d entity = %d len = %d\n", cfg_id,
			   entity_id, len);
		rc = qed_mcp_nvm_set_cfg(hwfn, ptt, cfg_id, entity_id, flags,
					 buf, len);
		if (rc) {
			DP_ERR(cdev, "Error %d configuring %d\n", rc, cfg_id);
			break;
		}
	}

	qed_ptt_release(hwfn, ptt);

	return rc;
}
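
/* Illustrative sketch only (not used by the driver): one config-attribute
 * set as consumed by the loop in qed_nvm_flash_cfg_write(). Names are
 * assumptions; 'len' value bytes follow the three fixed fields and are
 * copied into a 32-byte stack buffer, and a non-zero entity_id makes the
 * driver add QED_NVM_CFG_OPTION_ENTITY_SEL to the request flags.
 */
struct qed_nvm_cfg_attr_example {
	u16 cfg_id;	/* configuration ID to program */
	u8 entity_id;	/* entity (e.g. a PF) the attribute applies to */
	u8 len;		/* number of value bytes that follow */
	/* 'len' bytes of attribute value follow */
} __packed;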

#define QED_MAX_NVM_BUF_LEN 32
static int qed_nvm_flash_cfg_len(struct qed_dev *cdev, u32 cmd)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	u8 buf[QED_MAX_NVM_BUF_LEN];
	struct qed_ptt *ptt;
	u32 len;
	int rc;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return QED_MAX_NVM_BUF_LEN;

	rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, 0, QED_NVM_CFG_GET_FLAGS, buf,
				 &len);
	if (rc || !len) {
		DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);
		len = QED_MAX_NVM_BUF_LEN;
	}

	qed_ptt_release(hwfn, ptt);

	return len;
}

static int qed_nvm_flash_cfg_read(struct qed_dev *cdev, u8 **data,
				  u32 cmd, u32 entity_id)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	u32 flags, len;
	int rc = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read config cmd = %d entity id %d\n", cmd, entity_id);
	flags = entity_id ? QED_NVM_CFG_GET_PF_FLAGS : QED_NVM_CFG_GET_FLAGS;
	rc = qed_mcp_nvm_get_cfg(hwfn, ptt, cmd, entity_id, flags, *data, &len);
	if (rc)
		DP_ERR(cdev, "Error %d reading %d\n", rc, cmd);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
{
	const struct firmware *image;
	const u8 *data, *data_end;
	u32 cmd_type;
	int rc;

	rc = request_firmware(&image, name, &cdev->pdev->dev);
	if (rc) {
		DP_ERR(cdev, "Failed to find '%s'\n", name);
		return rc;
	}

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Flashing '%s' - firmware's data at %p, size is %08x\n",
		   name, image->data, (u32)image->size);
	data = image->data;
	data_end = data + image->size;

	rc = qed_nvm_flash_image_validate(cdev, image, &data);
	if (rc)
		goto exit;

	while (data < data_end) {
		bool check_resp = false;

		/* Parse the actual command */
		cmd_type = *((u32 *)data);
		switch (cmd_type) {
		case QED_NVM_FLASH_CMD_FILE_DATA:
			rc = qed_nvm_flash_image_file_data(cdev, &data,
							   &check_resp);
			break;
		case QED_NVM_FLASH_CMD_FILE_START:
			rc = qed_nvm_flash_image_file_start(cdev, &data,
							    &check_resp);
			break;
		case QED_NVM_FLASH_CMD_NVM_CHANGE:
			rc = qed_nvm_flash_image_access(cdev, &data,
							&check_resp);
			break;
		case QED_NVM_FLASH_CMD_NVM_CFG_ID:
			rc = qed_nvm_flash_cfg_write(cdev, &data);
			break;
		default:
			DP_ERR(cdev, "Unknown command %08x\n", cmd_type);
			rc = -EINVAL;
			goto exit;
		}

		if (rc) {
			DP_ERR(cdev, "Command %08x failed\n", cmd_type);
			goto exit;
		}

		/* Check response if needed */
		if (check_resp) {
			u32 mcp_response = 0;

			if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) {
				DP_ERR(cdev, "Failed getting MCP response\n");
				rc = -EINVAL;
				goto exit;
			}

			switch (mcp_response & FW_MSG_CODE_MASK) {
			case FW_MSG_CODE_OK:
			case FW_MSG_CODE_NVM_OK:
			case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK:
			case FW_MSG_CODE_PHY_OK:
				break;
			default:
				DP_ERR(cdev, "MFW returns error: %08x\n",
				       mcp_response);
				rc = -EINVAL;
				goto exit;
			}
		}
	}

exit:
	release_firmware(image);

	return rc;
}

static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
			     u8 *buf, u16 len)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);

	return qed_mcp_get_nvm_image(hwfn, type, buf, len);
}

void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn)
{
	struct qed_common_cb_ops *ops = p_hwfn->cdev->protocol_ops.common;
	void *cookie = p_hwfn->cdev->ops_cookie;

	if (ops && ops->schedule_recovery_handler)
		ops->schedule_recovery_handler(cookie);
}

static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
			    void *handle)
{
	return qed_set_queue_coalesce(rx_coal, tx_coal, handle);
}

static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_set_led(hwfn, ptt, mode);

	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_recovery_process(struct qed_dev *cdev)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *p_ptt;
	int rc = 0;

	p_ptt = qed_ptt_acquire(p_hwfn);
	if (!p_ptt)
		return -EAGAIN;

	rc = qed_start_recovery_process(p_hwfn, p_ptt);

	qed_ptt_release(p_hwfn, p_ptt);

	return rc;
}

static int qed_update_wol(struct qed_dev *cdev, bool enabled)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
				   : QED_OV_WOL_DISABLED);
	if (rc)
		goto out;

	rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return rc;
}

static int qed_update_drv_state(struct qed_dev *cdev, bool active)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
						QED_OV_DRIVER_STATE_ACTIVE :
						QED_OV_DRIVER_STATE_DISABLED);

	qed_ptt_release(hwfn, ptt);

	return status;
}

static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}

static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}

static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
				  u8 dev_addr, u32 offset, u32 len)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr,
				  offset, len, buf);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static int qed_set_grc_config(struct qed_dev *cdev, u32 cfg_id, u32 val)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_dbg_grc_config(hwfn, cfg_id, val);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static u8 qed_get_affin_hwfn_idx(struct qed_dev *cdev)
{
	return QED_AFFIN_HWFN_IDX(cdev);
}

static struct qed_selftest_ops qed_selftest_ops_pass = {
	.selftest_memory = &qed_selftest_memory,
	.selftest_interrupt = &qed_selftest_interrupt,
	.selftest_register = &qed_selftest_register,
	.selftest_clock = &qed_selftest_clock,
	.selftest_nvram = &qed_selftest_nvram,
};

const struct qed_common_ops qed_common_ops_pass = {
	.selftest = &qed_selftest_ops_pass,
	.probe = &qed_probe,
	.remove = &qed_remove,
	.set_power_state = &qed_set_power_state,
	.set_name = &qed_set_name,
	.update_pf_params = &qed_update_pf_params,
	.slowpath_start = &qed_slowpath_start,
	.slowpath_stop = &qed_slowpath_stop,
	.set_fp_int = &qed_set_int_fp,
	.get_fp_int = &qed_get_int_fp,
	.sb_init = &qed_sb_init,
	.sb_release = &qed_sb_release,
	.simd_handler_config = &qed_simd_handler_config,
	.simd_handler_clean = &qed_simd_handler_clean,
	.dbg_grc = &qed_dbg_grc,
	.dbg_grc_size = &qed_dbg_grc_size,
	.can_link_change = &qed_can_link_change,
	.set_link = &qed_set_link,
	.get_link = &qed_get_current_link,
	.drain = &qed_drain,
	.update_msglvl = &qed_init_dp,
	.dbg_all_data = &qed_dbg_all_data,
	.dbg_all_data_size = &qed_dbg_all_data_size,
	.chain_alloc = &qed_chain_alloc,
	.chain_free = &qed_chain_free,
	.nvm_flash = &qed_nvm_flash,
	.nvm_get_image = &qed_nvm_get_image,
	.set_coalesce = &qed_set_coalesce,
	.set_led = &qed_set_led,
	.recovery_process = &qed_recovery_process,
	.recovery_prolog = &qed_recovery_prolog,
	.update_drv_state = &qed_update_drv_state,
	.update_mac = &qed_update_mac,
	.update_mtu = &qed_update_mtu,
	.update_wol = &qed_update_wol,
	.db_recovery_add = &qed_db_recovery_add,
	.db_recovery_del = &qed_db_recovery_del,
	.read_module_eeprom = &qed_read_module_eeprom,
	.get_affin_hwfn_idx = &qed_get_affin_hwfn_idx,
	.read_nvm_cfg = &qed_nvm_flash_cfg_read,
	.read_nvm_cfg_len = &qed_nvm_flash_cfg_len,
	.set_grc_config = &qed_set_grc_config,
};

void qed_get_protocol_stats(struct qed_dev *cdev,
			    enum qed_mcp_protocol_type type,
			    union qed_mcp_protocol_stats *stats)
{
	struct qed_eth_stats eth_stats;

	memset(stats, 0, sizeof(*stats));

	switch (type) {
	case QED_MCP_LAN_STATS:
		qed_get_vport_stats(cdev, &eth_stats);
		stats->lan_stats.ucast_rx_pkts =
					eth_stats.common.rx_ucast_pkts;
		stats->lan_stats.ucast_tx_pkts =
					eth_stats.common.tx_ucast_pkts;
		stats->lan_stats.fcs_err = -1;
		break;
	case QED_MCP_FCOE_STATS:
		qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
		break;
	case QED_MCP_ISCSI_STATS:
		qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
		break;
	default:
		DP_VERBOSE(cdev, QED_MSG_SP,
			   "Invalid protocol type = %d\n", type);
		return;
	}
}

int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
{
	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Scheduling slowpath task [Flag: %d]\n",
		   QED_SLOWPATH_MFW_TLV_REQ);
	smp_mb__before_atomic();
	set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
	smp_mb__after_atomic();
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);

	return 0;
}

static void
qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
{
	struct qed_common_cb_ops *op = cdev->protocol_ops.common;
	struct qed_eth_stats_common *p_common;
	struct qed_generic_tlvs gen_tlvs;
	struct qed_eth_stats stats;
	int i;

	memset(&gen_tlvs, 0, sizeof(gen_tlvs));
	op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);

	if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
		tlv->flags.ipv4_csum_offload = true;
	if (gen_tlvs.feat_flags & QED_TLV_LSO)
		tlv->flags.lso_supported = true;
	tlv->flags.b_set = true;

	for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
		if (is_valid_ether_addr(gen_tlvs.mac[i])) {
			ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
			tlv->mac_set[i] = true;
		}
	}

	qed_get_vport_stats(cdev, &stats);
	p_common = &stats.common;
	tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
			 p_common->rx_bcast_pkts;
	tlv->rx_frames_set = true;
	tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
			p_common->rx_bcast_bytes;
	tlv->rx_bytes_set = true;
	tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
			 p_common->tx_bcast_pkts;
	tlv->tx_frames_set = true;
	tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
			p_common->tx_bcast_bytes;
	tlv->tx_bytes_set = true;
}

int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
			  union qed_mfw_tlv_data *tlv_buf)
{
	struct qed_dev *cdev = hwfn->cdev;
	struct qed_common_cb_ops *ops;

	ops = cdev->protocol_ops.common;
	if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
		DP_NOTICE(hwfn, "Can't collect TLV management info\n");
		return -EINVAL;
	}

	switch (type) {
	case QED_MFW_TLV_GENERIC:
		qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
		break;
	case QED_MFW_TLV_ETH:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
		break;
	case QED_MFW_TLV_FCOE:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
		break;
	case QED_MFW_TLV_ISCSI:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);