/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017  QLogic Corporation
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and /or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/stddef.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/crash_dump.h>
#include <linux/crc32.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_ll2_if.h>

#include "qed_sriov.h"
#include "qed_dev_api.h"
#include "qed_iscsi.h"
#include "qed_selftest.h"
#include "qed_debug.h"
#define QED_ROCE_QPS			(8192)
#define QED_ROCE_DPIS			(8)
#define QED_RDMA_SRQS			QED_ROCE_QPS

static char version[] =
	"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define FW_FILE_VERSION				\
	__stringify(FW_MAJOR_VERSION) "."	\
	__stringify(FW_MINOR_VERSION) "."	\
	__stringify(FW_REVISION_VERSION) "."	\
	__stringify(FW_ENGINEERING_VERSION)

#define QED_FW_FILE_NAME	\
	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"

MODULE_FIRMWARE(QED_FW_FILE_NAME);
static int __init qed_init(void)
{
	pr_info("%s", version);

	return 0;
}

static void __exit qed_cleanup(void)
{
	pr_notice("qed_cleanup called\n");
}

module_init(qed_init);
module_exit(qed_cleanup);
/* Check if the DMA controller on the machine can properly handle the DMA
 * addressing required by the device.
 */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
	struct device *dev = &cdev->pdev->dev;

	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
			DP_NOTICE(cdev,
				  "Can't request 64-bit consistent allocations\n");
			return -EIO;
		}
	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
		DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
		return -EIO;
	}

	return 0;
}
static void qed_free_pci(struct qed_dev *cdev)
{
	struct pci_dev *pdev = cdev->pdev;

	if (cdev->doorbells && cdev->db_size)
		iounmap(cdev->doorbells);

	if (cdev->regview)
		iounmap(cdev->regview);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
}
#define PCI_REVISION_ID_ERROR_VAL	0xff

/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev, struct pci_dev *pdev)
{
	u8 rev_id;
	int rc;

	cdev->pdev = pdev;

	rc = pci_enable_device(pdev);
	if (rc) {
		DP_NOTICE(cdev, "Cannot enable PCI device\n");
		goto err0;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #0\n");
		rc = -EIO;
		goto err1;
	}

	if (IS_PF(cdev) && !(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #2\n");
		rc = -EIO;
		goto err1;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, "qed");
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to request PCI memory resources\n");
			goto err1;
		}
		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
	if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
		DP_NOTICE(cdev,
			  "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
			  rev_id);
		rc = -ENODEV;
		goto err2;
	}
	if (!pci_is_pcie(pdev)) {
		DP_NOTICE(cdev, "The bus is not PCI Express\n");
		rc = -EIO;
		goto err2;
	}

	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (IS_PF(cdev) && !cdev->pci_params.pm_cap)
		DP_NOTICE(cdev, "Cannot find power management capability\n");

	rc = qed_set_coherency_mask(cdev);
	if (rc)
		goto err2;

	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
	cdev->pci_params.irq = pdev->irq;

	cdev->regview = pci_ioremap_bar(pdev, 0);
	if (!cdev->regview) {
		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err2;
	}

	cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
	cdev->db_size = pci_resource_len(cdev->pdev, 2);
	if (!cdev->db_size) {
		if (IS_PF(cdev)) {
			DP_NOTICE(cdev, "No Doorbell bar available\n");
			return -EINVAL;
		} else {
			return 0;
		}
	}

	cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);

	if (!cdev->doorbells) {
		DP_NOTICE(cdev, "Cannot map doorbell space\n");
		return -ENOMEM;
	}

	return 0;

err2:
	pci_release_regions(pdev);
err1:
	pci_disable_device(pdev);
err0:
	return rc;
}
int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info)
{
	struct qed_hwfn *p_hwfn = QED_LEADING_HWFN(cdev);
	struct qed_hw_info *hw_info = &p_hwfn->hw_info;
	struct qed_tunnel_info *tun = &cdev->tunnel;
	struct qed_ptt  *ptt;

	memset(dev_info, 0, sizeof(struct qed_dev_info));

	if (tun->vxlan.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->vxlan.b_mode_enabled)
		dev_info->vxlan_enable = true;

	if (tun->l2_gre.b_mode_enabled && tun->ip_gre.b_mode_enabled &&
	    tun->l2_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_gre.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->gre_enable = true;

	if (tun->l2_geneve.b_mode_enabled && tun->ip_geneve.b_mode_enabled &&
	    tun->l2_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN &&
	    tun->ip_geneve.tun_cls == QED_TUNN_CLSS_MAC_VLAN)
		dev_info->geneve_enable = true;

	dev_info->num_hwfns = cdev->num_hwfns;
	dev_info->pci_mem_start = cdev->pci_params.mem_start;
	dev_info->pci_mem_end = cdev->pci_params.mem_end;
	dev_info->pci_irq = cdev->pci_params.irq;
	dev_info->rdma_supported = QED_IS_RDMA_PERSONALITY(p_hwfn);
	dev_info->dev_type = cdev->type;
	ether_addr_copy(dev_info->hw_mac, hw_info->hw_mac_addr);

	if (IS_PF(cdev)) {
		dev_info->fw_major = FW_MAJOR_VERSION;
		dev_info->fw_minor = FW_MINOR_VERSION;
		dev_info->fw_rev = FW_REVISION_VERSION;
		dev_info->fw_eng = FW_ENGINEERING_VERSION;
		dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH,
						       &cdev->mf_bits);
		dev_info->tx_switching = true;

		if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME)
			dev_info->wol_support = true;

		dev_info->abs_pf_id = QED_LEADING_HWFN(cdev)->abs_pf_id;
	} else {
		qed_vf_get_fw_version(&cdev->hwfns[0], &dev_info->fw_major,
				      &dev_info->fw_minor, &dev_info->fw_rev,
				      &dev_info->fw_eng);
	}

	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
		if (ptt) {
			qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mfw_rev, NULL);

			qed_mcp_get_mbi_ver(QED_LEADING_HWFN(cdev), ptt,
					    &dev_info->mbi_version);

			qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
					       &dev_info->flash_size);

			qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
		}
	} else {
		qed_mcp_get_mfw_ver(QED_LEADING_HWFN(cdev), NULL,
				    &dev_info->mfw_rev, NULL);
	}

	dev_info->mtu = hw_info->mtu;

	return 0;
}
static void qed_free_cdev(struct qed_dev *cdev)
{
	kfree(cdev);
}

static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
{
	struct qed_dev *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return cdev;

	qed_init_struct(cdev);

	return cdev;
}
/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev, pci_power_t state)
{
	if (!cdev)
		return -ENODEV;

	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
	return 0;
}
static struct qed_dev *qed_probe(struct pci_dev *pdev,
				 struct qed_probe_params *params)
{
	struct qed_dev *cdev;
	int rc;

	cdev = qed_alloc_cdev(pdev);
	if (!cdev)
		goto err0;

	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
	cdev->protocol = params->protocol;

	if (params->is_vf)
		cdev->b_is_vf = true;

	qed_init_dp(cdev, params->dp_module, params->dp_level);

	rc = qed_init_pci(cdev, pdev);
	if (rc) {
		DP_ERR(cdev, "init pci failed\n");
		goto err1;
	}
	DP_INFO(cdev, "PCI init completed successfully\n");

	rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
	if (rc) {
		DP_ERR(cdev, "hw prepare failed\n");
		goto err2;
	}

	DP_INFO(cdev, "qed_probe completed successfully\n");

	return cdev;

err2:
	qed_free_pci(cdev);
err1:
	qed_free_cdev(cdev);
err0:
	return NULL;
}

static void qed_remove(struct qed_dev *cdev)
{
	if (!cdev)
		return;

	qed_hw_remove(cdev);

	qed_free_pci(cdev);

	qed_set_power_state(cdev, PCI_D3hot);

	qed_free_cdev(cdev);
}
static void qed_disable_msix(struct qed_dev *cdev)
{
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		pci_disable_msix(cdev->pdev);
		kfree(cdev->int_params.msix_table);
	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
		pci_disable_msi(cdev->pdev);
	}

	memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
}
static int qed_enable_msix(struct qed_dev *cdev,
			   struct qed_int_params *int_params)
{
	int rc, cnt, i;

	cnt = int_params->in.num_vectors;

	for (i = 0; i < cnt; i++)
		int_params->msix_table[i].entry = i;

	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
				   int_params->in.min_msix_cnt, cnt);
	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
	    (rc % cdev->num_hwfns)) {
		pci_disable_msix(cdev->pdev);

		/* If fastpath is initialized, we need at least one interrupt
		 * per hwfn [and the slow path interrupts]. New requested number
		 * should be a multiple of the number of hwfns.
		 */
		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
		DP_NOTICE(cdev,
			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
			  cnt, int_params->in.num_vectors);
		rc = pci_enable_msix_exact(cdev->pdev, int_params->msix_table,
					   cnt);
		if (!rc)
			rc = cnt;
	}

	if (rc > 0) {
		/* MSI-x configuration was achieved */
		int_params->out.int_mode = QED_INT_MODE_MSIX;
		int_params->out.num_vectors = rc;
		rc = 0;
	} else {
		DP_NOTICE(cdev,
			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
			  cnt, rc);
	}

	return rc;
}
/* This function outputs the int mode and the number of enabled msix vector */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
	struct qed_int_params *int_params = &cdev->int_params;
	struct msix_entry *tbl;
	int rc = 0, cnt;

	switch (int_params->in.int_mode) {
	case QED_INT_MODE_MSIX:
		/* Allocate MSIX table */
		cnt = int_params->in.num_vectors;
		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
		if (!int_params->msix_table) {
			rc = -ENOMEM;
			goto out;
		}

		/* Enable MSIX */
		rc = qed_enable_msix(cdev, int_params);
		if (!rc)
			goto out;

		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
		kfree(int_params->msix_table);
		if (force_mode)
			goto out;
		/* Fallthrough */

	case QED_INT_MODE_MSI:
		if (cdev->num_hwfns == 1) {
			rc = pci_enable_msi(cdev->pdev);
			if (!rc) {
				int_params->out.int_mode = QED_INT_MODE_MSI;
				goto out;
			}

			DP_NOTICE(cdev, "Failed to enable MSI\n");
			if (force_mode)
				goto out;
		}
		/* Fallthrough */

	case QED_INT_MODE_INTA:
		int_params->out.int_mode = QED_INT_MODE_INTA;
		rc = 0;
		goto out;
	default:
		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
			  int_params->in.int_mode);
		rc = -EINVAL;
	}

out:
	if (!rc)
		DP_INFO(cdev, "Using %s interrupts\n",
			int_params->out.int_mode == QED_INT_MODE_INTA ?
			"INTa" : int_params->out.int_mode == QED_INT_MODE_MSI ?
			"MSI" : "MSI-X");
	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

	return rc;
}
static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
				    int index, void (*handler)(void *))
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	hwfn->simd_proto_handler[relative_idx].func = handler;
	hwfn->simd_proto_handler[relative_idx].token = token;
}

static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	memset(&hwfn->simd_proto_handler[relative_idx], 0,
	       sizeof(struct qed_simd_fp_handler));
}
static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
	tasklet_schedule((struct tasklet_struct *)tasklet);

	return IRQ_HANDLED;
}
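
/* Single-IRQ (INTA/MSI) handler: bit 0 of each hwfn's IGU SISR status is the
 * slowpath interrupt, while bits 1..64 map onto the per-protocol SIMD
 * fastpath handlers registered via qed_simd_handler_config().
 */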
static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
	struct qed_hwfn *hwfn;
	irqreturn_t rc = IRQ_NONE;
	u64 status;
	int i, j;

	for (i = 0; i < cdev->num_hwfns; i++) {
		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

		if (!status)
			continue;

		hwfn = &cdev->hwfns[i];

		/* Slowpath interrupt */
		if (unlikely(status & 0x1)) {
			tasklet_schedule(hwfn->sp_dpc);
			status &= ~0x1;
			rc = IRQ_HANDLED;
		}

		/* Fastpath interrupts */
		for (j = 0; j < 64; j++) {
			if ((0x2ULL << j) & status) {
				struct qed_simd_fp_handler *p_handler =
					&hwfn->simd_proto_handler[j];

				if (p_handler->func)
					p_handler->func(p_handler->token);
				else
					DP_NOTICE(hwfn,
						  "Not calling fastpath handler as it is NULL [handler #%d, status 0x%llx]\n",
						  j, status);

				status &= ~(0x2ULL << j);
				rc = IRQ_HANDLED;
			}
		}

		if (unlikely(status))
			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
				   "got an unknown interrupt status 0x%llx\n",
				   status);
	}

	return rc;
}
int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
	struct qed_dev *cdev = hwfn->cdev;
	u32 int_mode;
	int rc = 0;
	u8 id;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX) {
		id = hwfn->my_id;
		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
			 id, cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
		rc = request_irq(cdev->int_params.msix_table[id].vector,
				 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
	} else {
		unsigned long flags = 0;

		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
			 PCI_FUNC(cdev->pdev->devfn));

		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
			flags |= IRQF_SHARED;

		rc = request_irq(cdev->pdev->irq, qed_single_int,
				 flags, cdev->name, cdev);
	}

	if (rc)
		DP_NOTICE(cdev, "request_irq failed, rc = %d\n", rc);
	else
		DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
			   "Requested slowpath %s\n",
			   (int_mode == QED_INT_MODE_MSIX) ? "MSI-X" : "IRQ");

	return rc;
}
static void qed_slowpath_tasklet_flush(struct qed_hwfn *p_hwfn)
{
	/* Calling the disable function will make sure that any
	 * currently-running function is completed. The following call to the
	 * enable function makes this sequence a flush-like operation.
	 */
	if (p_hwfn->b_sp_dpc_enabled) {
		tasklet_disable(p_hwfn->sp_dpc);
		tasklet_enable(p_hwfn->sp_dpc);
	}
}

void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn)
{
	struct qed_dev *cdev = p_hwfn->cdev;
	u8 id = p_hwfn->my_id;
	u32 int_mode;

	int_mode = cdev->int_params.out.int_mode;
	if (int_mode == QED_INT_MODE_MSIX)
		synchronize_irq(cdev->int_params.msix_table[id].vector);
	else
		synchronize_irq(cdev->pdev->irq);

	qed_slowpath_tasklet_flush(p_hwfn);
}
static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
	int i;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		for_each_hwfn(cdev, i) {
			if (!cdev->hwfns[i].b_int_requested)
				break;
			synchronize_irq(cdev->int_params.msix_table[i].vector);
			free_irq(cdev->int_params.msix_table[i].vector,
				 cdev->hwfns[i].sp_dpc);
		}
	} else {
		if (QED_LEADING_HWFN(cdev)->b_int_requested)
			free_irq(cdev->pdev->irq, cdev);
	}
	qed_int_disable_post_isr_release(cdev);
}
static int qed_nic_stop(struct qed_dev *cdev)
{
	int i, rc;

	rc = qed_hw_stop(cdev);

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (p_hwfn->b_sp_dpc_enabled) {
			tasklet_disable(p_hwfn->sp_dpc);
			p_hwfn->b_sp_dpc_enabled = false;
			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
				   "Disabled sp tasklet [hwfn %d] at %p\n",
				   i, p_hwfn->sp_dpc);
		}
	}

	qed_dbg_pf_exit(cdev);

	return rc;
}
static int qed_nic_setup(struct qed_dev *cdev)
{
	int rc, i;

	/* Determine if interface is going to require LL2 */
	if (QED_LEADING_HWFN(cdev)->hw_info.personality != QED_PCI_ETH) {
		for (i = 0; i < cdev->num_hwfns; i++) {
			struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

			p_hwfn->using_ll2 = true;
		}
	}

	rc = qed_resc_alloc(cdev);
	if (rc)
		return rc;

	DP_INFO(cdev, "Allocated qed resources\n");

	qed_resc_setup(cdev);

	return rc;
}
static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
		limit = cdev->num_hwfns * 63;
	else if (cdev->int_params.fp_msix_cnt)
		limit = cdev->int_params.fp_msix_cnt;

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}
static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(struct qed_int_info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	/* Need to expose only MSI-X information; Single IRQ is handled solely
	 * by qed.
	 */
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.fp_msix_base;

		info->msix_cnt = cdev->int_params.fp_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];
	}

	return 0;
}
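
/* Derive the slowpath/fastpath vector split: one slowpath vector per hwfn,
 * the remainder is fastpath; fastpath vectors beyond the L2 queue count are
 * handed over to RDMA when an RDMA personality is active.
 */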
static int qed_slowpath_setup_int(struct qed_dev *cdev,
				  enum qed_int_mode int_mode)
{
	struct qed_sb_cnt_info sb_cnt_info;
	int num_l2_queues = 0;
	int rc;
	int i;

	if ((int_mode == QED_INT_MODE_MSI) && (cdev->num_hwfns > 1)) {
		DP_NOTICE(cdev, "MSI mode is not supported for CMT devices\n");
		return -EINVAL;
	}

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = int_mode;
	for_each_hwfn(cdev, i) {
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
		cdev->int_params.in.num_vectors += sb_cnt_info.cnt;
		cdev->int_params.in.num_vectors++; /* slowpath */
	}

	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

	if (is_kdump_kernel()) {
		DP_INFO(cdev,
			"Kdump kernel: Limit the max number of requested MSI-X vectors to %hd\n",
			cdev->int_params.in.min_msix_cnt);
		cdev->int_params.in.num_vectors =
			cdev->int_params.in.min_msix_cnt;
	}

	rc = qed_set_int_mode(cdev, false);
	if (rc) {
		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
		return rc;
	}

	cdev->int_params.fp_msix_base = cdev->num_hwfns;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
				       cdev->num_hwfns;

	if (!IS_ENABLED(CONFIG_QED_RDMA) ||
	    !QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev)))
		return 0;

	for_each_hwfn(cdev, i)
		num_l2_queues += FEAT_NUM(&cdev->hwfns[i], QED_PF_L2_QUE);

	DP_VERBOSE(cdev, QED_MSG_RDMA,
		   "cdev->int_params.fp_msix_cnt=%d num_l2_queues=%d\n",
		   cdev->int_params.fp_msix_cnt, num_l2_queues);

	if (cdev->int_params.fp_msix_cnt > num_l2_queues) {
		cdev->int_params.rdma_msix_cnt =
			(cdev->int_params.fp_msix_cnt - num_l2_queues)
			/ cdev->num_hwfns;
		cdev->int_params.rdma_msix_base =
			cdev->int_params.fp_msix_base + num_l2_queues;
		cdev->int_params.fp_msix_cnt = num_l2_queues;
	} else {
		cdev->int_params.rdma_msix_cnt = 0;
	}

	DP_VERBOSE(cdev, QED_MSG_RDMA, "roce_msix_cnt=%d roce_msix_base=%d\n",
		   cdev->int_params.rdma_msix_cnt,
		   cdev->int_params.rdma_msix_base);

	return 0;
}
static int qed_slowpath_vf_setup_int(struct qed_dev *cdev)
{
	int rc;

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));
	cdev->int_params.in.int_mode = QED_INT_MODE_MSIX;

	qed_vf_get_num_rxqs(QED_LEADING_HWFN(cdev),
			    &cdev->int_params.in.num_vectors);
	if (cdev->num_hwfns > 1) {
		u8 vectors = 0;

		qed_vf_get_num_rxqs(&cdev->hwfns[1], &vectors);
		cdev->int_params.in.num_vectors += vectors;
	}

	/* We want a minimum of one fastpath vector per vf hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns;

	rc = qed_set_int_mode(cdev, true);
	if (rc)
		return rc;

	cdev->int_params.fp_msix_base = 0;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors;

	return 0;
}
u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int rc;

	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);

	if (rc != Z_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
			   rc);
		return 0;
	}

	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
	zlib_inflateEnd(p_hwfn->stream);

	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
			   p_hwfn->stream->msg, rc);
		return 0;
	}

	return p_hwfn->stream->total_out / 4;
}
static int qed_alloc_stream_mem(struct qed_dev *cdev)
{
	int i;
	void *workspace;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
		if (!p_hwfn->stream)
			return -ENOMEM;

		workspace = vzalloc(zlib_inflate_workspacesize());
		if (!workspace)
			return -ENOMEM;
		p_hwfn->stream->workspace = workspace;
	}

	return 0;
}

static void qed_free_stream_mem(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!p_hwfn->stream)
			return;

		vfree(p_hwfn->stream->workspace);
		kfree(p_hwfn->stream);
	}
}
static void qed_update_pf_params(struct qed_dev *cdev,
				 struct qed_pf_params *params)
{
	int i;

	if (IS_ENABLED(CONFIG_QED_RDMA)) {
		params->rdma_pf_params.num_qps = QED_ROCE_QPS;
		params->rdma_pf_params.min_dpis = QED_ROCE_DPIS;
		params->rdma_pf_params.num_srqs = QED_RDMA_SRQS;
		/* divide by 3 the MRs to avoid MF ILT overflow */
		params->rdma_pf_params.gl_pi = QED_ROCE_PROTOCOL_INDEX;
	}

	if (cdev->num_hwfns > 1 || IS_VF(cdev))
		params->eth_pf_params.num_arfs_filters = 0;

	/* In case we might support RDMA, don't allow qede to be greedy
	 * with the L2 contexts. Allow for 64 queues [rx, tx cos, xdp]
	 * per hwfn.
	 */
	if (QED_IS_RDMA_PERSONALITY(QED_LEADING_HWFN(cdev))) {
		u16 *num_cons;

		num_cons = &params->eth_pf_params.num_cons;
		*num_cons = min_t(u16, *num_cons, QED_MAX_L2_CONS);
	}

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->pf_params = *params;
	}
}
static void qed_slowpath_wq_stop(struct qed_dev *cdev)
{
	int i;

	if (IS_VF(cdev))
		return;

	for_each_hwfn(cdev, i) {
		if (!cdev->hwfns[i].slowpath_wq)
			continue;

		flush_workqueue(cdev->hwfns[i].slowpath_wq);
		destroy_workqueue(cdev->hwfns[i].slowpath_wq);
	}
}
static void qed_slowpath_task(struct work_struct *work)
{
	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
					     slowpath_task.work);
	struct qed_ptt *ptt = qed_ptt_acquire(hwfn);

	if (!ptt) {
		queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
		return;
	}

	if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
			       &hwfn->slowpath_task_flags))
		qed_mfw_process_tlv_req(hwfn, ptt);

	qed_ptt_release(hwfn, ptt);
}
static int qed_slowpath_wq_start(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	char name[NAME_SIZE];
	int i;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];

		snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
			 cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);

		hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
		if (!hwfn->slowpath_wq) {
			DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
			return -ENOMEM;
		}

		INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
	}

	return 0;
}
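
/* Slowpath bring-up sequence: IOV/slowpath workqueues, firmware request,
 * interrupt setup, zlib stream allocation, hardware init, optional LL2
 * allocation and driver-version notification to the MFW.
 */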
static int qed_slowpath_start(struct qed_dev *cdev,
			      struct qed_slowpath_params *params)
{
	struct qed_drv_load_params drv_load_params;
	struct qed_hw_init_params hw_init_params;
	struct qed_mcp_drv_version drv_version;
	struct qed_tunnel_info tunn_info;
	const u8 *data = NULL;
	struct qed_hwfn *hwfn;
	struct qed_ptt *p_ptt;
	int rc = -EINVAL;

	if (qed_iov_wq_start(cdev))
		goto err;

	if (qed_slowpath_wq_start(cdev))
		goto err;

	if (IS_PF(cdev)) {
		rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
				      &cdev->pdev->dev);
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to find fw file - /lib/firmware/%s\n",
				  QED_FW_FILE_NAME);
			goto err;
		}

		if (cdev->num_hwfns == 1) {
			p_ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
			if (p_ptt) {
				QED_LEADING_HWFN(cdev)->p_arfs_ptt = p_ptt;
			} else {
				DP_NOTICE(cdev,
					  "Failed to acquire PTT for aRFS\n");
				goto err;
			}
		}
	}

	cdev->rx_coalesce_usecs = QED_DEFAULT_RX_USECS;
	rc = qed_nic_setup(cdev);
	if (rc)
		goto err;

	if (IS_PF(cdev))
		rc = qed_slowpath_setup_int(cdev, params->int_mode);
	else
		rc = qed_slowpath_vf_setup_int(cdev);
	if (rc)
		goto err1;

	if (IS_PF(cdev)) {
		/* Allocate stream for unzipping */
		rc = qed_alloc_stream_mem(cdev);
		if (rc)
			goto err2;

		/* First Dword used to differentiate between various sources */
		data = cdev->firmware->data + sizeof(u32);

		qed_dbg_pf_init(cdev);
	}

	/* Start the slowpath */
	memset(&hw_init_params, 0, sizeof(hw_init_params));
	memset(&tunn_info, 0, sizeof(tunn_info));
	tunn_info.vxlan.b_mode_enabled = true;
	tunn_info.l2_gre.b_mode_enabled = true;
	tunn_info.ip_gre.b_mode_enabled = true;
	tunn_info.l2_geneve.b_mode_enabled = true;
	tunn_info.ip_geneve.b_mode_enabled = true;
	tunn_info.vxlan.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_gre.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.l2_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	tunn_info.ip_geneve.tun_cls = QED_TUNN_CLSS_MAC_VLAN;
	hw_init_params.p_tunn = &tunn_info;
	hw_init_params.b_hw_start = true;
	hw_init_params.int_mode = cdev->int_params.out.int_mode;
	hw_init_params.allow_npar_tx_switch = true;
	hw_init_params.bin_fw_data = data;

	memset(&drv_load_params, 0, sizeof(drv_load_params));
	drv_load_params.is_crash_kernel = is_kdump_kernel();
	drv_load_params.mfw_timeout_val = QED_LOAD_REQ_LOCK_TO_DEFAULT;
	drv_load_params.avoid_eng_reset = false;
	drv_load_params.override_force_load = QED_OVERRIDE_FORCE_LOAD_NONE;
	hw_init_params.p_drv_load_params = &drv_load_params;

	rc = qed_hw_init(cdev, &hw_init_params);
	if (rc)
		goto err2;

	DP_INFO(cdev,
		"HW initialization and function start completed successfully\n");

	if (IS_PF(cdev)) {
		cdev->tunn_feature_mask = (BIT(QED_MODE_VXLAN_TUNN) |
					   BIT(QED_MODE_L2GENEVE_TUNN) |
					   BIT(QED_MODE_IPGENEVE_TUNN) |
					   BIT(QED_MODE_L2GRE_TUNN) |
					   BIT(QED_MODE_IPGRE_TUNN));
	}

	/* Allocate LL2 interface if needed */
	if (QED_LEADING_HWFN(cdev)->using_ll2) {
		rc = qed_ll2_alloc_if(cdev);
		if (rc)
			goto err3;
	}
	if (IS_PF(cdev)) {
		hwfn = QED_LEADING_HWFN(cdev);
		drv_version.version = (params->drv_major << 24) |
				      (params->drv_minor << 16) |
				      (params->drv_rev << 8) |
				      (params->drv_eng);
		strlcpy(drv_version.name, params->name,
			MCP_DRV_VER_STR_SIZE - 4);
		rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
					      &drv_version);
		if (rc) {
			DP_NOTICE(cdev, "Failed sending drv version command\n");
			goto err4;
		}
	}

	qed_reset_vport_stats(cdev);

	return 0;

err4:
	qed_ll2_dealloc_if(cdev);
err3:
	qed_hw_stop(cdev);
err2:
	qed_hw_timers_stop_all(cdev);
	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);
	qed_free_stream_mem(cdev);
	qed_disable_msix(cdev);
err1:
	qed_resc_free(cdev);
err:
	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	if (IS_PF(cdev) && (cdev->num_hwfns == 1) &&
	    QED_LEADING_HWFN(cdev)->p_arfs_ptt)
		qed_ptt_release(QED_LEADING_HWFN(cdev),
				QED_LEADING_HWFN(cdev)->p_arfs_ptt);

	qed_iov_wq_stop(cdev, false);

	qed_slowpath_wq_stop(cdev);

	return rc;
}
static int qed_slowpath_stop(struct qed_dev *cdev)
{
	if (!cdev)
		return -ENODEV;

	qed_slowpath_wq_stop(cdev);

	qed_ll2_dealloc_if(cdev);

	if (IS_PF(cdev)) {
		if (cdev->num_hwfns == 1)
			qed_ptt_release(QED_LEADING_HWFN(cdev),
					QED_LEADING_HWFN(cdev)->p_arfs_ptt);
		qed_free_stream_mem(cdev);
		if (IS_QED_ETH_IF(cdev))
			qed_sriov_disable(cdev, true);
	}

	qed_nic_stop(cdev);

	if (IS_PF(cdev))
		qed_slowpath_irq_free(cdev);

	qed_disable_msix(cdev);

	qed_resc_free(cdev);

	qed_iov_wq_stop(cdev, true);

	if (IS_PF(cdev))
		release_firmware(cdev->firmware);

	return 0;
}
static void qed_set_name(struct qed_dev *cdev, char name[NAME_SIZE])
{
	int i;

	memcpy(cdev->name, name, NAME_SIZE);
	for_each_hwfn(cdev, i)
		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);
}
static u32 qed_sb_init(struct qed_dev *cdev,
		       struct qed_sb_info *sb_info,
		       void *sb_virt_addr,
		       dma_addr_t sb_phy_addr, u16 sb_id,
		       enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	struct qed_ptt *p_ptt;
	int hwfn_index;
	u16 rel_sb_id;
	u8 n_hwfns;
	u32 rc;

	/* RoCE uses single engine and CMT uses two engines. When using both
	 * we force only a single engine. Storage uses only engine 0 too.
	 */
	if (type == QED_SB_TYPE_L2_QUEUE)
		n_hwfns = cdev->num_hwfns;
	else
		n_hwfns = 1;

	hwfn_index = sb_id % n_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / n_hwfns;

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   hwfn_index, rel_sb_id, sb_id);

	if (IS_PF(p_hwfn->cdev)) {
		p_ptt = qed_ptt_acquire(p_hwfn);
		if (!p_ptt)
			return -EBUSY;

		rc = qed_int_sb_init(p_hwfn, p_ptt, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
		qed_ptt_release(p_hwfn, p_ptt);
	} else {
		rc = qed_int_sb_init(p_hwfn, NULL, sb_info, sb_virt_addr,
				     sb_phy_addr, rel_sb_id);
	}

	return rc;
}
static u32 qed_sb_release(struct qed_dev *cdev,
			  struct qed_sb_info *sb_info, u16 sb_id)
{
	struct qed_hwfn *p_hwfn;
	int hwfn_index;
	u16 rel_sb_id;
	u32 rc;

	hwfn_index = sb_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / cdev->num_hwfns;

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   hwfn_index, rel_sb_id, sb_id);

	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);

	return rc;
}
static bool qed_can_link_change(struct qed_dev *cdev)
{
	return true;
}
static int qed_set_link(struct qed_dev *cdev, struct qed_link_params *params)
{
	struct qed_hwfn *hwfn;
	struct qed_mcp_link_params *link_params;
	struct qed_ptt *ptt;
	int rc;

	if (!cdev)
		return -ENODEV;

	/* The link should be set only once per PF */
	hwfn = &cdev->hwfns[0];

	/* When VF wants to set link, force it to read the bulletin instead.
	 * This mimics the PF behavior, where a notification [both immediate
	 * and possible later] would be generated when changing properties.
	 */
	if (IS_VF(cdev)) {
		qed_schedule_iov(hwfn, QED_IOV_WQ_VF_FORCE_LINK_QUERY_FLAG);
		return 0;
	}

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EBUSY;

	link_params = qed_mcp_get_link_params(hwfn);
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		link_params->speed.autoneg = params->autoneg;
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		link_params->speed.advertised_speeds = 0;
		if ((params->adv_speeds & QED_LM_1000baseT_Half_BIT) ||
		    (params->adv_speeds & QED_LM_1000baseT_Full_BIT))
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		if (params->adv_speeds & QED_LM_10000baseKR_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		if (params->adv_speeds & QED_LM_25000baseKR_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G;
		if (params->adv_speeds & QED_LM_40000baseLR4_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
		if (params->adv_speeds & QED_LM_50000baseKR2_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
		if (params->adv_speeds & QED_LM_100000baseKR4_Full_BIT)
			link_params->speed.advertised_speeds |=
			    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
		link_params->speed.forced_speed = params->forced_speed;
	if (params->override_flags & QED_LINK_OVERRIDE_PAUSE_CONFIG) {
		if (params->pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
			link_params->pause.autoneg = true;
		else
			link_params->pause.autoneg = false;
		if (params->pause_config & QED_LINK_PAUSE_RX_ENABLE)
			link_params->pause.forced_rx = true;
		else
			link_params->pause.forced_rx = false;
		if (params->pause_config & QED_LINK_PAUSE_TX_ENABLE)
			link_params->pause.forced_tx = true;
		else
			link_params->pause.forced_tx = false;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_LOOPBACK_MODE) {
		switch (params->loopback_mode) {
		case QED_LINK_LOOPBACK_INT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT_PHY:
			link_params->loopback_mode = ETH_LOOPBACK_EXT_PHY;
			break;
		case QED_LINK_LOOPBACK_EXT:
			link_params->loopback_mode = ETH_LOOPBACK_EXT;
			break;
		case QED_LINK_LOOPBACK_MAC:
			link_params->loopback_mode = ETH_LOOPBACK_MAC;
			break;
		default:
			link_params->loopback_mode = ETH_LOOPBACK_NONE;
			break;
		}
	}

	if (params->override_flags & QED_LINK_OVERRIDE_EEE_CONFIG)
		memcpy(&link_params->eee, &params->eee,
		       sizeof(link_params->eee));

	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);

	qed_ptt_release(hwfn, ptt);

	return rc;
}
static int qed_get_port_type(u32 media_type)
{
	int port_type;

	switch (media_type) {
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_XFP_FIBER:
	case MEDIA_MODULE_FIBER:
	case MEDIA_KR:
		port_type = PORT_FIBRE;
		break;
	case MEDIA_DA_TWINAX:
		port_type = PORT_DA;
		break;
	case MEDIA_BASE_T:
		port_type = PORT_TP;
		break;
	case MEDIA_NOT_PRESENT:
		port_type = PORT_NONE;
		break;
	case MEDIA_UNSPECIFIED:
	default:
		port_type = PORT_OTHER;
		break;
	}
	return port_type;
}
static int qed_get_link_data(struct qed_hwfn *hwfn,
			     struct qed_mcp_link_params *params,
			     struct qed_mcp_link_state *link,
			     struct qed_mcp_link_capabilities *link_caps)
{
	void *p;

	if (!IS_PF(hwfn->cdev)) {
		qed_vf_get_link_params(hwfn, params);
		qed_vf_get_link_state(hwfn, link);
		qed_vf_get_link_caps(hwfn, link_caps);

		return 0;
	}

	p = qed_mcp_get_link_params(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(params, p, sizeof(*params));

	p = qed_mcp_get_link_state(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link, p, sizeof(*link));

	p = qed_mcp_get_link_capabilities(hwfn);
	if (!p)
		return -ENXIO;
	memcpy(link_caps, p, sizeof(*link_caps));

	return 0;
}
static void qed_fill_link(struct qed_hwfn *hwfn,
			  struct qed_ptt *ptt,
			  struct qed_link_output *if_link)
{
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	struct qed_mcp_link_capabilities link_caps;
	u32 media_type;

	memset(if_link, 0, sizeof(*if_link));

	/* Prepare source inputs */
	if (qed_get_link_data(hwfn, &params, &link, &link_caps)) {
		dev_warn(&hwfn->cdev->pdev->dev, "no link data available\n");
		return;
	}

	/* Set the link parameters to pass to protocol driver */
	if (link.link_up)
		if_link->link_up = true;

	/* TODO - at the moment assume supported and advertised speed equal */
	if_link->supported_caps = QED_LM_FIBRE_BIT;
	if (link_caps.default_speed_autoneg)
		if_link->supported_caps |= QED_LM_Autoneg_BIT;
	if (params.pause.autoneg ||
	    (params.pause.forced_rx && params.pause.forced_tx))
		if_link->supported_caps |= QED_LM_Asym_Pause_BIT;
	if (params.pause.autoneg || params.pause.forced_rx ||
	    params.pause.forced_tx)
		if_link->supported_caps |= QED_LM_Pause_BIT;

	if_link->advertised_caps = if_link->supported_caps;
	if (params.speed.autoneg)
		if_link->advertised_caps |= QED_LM_Autoneg_BIT;
	else
		if_link->advertised_caps &= ~QED_LM_Autoneg_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		if_link->advertised_caps |= QED_LM_1000baseT_Half_BIT |
		    QED_LM_1000baseT_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		if_link->advertised_caps |= QED_LM_10000baseKR_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		if_link->advertised_caps |= QED_LM_25000baseKR_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		if_link->advertised_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		if_link->advertised_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		if_link->advertised_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		if_link->supported_caps |= QED_LM_1000baseT_Half_BIT |
		    QED_LM_1000baseT_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		if_link->supported_caps |= QED_LM_10000baseKR_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_25G)
		if_link->supported_caps |= QED_LM_25000baseKR_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		if_link->supported_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		if_link->supported_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_BB_100G)
		if_link->supported_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link.link_up)
		if_link->speed = link.speed;

	/* TODO - fill duplex properly */
	if_link->duplex = DUPLEX_FULL;
	qed_mcp_get_media_type(hwfn, ptt, &media_type);
	if_link->port = qed_get_port_type(media_type);

	if_link->autoneg = params.speed.autoneg;

	if (params.pause.autoneg)
		if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	if (params.pause.forced_rx)
		if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
	if (params.pause.forced_tx)
		if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;

	/* Link partner capabilities */
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_HD)
		if_link->lp_caps |= QED_LM_1000baseT_Half_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_FD)
		if_link->lp_caps |= QED_LM_1000baseT_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
		if_link->lp_caps |= QED_LM_10000baseKR_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_25G)
		if_link->lp_caps |= QED_LM_25000baseKR_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
		if_link->lp_caps |= QED_LM_40000baseLR4_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G)
		if_link->lp_caps |= QED_LM_50000baseKR2_Full_BIT;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G)
		if_link->lp_caps |= QED_LM_100000baseKR4_Full_BIT;

	if (link.an_complete)
		if_link->lp_caps |= QED_LM_Autoneg_BIT;

	if (link.partner_adv_pause)
		if_link->lp_caps |= QED_LM_Pause_BIT;
	if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
	    link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
		if_link->lp_caps |= QED_LM_Asym_Pause_BIT;

	if (link_caps.default_eee == QED_MCP_EEE_UNSUPPORTED) {
		if_link->eee_supported = false;
	} else {
		if_link->eee_supported = true;
		if_link->eee_active = link.eee_active;
		if_link->sup_caps = link_caps.eee_speed_caps;
		/* MFW clears adv_caps on eee disable; use configured value */
		if_link->eee.adv_caps = link.eee_adv_caps ? link.eee_adv_caps :
					params.eee.adv_caps;
		if_link->eee.lp_adv_caps = link.eee_lp_adv_caps;
		if_link->eee.enable = params.eee.enable;
		if_link->eee.tx_lpi_enable = params.eee.tx_lpi_enable;
		if_link->eee.tx_lpi_timer = params.eee.tx_lpi_timer;
	}
}
static void qed_get_current_link(struct qed_dev *cdev,
				 struct qed_link_output *if_link)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i;

	hwfn = &cdev->hwfns[0];
	if (IS_PF(cdev)) {
		ptt = qed_ptt_acquire(hwfn);
		if (ptt) {
			qed_fill_link(hwfn, ptt, if_link);
			qed_ptt_release(hwfn, ptt);
		} else {
			DP_NOTICE(hwfn, "Failed to fill link; No PTT\n");
		}
	} else {
		qed_fill_link(hwfn, NULL, if_link);
	}

	for_each_hwfn(cdev, i)
		qed_inform_vf_link_state(&cdev->hwfns[i]);
}
void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt)
{
	void *cookie = hwfn->cdev->ops_cookie;
	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
	struct qed_link_output if_link;

	qed_fill_link(hwfn, ptt, &if_link);
	qed_inform_vf_link_state(hwfn);

	if (IS_LEAD_HWFN(hwfn) && cookie)
		op->link_update(cookie, &if_link);
}
static int qed_drain(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i, rc;

	if (IS_VF(cdev))
		return 0;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];
		ptt = qed_ptt_acquire(hwfn);
		if (!ptt) {
			DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
			return -EBUSY;
		}
		rc = qed_mcp_drain(hwfn, ptt);
		qed_ptt_release(hwfn, ptt);
		if (rc)
			return rc;
	}

	return 0;
}
static u32 qed_nvm_flash_image_access_crc(struct qed_dev *cdev,
					  struct qed_nvm_image_att *nvm_image,
					  u32 *crc)
{
	u8 *buf = NULL;
	int rc, j;
	u32 val;

	/* Allocate a buffer for holding the nvram image */
	buf = kzalloc(nvm_image->length, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Read image into buffer */
	rc = qed_mcp_nvm_read(cdev, nvm_image->start_addr,
			      buf, nvm_image->length);
	if (rc) {
		DP_ERR(cdev, "Failed reading image from nvm\n");
		goto out;
	}

	/* Convert the buffer into big-endian format (excluding the
	 * closing 4 bytes of CRC).
	 */
	for (j = 0; j < nvm_image->length - 4; j += 4) {
		val = cpu_to_be32(*(u32 *)&buf[j]);
		*(u32 *)&buf[j] = val;
	}

	/* Calc CRC for the "actual" image buffer, i.e. not including
	 * the last 4 CRC bytes.
	 */
	*crc = (~cpu_to_be32(crc32(0xffffffff, buf, nvm_image->length - 4)));

out:
	kfree(buf);

	return rc;
}
/* Binary file format -
 *     /----------------------------------------------------------------------\
 * 0B  |                       0x4 [command index]                            |
 * 4B  | image_type     | Options        |  Number of register settings       |
 * 8B  |                       Value                                          |
 * 12B |                       Mask                                           |
 * 16B |                       Offset                                         |
 *     \----------------------------------------------------------------------/
 * There can be several Value-Mask-Offset sets as specified by 'Number of...'.
 * Options - 0'b - Calculate & Update CRC for image
 */
static int qed_nvm_flash_image_access(struct qed_dev *cdev, const u8 **data,
				      bool *check_resp)
{
	struct qed_nvm_image_att nvm_image;
	struct qed_hwfn *p_hwfn;
	bool is_crc = false;
	u32 image_type;
	int rc = 0, i;
	u16 len;

	*data += 4;
	image_type = **data;
	p_hwfn = QED_LEADING_HWFN(cdev);
	for (i = 0; i < p_hwfn->nvm_info.num_images; i++)
		if (image_type == p_hwfn->nvm_info.image_att[i].image_type)
			break;
	if (i == p_hwfn->nvm_info.num_images) {
		DP_ERR(cdev, "Failed to find nvram image of type %08x\n",
		       image_type);
		return -ENOENT;
	}

	nvm_image.start_addr = p_hwfn->nvm_info.image_att[i].nvm_start_addr;
	nvm_image.length = p_hwfn->nvm_info.image_att[i].len;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Read image %02x; type = %08x; NVM [%08x,...,%08x]\n",
		   **data, image_type, nvm_image.start_addr,
		   nvm_image.start_addr + nvm_image.length - 1);
	(*data)++;
	is_crc = !!(**data & BIT(0));
	(*data)++;
	len = *((u16 *)*data);
	*data += 2;
	*check_resp = !!(**data & BIT(0));
	*data += 4;

	if (is_crc) {
		u32 crc = 0;

		rc = qed_nvm_flash_image_access_crc(cdev, &nvm_image, &crc);
		if (rc) {
			DP_ERR(cdev, "Failed calculating CRC, rc = %d\n", rc);
			goto exit;
		}

		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
				       (nvm_image.start_addr +
					nvm_image.length - 4), (u8 *)&crc, 4);
		if (rc)
			DP_ERR(cdev, "Failed writing to %08x, rc = %d\n",
			       nvm_image.start_addr + nvm_image.length - 4, rc);
		goto exit;
	}

	/* Iterate over the values for setting */
	while (len) {
		u32 offset, mask, value, cur_value;
		u8 buf[4];

		value = *((u32 *)*data);
		*data += 4;
		mask = *((u32 *)*data);
		*data += 4;
		offset = *((u32 *)*data);
		*data += 4;

		rc = qed_mcp_nvm_read(cdev, nvm_image.start_addr + offset, buf,
				      4);
		if (rc) {
			DP_ERR(cdev, "Failed reading from %08x\n",
			       nvm_image.start_addr + offset);
			goto exit;
		}

		cur_value = le32_to_cpu(*((__le32 *)buf));
		DP_VERBOSE(cdev, NETIF_MSG_DRV,
			   "NVM %08x: %08x -> %08x [Value %08x Mask %08x]\n",
			   nvm_image.start_addr + offset, cur_value,
			   (cur_value & ~mask) | (value & mask), value, mask);
		value = (value & mask) | (cur_value & ~mask);
		rc = qed_mcp_nvm_write(cdev, QED_NVM_WRITE_NVRAM,
				       nvm_image.start_addr + offset,
				       (u8 *)&value, 4);
		if (rc) {
			DP_ERR(cdev, "Failed writing to %08x\n",
			       nvm_image.start_addr + offset);
			goto exit;
		}

		len--;
	}

exit:
	return rc;
}
/* Binary file format -
 *     /----------------------------------------------------------------------\
 * 0B  |                       0x3 [command index]                            |
 * 4B  | b'0: check_response?   | b'1-31  reserved                            |
 * 8B  | File-type |                   reserved                               |
 *     \----------------------------------------------------------------------/
 * Start a new file of the provided type
 */
static int qed_nvm_flash_image_file_start(struct qed_dev *cdev,
					  const u8 **data, bool *check_resp)
{
	int rc;

	*data += 4;
	*check_resp = !!(**data & BIT(0));
	*data += 4;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "About to start a new file of type %02x\n", **data);
	rc = qed_mcp_nvm_put_file_begin(cdev, **data);
	*data += 4;

	return rc;
}
/* Binary file format -
 *     /----------------------------------------------------------------------\
 * 0B  |                       0x2 [command index]                            |
 * 4B  |                       Length in bytes                                |
 * 8B  | b'0: check_response?   | b'1-31  reserved                            |
 * 12B |                       Offset in bytes                                |
 *     \----------------------------------------------------------------------/
 * Write data as part of a file that was previously started. Data should be
 * of length equal to that provided in the message
 */
static int qed_nvm_flash_image_file_data(struct qed_dev *cdev,
					 const u8 **data, bool *check_resp)
{
	u32 offset, len;
	int rc;

	*data += 4;
	len = *((u32 *)(*data));
	*data += 4;
	*check_resp = !!(**data & BIT(0));
	*data += 4;
	offset = *((u32 *)(*data));
	*data += 4;

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "About to write File-data: %08x bytes to offset %08x\n",
		   len, offset);

	rc = qed_mcp_nvm_write(cdev, QED_PUT_FILE_DATA, offset,
			       (char *)(*data), len);
	*data += len;

	return rc;
}
/* Binary file format [General header] -
 *     /----------------------------------------------------------------------\
 * 0B  |                       QED_NVM_SIGNATURE                              |
 * 4B  |                       Length in bytes                                |
 * 8B  | Highest command in this batchfile |          Reserved                |
 *     \----------------------------------------------------------------------/
 */
static int qed_nvm_flash_image_validate(struct qed_dev *cdev,
					const struct firmware *image,
					const u8 **data)
{
	u32 signature, len;

	/* Check minimum size */
	if (image->size < 12) {
		DP_ERR(cdev, "Image is too short [%08x]\n", (u32)image->size);
		return -EINVAL;
	}

	/* Check signature */
	signature = *((u32 *)(*data));
	if (signature != QED_NVM_SIGNATURE) {
		DP_ERR(cdev, "Wrong signature '%08x'\n", signature);
		return -EINVAL;
	}

	*data += 4;
	/* Validate internal size equals the image-size */
	len = *((u32 *)(*data));
	if (len != image->size) {
		DP_ERR(cdev, "Size mismatch: internal = %08x image = %08x\n",
		       len, (u32)image->size);
		return -EINVAL;
	}

	*data += 4;
	/* Make sure driver familiar with all commands necessary for this */
	if (*((u16 *)(*data)) >= QED_NVM_FLASH_CMD_NVM_MAX) {
		DP_ERR(cdev, "File contains unsupported commands [Need %04x]\n",
		       *(u16 *)(*data));
		return -EINVAL;
	}

	*data += 4;

	return 0;
}
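
/* Top-level NVM flash flow: validate the batch-file header, then execute the
 * embedded commands (file start, file data, nvm change) in order, optionally
 * checking the MCP response after each command.
 */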
static int qed_nvm_flash(struct qed_dev *cdev, const char *name)
{
	const struct firmware *image;
	const u8 *data, *data_end;
	u32 cmd_type;
	int rc;

	rc = request_firmware(&image, name, &cdev->pdev->dev);
	if (rc) {
		DP_ERR(cdev, "Failed to find '%s'\n", name);
		return rc;
	}

	DP_VERBOSE(cdev, NETIF_MSG_DRV,
		   "Flashing '%s' - firmware's data at %p, size is %08x\n",
		   name, image->data, (u32)image->size);
	data = image->data;
	data_end = data + image->size;

	rc = qed_nvm_flash_image_validate(cdev, image, &data);
	if (rc)
		goto exit;

	while (data < data_end) {
		bool check_resp = false;

		/* Parse the actual command */
		cmd_type = *((u32 *)data);
		switch (cmd_type) {
		case QED_NVM_FLASH_CMD_FILE_DATA:
			rc = qed_nvm_flash_image_file_data(cdev, &data,
							   &check_resp);
			break;
		case QED_NVM_FLASH_CMD_FILE_START:
			rc = qed_nvm_flash_image_file_start(cdev, &data,
							    &check_resp);
			break;
		case QED_NVM_FLASH_CMD_NVM_CHANGE:
			rc = qed_nvm_flash_image_access(cdev, &data,
							&check_resp);
			break;
		default:
			DP_ERR(cdev, "Unknown command %08x\n", cmd_type);
			rc = -EINVAL;
			goto exit;
		}

		if (rc) {
			DP_ERR(cdev, "Command %08x failed\n", cmd_type);
			goto exit;
		}

		/* Check response if needed */
		if (check_resp) {
			u32 mcp_response = 0;

			if (qed_mcp_nvm_resp(cdev, (u8 *)&mcp_response)) {
				DP_ERR(cdev, "Failed getting MCP response\n");
				rc = -EINVAL;
				goto exit;
			}

			switch (mcp_response & FW_MSG_CODE_MASK) {
			case FW_MSG_CODE_OK:
			case FW_MSG_CODE_NVM_OK:
			case FW_MSG_CODE_NVM_PUT_FILE_FINISH_OK:
			case FW_MSG_CODE_PHY_OK:
				break;
			default:
				DP_ERR(cdev, "MFW returns error: %08x\n",
				       mcp_response);
				rc = -EINVAL;
				goto exit;
			}
		}
	}

exit:
	release_firmware(image);

	return rc;
}
static int qed_nvm_get_image(struct qed_dev *cdev, enum qed_nvm_images type,
			     u8 *buf, u16 len)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);

	return qed_mcp_get_nvm_image(hwfn, type, buf, len);
}

static int qed_set_coalesce(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
			    void *handle)
{
	return qed_set_queue_coalesce(rx_coal, tx_coal, handle);
}
static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_set_led(hwfn, ptt, mode);

	qed_ptt_release(hwfn, ptt);

	return status;
}
static int qed_update_wol(struct qed_dev *cdev, bool enabled)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_ov_update_wol(hwfn, ptt, enabled ? QED_OV_WOL_ENABLED
				   : QED_OV_WOL_DISABLED);
	if (rc)
		goto out;
	rc = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return rc;
}
static int qed_update_drv_state(struct qed_dev *cdev, bool active)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_driver_state(hwfn, ptt, active ?
						QED_OV_DRIVER_STATE_ACTIVE :
						QED_OV_DRIVER_STATE_DISABLED);

	qed_ptt_release(hwfn, ptt);

	return status;
}
static int qed_update_mac(struct qed_dev *cdev, u8 *mac)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mac(hwfn, ptt, mac);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}
static int qed_update_mtu(struct qed_dev *cdev, u16 mtu)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_ov_update_mtu(hwfn, ptt, mtu);
	if (status)
		goto out;

	status = qed_mcp_ov_update_current_config(hwfn, ptt, QED_OV_CLIENT_DRV);

out:
	qed_ptt_release(hwfn, ptt);
	return status;
}
static int qed_read_module_eeprom(struct qed_dev *cdev, char *buf,
				  u8 dev_addr, u32 offset, u32 len)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int rc = 0;

	if (IS_VF(cdev))
		return 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	rc = qed_mcp_phy_sfp_read(hwfn, ptt, MFW_PORT(hwfn), dev_addr,
				  offset, len, buf);

	qed_ptt_release(hwfn, ptt);

	return rc;
}
static struct qed_selftest_ops qed_selftest_ops_pass = {
	.selftest_memory = &qed_selftest_memory,
	.selftest_interrupt = &qed_selftest_interrupt,
	.selftest_register = &qed_selftest_register,
	.selftest_clock = &qed_selftest_clock,
	.selftest_nvram = &qed_selftest_nvram,
};

const struct qed_common_ops qed_common_ops_pass = {
	.selftest = &qed_selftest_ops_pass,
	.probe = &qed_probe,
	.remove = &qed_remove,
	.set_power_state = &qed_set_power_state,
	.set_name = &qed_set_name,
	.update_pf_params = &qed_update_pf_params,
	.slowpath_start = &qed_slowpath_start,
	.slowpath_stop = &qed_slowpath_stop,
	.set_fp_int = &qed_set_int_fp,
	.get_fp_int = &qed_get_int_fp,
	.sb_init = &qed_sb_init,
	.sb_release = &qed_sb_release,
	.simd_handler_config = &qed_simd_handler_config,
	.simd_handler_clean = &qed_simd_handler_clean,
	.dbg_grc = &qed_dbg_grc,
	.dbg_grc_size = &qed_dbg_grc_size,
	.can_link_change = &qed_can_link_change,
	.set_link = &qed_set_link,
	.get_link = &qed_get_current_link,
	.drain = &qed_drain,
	.update_msglvl = &qed_init_dp,
	.dbg_all_data = &qed_dbg_all_data,
	.dbg_all_data_size = &qed_dbg_all_data_size,
	.chain_alloc = &qed_chain_alloc,
	.chain_free = &qed_chain_free,
	.nvm_flash = &qed_nvm_flash,
	.nvm_get_image = &qed_nvm_get_image,
	.set_coalesce = &qed_set_coalesce,
	.set_led = &qed_set_led,
	.update_drv_state = &qed_update_drv_state,
	.update_mac = &qed_update_mac,
	.update_mtu = &qed_update_mtu,
	.update_wol = &qed_update_wol,
	.read_module_eeprom = &qed_read_module_eeprom,
};
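
/* Collect per-protocol statistics (L2, FCoE, iSCSI) into the union consumed
 * by the management-firmware interface.
 */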
void qed_get_protocol_stats(struct qed_dev *cdev,
			    enum qed_mcp_protocol_type type,
			    union qed_mcp_protocol_stats *stats)
{
	struct qed_eth_stats eth_stats;

	memset(stats, 0, sizeof(*stats));

	switch (type) {
	case QED_MCP_LAN_STATS:
		qed_get_vport_stats(cdev, &eth_stats);
		stats->lan_stats.ucast_rx_pkts =
					eth_stats.common.rx_ucast_pkts;
		stats->lan_stats.ucast_tx_pkts =
					eth_stats.common.tx_ucast_pkts;
		stats->lan_stats.fcs_err = -1;
		break;
	case QED_MCP_FCOE_STATS:
		qed_get_protocol_stats_fcoe(cdev, &stats->fcoe_stats);
		break;
	case QED_MCP_ISCSI_STATS:
		qed_get_protocol_stats_iscsi(cdev, &stats->iscsi_stats);
		break;
	default:
		DP_VERBOSE(cdev, QED_MSG_SP,
			   "Invalid protocol type = %d\n", type);
		return;
	}
}
int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
{
	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
		   "Scheduling slowpath task [Flag: %d]\n",
		   QED_SLOWPATH_MFW_TLV_REQ);
	smp_mb__before_atomic();
	set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
	smp_mb__after_atomic();
	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);

	return 0;
}
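
/* Build the generic TLV block reported to the MFW: feature flags and MAC
 * addresses supplied by the protocol driver, plus aggregate rx/tx frame and
 * byte counters taken from the vport statistics.
 */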
static void
qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
{
	struct qed_common_cb_ops *op = cdev->protocol_ops.common;
	struct qed_eth_stats_common *p_common;
	struct qed_generic_tlvs gen_tlvs;
	struct qed_eth_stats stats;
	int i;

	memset(&gen_tlvs, 0, sizeof(gen_tlvs));
	op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);

	if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
		tlv->flags.ipv4_csum_offload = true;
	if (gen_tlvs.feat_flags & QED_TLV_LSO)
		tlv->flags.lso_supported = true;
	tlv->flags.b_set = true;

	for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
		if (is_valid_ether_addr(gen_tlvs.mac[i])) {
			ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
			tlv->mac_set[i] = true;
		}
	}

	qed_get_vport_stats(cdev, &stats);
	p_common = &stats.common;
	tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
			 p_common->rx_bcast_pkts;
	tlv->rx_frames_set = true;
	tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
			p_common->rx_bcast_bytes;
	tlv->rx_bytes_set = true;
	tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
			 p_common->tx_bcast_pkts;
	tlv->tx_frames_set = true;
	tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
			p_common->tx_bcast_bytes;
	tlv->tx_bytes_set = true;
}
int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
			  union qed_mfw_tlv_data *tlv_buf)
{
	struct qed_dev *cdev = hwfn->cdev;
	struct qed_common_cb_ops *ops;

	ops = cdev->protocol_ops.common;
	if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
		DP_NOTICE(hwfn, "Can't collect TLV management info\n");
		return -EINVAL;
	}

	switch (type) {
	case QED_MFW_TLV_GENERIC:
		qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
		break;
	case QED_MFW_TLV_ETH:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
		break;
	case QED_MFW_TLV_FCOE:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
		break;
	case QED_MFW_TLV_ISCSI:
		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);
);