/* QLogic qed NIC Driver
 * Copyright (c) 2015 QLogic Corporation
 *
 * This software is available under the terms of the GNU General Public License
 * (GPL) Version 2, available from the file COPYING in the main directory of
 * this source tree.
 */

#include <linux/stddef.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/version.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/qed/qed_if.h>

#include "qed.h"
#include "qed_dev_api.h"
#include "qed_mcp.h"

static char version[] =
	"QLogic FastLinQ 4xxxx Core Module qed " DRV_MODULE_VERSION "\n";

MODULE_DESCRIPTION("QLogic FastLinQ 4xxxx Core Module");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

#define FW_FILE_VERSION				\
	__stringify(FW_MAJOR_VERSION) "."	\
	__stringify(FW_MINOR_VERSION) "."	\
	__stringify(FW_REVISION_VERSION) "."	\
	__stringify(FW_ENGINEERING_VERSION)

#define QED_FW_FILE_NAME	\
	"qed/qed_init_values_zipped-" FW_FILE_VERSION ".bin"

MODULE_FIRMWARE(QED_FW_FILE_NAME);

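/*
 * Illustration (hypothetical version numbers): with FW_MAJOR_VERSION 8,
 * FW_MINOR_VERSION 4, FW_REVISION_VERSION 2 and FW_ENGINEERING_VERSION 0,
 * FW_FILE_VERSION expands to "8.4.2.0" and QED_FW_FILE_NAME to
 * "qed/qed_init_values_zipped-8.4.2.0.bin", which request_firmware()
 * looks up under /lib/firmware.
 */
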
static int __init qed_init(void)
{
	pr_notice("qed_init called\n");
	pr_info("%s", version);

	return 0;
}

static void __exit qed_cleanup(void)
{
	pr_notice("qed_cleanup called\n");
}

module_init(qed_init);
module_exit(qed_cleanup);

/* Check if the DMA controller on the machine can properly handle the DMA
 * addressing required by the device.
 */
static int qed_set_coherency_mask(struct qed_dev *cdev)
{
	struct device *dev = &cdev->pdev->dev;

	if (dma_set_mask(dev, DMA_BIT_MASK(64)) == 0) {
		if (dma_set_coherent_mask(dev, DMA_BIT_MASK(64)) != 0) {
			DP_NOTICE(cdev,
				  "Can't request 64-bit consistent allocations\n");
			return -EIO;
		}
	} else if (dma_set_mask(dev, DMA_BIT_MASK(32)) != 0) {
		DP_NOTICE(cdev, "Can't request 64b/32b DMA addresses\n");
		return -EIO;
	}

	return 0;
}

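/*
 * Behaviour sketch: on a host that can address 64-bit DMA, both the
 * streaming and the coherent masks are set to DMA_BIT_MASK(64). If the
 * 64-bit streaming mask is rejected, the code falls back to a 32-bit
 * mask, so all DMA buffers would then be allocated below 4 GB.
 */
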
static void qed_free_pci(struct qed_dev *cdev)
{
	struct pci_dev *pdev = cdev->pdev;

	if (cdev->doorbells)
		iounmap(cdev->doorbells);

	if (cdev->regview)
		iounmap(cdev->regview);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
}

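/*
 * Note: pci_enable_device()/pci_disable_device() keep a reference count
 * in pdev->enable_cnt, so the regions are released only when this driver
 * is the last enabler (enable_cnt == 1), mirroring the conditional
 * pci_request_regions() call in qed_init_pci() below.
 */
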
#define PCI_REVISION_ID_ERROR_VAL	0xff

/* Performs PCI initializations as well as initializing PCI-related parameters
 * in the device structure. Returns 0 in case of success.
 */
static int qed_init_pci(struct qed_dev *cdev,
			struct pci_dev *pdev)
{
	u8 rev_id;
	int rc;

	cdev->pdev = pdev;

	rc = pci_enable_device(pdev);
	if (rc) {
		DP_NOTICE(cdev, "Cannot enable PCI device\n");
		goto err0;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #0\n");
		rc = -EIO;
		goto err1;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		DP_NOTICE(cdev, "No memory region found in bar #2\n");
		rc = -EIO;
		goto err1;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, "qed");
		if (rc) {
			DP_NOTICE(cdev,
				  "Failed to request PCI memory resources\n");
			goto err1;
		}
	}

	pci_set_master(pdev);
	pci_save_state(pdev);

	pci_read_config_byte(pdev, PCI_REVISION_ID, &rev_id);
	if (rev_id == PCI_REVISION_ID_ERROR_VAL) {
		DP_NOTICE(cdev,
			  "Detected PCI device error [rev_id 0x%x]. Probably due to prior indication. Aborting.\n",
			  rev_id);
		rc = -ENODEV;
		goto err2;
	}

	if (!pci_is_pcie(pdev)) {
		DP_NOTICE(cdev, "The bus is not PCI Express\n");
		rc = -EIO;
		goto err2;
	}

	cdev->pci_params.pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (cdev->pci_params.pm_cap == 0)
		DP_NOTICE(cdev, "Cannot find power management capability\n");

	rc = qed_set_coherency_mask(cdev);
	if (rc)
		goto err2;

	cdev->pci_params.mem_start = pci_resource_start(pdev, 0);
	cdev->pci_params.mem_end = pci_resource_end(pdev, 0);
	cdev->pci_params.irq = pdev->irq;

	cdev->regview = pci_ioremap_bar(pdev, 0);
	if (!cdev->regview) {
		DP_NOTICE(cdev, "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err2;
	}

	cdev->db_phys_addr = pci_resource_start(cdev->pdev, 2);
	cdev->db_size = pci_resource_len(cdev->pdev, 2);
	cdev->doorbells = ioremap_wc(cdev->db_phys_addr, cdev->db_size);
	if (!cdev->doorbells) {
		DP_NOTICE(cdev, "Cannot map doorbell space\n");
		rc = -ENOMEM;
		goto err2;
	}

	return 0;

err2:
	pci_release_regions(pdev);
err1:
	pci_disable_device(pdev);
err0:
	return rc;
}

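/*
 * BAR usage as mapped above: BAR 0 holds the register space (regview,
 * mapped uncached via pci_ioremap_bar()), while BAR 2 holds the doorbell
 * area, mapped write-combined with ioremap_wc() so that posted doorbell
 * writes can be batched by the CPU.
 */
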
int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info)
{
	struct qed_ptt *ptt;

	memset(dev_info, 0, sizeof(struct qed_dev_info));

	dev_info->num_hwfns = cdev->num_hwfns;
	dev_info->pci_mem_start = cdev->pci_params.mem_start;
	dev_info->pci_mem_end = cdev->pci_params.mem_end;
	dev_info->pci_irq = cdev->pci_params.irq;
	dev_info->is_mf_default = IS_MF_DEFAULT(&cdev->hwfns[0]);
	ether_addr_copy(dev_info->hw_mac, cdev->hwfns[0].hw_info.hw_mac_addr);

	dev_info->fw_major = FW_MAJOR_VERSION;
	dev_info->fw_minor = FW_MINOR_VERSION;
	dev_info->fw_rev = FW_REVISION_VERSION;
	dev_info->fw_eng = FW_ENGINEERING_VERSION;
	dev_info->mf_mode = cdev->mf_mode;

	qed_mcp_get_mfw_ver(cdev, &dev_info->mfw_rev);

	ptt = qed_ptt_acquire(QED_LEADING_HWFN(cdev));
	if (ptt) {
		qed_mcp_get_flash_size(QED_LEADING_HWFN(cdev), ptt,
				       &dev_info->flash_size);

		qed_ptt_release(QED_LEADING_HWFN(cdev), ptt);
	}

	return 0;
}

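/*
 * The flash size is read through a PTT window (a per-function window
 * into the device's internal address space); if qed_ptt_acquire() fails,
 * dev_info->flash_size is simply left at the zero value set by the
 * earlier memset().
 */
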
static void qed_free_cdev(struct qed_dev *cdev)
{
	kfree((void *)cdev);
}

static struct qed_dev *qed_alloc_cdev(struct pci_dev *pdev)
{
	struct qed_dev *cdev;

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return cdev;

	qed_init_struct(cdev);

	return cdev;
}

/* Sets the requested power state */
static int qed_set_power_state(struct qed_dev *cdev,
			       pci_power_t state)
{
	if (!cdev)
		return -ENODEV;

	DP_VERBOSE(cdev, NETIF_MSG_DRV, "Omitting Power state change\n");
	return 0;
}

static struct qed_dev *qed_probe(struct pci_dev *pdev,
				 enum qed_protocol protocol,
				 u32 dp_module,
				 u8 dp_level)
{
	struct qed_dev *cdev;
	int rc;

	cdev = qed_alloc_cdev(pdev);
	if (!cdev)
		goto err0;

	cdev->protocol = protocol;

	qed_init_dp(cdev, dp_module, dp_level);

	rc = qed_init_pci(cdev, pdev);
	if (rc) {
		DP_ERR(cdev, "init pci failed\n");
		goto err1;
	}
	DP_INFO(cdev, "PCI init completed successfully\n");

	rc = qed_hw_prepare(cdev, QED_PCI_DEFAULT);
	if (rc) {
		DP_ERR(cdev, "hw prepare failed\n");
		goto err2;
	}

	DP_INFO(cdev, "qed_probe completed successfully\n");

	return cdev;

err2:
	qed_free_pci(cdev);
err1:
	qed_free_cdev(cdev);
err0:
	return NULL;
}

static void qed_remove(struct qed_dev *cdev)
{
	if (!cdev)
		return;

	qed_hw_remove(cdev);
	qed_free_pci(cdev);

	qed_set_power_state(cdev, PCI_D3hot);

	qed_free_cdev(cdev);
}

static void qed_disable_msix(struct qed_dev *cdev)
{
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		pci_disable_msix(cdev->pdev);
		kfree(cdev->int_params.msix_table);
	} else if (cdev->int_params.out.int_mode == QED_INT_MODE_MSI) {
		pci_disable_msi(cdev->pdev);
	}

	memset(&cdev->int_params.out, 0, sizeof(struct qed_int_param));
}

static int qed_enable_msix(struct qed_dev *cdev,
			   struct qed_int_params *int_params)
{
	int rc, cnt, i;

	cnt = int_params->in.num_vectors;

	for (i = 0; i < cnt; i++)
		int_params->msix_table[i].entry = i;

	rc = pci_enable_msix_range(cdev->pdev, int_params->msix_table,
				   int_params->in.min_msix_cnt, cnt);
	if (rc < cnt && rc >= int_params->in.min_msix_cnt &&
	    (rc % cdev->num_hwfns)) {
		pci_disable_msix(cdev->pdev);

		/* If fastpath is initialized, we need at least one interrupt
		 * per hwfn [and the slow path interrupts]. New requested number
		 * should be a multiple of the number of hwfns.
		 */
		cnt = (rc / cdev->num_hwfns) * cdev->num_hwfns;
		DP_NOTICE(cdev,
			  "Trying to enable MSI-X with less vectors (%d out of %d)\n",
			  cnt, int_params->in.num_vectors);
		rc = pci_enable_msix_exact(cdev->pdev,
					   int_params->msix_table, cnt);
		if (!rc)
			rc = cnt;
	}

	if (rc > 0) {
		/* MSI-X configuration was achieved */
		int_params->out.int_mode = QED_INT_MODE_MSIX;
		int_params->out.num_vectors = rc;
		rc = 0;
	} else {
		DP_NOTICE(cdev,
			  "Failed to enable MSI-X [Requested %d vectors][rc %d]\n",
			  cnt, rc);
	}

	return rc;
}

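/*
 * Worked example (hypothetical numbers): with num_hwfns = 2, a request
 * for 17 vectors that pci_enable_msix_range() grants only as rc = 15 is
 * rounded down to cnt = (15 / 2) * 2 = 14 so that each hwfn gets the
 * same share, and pci_enable_msix_exact() is then retried with exactly
 * 14 vectors.
 */
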
/* This function outputs the int mode and the number of enabled MSI-X vectors */
static int qed_set_int_mode(struct qed_dev *cdev, bool force_mode)
{
	struct qed_int_params *int_params = &cdev->int_params;
	struct msix_entry *tbl;
	int rc = 0, cnt;

	switch (int_params->in.int_mode) {
	case QED_INT_MODE_MSIX:
		/* Allocate MSIX table */
		cnt = int_params->in.num_vectors;
		int_params->msix_table = kcalloc(cnt, sizeof(*tbl), GFP_KERNEL);
		if (!int_params->msix_table) {
			rc = -ENOMEM;
			goto out;
		}

		/* Enable MSIX */
		rc = qed_enable_msix(cdev, int_params);
		if (!rc)
			goto out;

		DP_NOTICE(cdev, "Failed to enable MSI-X\n");
		kfree(int_params->msix_table);
		if (force_mode)
			goto out;
		/* Fallthrough */

	case QED_INT_MODE_MSI:
		rc = pci_enable_msi(cdev->pdev);
		if (!rc) {
			int_params->out.int_mode = QED_INT_MODE_MSI;
			goto out;
		}

		DP_NOTICE(cdev, "Failed to enable MSI\n");
		if (force_mode)
			goto out;
		/* Fallthrough */

	case QED_INT_MODE_INTA:
		int_params->out.int_mode = QED_INT_MODE_INTA;
		rc = 0;
		goto out;
	default:
		DP_NOTICE(cdev, "Unknown int_mode value %d\n",
			  int_params->in.int_mode);
		rc = -EINVAL;
	}

out:
	cdev->int_coalescing_mode = QED_COAL_MODE_ENABLE;

	return rc;
}

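/*
 * The switch above degrades gracefully: a failed MSI-X setup falls
 * through to MSI, and a failed MSI setup falls through to legacy INTA,
 * unless the caller passed force_mode to insist on the requested mode.
 */
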
static void qed_simd_handler_config(struct qed_dev *cdev, void *token,
				    int index, void (*handler)(void *))
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	hwfn->simd_proto_handler[relative_idx].func = handler;
	hwfn->simd_proto_handler[relative_idx].token = token;
}

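/*
 * Index mapping example: the global SIMD index interleaves engines, so
 * with num_hwfns = 2, indices 0, 2, 4 land in hwfn 0 at relative slots
 * 0, 1, 2 and indices 1, 3, 5 land in hwfn 1 at the same slots.
 */
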
static void qed_simd_handler_clean(struct qed_dev *cdev, int index)
{
	struct qed_hwfn *hwfn = &cdev->hwfns[index % cdev->num_hwfns];
	int relative_idx = index / cdev->num_hwfns;

	memset(&hwfn->simd_proto_handler[relative_idx], 0,
	       sizeof(struct qed_simd_fp_handler));
}

static irqreturn_t qed_msix_sp_int(int irq, void *tasklet)
{
	tasklet_schedule((struct tasklet_struct *)tasklet);

	return IRQ_HANDLED;
}

static irqreturn_t qed_single_int(int irq, void *dev_instance)
{
	struct qed_dev *cdev = (struct qed_dev *)dev_instance;
	struct qed_hwfn *hwfn;
	irqreturn_t rc = IRQ_NONE;
	u64 status;
	int i, j;

	for (i = 0; i < cdev->num_hwfns; i++) {
		status = qed_int_igu_read_sisr_reg(&cdev->hwfns[i]);

		if (!status)
			continue;

		hwfn = &cdev->hwfns[i];

		/* Slowpath interrupt */
		if (unlikely(status & 0x1)) {
			tasklet_schedule(hwfn->sp_dpc);
			status &= ~0x1;
			rc = IRQ_HANDLED;
		}

		/* Fastpath interrupts */
		for (j = 0; j < 64; j++) {
			if ((0x2ULL << j) & status) {
				hwfn->simd_proto_handler[j].func(
					hwfn->simd_proto_handler[j].token);
				status &= ~(0x2ULL << j);
				rc = IRQ_HANDLED;
			}
		}

		if (unlikely(status))
			DP_VERBOSE(hwfn, NETIF_MSG_INTR,
				   "got an unknown interrupt status 0x%llx\n",
				   status);
	}

	return rc;
}

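/*
 * IGU status layout as decoded above: bit 0 signals the slowpath (the
 * DPC tasklet), while (0x2ULL << j) tests the fastpath bits above it,
 * each dispatched to the SIMD handler registered at slot j. Any bits
 * still set afterwards are reported as unknown.
 */
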
int qed_slowpath_irq_req(struct qed_hwfn *hwfn)
{
	struct qed_dev *cdev = hwfn->cdev;
	int rc = 0;
	u8 id;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		id = hwfn->my_id;
		snprintf(hwfn->name, NAME_SIZE, "sp-%d-%02x:%02x.%02x",
			 id, cdev->pdev->bus->number,
			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
		rc = request_irq(cdev->int_params.msix_table[id].vector,
				 qed_msix_sp_int, 0, hwfn->name, hwfn->sp_dpc);
		if (!rc)
			DP_VERBOSE(hwfn, (NETIF_MSG_INTR | QED_MSG_SP),
				   "Requested slowpath MSI-X\n");
	} else {
		unsigned long flags = 0;

		snprintf(cdev->name, NAME_SIZE, "%02x:%02x.%02x",
			 cdev->pdev->bus->number, PCI_SLOT(cdev->pdev->devfn),
			 PCI_FUNC(cdev->pdev->devfn));

		if (cdev->int_params.out.int_mode == QED_INT_MODE_INTA)
			flags |= IRQF_SHARED;

		rc = request_irq(cdev->pdev->irq, qed_single_int,
				 flags, cdev->name, cdev);
	}

	return rc;
}

static void qed_slowpath_irq_free(struct qed_dev *cdev)
{
	int i;

	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		for_each_hwfn(cdev, i) {
			if (!cdev->hwfns[i].b_int_requested)
				break;
			synchronize_irq(cdev->int_params.msix_table[i].vector);
			free_irq(cdev->int_params.msix_table[i].vector,
				 cdev->hwfns[i].sp_dpc);
		}
	} else {
		if (QED_LEADING_HWFN(cdev)->b_int_requested)
			free_irq(cdev->pdev->irq, cdev);
	}
	qed_int_disable_post_isr_release(cdev);
}

static int qed_nic_stop(struct qed_dev *cdev)
{
	int i, rc;

	rc = qed_hw_stop(cdev);

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (p_hwfn->b_sp_dpc_enabled) {
			tasklet_disable(p_hwfn->sp_dpc);
			p_hwfn->b_sp_dpc_enabled = false;
			DP_VERBOSE(cdev, NETIF_MSG_IFDOWN,
				   "Disabled sp tasklet [hwfn %d] at %p\n",
				   i, p_hwfn->sp_dpc);
		}
	}

	return rc;
}

static int qed_nic_reset(struct qed_dev *cdev)
{
	int rc;

	rc = qed_hw_reset(cdev);
	if (rc)
		return rc;

	qed_resc_free(cdev);

	return 0;
}

static int qed_nic_setup(struct qed_dev *cdev)
{
	int rc;

	rc = qed_resc_alloc(cdev);
	if (rc)
		return rc;

	DP_INFO(cdev, "Allocated qed resources\n");

	qed_resc_setup(cdev);

	return rc;
}

static int qed_set_int_fp(struct qed_dev *cdev, u16 cnt)
{
	int limit = 0;

	/* Mark the fastpath as free/used */
	cdev->int_params.fp_initialized = cnt ? true : false;

	if (cdev->int_params.out.int_mode != QED_INT_MODE_MSIX)
		limit = cdev->num_hwfns * 63;
	else if (cdev->int_params.fp_msix_cnt)
		limit = cdev->int_params.fp_msix_cnt;

	if (!limit)
		return -ENOMEM;

	return min_t(int, cnt, limit);
}

static int qed_get_int_fp(struct qed_dev *cdev, struct qed_int_info *info)
{
	memset(info, 0, sizeof(struct qed_int_info));

	if (!cdev->int_params.fp_initialized) {
		DP_INFO(cdev,
			"Protocol driver requested interrupt information, but its support is not yet configured\n");
		return -EINVAL;
	}

	/* Need to expose only MSI-X information; Single IRQ is handled solely
	 * by qed.
	 */
	if (cdev->int_params.out.int_mode == QED_INT_MODE_MSIX) {
		int msix_base = cdev->int_params.fp_msix_base;

		info->msix_cnt = cdev->int_params.fp_msix_cnt;
		info->msix = &cdev->int_params.msix_table[msix_base];
	}

	return 0;
}

static int qed_slowpath_setup_int(struct qed_dev *cdev,
				  enum qed_int_mode int_mode)
{
	struct qed_sb_cnt_info sb_cnt_info;
	int rc, i;

	memset(&cdev->int_params, 0, sizeof(struct qed_int_params));

	cdev->int_params.in.int_mode = int_mode;
	for_each_hwfn(cdev, i) {
		memset(&sb_cnt_info, 0, sizeof(sb_cnt_info));
		qed_int_get_num_sbs(&cdev->hwfns[i], &sb_cnt_info);
		cdev->int_params.in.num_vectors += sb_cnt_info.sb_cnt;
		cdev->int_params.in.num_vectors++; /* slowpath */
	}

	/* We want a minimum of one slowpath and one fastpath vector per hwfn */
	cdev->int_params.in.min_msix_cnt = cdev->num_hwfns * 2;

	rc = qed_set_int_mode(cdev, false);
	if (rc) {
		DP_ERR(cdev, "qed_slowpath_setup_int ERR\n");
		return rc;
	}

	cdev->int_params.fp_msix_base = cdev->num_hwfns;
	cdev->int_params.fp_msix_cnt = cdev->int_params.out.num_vectors -
				       cdev->num_hwfns;

	return 0;
}

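/*
 * Sizing example (hypothetical counts): with two hwfns reporting eight
 * status blocks each, num_vectors = (8 + 1) * 2 = 18 (one extra slowpath
 * vector per hwfn), min_msix_cnt = 4, and after a full grant the
 * fastpath table starts at fp_msix_base = 2 with fp_msix_cnt = 16.
 */
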
u32 qed_unzip_data(struct qed_hwfn *p_hwfn, u32 input_len,
		   u8 *input_buf, u32 max_size, u8 *unzip_buf)
{
	int rc;

	p_hwfn->stream->next_in = input_buf;
	p_hwfn->stream->avail_in = input_len;
	p_hwfn->stream->next_out = unzip_buf;
	p_hwfn->stream->avail_out = max_size;

	rc = zlib_inflateInit2(p_hwfn->stream, MAX_WBITS);

	if (rc != Z_OK) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "zlib init failed, rc = %d\n",
			   rc);
		return 0;
	}

	rc = zlib_inflate(p_hwfn->stream, Z_FINISH);
	zlib_inflateEnd(p_hwfn->stream);

	if (rc != Z_OK && rc != Z_STREAM_END) {
		DP_VERBOSE(p_hwfn, NETIF_MSG_DRV, "FW unzip error: %s, rc=%d\n",
			   p_hwfn->stream->msg, rc);
		return 0;
	}

	return p_hwfn->stream->total_out / 4;
}

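/*
 * Note: zlib_inflateInit2() is called with MAX_WBITS, i.e. the firmware
 * image is expected to carry a regular zlib header, and the return value
 * is the decompressed size in 32-bit dwords (total_out / 4), with 0
 * doubling as the error indication.
 */
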
static int qed_alloc_stream_mem(struct qed_dev *cdev)
{
	int i;
	void *workspace;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->stream = kzalloc(sizeof(*p_hwfn->stream), GFP_KERNEL);
		if (!p_hwfn->stream)
			return -ENOMEM;

		workspace = vzalloc(zlib_inflate_workspacesize());
		if (!workspace)
			return -ENOMEM;
		p_hwfn->stream->workspace = workspace;
	}

	return 0;
}

static void qed_free_stream_mem(struct qed_dev *cdev)
{
	int i;

	for_each_hwfn(cdev, i) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		if (!p_hwfn->stream)
			return;

		vfree(p_hwfn->stream->workspace);
		kfree(p_hwfn->stream);
	}
}

static void qed_update_pf_params(struct qed_dev *cdev,
				 struct qed_pf_params *params)
{
	int i;

	for (i = 0; i < cdev->num_hwfns; i++) {
		struct qed_hwfn *p_hwfn = &cdev->hwfns[i];

		p_hwfn->pf_params = *params;
	}
}

static int qed_slowpath_start(struct qed_dev *cdev,
			      struct qed_slowpath_params *params)
{
	struct qed_mcp_drv_version drv_version;
	const u8 *data = NULL;
	struct qed_hwfn *hwfn;
	int rc;

	rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
			      &cdev->pdev->dev);
	if (rc) {
		DP_NOTICE(cdev,
			  "Failed to find fw file - /lib/firmware/%s\n",
			  QED_FW_FILE_NAME);
		goto err;
	}

	rc = qed_nic_setup(cdev);
	if (rc)
		goto err;

	rc = qed_slowpath_setup_int(cdev, params->int_mode);
	if (rc)
		goto err1;

	/* Allocate stream for unzipping */
	rc = qed_alloc_stream_mem(cdev);
	if (rc) {
		DP_NOTICE(cdev, "Failed to allocate stream memory\n");
		goto err2;
	}

	/* Start the slowpath */
	data = cdev->firmware->data;

	rc = qed_hw_init(cdev, true, cdev->int_params.out.int_mode,
			 true, data);
	if (rc)
		goto err2;

	DP_INFO(cdev,
		"HW initialization and function start completed successfully\n");

	hwfn = QED_LEADING_HWFN(cdev);
	drv_version.version = (params->drv_major << 24) |
			      (params->drv_minor << 16) |
			      (params->drv_rev << 8) |
			      (params->drv_eng);
	strlcpy(drv_version.name, params->name,
		MCP_DRV_VER_STR_SIZE - 4);
	rc = qed_mcp_send_drv_version(hwfn, hwfn->p_main_ptt,
				      &drv_version);
	if (rc) {
		DP_NOTICE(cdev, "Failed sending drv version command\n");
		return rc;
	}

	qed_reset_vport_stats(cdev);

	return 0;

err2:
	qed_hw_timers_stop_all(cdev);
	qed_slowpath_irq_free(cdev);
	qed_free_stream_mem(cdev);
	qed_disable_msix(cdev);
err1:
	qed_resc_free(cdev);
err:
	release_firmware(cdev->firmware);

	return rc;
}

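/*
 * Version packing example (hypothetical numbers): a driver version of
 * 8.7.3.0 is packed above as (8 << 24) | (7 << 16) | (3 << 8) | 0 =
 * 0x08070300 before being sent to the management FW.
 */
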
static int qed_slowpath_stop(struct qed_dev *cdev)
{
	if (!cdev)
		return -ENODEV;

	qed_free_stream_mem(cdev);

	qed_nic_stop(cdev);
	qed_slowpath_irq_free(cdev);

	qed_disable_msix(cdev);
	qed_nic_reset(cdev);

	release_firmware(cdev->firmware);

	return 0;
}

static void qed_set_id(struct qed_dev *cdev, char name[NAME_SIZE],
		       char ver_str[VER_SIZE])
{
	int i;

	memcpy(cdev->name, name, NAME_SIZE);
	for_each_hwfn(cdev, i)
		snprintf(cdev->hwfns[i].name, NAME_SIZE, "%s-%d", name, i);

	memcpy(cdev->ver_str, ver_str, VER_SIZE);
	cdev->drv_type = DRV_ID_DRV_TYPE_LINUX;
}

static u32 qed_sb_init(struct qed_dev *cdev,
		       struct qed_sb_info *sb_info,
		       void *sb_virt_addr,
		       dma_addr_t sb_phy_addr, u16 sb_id,
		       enum qed_sb_type type)
{
	struct qed_hwfn *p_hwfn;
	int hwfn_index;
	u16 rel_sb_id;
	u8 n_hwfns;
	u32 rc;

	/* RoCE uses single engine and CMT uses two engines. When using both
	 * we force only a single engine. Storage uses only engine 0 too.
	 */
	if (type == QED_SB_TYPE_L2_QUEUE)
		n_hwfns = cdev->num_hwfns;
	else
		n_hwfns = 1;

	hwfn_index = sb_id % n_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / n_hwfns;

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   hwfn_index, rel_sb_id, sb_id);

	rc = qed_int_sb_init(p_hwfn, p_hwfn->p_main_ptt, sb_info,
			     sb_virt_addr, sb_phy_addr, rel_sb_id);

	return rc;
}

static u32 qed_sb_release(struct qed_dev *cdev,
			  struct qed_sb_info *sb_info,
			  u16 sb_id)
{
	struct qed_hwfn *p_hwfn;
	int hwfn_index;
	u16 rel_sb_id;
	u32 rc;

	hwfn_index = sb_id % cdev->num_hwfns;
	p_hwfn = &cdev->hwfns[hwfn_index];
	rel_sb_id = sb_id / cdev->num_hwfns;

	DP_VERBOSE(cdev, NETIF_MSG_INTR,
		   "hwfn [%d] <--[init]-- SB %04x [0x%04x upper]\n",
		   hwfn_index, rel_sb_id, sb_id);

	rc = qed_int_sb_release(p_hwfn, sb_info, rel_sb_id);

	return rc;
}

static int qed_set_link(struct qed_dev *cdev,
			struct qed_link_params *params)
{
	struct qed_hwfn *hwfn;
	struct qed_mcp_link_params *link_params;
	struct qed_ptt *ptt;
	int rc;

	if (!cdev)
		return -ENODEV;

	/* The link should be set only once per PF */
	hwfn = &cdev->hwfns[0];

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EBUSY;

	link_params = qed_mcp_get_link_params(hwfn);
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_AUTONEG)
		link_params->speed.autoneg = params->autoneg;
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS) {
		link_params->speed.advertised_speeds = 0;
		if ((params->adv_speeds & SUPPORTED_1000baseT_Half) ||
		    (params->adv_speeds & SUPPORTED_1000baseT_Full))
			link_params->speed.advertised_speeds |=
				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G;
		if (params->adv_speeds & SUPPORTED_10000baseKR_Full)
			link_params->speed.advertised_speeds |=
				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
		if (params->adv_speeds & SUPPORTED_40000baseLR4_Full)
			link_params->speed.advertised_speeds |=
				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G;
		if (params->adv_speeds & 0)
			link_params->speed.advertised_speeds |=
				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G;
		if (params->adv_speeds & 0)
			link_params->speed.advertised_speeds |=
				NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G;
	}
	if (params->override_flags & QED_LINK_OVERRIDE_SPEED_FORCED_SPEED)
		link_params->speed.forced_speed = params->forced_speed;

	rc = qed_mcp_set_link(hwfn, ptt, params->link_up);

	qed_ptt_release(hwfn, ptt);

	return rc;
}

static int qed_get_port_type(u32 media_type)
{
	int port_type;

	switch (media_type) {
	case MEDIA_SFPP_10G_FIBER:
	case MEDIA_SFP_1G_FIBER:
	case MEDIA_XFP_FIBER:
		port_type = PORT_FIBRE;
		break;
	case MEDIA_DA_TWINAX:
		port_type = PORT_TP;
		break;
	case MEDIA_NOT_PRESENT:
		port_type = PORT_NONE;
		break;
	case MEDIA_UNSPECIFIED:
	default:
		port_type = PORT_OTHER;
		break;
	}
	return port_type;
}

static void qed_fill_link(struct qed_hwfn *hwfn,
			  struct qed_link_output *if_link)
{
	struct qed_mcp_link_params params;
	struct qed_mcp_link_state link;
	struct qed_mcp_link_capabilities link_caps;
	u32 media_type;

	memset(if_link, 0, sizeof(*if_link));

	/* Prepare source inputs */
	memcpy(&params, qed_mcp_get_link_params(hwfn), sizeof(params));
	memcpy(&link, qed_mcp_get_link_state(hwfn), sizeof(link));
	memcpy(&link_caps, qed_mcp_get_link_capabilities(hwfn),
	       sizeof(link_caps));

	/* Set the link parameters to pass to protocol driver */
	if (link.link_up)
		if_link->link_up = true;

	/* TODO - at the moment assume supported and advertised speed equal */
	if_link->supported_caps = SUPPORTED_FIBRE;
	if (params.speed.autoneg)
		if_link->supported_caps |= SUPPORTED_Autoneg;
	if (params.pause.autoneg ||
	    (params.pause.forced_rx && params.pause.forced_tx))
		if_link->supported_caps |= SUPPORTED_Asym_Pause;
	if (params.pause.autoneg || params.pause.forced_rx ||
	    params.pause.forced_tx)
		if_link->supported_caps |= SUPPORTED_Pause;

	if_link->advertised_caps = if_link->supported_caps;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		if_link->advertised_caps |= SUPPORTED_1000baseT_Half |
					    SUPPORTED_1000baseT_Full;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		if_link->advertised_caps |= SUPPORTED_10000baseKR_Full;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		if_link->advertised_caps |= SUPPORTED_40000baseLR4_Full;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		if_link->advertised_caps |= 0;
	if (params.speed.advertised_speeds &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G)
		if_link->advertised_caps |= 0;

	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G)
		if_link->supported_caps |= SUPPORTED_1000baseT_Half |
					   SUPPORTED_1000baseT_Full;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G)
		if_link->supported_caps |= SUPPORTED_10000baseKR_Full;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_40G)
		if_link->supported_caps |= SUPPORTED_40000baseLR4_Full;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_50G)
		if_link->supported_caps |= 0;
	if (link_caps.speed_capabilities &
	    NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_100G)
		if_link->supported_caps |= 0;

	if (link.link_up)
		if_link->speed = link.speed;

	/* TODO - fill duplex properly */
	if_link->duplex = DUPLEX_FULL;
	qed_mcp_get_media_type(hwfn->cdev, &media_type);
	if_link->port = qed_get_port_type(media_type);

	if_link->autoneg = params.speed.autoneg;

	if (params.pause.autoneg)
		if_link->pause_config |= QED_LINK_PAUSE_AUTONEG_ENABLE;
	if (params.pause.forced_rx)
		if_link->pause_config |= QED_LINK_PAUSE_RX_ENABLE;
	if (params.pause.forced_tx)
		if_link->pause_config |= QED_LINK_PAUSE_TX_ENABLE;

	/* Link partner capabilities */
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_HD)
		if_link->lp_caps |= SUPPORTED_1000baseT_Half;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_1G_FD)
		if_link->lp_caps |= SUPPORTED_1000baseT_Full;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_10G)
		if_link->lp_caps |= SUPPORTED_10000baseKR_Full;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_40G)
		if_link->lp_caps |= SUPPORTED_40000baseLR4_Full;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_50G)
		if_link->lp_caps |= 0;
	if (link.partner_adv_speed & QED_LINK_PARTNER_SPEED_100G)
		if_link->lp_caps |= 0;

	if (link.an_complete)
		if_link->lp_caps |= SUPPORTED_Autoneg;

	if (link.partner_adv_pause)
		if_link->lp_caps |= SUPPORTED_Pause;
	if (link.partner_adv_pause == QED_LINK_PARTNER_ASYMMETRIC_PAUSE ||
	    link.partner_adv_pause == QED_LINK_PARTNER_BOTH_PAUSE)
		if_link->lp_caps |= SUPPORTED_Asym_Pause;
}

static void qed_get_current_link(struct qed_dev *cdev,
				 struct qed_link_output *if_link)
{
	qed_fill_link(&cdev->hwfns[0], if_link);
}

void qed_link_update(struct qed_hwfn *hwfn)
{
	void *cookie = hwfn->cdev->ops_cookie;
	struct qed_common_cb_ops *op = hwfn->cdev->protocol_ops.common;
	struct qed_link_output if_link;

	qed_fill_link(hwfn, &if_link);

	if (IS_LEAD_HWFN(hwfn) && cookie)
		op->link_update(cookie, &if_link);
}

static int qed_drain(struct qed_dev *cdev)
{
	struct qed_hwfn *hwfn;
	struct qed_ptt *ptt;
	int i, rc;

	for_each_hwfn(cdev, i) {
		hwfn = &cdev->hwfns[i];
		ptt = qed_ptt_acquire(hwfn);
		if (!ptt) {
			DP_NOTICE(hwfn, "Failed to drain NIG; No PTT\n");
			return -EBUSY;
		}
		rc = qed_mcp_drain(hwfn, ptt);
		if (rc)
			return rc;
		qed_ptt_release(hwfn, ptt);
	}

	return 0;
}

static int qed_set_led(struct qed_dev *cdev, enum qed_led_mode mode)
{
	struct qed_hwfn *hwfn = QED_LEADING_HWFN(cdev);
	struct qed_ptt *ptt;
	int status = 0;

	ptt = qed_ptt_acquire(hwfn);
	if (!ptt)
		return -EAGAIN;

	status = qed_mcp_set_led(hwfn, ptt, mode);

	qed_ptt_release(hwfn, ptt);

	return status;
}

const struct qed_common_ops qed_common_ops_pass = {
	.probe = &qed_probe,
	.remove = &qed_remove,
	.set_power_state = &qed_set_power_state,
	.set_id = &qed_set_id,
	.update_pf_params = &qed_update_pf_params,
	.slowpath_start = &qed_slowpath_start,
	.slowpath_stop = &qed_slowpath_stop,
	.set_fp_int = &qed_set_int_fp,
	.get_fp_int = &qed_get_int_fp,
	.sb_init = &qed_sb_init,
	.sb_release = &qed_sb_release,
	.simd_handler_config = &qed_simd_handler_config,
	.simd_handler_clean = &qed_simd_handler_clean,
	.set_link = &qed_set_link,
	.get_link = &qed_get_current_link,
	.drain = &qed_drain,
	.update_msglvl = &qed_init_dp,
	.chain_alloc = &qed_chain_alloc,
	.chain_free = &qed_chain_free,
	.set_led = &qed_set_led,
};

u32 qed_get_protocol_version(enum qed_protocol protocol)
{
	switch (protocol) {
	case QED_PROTOCOL_ETH:
		return QED_ETH_INTERFACE_VERSION;
	default:
		return 0;
	}
}
EXPORT_SYMBOL(qed_get_protocol_version);