/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */

#ifndef _QED_H
#define _QED_H
#include <linux/types.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/zlib.h>
#include <linux/hashtable.h>
#include <linux/qed/qed_if.h>
#include "qed_debug.h"
#include "qed_hsi.h"
extern const struct qed_common_ops qed_common_ops_pass;
#define QED_MAJOR_VERSION		8
#define QED_MINOR_VERSION		37
#define QED_REVISION_VERSION		0
#define QED_ENGINEERING_VERSION		20
#define QED_VERSION						 \
	((QED_MAJOR_VERSION << 24) | (QED_MINOR_VERSION << 16) | \
	 (QED_REVISION_VERSION << 8) | QED_ENGINEERING_VERSION)
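/* With the values above, QED_VERSION evaluates to
 * (8 << 24) | (37 << 16) | (0 << 8) | 20 == 0x08250014.
 */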
#define STORM_FW_VERSION				       \
	((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \
	 (FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION)
#define MAX_HWFNS_PER_DEVICE	(4)
#define NAME_SIZE		16

#define QED_WFQ_UNIT		100

#define QED_WID_SIZE		(1024)
#define QED_MIN_WIDS		(4)
#define QED_PF_DEMS_SIZE	(4)
enum qed_coalescing_mode {
	QED_COAL_MODE_DISABLE,
	QED_COAL_MODE_ENABLE
};
enum qed_nvm_cmd {
	QED_PUT_FILE_BEGIN = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN,
	QED_PUT_FILE_DATA = DRV_MSG_CODE_NVM_PUT_FILE_DATA,
	QED_NVM_WRITE_NVRAM = DRV_MSG_CODE_NVM_WRITE_NVRAM,
	QED_GET_MCP_NVM_RESP = 0xFFFFFF00
};
struct qed_eth_cb_ops;
union qed_mcp_protocol_stats;
enum qed_mcp_protocol_type;
enum qed_mfw_tlv_type;
union qed_mfw_tlv_data;
/* helpers */
#define QED_MFW_GET_FIELD(name, field) \
	(((name) & (field ## _MASK)) >> (field ## _SHIFT))
#define QED_MFW_SET_FIELD(name, field, value)				       \
	do {								       \
		(name) &= ~(field ## _MASK);				       \
		(name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK));\
	} while (0)
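/* Illustrative use of the MFW field helpers, assuming a hypothetical
 * field FOO with FOO_MASK/FOO_SHIFT defined elsewhere:
 *
 *	QED_MFW_SET_FIELD(reg, FOO, 3);
 *	val = QED_MFW_GET_FIELD(reg, FOO);	(val == 3 if 3 fits FOO_MASK)
 */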
static inline u32 qed_db_addr(u32 cid, u32 DEMS)
{
	u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
		      (cid * QED_PF_DEMS_SIZE);

	return db_addr;
}
static inline u32 qed_db_addr_vf(u32 cid, u32 DEMS)
{
	u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
		      FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);

	return db_addr;
}
#define ALIGNED_TYPE_SIZE(type_name, p_hwfn)				     \
	((sizeof(type_name) + (u32)(1 << (p_hwfn->cdev->cache_shift)) - 1) & \
	 ~((1 << (p_hwfn->cdev->cache_shift)) - 1))
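/* Example: with cache_shift == 6 (64-byte cache lines) and a 100-byte
 * type, ALIGNED_TYPE_SIZE() yields (100 + 63) & ~63 == 128.
 */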
#define for_each_hwfn(cdev, i)	for (i = 0; i < cdev->num_hwfns; i++)
#define D_TRINE(val, cond1, cond2, true1, true2, def) \
	(val == (cond1) ? true1 :		      \
	 (val == (cond2) ? true2 : def))
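/* D_TRINE() is a two-condition selector, e.g. (illustrative)
 * D_TRINE(mode, 1, 2, "one", "two", "other") yields "one" when
 * mode == 1, "two" when mode == 2 and "other" otherwise.
 */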
struct qed_sb_attn_info;
struct qed_sb_sp_info;
enum qed_tunn_mode {
	QED_MODE_L2GENEVE_TUNN,
	QED_MODE_IPGENEVE_TUNN,
	QED_MODE_L2GRE_TUNN,
	QED_MODE_IPGRE_TUNN,
	QED_MODE_VXLAN_TUNN,
};
enum qed_tunn_clss {
	QED_TUNN_CLSS_MAC_VLAN,
	QED_TUNN_CLSS_MAC_VNI,
	QED_TUNN_CLSS_INNER_MAC_VLAN,
	QED_TUNN_CLSS_INNER_MAC_VNI,
	QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE,
	MAX_QED_TUNN_CLSS,
};
struct qed_tunn_update_type {
	bool b_update_mode;
	bool b_mode_enabled;
	enum qed_tunn_clss tun_cls;
};
struct qed_tunn_update_udp_port {
	bool b_update_port;
	u16 port;
};
struct qed_tunnel_info {
	struct qed_tunn_update_type vxlan;
	struct qed_tunn_update_type l2_geneve;
	struct qed_tunn_update_type ip_geneve;
	struct qed_tunn_update_type l2_gre;
	struct qed_tunn_update_type ip_gre;

	struct qed_tunn_update_udp_port vxlan_port;
	struct qed_tunn_update_udp_port geneve_port;

	bool b_update_rx_cls;
	bool b_update_tx_cls;
};
struct qed_tunn_start_params {
	unsigned long tunn_mode;
	u16 vxlan_udp_port;
	u16 geneve_udp_port;
	u8 update_vxlan_udp_port;
	u8 update_geneve_udp_port;
	u8 tunn_clss_vxlan;
	u8 tunn_clss_l2geneve;
	u8 tunn_clss_ipgeneve;
	u8 tunn_clss_l2gre;
	u8 tunn_clss_ipgre;
};
struct qed_tunn_update_params {
	unsigned long tunn_mode_update_mask;
	unsigned long tunn_mode;
	u16 vxlan_udp_port;
	u16 geneve_udp_port;
	u8 update_rx_pf_clss;
	u8 update_tx_pf_clss;
	u8 update_vxlan_udp_port;
	u8 update_geneve_udp_port;
	u8 tunn_clss_vxlan;
	u8 tunn_clss_l2geneve;
	u8 tunn_clss_ipgeneve;
	u8 tunn_clss_l2gre;
	u8 tunn_clss_ipgre;
};
/* The PCI personality is not quite synonymous to protocol ID:
 * 1. All personalities need CORE connections
 * 2. The Ethernet personality may also support the RoCE/iWARP protocol
 */
enum qed_pci_personality {
	QED_PCI_ETH,
	QED_PCI_FCOE,
	QED_PCI_ISCSI,
	QED_PCI_ETH_ROCE,
	QED_PCI_ETH_IWARP,
	QED_PCI_ETH_RDMA,
	QED_PCI_DEFAULT, /* default in shmem */
};
/* All VFs are symmetric, all counters are PF + all VFs */
/* HW / FW resources, output of features supported below, most information
 * is received from MFW.
 */
enum qed_resources {
	QED_SB,
	QED_L2_QUEUE,
	QED_VPORT,
	QED_RSS_ENG,
	QED_PQ,
	QED_RL,
	QED_MAC,
	QED_VLAN,
	QED_RDMA_CNQ_RAM,
	QED_ILT,
	QED_LL2_RAM_QUEUE,
	QED_LL2_CTX_QUEUE,
	QED_CMDQS_CQS,
	QED_RDMA_STATS_QUEUE,
	QED_BDQ,
	QED_MAX_RESC,
};
enum qed_wol_support {
	QED_WOL_SUPPORT_NONE,
	QED_WOL_SUPPORT_PME,
};
enum qed_db_rec_exec {
	DB_REC_DRY_RUN,
	DB_REC_REAL_DEAL,
	DB_REC_ONCE,
};

struct qed_hw_info {
	/* PCI personality */
	enum qed_pci_personality personality;
#define QED_IS_RDMA_PERSONALITY(dev)					\
	((dev)->hw_info.personality == QED_PCI_ETH_ROCE ||		\
	 (dev)->hw_info.personality == QED_PCI_ETH_IWARP ||		\
	 (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
#define QED_IS_ROCE_PERSONALITY(dev)					\
	((dev)->hw_info.personality == QED_PCI_ETH_ROCE ||		\
	 (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
#define QED_IS_IWARP_PERSONALITY(dev)					\
	((dev)->hw_info.personality == QED_PCI_ETH_IWARP ||		\
	 (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
#define QED_IS_L2_PERSONALITY(dev)					\
	((dev)->hw_info.personality == QED_PCI_ETH ||			\
	 QED_IS_RDMA_PERSONALITY(dev))
#define QED_IS_FCOE_PERSONALITY(dev)					\
	((dev)->hw_info.personality == QED_PCI_FCOE)
#define QED_IS_ISCSI_PERSONALITY(dev)					\
	((dev)->hw_info.personality == QED_PCI_ISCSI)
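/* Illustrative use: QED_IS_L2_PERSONALITY() covers plain Ethernet as
 * well as the RoCE/iWARP personalities, so an L2 setup path can be
 * gated as (hypothetical helper):
 *
 *	if (QED_IS_L2_PERSONALITY(p_hwfn))
 *		qed_setup_l2_queues(p_hwfn);
 */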
	/* Resource Allocation scheme results */
	u32 resc_start[QED_MAX_RESC];
	u32 resc_num[QED_MAX_RESC];
#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
#define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \
				 RESC_NUM(_p_hwfn, resc))
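/* Example: a PF whose L2-queue allocation starts at 16 with 16 queues
 * has RESC_START() == 16, RESC_NUM() == 16 and RESC_END() == 32, i.e.
 * RESC_END() points one past the last owned resource.
 */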
	u32 feat_num[QED_MAX_FEATURES];
#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
	/* Number of traffic classes the HW supports */
	u8 num_hw_tc;

	/* Number of TCs that should be active according to DCBx or the
	 * upper-layer driver configuration.
	 */
	u8 num_active_tc;

	bool multi_tc_roce_en;
#define IS_QED_MULTI_TC_ROCE(p_hwfn) ((p_hwfn)->hw_info.multi_tc_roce_en)
	unsigned char hw_mac_addr[ETH_ALEN];

	struct qed_igu_info *p_igu_info;

	unsigned long device_capabilities;

	enum qed_wol_support b_wol_support;
};
/* Maximum size of read/write commands (HW limit) */
#define DMAE_MAX_RW_SIZE	0x2000
struct qed_dmae_info {
	/* Mutex for synchronizing access to functions */
	struct mutex mutex;

	dma_addr_t completion_word_phys_addr;

	/* The memory location where the DMAE writes the completion
	 * value when an operation is finished on this context.
	 */
	u32 *p_completion_word;

	dma_addr_t intermediate_buffer_phys_addr;

	/* An intermediate buffer for DMAE operations that use virtual
	 * addresses - data is DMA'd to/from this buffer and then
	 * memcpy'd to/from the virtual address
	 */
	u32 *p_intermediate_buffer;

	dma_addr_t dmae_cmd_phys_addr;
	struct dmae_cmd *p_dmae_cmd;
};
struct qed_wfq_data {
	/* when feature is configured for at least 1 vport */
	u32 min_speed;
	bool configured;
};
struct qed_qm_info {
	struct init_qm_pq_params *qm_pq_params;
	struct init_qm_vport_params *qm_vport_params;
	struct init_qm_port_params *qm_port_params;

	u8 max_phys_tcs_per_port;

	struct qed_wfq_data *wfq_data;
};
#define QED_OVERFLOW_BIT	1

struct qed_db_recovery_info {
	struct list_head list;

	/* Lock to protect the doorbell recovery mechanism list */
	spinlock_t lock;
	u32 db_recovery_counter;
	unsigned long overflow;
};
struct qed_storm_stats {
	struct storm_stats mstats;
	struct storm_stats pstats;
	struct storm_stats tstats;
	struct storm_stats ustats;
};

struct qed_fw_data {
	struct fw_ver_info *fw_ver_info;
	const u8 *modes_tree_buf;
	union init_op *init_ops;
	const u32 *fw_overlays;
};
enum qed_mf_mode_bit {
	/* Supports PF-classification based on tag */
	QED_MF_OVLAN_CLSS,

	/* Supports PF-classification based on MAC */
	QED_MF_LLH_MAC_CLSS,

	/* Supports PF-classification based on protocol type */
	QED_MF_LLH_PROTO_CLSS,

	/* Requires a default PF to be set */
	QED_MF_NEED_DEF_PF,

	/* Allow LL2 to multicast/broadcast */
	QED_MF_LL2_NON_UNICAST,

	/* Allow Cross-PF [& child VFs] Tx-switching */
	QED_MF_INTER_PF_SWITCH,

	/* Unified Fabric Port support enabled */
	QED_MF_UFP_SPECIFIC,

	/* Disable Accelerated Receive Flow Steering (aRFS) */
	QED_MF_DISABLE_ARFS,

	/* Use vlan for steering */
	QED_MF_8021Q_TAGGING,

	/* Use stag for steering */
	QED_MF_8021AD_TAGGING,

	/* Allow DSCP to TC mapping */
	QED_MF_DSCP_TO_TC_MAP,

	/* Do not insert a vlan tag with id 0 */
	QED_MF_DONT_ADD_VLAN0_TAG,
};
enum qed_ufp_mode {
	QED_UFP_MODE_ETS,
	QED_UFP_MODE_VNIC_BW,
	QED_UFP_MODE_UNKNOWN
};

enum qed_ufp_pri_type {
	QED_UFP_PRI_OS,
	QED_UFP_PRI_VNIC,
	QED_UFP_PRI_UNKNOWN
};
struct qed_ufp_info {
	enum qed_ufp_pri_type pri_type;
	enum qed_ufp_mode mode;
	u8 tc;
};
enum BAR_ID {
	BAR_ID_0,	/* used for GRC */
	BAR_ID_1	/* Used for doorbells */
};
struct qed_nvm_image_info {
	u32 num_images;
	struct bist_nvm_image_att *image_att;
	bool valid;
};
enum qed_hsi_def_type {
	QED_HSI_DEF_MAX_NUM_VFS,
	QED_HSI_DEF_MAX_NUM_L2_QUEUES,
	QED_HSI_DEF_MAX_NUM_PORTS,
	QED_HSI_DEF_MAX_SB_PER_PATH,
	QED_HSI_DEF_MAX_NUM_PFS,
	QED_HSI_DEF_MAX_NUM_VPORTS,
	QED_HSI_DEF_NUM_ETH_RSS_ENGINE,
	QED_HSI_DEF_MAX_QM_TX_QUEUES,
	QED_HSI_DEF_NUM_PXP_ILT_RECORDS,
	QED_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS,
	QED_HSI_DEF_MAX_QM_GLOBAL_RLS,
	QED_HSI_DEF_MAX_PBF_CMD_LINES,
	QED_HSI_DEF_MAX_BTB_BLOCKS,
	QED_NUM_HSI_DEFS
};
#define DRV_MODULE_VERSION		      \
	__stringify(QED_MAJOR_VERSION) "."    \
	__stringify(QED_MINOR_VERSION) "."    \
	__stringify(QED_REVISION_VERSION) "." \
	__stringify(QED_ENGINEERING_VERSION)
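/* With the version values above this expands to "8.37.0.20". */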
struct qed_simd_fp_handler {
	void *token;
	void (*func)(void *);
};
enum qed_slowpath_wq_flag {
	QED_SLOWPATH_MFW_TLV_REQ,
	QED_SLOWPATH_PERIODIC_DB_REC,
};
struct qed_hwfn {
	struct qed_dev *cdev;
	u8 my_id;		/* ID inside the PF */
#define IS_LEAD_HWFN(edev)	(!((edev)->my_id))
	u8 rel_pf_id;		/* Relative to engine */
	u8 abs_pf_id;
#define QED_PATH_ID(_p_hwfn) \
	(QED_IS_K2((_p_hwfn)->cdev) ? 0 : ((_p_hwfn)->abs_pf_id & 1))
	char name[NAME_SIZE];

	u8 num_funcs_on_engine;
	void __iomem *regview;
	void __iomem *doorbells;

	unsigned long db_size;
	struct qed_ptt_pool *p_ptt_pool;

	struct qed_hw_info hw_info;

	/* rt_array (for init-tool) */
	struct qed_rt_data rt_data;
	struct qed_spq *p_spq;

	struct qed_consq *p_consq;
	/* Slow-Path definitions */
	struct tasklet_struct sp_dpc;
	bool b_sp_dpc_enabled;

	struct qed_ptt *p_main_ptt;
	struct qed_ptt *p_dpc_ptt;
	/* PTP will be used only by the leading function.
	 * Usage of all PTP APIs should be synchronized as a result.
	 */
	struct qed_ptt *p_ptp_ptt;
	struct qed_sb_sp_info *p_sp_sb;
	struct qed_sb_attn_info *p_sb_attn;
	/* Protocol related */
	struct qed_ll2_info *p_ll2_info;
	struct qed_ooo_info *p_ooo_info;
	struct qed_rdma_info *p_rdma_info;
	struct qed_iscsi_info *p_iscsi_info;
	struct qed_fcoe_info *p_fcoe_info;
	struct qed_pf_params pf_params;

	bool b_rdma_enabled_in_prs;
	u32 rdma_prs_search_reg;

	struct qed_cxt_mngr *p_cxt_mngr;
	/* Flag indicating whether interrupts are enabled or not */
	bool b_int_enabled;
	bool b_int_requested;

	/* True if the driver has requested the link */
	bool b_drv_link_init;
	struct qed_vf_iov *vf_iov_info;
	struct qed_pf_iov *pf_iov_info;
	struct qed_mcp_info *mcp_info;

	struct qed_dcbx_info *p_dcbx_info;

	struct qed_ufp_info ufp_info;

	struct qed_dmae_info dmae_info;

	struct qed_qm_info qm_info;
	struct qed_storm_stats storm_stats;
	/* Buffer for unzipping firmware data */
	void *unzip_buf;

	struct dbg_tools_data dbg_info;
	struct virt_mem_desc dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE];
	/* PWM region specific data */
	u16 wid_count;
	u32 dpi_size;
	u32 dpi_count;

	/* This is used to calculate the doorbell address */
	u32 dpi_start_offset;
	/* If one of the following is set then EDPM shouldn't be used */
	u8 dcbx_no_edpm;
	u8 db_bar_no_edpm;

	struct qed_l2_info *p_l2_info;
	/* Mechanism for recovering from doorbell drop */
	struct qed_db_recovery_info db_recovery_info;

	/* Nvm images number and attributes */
	struct qed_nvm_image_info nvm_info;

	struct phys_mem_desc *fw_overlay_mem;
	struct qed_ptt *p_arfs_ptt;

	struct qed_simd_fp_handler simd_proto_handler[64];
#ifdef CONFIG_QED_SRIOV
	struct workqueue_struct *iov_wq;
	struct delayed_work iov_task;
	unsigned long iov_task_flags;
#endif
	struct z_stream_s *stream;
	bool slowpath_wq_active;
	struct workqueue_struct *slowpath_wq;
	struct delayed_work slowpath_task;
	unsigned long slowpath_task_flags;
	u32 periodic_db_rec_count;
};

struct pci_params {
	int pm_cap;

	unsigned long mem_start;
	unsigned long mem_end;
	unsigned int irq;
	u8 pf_num;
};
struct qed_int_param {
	u32 int_mode;
	u8 num_vectors;
	u8 min_msix_cnt;	/* for minimal functionality */
};
struct qed_int_params {
	struct qed_int_param in;
	struct qed_int_param out;
	struct msix_entry *msix_table;
};
struct qed_dbg_feature {
	struct dentry *dentry;
	u8 *dump_buf;
	u32 buf_size;
	u32 dumped_dwords;
};
struct qed_dev {
	u32 dp_module;
	u8 dp_level;
	char name[NAME_SIZE];

	enum qed_dev_type type;
/* Translate type/revision combo into the proper conditions */
#define QED_IS_BB(dev)		((dev)->type == QED_DEV_TYPE_BB)
#define QED_IS_BB_B0(dev)	(QED_IS_BB(dev) && CHIP_REV_IS_B0(dev))
#define QED_IS_AH(dev)		((dev)->type == QED_DEV_TYPE_AH)
#define QED_IS_K2(dev)		QED_IS_AH(dev)
#define QED_IS_E4(dev)		(QED_IS_BB(dev) || QED_IS_AH(dev))
#define QED_IS_E5(dev)		((dev)->type == QED_DEV_TYPE_E5)
	u16 vendor_id;

	u16 device_id;
#define QED_DEV_ID_MASK		0xff00
#define QED_DEV_ID_MASK_BB	0x1600
#define QED_DEV_ID_MASK_AH	0x8000
	u16 chip_num;
#define CHIP_NUM_MASK		0xffff
#define CHIP_NUM_SHIFT		16

	u16 chip_rev;
#define CHIP_REV_MASK		0xf
#define CHIP_REV_SHIFT		12
#define CHIP_REV_IS_B0(_cdev)	((_cdev)->chip_rev == 1)

	u16 chip_metal;
#define CHIP_METAL_MASK		0xff
#define CHIP_METAL_SHIFT	4

	u16 chip_bond_id;
#define CHIP_BOND_ID_MASK	0xf
#define CHIP_BOND_ID_SHIFT	0
	u8 num_ports_in_engine;
	u8 num_funcs_in_port;

	unsigned long mf_bits;
	/* Add MF related configuration */

	/* WoL related configurations */
	u8 wol_mac[ETH_ALEN];
	enum qed_coalescing_mode int_coalescing_mode;
	u16 rx_coalesce_usecs;
	u16 tx_coalesce_usecs;
	/* Start Bar offset of first hwfn */
	void __iomem *regview;
	void __iomem *doorbells;

	unsigned long db_size;

	const struct iro *iro_arr;
#define IRO ((const struct iro *)p_hwfn->cdev->iro_arr)
	u8 num_hwfns;
	struct qed_hwfn hwfns[MAX_HWFNS_PER_DEVICE];

	/* Engine affinity */
	u8 l2_affin_hint;
	u8 fir_affin;
	u8 iwarp_affin;
	struct qed_hw_sriov_info *p_iov_info;
#define IS_QED_SRIOV(cdev)	(!!(cdev)->p_iov_info)
	struct qed_tunnel_info tunnel;
	struct qed_eth_stats *reset_stats;
	struct qed_fw_data *fw_data;
	/* Indicates whether attentions should be prevented from being reasserted */
	bool attn_clr_en;

	struct qed_llh_info *p_llh_info;
	/* Linux specific here */
	struct qed_dev_info common_dev_info;
	struct qede_dev *edev;
	struct pci_dev *pdev;
	u32 flags;
#define QED_FLAG_STORAGE_STARTED	(BIT(0))

	struct pci_params pci_params;

	struct qed_int_params int_params;
	u8 protocol;
#define IS_QED_ETH_IF(cdev)	((cdev)->protocol == QED_PROTOCOL_ETH)
#define IS_QED_FCOE_IF(cdev)	((cdev)->protocol == QED_PROTOCOL_FCOE)

	/* Callbacks to protocol driver */
	union {
		struct qed_common_cb_ops *common;
		struct qed_eth_cb_ops *eth;
		struct qed_fcoe_cb_ops *fcoe;
		struct qed_iscsi_cb_ops *iscsi;
	} protocol_ops;
#ifdef CONFIG_QED_LL2
	struct qed_cb_ll2_info *ll2;
	u8 ll2_mac_address[ETH_ALEN];
#endif
	struct qed_dbg_feature dbg_features[DBG_FEATURE_NUM];
	bool disable_ilt_dump;

	DECLARE_HASHTABLE(connections, 10);
	const struct firmware *firmware;
	u32 rdma_max_srq_sge;
	u16 tunn_feature_mask;
};
u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type);
#define NUM_OF_VFS(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_VFS)
#define NUM_OF_L2_QUEUES(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_L2_QUEUES)
#define NUM_OF_PORTS(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_PORTS)
#define NUM_OF_SBS(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_SB_PER_PATH)
#define NUM_OF_ENG_PFS(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_PFS)
#define NUM_OF_VPORTS(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_VPORTS)
#define NUM_OF_RSS_ENGINES(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_ETH_RSS_ENGINE)
#define NUM_OF_QM_TX_QUEUES(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_QM_TX_QUEUES)
#define NUM_OF_PXP_ILT_RECORDS(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_PXP_ILT_RECORDS)
#define NUM_OF_RDMA_STATISTIC_COUNTERS(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS)
#define NUM_OF_QM_GLOBAL_RLS(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_QM_GLOBAL_RLS)
#define NUM_OF_PBF_CMD_LINES(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_PBF_CMD_LINES)
#define NUM_OF_BTB_BLOCKS(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_BTB_BLOCKS)
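/* Illustrative use: bounds-checking a relative VF id against the
 * per-chip HSI limit:
 *
 *	if (rel_vf_id >= NUM_OF_VFS(p_hwfn->cdev))
 *		return -EINVAL;
 */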
/**
 * @brief qed_concrete_to_sw_fid - get the sw function id from
 *        the concrete value.
 *
 * @param concrete_fid
 *
 * @return the software function id
 */
static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
					u32 concrete_fid)
{
	u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
	u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
	u8 vf_valid = GET_FIELD(concrete_fid,
				PXP_CONCRETE_FID_VFVALID);
	u8 sw_fid;

	if (vf_valid)
		sw_fid = vfid + MAX_NUM_PFS;
	else
		sw_fid = pfid;

	return sw_fid;
}
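/* Software fids enumerate PFs first and VFs after them: e.g. a VF with
 * vfid 3 maps to sw_fid MAX_NUM_PFS + 3, while a PF maps to its pfid.
 */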
#define MAX_NUM_VOQS_E4	20

int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
					 struct qed_ptt *p_ptt,
					 u32 min_pf_rate);

void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
int qed_device_num_engines(struct qed_dev *cdev);
void qed_set_fw_mac_addr(__le16 *fw_msb,
			 __le16 *fw_mid, __le16 *fw_lsb, u8 *mac);
#define QED_LEADING_HWFN(dev)	(&dev->hwfns[0])
#define QED_IS_CMT(dev)		((dev)->num_hwfns > 1)
/* Macros for getting the engine-affinitized hwfn (FIR: fcoe,iscsi,roce) */
#define QED_FIR_AFFIN_HWFN(dev)		(&(dev)->hwfns[dev->fir_affin])
#define QED_IWARP_AFFIN_HWFN(dev)	(&(dev)->hwfns[dev->iwarp_affin])
#define QED_AFFIN_HWFN(dev)				   \
	(QED_IS_IWARP_PERSONALITY(QED_LEADING_HWFN(dev)) ? \
	 QED_IWARP_AFFIN_HWFN(dev) : QED_FIR_AFFIN_HWFN(dev))
#define QED_AFFIN_HWFN_IDX(dev)	(IS_LEAD_HWFN(QED_AFFIN_HWFN(dev)) ? 0 : 1)
/* Flags for indication of required queues */
#define PQ_FLAGS_RLS	(BIT(0))
#define PQ_FLAGS_MCOS	(BIT(1))
#define PQ_FLAGS_LB	(BIT(2))
#define PQ_FLAGS_OOO	(BIT(3))
#define PQ_FLAGS_ACK	(BIT(4))
#define PQ_FLAGS_OFLD	(BIT(5))
#define PQ_FLAGS_VFS	(BIT(6))
#define PQ_FLAGS_LLT	(BIT(7))
#define PQ_FLAGS_MTC	(BIT(8))
/* physical queue index for cm context initialization */
u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags);
u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc);
u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf);
u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc);
u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc);
/* doorbell recovery mechanism */
void qed_db_recovery_dp(struct qed_hwfn *p_hwfn);
void qed_db_recovery_execute(struct qed_hwfn *p_hwfn);
bool qed_edpm_enabled(struct qed_hwfn *p_hwfn);
/* Other Linux specific common definitions */
#define DP_NAME(cdev) ((cdev)->name)

#define REG_ADDR(cdev, offset)	(void __iomem *)((u8 __iomem *)\
					(cdev->regview) + \
					(offset))

#define REG_RD(cdev, offset)		readl(REG_ADDR(cdev, offset))
#define REG_WR(cdev, offset, val)	writel((u32)val, REG_ADDR(cdev, offset))
#define REG_WR16(cdev, offset, val)	writew((u16)val, REG_ADDR(cdev, offset))
#define DOORBELL(cdev, db_addr, val)			 \
	writel((u32)val, (void __iomem *)((u8 __iomem *)\
					  (cdev->doorbells) + (db_addr)))
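/* Illustrative use, assuming val holds the doorbell data and cid an
 * ICID; the doorbell offset is built with qed_db_addr():
 *
 *	DOORBELL(cdev, qed_db_addr(cid, DQ_DEMS_LEGACY), val);
 */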
#define MFW_PORT(_p_hwfn)	((_p_hwfn)->abs_pf_id % \
				 qed_device_num_ports((_p_hwfn)->cdev))
int qed_device_num_ports(struct qed_dev *cdev);
int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info);
void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt);
void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt);
u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
		   u32 input_len, u8 *input_buf,
		   u32 max_size, u8 *unzip_buf);
int qed_recovery_process(struct qed_dev *cdev);
void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn);
void qed_hw_error_occurred(struct qed_hwfn *p_hwfn,
			   enum qed_hw_err_type err_type);
void qed_get_protocol_stats(struct qed_dev *cdev,
			    enum qed_mcp_protocol_type type,
			    union qed_mcp_protocol_stats *stats);
int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn);
int qed_mfw_tlv_req(struct qed_hwfn *hwfn);
int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn,
			  enum qed_mfw_tlv_type type,
			  union qed_mfw_tlv_data *tlv_data);
void qed_hw_info_set_offload_tc(struct qed_hw_info *p_info, u8 tc);
void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn);

#endif /* _QED_H */