/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qed NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/workqueue.h>
#include <linux/zlib.h>
#include <linux/hashtable.h>
#include <linux/qed/qed_if.h>
#include "qed_debug.h"
#include "qed_dbg_hsi.h"
#include "qed_mfw_hsi.h"
extern const struct qed_common_ops qed_common_ops_pass;

#define STORM_FW_VERSION \
	((FW_MAJOR_VERSION << 24) | (FW_MINOR_VERSION << 16) | \
	 (FW_REVISION_VERSION << 8) | FW_ENGINEERING_VERSION)

#define MAX_HWFNS_PER_DEVICE	(4)

#define QED_WFQ_UNIT	100

#define QED_WID_SIZE		(1024)
#define QED_MIN_WIDS		(4)
#define QED_PF_DEMS_SIZE	(4)

#define QED_LLH_DONT_CARE	0
enum qed_coalescing_mode {
	QED_COAL_MODE_DISABLE,
	QED_COAL_MODE_ENABLE
};

enum qed_nvm_cmd {
	QED_PUT_FILE_BEGIN = DRV_MSG_CODE_NVM_PUT_FILE_BEGIN,
	QED_PUT_FILE_DATA = DRV_MSG_CODE_NVM_PUT_FILE_DATA,
	QED_NVM_WRITE_NVRAM = DRV_MSG_CODE_NVM_WRITE_NVRAM,
	QED_GET_MCP_NVM_RESP = 0xFFFFFF00
};
struct qed_eth_cb_ops;
union qed_mcp_protocol_stats;
enum qed_mcp_protocol_type;
enum qed_mfw_tlv_type;
union qed_mfw_tlv_data;
#define QED_MFW_GET_FIELD(name, field) \
	(((name) & (field ## _MASK)) >> (field ## _SHIFT))

#define QED_MFW_SET_FIELD(name, field, value)				       \
	do {								       \
		(name) &= ~(field ## _MASK);				       \
		(name) |= (((value) << (field ## _SHIFT)) & (field ## _MASK));\
	} while (0)
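/* Illustrative usage sketch, not part of the original header: the MFW field
 * helpers assume every field FOO is described by a FOO_MASK/FOO_SHIFT pair.
 * EXAMPLE_FIELD below is hypothetical, defined only to demonstrate the
 * token-pasting contract.
 */
#if 0	/* example only */
#define EXAMPLE_FIELD_MASK	0x00000ff0
#define EXAMPLE_FIELD_SHIFT	4

static inline void qed_mfw_field_example(void)
{
	u32 reg = 0;

	QED_MFW_SET_FIELD(reg, EXAMPLE_FIELD, 0x2a);	/* reg == 0x2a0 */
	WARN_ON(QED_MFW_GET_FIELD(reg, EXAMPLE_FIELD) != 0x2a);
}
#endif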
static inline u32 qed_db_addr(u32 cid, u32 DEMS)
{
	u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
		      (cid * QED_PF_DEMS_SIZE);

	return db_addr;
}
static inline u32 qed_db_addr_vf(u32 cid, u32 DEMS)
{
	u32 db_addr = FIELD_VALUE(DB_LEGACY_ADDR_DEMS, DEMS) |
		      FIELD_VALUE(DB_LEGACY_ADDR_ICID, cid);

	return db_addr;
}
#define ALIGNED_TYPE_SIZE(type_name, p_hwfn)				     \
	((sizeof(type_name) + (u32)(1 << ((p_hwfn)->cdev->cache_shift)) - 1) & \
	 ~((1 << (p_hwfn->cdev->cache_shift)) - 1))

#define for_each_hwfn(cdev, i)	for (i = 0; i < (cdev)->num_hwfns; i++)

#define D_TRINE(val, cond1, cond2, true1, true2, def) \
	((val) == (cond1) ? true1 : \
	 ((val) == (cond2) ? true2 : def))
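/* Illustrative sketch, not part of the original header: D_TRINE() is a
 * two-condition ternary, yielding true1 when val equals cond1, true2 when it
 * equals cond2, and def otherwise; the port mapping below is hypothetical.
 */
#if 0	/* example only */
static inline int qed_d_trine_example(int num_ports)
{
	/* 1 port -> index 0, 2 ports -> index 1, anything else -> -1 */
	return D_TRINE(num_ports, 1, 2, 0, 1, -1);
}
#endif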
struct qed_sb_attn_info;
struct qed_sb_sp_info;
enum qed_tunn_mode {
	QED_MODE_L2GENEVE_TUNN,
	QED_MODE_IPGENEVE_TUNN,
};

enum qed_tunn_clss {
	QED_TUNN_CLSS_MAC_VLAN,
	QED_TUNN_CLSS_MAC_VNI,
	QED_TUNN_CLSS_INNER_MAC_VLAN,
	QED_TUNN_CLSS_INNER_MAC_VNI,
	QED_TUNN_CLSS_MAC_VLAN_DUAL_STAGE,
};
struct qed_tunn_update_type {
	enum qed_tunn_clss tun_cls;
};

struct qed_tunn_update_udp_port {
	bool b_update_port;
	u16 port;
};

struct qed_tunnel_info {
	struct qed_tunn_update_type vxlan;
	struct qed_tunn_update_type l2_geneve;
	struct qed_tunn_update_type ip_geneve;
	struct qed_tunn_update_type l2_gre;
	struct qed_tunn_update_type ip_gre;

	struct qed_tunn_update_udp_port vxlan_port;
	struct qed_tunn_update_udp_port geneve_port;

	bool b_update_rx_cls;
	bool b_update_tx_cls;
};
struct qed_tunn_start_params {
	unsigned long tunn_mode;
	u8 update_vxlan_udp_port;
	u8 update_geneve_udp_port;
	u8 tunn_clss_l2geneve;
	u8 tunn_clss_ipgeneve;
};

struct qed_tunn_update_params {
	unsigned long tunn_mode_update_mask;
	unsigned long tunn_mode;
	u8 update_rx_pf_clss;
	u8 update_tx_pf_clss;
	u8 update_vxlan_udp_port;
	u8 update_geneve_udp_port;
	u8 tunn_clss_l2geneve;
	u8 tunn_clss_ipgeneve;
};
/* The PCI personality is not quite synonymous to protocol ID:
 * 1. All personalities need CORE connections
 * 2. The Ethernet personality may also support the RoCE/iWARP protocol
 */
enum qed_pci_personality {
	QED_PCI_ETH,
	QED_PCI_FCOE,
	QED_PCI_ISCSI,
	QED_PCI_NVMETCP,
	QED_PCI_ETH_ROCE,
	QED_PCI_ETH_IWARP,
	QED_PCI_ETH_RDMA,
	QED_PCI_DEFAULT, /* default in shmem */
};
/* All VFs are symmetric, all counters are PF + all VFs */

/* HW / FW resources, output of features supported below, most information
 * is received from MFW.
 */
enum qed_resources {
	QED_RDMA_STATS_QUEUE,
	QED_MAX_RESC,
};
enum qed_wol_support {
	QED_WOL_SUPPORT_NONE,
	QED_WOL_SUPPORT_PME,
};
enum qed_db_rec_exec {
	DB_REC_DRY_RUN,
	DB_REC_REAL_DEAL,
	DB_REC_ONCE,
};
struct qed_hw_info {
	/* PCI personality */
	enum qed_pci_personality personality;
#define QED_IS_RDMA_PERSONALITY(dev) \
	((dev)->hw_info.personality == QED_PCI_ETH_ROCE || \
	 (dev)->hw_info.personality == QED_PCI_ETH_IWARP || \
	 (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
#define QED_IS_ROCE_PERSONALITY(dev) \
	((dev)->hw_info.personality == QED_PCI_ETH_ROCE || \
	 (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
#define QED_IS_IWARP_PERSONALITY(dev) \
	((dev)->hw_info.personality == QED_PCI_ETH_IWARP || \
	 (dev)->hw_info.personality == QED_PCI_ETH_RDMA)
#define QED_IS_L2_PERSONALITY(dev) \
	((dev)->hw_info.personality == QED_PCI_ETH || \
	 QED_IS_RDMA_PERSONALITY(dev))
#define QED_IS_FCOE_PERSONALITY(dev) \
	((dev)->hw_info.personality == QED_PCI_FCOE)
#define QED_IS_ISCSI_PERSONALITY(dev) \
	((dev)->hw_info.personality == QED_PCI_ISCSI)
#define QED_IS_NVMETCP_PERSONALITY(dev) \
	((dev)->hw_info.personality == QED_PCI_NVMETCP)
	/* Resource Allocation scheme results */
	u32 resc_start[QED_MAX_RESC];
	u32 resc_num[QED_MAX_RESC];
#define RESC_START(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_start[resc])
#define RESC_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.resc_num[resc])
#define RESC_END(_p_hwfn, resc) (RESC_START(_p_hwfn, resc) + \
				 RESC_NUM(_p_hwfn, resc))

	u32 feat_num[QED_MAX_FEATURES];
#define FEAT_NUM(_p_hwfn, resc) ((_p_hwfn)->hw_info.feat_num[resc])
	/* Number of traffic classes the HW supports */
	u8 num_hw_tc;

	/* Number of TCs which should be active according to DCBx or the
	 * upper layer driver configuration.
	 */
	u8 num_active_tc;

	bool multi_tc_roce_en;
#define IS_QED_MULTI_TC_ROCE(p_hwfn) ((p_hwfn)->hw_info.multi_tc_roce_en)
	unsigned char hw_mac_addr[ETH_ALEN];

	struct qed_igu_info *p_igu_info;

	unsigned long device_capabilities;

	enum qed_wol_support b_wol_support;
};
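/* Illustrative sketches, not part of the original header: the personality
 * macros gate protocol-specific paths, and RESC_START()/RESC_END() bound the
 * PF's slice of a global resource. qed_rdma_flavor_example() and
 * qed_resc_walk_example() are hypothetical; QED_L2_QUEUE is one of the
 * enum qed_resources IDs (mostly elided above).
 */
#if 0	/* examples only */
static inline const char *qed_rdma_flavor_example(struct qed_hwfn *p_hwfn)
{
	if (!QED_IS_RDMA_PERSONALITY(p_hwfn))
		return "none";

	/* QED_PCI_ETH_RDMA matches both checks below; RoCE is tested first */
	return QED_IS_ROCE_PERSONALITY(p_hwfn) ? "roce" : "iwarp";
}

static inline void qed_resc_walk_example(struct qed_hwfn *p_hwfn)
{
	u32 abs_queue;

	for (abs_queue = RESC_START(p_hwfn, QED_L2_QUEUE);
	     abs_queue < RESC_END(p_hwfn, QED_L2_QUEUE);
	     abs_queue++)
		; /* program per-queue state for abs_queue here */
}
#endif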
/* maximum size of read/write commands (HW limit) */
#define DMAE_MAX_RW_SIZE	0x2000
struct qed_dmae_info {
	/* Mutex for synchronizing access to functions */
	struct mutex mutex;

	dma_addr_t completion_word_phys_addr;

	/* The memory location where the DMAE writes the completion
	 * value when an operation is finished on this context.
	 */
	u32 *p_completion_word;

	dma_addr_t intermediate_buffer_phys_addr;

	/* An intermediate buffer for DMAE operations that use virtual
	 * addresses - data is DMA'd to/from this buffer and then
	 * memcpy'd to/from the virtual address
	 */
	u32 *p_intermediate_buffer;

	dma_addr_t dmae_cmd_phys_addr;
	struct dmae_cmd *p_dmae_cmd;
};
struct qed_wfq_data {
	/* when feature is configured for at least 1 vport */
	u32 min_speed;
	bool configured;
};

struct qed_qm_info {
	struct init_qm_pq_params *qm_pq_params;
	struct init_qm_vport_params *qm_vport_params;
	struct init_qm_port_params *qm_port_params;

	u8 max_phys_tcs_per_port;

	struct qed_wfq_data *wfq_data;
};
#define QED_OVERFLOW_BIT	1

struct qed_db_recovery_info {
	struct list_head list;

	/* Lock to protect the doorbell recovery mechanism list */
	spinlock_t lock;

	u32 db_recovery_counter;
	unsigned long overflow;
};
struct qed_storm_stats {
	struct storm_stats mstats;
	struct storm_stats pstats;
	struct storm_stats tstats;
	struct storm_stats ustats;
};
struct qed_fw_data {
	struct fw_ver_info *fw_ver_info;
	const u8 *modes_tree_buf;
	union init_op *init_ops;

	const u32 *fw_overlays;
};
enum qed_mf_mode_bit {
	/* Supports PF-classification based on tag */
	QED_MF_OVLAN_CLSS,

	/* Supports PF-classification based on MAC */
	QED_MF_LLH_MAC_CLSS,

	/* Supports PF-classification based on protocol type */
	QED_MF_LLH_PROTO_CLSS,

	/* Requires a default PF to be set */
	QED_MF_NEED_DEF_PF,

	/* Allow LL2 to multicast/broadcast */
	QED_MF_LL2_NON_UNICAST,

	/* Allow Cross-PF [& child VFs] Tx-switching */
	QED_MF_INTER_PF_SWITCH,

	/* Unified Fabric Port support enabled */
	QED_MF_UFP_SPECIFIC,

	/* Disable Accelerated Receive Flow Steering (aRFS) */
	QED_MF_DISABLE_ARFS,

	/* Use vlan for steering */
	QED_MF_8021Q_TAGGING,

	/* Use stag for steering */
	QED_MF_8021AD_TAGGING,

	/* Allow DSCP to TC mapping */
	QED_MF_DSCP_TO_TC_MAP,

	/* Do not insert a vlan tag with id 0 */
	QED_MF_DONT_ADD_VLAN0_TAG,
};
enum qed_ufp_mode {
	QED_UFP_MODE_VNIC_BW,
};

enum qed_ufp_pri_type {
	QED_UFP_PRI_OS,
	QED_UFP_PRI_VNIC,
};

struct qed_ufp_info {
	enum qed_ufp_pri_type pri_type;
	enum qed_ufp_mode mode;
};
enum BAR_ID {
	BAR_ID_0,	/* used for GRC */
	BAR_ID_1	/* used for doorbells */
};
struct qed_nvm_image_info {
	struct bist_nvm_image_att *image_att;
};
enum qed_hsi_def_type {
	QED_HSI_DEF_MAX_NUM_VFS,
	QED_HSI_DEF_MAX_NUM_L2_QUEUES,
	QED_HSI_DEF_MAX_NUM_PORTS,
	QED_HSI_DEF_MAX_SB_PER_PATH,
	QED_HSI_DEF_MAX_NUM_PFS,
	QED_HSI_DEF_MAX_NUM_VPORTS,
	QED_HSI_DEF_NUM_ETH_RSS_ENGINE,
	QED_HSI_DEF_MAX_QM_TX_QUEUES,
	QED_HSI_DEF_NUM_PXP_ILT_RECORDS,
	QED_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS,
	QED_HSI_DEF_MAX_QM_GLOBAL_RLS,
	QED_HSI_DEF_MAX_PBF_CMD_LINES,
	QED_HSI_DEF_MAX_BTB_BLOCKS
};
struct qed_simd_fp_handler {
	void (*func)(void *cookie);
};
enum qed_slowpath_wq_flag {
	QED_SLOWPATH_MFW_TLV_REQ,
	QED_SLOWPATH_PERIODIC_DB_REC,
};
struct qed_hwfn {
	struct qed_dev *cdev;
	u8 my_id;		/* ID inside the PF */
#define IS_LEAD_HWFN(edev)	(!((edev)->my_id))
	u8 rel_pf_id;		/* Relative to engine */
	u8 abs_pf_id;
#define QED_PATH_ID(_p_hwfn) \
	(QED_IS_K2((_p_hwfn)->cdev) ? 0 : ((_p_hwfn)->abs_pf_id & 1))

	char name[NAME_SIZE];

	u8 num_funcs_on_engine;
	void __iomem *regview;
	void __iomem *doorbells;

	unsigned long db_size;

	struct qed_ptt_pool *p_ptt_pool;

	struct qed_hw_info hw_info;

	/* rt_array (for init-tool) */
	struct qed_rt_data rt_data;

	struct qed_spq *p_spq;

	struct qed_consq *p_consq;
	/* Slow-Path definitions */
	struct tasklet_struct sp_dpc;
	bool b_sp_dpc_enabled;

	struct qed_ptt *p_main_ptt;
	struct qed_ptt *p_dpc_ptt;
	/* PTP will be used only by the leading function.
	 * Usage of all PTP-apis should be synchronized as a result.
	 */
	struct qed_ptt *p_ptp_ptt;
	struct qed_sb_sp_info *p_sp_sb;
	struct qed_sb_attn_info *p_sb_attn;

	/* Protocol related */
	struct qed_ll2_info *p_ll2_info;
	struct qed_ooo_info *p_ooo_info;
	struct qed_rdma_info *p_rdma_info;
	struct qed_iscsi_info *p_iscsi_info;
	struct qed_nvmetcp_info *p_nvmetcp_info;
	struct qed_fcoe_info *p_fcoe_info;
	struct qed_pf_params pf_params;

	bool b_rdma_enabled_in_prs;
	u32 rdma_prs_search_reg;
	struct qed_cxt_mngr *p_cxt_mngr;

	/* Flag indicating whether interrupts are enabled or not */
	bool b_int_requested;

	/* True if the driver requests for the link */
	bool b_drv_link_init;
	struct qed_vf_iov *vf_iov_info;
	struct qed_pf_iov *pf_iov_info;
	struct qed_mcp_info *mcp_info;

	struct qed_dcbx_info *p_dcbx_info;

	struct qed_ufp_info ufp_info;

	struct qed_dmae_info dmae_info;

	struct qed_qm_info qm_info;
	struct qed_storm_stats storm_stats;
	/* Buffer for unzipping firmware data */
	void *unzip_buf;

	struct dbg_tools_data dbg_info;

	struct virt_mem_desc dbg_arrays[MAX_BIN_DBG_BUFFER_TYPE];
	/* PWM region specific data */
	u16 wid_count;
	u32 dpi_size;
	u32 dpi_count;

	/* This is used to calculate the doorbell address */
	u32 dpi_start_offset;
	/* If one of the following is set then EDPM shouldn't be used */
	u8 dcbx_no_edpm;
	u8 db_bar_no_edpm;

	struct qed_l2_info *p_l2_info;

	/* Mechanism for recovering from doorbell drop */
	struct qed_db_recovery_info db_recovery_info;
	/* Nvm images number and attributes */
	struct qed_nvm_image_info nvm_info;

	struct phys_mem_desc *fw_overlay_mem;
	struct qed_ptt *p_arfs_ptt;

	struct qed_simd_fp_handler simd_proto_handler[64];
#ifdef CONFIG_QED_SRIOV
	struct workqueue_struct *iov_wq;
	struct delayed_work iov_task;
	unsigned long iov_task_flags;
#endif
	struct z_stream_s *stream;
	bool slowpath_wq_active;
	struct workqueue_struct *slowpath_wq;
	struct delayed_work slowpath_task;
	unsigned long slowpath_task_flags;
	u32 periodic_db_rec_count;
};
struct pci_params {
	unsigned long mem_start;
	unsigned long mem_end;
};
struct qed_int_param {
	u32 int_mode;
	u8 num_vectors;
	u8 min_msix_cnt;	/* for minimal functionality */
};

struct qed_int_params {
	struct qed_int_param in;
	struct qed_int_param out;
	struct msix_entry *msix_table;
};
struct qed_dbg_feature {
	struct dentry *dentry;
};

struct qed_dev {
	char name[NAME_SIZE];
	enum qed_dev_type type;
/* Translate type/revision combo into the proper conditions */
#define QED_IS_BB(dev)		((dev)->type == QED_DEV_TYPE_BB)
#define QED_IS_BB_B0(dev)	(QED_IS_BB(dev) && CHIP_REV_IS_B0(dev))
#define QED_IS_AH(dev)		((dev)->type == QED_DEV_TYPE_AH)
#define QED_IS_K2(dev)		QED_IS_AH(dev)

#define QED_DEV_ID_MASK		0xff00
#define QED_DEV_ID_MASK_BB	0x1600
#define QED_DEV_ID_MASK_AH	0x8000

#define CHIP_NUM_MASK		0xffff
#define CHIP_NUM_SHIFT		16

#define CHIP_REV_MASK		0xf
#define CHIP_REV_SHIFT		12
#define CHIP_REV_IS_B0(_cdev)	((_cdev)->chip_rev == 1)

#define CHIP_METAL_MASK		0xff
#define CHIP_METAL_SHIFT	4

#define CHIP_BOND_ID_MASK	0xf
#define CHIP_BOND_ID_SHIFT	0
	u8 num_ports_in_engine;
	u8 num_funcs_in_port;

	unsigned long mf_bits;

	/* Add MF related configuration */

	/* WoL related configurations */
	u8 wol_mac[ETH_ALEN];

	enum qed_coalescing_mode int_coalescing_mode;
	u16 rx_coalesce_usecs;
	u16 tx_coalesce_usecs;

	/* Start Bar offset of first hwfn */
	void __iomem *regview;
	void __iomem *doorbells;

	unsigned long db_size;
#define IRO ((const struct iro *)p_hwfn->cdev->iro_arr)

	struct qed_hwfn hwfns[MAX_HWFNS_PER_DEVICE];
	/* Engine affinity */
	u8 fir_affin;
	u8 iwarp_affin;
	struct qed_hw_sriov_info *p_iov_info;
#define IS_QED_SRIOV(cdev)	(!!(cdev)->p_iov_info)
	struct qed_tunnel_info tunnel;

	struct qed_eth_stats *reset_stats;
	struct qed_fw_data *fw_data;
	/* Indicates whether attentions should be prevented from being reasserted */
	bool attn_clr_en;

	struct qed_llh_info *p_llh_info;
	/* Linux specific here */
	struct qed_dev_info common_dev_info;
	struct qede_dev *edev;
	struct pci_dev *pdev;
#define QED_FLAG_STORAGE_STARTED	(BIT(0))

	struct pci_params pci_params;

	struct qed_int_params int_params;
#define IS_QED_ETH_IF(cdev)	((cdev)->protocol == QED_PROTOCOL_ETH)
#define IS_QED_FCOE_IF(cdev)	((cdev)->protocol == QED_PROTOCOL_FCOE)
	/* Callbacks to protocol driver */
	union {
		struct qed_common_cb_ops *common;
		struct qed_eth_cb_ops *eth;
		struct qed_fcoe_cb_ops *fcoe;
		struct qed_iscsi_cb_ops *iscsi;
		struct qed_nvmetcp_cb_ops *nvmetcp;
	} protocol_ops;
#ifdef CONFIG_QED_LL2
	struct qed_cb_ll2_info *ll2;
	u8 ll2_mac_address[ETH_ALEN];
#endif
	struct qed_dbg_feature dbg_features[DBG_FEATURE_NUM];

	bool disable_ilt_dump;

	DECLARE_HASHTABLE(connections, 10);
	const struct firmware *firmware;

	u32 rdma_max_srq_sge;
	u16 tunn_feature_mask;
};
u32 qed_get_hsi_def_val(struct qed_dev *cdev, enum qed_hsi_def_type type);
#define NUM_OF_VFS(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_VFS)
#define NUM_OF_L2_QUEUES(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_L2_QUEUES)
#define NUM_OF_PORTS(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_PORTS)
#define NUM_OF_SBS(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_SB_PER_PATH)
#define NUM_OF_ENG_PFS(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_PFS)
#define NUM_OF_VPORTS(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_NUM_VPORTS)
#define NUM_OF_RSS_ENGINES(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_ETH_RSS_ENGINE)
#define NUM_OF_QM_TX_QUEUES(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_QM_TX_QUEUES)
#define NUM_OF_PXP_ILT_RECORDS(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_PXP_ILT_RECORDS)
#define NUM_OF_RDMA_STATISTIC_COUNTERS(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_NUM_RDMA_STATISTIC_COUNTERS)
#define NUM_OF_QM_GLOBAL_RLS(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_QM_GLOBAL_RLS)
#define NUM_OF_PBF_CMD_LINES(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_PBF_CMD_LINES)
#define NUM_OF_BTB_BLOCKS(dev) \
	qed_get_hsi_def_val(dev, QED_HSI_DEF_MAX_BTB_BLOCKS)
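/* Illustrative sketch, not part of the original header: the NUM_OF_*()
 * wrappers resolve chip-dependent HSI limits at runtime, so per-VF state is
 * sized dynamically rather than with a compile-time constant;
 * qed_alloc_per_vf_example() is hypothetical.
 */
#if 0	/* example only */
static inline void *qed_alloc_per_vf_example(struct qed_dev *cdev,
					     size_t elem_size)
{
	return kcalloc(NUM_OF_VFS(cdev), elem_size, GFP_KERNEL);
}
#endif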
/**
 * qed_concrete_to_sw_fid(): Get the sw function id from
 *                           the concrete value.
 *
 * @cdev: Qed dev pointer.
 * @concrete_fid: Concrete fid.
 *
 * Return: The sw function id.
 */
static inline u8 qed_concrete_to_sw_fid(struct qed_dev *cdev,
					u32 concrete_fid)
{
	u8 vfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_VFID);
	u8 pfid = GET_FIELD(concrete_fid, PXP_CONCRETE_FID_PFID);
	u8 vf_valid = GET_FIELD(concrete_fid,
				PXP_CONCRETE_FID_VFVALID);
	u8 sw_fid;

	if (vf_valid)
		sw_fid = vfid + MAX_NUM_PFS;
	else
		sw_fid = pfid;

	return sw_fid;
}
int qed_configure_vport_wfq(struct qed_dev *cdev, u16 vp_id, u32 rate);
void qed_configure_vp_wfq_on_link_change(struct qed_dev *cdev,
					 struct qed_ptt *p_ptt,
					 u32 min_pf_rate);

void qed_clean_wfq_db(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
void qed_set_fw_mac_addr(__le16 *fw_msb,
			 __le16 *fw_mid, __le16 *fw_lsb, u8 *mac);
#define QED_LEADING_HWFN(dev)	(&(dev)->hwfns[0])
#define QED_IS_CMT(dev)		((dev)->num_hwfns > 1)
/* Macros for getting the engine-affinitized hwfn (FIR: fcoe,iscsi,roce) */
#define QED_FIR_AFFIN_HWFN(dev)		(&(dev)->hwfns[dev->fir_affin])
#define QED_IWARP_AFFIN_HWFN(dev)	(&(dev)->hwfns[dev->iwarp_affin])
#define QED_AFFIN_HWFN(dev) \
	(QED_IS_IWARP_PERSONALITY(QED_LEADING_HWFN(dev)) ? \
	 QED_IWARP_AFFIN_HWFN(dev) : QED_FIR_AFFIN_HWFN(dev))
#define QED_AFFIN_HWFN_IDX(dev)	(IS_LEAD_HWFN(QED_AFFIN_HWFN(dev)) ? 0 : 1)
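/* Illustrative sketch, not part of the original header: on CMT (two-hwfn)
 * devices, storage/RDMA traffic should run on the engine-affinitized hwfn
 * rather than unconditionally on the leading one; qed_pick_hwfn_example()
 * is hypothetical.
 */
#if 0	/* example only */
static inline struct qed_hwfn *qed_pick_hwfn_example(struct qed_dev *cdev,
						     bool is_storage)
{
	if (QED_IS_CMT(cdev) && is_storage)
		return QED_AFFIN_HWFN(cdev);

	return QED_LEADING_HWFN(cdev);
}
#endif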
/* Flags for indication of required queues */
#define PQ_FLAGS_RLS	(BIT(0))
#define PQ_FLAGS_MCOS	(BIT(1))
#define PQ_FLAGS_LB	(BIT(2))
#define PQ_FLAGS_OOO	(BIT(3))
#define PQ_FLAGS_ACK	(BIT(4))
#define PQ_FLAGS_OFLD	(BIT(5))
#define PQ_FLAGS_VFS	(BIT(6))
#define PQ_FLAGS_LLT	(BIT(7))
#define PQ_FLAGS_MTC	(BIT(8))
/* physical queue index for cm context initialization */
u16 qed_get_cm_pq_idx(struct qed_hwfn *p_hwfn, u32 pq_flags);
u16 qed_get_cm_pq_idx_mcos(struct qed_hwfn *p_hwfn, u8 tc);
u16 qed_get_cm_pq_idx_vf(struct qed_hwfn *p_hwfn, u16 vf);
u16 qed_get_cm_pq_idx_ofld_mtc(struct qed_hwfn *p_hwfn, u8 tc);
u16 qed_get_cm_pq_idx_llt_mtc(struct qed_hwfn *p_hwfn, u8 tc);
/* doorbell recovery mechanism */
void qed_db_recovery_dp(struct qed_hwfn *p_hwfn);
void qed_db_recovery_execute(struct qed_hwfn *p_hwfn);
bool qed_edpm_enabled(struct qed_hwfn *p_hwfn);
#define GET_GTT_REG_ADDR(__base, __offset, __idx) \
	((__base) + __offset ## _GTT_OFFSET((__idx)))

#define GET_GTT_BDQ_REG_ADDR(__base, __offset, __idx, __bdq_idx) \
	((__base) + __offset ## _GTT_OFFSET((__idx), (__bdq_idx)))
/* Other Linux specific common definitions */
#define DP_NAME(cdev) ((cdev)->name)

#define REG_ADDR(cdev, offset)		((void __iomem *)((u8 __iomem *)\
						((cdev)->regview) + \
						(offset)))

#define REG_RD(cdev, offset)		readl(REG_ADDR(cdev, offset))
#define REG_WR(cdev, offset, val)	writel((u32)val, REG_ADDR(cdev, offset))
#define REG_WR16(cdev, offset, val)	writew((u16)val, REG_ADDR(cdev, offset))

#define DOORBELL(cdev, db_addr, val) \
	writel((u32)val, (void __iomem *)((u8 __iomem *)\
					  ((cdev)->doorbells) + (db_addr)))
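/* Illustrative sketch, not part of the original header: a legacy PF doorbell
 * is rung by combining qed_db_addr() with the DOORBELL() accessor. The DEMS
 * selector DQ_DEMS_LEGACY is assumed to come from the HSI headers; cid and
 * prod are hypothetical.
 */
#if 0	/* example only */
static inline void qed_ring_db_example(struct qed_dev *cdev, u32 cid, u32 prod)
{
	DOORBELL(cdev, qed_db_addr(cid, DQ_DEMS_LEGACY), prod);
}
#endif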
#define MFW_PORT(_p_hwfn)	((_p_hwfn)->abs_pf_id % \
				 qed_device_num_ports((_p_hwfn)->cdev))
int qed_device_num_ports(struct qed_dev *cdev);
int qed_fill_dev_info(struct qed_dev *cdev,
		      struct qed_dev_info *dev_info);
void qed_link_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt);
void qed_bw_update(struct qed_hwfn *hwfn, struct qed_ptt *ptt);
u32 qed_unzip_data(struct qed_hwfn *p_hwfn,
		   u32 input_len, u8 *input_buf,
		   u32 max_size, u8 *unzip_buf);
int qed_recovery_process(struct qed_dev *cdev);
void qed_schedule_recovery_handler(struct qed_hwfn *p_hwfn);
void qed_hw_error_occurred(struct qed_hwfn *p_hwfn,
			   enum qed_hw_err_type err_type);
void qed_get_protocol_stats(struct qed_dev *cdev,
			    enum qed_mcp_protocol_type type,
			    union qed_mcp_protocol_stats *stats);
int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn);
int qed_mfw_tlv_req(struct qed_hwfn *hwfn);

int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn,
			  enum qed_mfw_tlv_type type,
			  union qed_mfw_tlv_data *tlv_data);

void qed_hw_info_set_offload_tc(struct qed_hw_info *p_info, u8 tc);

void qed_periodic_db_rec_start(struct qed_hwfn *p_hwfn);
int qed_llh_add_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port);
int qed_llh_add_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port);
void qed_llh_remove_src_tcp_port_filter(struct qed_dev *cdev, u16 src_port);
void qed_llh_remove_dst_tcp_port_filter(struct qed_dev *cdev, u16 dest_port);
void qed_llh_clear_all_filters(struct qed_dev *cdev);
unsigned long qed_get_epoch_time(void);