1 /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
2 /* QLogic qed NIC Driver
3 * Copyright (c) 2015-2017 QLogic Corporation
4 * Copyright (c) 2019-2020 Marvell International Ltd.
10 #include <linux/ethtool.h>
11 #include <linux/types.h>
12 #include <linux/interrupt.h>
13 #include <linux/netdevice.h>
14 #include <linux/pci.h>
15 #include <linux/skbuff.h>
16 #include <asm/byteorder.h>
18 #include <linux/compiler.h>
19 #include <linux/kernel.h>
20 #include <linux/list.h>
21 #include <linux/slab.h>
22 #include <linux/qed/common_hsi.h>
23 #include <linux/qed/qed_chain.h>
24 #include <linux/io-64-nonatomic-lo-hi.h>
25 #include <net/devlink.h>
27 #define QED_TX_SWS_TIMER_DFLT 500
28 #define QED_TWO_MSL_TIMER_DFLT 4000
30 enum dcbx_protocol_type
{
34 DCBX_PROTOCOL_ROCE_V2
,
36 DCBX_MAX_PROTOCOL_TYPE
39 #define QED_ROCE_PROTOCOL_INDEX (3)
41 #define QED_LLDP_CHASSIS_ID_STAT_LEN 4
42 #define QED_LLDP_PORT_ID_STAT_LEN 4
43 #define QED_DCBX_MAX_APP_PROTOCOL 32
44 #define QED_MAX_PFC_PRIORITIES 8
45 #define QED_DCBX_DSCP_SIZE 64
47 struct qed_dcbx_lldp_remote
{
48 u32 peer_chassis_id
[QED_LLDP_CHASSIS_ID_STAT_LEN
];
49 u32 peer_port_id
[QED_LLDP_PORT_ID_STAT_LEN
];
56 struct qed_dcbx_lldp_local
{
57 u32 local_chassis_id
[QED_LLDP_CHASSIS_ID_STAT_LEN
];
58 u32 local_port_id
[QED_LLDP_PORT_ID_STAT_LEN
];
61 struct qed_dcbx_app_prio
{
69 struct qed_dbcx_pfc_params
{
72 u8 prio
[QED_MAX_PFC_PRIORITIES
];
76 enum qed_dcbx_sf_ieee_type
{
77 QED_DCBX_SF_IEEE_ETHTYPE
,
78 QED_DCBX_SF_IEEE_TCP_PORT
,
79 QED_DCBX_SF_IEEE_UDP_PORT
,
80 QED_DCBX_SF_IEEE_TCP_UDP_PORT
83 struct qed_app_entry
{
85 enum qed_dcbx_sf_ieee_type sf_ieee
;
89 enum dcbx_protocol_type proto_type
;
92 struct qed_dcbx_params
{
93 struct qed_app_entry app_entry
[QED_DCBX_MAX_APP_PROTOCOL
];
102 u8 ets_pri_tc_tbl
[QED_MAX_PFC_PRIORITIES
];
103 u8 ets_tc_bw_tbl
[QED_MAX_PFC_PRIORITIES
];
104 u8 ets_tc_tsa_tbl
[QED_MAX_PFC_PRIORITIES
];
105 struct qed_dbcx_pfc_params pfc
;
109 struct qed_dcbx_admin_params
{
110 struct qed_dcbx_params params
;
114 struct qed_dcbx_remote_params
{
115 struct qed_dcbx_params params
;
119 struct qed_dcbx_operational_params
{
120 struct qed_dcbx_app_prio app_prio
;
121 struct qed_dcbx_params params
;
130 struct qed_dcbx_get
{
131 struct qed_dcbx_operational_params operational
;
132 struct qed_dcbx_lldp_remote lldp_remote
;
133 struct qed_dcbx_lldp_local lldp_local
;
134 struct qed_dcbx_remote_params remote
;
135 struct qed_dcbx_admin_params local
;
138 enum qed_nvm_images
{
139 QED_NVM_IMAGE_ISCSI_CFG
,
140 QED_NVM_IMAGE_FCOE_CFG
,
142 QED_NVM_IMAGE_NVM_CFG1
,
143 QED_NVM_IMAGE_DEFAULT_CFG
,
144 QED_NVM_IMAGE_NVM_META
,
147 struct qed_link_eee_params
{
149 #define QED_EEE_1G_ADV BIT(0)
150 #define QED_EEE_10G_ADV BIT(1)
152 /* Capabilities are represented using QED_EEE_*_ADV values */
165 struct qed_mfw_tlv_eth
{
167 bool lso_maxoff_size_set
;
169 bool lso_minseg_size_set
;
173 bool tx_descr_size_set
;
175 bool rx_descr_size_set
;
179 bool tcp4_offloads_set
;
181 bool tcp6_offloads_set
;
183 bool tx_descr_qdepth_set
;
185 bool rx_descr_qdepth_set
;
187 #define QED_MFW_TLV_IOV_OFFLOAD_NONE (0)
188 #define QED_MFW_TLV_IOV_OFFLOAD_MULTIQUEUE (1)
189 #define QED_MFW_TLV_IOV_OFFLOAD_VEB (2)
190 #define QED_MFW_TLV_IOV_OFFLOAD_VEPA (3)
191 bool iov_offload_set
;
197 bool num_txqs_full_set
;
199 bool num_rxqs_full_set
;
202 #define QED_MFW_TLV_TIME_SIZE 14
203 struct qed_mfw_tlv_time
{
213 struct qed_mfw_tlv_fcoe
{
215 bool scsi_timeout_set
;
229 bool num_npiv_ids_set
;
231 bool switch_name_set
;
233 bool switch_portnum_set
;
235 bool switch_portid_set
;
237 bool vendor_name_set
;
239 bool switch_model_set
;
240 u8 switch_fw_version
[8];
241 bool switch_fw_version_set
;
247 #define QED_MFW_TLV_PORT_STATE_OFFLINE (0)
248 #define QED_MFW_TLV_PORT_STATE_LOOP (1)
249 #define QED_MFW_TLV_PORT_STATE_P2P (2)
250 #define QED_MFW_TLV_PORT_STATE_FABRIC (3)
252 u16 fip_tx_descr_size
;
253 bool fip_tx_descr_size_set
;
254 u16 fip_rx_descr_size
;
255 bool fip_rx_descr_size_set
;
257 bool link_failures_set
;
258 u8 fcoe_boot_progress
;
259 bool fcoe_boot_progress_set
;
265 bool fcoe_txq_depth_set
;
267 bool fcoe_rxq_depth_set
;
269 bool fcoe_rx_frames_set
;
271 bool fcoe_rx_bytes_set
;
273 bool fcoe_tx_frames_set
;
275 bool fcoe_tx_bytes_set
;
278 u32 crc_err_src_fcid
[5];
279 bool crc_err_src_fcid_set
[5];
280 struct qed_mfw_tlv_time crc_err
[5];
286 bool primtive_err_set
;
288 bool disparity_err_set
;
289 u16 code_violation_err
;
290 bool code_violation_err_set
;
292 bool flogi_param_set
[4];
293 struct qed_mfw_tlv_time flogi_tstamp
;
294 u32 flogi_acc_param
[4];
295 bool flogi_acc_param_set
[4];
296 struct qed_mfw_tlv_time flogi_acc_tstamp
;
299 struct qed_mfw_tlv_time flogi_rjt_tstamp
;
312 u32 plogi_dst_fcid
[5];
313 bool plogi_dst_fcid_set
[5];
314 struct qed_mfw_tlv_time plogi_tstamp
[5];
315 u32 plogi_acc_src_fcid
[5];
316 bool plogi_acc_src_fcid_set
[5];
317 struct qed_mfw_tlv_time plogi_acc_tstamp
[5];
324 u32 plogo_src_fcid
[5];
325 bool plogo_src_fcid_set
[5];
326 struct qed_mfw_tlv_time plogo_tstamp
[5];
338 bool rx_abts_acc_set
;
340 bool rx_abts_rjt_set
;
341 u32 abts_dst_fcid
[5];
342 bool abts_dst_fcid_set
[5];
343 struct qed_mfw_tlv_time abts_tstamp
[5];
346 u32 rx_rscn_nport
[4];
347 bool rx_rscn_nport_set
[4];
351 bool abort_task_sets_set
;
375 bool scsi_cond_met_set
;
380 u8 scsi_inter_cond_met
;
381 bool scsi_inter_cond_met_set
;
382 u8 scsi_rsv_conflicts
;
383 bool scsi_rsv_conflicts_set
;
385 bool scsi_tsk_full_set
;
387 bool scsi_aca_active_set
;
389 bool scsi_tsk_abort_set
;
391 bool scsi_rx_chk_set
[5];
392 struct qed_mfw_tlv_time scsi_chk_tstamp
[5];
395 struct qed_mfw_tlv_iscsi
{
397 bool target_llmnr_set
;
399 bool header_digest_set
;
401 bool data_digest_set
;
403 #define QED_MFW_TLV_AUTH_METHOD_NONE (1)
404 #define QED_MFW_TLV_AUTH_METHOD_CHAP (2)
405 #define QED_MFW_TLV_AUTH_METHOD_MUTUAL_CHAP (3)
406 bool auth_method_set
;
407 u16 boot_taget_portal
;
408 bool boot_taget_portal_set
;
412 bool tx_desc_size_set
;
414 bool rx_desc_size_set
;
416 bool boot_progress_set
;
418 bool tx_desc_qdepth_set
;
420 bool rx_desc_qdepth_set
;
431 enum qed_db_rec_width
{
436 enum qed_db_rec_space
{
/* Raw MMIO register accessors.
 *
 * DIRECT_REG_WR()   - 32-bit write of @val to @reg_addr.
 * DIRECT_REG_RD()   - 32-bit read from @reg_addr.
 * DIRECT_REG_WR64() - 64-bit write of @val to @reg_addr.
 *
 * @val is parenthesized before the cast so that compound expressions
 * (e.g. DIRECT_REG_WR(addr, a | b)) are converted as a whole; without
 * the parentheses the cast would bind only to the first operand.
 */
#define DIRECT_REG_WR(reg_addr, val) writel((u32)(val), \
					    (void __iomem *)(reg_addr))

#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))

#define DIRECT_REG_WR64(reg_addr, val) writeq((u64)(val), \
					      (void __iomem *)(reg_addr))
449 #define QED_COALESCE_MAX 0x1FF
450 #define QED_DEFAULT_RX_USECS 12
451 #define QED_DEFAULT_TX_USECS 48
456 struct qed_eth_pf_params
{
457 /* The following parameters are used during HW-init
458 * and these parameters need to be passed as arguments
459 * to update_pf_params routine invoked before slowpath start
463 /* per-VF number of CIDs */
465 #define ETH_PF_PARAMS_VF_CONS_DEFAULT (32)
467 /* To enable arfs, previous to HW-init a positive number needs to be
468 * set [as filters require allocated searcher ILT memory].
469 * This will set the maximal number of configured steering-filters.
471 u32 num_arfs_filters
;
474 struct qed_fcoe_pf_params
{
475 /* The following parameters are used during protocol-init */
476 u64 glbl_q_params_addr
;
477 u64 bdq_pbl_base_addr
[2];
479 /* The following parameters are used during HW-init
480 * and these parameters need to be passed as arguments
481 * to update_pf_params routine invoked before slowpath start
486 /* The following parameters are used during protocol-init */
487 u16 sq_num_pbl_pages
;
490 u16 cmdq_num_entries
;
491 u16 rq_buffer_log_size
;
494 u16 bdq_xoff_threshold
[2];
495 u16 bdq_xon_threshold
[2];
497 u8 num_cqs
; /* num of global CQs */
503 u8 bdq_pbl_num_entries
[2];
506 /* Most of the parameters below are described in the FW iSCSI / TCP HSI */
507 struct qed_iscsi_pf_params
{
508 u64 glbl_q_params_addr
;
509 u64 bdq_pbl_base_addr
[3];
511 u16 cmdq_num_entries
;
515 /* The following parameters are used during HW-init
516 * and these parameters need to be passed as arguments
517 * to update_pf_params routine invoked before slowpath start
522 /* The following parameters are used during protocol-init */
523 u16 half_way_close_timeout
;
524 u16 bdq_xoff_threshold
[3];
525 u16 bdq_xon_threshold
[3];
526 u16 cmdq_xoff_threshold
;
527 u16 cmdq_xon_threshold
;
530 u8 num_sq_pages_in_ring
;
531 u8 num_r2tq_pages_in_ring
;
532 u8 num_uhq_pages_in_ring
;
544 u8 soc_num_of_blocks_log
;
545 u8 bdq_pbl_num_entries
[3];
548 struct qed_nvmetcp_pf_params
{
549 u64 glbl_q_params_addr
;
553 u8 num_sq_pages_in_ring
;
554 u8 num_r2tq_pages_in_ring
;
555 u8 num_uhq_pages_in_ring
;
564 struct qed_rdma_pf_params
{
565 /* Supplied to QED during resource allocation (may affect the ILT and
568 u32 min_dpis
; /* number of requested DPIs */
569 u32 num_qps
; /* number of requested Queue Pairs */
570 u32 num_srqs
; /* number of requested SRQ */
571 u8 roce_edpm_mode
; /* see QED_ROCE_EDPM_MODE_ENABLE */
572 u8 gl_pi
; /* protocol index */
574 /* Will allocate rate limiters to be used with QPs */
578 struct qed_pf_params
{
579 struct qed_eth_pf_params eth_pf_params
;
580 struct qed_fcoe_pf_params fcoe_pf_params
;
581 struct qed_iscsi_pf_params iscsi_pf_params
;
582 struct qed_nvmetcp_pf_params nvmetcp_pf_params
;
583 struct qed_rdma_pf_params rdma_pf_params
;
594 struct status_block
*sb_virt
;
596 u32 sb_ack
; /* Last given ack */
598 void __iomem
*igu_addr
;
600 #define QED_SB_INFO_INIT 0x1
601 #define QED_SB_INFO_SETUP 0x2
603 struct qed_dev
*cdev
;
606 enum qed_hw_err_type
{
608 QED_HW_ERR_MFW_RESP_FAIL
,
610 QED_HW_ERR_DMAE_FAIL
,
611 QED_HW_ERR_RAMROD_FAIL
,
612 QED_HW_ERR_FW_ASSERT
,
621 struct qed_dev_info
{
622 unsigned long pci_mem_start
;
623 unsigned long pci_mem_end
;
624 unsigned int pci_irq
;
637 #define QED_MFW_VERSION_0_MASK 0x000000FF
638 #define QED_MFW_VERSION_0_OFFSET 0
639 #define QED_MFW_VERSION_1_MASK 0x0000FF00
640 #define QED_MFW_VERSION_1_OFFSET 8
641 #define QED_MFW_VERSION_2_MASK 0x00FF0000
642 #define QED_MFW_VERSION_2_OFFSET 16
643 #define QED_MFW_VERSION_3_MASK 0xFF000000
644 #define QED_MFW_VERSION_3_OFFSET 24
648 bool b_inter_pf_switch
;
659 #define QED_MBI_VERSION_0_MASK 0x000000FF
660 #define QED_MBI_VERSION_0_OFFSET 0
661 #define QED_MBI_VERSION_1_MASK 0x0000FF00
662 #define QED_MBI_VERSION_1_OFFSET 8
663 #define QED_MBI_VERSION_2_MASK 0x00FF0000
664 #define QED_MBI_VERSION_2_OFFSET 16
666 enum qed_dev_type dev_type
;
668 /* Output parameters for qede */
677 QED_SB_TYPE_L2_QUEUE
,
685 QED_PROTOCOL_NVMETCP
= QED_PROTOCOL_ISCSI
,
690 QED_FEC_MODE_NONE
= BIT(0),
691 QED_FEC_MODE_FIRECODE
= BIT(1),
692 QED_FEC_MODE_RS
= BIT(2),
693 QED_FEC_MODE_AUTO
= BIT(3),
694 QED_FEC_MODE_UNSUPPORTED
= BIT(4),
697 struct qed_link_params
{
701 #define QED_LINK_OVERRIDE_SPEED_AUTONEG BIT(0)
702 #define QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS BIT(1)
703 #define QED_LINK_OVERRIDE_SPEED_FORCED_SPEED BIT(2)
704 #define QED_LINK_OVERRIDE_PAUSE_CONFIG BIT(3)
705 #define QED_LINK_OVERRIDE_LOOPBACK_MODE BIT(4)
706 #define QED_LINK_OVERRIDE_EEE_CONFIG BIT(5)
707 #define QED_LINK_OVERRIDE_FEC_CONFIG BIT(6)
710 __ETHTOOL_DECLARE_LINK_MODE_MASK(adv_speeds
);
714 #define QED_LINK_PAUSE_AUTONEG_ENABLE BIT(0)
715 #define QED_LINK_PAUSE_RX_ENABLE BIT(1)
716 #define QED_LINK_PAUSE_TX_ENABLE BIT(2)
719 #define QED_LINK_LOOPBACK_NONE BIT(0)
720 #define QED_LINK_LOOPBACK_INT_PHY BIT(1)
721 #define QED_LINK_LOOPBACK_EXT_PHY BIT(2)
722 #define QED_LINK_LOOPBACK_EXT BIT(3)
723 #define QED_LINK_LOOPBACK_MAC BIT(4)
724 #define QED_LINK_LOOPBACK_CNIG_AH_ONLY_0123 BIT(5)
725 #define QED_LINK_LOOPBACK_CNIG_AH_ONLY_2301 BIT(6)
726 #define QED_LINK_LOOPBACK_PCS_AH_ONLY BIT(7)
727 #define QED_LINK_LOOPBACK_REVERSE_MAC_AH_ONLY BIT(8)
728 #define QED_LINK_LOOPBACK_INT_PHY_FEA_AH_ONLY BIT(9)
730 struct qed_link_eee_params eee
;
734 struct qed_link_output
{
737 __ETHTOOL_DECLARE_LINK_MODE_MASK(supported_caps
);
738 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertised_caps
);
739 __ETHTOOL_DECLARE_LINK_MODE_MASK(lp_caps
);
741 u32 speed
; /* In Mb/s */
742 u8 duplex
; /* In DUPLEX defs */
743 u8 port
; /* In PORT defs */
747 /* EEE - capability & param */
751 struct qed_link_eee_params eee
;
757 struct qed_probe_params
{
758 enum qed_protocol protocol
;
765 #define QED_DRV_VER_STR_SIZE 12
766 struct qed_slowpath_params
{
772 u8 name
[QED_DRV_VER_STR_SIZE
];
775 #define ILT_PAGE_SIZE_TCFC 0x8000 /* 32KB */
777 struct qed_int_info
{
778 struct msix_entry
*msix
;
781 /* This should be updated by the protocol driver */
785 struct qed_generic_tlvs
{
786 #define QED_TLV_IP_CSUM BIT(0)
787 #define QED_TLV_LSO BIT(1)
789 #define QED_TLV_MAC_COUNT 3
790 u8 mac
[QED_TLV_MAC_COUNT
][ETH_ALEN
];
793 #define QED_I2C_DEV_ADDR_A0 0xA0
794 #define QED_I2C_DEV_ADDR_A2 0xA2
796 #define QED_NVM_SIGNATURE 0x12435687
798 enum qed_nvm_flash_cmd
{
799 QED_NVM_FLASH_CMD_FILE_DATA
= 0x2,
800 QED_NVM_FLASH_CMD_FILE_START
= 0x3,
801 QED_NVM_FLASH_CMD_NVM_CHANGE
= 0x4,
802 QED_NVM_FLASH_CMD_NVM_CFG_ID
= 0x5,
803 QED_NVM_FLASH_CMD_NVM_MAX
,
807 struct qed_dev
*cdev
;
808 struct devlink_health_reporter
*fw_reporter
;
811 struct qed_sb_info_dbg
{
817 struct qed_common_cb_ops
{
818 void (*arfs_filter_op
)(void *dev
, void *fltr
, u8 fw_rc
);
819 void (*link_update
)(void *dev
, struct qed_link_output
*link
);
820 void (*schedule_recovery_handler
)(void *dev
);
821 void (*schedule_hw_err_handler
)(void *dev
,
822 enum qed_hw_err_type err_type
);
823 void (*dcbx_aen
)(void *dev
, struct qed_dcbx_get
*get
, u32 mib_type
);
824 void (*get_generic_tlv_data
)(void *dev
, struct qed_generic_tlvs
*data
);
825 void (*get_protocol_tlv_data
)(void *dev
, void *data
);
826 void (*bw_update
)(void *dev
);
829 struct qed_selftest_ops
{
831 * selftest_interrupt(): Perform interrupt test.
833 * @cdev: Qed dev pointer.
835 * Return: 0 on success, error otherwise.
837 int (*selftest_interrupt
)(struct qed_dev
*cdev
);
840 * selftest_memory(): Perform memory test.
842 * @cdev: Qed dev pointer.
844 * Return: 0 on success, error otherwise.
846 int (*selftest_memory
)(struct qed_dev
*cdev
);
849 * selftest_register(): Perform register test.
851 * @cdev: Qed dev pointer.
853 * Return: 0 on success, error otherwise.
855 int (*selftest_register
)(struct qed_dev
*cdev
);
858 * selftest_clock(): Perform clock test.
860 * @cdev: Qed dev pointer.
862 * Return: 0 on success, error otherwise.
864 int (*selftest_clock
)(struct qed_dev
*cdev
);
867 * selftest_nvram(): Perform nvram test.
869 * @cdev: Qed dev pointer.
871 * Return: 0 on success, error otherwise.
873 int (*selftest_nvram
) (struct qed_dev
*cdev
);
876 struct qed_common_ops
{
877 struct qed_selftest_ops
*selftest
;
879 struct qed_dev
* (*probe
)(struct pci_dev
*dev
,
880 struct qed_probe_params
*params
);
882 void (*remove
)(struct qed_dev
*cdev
);
884 int (*set_power_state
)(struct qed_dev
*cdev
, pci_power_t state
);
886 void (*set_name
) (struct qed_dev
*cdev
, char name
[]);
888 /* Client drivers need to make this call before slowpath_start.
889 * PF params required for the call before slowpath_start is
890 * documented within the qed_pf_params structure definition.
892 void (*update_pf_params
)(struct qed_dev
*cdev
,
893 struct qed_pf_params
*params
);
895 int (*slowpath_start
)(struct qed_dev
*cdev
,
896 struct qed_slowpath_params
*params
);
898 int (*slowpath_stop
)(struct qed_dev
*cdev
);
900 /* Requests to use `cnt' interrupts for fastpath.
901 * upon success, returns number of interrupts allocated for fastpath.
903 int (*set_fp_int
)(struct qed_dev
*cdev
, u16 cnt
);
905 /* Fills `info' with pointers required for utilizing interrupts */
906 int (*get_fp_int
)(struct qed_dev
*cdev
, struct qed_int_info
*info
);
908 u32 (*sb_init
)(struct qed_dev
*cdev
,
909 struct qed_sb_info
*sb_info
,
911 dma_addr_t sb_phy_addr
,
913 enum qed_sb_type type
);
915 u32 (*sb_release
)(struct qed_dev
*cdev
,
916 struct qed_sb_info
*sb_info
,
918 enum qed_sb_type type
);
920 void (*simd_handler_config
)(struct qed_dev
*cdev
,
923 void (*handler
)(void *));
925 void (*simd_handler_clean
)(struct qed_dev
*cdev
, int index
);
927 int (*dbg_grc
)(struct qed_dev
*cdev
, void *buffer
, u32
*num_dumped_bytes
);
929 int (*dbg_grc_size
)(struct qed_dev
*cdev
);
931 int (*dbg_all_data
)(struct qed_dev
*cdev
, void *buffer
);
933 int (*dbg_all_data_size
)(struct qed_dev
*cdev
);
935 int (*report_fatal_error
)(struct devlink
*devlink
,
936 enum qed_hw_err_type err_type
);
939 * can_link_change(): can the instance change the link or not.
941 * @cdev: Qed dev pointer.
943 * Return: true if link-change is allowed, false otherwise.
945 bool (*can_link_change
)(struct qed_dev
*cdev
);
948 * set_link(): set links according to params.
950 * @cdev: Qed dev pointer.
951 * @params: values used to override the default link configuration.
953 * Return: 0 on success, error otherwise.
955 int (*set_link
)(struct qed_dev
*cdev
,
956 struct qed_link_params
*params
);
959 * get_link(): returns the current link state.
961 * @cdev: Qed dev pointer.
962 * @if_link: structure to be filled with current link configuration.
966 void (*get_link
)(struct qed_dev
*cdev
,
967 struct qed_link_output
*if_link
);
970 * drain(): drains chip in case Tx completions fail to arrive due to pause.
972 * @cdev: Qed dev pointer.
976 int (*drain
)(struct qed_dev
*cdev
);
979 * update_msglvl(): update module debug level.
981 * @cdev: Qed dev pointer.
982 * @dp_module: Debug module.
983 * @dp_level: Debug level.
987 void (*update_msglvl
)(struct qed_dev
*cdev
,
991 int (*chain_alloc
)(struct qed_dev
*cdev
,
992 struct qed_chain
*chain
,
993 struct qed_chain_init_params
*params
);
995 void (*chain_free
)(struct qed_dev
*cdev
,
996 struct qed_chain
*p_chain
);
999 * nvm_flash(): Flash nvm data.
1001 * @cdev: Qed dev pointer.
1002 * @name: file containing the data.
1004 * Return: 0 on success, error otherwise.
1006 int (*nvm_flash
)(struct qed_dev
*cdev
, const char *name
);
1009 * nvm_get_image(): reads an entire image from nvram.
1011 * @cdev: Qed dev pointer.
1012 * @type: type of the request nvram image.
1013 * @buf: preallocated buffer to fill with the image.
1014 * @len: length of the allocated buffer.
1016 * Return: 0 on success, error otherwise.
1018 int (*nvm_get_image
)(struct qed_dev
*cdev
,
1019 enum qed_nvm_images type
, u8
*buf
, u16 len
);
1022 * set_coalesce(): Configure Rx coalesce value in usec.
1024 * @cdev: Qed dev pointer.
1025 * @rx_coal: Rx coalesce value in usec.
1026 * @tx_coal: Tx coalesce value in usec.
1029 * Return: 0 on success, error otherwise.
1031 int (*set_coalesce
)(struct qed_dev
*cdev
,
1032 u16 rx_coal
, u16 tx_coal
, void *handle
);
1035 * set_led() - Configure LED mode.
1037 * @cdev: Qed dev pointer.
1040 * Return: 0 on success, error otherwise.
1042 int (*set_led
)(struct qed_dev
*cdev
,
1043 enum qed_led_mode mode
);
1046 * attn_clr_enable(): Prevent attentions from being reasserted.
1048 * @cdev: Qed dev pointer.
1049 * @clr_enable: Clear enable.
1053 void (*attn_clr_enable
)(struct qed_dev
*cdev
, bool clr_enable
);
1056 * db_recovery_add(): add doorbell information to the doorbell
1057 * recovery mechanism.
1059 * @cdev: Qed dev pointer.
1060 * @db_addr: Doorbell address.
1061 * @db_data: Address of where db_data is stored.
1062 * @db_width: Doorbell is 32b or 64b.
1063 * @db_space: Doorbell recovery addresses are user or kernel space.
1067 int (*db_recovery_add
)(struct qed_dev
*cdev
,
1068 void __iomem
*db_addr
,
1070 enum qed_db_rec_width db_width
,
1071 enum qed_db_rec_space db_space
);
1074 * db_recovery_del(): remove doorbell information from the doorbell
1075 * recovery mechanism. db_data serves as key (db_addr is not unique).
1077 * @cdev: Qed dev pointer.
1078 * @db_addr: Doorbell address.
1079 * @db_data: Address where db_data is stored. Serves as key for the
1084 int (*db_recovery_del
)(struct qed_dev
*cdev
,
1085 void __iomem
*db_addr
, void *db_data
);
1088 * recovery_process(): Trigger a recovery process.
1090 * @cdev: Qed dev pointer.
1092 * Return: 0 on success, error otherwise.
1094 int (*recovery_process
)(struct qed_dev
*cdev
);
1097 * recovery_prolog(): Execute the prolog operations of a recovery process.
1099 * @cdev: Qed dev pointer.
1101 * Return: 0 on success, error otherwise.
1103 int (*recovery_prolog
)(struct qed_dev
*cdev
);
1106 * update_drv_state(): API to inform the change in the driver state.
1108 * @cdev: Qed dev pointer.
1113 int (*update_drv_state
)(struct qed_dev
*cdev
, bool active
);
1116 * update_mac(): API to inform the change in the mac address.
1118 * @cdev: Qed dev pointer.
1123 int (*update_mac
)(struct qed_dev
*cdev
, const u8
*mac
);
1126 * update_mtu(): API to inform the change in the mtu.
1128 * @cdev: Qed dev pointer.
1133 int (*update_mtu
)(struct qed_dev
*cdev
, u16 mtu
);
1136 * update_wol(): Update of changes in the WoL configuration.
1138 * @cdev: Qed dev pointer.
1139 * @enabled: true iff WoL should be enabled.
1143 int (*update_wol
) (struct qed_dev
*cdev
, bool enabled
);
1146 * read_module_eeprom(): Read EEPROM.
1148 * @cdev: Qed dev pointer.
1150 * @dev_addr: PHY device memory region.
1151 * @offset: offset into eeprom contents to be read.
1152 * @len: buffer length, i.e., max bytes to be read.
1156 int (*read_module_eeprom
)(struct qed_dev
*cdev
,
1157 char *buf
, u8 dev_addr
, u32 offset
, u32 len
);
1160 * get_affin_hwfn_idx(): Get affine HW function.
1162 * @cdev: Qed dev pointer.
1166 u8 (*get_affin_hwfn_idx
)(struct qed_dev
*cdev
);
1169 * read_nvm_cfg(): Read NVM config attribute value.
1171 * @cdev: Qed dev pointer.
1173 * @cmd: NVM CFG command id.
1174 * @entity_id: Entity id.
1178 int (*read_nvm_cfg
)(struct qed_dev
*cdev
, u8
**buf
, u32 cmd
,
1181 * read_nvm_cfg_len(): Read NVM config attribute value.
1183 * @cdev: Qed dev pointer.
1184 * @cmd: NVM CFG command id.
1186 * Return: config id length, 0 on error.
1188 int (*read_nvm_cfg_len
)(struct qed_dev
*cdev
, u32 cmd
);
1191 * set_grc_config(): Configure value for grc config id.
1193 * @cdev: Qed dev pointer.
1194 * @cfg_id: grc config id
1195 * @val: grc config value
1199 int (*set_grc_config
)(struct qed_dev
*cdev
, u32 cfg_id
, u32 val
);
1201 struct devlink
* (*devlink_register
)(struct qed_dev
*cdev
);
1203 void (*devlink_unregister
)(struct devlink
*devlink
);
1205 __printf(2, 3) void (*mfw_report
)(struct qed_dev
*cdev
, char *fmt
, ...);
1207 int (*get_sb_info
)(struct qed_dev
*cdev
, struct qed_sb_info
*sb
,
1208 u16 qid
, struct qed_sb_info_dbg
*sb_dbg
);
1210 int (*get_esl_status
)(struct qed_dev
*cdev
, bool *esl_active
);
1213 #define MASK_FIELD(_name, _value) \
1214 ((_value) &= (_name ## _MASK))
/* Build the in-register representation of a field: mask @_value down to
 * the field width and shift it into position.  @_value is parenthesized
 * so that compound expressions (e.g. a | b) are masked as a whole rather
 * than having the & bind only to the last operand.
 */
#define FIELD_VALUE(_name, _value) \
	(((_value) & _name ## _MASK) << _name ## _SHIFT)
1219 #define SET_FIELD(value, name, flag) \
1221 (value) &= ~(name ## _MASK << name ## _SHIFT); \
1222 (value) |= (((u64)flag) << (name ## _SHIFT)); \
/* Extract bit-field @name from @value: shift right by name##_SHIFT, then
 * apply name##_MASK (mask is the field's in-place width mask).
 */
1225 #define GET_FIELD(value, name) \
1226 (((value) >> (name ## _SHIFT)) & name ## _MASK)
/* MFW-register variant: field##_MASK is pre-shifted to the field's bit
 * position, so mask first and then shift right by field##_OFFSET.
 */
1228 #define GET_MFW_FIELD(name, field) \
1229 (((name) & (field ## _MASK)) >> (field ## _OFFSET))
1231 #define SET_MFW_FIELD(name, field, value) \
1233 (name) &= ~(field ## _MASK); \
1234 (name) |= (((value) << (field ## _OFFSET)) & (field ## _MASK));\
1237 #define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
1239 /* Debug print definitions */
1240 #define DP_ERR(cdev, fmt, ...) \
1242 pr_err("[%s:%d(%s)]" fmt, \
1243 __func__, __LINE__, \
1244 DP_NAME(cdev) ? DP_NAME(cdev) : "", \
1248 #define DP_NOTICE(cdev, fmt, ...) \
1250 if (unlikely((cdev)->dp_level <= QED_LEVEL_NOTICE)) { \
1251 pr_notice("[%s:%d(%s)]" fmt, \
1252 __func__, __LINE__, \
1253 DP_NAME(cdev) ? DP_NAME(cdev) : "", \
1259 #define DP_INFO(cdev, fmt, ...) \
1261 if (unlikely((cdev)->dp_level <= QED_LEVEL_INFO)) { \
1262 pr_notice("[%s:%d(%s)]" fmt, \
1263 __func__, __LINE__, \
1264 DP_NAME(cdev) ? DP_NAME(cdev) : "", \
1269 #define DP_VERBOSE(cdev, module, fmt, ...) \
1271 if (unlikely(((cdev)->dp_level <= QED_LEVEL_VERBOSE) && \
1272 ((cdev)->dp_module & module))) { \
1273 pr_notice("[%s:%d(%s)]" fmt, \
1274 __func__, __LINE__, \
1275 DP_NAME(cdev) ? DP_NAME(cdev) : "", \
1281 QED_LEVEL_VERBOSE
= 0x0,
1282 QED_LEVEL_INFO
= 0x1,
1283 QED_LEVEL_NOTICE
= 0x2,
1284 QED_LEVEL_ERR
= 0x3,
1287 #define QED_LOG_LEVEL_SHIFT (30)
1288 #define QED_LOG_VERBOSE_MASK (0x3fffffff)
1289 #define QED_LOG_INFO_MASK (0x40000000)
1290 #define QED_LOG_NOTICE_MASK (0x80000000)
1293 QED_MSG_SPQ
= 0x10000,
1294 QED_MSG_STATS
= 0x20000,
1295 QED_MSG_DCB
= 0x40000,
1296 QED_MSG_IOV
= 0x80000,
1297 QED_MSG_SP
= 0x100000,
1298 QED_MSG_STORAGE
= 0x200000,
1299 QED_MSG_CXT
= 0x800000,
1300 QED_MSG_LL2
= 0x1000000,
1301 QED_MSG_ILT
= 0x2000000,
1302 QED_MSG_RDMA
= 0x4000000,
1303 QED_MSG_DEBUG
= 0x8000000,
1304 /* to be added...up to 0x8000000 */
1313 struct qed_eth_stats_common
{
1314 u64 no_buff_discards
;
1315 u64 packet_too_big_discard
;
1323 u64 mftag_filter_discards
;
1324 u64 mac_filter_discards
;
1325 u64 gft_filter_drop
;
1332 u64 tx_err_drop_pkts
;
1333 u64 tpa_coalesced_pkts
;
1334 u64 tpa_coalesced_events
;
1336 u64 tpa_not_coalesced_pkts
;
1337 u64 tpa_coalesced_bytes
;
1340 u64 rx_64_byte_packets
;
1341 u64 rx_65_to_127_byte_packets
;
1342 u64 rx_128_to_255_byte_packets
;
1343 u64 rx_256_to_511_byte_packets
;
1344 u64 rx_512_to_1023_byte_packets
;
1345 u64 rx_1024_to_1518_byte_packets
;
1347 u64 rx_mac_crtl_frames
;
1348 u64 rx_pause_frames
;
1350 u64 rx_align_errors
;
1351 u64 rx_carrier_errors
;
1352 u64 rx_oversize_packets
;
1354 u64 rx_undersize_packets
;
1356 u64 tx_64_byte_packets
;
1357 u64 tx_65_to_127_byte_packets
;
1358 u64 tx_128_to_255_byte_packets
;
1359 u64 tx_256_to_511_byte_packets
;
1360 u64 tx_512_to_1023_byte_packets
;
1361 u64 tx_1024_to_1518_byte_packets
;
1362 u64 tx_pause_frames
;
1367 u64 rx_mac_uc_packets
;
1368 u64 rx_mac_mc_packets
;
1369 u64 rx_mac_bc_packets
;
1370 u64 rx_mac_frames_ok
;
1372 u64 tx_mac_uc_packets
;
1373 u64 tx_mac_mc_packets
;
1374 u64 tx_mac_bc_packets
;
1375 u64 tx_mac_ctrl_frames
;
1376 u64 link_change_count
;
1379 struct qed_eth_stats_bb
{
1380 u64 rx_1519_to_1522_byte_packets
;
1381 u64 rx_1519_to_2047_byte_packets
;
1382 u64 rx_2048_to_4095_byte_packets
;
1383 u64 rx_4096_to_9216_byte_packets
;
1384 u64 rx_9217_to_16383_byte_packets
;
1385 u64 tx_1519_to_2047_byte_packets
;
1386 u64 tx_2048_to_4095_byte_packets
;
1387 u64 tx_4096_to_9216_byte_packets
;
1388 u64 tx_9217_to_16383_byte_packets
;
1389 u64 tx_lpi_entry_count
;
1390 u64 tx_total_collisions
;
1393 struct qed_eth_stats_ah
{
1394 u64 rx_1519_to_max_byte_packets
;
1395 u64 tx_1519_to_max_byte_packets
;
1398 struct qed_eth_stats
{
1399 struct qed_eth_stats_common common
;
1402 struct qed_eth_stats_bb bb
;
1403 struct qed_eth_stats_ah ah
;
1407 #define QED_SB_IDX 0x0002
1410 #define TX_PI(tc) (RX_PI + 1 + tc)
1412 struct qed_sb_cnt_info
{
1413 /* Original, current, and free SBs for PF */
1418 /* Original, current and free SBS for child VFs */
1424 static inline u16
qed_sb_update_sb_idx(struct qed_sb_info
*sb_info
)
1429 prod
= le32_to_cpu(sb_info
->sb_virt
->prod_index
) &
1430 STATUS_BLOCK_PROD_INDEX_MASK
;
1431 if (sb_info
->sb_ack
!= prod
) {
1432 sb_info
->sb_ack
= prod
;
1441 * qed_sb_ack(): This function creates an update command for interrupts
1442 * that is written to the IGU.
1444 * @sb_info: This is the structure allocated and
1445 * initialized per status block. Assumption is
1446 * that it was initialized using qed_sb_init
1447 * @int_cmd: Enable/Disable/Nop
1448 * @upd_flg: Whether igu consumer should be updated.
1450 * Return: inline void.
1452 static inline void qed_sb_ack(struct qed_sb_info
*sb_info
,
1453 enum igu_int_cmd int_cmd
,
1458 igu_ack
= ((sb_info
->sb_ack
<< IGU_PROD_CONS_UPDATE_SB_INDEX_SHIFT
) |
1459 (upd_flg
<< IGU_PROD_CONS_UPDATE_UPDATE_FLAG_SHIFT
) |
1460 (int_cmd
<< IGU_PROD_CONS_UPDATE_ENABLE_INT_SHIFT
) |
1461 (IGU_SEG_ACCESS_REG
<<
1462 IGU_PROD_CONS_UPDATE_SEGMENT_ACCESS_SHIFT
));
1464 DIRECT_REG_WR(sb_info
->igu_addr
, igu_ack
);
1466 /* Both segments (interrupts & acks) are written to same place address;
1467 * Need to guarantee all commands will be received (in-order) by HW.
1472 static inline void __internal_ram_wr(void *p_hwfn
,
1480 for (i
= 0; i
< size
/ sizeof(*data
); i
++)
1481 DIRECT_REG_WR(&((u32 __iomem
*)addr
)[i
], data
[i
]);
1484 static inline void internal_ram_wr(void __iomem
*addr
,
1488 __internal_ram_wr(NULL
, addr
, size
, data
);
1494 QED_RSS_IPV4_TCP
= 0x4,
1495 QED_RSS_IPV6_TCP
= 0x8,
1496 QED_RSS_IPV4_UDP
= 0x10,
1497 QED_RSS_IPV6_UDP
= 0x20,
1500 #define QED_RSS_IND_TABLE_SIZE 128
1501 #define QED_RSS_KEY_SIZE 10 /* size in 32b chunks */