/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */
#ifndef _QEDE_H_
#define _QEDE_H_
#include <linux/compiler.h>
#include <linux/version.h>
#include <linux/workqueue.h>
#include <linux/netdevice.h>
#include <linux/interrupt.h>
#include <linux/bitmap.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/bpf.h>
#include <linux/qed/qede_rdma.h>
#ifdef CONFIG_RFS_ACCEL
#include <linux/cpu_rmap.h>
#endif
#include <linux/qed/common_hsi.h>
#include <linux/qed/eth_common.h>
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_chain.h>
#include <linux/qed/qed_eth_if.h>

#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#define QEDE_MAJOR_VERSION		8
#define QEDE_MINOR_VERSION		37
#define QEDE_REVISION_VERSION		0
#define QEDE_ENGINEERING_VERSION	20
#define DRV_MODULE_VERSION	__stringify(QEDE_MAJOR_VERSION) "."	\
				__stringify(QEDE_MINOR_VERSION) "."	\
				__stringify(QEDE_REVISION_VERSION) "."	\
				__stringify(QEDE_ENGINEERING_VERSION)
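
/* Illustrative note: with the values above, DRV_MODULE_VERSION expands
 * to the string literal "8.37.0.20" (each component stringified and
 * joined with dots).
 */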
#define DRV_MODULE_SYM		qede
struct qede_stats_common {
	u64 packet_too_big_discard;
	u64 mftag_filter_discards;
	u64 mac_filter_discards;
	u64 coalesced_aborts_num;
	u64 non_coalesced_pkts;
	u64 link_change_count;
	u64 rx_64_byte_packets;
	u64 rx_65_to_127_byte_packets;
	u64 rx_128_to_255_byte_packets;
	u64 rx_256_to_511_byte_packets;
	u64 rx_512_to_1023_byte_packets;
	u64 rx_1024_to_1518_byte_packets;
	u64 rx_mac_crtl_frames;
	u64 rx_carrier_errors;
	u64 rx_oversize_packets;
	u64 rx_undersize_packets;
	u64 tx_64_byte_packets;
	u64 tx_65_to_127_byte_packets;
	u64 tx_128_to_255_byte_packets;
	u64 tx_256_to_511_byte_packets;
	u64 tx_512_to_1023_byte_packets;
	u64 tx_1024_to_1518_byte_packets;
	u64 tx_mac_ctrl_frames;
};
struct qede_stats_bb {
	u64 rx_1519_to_1522_byte_packets;
	u64 rx_1519_to_2047_byte_packets;
	u64 rx_2048_to_4095_byte_packets;
	u64 rx_4096_to_9216_byte_packets;
	u64 rx_9217_to_16383_byte_packets;
	u64 tx_1519_to_2047_byte_packets;
	u64 tx_2048_to_4095_byte_packets;
	u64 tx_4096_to_9216_byte_packets;
	u64 tx_9217_to_16383_byte_packets;
	u64 tx_lpi_entry_count;
	u64 tx_total_collisions;
};
struct qede_stats_ah {
	u64 rx_1519_to_max_byte_packets;
	u64 tx_1519_to_max_byte_packets;
};

struct qede_stats {
	struct qede_stats_common common;

	union {
		struct qede_stats_bb bb;
		struct qede_stats_ah ah;
	};
};
struct qede_vlan {
	struct list_head list;
	u16 vid;
	bool configured;
};
struct qede_rdma_dev {
	struct qedr_dev *qedr_dev;
	struct list_head entry;
	struct list_head rdma_event_list;
	struct workqueue_struct *rdma_wq;
	struct completion event_comp;
};
#define QEDE_RFS_MAX_FLTR	256
enum qede_flags_bit {
	QEDE_FLAGS_IS_VF = 0,
	QEDE_FLAGS_LINK_REQUESTED,
	QEDE_FLAGS_PTP_TX_IN_PRORGESS,
	QEDE_FLAGS_TX_TIMESTAMPING_EN,
};
#define QEDE_DUMP_MAX_ARGS	4
enum qede_dump_cmd {
	QEDE_DUMP_CMD_NONE = 0,
	QEDE_DUMP_CMD_NVM_CFG,
	QEDE_DUMP_CMD_GRCDUMP,
	QEDE_DUMP_CMD_MAX
};
struct qede_dump_info {
	enum qede_dump_cmd cmd;
	u8 num_args;
	u32 args[QEDE_DUMP_MAX_ARGS];
};
struct qede_dev {
	struct qed_dev *cdev;
	struct net_device *ndev;
	struct pci_dev *pdev;
	struct devlink *devlink;

	unsigned long flags;
#define IS_VF(edev)	test_bit(QEDE_FLAGS_IS_VF, \
				 &(edev)->flags)
	const struct qed_eth_ops *ops;
	struct qede_ptp *ptp;

	struct qed_dev_eth_info dev_info;
#define QEDE_MAX_RSS_CNT(edev)	((edev)->dev_info.num_queues)
#define QEDE_MAX_TSS_CNT(edev)	((edev)->dev_info.num_queues)
#define QEDE_IS_BB(edev) \
	((edev)->dev_info.common.dev_type == QED_DEV_TYPE_BB)
#define QEDE_IS_AH(edev) \
	((edev)->dev_info.common.dev_type == QED_DEV_TYPE_AH)
	struct qede_fastpath *fp_array;

	u16 total_xdp_queues;
#define QEDE_QUEUE_CNT(edev)	((edev)->num_queues)
#define QEDE_RSS_COUNT(edev)	((edev)->num_queues - (edev)->fp_num_tx)
#define QEDE_RX_QUEUE_IDX(edev, i)	(i)
#define QEDE_TSS_COUNT(edev)	((edev)->num_queues - (edev)->fp_num_rx)
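/* Example (illustrative values): with num_queues == 8, fp_num_tx == 2
 * and fp_num_rx == 2, QEDE_RSS_COUNT() yields 8 - 2 == 6 RX-capable
 * queues and QEDE_TSS_COUNT() yields 8 - 2 == 6 TX-capable queues.
 */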
	struct qed_int_info int_info;

	/* Smaller private variant of the RTNL lock */
	struct mutex qede_lock;
	u32 state; /* Protected by qede_lock */
	/* L2 header size + 2*VLANs (8 bytes) + LLC SNAP (8 bytes) */
#define ETH_OVERHEAD	(ETH_HLEN + 8 + 8)
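/* Illustrative arithmetic: ETH_HLEN is 14, so ETH_OVERHEAD is
 * 14 + 8 + 8 = 30 bytes on top of the L3 payload.
 */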
	/* Max supported alignment is 256 (8 shift)
	 * minimal alignment shift 6 is optimal for 57xxx HW performance
	 */
#define QEDE_RX_ALIGN_SHIFT	max(6, min(8, L1_CACHE_SHIFT))
	/* We assume build_skb() uses sizeof(struct skb_shared_info) bytes
	 * at the end of skb->data, to avoid wasting a full cache line.
	 * This reduces memory use (skb->truesize).
	 */
#define QEDE_FW_RX_ALIGN_END					\
	max_t(u64, 1UL << QEDE_RX_ALIGN_SHIFT,			\
	      SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
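/* Illustrative arithmetic: with L1_CACHE_SHIFT == 6 (64-byte cache
 * lines), the shift stays clamped to 6, so the FW RX alignment reserve
 * is max(64, SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) bytes at
 * the end of each buffer.
 */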
	struct qede_stats stats;

	/* Bitfield to track initialized RSS params */
	u32 rss_params_inited;
#define QEDE_RSS_INDIR_INITED	BIT(0)
#define QEDE_RSS_KEY_INITED	BIT(1)
#define QEDE_RSS_CAPS_INITED	BIT(2)
	u16 rss_ind_table[128];

	/* Both must be a power of two */
	u16 q_num_rx_buffers;
	u16 q_num_tx_buffers;
	struct list_head vlan_list;
	u16 configured_vlans;
	u16 non_configured_vlans;
	bool accept_any_vlan;

	struct delayed_work sp_task;
	unsigned long sp_flags;
	struct qede_arfs *arfs;
	struct qede_rdma_dev rdma_info;

	struct bpf_prog *xdp_prog;

	enum qed_hw_err_type last_err_type;
	unsigned long err_flags;
#define QEDE_ERR_IS_HANDLED	31
#define QEDE_ERR_ATTN_CLR_EN	0
#define QEDE_ERR_GET_DBG_INFO	1
#define QEDE_ERR_IS_RECOVERABLE	2
#define QEDE_ERR_WARN		3
	struct qede_dump_info dump_info;
};
#define HILO_U64(hi, lo)	((((u64)(hi)) << 32) + (lo))
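
/* Example (illustrative): HILO_U64(0x1, 0x2) == 0x100000002ULL, i.e.
 * the FW's split hi/lo 32-bit words recombined into one 64-bit value.
 */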
#define MAX_NUM_PRI	8
/* The driver supports the new build_skb() API:
 * RX ring buffer contains pointer to kmalloc() data only,
 * skb are built only after the frame was DMA-ed.
 */
struct sw_rx_data {
	struct page *data;
	dma_addr_t mapping;
	unsigned int page_offset;
};
enum qede_agg_state {
	QEDE_AGG_STATE_NONE  = 0,
	QEDE_AGG_STATE_START = 1,
	QEDE_AGG_STATE_ERROR = 2
};
struct qede_agg_info {
	/* rx_buf is a data buffer that can be placed / consumed from rx bd
	 * chain. It has two purposes: We will preallocate the data buffer
	 * for each aggregation when we open the interface and will place this
	 * buffer on the rx-bd-ring when we receive TPA_START. We don't want
	 * to be in a state where allocation fails, as we can't reuse the
	 * consumer buffer in the rx-chain since FW may still be writing to it
	 * (since header needs to be modified for TPA).
	 * The second purpose is to keep a pointer to the bd buffer during
	 * aggregation.
	 */
	struct sw_rx_data buffer;

	/* We need some structs from the start cookie until termination */
	u16 vlan_tag;
};
struct qede_rx_queue {
	void __iomem *hw_rxq_prod_addr;

	/* Required for the allocation of replacement buffers */
	struct device *dev;

	struct bpf_prog *xdp_prog;

	/* Used once per each NAPI run */
	u16 num_rx_buffers;

	struct sw_rx_data *sw_rx_ring;
	struct qed_chain rx_bd_ring;
	struct qed_chain rx_comp_ring ____cacheline_aligned;

	struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];

	/* Used once per each NAPI run */
	u64 rcv_pkts;

	struct xdp_rxq_info xdp_rxq;
};
union db_prod {
	struct eth_db_data data;
	u32 raw;
};

struct sw_tx_bd {
	struct sk_buff *skb;
	u8 flags;
/* Set on the first BD descriptor when there is a split BD */
#define QEDE_TSO_SPLIT_BD	BIT(0)
};

struct sw_tx_xdp {
	struct page *page;
	dma_addr_t mapping;
	struct xdp_frame *xdpf;
};
struct qede_tx_queue {
	u16 num_tx_buffers; /* Slowpath only */

	u64 tx_mem_alloc_err;

	/* Needed for the mapping of packets */
	struct device *dev;

	void __iomem *doorbell_addr;
	union db_prod tx_db;

	/* Spinlock for XDP queues in case of XDP_REDIRECT */
	spinlock_t xdp_tx_lock;

	int index; /* Slowpath only */
#define QEDE_TXQ_XDP_TO_IDX(edev, txq)	((txq)->index - \
					 QEDE_MAX_TSS_CNT(edev))
#define QEDE_TXQ_IDX_TO_XDP(edev, idx)	((idx) + QEDE_MAX_TSS_CNT(edev))
#define QEDE_NDEV_TXQ_ID_TO_FP_ID(edev, idx)	((edev)->fp_num_rx + \
						 ((idx) % QEDE_TSS_COUNT(edev)))
#define QEDE_NDEV_TXQ_ID_TO_TXQ_COS(edev, idx)	((idx) / QEDE_TSS_COUNT(edev))
#define QEDE_TXQ_TO_NDEV_TXQ_ID(edev, txq)	((QEDE_TSS_COUNT(edev) * \
						 (txq)->cos) + (txq)->index)
#define QEDE_NDEV_TXQ_ID_TO_TXQ(edev, idx)				\
	(&((edev)->fp_array[QEDE_NDEV_TXQ_ID_TO_FP_ID(edev, idx)].txq	\
	[QEDE_NDEV_TXQ_ID_TO_TXQ_COS(edev, idx)]))
#define QEDE_FP_TC0_TXQ(fp)	(&((fp)->txq[0]))
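/* Example (illustrative values): with QEDE_TSS_COUNT(edev) == 4 and
 * fp_num_rx == 2, ndev txq id 5 maps to fastpath 2 + (5 % 4) == 3 and
 * traffic class (cos) 5 / 4 == 1; the inverse mapping for the txq with
 * cos 1 and index 1 gives back 4 * 1 + 1 == 5.
 */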
	/* Regular Tx requires skb + metadata for release purpose,
	 * while XDP requires the pages and the mapped address.
	 */
	union {
		struct sw_tx_bd *skbs;
		struct sw_tx_xdp *xdp;
	};

	struct qed_chain tx_pbl;

	/* Slowpath; Should be kept in end [unless missing padding] */
	void *handle;
	u16 cos;
	u16 ndev_txq_id;
};
#define BD_UNMAP_ADDR(bd)	HILO_U64(le32_to_cpu((bd)->addr.hi), \
					 le32_to_cpu((bd)->addr.lo))
#define BD_SET_UNMAP_ADDR_LEN(bd, maddr, len)				\
	do {								\
		(bd)->addr.hi = cpu_to_le32(upper_32_bits(maddr));	\
		(bd)->addr.lo = cpu_to_le32(lower_32_bits(maddr));	\
		(bd)->nbytes = cpu_to_le16(len);			\
	} while (0)
#define BD_UNMAP_LEN(bd)	(le16_to_cpu((bd)->nbytes))
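
/* Illustrative use (a sketch, not from this file): a TX BD is typically
 * filled right after DMA mapping and torn down from the same descriptor:
 *   BD_SET_UNMAP_ADDR_LEN(bd, mapping, skb_headlen(skb));
 *   ...
 *   dma_unmap_single(dev, BD_UNMAP_ADDR(bd), BD_UNMAP_LEN(bd),
 *                    DMA_TO_DEVICE);
 */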
struct qede_fastpath {
	struct qede_dev *edev;

	u8 type;
#define QEDE_FASTPATH_TX	BIT(0)
#define QEDE_FASTPATH_RX	BIT(1)
#define QEDE_FASTPATH_XDP	BIT(2)
#define QEDE_FASTPATH_COMBINED	(QEDE_FASTPATH_TX | QEDE_FASTPATH_RX)

	u8 xdp_xmit;
#define QEDE_XDP_TX		BIT(0)
#define QEDE_XDP_REDIRECT	BIT(1)
	struct napi_struct napi;
	struct qed_sb_info *sb_info;
	struct qede_rx_queue *rxq;
	struct qede_tx_queue *txq;
	struct qede_tx_queue *xdp_tx;

	char name[IFNAMSIZ + 8];
};
/* Debug print definitions */
#define DP_NAME(edev)	netdev_name((edev)->ndev)
#define XMIT_L4_CSUM		BIT(0)
#define XMIT_LSO		BIT(1)
#define XMIT_ENC		BIT(2)
#define XMIT_ENC_GSO_L4_CSUM	BIT(3)

#define QEDE_CSUM_ERROR			BIT(0)
#define QEDE_CSUM_UNNECESSARY		BIT(1)
#define QEDE_TUNN_CSUM_UNNECESSARY	BIT(2)
#define QEDE_SP_RECOVERY	0
#define QEDE_SP_RX_MODE		1
#define QEDE_SP_RSVD1		2
#define QEDE_SP_RSVD2		3
#define QEDE_SP_HW_ERR		4
#define QEDE_SP_ARFS_CONFIG	5
#define QEDE_SP_AER		7
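
/* Illustrative use (a sketch, not from this file): these bits are set
 * in edev->sp_flags and serviced from the sp_task delayed work, e.g.:
 *   set_bit(QEDE_SP_RX_MODE, &edev->sp_flags);
 *   schedule_delayed_work(&edev->sp_task, 0);
 */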
#ifdef CONFIG_RFS_ACCEL
int qede_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
		       u16 rxq_index, u32 flow_id);
#define QEDE_SP_TASK_POLL_DELAY	(5 * HZ)
#endif
void qede_process_arfs_filters(struct qede_dev *edev, bool free_fltr);
void qede_poll_for_freeing_arfs_filters(struct qede_dev *edev);
void qede_arfs_filter_op(void *dev, void *filter, u8 fw_rc);
void qede_free_arfs(struct qede_dev *edev);
int qede_alloc_arfs(struct qede_dev *edev);
int qede_add_cls_rule(struct qede_dev *edev, struct ethtool_rxnfc *info);
int qede_delete_flow_filter(struct qede_dev *edev, u64 cookie);
int qede_get_cls_rule_entry(struct qede_dev *edev, struct ethtool_rxnfc *cmd);
int qede_get_cls_rule_all(struct qede_dev *edev, struct ethtool_rxnfc *info,
			  u32 *rule_locs);
int qede_get_arfs_filter_count(struct qede_dev *edev);
struct qede_reload_args {
	void (*func)(struct qede_dev *edev, struct qede_reload_args *args);
	union {
		netdev_features_t features;
		struct bpf_prog *new_prog;
		u16 mtu;
	} u;
};
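
/* Illustrative use (a sketch, not from this file): callers bundle the
 * reconfiguration into args and let qede_reload() restart the datapath:
 *   struct qede_reload_args args;
 *
 *   args.func = &qede_xdp_reload_func;   // hypothetical callback name
 *   args.u.new_prog = prog;
 *   qede_reload(edev, &args, false);
 */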
/* Datapath functions definition */
netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
int qede_xdp_transmit(struct net_device *dev, int n_frames,
		      struct xdp_frame **frames, u32 flags);
u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
		      struct net_device *sb_dev);
netdev_features_t qede_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features);
int qede_alloc_rx_buffer(struct qede_rx_queue *rxq, bool allow_lazy);
int qede_free_tx_pkt(struct qede_dev *edev,
		     struct qede_tx_queue *txq, int *len);
int qede_poll(struct napi_struct *napi, int budget);
irqreturn_t qede_msix_fp_int(int irq, void *fp_cookie);
/* Filtering function definitions */
void qede_force_mac(void *dev, u8 *mac, bool forced);
void qede_udp_ports_update(void *dev, u16 vxlan_port, u16 geneve_port);
int qede_set_mac_addr(struct net_device *ndev, void *p);

int qede_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid);
int qede_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid);
void qede_vlan_mark_nonconfigured(struct qede_dev *edev);
int qede_configure_vlan_filters(struct qede_dev *edev);
netdev_features_t qede_fix_features(struct net_device *dev,
				    netdev_features_t features);
int qede_set_features(struct net_device *dev, netdev_features_t features);
void qede_set_rx_mode(struct net_device *ndev);
void qede_config_rx_mode(struct net_device *ndev);
void qede_fill_rss_params(struct qede_dev *edev,
			  struct qed_update_vport_rss_params *rss, u8 *update);

void qede_udp_tunnel_add(struct net_device *dev, struct udp_tunnel_info *ti);
void qede_udp_tunnel_del(struct net_device *dev, struct udp_tunnel_info *ti);

int qede_xdp(struct net_device *dev, struct netdev_bpf *xdp);
void qede_set_dcbnl_ops(struct net_device *ndev);
void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level);
void qede_set_ethtool_ops(struct net_device *netdev);
void qede_set_udp_tunnels(struct qede_dev *edev);
void qede_reload(struct qede_dev *edev,
		 struct qede_reload_args *args, bool is_locked);
int qede_change_mtu(struct net_device *dev, int new_mtu);
void qede_fill_by_demand_stats(struct qede_dev *edev);
void __qede_lock(struct qede_dev *edev);
void __qede_unlock(struct qede_dev *edev);
bool qede_has_rx_work(struct qede_rx_queue *rxq);
int qede_txq_has_work(struct qede_tx_queue *txq);
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count);
void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
int qede_add_tc_flower_fltr(struct qede_dev *edev, __be16 proto,
			    struct flow_cls_offload *f);
void qede_forced_speed_maps_init(void);
#define RX_RING_SIZE_POW	13
#define RX_RING_SIZE		((u16)BIT(RX_RING_SIZE_POW))
#define NUM_RX_BDS_MAX		(RX_RING_SIZE - 1)
#define NUM_RX_BDS_MIN		128
#define NUM_RX_BDS_KDUMP_MIN	63
#define NUM_RX_BDS_DEF		((u16)BIT(10) - 1)

#define TX_RING_SIZE_POW	13
#define TX_RING_SIZE		((u16)BIT(TX_RING_SIZE_POW))
#define NUM_TX_BDS_MAX		(TX_RING_SIZE - 1)
#define NUM_TX_BDS_MIN		128
#define NUM_TX_BDS_KDUMP_MIN	63
#define NUM_TX_BDS_DEF		NUM_TX_BDS_MAX
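
/* Illustrative arithmetic: RX_RING_SIZE and TX_RING_SIZE are both
 * BIT(13) == 8192 entries, so NUM_RX_BDS_MAX/NUM_TX_BDS_MAX are 8191,
 * and the RX default NUM_RX_BDS_DEF is BIT(10) - 1 == 1023 descriptors.
 */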
#define QEDE_MIN_PKT_LEN		64
#define QEDE_RX_HDR_SIZE		256
#define QEDE_MAX_JUMBO_PACKET_SIZE	9600
#define for_each_queue(i) for (i = 0; i < edev->num_queues; i++)
#define for_each_cos_in_txq(edev, var) \
	for ((var) = 0; (var) < (edev)->dev_info.num_tc; (var)++)
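
/* Illustrative use (a sketch; assumes a local `edev` in scope, since
 * for_each_queue() expands it unhygienically):
 *   int i;
 *
 *   for_each_queue(i)
 *           qede_handle_queue(&edev->fp_array[i]);  // hypothetical helper
 */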
#endif /* _QEDE_H_ */