/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright 2008-2010 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 */

#ifndef _ENIC_H_
#define _ENIC_H_

#include "vnic_enet.h"
#include "vnic_intr.h"
#include "vnic_stats.h"

#include <linux/irq.h>
#define DRV_NAME		"enic"
#define DRV_DESCRIPTION		"Cisco VIC Ethernet NIC Driver"

#define ENIC_BARS_MAX		6

#define ENIC_WQ_MAX		256
#define ENIC_RQ_MAX		256

#define ENIC_WQ_NAPI_BUDGET	256

#define ENIC_AIC_LARGE_PKT_DIFF	3
struct enic_msix_entry {
	char devname[IFNAMSIZ + 8];
	irqreturn_t (*isr)(int, void *);
	cpumask_var_t affinity_mask;
};
/* Store only the lower range. Higher range is given by fw. */
struct enic_intr_mod_range {
	u32 small_pkt_range_start;
	u32 large_pkt_range_start;
};
struct enic_intr_mod_table {
	u32 rx_rate;
	u32 range_percent;
};
#define ENIC_MAX_LINK_SPEEDS		3
#define ENIC_LINK_SPEED_10G		10000
#define ENIC_LINK_SPEED_4G		4000
#define ENIC_LINK_40G_INDEX		2
#define ENIC_LINK_10G_INDEX		1
#define ENIC_LINK_4G_INDEX		0
#define ENIC_RX_COALESCE_RANGE_END	125
#define ENIC_AIC_TS_BREAK		100
struct enic_rx_coal {
	u32 small_pkt_range_start;
	u32 large_pkt_range_start;
	u32 range_end;
	u32 use_adaptive_rx_coalesce;
};
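/* Illustrative sketch only (not part of the driver): an adaptive scheme
 * built on the fields above would typically pick an interrupt coalescing
 * timer by scaling between a per-packet-size range start and the shared
 * upper bound ENIC_RX_COALESCE_RANGE_END according to the observed load.
 * The helper below is a hypothetical example of such an interpolation; its
 * name and the load_percent parameter are assumptions, not driver API, and
 * range_start is assumed not to exceed ENIC_RX_COALESCE_RANGE_END.
 */
static inline u32 enic_example_coal_usecs(u32 range_start, u32 load_percent)
{
	u32 span = ENIC_RX_COALESCE_RANGE_END - range_start;

	if (load_percent > 100)
		load_percent = 100;

	/* low load -> timer near range_start, high load -> near the range end */
	return range_start + (span * load_percent) / 100;
}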
#define ENIC_SRIOV_ENABLED		(1 << 0)

/* enic port profile set flags */
#define ENIC_PORT_REQUEST_APPLIED	(1 << 0)
#define ENIC_SET_REQUEST		(1 << 1)
#define ENIC_SET_NAME			(1 << 2)
#define ENIC_SET_INSTANCE		(1 << 3)
#define ENIC_SET_HOST			(1 << 4)
struct enic_port_profile {
	char name[PORT_PROFILE_MAX];
	u8 instance_uuid[PORT_UUID_MAX];
	u8 host_uuid[PORT_UUID_MAX];
	u8 mac_addr[ETH_ALEN];
};
/* enic_rfs_fltr_node - rfs filter node in hash table
 * @keys: IPv4 5 tuple
 * @flow_id: flow_id of clsf filter provided by kernel
 * @fltr_id: filter id of clsf filter returned by adaptor
 * @rq_id: desired rq index
 */
struct enic_rfs_fltr_node {
	struct flow_keys keys;
	struct hlist_node node;
};
/* enic_rfs_flw_tbl - rfs flow table
 * @max: Maximum number of filters vNIC supports
 * @free: Number of free filters available
 * @toclean: hash table index to clean next
 * @ht_head: hash table list head
 * @rfs_may_expire: timer function for enic_rps_may_expire_flow
 */
struct enic_rfs_flw_tbl {
#define ENIC_RFS_FLW_BITSHIFT	(10)
#define ENIC_RFS_FLW_MASK	((1 << ENIC_RFS_FLW_BITSHIFT) - 1)
	u16 toclean:ENIC_RFS_FLW_BITSHIFT;
	struct hlist_head ht_head[1 << ENIC_RFS_FLW_BITSHIFT];
	struct timer_list rfs_may_expire;
};
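/* Illustrative sketch only (hypothetical helper, not part of the driver):
 * with 1 << ENIC_RFS_FLW_BITSHIFT hash buckets, a filter node would be
 * inserted into, and looked up from, the bucket selected by masking the
 * flow hash with ENIC_RFS_FLW_MASK.
 */
static inline struct hlist_head *
enic_example_rfs_bucket(struct enic_rfs_flw_tbl *tbl, u32 flow_hash)
{
	return &tbl->ht_head[flow_hash & ENIC_RFS_FLW_MASK];
}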
struct vxlan_offload {
	u16 vxlan_udp_port_number;
};
struct enic_wq_stats {
	u64 packets;		/* pkts queued for Tx */
	u64 stopped;		/* Tx ring almost full, queue stopped */
	u64 wake;		/* Tx ring no longer full, queue woken up */
	u64 tso;		/* non-encap tso pkt */
	u64 encap_tso;		/* encap tso pkt */
	u64 encap_csum;		/* encap HW csum */
	u64 csum_partial;	/* skb->ip_summed = CHECKSUM_PARTIAL */
	u64 csum_none;		/* HW csum not required */
	u64 bytes;		/* bytes queued for Tx */
	u64 add_vlan;		/* HW adds vlan tag */
	u64 cq_work;		/* Tx completions processed */
	u64 cq_bytes;		/* Tx bytes processed */
	u64 null_pkt;		/* skb length <= 0 */
	u64 skb_linear_fail;	/* linearize failures */
	u64 desc_full_awake;	/* TX ring full while queue awake */
};
struct enic_rq_stats {
	u64 packets;			/* pkts received */
	u64 bytes;			/* bytes received */
	u64 l4_rss_hash;		/* hashed on l4 */
	u64 l3_rss_hash;		/* hashed on l3 */
	u64 csum_unnecessary;		/* HW verified csum */
	u64 csum_unnecessary_encap;	/* HW verified csum on encap packet */
	u64 vlan_stripped;		/* HW stripped vlan */
	u64 napi_complete;		/* napi complete intr reenabled */
	u64 napi_repoll;		/* napi poll again */
	u64 bad_fcs;			/* bad pkts */
	u64 pkt_truncated;		/* truncated pkts */
	u64 no_skb;			/* out of skbs */
	u64 desc_skip;			/* Rx pkt went into later buffer */
};
struct enic_wq {
	spinlock_t lock;		/* spinlock for wq */
	struct enic_wq_stats stats;
} ____cacheline_aligned;
struct enic_rq {
	struct enic_rq_stats stats;
} ____cacheline_aligned;
/* Per-instance private data structure */
struct enic {
	struct net_device *netdev;
	struct pci_dev *pdev;
	struct vnic_enet_config config;
	struct vnic_dev_bar bar[ENIC_BARS_MAX];
	struct vnic_dev *vdev;
	struct timer_list notify_timer;
	struct work_struct reset;
	struct work_struct tx_hang_reset;
	struct work_struct change_mtu_work;
	struct msix_entry *msix_entry;
	struct enic_msix_entry *msix;
	spinlock_t devcmd_lock;
	u8 mac_addr[ETH_ALEN];
	unsigned int priv_flags;
	unsigned int mc_count;
	unsigned int uc_count;
	struct enic_rx_coal rx_coalesce_setting;
	u32 rx_coalesce_usecs;
	u32 tx_coalesce_usecs;
#ifdef CONFIG_PCI_IOV
	u16 num_vfs;
#endif
	spinlock_t enic_api_lock;
	struct enic_port_profile *pp;

	unsigned int wq_avail;
	unsigned int wq_count;

	unsigned int rq_avail;
	unsigned int rq_count;
	struct vxlan_offload vxlan;
	struct napi_struct *napi;

	struct vnic_intr *intr;
	unsigned int intr_avail;
	unsigned int intr_count;
	u32 __iomem *legacy_pba;		/* memory-mapped */

	unsigned int cq_avail;
	unsigned int cq_count;
	struct enic_rfs_flw_tbl rfs_h;
	u8 rss_key[ENIC_RSS_LEN];
	struct vnic_gen_stats gen_stats;
};
static inline struct net_device *vnic_get_netdev(struct vnic_dev *vdev)
{
	struct enic *enic = vdev->priv;

	return enic->netdev;
}
/* wrapper functions for kernel log */
#define vdev_err(vdev, fmt, ...) \
	dev_err(&(vdev)->pdev->dev, fmt, ##__VA_ARGS__)
#define vdev_warn(vdev, fmt, ...) \
	dev_warn(&(vdev)->pdev->dev, fmt, ##__VA_ARGS__)
#define vdev_info(vdev, fmt, ...) \
	dev_info(&(vdev)->pdev->dev, fmt, ##__VA_ARGS__)

#define vdev_neterr(vdev, fmt, ...) \
	netdev_err(vnic_get_netdev(vdev), fmt, ##__VA_ARGS__)
#define vdev_netwarn(vdev, fmt, ...) \
	netdev_warn(vnic_get_netdev(vdev), fmt, ##__VA_ARGS__)
#define vdev_netinfo(vdev, fmt, ...) \
	netdev_info(vnic_get_netdev(vdev), fmt, ##__VA_ARGS__)
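/* Example (illustrative only): these wrappers let common vnic_dev code log
 * against either the PCI device or the net_device without knowing about
 * struct enic.  A call such as
 *
 *	vdev_err(vdev, "devcmd failed with error %d\n", err);
 *
 * resolves to dev_err() on the underlying PCI device, while the vdev_net*
 * variants resolve to the netdev_* helpers via vnic_get_netdev().
 */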
static inline struct device *enic_get_dev(struct enic *enic)
{
	return &(enic->pdev->dev);
}
static inline unsigned int enic_cq_rq(struct enic *enic, unsigned int rq)
{
	return rq;
}
static inline unsigned int enic_cq_wq(struct enic *enic, unsigned int wq)
{
	return enic->rq_count + wq;
}
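/* Illustrative note: taken together, enic_cq_rq() and enic_cq_wq() imply a
 * completion queue layout with all RQ CQs first, followed by all WQ CQs:
 *
 *	CQ index 0 .. rq_count - 1                    -> receive queues
 *	CQ index rq_count .. rq_count + wq_count - 1  -> work (Tx) queues
 */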
static inline unsigned int enic_msix_rq_intr(struct enic *enic,
					     unsigned int rq)
{
	return enic->cq[enic_cq_rq(enic, rq)].interrupt_offset;
}
static inline unsigned int enic_msix_wq_intr(struct enic *enic,
					     unsigned int wq)
{
	return enic->cq[enic_cq_wq(enic, wq)].interrupt_offset;
}
/* MSIX interrupts are organized as the error interrupt, then the notify
 * interrupt followed by all the I/O interrupts.  The error interrupt needs
 * to fit in 7 bits due to hardware constraints.
 */
#define ENIC_MSIX_RESERVED_INTR		2
#define ENIC_MSIX_ERR_INTR		0
#define ENIC_MSIX_NOTIFY_INTR		1
#define ENIC_MSIX_IO_INTR_BASE		ENIC_MSIX_RESERVED_INTR
#define ENIC_MSIX_MIN_INTR		(ENIC_MSIX_RESERVED_INTR + 2)

#define ENIC_LEGACY_IO_INTR		0
#define ENIC_LEGACY_ERR_INTR		1
#define ENIC_LEGACY_NOTIFY_INTR		2
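/* Illustrative mapping implied by the defines above: in MSI-X mode,
 * vector 0 is the error interrupt, vector 1 the notify interrupt, and the
 * per-queue I/O interrupts start at ENIC_MSIX_IO_INTR_BASE; in legacy INTx
 * mode the three fixed indices are I/O (0), error (1) and notify (2).
 */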
static inline unsigned int enic_msix_err_intr(struct enic *enic)
{
	return ENIC_MSIX_ERR_INTR;
}
static inline unsigned int enic_msix_notify_intr(struct enic *enic)
{
	return ENIC_MSIX_NOTIFY_INTR;
}
static inline bool enic_is_err_intr(struct enic *enic, int intr)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		return intr == ENIC_LEGACY_ERR_INTR;
	case VNIC_DEV_INTR_MODE_MSIX:
		return intr == enic_msix_err_intr(enic);
	case VNIC_DEV_INTR_MODE_MSI:
	default:
		return false;
	}
}
static inline bool enic_is_notify_intr(struct enic *enic, int intr)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		return intr == ENIC_LEGACY_NOTIFY_INTR;
	case VNIC_DEV_INTR_MODE_MSIX:
		return intr == enic_msix_notify_intr(enic);
	case VNIC_DEV_INTR_MODE_MSI:
	default:
		return false;
	}
}
static inline int enic_dma_map_check(struct enic *enic, dma_addr_t dma_addr)
{
	if (unlikely(dma_mapping_error(&enic->pdev->dev, dma_addr))) {
		net_warn_ratelimited("%s: PCI dma mapping failed!\n",
				     enic->netdev->name);
		enic->gen_stats.dma_map_error++;

		return -ENOMEM;
	}

	return 0;
}
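/* Illustrative usage (sketch only): a caller maps a buffer with the DMA API
 * and then lets enic_dma_map_check() account for and report a failed
 * mapping before posting the descriptor, e.g.:
 *
 *	dma_addr = dma_map_single(&enic->pdev->dev, skb->data, len,
 *				  DMA_TO_DEVICE);
 *	if (enic_dma_map_check(enic, dma_addr))
 *		return -ENOMEM;
 */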
void enic_reset_addr_lists(struct enic *enic);
int enic_sriov_enabled(struct enic *enic);
int enic_is_valid_vf(struct enic *enic, int vf);
int enic_is_dynamic(struct enic *enic);
void enic_set_ethtool_ops(struct net_device *netdev);
int __enic_set_rsskey(struct enic *enic);
#endif /* _ENIC_H_ */