/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2016 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
#ifndef _IXGBE_H_
#define _IXGBE_H_

#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/cpumask.h>
#include <linux/aer.h>
#include <linux/if_vlan.h>
#include <linux/jiffies.h>

#include <linux/timecounter.h>
#include <linux/net_tstamp.h>
#include <linux/ptp_clock_kernel.h>
#include "ixgbe_type.h"
#include "ixgbe_common.h"
#include "ixgbe_dcb.h"
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
#define IXGBE_FCOE
#include "ixgbe_fcoe.h"
#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */
#ifdef CONFIG_IXGBE_DCA
#include <linux/dca.h>
#endif

#include <net/busy_poll.h>

#ifdef CONFIG_NET_RX_BUSY_POLL
#define BP_EXTENDED_STATS
#endif

/* common prefix used by pr_<> macros */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
/* TX/RX descriptor defines */
#define IXGBE_DEFAULT_TXD		    512
#define IXGBE_DEFAULT_TX_WORK		    256
#define IXGBE_MAX_TXD			   4096
#define IXGBE_MIN_TXD			     64

#if (PAGE_SIZE < 8192)
#define IXGBE_DEFAULT_RXD		    512
#else
#define IXGBE_DEFAULT_RXD		    128
#endif
#define IXGBE_MAX_RXD			   4096
#define IXGBE_MIN_RXD			     64

#define IXGBE_ETH_P_LLDP		 0x88CC
#define IXGBE_MIN_FCRTL			0x40
#define IXGBE_MAX_FCRTL			0x7FF80
#define IXGBE_MIN_FCRTH			0x600
#define IXGBE_MAX_FCRTH			0x7FFF0
#define IXGBE_DEFAULT_FCPAUSE		0xFFFF
#define IXGBE_MIN_FCPAUSE		0
#define IXGBE_MAX_FCPAUSE		0xFFFF

/* Supported Rx Buffer Sizes */
#define IXGBE_RXBUFFER_256	 256  /* Used for skb receive header */
#define IXGBE_RXBUFFER_2K	2048
#define IXGBE_RXBUFFER_3K	3072
#define IXGBE_RXBUFFER_4K	4096
#define IXGBE_MAX_RXBUFFER     16384  /* largest size for a single descriptor */
/*
 * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 64 more, and skb_shared_info adds an additional 320 bytes more,
 * this adds up to 448 bytes of extra data.
 *
 * Since netdev_alloc_skb now allocates a page fragment we can use a value
 * of 256 and the resultant skb will have a truesize of 960 or less.
 */
#define IXGBE_RX_HDR_SIZE	IXGBE_RXBUFFER_256

/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define IXGBE_RX_BUFFER_WRITE	16	/* Must be power of 2 */
enum ixgbe_tx_flags {
	IXGBE_TX_FLAGS_HW_VLAN	= 0x01,
	IXGBE_TX_FLAGS_TSO	= 0x02,
	IXGBE_TX_FLAGS_TSTAMP	= 0x04,

	IXGBE_TX_FLAGS_CC	= 0x08,
	IXGBE_TX_FLAGS_IPV4	= 0x10,
	IXGBE_TX_FLAGS_CSUM	= 0x20,

	/* software defined flags */
	IXGBE_TX_FLAGS_SW_VLAN	= 0x40,
	IXGBE_TX_FLAGS_FCOE	= 0x80,
};

#define IXGBE_TX_FLAGS_VLAN_MASK	0xffff0000
#define IXGBE_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT	29
#define IXGBE_TX_FLAGS_VLAN_SHIFT	16
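
/*
 * Illustrative sketch (not in the original header): the 802.1Q tag lives in
 * the upper 16 bits of tx_flags, so the 3-bit priority can be recovered as
 *
 *	prio = (tx_flags & IXGBE_TX_FLAGS_VLAN_PRIO_MASK) >>
 *	       IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT;
 *
 * e.g. tx_flags = 0xa0010000 carries VLAN ID 1 with priority 5.
 */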
#define IXGBE_MAX_VF_MC_ENTRIES		30
#define IXGBE_MAX_VF_FUNCTIONS		64
#define IXGBE_MAX_VFTA_ENTRIES		128
#define MAX_EMULATION_MAC_ADDRS		16
#define IXGBE_MAX_PF_MACVLANS		15
#define VMDQ_P(p)	((p) + adapter->ring_feature[RING_F_VMDQ].offset)
#define IXGBE_82599_VF_DEVICE_ID	0x10ED
#define IXGBE_X540_VF_DEVICE_ID		0x1515
struct vf_data_storage {
	struct pci_dev *vfdev;
	unsigned char vf_mac_addresses[ETH_ALEN];
	u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES];
	u16 num_vf_mc_hashes;
	u16 pf_vlan; /* When set, guest VLAN config not allowed. */
	bool rss_query_enabled;
};

enum ixgbevf_xcast_modes {
	IXGBEVF_XCAST_MODE_NONE = 0,
	IXGBEVF_XCAST_MODE_MULTI,
	IXGBEVF_XCAST_MODE_ALLMULTI,
};

struct vf_macvlans {
	u8 vf_macvlan[ETH_ALEN];
};
#define IXGBE_MAX_TXD_PWR	14
#define IXGBE_MAX_DATA_PER_TXD	(1u << IXGBE_MAX_TXD_PWR)

/* Tx Descriptors needed, worst case */
#define TXD_USE_COUNT(S)	DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD)
#define DESC_NEEDED	(MAX_SKB_FRAGS + 4)
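
/*
 * Worked example (illustrative, not part of the original header): with
 * IXGBE_MAX_DATA_PER_TXD = 1 << 14 = 16384 bytes, a single 60000-byte
 * fragment costs TXD_USE_COUNT(60000) = DIV_ROUND_UP(60000, 16384) = 4
 * data descriptors, while DESC_NEEDED (MAX_SKB_FRAGS + 4) is the headroom
 * the transmit path checks for before queueing another frame.
 */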
/* wrapper around a pointer to a socket buffer,
 * so a DMA handle can be stored along with the buffer */
struct ixgbe_tx_buffer {
	union ixgbe_adv_tx_desc *next_to_watch;
	unsigned long time_stamp;
	unsigned int bytecount;
	unsigned short gso_segs;
	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
};

struct ixgbe_rx_buffer {
	unsigned int page_offset;
};

struct ixgbe_queue_stats {
	u64 packets;
	u64 bytes;
#ifdef BP_EXTENDED_STATS
	u64 yields;
#endif  /* BP_EXTENDED_STATS */
};

struct ixgbe_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
};

struct ixgbe_rx_queue_stats {
	u64 alloc_rx_page_failed;
	u64 alloc_rx_buff_failed;
};

#define IXGBE_TS_HDR_LEN 8
enum ixgbe_ring_state_t {
	__IXGBE_TX_FDIR_INIT_DONE,
	__IXGBE_TX_XPS_INIT_DONE,
	__IXGBE_TX_DETECT_HANG,
	__IXGBE_HANG_CHECK_ARMED,
	__IXGBE_RX_RSC_ENABLED,
	__IXGBE_RX_CSUM_UDP_ZERO_ERR,
	__IXGBE_RX_FCOE,
};

struct ixgbe_fwd_adapter {
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	struct net_device *netdev;
	struct ixgbe_adapter *real_adapter;
	unsigned int tx_base_queue;
	unsigned int rx_base_queue;
};
#define check_for_tx_hang(ring) \
	test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define set_check_for_tx_hang(ring) \
	set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define clear_check_for_tx_hang(ring) \
	clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
#define ring_is_rsc_enabled(ring) \
	test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
#define set_ring_rsc_enabled(ring) \
	set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
#define clear_ring_rsc_enabled(ring) \
	clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
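
/*
 * Usage sketch (illustrative only, not part of the original header): the
 * Tx clean path arms the hang check and a later watchdog pass tests it,
 * roughly
 *
 *	set_check_for_tx_hang(tx_ring);
 *	...
 *	if (check_for_tx_hang(tx_ring) && tx_seems_stuck)
 *		request_reset();
 *
 * tx_seems_stuck and request_reset() are placeholders, not driver symbols.
 */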
struct ixgbe_ring {
	struct ixgbe_ring *next;	/* pointer to next ring in q_vector */
	struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */
	struct net_device *netdev;	/* netdev ring belongs to */
	struct device *dev;		/* device for DMA mapping */
	struct ixgbe_fwd_adapter *l2_accel_priv;
	void *desc;			/* descriptor ring memory */
	struct ixgbe_tx_buffer *tx_buffer_info;
	struct ixgbe_rx_buffer *rx_buffer_info;

	dma_addr_t dma;			/* phys. address of descriptor ring */
	unsigned int size;		/* length in bytes */

	u16 count;			/* amount of descriptors */

	u8 queue_index;			/* needed for multiqueue queue management */
	u8 reg_idx;			/* holds the special value that gets
					 * the hardware register offset
					 * associated with this ring, which is
					 * different for DCB and RSS modes
					 */
	unsigned long last_rx_timestamp;

	struct ixgbe_queue_stats stats;
	struct u64_stats_sync syncp;
	struct ixgbe_tx_queue_stats tx_stats;
	struct ixgbe_rx_queue_stats rx_stats;
} ____cacheline_internodealigned_in_smp;
enum ixgbe_ring_f_enum {
	RING_F_VMDQ,  /* SR-IOV uses the same ring feature */
#ifdef IXGBE_FCOE
	RING_F_FCOE,
#endif /* IXGBE_FCOE */
	RING_F_ARRAY_SIZE      /* must be last in enum set */
};
#define IXGBE_MAX_RSS_INDICES		16
#define IXGBE_MAX_RSS_INDICES_X550	63
#define IXGBE_MAX_VMDQ_INDICES		64
#define IXGBE_MAX_FDIR_INDICES		63	/* based on q_vector limit */
#define IXGBE_MAX_FCOE_INDICES		8
#define MAX_RX_QUEUES			(IXGBE_MAX_FDIR_INDICES + 1)
#define MAX_TX_QUEUES			(IXGBE_MAX_FDIR_INDICES + 1)
#define IXGBE_MAX_L2A_QUEUES		4
#define IXGBE_BAD_L2A_QUEUE		3
#define IXGBE_MAX_MACVLANS		31
#define IXGBE_MAX_DCBMACVLANS		8

struct ixgbe_ring_feature {
	u16 limit;	/* upper limit on feature indices */
	u16 indices;	/* current value of indices */
	u16 mask;	/* Mask used for feature to ring mapping */
	u16 offset;	/* offset to start of feature */
} ____cacheline_internodealigned_in_smp;

#define IXGBE_82599_VMDQ_8Q_MASK 0x78
#define IXGBE_82599_VMDQ_4Q_MASK 0x7C
#define IXGBE_82599_VMDQ_2Q_MASK 0x7E
/*
 * FCoE requires that all Rx buffers be over 2200 bytes in length.  Since
 * this is twice the size of a half page we need to double the page order
 * for FCoE enabled Rx queues.
 */
static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring)
{
	if (test_bit(__IXGBE_RX_FCOE, &ring->state))
		return (PAGE_SIZE < 8192) ? IXGBE_RXBUFFER_4K :
					    IXGBE_RXBUFFER_3K;
	return IXGBE_RXBUFFER_2K;
}
static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring)
{
	if (test_bit(__IXGBE_RX_FCOE, &ring->state))
		return (PAGE_SIZE < 8192) ? 1 : 0;
	return 0;
}
#define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring))
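
/*
 * Worked example (illustrative): with 4K pages, an FCoE ring gets order-1
 * pages, so ixgbe_rx_pg_size() = 4096 << 1 = 8192 bytes and each buffer is
 * IXGBE_RXBUFFER_4K; a regular ring stays at order 0 with 2K buffers, two
 * per page.
 */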
struct ixgbe_ring_container {
	struct ixgbe_ring *ring;	/* pointer to linked list of rings */
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 work_limit;			/* total work allowed per interrupt */
	u8 count;			/* total number of rings in vector */
	u8 itr;				/* current ITR setting for ring */
};

/* iterator for handling rings in ring container */
#define ixgbe_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)
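
/*
 * Usage sketch (illustrative): walk every ring attached to one side of a
 * q_vector, e.g. the Tx side,
 *
 *	struct ixgbe_ring *ring;
 *
 *	ixgbe_for_each_ring(ring, q_vector->tx)
 *		clean_budget -= clean_one_ring(ring);
 *
 * clean_one_ring() and clean_budget are placeholders for this sketch.
 */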
#define MAX_RX_PACKET_BUFFERS	((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
				 ? 8 : 1)
#define MAX_TX_PACKET_BUFFERS	MAX_RX_PACKET_BUFFERS
/* MAX_Q_VECTORS of these are allocated,
 * but we only use one per queue-specific vector.
 */
struct ixgbe_q_vector {
	struct ixgbe_adapter *adapter;
#ifdef CONFIG_IXGBE_DCA
	int cpu;	    /* CPU for DCA */
#endif
	u16 v_idx;		/* index of q_vector within array, also used for
				 * finding the bit in EICR and friends that
				 * represents the vector for this ring */
	u16 itr;		/* Interrupt throttle rate written to EITR */
	struct ixgbe_ring_container rx, tx;

	struct napi_struct napi;
	cpumask_t affinity_mask;
	struct rcu_head rcu;	/* to avoid race with update stats on free */
	char name[IFNAMSIZ + 9];

#ifdef CONFIG_NET_RX_BUSY_POLL
	atomic_t state;
#endif  /* CONFIG_NET_RX_BUSY_POLL */

	/* for dynamic allocation of rings associated with this q_vector */
	struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp;
};
#ifdef CONFIG_NET_RX_BUSY_POLL
enum ixgbe_qv_state_t {
	IXGBE_QV_STATE_IDLE = 0,
	IXGBE_QV_STATE_NAPI,
	IXGBE_QV_STATE_POLL,
	IXGBE_QV_STATE_DISABLE
};

static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
{
	/* reset state to idle */
	atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE);
}
/* called from the device poll routine to get ownership of a q_vector */
static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
{
	int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE,
				IXGBE_QV_STATE_NAPI);
#ifdef BP_EXTENDED_STATS
	if (rc != IXGBE_QV_STATE_IDLE)
		q_vector->tx.ring->stats.yields++;
#endif
	return rc == IXGBE_QV_STATE_IDLE;
}
/* returns true if someone tried to get the qv while napi had it */
static inline void ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
{
	WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_NAPI);

	/* flush any outstanding Rx frames */
	if (q_vector->napi.gro_list)
		napi_gro_flush(&q_vector->napi, false);

	/* reset state to idle */
	atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE);
}
/* called from ixgbe_low_latency_poll() */
static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
{
	int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE,
				IXGBE_QV_STATE_POLL);
#ifdef BP_EXTENDED_STATS
	if (rc != IXGBE_QV_STATE_IDLE)
		q_vector->rx.ring->stats.yields++;
#endif
	return rc == IXGBE_QV_STATE_IDLE;
}
/* returns true if someone tried to get the qv while it was locked */
static inline void ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
{
	WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_POLL);

	/* reset state to idle */
	atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE);
}

/* true if a socket is polling, even if it did not get the lock */
static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
{
	return atomic_read(&q_vector->state) == IXGBE_QV_STATE_POLL;
}

/* false if QV is currently owned */
static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector)
{
	int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE,
				IXGBE_QV_STATE_DISABLE);

	return rc == IXGBE_QV_STATE_IDLE;
}
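
/*
 * Lock protocol sketch (illustrative, not part of the original header):
 * the q_vector state acts as a three-way lock between NAPI, busy-poll and
 * teardown.  A busy-poll consumer would do roughly
 *
 *	if (!ixgbe_qv_lock_poll(q_vector))
 *		return;			(NAPI or disable already owns it)
 *	work = poll_rx_ring(q_vector);	(placeholder for the Rx clean loop)
 *	ixgbe_qv_unlock_poll(q_vector);
 *
 * while teardown loops on ixgbe_qv_disable() until it observes IDLE.
 */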
#else /* CONFIG_NET_RX_BUSY_POLL */
static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector)
{
}
static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector)
{
	return true;
}
static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector)
{
	return false;
}
static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector)
{
	return false;
}
static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector)
{
	return false;
}
static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector)
{
	return false;
}
static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector)
{
	return true;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */
#ifdef CONFIG_IXGBE_HWMON

#define IXGBE_HWMON_TYPE_LOC		0
#define IXGBE_HWMON_TYPE_TEMP		1
#define IXGBE_HWMON_TYPE_CAUTION	2
#define IXGBE_HWMON_TYPE_MAX		3

struct hwmon_attr {
	struct device_attribute dev_attr;
	struct ixgbe_thermal_diode_data *sensor;
};

struct hwmon_buff {
	struct attribute_group group;
	const struct attribute_group *groups[2];
	struct attribute *attrs[IXGBE_MAX_SENSORS * 4 + 1];
	struct hwmon_attr hwmon_list[IXGBE_MAX_SENSORS * 4];
	unsigned int n_hwmon;
};
#endif /* CONFIG_IXGBE_HWMON */
/*
 * microsecond values for various ITR rates shifted by 2 to fit itr register
 * with the first 3 bits reserved 0
 */
#define IXGBE_MIN_RSC_ITR	24
#define IXGBE_100K_ITR		40
#define IXGBE_20K_ITR		200
#define IXGBE_12K_ITR		336
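
/*
 * Worked example (illustrative): 20K interrupts/s is a 50 usec interval,
 * and 50 << 2 = 200 = IXGBE_20K_ITR; likewise 100K -> 10 usec -> 40, and
 * 12K -> ~84 usec -> 336.
 */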
/* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */
static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc,
					const u32 stat_err_bits)
{
	return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
}
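
/*
 * Usage sketch (illustrative): the Rx clean loop can test the writeback
 * "descriptor done" bit without byte-swapping the descriptor each time,
 *
 *	if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD))
 *		break;
 *
 * IXGBE_RXD_STAT_DD comes from ixgbe_type.h.
 */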
static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring)
{
	u16 ntc = ring->next_to_clean;
	u16 ntu = ring->next_to_use;

	return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1;
}
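
/*
 * Worked example (illustrative): with count = 512, next_to_use = 10 and
 * next_to_clean = 5, ntc <= ntu so the result is 512 + 5 - 10 - 1 = 506
 * free descriptors; one slot is always kept unused to distinguish a full
 * ring from an empty one.
 */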
#define IXGBE_RX_DESC(R, i)	    \
	(&(((union ixgbe_adv_rx_desc *)((R)->desc))[i]))
#define IXGBE_TX_DESC(R, i)	    \
	(&(((union ixgbe_adv_tx_desc *)((R)->desc))[i]))
#define IXGBE_TX_CTXTDESC(R, i)	    \
	(&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i]))

#define IXGBE_MAX_JUMBO_FRAME_SIZE	9728 /* Maximum Supported Size 9.5KB */
#ifdef IXGBE_FCOE
/* Use 3K as the baby jumbo frame size for FCoE */
#define IXGBE_FCOE_JUMBO_FRAME_SIZE	3072
#endif /* IXGBE_FCOE */
#define OTHER_VECTOR 1
#define NON_Q_VECTORS (OTHER_VECTOR)

#define MAX_MSIX_VECTORS_82599 64
#define MAX_Q_VECTORS_82599 64
#define MAX_MSIX_VECTORS_82598 18
#define MAX_Q_VECTORS_82598 16

struct ixgbe_mac_addr {
	u8 addr[ETH_ALEN];
	u16 state; /* bitmask */
};

#define IXGBE_MAC_STATE_DEFAULT		0x1
#define IXGBE_MAC_STATE_MODIFIED	0x2
#define IXGBE_MAC_STATE_IN_USE		0x4

#define MAX_Q_VECTORS MAX_Q_VECTORS_82599
#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599

#define MIN_MSIX_Q_VECTORS 1
#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS)

/* default to trying for four seconds */
#define IXGBE_TRY_LINK_TIMEOUT	(4 * HZ)
#define IXGBE_SFP_POLL_JIFFIES	(2 * HZ)	/* SFP poll every 2 seconds */
/* board specific private data structure */
struct ixgbe_adapter {
	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
	/* OS defined structs */
	struct net_device *netdev;
	struct pci_dev *pdev;

	/* Some features need tri-state capability,
	 * thus the additional *_CAPABLE flags.
	 */
	u32 flags;
#define IXGBE_FLAG_MSI_ENABLED			BIT(1)
#define IXGBE_FLAG_MSIX_ENABLED			BIT(3)
#define IXGBE_FLAG_RX_1BUF_CAPABLE		BIT(4)
#define IXGBE_FLAG_RX_PS_CAPABLE		BIT(5)
#define IXGBE_FLAG_RX_PS_ENABLED		BIT(6)
#define IXGBE_FLAG_DCA_ENABLED			BIT(8)
#define IXGBE_FLAG_DCA_CAPABLE			BIT(9)
#define IXGBE_FLAG_IMIR_ENABLED			BIT(10)
#define IXGBE_FLAG_MQ_CAPABLE			BIT(11)
#define IXGBE_FLAG_DCB_ENABLED			BIT(12)
#define IXGBE_FLAG_VMDQ_CAPABLE			BIT(13)
#define IXGBE_FLAG_VMDQ_ENABLED			BIT(14)
#define IXGBE_FLAG_FAN_FAIL_CAPABLE		BIT(15)
#define IXGBE_FLAG_NEED_LINK_UPDATE		BIT(16)
#define IXGBE_FLAG_NEED_LINK_CONFIG		BIT(17)
#define IXGBE_FLAG_FDIR_HASH_CAPABLE		BIT(18)
#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE		BIT(19)
#define IXGBE_FLAG_FCOE_CAPABLE			BIT(20)
#define IXGBE_FLAG_FCOE_ENABLED			BIT(21)
#define IXGBE_FLAG_SRIOV_CAPABLE		BIT(22)
#define IXGBE_FLAG_SRIOV_ENABLED		BIT(23)
#define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE	BIT(24)
#define IXGBE_FLAG_RX_HWTSTAMP_ENABLED		BIT(25)
#define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER	BIT(26)
#define IXGBE_FLAG_DCB_CAPABLE			BIT(27)

	u32 flags2;
#define IXGBE_FLAG2_RSC_CAPABLE			BIT(0)
#define IXGBE_FLAG2_RSC_ENABLED			BIT(1)
#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE		BIT(2)
#define IXGBE_FLAG2_TEMP_SENSOR_EVENT		BIT(3)
#define IXGBE_FLAG2_SEARCH_FOR_SFP		BIT(4)
#define IXGBE_FLAG2_SFP_NEEDS_RESET		BIT(5)
#define IXGBE_FLAG2_RESET_REQUESTED		BIT(6)
#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT	BIT(7)
#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP		BIT(8)
#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP		BIT(9)
#define IXGBE_FLAG2_PTP_PPS_ENABLED		BIT(10)
#define IXGBE_FLAG2_PHY_INTERRUPT		BIT(11)
#define IXGBE_FLAG2_VXLAN_REREG_NEEDED		BIT(12)
#define IXGBE_FLAG2_VLAN_PROMISC		BIT(13)
	/* Tx fast path data */
	struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp;
	u32 tx_timeout_count;

	/* Rx fast path data */
	struct ixgbe_ring *rx_ring[MAX_RX_QUEUES];
	int num_rx_pools;		/* == num_rx_queues in 82598 */
	int num_rx_queues_per_pool;	/* 1 if 82598, can be many if 82599 */
	u64 hw_csum_rx_error;
	u64 hw_rx_no_dma_resources;
	u32 alloc_rx_page_failed;
	u32 alloc_rx_buff_failed;

	/* Port number used to identify VXLAN traffic */
	__be16 vxlan_port;

	struct ixgbe_q_vector *q_vector[MAX_Q_VECTORS];
	struct ieee_pfc *ixgbe_ieee_pfc;
	struct ieee_ets *ixgbe_ieee_ets;
	struct ixgbe_dcb_config dcb_cfg;
	struct ixgbe_dcb_config temp_dcb_cfg;
	enum ixgbe_fc_mode last_lfc_mode;

	int num_q_vectors;	/* current number of q_vectors for device */
	int max_q_vectors;	/* true count of q_vectors for device */
	struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE];
	struct msix_entry *msix_entries;

	struct ixgbe_ring test_tx_ring;
	struct ixgbe_ring test_rx_ring;
	/* structs defined in ixgbe_hw.h */
	struct ixgbe_hw hw;
	struct ixgbe_hw_stats stats;

	unsigned int tx_ring_count;
	unsigned int rx_ring_count;

	unsigned long sfp_poll_time;
	unsigned long link_check_timeout;

	struct timer_list service_timer;
	struct work_struct service_task;

	struct hlist_head fdir_filter_list;
	unsigned long fdir_overflow; /* number of times ATR was backed off */
	union ixgbe_atr_input fdir_mask;
	int fdir_filter_count;
	spinlock_t fdir_perfect_lock;
#ifdef IXGBE_FCOE
	struct ixgbe_fcoe fcoe;
#endif /* IXGBE_FCOE */
	u8 __iomem *io_addr; /* Mainly for iounmap use */
	struct ptp_clock *ptp_clock;
	struct ptp_clock_info ptp_caps;
	struct work_struct ptp_tx_work;
	struct sk_buff *ptp_tx_skb;
	struct hwtstamp_config tstamp_config;
	unsigned long ptp_tx_start;
	unsigned long last_overflow_check;
	unsigned long last_rx_ptp_check;
	unsigned long last_rx_timestamp;
	spinlock_t tmreg_lock;
	struct cyclecounter hw_cc;
	struct timecounter hw_tc;
	u32 tx_hwtstamp_timeouts;
	u32 rx_hwtstamp_cleared;
	void (*ptp_setup_sdp)(struct ixgbe_adapter *);
	DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
	unsigned int num_vfs;
	struct vf_data_storage *vfinfo;
	int vf_rate_link_speed;
	struct vf_macvlans vf_mvs;
	struct vf_macvlans *mv_list;

	u32 timer_event_accumulator;
	struct ixgbe_mac_addr *mac_table;
	struct kobject *info_kobj;
#ifdef CONFIG_IXGBE_HWMON
	struct hwmon_buff *ixgbe_hwmon_buff;
#endif /* CONFIG_IXGBE_HWMON */
#ifdef CONFIG_DEBUG_FS
	struct dentry *ixgbe_dbg_adapter;
#endif /* CONFIG_DEBUG_FS */

	unsigned long fwd_bitmask; /* Bitmask indicating in use pools */
#define IXGBE_MAX_LINK_HANDLE 10
	struct ixgbe_jump_table *jump_tables[IXGBE_MAX_LINK_HANDLE];
	unsigned long tables;

	/* maximum number of RETA entries among all devices supported by ixgbe
	 * driver: currently it's x550 device in non-SRIOV mode
	 */
#define IXGBE_MAX_RETA_ENTRIES 512
	u8 rss_indir_tbl[IXGBE_MAX_RETA_ENTRIES];

#define IXGBE_RSS_KEY_SIZE     40  /* size of RSS Hash Key in bytes */
	u32 rss_key[IXGBE_RSS_KEY_SIZE / sizeof(u32)];
};
static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter)
{
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		return IXGBE_MAX_RSS_INDICES;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		return IXGBE_MAX_RSS_INDICES_X550;
	default:
		return 0;
	}
}
struct ixgbe_fdir_filter {
	struct hlist_node fdir_node;
	union ixgbe_atr_input filter;
};

enum ixgbe_state_t {
	__IXGBE_SERVICE_SCHED,
	__IXGBE_SERVICE_INITED,
	__IXGBE_PTP_TX_IN_PROGRESS,
};

struct ixgbe_cb {
	union {				/* Union defining head/tail partner */
		struct sk_buff *head;
		struct sk_buff *tail;
	};
};
#define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb)
extern const struct ixgbe_info ixgbe_82598_info;
extern const struct ixgbe_info ixgbe_82599_info;
extern const struct ixgbe_info ixgbe_X540_info;
extern const struct ixgbe_info ixgbe_X550_info;
extern const struct ixgbe_info ixgbe_X550EM_x_info;
extern const struct ixgbe_info ixgbe_x550em_a_info;
#ifdef CONFIG_IXGBE_DCB
extern const struct dcbnl_rtnl_ops dcbnl_ops;
#endif

extern char ixgbe_driver_name[];
extern const char ixgbe_driver_version[];
#ifdef IXGBE_FCOE
extern char ixgbe_default_device_descr[];
#endif /* IXGBE_FCOE */
int ixgbe_open(struct net_device *netdev);
int ixgbe_close(struct net_device *netdev);
void ixgbe_up(struct ixgbe_adapter *adapter);
void ixgbe_down(struct ixgbe_adapter *adapter);
void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
void ixgbe_reset(struct ixgbe_adapter *adapter);
void ixgbe_set_ethtool_ops(struct net_device *netdev);
int ixgbe_setup_rx_resources(struct ixgbe_ring *);
int ixgbe_setup_tx_resources(struct ixgbe_ring *);
void ixgbe_free_rx_resources(struct ixgbe_ring *);
void ixgbe_free_tx_resources(struct ixgbe_ring *);
void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *);
void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *);
void ixgbe_update_stats(struct ixgbe_adapter *adapter);
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id,
			 u16 subdevice_id);
#ifdef CONFIG_PCI_IOV
void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter);
#endif
int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter,
			 const u8 *addr, u16 queue);
int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter,
			 const u8 *addr, u16 queue);
void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid);
void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *,
				  struct ixgbe_ring *);
void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
				      struct ixgbe_tx_buffer *);
void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
void ixgbe_write_eitr(struct ixgbe_q_vector *);
int ixgbe_poll(struct napi_struct *napi, int budget);
int ethtool_ioctl(struct ifreq *ifr);
s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl);
s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl);
s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
					   union ixgbe_atr_hash_dword input,
					   union ixgbe_atr_hash_dword common,
					   u8 queue);
s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw,
				    union ixgbe_atr_input *input_mask);
s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw,
					   union ixgbe_atr_input *input,
					   u16 soft_id, u8 queue);
s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw,
					   union ixgbe_atr_input *input,
					   u16 soft_id);
void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input,
					  union ixgbe_atr_input *mask);
int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
				    struct ixgbe_fdir_filter *input,
				    u16 sw_idx);
void ixgbe_set_rx_mode(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter);
#endif
int ixgbe_setup_tc(struct net_device *dev, u8 tc);
void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32);
void ixgbe_do_reset(struct net_device *netdev);
#ifdef CONFIG_IXGBE_HWMON
void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter);
int ixgbe_sysfs_init(struct ixgbe_adapter *adapter);
#endif /* CONFIG_IXGBE_HWMON */
#ifdef IXGBE_FCOE
void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first,
	      u8 *hdr_len);
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
		   union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb);
int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
		       struct scatterlist *sgl, unsigned int sgc);
int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
			  struct scatterlist *sgl, unsigned int sgc);
int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid);
int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter);
int ixgbe_fcoe_enable(struct net_device *netdev);
int ixgbe_fcoe_disable(struct net_device *netdev);
#ifdef CONFIG_IXGBE_DCB
u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
#endif /* CONFIG_IXGBE_DCB */
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
int ixgbe_fcoe_get_hbainfo(struct net_device *netdev,
			   struct netdev_fcoe_hbainfo *info);
u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter);
#endif /* IXGBE_FCOE */
#ifdef CONFIG_DEBUG_FS
void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter);
void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter);
void ixgbe_dbg_init(void);
void ixgbe_dbg_exit(void);
#else
static inline void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) {}
static inline void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) {}
static inline void ixgbe_dbg_init(void) {}
static inline void ixgbe_dbg_exit(void) {}
#endif /* CONFIG_DEBUG_FS */
static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
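
/*
 * Usage sketch (illustrative): the Tx path can flow-control its own queue
 * through the netdev_queue returned above,
 *
 *	if (ixgbe_desc_unused(tx_ring) < DESC_NEEDED)
 *		netif_tx_stop_queue(txring_txq(tx_ring));
 */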
void ixgbe_ptp_init(struct ixgbe_adapter *adapter);
void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter);
void ixgbe_ptp_stop(struct ixgbe_adapter *adapter);
void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter);
void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter);
void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *, struct sk_buff *);
void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *, struct sk_buff *skb);
static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring,
					 union ixgbe_adv_rx_desc *rx_desc,
					 struct sk_buff *skb)
{
	if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_TSIP))) {
		ixgbe_ptp_rx_pktstamp(rx_ring->q_vector, skb);
		return;
	}

	if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS)))
		return;

	ixgbe_ptp_rx_rgtstamp(rx_ring->q_vector, skb);

	/* Update the last_rx_timestamp timer in order to enable watchdog check
	 * for error case of latched timestamp on a dropped packet.
	 */
	rx_ring->last_rx_timestamp = jiffies;
}
int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr);
void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter);
void ixgbe_ptp_reset(struct ixgbe_adapter *adapter);
void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter);
#ifdef CONFIG_PCI_IOV
void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter);
#endif

netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
				  struct ixgbe_adapter *adapter,
				  struct ixgbe_ring *tx_ring);
u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter);
void ixgbe_store_reta(struct ixgbe_adapter *adapter);
#endif /* _IXGBE_H_ */