// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

/* ethtool support for ixgbe */

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>

#include "ixgbe.h"
#include "ixgbe_phy.h"
#define IXGBE_ALL_RAR_ENTRIES 16

enum {NETDEV_STATS, IXGBE_STATS};

struct ixgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};

#define IXGBE_STAT(m)		IXGBE_STATS, \
				sizeof(((struct ixgbe_adapter *)0)->m), \
				offsetof(struct ixgbe_adapter, m)
#define IXGBE_NETDEV_STAT(m)	NETDEV_STATS, \
				sizeof(((struct rtnl_link_stats64 *)0)->m), \
				offsetof(struct rtnl_link_stats64, m)
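/* For illustration: each table entry below carries a (type, size, offset)
 * triple. For example, IXGBE_STAT(stats.gprc) expands to
 *
 *	IXGBE_STATS,
 *	sizeof(((struct ixgbe_adapter *)0)->stats.gprc),
 *	offsetof(struct ixgbe_adapter, stats.gprc)
 *
 * so ixgbe_get_ethtool_stats() can fetch any counter generically with
 * pointer arithmetic and use sizeof_stat to decide between a u32 and a
 * u64 load.
 */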
static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
	{"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
	{"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
	{"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
	{"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
	{"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
	{"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
	{"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
	{"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
	{"lsc_int", IXGBE_STAT(lsc_int)},
	{"tx_busy", IXGBE_STAT(tx_busy)},
	{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
	{"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
	{"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
	{"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
	{"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
	{"multicast", IXGBE_NETDEV_STAT(multicast)},
	{"broadcast", IXGBE_STAT(stats.bprc)},
	{"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
	{"collisions", IXGBE_NETDEV_STAT(collisions)},
	{"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
	{"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
	{"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
	{"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
	{"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
	{"fdir_match", IXGBE_STAT(stats.fdirmatch)},
	{"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
	{"fdir_overflow", IXGBE_STAT(fdir_overflow)},
	{"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
	{"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
	{"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
	{"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
	{"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
	{"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
	{"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
	{"tx_restart_queue", IXGBE_STAT(restart_queue)},
	{"rx_length_errors", IXGBE_STAT(stats.rlec)},
	{"rx_long_length_errors", IXGBE_STAT(stats.roc)},
	{"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
	{"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
	{"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
	{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
	{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
	{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
	{"alloc_rx_page", IXGBE_STAT(alloc_rx_page)},
	{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
	{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
	{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
	{"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
	{"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
	{"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
	{"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
	{"tx_hwtstamp_timeouts", IXGBE_STAT(tx_hwtstamp_timeouts)},
	{"tx_hwtstamp_skipped", IXGBE_STAT(tx_hwtstamp_skipped)},
	{"rx_hwtstamp_cleared", IXGBE_STAT(rx_hwtstamp_cleared)},
	{"tx_ipsec", IXGBE_STAT(tx_ipsec)},
	{"rx_ipsec", IXGBE_STAT(rx_ipsec)},
#ifdef IXGBE_FCOE
	{"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
	{"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
	{"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
	{"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
	{"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
	{"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
	{"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
	{"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
#endif /* IXGBE_FCOE */
};
/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so
 * we set the num_rx_queues to evaluate to num_tx_queues. This is
 * used because we do not have a good way to get the max number of
 * rx queues with CONFIG_RPS disabled.
 */
#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues

#define IXGBE_QUEUE_STATS_LEN ( \
	(netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
	(sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
#define IXGBE_PB_STATS_LEN ( \
			(sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
			/ sizeof(u64))
#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
			 IXGBE_PB_STATS_LEN + \
			 IXGBE_QUEUE_STATS_LEN)
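/* Worked example of the arithmetic above, assuming struct ixgbe_queue_stats
 * holds two u64 counters (packets and bytes) and the pxon/pxoff fields are
 * four 8-element u64 arrays: each Tx/Rx queue contributes
 * sizeof(struct ixgbe_queue_stats) / sizeof(u64) == 2 entries, the packet
 * buffers contribute (4 * 8 * sizeof(u64)) / sizeof(u64) == 32 entries, so
 * IXGBE_STATS_LEN = IXGBE_GLOBAL_STATS_LEN + 32
 *		     + 2 * (num_tx_queues + num_rx_queues).
 */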
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test (offline)", "Eeprom test (offline)",
	"Interrupt test (offline)", "Loopback test (offline)",
	"Link test (on/offline)"
};

#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define IXGBE_PRIV_FLAGS_LEGACY_RX	BIT(0)
	"legacy-rx",
};

#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings)
/* currently supported speeds for 10G */
#define ADVRTSD_MSK_10G (SUPPORTED_10000baseT_Full | \
			 SUPPORTED_10000baseKX4_Full | \
			 SUPPORTED_10000baseKR_Full)

#define ixgbe_isbackplane(type) ((type) == ixgbe_media_type_backplane)
static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw)
{
	if (!ixgbe_isbackplane(hw->phy.media_type))
		return SUPPORTED_10000baseT_Full;

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598:
	case IXGBE_DEV_ID_82599_KX4:
	case IXGBE_DEV_ID_82599_KX4_MEZZ:
	case IXGBE_DEV_ID_X550EM_X_KX4:
		return SUPPORTED_10000baseKX4_Full;
	case IXGBE_DEV_ID_82598_BX:
	case IXGBE_DEV_ID_82599_KR:
	case IXGBE_DEV_ID_X550EM_X_KR:
	case IXGBE_DEV_ID_X550EM_X_XFI:
		return SUPPORTED_10000baseKR_Full;
	default:
		return SUPPORTED_10000baseKX4_Full |
		       SUPPORTED_10000baseKR_Full;
	}
}
static int ixgbe_get_link_ksettings(struct net_device *netdev,
				    struct ethtool_link_ksettings *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed supported_link;
	bool autoneg = false;
	u32 supported, advertising;

	ethtool_convert_link_mode_to_legacy_u32(&supported,
						cmd->link_modes.supported);

	hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);

	/* set the supported link speeds */
	if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
		supported |= ixgbe_get_supported_10gtypes(hw);
	if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
		supported |= (ixgbe_isbackplane(hw->phy.media_type)) ?
				   SUPPORTED_1000baseKX_Full :
				   SUPPORTED_1000baseT_Full;
	if (supported_link & IXGBE_LINK_SPEED_100_FULL)
		supported |= SUPPORTED_100baseT_Full;
	if (supported_link & IXGBE_LINK_SPEED_10_FULL)
		supported |= SUPPORTED_10baseT_Full;

	/* default advertised speed if phy.autoneg_advertised isn't set */
	advertising = supported;
	/* set the advertised speeds */
	if (hw->phy.autoneg_advertised) {
		advertising = 0;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
			advertising |= ADVERTISED_10baseT_Full;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
			advertising |= ADVERTISED_100baseT_Full;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
			advertising |= supported & ADVRTSD_MSK_10G;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
			if (supported & SUPPORTED_1000baseKX_Full)
				advertising |= ADVERTISED_1000baseKX_Full;
			else
				advertising |= ADVERTISED_1000baseT_Full;
		}
	} else {
		if (hw->phy.multispeed_fiber && !autoneg) {
			if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
				advertising = ADVERTISED_10000baseT_Full;
		}
	}

	if (autoneg) {
		supported |= SUPPORTED_Autoneg;
		advertising |= ADVERTISED_Autoneg;
		cmd->base.autoneg = AUTONEG_ENABLE;
	} else
		cmd->base.autoneg = AUTONEG_DISABLE;

	/* Determine the remaining settings based on the PHY type. */
	switch (adapter->hw.phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_aq:
	case ixgbe_phy_x550em_ext_t:
	case ixgbe_phy_fw:
	case ixgbe_phy_cu_unknown:
		supported |= SUPPORTED_TP;
		advertising |= ADVERTISED_TP;
		cmd->base.port = PORT_TP;
		break;
	case ixgbe_phy_qt:
		supported |= SUPPORTED_FIBRE;
		advertising |= ADVERTISED_FIBRE;
		cmd->base.port = PORT_FIBRE;
		break;
	case ixgbe_phy_nl:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
	case ixgbe_phy_qsfp_passive_unknown:
	case ixgbe_phy_qsfp_active_unknown:
	case ixgbe_phy_qsfp_intel:
	case ixgbe_phy_qsfp_unknown:
		/* SFP+ devices, further checking needed */
		switch (adapter->hw.phy.sfp_type) {
		case ixgbe_sfp_type_da_cu:
		case ixgbe_sfp_type_da_cu_core0:
		case ixgbe_sfp_type_da_cu_core1:
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_DA;
			break;
		case ixgbe_sfp_type_sr:
		case ixgbe_sfp_type_lr:
		case ixgbe_sfp_type_srlr_core0:
		case ixgbe_sfp_type_srlr_core1:
		case ixgbe_sfp_type_1g_sx_core0:
		case ixgbe_sfp_type_1g_sx_core1:
		case ixgbe_sfp_type_1g_lx_core0:
		case ixgbe_sfp_type_1g_lx_core1:
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_FIBRE;
			break;
		case ixgbe_sfp_type_not_present:
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_NONE;
			break;
		case ixgbe_sfp_type_1g_cu_core0:
		case ixgbe_sfp_type_1g_cu_core1:
			supported |= SUPPORTED_TP;
			advertising |= ADVERTISED_TP;
			cmd->base.port = PORT_TP;
			break;
		case ixgbe_sfp_type_unknown:
		default:
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_OTHER;
			break;
		}
		break;
	case ixgbe_phy_xaui:
		supported |= SUPPORTED_FIBRE;
		advertising |= ADVERTISED_FIBRE;
		cmd->base.port = PORT_NONE;
		break;
	case ixgbe_phy_unknown:
	case ixgbe_phy_generic:
	case ixgbe_phy_sfp_unsupported:
	default:
		supported |= SUPPORTED_FIBRE;
		advertising |= ADVERTISED_FIBRE;
		cmd->base.port = PORT_OTHER;
		break;
	}

	/* Indicate pause support */
	supported |= SUPPORTED_Pause;

	switch (hw->fc.requested_mode) {
	case ixgbe_fc_full:
		advertising |= ADVERTISED_Pause;
		break;
	case ixgbe_fc_rx_pause:
		advertising |= ADVERTISED_Pause |
			       ADVERTISED_Asym_Pause;
		break;
	case ixgbe_fc_tx_pause:
		advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		advertising &= ~(ADVERTISED_Pause |
				 ADVERTISED_Asym_Pause);
	}

	if (netif_carrier_ok(netdev)) {
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			cmd->base.speed = SPEED_10000;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			cmd->base.speed = SPEED_5000;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			cmd->base.speed = SPEED_2500;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			cmd->base.speed = SPEED_1000;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			cmd->base.speed = SPEED_100;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			cmd->base.speed = SPEED_10;
			break;
		default:
			break;
		}
		cmd->base.duplex = DUPLEX_FULL;
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
static int ixgbe_set_link_ksettings(struct net_device *netdev,
				    const struct ethtool_link_ksettings *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 advertised, old;
	s32 err = 0;
	u32 supported, advertising;

	ethtool_convert_link_mode_to_legacy_u32(&supported,
						cmd->link_modes.supported);
	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if ((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber)) {
		/*
		 * this function does not support duplex forcing, but can
		 * limit the advertising of the adapter to the specified speed
		 */
		if (advertising & ~supported)
			return -EINVAL;

		/* only allow one speed at a time if no autoneg */
		if (!cmd->base.autoneg && hw->phy.multispeed_fiber) {
			if (advertising ==
			    (ADVERTISED_10000baseT_Full |
			     ADVERTISED_1000baseT_Full))
				return -EINVAL;
		}

		old = hw->phy.autoneg_advertised;
		advertised = 0;
		if (advertising & ADVERTISED_10000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_10GB_FULL;

		if (advertising & ADVERTISED_1000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_1GB_FULL;

		if (advertising & ADVERTISED_100baseT_Full)
			advertised |= IXGBE_LINK_SPEED_100_FULL;

		if (advertising & ADVERTISED_10baseT_Full)
			advertised |= IXGBE_LINK_SPEED_10_FULL;

		if (old == advertised)
			return err;
		/* this sets the link speed and restarts auto-neg */
		while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
			usleep_range(1000, 2000);

		hw->mac.autotry_restart = true;
		err = hw->mac.ops.setup_link(hw, advertised, true);
		if (err) {
			e_info(probe, "setup link failed with code %d\n", err);
			hw->mac.ops.setup_link(hw, old, true);
		}
		clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
	} else {
		/* in this case we currently only support 10Gb/FULL */
		u32 speed = cmd->base.speed;

		if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
		    (advertising != ADVERTISED_10000baseT_Full) ||
		    (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL))
			return -EINVAL;
	}

	return err;
}
static void ixgbe_get_pauseparam(struct net_device *netdev,
				 struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (ixgbe_device_supports_autoneg_fc(hw) &&
	    !hw->fc.disable_fc_autoneg)
		pause->autoneg = 1;
	else
		pause->autoneg = 0;

	if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
		pause->rx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
		pause->tx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_full) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}
static int ixgbe_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fc_info fc = hw->fc;

	/* 82598 does not support link flow control with DCB enabled */
	if ((hw->mac.type == ixgbe_mac_82598EB) &&
	    (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
		return -EINVAL;

	/* some devices do not support autoneg of link flow control */
	if ((pause->autoneg == AUTONEG_ENABLE) &&
	    !ixgbe_device_supports_autoneg_fc(hw))
		return -EINVAL;

	fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);

	if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
		fc.requested_mode = ixgbe_fc_full;
	else if (pause->rx_pause && !pause->tx_pause)
		fc.requested_mode = ixgbe_fc_rx_pause;
	else if (!pause->rx_pause && pause->tx_pause)
		fc.requested_mode = ixgbe_fc_tx_pause;
	else
		fc.requested_mode = ixgbe_fc_none;

	/* if the thing changed then we'll update and use new autoneg */
	if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
		hw->fc = fc;
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
		else
			ixgbe_reset(adapter);
	}

	return 0;
}
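/* Sketch of the userspace mapping for the logic above (standard ethtool
 * semantics; "eth0" is a placeholder interface name):
 *
 *	ethtool -A eth0 rx on tx on   -> fc.requested_mode = ixgbe_fc_full
 *	ethtool -A eth0 rx on tx off  -> ixgbe_fc_rx_pause
 *	ethtool -A eth0 rx off tx on  -> ixgbe_fc_tx_pause
 *	ethtool -A eth0 rx off tx off -> ixgbe_fc_none (with autoneg off)
 *
 * As the first if-clause shows, autoneg on forces ixgbe_fc_full regardless
 * of the rx/tx flags.
 */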
static u32 ixgbe_get_msglevel(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}
static int ixgbe_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN  1145
	return IXGBE_REGS_LEN * sizeof(u32);
}

#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
static void ixgbe_get_regs(struct net_device *netdev,
			   struct ethtool_regs *regs, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u8 i;

	memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));

	regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
			hw->device_id;

	/* General Registers */
	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);

	/* NVM Register */
	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA(hw));
	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
	regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
	regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
	regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
	regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC(hw));

	/* Interrupt */
	/* don't read EICR because it can clear interrupt causes, instead
	 * read EICS which is a shadow but doesn't clear EICR */
	regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
	regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
	regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
	regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
	regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
	regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
	regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
	regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
	regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);

	/* Flow Control */
	regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
	for (i = 0; i < 4; i++)
		regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i));
	for (i = 0; i < 8; i++) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_x550em_a:
			regs_buff[35 + i] = IXGBE_READ_REG(hw,
							  IXGBE_FCRTL_82599(i));
			regs_buff[43 + i] = IXGBE_READ_REG(hw,
							  IXGBE_FCRTH_82599(i));
			break;
		default:
			break;
		}
	}
	regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
	regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);

	/* Receive DMA */
	for (i = 0; i < 64; i++)
		regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
	for (i = 0; i < 64; i++)
		regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
	for (i = 0; i < 64; i++)
		regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
	for (i = 0; i < 64; i++)
		regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
	for (i = 0; i < 64; i++)
		regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
	for (i = 0; i < 64; i++)
		regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
	regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
	for (i = 0; i < 8; i++)
		regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
	regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);

	/* Receive */
	regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
	for (i = 0; i < 16; i++)
		regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
	for (i = 0; i < 16; i++)
		regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
	regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
	regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
	regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
	regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
	for (i = 0; i < 8; i++)
		regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
	for (i = 0; i < 8; i++)
		regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
	regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);

	/* Transmit */
	for (i = 0; i < 32; i++)
		regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
	for (i = 0; i < 32; i++)
		regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
	for (i = 0; i < 32; i++)
		regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
	for (i = 0; i < 32; i++)
		regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
	for (i = 0; i < 32; i++)
		regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
	for (i = 0; i < 32; i++)
		regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
	regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
	for (i = 0; i < 16; i++)
		regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
	regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
	for (i = 0; i < 8; i++)
		regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
	regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);

	/* Wake Up */
	regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
	regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
	regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
	regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
	regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
	regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
	regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
	regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
	regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));

	/* DCB */
	regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);   /* same as FCCFG  */
	regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
		regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
		for (i = 0; i < 8; i++)
			regs_buff[833 + i] =
				IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
		for (i = 0; i < 8; i++)
			regs_buff[841 + i] =
				IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
		for (i = 0; i < 8; i++)
			regs_buff[849 + i] =
				IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
		for (i = 0; i < 8; i++)
			regs_buff[857 + i] =
				IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
		for (i = 0; i < 8; i++)
			regs_buff[833 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i));
		for (i = 0; i < 8; i++)
			regs_buff[841 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i));
		for (i = 0; i < 8; i++)
			regs_buff[849 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i));
		for (i = 0; i < 8; i++)
			regs_buff[857 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i));
		break;
	default:
		break;
	}

	for (i = 0; i < 8; i++)
		regs_buff[865 + i] =
		IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */
	for (i = 0; i < 8; i++)
		regs_buff[873 + i] =
		IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */

	/* Statistics */
	regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
	regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
	regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
	regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
	for (i = 0; i < 8; i++)
		regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
	regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
	regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
	regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
	regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
	regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
	regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
	regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
	for (i = 0; i < 8; i++)
		regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
	regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
	regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
	regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
	regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
	regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
	regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
	regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
	regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
	regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
	regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
	regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc);
	regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32);
	regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc);
	regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32);
	for (i = 0; i < 8; i++)
		regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
	regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
	regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
	regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
	regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
	regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
	regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
	regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
	regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor);
	regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32);
	regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
	regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
	regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
	regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
	regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
	regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
	regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
	regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
	regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
	regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
	regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
	for (i = 0; i < 16; i++)
		regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);

	/* MAC */
	regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
	regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
	regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
	regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
	regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
	regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
	regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
	regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
	regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
	regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
	regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
	regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
	regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
	regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
	regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
	regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
	regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
	regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
	regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
	regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
	regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
	regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
	regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
	regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
	regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
	regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
	regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
	regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
	regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
	regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);

	/* Diagnostic */
	regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
	regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
	regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
	regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
	regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
	regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
	regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
	for (i = 0; i < 4; i++)
		regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i));
	regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
	for (i = 0; i < 4; i++)
		regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i));
	for (i = 0; i < 8; i++)
		regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
	regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
	regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
	regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
	regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
	regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
	regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
	regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
	regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
	regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);

	/* 82599 X540 specific registers */
	regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);

	/* 82599 X540 specific DCB registers */
	regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
	regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC);
	for (i = 0; i < 4; i++)
		regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i));
	regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM);
					/* same as RTTQCNRM */
	regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD);
					/* same as RTTQCNRR */

	/* X540 specific DCB registers */
	regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
	regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);

	/* Security config registers */
	regs_buff[1139] = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	regs_buff[1140] = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
	regs_buff[1141] = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
	regs_buff[1142] = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	regs_buff[1143] = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	regs_buff[1144] = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
}
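/* The buffer filled above is what userspace receives from the ETHTOOL_GREGS
 * request (e.g. "ethtool -d <iface> raw on"); regs->version packs mac.type,
 * revision_id and device_id so a decoder can select the right register
 * layout for the dump.
 */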
static int ixgbe_get_eeprom_len(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	return adapter->hw.eeprom.word_size * 2;
}
static int ixgbe_get_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	int first_word, last_word, eeprom_len;
	int ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_len = last_word - first_word + 1;

	eeprom_buff = kmalloc_array(eeprom_len, sizeof(u16), GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
					     eeprom_buff);

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < eeprom_len; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
	kfree(eeprom_buff);

	return ret_val;
}
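/* Worked example of the word math above (illustrative values): a request
 * with eeprom->offset = 3 and eeprom->len = 4 gives first_word = 1,
 * last_word = (3 + 4 - 1) >> 1 = 3, so three 16-bit words (bytes 2..7)
 * are read, and the memcpy() skips one byte ((eeprom->offset & 1) == 1)
 * to return exactly bytes 3..6.
 */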
static int ixgbe_set_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	void *ptr;
	int max_len, first_word, last_word, ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
		return -EINVAL;

	max_len = hw->eeprom.word_size * 2;

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ptr = eeprom_buff;

	if (eeprom->offset & 1) {
		/*
		 * need read/modify/write of first changed EEPROM word
		 * only the second byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
		if (ret_val)
			goto err;

		ptr++;
	}
	if ((eeprom->offset + eeprom->len) & 1) {
		/*
		 * need read/modify/write of last changed EEPROM word
		 * only the first byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, last_word,
					  &eeprom_buff[last_word - first_word]);
		if (ret_val)
			goto err;
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(ptr, bytes, eeprom->len);

	for (i = 0; i < last_word - first_word + 1; i++)
		cpu_to_le16s(&eeprom_buff[i]);

	ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
					      last_word - first_word + 1,
					      eeprom_buff);

	/* Update the checksum */
	if (ret_val == 0)
		hw->eeprom.ops.update_checksum(hw);

err:
	kfree(eeprom_buff);
	return ret_val;
}
static void ixgbe_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ixgbe_driver_version,
		sizeof(drvinfo->version));

	strlcpy(drvinfo->fw_version, adapter->eeprom_id,
		sizeof(drvinfo->fw_version));

	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));

	drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN;
}
static void ixgbe_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
	struct ixgbe_ring *rx_ring = adapter->rx_ring[0];

	ring->rx_max_pending = IXGBE_MAX_RXD;
	ring->tx_max_pending = IXGBE_MAX_TXD;
	ring->rx_pending = rx_ring->count;
	ring->tx_pending = tx_ring->count;
}
static int ixgbe_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *temp_ring;
	int i, j, err = 0;
	u32 new_rx_count, new_tx_count;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_tx_count = clamp_t(u32, ring->tx_pending,
			       IXGBE_MIN_TXD, IXGBE_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = clamp_t(u32, ring->rx_pending,
			       IXGBE_MIN_RXD, IXGBE_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == adapter->tx_ring_count) &&
	    (new_rx_count == adapter->rx_ring_count)) {
		/* nothing to do */
		return 0;
	}

	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (!netif_running(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_xdp_queues; i++)
			adapter->xdp_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->xdp_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	/* allocate temporary buffer to store rings in */
	i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
		  adapter->num_rx_queues);
	temp_ring = vmalloc(array_size(i, sizeof(struct ixgbe_ring)));

	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	ixgbe_down(adapter);

	/*
	 * Setup new Tx resources and free the old Tx resources in that order.
	 * We can then assign the new resources to the rings via a memcpy.
	 * The advantage to this approach is that we are guaranteed to still
	 * have resources even in the case of an allocation failure.
	 */
	if (new_tx_count != adapter->tx_ring_count) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			memcpy(&temp_ring[i], adapter->tx_ring[i],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
			memcpy(&temp_ring[i], adapter->xdp_ring[j],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < adapter->num_tx_queues; i++) {
			ixgbe_free_tx_resources(adapter->tx_ring[i]);

			memcpy(adapter->tx_ring[i], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}
		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
			ixgbe_free_tx_resources(adapter->xdp_ring[j]);

			memcpy(adapter->xdp_ring[j], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}

		adapter->tx_ring_count = new_tx_count;
	}

	/* Repeat the process for the Rx rings if needed */
	if (new_rx_count != adapter->rx_ring_count) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			memcpy(&temp_ring[i], adapter->rx_ring[i],
			       sizeof(struct ixgbe_ring));

			/* Clear copied XDP RX-queue info */
			memset(&temp_ring[i].xdp_rxq, 0,
			       sizeof(temp_ring[i].xdp_rxq));

			temp_ring[i].count = new_rx_count;
			err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_rx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			ixgbe_free_rx_resources(adapter->rx_ring[i]);

			memcpy(adapter->rx_ring[i], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}

		adapter->rx_ring_count = new_rx_count;
	}

err_setup:
	ixgbe_up(adapter);
	vfree(temp_ring);
clear_reset:
	clear_bit(__IXGBE_RESETTING, &adapter->state);
	return err;
}
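/* This is the handler behind "ethtool -G <iface> rx N tx N". As a sketch of
 * the rounding above, assuming IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE is 8: a
 * request of tx 1000 is kept as 1000 while tx 1001 is rounded up to 1008 by
 * ALIGN() before the rings are rebuilt.
 */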
static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return IXGBE_TEST_LEN;
	case ETH_SS_STATS:
		return IXGBE_STATS_LEN;
	case ETH_SS_PRIV_FLAGS:
		return IXGBE_PRIV_FLAGS_STR_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
static void ixgbe_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *net_stats;
	unsigned int start;
	struct ixgbe_ring *ring;
	int i, j;
	char *p = NULL;

	ixgbe_update_stats(adapter);
	net_stats = dev_get_stats(netdev, &temp);
	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
		switch (ixgbe_gstrings_stats[i].type) {
		case NETDEV_STATS:
			p = (char *) net_stats +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		case IXGBE_STATS:
			p = (char *) adapter +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		default:
			data[i] = 0;
			continue;
		}

		data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	for (j = 0; j < netdev->num_tx_queues; j++) {
		ring = adapter->tx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i]   = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}
	for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
		ring = adapter->rx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i]   = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}

	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxontxc[j];
		data[i++] = adapter->stats.pxofftxc[j];
	}
	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxonrxc[j];
		data[i++] = adapter->stats.pxoffrxc[j];
	}
}
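/* The u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() loop above is
 * the usual seqcount pattern: the reader snapshots packets/bytes and retries
 * if a writer updated the ring counters in between, so the 64-bit counters
 * are read tear-free even on 32-bit machines (on 64-bit builds the helpers
 * typically compile away).
 */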
static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
			      u8 *data)
{
	char *p = (char *)data;
	unsigned int i;

	switch (stringset) {
	case ETH_SS_TEST:
		for (i = 0; i < IXGBE_TEST_LEN; i++) {
			memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	case ETH_SS_STATS:
		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
			memcpy(p, ixgbe_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < netdev->num_tx_queues; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			sprintf(p, "tx_pb_%u_pxon", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_pb_%u_pxoff", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			sprintf(p, "rx_pb_%u_pxon", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_pb_%u_pxoff", i);
			p += ETH_GSTRING_LEN;
		}
		/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
		break;
	case ETH_SS_PRIV_FLAGS:
		memcpy(data, ixgbe_priv_flags_strings,
		       IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
	}
}
static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link_up;
	u32 link_speed = 0;

	if (ixgbe_removed(hw->hw_addr)) {
		*data = 1;
		return 1;
	}
	*data = 0;
	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
	if (link_up)
		return *data;
	else
		*data = 1;
	return *data;
}
/* ethtool register test data */
struct ixgbe_reg_test {
	u16 reg;
	u8  array_len;
	u8  test_type;
	u32 mask;
	u32 write;
};

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x40 bytes apart, or in contiguous tables.  We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */

#define PATTERN_TEST	1
#define SET_READ_TEST	2
#define WRITE_NO_TEST	3
#define TABLE32_TEST	4
#define TABLE64_TEST_LO	5
#define TABLE64_TEST_HI	6
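/* Reading one table row (illustration): the 82599 entry
 *
 *	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }
 *
 * tells ixgbe_reg_test() to walk RDBAL(0)..RDBAL(3) at 0x40-byte strides,
 * write each test pattern ANDed with "write", and expect the read-back
 * value to equal the pattern ANDed with "mask".
 */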
/* default 82599 register test */
static const struct ixgbe_reg_test reg_test_82599[] = {
	{ IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ .reg = 0 }
};
/* default 82598 register test */
static const struct ixgbe_reg_test reg_test_82598[] = {
	{ IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	/* Enable all four RX queues before testing. */
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	/* RDH is read-only for 82598, only test RDT. */
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
	{ IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ .reg = 0 }
};
static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
			     u32 mask, u32 write)
{
	u32 pat, val, before;
	static const u32 test_pattern[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
		before = ixgbe_read_reg(&adapter->hw, reg);
		ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
		val = ixgbe_read_reg(&adapter->hw, reg);
		if (val != (test_pattern[pat] & write & mask)) {
			e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
			      reg, val, (test_pattern[pat] & write & mask));
			*data = reg;
			ixgbe_write_reg(&adapter->hw, reg, before);
			return true;
		}
		ixgbe_write_reg(&adapter->hw, reg, before);
	}
	return false;
}
static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
			      u32 mask, u32 write)
{
	u32 val, before;

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	before = ixgbe_read_reg(&adapter->hw, reg);
	ixgbe_write_reg(&adapter->hw, reg, write & mask);
	val = ixgbe_read_reg(&adapter->hw, reg);
	if ((write & mask) != (val & mask)) {
		e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
		      reg, (val & mask), (write & mask));
		*data = reg;
		ixgbe_write_reg(&adapter->hw, reg, before);
		return true;
	}
	ixgbe_write_reg(&adapter->hw, reg, before);
	return false;
}
static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
{
	const struct ixgbe_reg_test *test;
	u32 value, before, after;
	u32 i, toggle;

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		e_err(drv, "Adapter removed - register test blocked\n");
		*data = 1;
		return 1;
	}
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		toggle = 0x7FFFF3FF;
		test = reg_test_82598;
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		toggle = 0x7FFFF30F;
		test = reg_test_82599;
		break;
	default:
		*data = 1;
		return 1;
	}

	/*
	 * Because the status register is such a special case,
	 * we handle it separately from the rest of the register
	 * tests.  Some bits are read-only, some toggle, and some
	 * are writeable on newer MACs.
	 */
	before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS);
	value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle);
	ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
	after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
	if (value != after) {
		e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
		      after, value);
		*data = 1;
		return 1;
	}
	/* restore previous status */
	ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before);

	/*
	 * Perform the remainder of the register test, looping through
	 * the test table until we either fail or reach the null entry.
	 */
	while (test->reg) {
		for (i = 0; i < test->array_len; i++) {
			bool b = false;

			switch (test->test_type) {
			case PATTERN_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 0x40),
						     test->mask,
						     test->write);
				break;
			case SET_READ_TEST:
				b = reg_set_and_check(adapter, data,
						      test->reg + (i * 0x40),
						      test->mask,
						      test->write);
				break;
			case WRITE_NO_TEST:
				ixgbe_write_reg(&adapter->hw,
						test->reg + (i * 0x40),
						test->write);
				break;
			case TABLE32_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 4),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_LO:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 8),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_HI:
				b = reg_pattern_test(adapter, data,
						     (test->reg + 4) + (i * 8),
						     test->mask,
						     test->write);
				break;
			}
			if (b)
				return 1;
		}
		test++;
	}

	*data = 0;
	return 0;
}
static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if (hw->eeprom.ops.validate_checksum(hw, NULL))
		*data = 1;
	else
		*data = 0;
	return *data;
}
static irqreturn_t ixgbe_test_intr(int irq, void *data)
{
	struct net_device *netdev = (struct net_device *) data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);

	return IRQ_HANDLED;
}
static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct net_device *netdev = adapter->netdev;
	u32 mask, i = 0, shared_int = true;
	u32 irq = adapter->pdev->irq;

	*data = 0;

	/* Hook up test interrupt handler just for this test */
	if (adapter->msix_entries) {
		/* NOTE: we don't test MSI-X interrupts here, yet */
		return 0;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		shared_int = false;
		if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
				netdev)) {
			*data = 1;
			return -1;
		}
	} else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
				netdev->name, netdev)) {
		shared_int = false;
	} else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
			       netdev->name, netdev)) {
		*data = 1;
		return -1;
	}
	e_info(hw, "testing %s interrupt\n", shared_int ?
	       "shared" : "unshared");

	/* Disable all the interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	usleep_range(10000, 20000);

	/* Test each interrupt */
	for (; i < 10; i++) {
		/* Interrupt to test */
		mask = BIT(i);

		if (!shared_int) {
			/*
			 * Disable the interrupts to be reported in
			 * the cause register and then force the same
			 * interrupt and see if one gets posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
					~mask & 0x00007FFF);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
					~mask & 0x00007FFF);
			IXGBE_WRITE_FLUSH(&adapter->hw);
			usleep_range(10000, 20000);

			if (adapter->test_icr & mask) {
				*data = 3;
				break;
			}
		}

		/*
		 * Enable the interrupt to be reported in the cause
		 * register and then force the same interrupt and see
		 * if one gets posted.  If an interrupt was not posted
		 * to the bus, the test failed.
		 */
		adapter->test_icr = 0;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		IXGBE_WRITE_FLUSH(&adapter->hw);
		usleep_range(10000, 20000);

		if (!(adapter->test_icr & mask)) {
			*data = 4;
			break;
		}

		if (!shared_int) {
			/*
			 * Disable the other interrupts to be reported in
			 * the cause register and then force the other
			 * interrupts and see if any get posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
					~mask & 0x00007FFF);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
					~mask & 0x00007FFF);
			IXGBE_WRITE_FLUSH(&adapter->hw);
			usleep_range(10000, 20000);

			if (adapter->test_icr) {
				*data = 5;
				break;
			}
		}
	}

	/* Disable all the interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	usleep_range(10000, 20000);

	/* Unhook test interrupt handler */
	free_irq(irq, netdev);

	return *data;
}
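/* The self-injection trick above relies on EICS (extended interrupt cause
 * set): writing a bit to EICS makes the hardware raise that interrupt as if
 * the event had occurred, ixgbe_test_intr() then accumulates the resulting
 * EICR causes in adapter->test_icr, and the loop compares that against the
 * mask it forced, with EIMS/EIMC selecting which causes may be reported.
 */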
static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
{
	/* Shut down the DMA engines now so they can be reinitialized later,
	 * since the test rings and normally used rings should overlap on
	 * queue 0 we can just use the standard disable Rx/Tx calls and they
	 * will take care of disabling the test rings for us.
	 */

	/* first Rx */
	ixgbe_disable_rx(adapter);

	/* now Tx */
	ixgbe_disable_tx(adapter);

	ixgbe_reset(adapter);

	ixgbe_free_tx_resources(&adapter->test_tx_ring);
	ixgbe_free_rx_resources(&adapter->test_rx_ring);
}
static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rctl, reg_data;
	int ret_val;
	int err;

	/* Setup Tx descriptor ring and Tx buffers */
	tx_ring->count = IXGBE_DEFAULT_TXD;
	tx_ring->queue_index = 0;
	tx_ring->dev = &adapter->pdev->dev;
	tx_ring->netdev = adapter->netdev;
	tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;

	err = ixgbe_setup_tx_resources(tx_ring);
	if (err)
		return 1;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
		reg_data |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
		break;
	default:
		break;
	}

	ixgbe_configure_tx_ring(adapter, tx_ring);

	/* Setup Rx Descriptor ring and Rx buffers */
	rx_ring->count = IXGBE_DEFAULT_RXD;
	rx_ring->queue_index = 0;
	rx_ring->dev = &adapter->pdev->dev;
	rx_ring->netdev = adapter->netdev;
	rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;

	err = ixgbe_setup_rx_resources(adapter, rx_ring);
	if (err) {
		ret_val = 4;
		goto err_nomem;
	}

	hw->mac.ops.disable_rx(hw);

	ixgbe_configure_rx_ring(adapter, rx_ring);

	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
	rctl |= IXGBE_RXCTRL_DMBYPS;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);

	hw->mac.ops.enable_rx(hw);

	return 0;

err_nomem:
	ixgbe_free_desc_rings(adapter);
	return ret_val;
}
static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg_data;

	/* Setup MAC loopback */
	reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	reg_data |= IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);

	reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);

	/* X540 and X550 needs to set the MACC.FLU bit to force link up */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg_data |= IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
		break;
	default:
		if (hw->mac.orig_autoc) {
			reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU;
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
		} else {
			return 10;
		}
	}
	IXGBE_WRITE_FLUSH(hw);
	usleep_range(10000, 20000);

	/* Disable Atlas Tx lanes; re-enabled in reset path */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		u8 atlas;

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
	}

	return 0;
}
static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
{
	u32 reg_data;

	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
	reg_data &= ~IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
}
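/* Test frame layout, as validated by ixgbe_check_lbtest_frame() below:
 * the buffer is filled with 0xFF, part of the upper half is overwritten
 * with 0xAA, and two marker bytes (0xBE and 0xAF) are written 10 and 12
 * bytes past the midpoint.  The checker only needs a leading 0xFF byte
 * plus the two markers to call the frame a match.
 */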
static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
				      unsigned int frame_size)
{
	memset(skb->data, 0xFF, frame_size);
	frame_size >>= 1;
	memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
	memset(&skb->data[frame_size + 10], 0xBE, 1);
	memset(&skb->data[frame_size + 12], 0xAF, 1);
}
static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
				     unsigned int frame_size)
{
	unsigned char *data;
	bool match = true;

	frame_size >>= 1;

	data = kmap(rx_buffer->page) + rx_buffer->page_offset;

	if (data[3] != 0xFF ||
	    data[frame_size + 10] != 0xBE ||
	    data[frame_size + 12] != 0xAF)
		match = false;

	kunmap(rx_buffer->page);

	return match;
}
static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
				  struct ixgbe_ring *tx_ring,
				  unsigned int size)
{
	union ixgbe_adv_rx_desc *rx_desc;
	u16 rx_ntc, tx_ntc, count = 0;

	/* initialize next to clean and descriptor values */
	rx_ntc = rx_ring->next_to_clean;
	tx_ntc = tx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);

	while (tx_ntc != tx_ring->next_to_use) {
		union ixgbe_adv_tx_desc *tx_desc;
		struct ixgbe_tx_buffer *tx_buffer;

		tx_desc = IXGBE_TX_DESC(tx_ring, tx_ntc);

		/* if DD is not set transmit has not completed */
		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			return count;

		/* unmap buffer on Tx side */
		tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];

		/* Free all the Tx ring sk_buffs */
		dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);
		dma_unmap_len_set(tx_buffer, len, 0);

		/* increment Tx next to clean counter */
		tx_ntc++;
		if (tx_ntc == tx_ring->count)
			tx_ntc = 0;
	}

	while (rx_desc->wb.upper.length) {
		struct ixgbe_rx_buffer *rx_buffer;

		/* check Rx buffer */
		rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];

		/* sync Rx buffer for CPU read */
		dma_sync_single_for_cpu(rx_ring->dev,
					rx_buffer->dma,
					ixgbe_rx_bufsz(rx_ring),
					DMA_FROM_DEVICE);

		/* verify contents of skb */
		if (ixgbe_check_lbtest_frame(rx_buffer, size))
			count++;
		else
			break;

		/* sync Rx buffer for device write */
		dma_sync_single_for_device(rx_ring->dev,
					   rx_buffer->dma,
					   ixgbe_rx_bufsz(rx_ring),
					   DMA_FROM_DEVICE);

		/* increment Rx next to clean counter */
		rx_ntc++;
		if (rx_ntc == rx_ring->count)
			rx_ntc = 0;

		/* fetch next descriptor */
		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
	}

	netdev_tx_reset_queue(txring_txq(tx_ring));

	/* re-map buffers to ring, store next to clean values */
	ixgbe_alloc_rx_buffers(rx_ring, count);
	rx_ring->next_to_clean = rx_ntc;
	tx_ring->next_to_clean = tx_ntc;

	return count;
}
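/* The loop count below is sized so that the larger of the two test
 * rings is wrapped roughly twice: each iteration posts a burst of 64
 * frames, so lc = (count / 64) * 2 + 1 iterations pushes about twice
 * the ring size through the hardware.
 */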
static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	int i, j, lc, good_cnt, ret_val = 0;
	unsigned int size = 1024;
	netdev_tx_t tx_ret_val;
	struct sk_buff *skb;
	u32 flags_orig = adapter->flags;

	/* DCB can modify the frames on Tx */
	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;

	/* allocate test skb */
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return 11;

	/* place data into test skb */
	ixgbe_create_lbtest_frame(skb, size);
	skb_put(skb, size);

	/*
	 * Calculate the loop count based on the largest descriptor ring
	 * The idea is to wrap the largest ring a number of times using 64
	 * send/receive pairs during each loop
	 */

	if (rx_ring->count <= tx_ring->count)
		lc = ((tx_ring->count / 64) * 2) + 1;
	else
		lc = ((rx_ring->count / 64) * 2) + 1;

	for (j = 0; j <= lc; j++) {
		/* reset count of good packets */
		good_cnt = 0;

		/* place 64 packets on the transmit queue*/
		for (i = 0; i < 64; i++) {
			skb_get(skb);
			tx_ret_val = ixgbe_xmit_frame_ring(skb,
							   adapter,
							   tx_ring);
			if (tx_ret_val == NETDEV_TX_OK)
				good_cnt++;
		}

		if (good_cnt != 64) {
			ret_val = 12;
			break;
		}

		/* allow 200 milliseconds for packets to go from Tx to Rx */
		msleep(200);

		good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
		if (good_cnt != 64) {
			ret_val = 13;
			break;
		}
	}

	/* free the original skb */
	kfree_skb(skb);
	adapter->flags = flags_orig;

	return ret_val;
}
static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
{
	*data = ixgbe_setup_desc_rings(adapter);
	if (*data)
		goto out;
	*data = ixgbe_setup_loopback_test(adapter);
	if (*data)
		goto err_loopback;
	*data = ixgbe_run_loopback_test(adapter);
	ixgbe_loopback_cleanup(adapter);

err_loopback:
	ixgbe_free_desc_rings(adapter);
out:
	return *data;
}
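/* Entry point for ETHTOOL_TEST ("ethtool -t").  The result slots are:
 * data[0] register test, data[1] eeprom test, data[2] interrupt test,
 * data[3] loopback test, data[4] link test.  Everything except the link
 * test requires offline mode, e.g. (illustrative invocation, assuming
 * the standard ethtool CLI):
 *
 *	ethtool -t eth0 offline
 */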
static void ixgbe_diag_test(struct net_device *netdev,
			    struct ethtool_test *eth_test, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	bool if_running = netif_running(netdev);

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		e_err(hw, "Adapter removed - test blocked\n");
		data[0] = 1;
		data[1] = 1;
		data[2] = 1;
		data[3] = 1;
		data[4] = 1;
		eth_test->flags |= ETH_TEST_FL_FAILED;
		return;
	}
	set_bit(__IXGBE_TESTING, &adapter->state);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		struct ixgbe_hw *hw = &adapter->hw;

		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
			int i;

			for (i = 0; i < adapter->num_vfs; i++) {
				if (adapter->vfinfo[i].clear_to_send) {
					netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n");
					data[0] = 1;
					data[1] = 1;
					data[2] = 1;
					data[3] = 1;
					data[4] = 1;
					eth_test->flags |= ETH_TEST_FL_FAILED;
					clear_bit(__IXGBE_TESTING,
						  &adapter->state);
					goto skip_ol_tests;
				}
			}
		}

		e_info(hw, "offline testing starting\n");

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result
		 */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (if_running)
			/* indicate we're in test mode */
			ixgbe_close(netdev);
		else
			ixgbe_reset(adapter);

		e_info(hw, "register testing starting\n");
		if (ixgbe_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		e_info(hw, "eeprom testing starting\n");
		if (ixgbe_eeprom_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		e_info(hw, "interrupt testing starting\n");
		if (ixgbe_intr_test(adapter, &data[2]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* If SRIOV or VMDq is enabled then skip MAC
		 * loopback diagnostic. */
		if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
				      IXGBE_FLAG_VMDQ_ENABLED)) {
			e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
			data[3] = 0;
			goto skip_loopback;
		}

		ixgbe_reset(adapter);
		e_info(hw, "loopback testing starting\n");
		if (ixgbe_loopback_test(adapter, &data[3]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

skip_loopback:
		ixgbe_reset(adapter);

		/* clear testing bit and return adapter to previous state */
		clear_bit(__IXGBE_TESTING, &adapter->state);
		if (if_running)
			ixgbe_open(netdev);
		else if (hw->mac.ops.disable_tx_laser)
			hw->mac.ops.disable_tx_laser(hw);
	} else {
		e_info(hw, "online testing starting\n");

		/* Online tests */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Offline tests aren't run; pass by default */
		data[0] = 0;
		data[1] = 0;
		data[2] = 0;
		data[3] = 0;

		clear_bit(__IXGBE_TESTING, &adapter->state);
	}

skip_ol_tests:
	msleep_interruptible(4 * 1000);
}
static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
			       struct ethtool_wolinfo *wol)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int retval = 0;

	/* WOL not supported for all devices */
	if (!ixgbe_wol_supported(adapter, hw->device_id,
				 hw->subsystem_device_id)) {
		retval = 1;
		wol->supported = 0;
	}

	return retval;
}
static void ixgbe_get_wol(struct net_device *netdev,
			  struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	wol->supported = WAKE_UCAST | WAKE_MCAST |
			 WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = 0;

	if (ixgbe_wol_exclusion(adapter, wol) ||
	    !device_can_wakeup(&adapter->pdev->dev))
		return;

	if (adapter->wol & IXGBE_WUFC_EX)
		wol->wolopts |= WAKE_UCAST;
	if (adapter->wol & IXGBE_WUFC_MC)
		wol->wolopts |= WAKE_MCAST;
	if (adapter->wol & IXGBE_WUFC_BC)
		wol->wolopts |= WAKE_BCAST;
	if (adapter->wol & IXGBE_WUFC_MAG)
		wol->wolopts |= WAKE_MAGIC;
}
static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
		return -EOPNOTSUPP;

	if (ixgbe_wol_exclusion(adapter, wol))
		return wol->wolopts ? -EOPNOTSUPP : 0;

	adapter->wol = 0;

	if (wol->wolopts & WAKE_UCAST)
		adapter->wol |= IXGBE_WUFC_EX;
	if (wol->wolopts & WAKE_MCAST)
		adapter->wol |= IXGBE_WUFC_MC;
	if (wol->wolopts & WAKE_BCAST)
		adapter->wol |= IXGBE_WUFC_BC;
	if (wol->wolopts & WAKE_MAGIC)
		adapter->wol |= IXGBE_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}
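/* Illustrative usage (assuming the standard ethtool CLI): magic-packet
 * wake maps to IXGBE_WUFC_MAG above and is enabled with:
 *
 *	ethtool -s eth0 wol g
 */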
static int ixgbe_nway_reset(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}
static int ixgbe_set_phys_id(struct net_device *netdev,
			     enum ethtool_phys_id_state state)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (!hw->mac.ops.led_on || !hw->mac.ops.led_off)
		return -EOPNOTSUPP;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
		return 2;

	case ETHTOOL_ID_ON:
		hw->mac.ops.led_on(hw, hw->mac.led_link_act);
		break;

	case ETHTOOL_ID_OFF:
		hw->mac.ops.led_off(hw, hw->mac.led_link_act);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore LED settings */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
		break;
	}

	return 0;
}
static int ixgbe_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* only valid if in constant ITR mode */
	if (adapter->rx_itr_setting <= 1)
		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
	else
		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;

	/* if in mixed tx/rx queues per vector mode, report only rx settings */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		return 0;

	/* only valid if in constant ITR mode */
	if (adapter->tx_itr_setting <= 1)
		ec->tx_coalesce_usecs = adapter->tx_itr_setting;
	else
		ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;

	return 0;
}
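/* Note on the ITR encoding used above and in ixgbe_set_coalesce(): the
 * values 0 and 1 are stored verbatim (0 = throttling off, 1 = dynamic
 * ITR), while larger microsecond values are stored shifted left by two
 * so they can be written straight into the EITR register; the getter
 * shifts right by two to report microseconds again.
 */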
/*
 * this function must be called before setting the new value of
 * rx_itr_setting
 */
static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* nothing to do if LRO or RSC are not enabled */
	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
	    !(netdev->features & NETIF_F_LRO))
		return false;

	/* check the feature flag value and enable RSC if necessary */
	if (adapter->rx_itr_setting == 1 ||
	    adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
		if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
			adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
			e_info(probe, "rx-usecs value high enough to re-enable RSC\n");
			return true;
		}
	/* if interrupt rate is too high then disable RSC */
	} else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
		adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
		e_info(probe, "rx-usecs set too low, disabling RSC\n");
		return true;
	}
	return false;
}
static int ixgbe_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_q_vector *q_vector;
	int i;
	u16 tx_itr_param, rx_itr_param, tx_itr_prev;
	bool need_reset = false;

	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
		/* reject Tx specific changes in case of mixed RxTx vectors */
		if (ec->tx_coalesce_usecs)
			return -EINVAL;
		tx_itr_prev = adapter->rx_itr_setting;
	} else {
		tx_itr_prev = adapter->tx_itr_setting;
	}

	if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
	    (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
		return -EINVAL;

	if (ec->rx_coalesce_usecs > 1)
		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
		adapter->rx_itr_setting = ec->rx_coalesce_usecs;

	if (adapter->rx_itr_setting == 1)
		rx_itr_param = IXGBE_20K_ITR;
	else
		rx_itr_param = adapter->rx_itr_setting;

	if (ec->tx_coalesce_usecs > 1)
		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
	else
		adapter->tx_itr_setting = ec->tx_coalesce_usecs;

	if (adapter->tx_itr_setting == 1)
		tx_itr_param = IXGBE_12K_ITR;
	else
		tx_itr_param = adapter->tx_itr_setting;

	/* mixed Rx/Tx */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		adapter->tx_itr_setting = adapter->rx_itr_setting;

	/* detect ITR changes that require update of TXDCTL.WTHRESH */
	if ((adapter->tx_itr_setting != 1) &&
	    (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
		if ((tx_itr_prev == 1) ||
		    (tx_itr_prev >= IXGBE_100K_ITR))
			need_reset = true;
	} else {
		if ((tx_itr_prev != 1) &&
		    (tx_itr_prev < IXGBE_100K_ITR))
			need_reset = true;
	}

	/* check the old value and enable RSC if necessary */
	need_reset |= ixgbe_update_rsc(adapter);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		q_vector = adapter->q_vector[i];
		if (q_vector->tx.count && !q_vector->rx.count)
			/* tx only */
			q_vector->itr = tx_itr_param;
		else
			/* rx only or mixed */
			q_vector->itr = rx_itr_param;
		ixgbe_write_eitr(q_vector);
	}

	/*
	 * do reset here at the end to make sure EITR==0 case is handled
	 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
	 * also locks in RSC enable/disable which requires reset
	 */
	if (need_reset)
		ixgbe_do_reset(netdev);

	return 0;
}
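/* Flow Director (ntuple) rule lookup for ethtool.  The capacity
 * reported below is (1024 << fdir_pballoc) - 2, i.e. it scales with the
 * Flow Director packet-buffer allocation, less two slots that are never
 * handed out.
 */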
static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	union ixgbe_atr_input *mask = &adapter->fdir_mask;
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *rule = NULL;

	/* report total rule count */
	cmd->data = (1024 << adapter->fdir_pballoc) - 2;

	hlist_for_each_entry_safe(rule, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		if (fsp->location <= rule->sw_idx)
			break;
	}

	if (!rule || fsp->location != rule->sw_idx)
		return -EINVAL;

	/* fill out the flow spec entry */

	/* set flow type field */
	switch (rule->filter.formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
		fsp->flow_type = TCP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
		fsp->flow_type = UDP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
		fsp->flow_type = SCTP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_IPV4:
		fsp->flow_type = IP_USER_FLOW;
		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		fsp->h_u.usr_ip4_spec.proto = 0;
		fsp->m_u.usr_ip4_spec.proto = 0;
		break;
	default:
		return -EINVAL;
	}

	fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
	fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
	fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
	fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
	fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
	fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
	fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
	fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
	fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
	fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
	fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
	fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
	fsp->flow_type |= FLOW_EXT;

	/* record action */
	if (rule->action == IXGBE_FDIR_DROP_QUEUE)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = rule->action;

	return 0;
}
static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
				      struct ethtool_rxnfc *cmd,
				      u32 *rule_locs)
{
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *rule;
	int cnt = 0;

	/* report total rule count */
	cmd->data = (1024 << adapter->fdir_pballoc) - 2;

	hlist_for_each_entry_safe(rule, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		if (cnt == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[cnt] = rule->sw_idx;
		cnt++;
	}

	cmd->rule_cnt = cnt;

	return 0;
}
static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
				   struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;

	/* Report default options for RSS on ixgbe */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case UDP_V4_FLOW:
		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case TCP_V6_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case UDP_V6_FLOW:
		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			   u32 *rule_locs)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = adapter->num_rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = adapter->fdir_filter_count;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
		break;
	case ETHTOOL_GRXFH:
		ret = ixgbe_get_rss_hash_opts(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}
int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
				    struct ixgbe_fdir_filter *input,
				    u16 sw_idx)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *rule, *parent;
	int err = -EINVAL;

	parent = NULL;
	rule = NULL;

	hlist_for_each_entry_safe(rule, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		/* hash found, or no matching entry */
		if (rule->sw_idx >= sw_idx)
			break;
		parent = rule;
	}

	/* if there is an old rule occupying our place remove it */
	if (rule && (rule->sw_idx == sw_idx)) {
		if (!input || (rule->filter.formatted.bkt_hash !=
			       input->filter.formatted.bkt_hash)) {
			err = ixgbe_fdir_erase_perfect_filter_82599(hw,
								&rule->filter,
								sw_idx);
		}

		hlist_del(&rule->fdir_node);
		kfree(rule);
		adapter->fdir_filter_count--;
	}

	/*
	 * If no input this was a delete, err should be 0 if a rule was
	 * successfully found and removed from the list else -EINVAL
	 */
	if (!input)
		return err;

	/* initialize node and set software index */
	INIT_HLIST_NODE(&input->fdir_node);

	/* add filter to the list */
	if (parent)
		hlist_add_behind(&input->fdir_node, &parent->fdir_node);
	else
		hlist_add_head(&input->fdir_node,
			       &adapter->fdir_filter_list);

	/* update counts */
	adapter->fdir_filter_count++;

	return 0;
}
static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
				       u8 *flow_type)
{
	switch (fsp->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
		break;
	case UDP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
		break;
	case SCTP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
		break;
	case IP_USER_FLOW:
		switch (fsp->h_u.usr_ip4_spec.proto) {
		case IPPROTO_TCP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
			break;
		case IPPROTO_UDP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
			break;
		case IPPROTO_SCTP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
			break;
		case 0:
			if (!fsp->m_u.usr_ip4_spec.proto) {
				*flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
				break;
			}
			/* fall through */
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	return 1;
}
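/* For rule insertion the ring_cookie is decoded into an optional VF and
 * a ring number; VF 0 means the PF, in which case the ring is mapped
 * through rx_ring[ring]->reg_idx to the absolute hardware queue.
 * RX_CLS_FLOW_DISC selects the special drop queue instead.
 */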
static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fdir_filter *input;
	union ixgbe_atr_input mask;
	u8 queue;
	int err;

	if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
		return -EOPNOTSUPP;

	/* ring_cookie is a masked into a set of queues and ixgbe pools or
	 * we use the drop index.
	 */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
		queue = IXGBE_FDIR_DROP_QUEUE;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);

		if (!vf && (ring >= adapter->num_rx_queues))
			return -EINVAL;
		else if (vf &&
			 ((vf > adapter->num_vfs) ||
			   ring >= adapter->num_rx_queues_per_pool))
			return -EINVAL;

		/* Map the ring onto the absolute queue index */
		if (!vf)
			queue = adapter->rx_ring[ring]->reg_idx;
		else
			queue = ((vf - 1) *
				adapter->num_rx_queues_per_pool) + ring;
	}

	/* Don't allow indexes to exist outside of available space */
	if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
		e_err(drv, "Location out of range\n");
		return -EINVAL;
	}

	input = kzalloc(sizeof(*input), GFP_ATOMIC);
	if (!input)
		return -ENOMEM;

	memset(&mask, 0, sizeof(union ixgbe_atr_input));

	/* set SW index */
	input->sw_idx = fsp->location;

	/* record flow type */
	if (!ixgbe_flowspec_to_flow_type(fsp,
					 &input->filter.formatted.flow_type)) {
		e_err(drv, "Unrecognized flow type\n");
		goto err_out;
	}

	mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
				   IXGBE_ATR_L4TYPE_MASK;

	if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
		mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;

	/* Copy input into formatted structures */
	input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
	mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
	input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
	mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
	input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
	mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
	input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
	mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;

	if (fsp->flow_type & FLOW_EXT) {
		input->filter.formatted.vm_pool =
				(unsigned char)ntohl(fsp->h_ext.data[1]);
		mask.formatted.vm_pool =
				(unsigned char)ntohl(fsp->m_ext.data[1]);
		input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
		mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
		input->filter.formatted.flex_bytes =
						fsp->h_ext.vlan_etype;
		mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
	}

	/* determine if we need to drop or route the packet */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
		input->action = IXGBE_FDIR_DROP_QUEUE;
	else
		input->action = fsp->ring_cookie;

	spin_lock(&adapter->fdir_perfect_lock);

	if (hlist_empty(&adapter->fdir_filter_list)) {
		/* save mask and program input mask into HW */
		memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
		err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
		if (err) {
			e_err(drv, "Error writing mask\n");
			goto err_out_w_lock;
		}
	} else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
		e_err(drv, "Only one mask supported per port\n");
		goto err_out_w_lock;
	}

	/* apply mask and compute/store hash */
	ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);

	/* program filters to filter memory */
	err = ixgbe_fdir_write_perfect_filter_82599(hw,
				&input->filter, input->sw_idx, queue);
	if (err)
		goto err_out_w_lock;

	ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);

	spin_unlock(&adapter->fdir_perfect_lock);

	return err;
err_out_w_lock:
	spin_unlock(&adapter->fdir_perfect_lock);
err_out:
	kfree(input);
	return -EINVAL;
}
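/* Illustrative rule insertion (assuming the standard ethtool CLI), e.g.
 * steering TCP/IPv4 traffic with destination port 80 to queue 4 at
 * software index 1:
 *
 *	ethtool -N eth0 flow-type tcp4 dst-port 80 action 4 loc 1
 *
 * Note the "only one mask supported per port" restriction enforced
 * above: every rule on the port must share the same field mask.
 */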
static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	int err;

	spin_lock(&adapter->fdir_perfect_lock);
	err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
	spin_unlock(&adapter->fdir_perfect_lock);

	return err;
}
#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
		       IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
				  struct ethtool_rxnfc *nfc)
{
	u32 flags2 = adapter->flags2;

	/*
	 * RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    !(nfc->data & RXH_L4_B_0_1) ||
		    !(nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	case UDP_V4_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    (nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* if we changed something we need to update flags */
	if (flags2 != adapter->flags2) {
		struct ixgbe_hw *hw = &adapter->hw;
		u32 mrqc;
		unsigned int pf_pool = adapter->num_vfs;

		if ((hw->mac.type >= ixgbe_mac_X550) &&
		    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
			mrqc = IXGBE_READ_REG(hw, IXGBE_PFVFMRQC(pf_pool));
		else
			mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);

		if ((flags2 & UDP_RSS_FLAGS) &&
		    !(adapter->flags2 & UDP_RSS_FLAGS))
			e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");

		adapter->flags2 = flags2;

		/* Perform hash on these packet types */
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
		      | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
		      | IXGBE_MRQC_RSS_FIELD_IPV6
		      | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;

		mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
			  IXGBE_MRQC_RSS_FIELD_IPV6_UDP);

		if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
			mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;

		if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
			mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;

		if ((hw->mac.type >= ixgbe_mac_X550) &&
		    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
			IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), mrqc);
		else
			IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	}

	return 0;
}
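/* Illustrative usage (assuming the standard ethtool CLI): hashing UDP
 * ports into the IPv4 RSS hash, which sets
 * IXGBE_FLAG2_RSS_FIELD_IPV4_UDP above:
 *
 *	ethtool -N eth0 rx-flow-hash udp4 sdfn
 */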
static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_SRXFH:
		ret = ixgbe_set_rss_hash_opt(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}
static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
{
	if (adapter->hw.mac.type < ixgbe_mac_X550)
		return 16;
	else
		return 64;
}

static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
{
	return IXGBE_RSS_KEY_SIZE;
}

static u32 ixgbe_rss_indir_size(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	return ixgbe_rss_indir_tbl_entries(adapter);
}

static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
{
	int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter);
	u16 rss_m = adapter->ring_feature[RING_F_RSS].mask;

	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		rss_m = adapter->ring_feature[RING_F_RSS].indices - 1;

	for (i = 0; i < reta_size; i++)
		indir[i] = adapter->rss_indir_tbl[i] & rss_m;
}

static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			  u8 *hfunc)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (indir)
		ixgbe_get_reta(adapter, indir);

	if (key)
		memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev));

	return 0;
}
static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
			  const u8 *key, const u8 hfunc)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int i;
	u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);

	if (hfunc)
		return -EINVAL;

	/* Fill out the redirection table */
	if (indir) {
		int max_queues = min_t(int, adapter->num_rx_queues,
				       ixgbe_rss_indir_tbl_max(adapter));

		/*Allow at least 2 queues w/ SR-IOV.*/
		if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
		    (max_queues < 2))
			max_queues = 2;

		/* Verify user input. */
		for (i = 0; i < reta_entries; i++)
			if (indir[i] >= max_queues)
				return -EINVAL;

		for (i = 0; i < reta_entries; i++)
			adapter->rss_indir_tbl[i] = indir[i];

		ixgbe_store_reta(adapter);
	}

	/* Fill out the rss hash key */
	if (key) {
		memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev));
		ixgbe_store_key(adapter);
	}

	return 0;
}
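/* Illustrative usage (assuming the standard ethtool CLI): spreading the
 * RSS indirection table evenly across the first 8 queues:
 *
 *	ethtool -X eth0 equal 8
 */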
static int ixgbe_get_ts_info(struct net_device *dev,
			     struct ethtool_ts_info *info)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);

	/* we always support timestamping disabled */
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
		break;
	case ixgbe_mac_X540:
	case ixgbe_mac_82599EB:
		info->rx_filters |=
			BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
			BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
			BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
		break;
	default:
		return ethtool_op_get_ts_info(dev, info);
	}

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE |
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;

	if (adapter->ptp_clock)
		info->phc_index = ptp_clock_index(adapter->ptp_clock);
	else
		info->phc_index = -1;

	info->tx_types =
		BIT(HWTSTAMP_TX_OFF) |
		BIT(HWTSTAMP_TX_ON);

	return 0;
}
static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
{
	unsigned int max_combined;
	u8 tcs = adapter->hw_tcs;

	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/* We only support one q_vector without MSI-X */
		max_combined = 1;
	} else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		/* Limit value based on the queue mask */
		max_combined = adapter->ring_feature[RING_F_RSS].mask + 1;
	} else if (tcs > 1) {
		/* For DCB report channels per traffic class */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			/* 8 TC w/ 4 queues per TC */
			max_combined = 4;
		} else if (tcs > 4) {
			/* 8 TC w/ 8 queues per TC */
			max_combined = 8;
		} else {
			/* 4 TC w/ 16 queues per TC */
			max_combined = 16;
		}
	} else if (adapter->atr_sample_rate) {
		/* support up to 64 queues with ATR */
		max_combined = IXGBE_MAX_FDIR_INDICES;
	} else {
		/* support up to 16 queues with RSS */
		max_combined = ixgbe_max_rss_indices(adapter);
	}

	return max_combined;
}
static void ixgbe_get_channels(struct net_device *dev,
			       struct ethtool_channels *ch)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);

	/* report maximum channels */
	ch->max_combined = ixgbe_max_channels(adapter);

	/* report info for other vector */
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		ch->max_other = NON_Q_VECTORS;
		ch->other_count = NON_Q_VECTORS;
	}

	/* record RSS queues */
	ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;

	/* nothing else to report if RSS is disabled */
	if (ch->combined_count == 1)
		return;

	/* we do not support ATR queueing if SR-IOV is enabled */
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		return;

	/* same thing goes for being DCB enabled */
	if (adapter->hw_tcs > 1)
		return;

	/* if ATR is disabled we can exit */
	if (!adapter->atr_sample_rate)
		return;

	/* report flow director queues as maximum channels */
	ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
}
static int ixgbe_set_channels(struct net_device *dev,
			      struct ethtool_channels *ch)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	unsigned int count = ch->combined_count;
	u8 max_rss_indices = ixgbe_max_rss_indices(adapter);

	/* verify they are not requesting separate vectors */
	if (!count || ch->rx_count || ch->tx_count)
		return -EINVAL;

	/* verify other_count has not changed */
	if (ch->other_count != NON_Q_VECTORS)
		return -EINVAL;

	/* verify the number of channels does not exceed hardware limits */
	if (count > ixgbe_max_channels(adapter))
		return -EINVAL;

	/* update feature limits from largest to smallest supported values */
	adapter->ring_feature[RING_F_FDIR].limit = count;

	/* cap RSS limit */
	if (count > max_rss_indices)
		count = max_rss_indices;
	adapter->ring_feature[RING_F_RSS].limit = count;

#ifdef IXGBE_FCOE
	/* cap FCoE limit at 8 */
	if (count > IXGBE_FCRETA_SIZE)
		count = IXGBE_FCRETA_SIZE;
	adapter->ring_feature[RING_F_FCOE].limit = count;

#endif
	/* use setup TC to update any traffic class queue mapping */
	return ixgbe_setup_tc(dev, adapter->hw_tcs);
}
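/* Illustrative usage (assuming the standard ethtool CLI): ixgbe only
 * accepts combined channels (the rx_count/tx_count check above), so
 * queue counts are changed with e.g.:
 *
 *	ethtool -L eth0 combined 16
 */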
static int ixgbe_get_module_info(struct net_device *dev,
				 struct ethtool_modinfo *modinfo)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	s32 status;
	u8 sff8472_rev, addr_mode;
	bool page_swap = false;

	if (hw->phy.type == ixgbe_phy_fw)
		return -ENXIO;

	/* Check whether we support SFF-8472 or not */
	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_SFF_8472_COMP,
					     &sff8472_rev);
	if (status)
		return -EIO;

	/* addressing mode is not supported */
	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_SFF_8472_SWAP,
					     &addr_mode);
	if (status)
		return -EIO;

	if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
		e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
		page_swap = true;
	}

	if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
		/* We have a SFP, but it does not support SFF-8472 */
		modinfo->type = ETH_MODULE_SFF_8079;
		modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
	} else {
		/* We have a SFP which supports a revision of SFF-8472. */
		modinfo->type = ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
	}

	return 0;
}
static int ixgbe_get_module_eeprom(struct net_device *dev,
				   struct ethtool_eeprom *ee,
				   u8 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
	u8 databyte = 0xFF;
	int i = 0;

	if (ee->len == 0)
		return -EINVAL;

	if (hw->phy.type == ixgbe_phy_fw)
		return -ENXIO;

	for (i = ee->offset; i < ee->offset + ee->len; i++) {
		/* I2C reads can take long time */
		if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
			return -EBUSY;

		if (i < ETH_MODULE_SFF_8079_LEN)
			status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
		else
			status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);

		if (status)
			return -EIO;

		data[i - ee->offset] = databyte;
	}

	return 0;
}
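/* The two callbacks above back "ethtool -m" (module EEPROM dump):
 * offsets below ETH_MODULE_SFF_8079_LEN are read from the SFF-8079 page
 * at I2C address 0xA0, the remainder from the SFF-8472 diagnostic page
 * at 0xA2.
 */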
static const struct {
	ixgbe_link_speed mac_speed;
	u32 supported;
} ixgbe_ls_map[] = {
	{ IXGBE_LINK_SPEED_10_FULL, SUPPORTED_10baseT_Full },
	{ IXGBE_LINK_SPEED_100_FULL, SUPPORTED_100baseT_Full },
	{ IXGBE_LINK_SPEED_1GB_FULL, SUPPORTED_1000baseT_Full },
	{ IXGBE_LINK_SPEED_2_5GB_FULL, SUPPORTED_2500baseX_Full },
	{ IXGBE_LINK_SPEED_10GB_FULL, SUPPORTED_10000baseT_Full },
};

static const struct {
	u32 lp_advertised;
	u32 mac_speed;
} ixgbe_lp_map[] = {
	{ FW_PHY_ACT_UD_2_100M_TX_EEE, SUPPORTED_100baseT_Full },
	{ FW_PHY_ACT_UD_2_1G_T_EEE, SUPPORTED_1000baseT_Full },
	{ FW_PHY_ACT_UD_2_10G_T_EEE, SUPPORTED_10000baseT_Full },
	{ FW_PHY_ACT_UD_2_1G_KX_EEE, SUPPORTED_1000baseKX_Full },
	{ FW_PHY_ACT_UD_2_10G_KX4_EEE, SUPPORTED_10000baseKX4_Full },
	{ FW_PHY_ACT_UD_2_10G_KR_EEE, SUPPORTED_10000baseKR_Full },
};
static int
ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_eee *edata)
{
	u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
	struct ixgbe_hw *hw = &adapter->hw;
	int rc;
	u16 i;

	rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, &info);
	if (rc)
		return rc;

	edata->lp_advertised = 0;
	for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) {
		if (info[0] & ixgbe_lp_map[i].lp_advertised)
			edata->lp_advertised |= ixgbe_lp_map[i].mac_speed;
	}

	edata->supported = 0;
	for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
		if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed)
			edata->supported |= ixgbe_ls_map[i].supported;
	}

	edata->advertised = 0;
	for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
		if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed)
			edata->advertised |= ixgbe_ls_map[i].supported;
	}

	edata->eee_enabled = !!edata->advertised;
	edata->tx_lpi_enabled = edata->eee_enabled;
	if (edata->advertised & edata->lp_advertised)
		edata->eee_active = true;

	return 0;
}
static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
		return -EOPNOTSUPP;

	if (hw->phy.eee_speeds_supported && hw->phy.type == ixgbe_phy_fw)
		return ixgbe_get_eee_fw(adapter, edata);

	return -EOPNOTSUPP;
}
static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ethtool_eee eee_data;
	s32 ret_val;

	if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
		return -EOPNOTSUPP;

	memset(&eee_data, 0, sizeof(struct ethtool_eee));

	ret_val = ixgbe_get_eee(netdev, &eee_data);
	if (ret_val)
		return ret_val;

	if (eee_data.eee_enabled && !edata->eee_enabled) {
		if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) {
			e_err(drv, "Setting EEE tx-lpi is not supported\n");
			return -EINVAL;
		}

		if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) {
			e_err(drv,
			      "Setting EEE Tx LPI timer is not supported\n");
			return -EINVAL;
		}

		if (eee_data.advertised != edata->advertised) {
			e_err(drv,
			      "Setting EEE advertised speeds is not supported\n");
			return -EINVAL;
		}
	}

	if (eee_data.eee_enabled != edata->eee_enabled) {
		if (edata->eee_enabled) {
			adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
			hw->phy.eee_speeds_advertised =
						   hw->phy.eee_speeds_supported;
		} else {
			adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
			hw->phy.eee_speeds_advertised = 0;
		}

		/* reset link */
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
		else
			ixgbe_reset(adapter);
	}

	return 0;
}
static u32 ixgbe_get_priv_flags(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 priv_flags = 0;

	if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
		priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX;

	return priv_flags;
}

static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	unsigned int flags2 = adapter->flags2;

	flags2 &= ~IXGBE_FLAG2_RX_LEGACY;
	if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX)
		flags2 |= IXGBE_FLAG2_RX_LEGACY;

	if (flags2 != adapter->flags2) {
		adapter->flags2 = flags2;

		/* reset interface to repopulate queues */
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
	}

	return 0;
}
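/* Illustrative usage (assuming the standard ethtool CLI, and that the
 * driver's private-flags strings table, defined elsewhere, names this
 * flag "legacy-rx"):
 *
 *	ethtool --set-priv-flags eth0 legacy-rx on
 */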
static const struct ethtool_ops ixgbe_ethtool_ops = {
	.get_drvinfo		= ixgbe_get_drvinfo,
	.get_regs_len		= ixgbe_get_regs_len,
	.get_regs		= ixgbe_get_regs,
	.get_wol		= ixgbe_get_wol,
	.set_wol		= ixgbe_set_wol,
	.nway_reset		= ixgbe_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= ixgbe_get_eeprom_len,
	.get_eeprom		= ixgbe_get_eeprom,
	.set_eeprom		= ixgbe_set_eeprom,
	.get_ringparam		= ixgbe_get_ringparam,
	.set_ringparam		= ixgbe_set_ringparam,
	.get_pauseparam		= ixgbe_get_pauseparam,
	.set_pauseparam		= ixgbe_set_pauseparam,
	.get_msglevel		= ixgbe_get_msglevel,
	.set_msglevel		= ixgbe_set_msglevel,
	.self_test		= ixgbe_diag_test,
	.get_strings		= ixgbe_get_strings,
	.set_phys_id		= ixgbe_set_phys_id,
	.get_sset_count		= ixgbe_get_sset_count,
	.get_ethtool_stats	= ixgbe_get_ethtool_stats,
	.get_coalesce		= ixgbe_get_coalesce,
	.set_coalesce		= ixgbe_set_coalesce,
	.get_rxnfc		= ixgbe_get_rxnfc,
	.set_rxnfc		= ixgbe_set_rxnfc,
	.get_rxfh_indir_size	= ixgbe_rss_indir_size,
	.get_rxfh_key_size	= ixgbe_get_rxfh_key_size,
	.get_rxfh		= ixgbe_get_rxfh,
	.set_rxfh		= ixgbe_set_rxfh,
	.get_eee		= ixgbe_get_eee,
	.set_eee		= ixgbe_set_eee,
	.get_channels		= ixgbe_get_channels,
	.set_channels		= ixgbe_set_channels,
	.get_priv_flags		= ixgbe_get_priv_flags,
	.set_priv_flags		= ixgbe_set_priv_flags,
	.get_ts_info		= ixgbe_get_ts_info,
	.get_module_info	= ixgbe_get_module_info,
	.get_module_eeprom	= ixgbe_get_module_eeprom,
	.get_link_ksettings	= ixgbe_get_link_ksettings,
	.set_link_ksettings	= ixgbe_set_link_ksettings,
};

void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ixgbe_ethtool_ops;
}