/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2016 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
/* ethtool support for ixgbe */

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>

#include "ixgbe.h"
#include "ixgbe_phy.h"

#define IXGBE_ALL_RAR_ENTRIES 16
enum {NETDEV_STATS, IXGBE_STATS};

struct ixgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};
#define IXGBE_STAT(m)		IXGBE_STATS, \
				sizeof(((struct ixgbe_adapter *)0)->m), \
				offsetof(struct ixgbe_adapter, m)
#define IXGBE_NETDEV_STAT(m)	NETDEV_STATS, \
				sizeof(((struct rtnl_link_stats64 *)0)->m), \
				offsetof(struct rtnl_link_stats64, m)
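
/* For illustration only (editor's note, not part of the driver): a table
 * entry such as
 *	{"tx_busy", IXGBE_STAT(tx_busy)}
 * expands to
 *	{"tx_busy", IXGBE_STATS,
 *	 sizeof(((struct ixgbe_adapter *)0)->tx_busy),
 *	 offsetof(struct ixgbe_adapter, tx_busy)}
 * so each entry records which structure the counter lives in, how wide it
 * is, and at what offset, letting ixgbe_get_ethtool_stats() copy every
 * counter out generically without per-stat code.
 */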
static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
	{"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
	{"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
	{"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
	{"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
	{"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
	{"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
	{"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
	{"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
	{"lsc_int", IXGBE_STAT(lsc_int)},
	{"tx_busy", IXGBE_STAT(tx_busy)},
	{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
	{"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
	{"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
	{"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
	{"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
	{"multicast", IXGBE_NETDEV_STAT(multicast)},
	{"broadcast", IXGBE_STAT(stats.bprc)},
	{"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
	{"collisions", IXGBE_NETDEV_STAT(collisions)},
	{"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
	{"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
	{"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
	{"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
	{"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
	{"fdir_match", IXGBE_STAT(stats.fdirmatch)},
	{"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
	{"fdir_overflow", IXGBE_STAT(fdir_overflow)},
	{"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
	{"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
	{"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
	{"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
	{"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
	{"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
	{"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
	{"tx_restart_queue", IXGBE_STAT(restart_queue)},
	{"rx_long_length_errors", IXGBE_STAT(stats.roc)},
	{"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
	{"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
	{"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
	{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
	{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
	{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
	{"alloc_rx_page", IXGBE_STAT(alloc_rx_page)},
	{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
	{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
	{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
	{"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
	{"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
	{"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
	{"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
	{"tx_hwtstamp_timeouts", IXGBE_STAT(tx_hwtstamp_timeouts)},
	{"tx_hwtstamp_skipped", IXGBE_STAT(tx_hwtstamp_skipped)},
	{"rx_hwtstamp_cleared", IXGBE_STAT(rx_hwtstamp_cleared)},
	{"tx_ipsec", IXGBE_STAT(tx_ipsec)},
	{"rx_ipsec", IXGBE_STAT(rx_ipsec)},
#ifdef IXGBE_FCOE
	{"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
	{"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
	{"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
	{"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
	{"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
	{"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
	{"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
	{"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
#endif /* IXGBE_FCOE */
};
/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so
 * we set the num_rx_queues to evaluate to num_tx_queues. This is
 * used because we do not have a good way to get the max number of
 * rx queues with CONFIG_RPS disabled.
 */
#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues
#define IXGBE_QUEUE_STATS_LEN ( \
	(netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
	(sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
#define IXGBE_PB_STATS_LEN ( \
			(sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
			/ sizeof(u64))
#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
			 IXGBE_PB_STATS_LEN + \
			 IXGBE_QUEUE_STATS_LEN)
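
/* Worked example (editor's illustration, assuming struct ixgbe_queue_stats
 * holds the two u64 counters copied out below, packets and bytes): with
 * 8 Tx and 8 Rx queues, IXGBE_QUEUE_STATS_LEN = (8 + 8) * 2 = 32 entries.
 * IXGBE_PB_STATS_LEN covers the four 8-element per-priority flow control
 * arrays, i.e. 4 * 8 = 32 more u64 entries, and IXGBE_STATS_LEN is the sum
 * of the global, packet-buffer and queue counts reported via ETH_SS_STATS.
 */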
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test  (offline)", "Eeprom test    (offline)",
	"Interrupt test (offline)", "Loopback test  (offline)",
	"Link test   (on/offline)"
};

#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define IXGBE_PRIV_FLAGS_LEGACY_RX	BIT(0)
	"legacy-rx",
};

#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings)
/* currently supported speeds for 10G */
#define ADVRTSD_MSK_10G (SUPPORTED_10000baseT_Full | \
			 SUPPORTED_10000baseKX4_Full | \
			 SUPPORTED_10000baseKR_Full)

#define ixgbe_isbackplane(type) ((type) == ixgbe_media_type_backplane)
static u32 ixgbe_get_supported_10gtypes(struct ixgbe_hw *hw)
{
	if (!ixgbe_isbackplane(hw->phy.media_type))
		return SUPPORTED_10000baseT_Full;

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598:
	case IXGBE_DEV_ID_82599_KX4:
	case IXGBE_DEV_ID_82599_KX4_MEZZ:
	case IXGBE_DEV_ID_X550EM_X_KX4:
		return SUPPORTED_10000baseKX4_Full;
	case IXGBE_DEV_ID_82598_BX:
	case IXGBE_DEV_ID_82599_KR:
	case IXGBE_DEV_ID_X550EM_X_KR:
	case IXGBE_DEV_ID_X550EM_X_XFI:
		return SUPPORTED_10000baseKR_Full;
	default:
		return SUPPORTED_10000baseKX4_Full |
		       SUPPORTED_10000baseKR_Full;
	}
}
static int ixgbe_get_link_ksettings(struct net_device *netdev,
				    struct ethtool_link_ksettings *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed supported_link;
	bool autoneg = false;
	u32 supported, advertising;

	ethtool_convert_link_mode_to_legacy_u32(&supported,
						cmd->link_modes.supported);

	hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);

	/* set the supported link speeds */
	if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
		supported |= ixgbe_get_supported_10gtypes(hw);
	if (supported_link & IXGBE_LINK_SPEED_1GB_FULL)
		supported |= (ixgbe_isbackplane(hw->phy.media_type)) ?
				   SUPPORTED_1000baseKX_Full :
				   SUPPORTED_1000baseT_Full;
	if (supported_link & IXGBE_LINK_SPEED_100_FULL)
		supported |= SUPPORTED_100baseT_Full;
	if (supported_link & IXGBE_LINK_SPEED_10_FULL)
		supported |= SUPPORTED_10baseT_Full;

	/* default advertised speed if phy.autoneg_advertised isn't set */
	advertising = supported;
	/* set the advertised speeds */
	if (hw->phy.autoneg_advertised) {
		advertising = 0;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
			advertising |= ADVERTISED_10baseT_Full;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
			advertising |= ADVERTISED_100baseT_Full;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
			advertising |= supported & ADVRTSD_MSK_10G;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
			if (supported & SUPPORTED_1000baseKX_Full)
				advertising |= ADVERTISED_1000baseKX_Full;
			else
				advertising |= ADVERTISED_1000baseT_Full;
		}
	} else {
		if (hw->phy.multispeed_fiber && !autoneg) {
			if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
				advertising = ADVERTISED_10000baseT_Full;
		}
	}

	if (autoneg) {
		supported |= SUPPORTED_Autoneg;
		advertising |= ADVERTISED_Autoneg;
		cmd->base.autoneg = AUTONEG_ENABLE;
	} else
		cmd->base.autoneg = AUTONEG_DISABLE;

	/* Determine the remaining settings based on the PHY type. */
	switch (adapter->hw.phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_aq:
	case ixgbe_phy_x550em_ext_t:
	case ixgbe_phy_fw:
	case ixgbe_phy_cu_unknown:
		supported |= SUPPORTED_TP;
		advertising |= ADVERTISED_TP;
		cmd->base.port = PORT_TP;
		break;
	case ixgbe_phy_qt:
		supported |= SUPPORTED_FIBRE;
		advertising |= ADVERTISED_FIBRE;
		cmd->base.port = PORT_FIBRE;
		break;
	case ixgbe_phy_nl:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
	case ixgbe_phy_qsfp_passive_unknown:
	case ixgbe_phy_qsfp_active_unknown:
	case ixgbe_phy_qsfp_intel:
	case ixgbe_phy_qsfp_unknown:
		/* SFP+ devices, further checking needed */
		switch (adapter->hw.phy.sfp_type) {
		case ixgbe_sfp_type_da_cu:
		case ixgbe_sfp_type_da_cu_core0:
		case ixgbe_sfp_type_da_cu_core1:
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_DA;
			break;
		case ixgbe_sfp_type_sr:
		case ixgbe_sfp_type_lr:
		case ixgbe_sfp_type_srlr_core0:
		case ixgbe_sfp_type_srlr_core1:
		case ixgbe_sfp_type_1g_sx_core0:
		case ixgbe_sfp_type_1g_sx_core1:
		case ixgbe_sfp_type_1g_lx_core0:
		case ixgbe_sfp_type_1g_lx_core1:
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_FIBRE;
			break;
		case ixgbe_sfp_type_not_present:
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_NONE;
			break;
		case ixgbe_sfp_type_1g_cu_core0:
		case ixgbe_sfp_type_1g_cu_core1:
			supported |= SUPPORTED_TP;
			advertising |= ADVERTISED_TP;
			cmd->base.port = PORT_TP;
			break;
		case ixgbe_sfp_type_unknown:
		default:
			supported |= SUPPORTED_FIBRE;
			advertising |= ADVERTISED_FIBRE;
			cmd->base.port = PORT_OTHER;
			break;
		}
		break;
	case ixgbe_phy_xaui:
		supported |= SUPPORTED_FIBRE;
		advertising |= ADVERTISED_FIBRE;
		cmd->base.port = PORT_NONE;
		break;
	case ixgbe_phy_unknown:
	case ixgbe_phy_generic:
	case ixgbe_phy_sfp_unsupported:
	default:
		supported |= SUPPORTED_FIBRE;
		advertising |= ADVERTISED_FIBRE;
		cmd->base.port = PORT_OTHER;
		break;
	}

	/* Indicate pause support */
	supported |= SUPPORTED_Pause;

	switch (hw->fc.requested_mode) {
	case ixgbe_fc_full:
		advertising |= ADVERTISED_Pause;
		break;
	case ixgbe_fc_rx_pause:
		advertising |= ADVERTISED_Pause |
			       ADVERTISED_Asym_Pause;
		break;
	case ixgbe_fc_tx_pause:
		advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		advertising &= ~(ADVERTISED_Pause |
				 ADVERTISED_Asym_Pause);
	}

	if (netif_carrier_ok(netdev)) {
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			cmd->base.speed = SPEED_10000;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			cmd->base.speed = SPEED_5000;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			cmd->base.speed = SPEED_2500;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			cmd->base.speed = SPEED_1000;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			cmd->base.speed = SPEED_100;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			cmd->base.speed = SPEED_10;
			break;
		default:
			break;
		}
		cmd->base.duplex = DUPLEX_FULL;
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
static int ixgbe_set_link_ksettings(struct net_device *netdev,
				    const struct ethtool_link_ksettings *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 advertised, old;
	s32 err = 0;
	u32 supported, advertising;

	ethtool_convert_link_mode_to_legacy_u32(&supported,
						cmd->link_modes.supported);
	ethtool_convert_link_mode_to_legacy_u32(&advertising,
						cmd->link_modes.advertising);

	if ((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber)) {
		/*
		 * this function does not support duplex forcing, but can
		 * limit the advertising of the adapter to the specified speed
		 */
		if (advertising & ~supported)
			return -EINVAL;

		/* only allow one speed at a time if no autoneg */
		if (!cmd->base.autoneg && hw->phy.multispeed_fiber) {
			if (advertising ==
			    (ADVERTISED_10000baseT_Full |
			     ADVERTISED_1000baseT_Full))
				return -EINVAL;
		}

		old = hw->phy.autoneg_advertised;
		advertised = 0;
		if (advertising & ADVERTISED_10000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_10GB_FULL;

		if (advertising & ADVERTISED_1000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_1GB_FULL;

		if (advertising & ADVERTISED_100baseT_Full)
			advertised |= IXGBE_LINK_SPEED_100_FULL;

		if (advertising & ADVERTISED_10baseT_Full)
			advertised |= IXGBE_LINK_SPEED_10_FULL;

		if (old == advertised)
			return err;
		/* this sets the link speed and restarts auto-neg */
		while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
			usleep_range(1000, 2000);

		hw->mac.autotry_restart = true;
		err = hw->mac.ops.setup_link(hw, advertised, true);
		if (err) {
			e_info(probe, "setup link failed with code %d\n", err);
			hw->mac.ops.setup_link(hw, old, true);
		}
		clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
	} else {
		/* in this case we currently only support 10Gb/FULL */
		u32 speed = cmd->base.speed;

		if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
		    (advertising != ADVERTISED_10000baseT_Full) ||
		    (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL))
			return -EINVAL;
	}

	return err;
}
static void ixgbe_get_pauseparam(struct net_device *netdev,
				 struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (ixgbe_device_supports_autoneg_fc(hw) &&
	    !hw->fc.disable_fc_autoneg)
		pause->autoneg = 1;
	else
		pause->autoneg = 0;

	if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
		pause->rx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
		pause->tx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_full) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}
static int ixgbe_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fc_info fc = hw->fc;

	/* 82598 does not support link flow control with DCB enabled */
	if ((hw->mac.type == ixgbe_mac_82598EB) &&
	    (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
		return -EINVAL;

	/* some devices do not support autoneg of link flow control */
	if ((pause->autoneg == AUTONEG_ENABLE) &&
	    !ixgbe_device_supports_autoneg_fc(hw))
		return -EINVAL;

	fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);

	if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
		fc.requested_mode = ixgbe_fc_full;
	else if (pause->rx_pause && !pause->tx_pause)
		fc.requested_mode = ixgbe_fc_rx_pause;
	else if (!pause->rx_pause && pause->tx_pause)
		fc.requested_mode = ixgbe_fc_tx_pause;
	else
		fc.requested_mode = ixgbe_fc_none;

	/* if the thing changed then we'll update and use new autoneg */
	if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
		hw->fc = fc;
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
		else
			ixgbe_reset(adapter);
	}

	return 0;
}
static u32 ixgbe_get_msglevel(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->msg_enable;
}

static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	adapter->msg_enable = data;
}
static int ixgbe_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN  1139
	return IXGBE_REGS_LEN * sizeof(u32);
}

#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
static void ixgbe_get_regs(struct net_device *netdev,
			   struct ethtool_regs *regs, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u8 i;

	memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));

	regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
			hw->device_id;

	/* General Registers */
	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);

	/* NVM Register */
	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA(hw));
	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
	regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
	regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
	regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
	regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC(hw));

	/* Interrupt */
	/* don't read EICR because it can clear interrupt causes, instead
	 * read EICS which is a shadow but doesn't clear EICR */
	regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
	regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
	regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
	regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
	regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
	regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
	regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
	regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
	regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);

	/* Flow Control */
	regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
	for (i = 0; i < 4; i++)
		regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i));
	for (i = 0; i < 8; i++) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_x550em_a:
			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
			break;
		default:
			break;
		}
	}
	regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
	regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);

	/* Receive DMA */
	for (i = 0; i < 64; i++)
		regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
	for (i = 0; i < 64; i++)
		regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
	for (i = 0; i < 64; i++)
		regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
	for (i = 0; i < 64; i++)
		regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
	for (i = 0; i < 64; i++)
		regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
	for (i = 0; i < 64; i++)
		regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
	regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
	for (i = 0; i < 8; i++)
		regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
	regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);

	/* Receive */
	regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
	for (i = 0; i < 16; i++)
		regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
	for (i = 0; i < 16; i++)
		regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
	regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
	regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
	regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
	regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
	for (i = 0; i < 8; i++)
		regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
	for (i = 0; i < 8; i++)
		regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
	regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);

	/* Transmit */
	for (i = 0; i < 32; i++)
		regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
	for (i = 0; i < 32; i++)
		regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
	for (i = 0; i < 32; i++)
		regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
	for (i = 0; i < 32; i++)
		regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
	for (i = 0; i < 32; i++)
		regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
	for (i = 0; i < 32; i++)
		regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
	regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
	for (i = 0; i < 16; i++)
		regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
	regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
	for (i = 0; i < 8; i++)
		regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
	regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);

	/* Wake Up */
	regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
	regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
	regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
	regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
	regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
	regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
	regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
	regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
	regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));

	/* DCB */
	regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);   /* same as FCCFG  */
	regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
		regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
		for (i = 0; i < 8; i++)
			regs_buff[833 + i] =
				IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
		for (i = 0; i < 8; i++)
			regs_buff[841 + i] =
				IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
		for (i = 0; i < 8; i++)
			regs_buff[849 + i] =
				IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
		for (i = 0; i < 8; i++)
			regs_buff[857 + i] =
				IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
		for (i = 0; i < 8; i++)
			regs_buff[833 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i));
		for (i = 0; i < 8; i++)
			regs_buff[841 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i));
		for (i = 0; i < 8; i++)
			regs_buff[849 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i));
		for (i = 0; i < 8; i++)
			regs_buff[857 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i));
		break;
	default:
		break;
	}

	for (i = 0; i < 8; i++)
		regs_buff[865 + i] =
		IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */
	for (i = 0; i < 8; i++)
		regs_buff[873 + i] =
		IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */

	/* Statistics */
	regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
	regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
	regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
	regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
	for (i = 0; i < 8; i++)
		regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
	regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
	regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
	regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
	regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
	regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
	regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
	regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
	for (i = 0; i < 8; i++)
		regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
	regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
	regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
	regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
	regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
	regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
	regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
	regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
	regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
	regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
	regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
	regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc);
	regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32);
	regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc);
	regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32);
	for (i = 0; i < 8; i++)
		regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
	regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
	regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
	regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
	regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
	regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
	regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
	regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
	regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor);
	regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32);
	regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
	regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
	regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
	regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
	regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
	regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
	regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
	regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
	regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
	regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
	regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
	for (i = 0; i < 16; i++)
		regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);

	/* MAC */
	regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
	regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
	regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
	regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
	regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
	regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
	regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
	regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
	regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
	regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
	regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
	regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
	regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
	regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
	regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
	regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
	regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
	regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
	regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
	regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
	regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
	regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
	regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
	regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
	regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
	regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
	regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
	regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
	regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
	regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);

	/* Diagnostic */
	regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
	regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
	regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
	regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
	regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
	regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
	regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
	for (i = 0; i < 4; i++)
		regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i));
	regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
	for (i = 0; i < 4; i++)
		regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i));
	for (i = 0; i < 8; i++)
		regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
	regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
	regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
	regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
	regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
	regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
	regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
	regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
	regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
	regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);

	/* 82599 X540 specific registers */
	regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);

	/* 82599 X540 specific DCB registers */
	regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
	regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC);
	for (i = 0; i < 4; i++)
		regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i));
	regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM);
						/* same as RTTQCNRM */
	regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD);
						/* same as RTTQCNRR */

	/* X540 specific DCB registers */
	regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
	regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);
}
static int ixgbe_get_eeprom_len(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->hw.eeprom.word_size * 2;
}
static int ixgbe_get_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	int first_word, last_word, eeprom_len;
	int ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_len = last_word - first_word + 1;

	eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
					     eeprom_buff);

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < eeprom_len; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
	kfree(eeprom_buff);

	return ret_val;
}
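
/* Worked example (editor's illustration): a request with offset = 3 and
 * len = 4 touches bytes 3..6 of the EEPROM, so first_word = 3 >> 1 = 1
 * and last_word = (3 + 4 - 1) >> 1 = 3, i.e. three 16-bit words are read,
 * and the copy to the caller starts at the odd byte (eeprom->offset & 1)
 * within the first word.  The same word-rounding is why ixgbe_set_eeprom()
 * below must read/modify/write partially-covered words at either end.
 */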
static int ixgbe_set_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	void *ptr;
	int max_len, first_word, last_word, ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
		return -EINVAL;

	max_len = hw->eeprom.word_size * 2;

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ptr = eeprom_buff;

	if (eeprom->offset & 1) {
		/*
		 * need read/modify/write of first changed EEPROM word
		 * only the second byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
		if (ret_val)
			goto err;

		ptr++;
	}
	if ((eeprom->offset + eeprom->len) & 1) {
		/*
		 * need read/modify/write of last changed EEPROM word
		 * only the first byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, last_word,
					  &eeprom_buff[last_word - first_word]);
		if (ret_val)
			goto err;
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(ptr, bytes, eeprom->len);

	for (i = 0; i < last_word - first_word + 1; i++)
		cpu_to_le16s(&eeprom_buff[i]);

	ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
					      last_word - first_word + 1,
					      eeprom_buff);

	/* Update the checksum */
	if (ret_val == 0)
		hw->eeprom.ops.update_checksum(hw);

err:
	kfree(eeprom_buff);
	return ret_val;
}
static void ixgbe_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ixgbe_driver_version,
		sizeof(drvinfo->version));

	strlcpy(drvinfo->fw_version, adapter->eeprom_id,
		sizeof(drvinfo->fw_version));

	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));

	drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN;
}
static void ixgbe_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
	struct ixgbe_ring *rx_ring = adapter->rx_ring[0];

	ring->rx_max_pending = IXGBE_MAX_RXD;
	ring->tx_max_pending = IXGBE_MAX_TXD;
	ring->rx_pending = rx_ring->count;
	ring->tx_pending = tx_ring->count;
}
static int ixgbe_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *temp_ring;
	int i, j, err = 0;
	u32 new_rx_count, new_tx_count;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_tx_count = clamp_t(u32, ring->tx_pending,
			       IXGBE_MIN_TXD, IXGBE_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = clamp_t(u32, ring->rx_pending,
			       IXGBE_MIN_RXD, IXGBE_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == adapter->tx_ring_count) &&
	    (new_rx_count == adapter->rx_ring_count)) {
		/* nothing to do */
		return 0;
	}

	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (!netif_running(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_xdp_queues; i++)
			adapter->xdp_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->xdp_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	/* allocate temporary buffer to store rings in */
	i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
		  adapter->num_rx_queues);
	temp_ring = vmalloc(i * sizeof(struct ixgbe_ring));
	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	ixgbe_down(adapter);

	/*
	 * Setup new Tx resources and free the old Tx resources in that order.
	 * We can then assign the new resources to the rings via a memcpy.
	 * The advantage to this approach is that we are guaranteed to still
	 * have resources even in the case of an allocation failure.
	 */
	if (new_tx_count != adapter->tx_ring_count) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			memcpy(&temp_ring[i], adapter->tx_ring[i],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
			memcpy(&temp_ring[i], adapter->xdp_ring[j],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < adapter->num_tx_queues; i++) {
			ixgbe_free_tx_resources(adapter->tx_ring[i]);

			memcpy(adapter->tx_ring[i], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}
		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
			ixgbe_free_tx_resources(adapter->xdp_ring[j]);

			memcpy(adapter->xdp_ring[j], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}

		adapter->tx_ring_count = new_tx_count;
	}

	/* Repeat the process for the Rx rings if needed */
	if (new_rx_count != adapter->rx_ring_count) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			memcpy(&temp_ring[i], adapter->rx_ring[i],
			       sizeof(struct ixgbe_ring));

			/* Clear copied XDP RX-queue info */
			memset(&temp_ring[i].xdp_rxq, 0,
			       sizeof(temp_ring[i].xdp_rxq));

			temp_ring[i].count = new_rx_count;
			err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_rx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			ixgbe_free_rx_resources(adapter->rx_ring[i]);

			memcpy(adapter->rx_ring[i], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}

		adapter->rx_ring_count = new_rx_count;
	}

err_setup:
	ixgbe_up(adapter);
	vfree(temp_ring);
clear_reset:
	clear_bit(__IXGBE_RESETTING, &adapter->state);
	return err;
}
static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return IXGBE_TEST_LEN;
	case ETH_SS_STATS:
		return IXGBE_STATS_LEN;
	case ETH_SS_PRIV_FLAGS:
		return IXGBE_PRIV_FLAGS_STR_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
static void ixgbe_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *net_stats;
	unsigned int start;
	struct ixgbe_ring *ring;
	int i, j;
	char *p = NULL;

	ixgbe_update_stats(adapter);
	net_stats = dev_get_stats(netdev, &temp);
	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
		switch (ixgbe_gstrings_stats[i].type) {
		case NETDEV_STATS:
			p = (char *) net_stats +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		case IXGBE_STATS:
			p = (char *) adapter +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		default:
			data[i] = 0;
			continue;
		}

		data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	for (j = 0; j < netdev->num_tx_queues; j++) {
		ring = adapter->tx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i]   = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}
	for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
		ring = adapter->rx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i]   = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}

	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxontxc[j];
		data[i++] = adapter->stats.pxofftxc[j];
	}
	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxonrxc[j];
		data[i++] = adapter->stats.pxoffrxc[j];
	}
}
static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
			      u8 *data)
{
	char *p = (char *)data;
	unsigned int i;

	switch (stringset) {
	case ETH_SS_TEST:
		for (i = 0; i < IXGBE_TEST_LEN; i++) {
			memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
		break;
	case ETH_SS_STATS:
		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
			memcpy(p, ixgbe_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < netdev->num_tx_queues; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			sprintf(p, "tx_pb_%u_pxon", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_pb_%u_pxoff", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			sprintf(p, "rx_pb_%u_pxon", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_pb_%u_pxoff", i);
			p += ETH_GSTRING_LEN;
		}
		/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
		break;
	case ETH_SS_PRIV_FLAGS:
		memcpy(data, ixgbe_priv_flags_strings,
		       IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
	}
}
static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link_up;
	u32 link_speed = 0;

	if (ixgbe_removed(hw->hw_addr)) {
		*data = 1;
		return 1;
	}
	*data = 0;
	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
	if (link_up)
		*data = 0;
	else
		*data = 1;
	return 0;
}
/* ethtool register test data */
struct ixgbe_reg_test {
	u16 reg;
	u8  array_len;
	u8  test_type;
	u32 mask;
	u32 write;
};
/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x40 bytes apart, or in contiguous tables.  We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */
#define PATTERN_TEST	1
#define SET_READ_TEST	2
#define WRITE_NO_TEST	3
#define TABLE32_TEST	4
#define TABLE64_TEST_LO	5
#define TABLE64_TEST_HI	6
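
/* Example of how a table entry drives the loop in ixgbe_reg_test()
 * (editor's illustration): the entry
 *	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }
 * is a PATTERN_TEST over a 4-element register array, so the registers at
 * test->reg + 0*0x40 .. test->reg + 3*0x40 each have the four test
 * patterns written (masked by 'write') and read back, with the readback
 * compared under 'mask' to skip read-only or reserved bits.
 */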
/* default 82599 register test */
static const struct ixgbe_reg_test reg_test_82599[] = {
	{ IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ .reg = 0 }
};
/* default 82598 register test */
static const struct ixgbe_reg_test reg_test_82598[] = {
	{ IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	/* Enable all four RX queues before testing. */
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	/* RDH is read-only for 82598, only test RDT. */
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
	{ IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ .reg = 0 }
};
static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
			     u32 mask, u32 write)
{
	u32 pat, val, before;
	static const u32 test_pattern[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
		before = ixgbe_read_reg(&adapter->hw, reg);
		ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
		val = ixgbe_read_reg(&adapter->hw, reg);
		if (val != (test_pattern[pat] & write & mask)) {
			e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
			      reg, val, (test_pattern[pat] & write & mask));
			*data = reg;
			ixgbe_write_reg(&adapter->hw, reg, before);
			return true;
		}
		ixgbe_write_reg(&adapter->hw, reg, before);
	}
	return false;
}
static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
			      u32 mask, u32 write)
{
	u32 val, before;

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	before = ixgbe_read_reg(&adapter->hw, reg);
	ixgbe_write_reg(&adapter->hw, reg, write & mask);
	val = ixgbe_read_reg(&adapter->hw, reg);
	if ((write & mask) != (val & mask)) {
		e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
		      reg, (val & mask), (write & mask));
		*data = reg;
		ixgbe_write_reg(&adapter->hw, reg, before);
		return true;
	}
	ixgbe_write_reg(&adapter->hw, reg, before);
	return false;
}
static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
{
	const struct ixgbe_reg_test *test;
	u32 value, before, after;
	u32 i, toggle;

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		e_err(drv, "Adapter removed - register test blocked\n");
		*data = 1;
		return 1;
	}
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		toggle = 0x7FFFF3FF;
		test = reg_test_82598;
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		toggle = 0x7FFFF30F;
		test = reg_test_82599;
		break;
	default:
		*data = 1;
		return 1;
	}

	/*
	 * Because the status register is such a special case,
	 * we handle it separately from the rest of the register
	 * tests.  Some bits are read-only, some toggle, and some
	 * are writeable on newer MACs.
	 */
	before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS);
	value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle);
	ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
	after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
	if (value != after) {
		e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
		      after, value);
		*data = 1;
		return 1;
	}
	/* restore previous status */
	ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before);

	/*
	 * Perform the remainder of the register test, looping through
	 * the test table until we either fail or reach the null entry.
	 */
	while (test->reg) {
		for (i = 0; i < test->array_len; i++) {
			bool b = false;

			switch (test->test_type) {
			case PATTERN_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 0x40),
						     test->mask,
						     test->write);
				break;
			case SET_READ_TEST:
				b = reg_set_and_check(adapter, data,
						      test->reg + (i * 0x40),
						      test->mask,
						      test->write);
				break;
			case WRITE_NO_TEST:
				ixgbe_write_reg(&adapter->hw,
						test->reg + (i * 0x40),
						test->write);
				break;
			case TABLE32_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 4),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_LO:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 8),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_HI:
				b = reg_pattern_test(adapter, data,
						     (test->reg + 4) + (i * 8),
						     test->mask,
						     test->write);
				break;
			}
			if (b)
				return 1;
		}
		test++;
	}

	*data = 0;
	return 0;
}
static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if (hw->eeprom.ops.validate_checksum(hw, NULL))
		*data = 1;
	else
		*data = 0;
	return *data;
}
static irqreturn_t ixgbe_test_intr(int irq, void *data)
{
	struct net_device *netdev = (struct net_device *) data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);

	return IRQ_HANDLED;
}
static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct net_device *netdev = adapter->netdev;
	u32 mask, i = 0, shared_int = true;
	u32 irq = adapter->pdev->irq;

	*data = 0;

	/* Hook up test interrupt handler just for this test */
	if (adapter->msix_entries) {
		/* NOTE: we don't test MSI-X interrupts here, yet */
		return 0;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		shared_int = false;
		if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
				netdev)) {
			*data = 1;
			return -1;
		}
	} else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
				netdev->name, netdev)) {
		shared_int = false;
	} else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
			       netdev->name, netdev)) {
		*data = 1;
		return -1;
	}
	e_info(hw, "testing %s interrupt\n", shared_int ?
	       "shared" : "unshared");

	/* Disable all the interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	usleep_range(10000, 20000);

	/* Test each interrupt */
	for (; i < 10; i++) {
		/* Interrupt to test */
		mask = BIT(i);

		if (!shared_int) {
			/*
			 * Disable the interrupts to be reported in
			 * the cause register and then force the same
			 * interrupt and see if one gets posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
					~mask & 0x00007FFF);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
					~mask & 0x00007FFF);
			IXGBE_WRITE_FLUSH(&adapter->hw);
			usleep_range(10000, 20000);

			if (adapter->test_icr & mask) {
				*data = 3;
				break;
			}
		}

		/*
		 * Enable the interrupt to be reported in the cause
		 * register and then force the same interrupt and see
		 * if one gets posted.  If an interrupt was not posted
		 * to the bus, the test failed.
		 */
		adapter->test_icr = 0;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		IXGBE_WRITE_FLUSH(&adapter->hw);
		usleep_range(10000, 20000);

		if (!(adapter->test_icr & mask)) {
			*data = 4;
			break;
		}

		if (!shared_int) {
			/*
			 * Disable the other interrupts to be reported in
			 * the cause register and then force the other
			 * interrupts and see if any get posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
					~mask & 0x00007FFF);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
					~mask & 0x00007FFF);
			IXGBE_WRITE_FLUSH(&adapter->hw);
			usleep_range(10000, 20000);

			if (adapter->test_icr) {
				*data = 5;
				break;
			}
		}
	}

	/* Disable all the interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	usleep_range(10000, 20000);

	/* Unhook test interrupt handler */
	free_irq(irq, netdev);

	return *data;
}
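
/* Editor's summary of the sequence above (not part of the original
 * sources): for each of the ten test vectors, mask = BIT(i) is written to
 * EIMS/EIMC to enable or mask reporting of that cause and to EICS to
 * software-trigger the same cause.  Comparing adapter->test_icr (latched
 * by ixgbe_test_intr()) against the mask after the flush yields the three
 * failure codes: *data = 3 when a masked cause still posts an interrupt,
 * *data = 4 when an enabled cause fails to post one, and *data = 5 when
 * unrelated causes fire on a non-shared interrupt line.
 */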
static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg_ctl;

	/* shut down the DMA engines now so they can be reinitialized later */

	/* first Rx */
	hw->mac.ops.disable_rx(hw);
	ixgbe_disable_rx_queue(adapter, rx_ring);

	/* now Tx */
	reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
	reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		reg_ctl &= ~IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
		break;
	default:
		break;
	}

	ixgbe_reset(adapter);

	ixgbe_free_tx_resources(&adapter->test_tx_ring);
	ixgbe_free_rx_resources(&adapter->test_rx_ring);
}
static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rctl, reg_data;
	int ret_val;
	int err;

	/* Setup Tx descriptor ring and Tx buffers */
	tx_ring->count = IXGBE_DEFAULT_TXD;
	tx_ring->queue_index = 0;
	tx_ring->dev = &adapter->pdev->dev;
	tx_ring->netdev = adapter->netdev;
	tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;

	err = ixgbe_setup_tx_resources(tx_ring);
	if (err)
		return 1;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
		reg_data |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
		break;
	default:
		break;
	}

	ixgbe_configure_tx_ring(adapter, tx_ring);

	/* Setup Rx Descriptor ring and Rx buffers */
	rx_ring->count = IXGBE_DEFAULT_RXD;
	rx_ring->queue_index = 0;
	rx_ring->dev = &adapter->pdev->dev;
	rx_ring->netdev = adapter->netdev;
	rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;

	err = ixgbe_setup_rx_resources(adapter, rx_ring);
	if (err) {
		ret_val = 4;
		goto err_nomem;
	}

	hw->mac.ops.disable_rx(hw);

	ixgbe_configure_rx_ring(adapter, rx_ring);

	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
	rctl |= IXGBE_RXCTRL_DMBYPS;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);

	hw->mac.ops.enable_rx(hw);

	return 0;

err_nomem:
	ixgbe_free_desc_rings(adapter);
	return ret_val;
}
static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg_data;

	/* Setup MAC loopback */
	reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	reg_data |= IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);

	reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);

	/* X540 and X550 needs to set the MACC.FLU bit to force link up */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg_data |= IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
		break;
	default:
		if (hw->mac.orig_autoc) {
			reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU;
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
		} else {
			return 10;
		}
	}
	IXGBE_WRITE_FLUSH(hw);
	usleep_range(10000, 20000);

	/* Disable Atlas Tx lanes; re-enabled in reset path */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		u8 atlas;

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
	}

	return 0;
}
static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
{
	u32 reg_data;

	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
	reg_data &= ~IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
}
static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
				      unsigned int frame_size)
{
	memset(skb->data, 0xFF, frame_size);
	frame_size >>= 1;
	memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
	memset(&skb->data[frame_size + 10], 0xBE, 1);
	memset(&skb->data[frame_size + 12], 0xAF, 1);
}
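/* Resulting pattern (frame_size is halved before the later writes): the
 * whole buffer starts as 0xFF, a run of 0xAA is written starting at the
 * midpoint, and single 0xBE/0xAF marker bytes land at midpoint + 10 and
 * midpoint + 12.  ixgbe_check_lbtest_frame() verifies byte 3 and the two
 * marker offsets, which is enough to catch truncated or corrupted frames.
 */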
static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
				     unsigned int frame_size)
{
	unsigned char *data;
	bool match = true;

	frame_size >>= 1;

	data = kmap(rx_buffer->page) + rx_buffer->page_offset;

	if (data[3] != 0xFF ||
	    data[frame_size + 10] != 0xBE ||
	    data[frame_size + 12] != 0xAF)
		match = false;

	kunmap(rx_buffer->page);

	return match;
}
static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
				  struct ixgbe_ring *tx_ring,
				  unsigned int size)
{
	union ixgbe_adv_rx_desc *rx_desc;
	u16 rx_ntc, tx_ntc, count = 0;

	/* initialize next to clean and descriptor values */
	rx_ntc = rx_ring->next_to_clean;
	tx_ntc = tx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);

	while (tx_ntc != tx_ring->next_to_use) {
		union ixgbe_adv_tx_desc *tx_desc;
		struct ixgbe_tx_buffer *tx_buffer;

		tx_desc = IXGBE_TX_DESC(tx_ring, tx_ntc);

		/* if DD is not set transmit has not completed */
		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			return count;

		/* unmap buffer on Tx side */
		tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];

		/* Free all the Tx ring sk_buffs */
		dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);
		dma_unmap_len_set(tx_buffer, len, 0);

		/* increment Tx next to clean counter */
		tx_ntc++;
		if (tx_ntc == tx_ring->count)
			tx_ntc = 0;
	}

	while (rx_desc->wb.upper.length) {
		struct ixgbe_rx_buffer *rx_buffer;

		/* check Rx buffer */
		rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];

		/* sync Rx buffer for CPU read */
		dma_sync_single_for_cpu(rx_ring->dev,
					rx_buffer->dma,
					ixgbe_rx_bufsz(rx_ring),
					DMA_FROM_DEVICE);

		/* verify contents of skb */
		if (ixgbe_check_lbtest_frame(rx_buffer, size))
			count++;
		else
			break;

		/* sync Rx buffer for device write */
		dma_sync_single_for_device(rx_ring->dev,
					   rx_buffer->dma,
					   ixgbe_rx_bufsz(rx_ring),
					   DMA_FROM_DEVICE);

		/* increment Rx next to clean counter */
		rx_ntc++;
		if (rx_ntc == rx_ring->count)
			rx_ntc = 0;

		/* fetch next descriptor */
		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
	}

	netdev_tx_reset_queue(txring_txq(tx_ring));

	/* re-map buffers to ring, store next to clean values */
	ixgbe_alloc_rx_buffers(rx_ring, count);
	rx_ring->next_to_clean = rx_ntc;
	tx_ring->next_to_clean = tx_ntc;

	return count;
}
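/* Returns the number of frames that survived the Tx -> MAC loopback -> Rx
 * round trip with the expected pattern intact; the caller treats anything
 * short of the full 64-frame batch as a failure.
 */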
static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	int i, j, lc, good_cnt, ret_val = 0;
	unsigned int size = 1024;
	netdev_tx_t tx_ret_val;
	struct sk_buff *skb;
	u32 flags_orig = adapter->flags;

	/* DCB can modify the frames on Tx */
	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;

	/* allocate test skb */
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return 11;

	/* place data into test skb */
	ixgbe_create_lbtest_frame(skb, size);
	skb_put(skb, size);

	/*
	 * Calculate the loop count based on the largest descriptor ring
	 * The idea is to wrap the largest ring a number of times using 64
	 * send/receive pairs during each loop
	 */
	if (rx_ring->count <= tx_ring->count)
		lc = ((tx_ring->count / 64) * 2) + 1;
	else
		lc = ((rx_ring->count / 64) * 2) + 1;

	for (j = 0; j <= lc; j++) {
		/* reset count of good packets */
		good_cnt = 0;

		/* place 64 packets on the transmit queue*/
		for (i = 0; i < 64; i++) {
			skb_get(skb);
			tx_ret_val = ixgbe_xmit_frame_ring(skb,
							   adapter,
							   tx_ring);
			if (tx_ret_val == NETDEV_TX_OK)
				good_cnt++;
		}

		if (good_cnt != 64) {
			ret_val = 12;
			break;
		}

		/* allow 200 milliseconds for packets to go from Tx to Rx */
		msleep(200);

		good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
		if (good_cnt != 64) {
			ret_val = 13;
			break;
		}
	}

	/* free the original skb */
	kfree_skb(skb);
	adapter->flags = flags_orig;

	return ret_val;
}
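/* Loop count sanity check: with lc = (largest_ring / 64) * 2 + 1 passes of
 * 64 frames each, both descriptor rings wrap at least twice, so every
 * descriptor in the test rings gets exercised.
 */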
static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
{
	*data = ixgbe_setup_desc_rings(adapter);
	if (*data)
		goto out;
	*data = ixgbe_setup_loopback_test(adapter);
	if (*data)
		goto err_loopback;
	*data = ixgbe_run_loopback_test(adapter);
	ixgbe_loopback_cleanup(adapter);

err_loopback:
	ixgbe_free_desc_rings(adapter);
out:
	return *data;
}
static void ixgbe_diag_test(struct net_device *netdev,
			    struct ethtool_test *eth_test, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	bool if_running = netif_running(netdev);

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		e_err(hw, "Adapter removed - test blocked\n");
		data[0] = 1;
		data[1] = 1;
		data[2] = 1;
		data[3] = 1;
		data[4] = 1;
		eth_test->flags |= ETH_TEST_FL_FAILED;
		return;
	}
	set_bit(__IXGBE_TESTING, &adapter->state);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		struct ixgbe_hw *hw = &adapter->hw;

		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
			int i;

			for (i = 0; i < adapter->num_vfs; i++) {
				if (adapter->vfinfo[i].clear_to_send) {
					netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n");
					data[0] = 1;
					data[1] = 1;
					data[2] = 1;
					data[3] = 1;
					data[4] = 1;
					eth_test->flags |= ETH_TEST_FL_FAILED;
					clear_bit(__IXGBE_TESTING,
						  &adapter->state);
					goto skip_ol_tests;
				}
			}
		}

		e_info(hw, "offline testing starting\n");

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result
		 */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (if_running)
			/* indicate we're in test mode */
			ixgbe_close(netdev);
		else
			ixgbe_reset(adapter);

		e_info(hw, "register testing starting\n");
		if (ixgbe_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		e_info(hw, "eeprom testing starting\n");
		if (ixgbe_eeprom_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		e_info(hw, "interrupt testing starting\n");
		if (ixgbe_intr_test(adapter, &data[2]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* If SRIOV or VMDq is enabled then skip MAC
		 * loopback diagnostic. */
		if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
				      IXGBE_FLAG_VMDQ_ENABLED)) {
			e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
			data[3] = 0;
			goto skip_loopback;
		}

		ixgbe_reset(adapter);
		e_info(hw, "loopback testing starting\n");
		if (ixgbe_loopback_test(adapter, &data[3]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

skip_loopback:
		ixgbe_reset(adapter);

		/* clear testing bit and return adapter to previous state */
		clear_bit(__IXGBE_TESTING, &adapter->state);
		if (if_running)
			ixgbe_open(netdev);
		else if (hw->mac.ops.disable_tx_laser)
			hw->mac.ops.disable_tx_laser(hw);
	} else {
		e_info(hw, "online testing starting\n");

		/* Online tests */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Offline tests aren't run; pass by default */
		data[0] = 0;
		data[1] = 0;
		data[2] = 0;
		data[3] = 0;

		clear_bit(__IXGBE_TESTING, &adapter->state);
	}

skip_ol_tests:
	msleep_interruptible(4 * 1000);
}
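/* Userspace reaches this through the ethtool self-test, e.g. (interface
 * name is only an example):
 *
 *	ethtool -t eth0 offline
 *
 * Results are reported in data[] as 0 = registers, 1 = eeprom,
 * 2 = interrupt, 3 = loopback, 4 = link; online mode runs only the link
 * test and reports the offline tests as passing.
 */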
static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
			       struct ethtool_wolinfo *wol)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int retval = 0;

	/* WOL not supported for all devices */
	if (!ixgbe_wol_supported(adapter, hw->device_id,
				 hw->subsystem_device_id)) {
		retval = 1;
		wol->supported = 0;
	}

	return retval;
}
static void ixgbe_get_wol(struct net_device *netdev,
			  struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	wol->supported = WAKE_UCAST | WAKE_MCAST |
			 WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = 0;

	if (ixgbe_wol_exclusion(adapter, wol) ||
	    !device_can_wakeup(&adapter->pdev->dev))
		return;

	if (adapter->wol & IXGBE_WUFC_EX)
		wol->wolopts |= WAKE_UCAST;
	if (adapter->wol & IXGBE_WUFC_MC)
		wol->wolopts |= WAKE_MCAST;
	if (adapter->wol & IXGBE_WUFC_BC)
		wol->wolopts |= WAKE_BCAST;
	if (adapter->wol & IXGBE_WUFC_MAG)
		wol->wolopts |= WAKE_MAGIC;
}
static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
		return -EOPNOTSUPP;

	if (ixgbe_wol_exclusion(adapter, wol))
		return wol->wolopts ? -EOPNOTSUPP : 0;

	adapter->wol = 0;

	if (wol->wolopts & WAKE_UCAST)
		adapter->wol |= IXGBE_WUFC_EX;
	if (wol->wolopts & WAKE_MCAST)
		adapter->wol |= IXGBE_WUFC_MC;
	if (wol->wolopts & WAKE_BCAST)
		adapter->wol |= IXGBE_WUFC_BC;
	if (wol->wolopts & WAKE_MAGIC)
		adapter->wol |= IXGBE_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}
static int ixgbe_nway_reset(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}
static int ixgbe_set_phys_id(struct net_device *netdev,
			     enum ethtool_phys_id_state state)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (!hw->mac.ops.led_on || !hw->mac.ops.led_off)
		return -EOPNOTSUPP;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
		return 2;

	case ETHTOOL_ID_ON:
		hw->mac.ops.led_on(hw, hw->mac.led_link_act);
		break;

	case ETHTOOL_ID_OFF:
		hw->mac.ops.led_off(hw, hw->mac.led_link_act);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore LED settings */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
		break;
	}

	return 0;
}
static int ixgbe_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* only valid if in constant ITR mode */
	if (adapter->rx_itr_setting <= 1)
		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
	else
		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;

	/* if in mixed tx/rx queues per vector mode, report only rx settings */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		return 0;

	/* only valid if in constant ITR mode */
	if (adapter->tx_itr_setting <= 1)
		ec->tx_coalesce_usecs = adapter->tx_itr_setting;
	else
		ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;

	return 0;
}
/*
 * this function must be called before setting the new value of
 * rx_itr_setting
 */
static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* nothing to do if LRO or RSC are not enabled */
	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
	    !(netdev->features & NETIF_F_LRO))
		return false;

	/* check the feature flag value and enable RSC if necessary */
	if (adapter->rx_itr_setting == 1 ||
	    adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
		if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
			adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
			e_info(probe, "rx-usecs value high enough to re-enable RSC\n");
			return true;
		}
	/* if interrupt rate is too high then disable RSC */
	} else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
		adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
		e_info(probe, "rx-usecs set too low, disabling RSC\n");
		return true;
	}

	return false;
}
static int ixgbe_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_q_vector *q_vector;
	int i;
	u16 tx_itr_param, rx_itr_param, tx_itr_prev;
	bool need_reset = false;

	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
		/* reject Tx specific changes in case of mixed RxTx vectors */
		if (ec->tx_coalesce_usecs)
			return -EINVAL;
		tx_itr_prev = adapter->rx_itr_setting;
	} else {
		tx_itr_prev = adapter->tx_itr_setting;
	}

	if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
	    (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
		return -EINVAL;

	if (ec->rx_coalesce_usecs > 1)
		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
		adapter->rx_itr_setting = ec->rx_coalesce_usecs;

	if (adapter->rx_itr_setting == 1)
		rx_itr_param = IXGBE_20K_ITR;
	else
		rx_itr_param = adapter->rx_itr_setting;

	if (ec->tx_coalesce_usecs > 1)
		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
	else
		adapter->tx_itr_setting = ec->tx_coalesce_usecs;

	if (adapter->tx_itr_setting == 1)
		tx_itr_param = IXGBE_12K_ITR;
	else
		tx_itr_param = adapter->tx_itr_setting;

	/* mixed Rx/Tx */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		adapter->tx_itr_setting = adapter->rx_itr_setting;

	/* detect ITR changes that require update of TXDCTL.WTHRESH */
	if ((adapter->tx_itr_setting != 1) &&
	    (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
		if ((tx_itr_prev == 1) ||
		    (tx_itr_prev >= IXGBE_100K_ITR))
			need_reset = true;
	} else {
		if ((tx_itr_prev != 1) &&
		    (tx_itr_prev < IXGBE_100K_ITR))
			need_reset = true;
	}

	/* check the old value and enable RSC if necessary */
	need_reset |= ixgbe_update_rsc(adapter);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		q_vector = adapter->q_vector[i];
		if (q_vector->tx.count && !q_vector->rx.count)
			/* tx only */
			q_vector->itr = tx_itr_param;
		else
			/* rx only or mixed */
			q_vector->itr = rx_itr_param;
		ixgbe_write_eitr(q_vector);
	}

	/*
	 * do reset here at the end to make sure EITR==0 case is handled
	 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
	 * also locks in RSC enable/disable which requires reset
	 */
	if (need_reset)
		ixgbe_do_reset(netdev);

	return 0;
}
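/* ITR settings are stored left-shifted by 2 (register units), so e.g.
 * "ethtool -C eth0 rx-usecs 10" stores 40 internally and reads back as 10.
 * A value of 1 is special: it keeps dynamic interrupt moderation with the
 * default 20K (Rx) / 12K (Tx) rates programmed above.
 */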
static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	union ixgbe_atr_input *mask = &adapter->fdir_mask;
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *rule = NULL;

	/* report total rule count */
	cmd->data = (1024 << adapter->fdir_pballoc) - 2;

	hlist_for_each_entry_safe(rule, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		if (fsp->location <= rule->sw_idx)
			break;
	}

	if (!rule || fsp->location != rule->sw_idx)
		return -EINVAL;

	/* fill out the flow spec entry */

	/* set flow type field */
	switch (rule->filter.formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
		fsp->flow_type = TCP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
		fsp->flow_type = UDP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
		fsp->flow_type = SCTP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_IPV4:
		fsp->flow_type = IP_USER_FLOW;
		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		fsp->h_u.usr_ip4_spec.proto = 0;
		fsp->m_u.usr_ip4_spec.proto = 0;
		break;
	default:
		return -EINVAL;
	}

	fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
	fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
	fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
	fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
	fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
	fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
	fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
	fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
	fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
	fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
	fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
	fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
	fsp->flow_type |= FLOW_EXT;

	/* record action */
	if (rule->action == IXGBE_FDIR_DROP_QUEUE)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = rule->action;

	return 0;
}
static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
				      struct ethtool_rxnfc *cmd,
				      u32 *rule_locs)
{
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *rule;
	int cnt = 0;

	/* report total rule count */
	cmd->data = (1024 << adapter->fdir_pballoc) - 2;

	hlist_for_each_entry_safe(rule, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		if (cnt == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[cnt] = rule->sw_idx;
		cnt++;
	}

	cmd->rule_cnt = cnt;

	return 0;
}
static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
				   struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;

	/* Report default options for RSS on ixgbe */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case UDP_V4_FLOW:
		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case TCP_V6_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case UDP_V6_FLOW:
		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fallthrough */
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			   u32 *rule_locs)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = adapter->num_rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = adapter->fdir_filter_count;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
		break;
	case ETHTOOL_GRXFH:
		ret = ixgbe_get_rss_hash_opts(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}
int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
				    struct ixgbe_fdir_filter *input,
				    u16 sw_idx)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *rule, *parent;
	int err = -EINVAL;

	parent = NULL;
	rule = NULL;

	hlist_for_each_entry_safe(rule, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		/* hash found, or no matching entry */
		if (rule->sw_idx >= sw_idx)
			break;
		parent = rule;
	}

	/* if there is an old rule occupying our place remove it */
	if (rule && (rule->sw_idx == sw_idx)) {
		if (!input || (rule->filter.formatted.bkt_hash !=
			       input->filter.formatted.bkt_hash)) {
			err = ixgbe_fdir_erase_perfect_filter_82599(hw,
								&rule->filter,
								sw_idx);
		}

		hlist_del(&rule->fdir_node);
		kfree(rule);
		adapter->fdir_filter_count--;
	}

	/*
	 * If no input this was a delete, err should be 0 if a rule was
	 * successfully found and removed from the list else -EINVAL
	 */
	if (!input)
		return err;

	/* initialize node and set software index */
	INIT_HLIST_NODE(&input->fdir_node);

	/* add filter to the list */
	if (parent)
		hlist_add_behind(&input->fdir_node, &parent->fdir_node);
	else
		hlist_add_head(&input->fdir_node,
			       &adapter->fdir_filter_list);

	/* update counts */
	adapter->fdir_filter_count++;

	return 0;
}
static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
				       u8 *flow_type)
{
	switch (fsp->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
		break;
	case UDP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
		break;
	case SCTP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
		break;
	case IP_USER_FLOW:
		switch (fsp->h_u.usr_ip4_spec.proto) {
		case IPPROTO_TCP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
			break;
		case IPPROTO_UDP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
			break;
		case IPPROTO_SCTP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
			break;
		case 0:
			if (!fsp->m_u.usr_ip4_spec.proto) {
				*flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
				break;
			}
			/* fall through */
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	return 1;
}
static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fdir_filter *input;
	union ixgbe_atr_input mask;
	u8 queue;
	int err;

	if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
		return -EOPNOTSUPP;

	/* ring_cookie is a masked into a set of queues and ixgbe pools or
	 * we use the drop index.
	 */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
		queue = IXGBE_FDIR_DROP_QUEUE;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);

		if (!vf && (ring >= adapter->num_rx_queues))
			return -EINVAL;
		else if (vf &&
			 ((vf > adapter->num_vfs) ||
			   ring >= adapter->num_rx_queues_per_pool))
			return -EINVAL;

		/* Map the ring onto the absolute queue index */
		if (!vf)
			queue = adapter->rx_ring[ring]->reg_idx;
		else
			queue = ((vf - 1) *
				adapter->num_rx_queues_per_pool) + ring;
	}

	/* Don't allow indexes to exist outside of available space */
	if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
		e_err(drv, "Location out of range\n");
		return -EINVAL;
	}

	input = kzalloc(sizeof(*input), GFP_ATOMIC);
	if (!input)
		return -ENOMEM;

	memset(&mask, 0, sizeof(union ixgbe_atr_input));

	/* set SW index */
	input->sw_idx = fsp->location;

	/* record flow type */
	if (!ixgbe_flowspec_to_flow_type(fsp,
					 &input->filter.formatted.flow_type)) {
		e_err(drv, "Unrecognized flow type\n");
		goto err_out;
	}

	mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
				   IXGBE_ATR_L4TYPE_MASK;

	if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
		mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;

	/* Copy input into formatted structures */
	input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
	mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
	input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
	mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
	input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
	mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
	input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
	mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;

	if (fsp->flow_type & FLOW_EXT) {
		input->filter.formatted.vm_pool =
				(unsigned char)ntohl(fsp->h_ext.data[1]);
		mask.formatted.vm_pool =
				(unsigned char)ntohl(fsp->m_ext.data[1]);
		input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
		mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
		input->filter.formatted.flex_bytes =
						fsp->h_ext.vlan_etype;
		mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
	}

	/* determine if we need to drop or route the packet */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
		input->action = IXGBE_FDIR_DROP_QUEUE;
	else
		input->action = fsp->ring_cookie;

	spin_lock(&adapter->fdir_perfect_lock);

	if (hlist_empty(&adapter->fdir_filter_list)) {
		/* save mask and program input mask into HW */
		memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
		err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
		if (err) {
			e_err(drv, "Error writing mask\n");
			goto err_out_w_lock;
		}
	} else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
		e_err(drv, "Only one mask supported per port\n");
		goto err_out_w_lock;
	}

	/* apply mask and compute/store hash */
	ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);

	/* program filters to filter memory */
	err = ixgbe_fdir_write_perfect_filter_82599(hw,
				&input->filter, input->sw_idx, queue);
	if (err)
		goto err_out_w_lock;

	ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);

	spin_unlock(&adapter->fdir_perfect_lock);

	return err;
err_out_w_lock:
	spin_unlock(&adapter->fdir_perfect_lock);
err_out:
	kfree(input);
	return -EINVAL;
}
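/* Example of inserting a perfect filter from userspace (interface and
 * values are illustrative):
 *
 *	ethtool -N eth0 flow-type tcp4 dst-port 80 action 1 loc 1
 *
 * All rules on a port must share a single field mask: the first rule
 * programs the mask and any later rule with a different mask is rejected
 * above with "Only one mask supported per port".
 */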
static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	int err;

	spin_lock(&adapter->fdir_perfect_lock);
	err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
	spin_unlock(&adapter->fdir_perfect_lock);

	return err;
}
#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
		       IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
				  struct ethtool_rxnfc *nfc)
{
	u32 flags2 = adapter->flags2;

	/*
	 * RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    !(nfc->data & RXH_L4_B_0_1) ||
		    !(nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	case UDP_V4_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    (nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* if we changed something we need to update flags */
	if (flags2 != adapter->flags2) {
		struct ixgbe_hw *hw = &adapter->hw;
		u32 mrqc;
		unsigned int pf_pool = adapter->num_vfs;

		if ((hw->mac.type >= ixgbe_mac_X550) &&
		    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
			mrqc = IXGBE_READ_REG(hw, IXGBE_PFVFMRQC(pf_pool));
		else
			mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);

		if ((flags2 & UDP_RSS_FLAGS) &&
		    !(adapter->flags2 & UDP_RSS_FLAGS))
			e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");

		adapter->flags2 = flags2;

		/* Perform hash on these packet types */
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
		      | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
		      | IXGBE_MRQC_RSS_FIELD_IPV6
		      | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;

		mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
			  IXGBE_MRQC_RSS_FIELD_IPV6_UDP);

		if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
			mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;

		if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
			mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;

		if ((hw->mac.type >= ixgbe_mac_X550) &&
		    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
			IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), mrqc);
		else
			IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	}

	return 0;
}
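/* Example of enabling 4-tuple UDP/IPv4 hashing (illustrative):
 *
 *	ethtool -N eth0 rx-flow-hash udp4 sdfn
 *
 * "sdfn" corresponds to RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 |
 * RXH_L4_B_2_3; hashing on only half of the L4 port field is rejected
 * above with -EINVAL.
 */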
static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_SRXFH:
		ret = ixgbe_set_rss_hash_opt(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}
static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
{
	if (adapter->hw.mac.type < ixgbe_mac_X550)
		return 16;
	else
		return 64;
}
static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
{
	return IXGBE_RSS_KEY_SIZE;
}
static u32 ixgbe_rss_indir_size(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	return ixgbe_rss_indir_tbl_entries(adapter);
}
static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
{
	int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter);
	u16 rss_m = adapter->ring_feature[RING_F_RSS].mask;

	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		rss_m = adapter->ring_feature[RING_F_RSS].indices - 1;

	for (i = 0; i < reta_size; i++)
		indir[i] = adapter->rss_indir_tbl[i] & rss_m;
}
static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			  u8 *hfunc)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (indir)
		ixgbe_get_reta(adapter, indir);

	if (key)
		memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev));

	return 0;
}
static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir,
			  const u8 *key, const u8 hfunc)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int i;
	u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);

	if (hfunc)
		return -EINVAL;

	/* Fill out the redirection table */
	if (indir) {
		int max_queues = min_t(int, adapter->num_rx_queues,
				       ixgbe_rss_indir_tbl_max(adapter));

		/*Allow at least 2 queues w/ SR-IOV.*/
		if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
		    (max_queues < 2))
			max_queues = 2;

		/* Verify user input. */
		for (i = 0; i < reta_entries; i++)
			if (indir[i] >= max_queues)
				return -EINVAL;

		for (i = 0; i < reta_entries; i++)
			adapter->rss_indir_tbl[i] = indir[i];
	}

	/* Fill out the rss hash key */
	if (key) {
		memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev));
		ixgbe_store_key(adapter);
	}

	ixgbe_store_reta(adapter);

	return 0;
}
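/* Example of rewriting the RETA from userspace (illustrative):
 *
 *	ethtool -X eth0 equal 4
 *
 * Every indirection entry is validated against the lesser of
 * num_rx_queues and the hardware table limit before anything is stored.
 */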
static int ixgbe_get_ts_info(struct net_device *dev,
			     struct ethtool_ts_info *info)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);

	/* we always support timestamping disabled */
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
		break;
	case ixgbe_mac_X540:
	case ixgbe_mac_82599EB:
		info->rx_filters |=
			BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
			BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
			BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
		break;
	default:
		return ethtool_op_get_ts_info(dev, info);
	}

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_RX_SOFTWARE |
		SOF_TIMESTAMPING_SOFTWARE |
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;

	if (adapter->ptp_clock)
		info->phc_index = ptp_clock_index(adapter->ptp_clock);
	else
		info->phc_index = -1;

	info->tx_types =
		BIT(HWTSTAMP_TX_OFF) |
		BIT(HWTSTAMP_TX_ON);

	return 0;
}
static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
{
	unsigned int max_combined;
	u8 tcs = adapter->hw_tcs;

	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/* We only support one q_vector without MSI-X */
		max_combined = 1;
	} else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		/* Limit value based on the queue mask */
		max_combined = adapter->ring_feature[RING_F_RSS].mask + 1;
	} else if (tcs > 1) {
		/* For DCB report channels per traffic class */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			/* 8 TC w/ 4 queues per TC */
			max_combined = 4;
		} else if (tcs > 4) {
			/* 8 TC w/ 8 queues per TC */
			max_combined = 8;
		} else {
			/* 4 TC w/ 16 queues per TC */
			max_combined = 16;
		}
	} else if (adapter->atr_sample_rate) {
		/* support up to 64 queues with ATR */
		max_combined = IXGBE_MAX_FDIR_INDICES;
	} else {
		/* support up to 16 queues with RSS */
		max_combined = ixgbe_max_rss_indices(adapter);
	}

	return max_combined;
}
static void ixgbe_get_channels(struct net_device *dev,
			       struct ethtool_channels *ch)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);

	/* report maximum channels */
	ch->max_combined = ixgbe_max_channels(adapter);

	/* report info for other vector */
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		ch->max_other = NON_Q_VECTORS;
		ch->other_count = NON_Q_VECTORS;
	}

	/* record RSS queues */
	ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;

	/* nothing else to report if RSS is disabled */
	if (ch->combined_count == 1)
		return;

	/* we do not support ATR queueing if SR-IOV is enabled */
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		return;

	/* same thing goes for being DCB enabled */
	if (adapter->hw_tcs > 1)
		return;

	/* if ATR is disabled we can exit */
	if (!adapter->atr_sample_rate)
		return;

	/* report flow director queues as maximum channels */
	ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
}
static int ixgbe_set_channels(struct net_device *dev,
			      struct ethtool_channels *ch)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	unsigned int count = ch->combined_count;
	u8 max_rss_indices = ixgbe_max_rss_indices(adapter);

	/* verify they are not requesting separate vectors */
	if (!count || ch->rx_count || ch->tx_count)
		return -EINVAL;

	/* verify other_count has not changed */
	if (ch->other_count != NON_Q_VECTORS)
		return -EINVAL;

	/* verify the number of channels does not exceed hardware limits */
	if (count > ixgbe_max_channels(adapter))
		return -EINVAL;

	/* update feature limits from largest to smallest supported values */
	adapter->ring_feature[RING_F_FDIR].limit = count;

	/* cap RSS limit */
	if (count > max_rss_indices)
		count = max_rss_indices;
	adapter->ring_feature[RING_F_RSS].limit = count;

	/* cap FCoE limit at 8 */
	if (count > IXGBE_FCRETA_SIZE)
		count = IXGBE_FCRETA_SIZE;
	adapter->ring_feature[RING_F_FCOE].limit = count;

	/* use setup TC to update any traffic class queue mapping */
	return ixgbe_setup_tc(dev, adapter->hw_tcs);
}
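/* Example (illustrative):
 *
 *	ethtool -L eth0 combined 8
 *
 * Only "combined" channel counts are accepted; requests for separate
 * rx/tx vectors or a changed other_count fail with -EINVAL above, and the
 * final queue layout is applied through ixgbe_setup_tc().
 */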
static int ixgbe_get_module_info(struct net_device *dev,
				 struct ethtool_modinfo *modinfo)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	s32 status;
	u8 sff8472_rev, addr_mode;
	bool page_swap = false;

	if (hw->phy.type == ixgbe_phy_fw)
		return -ENXIO;

	/* Check whether we support SFF-8472 or not */
	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_SFF_8472_COMP,
					     &sff8472_rev);
	if (status)
		return -EIO;

	/* addressing mode is not supported */
	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_SFF_8472_SWAP,
					     &addr_mode);
	if (status)
		return -EIO;

	if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
		e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
		page_swap = true;
	}

	if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap) {
		/* We have a SFP, but it does not support SFF-8472 */
		modinfo->type = ETH_MODULE_SFF_8079;
		modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
	} else {
		/* We have a SFP which supports a revision of SFF-8472. */
		modinfo->type = ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
	}

	return 0;
}
static int ixgbe_get_module_eeprom(struct net_device *dev,
				   struct ethtool_eeprom *ee,
				   u8 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
	u8 databyte = 0xFF;
	int i = 0;

	if (ee->len == 0)
		return -EINVAL;

	if (hw->phy.type == ixgbe_phy_fw)
		return -ENXIO;

	for (i = ee->offset; i < ee->offset + ee->len; i++) {
		/* I2C reads can take long time */
		if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
			return -EBUSY;

		if (i < ETH_MODULE_SFF_8079_LEN)
			status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
		else
			status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);

		if (status)
			return -EIO;

		data[i - ee->offset] = databyte;
	}

	return 0;
}
static const struct {
	ixgbe_link_speed mac_speed;
	u32 supported;
} ixgbe_ls_map[] = {
	{ IXGBE_LINK_SPEED_10_FULL, SUPPORTED_10baseT_Full },
	{ IXGBE_LINK_SPEED_100_FULL, SUPPORTED_100baseT_Full },
	{ IXGBE_LINK_SPEED_1GB_FULL, SUPPORTED_1000baseT_Full },
	{ IXGBE_LINK_SPEED_2_5GB_FULL, SUPPORTED_2500baseX_Full },
	{ IXGBE_LINK_SPEED_10GB_FULL, SUPPORTED_10000baseT_Full },
};

static const struct {
	u32 lp_advertised;
	u32 mac_speed;
} ixgbe_lp_map[] = {
	{ FW_PHY_ACT_UD_2_100M_TX_EEE, SUPPORTED_100baseT_Full },
	{ FW_PHY_ACT_UD_2_1G_T_EEE, SUPPORTED_1000baseT_Full },
	{ FW_PHY_ACT_UD_2_10G_T_EEE, SUPPORTED_10000baseT_Full },
	{ FW_PHY_ACT_UD_2_1G_KX_EEE, SUPPORTED_1000baseKX_Full },
	{ FW_PHY_ACT_UD_2_10G_KX4_EEE, SUPPORTED_10000baseKX4_Full },
	{ FW_PHY_ACT_UD_2_10G_KR_EEE, SUPPORTED_10000baseKR_Full },
};
static int
ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_eee *edata)
{
	u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
	struct ixgbe_hw *hw = &adapter->hw;
	int rc;
	u16 i;

	rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, &info);
	if (rc)
		return rc;

	edata->lp_advertised = 0;
	for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) {
		if (info[0] & ixgbe_lp_map[i].lp_advertised)
			edata->lp_advertised |= ixgbe_lp_map[i].mac_speed;
	}

	edata->supported = 0;
	for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
		if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed)
			edata->supported |= ixgbe_ls_map[i].supported;
	}

	edata->advertised = 0;
	for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
		if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed)
			edata->advertised |= ixgbe_ls_map[i].supported;
	}

	edata->eee_enabled = !!edata->advertised;
	edata->tx_lpi_enabled = edata->eee_enabled;
	if (edata->advertised & edata->lp_advertised)
		edata->eee_active = true;

	return 0;
}
static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
		return -EOPNOTSUPP;

	if (hw->phy.eee_speeds_supported && hw->phy.type == ixgbe_phy_fw)
		return ixgbe_get_eee_fw(adapter, edata);

	return -EOPNOTSUPP;
}
static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ethtool_eee eee_data;
	s32 ret_val;

	if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
		return -EOPNOTSUPP;

	memset(&eee_data, 0, sizeof(struct ethtool_eee));

	ret_val = ixgbe_get_eee(netdev, &eee_data);
	if (ret_val)
		return ret_val;

	if (eee_data.eee_enabled && !edata->eee_enabled) {
		if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) {
			e_err(drv, "Setting EEE tx-lpi is not supported\n");
			return -EINVAL;
		}

		if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) {
			e_err(drv,
			      "Setting EEE Tx LPI timer is not supported\n");
			return -EINVAL;
		}

		if (eee_data.advertised != edata->advertised) {
			e_err(drv,
			      "Setting EEE advertised speeds is not supported\n");
			return -EINVAL;
		}
	}

	if (eee_data.eee_enabled != edata->eee_enabled) {
		if (edata->eee_enabled) {
			adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
			hw->phy.eee_speeds_advertised =
						   hw->phy.eee_speeds_supported;
		} else {
			adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
			hw->phy.eee_speeds_advertised = 0;
		}

		/* reset link */
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
		else
			ixgbe_reset(adapter);
	}

	return 0;
}
static u32 ixgbe_get_priv_flags(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 priv_flags = 0;

	if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
		priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX;

	return priv_flags;
}
static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	unsigned int flags2 = adapter->flags2;

	flags2 &= ~IXGBE_FLAG2_RX_LEGACY;
	if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX)
		flags2 |= IXGBE_FLAG2_RX_LEGACY;

	if (flags2 != adapter->flags2) {
		adapter->flags2 = flags2;

		/* reset interface to repopulate queues */
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
	}

	return 0;
}
static const struct ethtool_ops ixgbe_ethtool_ops = {
	.get_drvinfo            = ixgbe_get_drvinfo,
	.get_regs_len           = ixgbe_get_regs_len,
	.get_regs               = ixgbe_get_regs,
	.get_wol                = ixgbe_get_wol,
	.set_wol                = ixgbe_set_wol,
	.nway_reset             = ixgbe_nway_reset,
	.get_link               = ethtool_op_get_link,
	.get_eeprom_len         = ixgbe_get_eeprom_len,
	.get_eeprom             = ixgbe_get_eeprom,
	.set_eeprom             = ixgbe_set_eeprom,
	.get_ringparam          = ixgbe_get_ringparam,
	.set_ringparam          = ixgbe_set_ringparam,
	.get_pauseparam         = ixgbe_get_pauseparam,
	.set_pauseparam         = ixgbe_set_pauseparam,
	.get_msglevel           = ixgbe_get_msglevel,
	.set_msglevel           = ixgbe_set_msglevel,
	.self_test              = ixgbe_diag_test,
	.get_strings            = ixgbe_get_strings,
	.set_phys_id            = ixgbe_set_phys_id,
	.get_sset_count         = ixgbe_get_sset_count,
	.get_ethtool_stats      = ixgbe_get_ethtool_stats,
	.get_coalesce           = ixgbe_get_coalesce,
	.set_coalesce           = ixgbe_set_coalesce,
	.get_rxnfc              = ixgbe_get_rxnfc,
	.set_rxnfc              = ixgbe_set_rxnfc,
	.get_rxfh_indir_size    = ixgbe_rss_indir_size,
	.get_rxfh_key_size      = ixgbe_get_rxfh_key_size,
	.get_rxfh               = ixgbe_get_rxfh,
	.set_rxfh               = ixgbe_set_rxfh,
	.get_eee                = ixgbe_get_eee,
	.set_eee                = ixgbe_set_eee,
	.get_channels           = ixgbe_get_channels,
	.set_channels           = ixgbe_set_channels,
	.get_priv_flags         = ixgbe_get_priv_flags,
	.set_priv_flags         = ixgbe_set_priv_flags,
	.get_ts_info            = ixgbe_get_ts_info,
	.get_module_info        = ixgbe_get_module_info,
	.get_module_eeprom      = ixgbe_get_module_eeprom,
	.get_link_ksettings     = ixgbe_get_link_ksettings,
	.set_link_ksettings     = ixgbe_set_link_ksettings,
};
void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ixgbe_ethtool_ops;
}