// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

/* ethtool support for ixgbe */

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/uaccess.h>

#include "ixgbe.h"
#include "ixgbe_phy.h"
enum {NETDEV_STATS, IXGBE_STATS};

struct ixgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};

#define IXGBE_STAT(m)		IXGBE_STATS, \
				sizeof(((struct ixgbe_adapter *)0)->m), \
				offsetof(struct ixgbe_adapter, m)
#define IXGBE_NETDEV_STAT(m)	NETDEV_STATS, \
				sizeof(((struct rtnl_link_stats64 *)0)->m), \
				offsetof(struct rtnl_link_stats64, m)
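/* Each IXGBE_STAT()/IXGBE_NETDEV_STAT() use below expands to the type,
 * sizeof_stat and stat_offset initializers of struct ixgbe_stats, so an
 * entry such as {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)} lets
 * ixgbe_get_ethtool_stats() fetch each counter generically from its
 * offset and size at runtime.
 */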
static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
	{"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
	{"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
	{"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
	{"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
	{"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
	{"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
	{"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
	{"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
	{"lsc_int", IXGBE_STAT(lsc_int)},
	{"tx_busy", IXGBE_STAT(tx_busy)},
	{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
	{"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
	{"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
	{"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
	{"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
	{"multicast", IXGBE_NETDEV_STAT(multicast)},
	{"broadcast", IXGBE_STAT(stats.bprc)},
	{"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0])},
	{"collisions", IXGBE_NETDEV_STAT(collisions)},
	{"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
	{"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
	{"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
	{"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
	{"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
	{"fdir_match", IXGBE_STAT(stats.fdirmatch)},
	{"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
	{"fdir_overflow", IXGBE_STAT(fdir_overflow)},
	{"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
	{"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
	{"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
	{"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
	{"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
	{"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
	{"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
	{"tx_restart_queue", IXGBE_STAT(restart_queue)},
	{"rx_length_errors", IXGBE_STAT(stats.rlec)},
	{"rx_long_length_errors", IXGBE_STAT(stats.roc)},
	{"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
	{"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
	{"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
	{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
	{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
	{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
	{"alloc_rx_page", IXGBE_STAT(alloc_rx_page)},
	{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
	{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
	{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
	{"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
	{"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
	{"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
	{"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
	{"tx_hwtstamp_timeouts", IXGBE_STAT(tx_hwtstamp_timeouts)},
	{"tx_hwtstamp_skipped", IXGBE_STAT(tx_hwtstamp_skipped)},
	{"rx_hwtstamp_cleared", IXGBE_STAT(rx_hwtstamp_cleared)},
	{"tx_ipsec", IXGBE_STAT(tx_ipsec)},
	{"rx_ipsec", IXGBE_STAT(rx_ipsec)},
#ifdef IXGBE_FCOE
	{"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
	{"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
	{"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
	{"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
	{"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
	{"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
	{"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
	{"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
#endif /* IXGBE_FCOE */
};
/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so
 * we set the num_rx_queues to evaluate to num_tx_queues. This is
 * used because we do not have a good way to get the max number of
 * rx queues with CONFIG_RPS disabled.
 */
#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues

#define IXGBE_QUEUE_STATS_LEN ( \
	(netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
	(sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
#define IXGBE_PB_STATS_LEN ( \
			(sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
			/ sizeof(u64))
#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
			 IXGBE_PB_STATS_LEN + \
			 IXGBE_QUEUE_STATS_LEN)
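/* IXGBE_STATS_LEN is what ixgbe_get_sset_count() returns for
 * ETH_SS_STATS; the strings written by ixgbe_get_strings() and the
 * values written by ixgbe_get_ethtool_stats() must match this count and
 * ordering: global stats first, then per-queue Tx/Rx packet/byte pairs,
 * then the per-packet-buffer PFC on/off counters.
 */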
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test (offline)", "Eeprom test (offline)",
	"Interrupt test (offline)", "Loopback test (offline)",
	"Link test (on/offline)"
};

#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN

static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define IXGBE_PRIV_FLAGS_LEGACY_RX	BIT(0)
	"legacy-rx",
#define IXGBE_PRIV_FLAGS_VF_IPSEC_EN	BIT(1)
	"vf-ipsec",
#define IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF	BIT(2)
	"mdd-disable-vf",
};

#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings)
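/* The index of each string above must match its BIT() position, since
 * ethtool exposes private flags to userspace as a bitmap.
 */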
#define ixgbe_isbackplane(type) ((type) == ixgbe_media_type_backplane)
static void ixgbe_set_supported_10gtypes(struct ixgbe_hw *hw,
					 struct ethtool_link_ksettings *cmd)
{
	if (!ixgbe_isbackplane(hw->phy.media_type)) {
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     10000baseT_Full);
		return;
	}

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598:
	case IXGBE_DEV_ID_82599_KX4:
	case IXGBE_DEV_ID_82599_KX4_MEZZ:
	case IXGBE_DEV_ID_X550EM_X_KX4:
		ethtool_link_ksettings_add_link_mode
			(cmd, supported, 10000baseKX4_Full);
		break;
	case IXGBE_DEV_ID_82598_BX:
	case IXGBE_DEV_ID_82599_KR:
	case IXGBE_DEV_ID_X550EM_X_KR:
	case IXGBE_DEV_ID_X550EM_X_XFI:
		ethtool_link_ksettings_add_link_mode
			(cmd, supported, 10000baseKR_Full);
		break;
	default:
		ethtool_link_ksettings_add_link_mode
			(cmd, supported, 10000baseKX4_Full);
		ethtool_link_ksettings_add_link_mode
			(cmd, supported, 10000baseKR_Full);
		break;
	}
}
static void ixgbe_set_advertising_10gtypes(struct ixgbe_hw *hw,
					   struct ethtool_link_ksettings *cmd)
{
	if (!ixgbe_isbackplane(hw->phy.media_type)) {
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     10000baseT_Full);
		return;
	}

	switch (hw->device_id) {
	case IXGBE_DEV_ID_82598:
	case IXGBE_DEV_ID_82599_KX4:
	case IXGBE_DEV_ID_82599_KX4_MEZZ:
	case IXGBE_DEV_ID_X550EM_X_KX4:
		ethtool_link_ksettings_add_link_mode
			(cmd, advertising, 10000baseKX4_Full);
		break;
	case IXGBE_DEV_ID_82598_BX:
	case IXGBE_DEV_ID_82599_KR:
	case IXGBE_DEV_ID_X550EM_X_KR:
	case IXGBE_DEV_ID_X550EM_X_XFI:
		ethtool_link_ksettings_add_link_mode
			(cmd, advertising, 10000baseKR_Full);
		break;
	default:
		ethtool_link_ksettings_add_link_mode
			(cmd, advertising, 10000baseKX4_Full);
		ethtool_link_ksettings_add_link_mode
			(cmd, advertising, 10000baseKR_Full);
		break;
	}
}
static int ixgbe_get_link_ksettings(struct net_device *netdev,
				    struct ethtool_link_ksettings *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed supported_link;
	bool autoneg = false;

	ethtool_link_ksettings_zero_link_mode(cmd, supported);
	ethtool_link_ksettings_zero_link_mode(cmd, advertising);

	hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg);

	/* set the supported link speeds */
	if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) {
		ixgbe_set_supported_10gtypes(hw, cmd);
		ixgbe_set_advertising_10gtypes(hw, cmd);
	}
	if (supported_link & IXGBE_LINK_SPEED_5GB_FULL)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     5000baseT_Full);

	if (supported_link & IXGBE_LINK_SPEED_2_5GB_FULL)
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     2500baseT_Full);

	if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) {
		if (ixgbe_isbackplane(hw->phy.media_type)) {
			ethtool_link_ksettings_add_link_mode(cmd, supported,
							     1000baseKX_Full);
			ethtool_link_ksettings_add_link_mode(cmd, advertising,
							     1000baseKX_Full);
		} else {
			ethtool_link_ksettings_add_link_mode(cmd, supported,
							     1000baseT_Full);
			ethtool_link_ksettings_add_link_mode(cmd, advertising,
							     1000baseT_Full);
		}
	}
	if (supported_link & IXGBE_LINK_SPEED_100_FULL) {
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     100baseT_Full);
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     100baseT_Full);
	}
	if (supported_link & IXGBE_LINK_SPEED_10_FULL) {
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     10baseT_Full);
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     10baseT_Full);
	}

	/* set the advertised speeds */
	if (hw->phy.autoneg_advertised) {
		ethtool_link_ksettings_zero_link_mode(cmd, advertising);
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL)
			ethtool_link_ksettings_add_link_mode(cmd, advertising,
							     10baseT_Full);
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL)
			ethtool_link_ksettings_add_link_mode(cmd, advertising,
							     100baseT_Full);
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
			ixgbe_set_advertising_10gtypes(hw, cmd);
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) {
			if (ethtool_link_ksettings_test_link_mode
				(cmd, supported, 1000baseKX_Full))
				ethtool_link_ksettings_add_link_mode
					(cmd, advertising, 1000baseKX_Full);
			else
				ethtool_link_ksettings_add_link_mode
					(cmd, advertising, 1000baseT_Full);
		}
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL)
			ethtool_link_ksettings_add_link_mode(cmd, advertising,
							     5000baseT_Full);
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL)
			ethtool_link_ksettings_add_link_mode(cmd, advertising,
							     2500baseT_Full);
	} else {
		if (hw->phy.multispeed_fiber && !autoneg) {
			if (supported_link & IXGBE_LINK_SPEED_10GB_FULL)
				ethtool_link_ksettings_add_link_mode
					(cmd, advertising, 10000baseT_Full);
		}
	}

	if (autoneg) {
		ethtool_link_ksettings_add_link_mode(cmd, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Autoneg);
		cmd->base.autoneg = AUTONEG_ENABLE;
	} else
		cmd->base.autoneg = AUTONEG_DISABLE;

	/* Determine the remaining settings based on the PHY type. */
	switch (adapter->hw.phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_aq:
	case ixgbe_phy_x550em_ext_t:
	case ixgbe_phy_fw:
	case ixgbe_phy_cu_unknown:
		ethtool_link_ksettings_add_link_mode(cmd, supported, TP);
		ethtool_link_ksettings_add_link_mode(cmd, advertising, TP);
		cmd->base.port = PORT_TP;
		break;
	case ixgbe_phy_qt:
		ethtool_link_ksettings_add_link_mode(cmd, supported, FIBRE);
		ethtool_link_ksettings_add_link_mode(cmd, advertising, FIBRE);
		cmd->base.port = PORT_FIBRE;
		break;
	case ixgbe_phy_nl:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
	case ixgbe_phy_qsfp_passive_unknown:
	case ixgbe_phy_qsfp_active_unknown:
	case ixgbe_phy_qsfp_intel:
	case ixgbe_phy_qsfp_unknown:
		/* SFP+ devices, further checking needed */
		switch (adapter->hw.phy.sfp_type) {
		case ixgbe_sfp_type_da_cu:
		case ixgbe_sfp_type_da_cu_core0:
		case ixgbe_sfp_type_da_cu_core1:
			ethtool_link_ksettings_add_link_mode(cmd, supported,
							     FIBRE);
			ethtool_link_ksettings_add_link_mode(cmd, advertising,
							     FIBRE);
			cmd->base.port = PORT_DA;
			break;
		case ixgbe_sfp_type_sr:
		case ixgbe_sfp_type_lr:
		case ixgbe_sfp_type_srlr_core0:
		case ixgbe_sfp_type_srlr_core1:
		case ixgbe_sfp_type_1g_sx_core0:
		case ixgbe_sfp_type_1g_sx_core1:
		case ixgbe_sfp_type_1g_lx_core0:
		case ixgbe_sfp_type_1g_lx_core1:
		case ixgbe_sfp_type_1g_bx_core0:
		case ixgbe_sfp_type_1g_bx_core1:
			ethtool_link_ksettings_add_link_mode(cmd, supported,
							     FIBRE);
			ethtool_link_ksettings_add_link_mode(cmd, advertising,
							     FIBRE);
			cmd->base.port = PORT_FIBRE;
			break;
		case ixgbe_sfp_type_not_present:
			ethtool_link_ksettings_add_link_mode(cmd, supported,
							     FIBRE);
			ethtool_link_ksettings_add_link_mode(cmd, advertising,
							     FIBRE);
			cmd->base.port = PORT_NONE;
			break;
		case ixgbe_sfp_type_1g_cu_core0:
		case ixgbe_sfp_type_1g_cu_core1:
			ethtool_link_ksettings_add_link_mode(cmd, supported,
							     TP);
			ethtool_link_ksettings_add_link_mode(cmd, advertising,
							     TP);
			cmd->base.port = PORT_TP;
			break;
		case ixgbe_sfp_type_unknown:
		default:
			ethtool_link_ksettings_add_link_mode(cmd, supported,
							     FIBRE);
			ethtool_link_ksettings_add_link_mode(cmd, advertising,
							     FIBRE);
			cmd->base.port = PORT_OTHER;
			break;
		}
		break;
	case ixgbe_phy_xaui:
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     FIBRE);
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     FIBRE);
		cmd->base.port = PORT_NONE;
		break;
	case ixgbe_phy_unknown:
	case ixgbe_phy_generic:
	case ixgbe_phy_sfp_unsupported:
	default:
		ethtool_link_ksettings_add_link_mode(cmd, supported,
						     FIBRE);
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     FIBRE);
		cmd->base.port = PORT_OTHER;
		break;
	}

	/* Indicate pause support */
	ethtool_link_ksettings_add_link_mode(cmd, supported, Pause);

	switch (hw->fc.requested_mode) {
	case ixgbe_fc_full:
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
		break;
	case ixgbe_fc_rx_pause:
		ethtool_link_ksettings_add_link_mode(cmd, advertising, Pause);
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Asym_Pause);
		break;
	case ixgbe_fc_tx_pause:
		ethtool_link_ksettings_add_link_mode(cmd, advertising,
						     Asym_Pause);
		break;
	default:
		ethtool_link_ksettings_del_link_mode(cmd, advertising, Pause);
		ethtool_link_ksettings_del_link_mode(cmd, advertising,
						     Asym_Pause);
	}

	if (netif_carrier_ok(netdev)) {
		switch (adapter->link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			cmd->base.speed = SPEED_10000;
			break;
		case IXGBE_LINK_SPEED_5GB_FULL:
			cmd->base.speed = SPEED_5000;
			break;
		case IXGBE_LINK_SPEED_2_5GB_FULL:
			cmd->base.speed = SPEED_2500;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			cmd->base.speed = SPEED_1000;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			cmd->base.speed = SPEED_100;
			break;
		case IXGBE_LINK_SPEED_10_FULL:
			cmd->base.speed = SPEED_10;
			break;
		default:
			break;
		}
		cmd->base.duplex = DUPLEX_FULL;
	} else {
		cmd->base.speed = SPEED_UNKNOWN;
		cmd->base.duplex = DUPLEX_UNKNOWN;
	}

	return 0;
}
static int ixgbe_set_link_ksettings(struct net_device *netdev,
				    const struct ethtool_link_ksettings *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 advertised, old;
	int err = 0;

	if ((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber)) {
		/*
		 * this function does not support duplex forcing, but can
		 * limit the advertising of the adapter to the specified speed
		 */
		if (!linkmode_subset(cmd->link_modes.advertising,
				     cmd->link_modes.supported))
			return -EINVAL;

		/* only allow one speed at a time if no autoneg */
		if (!cmd->base.autoneg && hw->phy.multispeed_fiber) {
			if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
								  10000baseT_Full) &&
			    ethtool_link_ksettings_test_link_mode(cmd, advertising,
								  1000baseT_Full))
				return -EINVAL;
		}

		old = hw->phy.autoneg_advertised;
		advertised = 0;
		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
							  10000baseT_Full))
			advertised |= IXGBE_LINK_SPEED_10GB_FULL;
		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
							  5000baseT_Full))
			advertised |= IXGBE_LINK_SPEED_5GB_FULL;
		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
							  2500baseT_Full))
			advertised |= IXGBE_LINK_SPEED_2_5GB_FULL;
		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
							  1000baseT_Full))
			advertised |= IXGBE_LINK_SPEED_1GB_FULL;

		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
							  100baseT_Full))
			advertised |= IXGBE_LINK_SPEED_100_FULL;

		if (ethtool_link_ksettings_test_link_mode(cmd, advertising,
							  10baseT_Full))
			advertised |= IXGBE_LINK_SPEED_10_FULL;

		if (old == advertised)
			return err;
		/* this sets the link speed and restarts auto-neg */
		while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
			usleep_range(1000, 2000);

		hw->mac.autotry_restart = true;
		err = hw->mac.ops.setup_link(hw, advertised, true);
		if (err) {
			e_info(probe, "setup link failed with code %d\n", err);
			hw->mac.ops.setup_link(hw, old, true);
		}
		clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
	} else {
		/* in this case we currently only support 10Gb/FULL */
		u32 speed = cmd->base.speed;

		if ((cmd->base.autoneg == AUTONEG_ENABLE) ||
		    (!ethtool_link_ksettings_test_link_mode(cmd, advertising,
							    10000baseT_Full)) ||
		    (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL))
			return -EINVAL;
	}

	return err;
}
static void ixgbe_get_pause_stats(struct net_device *netdev,
				  struct ethtool_pause_stats *stats)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw_stats *hwstats = &adapter->stats;

	stats->tx_pause_frames = hwstats->lxontxc + hwstats->lxofftxc;
	stats->rx_pause_frames = hwstats->lxonrxc + hwstats->lxoffrxc;
}
static void ixgbe_get_pauseparam(struct net_device *netdev,
				 struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (ixgbe_device_supports_autoneg_fc(hw) &&
	    !hw->fc.disable_fc_autoneg)
		pause->autoneg = 1;
	else
		pause->autoneg = 0;

	if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
		pause->rx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
		pause->tx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_full) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}
static int ixgbe_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fc_info fc = hw->fc;

	/* 82598 does not support link flow control with DCB enabled */
	if ((hw->mac.type == ixgbe_mac_82598EB) &&
	    (adapter->flags & IXGBE_FLAG_DCB_ENABLED))
		return -EINVAL;

	/* some devices do not support autoneg of link flow control */
	if ((pause->autoneg == AUTONEG_ENABLE) &&
	    !ixgbe_device_supports_autoneg_fc(hw))
		return -EINVAL;

	fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE);

	if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
		fc.requested_mode = ixgbe_fc_full;
	else if (pause->rx_pause && !pause->tx_pause)
		fc.requested_mode = ixgbe_fc_rx_pause;
	else if (!pause->rx_pause && pause->tx_pause)
		fc.requested_mode = ixgbe_fc_tx_pause;
	else
		fc.requested_mode = ixgbe_fc_none;

	/* if the thing changed then we'll update and use new autoneg */
	if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
		hw->fc = fc;
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
		else
			ixgbe_reset(adapter);
	}

	return 0;
}
static u32 ixgbe_get_msglevel(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}
static int ixgbe_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN  1145
	return IXGBE_REGS_LEN * sizeof(u32);
}
#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
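/* ixgbe_get_regs() below zeroes the whole IXGBE_REGS_LEN-word buffer up
 * front, encodes the MAC type, revision id and device id into
 * regs->version, and then fills the buffer at fixed indices so userspace
 * tools can decode the dump by position.
 */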
static void ixgbe_get_regs(struct net_device *netdev,
			   struct ethtool_regs *regs, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u8 i;

	memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));

	regs->version = hw->mac.type << 24 | hw->revision_id << 16 |
			hw->device_id;
	/* General Registers */
	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
	/* NVM Register */
	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC(hw));
	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA(hw));
	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
	regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
	regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
	regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
	regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC(hw));
	/* Interrupt */
	/* don't read EICR because it can clear interrupt causes, instead
	 * read EICS which is a shadow but doesn't clear EICR */
	regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
	regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
	regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
	regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
	regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
	regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
	regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
	regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
	regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
	/* Flow Control */
	regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
	for (i = 0; i < 4; i++)
		regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_FCTTV(i));
	for (i = 0; i < 8; i++) {
		switch (hw->mac.type) {
		case ixgbe_mac_82598EB:
			regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
			regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
			break;
		case ixgbe_mac_82599EB:
		case ixgbe_mac_X540:
		case ixgbe_mac_X550:
		case ixgbe_mac_X550EM_x:
		case ixgbe_mac_x550em_a:
			regs_buff[35 + i] = IXGBE_READ_REG(hw,
							IXGBE_FCRTL_82599(i));
			regs_buff[43 + i] = IXGBE_READ_REG(hw,
							IXGBE_FCRTH_82599(i));
			break;
		default:
			break;
		}
	}
	regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
	regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
	/* Receive DMA */
	for (i = 0; i < 64; i++)
		regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
	for (i = 0; i < 64; i++)
		regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
	for (i = 0; i < 64; i++)
		regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
	for (i = 0; i < 64; i++)
		regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
	for (i = 0; i < 64; i++)
		regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
	for (i = 0; i < 64; i++)
		regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
	regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
	for (i = 0; i < 8; i++)
		regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
	regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);
	/* Receive */
	regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
	for (i = 0; i < 16; i++)
		regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
	for (i = 0; i < 16; i++)
		regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
	regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
	regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
	regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
	regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
	for (i = 0; i < 8; i++)
		regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
	for (i = 0; i < 8; i++)
		regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
	regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);
	/* Transmit */
	for (i = 0; i < 32; i++)
		regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
	for (i = 0; i < 32; i++)
		regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
	for (i = 0; i < 32; i++)
		regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
	for (i = 0; i < 32; i++)
		regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
	for (i = 0; i < 32; i++)
		regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
	for (i = 0; i < 32; i++)
		regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
	regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
	for (i = 0; i < 16; i++)
		regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
	regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
	for (i = 0; i < 8; i++)
		regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
	regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);
[820] = IXGBE_READ_REG(hw
, IXGBE_WUC
);
772 regs_buff
[821] = IXGBE_READ_REG(hw
, IXGBE_WUFC
);
773 regs_buff
[822] = IXGBE_READ_REG(hw
, IXGBE_WUS
);
774 regs_buff
[823] = IXGBE_READ_REG(hw
, IXGBE_IPAV
);
775 regs_buff
[824] = IXGBE_READ_REG(hw
, IXGBE_IP4AT
);
776 regs_buff
[825] = IXGBE_READ_REG(hw
, IXGBE_IP6AT
);
777 regs_buff
[826] = IXGBE_READ_REG(hw
, IXGBE_WUPL
);
778 regs_buff
[827] = IXGBE_READ_REG(hw
, IXGBE_WUPM
);
779 regs_buff
[828] = IXGBE_READ_REG(hw
, IXGBE_FHFT(0));
	/* DCB */
	regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);   /* same as FCCFG  */
	regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */

	switch (hw->mac.type) {
	case ixgbe_mac_82598EB:
		regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
		regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
		for (i = 0; i < 8; i++)
			regs_buff[833 + i] =
				IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
		for (i = 0; i < 8; i++)
			regs_buff[841 + i] =
				IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
		for (i = 0; i < 8; i++)
			regs_buff[849 + i] =
				IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
		for (i = 0; i < 8; i++)
			regs_buff[857 + i] =
				IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
		regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS);
		for (i = 0; i < 8; i++)
			regs_buff[833 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i));
		for (i = 0; i < 8; i++)
			regs_buff[841 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i));
		for (i = 0; i < 8; i++)
			regs_buff[849 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i));
		for (i = 0; i < 8; i++)
			regs_buff[857 + i] =
				IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i));
		break;
	default:
		break;
	}

	for (i = 0; i < 8; i++)
		regs_buff[865 + i] =
		IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */
	for (i = 0; i < 8; i++)
		regs_buff[873 + i] =
		IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */
	/* Statistics */
	regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
	regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
	regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
	regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
	for (i = 0; i < 8; i++)
		regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
	regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
	regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
	regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
	regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
	regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
	regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
	regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
	for (i = 0; i < 8; i++)
		regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
	regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
	regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
	regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
	regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
	regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
	regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
	regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
	regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
	regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
	regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
	regs_buff[942] = (u32)IXGBE_GET_STAT(adapter, gorc);
	regs_buff[943] = (u32)(IXGBE_GET_STAT(adapter, gorc) >> 32);
	regs_buff[944] = (u32)IXGBE_GET_STAT(adapter, gotc);
	regs_buff[945] = (u32)(IXGBE_GET_STAT(adapter, gotc) >> 32);
	for (i = 0; i < 8; i++)
		regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
	regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
	regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
	regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
	regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
	regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
	regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
	regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
	regs_buff[961] = (u32)IXGBE_GET_STAT(adapter, tor);
	regs_buff[962] = (u32)(IXGBE_GET_STAT(adapter, tor) >> 32);
	regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
	regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
	regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
	regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
	regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
	regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
	regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
	regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
	regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
	regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
	regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
	for (i = 0; i < 16; i++)
		regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);
	/* MAC */
	regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
	regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
	regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
	regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
	regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
	regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
	regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
	regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
	regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
	regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
	regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
	regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
	regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
	regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
	regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
	regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
	regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
	regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
	regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
	regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
	regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
	regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
	regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
	regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
	regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
	regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
	regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
	regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
	regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
	regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
	/* Diagnostic */
	regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
	regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
	regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
	regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
	regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
	regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
	regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
	for (i = 0; i < 4; i++)
		regs_buff[1102 + i] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA(i));
	regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
	for (i = 0; i < 4; i++)
		regs_buff[1107 + i] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA(i));
	for (i = 0; i < 8; i++)
		regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
	regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
	regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
	regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
	regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
	regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
	regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
	regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
	regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
	regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);
	/* 82599 X540 specific registers  */
	regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN);

	/* 82599 X540 specific DCB registers  */
	regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC);
	regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC);
	for (i = 0; i < 4; i++)
		regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i));
	regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM);
						/* same as RTTQCNRM */
	regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD);
						/* same as RTTQCNRR */

	/* X540 specific DCB registers  */
	regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR);
	regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG);

	/* Security config registers */
	regs_buff[1139] = IXGBE_READ_REG(hw, IXGBE_SECTXCTRL);
	regs_buff[1140] = IXGBE_READ_REG(hw, IXGBE_SECTXSTAT);
	regs_buff[1141] = IXGBE_READ_REG(hw, IXGBE_SECTXBUFFAF);
	regs_buff[1142] = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
	regs_buff[1143] = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL);
	regs_buff[1144] = IXGBE_READ_REG(hw, IXGBE_SECRXSTAT);
}
static int ixgbe_get_eeprom_len(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	return adapter->hw.eeprom.word_size * 2;
}
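/* The EEPROM is addressed in 16-bit words, hence the "* 2" above and the
 * offset/length shifts (>> 1) in the helpers below: byte ranges are
 * widened to whole words, and an unaligned first or last byte is handled
 * by read-modify-write of its containing word.
 */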
static int ixgbe_get_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	int first_word, last_word, eeprom_len;
	int ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_len = last_word - first_word + 1;

	eeprom_buff = kmalloc_array(eeprom_len, sizeof(u16), GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
					     eeprom_buff);

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < eeprom_len; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
	kfree(eeprom_buff);

	return ret_val;
}
static int ixgbe_set_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	void *ptr;
	int max_len, first_word, last_word, ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
		return -EINVAL;

	max_len = hw->eeprom.word_size * 2;

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ptr = eeprom_buff;

	if (eeprom->offset & 1) {
		/*
		 * need read/modify/write of first changed EEPROM word
		 * only the second byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, first_word, &eeprom_buff[0]);
		if (ret_val)
			goto err;

		ptr++;
	}
	if ((eeprom->offset + eeprom->len) & 1) {
		/*
		 * need read/modify/write of last changed EEPROM word
		 * only the first byte of the word is being modified
		 */
		ret_val = hw->eeprom.ops.read(hw, last_word,
					  &eeprom_buff[last_word - first_word]);
		if (ret_val)
			goto err;
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < last_word - first_word + 1; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(ptr, bytes, eeprom->len);

	for (i = 0; i < last_word - first_word + 1; i++)
		cpu_to_le16s(&eeprom_buff[i]);

	ret_val = hw->eeprom.ops.write_buffer(hw, first_word,
					      last_word - first_word + 1,
					      eeprom_buff);

	/* Update the checksum */
	if (ret_val == 0)
		hw->eeprom.ops.update_checksum(hw);

err:
	kfree(eeprom_buff);
	return ret_val;
}
static void ixgbe_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	strscpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));

	strscpy(drvinfo->fw_version, adapter->eeprom_id,
		sizeof(drvinfo->fw_version));

	strscpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));

	drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN;
}
static u32 ixgbe_get_max_rxd(struct ixgbe_adapter *adapter)
{
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		return IXGBE_MAX_RXD_82598;
	case ixgbe_mac_82599EB:
		return IXGBE_MAX_RXD_82599;
	case ixgbe_mac_X540:
		return IXGBE_MAX_RXD_X540;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		return IXGBE_MAX_RXD_X550;
	default:
		return IXGBE_MAX_RXD_82598;
	}
}
static u32 ixgbe_get_max_txd(struct ixgbe_adapter *adapter)
{
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		return IXGBE_MAX_TXD_82598;
	case ixgbe_mac_82599EB:
		return IXGBE_MAX_TXD_82599;
	case ixgbe_mac_X540:
		return IXGBE_MAX_TXD_X540;
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		return IXGBE_MAX_TXD_X550;
	default:
		return IXGBE_MAX_TXD_82598;
	}
}
static void ixgbe_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring,
				struct kernel_ethtool_ringparam *kernel_ring,
				struct netlink_ext_ack *extack)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
	struct ixgbe_ring *rx_ring = adapter->rx_ring[0];

	ring->rx_max_pending = ixgbe_get_max_rxd(adapter);
	ring->tx_max_pending = ixgbe_get_max_txd(adapter);
	ring->rx_pending = rx_ring->count;
	ring->tx_pending = tx_ring->count;
}
static int ixgbe_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring,
			       struct kernel_ethtool_ringparam *kernel_ring,
			       struct netlink_ext_ack *extack)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *temp_ring;
	int i, j, err = 0;
	u32 new_rx_count, new_tx_count;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_tx_count = clamp_t(u32, ring->tx_pending,
			       IXGBE_MIN_TXD, ixgbe_get_max_txd(adapter));
	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = clamp_t(u32, ring->rx_pending,
			       IXGBE_MIN_RXD, ixgbe_get_max_rxd(adapter));
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == adapter->tx_ring_count) &&
	    (new_rx_count == adapter->rx_ring_count)) {
		/* nothing to do */
		return 0;
	}

	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (!netif_running(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_xdp_queues; i++)
			adapter->xdp_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->xdp_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	/* allocate temporary buffer to store rings in */
	i = max_t(int, adapter->num_tx_queues + adapter->num_xdp_queues,
		  adapter->num_rx_queues);
	temp_ring = vmalloc(array_size(i, sizeof(struct ixgbe_ring)));
	if (!temp_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	ixgbe_down(adapter);

	/*
	 * Setup new Tx resources and free the old Tx resources in that order.
	 * We can then assign the new resources to the rings via a memcpy.
	 * The advantage to this approach is that we are guaranteed to still
	 * have resources even in the case of an allocation failure.
	 */
	if (new_tx_count != adapter->tx_ring_count) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			memcpy(&temp_ring[i], adapter->tx_ring[i],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
			memcpy(&temp_ring[i], adapter->xdp_ring[j],
			       sizeof(struct ixgbe_ring));

			temp_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(&temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_tx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < adapter->num_tx_queues; i++) {
			ixgbe_free_tx_resources(adapter->tx_ring[i]);

			memcpy(adapter->tx_ring[i], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}
		for (j = 0; j < adapter->num_xdp_queues; j++, i++) {
			ixgbe_free_tx_resources(adapter->xdp_ring[j]);

			memcpy(adapter->xdp_ring[j], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}

		adapter->tx_ring_count = new_tx_count;
	}

	/* Repeat the process for the Rx rings if needed */
	if (new_rx_count != adapter->rx_ring_count) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			memcpy(&temp_ring[i], adapter->rx_ring[i],
			       sizeof(struct ixgbe_ring));

			/* Clear copied XDP RX-queue info */
			memset(&temp_ring[i].xdp_rxq, 0,
			       sizeof(temp_ring[i].xdp_rxq));

			temp_ring[i].count = new_rx_count;
			err = ixgbe_setup_rx_resources(adapter, &temp_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_rx_resources(&temp_ring[i]);
				}
				goto err_setup;
			}
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			ixgbe_free_rx_resources(adapter->rx_ring[i]);

			memcpy(adapter->rx_ring[i], &temp_ring[i],
			       sizeof(struct ixgbe_ring));
		}

		adapter->rx_ring_count = new_rx_count;
	}

err_setup:
	ixgbe_up(adapter);
	vfree(temp_ring);
clear_reset:
	clear_bit(__IXGBE_RESETTING, &adapter->state);
	return err;
}
static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return IXGBE_TEST_LEN;
	case ETH_SS_STATS:
		return IXGBE_STATS_LEN;
	case ETH_SS_PRIV_FLAGS:
		return IXGBE_PRIV_FLAGS_STR_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
static void ixgbe_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *net_stats;
	unsigned int start;
	struct ixgbe_ring *ring;
	int i, j;
	char *p = NULL;

	ixgbe_update_stats(adapter);
	net_stats = dev_get_stats(netdev, &temp);
	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
		switch (ixgbe_gstrings_stats[i].type) {
		case NETDEV_STATS:
			p = (char *) net_stats +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		case IXGBE_STATS:
			p = (char *) adapter +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		default:
			data[i] = 0;
			continue;
		}

		data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	for (j = 0; j < netdev->num_tx_queues; j++) {
		ring = adapter->tx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

		do {
			start = u64_stats_fetch_begin(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry(&ring->syncp, start));
		i += 2;
	}
	for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
		ring = adapter->rx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

		do {
			start = u64_stats_fetch_begin(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry(&ring->syncp, start));
		i += 2;
	}

	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxontxc[j];
		data[i++] = adapter->stats.pxofftxc[j];
	}
	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxonrxc[j];
		data[i++] = adapter->stats.pxoffrxc[j];
	}
}
static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
			      u8 *data)
{
	unsigned int i;
	u8 *p = data;

	switch (stringset) {
	case ETH_SS_TEST:
		for (i = 0; i < IXGBE_TEST_LEN; i++)
			ethtool_puts(&p, ixgbe_gstrings_test[i]);
		break;
	case ETH_SS_STATS:
		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++)
			ethtool_puts(&p, ixgbe_gstrings_stats[i].stat_string);
		for (i = 0; i < netdev->num_tx_queues; i++) {
			ethtool_sprintf(&p, "tx_queue_%u_packets", i);
			ethtool_sprintf(&p, "tx_queue_%u_bytes", i);
		}
		for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
			ethtool_sprintf(&p, "rx_queue_%u_packets", i);
			ethtool_sprintf(&p, "rx_queue_%u_bytes", i);
		}
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			ethtool_sprintf(&p, "tx_pb_%u_pxon", i);
			ethtool_sprintf(&p, "tx_pb_%u_pxoff", i);
		}
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			ethtool_sprintf(&p, "rx_pb_%u_pxon", i);
			ethtool_sprintf(&p, "rx_pb_%u_pxoff", i);
		}
		/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
		break;
	case ETH_SS_PRIV_FLAGS:
		memcpy(data, ixgbe_priv_flags_strings,
		       IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
	}
}
static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link_up;
	u32 link_speed = 0;

	if (ixgbe_removed(hw->hw_addr)) {
		*data = 1;
		return 1;
	}
	*data = 0;
	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
	if (link_up)
		return *data;
	else
		*data = 1;
	return *data;
}
/* ethtool register test data */
struct ixgbe_reg_test {
	u16 reg;
	u8  array_len;
	u8  test_type;
	u32 mask;
	u32 write;
};

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x40 bytes apart, or in contiguous tables.  We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */

#define PATTERN_TEST	1
#define SET_READ_TEST	2
#define WRITE_NO_TEST	3
#define TABLE32_TEST	4
#define TABLE64_TEST_LO	5
#define TABLE64_TEST_HI	6
/* default 82599 register test */
static const struct ixgbe_reg_test reg_test_82599[] = {
	{ IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ .reg = 0 }
};
/* default 82598 register test */
static const struct ixgbe_reg_test reg_test_82598[] = {
	{ IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	/* Enable all four RX queues before testing. */
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	/* RDH is read-only for 82598, only test RDT. */
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
	{ IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ .reg = 0 }
};
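/* Both tables are terminated by a zeroed sentinel entry; ixgbe_reg_test()
 * walks each table until it reaches that null entry.
 */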
static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
			     u32 mask, u32 write)
{
	u32 pat, val, before;
	static const u32 test_pattern[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
		before = ixgbe_read_reg(&adapter->hw, reg);
		ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write);
		val = ixgbe_read_reg(&adapter->hw, reg);
		if (val != (test_pattern[pat] & write & mask)) {
			e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
			      reg, val, (test_pattern[pat] & write & mask));
			*data = reg;
			ixgbe_write_reg(&adapter->hw, reg, before);
			return true;
		}
		ixgbe_write_reg(&adapter->hw, reg, before);
	}
	return false;
}
static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
			      u32 mask, u32 write)
{
	u32 val, before;

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	before = ixgbe_read_reg(&adapter->hw, reg);
	ixgbe_write_reg(&adapter->hw, reg, write & mask);
	val = ixgbe_read_reg(&adapter->hw, reg);
	if ((write & mask) != (val & mask)) {
		e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
		      reg, (val & mask), (write & mask));
		*data = reg;
		ixgbe_write_reg(&adapter->hw, reg, before);
		return true;
	}
	ixgbe_write_reg(&adapter->hw, reg, before);
	return false;
}
static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
{
	const struct ixgbe_reg_test *test;
	u32 value, before, after;
	u32 i, toggle;

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		e_err(drv, "Adapter removed - register test blocked\n");
		*data = 1;
		return 1;
	}
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82598EB:
		toggle = 0x7FFFF3FF;
		test = reg_test_82598;
		break;
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		toggle = 0x7FFFF30F;
		test = reg_test_82599;
		break;
	default:
		*data = 1;
		return 1;
	}

	/*
	 * Because the status register is such a special case,
	 * we handle it separately from the rest of the register
	 * tests.  Some bits are read-only, some toggle, and some
	 * are writeable on newer MACs.
	 */
	before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS);
	value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle);
	ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle);
	after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle;
	if (value != after) {
		e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n",
		      after, value);
		*data = 1;
		return 1;
	}
	/* restore previous status */
	ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before);

	/*
	 * Perform the remainder of the register test, looping through
	 * the test table until we either fail or reach the null entry.
	 */
	while (test->reg) {
		for (i = 0; i < test->array_len; i++) {
			bool b = false;

			switch (test->test_type) {
			case PATTERN_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 0x40),
						     test->mask,
						     test->write);
				break;
			case SET_READ_TEST:
				b = reg_set_and_check(adapter, data,
						      test->reg + (i * 0x40),
						      test->mask,
						      test->write);
				break;
			case WRITE_NO_TEST:
				ixgbe_write_reg(&adapter->hw,
						test->reg + (i * 0x40),
						test->write);
				break;
			case TABLE32_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 4),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_LO:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 8),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_HI:
				b = reg_pattern_test(adapter, data,
						     (test->reg + 4) + (i * 8),
						     test->mask,
						     test->write);
				break;
			}
			if (b)
				return 1;
		}
		test++;
	}

	*data = 0;
	return 0;
}
static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if (hw->eeprom.ops.validate_checksum(hw, NULL))
		*data = 1;
	else
		*data = 0;
	return *data;
}
static irqreturn_t ixgbe_test_intr(int irq, void *data)
{
	struct net_device *netdev = (struct net_device *) data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);

	return IRQ_HANDLED;
}
static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct net_device *netdev = adapter->netdev;
	u32 mask, i = 0, shared_int = true;
	u32 irq = adapter->pdev->irq;

	*data = 0;

	/* Hook up test interrupt handler just for this test */
	if (adapter->msix_entries) {
		/* NOTE: we don't test MSI-X interrupts here, yet */
		return 0;
	} else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) {
		shared_int = false;
		if (request_irq(irq, ixgbe_test_intr, 0, netdev->name,
				netdev)) {
			*data = 1;
			return -1;
		}
	} else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED,
				netdev->name, netdev)) {
		shared_int = false;
	} else if (request_irq(irq, ixgbe_test_intr, IRQF_SHARED,
			       netdev->name, netdev)) {
		*data = 1;
		return -1;
	}
	e_info(hw, "testing %s interrupt\n", shared_int ?
	       "shared" : "unshared");

	/* Disable all the interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	usleep_range(10000, 20000);

	/* Test each interrupt */
	for (; i < 10; i++) {
		/* Interrupt to test */
		mask = BIT(i);

		if (!shared_int) {
			/*
			 * Disable the interrupts to be reported in
			 * the cause register and then force the same
			 * interrupt and see if one gets posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
					~mask & 0x00007FFF);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
					~mask & 0x00007FFF);
			IXGBE_WRITE_FLUSH(&adapter->hw);
			usleep_range(10000, 20000);

			if (adapter->test_icr & mask) {
				*data = 3;
				break;
			}
		}

		/*
		 * Enable the interrupt to be reported in the cause
		 * register and then force the same interrupt and see
		 * if one gets posted.  If an interrupt was not posted
		 * to the bus, the test failed.
		 */
		adapter->test_icr = 0;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
		IXGBE_WRITE_FLUSH(&adapter->hw);
		usleep_range(10000, 20000);

		if (!(adapter->test_icr & mask)) {
			*data = 4;
			break;
		}

		if (!shared_int) {
			/*
			 * Disable the other interrupts to be reported in
			 * the cause register and then force the other
			 * interrupts and see if any get posted.  If
			 * an interrupt was posted to the bus, the
			 * test failed.
			 */
			adapter->test_icr = 0;
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC,
					~mask & 0x00007FFF);
			IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS,
					~mask & 0x00007FFF);
			IXGBE_WRITE_FLUSH(&adapter->hw);
			usleep_range(10000, 20000);

			if (adapter->test_icr) {
				*data = 5;
				break;
			}
		}
	}

	/* Disable all the interrupts */
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF);
	IXGBE_WRITE_FLUSH(&adapter->hw);
	usleep_range(10000, 20000);

	/* Unhook test interrupt handler */
	free_irq(irq, netdev);

	return *data;
}
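/* Example (illustrative, not part of the driver): this interrupt test is
 * exercised from userspace via the ethtool self-test, e.g.
 *
 *	# ethtool -t eth0 offline
 *
 * where "eth0" is a placeholder interface name.  The test forces each
 * cause bit through EICS and then inspects test_icr (set by
 * ixgbe_test_intr() above) to confirm that the expected interrupt, and
 * only that interrupt, was actually posted.
 */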
static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
{
	/* Shut down the DMA engines now so they can be reinitialized later,
	 * since the test rings and normally used rings should overlap on
	 * queue 0 we can just use the standard disable Rx/Tx calls and they
	 * will take care of disabling the test rings for us.
	 */

	/* first Rx */
	ixgbe_disable_rx(adapter);

	/* now Tx */
	ixgbe_disable_tx(adapter);

	ixgbe_reset(adapter);

	ixgbe_free_tx_resources(&adapter->test_tx_ring);
	ixgbe_free_rx_resources(&adapter->test_rx_ring);
}
static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 rctl, reg_data;
	int ret_val;
	int err;

	/* Setup Tx descriptor ring and Tx buffers */
	tx_ring->count = IXGBE_DEFAULT_TXD;
	tx_ring->queue_index = 0;
	tx_ring->dev = &adapter->pdev->dev;
	tx_ring->netdev = adapter->netdev;
	tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;

	err = ixgbe_setup_tx_resources(tx_ring);
	if (err)
		return 1;

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
		reg_data |= IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
		break;
	default:
		break;
	}

	ixgbe_configure_tx_ring(adapter, tx_ring);

	/* Setup Rx Descriptor ring and Rx buffers */
	rx_ring->count = IXGBE_DEFAULT_RXD;
	rx_ring->queue_index = 0;
	rx_ring->dev = &adapter->pdev->dev;
	rx_ring->netdev = adapter->netdev;
	rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;

	err = ixgbe_setup_rx_resources(adapter, rx_ring);
	if (err) {
		ret_val = 4;
		goto err_nomem;
	}

	hw->mac.ops.disable_rx(hw);

	ixgbe_configure_rx_ring(adapter, rx_ring);

	rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL);
	rctl |= IXGBE_RXCTRL_DMBYPS;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl);

	hw->mac.ops.enable_rx(hw);

	return 0;

err_nomem:
	ixgbe_free_desc_rings(adapter);
	return ret_val;
}
static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg_data;

	/* Setup MAC loopback */
	reg_data = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	reg_data |= IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(hw, IXGBE_HLREG0, reg_data);

	reg_data = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	reg_data |= IXGBE_FCTRL_BAM | IXGBE_FCTRL_SBP | IXGBE_FCTRL_MPE;
	IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data);

	/* X540 and X550 needs to set the MACC.FLU bit to force link up */
	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X540:
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		reg_data = IXGBE_READ_REG(hw, IXGBE_MACC);
		reg_data |= IXGBE_MACC_FLU;
		IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data);
		break;
	default:
		if (hw->mac.orig_autoc) {
			reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU;
			IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data);
		} else {
			return 10;
		}
	}
	IXGBE_WRITE_FLUSH(hw);
	usleep_range(10000, 20000);

	/* Disable Atlas Tx lanes; re-enabled in reset path */
	if (hw->mac.type == ixgbe_mac_82598EB) {
		u8 atlas;

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_REG_EN;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_LPBK, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_10G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_10G, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_1G_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_1G, atlas);

		hw->mac.ops.read_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, &atlas);
		atlas |= IXGBE_ATLAS_PDN_TX_AN_QL_ALL;
		hw->mac.ops.write_analog_reg8(hw, IXGBE_ATLAS_PDN_AN, atlas);
	}

	return 0;
}
static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
{
	u32 reg_data;

	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
	reg_data &= ~IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
}
static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
				      unsigned int frame_size)
{
	memset(skb->data, 0xFF, frame_size);
	frame_size >>= 1;
	memset(&skb->data[frame_size], 0xAA, frame_size / 2 - 1);
	skb->data[frame_size + 10] = 0xBE;
	skb->data[frame_size + 12] = 0xAF;
}
static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer,
				     unsigned int frame_size)
{
	unsigned char *data;

	frame_size >>= 1;

	data = page_address(rx_buffer->page) + rx_buffer->page_offset;

	return data[3] == 0xFF && data[frame_size + 10] == 0xBE &&
	       data[frame_size + 12] == 0xAF;
}
static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
				  struct ixgbe_ring *tx_ring,
				  unsigned int size)
{
	union ixgbe_adv_rx_desc *rx_desc;
	u16 rx_ntc, tx_ntc, count = 0;

	/* initialize next to clean and descriptor values */
	rx_ntc = rx_ring->next_to_clean;
	tx_ntc = tx_ring->next_to_clean;
	rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);

	while (tx_ntc != tx_ring->next_to_use) {
		union ixgbe_adv_tx_desc *tx_desc;
		struct ixgbe_tx_buffer *tx_buffer;

		tx_desc = IXGBE_TX_DESC(tx_ring, tx_ntc);

		/* if DD is not set transmit has not completed */
		if (!(tx_desc->wb.status & cpu_to_le32(IXGBE_TXD_STAT_DD)))
			return count;

		/* unmap buffer on Tx side */
		tx_buffer = &tx_ring->tx_buffer_info[tx_ntc];

		/* Free all the Tx ring sk_buffs */
		dev_kfree_skb_any(tx_buffer->skb);

		/* unmap skb header data */
		dma_unmap_single(tx_ring->dev,
				 dma_unmap_addr(tx_buffer, dma),
				 dma_unmap_len(tx_buffer, len),
				 DMA_TO_DEVICE);
		dma_unmap_len_set(tx_buffer, len, 0);

		/* increment Tx next to clean counter */
		tx_ntc++;
		if (tx_ntc == tx_ring->count)
			tx_ntc = 0;
	}

	while (rx_desc->wb.upper.length) {
		struct ixgbe_rx_buffer *rx_buffer;

		/* check Rx buffer */
		rx_buffer = &rx_ring->rx_buffer_info[rx_ntc];

		/* sync Rx buffer for CPU read */
		dma_sync_single_for_cpu(rx_ring->dev,
					rx_buffer->dma,
					ixgbe_rx_bufsz(rx_ring),
					DMA_FROM_DEVICE);

		/* verify contents of skb */
		if (ixgbe_check_lbtest_frame(rx_buffer, size))
			count++;
		else
			break;

		/* sync Rx buffer for device write */
		dma_sync_single_for_device(rx_ring->dev,
					   rx_buffer->dma,
					   ixgbe_rx_bufsz(rx_ring),
					   DMA_FROM_DEVICE);

		/* increment Rx next to clean counter */
		rx_ntc++;
		if (rx_ntc == rx_ring->count)
			rx_ntc = 0;

		/* fetch next descriptor */
		rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc);
	}

	netdev_tx_reset_queue(txring_txq(tx_ring));

	/* re-map buffers to ring, store next to clean values */
	ixgbe_alloc_rx_buffers(rx_ring, count);
	rx_ring->next_to_clean = rx_ntc;
	tx_ring->next_to_clean = tx_ntc;

	return count;
}
static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	int i, j, lc, good_cnt, ret_val = 0;
	unsigned int size = 1024;
	netdev_tx_t tx_ret_val;
	struct sk_buff *skb;
	u32 flags_orig = adapter->flags;

	/* DCB can modify the frames on Tx */
	adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;

	/* allocate test skb */
	skb = alloc_skb(size, GFP_KERNEL);
	if (!skb)
		return 11;

	/* place data into test skb */
	ixgbe_create_lbtest_frame(skb, size);
	skb_put(skb, size);

	/*
	 * Calculate the loop count based on the largest descriptor ring
	 * The idea is to wrap the largest ring a number of times using 64
	 * send/receive pairs during each loop
	 */

	if (rx_ring->count <= tx_ring->count)
		lc = ((tx_ring->count / 64) * 2) + 1;
	else
		lc = ((rx_ring->count / 64) * 2) + 1;

	for (j = 0; j <= lc; j++) {
		/* reset count of good packets */
		good_cnt = 0;

		/* place 64 packets on the transmit queue*/
		for (i = 0; i < 64; i++) {
			skb_get(skb);
			tx_ret_val = ixgbe_xmit_frame_ring(skb,
							   adapter,
							   tx_ring);
			if (tx_ret_val == NETDEV_TX_OK)
				good_cnt++;
		}

		if (good_cnt != 64) {
			ret_val = 12;
			break;
		}

		/* allow 200 milliseconds for packets to go from Tx to Rx */
		msleep(200);

		good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
		if (good_cnt != 64) {
			ret_val = 13;
			break;
		}
	}

	/* free the original skb */
	kfree_skb(skb);
	adapter->flags = flags_orig;

	return ret_val;
}
static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
{
	*data = ixgbe_setup_desc_rings(adapter);
	if (*data)
		goto out;
	*data = ixgbe_setup_loopback_test(adapter);
	if (*data)
		goto err_loopback;
	*data = ixgbe_run_loopback_test(adapter);
	ixgbe_loopback_cleanup(adapter);

err_loopback:
	ixgbe_free_desc_rings(adapter);
out:
	return *data;
}
static void ixgbe_diag_test(struct net_device *netdev,
			    struct ethtool_test *eth_test, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	bool if_running = netif_running(netdev);

	if (ixgbe_removed(adapter->hw.hw_addr)) {
		e_err(hw, "Adapter removed - test blocked\n");
		data[0] = 1;
		data[1] = 1;
		data[2] = 1;
		data[3] = 1;
		data[4] = 1;
		eth_test->flags |= ETH_TEST_FL_FAILED;
		return;
	}
	set_bit(__IXGBE_TESTING, &adapter->state);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		struct ixgbe_hw *hw = &adapter->hw;

		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
			int i;

			for (i = 0; i < adapter->num_vfs; i++) {
				if (adapter->vfinfo[i].clear_to_send) {
					netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n");
					data[0] = 1;
					data[1] = 1;
					data[2] = 1;
					data[3] = 1;
					data[4] = 1;
					eth_test->flags |= ETH_TEST_FL_FAILED;
					clear_bit(__IXGBE_TESTING,
						  &adapter->state);
					return;
				}
			}
		}

		/* Offline tests */
		e_info(hw, "offline testing starting\n");

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result
		 */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (if_running)
			/* indicate we're in test mode */
			ixgbe_close(netdev);
		else
			ixgbe_reset(adapter);

		e_info(hw, "register testing starting\n");
		if (ixgbe_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		e_info(hw, "eeprom testing starting\n");
		if (ixgbe_eeprom_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		e_info(hw, "interrupt testing starting\n");
		if (ixgbe_intr_test(adapter, &data[2]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* If SRIOV or VMDq is enabled then skip MAC
		 * loopback diagnostic. */
		if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
				      IXGBE_FLAG_VMDQ_ENABLED)) {
			e_info(hw, "Skip MAC loopback diagnostic in VT mode\n");
			data[3] = 0;
			goto skip_loopback;
		}

		ixgbe_reset(adapter);
		e_info(hw, "loopback testing starting\n");
		if (ixgbe_loopback_test(adapter, &data[3]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

skip_loopback:
		ixgbe_reset(adapter);

		/* clear testing bit and return adapter to previous state */
		clear_bit(__IXGBE_TESTING, &adapter->state);
		if (if_running)
			ixgbe_open(netdev);
		else if (hw->mac.ops.disable_tx_laser)
			hw->mac.ops.disable_tx_laser(hw);
	} else {
		e_info(hw, "online testing starting\n");

		/* Online tests */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Offline tests aren't run; pass by default */
		data[0] = 0;
		data[1] = 0;
		data[2] = 0;
		data[3] = 0;

		clear_bit(__IXGBE_TESTING, &adapter->state);
	}
}
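/* Example usage (illustrative; "eth0" is a placeholder interface):
 *
 *	# ethtool -t eth0 offline	- full self-test, link is disrupted
 *	# ethtool -t eth0 online	- link test only, non-disruptive
 *
 * Results come back in order: data[0] register, data[1] eeprom,
 * data[2] interrupt, data[3] loopback, data[4] link; a non-zero value
 * marks the corresponding test as failed.
 */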
static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
			       struct ethtool_wolinfo *wol)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int retval = 0;

	/* WOL not supported for all devices */
	if (!ixgbe_wol_supported(adapter, hw->device_id,
				 hw->subsystem_device_id)) {
		retval = 1;
		wol->supported = 0;
	}

	return retval;
}
static void ixgbe_get_wol(struct net_device *netdev,
			  struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	wol->supported = WAKE_UCAST | WAKE_MCAST |
			 WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = 0;

	if (ixgbe_wol_exclusion(adapter, wol) ||
	    !device_can_wakeup(&adapter->pdev->dev))
		return;

	if (adapter->wol & IXGBE_WUFC_EX)
		wol->wolopts |= WAKE_UCAST;
	if (adapter->wol & IXGBE_WUFC_MC)
		wol->wolopts |= WAKE_MCAST;
	if (adapter->wol & IXGBE_WUFC_BC)
		wol->wolopts |= WAKE_BCAST;
	if (adapter->wol & IXGBE_WUFC_MAG)
		wol->wolopts |= WAKE_MAGIC;
}
static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE |
			    WAKE_FILTER))
		return -EOPNOTSUPP;

	if (ixgbe_wol_exclusion(adapter, wol))
		return wol->wolopts ? -EOPNOTSUPP : 0;

	adapter->wol = 0;

	if (wol->wolopts & WAKE_UCAST)
		adapter->wol |= IXGBE_WUFC_EX;
	if (wol->wolopts & WAKE_MCAST)
		adapter->wol |= IXGBE_WUFC_MC;
	if (wol->wolopts & WAKE_BCAST)
		adapter->wol |= IXGBE_WUFC_BC;
	if (wol->wolopts & WAKE_MAGIC)
		adapter->wol |= IXGBE_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}
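/* Example (illustrative): enable magic-packet wake on a placeholder
 * ixgbe port and verify the setting:
 *
 *	# ethtool -s eth0 wol g
 *	# ethtool eth0 | grep Wake-on
 *
 * The "g" option maps to WAKE_MAGIC and hence IXGBE_WUFC_MAG above.
 */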
static int ixgbe_nway_reset(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}
static int ixgbe_set_phys_id(struct net_device *netdev,
			     enum ethtool_phys_id_state state)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (!hw->mac.ops.led_on || !hw->mac.ops.led_off)
		return -EOPNOTSUPP;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
		return 2;

	case ETHTOOL_ID_ON:
		hw->mac.ops.led_on(hw, hw->mac.led_link_act);
		break;

	case ETHTOOL_ID_OFF:
		hw->mac.ops.led_off(hw, hw->mac.led_link_act);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore LED settings */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
		break;
	}

	return 0;
}
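/* Example (illustrative): blink the port identification LED for five
 * seconds on a placeholder interface to locate it in a rack:
 *
 *	# ethtool -p eth0 5
 */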
static int ixgbe_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* only valid if in constant ITR mode */
	if (adapter->rx_itr_setting <= 1)
		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
	else
		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;

	/* if in mixed tx/rx queues per vector mode, report only rx settings */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		return 0;

	/* only valid if in constant ITR mode */
	if (adapter->tx_itr_setting <= 1)
		ec->tx_coalesce_usecs = adapter->tx_itr_setting;
	else
		ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;

	return 0;
}
/*
 * this function must be called before setting the new value of
 * rx_itr_setting
 */
static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter)
{
	struct net_device *netdev = adapter->netdev;

	/* nothing to do if LRO or RSC are not enabled */
	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ||
	    !(netdev->features & NETIF_F_LRO))
		return false;

	/* check the feature flag value and enable RSC if necessary */
	if (adapter->rx_itr_setting == 1 ||
	    adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) {
		if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
			adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
			e_info(probe, "rx-usecs value high enough to re-enable RSC\n");
			return true;
		}
	/* if interrupt rate is too high then disable RSC */
	} else if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
		adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
		e_info(probe, "rx-usecs set too low, disabling RSC\n");
		return true;
	}
	return false;
}
static int ixgbe_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec,
			      struct kernel_ethtool_coalesce *kernel_coal,
			      struct netlink_ext_ack *extack)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_q_vector *q_vector;
	int i;
	u16 tx_itr_param, rx_itr_param, tx_itr_prev;
	bool need_reset = false;

	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) {
		/* reject Tx specific changes in case of mixed RxTx vectors */
		if (ec->tx_coalesce_usecs)
			return -EINVAL;
		tx_itr_prev = adapter->rx_itr_setting;
	} else {
		tx_itr_prev = adapter->tx_itr_setting;
	}

	if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
	    (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
		return -EINVAL;

	if (ec->rx_coalesce_usecs > 1)
		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
		adapter->rx_itr_setting = ec->rx_coalesce_usecs;

	if (adapter->rx_itr_setting == 1)
		rx_itr_param = IXGBE_20K_ITR;
	else
		rx_itr_param = adapter->rx_itr_setting;

	if (ec->tx_coalesce_usecs > 1)
		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
	else
		adapter->tx_itr_setting = ec->tx_coalesce_usecs;

	if (adapter->tx_itr_setting == 1)
		tx_itr_param = IXGBE_12K_ITR;
	else
		tx_itr_param = adapter->tx_itr_setting;

	/* mixed Rx/Tx */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		adapter->tx_itr_setting = adapter->rx_itr_setting;

	/* detect ITR changes that require update of TXDCTL.WTHRESH */
	if ((adapter->tx_itr_setting != 1) &&
	    (adapter->tx_itr_setting < IXGBE_100K_ITR)) {
		if ((tx_itr_prev == 1) ||
		    (tx_itr_prev >= IXGBE_100K_ITR))
			need_reset = true;
	} else {
		if ((tx_itr_prev != 1) &&
		    (tx_itr_prev < IXGBE_100K_ITR))
			need_reset = true;
	}

	/* check the old value and enable RSC if necessary */
	need_reset |= ixgbe_update_rsc(adapter);

	for (i = 0; i < adapter->num_q_vectors; i++) {
		q_vector = adapter->q_vector[i];
		if (q_vector->tx.count && !q_vector->rx.count)
			/* tx only */
			q_vector->itr = tx_itr_param;
		else
			/* rx only or mixed */
			q_vector->itr = rx_itr_param;
		ixgbe_write_eitr(q_vector);
	}

	/*
	 * do reset here at the end to make sure EITR==0 case is handled
	 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
	 * also locks in RSC enable/disable which requires reset
	 */
	if (need_reset)
		ixgbe_do_reset(netdev);

	return 0;
}
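/* Worked example: "ethtool -C eth0 rx-usecs 50" (placeholder interface)
 * stores 50 << 2 = 200 in rx_itr_setting, and ixgbe_get_coalesce()
 * reports it back as 200 >> 2 = 50.  A value of 1 means "dynamic" and is
 * mapped to IXGBE_20K_ITR for Rx / IXGBE_12K_ITR for Tx above.
 */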
static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	union ixgbe_atr_input *mask = &adapter->fdir_mask;
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *rule = NULL;

	/* report total rule count */
	cmd->data = (1024 << adapter->fdir_pballoc) - 2;

	hlist_for_each_entry_safe(rule, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		if (fsp->location <= rule->sw_idx)
			break;
	}

	if (!rule || fsp->location != rule->sw_idx)
		return -EINVAL;

	/* fill out the flow spec entry */

	/* set flow type field */
	switch (rule->filter.formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
		fsp->flow_type = TCP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
		fsp->flow_type = UDP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
		fsp->flow_type = SCTP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_IPV4:
		fsp->flow_type = IP_USER_FLOW;
		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		fsp->h_u.usr_ip4_spec.proto = 0;
		fsp->m_u.usr_ip4_spec.proto = 0;
		break;
	default:
		return -EINVAL;
	}

	fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
	fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
	fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
	fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
	fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
	fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
	fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
	fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
	fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
	fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
	fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
	fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
	fsp->flow_type |= FLOW_EXT;

	/* record action */
	if (rule->action == IXGBE_FDIR_DROP_QUEUE)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = rule->action;

	return 0;
}
static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
				      struct ethtool_rxnfc *cmd,
				      u32 *rule_locs)
{
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *rule;
	int cnt = 0;

	/* report total rule count */
	cmd->data = (1024 << adapter->fdir_pballoc) - 2;

	hlist_for_each_entry_safe(rule, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		if (cnt == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[cnt] = rule->sw_idx;
		cnt++;
	}

	cmd->rule_cnt = cnt;

	return 0;
}
static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter,
				   struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;

	/* Report default options for RSS on ixgbe */
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case UDP_V4_FLOW:
		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	case TCP_V6_FLOW:
		cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case UDP_V6_FLOW:
		if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
			cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		fallthrough;
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter)
{
	if (adapter->hw.mac.type < ixgbe_mac_X550)
		return 16;
	else
		return 64;
}
static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			   u32 *rule_locs)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = min_t(int, adapter->num_rx_queues,
				  ixgbe_rss_indir_tbl_max(adapter));
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = adapter->fdir_filter_count;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
		break;
	case ETHTOOL_GRXFH:
		ret = ixgbe_get_rss_hash_opts(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}
int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
				    struct ixgbe_fdir_filter *input,
				    u16 sw_idx)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct hlist_node *node2;
	struct ixgbe_fdir_filter *rule, *parent;
	int err = -EINVAL;

	parent = NULL;
	rule = NULL;

	hlist_for_each_entry_safe(rule, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		/* hash found, or no matching entry */
		if (rule->sw_idx >= sw_idx)
			break;
		parent = rule;
	}

	/* if there is an old rule occupying our place remove it */
	if (rule && (rule->sw_idx == sw_idx)) {
		if (!input || (rule->filter.formatted.bkt_hash !=
			       input->filter.formatted.bkt_hash)) {
			err = ixgbe_fdir_erase_perfect_filter_82599(hw,
								    sw_idx);
		}

		hlist_del(&rule->fdir_node);
		kfree(rule);
		adapter->fdir_filter_count--;
	}

	/*
	 * If no input this was a delete, err should be 0 if a rule was
	 * successfully found and removed from the list else -EINVAL
	 */
	if (!input)
		return err;

	/* initialize node and set software index */
	INIT_HLIST_NODE(&input->fdir_node);

	/* add filter to the list */
	if (parent)
		hlist_add_behind(&input->fdir_node, &parent->fdir_node);
	else
		hlist_add_head(&input->fdir_node,
			       &adapter->fdir_filter_list);

	/* update counts */
	adapter->fdir_filter_count++;

	return 0;
}
static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
				       u8 *flow_type)
{
	switch (fsp->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
		break;
	case UDP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
		break;
	case SCTP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
		break;
	case IP_USER_FLOW:
		switch (fsp->h_u.usr_ip4_spec.proto) {
		case IPPROTO_TCP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
			break;
		case IPPROTO_UDP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
			break;
		case IPPROTO_SCTP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
			break;
		case 0:
			if (!fsp->m_u.usr_ip4_spec.proto) {
				*flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
				break;
			}
			fallthrough;
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	return 1;
}
static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fdir_filter *input;
	union ixgbe_atr_input mask;
	u8 queue;
	int err;

	if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
		return -EOPNOTSUPP;

	/* ring_cookie is a masked into a set of queues and ixgbe pools or
	 * we use the drop index.
	 */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC) {
		queue = IXGBE_FDIR_DROP_QUEUE;
	} else {
		u32 ring = ethtool_get_flow_spec_ring(fsp->ring_cookie);
		u8 vf = ethtool_get_flow_spec_ring_vf(fsp->ring_cookie);

		if (!vf && (ring >= adapter->num_rx_queues))
			return -EINVAL;
		else if (vf &&
			 ((vf > adapter->num_vfs) ||
			   ring >= adapter->num_rx_queues_per_pool))
			return -EINVAL;

		/* Map the ring onto the absolute queue index */
		if (!vf)
			queue = adapter->rx_ring[ring]->reg_idx;
		else
			queue = ((vf - 1) *
				adapter->num_rx_queues_per_pool) + ring;
	}

	/* Don't allow indexes to exist outside of available space */
	if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
		e_err(drv, "Location out of range\n");
		return -EINVAL;
	}

	input = kzalloc(sizeof(*input), GFP_ATOMIC);
	if (!input)
		return -ENOMEM;

	memset(&mask, 0, sizeof(union ixgbe_atr_input));

	/* set SW index */
	input->sw_idx = fsp->location;

	/* record flow type */
	if (!ixgbe_flowspec_to_flow_type(fsp,
					 &input->filter.formatted.flow_type)) {
		e_err(drv, "Unrecognized flow type\n");
		goto err_out;
	}

	mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
				   IXGBE_ATR_L4TYPE_MASK;

	if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
		mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;

	/* Copy input into formatted structures */
	input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
	mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
	input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
	mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
	input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
	mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
	input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
	mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;

	if (fsp->flow_type & FLOW_EXT) {
		input->filter.formatted.vm_pool =
				(unsigned char)ntohl(fsp->h_ext.data[1]);
		mask.formatted.vm_pool =
				(unsigned char)ntohl(fsp->m_ext.data[1]);
		input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
		mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
		input->filter.formatted.flex_bytes =
						fsp->h_ext.vlan_etype;
		mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
	}

	/* determine if we need to drop or route the packet */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
		input->action = IXGBE_FDIR_DROP_QUEUE;
	else
		input->action = fsp->ring_cookie;

	spin_lock(&adapter->fdir_perfect_lock);

	if (hlist_empty(&adapter->fdir_filter_list)) {
		/* save mask and program input mask into HW */
		memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
		err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
		if (err) {
			e_err(drv, "Error writing mask\n");
			goto err_out_w_lock;
		}
	} else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
		e_err(drv, "Only one mask supported per port\n");
		goto err_out_w_lock;
	}

	/* apply mask and compute/store hash */
	ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);

	/* program filters to filter memory */
	err = ixgbe_fdir_write_perfect_filter_82599(hw,
				&input->filter, input->sw_idx, queue);
	if (err)
		goto err_out_w_lock;

	ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);

	spin_unlock(&adapter->fdir_perfect_lock);

	return err;
err_out_w_lock:
	spin_unlock(&adapter->fdir_perfect_lock);
err_out:
	kfree(input);
	return -EINVAL;
}
static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	int err;

	spin_lock(&adapter->fdir_perfect_lock);
	err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
	spin_unlock(&adapter->fdir_perfect_lock);

	return err;
}
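/* Example (illustrative; placeholder interface, port and queue): steer
 * TCP/IPv4 traffic for destination port 80 to Rx queue 6 with an n-tuple
 * perfect filter, then delete it again by location:
 *
 *	# ethtool -N eth0 flow-type tcp4 dst-port 80 action 6 loc 10
 *	# ethtool -N eth0 delete 10
 *
 * The first command reaches ixgbe_add_ethtool_fdir_entry() via
 * ETHTOOL_SRXCLSRLINS, the second ixgbe_del_ethtool_fdir_entry() via
 * ETHTOOL_SRXCLSRLDEL.
 */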
#define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \
		       IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter,
				  struct ethtool_rxnfc *nfc)
{
	u32 flags2 = adapter->flags2;

	/*
	 * RSS does not support anything other than hashing
	 * to queues on src and dst IPs and ports
	 */
	if (nfc->data & ~(RXH_IP_SRC | RXH_IP_DST |
			  RXH_L4_B_0_1 | RXH_L4_B_2_3))
		return -EINVAL;

	switch (nfc->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    !(nfc->data & RXH_L4_B_0_1) ||
		    !(nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	case UDP_V4_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV4_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST))
			return -EINVAL;
		switch (nfc->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			flags2 &= ~IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			flags2 |= IXGBE_FLAG2_RSS_FIELD_IPV6_UDP;
			break;
		default:
			return -EINVAL;
		}
		break;
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case SCTP_V6_FLOW:
		if (!(nfc->data & RXH_IP_SRC) ||
		    !(nfc->data & RXH_IP_DST) ||
		    (nfc->data & RXH_L4_B_0_1) ||
		    (nfc->data & RXH_L4_B_2_3))
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	/* if we changed something we need to update flags */
	if (flags2 != adapter->flags2) {
		struct ixgbe_hw *hw = &adapter->hw;
		u32 mrqc;
		unsigned int pf_pool = adapter->num_vfs;

		if ((hw->mac.type >= ixgbe_mac_X550) &&
		    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
			mrqc = IXGBE_READ_REG(hw, IXGBE_PFVFMRQC(pf_pool));
		else
			mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC);

		if ((flags2 & UDP_RSS_FLAGS) &&
		    !(adapter->flags2 & UDP_RSS_FLAGS))
			e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n");

		adapter->flags2 = flags2;

		/* Perform hash on these packet types */
		mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4
		      | IXGBE_MRQC_RSS_FIELD_IPV4_TCP
		      | IXGBE_MRQC_RSS_FIELD_IPV6
		      | IXGBE_MRQC_RSS_FIELD_IPV6_TCP;

		mrqc &= ~(IXGBE_MRQC_RSS_FIELD_IPV4_UDP |
			  IXGBE_MRQC_RSS_FIELD_IPV6_UDP);

		if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP)
			mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP;

		if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP)
			mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP;

		if ((hw->mac.type >= ixgbe_mac_X550) &&
		    (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED))
			IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), mrqc);
		else
			IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
	}

	return 0;
}
static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_SRXFH:
		ret = ixgbe_set_rss_hash_opt(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}
static u32 ixgbe_get_rxfh_key_size(struct net_device *netdev)
{
	return IXGBE_RSS_KEY_SIZE;
}

static u32 ixgbe_rss_indir_size(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	return ixgbe_rss_indir_tbl_entries(adapter);
}
static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir)
{
	int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter);
	u16 rss_m = adapter->ring_feature[RING_F_RSS].mask;

	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		rss_m = adapter->ring_feature[RING_F_RSS].indices - 1;

	for (i = 0; i < reta_size; i++)
		indir[i] = adapter->rss_indir_tbl[i] & rss_m;
}
static int ixgbe_get_rxfh(struct net_device *netdev,
			  struct ethtool_rxfh_param *rxfh)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	rxfh->hfunc = ETH_RSS_HASH_TOP;

	if (rxfh->indir)
		ixgbe_get_reta(adapter, rxfh->indir);

	if (rxfh->key)
		memcpy(rxfh->key, adapter->rss_key,
		       ixgbe_get_rxfh_key_size(netdev));

	return 0;
}
static int ixgbe_set_rxfh(struct net_device *netdev,
			  struct ethtool_rxfh_param *rxfh,
			  struct netlink_ext_ack *extack)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	int i;
	u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter);

	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    rxfh->hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	/* Fill out the redirection table */
	if (rxfh->indir) {
		int max_queues = min_t(int, adapter->num_rx_queues,
				       ixgbe_rss_indir_tbl_max(adapter));

		/*Allow at least 2 queues w/ SR-IOV.*/
		if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
		    (max_queues < 2))
			max_queues = 2;

		/* Verify user input. */
		for (i = 0; i < reta_entries; i++)
			if (rxfh->indir[i] >= max_queues)
				return -EINVAL;

		for (i = 0; i < reta_entries; i++)
			adapter->rss_indir_tbl[i] = rxfh->indir[i];

		ixgbe_store_reta(adapter);
	}

	/* Fill out the rss hash key */
	if (rxfh->key) {
		memcpy(adapter->rss_key, rxfh->key,
		       ixgbe_get_rxfh_key_size(netdev));
		ixgbe_store_key(adapter);
	}

	return 0;
}
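/* Example (illustrative): spread Rx traffic evenly over the first four
 * queues of a placeholder interface, or program a new hash key (key
 * bytes truncated here for brevity):
 *
 *	# ethtool -X eth0 equal 4
 *	# ethtool -X eth0 hkey 6d:5a:56:da:25:5b:0e:c2:...
 */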
static int ixgbe_get_ts_info(struct net_device *dev,
			     struct kernel_ethtool_ts_info *info)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);

	/* we always support timestamping disabled */
	info->rx_filters = BIT(HWTSTAMP_FILTER_NONE);

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_X550:
	case ixgbe_mac_X550EM_x:
	case ixgbe_mac_x550em_a:
		info->rx_filters |= BIT(HWTSTAMP_FILTER_ALL);
		break;
	case ixgbe_mac_X540:
	case ixgbe_mac_82599EB:
		info->rx_filters |=
			BIT(HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
			BIT(HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
			BIT(HWTSTAMP_FILTER_PTP_V2_EVENT);
		break;
	default:
		return ethtool_op_get_ts_info(dev, info);
	}

	info->so_timestamping =
		SOF_TIMESTAMPING_TX_SOFTWARE |
		SOF_TIMESTAMPING_TX_HARDWARE |
		SOF_TIMESTAMPING_RX_HARDWARE |
		SOF_TIMESTAMPING_RAW_HARDWARE;

	if (adapter->ptp_clock)
		info->phc_index = ptp_clock_index(adapter->ptp_clock);

	info->tx_types =
		BIT(HWTSTAMP_TX_OFF) |
		BIT(HWTSTAMP_TX_ON);

	return 0;
}
static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter)
{
	unsigned int max_combined;
	u8 tcs = adapter->hw_tcs;

	if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) {
		/* We only support one q_vector without MSI-X */
		max_combined = 1;
	} else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
		/* Limit value based on the queue mask */
		max_combined = adapter->ring_feature[RING_F_RSS].mask + 1;
	} else if (tcs > 1) {
		/* For DCB report channels per traffic class */
		if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
			/* 8 TC w/ 4 queues per TC */
			max_combined = 4;
		} else if (tcs > 4) {
			/* 8 TC w/ 8 queues per TC */
			max_combined = 8;
		} else {
			/* 4 TC w/ 16 queues per TC */
			max_combined = 16;
		}
	} else if (adapter->atr_sample_rate) {
		/* support up to 64 queues with ATR */
		max_combined = IXGBE_MAX_FDIR_INDICES;
	} else {
		/* support up to 16 queues with RSS */
		max_combined = ixgbe_max_rss_indices(adapter);
	}

	return min_t(int, max_combined, num_online_cpus());
}
static void ixgbe_get_channels(struct net_device *dev,
			       struct ethtool_channels *ch)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);

	/* report maximum channels */
	ch->max_combined = ixgbe_max_channels(adapter);

	/* report info for other vector */
	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
		ch->max_other = NON_Q_VECTORS;
		ch->other_count = NON_Q_VECTORS;
	}

	/* record RSS queues */
	ch->combined_count = adapter->ring_feature[RING_F_RSS].indices;

	/* nothing else to report if RSS is disabled */
	if (ch->combined_count == 1)
		return;

	/* we do not support ATR queueing if SR-IOV is enabled */
	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
		return;

	/* same thing goes for being DCB enabled */
	if (adapter->hw_tcs > 1)
		return;

	/* if ATR is disabled we can exit */
	if (!adapter->atr_sample_rate)
		return;

	/* report flow director queues as maximum channels */
	ch->combined_count = adapter->ring_feature[RING_F_FDIR].indices;
}
static int ixgbe_set_channels(struct net_device *dev,
			      struct ethtool_channels *ch)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	unsigned int count = ch->combined_count;
	u8 max_rss_indices = ixgbe_max_rss_indices(adapter);

	/* verify they are not requesting separate vectors */
	if (!count || ch->rx_count || ch->tx_count)
		return -EINVAL;

	/* verify other_count has not changed */
	if (ch->other_count != NON_Q_VECTORS)
		return -EINVAL;

	/* verify the number of channels does not exceed hardware limits */
	if (count > ixgbe_max_channels(adapter))
		return -EINVAL;

	/* update feature limits from largest to smallest supported values */
	adapter->ring_feature[RING_F_FDIR].limit = count;

	/* cap RSS limit */
	if (count > max_rss_indices)
		count = max_rss_indices;
	adapter->ring_feature[RING_F_RSS].limit = count;

#ifdef IXGBE_FCOE
	/* cap FCoE limit at 8 */
	if (count > IXGBE_FCRETA_SIZE)
		count = IXGBE_FCRETA_SIZE;
	adapter->ring_feature[RING_F_FCOE].limit = count;

#endif
	/* use setup TC to update any traffic class queue mapping */
	return ixgbe_setup_tc(dev, adapter->hw_tcs);
}
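/* Example (illustrative): query and resize the number of combined queue
 * pairs on a placeholder interface:
 *
 *	# ethtool -l eth0
 *	# ethtool -L eth0 combined 8
 *
 * The requested count is validated against ixgbe_max_channels() and
 * applied through ixgbe_setup_tc().
 */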
static int ixgbe_get_module_info(struct net_device *dev,
				 struct ethtool_modinfo *modinfo)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	u8 sff8472_rev, addr_mode;
	bool page_swap = false;
	int status;

	if (hw->phy.type == ixgbe_phy_fw)
		return -ENXIO;

	/* Check whether we support SFF-8472 or not */
	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_SFF_8472_COMP,
					     &sff8472_rev);
	if (status)
		return -EIO;

	/* addressing mode is not supported */
	status = hw->phy.ops.read_i2c_eeprom(hw,
					     IXGBE_SFF_SFF_8472_SWAP,
					     &addr_mode);
	if (status)
		return -EIO;

	if (addr_mode & IXGBE_SFF_ADDRESSING_MODE) {
		e_err(drv, "Address change required to access page 0xA2, but not supported. Please report the module type to the driver maintainers.\n");
		page_swap = true;
	}

	if (sff8472_rev == IXGBE_SFF_SFF_8472_UNSUP || page_swap ||
	    !(addr_mode & IXGBE_SFF_DDM_IMPLEMENTED)) {
		/* We have a SFP, but it does not support SFF-8472 */
		modinfo->type = ETH_MODULE_SFF_8079;
		modinfo->eeprom_len = ETH_MODULE_SFF_8079_LEN;
	} else {
		/* We have a SFP which supports a revision of SFF-8472. */
		modinfo->type = ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
	}

	return 0;
}
static int ixgbe_get_module_eeprom(struct net_device *dev,
				   struct ethtool_eeprom *ee,
				   u8 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_hw *hw = &adapter->hw;
	int status = -EFAULT;
	u8 databyte = 0xFF;
	int i = 0;

	if (ee->len == 0)
		return -EINVAL;

	if (hw->phy.type == ixgbe_phy_fw)
		return -ENXIO;

	for (i = ee->offset; i < ee->offset + ee->len; i++) {
		/* I2C reads can take long time */
		if (test_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
			return -EBUSY;

		if (i < ETH_MODULE_SFF_8079_LEN)
			status = hw->phy.ops.read_i2c_eeprom(hw, i, &databyte);
		else
			status = hw->phy.ops.read_i2c_sff8472(hw, i, &databyte);

		if (status)
			return -EIO;

		data[i - ee->offset] = databyte;
	}

	return 0;
}
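/* Example (illustrative): dump and decode the SFP+ module EEPROM on a
 * placeholder interface:
 *
 *	# ethtool -m eth0
 *
 * ethtool decides how much to read (SFF-8079 vs SFF-8472) from the
 * modinfo reported by ixgbe_get_module_info() above.
 */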
static const struct {
	ixgbe_link_speed mac_speed;
	u32 link_mode;
} ixgbe_ls_map[] = {
	{ IXGBE_LINK_SPEED_10_FULL, ETHTOOL_LINK_MODE_10baseT_Full_BIT },
	{ IXGBE_LINK_SPEED_100_FULL, ETHTOOL_LINK_MODE_100baseT_Full_BIT },
	{ IXGBE_LINK_SPEED_1GB_FULL, ETHTOOL_LINK_MODE_1000baseT_Full_BIT },
	{ IXGBE_LINK_SPEED_2_5GB_FULL, ETHTOOL_LINK_MODE_2500baseX_Full_BIT },
	{ IXGBE_LINK_SPEED_10GB_FULL, ETHTOOL_LINK_MODE_10000baseT_Full_BIT },
};

static const struct {
	u32 lp_advertised;
	u32 link_mode;
} ixgbe_lp_map[] = {
	{ FW_PHY_ACT_UD_2_100M_TX_EEE, ETHTOOL_LINK_MODE_100baseT_Full_BIT },
	{ FW_PHY_ACT_UD_2_1G_T_EEE, ETHTOOL_LINK_MODE_1000baseT_Full_BIT },
	{ FW_PHY_ACT_UD_2_10G_T_EEE, ETHTOOL_LINK_MODE_10000baseT_Full_BIT },
	{ FW_PHY_ACT_UD_2_1G_KX_EEE, ETHTOOL_LINK_MODE_1000baseKX_Full_BIT },
	{ FW_PHY_ACT_UD_2_10G_KX4_EEE, ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT },
	{ FW_PHY_ACT_UD_2_10G_KR_EEE, ETHTOOL_LINK_MODE_10000baseKR_Full_BIT },
};
static int
ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_keee *edata)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(common);
	u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 };
	struct ixgbe_hw *hw = &adapter->hw;
	int rc;
	u16 i;

	rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, &info);
	if (rc)
		return rc;

	for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) {
		if (info[0] & ixgbe_lp_map[i].lp_advertised)
			linkmode_set_bit(ixgbe_lp_map[i].link_mode,
					 edata->lp_advertised);
	}

	for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
		if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed)
			linkmode_set_bit(ixgbe_lp_map[i].link_mode,
					 edata->supported);
	}

	for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) {
		if (hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed)
			linkmode_set_bit(ixgbe_lp_map[i].link_mode,
					 edata->advertised);
	}

	edata->eee_enabled = !linkmode_empty(edata->advertised);
	edata->tx_lpi_enabled = edata->eee_enabled;

	linkmode_and(common, edata->advertised, edata->lp_advertised);
	edata->eee_active = !linkmode_empty(common);

	return 0;
}
static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
		return -EOPNOTSUPP;

	if (hw->phy.eee_speeds_supported && hw->phy.type == ixgbe_phy_fw)
		return ixgbe_get_eee_fw(adapter, edata);

	return -EOPNOTSUPP;
}
static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_keee *edata)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ethtool_keee eee_data;
	int ret_val;

	if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE))
		return -EOPNOTSUPP;

	memset(&eee_data, 0, sizeof(struct ethtool_keee));

	ret_val = ixgbe_get_eee(netdev, &eee_data);
	if (ret_val)
		return ret_val;

	if (eee_data.eee_enabled && !edata->eee_enabled) {
		if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) {
			e_err(drv, "Setting EEE tx-lpi is not supported\n");
			return -EINVAL;
		}

		if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) {
			e_err(drv,
			      "Setting EEE Tx LPI timer is not supported\n");
			return -EINVAL;
		}

		if (!linkmode_equal(eee_data.advertised, edata->advertised)) {
			e_err(drv,
			      "Setting EEE advertised speeds is not supported\n");
			return -EINVAL;
		}
	}

	if (eee_data.eee_enabled != edata->eee_enabled) {
		if (edata->eee_enabled) {
			adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED;
			hw->phy.eee_speeds_advertised =
						   hw->phy.eee_speeds_supported;
		} else {
			adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED;
			hw->phy.eee_speeds_advertised = 0;
		}

		/* reset link */
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
		else
			ixgbe_reset(adapter);
	}

	return 0;
}
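/* Example (illustrative): query and enable Energy-Efficient Ethernet on
 * a placeholder interface with a firmware-controlled PHY:
 *
 *	# ethtool --show-eee eth0
 *	# ethtool --set-eee eth0 eee on
 */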
static u32 ixgbe_get_priv_flags(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 priv_flags = 0;

	if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY)
		priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX;

	if (adapter->flags2 & IXGBE_FLAG2_VF_IPSEC_ENABLED)
		priv_flags |= IXGBE_PRIV_FLAGS_VF_IPSEC_EN;

	if (adapter->flags2 & IXGBE_FLAG2_AUTO_DISABLE_VF)
		priv_flags |= IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF;

	return priv_flags;
}
static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	unsigned int flags2 = adapter->flags2;
	unsigned int i;

	flags2 &= ~IXGBE_FLAG2_RX_LEGACY;
	if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX)
		flags2 |= IXGBE_FLAG2_RX_LEGACY;

	flags2 &= ~IXGBE_FLAG2_VF_IPSEC_ENABLED;
	if (priv_flags & IXGBE_PRIV_FLAGS_VF_IPSEC_EN)
		flags2 |= IXGBE_FLAG2_VF_IPSEC_ENABLED;

	flags2 &= ~IXGBE_FLAG2_AUTO_DISABLE_VF;
	if (priv_flags & IXGBE_PRIV_FLAGS_AUTO_DISABLE_VF) {
		if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
			/* Reset primary abort counter */
			for (i = 0; i < adapter->num_vfs; i++)
				adapter->vfinfo[i].primary_abort_count = 0;

			flags2 |= IXGBE_FLAG2_AUTO_DISABLE_VF;
		} else {
			e_info(probe,
			       "Cannot set private flags: Operation not supported\n");
			return -EOPNOTSUPP;
		}
	}

	if (flags2 != adapter->flags2) {
		adapter->flags2 = flags2;

		/* reset interface to repopulate queues */
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
	}

	return 0;
}
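/* Example (illustrative): inspect and toggle the driver's private flags
 * on a placeholder interface:
 *
 *	# ethtool --show-priv-flags eth0
 *	# ethtool --set-priv-flags eth0 legacy-rx on
 *
 * The user-visible flag names are assumed to come from the
 * priv-flags string table defined earlier in this file.
 */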
static const struct ethtool_ops ixgbe_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS,
	.get_drvinfo		= ixgbe_get_drvinfo,
	.get_regs_len		= ixgbe_get_regs_len,
	.get_regs		= ixgbe_get_regs,
	.get_wol		= ixgbe_get_wol,
	.set_wol		= ixgbe_set_wol,
	.nway_reset		= ixgbe_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= ixgbe_get_eeprom_len,
	.get_eeprom		= ixgbe_get_eeprom,
	.set_eeprom		= ixgbe_set_eeprom,
	.get_ringparam		= ixgbe_get_ringparam,
	.set_ringparam		= ixgbe_set_ringparam,
	.get_pause_stats	= ixgbe_get_pause_stats,
	.get_pauseparam		= ixgbe_get_pauseparam,
	.set_pauseparam		= ixgbe_set_pauseparam,
	.get_msglevel		= ixgbe_get_msglevel,
	.set_msglevel		= ixgbe_set_msglevel,
	.self_test		= ixgbe_diag_test,
	.get_strings		= ixgbe_get_strings,
	.set_phys_id		= ixgbe_set_phys_id,
	.get_sset_count		= ixgbe_get_sset_count,
	.get_ethtool_stats	= ixgbe_get_ethtool_stats,
	.get_coalesce		= ixgbe_get_coalesce,
	.set_coalesce		= ixgbe_set_coalesce,
	.get_rxnfc		= ixgbe_get_rxnfc,
	.set_rxnfc		= ixgbe_set_rxnfc,
	.get_rxfh_indir_size	= ixgbe_rss_indir_size,
	.get_rxfh_key_size	= ixgbe_get_rxfh_key_size,
	.get_rxfh		= ixgbe_get_rxfh,
	.set_rxfh		= ixgbe_set_rxfh,
	.get_eee		= ixgbe_get_eee,
	.set_eee		= ixgbe_set_eee,
	.get_channels		= ixgbe_get_channels,
	.set_channels		= ixgbe_set_channels,
	.get_priv_flags		= ixgbe_get_priv_flags,
	.set_priv_flags		= ixgbe_set_priv_flags,
	.get_ts_info		= ixgbe_get_ts_info,
	.get_module_info	= ixgbe_get_module_info,
	.get_module_eeprom	= ixgbe_get_module_eeprom,
	.get_link_ksettings	= ixgbe_get_link_ksettings,
	.set_link_ksettings	= ixgbe_set_link_ksettings,
};

void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ixgbe_ethtool_ops;
}