/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
/* ethtool support for ixgbe */

#include <linux/interrupt.h>
#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>

#include "ixgbe.h"
#define IXGBE_ALL_RAR_ENTRIES 16

enum {NETDEV_STATS, IXGBE_STATS};

struct ixgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};

#define IXGBE_STAT(m)		IXGBE_STATS, \
				sizeof(((struct ixgbe_adapter *)0)->m), \
				offsetof(struct ixgbe_adapter, m)
#define IXGBE_NETDEV_STAT(m)	NETDEV_STATS, \
				sizeof(((struct rtnl_link_stats64 *)0)->m), \
				offsetof(struct rtnl_link_stats64, m)
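
/*
 * Illustrative note (not part of the upstream driver): an entry such as
 * {"lsc_int", IXGBE_STAT(lsc_int)} in the table below expands, via the macro
 * above, to
 *
 *	{ "lsc_int", IXGBE_STATS,
 *	  sizeof(((struct ixgbe_adapter *)0)->lsc_int),
 *	  offsetof(struct ixgbe_adapter, lsc_int) },
 *
 * i.e. each row records where the counter lives (adapter vs. netdev stats),
 * how wide it is (u32 vs. u64) and its byte offset, which is exactly what
 * ixgbe_get_ethtool_stats() needs to copy the value out generically.
 */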
static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
	{"rx_packets", IXGBE_NETDEV_STAT(rx_packets)},
	{"tx_packets", IXGBE_NETDEV_STAT(tx_packets)},
	{"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)},
	{"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)},
	{"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
	{"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
	{"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
	{"tx_bytes_nic", IXGBE_STAT(stats.gotc)},
	{"lsc_int", IXGBE_STAT(lsc_int)},
	{"tx_busy", IXGBE_STAT(tx_busy)},
	{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
	{"rx_errors", IXGBE_NETDEV_STAT(rx_errors)},
	{"tx_errors", IXGBE_NETDEV_STAT(tx_errors)},
	{"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)},
	{"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)},
	{"multicast", IXGBE_NETDEV_STAT(multicast)},
	{"broadcast", IXGBE_STAT(stats.bprc)},
	{"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
	{"collisions", IXGBE_NETDEV_STAT(collisions)},
	{"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)},
	{"rx_crc_errors", IXGBE_NETDEV_STAT(rx_crc_errors)},
	{"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)},
	{"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)},
	{"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)},
	{"fdir_match", IXGBE_STAT(stats.fdirmatch)},
	{"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
	{"fdir_overflow", IXGBE_STAT(fdir_overflow)},
	{"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)},
	{"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)},
	{"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)},
	{"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)},
	{"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)},
	{"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)},
	{"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
	{"tx_restart_queue", IXGBE_STAT(restart_queue)},
	{"rx_long_length_errors", IXGBE_STAT(stats.roc)},
	{"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
	{"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
	{"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
	{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
	{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
	{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
	{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
	{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
	{"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)},
	{"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)},
	{"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)},
	{"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)},
	{"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)},
#ifdef IXGBE_FCOE
	{"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)},
	{"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)},
	{"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)},
	{"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)},
	{"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)},
	{"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)},
	{"tx_fcoe_packets", IXGBE_STAT(stats.fcoeptc)},
	{"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)},
#endif /* IXGBE_FCOE */
};
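
/*
 * Note (illustrative): ixgbe_get_ethtool_stats() walks this table row by row,
 * picking a base pointer from .type (the adapter structure or the netdev's
 * rtnl_link_stats64), adding .stat_offset and copying either a u32 or a u64
 * according to .sizeof_stat.  Adding a new `ethtool -S` counter therefore
 * means adding the counter field itself plus exactly one row here; the FCoE
 * rows only exist when the driver is built with IXGBE_FCOE, so the length
 * macros below stay consistent at compile time.
 */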
/* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so
 * we set the num_rx_queues to evaluate to num_tx_queues. This is
 * used because we do not have a good way to get the max number of
 * rx queues with CONFIG_RPS disabled.
 */
#define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues

#define IXGBE_QUEUE_STATS_LEN ( \
	(netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \
	(sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)
#define IXGBE_PB_STATS_LEN ( \
			(sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \
			 sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \
			/ sizeof(u64))
#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \
			 IXGBE_PB_STATS_LEN + \
			 IXGBE_QUEUE_STATS_LEN)
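
/*
 * Worked example (illustrative; assumes a 64-queue netdev and that the
 * pxon/pxoff counters are arrays of eight u64s): IXGBE_QUEUE_STATS_LEN counts
 * two u64s (packets, bytes) per Tx and per Rx queue, 64 * 2 + 64 * 2 = 256
 * entries; IXGBE_PB_STATS_LEN counts the four 8-entry xon/xoff arrays,
 * 4 * 8 = 32; IXGBE_STATS_LEN is then ARRAY_SIZE(ixgbe_gstrings_stats) +
 * 256 + 32, and it must match the number of strings emitted by
 * ixgbe_get_strings() exactly.
 */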
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test (offline)", "Eeprom test (offline)",
	"Interrupt test (offline)", "Loopback test (offline)",
	"Link test (on/offline)"
};
#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN
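
/*
 * Note (illustrative): the five strings above line up, in order, with the
 * data[0]..data[4] slots filled in by ixgbe_diag_test() further down.
 * Register, EEPROM, interrupt and loopback results are only produced for
 * offline runs (`ethtool -t ethX offline`), while the link test also runs
 * online.  IXGBE_TEST_LEN is derived from the array so the two stay in sync.
 */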
static int ixgbe_get_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	ixgbe_link_speed link_speed = 0;
	bool link_up;

	ecmd->supported = SUPPORTED_10000baseT_Full;
	ecmd->autoneg = AUTONEG_ENABLE;
	ecmd->transceiver = XCVR_EXTERNAL;
	if ((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber)) {
		ecmd->supported |= (SUPPORTED_1000baseT_Full |
				    SUPPORTED_Autoneg);

		switch (hw->mac.type) {
		case ixgbe_mac_X540:
			ecmd->supported |= SUPPORTED_100baseT_Full;
			break;
		default:
			break;
		}

		ecmd->advertising = ADVERTISED_Autoneg;
		if (hw->phy.autoneg_advertised) {
			if (hw->phy.autoneg_advertised &
			    IXGBE_LINK_SPEED_100_FULL)
				ecmd->advertising |= ADVERTISED_100baseT_Full;
			if (hw->phy.autoneg_advertised &
			    IXGBE_LINK_SPEED_10GB_FULL)
				ecmd->advertising |= ADVERTISED_10000baseT_Full;
			if (hw->phy.autoneg_advertised &
			    IXGBE_LINK_SPEED_1GB_FULL)
				ecmd->advertising |= ADVERTISED_1000baseT_Full;
		} else {
			/*
			 * Default advertised modes in case
			 * phy.autoneg_advertised isn't set.
			 */
			ecmd->advertising |= (ADVERTISED_10000baseT_Full |
					      ADVERTISED_1000baseT_Full);
			if (hw->mac.type == ixgbe_mac_X540)
				ecmd->advertising |= ADVERTISED_100baseT_Full;
		}

		if (hw->phy.media_type == ixgbe_media_type_copper) {
			ecmd->supported |= SUPPORTED_TP;
			ecmd->advertising |= ADVERTISED_TP;
			ecmd->port = PORT_TP;
		} else {
			ecmd->supported |= SUPPORTED_FIBRE;
			ecmd->advertising |= ADVERTISED_FIBRE;
			ecmd->port = PORT_FIBRE;
		}
	} else if (hw->phy.media_type == ixgbe_media_type_backplane) {
		/* Set as FIBRE until SERDES defined in kernel */
		if (hw->device_id == IXGBE_DEV_ID_82598_BX) {
			ecmd->supported = (SUPPORTED_1000baseT_Full |
					   SUPPORTED_FIBRE);
			ecmd->advertising = (ADVERTISED_1000baseT_Full |
					     ADVERTISED_FIBRE);
			ecmd->port = PORT_FIBRE;
			ecmd->autoneg = AUTONEG_DISABLE;
		} else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) ||
			   (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) {
			ecmd->supported |= (SUPPORTED_1000baseT_Full |
					    SUPPORTED_10000baseT_Full |
					    SUPPORTED_Autoneg);
			ecmd->advertising = (ADVERTISED_10000baseT_Full |
					     ADVERTISED_1000baseT_Full |
					     ADVERTISED_Autoneg |
					     ADVERTISED_FIBRE);
			ecmd->port = PORT_FIBRE;
		} else {
			ecmd->supported |= (SUPPORTED_1000baseT_Full |
					    SUPPORTED_FIBRE);
			ecmd->advertising = (ADVERTISED_10000baseT_Full |
					     ADVERTISED_1000baseT_Full |
					     ADVERTISED_FIBRE);
			ecmd->port = PORT_FIBRE;
		}
	} else {
		ecmd->supported |= SUPPORTED_FIBRE;
		ecmd->advertising = (ADVERTISED_10000baseT_Full |
				     ADVERTISED_FIBRE);
		ecmd->port = PORT_FIBRE;
		ecmd->autoneg = AUTONEG_DISABLE;
	}

	/* Get PHY type */
	switch (adapter->hw.phy.type) {
	case ixgbe_phy_tn:
	case ixgbe_phy_aq:
	case ixgbe_phy_cu_unknown:
		/* Copper 10G-BASET */
		ecmd->port = PORT_TP;
		break;
	case ixgbe_phy_qt:
		ecmd->port = PORT_FIBRE;
		break;
	case ixgbe_phy_nl:
	case ixgbe_phy_sfp_passive_tyco:
	case ixgbe_phy_sfp_passive_unknown:
	case ixgbe_phy_sfp_ftl:
	case ixgbe_phy_sfp_avago:
	case ixgbe_phy_sfp_intel:
	case ixgbe_phy_sfp_unknown:
		switch (adapter->hw.phy.sfp_type) {
		/* SFP+ devices, further checking needed */
		case ixgbe_sfp_type_da_cu:
		case ixgbe_sfp_type_da_cu_core0:
		case ixgbe_sfp_type_da_cu_core1:
			ecmd->port = PORT_DA;
			break;
		case ixgbe_sfp_type_sr:
		case ixgbe_sfp_type_lr:
		case ixgbe_sfp_type_srlr_core0:
		case ixgbe_sfp_type_srlr_core1:
			ecmd->port = PORT_FIBRE;
			break;
		case ixgbe_sfp_type_not_present:
			ecmd->port = PORT_NONE;
			break;
		case ixgbe_sfp_type_1g_cu_core0:
		case ixgbe_sfp_type_1g_cu_core1:
			ecmd->port = PORT_TP;
			ecmd->supported = SUPPORTED_TP;
			ecmd->advertising = (ADVERTISED_1000baseT_Full |
					     ADVERTISED_TP);
			break;
		case ixgbe_sfp_type_unknown:
		default:
			ecmd->port = PORT_OTHER;
			break;
		}
		break;
	case ixgbe_phy_xaui:
		ecmd->port = PORT_NONE;
		break;
	case ixgbe_phy_unknown:
	case ixgbe_phy_generic:
	case ixgbe_phy_sfp_unsupported:
	default:
		ecmd->port = PORT_OTHER;
		break;
	}

	hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
	if (link_up) {
		switch (link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			ethtool_cmd_speed_set(ecmd, SPEED_10000);
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			ethtool_cmd_speed_set(ecmd, SPEED_1000);
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			ethtool_cmd_speed_set(ecmd, SPEED_100);
			break;
		default:
			break;
		}
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(ecmd, -1);
		ecmd->duplex = -1;
	}

	return 0;
}
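
/*
 * Illustrative user-space sketch (not driver code): this is roughly how the
 * ethtool utility reaches ixgbe_get_settings() above — the SIOCETHTOOL ioctl
 * with ETHTOOL_GSET fills a struct ethtool_cmd from the fields set here.
 * The interface name "eth0" is only an example.
 */
#if 0	/* example only */
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

static int show_link_settings(const char *ifname)
{
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return -1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ecmd;
	if (ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("%s: speed %u Mb/s, port %u, autoneg %u\n",
		       ifname, ethtool_cmd_speed(&ecmd), ecmd.port,
		       ecmd.autoneg);
	close(fd);
	return 0;
}
#endif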
static int ixgbe_set_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 advertised, old;
	s32 err = 0;

	if ((hw->phy.media_type == ixgbe_media_type_copper) ||
	    (hw->phy.multispeed_fiber)) {
		/*
		 * this function does not support duplex forcing, but can
		 * limit the advertising of the adapter to the specified speed
		 */
		if (ecmd->autoneg == AUTONEG_DISABLE)
			return -EINVAL;

		if (ecmd->advertising & ~ecmd->supported)
			return -EINVAL;

		old = hw->phy.autoneg_advertised;
		advertised = 0;
		if (ecmd->advertising & ADVERTISED_10000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_10GB_FULL;

		if (ecmd->advertising & ADVERTISED_1000baseT_Full)
			advertised |= IXGBE_LINK_SPEED_1GB_FULL;

		if (ecmd->advertising & ADVERTISED_100baseT_Full)
			advertised |= IXGBE_LINK_SPEED_100_FULL;

		if (old == advertised)
			return err;
		/* this sets the link speed and restarts auto-neg */
		hw->mac.autotry_restart = true;
		err = hw->mac.ops.setup_link(hw, advertised, true, true);
		if (err) {
			e_info(probe, "setup link failed with code %d\n", err);
			hw->mac.ops.setup_link(hw, old, true, true);
		}
	} else {
		/* in this case we currently only support 10Gb/FULL */
		u32 speed = ethtool_cmd_speed(ecmd);
		if ((ecmd->autoneg == AUTONEG_ENABLE) ||
		    (ecmd->advertising != ADVERTISED_10000baseT_Full) ||
		    (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL))
			return -EINVAL;
	}

	return err;
}
static void ixgbe_get_pauseparam(struct net_device *netdev,
				 struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (hw->fc.disable_fc_autoneg)
		pause->autoneg = 0;
	else
		pause->autoneg = 1;

	if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
		pause->rx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
		pause->tx_pause = 1;
	} else if (hw->fc.current_mode == ixgbe_fc_full) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
#ifdef CONFIG_DCB
	} else if (hw->fc.current_mode == ixgbe_fc_pfc) {
		pause->rx_pause = 0;
		pause->tx_pause = 0;
#endif
	}
}
static int ixgbe_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fc_info fc;

#ifdef CONFIG_DCB
	if (adapter->dcb_cfg.pfc_mode_enable ||
	    ((hw->mac.type == ixgbe_mac_82598EB) &&
	     (adapter->flags & IXGBE_FLAG_DCB_ENABLED)))
		return -EINVAL;

#endif
	fc = hw->fc;

	if (pause->autoneg != AUTONEG_ENABLE)
		fc.disable_fc_autoneg = true;
	else
		fc.disable_fc_autoneg = false;

	if ((pause->rx_pause && pause->tx_pause) || pause->autoneg)
		fc.requested_mode = ixgbe_fc_full;
	else if (pause->rx_pause && !pause->tx_pause)
		fc.requested_mode = ixgbe_fc_rx_pause;
	else if (!pause->rx_pause && pause->tx_pause)
		fc.requested_mode = ixgbe_fc_tx_pause;
	else if (!pause->rx_pause && !pause->tx_pause)
		fc.requested_mode = ixgbe_fc_none;
	else
		return -EINVAL;

#ifdef CONFIG_DCB
	adapter->last_lfc_mode = fc.requested_mode;
#endif

	/* if the thing changed then we'll update and use new autoneg */
	if (memcmp(&fc, &hw->fc, sizeof(struct ixgbe_fc_info))) {
		hw->fc = fc;
		if (netif_running(netdev))
			ixgbe_reinit_locked(adapter);
		else
			ixgbe_reset(adapter);
	}

	return 0;
}
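
/*
 * Quick reference (illustrative): how the requested `ethtool -A` settings map
 * to fc.requested_mode in ixgbe_set_pauseparam() above:
 *
 *	autoneg on (any rx/tx)	-> ixgbe_fc_full (negotiated with the partner)
 *	rx on,  tx on		-> ixgbe_fc_full
 *	rx on,  tx off		-> ixgbe_fc_rx_pause
 *	rx off, tx on		-> ixgbe_fc_tx_pause
 *	rx off, tx off		-> ixgbe_fc_none
 *
 * A changed configuration triggers ixgbe_reinit_locked()/ixgbe_reset() so the
 * MAC re-reads hw->fc.
 */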
static u32 ixgbe_get_msglevel(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->msg_enable;
}

static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	adapter->msg_enable = data;
}
static int ixgbe_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN  1129
	return IXGBE_REGS_LEN * sizeof(u32);
}

#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
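
/*
 * Note (illustrative): IXGBE_REGS_LEN of 1129 32-bit words means
 * `ethtool -d ethX` hands user space a 4516-byte blob; ixgbe_get_regs() below
 * fills it with the general, interrupt, flow-control, ring, MAC, statistics
 * and diagnostic registers in a fixed order that ethtool's ixgbe dump parser
 * expects, reading EICS instead of EICR so the dump does not clear pending
 * interrupt causes.
 */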
469 static void ixgbe_get_regs(struct net_device
*netdev
,
470 struct ethtool_regs
*regs
, void *p
)
472 struct ixgbe_adapter
*adapter
= netdev_priv(netdev
);
473 struct ixgbe_hw
*hw
= &adapter
->hw
;
477 memset(p
, 0, IXGBE_REGS_LEN
* sizeof(u32
));
479 regs
->version
= (1 << 24) | hw
->revision_id
<< 16 | hw
->device_id
;
481 /* General Registers */
482 regs_buff
[0] = IXGBE_READ_REG(hw
, IXGBE_CTRL
);
483 regs_buff
[1] = IXGBE_READ_REG(hw
, IXGBE_STATUS
);
484 regs_buff
[2] = IXGBE_READ_REG(hw
, IXGBE_CTRL_EXT
);
485 regs_buff
[3] = IXGBE_READ_REG(hw
, IXGBE_ESDP
);
486 regs_buff
[4] = IXGBE_READ_REG(hw
, IXGBE_EODSDP
);
487 regs_buff
[5] = IXGBE_READ_REG(hw
, IXGBE_LEDCTL
);
488 regs_buff
[6] = IXGBE_READ_REG(hw
, IXGBE_FRTIMER
);
489 regs_buff
[7] = IXGBE_READ_REG(hw
, IXGBE_TCPTIMER
);
492 regs_buff
[8] = IXGBE_READ_REG(hw
, IXGBE_EEC
);
493 regs_buff
[9] = IXGBE_READ_REG(hw
, IXGBE_EERD
);
494 regs_buff
[10] = IXGBE_READ_REG(hw
, IXGBE_FLA
);
495 regs_buff
[11] = IXGBE_READ_REG(hw
, IXGBE_EEMNGCTL
);
496 regs_buff
[12] = IXGBE_READ_REG(hw
, IXGBE_EEMNGDATA
);
497 regs_buff
[13] = IXGBE_READ_REG(hw
, IXGBE_FLMNGCTL
);
498 regs_buff
[14] = IXGBE_READ_REG(hw
, IXGBE_FLMNGDATA
);
499 regs_buff
[15] = IXGBE_READ_REG(hw
, IXGBE_FLMNGCNT
);
500 regs_buff
[16] = IXGBE_READ_REG(hw
, IXGBE_FLOP
);
501 regs_buff
[17] = IXGBE_READ_REG(hw
, IXGBE_GRC
);
504 /* don't read EICR because it can clear interrupt causes, instead
505 * read EICS which is a shadow but doesn't clear EICR */
506 regs_buff
[18] = IXGBE_READ_REG(hw
, IXGBE_EICS
);
507 regs_buff
[19] = IXGBE_READ_REG(hw
, IXGBE_EICS
);
508 regs_buff
[20] = IXGBE_READ_REG(hw
, IXGBE_EIMS
);
509 regs_buff
[21] = IXGBE_READ_REG(hw
, IXGBE_EIMC
);
510 regs_buff
[22] = IXGBE_READ_REG(hw
, IXGBE_EIAC
);
511 regs_buff
[23] = IXGBE_READ_REG(hw
, IXGBE_EIAM
);
512 regs_buff
[24] = IXGBE_READ_REG(hw
, IXGBE_EITR(0));
513 regs_buff
[25] = IXGBE_READ_REG(hw
, IXGBE_IVAR(0));
514 regs_buff
[26] = IXGBE_READ_REG(hw
, IXGBE_MSIXT
);
515 regs_buff
[27] = IXGBE_READ_REG(hw
, IXGBE_MSIXPBA
);
516 regs_buff
[28] = IXGBE_READ_REG(hw
, IXGBE_PBACL(0));
517 regs_buff
[29] = IXGBE_READ_REG(hw
, IXGBE_GPIE
);
520 regs_buff
[30] = IXGBE_READ_REG(hw
, IXGBE_PFCTOP
);
521 regs_buff
[31] = IXGBE_READ_REG(hw
, IXGBE_FCTTV(0));
522 regs_buff
[32] = IXGBE_READ_REG(hw
, IXGBE_FCTTV(1));
523 regs_buff
[33] = IXGBE_READ_REG(hw
, IXGBE_FCTTV(2));
524 regs_buff
[34] = IXGBE_READ_REG(hw
, IXGBE_FCTTV(3));
525 for (i
= 0; i
< 8; i
++) {
526 switch (hw
->mac
.type
) {
527 case ixgbe_mac_82598EB
:
528 regs_buff
[35 + i
] = IXGBE_READ_REG(hw
, IXGBE_FCRTL(i
));
529 regs_buff
[43 + i
] = IXGBE_READ_REG(hw
, IXGBE_FCRTH(i
));
531 case ixgbe_mac_82599EB
:
533 regs_buff
[35 + i
] = IXGBE_READ_REG(hw
, IXGBE_FCRTL_82599(i
));
534 regs_buff
[43 + i
] = IXGBE_READ_REG(hw
, IXGBE_FCRTH_82599(i
));
540 regs_buff
[51] = IXGBE_READ_REG(hw
, IXGBE_FCRTV
);
541 regs_buff
[52] = IXGBE_READ_REG(hw
, IXGBE_TFCS
);
544 for (i
= 0; i
< 64; i
++)
545 regs_buff
[53 + i
] = IXGBE_READ_REG(hw
, IXGBE_RDBAL(i
));
546 for (i
= 0; i
< 64; i
++)
547 regs_buff
[117 + i
] = IXGBE_READ_REG(hw
, IXGBE_RDBAH(i
));
548 for (i
= 0; i
< 64; i
++)
549 regs_buff
[181 + i
] = IXGBE_READ_REG(hw
, IXGBE_RDLEN(i
));
550 for (i
= 0; i
< 64; i
++)
551 regs_buff
[245 + i
] = IXGBE_READ_REG(hw
, IXGBE_RDH(i
));
552 for (i
= 0; i
< 64; i
++)
553 regs_buff
[309 + i
] = IXGBE_READ_REG(hw
, IXGBE_RDT(i
));
554 for (i
= 0; i
< 64; i
++)
555 regs_buff
[373 + i
] = IXGBE_READ_REG(hw
, IXGBE_RXDCTL(i
));
556 for (i
= 0; i
< 16; i
++)
557 regs_buff
[437 + i
] = IXGBE_READ_REG(hw
, IXGBE_SRRCTL(i
));
558 for (i
= 0; i
< 16; i
++)
559 regs_buff
[453 + i
] = IXGBE_READ_REG(hw
, IXGBE_DCA_RXCTRL(i
));
560 regs_buff
[469] = IXGBE_READ_REG(hw
, IXGBE_RDRXCTL
);
561 for (i
= 0; i
< 8; i
++)
562 regs_buff
[470 + i
] = IXGBE_READ_REG(hw
, IXGBE_RXPBSIZE(i
));
563 regs_buff
[478] = IXGBE_READ_REG(hw
, IXGBE_RXCTRL
);
564 regs_buff
[479] = IXGBE_READ_REG(hw
, IXGBE_DROPEN
);
567 regs_buff
[480] = IXGBE_READ_REG(hw
, IXGBE_RXCSUM
);
568 regs_buff
[481] = IXGBE_READ_REG(hw
, IXGBE_RFCTL
);
569 for (i
= 0; i
< 16; i
++)
570 regs_buff
[482 + i
] = IXGBE_READ_REG(hw
, IXGBE_RAL(i
));
571 for (i
= 0; i
< 16; i
++)
572 regs_buff
[498 + i
] = IXGBE_READ_REG(hw
, IXGBE_RAH(i
));
573 regs_buff
[514] = IXGBE_READ_REG(hw
, IXGBE_PSRTYPE(0));
574 regs_buff
[515] = IXGBE_READ_REG(hw
, IXGBE_FCTRL
);
575 regs_buff
[516] = IXGBE_READ_REG(hw
, IXGBE_VLNCTRL
);
576 regs_buff
[517] = IXGBE_READ_REG(hw
, IXGBE_MCSTCTRL
);
577 regs_buff
[518] = IXGBE_READ_REG(hw
, IXGBE_MRQC
);
578 regs_buff
[519] = IXGBE_READ_REG(hw
, IXGBE_VMD_CTL
);
579 for (i
= 0; i
< 8; i
++)
580 regs_buff
[520 + i
] = IXGBE_READ_REG(hw
, IXGBE_IMIR(i
));
581 for (i
= 0; i
< 8; i
++)
582 regs_buff
[528 + i
] = IXGBE_READ_REG(hw
, IXGBE_IMIREXT(i
));
583 regs_buff
[536] = IXGBE_READ_REG(hw
, IXGBE_IMIRVP
);
586 for (i
= 0; i
< 32; i
++)
587 regs_buff
[537 + i
] = IXGBE_READ_REG(hw
, IXGBE_TDBAL(i
));
588 for (i
= 0; i
< 32; i
++)
589 regs_buff
[569 + i
] = IXGBE_READ_REG(hw
, IXGBE_TDBAH(i
));
590 for (i
= 0; i
< 32; i
++)
591 regs_buff
[601 + i
] = IXGBE_READ_REG(hw
, IXGBE_TDLEN(i
));
592 for (i
= 0; i
< 32; i
++)
593 regs_buff
[633 + i
] = IXGBE_READ_REG(hw
, IXGBE_TDH(i
));
594 for (i
= 0; i
< 32; i
++)
595 regs_buff
[665 + i
] = IXGBE_READ_REG(hw
, IXGBE_TDT(i
));
596 for (i
= 0; i
< 32; i
++)
597 regs_buff
[697 + i
] = IXGBE_READ_REG(hw
, IXGBE_TXDCTL(i
));
598 for (i
= 0; i
< 32; i
++)
599 regs_buff
[729 + i
] = IXGBE_READ_REG(hw
, IXGBE_TDWBAL(i
));
600 for (i
= 0; i
< 32; i
++)
601 regs_buff
[761 + i
] = IXGBE_READ_REG(hw
, IXGBE_TDWBAH(i
));
602 regs_buff
[793] = IXGBE_READ_REG(hw
, IXGBE_DTXCTL
);
603 for (i
= 0; i
< 16; i
++)
604 regs_buff
[794 + i
] = IXGBE_READ_REG(hw
, IXGBE_DCA_TXCTRL(i
));
605 regs_buff
[810] = IXGBE_READ_REG(hw
, IXGBE_TIPG
);
606 for (i
= 0; i
< 8; i
++)
607 regs_buff
[811 + i
] = IXGBE_READ_REG(hw
, IXGBE_TXPBSIZE(i
));
608 regs_buff
[819] = IXGBE_READ_REG(hw
, IXGBE_MNGTXMAP
);
611 regs_buff
[820] = IXGBE_READ_REG(hw
, IXGBE_WUC
);
612 regs_buff
[821] = IXGBE_READ_REG(hw
, IXGBE_WUFC
);
613 regs_buff
[822] = IXGBE_READ_REG(hw
, IXGBE_WUS
);
614 regs_buff
[823] = IXGBE_READ_REG(hw
, IXGBE_IPAV
);
615 regs_buff
[824] = IXGBE_READ_REG(hw
, IXGBE_IP4AT
);
616 regs_buff
[825] = IXGBE_READ_REG(hw
, IXGBE_IP6AT
);
617 regs_buff
[826] = IXGBE_READ_REG(hw
, IXGBE_WUPL
);
618 regs_buff
[827] = IXGBE_READ_REG(hw
, IXGBE_WUPM
);
619 regs_buff
[828] = IXGBE_READ_REG(hw
, IXGBE_FHFT(0));
622 regs_buff
[829] = IXGBE_READ_REG(hw
, IXGBE_RMCS
);
623 regs_buff
[830] = IXGBE_READ_REG(hw
, IXGBE_DPMCS
);
624 regs_buff
[831] = IXGBE_READ_REG(hw
, IXGBE_PDPMCS
);
625 regs_buff
[832] = IXGBE_READ_REG(hw
, IXGBE_RUPPBMR
);
626 for (i
= 0; i
< 8; i
++)
627 regs_buff
[833 + i
] = IXGBE_READ_REG(hw
, IXGBE_RT2CR(i
));
628 for (i
= 0; i
< 8; i
++)
629 regs_buff
[841 + i
] = IXGBE_READ_REG(hw
, IXGBE_RT2SR(i
));
630 for (i
= 0; i
< 8; i
++)
631 regs_buff
[849 + i
] = IXGBE_READ_REG(hw
, IXGBE_TDTQ2TCCR(i
));
632 for (i
= 0; i
< 8; i
++)
633 regs_buff
[857 + i
] = IXGBE_READ_REG(hw
, IXGBE_TDTQ2TCSR(i
));
634 for (i
= 0; i
< 8; i
++)
635 regs_buff
[865 + i
] = IXGBE_READ_REG(hw
, IXGBE_TDPT2TCCR(i
));
636 for (i
= 0; i
< 8; i
++)
637 regs_buff
[873 + i
] = IXGBE_READ_REG(hw
, IXGBE_TDPT2TCSR(i
));
640 regs_buff
[881] = IXGBE_GET_STAT(adapter
, crcerrs
);
641 regs_buff
[882] = IXGBE_GET_STAT(adapter
, illerrc
);
642 regs_buff
[883] = IXGBE_GET_STAT(adapter
, errbc
);
643 regs_buff
[884] = IXGBE_GET_STAT(adapter
, mspdc
);
644 for (i
= 0; i
< 8; i
++)
645 regs_buff
[885 + i
] = IXGBE_GET_STAT(adapter
, mpc
[i
]);
646 regs_buff
[893] = IXGBE_GET_STAT(adapter
, mlfc
);
647 regs_buff
[894] = IXGBE_GET_STAT(adapter
, mrfc
);
648 regs_buff
[895] = IXGBE_GET_STAT(adapter
, rlec
);
649 regs_buff
[896] = IXGBE_GET_STAT(adapter
, lxontxc
);
650 regs_buff
[897] = IXGBE_GET_STAT(adapter
, lxonrxc
);
651 regs_buff
[898] = IXGBE_GET_STAT(adapter
, lxofftxc
);
652 regs_buff
[899] = IXGBE_GET_STAT(adapter
, lxoffrxc
);
653 for (i
= 0; i
< 8; i
++)
654 regs_buff
[900 + i
] = IXGBE_GET_STAT(adapter
, pxontxc
[i
]);
655 for (i
= 0; i
< 8; i
++)
656 regs_buff
[908 + i
] = IXGBE_GET_STAT(adapter
, pxonrxc
[i
]);
657 for (i
= 0; i
< 8; i
++)
658 regs_buff
[916 + i
] = IXGBE_GET_STAT(adapter
, pxofftxc
[i
]);
659 for (i
= 0; i
< 8; i
++)
660 regs_buff
[924 + i
] = IXGBE_GET_STAT(adapter
, pxoffrxc
[i
]);
661 regs_buff
[932] = IXGBE_GET_STAT(adapter
, prc64
);
662 regs_buff
[933] = IXGBE_GET_STAT(adapter
, prc127
);
663 regs_buff
[934] = IXGBE_GET_STAT(adapter
, prc255
);
664 regs_buff
[935] = IXGBE_GET_STAT(adapter
, prc511
);
665 regs_buff
[936] = IXGBE_GET_STAT(adapter
, prc1023
);
666 regs_buff
[937] = IXGBE_GET_STAT(adapter
, prc1522
);
667 regs_buff
[938] = IXGBE_GET_STAT(adapter
, gprc
);
668 regs_buff
[939] = IXGBE_GET_STAT(adapter
, bprc
);
669 regs_buff
[940] = IXGBE_GET_STAT(adapter
, mprc
);
670 regs_buff
[941] = IXGBE_GET_STAT(adapter
, gptc
);
671 regs_buff
[942] = IXGBE_GET_STAT(adapter
, gorc
);
672 regs_buff
[944] = IXGBE_GET_STAT(adapter
, gotc
);
673 for (i
= 0; i
< 8; i
++)
674 regs_buff
[946 + i
] = IXGBE_GET_STAT(adapter
, rnbc
[i
]);
675 regs_buff
[954] = IXGBE_GET_STAT(adapter
, ruc
);
676 regs_buff
[955] = IXGBE_GET_STAT(adapter
, rfc
);
677 regs_buff
[956] = IXGBE_GET_STAT(adapter
, roc
);
678 regs_buff
[957] = IXGBE_GET_STAT(adapter
, rjc
);
679 regs_buff
[958] = IXGBE_GET_STAT(adapter
, mngprc
);
680 regs_buff
[959] = IXGBE_GET_STAT(adapter
, mngpdc
);
681 regs_buff
[960] = IXGBE_GET_STAT(adapter
, mngptc
);
682 regs_buff
[961] = IXGBE_GET_STAT(adapter
, tor
);
683 regs_buff
[963] = IXGBE_GET_STAT(adapter
, tpr
);
684 regs_buff
[964] = IXGBE_GET_STAT(adapter
, tpt
);
685 regs_buff
[965] = IXGBE_GET_STAT(adapter
, ptc64
);
686 regs_buff
[966] = IXGBE_GET_STAT(adapter
, ptc127
);
687 regs_buff
[967] = IXGBE_GET_STAT(adapter
, ptc255
);
688 regs_buff
[968] = IXGBE_GET_STAT(adapter
, ptc511
);
689 regs_buff
[969] = IXGBE_GET_STAT(adapter
, ptc1023
);
690 regs_buff
[970] = IXGBE_GET_STAT(adapter
, ptc1522
);
691 regs_buff
[971] = IXGBE_GET_STAT(adapter
, mptc
);
692 regs_buff
[972] = IXGBE_GET_STAT(adapter
, bptc
);
693 regs_buff
[973] = IXGBE_GET_STAT(adapter
, xec
);
694 for (i
= 0; i
< 16; i
++)
695 regs_buff
[974 + i
] = IXGBE_GET_STAT(adapter
, qprc
[i
]);
696 for (i
= 0; i
< 16; i
++)
697 regs_buff
[990 + i
] = IXGBE_GET_STAT(adapter
, qptc
[i
]);
698 for (i
= 0; i
< 16; i
++)
699 regs_buff
[1006 + i
] = IXGBE_GET_STAT(adapter
, qbrc
[i
]);
700 for (i
= 0; i
< 16; i
++)
701 regs_buff
[1022 + i
] = IXGBE_GET_STAT(adapter
, qbtc
[i
]);
704 regs_buff
[1038] = IXGBE_READ_REG(hw
, IXGBE_PCS1GCFIG
);
705 regs_buff
[1039] = IXGBE_READ_REG(hw
, IXGBE_PCS1GLCTL
);
706 regs_buff
[1040] = IXGBE_READ_REG(hw
, IXGBE_PCS1GLSTA
);
707 regs_buff
[1041] = IXGBE_READ_REG(hw
, IXGBE_PCS1GDBG0
);
708 regs_buff
[1042] = IXGBE_READ_REG(hw
, IXGBE_PCS1GDBG1
);
709 regs_buff
[1043] = IXGBE_READ_REG(hw
, IXGBE_PCS1GANA
);
710 regs_buff
[1044] = IXGBE_READ_REG(hw
, IXGBE_PCS1GANLP
);
711 regs_buff
[1045] = IXGBE_READ_REG(hw
, IXGBE_PCS1GANNP
);
712 regs_buff
[1046] = IXGBE_READ_REG(hw
, IXGBE_PCS1GANLPNP
);
713 regs_buff
[1047] = IXGBE_READ_REG(hw
, IXGBE_HLREG0
);
714 regs_buff
[1048] = IXGBE_READ_REG(hw
, IXGBE_HLREG1
);
715 regs_buff
[1049] = IXGBE_READ_REG(hw
, IXGBE_PAP
);
716 regs_buff
[1050] = IXGBE_READ_REG(hw
, IXGBE_MACA
);
717 regs_buff
[1051] = IXGBE_READ_REG(hw
, IXGBE_APAE
);
718 regs_buff
[1052] = IXGBE_READ_REG(hw
, IXGBE_ARD
);
719 regs_buff
[1053] = IXGBE_READ_REG(hw
, IXGBE_AIS
);
720 regs_buff
[1054] = IXGBE_READ_REG(hw
, IXGBE_MSCA
);
721 regs_buff
[1055] = IXGBE_READ_REG(hw
, IXGBE_MSRWD
);
722 regs_buff
[1056] = IXGBE_READ_REG(hw
, IXGBE_MLADD
);
723 regs_buff
[1057] = IXGBE_READ_REG(hw
, IXGBE_MHADD
);
724 regs_buff
[1058] = IXGBE_READ_REG(hw
, IXGBE_TREG
);
725 regs_buff
[1059] = IXGBE_READ_REG(hw
, IXGBE_PCSS1
);
726 regs_buff
[1060] = IXGBE_READ_REG(hw
, IXGBE_PCSS2
);
727 regs_buff
[1061] = IXGBE_READ_REG(hw
, IXGBE_XPCSS
);
728 regs_buff
[1062] = IXGBE_READ_REG(hw
, IXGBE_SERDESC
);
729 regs_buff
[1063] = IXGBE_READ_REG(hw
, IXGBE_MACS
);
730 regs_buff
[1064] = IXGBE_READ_REG(hw
, IXGBE_AUTOC
);
731 regs_buff
[1065] = IXGBE_READ_REG(hw
, IXGBE_LINKS
);
732 regs_buff
[1066] = IXGBE_READ_REG(hw
, IXGBE_AUTOC2
);
733 regs_buff
[1067] = IXGBE_READ_REG(hw
, IXGBE_AUTOC3
);
734 regs_buff
[1068] = IXGBE_READ_REG(hw
, IXGBE_ANLP1
);
735 regs_buff
[1069] = IXGBE_READ_REG(hw
, IXGBE_ANLP2
);
736 regs_buff
[1070] = IXGBE_READ_REG(hw
, IXGBE_ATLASCTL
);
739 regs_buff
[1071] = IXGBE_READ_REG(hw
, IXGBE_RDSTATCTL
);
740 for (i
= 0; i
< 8; i
++)
741 regs_buff
[1072 + i
] = IXGBE_READ_REG(hw
, IXGBE_RDSTAT(i
));
742 regs_buff
[1080] = IXGBE_READ_REG(hw
, IXGBE_RDHMPN
);
743 for (i
= 0; i
< 4; i
++)
744 regs_buff
[1081 + i
] = IXGBE_READ_REG(hw
, IXGBE_RIC_DW(i
));
745 regs_buff
[1085] = IXGBE_READ_REG(hw
, IXGBE_RDPROBE
);
746 regs_buff
[1086] = IXGBE_READ_REG(hw
, IXGBE_TDSTATCTL
);
747 for (i
= 0; i
< 8; i
++)
748 regs_buff
[1087 + i
] = IXGBE_READ_REG(hw
, IXGBE_TDSTAT(i
));
749 regs_buff
[1095] = IXGBE_READ_REG(hw
, IXGBE_TDHMPN
);
750 for (i
= 0; i
< 4; i
++)
751 regs_buff
[1096 + i
] = IXGBE_READ_REG(hw
, IXGBE_TIC_DW(i
));
752 regs_buff
[1100] = IXGBE_READ_REG(hw
, IXGBE_TDPROBE
);
753 regs_buff
[1101] = IXGBE_READ_REG(hw
, IXGBE_TXBUFCTRL
);
754 regs_buff
[1102] = IXGBE_READ_REG(hw
, IXGBE_TXBUFDATA0
);
755 regs_buff
[1103] = IXGBE_READ_REG(hw
, IXGBE_TXBUFDATA1
);
756 regs_buff
[1104] = IXGBE_READ_REG(hw
, IXGBE_TXBUFDATA2
);
757 regs_buff
[1105] = IXGBE_READ_REG(hw
, IXGBE_TXBUFDATA3
);
758 regs_buff
[1106] = IXGBE_READ_REG(hw
, IXGBE_RXBUFCTRL
);
759 regs_buff
[1107] = IXGBE_READ_REG(hw
, IXGBE_RXBUFDATA0
);
760 regs_buff
[1108] = IXGBE_READ_REG(hw
, IXGBE_RXBUFDATA1
);
761 regs_buff
[1109] = IXGBE_READ_REG(hw
, IXGBE_RXBUFDATA2
);
762 regs_buff
[1110] = IXGBE_READ_REG(hw
, IXGBE_RXBUFDATA3
);
763 for (i
= 0; i
< 8; i
++)
764 regs_buff
[1111 + i
] = IXGBE_READ_REG(hw
, IXGBE_PCIE_DIAG(i
));
765 regs_buff
[1119] = IXGBE_READ_REG(hw
, IXGBE_RFVAL
);
766 regs_buff
[1120] = IXGBE_READ_REG(hw
, IXGBE_MDFTC1
);
767 regs_buff
[1121] = IXGBE_READ_REG(hw
, IXGBE_MDFTC2
);
768 regs_buff
[1122] = IXGBE_READ_REG(hw
, IXGBE_MDFTFIFO1
);
769 regs_buff
[1123] = IXGBE_READ_REG(hw
, IXGBE_MDFTFIFO2
);
770 regs_buff
[1124] = IXGBE_READ_REG(hw
, IXGBE_MDFTS
);
771 regs_buff
[1125] = IXGBE_READ_REG(hw
, IXGBE_PCIEECCCTL
);
772 regs_buff
[1126] = IXGBE_READ_REG(hw
, IXGBE_PBTXECC
);
773 regs_buff
[1127] = IXGBE_READ_REG(hw
, IXGBE_PBRXECC
);
775 /* 82599 X540 specific registers */
776 regs_buff
[1128] = IXGBE_READ_REG(hw
, IXGBE_MFLCN
);
static int ixgbe_get_eeprom_len(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->hw.eeprom.word_size * 2;
}
static int ixgbe_get_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	int first_word, last_word, eeprom_len;
	int ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_len = last_word - first_word + 1;

	eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len,
					     eeprom_buff);

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < eeprom_len; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
	kfree(eeprom_buff);

	return ret_val;
}
821 static int ixgbe_set_eeprom(struct net_device
*netdev
,
822 struct ethtool_eeprom
*eeprom
, u8
*bytes
)
824 struct ixgbe_adapter
*adapter
= netdev_priv(netdev
);
825 struct ixgbe_hw
*hw
= &adapter
->hw
;
828 int max_len
, first_word
, last_word
, ret_val
= 0;
831 if (eeprom
->len
== 0)
834 if (eeprom
->magic
!= (hw
->vendor_id
| (hw
->device_id
<< 16)))
837 max_len
= hw
->eeprom
.word_size
* 2;
839 first_word
= eeprom
->offset
>> 1;
840 last_word
= (eeprom
->offset
+ eeprom
->len
- 1) >> 1;
841 eeprom_buff
= kmalloc(max_len
, GFP_KERNEL
);
847 if (eeprom
->offset
& 1) {
849 * need read/modify/write of first changed EEPROM word
850 * only the second byte of the word is being modified
852 ret_val
= hw
->eeprom
.ops
.read(hw
, first_word
, &eeprom_buff
[0]);
858 if ((eeprom
->offset
+ eeprom
->len
) & 1) {
860 * need read/modify/write of last changed EEPROM word
861 * only the first byte of the word is being modified
863 ret_val
= hw
->eeprom
.ops
.read(hw
, last_word
,
864 &eeprom_buff
[last_word
- first_word
]);
869 /* Device's eeprom is always little-endian, word addressable */
870 for (i
= 0; i
< last_word
- first_word
+ 1; i
++)
871 le16_to_cpus(&eeprom_buff
[i
]);
873 memcpy(ptr
, bytes
, eeprom
->len
);
875 for (i
= 0; i
< last_word
- first_word
+ 1; i
++)
876 cpu_to_le16s(&eeprom_buff
[i
]);
878 ret_val
= hw
->eeprom
.ops
.write_buffer(hw
, first_word
,
879 last_word
- first_word
+ 1,
882 /* Update the checksum */
884 hw
->eeprom
.ops
.update_checksum(hw
);
static void ixgbe_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u32 nvm_track_id;

	strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ixgbe_driver_version,
		sizeof(drvinfo->version));

	nvm_track_id = (adapter->eeprom_verh << 16) |
		       adapter->eeprom_verl;
	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x",
		 nvm_track_id);

	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));
	drvinfo->n_stats = IXGBE_STATS_LEN;
	drvinfo->testinfo_len = IXGBE_TEST_LEN;
	drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
}
static void ixgbe_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
	struct ixgbe_ring *rx_ring = adapter->rx_ring[0];

	ring->rx_max_pending = IXGBE_MAX_RXD;
	ring->tx_max_pending = IXGBE_MAX_TXD;
	ring->rx_pending = rx_ring->count;
	ring->tx_pending = tx_ring->count;
}
926 static int ixgbe_set_ringparam(struct net_device
*netdev
,
927 struct ethtool_ringparam
*ring
)
929 struct ixgbe_adapter
*adapter
= netdev_priv(netdev
);
930 struct ixgbe_ring
*temp_tx_ring
, *temp_rx_ring
;
932 u32 new_rx_count
, new_tx_count
;
933 bool need_update
= false;
935 if ((ring
->rx_mini_pending
) || (ring
->rx_jumbo_pending
))
938 new_rx_count
= max(ring
->rx_pending
, (u32
)IXGBE_MIN_RXD
);
939 new_rx_count
= min(new_rx_count
, (u32
)IXGBE_MAX_RXD
);
940 new_rx_count
= ALIGN(new_rx_count
, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE
);
942 new_tx_count
= max(ring
->tx_pending
, (u32
)IXGBE_MIN_TXD
);
943 new_tx_count
= min(new_tx_count
, (u32
)IXGBE_MAX_TXD
);
944 new_tx_count
= ALIGN(new_tx_count
, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE
);
946 if ((new_tx_count
== adapter
->tx_ring
[0]->count
) &&
947 (new_rx_count
== adapter
->rx_ring
[0]->count
)) {
952 while (test_and_set_bit(__IXGBE_RESETTING
, &adapter
->state
))
953 usleep_range(1000, 2000);
955 if (!netif_running(adapter
->netdev
)) {
956 for (i
= 0; i
< adapter
->num_tx_queues
; i
++)
957 adapter
->tx_ring
[i
]->count
= new_tx_count
;
958 for (i
= 0; i
< adapter
->num_rx_queues
; i
++)
959 adapter
->rx_ring
[i
]->count
= new_rx_count
;
960 adapter
->tx_ring_count
= new_tx_count
;
961 adapter
->rx_ring_count
= new_rx_count
;
965 temp_tx_ring
= vmalloc(adapter
->num_tx_queues
* sizeof(struct ixgbe_ring
));
971 if (new_tx_count
!= adapter
->tx_ring_count
) {
972 for (i
= 0; i
< adapter
->num_tx_queues
; i
++) {
973 memcpy(&temp_tx_ring
[i
], adapter
->tx_ring
[i
],
974 sizeof(struct ixgbe_ring
));
975 temp_tx_ring
[i
].count
= new_tx_count
;
976 err
= ixgbe_setup_tx_resources(&temp_tx_ring
[i
]);
980 ixgbe_free_tx_resources(&temp_tx_ring
[i
]);
988 temp_rx_ring
= vmalloc(adapter
->num_rx_queues
* sizeof(struct ixgbe_ring
));
994 if (new_rx_count
!= adapter
->rx_ring_count
) {
995 for (i
= 0; i
< adapter
->num_rx_queues
; i
++) {
996 memcpy(&temp_rx_ring
[i
], adapter
->rx_ring
[i
],
997 sizeof(struct ixgbe_ring
));
998 temp_rx_ring
[i
].count
= new_rx_count
;
999 err
= ixgbe_setup_rx_resources(&temp_rx_ring
[i
]);
1003 ixgbe_free_rx_resources(&temp_rx_ring
[i
]);
1011 /* if rings need to be updated, here's the place to do it in one shot */
1013 ixgbe_down(adapter
);
1016 if (new_tx_count
!= adapter
->tx_ring_count
) {
1017 for (i
= 0; i
< adapter
->num_tx_queues
; i
++) {
1018 ixgbe_free_tx_resources(adapter
->tx_ring
[i
]);
1019 memcpy(adapter
->tx_ring
[i
], &temp_tx_ring
[i
],
1020 sizeof(struct ixgbe_ring
));
1022 adapter
->tx_ring_count
= new_tx_count
;
1026 if (new_rx_count
!= adapter
->rx_ring_count
) {
1027 for (i
= 0; i
< adapter
->num_rx_queues
; i
++) {
1028 ixgbe_free_rx_resources(adapter
->rx_ring
[i
]);
1029 memcpy(adapter
->rx_ring
[i
], &temp_rx_ring
[i
],
1030 sizeof(struct ixgbe_ring
));
1032 adapter
->rx_ring_count
= new_rx_count
;
1037 vfree(temp_rx_ring
);
1039 vfree(temp_tx_ring
);
1041 clear_bit(__IXGBE_RESETTING
, &adapter
->state
);
static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_TEST:
		return IXGBE_TEST_LEN;
	case ETH_SS_STATS:
		return IXGBE_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
static void ixgbe_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *net_stats;
	unsigned int start;
	struct ixgbe_ring *ring;
	int i, j;
	char *p = NULL;

	ixgbe_update_stats(adapter);
	net_stats = dev_get_stats(netdev, &temp);
	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
		switch (ixgbe_gstrings_stats[i].type) {
		case NETDEV_STATS:
			p = (char *) net_stats +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		case IXGBE_STATS:
			p = (char *) adapter +
					ixgbe_gstrings_stats[i].stat_offset;
			break;
		}

		data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
		ring = adapter->tx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			data[i]   = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		i += 2;
	}
	for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) {
		ring = adapter->rx_ring[j];
		if (!ring) {
			data[i] = 0;
			data[i+1] = 0;
			i += 2;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_bh(&ring->syncp);
			data[i]   = ring->stats.packets;
			data[i+1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_bh(&ring->syncp, start));
		i += 2;
	}

	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxontxc[j];
		data[i++] = adapter->stats.pxofftxc[j];
	}
	for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) {
		data[i++] = adapter->stats.pxonrxc[j];
		data[i++] = adapter->stats.pxoffrxc[j];
	}
}
static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
			      u8 *data)
{
	char *p = (char *)data;
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *ixgbe_gstrings_test,
		       IXGBE_TEST_LEN * ETH_GSTRING_LEN);
		break;
	case ETH_SS_STATS:
		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
			memcpy(p, ixgbe_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < netdev->num_tx_queues; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_NUM_RX_QUEUES; i++) {
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			sprintf(p, "tx_pb_%u_pxon", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_pb_%u_pxoff", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < IXGBE_MAX_PACKET_BUFFERS; i++) {
			sprintf(p, "rx_pb_%u_pxon", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_pb_%u_pxoff", i);
			p += ETH_GSTRING_LEN;
		}
		/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
		break;
	}
}
static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link_up;
	u32 link_speed = 0;
	*data = 0;

	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
	if (link_up)
		*data = 0;
	else
		*data = 1;
	return *data;
}
/* ethtool register test data */
struct ixgbe_reg_test {
	u16 reg;
	u8  array_len;
	u8  test_type;
	u32 mask;
	u32 write;
};

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x40 bytes apart, or in contiguous tables.  We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */

#define PATTERN_TEST	1
#define SET_READ_TEST	2
#define WRITE_NO_TEST	3
#define TABLE32_TEST	4
#define TABLE64_TEST_LO	5
#define TABLE64_TEST_HI	6
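
/*
 * Worked example (illustrative): a PATTERN_TEST entry such as
 * { IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF } below means
 * "for 4 registers spaced 0x40 apart, write each canned pattern masked with
 * the write value 0x000FFFFF, read it back, and compare against the pattern
 * masked with write & mask (0x000FFF80)" — bits outside the mask are allowed
 * to read back differently because they are reserved or hardware-controlled.
 */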
/* default 82599 register test */
static const struct ixgbe_reg_test reg_test_82599[] = {
	{ IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000001, 0x00000001 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x8001FFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ 0, 0, 0, 0 }
};

/* default 82598 register test */
static const struct ixgbe_reg_test reg_test_82598[] = {
	{ IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VLNCTRL, 1, PATTERN_TEST, 0x00000000, 0x00000000 },
	{ IXGBE_RDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_RDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	/* Enable all four RX queues before testing. */
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	/* RDH is read-only for 82598, only test RDT. */
	{ IXGBE_RDT(0), 4, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_RXDCTL(0), 4, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
	{ IXGBE_FCTTV(0), 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TIPG, 1, PATTERN_TEST, 0x000000FF, 0x000000FF },
	{ IXGBE_TDBAL(0), 4, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_TDBAH(0), 4, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_TDLEN(0), 4, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_RXCTRL, 1, SET_READ_TEST, 0x00000003, 0x00000003 },
	{ IXGBE_DTXCTL, 1, SET_READ_TEST, 0x00000005, 0x00000005 },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF },
	{ IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ 0, 0, 0, 0 }
};
static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg,
			     u32 mask, u32 write)
{
	u32 pat, val, before;
	static const u32 test_pattern[] = {
		0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};

	for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) {
		before = readl(adapter->hw.hw_addr + reg);
		writel((test_pattern[pat] & write),
		       (adapter->hw.hw_addr + reg));
		val = readl(adapter->hw.hw_addr + reg);
		if (val != (test_pattern[pat] & write & mask)) {
			e_err(drv, "pattern test reg %04X failed: got "
			      "0x%08X expected 0x%08X\n",
			      reg, val, (test_pattern[pat] & write & mask));
			*data = reg;
			writel(before, adapter->hw.hw_addr + reg);
			return true;
		}
		writel(before, adapter->hw.hw_addr + reg);
	}
	return false;
}

static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg,
			      u32 mask, u32 write)
{
	u32 val, before;

	before = readl(adapter->hw.hw_addr + reg);
	writel((write & mask), (adapter->hw.hw_addr + reg));
	val = readl(adapter->hw.hw_addr + reg);
	if ((write & mask) != (val & mask)) {
		e_err(drv, "set/check reg %04X test failed: got 0x%08X "
		      "expected 0x%08X\n", reg, (val & mask), (write & mask));
		*data = reg;
		writel(before, (adapter->hw.hw_addr + reg));
		return true;
	}
	writel(before, (adapter->hw.hw_addr + reg));
	return false;
}

#define REG_PATTERN_TEST(reg, mask, write)				\
	do {								\
		if (reg_pattern_test(adapter, data, reg, mask, write))	\
			return 1;					\
	} while (0)

#define REG_SET_AND_CHECK(reg, mask, write)				\
	do {								\
		if (reg_set_and_check(adapter, data, reg, mask, write))	\
			return 1;					\
	} while (0)
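
/*
 * Usage note (illustrative): the do { } while (0) wrappers above let
 * ixgbe_reg_test() call REG_PATTERN_TEST()/REG_SET_AND_CHECK() like ordinary
 * statements inside its switch; on the first mismatch the helper stores the
 * failing register offset in *data and the macro returns 1 from
 * ixgbe_reg_test(), which ixgbe_diag_test() then reports as a failed
 * register test.
 */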
1323 static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
1325 const struct ixgbe_reg_test
*test
;
1326 u32 value
, before
, after
;
1329 switch (adapter
->hw
.mac
.type
) {
1330 case ixgbe_mac_82598EB
:
1331 toggle
= 0x7FFFF3FF;
1332 test
= reg_test_82598
;
1334 case ixgbe_mac_82599EB
:
1335 case ixgbe_mac_X540
:
1336 toggle
= 0x7FFFF30F;
1337 test
= reg_test_82599
;
1346 * Because the status register is such a special case,
1347 * we handle it separately from the rest of the register
1348 * tests. Some bits are read-only, some toggle, and some
1349 * are writeable on newer MACs.
1351 before
= IXGBE_READ_REG(&adapter
->hw
, IXGBE_STATUS
);
1352 value
= (IXGBE_READ_REG(&adapter
->hw
, IXGBE_STATUS
) & toggle
);
1353 IXGBE_WRITE_REG(&adapter
->hw
, IXGBE_STATUS
, toggle
);
1354 after
= IXGBE_READ_REG(&adapter
->hw
, IXGBE_STATUS
) & toggle
;
1355 if (value
!= after
) {
1356 e_err(drv
, "failed STATUS register test got: 0x%08X "
1357 "expected: 0x%08X\n", after
, value
);
1361 /* restore previous status */
1362 IXGBE_WRITE_REG(&adapter
->hw
, IXGBE_STATUS
, before
);
1365 * Perform the remainder of the register test, looping through
1366 * the test table until we either fail or reach the null entry.
1369 for (i
= 0; i
< test
->array_len
; i
++) {
1370 switch (test
->test_type
) {
1372 REG_PATTERN_TEST(test
->reg
+ (i
* 0x40),
1377 REG_SET_AND_CHECK(test
->reg
+ (i
* 0x40),
1383 (adapter
->hw
.hw_addr
+ test
->reg
)
1387 REG_PATTERN_TEST(test
->reg
+ (i
* 4),
1391 case TABLE64_TEST_LO
:
1392 REG_PATTERN_TEST(test
->reg
+ (i
* 8),
1396 case TABLE64_TEST_HI
:
1397 REG_PATTERN_TEST((test
->reg
+ 4) + (i
* 8),
static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;

	if (hw->eeprom.ops.validate_checksum(hw, NULL))
		*data = 1;
	else
		*data = 0;
	return *data;
}
static irqreturn_t ixgbe_test_intr(int irq, void *data)
{
	struct net_device *netdev = (struct net_device *) data;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	adapter->test_icr |= IXGBE_READ_REG(&adapter->hw, IXGBE_EICR);

	return IRQ_HANDLED;
}
1430 static int ixgbe_intr_test(struct ixgbe_adapter
*adapter
, u64
*data
)
1432 struct net_device
*netdev
= adapter
->netdev
;
1433 u32 mask
, i
= 0, shared_int
= true;
1434 u32 irq
= adapter
->pdev
->irq
;
1438 /* Hook up test interrupt handler just for this test */
1439 if (adapter
->msix_entries
) {
1440 /* NOTE: we don't test MSI-X interrupts here, yet */
1442 } else if (adapter
->flags
& IXGBE_FLAG_MSI_ENABLED
) {
1444 if (request_irq(irq
, ixgbe_test_intr
, 0, netdev
->name
,
1449 } else if (!request_irq(irq
, ixgbe_test_intr
, IRQF_PROBE_SHARED
,
1450 netdev
->name
, netdev
)) {
1452 } else if (request_irq(irq
, ixgbe_test_intr
, IRQF_SHARED
,
1453 netdev
->name
, netdev
)) {
1457 e_info(hw
, "testing %s interrupt\n", shared_int
?
1458 "shared" : "unshared");
1460 /* Disable all the interrupts */
1461 IXGBE_WRITE_REG(&adapter
->hw
, IXGBE_EIMC
, 0xFFFFFFFF);
1462 IXGBE_WRITE_FLUSH(&adapter
->hw
);
1463 usleep_range(10000, 20000);
1465 /* Test each interrupt */
1466 for (; i
< 10; i
++) {
1467 /* Interrupt to test */
1472 * Disable the interrupts to be reported in
1473 * the cause register and then force the same
1474 * interrupt and see if one gets posted. If
1475 * an interrupt was posted to the bus, the
1478 adapter
->test_icr
= 0;
1479 IXGBE_WRITE_REG(&adapter
->hw
, IXGBE_EIMC
,
1480 ~mask
& 0x00007FFF);
1481 IXGBE_WRITE_REG(&adapter
->hw
, IXGBE_EICS
,
1482 ~mask
& 0x00007FFF);
1483 IXGBE_WRITE_FLUSH(&adapter
->hw
);
1484 usleep_range(10000, 20000);
1486 if (adapter
->test_icr
& mask
) {
1493 * Enable the interrupt to be reported in the cause
1494 * register and then force the same interrupt and see
1495 * if one gets posted. If an interrupt was not posted
1496 * to the bus, the test failed.
1498 adapter
->test_icr
= 0;
1499 IXGBE_WRITE_REG(&adapter
->hw
, IXGBE_EIMS
, mask
);
1500 IXGBE_WRITE_REG(&adapter
->hw
, IXGBE_EICS
, mask
);
1501 IXGBE_WRITE_FLUSH(&adapter
->hw
);
1502 usleep_range(10000, 20000);
1504 if (!(adapter
->test_icr
&mask
)) {
1511 * Disable the other interrupts to be reported in
1512 * the cause register and then force the other
1513 * interrupts and see if any get posted. If
1514 * an interrupt was posted to the bus, the
1517 adapter
->test_icr
= 0;
1518 IXGBE_WRITE_REG(&adapter
->hw
, IXGBE_EIMC
,
1519 ~mask
& 0x00007FFF);
1520 IXGBE_WRITE_REG(&adapter
->hw
, IXGBE_EICS
,
1521 ~mask
& 0x00007FFF);
1522 IXGBE_WRITE_FLUSH(&adapter
->hw
);
1523 usleep_range(10000, 20000);
1525 if (adapter
->test_icr
) {
1532 /* Disable all the interrupts */
1533 IXGBE_WRITE_REG(&adapter
->hw
, IXGBE_EIMC
, 0xFFFFFFFF);
1534 IXGBE_WRITE_FLUSH(&adapter
->hw
);
1535 usleep_range(10000, 20000);
1537 /* Unhook test interrupt handler */
1538 free_irq(irq
, netdev
);
static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
{
	struct ixgbe_ring *tx_ring = &adapter->test_tx_ring;
	struct ixgbe_ring *rx_ring = &adapter->test_rx_ring;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 reg_ctl;

	/* shut down the DMA engines now so they can be reinitialized later */

	/* first Rx */
	reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	reg_ctl &= ~IXGBE_RXCTRL_RXEN;
	IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
	ixgbe_disable_rx_queue(adapter, rx_ring);

	/* now Tx */
	reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
	reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
	IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);

	switch (hw->mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
		reg_ctl &= ~IXGBE_DMATXCTL_TE;
		IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
		break;
	default:
		break;
	}

	ixgbe_reset(adapter);

	ixgbe_free_tx_resources(&adapter->test_tx_ring);
	ixgbe_free_rx_resources(&adapter->test_rx_ring);
}
1580 static int ixgbe_setup_desc_rings(struct ixgbe_adapter
*adapter
)
1582 struct ixgbe_ring
*tx_ring
= &adapter
->test_tx_ring
;
1583 struct ixgbe_ring
*rx_ring
= &adapter
->test_rx_ring
;
1588 /* Setup Tx descriptor ring and Tx buffers */
1589 tx_ring
->count
= IXGBE_DEFAULT_TXD
;
1590 tx_ring
->queue_index
= 0;
1591 tx_ring
->dev
= &adapter
->pdev
->dev
;
1592 tx_ring
->netdev
= adapter
->netdev
;
1593 tx_ring
->reg_idx
= adapter
->tx_ring
[0]->reg_idx
;
1594 tx_ring
->numa_node
= adapter
->node
;
1596 err
= ixgbe_setup_tx_resources(tx_ring
);
1600 switch (adapter
->hw
.mac
.type
) {
1601 case ixgbe_mac_82599EB
:
1602 case ixgbe_mac_X540
:
1603 reg_data
= IXGBE_READ_REG(&adapter
->hw
, IXGBE_DMATXCTL
);
1604 reg_data
|= IXGBE_DMATXCTL_TE
;
1605 IXGBE_WRITE_REG(&adapter
->hw
, IXGBE_DMATXCTL
, reg_data
);
1611 ixgbe_configure_tx_ring(adapter
, tx_ring
);
1613 /* Setup Rx Descriptor ring and Rx buffers */
1614 rx_ring
->count
= IXGBE_DEFAULT_RXD
;
1615 rx_ring
->queue_index
= 0;
1616 rx_ring
->dev
= &adapter
->pdev
->dev
;
1617 rx_ring
->netdev
= adapter
->netdev
;
1618 rx_ring
->reg_idx
= adapter
->rx_ring
[0]->reg_idx
;
1619 rx_ring
->rx_buf_len
= IXGBE_RXBUFFER_2K
;
1620 rx_ring
->numa_node
= adapter
->node
;
1622 err
= ixgbe_setup_rx_resources(rx_ring
);
1628 rctl
= IXGBE_READ_REG(&adapter
->hw
, IXGBE_RXCTRL
);
1629 IXGBE_WRITE_REG(&adapter
->hw
, IXGBE_RXCTRL
, rctl
& ~IXGBE_RXCTRL_RXEN
);
1631 ixgbe_configure_rx_ring(adapter
, rx_ring
);
1633 rctl
|= IXGBE_RXCTRL_RXEN
| IXGBE_RXCTRL_DMBYPS
;
1634 IXGBE_WRITE_REG(&adapter
->hw
, IXGBE_RXCTRL
, rctl
);
1639 ixgbe_free_desc_rings(adapter
);
1643 static int ixgbe_setup_loopback_test(struct ixgbe_adapter
*adapter
)
1645 struct ixgbe_hw
*hw
= &adapter
->hw
;
1648 /* X540 needs to set the MACC.FLU bit to force link up */
1649 if (adapter
->hw
.mac
.type
== ixgbe_mac_X540
) {
1650 reg_data
= IXGBE_READ_REG(hw
, IXGBE_MACC
);
1651 reg_data
|= IXGBE_MACC_FLU
;
1652 IXGBE_WRITE_REG(hw
, IXGBE_MACC
, reg_data
);
1655 /* right now we only support MAC loopback in the driver */
1656 reg_data
= IXGBE_READ_REG(hw
, IXGBE_HLREG0
);
1657 /* Setup MAC loopback */
1658 reg_data
|= IXGBE_HLREG0_LPBK
;
1659 IXGBE_WRITE_REG(hw
, IXGBE_HLREG0
, reg_data
);
1661 reg_data
= IXGBE_READ_REG(hw
, IXGBE_FCTRL
);
1662 reg_data
|= IXGBE_FCTRL_BAM
| IXGBE_FCTRL_SBP
| IXGBE_FCTRL_MPE
;
1663 IXGBE_WRITE_REG(hw
, IXGBE_FCTRL
, reg_data
);
1665 reg_data
= IXGBE_READ_REG(hw
, IXGBE_AUTOC
);
1666 reg_data
&= ~IXGBE_AUTOC_LMS_MASK
;
1667 reg_data
|= IXGBE_AUTOC_LMS_10G_LINK_NO_AN
| IXGBE_AUTOC_FLU
;
1668 IXGBE_WRITE_REG(hw
, IXGBE_AUTOC
, reg_data
);
1669 IXGBE_WRITE_FLUSH(hw
);
1670 usleep_range(10000, 20000);
1672 /* Disable Atlas Tx lanes; re-enabled in reset path */
1673 if (hw
->mac
.type
== ixgbe_mac_82598EB
) {
1676 hw
->mac
.ops
.read_analog_reg8(hw
, IXGBE_ATLAS_PDN_LPBK
, &atlas
);
1677 atlas
|= IXGBE_ATLAS_PDN_TX_REG_EN
;
1678 hw
->mac
.ops
.write_analog_reg8(hw
, IXGBE_ATLAS_PDN_LPBK
, atlas
);
1680 hw
->mac
.ops
.read_analog_reg8(hw
, IXGBE_ATLAS_PDN_10G
, &atlas
);
1681 atlas
|= IXGBE_ATLAS_PDN_TX_10G_QL_ALL
;
1682 hw
->mac
.ops
.write_analog_reg8(hw
, IXGBE_ATLAS_PDN_10G
, atlas
);
1684 hw
->mac
.ops
.read_analog_reg8(hw
, IXGBE_ATLAS_PDN_1G
, &atlas
);
1685 atlas
|= IXGBE_ATLAS_PDN_TX_1G_QL_ALL
;
1686 hw
->mac
.ops
.write_analog_reg8(hw
, IXGBE_ATLAS_PDN_1G
, atlas
);
1688 hw
->mac
.ops
.read_analog_reg8(hw
, IXGBE_ATLAS_PDN_AN
, &atlas
);
1689 atlas
|= IXGBE_ATLAS_PDN_TX_AN_QL_ALL
;
1690 hw
->mac
.ops
.write_analog_reg8(hw
, IXGBE_ATLAS_PDN_AN
, atlas
);
static void ixgbe_loopback_cleanup(struct ixgbe_adapter *adapter)
{
	u32 reg_data;

	reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_HLREG0);
	reg_data &= ~IXGBE_HLREG0_LPBK;
	IXGBE_WRITE_REG(&adapter->hw, IXGBE_HLREG0, reg_data);
}
static void ixgbe_create_lbtest_frame(struct sk_buff *skb,
				      unsigned int frame_size)
{
	memset(skb->data, 0xFF, frame_size);
	memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
	memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
	memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
}

static int ixgbe_check_lbtest_frame(struct sk_buff *skb,
				    unsigned int frame_size)
{
	if (*(skb->data + 3) == 0xFF) {
		if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
		    (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
			return 0;
		}
	}
	return 13;
}
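
/*
 * Illustrative layout of the loopback test frame built and checked above, for
 * the 1024-byte size used by ixgbe_run_loopback_test():
 *
 *	bytes 0..511	: 0xFF (so data[3] == 0xFF identifies a test frame)
 *	bytes 512..1022	: 0xAA filler
 *	byte  512 + 10	: 0xBE marker
 *	byte  512 + 12	: 0xAF marker
 *
 * A received buffer only counts as good if both markers survive the MAC
 * loopback round trip.
 */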
1728 static u16
ixgbe_clean_test_rings(struct ixgbe_ring
*rx_ring
,
1729 struct ixgbe_ring
*tx_ring
,
1732 union ixgbe_adv_rx_desc
*rx_desc
;
1733 struct ixgbe_rx_buffer
*rx_buffer_info
;
1734 struct ixgbe_tx_buffer
*tx_buffer_info
;
1735 const int bufsz
= rx_ring
->rx_buf_len
;
1737 u16 rx_ntc
, tx_ntc
, count
= 0;
1739 /* initialize next to clean and descriptor values */
1740 rx_ntc
= rx_ring
->next_to_clean
;
1741 tx_ntc
= tx_ring
->next_to_clean
;
1742 rx_desc
= IXGBE_RX_DESC_ADV(rx_ring
, rx_ntc
);
1743 staterr
= le32_to_cpu(rx_desc
->wb
.upper
.status_error
);
1745 while (staterr
& IXGBE_RXD_STAT_DD
) {
1746 /* check Rx buffer */
1747 rx_buffer_info
= &rx_ring
->rx_buffer_info
[rx_ntc
];
1749 /* unmap Rx buffer, will be remapped by alloc_rx_buffers */
1750 dma_unmap_single(rx_ring
->dev
,
1751 rx_buffer_info
->dma
,
1754 rx_buffer_info
->dma
= 0;
1756 /* verify contents of skb */
1757 if (!ixgbe_check_lbtest_frame(rx_buffer_info
->skb
, size
))
1760 /* unmap buffer on Tx side */
1761 tx_buffer_info
= &tx_ring
->tx_buffer_info
[tx_ntc
];
1762 ixgbe_unmap_and_free_tx_resource(tx_ring
, tx_buffer_info
);
1764 /* increment Rx/Tx next to clean counters */
1766 if (rx_ntc
== rx_ring
->count
)
1769 if (tx_ntc
== tx_ring
->count
)
1772 /* fetch next descriptor */
1773 rx_desc
= IXGBE_RX_DESC_ADV(rx_ring
, rx_ntc
);
1774 staterr
= le32_to_cpu(rx_desc
->wb
.upper
.status_error
);
1777 /* re-map buffers to ring, store next to clean values */
1778 ixgbe_alloc_rx_buffers(rx_ring
, count
);
1779 rx_ring
->next_to_clean
= rx_ntc
;
1780 tx_ring
->next_to_clean
= tx_ntc
;
1785 static int ixgbe_run_loopback_test(struct ixgbe_adapter
*adapter
)
1787 struct ixgbe_ring
*tx_ring
= &adapter
->test_tx_ring
;
1788 struct ixgbe_ring
*rx_ring
= &adapter
->test_rx_ring
;
1789 int i
, j
, lc
, good_cnt
, ret_val
= 0;
1790 unsigned int size
= 1024;
1791 netdev_tx_t tx_ret_val
;
1792 struct sk_buff
*skb
;
1794 /* allocate test skb */
1795 skb
= alloc_skb(size
, GFP_KERNEL
);
1799 /* place data into test skb */
1800 ixgbe_create_lbtest_frame(skb
, size
);
1804 * Calculate the loop count based on the largest descriptor ring
1805 * The idea is to wrap the largest ring a number of times using 64
1806 * send/receive pairs during each loop
1809 if (rx_ring
->count
<= tx_ring
->count
)
1810 lc
= ((tx_ring
->count
/ 64) * 2) + 1;
1812 lc
= ((rx_ring
->count
/ 64) * 2) + 1;
1814 for (j
= 0; j
<= lc
; j
++) {
1815 /* reset count of good packets */
1818 /* place 64 packets on the transmit queue*/
1819 for (i
= 0; i
< 64; i
++) {
1821 tx_ret_val
= ixgbe_xmit_frame_ring(skb
,
1824 if (tx_ret_val
== NETDEV_TX_OK
)
1828 if (good_cnt
!= 64) {
1833 /* allow 200 milliseconds for packets to go from Tx to Rx */
1836 good_cnt
= ixgbe_clean_test_rings(rx_ring
, tx_ring
, size
);
1837 if (good_cnt
!= 64) {
1843 /* free the original skb */
	kfree_skb(skb);

	return ret_val;
}

static int ixgbe_loopback_test(struct ixgbe_adapter *adapter, u64 *data)
{
	*data = ixgbe_setup_desc_rings(adapter);
	if (*data)
		goto out;
	*data = ixgbe_setup_loopback_test(adapter);
	if (*data)
		goto err_loopback;
	*data = ixgbe_run_loopback_test(adapter);
	ixgbe_loopback_cleanup(adapter);

err_loopback:
	ixgbe_free_desc_rings(adapter);
out:
	return *data;
}
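/*
 * ethtool self-test entry point.  Results are reported in the same order as
 * the test strings: data[0] register test, data[1] eeprom test, data[2]
 * interrupt test, data[3] loopback test, data[4] link test.  An offline run
 * (for example "ethtool -t eth0 offline") closes a running interface, resets
 * the hardware between tests and reopens the interface afterwards; an online
 * run only performs the link test.
 */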
static void ixgbe_diag_test(struct net_device *netdev,
			    struct ethtool_test *eth_test, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	bool if_running = netif_running(netdev);

	set_bit(__IXGBE_TESTING, &adapter->state);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline tests */

		e_info(hw, "offline testing starting\n");

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
			int i;
			for (i = 0; i < adapter->num_vfs; i++) {
				if (adapter->vfinfo[i].clear_to_send) {
					netdev_warn(netdev, "%s",
						    "offline diagnostic is not "
						    "supported when VFs are "
						    "present\n");
					data[0] = 1;
					data[1] = 1;
					data[2] = 1;
					data[3] = 1;
					eth_test->flags |= ETH_TEST_FL_FAILED;
					clear_bit(__IXGBE_TESTING,
						  &adapter->state);
					goto skip_ol_tests;
				}
			}
		}

		if (if_running)
			/* indicate we're in test mode */
			dev_close(netdev);
		else
			ixgbe_reset(adapter);

		e_info(hw, "register testing starting\n");
		if (ixgbe_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		e_info(hw, "eeprom testing starting\n");
		if (ixgbe_eeprom_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbe_reset(adapter);
		e_info(hw, "interrupt testing starting\n");
		if (ixgbe_intr_test(adapter, &data[2]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* If SRIOV or VMDq is enabled then skip MAC
		 * loopback diagnostic. */
		if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
				      IXGBE_FLAG_VMDQ_ENABLED)) {
			e_info(hw, "Skip MAC loopback diagnostic in VT "
			       "mode\n");
			data[3] = 0;
			goto skip_loopback;
		}

		ixgbe_reset(adapter);
		e_info(hw, "loopback testing starting\n");
		if (ixgbe_loopback_test(adapter, &data[3]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

skip_loopback:
		ixgbe_reset(adapter);

		clear_bit(__IXGBE_TESTING, &adapter->state);
		if (if_running)
			dev_open(netdev);
	} else {
		e_info(hw, "online testing starting\n");
		/* Online tests */
		if (ixgbe_link_test(adapter, &data[4]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Online tests aren't run; pass by default */
		data[0] = 0;
		data[1] = 0;
		data[2] = 0;
		data[3] = 0;

		clear_bit(__IXGBE_TESTING, &adapter->state);
	}
skip_ol_tests:
	msleep_interruptible(4 * 1000);
}
static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
			       struct ethtool_wolinfo *wol)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int retval = 1;
	u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK;

	/* WOL not supported except for the following */
	switch(hw->device_id) {
	case IXGBE_DEV_ID_82599_SFP:
		/* Only these subdevices can support WOL */
		switch (hw->subsystem_device_id) {
		case IXGBE_SUBDEV_ID_82599_560FLR:
			/* only support first port */
			if (hw->bus.func != 0) {
				wol->supported = 0;
				break;
			}
		case IXGBE_SUBDEV_ID_82599_SFP:
			retval = 0;
			break;
		default:
			wol->supported = 0;
			break;
		}
		break;
	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
		/* All except this subdevice support WOL */
		if (hw->subsystem_device_id ==
		    IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) {
			wol->supported = 0;
			break;
		}
		retval = 0;
		break;
	case IXGBE_DEV_ID_82599_KX4:
		retval = 0;
		break;
	case IXGBE_DEV_ID_X540T:
		/* check eeprom to see if enabled wol */
		if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) ||
		    ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) &&
		     (hw->bus.func == 0))) {
			retval = 0;
			break;
		}

		/* All others not supported */
		wol->supported = 0;
		break;
	default:
		wol->supported = 0;
	}

	return retval;
}
static void ixgbe_get_wol(struct net_device *netdev,
			  struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	wol->supported = WAKE_UCAST | WAKE_MCAST |
			 WAKE_BCAST | WAKE_MAGIC;
	wol->wolopts = 0;

	if (ixgbe_wol_exclusion(adapter, wol) ||
	    !device_can_wakeup(&adapter->pdev->dev))
		return;

	if (adapter->wol & IXGBE_WUFC_EX)
		wol->wolopts |= WAKE_UCAST;
	if (adapter->wol & IXGBE_WUFC_MC)
		wol->wolopts |= WAKE_MCAST;
	if (adapter->wol & IXGBE_WUFC_BC)
		wol->wolopts |= WAKE_BCAST;
	if (adapter->wol & IXGBE_WUFC_MAG)
		wol->wolopts |= WAKE_MAGIC;
}
static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
		return -EOPNOTSUPP;

	if (ixgbe_wol_exclusion(adapter, wol))
		return wol->wolopts ? -EOPNOTSUPP : 0;

	adapter->wol = 0;

	if (wol->wolopts & WAKE_UCAST)
		adapter->wol |= IXGBE_WUFC_EX;
	if (wol->wolopts & WAKE_MCAST)
		adapter->wol |= IXGBE_WUFC_MC;
	if (wol->wolopts & WAKE_BCAST)
		adapter->wol |= IXGBE_WUFC_BC;
	if (wol->wolopts & WAKE_MAGIC)
		adapter->wol |= IXGBE_WUFC_MAG;

	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);

	return 0;
}
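/*
 * Wake-on-LAN configuration: "ethtool -s eth0 wol g", for example, maps
 * WAKE_MAGIC to IXGBE_WUFC_MAG above, and device_set_wakeup_enable() records
 * with the PM core whether any wake source is currently armed.
 */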
static int ixgbe_nway_reset(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbe_reinit_locked(adapter);

	return 0;
}
static int ixgbe_set_phys_id(struct net_device *netdev,
			     enum ethtool_phys_id_state state)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
		return 2;

	case ETHTOOL_ID_ON:
		hw->mac.ops.led_on(hw, IXGBE_LED_ON);
		break;

	case ETHTOOL_ID_OFF:
		hw->mac.ops.led_off(hw, IXGBE_LED_ON);
		break;

	case ETHTOOL_ID_INACTIVE:
		/* Restore LED settings */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_LEDCTL, adapter->led_reg);
		break;
	}

	return 0;
}
static int ixgbe_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit;

	/* only valid if in constant ITR mode */
	if (adapter->rx_itr_setting <= 1)
		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
	else
		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;

	/* if in mixed tx/rx queues per vector mode, report only rx settings */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		return 0;

	/* only valid if in constant ITR mode */
	if (adapter->tx_itr_setting <= 1)
		ec->tx_coalesce_usecs = adapter->tx_itr_setting;
	else
		ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;

	return 0;
}
/*
 * this function must be called before setting the new value of
 * rx_itr_setting
 */
static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter,
			     struct ethtool_coalesce *ec)
{
	struct net_device *netdev = adapter->netdev;

	if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
		return false;

	/* if interrupt rate is too high then disable RSC */
	if (ec->rx_coalesce_usecs != 1 &&
	    ec->rx_coalesce_usecs <= (IXGBE_MIN_RSC_ITR >> 2)) {
		if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
			e_info(probe, "rx-usecs set too low, disabling RSC\n");
			adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
			return true;
		}
	/* check the feature flag value and enable RSC if necessary */
	} else if ((netdev->features & NETIF_F_LRO) &&
		   !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
		e_info(probe, "rx-usecs set to %d, re-enabling RSC\n",
		       ec->rx_coalesce_usecs);
		adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
		return true;
	}

	return false;
}
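/*
 * Interrupt coalescing: rx/tx-usecs values of 0 and 1 are stored as-is
 * (1 selects dynamic ITR, started at the 20K/10K interrupts-per-second
 * defaults below); larger values are microseconds and are kept shifted left
 * by two, matching the units handed to ixgbe_write_eitr().  Set with, for
 * example, "ethtool -C eth0 rx-usecs 50".
 */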
static int ixgbe_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_q_vector *q_vector;
	int i;
	int num_vectors;
	u16 tx_itr_param, rx_itr_param;
	bool need_reset = false;

	/* don't accept tx specific changes if we've got mixed RxTx vectors */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count
	    && ec->tx_coalesce_usecs)
		return -EINVAL;

	if (ec->tx_max_coalesced_frames_irq)
		adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq;

	if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
	    (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
		return -EINVAL;

	/* check the old value and enable RSC if necessary */
	need_reset = ixgbe_update_rsc(adapter, ec);

	if (ec->rx_coalesce_usecs > 1)
		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
		adapter->rx_itr_setting = ec->rx_coalesce_usecs;

	if (adapter->rx_itr_setting == 1)
		rx_itr_param = IXGBE_20K_ITR;
	else
		rx_itr_param = adapter->rx_itr_setting;

	if (ec->tx_coalesce_usecs > 1)
		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
	else
		adapter->tx_itr_setting = ec->tx_coalesce_usecs;

	if (adapter->tx_itr_setting == 1)
		tx_itr_param = IXGBE_10K_ITR;
	else
		tx_itr_param = adapter->tx_itr_setting;

	if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
		num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
	else
		num_vectors = 1;

	for (i = 0; i < num_vectors; i++) {
		q_vector = adapter->q_vector[i];
		q_vector->tx.work_limit = adapter->tx_work_limit;
		if (q_vector->tx.count && !q_vector->rx.count)
			/* tx only */
			q_vector->itr = tx_itr_param;
		else
			/* rx only or mixed */
			q_vector->itr = rx_itr_param;
		ixgbe_write_eitr(q_vector);
	}

	/*
	 * do reset here at the end to make sure EITR==0 case is handled
	 * correctly w.r.t stopping tx, and changing TXDCTL.WTHRESH settings
	 * also locks in RSC enable/disable which requires reset
	 */
	if (need_reset)
		ixgbe_do_reset(netdev);

	return 0;
}
static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	union ixgbe_atr_input *mask = &adapter->fdir_mask;
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct hlist_node *node, *node2;
	struct ixgbe_fdir_filter *rule = NULL;

	/* report total rule count */
	cmd->data = (1024 << adapter->fdir_pballoc) - 2;

	hlist_for_each_entry_safe(rule, node, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		if (fsp->location <= rule->sw_idx)
			break;
	}

	if (!rule || fsp->location != rule->sw_idx)
		return -EINVAL;

	/* fill out the flow spec entry */

	/* set flow type field */
	switch (rule->filter.formatted.flow_type) {
	case IXGBE_ATR_FLOW_TYPE_TCPV4:
		fsp->flow_type = TCP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_UDPV4:
		fsp->flow_type = UDP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_SCTPV4:
		fsp->flow_type = SCTP_V4_FLOW;
		break;
	case IXGBE_ATR_FLOW_TYPE_IPV4:
		fsp->flow_type = IP_USER_FLOW;
		fsp->h_u.usr_ip4_spec.ip_ver = ETH_RX_NFC_IP4;
		fsp->h_u.usr_ip4_spec.proto = 0;
		fsp->m_u.usr_ip4_spec.proto = 0;
		break;
	default:
		return -EINVAL;
	}

	fsp->h_u.tcp_ip4_spec.psrc = rule->filter.formatted.src_port;
	fsp->m_u.tcp_ip4_spec.psrc = mask->formatted.src_port;
	fsp->h_u.tcp_ip4_spec.pdst = rule->filter.formatted.dst_port;
	fsp->m_u.tcp_ip4_spec.pdst = mask->formatted.dst_port;
	fsp->h_u.tcp_ip4_spec.ip4src = rule->filter.formatted.src_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4src = mask->formatted.src_ip[0];
	fsp->h_u.tcp_ip4_spec.ip4dst = rule->filter.formatted.dst_ip[0];
	fsp->m_u.tcp_ip4_spec.ip4dst = mask->formatted.dst_ip[0];
	fsp->h_ext.vlan_tci = rule->filter.formatted.vlan_id;
	fsp->m_ext.vlan_tci = mask->formatted.vlan_id;
	fsp->h_ext.vlan_etype = rule->filter.formatted.flex_bytes;
	fsp->m_ext.vlan_etype = mask->formatted.flex_bytes;
	fsp->h_ext.data[1] = htonl(rule->filter.formatted.vm_pool);
	fsp->m_ext.data[1] = htonl(mask->formatted.vm_pool);
	fsp->flow_type |= FLOW_EXT;

	/* record action */
	if (rule->action == IXGBE_FDIR_DROP_QUEUE)
		fsp->ring_cookie = RX_CLS_FLOW_DISC;
	else
		fsp->ring_cookie = rule->action;

	return 0;
}
static int ixgbe_get_ethtool_fdir_all(struct ixgbe_adapter *adapter,
				      struct ethtool_rxnfc *cmd,
				      u32 *rule_locs)
{
	struct hlist_node *node, *node2;
	struct ixgbe_fdir_filter *rule;
	int cnt = 0;

	/* report total rule count */
	cmd->data = (1024 << adapter->fdir_pballoc) - 2;

	hlist_for_each_entry_safe(rule, node, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		if (cnt == cmd->rule_cnt)
			return -EMSGSIZE;
		rule_locs[cnt] = rule->sw_idx;
		cnt++;
	}

	cmd->rule_cnt = cnt;

	return 0;
}
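/*
 * The helpers above back ETHTOOL_GRXCLSRULE and ETHTOOL_GRXCLSRLALL, which is
 * how "ethtool -n eth0 rule <loc>" and the full rule listing retrieve
 * installed Flow Director filters; cmd->data reports the table capacity
 * derived from fdir_pballoc.
 */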
static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
			   u32 *rule_locs)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = adapter->num_rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRLCNT:
		cmd->rule_cnt = adapter->fdir_filter_count;
		ret = 0;
		break;
	case ETHTOOL_GRXCLSRULE:
		ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_GRXCLSRLALL:
		ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs);
		break;
	default:
		break;
	}

	return ret;
}
static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					   struct ixgbe_fdir_filter *input,
					   u16 sw_idx)
{
	struct ixgbe_hw *hw = &adapter->hw;
	struct hlist_node *node, *node2, *parent;
	struct ixgbe_fdir_filter *rule;
	int err = -EINVAL;

	parent = NULL;
	rule = NULL;

	hlist_for_each_entry_safe(rule, node, node2,
				  &adapter->fdir_filter_list, fdir_node) {
		/* hash found, or no matching entry */
		if (rule->sw_idx >= sw_idx)
			break;
		parent = node;
	}

	/* if there is an old rule occupying our place remove it */
	if (rule && (rule->sw_idx == sw_idx)) {
		if (!input || (rule->filter.formatted.bkt_hash !=
			       input->filter.formatted.bkt_hash)) {
			err = ixgbe_fdir_erase_perfect_filter_82599(hw,
								    sw_idx);
		}

		hlist_del(&rule->fdir_node);
		kfree(rule);
		adapter->fdir_filter_count--;
	}

	/*
	 * If no input this was a delete, err should be 0 if a rule was
	 * successfully found and removed from the list else -EINVAL
	 */
	if (!input)
		return err;

	/* initialize node and set software index */
	INIT_HLIST_NODE(&input->fdir_node);

	/* add filter to the list */
	if (parent)
		hlist_add_after(parent, &input->fdir_node);
	else
		hlist_add_head(&input->fdir_node,
			       &adapter->fdir_filter_list);

	/* update counts */
	adapter->fdir_filter_count++;

	return 0;
}
static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp,
				       u8 *flow_type)
{
	switch (fsp->flow_type & ~FLOW_EXT) {
	case TCP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
		break;
	case UDP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
		break;
	case SCTP_V4_FLOW:
		*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
		break;
	case IP_USER_FLOW:
		switch (fsp->h_u.usr_ip4_spec.proto) {
		case IPPROTO_TCP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
			break;
		case IPPROTO_UDP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
			break;
		case IPPROTO_SCTP:
			*flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
			break;
		case IPPROTO_IP:
			if (!fsp->m_u.usr_ip4_spec.proto) {
				*flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
				break;
			}
		default:
			return 0;
		}
		break;
	default:
		return 0;
	}

	return 1;
}
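/*
 * Rule insertion below is driven by ETHTOOL_SRXCLSRLINS, for example
 * "ethtool -N eth0 flow-type tcp4 dst-port 80 action 2 loc 1".  Only one
 * field mask is supported per port: the mask of the first rule is programmed
 * into hardware and every later rule must use the same mask.
 */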
static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fdir_filter *input;
	union ixgbe_atr_input mask;
	int err;

	if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
		return -EOPNOTSUPP;

	/*
	 * Don't allow programming if the action is a queue greater than
	 * the number of online Rx queues.
	 */
	if ((fsp->ring_cookie != RX_CLS_FLOW_DISC) &&
	    (fsp->ring_cookie >= adapter->num_rx_queues))
		return -EINVAL;

	/* Don't allow indexes to exist outside of available space */
	if (fsp->location >= ((1024 << adapter->fdir_pballoc) - 2)) {
		e_err(drv, "Location out of range\n");
		return -EINVAL;
	}

	input = kzalloc(sizeof(*input), GFP_ATOMIC);
	if (!input)
		return -ENOMEM;

	memset(&mask, 0, sizeof(union ixgbe_atr_input));

	/* set SW index */
	input->sw_idx = fsp->location;

	/* record flow type */
	if (!ixgbe_flowspec_to_flow_type(fsp,
					 &input->filter.formatted.flow_type)) {
		e_err(drv, "Unrecognized flow type\n");
		goto err_out;
	}

	mask.formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK |
				   IXGBE_ATR_L4TYPE_MASK;

	if (input->filter.formatted.flow_type == IXGBE_ATR_FLOW_TYPE_IPV4)
		mask.formatted.flow_type &= IXGBE_ATR_L4TYPE_IPV6_MASK;

	/* Copy input into formatted structures */
	input->filter.formatted.src_ip[0] = fsp->h_u.tcp_ip4_spec.ip4src;
	mask.formatted.src_ip[0] = fsp->m_u.tcp_ip4_spec.ip4src;
	input->filter.formatted.dst_ip[0] = fsp->h_u.tcp_ip4_spec.ip4dst;
	mask.formatted.dst_ip[0] = fsp->m_u.tcp_ip4_spec.ip4dst;
	input->filter.formatted.src_port = fsp->h_u.tcp_ip4_spec.psrc;
	mask.formatted.src_port = fsp->m_u.tcp_ip4_spec.psrc;
	input->filter.formatted.dst_port = fsp->h_u.tcp_ip4_spec.pdst;
	mask.formatted.dst_port = fsp->m_u.tcp_ip4_spec.pdst;

	if (fsp->flow_type & FLOW_EXT) {
		input->filter.formatted.vm_pool =
				(unsigned char)ntohl(fsp->h_ext.data[1]);
		mask.formatted.vm_pool =
				(unsigned char)ntohl(fsp->m_ext.data[1]);
		input->filter.formatted.vlan_id = fsp->h_ext.vlan_tci;
		mask.formatted.vlan_id = fsp->m_ext.vlan_tci;
		input->filter.formatted.flex_bytes =
						fsp->h_ext.vlan_etype;
		mask.formatted.flex_bytes = fsp->m_ext.vlan_etype;
	}

	/* determine if we need to drop or route the packet */
	if (fsp->ring_cookie == RX_CLS_FLOW_DISC)
		input->action = IXGBE_FDIR_DROP_QUEUE;
	else
		input->action = fsp->ring_cookie;

	spin_lock(&adapter->fdir_perfect_lock);

	if (hlist_empty(&adapter->fdir_filter_list)) {
		/* save mask and program input mask into HW */
		memcpy(&adapter->fdir_mask, &mask, sizeof(mask));
		err = ixgbe_fdir_set_input_mask_82599(hw, &mask);
		if (err) {
			e_err(drv, "Error writing mask\n");
			goto err_out_w_lock;
		}
	} else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) {
		e_err(drv, "Only one mask supported per port\n");
		goto err_out_w_lock;
	}

	/* apply mask and compute/store hash */
	ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask);

	/* program filters to filter memory */
	err = ixgbe_fdir_write_perfect_filter_82599(hw,
				&input->filter, input->sw_idx,
				(input->action == IXGBE_FDIR_DROP_QUEUE) ?
				IXGBE_FDIR_DROP_QUEUE :
				adapter->rx_ring[input->action]->reg_idx);
	if (err)
		goto err_out_w_lock;

	ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx);

	spin_unlock(&adapter->fdir_perfect_lock);

	return err;
err_out_w_lock:
	spin_unlock(&adapter->fdir_perfect_lock);
err_out:
	kfree(input);
	return -EINVAL;
}
static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter,
					struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fsp =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	int err;

	spin_lock(&adapter->fdir_perfect_lock);
	err = ixgbe_update_ethtool_fdir_entry(adapter, NULL, fsp->location);
	spin_unlock(&adapter->fdir_perfect_lock);

	return err;
}
static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (cmd->cmd) {
	case ETHTOOL_SRXCLSRLINS:
		ret = ixgbe_add_ethtool_fdir_entry(adapter, cmd);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		ret = ixgbe_del_ethtool_fdir_entry(adapter, cmd);
		break;
	default:
		break;
	}

	return ret;
}
static const struct ethtool_ops ixgbe_ethtool_ops = {
	.get_settings           = ixgbe_get_settings,
	.set_settings           = ixgbe_set_settings,
	.get_drvinfo            = ixgbe_get_drvinfo,
	.get_regs_len           = ixgbe_get_regs_len,
	.get_regs               = ixgbe_get_regs,
	.get_wol                = ixgbe_get_wol,
	.set_wol                = ixgbe_set_wol,
	.nway_reset             = ixgbe_nway_reset,
	.get_link               = ethtool_op_get_link,
	.get_eeprom_len         = ixgbe_get_eeprom_len,
	.get_eeprom             = ixgbe_get_eeprom,
	.set_eeprom             = ixgbe_set_eeprom,
	.get_ringparam          = ixgbe_get_ringparam,
	.set_ringparam          = ixgbe_set_ringparam,
	.get_pauseparam         = ixgbe_get_pauseparam,
	.set_pauseparam         = ixgbe_set_pauseparam,
	.get_msglevel           = ixgbe_get_msglevel,
	.set_msglevel           = ixgbe_set_msglevel,
	.self_test              = ixgbe_diag_test,
	.get_strings            = ixgbe_get_strings,
	.set_phys_id            = ixgbe_set_phys_id,
	.get_sset_count         = ixgbe_get_sset_count,
	.get_ethtool_stats      = ixgbe_get_ethtool_stats,
	.get_coalesce           = ixgbe_get_coalesce,
	.set_coalesce           = ixgbe_set_coalesce,
	.get_rxnfc              = ixgbe_get_rxnfc,
	.set_rxnfc              = ixgbe_set_rxnfc,
};
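/*
 * Attach the ethtool_ops table to the netdev; called from the driver's probe
 * path while the net_device is being set up.
 */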
void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops);
}