/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2007 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  Linux NICS <linux.nics@intel.com>
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
/* ethtool support for ixgbe */
#include <linux/types.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>

#include "ixgbe.h"

#define IXGBE_ALL_RAR_ENTRIES 16
struct ixgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
};

#define IXGBE_STAT(m) sizeof(((struct ixgbe_adapter *)0)->m), \
		      offsetof(struct ixgbe_adapter, m)
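/*
 * IXGBE_STAT(m) expands to the "sizeof(member), offsetof(member)" pair, so
 * each table entry below carries the ethtool name plus enough layout
 * information for ixgbe_get_ethtool_stats() to copy the value straight out
 * of struct ixgbe_adapter, e.g. {"rx_packets", IXGBE_STAT(net_stats.rx_packets)}
 * records the size and offset of adapter->net_stats.rx_packets.
 */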
static struct ixgbe_stats ixgbe_gstrings_stats[] = {
	{"rx_packets", IXGBE_STAT(net_stats.rx_packets)},
	{"tx_packets", IXGBE_STAT(net_stats.tx_packets)},
	{"rx_bytes", IXGBE_STAT(net_stats.rx_bytes)},
	{"tx_bytes", IXGBE_STAT(net_stats.tx_bytes)},
	{"lsc_int", IXGBE_STAT(lsc_int)},
	{"tx_busy", IXGBE_STAT(tx_busy)},
	{"non_eop_descs", IXGBE_STAT(non_eop_descs)},
	{"rx_errors", IXGBE_STAT(net_stats.rx_errors)},
	{"tx_errors", IXGBE_STAT(net_stats.tx_errors)},
	{"rx_dropped", IXGBE_STAT(net_stats.rx_dropped)},
	{"tx_dropped", IXGBE_STAT(net_stats.tx_dropped)},
	{"multicast", IXGBE_STAT(net_stats.multicast)},
	{"broadcast", IXGBE_STAT(stats.bprc)},
	{"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0])},
	{"collisions", IXGBE_STAT(net_stats.collisions)},
	{"rx_over_errors", IXGBE_STAT(net_stats.rx_over_errors)},
	{"rx_crc_errors", IXGBE_STAT(net_stats.rx_crc_errors)},
	{"rx_frame_errors", IXGBE_STAT(net_stats.rx_frame_errors)},
	{"rx_fifo_errors", IXGBE_STAT(net_stats.rx_fifo_errors)},
	{"rx_missed_errors", IXGBE_STAT(net_stats.rx_missed_errors)},
	{"tx_aborted_errors", IXGBE_STAT(net_stats.tx_aborted_errors)},
	{"tx_carrier_errors", IXGBE_STAT(net_stats.tx_carrier_errors)},
	{"tx_fifo_errors", IXGBE_STAT(net_stats.tx_fifo_errors)},
	{"tx_heartbeat_errors", IXGBE_STAT(net_stats.tx_heartbeat_errors)},
	{"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
	{"tx_restart_queue", IXGBE_STAT(restart_queue)},
	{"rx_long_length_errors", IXGBE_STAT(stats.roc)},
	{"rx_short_length_errors", IXGBE_STAT(stats.ruc)},
	{"tx_tcp4_seg_ctxt", IXGBE_STAT(hw_tso_ctxt)},
	{"tx_tcp6_seg_ctxt", IXGBE_STAT(hw_tso6_ctxt)},
	{"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)},
	{"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)},
	{"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)},
	{"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)},
	{"rx_csum_offload_good", IXGBE_STAT(hw_csum_rx_good)},
	{"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)},
	{"tx_csum_offload_ctxt", IXGBE_STAT(hw_csum_tx_good)},
	{"rx_header_split", IXGBE_STAT(rx_hdr_split)},
	{"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)},
	{"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)},
};
#define IXGBE_QUEUE_STATS_LEN \
	((((struct ixgbe_adapter *)netdev->priv)->num_tx_queues + \
	  ((struct ixgbe_adapter *)netdev->priv)->num_rx_queues) * \
	 (sizeof(struct ixgbe_queue_stats) / sizeof(u64)))
#define IXGBE_GLOBAL_STATS_LEN \
	sizeof(ixgbe_gstrings_stats) / sizeof(struct ixgbe_stats)
#define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
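/*
 * "ethtool -S" therefore reports IXGBE_STATS_LEN entries: the global
 * counters in ixgbe_gstrings_stats above plus one ixgbe_queue_stats block
 * (packets and bytes) per Tx queue and per Rx queue.
 */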
static int ixgbe_get_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
	ecmd->advertising = (ADVERTISED_10000baseT_Full | ADVERTISED_FIBRE);
	ecmd->port = PORT_FIBRE;
	ecmd->transceiver = XCVR_EXTERNAL;

	if (netif_carrier_ok(adapter->netdev)) {
		ecmd->speed = SPEED_10000;
		ecmd->duplex = DUPLEX_FULL;
	} else {
		/* no link: report speed and duplex as unknown */
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	ecmd->autoneg = AUTONEG_DISABLE;

	return 0;
}
static int ixgbe_set_settings(struct net_device *netdev,
			      struct ethtool_cmd *ecmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	/* 10GbE full duplex over fibre is the only supported link mode */
	if (ecmd->autoneg == AUTONEG_ENABLE ||
	    ecmd->speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)
		return -EINVAL;

	if (netif_running(adapter->netdev)) {
		ixgbe_down(adapter);
		ixgbe_reset(adapter);
		ixgbe_up(adapter);
	} else {
		ixgbe_reset(adapter);
	}

	return 0;
}
static void ixgbe_get_pauseparam(struct net_device *netdev,
				 struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	pause->autoneg = AUTONEG_DISABLE;

	if (hw->fc.type == ixgbe_fc_rx_pause) {
		pause->rx_pause = 1;
	} else if (hw->fc.type == ixgbe_fc_tx_pause) {
		pause->tx_pause = 1;
	} else if (hw->fc.type == ixgbe_fc_full) {
		pause->rx_pause = 1;
		pause->tx_pause = 1;
	}
}
static int ixgbe_set_pauseparam(struct net_device *netdev,
				struct ethtool_pauseparam *pause)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;

	if (pause->autoneg == AUTONEG_ENABLE)
		return -EINVAL;

	if (pause->rx_pause && pause->tx_pause)
		hw->fc.type = ixgbe_fc_full;
	else if (pause->rx_pause && !pause->tx_pause)
		hw->fc.type = ixgbe_fc_rx_pause;
	else if (!pause->rx_pause && pause->tx_pause)
		hw->fc.type = ixgbe_fc_tx_pause;
	else if (!pause->rx_pause && !pause->tx_pause)
		hw->fc.type = ixgbe_fc_none;

	hw->fc.original_type = hw->fc.type;

	if (netif_running(adapter->netdev)) {
		ixgbe_down(adapter);
		ixgbe_up(adapter);
	} else {
		ixgbe_reset(adapter);
	}

	return 0;
}
static u32 ixgbe_get_rx_csum(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return (adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED);
}

static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	if (data)
		adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
	else
		adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;

	if (netif_running(netdev)) {
		ixgbe_down(adapter);
		ixgbe_up(adapter);
	} else {
		ixgbe_reset(adapter);
	}

	return 0;
}
static u32 ixgbe_get_tx_csum(struct net_device *netdev)
{
	return (netdev->features & NETIF_F_HW_CSUM) != 0;
}

static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
{
	if (data)
		netdev->features |= NETIF_F_HW_CSUM;
	else
		netdev->features &= ~NETIF_F_HW_CSUM;

	return 0;
}
static int ixgbe_set_tso(struct net_device *netdev, u32 data)
{
	if (data) {
		netdev->features |= NETIF_F_TSO;
		netdev->features |= NETIF_F_TSO6;
	} else {
		netdev->features &= ~NETIF_F_TSO;
		netdev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}
static u32 ixgbe_get_msglevel(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->msg_enable;
}

static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	adapter->msg_enable = data;
}
static int ixgbe_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN 1128
	return IXGBE_REGS_LEN * sizeof(u32);
}
#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_
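/*
 * ixgbe_get_regs() below fills a fixed snapshot of IXGBE_REGS_LEN (1128)
 * 32-bit words: general, NVM, interrupt, flow control, Rx/Tx DMA, wake-up,
 * DCB, statistics, MAC and diagnostic registers, in that order.  The buffer
 * size reported by ixgbe_get_regs_len() must stay in sync with the highest
 * index written here.
 */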
static void ixgbe_get_regs(struct net_device *netdev,
			   struct ethtool_regs *regs, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u8 i;

	memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));

	regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
	/* General Registers */
	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);
	/* NVM Register */
	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC);
	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA);
	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
	regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
	regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
	regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
	regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC);
	/* Interrupt */
	regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICR);
	regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
	regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
	regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
	regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
	regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
	regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
	regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
	regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL);
	regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);
	/* Flow Control */
	regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
	regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0));
	regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
	regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
	regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
	for (i = 0; i < 8; i++)
		regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
	for (i = 0; i < 8; i++)
		regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
	regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
	regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
	/* Receive DMA */
	for (i = 0; i < 64; i++)
		regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
	for (i = 0; i < 64; i++)
		regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
	for (i = 0; i < 64; i++)
		regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
	for (i = 0; i < 64; i++)
		regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
	for (i = 0; i < 64; i++)
		regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
	for (i = 0; i < 64; i++)
		regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
	regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
	for (i = 0; i < 8; i++)
		regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
	regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);
	/* Receive */
	regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
	for (i = 0; i < 16; i++)
		regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
	for (i = 0; i < 16; i++)
		regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
	regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE);
	regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
	regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
	regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
	for (i = 0; i < 8; i++)
		regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
	for (i = 0; i < 8; i++)
		regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
	regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);
	/* Transmit */
	for (i = 0; i < 32; i++)
		regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
	for (i = 0; i < 32; i++)
		regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
	for (i = 0; i < 32; i++)
		regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
	for (i = 0; i < 32; i++)
		regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
	for (i = 0; i < 32; i++)
		regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
	for (i = 0; i < 32; i++)
		regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
	regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
	for (i = 0; i < 16; i++)
		regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
	regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
	for (i = 0; i < 8; i++)
		regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
	regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);
	/* Wake Up */
	regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
	regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
	regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
	regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
	regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
	regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
	regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
	regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
	regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT);
	/* DCB */
	regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
	regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
	regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
	regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
	for (i = 0; i < 8; i++)
		regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
	for (i = 0; i < 8; i++)
		regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
	for (i = 0; i < 8; i++)
		regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
	for (i = 0; i < 8; i++)
		regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
	for (i = 0; i < 8; i++)
		regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i));
	for (i = 0; i < 8; i++)
		regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i));
	/* Statistics */
	regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
	regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
	regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
	regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
	for (i = 0; i < 8; i++)
		regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
	regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
	regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
	regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
	regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
	regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
	regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
	regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
	for (i = 0; i < 8; i++)
		regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
	regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
	regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
	regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
	regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
	regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
	regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
	regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
	regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
	regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
	regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
	regs_buff[942] = IXGBE_GET_STAT(adapter, gorc);
	regs_buff[944] = IXGBE_GET_STAT(adapter, gotc);
	for (i = 0; i < 8; i++)
		regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
	regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
	regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
	regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
	regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
	regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
	regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
	regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
	regs_buff[961] = IXGBE_GET_STAT(adapter, tor);
	regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
	regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
	regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
	regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
	regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
	regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
	regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
	regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
	regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
	regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
	regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
	for (i = 0; i < 16; i++)
		regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);
	/* MAC */
	regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
	regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
	regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
	regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
	regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
	regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
	regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
	regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
	regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
	regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
	regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
	regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
	regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
	regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
	regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
	regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
	regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
	regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
	regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
	regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
	regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
	regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
	regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
	regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
	regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
	regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
	regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
	regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
	regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
	regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);
	/* Diagnostic */
	regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
	regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
	regs_buff[1081] = IXGBE_READ_REG(hw, IXGBE_RIC_DW0);
	regs_buff[1082] = IXGBE_READ_REG(hw, IXGBE_RIC_DW1);
	regs_buff[1083] = IXGBE_READ_REG(hw, IXGBE_RIC_DW2);
	regs_buff[1084] = IXGBE_READ_REG(hw, IXGBE_RIC_DW3);
	regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
	regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
	regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
	regs_buff[1096] = IXGBE_READ_REG(hw, IXGBE_TIC_DW0);
	regs_buff[1097] = IXGBE_READ_REG(hw, IXGBE_TIC_DW1);
	regs_buff[1098] = IXGBE_READ_REG(hw, IXGBE_TIC_DW2);
	regs_buff[1099] = IXGBE_READ_REG(hw, IXGBE_TIC_DW3);
	regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
	regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
	regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
	regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1);
	regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2);
	regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3);
	regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
	regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0);
	regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1);
	regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
	regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
	for (i = 0; i < 8; i++)
		regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
	regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
	regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
	regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
	regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
	regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
	regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
	regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
	regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
	regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);
}
static int ixgbe_get_eeprom_len(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->hw.eeprom.word_size * 2;
}
static int ixgbe_get_eeprom(struct net_device *netdev,
			    struct ethtool_eeprom *eeprom, u8 *bytes)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u16 *eeprom_buff;
	int first_word, last_word, eeprom_len;
	int ret_val = 0;
	u16 i;

	if (eeprom->len == 0)
		return -EINVAL;

	eeprom->magic = hw->vendor_id | (hw->device_id << 16);

	first_word = eeprom->offset >> 1;
	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
	eeprom_len = last_word - first_word + 1;

	eeprom_buff = kmalloc(sizeof(u16) * eeprom_len, GFP_KERNEL);
	if (!eeprom_buff)
		return -ENOMEM;

	for (i = 0; i < eeprom_len; i++) {
		if ((ret_val = ixgbe_read_eeprom(hw, first_word + i,
						 &eeprom_buff[i])))
			break;
	}

	/* Device's eeprom is always little-endian, word addressable */
	for (i = 0; i < eeprom_len; i++)
		le16_to_cpus(&eeprom_buff[i]);

	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1), eeprom->len);
	kfree(eeprom_buff);

	return ret_val;
}
static void ixgbe_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *drvinfo)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	strncpy(drvinfo->driver, ixgbe_driver_name, 32);
	strncpy(drvinfo->version, ixgbe_driver_version, 32);
	strncpy(drvinfo->fw_version, "N/A", 32);
	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
	drvinfo->n_stats = IXGBE_STATS_LEN;
	drvinfo->regdump_len = ixgbe_get_regs_len(netdev);
}
static void ixgbe_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *tx_ring = adapter->tx_ring;
	struct ixgbe_ring *rx_ring = adapter->rx_ring;

	ring->rx_max_pending = IXGBE_MAX_RXD;
	ring->tx_max_pending = IXGBE_MAX_TXD;
	ring->rx_mini_max_pending = 0;
	ring->rx_jumbo_max_pending = 0;
	ring->rx_pending = rx_ring->count;
	ring->tx_pending = tx_ring->count;
	ring->rx_mini_pending = 0;
	ring->rx_jumbo_pending = 0;
}
static int ixgbe_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_tx_buffer *old_buf;
	struct ixgbe_rx_buffer *old_rx_buf;
	void *old_desc;
	int i, err;
	u32 new_rx_count, new_tx_count, old_size;
	dma_addr_t old_dma;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_rx_count = max(ring->rx_pending, (u32)IXGBE_MIN_RXD);
	new_rx_count = min(new_rx_count, (u32)IXGBE_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	new_tx_count = max(ring->tx_pending, (u32)IXGBE_MIN_TXD);
	new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == adapter->tx_ring->count) &&
	    (new_rx_count == adapter->rx_ring->count)) {
		/* nothing to do */
		return 0;
	}

	if (netif_running(adapter->netdev))
		ixgbe_down(adapter);
	/*
	 * We can't just free everything and then setup again,
	 * because the ISRs in MSI-X mode get passed pointers
	 * to the tx and rx ring structs.
	 */
	if (new_tx_count != adapter->tx_ring->count) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			/* Save existing descriptor ring */
			old_buf = adapter->tx_ring[i].tx_buffer_info;
			old_desc = adapter->tx_ring[i].desc;
			old_size = adapter->tx_ring[i].size;
			old_dma = adapter->tx_ring[i].dma;
			/* Try to allocate a new one */
			adapter->tx_ring[i].tx_buffer_info = NULL;
			adapter->tx_ring[i].desc = NULL;
			adapter->tx_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(adapter,
						       &adapter->tx_ring[i]);
			if (err) {
				/* Restore the old one so at least
				   the adapter still works, even if
				   we failed the request */
				adapter->tx_ring[i].tx_buffer_info = old_buf;
				adapter->tx_ring[i].desc = old_desc;
				adapter->tx_ring[i].size = old_size;
				adapter->tx_ring[i].dma = old_dma;
				goto err_setup;
			}
			/* Free the old buffer manually */
			vfree(old_buf);
			pci_free_consistent(adapter->pdev, old_size,
					    old_desc, old_dma);
		}
	}
	if (new_rx_count != adapter->rx_ring->count) {
		for (i = 0; i < adapter->num_rx_queues; i++) {

			old_rx_buf = adapter->rx_ring[i].rx_buffer_info;
			old_desc = adapter->rx_ring[i].desc;
			old_size = adapter->rx_ring[i].size;
			old_dma = adapter->rx_ring[i].dma;

			adapter->rx_ring[i].rx_buffer_info = NULL;
			adapter->rx_ring[i].desc = NULL;
			adapter->rx_ring[i].dma = 0;
			adapter->rx_ring[i].count = new_rx_count;
			err = ixgbe_setup_rx_resources(adapter,
						       &adapter->rx_ring[i]);
			if (err) {
				adapter->rx_ring[i].rx_buffer_info = old_rx_buf;
				adapter->rx_ring[i].desc = old_desc;
				adapter->rx_ring[i].size = old_size;
				adapter->rx_ring[i].dma = old_dma;
				goto err_setup;
			}

			vfree(old_rx_buf);
			pci_free_consistent(adapter->pdev, old_size, old_desc,
					    old_dma);
		}
	}

	err = 0;
err_setup:
	if (netif_running(adapter->netdev))
		ixgbe_up(adapter);

	return err;
}
static int ixgbe_get_sset_count(struct net_device *netdev, int sset)
{
	switch (sset) {
	case ETH_SS_STATS:
		return IXGBE_STATS_LEN;
	default:
		return -EOPNOTSUPP;
	}
}
static void ixgbe_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u64 *queue_stat;
	int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64);
	int j, k;
	int i;

	ixgbe_update_stats(adapter);
	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
		char *p = (char *)adapter + ixgbe_gstrings_stats[i].stat_offset;
		data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}
	for (j = 0; j < adapter->num_tx_queues; j++) {
		queue_stat = (u64 *)&adapter->tx_ring[j].stats;
		for (k = 0; k < stat_count; k++)
			data[i + k] = queue_stat[k];
		i += k;
	}
	for (j = 0; j < adapter->num_rx_queues; j++) {
		queue_stat = (u64 *)&adapter->rx_ring[j].stats;
		for (k = 0; k < stat_count; k++)
			data[i + k] = queue_stat[k];
		i += k;
	}
}
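/*
 * The string table built below must match ixgbe_get_ethtool_stats() entry
 * for entry: global stat names first, then per-queue packet/byte names in
 * the same Tx-then-Rx queue order.
 */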
static void ixgbe_get_strings(struct net_device *netdev, u32 stringset,
			      u8 *data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	u8 *p = data;
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
			memcpy(p, ixgbe_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < adapter->num_tx_queues; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < adapter->num_rx_queues; i++) {
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		/* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */
		break;
	}
}
static void ixgbe_get_wol(struct net_device *netdev,
			  struct ethtool_wolinfo *wol)
{
	/* Wake-on-LAN is not supported by this driver */
	wol->supported = 0;
	wol->wolopts = 0;
}
static int ixgbe_nway_reset(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		ixgbe_down(adapter);
		ixgbe_reset(adapter);
		ixgbe_up(adapter);
	}

	return 0;
}
*netdev
, u32 data
)
835 struct ixgbe_adapter
*adapter
= netdev_priv(netdev
);
836 u32 led_reg
= IXGBE_READ_REG(&adapter
->hw
, IXGBE_LEDCTL
);
839 if (!data
|| data
> 300)
842 for (i
= 0; i
< (data
* 1000); i
+= 400) {
843 ixgbe_led_on(&adapter
->hw
, IXGBE_LED_ON
);
844 msleep_interruptible(200);
845 ixgbe_led_off(&adapter
->hw
, IXGBE_LED_ON
);
846 msleep_interruptible(200);
849 /* Restore LED settings */
850 IXGBE_WRITE_REG(&adapter
->hw
, IXGBE_LEDCTL
, led_reg
);
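/*
 * The adapter stores interrupt throttling as an interrupt rate (EITR,
 * interrupts per second) while ethtool exposes microseconds between
 * interrupts, so the coalesce handlers below convert both ways with
 * 1000000 / value; a value of 0 disables throttling.
 */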
static int ixgbe_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (adapter->rx_eitr == 0)
		ec->rx_coalesce_usecs = 0;
	else
		ec->rx_coalesce_usecs = 1000000 / adapter->rx_eitr;

	if (adapter->tx_eitr == 0)
		ec->tx_coalesce_usecs = 0;
	else
		ec->tx_coalesce_usecs = 1000000 / adapter->tx_eitr;

	ec->tx_max_coalesced_frames_irq = adapter->tx_ring[0].work_limit;

	return 0;
}
static int ixgbe_set_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *ec)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if ((ec->rx_coalesce_usecs > IXGBE_MAX_ITR_USECS) ||
	    ((ec->rx_coalesce_usecs > 0) &&
	     (ec->rx_coalesce_usecs < IXGBE_MIN_ITR_USECS)))
		return -EINVAL;
	if ((ec->tx_coalesce_usecs > IXGBE_MAX_ITR_USECS) ||
	    ((ec->tx_coalesce_usecs > 0) &&
	     (ec->tx_coalesce_usecs < IXGBE_MIN_ITR_USECS)))
		return -EINVAL;

	/* convert to rate of irq's per second */
	if (ec->rx_coalesce_usecs == 0)
		adapter->rx_eitr = 0;
	else
		adapter->rx_eitr = (1000000 / ec->rx_coalesce_usecs);

	if (ec->tx_coalesce_usecs == 0)
		adapter->tx_eitr = 0;
	else
		adapter->tx_eitr = (1000000 / ec->tx_coalesce_usecs);

	if (ec->tx_max_coalesced_frames_irq)
		adapter->tx_ring[0].work_limit =
			ec->tx_max_coalesced_frames_irq;

	if (netif_running(netdev)) {
		ixgbe_down(adapter);
		ixgbe_up(adapter);
	}

	return 0;
}
static struct ethtool_ops ixgbe_ethtool_ops = {
	.get_settings = ixgbe_get_settings,
	.set_settings = ixgbe_set_settings,
	.get_drvinfo = ixgbe_get_drvinfo,
	.get_regs_len = ixgbe_get_regs_len,
	.get_regs = ixgbe_get_regs,
	.get_wol = ixgbe_get_wol,
	.nway_reset = ixgbe_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_eeprom_len = ixgbe_get_eeprom_len,
	.get_eeprom = ixgbe_get_eeprom,
	.get_ringparam = ixgbe_get_ringparam,
	.set_ringparam = ixgbe_set_ringparam,
	.get_pauseparam = ixgbe_get_pauseparam,
	.set_pauseparam = ixgbe_set_pauseparam,
	.get_rx_csum = ixgbe_get_rx_csum,
	.set_rx_csum = ixgbe_set_rx_csum,
	.get_tx_csum = ixgbe_get_tx_csum,
	.set_tx_csum = ixgbe_set_tx_csum,
	.get_sg = ethtool_op_get_sg,
	.set_sg = ethtool_op_set_sg,
	.get_msglevel = ixgbe_get_msglevel,
	.set_msglevel = ixgbe_set_msglevel,
	.get_tso = ethtool_op_get_tso,
	.set_tso = ixgbe_set_tso,
	.get_strings = ixgbe_get_strings,
	.phys_id = ixgbe_phys_id,
	.get_sset_count = ixgbe_get_sset_count,
	.get_ethtool_stats = ixgbe_get_ethtool_stats,
	.get_coalesce = ixgbe_get_coalesce,
	.set_coalesce = ixgbe_set_coalesce,
};
void ixgbe_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops);
}
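/*
 * Typical userspace usage (for illustration; "ethX" is a placeholder):
 * "ethtool -S ethX" exercises ixgbe_get_strings()/ixgbe_get_ethtool_stats(),
 * "ethtool -d ethX" exercises ixgbe_get_regs(), and
 * "ethtool -C ethX rx-usecs 125" exercises ixgbe_set_coalesce().
 */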