drivers/net/ethernet/intel/ixgbevf/ethtool.c
/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2012 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
/* ethtool support for ixgbevf */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>

#include "ixgbevf.h"

#define IXGBE_ALL_RAR_ENTRIES 16
#ifdef ETHTOOL_GSTATS
struct ixgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	int sizeof_stat;
	int stat_offset;
	int base_stat_offset;
	int saved_reset_offset;
};

#define IXGBEVF_STAT(m, b, r)	sizeof(((struct ixgbevf_adapter *)0)->m), \
				offsetof(struct ixgbevf_adapter, m), \
				offsetof(struct ixgbevf_adapter, b), \
				offsetof(struct ixgbevf_adapter, r)
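/*
 * IXGBEVF_STAT(m, b, r) fills in the size of member m and the offsets of m,
 * its base snapshot b and its saved-across-reset value r within
 * struct ixgbevf_adapter; ixgbevf_get_ethtool_stats() combines the three
 * when reporting each counter.
 */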
static const struct ixgbe_stats ixgbe_gstrings_stats[] = {
	{"rx_packets", IXGBEVF_STAT(stats.vfgprc, stats.base_vfgprc,
				    stats.saved_reset_vfgprc)},
	{"tx_packets", IXGBEVF_STAT(stats.vfgptc, stats.base_vfgptc,
				    stats.saved_reset_vfgptc)},
	{"rx_bytes", IXGBEVF_STAT(stats.vfgorc, stats.base_vfgorc,
				  stats.saved_reset_vfgorc)},
	{"tx_bytes", IXGBEVF_STAT(stats.vfgotc, stats.base_vfgotc,
				  stats.saved_reset_vfgotc)},
	{"tx_busy", IXGBEVF_STAT(tx_busy, zero_base, zero_base)},
	{"multicast", IXGBEVF_STAT(stats.vfmprc, stats.base_vfmprc,
				   stats.saved_reset_vfmprc)},
	{"rx_csum_offload_good", IXGBEVF_STAT(hw_csum_rx_good, zero_base,
					      zero_base)},
	{"rx_csum_offload_errors", IXGBEVF_STAT(hw_csum_rx_error, zero_base,
						zero_base)},
	{"tx_csum_offload_ctxt", IXGBEVF_STAT(hw_csum_tx_good, zero_base,
					      zero_base)},
	{"rx_header_split", IXGBEVF_STAT(rx_hdr_split, zero_base, zero_base)},
};

#define IXGBE_QUEUE_STATS_LEN 0
#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats)

#define IXGBEVF_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + IXGBE_QUEUE_STATS_LEN)
#endif /* ETHTOOL_GSTATS */
#ifdef ETHTOOL_TEST
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test (offline)",
	"Link test (on/offline)"
};
#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
#endif /* ETHTOOL_TEST */
static int ixgbevf_get_settings(struct net_device *netdev,
				struct ethtool_cmd *ecmd)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = 0;
	bool link_up;

	ecmd->supported = SUPPORTED_10000baseT_Full;
	ecmd->autoneg = AUTONEG_DISABLE;
	ecmd->transceiver = XCVR_DUMMY1;
	ecmd->port = -1;

	hw->mac.ops.check_link(hw, &link_speed, &link_up, false);

	if (link_up) {
		ethtool_cmd_speed_set(
			ecmd,
			(link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
			SPEED_10000 : SPEED_1000);
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(ecmd, -1);
		ecmd->duplex = -1;
	}

	return 0;
}

static u32 ixgbevf_get_msglevel(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	return adapter->msg_enable;
}

static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	adapter->msg_enable = data;
}
#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)
static char *ixgbevf_reg_names[] = {
	"IXGBE_VFCTRL",
	"IXGBE_VFSTATUS",
	"IXGBE_VFLINKS",
	"IXGBE_VFRXMEMWRAP",
	"IXGBE_VFFRTIMER",
	"IXGBE_VTEICR",
	"IXGBE_VTEICS",
	"IXGBE_VTEIMS",
	"IXGBE_VTEIMC",
	"IXGBE_VTEIAC",
	"IXGBE_VTEIAM",
	"IXGBE_VTEITR",
	"IXGBE_VTIVAR",
	"IXGBE_VTIVAR_MISC",
	"IXGBE_VFRDBAL0",
	"IXGBE_VFRDBAL1",
	"IXGBE_VFRDBAH0",
	"IXGBE_VFRDBAH1",
	"IXGBE_VFRDLEN0",
	"IXGBE_VFRDLEN1",
	"IXGBE_VFRDH0",
	"IXGBE_VFRDH1",
	"IXGBE_VFRDT0",
	"IXGBE_VFRDT1",
	"IXGBE_VFRXDCTL0",
	"IXGBE_VFRXDCTL1",
	"IXGBE_VFSRRCTL0",
	"IXGBE_VFSRRCTL1",
	"IXGBE_VFPSRTYPE",
	"IXGBE_VFTDBAL0",
	"IXGBE_VFTDBAL1",
	"IXGBE_VFTDBAH0",
	"IXGBE_VFTDBAH1",
	"IXGBE_VFTDLEN0",
	"IXGBE_VFTDLEN1",
	"IXGBE_VFTDH0",
	"IXGBE_VFTDH1",
	"IXGBE_VFTDT0",
	"IXGBE_VFTDT1",
	"IXGBE_VFTXDCTL0",
	"IXGBE_VFTXDCTL1",
	"IXGBE_VFTDWBAL0",
	"IXGBE_VFTDWBAL1",
	"IXGBE_VFTDWBAH0",
	"IXGBE_VFTDWBAH1"
};

static int ixgbevf_get_regs_len(struct net_device *netdev)
{
	return (ARRAY_SIZE(ixgbevf_reg_names)) * sizeof(u32);
}
static void ixgbevf_get_regs(struct net_device *netdev,
			     struct ethtool_regs *regs,
			     void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u32 regs_len = ixgbevf_get_regs_len(netdev);
	u8 i;

	memset(p, 0, regs_len);

	regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;

	/* General Registers */
	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS);
	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP);
	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFFRTIMER);

	/* Interrupt */
	/* don't read EICR because it can clear interrupt causes, instead
	 * read EICS which is a shadow but doesn't clear EICR */
	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC);
	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC);
	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM);
	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0));
	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0));
	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);

	/* Receive DMA */
	for (i = 0; i < 2; i++)
		regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i));
	for (i = 0; i < 2; i++)
		regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i));
	for (i = 0; i < 2; i++)
		regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i));
	for (i = 0; i < 2; i++)
		regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i));
	for (i = 0; i < 2; i++)
		regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i));
	for (i = 0; i < 2; i++)
		regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
	for (i = 0; i < 2; i++)
		regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));

	/* Receive */
	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE);

	/* Transmit */
	for (i = 0; i < 2; i++)
		regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i));
	for (i = 0; i < 2; i++)
		regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i));
	for (i = 0; i < 2; i++)
		regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i));
	for (i = 0; i < 2; i++)
		regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i));
	for (i = 0; i < 2; i++)
		regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i));
	for (i = 0; i < 2; i++)
		regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
	for (i = 0; i < 2; i++)
		regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
	for (i = 0; i < 2; i++)
		regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));

	for (i = 0; i < ARRAY_SIZE(ixgbevf_reg_names); i++)
		hw_dbg(hw, "%s\t%8.8x\n", ixgbevf_reg_names[i], regs_buff[i]);
}
static void ixgbevf_get_drvinfo(struct net_device *netdev,
				struct ethtool_drvinfo *drvinfo)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	strlcpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ixgbevf_driver_version,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));
}

static void ixgbevf_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_ring *tx_ring = adapter->tx_ring;
	struct ixgbevf_ring *rx_ring = adapter->rx_ring;

	ring->rx_max_pending = IXGBEVF_MAX_RXD;
	ring->tx_max_pending = IXGBEVF_MAX_TXD;
	ring->rx_pending = rx_ring->count;
	ring->tx_pending = tx_ring->count;
}
static int ixgbevf_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
	int i, err = 0;
	u32 new_rx_count, new_tx_count;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_rx_count = max(ring->rx_pending, (u32)IXGBEVF_MIN_RXD);
	new_rx_count = min(new_rx_count, (u32)IXGBEVF_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	new_tx_count = max(ring->tx_pending, (u32)IXGBEVF_MIN_TXD);
	new_tx_count = min(new_tx_count, (u32)IXGBEVF_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

	if ((new_tx_count == adapter->tx_ring->count) &&
	    (new_rx_count == adapter->rx_ring->count)) {
		/* nothing to do */
		return 0;
	}

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		msleep(1);

	/*
	 * If the adapter isn't up and running then just set the
	 * new parameters and scurry for the exits.
	 */
	if (!netif_running(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i].count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i].count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	tx_ring = kcalloc(adapter->num_tx_queues,
			  sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!tx_ring) {
		err = -ENOMEM;
		goto clear_reset;
	}

	rx_ring = kcalloc(adapter->num_rx_queues,
			  sizeof(struct ixgbevf_ring), GFP_KERNEL);
	if (!rx_ring) {
		err = -ENOMEM;
		goto err_rx_setup;
	}

	ixgbevf_down(adapter);

	memcpy(tx_ring, adapter->tx_ring,
	       adapter->num_tx_queues * sizeof(struct ixgbevf_ring));
	for (i = 0; i < adapter->num_tx_queues; i++) {
		tx_ring[i].count = new_tx_count;
		err = ixgbevf_setup_tx_resources(adapter, &tx_ring[i]);
		if (err) {
			while (i) {
				i--;
				ixgbevf_free_tx_resources(adapter,
							  &tx_ring[i]);
			}
			goto err_tx_ring_setup;
		}
		tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
	}

	memcpy(rx_ring, adapter->rx_ring,
	       adapter->num_rx_queues * sizeof(struct ixgbevf_ring));
	for (i = 0; i < adapter->num_rx_queues; i++) {
		rx_ring[i].count = new_rx_count;
		err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
		if (err) {
			while (i) {
				i--;
				ixgbevf_free_rx_resources(adapter,
							  &rx_ring[i]);
			}
			goto err_rx_ring_setup;
		}
		rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
	}

	/*
	 * Only switch to new rings if all the prior allocations
	 * and ring setups have succeeded.
	 */
	kfree(adapter->tx_ring);
	adapter->tx_ring = tx_ring;
	adapter->tx_ring_count = new_tx_count;

	kfree(adapter->rx_ring);
	adapter->rx_ring = rx_ring;
	adapter->rx_ring_count = new_rx_count;

	/* success! */
	ixgbevf_up(adapter);

	goto clear_reset;

err_rx_ring_setup:
	for (i = 0; i < adapter->num_tx_queues; i++)
		ixgbevf_free_tx_resources(adapter, &tx_ring[i]);

err_tx_ring_setup:
	kfree(rx_ring);

err_rx_setup:
	kfree(tx_ring);

clear_reset:
	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
	return err;
}
static int ixgbevf_get_sset_count(struct net_device *dev, int stringset)
{
	switch (stringset) {
	case ETH_SS_TEST:
		return IXGBE_TEST_LEN;
	case ETH_SS_STATS:
		return IXGBE_GLOBAL_STATS_LEN;
	default:
		return -EINVAL;
	}
}
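/*
 * Each value reported below is the live counter minus its base snapshot plus
 * the value saved across the last reset, each field read as a u64 or u32
 * according to the sizeof_stat recorded in ixgbe_gstrings_stats[].
 */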
static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	int i;

	ixgbevf_update_stats(adapter);
	for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
		char *p = (char *)adapter +
			  ixgbe_gstrings_stats[i].stat_offset;
		char *b = (char *)adapter +
			  ixgbe_gstrings_stats[i].base_stat_offset;
		char *r = (char *)adapter +
			  ixgbe_gstrings_stats[i].saved_reset_offset;
		data[i] = ((ixgbe_gstrings_stats[i].sizeof_stat ==
			    sizeof(u64)) ? *(u64 *)p : *(u32 *)p) -
			  ((ixgbe_gstrings_stats[i].sizeof_stat ==
			    sizeof(u64)) ? *(u64 *)b : *(u32 *)b) +
			  ((ixgbe_gstrings_stats[i].sizeof_stat ==
			    sizeof(u64)) ? *(u64 *)r : *(u32 *)r);
	}
}
static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	char *p = (char *)data;
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *ixgbe_gstrings_test,
		       IXGBE_TEST_LEN * ETH_GSTRING_LEN);
		break;
	case ETH_SS_STATS:
		for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
			memcpy(p, ixgbe_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}

static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link_up;
	u32 link_speed = 0;
	*data = 0;

	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
	if (!link_up)
		*data = 1;

	return *data;
}
/* ethtool register test data */
struct ixgbevf_reg_test {
	u16 reg;
	u8  array_len;
	u8  test_type;
	u32 mask;
	u32 write;
};

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x40 bytes apart, or in contiguous tables. We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */

#define PATTERN_TEST	1
#define SET_READ_TEST	2
#define WRITE_NO_TEST	3
#define TABLE32_TEST	4
#define TABLE64_TEST_LO	5
#define TABLE64_TEST_HI	6
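/*
 * reg_test_vf below is walked by ixgbevf_reg_test(): an all-zero entry
 * terminates the table, array entries are stepped 0x40 bytes apart, and
 * TABLE32/TABLE64 entries use 4- and 8-byte strides respectively.
 */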
/* default VF register test */
static const struct ixgbevf_reg_test reg_test_vf[] = {
	{ IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
	{ IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	{ IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
	{ 0, 0, 0, 0 }
};
static const u32 register_test_patterns[] = {
	0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
};
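/*
 * The two helper macros below write a value to a register, read it back and
 * compare it under the given mask; the original contents are always restored.
 * On a mismatch they record the failing register offset in *data and return 1
 * from the function that expanded them.
 */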
#define REG_PATTERN_TEST(R, M, W) \
{ \
	u32 pat, val, before; \
	for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) { \
		before = readl(adapter->hw.hw_addr + R); \
		writel((register_test_patterns[pat] & W), \
		       (adapter->hw.hw_addr + R)); \
		val = readl(adapter->hw.hw_addr + R); \
		if (val != (register_test_patterns[pat] & W & M)) { \
			hw_dbg(&adapter->hw, \
			       "pattern test reg %04X failed: got " \
			       "0x%08X expected 0x%08X\n", \
			       R, val, (register_test_patterns[pat] & W & M)); \
			*data = R; \
			writel(before, adapter->hw.hw_addr + R); \
			return 1; \
		} \
		writel(before, adapter->hw.hw_addr + R); \
	} \
}
#define REG_SET_AND_CHECK(R, M, W) \
{ \
	u32 val, before; \
	before = readl(adapter->hw.hw_addr + R); \
	writel((W & M), (adapter->hw.hw_addr + R)); \
	val = readl(adapter->hw.hw_addr + R); \
	if ((W & M) != (val & M)) { \
		pr_err("set/check reg %04X test failed: got 0x%08X expected " \
		       "0x%08X\n", R, (val & M), (W & M)); \
		*data = R; \
		writel(before, (adapter->hw.hw_addr + R)); \
		return 1; \
	} \
	writel(before, (adapter->hw.hw_addr + R)); \
}
static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
{
	const struct ixgbevf_reg_test *test;
	u32 i;

	test = reg_test_vf;

	/*
	 * Perform the register test, looping through the test table
	 * until we either fail or reach the null entry.
	 */
	while (test->reg) {
		for (i = 0; i < test->array_len; i++) {
			switch (test->test_type) {
			case PATTERN_TEST:
				REG_PATTERN_TEST(test->reg + (i * 0x40),
						 test->mask,
						 test->write);
				break;
			case SET_READ_TEST:
				REG_SET_AND_CHECK(test->reg + (i * 0x40),
						  test->mask,
						  test->write);
				break;
			case WRITE_NO_TEST:
				writel(test->write,
				       (adapter->hw.hw_addr + test->reg)
				       + (i * 0x40));
				break;
			case TABLE32_TEST:
				REG_PATTERN_TEST(test->reg + (i * 4),
						 test->mask,
						 test->write);
				break;
			case TABLE64_TEST_LO:
				REG_PATTERN_TEST(test->reg + (i * 8),
						 test->mask,
						 test->write);
				break;
			case TABLE64_TEST_HI:
				REG_PATTERN_TEST((test->reg + 4) + (i * 8),
						 test->mask,
						 test->write);
				break;
			}
		}
		test++;
	}

	*data = 0;
	return *data;
}
static void ixgbevf_diag_test(struct net_device *netdev,
			      struct ethtool_test *eth_test, u64 *data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	bool if_running = netif_running(netdev);

	set_bit(__IXGBEVF_TESTING, &adapter->state);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline tests */

		hw_dbg(&adapter->hw, "offline testing starting\n");

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result */
		if (ixgbevf_link_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (if_running)
			/* indicate we're in test mode */
			dev_close(netdev);
		else
			ixgbevf_reset(adapter);

		hw_dbg(&adapter->hw, "register testing starting\n");
		if (ixgbevf_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbevf_reset(adapter);

		clear_bit(__IXGBEVF_TESTING, &adapter->state);
		if (if_running)
			dev_open(netdev);
	} else {
		hw_dbg(&adapter->hw, "online testing starting\n");
		/* Online tests */
		if (ixgbevf_link_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Online tests aren't run; pass by default */
		data[0] = 0;

		clear_bit(__IXGBEVF_TESTING, &adapter->state);
	}
	msleep_interruptible(4 * 1000);
}
static int ixgbevf_nway_reset(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev)) {
		if (!adapter->dev_closed)
			ixgbevf_reinit_locked(adapter);
	}

	return 0;
}
static const struct ethtool_ops ixgbevf_ethtool_ops = {
	.get_settings		= ixgbevf_get_settings,
	.get_drvinfo		= ixgbevf_get_drvinfo,
	.get_regs_len		= ixgbevf_get_regs_len,
	.get_regs		= ixgbevf_get_regs,
	.nway_reset		= ixgbevf_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= ixgbevf_get_ringparam,
	.set_ringparam		= ixgbevf_set_ringparam,
	.get_msglevel		= ixgbevf_get_msglevel,
	.set_msglevel		= ixgbevf_set_msglevel,
	.self_test		= ixgbevf_diag_test,
	.get_sset_count		= ixgbevf_get_sset_count,
	.get_strings		= ixgbevf_get_strings,
	.get_ethtool_stats	= ixgbevf_get_ethtool_stats,
};

void ixgbevf_set_ethtool_ops(struct net_device *netdev)
{
	SET_ETHTOOL_OPS(netdev, &ixgbevf_ethtool_ops);
}