/* drivers/net/ethernet/intel/ixgbevf/ethtool.c */
/*******************************************************************************

  Intel 82599 Virtual Function driver
  Copyright(c) 1999 - 2015 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, see <http://www.gnu.org/licenses/>.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/
/* ethtool support for ixgbevf */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>

#include "ixgbevf.h"

#define IXGBE_ALL_RAR_ENTRIES 16

enum {NETDEV_STATS, IXGBEVF_STATS};
struct ixgbe_stats {
	char stat_string[ETH_GSTRING_LEN];
	int type;
	int sizeof_stat;
	int stat_offset;
};

#define IXGBEVF_STAT(_name, _stat) { \
	.stat_string = _name, \
	.type = IXGBEVF_STATS, \
	.sizeof_stat = FIELD_SIZEOF(struct ixgbevf_adapter, _stat), \
	.stat_offset = offsetof(struct ixgbevf_adapter, _stat) \
}

#define IXGBEVF_NETDEV_STAT(_net_stat) { \
	.stat_string = #_net_stat, \
	.type = NETDEV_STATS, \
	.sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
	.stat_offset = offsetof(struct net_device_stats, _net_stat) \
}
static struct ixgbe_stats ixgbevf_gstrings_stats[] = {
	IXGBEVF_NETDEV_STAT(rx_packets),
	IXGBEVF_NETDEV_STAT(tx_packets),
	IXGBEVF_NETDEV_STAT(rx_bytes),
	IXGBEVF_NETDEV_STAT(tx_bytes),
	IXGBEVF_STAT("tx_busy", tx_busy),
	IXGBEVF_STAT("tx_restart_queue", restart_queue),
	IXGBEVF_STAT("tx_timeout_count", tx_timeout_count),
	IXGBEVF_NETDEV_STAT(multicast),
	IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error),
};

#define IXGBEVF_QUEUE_STATS_LEN ( \
	(((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \
	 ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_rx_queues) * \
	 (sizeof(struct ixgbevf_stats) / sizeof(u64)))
#define IXGBEVF_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbevf_gstrings_stats)

#define IXGBEVF_STATS_LEN (IXGBEVF_GLOBAL_STATS_LEN + IXGBEVF_QUEUE_STATS_LEN)
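/* Each Tx and Rx ring contributes two u64 counters (packets and bytes) to
 * the ethtool stats dump, in the order that ixgbevf_get_strings() and
 * ixgbevf_get_ethtool_stats() walk the rings: all Tx queues first, then all
 * Rx queues, after the IXGBEVF_GLOBAL_STATS_LEN global counters.
 */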
static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
	"Register test (offline)",
	"Link test (on/offline)"
};

#define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
static int ixgbevf_get_settings(struct net_device *netdev,
				struct ethtool_cmd *ecmd)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = 0;
	bool link_up;

	ecmd->supported = SUPPORTED_10000baseT_Full;
	ecmd->autoneg = AUTONEG_DISABLE;
	ecmd->transceiver = XCVR_DUMMY1;
	ecmd->port = -1;

	hw->mac.get_link_status = 1;
	hw->mac.ops.check_link(hw, &link_speed, &link_up, false);

	if (link_up) {
		__u32 speed = SPEED_10000;

		switch (link_speed) {
		case IXGBE_LINK_SPEED_10GB_FULL:
			speed = SPEED_10000;
			break;
		case IXGBE_LINK_SPEED_1GB_FULL:
			speed = SPEED_1000;
			break;
		case IXGBE_LINK_SPEED_100_FULL:
			speed = SPEED_100;
			break;
		}

		ethtool_cmd_speed_set(ecmd, speed);
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ethtool_cmd_speed_set(ecmd, SPEED_UNKNOWN);
		ecmd->duplex = DUPLEX_UNKNOWN;
	}

	return 0;
}
static u32 ixgbevf_get_msglevel(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = data;
}

#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)

static int ixgbevf_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN 45
	return IXGBE_REGS_LEN * sizeof(u32);
}
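/* Layout of the 45-word dump filled in by ixgbevf_get_regs() below:
 * [0..4] general, [5..13] interrupt, [14..27] Rx DMA, [28] Rx packet split,
 * [29..44] Tx - matching IXGBE_REGS_LEN above.
 */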
static void ixgbevf_get_regs(struct net_device *netdev,
			     struct ethtool_regs *regs,
			     void *p)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u32 regs_len = ixgbevf_get_regs_len(netdev);
	u8 i;

	memset(p, 0, regs_len);

	/* generate a number suitable for ethtool's register version */
	regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id;

	/* General Registers */
	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS);
	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP);
	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFFRTIMER);

	/* Interrupt */
	/* don't read EICR because it can clear interrupt causes, instead
	 * read EICS which is a shadow but doesn't clear EICR
	 */
	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC);
	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC);
	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM);
	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0));
	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0));
	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);

	/* Receive DMA */
	for (i = 0; i < 2; i++)
		regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i));
	for (i = 0; i < 2; i++)
		regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i));
	for (i = 0; i < 2; i++)
		regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i));
	for (i = 0; i < 2; i++)
		regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i));
	for (i = 0; i < 2; i++)
		regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i));
	for (i = 0; i < 2; i++)
		regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
	for (i = 0; i < 2; i++)
		regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));

	/* Receive */
	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE);

	/* Transmit */
	for (i = 0; i < 2; i++)
		regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i));
	for (i = 0; i < 2; i++)
		regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i));
	for (i = 0; i < 2; i++)
		regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i));
	for (i = 0; i < 2; i++)
		regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i));
	for (i = 0; i < 2; i++)
		regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i));
	for (i = 0; i < 2; i++)
		regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
	for (i = 0; i < 2; i++)
		regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
	for (i = 0; i < 2; i++)
		regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
}
static void ixgbevf_get_drvinfo(struct net_device *netdev,
				struct ethtool_drvinfo *drvinfo)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	strlcpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver));
	strlcpy(drvinfo->version, ixgbevf_driver_version,
		sizeof(drvinfo->version));
	strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
		sizeof(drvinfo->bus_info));
}
static void ixgbevf_get_ringparam(struct net_device *netdev,
				  struct ethtool_ringparam *ring)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	ring->rx_max_pending = IXGBEVF_MAX_RXD;
	ring->tx_max_pending = IXGBEVF_MAX_TXD;
	ring->rx_pending = adapter->rx_ring_count;
	ring->tx_pending = adapter->tx_ring_count;
}
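/* Changing the descriptor ring sizes: new rings are cloned and their
 * resources allocated first; only if every allocation succeeds is the
 * interface brought down, the old rings freed and swapped for the new
 * ones, and the interface brought back up.  On any failure the original
 * rings are left untouched and the error is returned to ethtool.
 */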
static int ixgbevf_set_ringparam(struct net_device *netdev,
				 struct ethtool_ringparam *ring)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
	u32 new_rx_count, new_tx_count;
	int i, err = 0;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_tx_count = max_t(u32, ring->tx_pending, IXGBEVF_MIN_TXD);
	new_tx_count = min_t(u32, new_tx_count, IXGBEVF_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

	new_rx_count = max_t(u32, ring->rx_pending, IXGBEVF_MIN_RXD);
	new_rx_count = min_t(u32, new_rx_count, IXGBEVF_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	/* if nothing to do return success */
	if ((new_tx_count == adapter->tx_ring_count) &&
	    (new_rx_count == adapter->rx_ring_count))
		return 0;

	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
		usleep_range(1000, 2000);

	if (!netif_running(adapter->netdev)) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			adapter->tx_ring[i]->count = new_tx_count;
		for (i = 0; i < adapter->num_rx_queues; i++)
			adapter->rx_ring[i]->count = new_rx_count;
		adapter->tx_ring_count = new_tx_count;
		adapter->rx_ring_count = new_rx_count;
		goto clear_reset;
	}

	if (new_tx_count != adapter->tx_ring_count) {
		tx_ring = vmalloc(adapter->num_tx_queues * sizeof(*tx_ring));
		if (!tx_ring) {
			err = -ENOMEM;
			goto clear_reset;
		}

		for (i = 0; i < adapter->num_tx_queues; i++) {
			/* clone ring and setup updated count */
			tx_ring[i] = *adapter->tx_ring[i];
			tx_ring[i].count = new_tx_count;
			err = ixgbevf_setup_tx_resources(&tx_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbevf_free_tx_resources(&tx_ring[i]);
				}

				vfree(tx_ring);
				tx_ring = NULL;

				goto clear_reset;
			}
		}
	}

	if (new_rx_count != adapter->rx_ring_count) {
		rx_ring = vmalloc(adapter->num_rx_queues * sizeof(*rx_ring));
		if (!rx_ring) {
			err = -ENOMEM;
			goto clear_reset;
		}

		for (i = 0; i < adapter->num_rx_queues; i++) {
			/* clone ring and setup updated count */
			rx_ring[i] = *adapter->rx_ring[i];
			rx_ring[i].count = new_rx_count;
			err = ixgbevf_setup_rx_resources(&rx_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbevf_free_rx_resources(&rx_ring[i]);
				}

				vfree(rx_ring);
				rx_ring = NULL;

				goto clear_reset;
			}
		}
	}

	/* bring interface down to prepare for update */
	ixgbevf_down(adapter);

	/* Tx */
	if (tx_ring) {
		for (i = 0; i < adapter->num_tx_queues; i++) {
			ixgbevf_free_tx_resources(adapter->tx_ring[i]);
			*adapter->tx_ring[i] = tx_ring[i];
		}
		adapter->tx_ring_count = new_tx_count;

		vfree(tx_ring);
		tx_ring = NULL;
	}

	/* Rx */
	if (rx_ring) {
		for (i = 0; i < adapter->num_rx_queues; i++) {
			ixgbevf_free_rx_resources(adapter->rx_ring[i]);
			*adapter->rx_ring[i] = rx_ring[i];
		}
		adapter->rx_ring_count = new_rx_count;

		vfree(rx_ring);
		rx_ring = NULL;
	}

	/* restore interface using new values */
	ixgbevf_up(adapter);

clear_reset:
	/* free Tx resources if Rx error is encountered */
	if (tx_ring) {
		for (i = 0; i < adapter->num_tx_queues; i++)
			ixgbevf_free_tx_resources(&tx_ring[i]);
		vfree(tx_ring);
	}

	clear_bit(__IXGBEVF_RESETTING, &adapter->state);
	return err;
}
static int ixgbevf_get_sset_count(struct net_device *netdev, int stringset)
{
	switch (stringset) {
	case ETH_SS_TEST:
		return IXGBEVF_TEST_LEN;
	case ETH_SS_STATS:
		return IXGBEVF_STATS_LEN;
	default:
		return -EINVAL;
	}
}
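/* ethtool -S output order: the IXGBEVF_GLOBAL_STATS_LEN netdev/adapter
 * counters come first, followed by a packets/bytes pair for each Tx queue
 * and then for each Rx queue.  Per-ring counters are read under the ring's
 * u64_stats syncp so the 64-bit values are consistent on 32-bit hosts.
 */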
static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
				      struct ethtool_stats *stats, u64 *data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct rtnl_link_stats64 temp;
	const struct rtnl_link_stats64 *net_stats;
	unsigned int start;
	struct ixgbevf_ring *ring;
	int i, j;
	char *p;

	ixgbevf_update_stats(adapter);
	net_stats = dev_get_stats(netdev, &temp);
	for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
		switch (ixgbevf_gstrings_stats[i].type) {
		case NETDEV_STATS:
			p = (char *)net_stats +
					ixgbevf_gstrings_stats[i].stat_offset;
			break;
		case IXGBEVF_STATS:
			p = (char *)adapter +
					ixgbevf_gstrings_stats[i].stat_offset;
			break;
		default:
			data[i] = 0;
			continue;
		}

		data[i] = (ixgbevf_gstrings_stats[i].sizeof_stat ==
			   sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
	}

	/* populate Tx queue data */
	for (j = 0; j < adapter->num_tx_queues; j++) {
		ring = adapter->tx_ring[j];
		if (!ring) {
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i + 1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}

	/* populate Rx queue data */
	for (j = 0; j < adapter->num_rx_queues; j++) {
		ring = adapter->rx_ring[j];
		if (!ring) {
			data[i++] = 0;
			data[i++] = 0;
			continue;
		}

		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			data[i] = ring->stats.packets;
			data[i + 1] = ring->stats.bytes;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
		i += 2;
	}
}
static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	char *p = (char *)data;
	int i;

	switch (stringset) {
	case ETH_SS_TEST:
		memcpy(data, *ixgbe_gstrings_test,
		       IXGBEVF_TEST_LEN * ETH_GSTRING_LEN);
		break;
	case ETH_SS_STATS:
		for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
			memcpy(p, ixgbevf_gstrings_stats[i].stat_string,
			       ETH_GSTRING_LEN);
			p += ETH_GSTRING_LEN;
		}

		for (i = 0; i < adapter->num_tx_queues; i++) {
			sprintf(p, "tx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "tx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		for (i = 0; i < adapter->num_rx_queues; i++) {
			sprintf(p, "rx_queue_%u_packets", i);
			p += ETH_GSTRING_LEN;
			sprintf(p, "rx_queue_%u_bytes", i);
			p += ETH_GSTRING_LEN;
		}
		break;
	}
}
static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data)
{
	struct ixgbe_hw *hw = &adapter->hw;
	bool link_up;
	u32 link_speed = 0;

	*data = 0;

	hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
	if (!link_up)
		*data = 1;

	return *data;
}
/* ethtool register test data */
struct ixgbevf_reg_test {
	u16 reg;
	u8 array_len;
	u8 test_type;
	u32 mask;
	u32 write;
};

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x40 bytes apart, or in contiguous tables.  We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */

#define PATTERN_TEST	1
#define SET_READ_TEST	2
#define WRITE_NO_TEST	3
#define TABLE32_TEST	4
#define TABLE64_TEST_LO	5
#define TABLE64_TEST_HI	6
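/* Each entry in the table below is applied to 'array_len' copies of the
 * register, spaced 0x40 bytes apart.  For PATTERN_TEST entries the values
 * in register_test_patterns[] are written (masked by .write) and read back
 * for comparison under .mask; WRITE_NO_TEST entries are setup writes only.
 */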
/* default VF register test */
static const struct ixgbevf_reg_test reg_test_vf[] = {
	{ IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
	{ IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
	{ IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
	{ IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
	{ IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 },
	{ IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
	{ IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
	{ IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
	{ .reg = 0 }
};

static const u32 register_test_patterns[] = {
	0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
};
static bool reg_pattern_test(struct ixgbevf_adapter *adapter, u64 *data,
			     int reg, u32 mask, u32 write)
{
	u32 pat, val, before;

	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) {
		before = ixgbevf_read_reg(&adapter->hw, reg);
		ixgbe_write_reg(&adapter->hw, reg,
				register_test_patterns[pat] & write);
		val = ixgbevf_read_reg(&adapter->hw, reg);
		if (val != (register_test_patterns[pat] & write & mask)) {
			hw_dbg(&adapter->hw,
			       "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
			       reg, val,
			       register_test_patterns[pat] & write & mask);
			*data = reg;
			ixgbe_write_reg(&adapter->hw, reg, before);
			return true;
		}
		ixgbe_write_reg(&adapter->hw, reg, before);
	}
	return false;
}
static bool reg_set_and_check(struct ixgbevf_adapter *adapter, u64 *data,
			      int reg, u32 mask, u32 write)
{
	u32 val, before;

	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
		*data = 1;
		return true;
	}
	before = ixgbevf_read_reg(&adapter->hw, reg);
	ixgbe_write_reg(&adapter->hw, reg, write & mask);
	val = ixgbevf_read_reg(&adapter->hw, reg);
	if ((write & mask) != (val & mask)) {
		pr_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
		       reg, (val & mask), write & mask);
		*data = reg;
		ixgbe_write_reg(&adapter->hw, reg, before);
		return true;
	}
	ixgbe_write_reg(&adapter->hw, reg, before);
	return false;
}
static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
{
	const struct ixgbevf_reg_test *test;
	u32 i;

	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
		dev_err(&adapter->pdev->dev,
			"Adapter removed - register test blocked\n");
		*data = 1;
		return 1;
	}
	test = reg_test_vf;

	/* Perform the register test, looping through the test table
	 * until we either fail or reach the null entry.
	 */
	while (test->reg) {
		for (i = 0; i < test->array_len; i++) {
			bool b = false;

			switch (test->test_type) {
			case PATTERN_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 0x40),
						     test->mask,
						     test->write);
				break;
			case SET_READ_TEST:
				b = reg_set_and_check(adapter, data,
						      test->reg + (i * 0x40),
						      test->mask,
						      test->write);
				break;
			case WRITE_NO_TEST:
				ixgbe_write_reg(&adapter->hw,
						test->reg + (i * 0x40),
						test->write);
				break;
			case TABLE32_TEST:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 4),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_LO:
				b = reg_pattern_test(adapter, data,
						     test->reg + (i * 8),
						     test->mask,
						     test->write);
				break;
			case TABLE64_TEST_HI:
				b = reg_pattern_test(adapter, data,
						     test->reg + 4 + (i * 8),
						     test->mask,
						     test->write);
				break;
			}
			if (b)
				return 1;
		}
		test++;
	}

	*data = 0;
	return *data;
}
static void ixgbevf_diag_test(struct net_device *netdev,
			      struct ethtool_test *eth_test, u64 *data)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	bool if_running = netif_running(netdev);

	if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
		dev_err(&adapter->pdev->dev,
			"Adapter removed - test blocked\n");
		data[0] = 1;
		data[1] = 1;
		eth_test->flags |= ETH_TEST_FL_FAILED;
		return;
	}
	set_bit(__IXGBEVF_TESTING, &adapter->state);
	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
		/* Offline tests */

		hw_dbg(&adapter->hw, "offline testing starting\n");

		/* Link test performed before hardware reset so autoneg doesn't
		 * interfere with test result
		 */
		if (ixgbevf_link_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		if (if_running)
			/* indicate we're in test mode */
			ixgbevf_close(netdev);
		else
			ixgbevf_reset(adapter);

		hw_dbg(&adapter->hw, "register testing starting\n");
		if (ixgbevf_reg_test(adapter, &data[0]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		ixgbevf_reset(adapter);

		clear_bit(__IXGBEVF_TESTING, &adapter->state);
		if (if_running)
			ixgbevf_open(netdev);
	} else {
		hw_dbg(&adapter->hw, "online testing starting\n");
		/* Online tests */
		if (ixgbevf_link_test(adapter, &data[1]))
			eth_test->flags |= ETH_TEST_FL_FAILED;

		/* Online tests aren't run; pass by default */
		data[0] = 0;

		clear_bit(__IXGBEVF_TESTING, &adapter->state);
	}
	msleep_interruptible(4 * 1000);
}
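/* The VF has no autonegotiation of its own to restart (link settings are
 * owned by the PF), so ethtool -r simply reinitializes the interface when
 * it is running.
 */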
static int ixgbevf_nway_reset(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbevf_reinit_locked(adapter);

	return 0;
}
static int ixgbevf_get_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	/* only valid if in constant ITR mode */
	if (adapter->rx_itr_setting <= 1)
		ec->rx_coalesce_usecs = adapter->rx_itr_setting;
	else
		ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;

	/* if in mixed Tx/Rx queues per vector mode, report only Rx settings */
	if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
		return 0;

	/* only valid if in constant ITR mode */
	if (adapter->tx_itr_setting <= 1)
		ec->tx_coalesce_usecs = adapter->tx_itr_setting;
	else
		ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;

	return 0;
}
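/* itr_setting values of 0 and 1 are special settings rather than literal
 * microsecond counts; larger values store the interval shifted left by 2,
 * which is why the coalesce conversions above and below use << 2 and >> 2.
 */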
static int ixgbevf_set_coalesce(struct net_device *netdev,
				struct ethtool_coalesce *ec)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	struct ixgbevf_q_vector *q_vector;
	int num_vectors, i;
	u16 tx_itr_param, rx_itr_param;

	/* don't accept Tx specific changes if we've got mixed RxTx vectors */
	if (adapter->q_vector[0]->tx.count &&
	    adapter->q_vector[0]->rx.count && ec->tx_coalesce_usecs)
		return -EINVAL;

	if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
	    (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
		return -EINVAL;

	if (ec->rx_coalesce_usecs > 1)
		adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
	else
		adapter->rx_itr_setting = ec->rx_coalesce_usecs;

	if (adapter->rx_itr_setting == 1)
		rx_itr_param = IXGBE_20K_ITR;
	else
		rx_itr_param = adapter->rx_itr_setting;

	if (ec->tx_coalesce_usecs > 1)
		adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
	else
		adapter->tx_itr_setting = ec->tx_coalesce_usecs;

	if (adapter->tx_itr_setting == 1)
		tx_itr_param = IXGBE_12K_ITR;
	else
		tx_itr_param = adapter->tx_itr_setting;

	num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

	for (i = 0; i < num_vectors; i++) {
		q_vector = adapter->q_vector[i];
		if (q_vector->tx.count && !q_vector->rx.count)
			/* Tx only */
			q_vector->itr = tx_itr_param;
		else
			/* Rx only or mixed */
			q_vector->itr = rx_itr_param;
		ixgbevf_write_eitr(q_vector);
	}

	return 0;
}
static int ixgbevf_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			     u32 *rules __always_unused)
{
	struct ixgbevf_adapter *adapter = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = adapter->num_rx_queues;
		return 0;
	default:
		hw_dbg(&adapter->hw, "Command parameters not supported\n");
		return -EOPNOTSUPP;
	}
}
static u32 ixgbevf_get_rxfh_indir_size(struct net_device *netdev)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);

	if (adapter->hw.mac.type >= ixgbe_mac_X550_vf)
		return IXGBEVF_X550_VFRETA_SIZE;

	return IXGBEVF_82599_RETA_SIZE;
}

static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev)
{
	return IXGBEVF_RSS_HASH_KEY_SIZE;
}
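/* On X550-class VFs the RSS key and indirection table are cached in the
 * adapter structure; on older VFs they must be queried from the PF over
 * the mailbox, which is why that path below takes mbx_lock.
 */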
static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			    u8 *hfunc)
{
	struct ixgbevf_adapter *adapter = netdev_priv(netdev);
	int err = 0;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) {
		if (key)
			memcpy(key, adapter->rss_key, sizeof(adapter->rss_key));

		if (indir) {
			int i;

			for (i = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++)
				indir[i] = adapter->rss_indir_tbl[i];
		}
	} else {
		/* If neither indirection table nor hash key was requested
		 * - just return a success avoiding taking any locks.
		 */
		if (!indir && !key)
			return 0;

		spin_lock_bh(&adapter->mbx_lock);
		if (indir)
			err = ixgbevf_get_reta_locked(&adapter->hw, indir,
						      adapter->num_rx_queues);

		if (!err && key)
			err = ixgbevf_get_rss_key_locked(&adapter->hw, key);

		spin_unlock_bh(&adapter->mbx_lock);
	}

	return err;
}
static const struct ethtool_ops ixgbevf_ethtool_ops = {
	.get_settings		= ixgbevf_get_settings,
	.get_drvinfo		= ixgbevf_get_drvinfo,
	.get_regs_len		= ixgbevf_get_regs_len,
	.get_regs		= ixgbevf_get_regs,
	.nway_reset		= ixgbevf_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_ringparam		= ixgbevf_get_ringparam,
	.set_ringparam		= ixgbevf_set_ringparam,
	.get_msglevel		= ixgbevf_get_msglevel,
	.set_msglevel		= ixgbevf_set_msglevel,
	.self_test		= ixgbevf_diag_test,
	.get_sset_count		= ixgbevf_get_sset_count,
	.get_strings		= ixgbevf_get_strings,
	.get_ethtool_stats	= ixgbevf_get_ethtool_stats,
	.get_coalesce		= ixgbevf_get_coalesce,
	.set_coalesce		= ixgbevf_set_coalesce,
	.get_rxnfc		= ixgbevf_get_rxnfc,
	.get_rxfh_indir_size	= ixgbevf_get_rxfh_indir_size,
	.get_rxfh_key_size	= ixgbevf_get_rxfh_key_size,
	.get_rxfh		= ixgbevf_get_rxfh,
};

void ixgbevf_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ixgbevf_ethtool_ops;
}