// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 1999 - 2018 Intel Corporation. */

/* ethtool support for ixgbevf */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/vmalloc.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>

#include "ixgbevf.h"

#define IXGBE_ALL_RAR_ENTRIES 16

enum {NETDEV_STATS, IXGBEVF_STATS};

struct ixgbe_stats {
        char stat_string[ETH_GSTRING_LEN];
        int type;
        int sizeof_stat;
        int stat_offset;
};

#define IXGBEVF_STAT(_name, _stat) { \
        .stat_string = _name, \
        .type = IXGBEVF_STATS, \
        .sizeof_stat = sizeof_field(struct ixgbevf_adapter, _stat), \
        .stat_offset = offsetof(struct ixgbevf_adapter, _stat) \
}

#define IXGBEVF_NETDEV_STAT(_net_stat) { \
        .stat_string = #_net_stat, \
        .type = NETDEV_STATS, \
        .sizeof_stat = sizeof_field(struct net_device_stats, _net_stat), \
        .stat_offset = offsetof(struct net_device_stats, _net_stat) \
}

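/* Each table entry records a display name plus the type, size and byte
 * offset of the backing counter; ixgbevf_get_ethtool_stats() resolves the
 * offset against either the adapter or the netdev stats at read time.
 * For illustration, IXGBEVF_STAT("tx_busy", tx_busy) expands to roughly:
 *
 *   { .stat_string = "tx_busy",
 *     .type = IXGBEVF_STATS,
 *     .sizeof_stat = sizeof_field(struct ixgbevf_adapter, tx_busy),
 *     .stat_offset = offsetof(struct ixgbevf_adapter, tx_busy) }
 */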
static struct ixgbe_stats ixgbevf_gstrings_stats[] = {
        IXGBEVF_NETDEV_STAT(rx_packets),
        IXGBEVF_NETDEV_STAT(tx_packets),
        IXGBEVF_NETDEV_STAT(rx_bytes),
        IXGBEVF_NETDEV_STAT(tx_bytes),
        IXGBEVF_STAT("tx_busy", tx_busy),
        IXGBEVF_STAT("tx_restart_queue", restart_queue),
        IXGBEVF_STAT("tx_timeout_count", tx_timeout_count),
        IXGBEVF_NETDEV_STAT(multicast),
        IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error),
        IXGBEVF_STAT("alloc_rx_page", alloc_rx_page),
        IXGBEVF_STAT("alloc_rx_page_failed", alloc_rx_page_failed),
        IXGBEVF_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
        IXGBEVF_STAT("tx_ipsec", tx_ipsec),
        IXGBEVF_STAT("rx_ipsec", rx_ipsec),
};

#define IXGBEVF_QUEUE_STATS_LEN ( \
        (((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \
         ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_xdp_queues + \
         ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_rx_queues) * \
        (sizeof(struct ixgbevf_stats) / sizeof(u64)))

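/* IXGBEVF_QUEUE_STATS_LEN above: every Tx, XDP and Rx ring contributes one
 * block of u64 counters (packets and bytes, as filled in by
 * ixgbevf_get_ethtool_stats() below), so the per-queue stats length is
 * simply rings * counters-per-ring. Note the macro deliberately references
 * a local "netdev" variable and is only usable inside ethtool callbacks
 * that have one in scope.
 */
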
#define IXGBEVF_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbevf_gstrings_stats)

#define IXGBEVF_STATS_LEN (IXGBEVF_GLOBAL_STATS_LEN + IXGBEVF_QUEUE_STATS_LEN)

static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = {
        "Register test (offline)",
        "Link test (on/offline)"
};

#define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)

static const char ixgbevf_priv_flags_strings[][ETH_GSTRING_LEN] = {
#define IXGBEVF_PRIV_FLAGS_LEGACY_RX BIT(0)
        "legacy-rx",
};

#define IXGBEVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbevf_priv_flags_strings)

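/* The private flag is exposed through the standard ethtool interface, e.g.
 * (assuming an interface named eth0):
 *
 *   ethtool --show-priv-flags eth0
 *   ethtool --set-priv-flags eth0 legacy-rx on
 *
 * Setting it lands in ixgbevf_set_priv_flags() below.
 */
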
static int ixgbevf_get_link_ksettings(struct net_device *netdev,
                                      struct ethtool_link_ksettings *cmd)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        ethtool_link_ksettings_zero_link_mode(cmd, supported);
        ethtool_link_ksettings_add_link_mode(cmd, supported, 10000baseT_Full);
        cmd->base.autoneg = AUTONEG_DISABLE;

        if (adapter->link_up) {
                __u32 speed = SPEED_10000;

                switch (adapter->link_speed) {
                case IXGBE_LINK_SPEED_10GB_FULL:
                        speed = SPEED_10000;
                        break;
                case IXGBE_LINK_SPEED_1GB_FULL:
                        speed = SPEED_1000;
                        break;
                case IXGBE_LINK_SPEED_100_FULL:
                        speed = SPEED_100;
                        break;
                }

                cmd->base.speed = speed;
                cmd->base.duplex = DUPLEX_FULL;
        } else {
                cmd->base.speed = SPEED_UNKNOWN;
                cmd->base.duplex = DUPLEX_UNKNOWN;
        }

        return 0;
}

static u32 ixgbevf_get_msglevel(struct net_device *netdev)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        return adapter->msg_enable;
}

static void ixgbevf_set_msglevel(struct net_device *netdev, u32 data)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        adapter->msg_enable = data;
}

#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_)

static int ixgbevf_get_regs_len(struct net_device *netdev)
{
#define IXGBE_REGS_LEN 45
        return IXGBE_REGS_LEN * sizeof(u32);
}

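/* The 45-entry snapshot below is laid out as: 5 general registers,
 * 9 interrupt registers, 14 Rx DMA registers (two queues each of
 * RDBAL/RDBAH/RDLEN/RDH/RDT/RXDCTL/SRRCTL), one Rx register (PSRTYPE) and
 * 16 Tx registers (two queues each of TDBAL/TDBAH/TDLEN/TDH/TDT/TXDCTL/
 * TDWBAL/TDWBAH), matching indices 0-44 of regs_buff.
 */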
static void ixgbevf_get_regs(struct net_device *netdev,
                             struct ethtool_regs *regs,
                             void *p)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_hw *hw = &adapter->hw;
        u32 *regs_buff = p;
        u32 regs_len = ixgbevf_get_regs_len(netdev);
        u8 i;

        memset(p, 0, regs_len);

        /* generate a number suitable for ethtool's register version */
        regs->version = (1u << 24) | (hw->revision_id << 16) | hw->device_id;
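        /* The version word just written encodes a constant 1 in bits 31:24
         * (so user space can tell this layout apart from all-zero output),
         * the PCI revision ID in bits 23:16 and the PCI device ID in
         * bits 15:0.
         */
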
        /* General Registers */
        regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_VFCTRL);
        regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_VFSTATUS);
        regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_VFLINKS);
        regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_VFRXMEMWRAP);
        regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_VFFRTIMER);

        /* Interrupt */
        /* don't read EICR because it can clear interrupt causes, instead
         * read EICS which is a shadow but doesn't clear EICR
         */
        regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
        regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_VTEICS);
        regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_VTEIMS);
        regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_VTEIMC);
        regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_VTEIAC);
        regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_VTEIAM);
        regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_VTEITR(0));
        regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_VTIVAR(0));
        regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_VTIVAR_MISC);

        /* Receive DMA */
        for (i = 0; i < 2; i++)
                regs_buff[14 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAL(i));
        for (i = 0; i < 2; i++)
                regs_buff[16 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDBAH(i));
        for (i = 0; i < 2; i++)
                regs_buff[18 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDLEN(i));
        for (i = 0; i < 2; i++)
                regs_buff[20 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDH(i));
        for (i = 0; i < 2; i++)
                regs_buff[22 + i] = IXGBE_READ_REG(hw, IXGBE_VFRDT(i));
        for (i = 0; i < 2; i++)
                regs_buff[24 + i] = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(i));
        for (i = 0; i < 2; i++)
                regs_buff[26 + i] = IXGBE_READ_REG(hw, IXGBE_VFSRRCTL(i));

        /* Receive */
        regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_VFPSRTYPE);

        /* Transmit */
        for (i = 0; i < 2; i++)
                regs_buff[29 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAL(i));
        for (i = 0; i < 2; i++)
                regs_buff[31 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDBAH(i));
        for (i = 0; i < 2; i++)
                regs_buff[33 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDLEN(i));
        for (i = 0; i < 2; i++)
                regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDH(i));
        for (i = 0; i < 2; i++)
                regs_buff[37 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDT(i));
        for (i = 0; i < 2; i++)
                regs_buff[39 + i] = IXGBE_READ_REG(hw, IXGBE_VFTXDCTL(i));
        for (i = 0; i < 2; i++)
                regs_buff[41 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAL(i));
        for (i = 0; i < 2; i++)
                regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_VFTDWBAH(i));
}

static void ixgbevf_get_drvinfo(struct net_device *netdev,
                                struct ethtool_drvinfo *drvinfo)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        strlcpy(drvinfo->driver, ixgbevf_driver_name, sizeof(drvinfo->driver));
        strlcpy(drvinfo->bus_info, pci_name(adapter->pdev),
                sizeof(drvinfo->bus_info));

        drvinfo->n_priv_flags = IXGBEVF_PRIV_FLAGS_STR_LEN;
}

static void ixgbevf_get_ringparam(struct net_device *netdev,
                                  struct ethtool_ringparam *ring)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        ring->rx_max_pending = IXGBEVF_MAX_RXD;
        ring->tx_max_pending = IXGBEVF_MAX_TXD;
        ring->rx_pending = adapter->rx_ring_count;
        ring->tx_pending = adapter->tx_ring_count;
}

static int ixgbevf_set_ringparam(struct net_device *netdev,
                                 struct ethtool_ringparam *ring)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbevf_ring *tx_ring = NULL, *rx_ring = NULL;
        u32 new_rx_count, new_tx_count;
        int i, j, err = 0;

        if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
                return -EINVAL;

        new_tx_count = max_t(u32, ring->tx_pending, IXGBEVF_MIN_TXD);
        new_tx_count = min_t(u32, new_tx_count, IXGBEVF_MAX_TXD);
        new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);

        new_rx_count = max_t(u32, ring->rx_pending, IXGBEVF_MIN_RXD);
        new_rx_count = min_t(u32, new_rx_count, IXGBEVF_MAX_RXD);
        new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);
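        /* Requested counts are clamped to the supported range and rounded to
         * the hardware descriptor multiple. As a sketch (assuming the usual
         * IXGBEVF_MIN_TXD=64, IXGBEVF_MAX_TXD=4096 and a multiple of 8), a
         * request such as "ethtool -G <if> tx 100" becomes 104 descriptors,
         * while "tx 10000" is capped at 4096.
         */
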
        /* if nothing to do return success */
        if ((new_tx_count == adapter->tx_ring_count) &&
            (new_rx_count == adapter->rx_ring_count))
                return 0;

        while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
                usleep_range(1000, 2000);

        if (!netif_running(adapter->netdev)) {
                for (i = 0; i < adapter->num_tx_queues; i++)
                        adapter->tx_ring[i]->count = new_tx_count;
                for (i = 0; i < adapter->num_xdp_queues; i++)
                        adapter->xdp_ring[i]->count = new_tx_count;
                for (i = 0; i < adapter->num_rx_queues; i++)
                        adapter->rx_ring[i]->count = new_rx_count;
                adapter->tx_ring_count = new_tx_count;
                adapter->xdp_ring_count = new_tx_count;
                adapter->rx_ring_count = new_rx_count;
                goto clear_reset;
        }

        if (new_tx_count != adapter->tx_ring_count) {
                tx_ring = vmalloc(array_size(sizeof(*tx_ring),
                                             adapter->num_tx_queues +
                                             adapter->num_xdp_queues));
                if (!tx_ring) {
                        err = -ENOMEM;
                        goto clear_reset;
                }

                for (i = 0; i < adapter->num_tx_queues; i++) {
                        /* clone ring and setup updated count */
                        tx_ring[i] = *adapter->tx_ring[i];
                        tx_ring[i].count = new_tx_count;
                        err = ixgbevf_setup_tx_resources(&tx_ring[i]);
                        if (err) {
                                while (i) {
                                        i--;
                                        ixgbevf_free_tx_resources(&tx_ring[i]);
                                }

                                vfree(tx_ring);
                                tx_ring = NULL;

                                goto clear_reset;
                        }
                }

                for (j = 0; j < adapter->num_xdp_queues; i++, j++) {
                        /* clone ring and setup updated count */
                        tx_ring[i] = *adapter->xdp_ring[j];
                        tx_ring[i].count = new_tx_count;
                        err = ixgbevf_setup_tx_resources(&tx_ring[i]);
                        if (err) {
                                while (i) {
                                        i--;
                                        ixgbevf_free_tx_resources(&tx_ring[i]);
                                }

                                vfree(tx_ring);
                                tx_ring = NULL;

                                goto clear_reset;
                        }
                }
        }

        if (new_rx_count != adapter->rx_ring_count) {
                rx_ring = vmalloc(array_size(sizeof(*rx_ring),
                                             adapter->num_rx_queues));
                if (!rx_ring) {
                        err = -ENOMEM;
                        goto clear_reset;
                }

                for (i = 0; i < adapter->num_rx_queues; i++) {
                        /* clone ring and setup updated count */
                        rx_ring[i] = *adapter->rx_ring[i];

                        /* Clear copied XDP RX-queue info */
                        memset(&rx_ring[i].xdp_rxq, 0,
                               sizeof(rx_ring[i].xdp_rxq));

                        rx_ring[i].count = new_rx_count;
                        err = ixgbevf_setup_rx_resources(adapter, &rx_ring[i]);
                        if (err) {
                                while (i) {
                                        i--;
                                        ixgbevf_free_rx_resources(&rx_ring[i]);
                                }

                                vfree(rx_ring);
                                rx_ring = NULL;

                                goto clear_reset;
                        }
                }
        }

        /* bring interface down to prepare for update */
        ixgbevf_down(adapter);

        /* Tx */
        if (tx_ring) {
                for (i = 0; i < adapter->num_tx_queues; i++) {
                        ixgbevf_free_tx_resources(adapter->tx_ring[i]);
                        *adapter->tx_ring[i] = tx_ring[i];
                }
                adapter->tx_ring_count = new_tx_count;

                for (j = 0; j < adapter->num_xdp_queues; i++, j++) {
                        ixgbevf_free_tx_resources(adapter->xdp_ring[j]);
                        *adapter->xdp_ring[j] = tx_ring[i];
                }
                adapter->xdp_ring_count = new_tx_count;

                vfree(tx_ring);
                tx_ring = NULL;
        }

        /* Rx */
        if (rx_ring) {
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        ixgbevf_free_rx_resources(adapter->rx_ring[i]);
                        *adapter->rx_ring[i] = rx_ring[i];
                }
                adapter->rx_ring_count = new_rx_count;

                vfree(rx_ring);
                rx_ring = NULL;
        }

        /* restore interface using new values */
        ixgbevf_up(adapter);

clear_reset:
        /* free Tx resources if Rx error is encountered */
        if (tx_ring) {
                for (i = 0;
                     i < adapter->num_tx_queues + adapter->num_xdp_queues; i++)
                        ixgbevf_free_tx_resources(&tx_ring[i]);
                vfree(tx_ring);
        }

        clear_bit(__IXGBEVF_RESETTING, &adapter->state);
        return err;
}

static int ixgbevf_get_sset_count(struct net_device *netdev, int stringset)
{
        switch (stringset) {
        case ETH_SS_TEST:
                return IXGBEVF_TEST_LEN;
        case ETH_SS_STATS:
                return IXGBEVF_STATS_LEN;
        case ETH_SS_PRIV_FLAGS:
                return IXGBEVF_PRIV_FLAGS_STR_LEN;
        default:
                return -EINVAL;
        }
}

static void ixgbevf_get_ethtool_stats(struct net_device *netdev,
                                      struct ethtool_stats *stats, u64 *data)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct rtnl_link_stats64 temp;
        const struct rtnl_link_stats64 *net_stats;
        unsigned int start;
        struct ixgbevf_ring *ring;
        int i, j;
        char *p;

        ixgbevf_update_stats(adapter);
        net_stats = dev_get_stats(netdev, &temp);
        for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
                switch (ixgbevf_gstrings_stats[i].type) {
                case NETDEV_STATS:
                        p = (char *)net_stats +
                                        ixgbevf_gstrings_stats[i].stat_offset;
                        break;
                case IXGBEVF_STATS:
                        p = (char *)adapter +
                                        ixgbevf_gstrings_stats[i].stat_offset;
                        break;
                default:
                        data[i] = 0;
                        continue;
                }

                data[i] = (ixgbevf_gstrings_stats[i].sizeof_stat ==
                           sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
        }
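        /* The remaining entries are per-ring packet/byte pairs in the same
         * order the strings are emitted in ixgbevf_get_strings(): all Tx
         * queues, then XDP queues, then Rx queues. Each pair is read under
         * the ring's u64_stats seqcount so 64-bit counters are sampled
         * consistently even on 32-bit hosts where the writer cannot update
         * them atomically.
         */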

        /* populate Tx queue data */
        for (j = 0; j < adapter->num_tx_queues; j++) {
                ring = adapter->tx_ring[j];
                if (!ring) {
                        data[i++] = 0;
                        data[i++] = 0;
                        continue;
                }

                do {
                        start = u64_stats_fetch_begin_irq(&ring->syncp);
                        data[i] = ring->stats.packets;
                        data[i + 1] = ring->stats.bytes;
                } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
                i += 2;
        }

        /* populate XDP queue data */
        for (j = 0; j < adapter->num_xdp_queues; j++) {
                ring = adapter->xdp_ring[j];
                if (!ring) {
                        data[i++] = 0;
                        data[i++] = 0;
                        continue;
                }

                do {
                        start = u64_stats_fetch_begin_irq(&ring->syncp);
                        data[i] = ring->stats.packets;
                        data[i + 1] = ring->stats.bytes;
                } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
                i += 2;
        }

        /* populate Rx queue data */
        for (j = 0; j < adapter->num_rx_queues; j++) {
                ring = adapter->rx_ring[j];
                if (!ring) {
                        data[i++] = 0;
                        data[i++] = 0;
                        continue;
                }

                do {
                        start = u64_stats_fetch_begin_irq(&ring->syncp);
                        data[i] = ring->stats.packets;
                        data[i + 1] = ring->stats.bytes;
                } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
                i += 2;
        }
}

static void ixgbevf_get_strings(struct net_device *netdev, u32 stringset,
                                u8 *data)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        char *p = (char *)data;
        int i;

        switch (stringset) {
        case ETH_SS_TEST:
                memcpy(data, *ixgbe_gstrings_test,
                       IXGBEVF_TEST_LEN * ETH_GSTRING_LEN);
                break;
        case ETH_SS_STATS:
                for (i = 0; i < IXGBEVF_GLOBAL_STATS_LEN; i++) {
                        memcpy(p, ixgbevf_gstrings_stats[i].stat_string,
                               ETH_GSTRING_LEN);
                        p += ETH_GSTRING_LEN;
                }

                for (i = 0; i < adapter->num_tx_queues; i++) {
                        sprintf(p, "tx_queue_%u_packets", i);
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "tx_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
                }
                for (i = 0; i < adapter->num_xdp_queues; i++) {
                        sprintf(p, "xdp_queue_%u_packets", i);
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "xdp_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
                }
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        sprintf(p, "rx_queue_%u_packets", i);
                        p += ETH_GSTRING_LEN;
                        sprintf(p, "rx_queue_%u_bytes", i);
                        p += ETH_GSTRING_LEN;
                }
                break;
        case ETH_SS_PRIV_FLAGS:
                memcpy(data, ixgbevf_priv_flags_strings,
                       IXGBEVF_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN);
                break;
        }
}

static int ixgbevf_link_test(struct ixgbevf_adapter *adapter, u64 *data)
{
        struct ixgbe_hw *hw = &adapter->hw;
        bool link_up;
        u32 link_speed = 0;

        *data = 0;
        hw->mac.ops.check_link(hw, &link_speed, &link_up, true);
        if (!link_up)
                *data = 1;

        return *data;
}

/* ethtool register test data */
struct ixgbevf_reg_test {
        u16 reg;
        u8 array_len;
        u8 test_type;
        u32 mask;
        u32 write;
};

/* In the hardware, registers are laid out either singly, in arrays
 * spaced 0x40 bytes apart, or in contiguous tables. We assume
 * most tests take place on arrays or single registers (handled
 * as a single-element array) and special-case the tables.
 * Table tests are always pattern tests.
 *
 * We also make provision for some required setup steps by specifying
 * registers to be written without any read-back testing.
 */

#define PATTERN_TEST 1
#define SET_READ_TEST 2
#define WRITE_NO_TEST 3
#define TABLE32_TEST 4
#define TABLE64_TEST_LO 5
#define TABLE64_TEST_HI 6

/* default VF register test */
static const struct ixgbevf_reg_test reg_test_vf[] = {
        { IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
        { IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
        { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, IXGBE_RXDCTL_ENABLE },
        { IXGBE_VFRDT(0), 2, PATTERN_TEST, 0x0000FFFF, 0x0000FFFF },
        { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST, 0, 0 },
        { IXGBE_VFTDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFFFF },
        { IXGBE_VFTDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
        { IXGBE_VFTDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFF80 },
        /* terminate the table with a null entry */
        { .reg = 0 }
};

static const u32 register_test_patterns[] = {
        0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
};

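/* The two alternating-bit patterns catch shorted or stuck adjacent bits,
 * while all-zeros and all-ones catch bits stuck in either state; each
 * pattern is masked by the per-register write and read masks before use.
 */
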
static bool reg_pattern_test(struct ixgbevf_adapter *adapter, u64 *data,
                             int reg, u32 mask, u32 write)
{
        u32 pat, val, before;

        if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
                *data = 1;
                return true;
        }
        for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) {
                before = ixgbevf_read_reg(&adapter->hw, reg);
                ixgbe_write_reg(&adapter->hw, reg,
                                register_test_patterns[pat] & write);
                val = ixgbevf_read_reg(&adapter->hw, reg);
                if (val != (register_test_patterns[pat] & write & mask)) {
                        hw_dbg(&adapter->hw,
                               "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
                               reg, val,
                               register_test_patterns[pat] & write & mask);
                        *data = reg;
                        ixgbe_write_reg(&adapter->hw, reg, before);
                        return true;
                }
                ixgbe_write_reg(&adapter->hw, reg, before);
        }
        return false;
}

static bool reg_set_and_check(struct ixgbevf_adapter *adapter, u64 *data,
                              int reg, u32 mask, u32 write)
{
        u32 val, before;

        if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
                *data = 1;
                return true;
        }
        before = ixgbevf_read_reg(&adapter->hw, reg);
        ixgbe_write_reg(&adapter->hw, reg, write & mask);
        val = ixgbevf_read_reg(&adapter->hw, reg);
        if ((write & mask) != (val & mask)) {
                pr_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
                       reg, (val & mask), write & mask);
                *data = reg;
                ixgbe_write_reg(&adapter->hw, reg, before);
                return true;
        }
        ixgbe_write_reg(&adapter->hw, reg, before);
        return false;
}

static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
{
        const struct ixgbevf_reg_test *test;
        u32 i;

        if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
                dev_err(&adapter->pdev->dev,
                        "Adapter removed - register test blocked\n");
                *data = 1;
                return 1;
        }
        test = reg_test_vf;

        /* Perform the register test, looping through the test table
         * until we either fail or reach the null entry.
         */
        while (test->reg) {
                for (i = 0; i < test->array_len; i++) {
                        bool b = false;

                        switch (test->test_type) {
                        case PATTERN_TEST:
                                b = reg_pattern_test(adapter, data,
                                                     test->reg + (i * 0x40),
                                                     test->mask, test->write);
                                break;
                        case SET_READ_TEST:
                                b = reg_set_and_check(adapter, data,
                                                      test->reg + (i * 0x40),
                                                      test->mask, test->write);
                                break;
                        case WRITE_NO_TEST:
                                ixgbe_write_reg(&adapter->hw,
                                                test->reg + (i * 0x40),
                                                test->write);
                                break;
                        case TABLE32_TEST:
                                b = reg_pattern_test(adapter, data,
                                                     test->reg + (i * 4),
                                                     test->mask, test->write);
                                break;
                        case TABLE64_TEST_LO:
                                b = reg_pattern_test(adapter, data,
                                                     test->reg + (i * 8),
                                                     test->mask, test->write);
                                break;
                        case TABLE64_TEST_HI:
                                b = reg_pattern_test(adapter, data,
                                                     test->reg + 4 + (i * 8),
                                                     test->mask, test->write);
                                break;
                        }
                        if (b)
                                return 1;
                }
                test++;
        }

        *data = 0;
        return *data;
}

static void ixgbevf_diag_test(struct net_device *netdev,
                              struct ethtool_test *eth_test, u64 *data)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        bool if_running = netif_running(netdev);

        if (IXGBE_REMOVED(adapter->hw.hw_addr)) {
                dev_err(&adapter->pdev->dev,
                        "Adapter removed - test blocked\n");
                data[0] = 1;
                data[1] = 1;
                eth_test->flags |= ETH_TEST_FL_FAILED;
                return;
        }
        set_bit(__IXGBEVF_TESTING, &adapter->state);
        if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
                hw_dbg(&adapter->hw, "offline testing starting\n");

                /* Link test performed before hardware reset so autoneg doesn't
                 * interfere with test result
                 */
                if (ixgbevf_link_test(adapter, &data[1]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;

                if (if_running)
                        /* indicate we're in test mode */
                        ixgbevf_close(netdev);
                else
                        ixgbevf_reset(adapter);

                hw_dbg(&adapter->hw, "register testing starting\n");
                if (ixgbevf_reg_test(adapter, &data[0]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;

                ixgbevf_reset(adapter);

                clear_bit(__IXGBEVF_TESTING, &adapter->state);
                if (if_running)
                        ixgbevf_open(netdev);
        } else {
                hw_dbg(&adapter->hw, "online testing starting\n");

                if (ixgbevf_link_test(adapter, &data[1]))
                        eth_test->flags |= ETH_TEST_FL_FAILED;

                /* Online tests aren't run; pass by default */
                data[0] = 0;

                clear_bit(__IXGBEVF_TESTING, &adapter->state);
        }
        msleep_interruptible(4 * 1000);
}

static int ixgbevf_nway_reset(struct net_device *netdev)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        if (netif_running(netdev))
                ixgbevf_reinit_locked(adapter);

        return 0;
}

static int ixgbevf_get_coalesce(struct net_device *netdev,
                                struct ethtool_coalesce *ec)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        /* only valid if in constant ITR mode */
        if (adapter->rx_itr_setting <= 1)
                ec->rx_coalesce_usecs = adapter->rx_itr_setting;
        else
                ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;

        /* if in mixed Tx/Rx queues per vector mode, report only Rx settings */
        if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count)
                return 0;

        /* only valid if in constant ITR mode */
        if (adapter->tx_itr_setting <= 1)
                ec->tx_coalesce_usecs = adapter->tx_itr_setting;
        else
                ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;

        return 0;
}

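/* Note on units: the *_itr_setting fields appear to hold the interval in
 * the units written to EITR, which is the microsecond value shifted left
 * by 2 (hence the << 2 / >> 2 conversions here and in
 * ixgbevf_set_coalesce() below); settings of 0 and 1 are special
 * non-constant modes and are passed through untouched.
 */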
static int ixgbevf_set_coalesce(struct net_device *netdev,
                                struct ethtool_coalesce *ec)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        struct ixgbevf_q_vector *q_vector;
        int num_vectors, i;
        u16 tx_itr_param, rx_itr_param;

        /* don't accept Tx specific changes if we've got mixed RxTx vectors */
        if (adapter->q_vector[0]->tx.count &&
            adapter->q_vector[0]->rx.count && ec->tx_coalesce_usecs)
                return -EINVAL;

        if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) ||
            (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)))
                return -EINVAL;

        if (ec->rx_coalesce_usecs > 1)
                adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
        else
                adapter->rx_itr_setting = ec->rx_coalesce_usecs;

        if (adapter->rx_itr_setting == 1)
                rx_itr_param = IXGBE_20K_ITR;
        else
                rx_itr_param = adapter->rx_itr_setting;

        if (ec->tx_coalesce_usecs > 1)
                adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
        else
                adapter->tx_itr_setting = ec->tx_coalesce_usecs;

        if (adapter->tx_itr_setting == 1)
                tx_itr_param = IXGBE_12K_ITR;
        else
                tx_itr_param = adapter->tx_itr_setting;

        num_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;

        for (i = 0; i < num_vectors; i++) {
                q_vector = adapter->q_vector[i];
                if (q_vector->tx.count && !q_vector->rx.count)
                        /* Tx only */
                        q_vector->itr = tx_itr_param;
                else
                        /* Rx only or mixed */
                        q_vector->itr = rx_itr_param;
                ixgbevf_write_eitr(q_vector);
        }

        return 0;
}

static int ixgbevf_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
                             u32 *rules __always_unused)
{
        struct ixgbevf_adapter *adapter = netdev_priv(dev);

        switch (info->cmd) {
        case ETHTOOL_GRXRINGS:
                info->data = adapter->num_rx_queues;
                return 0;
        default:
                hw_dbg(&adapter->hw, "Command parameters not supported\n");
                return -EOPNOTSUPP;
        }
}

static u32 ixgbevf_get_rxfh_indir_size(struct net_device *netdev)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);

        if (adapter->hw.mac.type >= ixgbe_mac_X550_vf)
                return IXGBEVF_X550_VFRETA_SIZE;

        return IXGBEVF_82599_RETA_SIZE;
}

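/* X550-class VFs keep a local copy of the RSS key and indirection table
 * (adapter->rss_key / adapter->rss_indir_tbl), so ixgbevf_get_rxfh() below
 * can answer directly; older 82599/x540 VFs have to ask the PF for both
 * over the mailbox, which is why that path takes mbx_lock.
 */
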
static u32 ixgbevf_get_rxfh_key_size(struct net_device *netdev)
{
        return IXGBEVF_RSS_HASH_KEY_SIZE;
}

static int ixgbevf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
                            u8 *hfunc)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        int err = 0;

        if (hfunc)
                *hfunc = ETH_RSS_HASH_TOP;

        if (adapter->hw.mac.type >= ixgbe_mac_X550_vf) {
                if (key)
                        memcpy(key, adapter->rss_key,
                               ixgbevf_get_rxfh_key_size(netdev));

                if (indir) {
                        int i;

                        for (i = 0; i < IXGBEVF_X550_VFRETA_SIZE; i++)
                                indir[i] = adapter->rss_indir_tbl[i];
                }
        } else {
                /* If neither indirection table nor hash key was requested
                 * - just return a success avoiding taking any locks.
                 */
                if (!indir && !key)
                        return 0;

                spin_lock_bh(&adapter->mbx_lock);
                if (indir)
                        err = ixgbevf_get_reta_locked(&adapter->hw, indir,
                                                      adapter->num_rx_queues);

                if (!err && key)
                        err = ixgbevf_get_rss_key_locked(&adapter->hw, key);

                spin_unlock_bh(&adapter->mbx_lock);
        }

        return err;
}

static u32 ixgbevf_get_priv_flags(struct net_device *netdev)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        u32 priv_flags = 0;

        if (adapter->flags & IXGBEVF_FLAGS_LEGACY_RX)
                priv_flags |= IXGBEVF_PRIV_FLAGS_LEGACY_RX;

        return priv_flags;
}

static int ixgbevf_set_priv_flags(struct net_device *netdev, u32 priv_flags)
{
        struct ixgbevf_adapter *adapter = netdev_priv(netdev);
        unsigned int flags = adapter->flags;

        flags &= ~IXGBEVF_FLAGS_LEGACY_RX;
        if (priv_flags & IXGBEVF_PRIV_FLAGS_LEGACY_RX)
                flags |= IXGBEVF_FLAGS_LEGACY_RX;

        if (flags != adapter->flags) {
                adapter->flags = flags;

                /* reset interface to repopulate queues */
                if (netif_running(netdev))
                        ixgbevf_reinit_locked(adapter);
        }

        return 0;
}

static const struct ethtool_ops ixgbevf_ethtool_ops = {
        .supported_coalesce_params = ETHTOOL_COALESCE_USECS,
        .get_drvinfo            = ixgbevf_get_drvinfo,
        .get_regs_len           = ixgbevf_get_regs_len,
        .get_regs               = ixgbevf_get_regs,
        .nway_reset             = ixgbevf_nway_reset,
        .get_link               = ethtool_op_get_link,
        .get_ringparam          = ixgbevf_get_ringparam,
        .set_ringparam          = ixgbevf_set_ringparam,
        .get_msglevel           = ixgbevf_get_msglevel,
        .set_msglevel           = ixgbevf_set_msglevel,
        .self_test              = ixgbevf_diag_test,
        .get_sset_count         = ixgbevf_get_sset_count,
        .get_strings            = ixgbevf_get_strings,
        .get_ethtool_stats      = ixgbevf_get_ethtool_stats,
        .get_coalesce           = ixgbevf_get_coalesce,
        .set_coalesce           = ixgbevf_set_coalesce,
        .get_rxnfc              = ixgbevf_get_rxnfc,
        .get_rxfh_indir_size    = ixgbevf_get_rxfh_indir_size,
        .get_rxfh_key_size      = ixgbevf_get_rxfh_key_size,
        .get_rxfh               = ixgbevf_get_rxfh,
        .get_link_ksettings     = ixgbevf_get_link_ksettings,
        .get_priv_flags         = ixgbevf_get_priv_flags,
        .set_priv_flags         = ixgbevf_set_priv_flags,
};

void ixgbevf_set_ethtool_ops(struct net_device *netdev)
{
        netdev->ethtool_ops = &ixgbevf_ethtool_ops;
}