1 // SPDX-License-Identifier: GPL-2.0
2 /* Copyright(c) 1999 - 2018 Intel Corporation. */
4 /* ethtool support for ixgbevf */
6 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
8 #include <linux/types.h>
9 #include <linux/module.h>
10 #include <linux/slab.h>
11 #include <linux/pci.h>
12 #include <linux/netdevice.h>
13 #include <linux/ethtool.h>
14 #include <linux/vmalloc.h>
15 #include <linux/if_vlan.h>
16 #include <linux/uaccess.h>
20 enum {NETDEV_STATS
, IXGBEVF_STATS
};
23 char stat_string
[ETH_GSTRING_LEN
];
29 #define IXGBEVF_STAT(_name, _stat) { \
30 .stat_string = _name, \
31 .type = IXGBEVF_STATS, \
32 .sizeof_stat = sizeof_field(struct ixgbevf_adapter, _stat), \
33 .stat_offset = offsetof(struct ixgbevf_adapter, _stat) \
36 #define IXGBEVF_NETDEV_STAT(_net_stat) { \
37 .stat_string = #_net_stat, \
38 .type = NETDEV_STATS, \
39 .sizeof_stat = sizeof_field(struct net_device_stats, _net_stat), \
40 .stat_offset = offsetof(struct net_device_stats, _net_stat) \
43 static struct ixgbe_stats ixgbevf_gstrings_stats
[] = {
44 IXGBEVF_NETDEV_STAT(rx_packets
),
45 IXGBEVF_NETDEV_STAT(tx_packets
),
46 IXGBEVF_NETDEV_STAT(rx_bytes
),
47 IXGBEVF_NETDEV_STAT(tx_bytes
),
48 IXGBEVF_STAT("tx_busy", tx_busy
),
49 IXGBEVF_STAT("tx_restart_queue", restart_queue
),
50 IXGBEVF_STAT("tx_timeout_count", tx_timeout_count
),
51 IXGBEVF_NETDEV_STAT(multicast
),
52 IXGBEVF_STAT("rx_csum_offload_errors", hw_csum_rx_error
),
53 IXGBEVF_STAT("alloc_rx_page", alloc_rx_page
),
54 IXGBEVF_STAT("alloc_rx_page_failed", alloc_rx_page_failed
),
55 IXGBEVF_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed
),
56 IXGBEVF_STAT("tx_ipsec", tx_ipsec
),
57 IXGBEVF_STAT("rx_ipsec", rx_ipsec
),
60 #define IXGBEVF_QUEUE_STATS_LEN ( \
61 (((struct ixgbevf_adapter *)netdev_priv(netdev))->num_tx_queues + \
62 ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_xdp_queues + \
63 ((struct ixgbevf_adapter *)netdev_priv(netdev))->num_rx_queues) * \
64 (sizeof(struct ixgbevf_stats) / sizeof(u64)))
65 #define IXGBEVF_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbevf_gstrings_stats)
67 #define IXGBEVF_STATS_LEN (IXGBEVF_GLOBAL_STATS_LEN + IXGBEVF_QUEUE_STATS_LEN)
68 static const char ixgbe_gstrings_test
[][ETH_GSTRING_LEN
] = {
69 "Register test (offline)",
70 "Link test (on/offline)"
73 #define IXGBEVF_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN)
75 static const char ixgbevf_priv_flags_strings
[][ETH_GSTRING_LEN
] = {
76 #define IXGBEVF_PRIV_FLAGS_LEGACY_RX BIT(0)
80 #define IXGBEVF_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbevf_priv_flags_strings)
82 static int ixgbevf_get_link_ksettings(struct net_device
*netdev
,
83 struct ethtool_link_ksettings
*cmd
)
85 struct ixgbevf_adapter
*adapter
= netdev_priv(netdev
);
87 ethtool_link_ksettings_zero_link_mode(cmd
, supported
);
88 ethtool_link_ksettings_add_link_mode(cmd
, supported
, 10000baseT_Full
);
89 cmd
->base
.autoneg
= AUTONEG_DISABLE
;
92 if (adapter
->link_up
) {
93 __u32 speed
= SPEED_10000
;
95 switch (adapter
->link_speed
) {
96 case IXGBE_LINK_SPEED_10GB_FULL
:
99 case IXGBE_LINK_SPEED_1GB_FULL
:
102 case IXGBE_LINK_SPEED_100_FULL
:
107 cmd
->base
.speed
= speed
;
108 cmd
->base
.duplex
= DUPLEX_FULL
;
110 cmd
->base
.speed
= SPEED_UNKNOWN
;
111 cmd
->base
.duplex
= DUPLEX_UNKNOWN
;
117 static u32
ixgbevf_get_msglevel(struct net_device
*netdev
)
119 struct ixgbevf_adapter
*adapter
= netdev_priv(netdev
);
121 return adapter
->msg_enable
;
124 static void ixgbevf_set_msglevel(struct net_device
*netdev
, u32 data
)
126 struct ixgbevf_adapter
*adapter
= netdev_priv(netdev
);
128 adapter
->msg_enable
= data
;
131 static int ixgbevf_get_regs_len(struct net_device
*netdev
)
133 #define IXGBE_REGS_LEN 45
134 return IXGBE_REGS_LEN
* sizeof(u32
);
137 static void ixgbevf_get_regs(struct net_device
*netdev
,
138 struct ethtool_regs
*regs
,
141 struct ixgbevf_adapter
*adapter
= netdev_priv(netdev
);
142 struct ixgbe_hw
*hw
= &adapter
->hw
;
144 u32 regs_len
= ixgbevf_get_regs_len(netdev
);
147 memset(p
, 0, regs_len
);
149 /* generate a number suitable for ethtool's register version */
150 regs
->version
= (1u << 24) | (hw
->revision_id
<< 16) | hw
->device_id
;
152 /* General Registers */
153 regs_buff
[0] = IXGBE_READ_REG(hw
, IXGBE_VFCTRL
);
154 regs_buff
[1] = IXGBE_READ_REG(hw
, IXGBE_VFSTATUS
);
155 regs_buff
[2] = IXGBE_READ_REG(hw
, IXGBE_VFLINKS
);
156 regs_buff
[3] = IXGBE_READ_REG(hw
, IXGBE_VFRXMEMWRAP
);
157 regs_buff
[4] = IXGBE_READ_REG(hw
, IXGBE_VFFRTIMER
);
160 /* don't read EICR because it can clear interrupt causes, instead
161 * read EICS which is a shadow but doesn't clear EICR
163 regs_buff
[5] = IXGBE_READ_REG(hw
, IXGBE_VTEICS
);
164 regs_buff
[6] = IXGBE_READ_REG(hw
, IXGBE_VTEICS
);
165 regs_buff
[7] = IXGBE_READ_REG(hw
, IXGBE_VTEIMS
);
166 regs_buff
[8] = IXGBE_READ_REG(hw
, IXGBE_VTEIMC
);
167 regs_buff
[9] = IXGBE_READ_REG(hw
, IXGBE_VTEIAC
);
168 regs_buff
[10] = IXGBE_READ_REG(hw
, IXGBE_VTEIAM
);
169 regs_buff
[11] = IXGBE_READ_REG(hw
, IXGBE_VTEITR(0));
170 regs_buff
[12] = IXGBE_READ_REG(hw
, IXGBE_VTIVAR(0));
171 regs_buff
[13] = IXGBE_READ_REG(hw
, IXGBE_VTIVAR_MISC
);
174 for (i
= 0; i
< 2; i
++)
175 regs_buff
[14 + i
] = IXGBE_READ_REG(hw
, IXGBE_VFRDBAL(i
));
176 for (i
= 0; i
< 2; i
++)
177 regs_buff
[16 + i
] = IXGBE_READ_REG(hw
, IXGBE_VFRDBAH(i
));
178 for (i
= 0; i
< 2; i
++)
179 regs_buff
[18 + i
] = IXGBE_READ_REG(hw
, IXGBE_VFRDLEN(i
));
180 for (i
= 0; i
< 2; i
++)
181 regs_buff
[20 + i
] = IXGBE_READ_REG(hw
, IXGBE_VFRDH(i
));
182 for (i
= 0; i
< 2; i
++)
183 regs_buff
[22 + i
] = IXGBE_READ_REG(hw
, IXGBE_VFRDT(i
));
184 for (i
= 0; i
< 2; i
++)
185 regs_buff
[24 + i
] = IXGBE_READ_REG(hw
, IXGBE_VFRXDCTL(i
));
186 for (i
= 0; i
< 2; i
++)
187 regs_buff
[26 + i
] = IXGBE_READ_REG(hw
, IXGBE_VFSRRCTL(i
));
190 regs_buff
[28] = IXGBE_READ_REG(hw
, IXGBE_VFPSRTYPE
);
193 for (i
= 0; i
< 2; i
++)
194 regs_buff
[29 + i
] = IXGBE_READ_REG(hw
, IXGBE_VFTDBAL(i
));
195 for (i
= 0; i
< 2; i
++)
196 regs_buff
[31 + i
] = IXGBE_READ_REG(hw
, IXGBE_VFTDBAH(i
));
197 for (i
= 0; i
< 2; i
++)
198 regs_buff
[33 + i
] = IXGBE_READ_REG(hw
, IXGBE_VFTDLEN(i
));
199 for (i
= 0; i
< 2; i
++)
200 regs_buff
[35 + i
] = IXGBE_READ_REG(hw
, IXGBE_VFTDH(i
));
201 for (i
= 0; i
< 2; i
++)
202 regs_buff
[37 + i
] = IXGBE_READ_REG(hw
, IXGBE_VFTDT(i
));
203 for (i
= 0; i
< 2; i
++)
204 regs_buff
[39 + i
] = IXGBE_READ_REG(hw
, IXGBE_VFTXDCTL(i
));
205 for (i
= 0; i
< 2; i
++)
206 regs_buff
[41 + i
] = IXGBE_READ_REG(hw
, IXGBE_VFTDWBAL(i
));
207 for (i
= 0; i
< 2; i
++)
208 regs_buff
[43 + i
] = IXGBE_READ_REG(hw
, IXGBE_VFTDWBAH(i
));
211 static void ixgbevf_get_drvinfo(struct net_device
*netdev
,
212 struct ethtool_drvinfo
*drvinfo
)
214 struct ixgbevf_adapter
*adapter
= netdev_priv(netdev
);
216 strscpy(drvinfo
->driver
, ixgbevf_driver_name
, sizeof(drvinfo
->driver
));
217 strscpy(drvinfo
->bus_info
, pci_name(adapter
->pdev
),
218 sizeof(drvinfo
->bus_info
));
220 drvinfo
->n_priv_flags
= IXGBEVF_PRIV_FLAGS_STR_LEN
;
223 static void ixgbevf_get_ringparam(struct net_device
*netdev
,
224 struct ethtool_ringparam
*ring
,
225 struct kernel_ethtool_ringparam
*kernel_ring
,
226 struct netlink_ext_ack
*extack
)
228 struct ixgbevf_adapter
*adapter
= netdev_priv(netdev
);
230 ring
->rx_max_pending
= IXGBEVF_MAX_RXD
;
231 ring
->tx_max_pending
= IXGBEVF_MAX_TXD
;
232 ring
->rx_pending
= adapter
->rx_ring_count
;
233 ring
->tx_pending
= adapter
->tx_ring_count
;
236 static int ixgbevf_set_ringparam(struct net_device
*netdev
,
237 struct ethtool_ringparam
*ring
,
238 struct kernel_ethtool_ringparam
*kernel_ring
,
239 struct netlink_ext_ack
*extack
)
241 struct ixgbevf_adapter
*adapter
= netdev_priv(netdev
);
242 struct ixgbevf_ring
*tx_ring
= NULL
, *rx_ring
= NULL
;
243 u32 new_rx_count
, new_tx_count
;
246 if ((ring
->rx_mini_pending
) || (ring
->rx_jumbo_pending
))
249 new_tx_count
= max_t(u32
, ring
->tx_pending
, IXGBEVF_MIN_TXD
);
250 new_tx_count
= min_t(u32
, new_tx_count
, IXGBEVF_MAX_TXD
);
251 new_tx_count
= ALIGN(new_tx_count
, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE
);
253 new_rx_count
= max_t(u32
, ring
->rx_pending
, IXGBEVF_MIN_RXD
);
254 new_rx_count
= min_t(u32
, new_rx_count
, IXGBEVF_MAX_RXD
);
255 new_rx_count
= ALIGN(new_rx_count
, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE
);
257 /* if nothing to do return success */
258 if ((new_tx_count
== adapter
->tx_ring_count
) &&
259 (new_rx_count
== adapter
->rx_ring_count
))
262 while (test_and_set_bit(__IXGBEVF_RESETTING
, &adapter
->state
))
263 usleep_range(1000, 2000);
265 if (!netif_running(adapter
->netdev
)) {
266 for (i
= 0; i
< adapter
->num_tx_queues
; i
++)
267 adapter
->tx_ring
[i
]->count
= new_tx_count
;
268 for (i
= 0; i
< adapter
->num_xdp_queues
; i
++)
269 adapter
->xdp_ring
[i
]->count
= new_tx_count
;
270 for (i
= 0; i
< adapter
->num_rx_queues
; i
++)
271 adapter
->rx_ring
[i
]->count
= new_rx_count
;
272 adapter
->tx_ring_count
= new_tx_count
;
273 adapter
->xdp_ring_count
= new_tx_count
;
274 adapter
->rx_ring_count
= new_rx_count
;
278 if (new_tx_count
!= adapter
->tx_ring_count
) {
279 tx_ring
= vmalloc(array_size(sizeof(*tx_ring
),
280 adapter
->num_tx_queues
+
281 adapter
->num_xdp_queues
));
287 for (i
= 0; i
< adapter
->num_tx_queues
; i
++) {
288 /* clone ring and setup updated count */
289 tx_ring
[i
] = *adapter
->tx_ring
[i
];
290 tx_ring
[i
].count
= new_tx_count
;
291 err
= ixgbevf_setup_tx_resources(&tx_ring
[i
]);
295 ixgbevf_free_tx_resources(&tx_ring
[i
]);
305 for (j
= 0; j
< adapter
->num_xdp_queues
; i
++, j
++) {
306 /* clone ring and setup updated count */
307 tx_ring
[i
] = *adapter
->xdp_ring
[j
];
308 tx_ring
[i
].count
= new_tx_count
;
309 err
= ixgbevf_setup_tx_resources(&tx_ring
[i
]);
313 ixgbevf_free_tx_resources(&tx_ring
[i
]);
324 if (new_rx_count
!= adapter
->rx_ring_count
) {
325 rx_ring
= vmalloc(array_size(sizeof(*rx_ring
),
326 adapter
->num_rx_queues
));
332 for (i
= 0; i
< adapter
->num_rx_queues
; i
++) {
333 /* clone ring and setup updated count */
334 rx_ring
[i
] = *adapter
->rx_ring
[i
];
336 /* Clear copied XDP RX-queue info */
337 memset(&rx_ring
[i
].xdp_rxq
, 0,
338 sizeof(rx_ring
[i
].xdp_rxq
));
340 rx_ring
[i
].count
= new_rx_count
;
341 err
= ixgbevf_setup_rx_resources(adapter
, &rx_ring
[i
]);
345 ixgbevf_free_rx_resources(&rx_ring
[i
]);
356 /* bring interface down to prepare for update */
357 ixgbevf_down(adapter
);
361 for (i
= 0; i
< adapter
->num_tx_queues
; i
++) {
362 ixgbevf_free_tx_resources(adapter
->tx_ring
[i
]);
363 *adapter
->tx_ring
[i
] = tx_ring
[i
];
365 adapter
->tx_ring_count
= new_tx_count
;
367 for (j
= 0; j
< adapter
->num_xdp_queues
; i
++, j
++) {
368 ixgbevf_free_tx_resources(adapter
->xdp_ring
[j
]);
369 *adapter
->xdp_ring
[j
] = tx_ring
[i
];
371 adapter
->xdp_ring_count
= new_tx_count
;
379 for (i
= 0; i
< adapter
->num_rx_queues
; i
++) {
380 ixgbevf_free_rx_resources(adapter
->rx_ring
[i
]);
381 *adapter
->rx_ring
[i
] = rx_ring
[i
];
383 adapter
->rx_ring_count
= new_rx_count
;
389 /* restore interface using new values */
393 /* free Tx resources if Rx error is encountered */
396 i
< adapter
->num_tx_queues
+ adapter
->num_xdp_queues
; i
++)
397 ixgbevf_free_tx_resources(&tx_ring
[i
]);
401 clear_bit(__IXGBEVF_RESETTING
, &adapter
->state
);
405 static int ixgbevf_get_sset_count(struct net_device
*netdev
, int stringset
)
409 return IXGBEVF_TEST_LEN
;
411 return IXGBEVF_STATS_LEN
;
412 case ETH_SS_PRIV_FLAGS
:
413 return IXGBEVF_PRIV_FLAGS_STR_LEN
;
419 static void ixgbevf_get_ethtool_stats(struct net_device
*netdev
,
420 struct ethtool_stats
*stats
, u64
*data
)
422 struct ixgbevf_adapter
*adapter
= netdev_priv(netdev
);
423 struct rtnl_link_stats64 temp
;
424 const struct rtnl_link_stats64
*net_stats
;
426 struct ixgbevf_ring
*ring
;
430 ixgbevf_update_stats(adapter
);
431 net_stats
= dev_get_stats(netdev
, &temp
);
432 for (i
= 0; i
< IXGBEVF_GLOBAL_STATS_LEN
; i
++) {
433 switch (ixgbevf_gstrings_stats
[i
].type
) {
435 p
= (char *)net_stats
+
436 ixgbevf_gstrings_stats
[i
].stat_offset
;
439 p
= (char *)adapter
+
440 ixgbevf_gstrings_stats
[i
].stat_offset
;
447 data
[i
] = (ixgbevf_gstrings_stats
[i
].sizeof_stat
==
448 sizeof(u64
)) ? *(u64
*)p
: *(u32
*)p
;
451 /* populate Tx queue data */
452 for (j
= 0; j
< adapter
->num_tx_queues
; j
++) {
453 ring
= adapter
->tx_ring
[j
];
461 start
= u64_stats_fetch_begin(&ring
->syncp
);
462 data
[i
] = ring
->stats
.packets
;
463 data
[i
+ 1] = ring
->stats
.bytes
;
464 } while (u64_stats_fetch_retry(&ring
->syncp
, start
));
468 /* populate XDP queue data */
469 for (j
= 0; j
< adapter
->num_xdp_queues
; j
++) {
470 ring
= adapter
->xdp_ring
[j
];
478 start
= u64_stats_fetch_begin(&ring
->syncp
);
479 data
[i
] = ring
->stats
.packets
;
480 data
[i
+ 1] = ring
->stats
.bytes
;
481 } while (u64_stats_fetch_retry(&ring
->syncp
, start
));
485 /* populate Rx queue data */
486 for (j
= 0; j
< adapter
->num_rx_queues
; j
++) {
487 ring
= adapter
->rx_ring
[j
];
495 start
= u64_stats_fetch_begin(&ring
->syncp
);
496 data
[i
] = ring
->stats
.packets
;
497 data
[i
+ 1] = ring
->stats
.bytes
;
498 } while (u64_stats_fetch_retry(&ring
->syncp
, start
));
503 static void ixgbevf_get_strings(struct net_device
*netdev
, u32 stringset
,
506 struct ixgbevf_adapter
*adapter
= netdev_priv(netdev
);
507 char *p
= (char *)data
;
512 memcpy(data
, *ixgbe_gstrings_test
,
513 IXGBEVF_TEST_LEN
* ETH_GSTRING_LEN
);
516 for (i
= 0; i
< IXGBEVF_GLOBAL_STATS_LEN
; i
++) {
517 memcpy(p
, ixgbevf_gstrings_stats
[i
].stat_string
,
519 p
+= ETH_GSTRING_LEN
;
522 for (i
= 0; i
< adapter
->num_tx_queues
; i
++) {
523 sprintf(p
, "tx_queue_%u_packets", i
);
524 p
+= ETH_GSTRING_LEN
;
525 sprintf(p
, "tx_queue_%u_bytes", i
);
526 p
+= ETH_GSTRING_LEN
;
528 for (i
= 0; i
< adapter
->num_xdp_queues
; i
++) {
529 sprintf(p
, "xdp_queue_%u_packets", i
);
530 p
+= ETH_GSTRING_LEN
;
531 sprintf(p
, "xdp_queue_%u_bytes", i
);
532 p
+= ETH_GSTRING_LEN
;
534 for (i
= 0; i
< adapter
->num_rx_queues
; i
++) {
535 sprintf(p
, "rx_queue_%u_packets", i
);
536 p
+= ETH_GSTRING_LEN
;
537 sprintf(p
, "rx_queue_%u_bytes", i
);
538 p
+= ETH_GSTRING_LEN
;
541 case ETH_SS_PRIV_FLAGS
:
542 memcpy(data
, ixgbevf_priv_flags_strings
,
543 IXGBEVF_PRIV_FLAGS_STR_LEN
* ETH_GSTRING_LEN
);
548 static int ixgbevf_link_test(struct ixgbevf_adapter
*adapter
, u64
*data
)
550 struct ixgbe_hw
*hw
= &adapter
->hw
;
555 hw
->mac
.ops
.check_link(hw
, &link_speed
, &link_up
, true);
562 /* ethtool register test data */
563 struct ixgbevf_reg_test
{
571 /* In the hardware, registers are laid out either singly, in arrays
572 * spaced 0x40 bytes apart, or in contiguous tables. We assume
573 * most tests take place on arrays or single registers (handled
574 * as a single-element array) and special-case the tables.
575 * Table tests are always pattern tests.
577 * We also make provision for some required setup steps by specifying
578 * registers to be written without any read-back testing.
581 #define PATTERN_TEST 1
582 #define SET_READ_TEST 2
583 #define WRITE_NO_TEST 3
584 #define TABLE32_TEST 4
585 #define TABLE64_TEST_LO 5
586 #define TABLE64_TEST_HI 6
588 /* default VF register test */
589 static const struct ixgbevf_reg_test reg_test_vf
[] = {
590 { IXGBE_VFRDBAL(0), 2, PATTERN_TEST
, 0xFFFFFF80, 0xFFFFFF80 },
591 { IXGBE_VFRDBAH(0), 2, PATTERN_TEST
, 0xFFFFFFFF, 0xFFFFFFFF },
592 { IXGBE_VFRDLEN(0), 2, PATTERN_TEST
, 0x000FFF80, 0x000FFFFF },
593 { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST
, 0, IXGBE_RXDCTL_ENABLE
},
594 { IXGBE_VFRDT(0), 2, PATTERN_TEST
, 0x0000FFFF, 0x0000FFFF },
595 { IXGBE_VFRXDCTL(0), 2, WRITE_NO_TEST
, 0, 0 },
596 { IXGBE_VFTDBAL(0), 2, PATTERN_TEST
, 0xFFFFFF80, 0xFFFFFFFF },
597 { IXGBE_VFTDBAH(0), 2, PATTERN_TEST
, 0xFFFFFFFF, 0xFFFFFFFF },
598 { IXGBE_VFTDLEN(0), 2, PATTERN_TEST
, 0x000FFF80, 0x000FFF80 },
602 static const u32 register_test_patterns
[] = {
603 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
606 static bool reg_pattern_test(struct ixgbevf_adapter
*adapter
, u64
*data
,
607 int reg
, u32 mask
, u32 write
)
609 u32 pat
, val
, before
;
611 if (IXGBE_REMOVED(adapter
->hw
.hw_addr
)) {
615 for (pat
= 0; pat
< ARRAY_SIZE(register_test_patterns
); pat
++) {
616 before
= ixgbevf_read_reg(&adapter
->hw
, reg
);
617 ixgbe_write_reg(&adapter
->hw
, reg
,
618 register_test_patterns
[pat
] & write
);
619 val
= ixgbevf_read_reg(&adapter
->hw
, reg
);
620 if (val
!= (register_test_patterns
[pat
] & write
& mask
)) {
622 "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n",
624 register_test_patterns
[pat
] & write
& mask
);
626 ixgbe_write_reg(&adapter
->hw
, reg
, before
);
629 ixgbe_write_reg(&adapter
->hw
, reg
, before
);
634 static bool reg_set_and_check(struct ixgbevf_adapter
*adapter
, u64
*data
,
635 int reg
, u32 mask
, u32 write
)
639 if (IXGBE_REMOVED(adapter
->hw
.hw_addr
)) {
643 before
= ixgbevf_read_reg(&adapter
->hw
, reg
);
644 ixgbe_write_reg(&adapter
->hw
, reg
, write
& mask
);
645 val
= ixgbevf_read_reg(&adapter
->hw
, reg
);
646 if ((write
& mask
) != (val
& mask
)) {
647 pr_err("set/check reg %04X test failed: got 0x%08X expected 0x%08X\n",
648 reg
, (val
& mask
), write
& mask
);
650 ixgbe_write_reg(&adapter
->hw
, reg
, before
);
653 ixgbe_write_reg(&adapter
->hw
, reg
, before
);
657 static int ixgbevf_reg_test(struct ixgbevf_adapter
*adapter
, u64
*data
)
659 const struct ixgbevf_reg_test
*test
;
662 if (IXGBE_REMOVED(adapter
->hw
.hw_addr
)) {
663 dev_err(&adapter
->pdev
->dev
,
664 "Adapter removed - register test blocked\n");
670 /* Perform the register test, looping through the test table
671 * until we either fail or reach the null entry.
674 for (i
= 0; i
< test
->array_len
; i
++) {
677 switch (test
->test_type
) {
679 b
= reg_pattern_test(adapter
, data
,
680 test
->reg
+ (i
* 0x40),
685 b
= reg_set_and_check(adapter
, data
,
686 test
->reg
+ (i
* 0x40),
691 ixgbe_write_reg(&adapter
->hw
,
692 test
->reg
+ (i
* 0x40),
696 b
= reg_pattern_test(adapter
, data
,
701 case TABLE64_TEST_LO
:
702 b
= reg_pattern_test(adapter
, data
,
707 case TABLE64_TEST_HI
:
708 b
= reg_pattern_test(adapter
, data
,
709 test
->reg
+ 4 + (i
* 8),
724 static void ixgbevf_diag_test(struct net_device
*netdev
,
725 struct ethtool_test
*eth_test
, u64
*data
)
727 struct ixgbevf_adapter
*adapter
= netdev_priv(netdev
);
728 bool if_running
= netif_running(netdev
);
730 if (IXGBE_REMOVED(adapter
->hw
.hw_addr
)) {
731 dev_err(&adapter
->pdev
->dev
,
732 "Adapter removed - test blocked\n");
735 eth_test
->flags
|= ETH_TEST_FL_FAILED
;
738 set_bit(__IXGBEVF_TESTING
, &adapter
->state
);
739 if (eth_test
->flags
== ETH_TEST_FL_OFFLINE
) {
742 hw_dbg(&adapter
->hw
, "offline testing starting\n");
744 /* Link test performed before hardware reset so autoneg doesn't
745 * interfere with test result
747 if (ixgbevf_link_test(adapter
, &data
[1]))
748 eth_test
->flags
|= ETH_TEST_FL_FAILED
;
751 /* indicate we're in test mode */
752 ixgbevf_close(netdev
);
754 ixgbevf_reset(adapter
);
756 hw_dbg(&adapter
->hw
, "register testing starting\n");
757 if (ixgbevf_reg_test(adapter
, &data
[0]))
758 eth_test
->flags
|= ETH_TEST_FL_FAILED
;
760 ixgbevf_reset(adapter
);
762 clear_bit(__IXGBEVF_TESTING
, &adapter
->state
);
764 ixgbevf_open(netdev
);
766 hw_dbg(&adapter
->hw
, "online testing starting\n");
768 if (ixgbevf_link_test(adapter
, &data
[1]))
769 eth_test
->flags
|= ETH_TEST_FL_FAILED
;
771 /* Online tests aren't run; pass by default */
774 clear_bit(__IXGBEVF_TESTING
, &adapter
->state
);
776 msleep_interruptible(4 * 1000);
/* ethtool -r: the VF has no real autoneg to restart, so simply
 * reinitialize the interface when it is running.
 */
static int ixgbevf_nway_reset(struct net_device *netdev)
{
	struct ixgbevf_adapter *priv = netdev_priv(netdev);

	if (netif_running(netdev))
		ixgbevf_reinit_locked(priv);

	return 0;
}
789 static int ixgbevf_get_coalesce(struct net_device
*netdev
,
790 struct ethtool_coalesce
*ec
,
791 struct kernel_ethtool_coalesce
*kernel_coal
,
792 struct netlink_ext_ack
*extack
)
794 struct ixgbevf_adapter
*adapter
= netdev_priv(netdev
);
796 /* only valid if in constant ITR mode */
797 if (adapter
->rx_itr_setting
<= 1)
798 ec
->rx_coalesce_usecs
= adapter
->rx_itr_setting
;
800 ec
->rx_coalesce_usecs
= adapter
->rx_itr_setting
>> 2;
802 /* if in mixed Tx/Rx queues per vector mode, report only Rx settings */
803 if (adapter
->q_vector
[0]->tx
.count
&& adapter
->q_vector
[0]->rx
.count
)
806 /* only valid if in constant ITR mode */
807 if (adapter
->tx_itr_setting
<= 1)
808 ec
->tx_coalesce_usecs
= adapter
->tx_itr_setting
;
810 ec
->tx_coalesce_usecs
= adapter
->tx_itr_setting
>> 2;
815 static int ixgbevf_set_coalesce(struct net_device
*netdev
,
816 struct ethtool_coalesce
*ec
,
817 struct kernel_ethtool_coalesce
*kernel_coal
,
818 struct netlink_ext_ack
*extack
)
820 struct ixgbevf_adapter
*adapter
= netdev_priv(netdev
);
821 struct ixgbevf_q_vector
*q_vector
;
823 u16 tx_itr_param
, rx_itr_param
;
825 /* don't accept Tx specific changes if we've got mixed RxTx vectors */
826 if (adapter
->q_vector
[0]->tx
.count
&&
827 adapter
->q_vector
[0]->rx
.count
&& ec
->tx_coalesce_usecs
)
830 if ((ec
->rx_coalesce_usecs
> (IXGBE_MAX_EITR
>> 2)) ||
831 (ec
->tx_coalesce_usecs
> (IXGBE_MAX_EITR
>> 2)))
834 if (ec
->rx_coalesce_usecs
> 1)
835 adapter
->rx_itr_setting
= ec
->rx_coalesce_usecs
<< 2;
837 adapter
->rx_itr_setting
= ec
->rx_coalesce_usecs
;
839 if (adapter
->rx_itr_setting
== 1)
840 rx_itr_param
= IXGBE_20K_ITR
;
842 rx_itr_param
= adapter
->rx_itr_setting
;
844 if (ec
->tx_coalesce_usecs
> 1)
845 adapter
->tx_itr_setting
= ec
->tx_coalesce_usecs
<< 2;
847 adapter
->tx_itr_setting
= ec
->tx_coalesce_usecs
;
849 if (adapter
->tx_itr_setting
== 1)
850 tx_itr_param
= IXGBE_12K_ITR
;
852 tx_itr_param
= adapter
->tx_itr_setting
;
854 num_vectors
= adapter
->num_msix_vectors
- NON_Q_VECTORS
;
856 for (i
= 0; i
< num_vectors
; i
++) {
857 q_vector
= adapter
->q_vector
[i
];
858 if (q_vector
->tx
.count
&& !q_vector
->rx
.count
)
860 q_vector
->itr
= tx_itr_param
;
862 /* Rx only or mixed */
863 q_vector
->itr
= rx_itr_param
;
864 ixgbevf_write_eitr(q_vector
);
870 static int ixgbevf_get_rxnfc(struct net_device
*dev
, struct ethtool_rxnfc
*info
,
871 u32
*rules __always_unused
)
873 struct ixgbevf_adapter
*adapter
= netdev_priv(dev
);
876 case ETHTOOL_GRXRINGS
:
877 info
->data
= adapter
->num_rx_queues
;
880 hw_dbg(&adapter
->hw
, "Command parameters not supported\n");
885 static u32
ixgbevf_get_rxfh_indir_size(struct net_device
*netdev
)
887 struct ixgbevf_adapter
*adapter
= netdev_priv(netdev
);
889 if (adapter
->hw
.mac
.type
>= ixgbe_mac_X550_vf
)
890 return IXGBEVF_X550_VFRETA_SIZE
;
892 return IXGBEVF_82599_RETA_SIZE
;
895 static u32
ixgbevf_get_rxfh_key_size(struct net_device
*netdev
)
897 return IXGBEVF_RSS_HASH_KEY_SIZE
;
900 static int ixgbevf_get_rxfh(struct net_device
*netdev
,
901 struct ethtool_rxfh_param
*rxfh
)
903 struct ixgbevf_adapter
*adapter
= netdev_priv(netdev
);
906 rxfh
->hfunc
= ETH_RSS_HASH_TOP
;
908 if (adapter
->hw
.mac
.type
>= ixgbe_mac_X550_vf
) {
910 memcpy(rxfh
->key
, adapter
->rss_key
,
911 ixgbevf_get_rxfh_key_size(netdev
));
916 for (i
= 0; i
< IXGBEVF_X550_VFRETA_SIZE
; i
++)
917 rxfh
->indir
[i
] = adapter
->rss_indir_tbl
[i
];
920 /* If neither indirection table nor hash key was requested
921 * - just return a success avoiding taking any locks.
923 if (!rxfh
->indir
&& !rxfh
->key
)
926 spin_lock_bh(&adapter
->mbx_lock
);
928 err
= ixgbevf_get_reta_locked(&adapter
->hw
,
930 adapter
->num_rx_queues
);
932 if (!err
&& rxfh
->key
)
933 err
= ixgbevf_get_rss_key_locked(&adapter
->hw
,
936 spin_unlock_bh(&adapter
->mbx_lock
);
942 static u32
ixgbevf_get_priv_flags(struct net_device
*netdev
)
944 struct ixgbevf_adapter
*adapter
= netdev_priv(netdev
);
947 if (adapter
->flags
& IXGBEVF_FLAGS_LEGACY_RX
)
948 priv_flags
|= IXGBEVF_PRIV_FLAGS_LEGACY_RX
;
953 static int ixgbevf_set_priv_flags(struct net_device
*netdev
, u32 priv_flags
)
955 struct ixgbevf_adapter
*adapter
= netdev_priv(netdev
);
956 unsigned int flags
= adapter
->flags
;
958 flags
&= ~IXGBEVF_FLAGS_LEGACY_RX
;
959 if (priv_flags
& IXGBEVF_PRIV_FLAGS_LEGACY_RX
)
960 flags
|= IXGBEVF_FLAGS_LEGACY_RX
;
962 if (flags
!= adapter
->flags
) {
963 adapter
->flags
= flags
;
965 /* reset interface to repopulate queues */
966 if (netif_running(netdev
))
967 ixgbevf_reinit_locked(adapter
);
973 static const struct ethtool_ops ixgbevf_ethtool_ops
= {
974 .supported_coalesce_params
= ETHTOOL_COALESCE_USECS
,
975 .get_drvinfo
= ixgbevf_get_drvinfo
,
976 .get_regs_len
= ixgbevf_get_regs_len
,
977 .get_regs
= ixgbevf_get_regs
,
978 .nway_reset
= ixgbevf_nway_reset
,
979 .get_link
= ethtool_op_get_link
,
980 .get_ringparam
= ixgbevf_get_ringparam
,
981 .set_ringparam
= ixgbevf_set_ringparam
,
982 .get_msglevel
= ixgbevf_get_msglevel
,
983 .set_msglevel
= ixgbevf_set_msglevel
,
984 .self_test
= ixgbevf_diag_test
,
985 .get_sset_count
= ixgbevf_get_sset_count
,
986 .get_strings
= ixgbevf_get_strings
,
987 .get_ethtool_stats
= ixgbevf_get_ethtool_stats
,
988 .get_coalesce
= ixgbevf_get_coalesce
,
989 .set_coalesce
= ixgbevf_set_coalesce
,
990 .get_rxnfc
= ixgbevf_get_rxnfc
,
991 .get_rxfh_indir_size
= ixgbevf_get_rxfh_indir_size
,
992 .get_rxfh_key_size
= ixgbevf_get_rxfh_key_size
,
993 .get_rxfh
= ixgbevf_get_rxfh
,
994 .get_link_ksettings
= ixgbevf_get_link_ksettings
,
995 .get_priv_flags
= ixgbevf_get_priv_flags
,
996 .set_priv_flags
= ixgbevf_set_priv_flags
,
999 void ixgbevf_set_ethtool_ops(struct net_device
*netdev
)
1001 netdev
->ethtool_ops
= &ixgbevf_ethtool_ops
;