/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2014-2016 Broadcom Corporation
 * Copyright (c) 2016-2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/ctype.h>
#include <linux/stringify.h>
#include <linux/ethtool.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/firmware.h>
#include <linux/utsname.h>
#include <linux/time.h>
#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_xdp.h"
#include "bnxt_ethtool.h"
#include "bnxt_nvm_defs.h"	/* NVRAM content constant and structure defs */
#include "bnxt_fw_hdr.h"	/* Firmware hdr constant and structure defs */
#include "bnxt_coredump.h"

#define FLASH_NVRAM_TIMEOUT	((HWRM_CMD_TIMEOUT) * 100)
#define FLASH_PACKAGE_TIMEOUT	((HWRM_CMD_TIMEOUT) * 200)
#define INSTALL_PACKAGE_TIMEOUT	((HWRM_CMD_TIMEOUT) * 200)
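
/* NVRAM writes and package installs run far longer than ordinary HWRM
 * commands, which is why the flash paths below scale the base
 * HWRM_CMD_TIMEOUT (by 100x/200x) instead of using it directly.
 */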

static u32 bnxt_get_msglevel(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	return bp->msg_enable;
}

static void bnxt_set_msglevel(struct net_device *dev, u32 value)
{
	struct bnxt *bp = netdev_priv(dev);

	bp->msg_enable = value;
}

static int bnxt_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_coal *hw_coal;
	u16 mult;

	memset(coal, 0, sizeof(*coal));

	coal->use_adaptive_rx_coalesce = bp->flags & BNXT_FLAG_DIM;

	hw_coal = &bp->rx_coal;
	mult = hw_coal->bufs_per_record;
	coal->rx_coalesce_usecs = hw_coal->coal_ticks;
	coal->rx_max_coalesced_frames = hw_coal->coal_bufs / mult;
	coal->rx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
	coal->rx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;

	hw_coal = &bp->tx_coal;
	mult = hw_coal->bufs_per_record;
	coal->tx_coalesce_usecs = hw_coal->coal_ticks;
	coal->tx_max_coalesced_frames = hw_coal->coal_bufs / mult;
	coal->tx_coalesce_usecs_irq = hw_coal->coal_ticks_irq;
	coal->tx_max_coalesced_frames_irq = hw_coal->coal_bufs_irq / mult;

	coal->stats_block_coalesce_usecs = bp->stats_coal_ticks;

	return 0;
}
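
/* Setting use_adaptive_rx_coalesce turns on dynamic interrupt moderation
 * (BNXT_FLAG_DIM); clearing it falls back to the fixed tick/buffer values
 * below.  Changing the statistics block interval needs a close/open cycle,
 * e.g. "ethtool -C eth0 stats-block-usecs 500000" from userspace.
 */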
static int bnxt_set_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal)
{
	struct bnxt *bp = netdev_priv(dev);
	bool update_stats = false;
	struct bnxt_coal *hw_coal;
	int rc = 0;
	u16 mult;

	if (coal->use_adaptive_rx_coalesce) {
		bp->flags |= BNXT_FLAG_DIM;
	} else {
		if (bp->flags & BNXT_FLAG_DIM) {
			bp->flags &= ~(BNXT_FLAG_DIM);
			goto reset_coalesce;
		}
	}

	hw_coal = &bp->rx_coal;
	mult = hw_coal->bufs_per_record;
	hw_coal->coal_ticks = coal->rx_coalesce_usecs;
	hw_coal->coal_bufs = coal->rx_max_coalesced_frames * mult;
	hw_coal->coal_ticks_irq = coal->rx_coalesce_usecs_irq;
	hw_coal->coal_bufs_irq = coal->rx_max_coalesced_frames_irq * mult;

	hw_coal = &bp->tx_coal;
	mult = hw_coal->bufs_per_record;
	hw_coal->coal_ticks = coal->tx_coalesce_usecs;
	hw_coal->coal_bufs = coal->tx_max_coalesced_frames * mult;
	hw_coal->coal_ticks_irq = coal->tx_coalesce_usecs_irq;
	hw_coal->coal_bufs_irq = coal->tx_max_coalesced_frames_irq * mult;

	if (bp->stats_coal_ticks != coal->stats_block_coalesce_usecs) {
		u32 stats_ticks = coal->stats_block_coalesce_usecs;

		/* Allow 0, which means disable. */
		if (stats_ticks)
			stats_ticks = clamp_t(u32, stats_ticks,
					      BNXT_MIN_STATS_COAL_TICKS,
					      BNXT_MAX_STATS_COAL_TICKS);
		stats_ticks = rounddown(stats_ticks, BNXT_MIN_STATS_COAL_TICKS);
		bp->stats_coal_ticks = stats_ticks;
		if (bp->stats_coal_ticks)
			bp->current_interval =
				bp->stats_coal_ticks * HZ / 1000000;
		else
			bp->current_interval = BNXT_TIMER_INTERVAL;
		update_stats = true;
	}

reset_coalesce:
	if (netif_running(dev)) {
		if (update_stats) {
			rc = bnxt_close_nic(bp, true, false);
			if (!rc)
				rc = bnxt_open_nic(bp, true, false);
		} else {
			rc = bnxt_hwrm_set_coal(bp);
		}
	}

	return rc;
}

static const char * const bnxt_ring_stats_str[] = {
	"rx_ucast_packets",
	"rx_mcast_packets",
	"rx_bcast_packets",
	"rx_discards",
	"rx_drops",
	"rx_ucast_bytes",
	"rx_mcast_bytes",
	"rx_bcast_bytes",
	"tx_ucast_packets",
	"tx_mcast_packets",
	"tx_bcast_packets",
	"tx_discards",
	"tx_drops",
	"tx_ucast_bytes",
	"tx_mcast_bytes",
	"tx_bcast_bytes",
};

static const char * const bnxt_ring_tpa_stats_str[] = {
	"tpa_packets",
	"tpa_bytes",
	"tpa_events",
	"tpa_aborts",
};

static const char * const bnxt_ring_tpa2_stats_str[] = {
	"rx_tpa_eligible_pkt",
	"rx_tpa_eligible_bytes",
	"rx_tpa_pkt",
	"rx_tpa_bytes",
	"rx_tpa_errors",
};

static const char * const bnxt_ring_sw_stats_str[] = {
	"rx_l4_csum_errors",
	"rx_buf_errors",
	"missed_irqs",
};
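
/* The per-ring names above are emitted once per completion ring by
 * bnxt_get_strings() with a "[%d]: " ring-index prefix, so their counts
 * must stay in sync with bnxt_get_num_ring_stats() below.
 */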

#define BNXT_RX_STATS_ENTRY(counter)	\
	{ BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }

#define BNXT_TX_STATS_ENTRY(counter)	\
	{ BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }

#define BNXT_RX_STATS_EXT_ENTRY(counter)	\
	{ BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }

#define BNXT_TX_STATS_EXT_ENTRY(counter)	\
	{ BNXT_TX_STATS_EXT_OFFSET(counter), __stringify(counter) }

#define BNXT_RX_STATS_EXT_PFC_ENTRY(n)				\
	BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_duration_us),	\
	BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_transitions)

#define BNXT_TX_STATS_EXT_PFC_ENTRY(n)				\
	BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_duration_us),	\
	BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_transitions)

#define BNXT_RX_STATS_EXT_PFC_ENTRIES				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(0),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(1),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(2),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(3),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(4),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(5),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(6),				\
	BNXT_RX_STATS_EXT_PFC_ENTRY(7)

#define BNXT_TX_STATS_EXT_PFC_ENTRIES				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(0),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(1),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(2),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(3),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(4),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(5),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(6),				\
	BNXT_TX_STATS_EXT_PFC_ENTRY(7)

#define BNXT_RX_STATS_EXT_COS_ENTRY(n)				\
	BNXT_RX_STATS_EXT_ENTRY(rx_bytes_cos##n),		\
	BNXT_RX_STATS_EXT_ENTRY(rx_packets_cos##n)

#define BNXT_TX_STATS_EXT_COS_ENTRY(n)				\
	BNXT_TX_STATS_EXT_ENTRY(tx_bytes_cos##n),		\
	BNXT_TX_STATS_EXT_ENTRY(tx_packets_cos##n)

#define BNXT_RX_STATS_EXT_COS_ENTRIES				\
	BNXT_RX_STATS_EXT_COS_ENTRY(0),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(1),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(2),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(3),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(4),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(5),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(6),				\
	BNXT_RX_STATS_EXT_COS_ENTRY(7)				\

#define BNXT_TX_STATS_EXT_COS_ENTRIES				\
	BNXT_TX_STATS_EXT_COS_ENTRY(0),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(1),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(2),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(3),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(4),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(5),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(6),				\
	BNXT_TX_STATS_EXT_COS_ENTRY(7)				\

#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n)			\
	BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n),	\
	BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n)

#define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(0),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(1),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(2),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(3),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(4),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(5),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(6),				\
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(7)

#define BNXT_RX_STATS_PRI_ENTRY(counter, n)		\
	{ BNXT_RX_STATS_EXT_OFFSET(counter##_cos0),	\
	  __stringify(counter##_pri##n) }

#define BNXT_TX_STATS_PRI_ENTRY(counter, n)		\
	{ BNXT_TX_STATS_EXT_OFFSET(counter##_cos0),	\
	  __stringify(counter##_pri##n) }

#define BNXT_RX_STATS_PRI_ENTRIES(counter)		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 0),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 1),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 2),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 3),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 4),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 5),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 6),		\
	BNXT_RX_STATS_PRI_ENTRY(counter, 7)

#define BNXT_TX_STATS_PRI_ENTRIES(counter)		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 0),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 1),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 2),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 3),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 4),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 5),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 6),		\
	BNXT_TX_STATS_PRI_ENTRY(counter, 7)

#define BNXT_PCIE_STATS_ENTRY(counter)	\
	{ BNXT_PCIE_STATS_OFFSET(counter), __stringify(counter) }
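
/* The PRI entries record each extended counter's cos0 offset; the actual
 * per-priority slot is resolved at read time by adding the firmware's
 * priority-to-CoS mapping to base_off (see bnxt_get_ethtool_stats()).
 */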

enum {
	RX_TOTAL_DISCARDS,
	TX_TOTAL_DISCARDS,
};

static struct {
	u64			counter;
	char			string[ETH_GSTRING_LEN];
} bnxt_sw_func_stats[] = {
	{0, "rx_total_discard_pkts"},
	{0, "tx_total_discard_pkts"},
};

static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_port_stats_arr[] = {
	BNXT_RX_STATS_ENTRY(rx_64b_frames),
	BNXT_RX_STATS_ENTRY(rx_65b_127b_frames),
	BNXT_RX_STATS_ENTRY(rx_128b_255b_frames),
	BNXT_RX_STATS_ENTRY(rx_256b_511b_frames),
	BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames),
	BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames),
	BNXT_RX_STATS_ENTRY(rx_good_vlan_frames),
	BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames),
	BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames),
	BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames),
	BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames),
	BNXT_RX_STATS_ENTRY(rx_total_frames),
	BNXT_RX_STATS_ENTRY(rx_ucast_frames),
	BNXT_RX_STATS_ENTRY(rx_mcast_frames),
	BNXT_RX_STATS_ENTRY(rx_bcast_frames),
	BNXT_RX_STATS_ENTRY(rx_fcs_err_frames),
	BNXT_RX_STATS_ENTRY(rx_ctrl_frames),
	BNXT_RX_STATS_ENTRY(rx_pause_frames),
	BNXT_RX_STATS_ENTRY(rx_pfc_frames),
	BNXT_RX_STATS_ENTRY(rx_align_err_frames),
	BNXT_RX_STATS_ENTRY(rx_ovrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_jbr_frames),
	BNXT_RX_STATS_ENTRY(rx_mtu_err_frames),
	BNXT_RX_STATS_ENTRY(rx_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_double_tagged_frames),
	BNXT_RX_STATS_ENTRY(rx_good_frames),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6),
	BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7),
	BNXT_RX_STATS_ENTRY(rx_undrsz_frames),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_events),
	BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration),
	BNXT_RX_STATS_ENTRY(rx_bytes),
	BNXT_RX_STATS_ENTRY(rx_runt_bytes),
	BNXT_RX_STATS_ENTRY(rx_runt_frames),
	BNXT_RX_STATS_ENTRY(rx_stat_discard),
	BNXT_RX_STATS_ENTRY(rx_stat_err),

	BNXT_TX_STATS_ENTRY(tx_64b_frames),
	BNXT_TX_STATS_ENTRY(tx_65b_127b_frames),
	BNXT_TX_STATS_ENTRY(tx_128b_255b_frames),
	BNXT_TX_STATS_ENTRY(tx_256b_511b_frames),
	BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames),
	BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames),
	BNXT_TX_STATS_ENTRY(tx_good_vlan_frames),
	BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames),
	BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames),
	BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames),
	BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames),
	BNXT_TX_STATS_ENTRY(tx_good_frames),
	BNXT_TX_STATS_ENTRY(tx_total_frames),
	BNXT_TX_STATS_ENTRY(tx_ucast_frames),
	BNXT_TX_STATS_ENTRY(tx_mcast_frames),
	BNXT_TX_STATS_ENTRY(tx_bcast_frames),
	BNXT_TX_STATS_ENTRY(tx_pause_frames),
	BNXT_TX_STATS_ENTRY(tx_pfc_frames),
	BNXT_TX_STATS_ENTRY(tx_jabber_frames),
	BNXT_TX_STATS_ENTRY(tx_fcs_err_frames),
	BNXT_TX_STATS_ENTRY(tx_err),
	BNXT_TX_STATS_ENTRY(tx_fifo_underruns),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6),
	BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_events),
	BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration),
	BNXT_TX_STATS_ENTRY(tx_total_collisions),
	BNXT_TX_STATS_ENTRY(tx_bytes),
	BNXT_TX_STATS_ENTRY(tx_xthol_frames),
	BNXT_TX_STATS_ENTRY(tx_stat_discard),
	BNXT_TX_STATS_ENTRY(tx_stat_error),
};

static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_port_stats_ext_arr[] = {
	BNXT_RX_STATS_EXT_ENTRY(link_down_events),
	BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(resume_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events),
	BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events),
	BNXT_RX_STATS_EXT_COS_ENTRIES,
	BNXT_RX_STATS_EXT_PFC_ENTRIES,
	BNXT_RX_STATS_EXT_ENTRY(rx_bits),
	BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold),
	BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err),
	BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits),
	BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES,
};

static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_port_stats_ext_arr[] = {
	BNXT_TX_STATS_EXT_COS_ENTRIES,
	BNXT_TX_STATS_EXT_PFC_ENTRIES,
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_rx_bytes_pri_arr[] = {
	BNXT_RX_STATS_PRI_ENTRIES(rx_bytes),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_rx_pkts_pri_arr[] = {
	BNXT_RX_STATS_PRI_ENTRIES(rx_packets),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_bytes_pri_arr[] = {
	BNXT_TX_STATS_PRI_ENTRIES(tx_bytes),
};

static const struct {
	long base_off;
	char string[ETH_GSTRING_LEN];
} bnxt_tx_pkts_pri_arr[] = {
	BNXT_TX_STATS_PRI_ENTRIES(tx_packets),
};

static const struct {
	long offset;
	char string[ETH_GSTRING_LEN];
} bnxt_pcie_stats_arr[] = {
	BNXT_PCIE_STATS_ENTRY(pcie_pl_signal_integrity),
	BNXT_PCIE_STATS_ENTRY(pcie_dl_signal_integrity),
	BNXT_PCIE_STATS_ENTRY(pcie_tl_signal_integrity),
	BNXT_PCIE_STATS_ENTRY(pcie_link_integrity),
	BNXT_PCIE_STATS_ENTRY(pcie_tx_traffic_rate),
	BNXT_PCIE_STATS_ENTRY(pcie_rx_traffic_rate),
	BNXT_PCIE_STATS_ENTRY(pcie_tx_dllp_statistics),
	BNXT_PCIE_STATS_ENTRY(pcie_rx_dllp_statistics),
	BNXT_PCIE_STATS_ENTRY(pcie_equalization_time),
	BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[0]),
	BNXT_PCIE_STATS_ENTRY(pcie_ltssm_histogram[2]),
	BNXT_PCIE_STATS_ENTRY(pcie_recovery_histogram),
};

#define BNXT_NUM_SW_FUNC_STATS	ARRAY_SIZE(bnxt_sw_func_stats)
#define BNXT_NUM_PORT_STATS	ARRAY_SIZE(bnxt_port_stats_arr)
#define BNXT_NUM_STATS_PRI			\
	(ARRAY_SIZE(bnxt_rx_bytes_pri_arr) +	\
	 ARRAY_SIZE(bnxt_rx_pkts_pri_arr) +	\
	 ARRAY_SIZE(bnxt_tx_bytes_pri_arr) +	\
	 ARRAY_SIZE(bnxt_tx_pkts_pri_arr))
#define BNXT_NUM_PCIE_STATS	ARRAY_SIZE(bnxt_pcie_stats_arr)
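
/* Rough shape of the total stats count assembled by bnxt_get_num_stats():
 *   cp_nr_rings * (ring + TPA + SW per-ring counters)
 *   + BNXT_NUM_SW_FUNC_STATS
 *   + port stats           (if BNXT_FLAG_PORT_STATS)
 *   + rx/tx extended stats (if BNXT_FLAG_PORT_STATS_EXT,
 *                           plus PRI entries if pri2cos_valid)
 *   + PCIe stats           (if BNXT_FLAG_PCIE_STATS)
 */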

static int bnxt_get_num_tpa_ring_stats(struct bnxt *bp)
{
	if (BNXT_SUPPORTS_TPA(bp)) {
		if (bp->max_tpa_v2)
			return ARRAY_SIZE(bnxt_ring_tpa2_stats_str);
		return ARRAY_SIZE(bnxt_ring_tpa_stats_str);
	}
	return 0;
}

static int bnxt_get_num_ring_stats(struct bnxt *bp)
{
	int num_stats;

	num_stats = ARRAY_SIZE(bnxt_ring_stats_str) +
		    ARRAY_SIZE(bnxt_ring_sw_stats_str) +
		    bnxt_get_num_tpa_ring_stats(bp);
	return num_stats * bp->cp_nr_rings;
}

static int bnxt_get_num_stats(struct bnxt *bp)
{
	int num_stats = bnxt_get_num_ring_stats(bp);

	num_stats += BNXT_NUM_SW_FUNC_STATS;

	if (bp->flags & BNXT_FLAG_PORT_STATS)
		num_stats += BNXT_NUM_PORT_STATS;

	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
		num_stats += bp->fw_rx_stats_ext_size +
			     bp->fw_tx_stats_ext_size;
		if (bp->pri2cos_valid)
			num_stats += BNXT_NUM_STATS_PRI;
	}

	if (bp->flags & BNXT_FLAG_PCIE_STATS)
		num_stats += BNXT_NUM_PCIE_STATS;

	return num_stats;
}
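
/* The ethtool core calls this to size the string and value buffers before
 * invoking bnxt_get_strings() and bnxt_get_ethtool_stats().
 */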
static int bnxt_get_sset_count(struct net_device *dev, int sset)
{
	struct bnxt *bp = netdev_priv(dev);

	switch (sset) {
	case ETH_SS_STATS:
		return bnxt_get_num_stats(bp);
	case ETH_SS_TEST:
		if (!bp->num_tests)
			return -EOPNOTSUPP;
		return bp->num_tests;
	default:
		return -EOPNOTSUPP;
	}
}

static void bnxt_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *buf)
{
	u32 i, j = 0;
	struct bnxt *bp = netdev_priv(dev);
	u32 stat_fields = ARRAY_SIZE(bnxt_ring_stats_str) +
			  bnxt_get_num_tpa_ring_stats(bp);

	if (!bp->bnapi) {
		j += bnxt_get_num_ring_stats(bp) + BNXT_NUM_SW_FUNC_STATS;
		goto skip_ring_stats;
	}

	for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++)
		bnxt_sw_func_stats[i].counter = 0;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		struct bnxt_napi *bnapi = bp->bnapi[i];
		struct bnxt_cp_ring_info *cpr = &bnapi->cp_ring;
		__le64 *hw_stats = (__le64 *)cpr->hw_stats;
		int k;

		for (k = 0; k < stat_fields; j++, k++)
			buf[j] = le64_to_cpu(hw_stats[k]);
		buf[j++] = cpr->rx_l4_csum_errors;
		buf[j++] = cpr->rx_buf_errors;
		buf[j++] = cpr->missed_irqs;

		bnxt_sw_func_stats[RX_TOTAL_DISCARDS].counter +=
			le64_to_cpu(cpr->hw_stats->rx_discard_pkts);
		bnxt_sw_func_stats[TX_TOTAL_DISCARDS].counter +=
			le64_to_cpu(cpr->hw_stats->tx_discard_pkts);
	}

	for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++, j++)
		buf[j] = bnxt_sw_func_stats[i].counter;

skip_ring_stats:
	if (bp->flags & BNXT_FLAG_PORT_STATS) {
		__le64 *port_stats = (__le64 *)bp->hw_rx_port_stats;

		for (i = 0; i < BNXT_NUM_PORT_STATS; i++, j++) {
			buf[j] = le64_to_cpu(*(port_stats +
					       bnxt_port_stats_arr[i].offset));
		}
	}
	if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
		__le64 *rx_port_stats_ext = (__le64 *)bp->hw_rx_port_stats_ext;
		__le64 *tx_port_stats_ext = (__le64 *)bp->hw_tx_port_stats_ext;

		for (i = 0; i < bp->fw_rx_stats_ext_size; i++, j++) {
			buf[j] = le64_to_cpu(*(rx_port_stats_ext +
					    bnxt_port_stats_ext_arr[i].offset));
		}
		for (i = 0; i < bp->fw_tx_stats_ext_size; i++, j++) {
			buf[j] = le64_to_cpu(*(tx_port_stats_ext +
					bnxt_tx_port_stats_ext_arr[i].offset));
		}
		if (bp->pri2cos_valid) {
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_rx_bytes_pri_arr[i].base_off +
					 bp->pri2cos[i];

				buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_rx_pkts_pri_arr[i].base_off +
					 bp->pri2cos[i];

				buf[j] = le64_to_cpu(*(rx_port_stats_ext + n));
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_tx_bytes_pri_arr[i].base_off +
					 bp->pri2cos[i];

				buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
			}
			for (i = 0; i < 8; i++, j++) {
				long n = bnxt_tx_pkts_pri_arr[i].base_off +
					 bp->pri2cos[i];

				buf[j] = le64_to_cpu(*(tx_port_stats_ext + n));
			}
		}
	}
	if (bp->flags & BNXT_FLAG_PCIE_STATS) {
		__le64 *pcie_stats = (__le64 *)bp->hw_pcie_stats;

		for (i = 0; i < BNXT_NUM_PCIE_STATS; i++, j++) {
			buf[j] = le64_to_cpu(*(pcie_stats +
					       bnxt_pcie_stats_arr[i].offset));
		}
	}
}
static void bnxt_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnxt *bp = netdev_priv(dev);
	static const char * const *str;
	u32 i, j, num_str;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < bp->cp_nr_rings; i++) {
			num_str = ARRAY_SIZE(bnxt_ring_stats_str);
			for (j = 0; j < num_str; j++) {
				sprintf(buf, "[%d]: %s", i,
					bnxt_ring_stats_str[j]);
				buf += ETH_GSTRING_LEN;
			}
			if (!BNXT_SUPPORTS_TPA(bp))
				goto skip_tpa_stats;

			if (bp->max_tpa_v2) {
				num_str = ARRAY_SIZE(bnxt_ring_tpa2_stats_str);
				str = bnxt_ring_tpa2_stats_str;
			} else {
				num_str = ARRAY_SIZE(bnxt_ring_tpa_stats_str);
				str = bnxt_ring_tpa_stats_str;
			}
			for (j = 0; j < num_str; j++) {
				sprintf(buf, "[%d]: %s", i, str[j]);
				buf += ETH_GSTRING_LEN;
			}
skip_tpa_stats:
			num_str = ARRAY_SIZE(bnxt_ring_sw_stats_str);
			for (j = 0; j < num_str; j++) {
				sprintf(buf, "[%d]: %s", i,
					bnxt_ring_sw_stats_str[j]);
				buf += ETH_GSTRING_LEN;
			}
		}
		for (i = 0; i < BNXT_NUM_SW_FUNC_STATS; i++) {
			strcpy(buf, bnxt_sw_func_stats[i].string);
			buf += ETH_GSTRING_LEN;
		}

		if (bp->flags & BNXT_FLAG_PORT_STATS) {
			for (i = 0; i < BNXT_NUM_PORT_STATS; i++) {
				strcpy(buf, bnxt_port_stats_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
		}
		if (bp->flags & BNXT_FLAG_PORT_STATS_EXT) {
			for (i = 0; i < bp->fw_rx_stats_ext_size; i++) {
				strcpy(buf, bnxt_port_stats_ext_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
			for (i = 0; i < bp->fw_tx_stats_ext_size; i++) {
				strcpy(buf,
				       bnxt_tx_port_stats_ext_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
			if (bp->pri2cos_valid) {
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_rx_bytes_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_rx_pkts_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_tx_bytes_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
				for (i = 0; i < 8; i++) {
					strcpy(buf,
					       bnxt_tx_pkts_pri_arr[i].string);
					buf += ETH_GSTRING_LEN;
				}
			}
		}
		if (bp->flags & BNXT_FLAG_PCIE_STATS) {
			for (i = 0; i < BNXT_NUM_PCIE_STATS; i++) {
				strcpy(buf, bnxt_pcie_stats_arr[i].string);
				buf += ETH_GSTRING_LEN;
			}
		}
		break;
	case ETH_SS_TEST:
		if (bp->num_tests)
			memcpy(buf, bp->test_info->string,
			       bp->num_tests * ETH_GSTRING_LEN);
		break;
	default:
		netdev_err(bp->dev, "bnxt_get_strings invalid request %x\n",
			   stringset);
		break;
	}
}

static void bnxt_get_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnxt *bp = netdev_priv(dev);

	ering->rx_max_pending = BNXT_MAX_RX_DESC_CNT;
	ering->rx_jumbo_max_pending = BNXT_MAX_RX_JUM_DESC_CNT;
	ering->tx_max_pending = BNXT_MAX_TX_DESC_CNT;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_jumbo_pending = bp->rx_agg_ring_size;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnxt_set_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering)
{
	struct bnxt *bp = netdev_priv(dev);

	if ((ering->rx_pending > BNXT_MAX_RX_DESC_CNT) ||
	    (ering->tx_pending > BNXT_MAX_TX_DESC_CNT) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS))
		return -EINVAL;

	if (netif_running(dev))
		bnxt_close_nic(bp, false, false);

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;
	bnxt_set_ring_params(bp);

	if (netif_running(dev))
		return bnxt_open_nic(bp, false, false);

	return 0;
}
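
/* With BNXT_FLAG_SHARED_RINGS an rx and a tx ring share one completion
 * ring and are reported as a "combined" channel; otherwise rx and tx
 * counts are reported (and configured) separately.
 */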
static void bnxt_get_channels(struct net_device *dev,
			      struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_hw_resc *hw_resc = &bp->hw_resc;
	int max_rx_rings, max_tx_rings, tcs;
	int max_tx_sch_inputs;

	/* Get the most up-to-date max_tx_sch_inputs. */
	if (BNXT_NEW_RM(bp))
		bnxt_hwrm_func_resc_qcaps(bp, false);
	max_tx_sch_inputs = hw_resc->max_tx_sch_inputs;

	bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, true);
	if (max_tx_sch_inputs)
		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);
	channel->max_combined = min_t(int, max_rx_rings, max_tx_rings);

	if (bnxt_get_max_rings(bp, &max_rx_rings, &max_tx_rings, false)) {
		max_rx_rings = 0;
		max_tx_rings = 0;
	}
	if (max_tx_sch_inputs)
		max_tx_rings = min_t(int, max_tx_rings, max_tx_sch_inputs);

	tcs = netdev_get_num_tc(dev);
	if (tcs > 1)
		max_tx_rings /= tcs;

	channel->max_rx = max_rx_rings;
	channel->max_tx = max_tx_rings;
	channel->max_other = 0;
	if (bp->flags & BNXT_FLAG_SHARED_RINGS) {
		channel->combined_count = bp->rx_nr_rings;
		if (BNXT_CHIP_TYPE_NITRO_A0(bp))
			channel->combined_count--;
	} else if (!BNXT_CHIP_TYPE_NITRO_A0(bp)) {
		channel->rx_count = bp->rx_nr_rings;
		channel->tx_count = bp->tx_nr_rings_per_tc;
	}
}

static int bnxt_set_channels(struct net_device *dev,
			     struct ethtool_channels *channel)
{
	struct bnxt *bp = netdev_priv(dev);
	int req_tx_rings, req_rx_rings, tcs;
	bool sh = false;
	int tx_xdp = 0;
	int rc = 0;

	if (channel->other_count)
		return -EINVAL;

	if (!channel->combined_count &&
	    (!channel->rx_count || !channel->tx_count))
		return -EINVAL;

	if (channel->combined_count &&
	    (channel->rx_count || channel->tx_count))
		return -EINVAL;

	if (BNXT_CHIP_TYPE_NITRO_A0(bp) && (channel->rx_count ||
					    channel->tx_count))
		return -EINVAL;

	if (channel->combined_count)
		sh = true;

	tcs = netdev_get_num_tc(dev);

	req_tx_rings = sh ? channel->combined_count : channel->tx_count;
	req_rx_rings = sh ? channel->combined_count : channel->rx_count;
	if (bp->tx_nr_rings_xdp) {
		if (!sh) {
			netdev_err(dev, "Only combined mode supported when XDP is enabled.\n");
			return -EINVAL;
		}
		tx_xdp = req_rx_rings;
	}
	rc = bnxt_check_rings(bp, req_tx_rings, req_rx_rings, sh, tcs, tx_xdp);
	if (rc) {
		netdev_warn(dev, "Unable to allocate the requested rings\n");
		return rc;
	}

	if (netif_running(dev)) {
		if (BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VF's
			 * before PF unload
			 */
		}
		rc = bnxt_close_nic(bp, true, false);
		if (rc) {
			netdev_err(bp->dev, "Set channel failure rc :%x\n",
				   rc);
			return rc;
		}
	}

	if (sh) {
		bp->flags |= BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->combined_count;
		bp->tx_nr_rings_per_tc = channel->combined_count;
	} else {
		bp->flags &= ~BNXT_FLAG_SHARED_RINGS;
		bp->rx_nr_rings = channel->rx_count;
		bp->tx_nr_rings_per_tc = channel->tx_count;
	}
	bp->tx_nr_rings_xdp = tx_xdp;
	bp->tx_nr_rings = bp->tx_nr_rings_per_tc + tx_xdp;
	if (tcs > 1)
		bp->tx_nr_rings = bp->tx_nr_rings_per_tc * tcs + tx_xdp;

	bp->cp_nr_rings = sh ? max_t(int, bp->tx_nr_rings, bp->rx_nr_rings) :
			       bp->tx_nr_rings + bp->rx_nr_rings;

	/* After changing number of rx channels, update NTUPLE feature. */
	netdev_update_features(dev);
	if (netif_running(dev)) {
		rc = bnxt_open_nic(bp, true, false);
		if ((!rc) && BNXT_PF(bp)) {
			/* TODO CHIMP_FW: Send message to all VF's
			 * after PF reload
			 */
		}
	} else {
		rc = bnxt_reserve_rings(bp, true);
	}

	return rc;
}

#ifdef CONFIG_RFS_ACCEL
static int bnxt_grxclsrlall(struct bnxt *bp, struct ethtool_rxnfc *cmd,
			    u32 *rule_locs)
{
	int i, j = 0;

	cmd->data = bp->ntp_fltr_count;
	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;
		struct bnxt_ntuple_filter *fltr;

		head = &bp->ntp_fltr_hash_tbl[i];
		rcu_read_lock();
		hlist_for_each_entry_rcu(fltr, head, hash) {
			if (j == cmd->rule_cnt)
				break;
			rule_locs[j++] = fltr->sw_id;
		}
		rcu_read_unlock();
		if (j == cmd->rule_cnt)
			break;
	}
	cmd->rule_cnt = j;
	return 0;
}

static int bnxt_grxclsrule(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	struct ethtool_rx_flow_spec *fs =
		(struct ethtool_rx_flow_spec *)&cmd->fs;
	struct bnxt_ntuple_filter *fltr;
	struct flow_keys *fkeys;
	int i, rc = -EINVAL;

	if (fs->location >= BNXT_NTP_FLTR_MAX_FLTR)
		return rc;

	for (i = 0; i < BNXT_NTP_FLTR_HASH_SIZE; i++) {
		struct hlist_head *head;

		head = &bp->ntp_fltr_hash_tbl[i];
		rcu_read_lock();
		hlist_for_each_entry_rcu(fltr, head, hash) {
			if (fltr->sw_id == fs->location)
				goto fltr_found;
		}
		rcu_read_unlock();
	}
	return rc;

fltr_found:
	fkeys = &fltr->fkeys;
	if (fkeys->basic.n_proto == htons(ETH_P_IP)) {
		if (fkeys->basic.ip_proto == IPPROTO_TCP)
			fs->flow_type = TCP_V4_FLOW;
		else if (fkeys->basic.ip_proto == IPPROTO_UDP)
			fs->flow_type = UDP_V4_FLOW;
		else
			goto fltr_err;

		fs->h_u.tcp_ip4_spec.ip4src = fkeys->addrs.v4addrs.src;
		fs->m_u.tcp_ip4_spec.ip4src = cpu_to_be32(~0);

		fs->h_u.tcp_ip4_spec.ip4dst = fkeys->addrs.v4addrs.dst;
		fs->m_u.tcp_ip4_spec.ip4dst = cpu_to_be32(~0);

		fs->h_u.tcp_ip4_spec.psrc = fkeys->ports.src;
		fs->m_u.tcp_ip4_spec.psrc = cpu_to_be16(~0);

		fs->h_u.tcp_ip4_spec.pdst = fkeys->ports.dst;
		fs->m_u.tcp_ip4_spec.pdst = cpu_to_be16(~0);
	} else {
		if (fkeys->basic.ip_proto == IPPROTO_TCP)
			fs->flow_type = TCP_V6_FLOW;
		else if (fkeys->basic.ip_proto == IPPROTO_UDP)
			fs->flow_type = UDP_V6_FLOW;
		else
			goto fltr_err;

		*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6src[0] =
			fkeys->addrs.v6addrs.src;
		*(struct in6_addr *)&fs->h_u.tcp_ip6_spec.ip6dst[0] =
			fkeys->addrs.v6addrs.dst;
		for (i = 0; i < 4; i++) {
			fs->m_u.tcp_ip6_spec.ip6src[i] = cpu_to_be32(~0);
			fs->m_u.tcp_ip6_spec.ip6dst[i] = cpu_to_be32(~0);
		}
		fs->h_u.tcp_ip6_spec.psrc = fkeys->ports.src;
		fs->m_u.tcp_ip6_spec.psrc = cpu_to_be16(~0);

		fs->h_u.tcp_ip6_spec.pdst = fkeys->ports.dst;
		fs->m_u.tcp_ip6_spec.pdst = cpu_to_be16(~0);
	}

	fs->ring_cookie = fltr->rxq;
	rc = 0;

fltr_err:
	rcu_read_unlock();

	return rc;
}
#endif

static u64 get_ethtool_ipv4_rss(struct bnxt *bp)
{
	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4)
		return RXH_IP_SRC | RXH_IP_DST;
	return 0;
}

static u64 get_ethtool_ipv6_rss(struct bnxt *bp)
{
	if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6)
		return RXH_IP_SRC | RXH_IP_DST;
	return 0;
}

static int bnxt_grxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	cmd->data = 0;
	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= get_ethtool_ipv4_rss(bp);
		break;
	case UDP_V4_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		cmd->data |= get_ethtool_ipv4_rss(bp);
		break;

	case TCP_V6_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		cmd->data |= get_ethtool_ipv6_rss(bp);
		break;
	case UDP_V6_FLOW:
		if (bp->rss_hash_cfg & VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6)
			cmd->data |= RXH_IP_SRC | RXH_IP_DST |
				     RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* fall through */
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		cmd->data |= get_ethtool_ipv6_rss(bp);
		break;
	}
	return 0;
}

#define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
#define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)
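
/* RXH_4TUPLE selects IP src/dst plus L4 ports, RXH_2TUPLE IP src/dst
 * only.  bnxt_srxfh() accepts exactly these combinations (or 0 to
 * disable hashing for a flow type), e.g.
 * "ethtool -N eth0 rx-flow-hash tcp4 sdfn".
 */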
static int bnxt_srxfh(struct bnxt *bp, struct ethtool_rxnfc *cmd)
{
	u32 rss_hash_cfg = bp->rss_hash_cfg;
	int tuple, rc = 0;

	if (cmd->data == RXH_4TUPLE)
		tuple = 4;
	else if (cmd->data == RXH_2TUPLE)
		tuple = 2;
	else if (!cmd->data)
		tuple = 0;
	else
		return -EINVAL;

	if (cmd->flow_type == TCP_V4_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4;
	} else if (cmd->flow_type == UDP_V4_FLOW) {
		if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4;
	} else if (cmd->flow_type == TCP_V6_FLOW) {
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6;
	} else if (cmd->flow_type == UDP_V6_FLOW) {
		if (tuple == 4 && !(bp->flags & BNXT_FLAG_UDP_RSS_CAP))
			return -EINVAL;
		rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
		if (tuple == 4)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6;
	} else if (tuple == 4) {
		return -EINVAL;
	}

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
	case UDP_V4_FLOW:
	case SCTP_V4_FLOW:
	case AH_ESP_V4_FLOW:
	case AH_V4_FLOW:
	case ESP_V4_FLOW:
	case IPV4_FLOW:
		if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		else if (!tuple)
			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4;
		break;

	case TCP_V6_FLOW:
	case UDP_V6_FLOW:
	case SCTP_V6_FLOW:
	case AH_ESP_V6_FLOW:
	case AH_V6_FLOW:
	case ESP_V6_FLOW:
	case IPV6_FLOW:
		if (tuple == 2)
			rss_hash_cfg |= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
		else if (!tuple)
			rss_hash_cfg &= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6;
		break;
	}

	if (bp->rss_hash_cfg == rss_hash_cfg)
		return 0;

	bp->rss_hash_cfg = rss_hash_cfg;
	if (netif_running(bp->dev)) {
		bnxt_close_nic(bp, false, false);
		rc = bnxt_open_nic(bp, false, false);
	}
	return rc;
}
*dev
, struct ethtool_rxnfc
*cmd
,
1146 struct bnxt
*bp
= netdev_priv(dev
);
1150 #ifdef CONFIG_RFS_ACCEL
1151 case ETHTOOL_GRXRINGS
:
1152 cmd
->data
= bp
->rx_nr_rings
;
1155 case ETHTOOL_GRXCLSRLCNT
:
1156 cmd
->rule_cnt
= bp
->ntp_fltr_count
;
1157 cmd
->data
= BNXT_NTP_FLTR_MAX_FLTR
;
1160 case ETHTOOL_GRXCLSRLALL
:
1161 rc
= bnxt_grxclsrlall(bp
, cmd
, (u32
*)rule_locs
);
1164 case ETHTOOL_GRXCLSRULE
:
1165 rc
= bnxt_grxclsrule(bp
, cmd
);
1170 rc
= bnxt_grxfh(bp
, cmd
);

static int bnxt_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	switch (cmd->cmd) {
	case ETHTOOL_SRXFH:
		rc = bnxt_srxfh(bp, cmd);
		break;

	default:
		rc = -EOPNOTSUPP;
		break;
	}
	return rc;
}

static u32 bnxt_get_rxfh_indir_size(struct net_device *dev)
{
	return HW_HASH_INDEX_SIZE;
}

static u32 bnxt_get_rxfh_key_size(struct net_device *dev)
{
	return HW_HASH_KEY_SIZE;
}

static int bnxt_get_rxfh(struct net_device *dev, u32 *indir, u8 *key,
			 u8 *hfunc)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_vnic_info *vnic;
	int i = 0;

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	if (!bp->vnic_info)
		return 0;

	vnic = &bp->vnic_info[0];
	if (indir && vnic->rss_table) {
		for (i = 0; i < HW_HASH_INDEX_SIZE; i++)
			indir[i] = le16_to_cpu(vnic->rss_table[i]);
	}

	if (key && vnic->rss_hash_key)
		memcpy(key, vnic->rss_hash_key, HW_HASH_KEY_SIZE);

	return 0;
}

static void bnxt_get_drvinfo(struct net_device *dev,
			     struct ethtool_drvinfo *info)
{
	struct bnxt *bp = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, bp->fw_ver_str, sizeof(info->fw_version));
	strlcpy(info->bus_info, pci_name(bp->pdev), sizeof(info->bus_info));
	info->n_stats = bnxt_get_num_stats(bp);
	info->testinfo_len = bp->num_tests;
	/* TODO CHIMP_FW: eeprom dump details */
	info->eedump_len = 0;
	/* TODO CHIMP FW: reg dump details */
	info->regdump_len = 0;
}

static void bnxt_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnxt *bp = netdev_priv(dev);

	wol->supported = 0;
	wol->wolopts = 0;
	memset(&wol->sopass, 0, sizeof(wol->sopass));
	if (bp->flags & BNXT_FLAG_WOL_CAP) {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
	}
}

static int bnxt_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnxt *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (!(bp->flags & BNXT_FLAG_WOL_CAP))
			return -EINVAL;
		if (!bp->wol) {
			if (bnxt_hwrm_alloc_wol_fltr(bp))
				return -EBUSY;
			bp->wol = 1;
		}
	} else {
		if (bp->wol) {
			if (bnxt_hwrm_free_wol_fltr(bp))
				return -EBUSY;
			bp->wol = 0;
		}
	}
	return 0;
}

u32 _bnxt_fw_to_ethtool_adv_spds(u16 fw_speeds, u8 fw_pause)
{
	u32 speed_mask = 0;

	/* TODO: support 25GB, 40GB, 50GB with different cable type */
	/* set the advertised speeds */
	if (fw_speeds & BNXT_LINK_SPEED_MSK_100MB)
		speed_mask |= ADVERTISED_100baseT_Full;
	if (fw_speeds & BNXT_LINK_SPEED_MSK_1GB)
		speed_mask |= ADVERTISED_1000baseT_Full;
	if (fw_speeds & BNXT_LINK_SPEED_MSK_2_5GB)
		speed_mask |= ADVERTISED_2500baseX_Full;
	if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
		speed_mask |= ADVERTISED_10000baseT_Full;
	if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
		speed_mask |= ADVERTISED_40000baseCR4_Full;

	if ((fw_pause & BNXT_LINK_PAUSE_BOTH) == BNXT_LINK_PAUSE_BOTH)
		speed_mask |= ADVERTISED_Pause;
	else if (fw_pause & BNXT_LINK_PAUSE_TX)
		speed_mask |= ADVERTISED_Asym_Pause;
	else if (fw_pause & BNXT_LINK_PAUSE_RX)
		speed_mask |= ADVERTISED_Pause | ADVERTISED_Asym_Pause;

	return speed_mask;
}
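
/* These macros expand to one ethtool link-mode test or set per supported
 * firmware speed bit, translating between the firmware speed mask and
 * the ethtool_link_ksettings bitmaps in both directions.
 */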
#define BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, name)\
{									\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100MB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     100baseT_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_1GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     1000baseT_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_10GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     10000baseT_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_25GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     25000baseCR_Full);	\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_40GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     40000baseCR4_Full);\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_50GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     50000baseCR2_Full);\
	if ((fw_speeds) & BNXT_LINK_SPEED_MSK_100GB)			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     100000baseCR4_Full);\
	if ((fw_pause) & BNXT_LINK_PAUSE_RX) {				\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     Pause);		\
		if (!((fw_pause) & BNXT_LINK_PAUSE_TX))			\
			ethtool_link_ksettings_add_link_mode(		\
					lk_ksettings, name, Asym_Pause);\
	} else if ((fw_pause) & BNXT_LINK_PAUSE_TX) {			\
		ethtool_link_ksettings_add_link_mode(lk_ksettings, name,\
						     Asym_Pause);	\
	}								\
}

#define BNXT_ETHTOOL_TO_FW_SPDS(fw_speeds, lk_ksettings, name)		\
{									\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  100baseT_Full) ||	\
	    ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  100baseT_Half))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_100MB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  1000baseT_Full) ||	\
	    ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  1000baseT_Half))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_1GB;			\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  10000baseT_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_10GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  25000baseCR_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_25GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  40000baseCR4_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_40GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  50000baseCR2_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_50GB;		\
	if (ethtool_link_ksettings_test_link_mode(lk_ksettings, name,	\
						  100000baseCR4_Full))	\
		(fw_speeds) |= BNXT_LINK_SPEED_MSK_100GB;		\
}

static void bnxt_fw_to_ethtool_advertised_spds(struct bnxt_link_info *link_info,
				struct ethtool_link_ksettings *lk_ksettings)
{
	u16 fw_speeds = link_info->advertising;
	u8 fw_pause = 0;

	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
		fw_pause = link_info->auto_pause_setting;

	BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings, advertising);
}

static void bnxt_fw_to_ethtool_lp_adv(struct bnxt_link_info *link_info,
				struct ethtool_link_ksettings *lk_ksettings)
{
	u16 fw_speeds = link_info->lp_auto_link_speeds;
	u8 fw_pause = 0;

	if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
		fw_pause = link_info->lp_pause;

	BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, fw_pause, lk_ksettings,
				lp_advertising);
}

static void bnxt_fw_to_ethtool_support_spds(struct bnxt_link_info *link_info,
				struct ethtool_link_ksettings *lk_ksettings)
{
	u16 fw_speeds = link_info->support_speeds;

	BNXT_FW_TO_ETHTOOL_SPDS(fw_speeds, 0, lk_ksettings, supported);

	ethtool_link_ksettings_add_link_mode(lk_ksettings, supported, Pause);
	ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
					     Asym_Pause);

	if (link_info->support_auto_speeds)
		ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
						     Autoneg);
}

u32 bnxt_fw_to_ethtool_speed(u16 fw_link_speed)
{
	switch (fw_link_speed) {
	case BNXT_LINK_SPEED_100MB:
		return SPEED_100;
	case BNXT_LINK_SPEED_1GB:
		return SPEED_1000;
	case BNXT_LINK_SPEED_2_5GB:
		return SPEED_2500;
	case BNXT_LINK_SPEED_10GB:
		return SPEED_10000;
	case BNXT_LINK_SPEED_20GB:
		return SPEED_20000;
	case BNXT_LINK_SPEED_25GB:
		return SPEED_25000;
	case BNXT_LINK_SPEED_40GB:
		return SPEED_40000;
	case BNXT_LINK_SPEED_50GB:
		return SPEED_50000;
	case BNXT_LINK_SPEED_100GB:
		return SPEED_100000;
	default:
		return SPEED_UNKNOWN;
	}
}

static int bnxt_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	struct ethtool_link_settings *base = &lk_ksettings->base;
	u32 ethtool_speed;

	ethtool_link_ksettings_zero_link_mode(lk_ksettings, supported);
	mutex_lock(&bp->link_lock);
	bnxt_fw_to_ethtool_support_spds(link_info, lk_ksettings);

	ethtool_link_ksettings_zero_link_mode(lk_ksettings, advertising);
	if (link_info->autoneg) {
		bnxt_fw_to_ethtool_advertised_spds(link_info, lk_ksettings);
		ethtool_link_ksettings_add_link_mode(lk_ksettings,
						     advertising, Autoneg);
		base->autoneg = AUTONEG_ENABLE;
		base->duplex = DUPLEX_UNKNOWN;
		if (link_info->phy_link_status == BNXT_LINK_LINK) {
			bnxt_fw_to_ethtool_lp_adv(link_info, lk_ksettings);
			if (link_info->duplex & BNXT_LINK_DUPLEX_FULL)
				base->duplex = DUPLEX_FULL;
			else
				base->duplex = DUPLEX_HALF;
		}
		ethtool_speed = bnxt_fw_to_ethtool_speed(link_info->link_speed);
	} else {
		base->autoneg = AUTONEG_DISABLE;
		ethtool_speed =
			bnxt_fw_to_ethtool_speed(link_info->req_link_speed);
		base->duplex = DUPLEX_HALF;
		if (link_info->req_duplex == BNXT_LINK_DUPLEX_FULL)
			base->duplex = DUPLEX_FULL;
	}
	base->speed = ethtool_speed;

	base->port = PORT_NONE;
	if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
		base->port = PORT_TP;
		ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
						     TP);
		ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
						     TP);
	} else {
		ethtool_link_ksettings_add_link_mode(lk_ksettings, supported,
						     FIBRE);
		ethtool_link_ksettings_add_link_mode(lk_ksettings, advertising,
						     FIBRE);

		if (link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC)
			base->port = PORT_DA;
		else if (link_info->media_type ==
			 PORT_PHY_QCFG_RESP_MEDIA_TYPE_FIBRE)
			base->port = PORT_FIBRE;
	}
	base->phy_address = link_info->phy_addr;
	mutex_unlock(&bp->link_lock);

	return 0;
}

static u32 bnxt_get_fw_speed(struct net_device *dev, u32 ethtool_speed)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	u16 support_spds = link_info->support_speeds;
	u32 fw_speed = 0;

	switch (ethtool_speed) {
	case SPEED_100:
		if (support_spds & BNXT_LINK_SPEED_MSK_100MB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100MB;
		break;
	case SPEED_1000:
		if (support_spds & BNXT_LINK_SPEED_MSK_1GB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_1GB;
		break;
	case SPEED_2500:
		if (support_spds & BNXT_LINK_SPEED_MSK_2_5GB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_2_5GB;
		break;
	case SPEED_10000:
		if (support_spds & BNXT_LINK_SPEED_MSK_10GB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_10GB;
		break;
	case SPEED_20000:
		if (support_spds & BNXT_LINK_SPEED_MSK_20GB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_20GB;
		break;
	case SPEED_25000:
		if (support_spds & BNXT_LINK_SPEED_MSK_25GB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_25GB;
		break;
	case SPEED_40000:
		if (support_spds & BNXT_LINK_SPEED_MSK_40GB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_40GB;
		break;
	case SPEED_50000:
		if (support_spds & BNXT_LINK_SPEED_MSK_50GB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_50GB;
		break;
	case SPEED_100000:
		if (support_spds & BNXT_LINK_SPEED_MSK_100GB)
			fw_speed = PORT_PHY_CFG_REQ_AUTO_LINK_SPEED_100GB;
		break;
	default:
		netdev_err(dev, "unsupported speed!\n");
		break;
	}
	return fw_speed;
}

u16 bnxt_get_fw_auto_link_speeds(u32 advertising)
{
	u16 fw_speed_mask = 0;

	/* only support autoneg at speed 100, 1000, and 10000 */
	if (advertising & (ADVERTISED_100baseT_Full |
			   ADVERTISED_100baseT_Half)) {
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_100MB;
	}
	if (advertising & (ADVERTISED_1000baseT_Full |
			   ADVERTISED_1000baseT_Half)) {
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_1GB;
	}
	if (advertising & ADVERTISED_10000baseT_Full)
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;

	if (advertising & ADVERTISED_40000baseCR4_Full)
		fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;

	return fw_speed_mask;
}

static int bnxt_set_link_ksettings(struct net_device *dev,
			   const struct ethtool_link_ksettings *lk_ksettings)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;
	const struct ethtool_link_settings *base = &lk_ksettings->base;
	bool set_pause = false;
	u16 fw_advertising = 0;
	u32 speed;
	int rc = 0;

	if (!BNXT_PHY_CFG_ABLE(bp))
		return -EOPNOTSUPP;

	mutex_lock(&bp->link_lock);
	if (base->autoneg == AUTONEG_ENABLE) {
		BNXT_ETHTOOL_TO_FW_SPDS(fw_advertising, lk_ksettings,
					advertising);
		link_info->autoneg |= BNXT_AUTONEG_SPEED;
		if (!fw_advertising)
			link_info->advertising = link_info->support_auto_speeds;
		else
			link_info->advertising = fw_advertising;
		/* any change to autoneg will cause link change, therefore the
		 * driver should put back the original pause setting in autoneg
		 */
		set_pause = true;
	} else {
		u16 fw_speed;
		u8 phy_type = link_info->phy_type;

		if (phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASET  ||
		    phy_type == PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE ||
		    link_info->media_type == PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP) {
			netdev_err(dev, "10GBase-T devices must autoneg\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		if (base->duplex == DUPLEX_HALF) {
			netdev_err(dev, "HALF DUPLEX is not supported!\n");
			rc = -EINVAL;
			goto set_setting_exit;
		}
		speed = base->speed;
		fw_speed = bnxt_get_fw_speed(dev, speed);
		if (!fw_speed) {
			rc = -EINVAL;
			goto set_setting_exit;
		}
		link_info->req_link_speed = fw_speed;
		link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
		link_info->autoneg = 0;
		link_info->advertising = 0;
	}

	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, set_pause, false);

set_setting_exit:
	mutex_unlock(&bp->link_lock);
	return rc;
}

static void bnxt_get_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;

	if (BNXT_VF(bp))
		return;
	epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
	epause->rx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_RX);
	epause->tx_pause = !!(link_info->req_flow_ctrl & BNXT_LINK_PAUSE_TX);
}

static int bnxt_set_pauseparam(struct net_device *dev,
			       struct ethtool_pauseparam *epause)
{
	int rc = 0;
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;

	if (!BNXT_PHY_CFG_ABLE(bp))
		return -EOPNOTSUPP;

	if (epause->autoneg) {
		if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
			return -EINVAL;

		link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
		if (bp->hwrm_spec_code >= 0x10201)
			link_info->req_flow_ctrl =
				PORT_PHY_CFG_REQ_AUTO_PAUSE_AUTONEG_PAUSE;
	} else {
		/* when transition from auto pause to force pause,
		 * force a link change
		 */
		if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL)
			link_info->force_link_chng = true;
		link_info->autoneg &= ~BNXT_AUTONEG_FLOW_CTRL;
		link_info->req_flow_ctrl = 0;
	}
	if (epause->rx_pause)
		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_RX;

	if (epause->tx_pause)
		link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_pause(bp);
	return rc;
}

static u32 bnxt_get_link(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	/* TODO: handle MF, VF, driver close case */
	return bp->link_info.link_up;
}

static void bnxt_print_admin_err(struct bnxt *bp)
{
	netdev_info(bp->dev, "PF does not have admin privileges to flash or reset the device\n");
}

static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
				u16 ext, u16 *index, u32 *item_length,
				u32 *data_length);

static int bnxt_flash_nvram(struct net_device *dev,
			    u16 dir_type,
			    u16 dir_ordinal,
			    u16 dir_ext,
			    u16 dir_attr,
			    const u8 *data,
			    size_t data_len)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	struct hwrm_nvm_write_input req = {0};
	dma_addr_t dma_handle;
	u8 *kmem;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_WRITE, -1, -1);

	req.dir_type = cpu_to_le16(dir_type);
	req.dir_ordinal = cpu_to_le16(dir_ordinal);
	req.dir_ext = cpu_to_le16(dir_ext);
	req.dir_attr = cpu_to_le16(dir_attr);
	req.dir_data_length = cpu_to_le32(data_len);

	kmem = dma_alloc_coherent(&bp->pdev->dev, data_len, &dma_handle,
				  GFP_KERNEL);
	if (!kmem) {
		netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
			   (unsigned)data_len);
		return -ENOMEM;
	}
	memcpy(kmem, data, data_len);
	req.host_src_addr = cpu_to_le64(dma_handle);

	rc = hwrm_send_message(bp, &req, sizeof(req), FLASH_NVRAM_TIMEOUT);
	dma_free_coherent(&bp->pdev->dev, data_len, kmem, dma_handle);

	if (rc == -EACCES)
		bnxt_print_admin_err(bp);
	return rc;
}
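
/* HWRM NVM commands take host buffers by DMA address, so the flash/read
 * helpers here bounce the data through a dma_alloc_coherent() buffer
 * instead of mapping the caller's memory directly.
 */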
static int bnxt_firmware_reset(struct net_device *dev,
			       u16 dir_type)
{
	struct hwrm_fw_reset_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FW_RESET, -1, -1);

	/* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
	/* (e.g. when firmware isn't already running) */
	switch (dir_type) {
	case BNX_DIR_TYPE_CHIMP_PATCH:
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT;
		/* Self-reset ChiMP upon next PCIe reset: */
		req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
		break;
	case BNX_DIR_TYPE_APE_FW:
	case BNX_DIR_TYPE_APE_PATCH:
		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT;
		/* Self-reset APE upon next PCIe reset: */
		req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST;
		break;
	case BNX_DIR_TYPE_KONG_FW:
	case BNX_DIR_TYPE_KONG_PATCH:
		req.embedded_proc_type =
			FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL;
		break;
	case BNX_DIR_TYPE_BONO_FW:
	case BNX_DIR_TYPE_BONO_PATCH:
		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE;
		break;
	case BNXT_FW_RESET_CHIP:
		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP;
		req.selfrst_status = FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP;
		if (bp->fw_cap & BNXT_FW_CAP_HOT_RESET)
			req.flags = FW_RESET_REQ_FLAGS_RESET_GRACEFUL;
		break;
	case BNXT_FW_RESET_AP:
		req.embedded_proc_type = FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP;
		break;
	default:
		return -EINVAL;
	}

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc == -EACCES)
		bnxt_print_admin_err(bp);
	return rc;
}
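
/* The last 4 bytes of a firmware image hold its CRC32; the
 * "~crc32(~0, ...)" form below yields the standard CRC-32 of the payload
 * for comparison against that stored value.
 */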
static int bnxt_flash_firmware(struct net_device *dev,
			       u16 dir_type,
			       const u8 *fw_data,
			       size_t fw_size)
{
	int	rc = 0;
	u16	code_type;
	u32	stored_crc;
	u32	calculated_crc;
	struct bnxt_fw_header *header = (struct bnxt_fw_header *)fw_data;

	switch (dir_type) {
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
		code_type = CODE_BOOT;
		break;
	case BNX_DIR_TYPE_CHIMP_PATCH:
		code_type = CODE_CHIMP_PATCH;
		break;
	case BNX_DIR_TYPE_APE_FW:
		code_type = CODE_MCTP_PASSTHRU;
		break;
	case BNX_DIR_TYPE_APE_PATCH:
		code_type = CODE_APE_PATCH;
		break;
	case BNX_DIR_TYPE_KONG_FW:
		code_type = CODE_KONG_FW;
		break;
	case BNX_DIR_TYPE_KONG_PATCH:
		code_type = CODE_KONG_PATCH;
		break;
	case BNX_DIR_TYPE_BONO_FW:
		code_type = CODE_BONO_FW;
		break;
	case BNX_DIR_TYPE_BONO_PATCH:
		code_type = CODE_BONO_PATCH;
		break;
	default:
		netdev_err(dev, "Unsupported directory entry type: %u\n",
			   dir_type);
		return -EINVAL;
	}
	if (fw_size < sizeof(struct bnxt_fw_header)) {
		netdev_err(dev, "Invalid firmware file size: %u\n",
			   (unsigned int)fw_size);
		return -EINVAL;
	}
	if (header->signature != cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE)) {
		netdev_err(dev, "Invalid firmware signature: %08X\n",
			   le32_to_cpu(header->signature));
		return -EINVAL;
	}
	if (header->code_type != code_type) {
		netdev_err(dev, "Expected firmware type: %d, read: %d\n",
			   code_type, header->code_type);
		return -EINVAL;
	}
	if (header->device != DEVICE_CUMULUS_FAMILY) {
		netdev_err(dev, "Expected firmware device family %d, read: %d\n",
			   DEVICE_CUMULUS_FAMILY, header->device);
		return -EINVAL;
	}
	/* Confirm the CRC32 checksum of the file: */
	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
					     sizeof(stored_crc)));
	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
	if (calculated_crc != stored_crc) {
		netdev_err(dev,
			   "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
			   (unsigned long)stored_crc,
			   (unsigned long)calculated_crc);
		return -EINVAL;
	}
	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
			      0, 0, fw_data, fw_size);
	if (rc == 0)	/* Firmware update successful */
		rc = bnxt_firmware_reset(dev, dir_type);

	return rc;
}

static int bnxt_flash_microcode(struct net_device *dev,
				u16 dir_type,
				const u8 *fw_data,
				size_t fw_size)
{
	struct bnxt_ucode_trailer *trailer;
	u32 calculated_crc;
	u32 stored_crc;
	int rc = 0;

	if (fw_size < sizeof(struct bnxt_ucode_trailer)) {
		netdev_err(dev, "Invalid microcode file size: %u\n",
			   (unsigned int)fw_size);
		return -EINVAL;
	}
	trailer = (struct bnxt_ucode_trailer *)(fw_data + (fw_size -
						sizeof(*trailer)));
	if (trailer->sig != cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE)) {
		netdev_err(dev, "Invalid microcode trailer signature: %08X\n",
			   le32_to_cpu(trailer->sig));
		return -EINVAL;
	}
	if (le16_to_cpu(trailer->dir_type) != dir_type) {
		netdev_err(dev, "Expected microcode type: %d, read: %d\n",
			   dir_type, le16_to_cpu(trailer->dir_type));
		return -EINVAL;
	}
	if (le16_to_cpu(trailer->trailer_length) <
		sizeof(struct bnxt_ucode_trailer)) {
		netdev_err(dev, "Invalid microcode trailer length: %d\n",
			   le16_to_cpu(trailer->trailer_length));
		return -EINVAL;
	}

	/* Confirm the CRC32 checksum of the file: */
	stored_crc = le32_to_cpu(*(__le32 *)(fw_data + fw_size -
					     sizeof(stored_crc)));
	calculated_crc = ~crc32(~0, fw_data, fw_size - sizeof(stored_crc));
	if (calculated_crc != stored_crc) {
		netdev_err(dev,
			   "CRC32 (%08lX) does not match calculated: %08lX\n",
			   (unsigned long)stored_crc,
			   (unsigned long)calculated_crc);
		return -EINVAL;
	}
	rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
			      0, 0, fw_data, fw_size);

	return rc;
}

static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type)
{
	switch (dir_type) {
	case BNX_DIR_TYPE_CHIMP_PATCH:
	case BNX_DIR_TYPE_BOOTCODE:
	case BNX_DIR_TYPE_BOOTCODE_2:
	case BNX_DIR_TYPE_APE_FW:
	case BNX_DIR_TYPE_APE_PATCH:
	case BNX_DIR_TYPE_KONG_FW:
	case BNX_DIR_TYPE_KONG_PATCH:
	case BNX_DIR_TYPE_BONO_FW:
	case BNX_DIR_TYPE_BONO_PATCH:
		return true;
	}

	return false;
}

static bool bnxt_dir_type_is_other_exec_format(u16 dir_type)
{
	switch (dir_type) {
	case BNX_DIR_TYPE_AVS:
	case BNX_DIR_TYPE_EXP_ROM_MBA:
	case BNX_DIR_TYPE_PCIE:
	case BNX_DIR_TYPE_TSCF_UCODE:
	case BNX_DIR_TYPE_EXT_PHY:
	case BNX_DIR_TYPE_CCM:
	case BNX_DIR_TYPE_ISCSI_BOOT:
	case BNX_DIR_TYPE_ISCSI_BOOT_IPV6:
	case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6:
		return true;
	}

	return false;
}

static bool bnxt_dir_type_is_executable(u16 dir_type)
{
	return bnxt_dir_type_is_ape_bin_format(dir_type) ||
	       bnxt_dir_type_is_other_exec_format(dir_type);
}

static int bnxt_flash_firmware_from_file(struct net_device *dev,
					 u16 dir_type,
					 const char *filename)
{
	const struct firmware  *fw;
	int			rc;

	rc = request_firmware(&fw, filename, &dev->dev);
	if (rc != 0) {
		netdev_err(dev, "Error %d requesting firmware file: %s\n",
			   rc, filename);
		return rc;
	}
	if (bnxt_dir_type_is_ape_bin_format(dir_type))
		rc = bnxt_flash_firmware(dev, dir_type, fw->data, fw->size);
	else if (bnxt_dir_type_is_other_exec_format(dir_type))
		rc = bnxt_flash_microcode(dev, dir_type, fw->data, fw->size);
	else
		rc = bnxt_flash_nvram(dev, dir_type, BNX_DIR_ORDINAL_FIRST,
				      0, 0, fw->data, fw->size);
	release_firmware(fw);
	return rc;
}
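
/* Package flashing is a two-step HWRM sequence: NVM_MODIFY writes the
 * package image into the pre-created update area, then NVM_INSTALL_UPDATE
 * asks firmware to install it, retrying once with the defragment flag if
 * the update area is fragmented.
 */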
int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
				 u32 install_type)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_nvm_install_update_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_nvm_install_update_input install = {0};
	const struct firmware *fw;
	int rc, hwrm_err = 0;
	u32 item_len;
	u16 index;

	bnxt_hwrm_fw_set_time(bp);

	if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_UPDATE,
				 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
				 &index, &item_len, NULL) != 0) {
		netdev_err(dev, "PKG update area not created in nvram\n");
		return -ENOBUFS;
	}

	rc = request_firmware(&fw, filename, &dev->dev);
	if (rc != 0) {
		netdev_err(dev, "PKG error %d requesting file: %s\n",
			   rc, filename);
		return rc;
	}

	if (fw->size > item_len) {
		netdev_err(dev, "PKG insufficient update area in nvram: %lu",
			   (unsigned long)fw->size);
		rc = -EFBIG;
	} else {
		dma_addr_t dma_handle;
		u8 *kmem;
		struct hwrm_nvm_modify_input modify = {0};

		bnxt_hwrm_cmd_hdr_init(bp, &modify, HWRM_NVM_MODIFY, -1, -1);

		modify.dir_idx = cpu_to_le16(index);
		modify.len = cpu_to_le32(fw->size);

		kmem = dma_alloc_coherent(&bp->pdev->dev, fw->size,
					  &dma_handle, GFP_KERNEL);
		if (!kmem) {
			netdev_err(dev,
				   "dma_alloc_coherent failure, length = %u\n",
				   (unsigned int)fw->size);
			rc = -ENOMEM;
		} else {
			memcpy(kmem, fw->data, fw->size);
			modify.host_src_addr = cpu_to_le64(dma_handle);

			hwrm_err = hwrm_send_message(bp, &modify,
						     sizeof(modify),
						     FLASH_PACKAGE_TIMEOUT);
			dma_free_coherent(&bp->pdev->dev, fw->size, kmem,
					  dma_handle);
		}
	}
	release_firmware(fw);
	if (rc || hwrm_err)
		goto err_exit;

	if ((install_type & 0xffff) == 0)
		install_type >>= 16;
	bnxt_hwrm_cmd_hdr_init(bp, &install, HWRM_NVM_INSTALL_UPDATE, -1, -1);
	install.install_type = cpu_to_le32(install_type);

	mutex_lock(&bp->hwrm_cmd_lock);
	hwrm_err = _hwrm_send_message(bp, &install, sizeof(install),
				      INSTALL_PACKAGE_TIMEOUT);
	if (hwrm_err) {
		u8 error_code = ((struct hwrm_err_output *)resp)->cmd_err;

		if (resp->error_code && error_code ==
		    NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR) {
			install.flags |= cpu_to_le16(
			       NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG);
			hwrm_err = _hwrm_send_message(bp, &install,
						      sizeof(install),
						      INSTALL_PACKAGE_TIMEOUT);
		}
		if (hwrm_err)
			goto flash_pkg_exit;
	}

	if (resp->result) {
		netdev_err(dev, "PKG install error = %d, problem_item = %d\n",
			   (s8)resp->result, (int)resp->problem_item);
		rc = -ENOPKG;
	}
flash_pkg_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
err_exit:
	if (hwrm_err == -EACCES)
		bnxt_print_admin_err(bp);
	return rc;
}

static int bnxt_flash_device(struct net_device *dev,
			     struct ethtool_flash *flash)
{
	if (!BNXT_PF((struct bnxt *)netdev_priv(dev))) {
		netdev_err(dev, "flashdev not supported from a virtual function\n");
		return -EINVAL;
	}

	if (flash->region == ETHTOOL_FLASH_ALL_REGIONS ||
	    flash->region > 0xffff)
		return bnxt_flash_package_from_file(dev, flash->data,
						    flash->region);

	return bnxt_flash_firmware_from_file(dev, flash->region, flash->data);
}

static int nvm_get_dir_info(struct net_device *dev, u32 *entries, u32 *length)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	struct hwrm_nvm_get_dir_info_input req = {0};
	struct hwrm_nvm_get_dir_info_output *output = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_INFO, -1, -1);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		*entries = le32_to_cpu(output->entries);
		*length = le32_to_cpu(output->entry_length);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int bnxt_get_eeprom_len(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);

	if (BNXT_VF(bp))
		return 0;

	/* The -1 return value allows the entire 32-bit range of offsets to be
	 * passed via the ethtool command-line utility.
	 */
	return -1;
}
static int bnxt_get_nvram_directory(struct net_device *dev, u32 len, u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	u32 dir_entries;
	u32 entry_length;
	u8 *buf;
	size_t buflen;
	dma_addr_t dma_handle;
	struct hwrm_nvm_get_dir_entries_input req = {0};

	rc = nvm_get_dir_info(dev, &dir_entries, &entry_length);
	if (rc != 0)
		return rc;

	/* Insert 2 bytes of directory info (count and size of entries) */
	if (len < 2)
		return -EINVAL;

	*data++ = dir_entries;
	*data++ = entry_length;
	len -= 2;
	memset(data, 0xff, len);

	buflen = dir_entries * entry_length;
	buf = dma_alloc_coherent(&bp->pdev->dev, buflen, &dma_handle,
				 GFP_KERNEL);
	if (!buf) {
		netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
			   (unsigned)buflen);
		return -ENOMEM;
	}
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_GET_DIR_ENTRIES, -1, -1);
	req.host_dest_addr = cpu_to_le64(dma_handle);
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc == 0)
		memcpy(data, buf, len > buflen ? buflen : len);
	dma_free_coherent(&bp->pdev->dev, buflen, buf, dma_handle);
	return rc;
}
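/*
 * Usage note (illustrative): ethtool's EEPROM read with offset 0 returns
 * this directory listing rather than raw NVRAM contents, e.g.:
 *
 *	ethtool -e eth0 offset 0 length 512
 *
 * The first two bytes are the entry count and entry size; the remainder is
 * the packed entry table DMA'd back by HWRM_NVM_GET_DIR_ENTRIES.
 */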
static int bnxt_get_nvram_item(struct net_device *dev, u32 index, u32 offset,
			       u32 length, u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	u8 *buf;
	dma_addr_t dma_handle;
	struct hwrm_nvm_read_input req = {0};

	if (!length)
		return -EINVAL;

	buf = dma_alloc_coherent(&bp->pdev->dev, length, &dma_handle,
				 GFP_KERNEL);
	if (!buf) {
		netdev_err(dev, "dma_alloc_coherent failure, length = %u\n",
			   (unsigned)length);
		return -ENOMEM;
	}
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_READ, -1, -1);
	req.host_dest_addr = cpu_to_le64(dma_handle);
	req.dir_idx = cpu_to_le16(index);
	req.offset = cpu_to_le32(offset);
	req.len = cpu_to_le32(length);

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc == 0)
		memcpy(data, buf, length);
	dma_free_coherent(&bp->pdev->dev, length, buf, dma_handle);
	return rc;
}
static int bnxt_find_nvram_item(struct net_device *dev, u16 type, u16 ordinal,
				u16 ext, u16 *index, u32 *item_length,
				u32 *data_length)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc;
	struct hwrm_nvm_find_dir_entry_input req = {0};
	struct hwrm_nvm_find_dir_entry_output *output = bp->hwrm_cmd_resp_addr;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_FIND_DIR_ENTRY, -1, -1);
	req.enables = 0;
	req.dir_idx = 0;
	req.dir_type = cpu_to_le16(type);
	req.dir_ordinal = cpu_to_le16(ordinal);
	req.dir_ext = cpu_to_le16(ext);
	req.opt_ordinal = NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ;
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc == 0) {
		if (index)
			*index = le16_to_cpu(output->dir_idx);
		if (item_length)
			*item_length = le32_to_cpu(output->dir_item_length);
		if (data_length)
			*data_length = le32_to_cpu(output->dir_data_length);
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static char *bnxt_parse_pkglog(int desired_field, u8 *data, size_t datalen)
{
	char	*retval = NULL;
	char	*p;
	char	*value;
	int	field = 0;

	if (datalen < 1)
		return NULL;
	/* null-terminate the log data (removing last '\n'): */
	data[datalen - 1] = 0;
	for (p = data; *p != 0; p++) {
		field = 0;
		retval = NULL;
		while (*p != 0 && *p != '\n') {
			value = p;
			while (*p != 0 && *p != '\t' && *p != '\n')
				p++;
			if (field == desired_field)
				retval = value;
			if (*p != '\t')
				break;
			*p = 0;
			field++;
			p++;
		}
		if (*p == 0)
			break;
		*p = 0;
	}
	return retval;
}
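/*
 * Illustrative sketch of the format parsed above (field contents and
 * positions here are made up): records are separated by '\n' and fields
 * within a record by '\t', so a log record such as
 *
 *	"20191203\t1.2.3\t..."
 *
 * would yield "1.2.3" for desired_field == 1.
 * BNX_PKG_LOG_FIELD_IDX_PKG_VERSION selects the package-version field for
 * bnxt_get_pkgver() below.
 */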
static void bnxt_get_pkgver(struct net_device *dev)
{
	struct bnxt *bp = netdev_priv(dev);
	u16 index = 0;
	char *pkgver;
	u32 pkglen;
	u8 *pkgbuf;
	int len;

	if (bnxt_find_nvram_item(dev, BNX_DIR_TYPE_PKG_LOG,
				 BNX_DIR_ORDINAL_FIRST, BNX_DIR_EXT_NONE,
				 &index, NULL, &pkglen) != 0)
		return;

	pkgbuf = kzalloc(pkglen, GFP_KERNEL);
	if (!pkgbuf) {
		dev_err(&bp->pdev->dev, "Unable to allocate memory for pkg version, length = %u\n",
			pkglen);
		return;
	}

	if (bnxt_get_nvram_item(dev, index, 0, pkglen, pkgbuf))
		goto err;

	pkgver = bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION, pkgbuf,
				   pkglen);
	if (pkgver && *pkgver != 0 && isdigit(*pkgver)) {
		len = strlen(bp->fw_ver_str);
		snprintf(bp->fw_ver_str + len, FW_VER_STR_LEN - len - 1,
			 "/pkg %s", pkgver);
	}
err:
	kfree(pkgbuf);
}
static int bnxt_get_eeprom(struct net_device *dev,
			   struct ethtool_eeprom *eeprom,
			   u8 *data)
{
	u32 index;
	u32 offset;

	if (eeprom->offset == 0) /* special offset value to get directory */
		return bnxt_get_nvram_directory(dev, eeprom->len, data);

	index = eeprom->offset >> 24;
	offset = eeprom->offset & 0xffffff;

	if (index == 0) {
		netdev_err(dev, "unsupported index value: %d\n", index);
		return -EINVAL;
	}

	return bnxt_get_nvram_item(dev, index - 1, offset, eeprom->len, data);
}
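/*
 * Illustrative note: for non-zero offsets, the ethtool EEPROM offset is an
 * encoded handle rather than a flat address:
 *
 *	offset = (dir_index + 1) << 24 | byte_offset_within_item
 *
 * e.g. "ethtool -e eth0 offset 0x01000000 length 64" reads the first 64
 * bytes of NVM directory entry 0.
 */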
static int bnxt_erase_nvram_directory(struct net_device *dev, u8 index)
{
	struct bnxt *bp = netdev_priv(dev);
	struct hwrm_nvm_erase_dir_entry_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_NVM_ERASE_DIR_ENTRY, -1, -1);
	req.dir_idx = cpu_to_le16(index);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_set_eeprom(struct net_device *dev,
			   struct ethtool_eeprom *eeprom,
			   u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	u8 index, dir_op;
	u16 type, ext, ordinal, attr;

	if (!BNXT_PF(bp)) {
		netdev_err(dev, "NVM write not supported from a virtual function\n");
		return -EINVAL;
	}

	type = eeprom->magic >> 16;

	if (type == 0xffff) { /* special value for directory operations */
		index = eeprom->magic & 0xff;
		dir_op = eeprom->magic >> 8;
		switch (dir_op) {
		case 0x0e: /* erase */
			if (eeprom->offset != ~eeprom->magic)
				return -EINVAL;
			return bnxt_erase_nvram_directory(dev, index - 1);
		default:
			return -EINVAL;
		}
	}

	/* Create or re-write an NVM item: */
	if (bnxt_dir_type_is_executable(type) == true)
		return -EOPNOTSUPP;
	ext = eeprom->magic & 0xffff;
	ordinal = eeprom->offset >> 16;
	attr = eeprom->offset & 0xffff;

	return bnxt_flash_nvram(dev, type, ordinal, ext, attr, data,
				eeprom->len);
}
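/*
 * Illustrative summary of the ETHTOOL_SEEPROM encoding consumed above:
 *
 *	magic  = dir_type << 16 | dir_ext            (item create/re-write)
 *	magic  = 0xffff << 16 | dir_op << 8 | index  (directory operation)
 *	offset = dir_ordinal << 16 | dir_attr        (item create/re-write)
 *
 * For the directory-erase op (0x0e), offset must additionally equal ~magic
 * as a cheap confirmation before an entry is deleted.
 */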
static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct bnxt *bp = netdev_priv(dev);
	struct ethtool_eee *eee = &bp->eee;
	struct bnxt_link_info *link_info = &bp->link_info;
	u32 advertising =
		_bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0);
	int rc = 0;

	if (!BNXT_PHY_CFG_ABLE(bp))
		return -EOPNOTSUPP;

	if (!(bp->flags & BNXT_FLAG_EEE_CAP))
		return -EOPNOTSUPP;

	if (!edata->eee_enabled)
		goto eee_ok;

	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
		netdev_warn(dev, "EEE requires autoneg\n");
		return -EINVAL;
	}
	if (edata->tx_lpi_enabled) {
		if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi ||
				       edata->tx_lpi_timer < bp->lpi_tmr_lo)) {
			netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n",
				    bp->lpi_tmr_lo, bp->lpi_tmr_hi);
			return -EINVAL;
		} else if (!bp->lpi_tmr_hi) {
			edata->tx_lpi_timer = eee->tx_lpi_timer;
		}
	}
	if (!edata->advertised) {
		edata->advertised = advertising & eee->supported;
	} else if (edata->advertised & ~advertising) {
		netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n",
			    edata->advertised, advertising);
		return -EINVAL;
	}

	eee->advertised = edata->advertised;
	eee->tx_lpi_enabled = edata->tx_lpi_enabled;
	eee->tx_lpi_timer = edata->tx_lpi_timer;
eee_ok:
	eee->eee_enabled = edata->eee_enabled;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, false, true);

	return rc;
}
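/*
 * Usage note (illustrative): EEE can only be reconfigured while autoneg is
 * on, and the advertised set must be a subset of the autoneg speeds, e.g.
 * (advertise mask here is an arbitrary example):
 *
 *	ethtool --set-eee eth0 eee on tx-lpi on tx-timer 50 advertise 0x20
 */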
static int bnxt_get_eee(struct net_device *dev, struct ethtool_eee *edata)
{
	struct bnxt *bp = netdev_priv(dev);

	if (!(bp->flags & BNXT_FLAG_EEE_CAP))
		return -EOPNOTSUPP;

	*edata = bp->eee;
	if (!bp->eee.eee_enabled) {
		/* Preserve tx_lpi_timer so that the last value will be used
		 * by default when it is re-enabled.
		 */
		edata->advertised = 0;
		edata->tx_lpi_enabled = 0;
	}

	if (!bp->eee.eee_active)
		edata->lp_advertised = 0;

	return 0;
}
static int bnxt_read_sfp_module_eeprom_info(struct bnxt *bp, u16 i2c_addr,
					    u16 page_number, u16 start_addr,
					    u16 data_length, u8 *buf)
{
	struct hwrm_port_phy_i2c_read_input req = {0};
	struct hwrm_port_phy_i2c_read_output *output = bp->hwrm_cmd_resp_addr;
	int rc, byte_offset = 0;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_I2C_READ, -1, -1);
	req.i2c_slave_addr = i2c_addr;
	req.page_number = cpu_to_le16(page_number);
	req.port_id = cpu_to_le16(bp->pf.port_id);
	do {
		u16 xfer_size;

		xfer_size = min_t(u16, data_length, BNXT_MAX_PHY_I2C_RESP_SIZE);
		data_length -= xfer_size;
		req.page_offset = cpu_to_le16(start_addr + byte_offset);
		req.data_length = xfer_size;
		req.enables = cpu_to_le32(start_addr + byte_offset ?
				 PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET : 0);
		mutex_lock(&bp->hwrm_cmd_lock);
		rc = _hwrm_send_message(bp, &req, sizeof(req),
					HWRM_CMD_TIMEOUT);
		if (!rc)
			memcpy(buf + byte_offset, output->data, xfer_size);
		mutex_unlock(&bp->hwrm_cmd_lock);
		byte_offset += xfer_size;
	} while (!rc && data_length > 0);

	return rc;
}
static int bnxt_get_module_info(struct net_device *dev,
				struct ethtool_modinfo *modinfo)
{
	u8 data[SFF_DIAG_SUPPORT_OFFSET + 1];
	struct bnxt *bp = netdev_priv(dev);
	int rc;

	/* No point in going further if phy status indicates
	 * module is not inserted or if it is powered down or
	 * if it is of type 10GBase-T
	 */
	if (bp->link_info.module_status >
	    PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG)
		return -EOPNOTSUPP;

	/* This feature is not supported in older firmware versions */
	if (bp->hwrm_spec_code < 0x10202)
		return -EOPNOTSUPP;

	rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0, 0,
					      SFF_DIAG_SUPPORT_OFFSET + 1,
					      data);
	if (!rc) {
		u8 module_id = data[0];
		u8 diag_supported = data[SFF_DIAG_SUPPORT_OFFSET];

		switch (module_id) {
		case SFF_MODULE_ID_SFP:
			modinfo->type = ETH_MODULE_SFF_8472;
			modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
			if (!diag_supported)
				modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
			break;
		case SFF_MODULE_ID_QSFP:
		case SFF_MODULE_ID_QSFP_PLUS:
			modinfo->type = ETH_MODULE_SFF_8436;
			modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
			break;
		case SFF_MODULE_ID_QSFP28:
			modinfo->type = ETH_MODULE_SFF_8636;
			modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
			break;
		default:
			rc = -EOPNOTSUPP;
			break;
		}
	}
	return rc;
}
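/*
 * Usage note (illustrative): this callback and bnxt_get_module_eeprom()
 * below back "ethtool -m", which reports the module type/eeprom length and
 * can then dump the SFF data, e.g.:
 *
 *	ethtool -m eth0
 *	ethtool -m eth0 raw on length 256
 */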
static int bnxt_get_module_eeprom(struct net_device *dev,
				  struct ethtool_eeprom *eeprom,
				  u8 *data)
{
	struct bnxt *bp = netdev_priv(dev);
	u16 start = eeprom->offset, length = eeprom->len;
	int rc = 0;

	memset(data, 0, eeprom->len);

	/* Read A0 portion of the EEPROM */
	if (start < ETH_MODULE_SFF_8436_LEN) {
		if (start + eeprom->len > ETH_MODULE_SFF_8436_LEN)
			length = ETH_MODULE_SFF_8436_LEN - start;
		rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A0, 0,
						      start, length, data);
		if (rc)
			return rc;
		start += length;
		data += length;
		length = eeprom->len - length;
	}

	/* Read A2 portion of the EEPROM */
	if (length) {
		start -= ETH_MODULE_SFF_8436_LEN;
		rc = bnxt_read_sfp_module_eeprom_info(bp, I2C_DEV_ADDR_A2, 1,
						      start, length, data);
	}
	return rc;
}
static int bnxt_nway_reset(struct net_device *dev)
{
	int rc = 0;

	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_link_info *link_info = &bp->link_info;

	if (!BNXT_PHY_CFG_ABLE(bp))
		return -EOPNOTSUPP;

	if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
		return -EINVAL;

	if (netif_running(dev))
		rc = bnxt_hwrm_set_link_setting(bp, true, false);

	return rc;
}
static int bnxt_set_phys_id(struct net_device *dev,
			    enum ethtool_phys_id_state state)
{
	struct hwrm_port_led_cfg_input req = {0};
	struct bnxt *bp = netdev_priv(dev);
	struct bnxt_pf_info *pf = &bp->pf;
	struct bnxt_led_cfg *led_cfg;
	u8 led_state;
	__le16 duration;
	int i, rc;

	if (!bp->num_leds || BNXT_VF(bp))
		return -EOPNOTSUPP;

	if (state == ETHTOOL_ID_ACTIVE) {
		led_state = PORT_LED_CFG_REQ_LED0_STATE_BLINKALT;
		duration = cpu_to_le16(500);
	} else if (state == ETHTOOL_ID_INACTIVE) {
		led_state = PORT_LED_CFG_REQ_LED1_STATE_DEFAULT;
		duration = cpu_to_le16(0);
	} else {
		return -EINVAL;
	}
	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_LED_CFG, -1, -1);
	req.port_id = cpu_to_le16(pf->port_id);
	req.num_leds = bp->num_leds;
	led_cfg = (struct bnxt_led_cfg *)&req.led0_id;
	for (i = 0; i < bp->num_leds; i++, led_cfg++) {
		req.enables |= BNXT_LED_DFLT_ENABLES(i);
		led_cfg->led_id = bp->leds[i].led_id;
		led_cfg->led_state = led_state;
		led_cfg->led_blink_on = duration;
		led_cfg->led_blink_off = duration;
		led_cfg->led_group_id = bp->leds[i].led_group_id;
	}
	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	return rc;
}
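/*
 * Usage note (illustrative): the LED blinking is driven entirely by
 * firmware via HWRM_PORT_LED_CFG; "ethtool -p eth0 10" identifies the port
 * for ten seconds with the alternating blink intervals programmed above.
 */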
static int bnxt_hwrm_selftest_irq(struct bnxt *bp, u16 cmpl_ring)
{
	struct hwrm_selftest_irq_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_IRQ, cmpl_ring, -1);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_test_irq(struct bnxt *bp)
{
	int i;

	for (i = 0; i < bp->cp_nr_rings; i++) {
		u16 cmpl_ring = bp->grp_info[i].cp_fw_ring_id;
		int rc;

		rc = bnxt_hwrm_selftest_irq(bp, cmpl_ring);
		if (rc)
			return rc;
	}
	return 0;
}
static int bnxt_hwrm_mac_loopback(struct bnxt *bp, bool enable)
{
	struct hwrm_port_mac_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_MAC_CFG, -1, -1);

	req.enables = cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK);
	if (enable)
		req.lpbk = PORT_MAC_CFG_REQ_LPBK_LOCAL;
	else
		req.lpbk = PORT_MAC_CFG_REQ_LPBK_NONE;
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_query_force_speeds(struct bnxt *bp, u16 *force_speeds)
{
	struct hwrm_port_phy_qcaps_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_port_phy_qcaps_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_QCAPS, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*force_speeds = le16_to_cpu(resp->supported_speeds_force_mode);

	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
static int bnxt_disable_an_for_lpbk(struct bnxt *bp,
				    struct hwrm_port_phy_cfg_input *req)
{
	struct bnxt_link_info *link_info = &bp->link_info;
	u16 fw_advertising;
	u16 fw_speed;
	int rc;

	if (!link_info->autoneg ||
	    (bp->test_info->flags & BNXT_TEST_FL_AN_PHY_LPBK))
		return 0;

	rc = bnxt_query_force_speeds(bp, &fw_advertising);
	if (rc)
		return rc;

	fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB;
	if (bp->link_info.link_up)
		fw_speed = bp->link_info.link_speed;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_10GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_25GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_40GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB;
	else if (fw_advertising & BNXT_LINK_SPEED_MSK_50GB)
		fw_speed = PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB;

	req->force_link_speed = cpu_to_le16(fw_speed);
	req->flags |= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE |
				  PORT_PHY_CFG_REQ_FLAGS_RESET_PHY);
	rc = hwrm_send_message(bp, req, sizeof(*req), HWRM_CMD_TIMEOUT);
	req->flags = 0;
	req->force_link_speed = cpu_to_le16(0);
	return rc;
}
static int bnxt_hwrm_phy_loopback(struct bnxt *bp, bool enable, bool ext)
{
	struct hwrm_port_phy_cfg_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_PORT_PHY_CFG, -1, -1);

	if (enable) {
		bnxt_disable_an_for_lpbk(bp, &req);
		if (ext)
			req.lpbk = PORT_PHY_CFG_REQ_LPBK_EXTERNAL;
		else
			req.lpbk = PORT_PHY_CFG_REQ_LPBK_LOCAL;
	} else {
		req.lpbk = PORT_PHY_CFG_REQ_LPBK_NONE;
	}
	req.enables = cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK);
	return hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
}
static int bnxt_rx_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			    u32 raw_cons, int pkt_size)
{
	struct bnxt_napi *bnapi = cpr->bnapi;
	struct bnxt_rx_ring_info *rxr;
	struct bnxt_sw_rx_bd *rx_buf;
	struct rx_cmp *rxcmp;
	u16 cp_cons, cons;
	u8 *data;
	u32 len;
	int i;

	rxr = bnapi->rx_ring;
	cp_cons = RING_CMP(raw_cons);
	rxcmp = (struct rx_cmp *)
		&cpr->cp_desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
	cons = rxcmp->rx_cmp_opaque;
	rx_buf = &rxr->rx_buf_ring[cons];
	data = rx_buf->data_ptr;
	len = le32_to_cpu(rxcmp->rx_cmp_len_flags_type) >> RX_CMP_LEN_SHIFT;
	if (len != pkt_size)
		return -EIO;
	i = ETH_ALEN;
	if (!ether_addr_equal(data + i, bnapi->bp->dev->dev_addr))
		return -EIO;
	i += ETH_ALEN;
	for ( ; i < pkt_size; i++) {
		if (data[i] != (u8)(i & 0xff))
			return -EIO;
	}
	return 0;
}
static int bnxt_poll_loopback(struct bnxt *bp, struct bnxt_cp_ring_info *cpr,
			      int pkt_size)
{
	struct tx_cmp *txcmp;
	int rc = -EIO;
	u32 raw_cons;
	u32 cons;
	int i;

	raw_cons = cpr->cp_raw_cons;
	for (i = 0; i < 200; i++) {
		cons = RING_CMP(raw_cons);
		txcmp = &cpr->cp_desc_ring[CP_RING(cons)][CP_IDX(cons)];

		if (!TX_CMP_VALID(txcmp, raw_cons)) {
			udelay(5);
			continue;
		}

		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		if (TX_CMP_TYPE(txcmp) == CMP_TYPE_RX_L2_CMP) {
			rc = bnxt_rx_loopback(bp, cpr, raw_cons, pkt_size);
			raw_cons = NEXT_RAW_CMP(raw_cons);
			raw_cons = NEXT_RAW_CMP(raw_cons);
			break;
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);
	}
	cpr->cp_raw_cons = raw_cons;
	return rc;
}
static int bnxt_run_loopback(struct bnxt *bp)
{
	struct bnxt_tx_ring_info *txr = &bp->tx_ring[0];
	struct bnxt_rx_ring_info *rxr = &bp->rx_ring[0];
	struct bnxt_cp_ring_info *cpr;
	int pkt_size, i = 0;
	struct sk_buff *skb;
	dma_addr_t map;
	u8 *data;
	int rc;

	cpr = &rxr->bnapi->cp_ring;
	if (bp->flags & BNXT_FLAG_CHIP_P5)
		cpr = cpr->cp_ring_arr[BNXT_RX_HDL];
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_copy_thresh);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	data = skb_put(skb, pkt_size);
	eth_broadcast_addr(data);
	i += ETH_ALEN;
	ether_addr_copy(&data[i], bp->dev->dev_addr);
	i += ETH_ALEN;
	for ( ; i < pkt_size; i++)
		data[i] = (u8)(i & 0xff);

	map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
			     PCI_DMA_TODEVICE);
	if (dma_mapping_error(&bp->pdev->dev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}
	bnxt_xmit_bd(bp, txr, map, pkt_size);

	/* Sync BD data before updating doorbell */
	wmb();

	bnxt_db_write(bp, &txr->tx_db, txr->tx_prod);
	rc = bnxt_poll_loopback(bp, cpr, pkt_size);

	dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);
	return rc;
}
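/*
 * Illustrative layout of the loopback frame built above and verified in
 * bnxt_rx_loopback():
 *
 *	bytes  0..5   ff:ff:ff:ff:ff:ff  (broadcast destination)
 *	bytes  6..11  dev->dev_addr      (checked on receive)
 *	bytes 12..    i & 0xff pattern   (checked byte by byte)
 *
 * pkt_size is capped at rx_copy_thresh so the receive path copies the
 * whole frame into a single linear buffer.
 */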
static int bnxt_run_fw_tests(struct bnxt *bp, u8 test_mask, u8 *test_results)
{
	struct hwrm_selftest_exec_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_selftest_exec_input req = {0};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_EXEC, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	resp->test_success = 0;
	req.flags = test_mask;
	rc = _hwrm_send_message(bp, &req, sizeof(req), bp->test_info->timeout);
	*test_results = resp->test_success;
	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}
#define BNXT_DRV_TESTS			4
#define BNXT_MACLPBK_TEST_IDX	(bp->num_tests - BNXT_DRV_TESTS)
#define BNXT_PHYLPBK_TEST_IDX	(BNXT_MACLPBK_TEST_IDX + 1)
#define BNXT_EXTLPBK_TEST_IDX	(BNXT_MACLPBK_TEST_IDX + 2)
#define BNXT_IRQ_TEST_IDX	(BNXT_MACLPBK_TEST_IDX + 3)
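/*
 * Illustrative layout of the self-test result array: firmware-defined
 * tests come first, the four driver tests occupy the tail.
 *
 *	buf[0 .. num_tests - 5]     firmware tests (HWRM_SELFTEST_EXEC)
 *	buf[BNXT_MACLPBK_TEST_IDX]  MAC loopback
 *	buf[BNXT_PHYLPBK_TEST_IDX]  PHY loopback
 *	buf[BNXT_EXTLPBK_TEST_IDX]  external loopback
 *	buf[BNXT_IRQ_TEST_IDX]      interrupt test
 */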
static void bnxt_self_test(struct net_device *dev, struct ethtool_test *etest,
			   u64 *buf)
{
	struct bnxt *bp = netdev_priv(dev);
	bool do_ext_lpbk = false;
	bool offline = false;
	u8 test_results = 0;
	u8 test_mask = 0;
	int rc = 0, i;

	if (!bp->num_tests || !BNXT_SINGLE_PF(bp))
		return;
	memset(buf, 0, sizeof(u64) * bp->num_tests);
	if (!netif_running(dev)) {
		etest->flags |= ETH_TEST_FL_FAILED;
		return;
	}

	if ((etest->flags & ETH_TEST_FL_EXTERNAL_LB) &&
	    (bp->test_info->flags & BNXT_TEST_FL_EXT_LPBK))
		do_ext_lpbk = true;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		if (bp->pf.active_vfs) {
			etest->flags |= ETH_TEST_FL_FAILED;
			netdev_warn(dev, "Offline tests cannot be run with active VFs\n");
			return;
		}
		offline = true;
	}

	for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
		u8 bit_val = 1 << i;

		if (!(bp->test_info->offline_mask & bit_val))
			test_mask |= bit_val;
		else if (offline)
			test_mask |= bit_val;
	}
	if (!offline) {
		bnxt_run_fw_tests(bp, test_mask, &test_results);
	} else {
		rc = bnxt_close_nic(bp, false, false);
		if (rc)
			return;
		bnxt_run_fw_tests(bp, test_mask, &test_results);

		buf[BNXT_MACLPBK_TEST_IDX] = 1;
		bnxt_hwrm_mac_loopback(bp, true);
		msleep(250);
		rc = bnxt_half_open_nic(bp);
		if (rc) {
			bnxt_hwrm_mac_loopback(bp, false);
			etest->flags |= ETH_TEST_FL_FAILED;
			return;
		}
		if (bnxt_run_loopback(bp))
			etest->flags |= ETH_TEST_FL_FAILED;
		else
			buf[BNXT_MACLPBK_TEST_IDX] = 0;

		bnxt_hwrm_mac_loopback(bp, false);
		bnxt_hwrm_phy_loopback(bp, true, false);
		msleep(1000);
		if (bnxt_run_loopback(bp)) {
			buf[BNXT_PHYLPBK_TEST_IDX] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (do_ext_lpbk) {
			etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
			bnxt_hwrm_phy_loopback(bp, true, true);
			msleep(1000);
			if (bnxt_run_loopback(bp)) {
				buf[BNXT_EXTLPBK_TEST_IDX] = 1;
				etest->flags |= ETH_TEST_FL_FAILED;
			}
		}
		bnxt_hwrm_phy_loopback(bp, false, false);
		bnxt_half_close_nic(bp);
		rc = bnxt_open_nic(bp, false, true);
	}
	if (rc || bnxt_test_irq(bp)) {
		buf[BNXT_IRQ_TEST_IDX] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	for (i = 0; i < bp->num_tests - BNXT_DRV_TESTS; i++) {
		u8 bit_val = 1 << i;

		if ((test_mask & bit_val) && !(test_results & bit_val)) {
			buf[i] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
	}
}
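/*
 * Usage note (illustrative): online tests run without disturbing traffic;
 * offline adds the loopback tests around a close/half-open/open cycle:
 *
 *	ethtool -t eth0 online
 *	ethtool -t eth0 offline
 *	ethtool -t eth0 external_lb   # if BNXT_TEST_FL_EXT_LPBK is supported
 */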
static int bnxt_reset(struct net_device *dev, u32 *flags)
{
	struct bnxt *bp = netdev_priv(dev);
	int rc = 0;

	if (!BNXT_PF(bp)) {
		netdev_err(dev, "Reset is not supported from a VF\n");
		return -EOPNOTSUPP;
	}

	if (pci_vfs_assigned(bp->pdev) &&
	    !(bp->fw_cap & BNXT_FW_CAP_HOT_RESET)) {
		netdev_err(dev,
			   "Reset not allowed when VFs are assigned to VMs\n");
		return -EBUSY;
	}

	if (*flags == ETH_RESET_ALL) {
		/* This feature is not supported in older firmware versions */
		if (bp->hwrm_spec_code < 0x10803)
			return -EOPNOTSUPP;

		rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_CHIP);
		if (!rc) {
			netdev_info(dev, "Reset request successful.\n");
			if (!(bp->fw_cap & BNXT_FW_CAP_HOT_RESET))
				netdev_info(dev, "Reload driver to complete reset\n");
			*flags = 0;
		}
	} else if (*flags == ETH_RESET_AP) {
		/* This feature is not supported in older firmware versions */
		if (bp->hwrm_spec_code < 0x10803)
			return -EOPNOTSUPP;

		rc = bnxt_firmware_reset(dev, BNXT_FW_RESET_AP);
		if (!rc) {
			netdev_info(dev, "Reset Application Processor request successful.\n");
			*flags = 0;
		}
	} else {
		rc = -EINVAL;
	}

	return rc;
}
static int bnxt_hwrm_dbg_dma_data(struct bnxt *bp, void *msg, int msg_len,
				  struct bnxt_hwrm_dbg_dma_info *info)
{
	struct hwrm_dbg_cmn_output *cmn_resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_dbg_cmn_input *cmn_req = msg;
	__le16 *seq_ptr = msg + info->seq_off;
	u16 seq = 0, len, segs_off;
	void *resp = cmn_resp;
	dma_addr_t dma_handle;
	int rc, off = 0;
	void *dma_buf;

	dma_buf = dma_alloc_coherent(&bp->pdev->dev, info->dma_len, &dma_handle,
				     GFP_KERNEL);
	if (!dma_buf)
		return -ENOMEM;

	segs_off = offsetof(struct hwrm_dbg_coredump_list_output,
			    total_segments);
	cmn_req->host_dest_addr = cpu_to_le64(dma_handle);
	cmn_req->host_buf_len = cpu_to_le32(info->dma_len);
	mutex_lock(&bp->hwrm_cmd_lock);
	while (1) {
		*seq_ptr = cpu_to_le16(seq);
		rc = _hwrm_send_message(bp, msg, msg_len,
					HWRM_COREDUMP_TIMEOUT);
		if (rc)
			break;

		len = le16_to_cpu(*((__le16 *)(resp + info->data_len_off)));
		if (!seq &&
		    cmn_req->req_type == cpu_to_le16(HWRM_DBG_COREDUMP_LIST)) {
			info->segs = le16_to_cpu(*((__le16 *)(resp +
							      segs_off)));
			if (!info->segs) {
				rc = -EIO;
				break;
			}

			info->dest_buf_size = info->segs *
					sizeof(struct coredump_segment_record);
			info->dest_buf = kmalloc(info->dest_buf_size,
						 GFP_KERNEL);
			if (!info->dest_buf) {
				rc = -ENOMEM;
				break;
			}
		}

		if (info->dest_buf) {
			if ((info->seg_start + off + len) <=
			    BNXT_COREDUMP_BUF_LEN(info->buf_len)) {
				memcpy(info->dest_buf + off, dma_buf, len);
			} else {
				rc = -ENOBUFS;
				break;
			}
		}

		if (cmn_req->req_type ==
				cpu_to_le16(HWRM_DBG_COREDUMP_RETRIEVE))
			info->dest_buf_size += len;

		if (!(cmn_resp->flags & HWRM_DBG_CMN_FLAGS_MORE))
			break;

		seq++;
		off += len;
	}
	mutex_unlock(&bp->hwrm_cmd_lock);
	dma_free_coherent(&bp->pdev->dev, info->dma_len, dma_buf, dma_handle);
	return rc;
}
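/*
 * Illustrative note on the transfer loop above: both DBG_COREDUMP_LIST and
 * DBG_COREDUMP_RETRIEVE share the hwrm_dbg_cmn_* layout, so one helper can
 * page through firmware data: bump the request's sequence number, resend,
 * append each returned chunk at the running offset, and stop once the
 * response no longer carries HWRM_DBG_CMN_FLAGS_MORE.
 */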
static int bnxt_hwrm_dbg_coredump_list(struct bnxt *bp,
				       struct bnxt_coredump *coredump)
{
	struct hwrm_dbg_coredump_list_input req = {0};
	struct bnxt_hwrm_dbg_dma_info info = {NULL};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_LIST, -1, -1);

	info.dma_len = COREDUMP_LIST_BUF_LEN;
	info.seq_off = offsetof(struct hwrm_dbg_coredump_list_input, seq_no);
	info.data_len_off = offsetof(struct hwrm_dbg_coredump_list_output,
				     data_len);

	rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
	if (!rc) {
		coredump->data = info.dest_buf;
		coredump->data_size = info.dest_buf_size;
		coredump->total_segs = info.segs;
	}
	return rc;
}
static int bnxt_hwrm_dbg_coredump_initiate(struct bnxt *bp, u16 component_id,
					   u16 segment_id)
{
	struct hwrm_dbg_coredump_initiate_input req = {0};

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_INITIATE, -1, -1);
	req.component_id = cpu_to_le16(component_id);
	req.segment_id = cpu_to_le16(segment_id);

	return hwrm_send_message(bp, &req, sizeof(req), HWRM_COREDUMP_TIMEOUT);
}
static int bnxt_hwrm_dbg_coredump_retrieve(struct bnxt *bp, u16 component_id,
					   u16 segment_id, u32 *seg_len,
					   void *buf, u32 buf_len, u32 offset)
{
	struct hwrm_dbg_coredump_retrieve_input req = {0};
	struct bnxt_hwrm_dbg_dma_info info = {NULL};
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_DBG_COREDUMP_RETRIEVE, -1, -1);
	req.component_id = cpu_to_le16(component_id);
	req.segment_id = cpu_to_le16(segment_id);

	info.dma_len = COREDUMP_RETRIEVE_BUF_LEN;
	info.seq_off = offsetof(struct hwrm_dbg_coredump_retrieve_input,
				seq_no);
	info.data_len_off = offsetof(struct hwrm_dbg_coredump_retrieve_output,
				     data_len);
	if (buf) {
		info.dest_buf = buf + offset;
		info.buf_len = buf_len;
		info.seg_start = offset;
	}

	rc = bnxt_hwrm_dbg_dma_data(bp, &req, sizeof(req), &info);
	if (!rc)
		*seg_len = info.dest_buf_size;

	return rc;
}
static void
bnxt_fill_coredump_seg_hdr(struct bnxt *bp,
			   struct bnxt_coredump_segment_hdr *seg_hdr,
			   struct coredump_segment_record *seg_rec, u32 seg_len,
			   int status, u32 duration, u32 instance)
{
	memset(seg_hdr, 0, sizeof(*seg_hdr));
	memcpy(seg_hdr->signature, "sEgM", 4);
	if (seg_rec) {
		seg_hdr->component_id = (__force __le32)seg_rec->component_id;
		seg_hdr->segment_id = (__force __le32)seg_rec->segment_id;
		seg_hdr->low_version = seg_rec->version_low;
		seg_hdr->high_version = seg_rec->version_hi;
	} else {
		/* For hwrm_ver_get response Component id = 2
		 * and Segment id = 0
		 */
		seg_hdr->component_id = cpu_to_le32(2);
		seg_hdr->segment_id = 0;
	}
	seg_hdr->function_id = cpu_to_le16(bp->pdev->devfn);
	seg_hdr->length = cpu_to_le32(seg_len);
	seg_hdr->status = cpu_to_le32(status);
	seg_hdr->duration = cpu_to_le32(duration);
	seg_hdr->data_offset = cpu_to_le32(sizeof(*seg_hdr));
	seg_hdr->instance = cpu_to_le32(instance);
}
static void
bnxt_fill_coredump_record(struct bnxt *bp, struct bnxt_coredump_record *record,
			  time64_t start, s16 start_utc, u16 total_segs,
			  int status)
{
	time64_t end = ktime_get_real_seconds();
	u32 os_ver_major = 0, os_ver_minor = 0;
	struct tm tm;

	time64_to_tm(start, 0, &tm);
	memset(record, 0, sizeof(*record));
	memcpy(record->signature, "cOrE", 4);
	record->flags = 0;
	record->low_version = 0;
	record->high_version = 1;
	record->asic_state = 0;
	strlcpy(record->system_name, utsname()->nodename,
		sizeof(record->system_name));
	record->year = cpu_to_le16(tm.tm_year + 1900);
	record->month = cpu_to_le16(tm.tm_mon + 1);
	record->day = cpu_to_le16(tm.tm_mday);
	record->hour = cpu_to_le16(tm.tm_hour);
	record->minute = cpu_to_le16(tm.tm_min);
	record->second = cpu_to_le16(tm.tm_sec);
	record->utc_bias = cpu_to_le16(start_utc);
	strcpy(record->commandline, "ethtool -w");
	record->total_segments = cpu_to_le32(total_segs);

	sscanf(utsname()->release, "%u.%u", &os_ver_major, &os_ver_minor);
	record->os_ver_major = cpu_to_le32(os_ver_major);
	record->os_ver_minor = cpu_to_le32(os_ver_minor);

	strlcpy(record->os_name, utsname()->sysname, 32);
	time64_to_tm(end, 0, &tm);
	record->end_year = cpu_to_le16(tm.tm_year + 1900);
	record->end_month = cpu_to_le16(tm.tm_mon + 1);
	record->end_day = cpu_to_le16(tm.tm_mday);
	record->end_hour = cpu_to_le16(tm.tm_hour);
	record->end_minute = cpu_to_le16(tm.tm_min);
	record->end_second = cpu_to_le16(tm.tm_sec);
	record->end_utc_bias = cpu_to_le16(sys_tz.tz_minuteswest * 60);
	record->asic_id1 = cpu_to_le32(bp->chip_num << 16 |
				       bp->ver_resp.chip_rev << 8 |
				       bp->ver_resp.chip_metal);
	record->asic_id2 = 0;
	record->coredump_status = cpu_to_le32(status);
	record->ioctl_low_version = 0;
	record->ioctl_high_version = 0;
}
static int bnxt_get_coredump(struct bnxt *bp, void *buf, u32 *dump_len)
{
	u32 ver_get_resp_len = sizeof(struct hwrm_ver_get_output);
	u32 offset = 0, seg_hdr_len, seg_record_len, buf_len = 0;
	struct coredump_segment_record *seg_record = NULL;
	struct bnxt_coredump_segment_hdr seg_hdr;
	struct bnxt_coredump coredump = {NULL};
	time64_t start_time;
	u16 start_utc;
	int rc = 0, i;

	if (buf)
		buf_len = *dump_len;

	start_time = ktime_get_real_seconds();
	start_utc = sys_tz.tz_minuteswest * 60;
	seg_hdr_len = sizeof(seg_hdr);

	/* First segment should be hwrm_ver_get response */
	*dump_len = seg_hdr_len + ver_get_resp_len;
	if (buf) {
		bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, NULL, ver_get_resp_len,
					   0, 0, 0);
		memcpy(buf + offset, &seg_hdr, seg_hdr_len);
		offset += seg_hdr_len;
		memcpy(buf + offset, &bp->ver_resp, ver_get_resp_len);
		offset += ver_get_resp_len;
	}

	rc = bnxt_hwrm_dbg_coredump_list(bp, &coredump);
	if (rc) {
		netdev_err(bp->dev, "Failed to get coredump segment list\n");
		goto err;
	}

	*dump_len += seg_hdr_len * coredump.total_segs;

	seg_record = (struct coredump_segment_record *)coredump.data;
	seg_record_len = sizeof(*seg_record);

	for (i = 0; i < coredump.total_segs; i++) {
		u16 comp_id = le16_to_cpu(seg_record->component_id);
		u16 seg_id = le16_to_cpu(seg_record->segment_id);
		u32 duration = 0, seg_len = 0;
		unsigned long start, end;

		if (buf && ((offset + seg_hdr_len) >
			    BNXT_COREDUMP_BUF_LEN(buf_len))) {
			rc = -ENOBUFS;
			goto err;
		}

		start = jiffies;

		rc = bnxt_hwrm_dbg_coredump_initiate(bp, comp_id, seg_id);
		if (rc) {
			netdev_err(bp->dev,
				   "Failed to initiate coredump for seg = %d\n",
				   seg_record->segment_id);
			goto next_seg;
		}

		/* Write segment data into the buffer */
		rc = bnxt_hwrm_dbg_coredump_retrieve(bp, comp_id, seg_id,
						     &seg_len, buf, buf_len,
						     offset + seg_hdr_len);
		if (rc && rc == -ENOBUFS)
			goto err;
		else if (rc)
			netdev_err(bp->dev,
				   "Failed to retrieve coredump for seg = %d\n",
				   seg_record->segment_id);

next_seg:
		end = jiffies;
		duration = jiffies_to_msecs(end - start);
		bnxt_fill_coredump_seg_hdr(bp, &seg_hdr, seg_record, seg_len,
					   rc, duration, 0);

		if (buf) {
			/* Write segment header into the buffer */
			memcpy(buf + offset, &seg_hdr, seg_hdr_len);
			offset += seg_hdr_len + seg_len;
		}

		*dump_len += seg_len;
		seg_record =
			(struct coredump_segment_record *)((u8 *)seg_record +
							   seg_record_len);
	}

err:
	if (buf)
		bnxt_fill_coredump_record(bp, buf + offset, start_time,
					  start_utc, coredump.total_segs + 1,
					  rc);
	kfree(coredump.data);
	*dump_len += sizeof(struct bnxt_coredump_record);
	if (rc == -ENOBUFS)
		netdev_err(bp->dev, "Firmware returned large coredump buffer");
	return rc;
}
static int bnxt_set_dump(struct net_device *dev, struct ethtool_dump *dump)
{
	struct bnxt *bp = netdev_priv(dev);

	if (dump->flag > BNXT_DUMP_CRASH) {
		netdev_info(dev, "Supports only Live(0) and Crash(1) dumps.\n");
		return -EINVAL;
	}

	if (!IS_ENABLED(CONFIG_TEE_BNXT_FW) && dump->flag == BNXT_DUMP_CRASH) {
		netdev_info(dev, "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
		return -EOPNOTSUPP;
	}

	bp->dump_flag = dump->flag;
	return 0;
}
static int bnxt_get_dump_flag(struct net_device *dev, struct ethtool_dump *dump)
{
	struct bnxt *bp = netdev_priv(dev);

	if (bp->hwrm_spec_code < 0x10801)
		return -EOPNOTSUPP;

	dump->version = bp->ver_resp.hwrm_fw_maj_8b << 24 |
			bp->ver_resp.hwrm_fw_min_8b << 16 |
			bp->ver_resp.hwrm_fw_bld_8b << 8 |
			bp->ver_resp.hwrm_fw_rsvd_8b;

	dump->flag = bp->dump_flag;
	if (bp->dump_flag == BNXT_DUMP_CRASH)
		dump->len = BNXT_CRASH_DUMP_LEN;
	else
		bnxt_get_coredump(bp, NULL, &dump->len);
	return 0;
}
static int bnxt_get_dump_data(struct net_device *dev, struct ethtool_dump *dump,
			      void *buf)
{
	struct bnxt *bp = netdev_priv(dev);

	if (bp->hwrm_spec_code < 0x10801)
		return -EOPNOTSUPP;

	memset(buf, 0, dump->len);

	dump->flag = bp->dump_flag;
	if (dump->flag == BNXT_DUMP_CRASH) {
#ifdef CONFIG_TEE_BNXT_FW
		return tee_bnxt_copy_coredump(buf, 0, dump->len);
#endif
	} else {
		return bnxt_get_coredump(bp, buf, &dump->len);
	}

	return 0;
}
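/*
 * Usage note (illustrative): select and fetch a dump from user space with
 *
 *	ethtool -W eth0 0              # 0 = live dump, 1 = crash dump
 *	ethtool -w eth0 data dump.bin
 *
 * Crash dumps additionally require the TEE_BNXT_FW backend.
 */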
void bnxt_ethtool_init(struct bnxt *bp)
{
	struct hwrm_selftest_qlist_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_selftest_qlist_input req = {0};
	struct bnxt_test_info *test_info;
	struct net_device *dev = bp->dev;
	int i, rc;

	if (!(bp->fw_cap & BNXT_FW_CAP_PKG_VER))
		bnxt_get_pkgver(dev);

	bp->num_tests = 0;
	if (bp->hwrm_spec_code < 0x10704 || !BNXT_SINGLE_PF(bp))
		return;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_SELFTEST_QLIST, -1, -1);
	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		goto ethtool_init_exit;

	test_info = bp->test_info;
	if (!test_info)
		test_info = kzalloc(sizeof(*bp->test_info), GFP_KERNEL);
	if (!test_info)
		goto ethtool_init_exit;

	bp->test_info = test_info;
	bp->num_tests = resp->num_tests + BNXT_DRV_TESTS;
	if (bp->num_tests > BNXT_MAX_TEST)
		bp->num_tests = BNXT_MAX_TEST;

	test_info->offline_mask = resp->offline_tests;
	test_info->timeout = le16_to_cpu(resp->test_timeout);
	if (!test_info->timeout)
		test_info->timeout = HWRM_CMD_TIMEOUT;
	for (i = 0; i < bp->num_tests; i++) {
		char *str = test_info->string[i];
		char *fw_str = resp->test0_name + i * 32;

		if (i == BNXT_MACLPBK_TEST_IDX) {
			strcpy(str, "Mac loopback test (offline)");
		} else if (i == BNXT_PHYLPBK_TEST_IDX) {
			strcpy(str, "Phy loopback test (offline)");
		} else if (i == BNXT_EXTLPBK_TEST_IDX) {
			strcpy(str, "Ext loopback test (offline)");
		} else if (i == BNXT_IRQ_TEST_IDX) {
			strcpy(str, "Interrupt_test (offline)");
		} else {
			strlcpy(str, fw_str, ETH_GSTRING_LEN);
			strncat(str, " test", ETH_GSTRING_LEN - strlen(str));
			if (test_info->offline_mask & (1 << i))
				strncat(str, " (offline)",
					ETH_GSTRING_LEN - strlen(str));
			else
				strncat(str, " (online)",
					ETH_GSTRING_LEN - strlen(str));
		}
	}

ethtool_init_exit:
	mutex_unlock(&bp->hwrm_cmd_lock);
}
void bnxt_ethtool_free(struct bnxt *bp)
{
	kfree(bp->test_info);
	bp->test_info = NULL;
}
const struct ethtool_ops bnxt_ethtool_ops = {
	.get_link_ksettings	= bnxt_get_link_ksettings,
	.set_link_ksettings	= bnxt_set_link_ksettings,
	.get_pauseparam		= bnxt_get_pauseparam,
	.set_pauseparam		= bnxt_set_pauseparam,
	.get_drvinfo		= bnxt_get_drvinfo,
	.get_wol		= bnxt_get_wol,
	.set_wol		= bnxt_set_wol,
	.get_coalesce		= bnxt_get_coalesce,
	.set_coalesce		= bnxt_set_coalesce,
	.get_msglevel		= bnxt_get_msglevel,
	.set_msglevel		= bnxt_set_msglevel,
	.get_sset_count		= bnxt_get_sset_count,
	.get_strings		= bnxt_get_strings,
	.get_ethtool_stats	= bnxt_get_ethtool_stats,
	.set_ringparam		= bnxt_set_ringparam,
	.get_ringparam		= bnxt_get_ringparam,
	.get_channels		= bnxt_get_channels,
	.set_channels		= bnxt_set_channels,
	.get_rxnfc		= bnxt_get_rxnfc,
	.set_rxnfc		= bnxt_set_rxnfc,
	.get_rxfh_indir_size	= bnxt_get_rxfh_indir_size,
	.get_rxfh_key_size	= bnxt_get_rxfh_key_size,
	.get_rxfh		= bnxt_get_rxfh,
	.flash_device		= bnxt_flash_device,
	.get_eeprom_len		= bnxt_get_eeprom_len,
	.get_eeprom		= bnxt_get_eeprom,
	.set_eeprom		= bnxt_set_eeprom,
	.get_link		= bnxt_get_link,
	.get_eee		= bnxt_get_eee,
	.set_eee		= bnxt_set_eee,
	.get_module_info	= bnxt_get_module_info,
	.get_module_eeprom	= bnxt_get_module_eeprom,
	.nway_reset		= bnxt_nway_reset,
	.set_phys_id		= bnxt_set_phys_id,
	.self_test		= bnxt_self_test,
	.reset			= bnxt_reset,
	.set_dump		= bnxt_set_dump,
	.get_dump_flag		= bnxt_get_dump_flag,
	.get_dump_data		= bnxt_get_dump_data,
};