1 /* Broadcom NetXtreme-C/E network driver.
3 * Copyright (c) 2014-2016 Broadcom Corporation
4 * Copyright (c) 2016-2017 Broadcom Limited
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation.
11 #include <linux/bitops.h>
12 #include <linux/ctype.h>
13 #include <linux/stringify.h>
14 #include <linux/ethtool.h>
15 #include <linux/ethtool_netlink.h>
16 #include <linux/linkmode.h>
17 #include <linux/interrupt.h>
18 #include <linux/pci.h>
19 #include <linux/etherdevice.h>
20 #include <linux/crc32.h>
21 #include <linux/firmware.h>
22 #include <linux/utsname.h>
23 #include <linux/time.h>
24 #include <linux/ptp_clock_kernel.h>
25 #include <linux/net_tstamp.h>
26 #include <linux/timecounter.h>
27 #include <net/netlink.h>
30 #include "bnxt_hwrm.h"
34 #include "bnxt_ethtool.h"
35 #include "bnxt_nvm_defs.h" /* NVRAM content constant and structure defs */
36 #include "bnxt_fw_hdr.h" /* Firmware hdr constant and structure defs */
37 #include "bnxt_coredump.h"
39 #define BNXT_NVM_ERR_MSG(dev, extack, msg) \
42 NL_SET_ERR_MSG_MOD(extack, msg); \
43 netdev_err(dev, "%s\n", msg); \
46 static u32
bnxt_get_msglevel(struct net_device
*dev
)
48 struct bnxt
*bp
= netdev_priv(dev
);
50 return bp
->msg_enable
;
53 static void bnxt_set_msglevel(struct net_device
*dev
, u32 value
)
55 struct bnxt
*bp
= netdev_priv(dev
);
57 bp
->msg_enable
= value
;
60 static int bnxt_get_coalesce(struct net_device
*dev
,
61 struct ethtool_coalesce
*coal
,
62 struct kernel_ethtool_coalesce
*kernel_coal
,
63 struct netlink_ext_ack
*extack
)
65 struct bnxt
*bp
= netdev_priv(dev
);
66 struct bnxt_coal
*hw_coal
;
69 memset(coal
, 0, sizeof(*coal
));
71 coal
->use_adaptive_rx_coalesce
= bp
->flags
& BNXT_FLAG_DIM
;
73 hw_coal
= &bp
->rx_coal
;
74 mult
= hw_coal
->bufs_per_record
;
75 coal
->rx_coalesce_usecs
= hw_coal
->coal_ticks
;
76 coal
->rx_max_coalesced_frames
= hw_coal
->coal_bufs
/ mult
;
77 coal
->rx_coalesce_usecs_irq
= hw_coal
->coal_ticks_irq
;
78 coal
->rx_max_coalesced_frames_irq
= hw_coal
->coal_bufs_irq
/ mult
;
80 RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET
)
81 kernel_coal
->use_cqe_mode_rx
= true;
83 hw_coal
= &bp
->tx_coal
;
84 mult
= hw_coal
->bufs_per_record
;
85 coal
->tx_coalesce_usecs
= hw_coal
->coal_ticks
;
86 coal
->tx_max_coalesced_frames
= hw_coal
->coal_bufs
/ mult
;
87 coal
->tx_coalesce_usecs_irq
= hw_coal
->coal_ticks_irq
;
88 coal
->tx_max_coalesced_frames_irq
= hw_coal
->coal_bufs_irq
/ mult
;
90 RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET
)
91 kernel_coal
->use_cqe_mode_tx
= true;
93 coal
->stats_block_coalesce_usecs
= bp
->stats_coal_ticks
;
98 static int bnxt_set_coalesce(struct net_device
*dev
,
99 struct ethtool_coalesce
*coal
,
100 struct kernel_ethtool_coalesce
*kernel_coal
,
101 struct netlink_ext_ack
*extack
)
103 struct bnxt
*bp
= netdev_priv(dev
);
104 bool update_stats
= false;
105 struct bnxt_coal
*hw_coal
;
109 if (coal
->use_adaptive_rx_coalesce
) {
110 bp
->flags
|= BNXT_FLAG_DIM
;
112 if (bp
->flags
& BNXT_FLAG_DIM
) {
113 bp
->flags
&= ~(BNXT_FLAG_DIM
);
118 if ((kernel_coal
->use_cqe_mode_rx
|| kernel_coal
->use_cqe_mode_tx
) &&
119 !(bp
->coal_cap
.cmpl_params
&
120 RING_AGGINT_QCAPS_RESP_CMPL_PARAMS_TIMER_RESET
))
123 hw_coal
= &bp
->rx_coal
;
124 mult
= hw_coal
->bufs_per_record
;
125 hw_coal
->coal_ticks
= coal
->rx_coalesce_usecs
;
126 hw_coal
->coal_bufs
= coal
->rx_max_coalesced_frames
* mult
;
127 hw_coal
->coal_ticks_irq
= coal
->rx_coalesce_usecs_irq
;
128 hw_coal
->coal_bufs_irq
= coal
->rx_max_coalesced_frames_irq
* mult
;
130 ~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET
;
131 if (kernel_coal
->use_cqe_mode_rx
)
133 RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET
;
135 hw_coal
= &bp
->tx_coal
;
136 mult
= hw_coal
->bufs_per_record
;
137 hw_coal
->coal_ticks
= coal
->tx_coalesce_usecs
;
138 hw_coal
->coal_bufs
= coal
->tx_max_coalesced_frames
* mult
;
139 hw_coal
->coal_ticks_irq
= coal
->tx_coalesce_usecs_irq
;
140 hw_coal
->coal_bufs_irq
= coal
->tx_max_coalesced_frames_irq
* mult
;
142 ~RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET
;
143 if (kernel_coal
->use_cqe_mode_tx
)
145 RING_CMPL_RING_CFG_AGGINT_PARAMS_REQ_FLAGS_TIMER_RESET
;
147 if (bp
->stats_coal_ticks
!= coal
->stats_block_coalesce_usecs
) {
148 u32 stats_ticks
= coal
->stats_block_coalesce_usecs
;
150 /* Allow 0, which means disable. */
152 stats_ticks
= clamp_t(u32
, stats_ticks
,
153 BNXT_MIN_STATS_COAL_TICKS
,
154 BNXT_MAX_STATS_COAL_TICKS
);
155 stats_ticks
= rounddown(stats_ticks
, BNXT_MIN_STATS_COAL_TICKS
);
156 bp
->stats_coal_ticks
= stats_ticks
;
157 if (bp
->stats_coal_ticks
)
158 bp
->current_interval
=
159 bp
->stats_coal_ticks
* HZ
/ 1000000;
161 bp
->current_interval
= BNXT_TIMER_INTERVAL
;
166 if (test_bit(BNXT_STATE_OPEN
, &bp
->state
)) {
168 bnxt_close_nic(bp
, true, false);
169 rc
= bnxt_open_nic(bp
, true, false);
171 rc
= bnxt_hwrm_set_coal(bp
);
178 static const char * const bnxt_ring_rx_stats_str
[] = {
189 static const char * const bnxt_ring_tx_stats_str
[] = {
200 static const char * const bnxt_ring_tpa_stats_str
[] = {
207 static const char * const bnxt_ring_tpa2_stats_str
[] = {
208 "rx_tpa_eligible_pkt",
209 "rx_tpa_eligible_bytes",
216 static const char * const bnxt_rx_sw_stats_str
[] = {
222 static const char * const bnxt_cmn_sw_stats_str
[] = {
226 #define BNXT_RX_STATS_ENTRY(counter) \
227 { BNXT_RX_STATS_OFFSET(counter), __stringify(counter) }
229 #define BNXT_TX_STATS_ENTRY(counter) \
230 { BNXT_TX_STATS_OFFSET(counter), __stringify(counter) }
232 #define BNXT_RX_STATS_EXT_ENTRY(counter) \
233 { BNXT_RX_STATS_EXT_OFFSET(counter), __stringify(counter) }
235 #define BNXT_TX_STATS_EXT_ENTRY(counter) \
236 { BNXT_TX_STATS_EXT_OFFSET(counter), __stringify(counter) }
238 #define BNXT_RX_STATS_EXT_PFC_ENTRY(n) \
239 BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_duration_us), \
240 BNXT_RX_STATS_EXT_ENTRY(pfc_pri##n##_rx_transitions)
242 #define BNXT_TX_STATS_EXT_PFC_ENTRY(n) \
243 BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_duration_us), \
244 BNXT_TX_STATS_EXT_ENTRY(pfc_pri##n##_tx_transitions)
246 #define BNXT_RX_STATS_EXT_PFC_ENTRIES \
247 BNXT_RX_STATS_EXT_PFC_ENTRY(0), \
248 BNXT_RX_STATS_EXT_PFC_ENTRY(1), \
249 BNXT_RX_STATS_EXT_PFC_ENTRY(2), \
250 BNXT_RX_STATS_EXT_PFC_ENTRY(3), \
251 BNXT_RX_STATS_EXT_PFC_ENTRY(4), \
252 BNXT_RX_STATS_EXT_PFC_ENTRY(5), \
253 BNXT_RX_STATS_EXT_PFC_ENTRY(6), \
254 BNXT_RX_STATS_EXT_PFC_ENTRY(7)
256 #define BNXT_TX_STATS_EXT_PFC_ENTRIES \
257 BNXT_TX_STATS_EXT_PFC_ENTRY(0), \
258 BNXT_TX_STATS_EXT_PFC_ENTRY(1), \
259 BNXT_TX_STATS_EXT_PFC_ENTRY(2), \
260 BNXT_TX_STATS_EXT_PFC_ENTRY(3), \
261 BNXT_TX_STATS_EXT_PFC_ENTRY(4), \
262 BNXT_TX_STATS_EXT_PFC_ENTRY(5), \
263 BNXT_TX_STATS_EXT_PFC_ENTRY(6), \
264 BNXT_TX_STATS_EXT_PFC_ENTRY(7)
266 #define BNXT_RX_STATS_EXT_COS_ENTRY(n) \
267 BNXT_RX_STATS_EXT_ENTRY(rx_bytes_cos##n), \
268 BNXT_RX_STATS_EXT_ENTRY(rx_packets_cos##n)
270 #define BNXT_TX_STATS_EXT_COS_ENTRY(n) \
271 BNXT_TX_STATS_EXT_ENTRY(tx_bytes_cos##n), \
272 BNXT_TX_STATS_EXT_ENTRY(tx_packets_cos##n)
274 #define BNXT_RX_STATS_EXT_COS_ENTRIES \
275 BNXT_RX_STATS_EXT_COS_ENTRY(0), \
276 BNXT_RX_STATS_EXT_COS_ENTRY(1), \
277 BNXT_RX_STATS_EXT_COS_ENTRY(2), \
278 BNXT_RX_STATS_EXT_COS_ENTRY(3), \
279 BNXT_RX_STATS_EXT_COS_ENTRY(4), \
280 BNXT_RX_STATS_EXT_COS_ENTRY(5), \
281 BNXT_RX_STATS_EXT_COS_ENTRY(6), \
282 BNXT_RX_STATS_EXT_COS_ENTRY(7) \
284 #define BNXT_TX_STATS_EXT_COS_ENTRIES \
285 BNXT_TX_STATS_EXT_COS_ENTRY(0), \
286 BNXT_TX_STATS_EXT_COS_ENTRY(1), \
287 BNXT_TX_STATS_EXT_COS_ENTRY(2), \
288 BNXT_TX_STATS_EXT_COS_ENTRY(3), \
289 BNXT_TX_STATS_EXT_COS_ENTRY(4), \
290 BNXT_TX_STATS_EXT_COS_ENTRY(5), \
291 BNXT_TX_STATS_EXT_COS_ENTRY(6), \
292 BNXT_TX_STATS_EXT_COS_ENTRY(7) \
294 #define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(n) \
295 BNXT_RX_STATS_EXT_ENTRY(rx_discard_bytes_cos##n), \
296 BNXT_RX_STATS_EXT_ENTRY(rx_discard_packets_cos##n)
298 #define BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES \
299 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(0), \
300 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(1), \
301 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(2), \
302 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(3), \
303 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(4), \
304 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(5), \
305 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(6), \
306 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRY(7)
308 #define BNXT_RX_STATS_PRI_ENTRY(counter, n) \
309 { BNXT_RX_STATS_EXT_OFFSET(counter##_cos0), \
310 __stringify(counter##_pri##n) }
312 #define BNXT_TX_STATS_PRI_ENTRY(counter, n) \
313 { BNXT_TX_STATS_EXT_OFFSET(counter##_cos0), \
314 __stringify(counter##_pri##n) }
316 #define BNXT_RX_STATS_PRI_ENTRIES(counter) \
317 BNXT_RX_STATS_PRI_ENTRY(counter, 0), \
318 BNXT_RX_STATS_PRI_ENTRY(counter, 1), \
319 BNXT_RX_STATS_PRI_ENTRY(counter, 2), \
320 BNXT_RX_STATS_PRI_ENTRY(counter, 3), \
321 BNXT_RX_STATS_PRI_ENTRY(counter, 4), \
322 BNXT_RX_STATS_PRI_ENTRY(counter, 5), \
323 BNXT_RX_STATS_PRI_ENTRY(counter, 6), \
324 BNXT_RX_STATS_PRI_ENTRY(counter, 7)
326 #define BNXT_TX_STATS_PRI_ENTRIES(counter) \
327 BNXT_TX_STATS_PRI_ENTRY(counter, 0), \
328 BNXT_TX_STATS_PRI_ENTRY(counter, 1), \
329 BNXT_TX_STATS_PRI_ENTRY(counter, 2), \
330 BNXT_TX_STATS_PRI_ENTRY(counter, 3), \
331 BNXT_TX_STATS_PRI_ENTRY(counter, 4), \
332 BNXT_TX_STATS_PRI_ENTRY(counter, 5), \
333 BNXT_TX_STATS_PRI_ENTRY(counter, 6), \
334 BNXT_TX_STATS_PRI_ENTRY(counter, 7)
342 static const char *const bnxt_ring_err_stats_arr
[] = {
343 "rx_total_l4_csum_errors",
345 "rx_total_buf_errors",
346 "rx_total_oom_discards",
347 "rx_total_netpoll_discards",
348 "rx_total_ring_discards",
350 "tx_total_ring_discards",
354 #define NUM_RING_RX_SW_STATS ARRAY_SIZE(bnxt_rx_sw_stats_str)
355 #define NUM_RING_CMN_SW_STATS ARRAY_SIZE(bnxt_cmn_sw_stats_str)
356 #define NUM_RING_RX_HW_STATS ARRAY_SIZE(bnxt_ring_rx_stats_str)
357 #define NUM_RING_TX_HW_STATS ARRAY_SIZE(bnxt_ring_tx_stats_str)
359 static const struct {
361 char string
[ETH_GSTRING_LEN
];
362 } bnxt_port_stats_arr
[] = {
363 BNXT_RX_STATS_ENTRY(rx_64b_frames
),
364 BNXT_RX_STATS_ENTRY(rx_65b_127b_frames
),
365 BNXT_RX_STATS_ENTRY(rx_128b_255b_frames
),
366 BNXT_RX_STATS_ENTRY(rx_256b_511b_frames
),
367 BNXT_RX_STATS_ENTRY(rx_512b_1023b_frames
),
368 BNXT_RX_STATS_ENTRY(rx_1024b_1518b_frames
),
369 BNXT_RX_STATS_ENTRY(rx_good_vlan_frames
),
370 BNXT_RX_STATS_ENTRY(rx_1519b_2047b_frames
),
371 BNXT_RX_STATS_ENTRY(rx_2048b_4095b_frames
),
372 BNXT_RX_STATS_ENTRY(rx_4096b_9216b_frames
),
373 BNXT_RX_STATS_ENTRY(rx_9217b_16383b_frames
),
374 BNXT_RX_STATS_ENTRY(rx_total_frames
),
375 BNXT_RX_STATS_ENTRY(rx_ucast_frames
),
376 BNXT_RX_STATS_ENTRY(rx_mcast_frames
),
377 BNXT_RX_STATS_ENTRY(rx_bcast_frames
),
378 BNXT_RX_STATS_ENTRY(rx_fcs_err_frames
),
379 BNXT_RX_STATS_ENTRY(rx_ctrl_frames
),
380 BNXT_RX_STATS_ENTRY(rx_pause_frames
),
381 BNXT_RX_STATS_ENTRY(rx_pfc_frames
),
382 BNXT_RX_STATS_ENTRY(rx_align_err_frames
),
383 BNXT_RX_STATS_ENTRY(rx_ovrsz_frames
),
384 BNXT_RX_STATS_ENTRY(rx_jbr_frames
),
385 BNXT_RX_STATS_ENTRY(rx_mtu_err_frames
),
386 BNXT_RX_STATS_ENTRY(rx_tagged_frames
),
387 BNXT_RX_STATS_ENTRY(rx_double_tagged_frames
),
388 BNXT_RX_STATS_ENTRY(rx_good_frames
),
389 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri0
),
390 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri1
),
391 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri2
),
392 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri3
),
393 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri4
),
394 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri5
),
395 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri6
),
396 BNXT_RX_STATS_ENTRY(rx_pfc_ena_frames_pri7
),
397 BNXT_RX_STATS_ENTRY(rx_undrsz_frames
),
398 BNXT_RX_STATS_ENTRY(rx_eee_lpi_events
),
399 BNXT_RX_STATS_ENTRY(rx_eee_lpi_duration
),
400 BNXT_RX_STATS_ENTRY(rx_bytes
),
401 BNXT_RX_STATS_ENTRY(rx_runt_bytes
),
402 BNXT_RX_STATS_ENTRY(rx_runt_frames
),
403 BNXT_RX_STATS_ENTRY(rx_stat_discard
),
404 BNXT_RX_STATS_ENTRY(rx_stat_err
),
406 BNXT_TX_STATS_ENTRY(tx_64b_frames
),
407 BNXT_TX_STATS_ENTRY(tx_65b_127b_frames
),
408 BNXT_TX_STATS_ENTRY(tx_128b_255b_frames
),
409 BNXT_TX_STATS_ENTRY(tx_256b_511b_frames
),
410 BNXT_TX_STATS_ENTRY(tx_512b_1023b_frames
),
411 BNXT_TX_STATS_ENTRY(tx_1024b_1518b_frames
),
412 BNXT_TX_STATS_ENTRY(tx_good_vlan_frames
),
413 BNXT_TX_STATS_ENTRY(tx_1519b_2047b_frames
),
414 BNXT_TX_STATS_ENTRY(tx_2048b_4095b_frames
),
415 BNXT_TX_STATS_ENTRY(tx_4096b_9216b_frames
),
416 BNXT_TX_STATS_ENTRY(tx_9217b_16383b_frames
),
417 BNXT_TX_STATS_ENTRY(tx_good_frames
),
418 BNXT_TX_STATS_ENTRY(tx_total_frames
),
419 BNXT_TX_STATS_ENTRY(tx_ucast_frames
),
420 BNXT_TX_STATS_ENTRY(tx_mcast_frames
),
421 BNXT_TX_STATS_ENTRY(tx_bcast_frames
),
422 BNXT_TX_STATS_ENTRY(tx_pause_frames
),
423 BNXT_TX_STATS_ENTRY(tx_pfc_frames
),
424 BNXT_TX_STATS_ENTRY(tx_jabber_frames
),
425 BNXT_TX_STATS_ENTRY(tx_fcs_err_frames
),
426 BNXT_TX_STATS_ENTRY(tx_err
),
427 BNXT_TX_STATS_ENTRY(tx_fifo_underruns
),
428 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri0
),
429 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri1
),
430 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri2
),
431 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri3
),
432 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri4
),
433 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri5
),
434 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri6
),
435 BNXT_TX_STATS_ENTRY(tx_pfc_ena_frames_pri7
),
436 BNXT_TX_STATS_ENTRY(tx_eee_lpi_events
),
437 BNXT_TX_STATS_ENTRY(tx_eee_lpi_duration
),
438 BNXT_TX_STATS_ENTRY(tx_total_collisions
),
439 BNXT_TX_STATS_ENTRY(tx_bytes
),
440 BNXT_TX_STATS_ENTRY(tx_xthol_frames
),
441 BNXT_TX_STATS_ENTRY(tx_stat_discard
),
442 BNXT_TX_STATS_ENTRY(tx_stat_error
),
445 static const struct {
447 char string
[ETH_GSTRING_LEN
];
448 } bnxt_port_stats_ext_arr
[] = {
449 BNXT_RX_STATS_EXT_ENTRY(link_down_events
),
450 BNXT_RX_STATS_EXT_ENTRY(continuous_pause_events
),
451 BNXT_RX_STATS_EXT_ENTRY(resume_pause_events
),
452 BNXT_RX_STATS_EXT_ENTRY(continuous_roce_pause_events
),
453 BNXT_RX_STATS_EXT_ENTRY(resume_roce_pause_events
),
454 BNXT_RX_STATS_EXT_COS_ENTRIES
,
455 BNXT_RX_STATS_EXT_PFC_ENTRIES
,
456 BNXT_RX_STATS_EXT_ENTRY(rx_bits
),
457 BNXT_RX_STATS_EXT_ENTRY(rx_buffer_passed_threshold
),
458 BNXT_RX_STATS_EXT_ENTRY(rx_pcs_symbol_err
),
459 BNXT_RX_STATS_EXT_ENTRY(rx_corrected_bits
),
460 BNXT_RX_STATS_EXT_DISCARD_COS_ENTRIES
,
461 BNXT_RX_STATS_EXT_ENTRY(rx_fec_corrected_blocks
),
462 BNXT_RX_STATS_EXT_ENTRY(rx_fec_uncorrectable_blocks
),
463 BNXT_RX_STATS_EXT_ENTRY(rx_filter_miss
),
466 static const struct {
468 char string
[ETH_GSTRING_LEN
];
469 } bnxt_tx_port_stats_ext_arr
[] = {
470 BNXT_TX_STATS_EXT_COS_ENTRIES
,
471 BNXT_TX_STATS_EXT_PFC_ENTRIES
,
474 static const struct {
476 char string
[ETH_GSTRING_LEN
];
477 } bnxt_rx_bytes_pri_arr
[] = {
478 BNXT_RX_STATS_PRI_ENTRIES(rx_bytes
),
481 static const struct {
483 char string
[ETH_GSTRING_LEN
];
484 } bnxt_rx_pkts_pri_arr
[] = {
485 BNXT_RX_STATS_PRI_ENTRIES(rx_packets
),
488 static const struct {
490 char string
[ETH_GSTRING_LEN
];
491 } bnxt_tx_bytes_pri_arr
[] = {
492 BNXT_TX_STATS_PRI_ENTRIES(tx_bytes
),
495 static const struct {
497 char string
[ETH_GSTRING_LEN
];
498 } bnxt_tx_pkts_pri_arr
[] = {
499 BNXT_TX_STATS_PRI_ENTRIES(tx_packets
),
502 #define BNXT_NUM_RING_ERR_STATS ARRAY_SIZE(bnxt_ring_err_stats_arr)
503 #define BNXT_NUM_PORT_STATS ARRAY_SIZE(bnxt_port_stats_arr)
504 #define BNXT_NUM_STATS_PRI \
505 (ARRAY_SIZE(bnxt_rx_bytes_pri_arr) + \
506 ARRAY_SIZE(bnxt_rx_pkts_pri_arr) + \
507 ARRAY_SIZE(bnxt_tx_bytes_pri_arr) + \
508 ARRAY_SIZE(bnxt_tx_pkts_pri_arr))
510 static int bnxt_get_num_tpa_ring_stats(struct bnxt
*bp
)
512 if (BNXT_SUPPORTS_TPA(bp
)) {
513 if (bp
->max_tpa_v2
) {
514 if (BNXT_CHIP_P5(bp
))
515 return BNXT_NUM_TPA_RING_STATS_P5
;
516 return BNXT_NUM_TPA_RING_STATS_P7
;
518 return BNXT_NUM_TPA_RING_STATS
;
523 static int bnxt_get_num_ring_stats(struct bnxt
*bp
)
527 rx
= NUM_RING_RX_HW_STATS
+ NUM_RING_RX_SW_STATS
+
528 bnxt_get_num_tpa_ring_stats(bp
);
529 tx
= NUM_RING_TX_HW_STATS
;
530 cmn
= NUM_RING_CMN_SW_STATS
;
531 return rx
* bp
->rx_nr_rings
+
532 tx
* (bp
->tx_nr_rings_xdp
+ bp
->tx_nr_rings_per_tc
) +
533 cmn
* bp
->cp_nr_rings
;
536 static int bnxt_get_num_stats(struct bnxt
*bp
)
538 int num_stats
= bnxt_get_num_ring_stats(bp
);
541 num_stats
+= BNXT_NUM_RING_ERR_STATS
;
543 if (bp
->flags
& BNXT_FLAG_PORT_STATS
)
544 num_stats
+= BNXT_NUM_PORT_STATS
;
546 if (bp
->flags
& BNXT_FLAG_PORT_STATS_EXT
) {
547 len
= min_t(int, bp
->fw_rx_stats_ext_size
,
548 ARRAY_SIZE(bnxt_port_stats_ext_arr
));
550 len
= min_t(int, bp
->fw_tx_stats_ext_size
,
551 ARRAY_SIZE(bnxt_tx_port_stats_ext_arr
));
553 if (bp
->pri2cos_valid
)
554 num_stats
+= BNXT_NUM_STATS_PRI
;
560 static int bnxt_get_sset_count(struct net_device
*dev
, int sset
)
562 struct bnxt
*bp
= netdev_priv(dev
);
566 return bnxt_get_num_stats(bp
);
570 return bp
->num_tests
;
576 static bool is_rx_ring(struct bnxt
*bp
, int ring_num
)
578 return ring_num
< bp
->rx_nr_rings
;
581 static bool is_tx_ring(struct bnxt
*bp
, int ring_num
)
585 if (!(bp
->flags
& BNXT_FLAG_SHARED_RINGS
))
586 tx_base
= bp
->rx_nr_rings
;
588 if (ring_num
>= tx_base
&& ring_num
< (tx_base
+ bp
->tx_nr_rings
))
593 static void bnxt_get_ethtool_stats(struct net_device
*dev
,
594 struct ethtool_stats
*stats
, u64
*buf
)
596 struct bnxt_total_ring_err_stats ring_err_stats
= {0};
597 struct bnxt
*bp
= netdev_priv(dev
);
603 j
+= bnxt_get_num_ring_stats(bp
);
604 goto skip_ring_stats
;
607 tpa_stats
= bnxt_get_num_tpa_ring_stats(bp
);
608 for (i
= 0; i
< bp
->cp_nr_rings
; i
++) {
609 struct bnxt_napi
*bnapi
= bp
->bnapi
[i
];
610 struct bnxt_cp_ring_info
*cpr
= &bnapi
->cp_ring
;
611 u64
*sw_stats
= cpr
->stats
.sw_stats
;
615 if (is_rx_ring(bp
, i
)) {
616 for (k
= 0; k
< NUM_RING_RX_HW_STATS
; j
++, k
++)
617 buf
[j
] = sw_stats
[k
];
619 if (is_tx_ring(bp
, i
)) {
620 k
= NUM_RING_RX_HW_STATS
;
621 for (; k
< NUM_RING_RX_HW_STATS
+ NUM_RING_TX_HW_STATS
;
623 buf
[j
] = sw_stats
[k
];
625 if (!tpa_stats
|| !is_rx_ring(bp
, i
))
626 goto skip_tpa_ring_stats
;
628 k
= NUM_RING_RX_HW_STATS
+ NUM_RING_TX_HW_STATS
;
629 for (; k
< NUM_RING_RX_HW_STATS
+ NUM_RING_TX_HW_STATS
+
631 buf
[j
] = sw_stats
[k
];
634 sw
= (u64
*)&cpr
->sw_stats
->rx
;
635 if (is_rx_ring(bp
, i
)) {
636 for (k
= 0; k
< NUM_RING_RX_SW_STATS
; j
++, k
++)
640 sw
= (u64
*)&cpr
->sw_stats
->cmn
;
641 for (k
= 0; k
< NUM_RING_CMN_SW_STATS
; j
++, k
++)
645 bnxt_get_ring_err_stats(bp
, &ring_err_stats
);
648 curr
= &ring_err_stats
.rx_total_l4_csum_errors
;
649 prev
= &bp
->ring_err_stats_prev
.rx_total_l4_csum_errors
;
650 for (i
= 0; i
< BNXT_NUM_RING_ERR_STATS
; i
++, j
++, curr
++, prev
++)
651 buf
[j
] = *curr
+ *prev
;
653 if (bp
->flags
& BNXT_FLAG_PORT_STATS
) {
654 u64
*port_stats
= bp
->port_stats
.sw_stats
;
656 for (i
= 0; i
< BNXT_NUM_PORT_STATS
; i
++, j
++)
657 buf
[j
] = *(port_stats
+ bnxt_port_stats_arr
[i
].offset
);
659 if (bp
->flags
& BNXT_FLAG_PORT_STATS_EXT
) {
660 u64
*rx_port_stats_ext
= bp
->rx_port_stats_ext
.sw_stats
;
661 u64
*tx_port_stats_ext
= bp
->tx_port_stats_ext
.sw_stats
;
664 len
= min_t(u32
, bp
->fw_rx_stats_ext_size
,
665 ARRAY_SIZE(bnxt_port_stats_ext_arr
));
666 for (i
= 0; i
< len
; i
++, j
++) {
667 buf
[j
] = *(rx_port_stats_ext
+
668 bnxt_port_stats_ext_arr
[i
].offset
);
670 len
= min_t(u32
, bp
->fw_tx_stats_ext_size
,
671 ARRAY_SIZE(bnxt_tx_port_stats_ext_arr
));
672 for (i
= 0; i
< len
; i
++, j
++) {
673 buf
[j
] = *(tx_port_stats_ext
+
674 bnxt_tx_port_stats_ext_arr
[i
].offset
);
676 if (bp
->pri2cos_valid
) {
677 for (i
= 0; i
< 8; i
++, j
++) {
678 long n
= bnxt_rx_bytes_pri_arr
[i
].base_off
+
681 buf
[j
] = *(rx_port_stats_ext
+ n
);
683 for (i
= 0; i
< 8; i
++, j
++) {
684 long n
= bnxt_rx_pkts_pri_arr
[i
].base_off
+
687 buf
[j
] = *(rx_port_stats_ext
+ n
);
689 for (i
= 0; i
< 8; i
++, j
++) {
690 long n
= bnxt_tx_bytes_pri_arr
[i
].base_off
+
693 buf
[j
] = *(tx_port_stats_ext
+ n
);
695 for (i
= 0; i
< 8; i
++, j
++) {
696 long n
= bnxt_tx_pkts_pri_arr
[i
].base_off
+
699 buf
[j
] = *(tx_port_stats_ext
+ n
);
705 static void bnxt_get_strings(struct net_device
*dev
, u32 stringset
, u8
*buf
)
707 struct bnxt
*bp
= netdev_priv(dev
);
713 for (i
= 0; i
< bp
->cp_nr_rings
; i
++) {
714 if (is_rx_ring(bp
, i
))
715 for (j
= 0; j
< NUM_RING_RX_HW_STATS
; j
++) {
716 str
= bnxt_ring_rx_stats_str
[j
];
717 ethtool_sprintf(&buf
, "[%d]: %s", i
,
720 if (is_tx_ring(bp
, i
))
721 for (j
= 0; j
< NUM_RING_TX_HW_STATS
; j
++) {
722 str
= bnxt_ring_tx_stats_str
[j
];
723 ethtool_sprintf(&buf
, "[%d]: %s", i
,
726 num_str
= bnxt_get_num_tpa_ring_stats(bp
);
727 if (!num_str
|| !is_rx_ring(bp
, i
))
731 for (j
= 0; j
< num_str
; j
++) {
732 str
= bnxt_ring_tpa2_stats_str
[j
];
733 ethtool_sprintf(&buf
, "[%d]: %s", i
,
737 for (j
= 0; j
< num_str
; j
++) {
738 str
= bnxt_ring_tpa_stats_str
[j
];
739 ethtool_sprintf(&buf
, "[%d]: %s", i
,
743 if (is_rx_ring(bp
, i
))
744 for (j
= 0; j
< NUM_RING_RX_SW_STATS
; j
++) {
745 str
= bnxt_rx_sw_stats_str
[j
];
746 ethtool_sprintf(&buf
, "[%d]: %s", i
,
749 for (j
= 0; j
< NUM_RING_CMN_SW_STATS
; j
++) {
750 str
= bnxt_cmn_sw_stats_str
[j
];
751 ethtool_sprintf(&buf
, "[%d]: %s", i
, str
);
754 for (i
= 0; i
< BNXT_NUM_RING_ERR_STATS
; i
++)
755 ethtool_puts(&buf
, bnxt_ring_err_stats_arr
[i
]);
757 if (bp
->flags
& BNXT_FLAG_PORT_STATS
)
758 for (i
= 0; i
< BNXT_NUM_PORT_STATS
; i
++) {
759 str
= bnxt_port_stats_arr
[i
].string
;
760 ethtool_puts(&buf
, str
);
763 if (bp
->flags
& BNXT_FLAG_PORT_STATS_EXT
) {
766 len
= min_t(u32
, bp
->fw_rx_stats_ext_size
,
767 ARRAY_SIZE(bnxt_port_stats_ext_arr
));
768 for (i
= 0; i
< len
; i
++) {
769 str
= bnxt_port_stats_ext_arr
[i
].string
;
770 ethtool_puts(&buf
, str
);
773 len
= min_t(u32
, bp
->fw_tx_stats_ext_size
,
774 ARRAY_SIZE(bnxt_tx_port_stats_ext_arr
));
775 for (i
= 0; i
< len
; i
++) {
776 str
= bnxt_tx_port_stats_ext_arr
[i
].string
;
777 ethtool_puts(&buf
, str
);
780 if (bp
->pri2cos_valid
) {
781 for (i
= 0; i
< 8; i
++) {
782 str
= bnxt_rx_bytes_pri_arr
[i
].string
;
783 ethtool_puts(&buf
, str
);
786 for (i
= 0; i
< 8; i
++) {
787 str
= bnxt_rx_pkts_pri_arr
[i
].string
;
788 ethtool_puts(&buf
, str
);
791 for (i
= 0; i
< 8; i
++) {
792 str
= bnxt_tx_bytes_pri_arr
[i
].string
;
793 ethtool_puts(&buf
, str
);
796 for (i
= 0; i
< 8; i
++) {
797 str
= bnxt_tx_pkts_pri_arr
[i
].string
;
798 ethtool_puts(&buf
, str
);
805 for (i
= 0; i
< bp
->num_tests
; i
++)
806 ethtool_puts(&buf
, bp
->test_info
->string
[i
]);
809 netdev_err(bp
->dev
, "bnxt_get_strings invalid request %x\n",
815 static void bnxt_get_ringparam(struct net_device
*dev
,
816 struct ethtool_ringparam
*ering
,
817 struct kernel_ethtool_ringparam
*kernel_ering
,
818 struct netlink_ext_ack
*extack
)
820 struct bnxt
*bp
= netdev_priv(dev
);
822 if (bp
->flags
& BNXT_FLAG_AGG_RINGS
) {
823 ering
->rx_max_pending
= BNXT_MAX_RX_DESC_CNT_JUM_ENA
;
824 ering
->rx_jumbo_max_pending
= BNXT_MAX_RX_JUM_DESC_CNT
;
825 kernel_ering
->tcp_data_split
= ETHTOOL_TCP_DATA_SPLIT_ENABLED
;
827 ering
->rx_max_pending
= BNXT_MAX_RX_DESC_CNT
;
828 ering
->rx_jumbo_max_pending
= 0;
829 kernel_ering
->tcp_data_split
= ETHTOOL_TCP_DATA_SPLIT_DISABLED
;
831 ering
->tx_max_pending
= BNXT_MAX_TX_DESC_CNT
;
833 ering
->rx_pending
= bp
->rx_ring_size
;
834 ering
->rx_jumbo_pending
= bp
->rx_agg_ring_size
;
835 ering
->tx_pending
= bp
->tx_ring_size
;
838 static int bnxt_set_ringparam(struct net_device
*dev
,
839 struct ethtool_ringparam
*ering
,
840 struct kernel_ethtool_ringparam
*kernel_ering
,
841 struct netlink_ext_ack
*extack
)
843 struct bnxt
*bp
= netdev_priv(dev
);
845 if ((ering
->rx_pending
> BNXT_MAX_RX_DESC_CNT
) ||
846 (ering
->tx_pending
> BNXT_MAX_TX_DESC_CNT
) ||
847 (ering
->tx_pending
< BNXT_MIN_TX_DESC_CNT
))
850 if (netif_running(dev
))
851 bnxt_close_nic(bp
, false, false);
853 bp
->rx_ring_size
= ering
->rx_pending
;
854 bp
->tx_ring_size
= ering
->tx_pending
;
855 bnxt_set_ring_params(bp
);
857 if (netif_running(dev
))
858 return bnxt_open_nic(bp
, false, false);
863 static void bnxt_get_channels(struct net_device
*dev
,
864 struct ethtool_channels
*channel
)
866 struct bnxt
*bp
= netdev_priv(dev
);
867 struct bnxt_hw_resc
*hw_resc
= &bp
->hw_resc
;
868 int max_rx_rings
, max_tx_rings
, tcs
;
869 int max_tx_sch_inputs
, tx_grps
;
871 /* Get the most up-to-date max_tx_sch_inputs. */
872 if (netif_running(dev
) && BNXT_NEW_RM(bp
))
873 bnxt_hwrm_func_resc_qcaps(bp
, false);
874 max_tx_sch_inputs
= hw_resc
->max_tx_sch_inputs
;
876 bnxt_get_max_rings(bp
, &max_rx_rings
, &max_tx_rings
, true);
877 if (max_tx_sch_inputs
)
878 max_tx_rings
= min_t(int, max_tx_rings
, max_tx_sch_inputs
);
881 tx_grps
= max(tcs
, 1);
882 if (bp
->tx_nr_rings_xdp
)
884 max_tx_rings
/= tx_grps
;
885 channel
->max_combined
= min_t(int, max_rx_rings
, max_tx_rings
);
887 if (bnxt_get_max_rings(bp
, &max_rx_rings
, &max_tx_rings
, false)) {
891 if (max_tx_sch_inputs
)
892 max_tx_rings
= min_t(int, max_tx_rings
, max_tx_sch_inputs
);
897 channel
->max_rx
= max_rx_rings
;
898 channel
->max_tx
= max_tx_rings
;
899 channel
->max_other
= 0;
900 if (bp
->flags
& BNXT_FLAG_SHARED_RINGS
) {
901 channel
->combined_count
= bp
->rx_nr_rings
;
902 if (BNXT_CHIP_TYPE_NITRO_A0(bp
))
903 channel
->combined_count
--;
905 if (!BNXT_CHIP_TYPE_NITRO_A0(bp
)) {
906 channel
->rx_count
= bp
->rx_nr_rings
;
907 channel
->tx_count
= bp
->tx_nr_rings_per_tc
;
912 static int bnxt_set_channels(struct net_device
*dev
,
913 struct ethtool_channels
*channel
)
915 struct bnxt
*bp
= netdev_priv(dev
);
916 int req_tx_rings
, req_rx_rings
, tcs
;
922 if (channel
->other_count
)
925 if (!channel
->combined_count
&&
926 (!channel
->rx_count
|| !channel
->tx_count
))
929 if (channel
->combined_count
&&
930 (channel
->rx_count
|| channel
->tx_count
))
933 if (BNXT_CHIP_TYPE_NITRO_A0(bp
) && (channel
->rx_count
||
937 if (channel
->combined_count
)
942 req_tx_rings
= sh
? channel
->combined_count
: channel
->tx_count
;
943 req_rx_rings
= sh
? channel
->combined_count
: channel
->rx_count
;
944 if (bp
->tx_nr_rings_xdp
) {
946 netdev_err(dev
, "Only combined mode supported when XDP is enabled.\n");
949 tx_xdp
= req_rx_rings
;
952 if (bnxt_get_nr_rss_ctxs(bp
, req_rx_rings
) !=
953 bnxt_get_nr_rss_ctxs(bp
, bp
->rx_nr_rings
) &&
954 netif_is_rxfh_configured(dev
)) {
955 netdev_warn(dev
, "RSS table size change required, RSS table entries must be default to proceed\n");
959 rc
= bnxt_check_rings(bp
, req_tx_rings
, req_rx_rings
, sh
, tcs
, tx_xdp
);
961 netdev_warn(dev
, "Unable to allocate the requested rings\n");
965 if (netif_running(dev
)) {
967 /* TODO CHIMP_FW: Send message to all VF's
971 bnxt_close_nic(bp
, true, false);
975 bp
->flags
|= BNXT_FLAG_SHARED_RINGS
;
976 bp
->rx_nr_rings
= channel
->combined_count
;
977 bp
->tx_nr_rings_per_tc
= channel
->combined_count
;
979 bp
->flags
&= ~BNXT_FLAG_SHARED_RINGS
;
980 bp
->rx_nr_rings
= channel
->rx_count
;
981 bp
->tx_nr_rings_per_tc
= channel
->tx_count
;
983 bp
->tx_nr_rings_xdp
= tx_xdp
;
984 bp
->tx_nr_rings
= bp
->tx_nr_rings_per_tc
+ tx_xdp
;
986 bp
->tx_nr_rings
= bp
->tx_nr_rings_per_tc
* tcs
+ tx_xdp
;
988 tx_cp
= bnxt_num_tx_to_cp(bp
, bp
->tx_nr_rings
);
989 bp
->cp_nr_rings
= sh
? max_t(int, tx_cp
, bp
->rx_nr_rings
) :
990 tx_cp
+ bp
->rx_nr_rings
;
992 /* After changing number of rx channels, update NTUPLE feature. */
993 netdev_update_features(dev
);
994 if (netif_running(dev
)) {
995 rc
= bnxt_open_nic(bp
, true, false);
996 if ((!rc
) && BNXT_PF(bp
)) {
997 /* TODO CHIMP_FW: Send message to all VF's
1002 rc
= bnxt_reserve_rings(bp
, true);
1008 static u32
bnxt_get_all_fltr_ids_rcu(struct bnxt
*bp
, struct hlist_head tbl
[],
1009 int tbl_size
, u32
*ids
, u32 start
,
1016 for (i
= 0; i
< tbl_size
; i
++) {
1017 struct hlist_head
*head
;
1018 struct bnxt_filter_base
*fltr
;
1021 hlist_for_each_entry_rcu(fltr
, head
, hash
) {
1023 test_bit(BNXT_FLTR_FW_DELETED
, &fltr
->state
))
1025 ids
[j
++] = fltr
->sw_id
;
1033 static struct bnxt_filter_base
*bnxt_get_one_fltr_rcu(struct bnxt
*bp
,
1034 struct hlist_head tbl
[],
1035 int tbl_size
, u32 id
)
1039 for (i
= 0; i
< tbl_size
; i
++) {
1040 struct hlist_head
*head
;
1041 struct bnxt_filter_base
*fltr
;
1044 hlist_for_each_entry_rcu(fltr
, head
, hash
) {
1045 if (fltr
->flags
&& fltr
->sw_id
== id
)
1052 static int bnxt_grxclsrlall(struct bnxt
*bp
, struct ethtool_rxnfc
*cmd
,
1057 cmd
->data
= bp
->ntp_fltr_count
;
1059 count
= bnxt_get_all_fltr_ids_rcu(bp
, bp
->l2_fltr_hash_tbl
,
1060 BNXT_L2_FLTR_HASH_SIZE
, rule_locs
, 0,
1062 cmd
->rule_cnt
= bnxt_get_all_fltr_ids_rcu(bp
, bp
->ntp_fltr_hash_tbl
,
1063 BNXT_NTP_FLTR_HASH_SIZE
,
1071 static int bnxt_grxclsrule(struct bnxt
*bp
, struct ethtool_rxnfc
*cmd
)
1073 struct ethtool_rx_flow_spec
*fs
=
1074 (struct ethtool_rx_flow_spec
*)&cmd
->fs
;
1075 struct bnxt_filter_base
*fltr_base
;
1076 struct bnxt_ntuple_filter
*fltr
;
1077 struct bnxt_flow_masks
*fmasks
;
1078 struct flow_keys
*fkeys
;
1081 if (fs
->location
>= bp
->max_fltr
)
1085 fltr_base
= bnxt_get_one_fltr_rcu(bp
, bp
->l2_fltr_hash_tbl
,
1086 BNXT_L2_FLTR_HASH_SIZE
,
1089 struct ethhdr
*h_ether
= &fs
->h_u
.ether_spec
;
1090 struct ethhdr
*m_ether
= &fs
->m_u
.ether_spec
;
1091 struct bnxt_l2_filter
*l2_fltr
;
1092 struct bnxt_l2_key
*l2_key
;
1094 l2_fltr
= container_of(fltr_base
, struct bnxt_l2_filter
, base
);
1095 l2_key
= &l2_fltr
->l2_key
;
1096 fs
->flow_type
= ETHER_FLOW
;
1097 ether_addr_copy(h_ether
->h_dest
, l2_key
->dst_mac_addr
);
1098 eth_broadcast_addr(m_ether
->h_dest
);
1100 struct ethtool_flow_ext
*m_ext
= &fs
->m_ext
;
1101 struct ethtool_flow_ext
*h_ext
= &fs
->h_ext
;
1103 fs
->flow_type
|= FLOW_EXT
;
1104 m_ext
->vlan_tci
= htons(0xfff);
1105 h_ext
->vlan_tci
= htons(l2_key
->vlan
);
1107 if (fltr_base
->flags
& BNXT_ACT_RING_DST
)
1108 fs
->ring_cookie
= fltr_base
->rxq
;
1109 if (fltr_base
->flags
& BNXT_ACT_FUNC_DST
)
1110 fs
->ring_cookie
= (u64
)(fltr_base
->vf_idx
+ 1) <<
1111 ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF
;
1115 fltr_base
= bnxt_get_one_fltr_rcu(bp
, bp
->ntp_fltr_hash_tbl
,
1116 BNXT_NTP_FLTR_HASH_SIZE
,
1122 fltr
= container_of(fltr_base
, struct bnxt_ntuple_filter
, base
);
1124 fkeys
= &fltr
->fkeys
;
1125 fmasks
= &fltr
->fmasks
;
1126 if (fkeys
->basic
.n_proto
== htons(ETH_P_IP
)) {
1127 if (fkeys
->basic
.ip_proto
== BNXT_IP_PROTO_WILDCARD
) {
1128 fs
->flow_type
= IP_USER_FLOW
;
1129 fs
->h_u
.usr_ip4_spec
.ip_ver
= ETH_RX_NFC_IP4
;
1130 fs
->h_u
.usr_ip4_spec
.proto
= BNXT_IP_PROTO_WILDCARD
;
1131 fs
->m_u
.usr_ip4_spec
.proto
= 0;
1132 } else if (fkeys
->basic
.ip_proto
== IPPROTO_ICMP
) {
1133 fs
->flow_type
= IP_USER_FLOW
;
1134 fs
->h_u
.usr_ip4_spec
.ip_ver
= ETH_RX_NFC_IP4
;
1135 fs
->h_u
.usr_ip4_spec
.proto
= IPPROTO_ICMP
;
1136 fs
->m_u
.usr_ip4_spec
.proto
= BNXT_IP_PROTO_FULL_MASK
;
1137 } else if (fkeys
->basic
.ip_proto
== IPPROTO_TCP
) {
1138 fs
->flow_type
= TCP_V4_FLOW
;
1139 } else if (fkeys
->basic
.ip_proto
== IPPROTO_UDP
) {
1140 fs
->flow_type
= UDP_V4_FLOW
;
1145 fs
->h_u
.tcp_ip4_spec
.ip4src
= fkeys
->addrs
.v4addrs
.src
;
1146 fs
->m_u
.tcp_ip4_spec
.ip4src
= fmasks
->addrs
.v4addrs
.src
;
1147 fs
->h_u
.tcp_ip4_spec
.ip4dst
= fkeys
->addrs
.v4addrs
.dst
;
1148 fs
->m_u
.tcp_ip4_spec
.ip4dst
= fmasks
->addrs
.v4addrs
.dst
;
1149 if (fs
->flow_type
== TCP_V4_FLOW
||
1150 fs
->flow_type
== UDP_V4_FLOW
) {
1151 fs
->h_u
.tcp_ip4_spec
.psrc
= fkeys
->ports
.src
;
1152 fs
->m_u
.tcp_ip4_spec
.psrc
= fmasks
->ports
.src
;
1153 fs
->h_u
.tcp_ip4_spec
.pdst
= fkeys
->ports
.dst
;
1154 fs
->m_u
.tcp_ip4_spec
.pdst
= fmasks
->ports
.dst
;
1157 if (fkeys
->basic
.ip_proto
== BNXT_IP_PROTO_WILDCARD
) {
1158 fs
->flow_type
= IPV6_USER_FLOW
;
1159 fs
->h_u
.usr_ip6_spec
.l4_proto
= BNXT_IP_PROTO_WILDCARD
;
1160 fs
->m_u
.usr_ip6_spec
.l4_proto
= 0;
1161 } else if (fkeys
->basic
.ip_proto
== IPPROTO_ICMPV6
) {
1162 fs
->flow_type
= IPV6_USER_FLOW
;
1163 fs
->h_u
.usr_ip6_spec
.l4_proto
= IPPROTO_ICMPV6
;
1164 fs
->m_u
.usr_ip6_spec
.l4_proto
= BNXT_IP_PROTO_FULL_MASK
;
1165 } else if (fkeys
->basic
.ip_proto
== IPPROTO_TCP
) {
1166 fs
->flow_type
= TCP_V6_FLOW
;
1167 } else if (fkeys
->basic
.ip_proto
== IPPROTO_UDP
) {
1168 fs
->flow_type
= UDP_V6_FLOW
;
1173 *(struct in6_addr
*)&fs
->h_u
.tcp_ip6_spec
.ip6src
[0] =
1174 fkeys
->addrs
.v6addrs
.src
;
1175 *(struct in6_addr
*)&fs
->m_u
.tcp_ip6_spec
.ip6src
[0] =
1176 fmasks
->addrs
.v6addrs
.src
;
1177 *(struct in6_addr
*)&fs
->h_u
.tcp_ip6_spec
.ip6dst
[0] =
1178 fkeys
->addrs
.v6addrs
.dst
;
1179 *(struct in6_addr
*)&fs
->m_u
.tcp_ip6_spec
.ip6dst
[0] =
1180 fmasks
->addrs
.v6addrs
.dst
;
1181 if (fs
->flow_type
== TCP_V6_FLOW
||
1182 fs
->flow_type
== UDP_V6_FLOW
) {
1183 fs
->h_u
.tcp_ip6_spec
.psrc
= fkeys
->ports
.src
;
1184 fs
->m_u
.tcp_ip6_spec
.psrc
= fmasks
->ports
.src
;
1185 fs
->h_u
.tcp_ip6_spec
.pdst
= fkeys
->ports
.dst
;
1186 fs
->m_u
.tcp_ip6_spec
.pdst
= fmasks
->ports
.dst
;
1190 if (fltr
->base
.flags
& BNXT_ACT_DROP
)
1191 fs
->ring_cookie
= RX_CLS_FLOW_DISC
;
1193 fs
->ring_cookie
= fltr
->base
.rxq
;
1202 static struct bnxt_rss_ctx
*bnxt_get_rss_ctx_from_index(struct bnxt
*bp
,
1205 struct ethtool_rxfh_context
*ctx
;
1207 ctx
= xa_load(&bp
->dev
->ethtool
->rss_ctx
, index
);
1210 return ethtool_rxfh_context_priv(ctx
);
1213 static int bnxt_alloc_vnic_rss_table(struct bnxt
*bp
,
1214 struct bnxt_vnic_info
*vnic
)
1216 int size
= L1_CACHE_ALIGN(BNXT_MAX_RSS_TABLE_SIZE_P5
);
1218 vnic
->rss_table_size
= size
+ HW_HASH_KEY_SIZE
;
1219 vnic
->rss_table
= dma_alloc_coherent(&bp
->pdev
->dev
,
1220 vnic
->rss_table_size
,
1221 &vnic
->rss_table_dma_addr
,
1223 if (!vnic
->rss_table
)
1226 vnic
->rss_hash_key
= ((void *)vnic
->rss_table
) + size
;
1227 vnic
->rss_hash_key_dma_addr
= vnic
->rss_table_dma_addr
+ size
;
1231 static int bnxt_add_l2_cls_rule(struct bnxt
*bp
,
1232 struct ethtool_rx_flow_spec
*fs
)
1234 u32 ring
= ethtool_get_flow_spec_ring(fs
->ring_cookie
);
1235 u8 vf
= ethtool_get_flow_spec_ring_vf(fs
->ring_cookie
);
1236 struct ethhdr
*h_ether
= &fs
->h_u
.ether_spec
;
1237 struct ethhdr
*m_ether
= &fs
->m_u
.ether_spec
;
1238 struct bnxt_l2_filter
*fltr
;
1239 struct bnxt_l2_key key
;
1244 if (BNXT_CHIP_P5_PLUS(bp
))
1247 if (!is_broadcast_ether_addr(m_ether
->h_dest
))
1249 ether_addr_copy(key
.dst_mac_addr
, h_ether
->h_dest
);
1251 if (fs
->flow_type
& FLOW_EXT
) {
1252 struct ethtool_flow_ext
*m_ext
= &fs
->m_ext
;
1253 struct ethtool_flow_ext
*h_ext
= &fs
->h_ext
;
1255 if (m_ext
->vlan_tci
!= htons(0xfff) || !h_ext
->vlan_tci
)
1257 key
.vlan
= ntohs(h_ext
->vlan_tci
);
1261 flags
= BNXT_ACT_FUNC_DST
;
1265 flags
= BNXT_ACT_RING_DST
;
1266 vnic_id
= bp
->vnic_info
[ring
+ 1].fw_vnic_id
;
1268 fltr
= bnxt_alloc_new_l2_filter(bp
, &key
, flags
);
1270 return PTR_ERR(fltr
);
1272 fltr
->base
.fw_vnic_id
= vnic_id
;
1273 fltr
->base
.rxq
= ring
;
1274 fltr
->base
.vf_idx
= vf
;
1275 rc
= bnxt_hwrm_l2_filter_alloc(bp
, fltr
);
1277 bnxt_del_l2_filter(bp
, fltr
);
1279 fs
->location
= fltr
->base
.sw_id
;
1283 static bool bnxt_verify_ntuple_ip4_flow(struct ethtool_usrip4_spec
*ip_spec
,
1284 struct ethtool_usrip4_spec
*ip_mask
)
1286 u8 mproto
= ip_mask
->proto
;
1287 u8 sproto
= ip_spec
->proto
;
1289 if (ip_mask
->l4_4_bytes
|| ip_mask
->tos
||
1290 ip_spec
->ip_ver
!= ETH_RX_NFC_IP4
||
1291 (mproto
&& (mproto
!= BNXT_IP_PROTO_FULL_MASK
|| sproto
!= IPPROTO_ICMP
)))
1296 static bool bnxt_verify_ntuple_ip6_flow(struct ethtool_usrip6_spec
*ip_spec
,
1297 struct ethtool_usrip6_spec
*ip_mask
)
1299 u8 mproto
= ip_mask
->l4_proto
;
1300 u8 sproto
= ip_spec
->l4_proto
;
1302 if (ip_mask
->l4_4_bytes
|| ip_mask
->tclass
||
1303 (mproto
&& (mproto
!= BNXT_IP_PROTO_FULL_MASK
|| sproto
!= IPPROTO_ICMPV6
)))
1308 static int bnxt_add_ntuple_cls_rule(struct bnxt
*bp
,
1309 struct ethtool_rxnfc
*cmd
)
1311 struct ethtool_rx_flow_spec
*fs
= &cmd
->fs
;
1312 struct bnxt_ntuple_filter
*new_fltr
, *fltr
;
1313 u32 flow_type
= fs
->flow_type
& 0xff;
1314 struct bnxt_l2_filter
*l2_fltr
;
1315 struct bnxt_flow_masks
*fmasks
;
1316 struct flow_keys
*fkeys
;
1324 vf
= ethtool_get_flow_spec_ring_vf(fs
->ring_cookie
);
1325 ring
= ethtool_get_flow_spec_ring(fs
->ring_cookie
);
1326 if ((fs
->flow_type
& (FLOW_MAC_EXT
| FLOW_EXT
)) || vf
)
1329 if (flow_type
== IP_USER_FLOW
) {
1330 if (!bnxt_verify_ntuple_ip4_flow(&fs
->h_u
.usr_ip4_spec
,
1331 &fs
->m_u
.usr_ip4_spec
))
1335 if (flow_type
== IPV6_USER_FLOW
) {
1336 if (!bnxt_verify_ntuple_ip6_flow(&fs
->h_u
.usr_ip6_spec
,
1337 &fs
->m_u
.usr_ip6_spec
))
1341 new_fltr
= kzalloc(sizeof(*new_fltr
), GFP_KERNEL
);
1345 l2_fltr
= bp
->vnic_info
[BNXT_VNIC_DEFAULT
].l2_filters
[0];
1346 atomic_inc(&l2_fltr
->refcnt
);
1347 new_fltr
->l2_fltr
= l2_fltr
;
1348 fmasks
= &new_fltr
->fmasks
;
1349 fkeys
= &new_fltr
->fkeys
;
1352 switch (flow_type
) {
1353 case IP_USER_FLOW
: {
1354 struct ethtool_usrip4_spec
*ip_spec
= &fs
->h_u
.usr_ip4_spec
;
1355 struct ethtool_usrip4_spec
*ip_mask
= &fs
->m_u
.usr_ip4_spec
;
1357 fkeys
->basic
.ip_proto
= ip_mask
->proto
? ip_spec
->proto
1358 : BNXT_IP_PROTO_WILDCARD
;
1359 fkeys
->basic
.n_proto
= htons(ETH_P_IP
);
1360 fkeys
->addrs
.v4addrs
.src
= ip_spec
->ip4src
;
1361 fmasks
->addrs
.v4addrs
.src
= ip_mask
->ip4src
;
1362 fkeys
->addrs
.v4addrs
.dst
= ip_spec
->ip4dst
;
1363 fmasks
->addrs
.v4addrs
.dst
= ip_mask
->ip4dst
;
1368 struct ethtool_tcpip4_spec
*ip_spec
= &fs
->h_u
.tcp_ip4_spec
;
1369 struct ethtool_tcpip4_spec
*ip_mask
= &fs
->m_u
.tcp_ip4_spec
;
1371 fkeys
->basic
.ip_proto
= IPPROTO_TCP
;
1372 if (flow_type
== UDP_V4_FLOW
)
1373 fkeys
->basic
.ip_proto
= IPPROTO_UDP
;
1374 fkeys
->basic
.n_proto
= htons(ETH_P_IP
);
1375 fkeys
->addrs
.v4addrs
.src
= ip_spec
->ip4src
;
1376 fmasks
->addrs
.v4addrs
.src
= ip_mask
->ip4src
;
1377 fkeys
->addrs
.v4addrs
.dst
= ip_spec
->ip4dst
;
1378 fmasks
->addrs
.v4addrs
.dst
= ip_mask
->ip4dst
;
1379 fkeys
->ports
.src
= ip_spec
->psrc
;
1380 fmasks
->ports
.src
= ip_mask
->psrc
;
1381 fkeys
->ports
.dst
= ip_spec
->pdst
;
1382 fmasks
->ports
.dst
= ip_mask
->pdst
;
1385 case IPV6_USER_FLOW
: {
1386 struct ethtool_usrip6_spec
*ip_spec
= &fs
->h_u
.usr_ip6_spec
;
1387 struct ethtool_usrip6_spec
*ip_mask
= &fs
->m_u
.usr_ip6_spec
;
1389 fkeys
->basic
.ip_proto
= ip_mask
->l4_proto
? ip_spec
->l4_proto
1390 : BNXT_IP_PROTO_WILDCARD
;
1391 fkeys
->basic
.n_proto
= htons(ETH_P_IPV6
);
1392 fkeys
->addrs
.v6addrs
.src
= *(struct in6_addr
*)&ip_spec
->ip6src
;
1393 fmasks
->addrs
.v6addrs
.src
= *(struct in6_addr
*)&ip_mask
->ip6src
;
1394 fkeys
->addrs
.v6addrs
.dst
= *(struct in6_addr
*)&ip_spec
->ip6dst
;
1395 fmasks
->addrs
.v6addrs
.dst
= *(struct in6_addr
*)&ip_mask
->ip6dst
;
1400 struct ethtool_tcpip6_spec
*ip_spec
= &fs
->h_u
.tcp_ip6_spec
;
1401 struct ethtool_tcpip6_spec
*ip_mask
= &fs
->m_u
.tcp_ip6_spec
;
1403 fkeys
->basic
.ip_proto
= IPPROTO_TCP
;
1404 if (flow_type
== UDP_V6_FLOW
)
1405 fkeys
->basic
.ip_proto
= IPPROTO_UDP
;
1406 fkeys
->basic
.n_proto
= htons(ETH_P_IPV6
);
1408 fkeys
->addrs
.v6addrs
.src
= *(struct in6_addr
*)&ip_spec
->ip6src
;
1409 fmasks
->addrs
.v6addrs
.src
= *(struct in6_addr
*)&ip_mask
->ip6src
;
1410 fkeys
->addrs
.v6addrs
.dst
= *(struct in6_addr
*)&ip_spec
->ip6dst
;
1411 fmasks
->addrs
.v6addrs
.dst
= *(struct in6_addr
*)&ip_mask
->ip6dst
;
1412 fkeys
->ports
.src
= ip_spec
->psrc
;
1413 fmasks
->ports
.src
= ip_mask
->psrc
;
1414 fkeys
->ports
.dst
= ip_spec
->pdst
;
1415 fmasks
->ports
.dst
= ip_mask
->pdst
;
1422 if (!memcmp(&BNXT_FLOW_MASK_NONE
, fmasks
, sizeof(*fmasks
)))
1425 idx
= bnxt_get_ntp_filter_idx(bp
, fkeys
, NULL
);
1427 fltr
= bnxt_lookup_ntp_filter_from_idx(bp
, new_fltr
, idx
);
1435 new_fltr
->base
.flags
= BNXT_ACT_NO_AGING
;
1436 if (fs
->flow_type
& FLOW_RSS
) {
1437 struct bnxt_rss_ctx
*rss_ctx
;
1439 new_fltr
->base
.fw_vnic_id
= 0;
1440 new_fltr
->base
.flags
|= BNXT_ACT_RSS_CTX
;
1441 rss_ctx
= bnxt_get_rss_ctx_from_index(bp
, cmd
->rss_context
);
1443 new_fltr
->base
.fw_vnic_id
= rss_ctx
->index
;
1449 if (fs
->ring_cookie
== RX_CLS_FLOW_DISC
)
1450 new_fltr
->base
.flags
|= BNXT_ACT_DROP
;
1452 new_fltr
->base
.rxq
= ring
;
1453 __set_bit(BNXT_FLTR_VALID
, &new_fltr
->base
.state
);
1454 rc
= bnxt_insert_ntp_filter(bp
, new_fltr
, idx
);
1456 rc
= bnxt_hwrm_cfa_ntuple_filter_alloc(bp
, new_fltr
);
1458 bnxt_del_ntp_filter(bp
, new_fltr
);
1461 fs
->location
= new_fltr
->base
.sw_id
;
1466 atomic_dec(&l2_fltr
->refcnt
);
1471 static int bnxt_srxclsrlins(struct bnxt
*bp
, struct ethtool_rxnfc
*cmd
)
1473 struct ethtool_rx_flow_spec
*fs
= &cmd
->fs
;
1474 u32 ring
, flow_type
;
1478 if (!netif_running(bp
->dev
))
1480 if (!(bp
->flags
& BNXT_FLAG_RFS
))
1482 if (fs
->location
!= RX_CLS_LOC_ANY
)
1485 flow_type
= fs
->flow_type
;
1486 if ((flow_type
== IP_USER_FLOW
||
1487 flow_type
== IPV6_USER_FLOW
) &&
1488 !(bp
->fw_cap
& BNXT_FW_CAP_CFA_NTUPLE_RX_EXT_IP_PROTO
))
1490 if (flow_type
& FLOW_MAC_EXT
)
1492 flow_type
&= ~FLOW_EXT
;
1494 if (fs
->ring_cookie
== RX_CLS_FLOW_DISC
&& flow_type
!= ETHER_FLOW
)
1495 return bnxt_add_ntuple_cls_rule(bp
, cmd
);
1497 ring
= ethtool_get_flow_spec_ring(fs
->ring_cookie
);
1498 vf
= ethtool_get_flow_spec_ring_vf(fs
->ring_cookie
);
1499 if (BNXT_VF(bp
) && vf
)
1501 if (BNXT_PF(bp
) && vf
> bp
->pf
.active_vfs
)
1503 if (!vf
&& ring
>= bp
->rx_nr_rings
)
1506 if (flow_type
== ETHER_FLOW
)
1507 rc
= bnxt_add_l2_cls_rule(bp
, fs
);
1509 rc
= bnxt_add_ntuple_cls_rule(bp
, cmd
);
1513 static int bnxt_srxclsrldel(struct bnxt
*bp
, struct ethtool_rxnfc
*cmd
)
1515 struct ethtool_rx_flow_spec
*fs
= &cmd
->fs
;
1516 struct bnxt_filter_base
*fltr_base
;
1517 struct bnxt_ntuple_filter
*fltr
;
1518 u32 id
= fs
->location
;
1521 fltr_base
= bnxt_get_one_fltr_rcu(bp
, bp
->l2_fltr_hash_tbl
,
1522 BNXT_L2_FLTR_HASH_SIZE
, id
);
1524 struct bnxt_l2_filter
*l2_fltr
;
1526 l2_fltr
= container_of(fltr_base
, struct bnxt_l2_filter
, base
);
1528 bnxt_hwrm_l2_filter_free(bp
, l2_fltr
);
1529 bnxt_del_l2_filter(bp
, l2_fltr
);
1532 fltr_base
= bnxt_get_one_fltr_rcu(bp
, bp
->ntp_fltr_hash_tbl
,
1533 BNXT_NTP_FLTR_HASH_SIZE
, id
);
1539 fltr
= container_of(fltr_base
, struct bnxt_ntuple_filter
, base
);
1540 if (!(fltr
->base
.flags
& BNXT_ACT_NO_AGING
)) {
1545 bnxt_hwrm_cfa_ntuple_filter_free(bp
, fltr
);
1546 bnxt_del_ntp_filter(bp
, fltr
);
1550 static u64
get_ethtool_ipv4_rss(struct bnxt
*bp
)
1552 if (bp
->rss_hash_cfg
& VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4
)
1553 return RXH_IP_SRC
| RXH_IP_DST
;
1557 static u64
get_ethtool_ipv6_rss(struct bnxt
*bp
)
1559 if (bp
->rss_hash_cfg
& VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6
)
1560 return RXH_IP_SRC
| RXH_IP_DST
;
1564 static int bnxt_grxfh(struct bnxt
*bp
, struct ethtool_rxnfc
*cmd
)
1567 switch (cmd
->flow_type
) {
1569 if (bp
->rss_hash_cfg
& VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4
)
1570 cmd
->data
|= RXH_IP_SRC
| RXH_IP_DST
|
1571 RXH_L4_B_0_1
| RXH_L4_B_2_3
;
1572 cmd
->data
|= get_ethtool_ipv4_rss(bp
);
1575 if (bp
->rss_hash_cfg
& VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4
)
1576 cmd
->data
|= RXH_IP_SRC
| RXH_IP_DST
|
1577 RXH_L4_B_0_1
| RXH_L4_B_2_3
;
1579 case AH_ESP_V4_FLOW
:
1580 if (bp
->rss_hash_cfg
&
1581 (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4
|
1582 VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4
))
1583 cmd
->data
|= RXH_IP_SRC
| RXH_IP_DST
|
1584 RXH_L4_B_0_1
| RXH_L4_B_2_3
;
1590 cmd
->data
|= get_ethtool_ipv4_rss(bp
);
1594 if (bp
->rss_hash_cfg
& VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6
)
1595 cmd
->data
|= RXH_IP_SRC
| RXH_IP_DST
|
1596 RXH_L4_B_0_1
| RXH_L4_B_2_3
;
1597 cmd
->data
|= get_ethtool_ipv6_rss(bp
);
1600 if (bp
->rss_hash_cfg
& VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6
)
1601 cmd
->data
|= RXH_IP_SRC
| RXH_IP_DST
|
1602 RXH_L4_B_0_1
| RXH_L4_B_2_3
;
1604 case AH_ESP_V6_FLOW
:
1605 if (bp
->rss_hash_cfg
&
1606 (VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6
|
1607 VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6
))
1608 cmd
->data
|= RXH_IP_SRC
| RXH_IP_DST
|
1609 RXH_L4_B_0_1
| RXH_L4_B_2_3
;
1615 cmd
->data
|= get_ethtool_ipv6_rss(bp
);
1621 #define RXH_4TUPLE (RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3)
1622 #define RXH_2TUPLE (RXH_IP_SRC | RXH_IP_DST)
1624 static int bnxt_srxfh(struct bnxt
*bp
, struct ethtool_rxnfc
*cmd
)
1626 u32 rss_hash_cfg
= bp
->rss_hash_cfg
;
1629 if (cmd
->data
== RXH_4TUPLE
)
1631 else if (cmd
->data
== RXH_2TUPLE
)
1633 else if (!cmd
->data
)
1638 if (cmd
->flow_type
== TCP_V4_FLOW
) {
1639 rss_hash_cfg
&= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4
;
1641 rss_hash_cfg
|= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV4
;
1642 } else if (cmd
->flow_type
== UDP_V4_FLOW
) {
1643 if (tuple
== 4 && !(bp
->rss_cap
& BNXT_RSS_CAP_UDP_RSS_CAP
))
1645 rss_hash_cfg
&= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4
;
1647 rss_hash_cfg
|= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV4
;
1648 } else if (cmd
->flow_type
== TCP_V6_FLOW
) {
1649 rss_hash_cfg
&= ~VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6
;
1651 rss_hash_cfg
|= VNIC_RSS_CFG_REQ_HASH_TYPE_TCP_IPV6
;
1652 } else if (cmd
->flow_type
== UDP_V6_FLOW
) {
1653 if (tuple
== 4 && !(bp
->rss_cap
& BNXT_RSS_CAP_UDP_RSS_CAP
))
1655 rss_hash_cfg
&= ~VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6
;
1657 rss_hash_cfg
|= VNIC_RSS_CFG_REQ_HASH_TYPE_UDP_IPV6
;
1658 } else if (cmd
->flow_type
== AH_ESP_V4_FLOW
) {
1659 if (tuple
== 4 && (!(bp
->rss_cap
& BNXT_RSS_CAP_AH_V4_RSS_CAP
) ||
1660 !(bp
->rss_cap
& BNXT_RSS_CAP_ESP_V4_RSS_CAP
)))
1662 rss_hash_cfg
&= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4
|
1663 VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4
);
1665 rss_hash_cfg
|= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV4
|
1666 VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV4
;
1667 } else if (cmd
->flow_type
== AH_ESP_V6_FLOW
) {
1668 if (tuple
== 4 && (!(bp
->rss_cap
& BNXT_RSS_CAP_AH_V6_RSS_CAP
) ||
1669 !(bp
->rss_cap
& BNXT_RSS_CAP_ESP_V6_RSS_CAP
)))
1671 rss_hash_cfg
&= ~(VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6
|
1672 VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6
);
1674 rss_hash_cfg
|= VNIC_RSS_CFG_REQ_HASH_TYPE_AH_SPI_IPV6
|
1675 VNIC_RSS_CFG_REQ_HASH_TYPE_ESP_SPI_IPV6
;
1676 } else if (tuple
== 4) {
1680 switch (cmd
->flow_type
) {
1684 case AH_ESP_V4_FLOW
:
1689 rss_hash_cfg
|= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4
;
1691 rss_hash_cfg
&= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV4
;
1697 case AH_ESP_V6_FLOW
:
1702 rss_hash_cfg
|= VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6
;
1704 rss_hash_cfg
&= ~VNIC_RSS_CFG_REQ_HASH_TYPE_IPV6
;
1708 if (bp
->rss_hash_cfg
== rss_hash_cfg
)
1711 if (bp
->rss_cap
& BNXT_RSS_CAP_RSS_HASH_TYPE_DELTA
)
1712 bp
->rss_hash_delta
= bp
->rss_hash_cfg
^ rss_hash_cfg
;
1713 bp
->rss_hash_cfg
= rss_hash_cfg
;
1714 if (netif_running(bp
->dev
)) {
1715 bnxt_close_nic(bp
, false, false);
1716 rc
= bnxt_open_nic(bp
, false, false);
1721 static int bnxt_get_rxnfc(struct net_device
*dev
, struct ethtool_rxnfc
*cmd
,
1724 struct bnxt
*bp
= netdev_priv(dev
);
1728 case ETHTOOL_GRXRINGS
:
1729 cmd
->data
= bp
->rx_nr_rings
;
1732 case ETHTOOL_GRXCLSRLCNT
:
1733 cmd
->rule_cnt
= bp
->ntp_fltr_count
;
1734 cmd
->data
= bp
->max_fltr
| RX_CLS_LOC_SPECIAL
;
1737 case ETHTOOL_GRXCLSRLALL
:
1738 rc
= bnxt_grxclsrlall(bp
, cmd
, (u32
*)rule_locs
);
1741 case ETHTOOL_GRXCLSRULE
:
1742 rc
= bnxt_grxclsrule(bp
, cmd
);
1746 rc
= bnxt_grxfh(bp
, cmd
);
1757 static int bnxt_set_rxnfc(struct net_device
*dev
, struct ethtool_rxnfc
*cmd
)
1759 struct bnxt
*bp
= netdev_priv(dev
);
1764 rc
= bnxt_srxfh(bp
, cmd
);
1767 case ETHTOOL_SRXCLSRLINS
:
1768 rc
= bnxt_srxclsrlins(bp
, cmd
);
1771 case ETHTOOL_SRXCLSRLDEL
:
1772 rc
= bnxt_srxclsrldel(bp
, cmd
);
1782 u32
bnxt_get_rxfh_indir_size(struct net_device
*dev
)
1784 struct bnxt
*bp
= netdev_priv(dev
);
1786 if (bp
->flags
& BNXT_FLAG_CHIP_P5_PLUS
)
1787 return bnxt_get_nr_rss_ctxs(bp
, bp
->rx_nr_rings
) *
1788 BNXT_RSS_TABLE_ENTRIES_P5
;
1789 return HW_HASH_INDEX_SIZE
;
1792 static u32
bnxt_get_rxfh_key_size(struct net_device
*dev
)
1794 return HW_HASH_KEY_SIZE
;
1797 static int bnxt_get_rxfh(struct net_device
*dev
,
1798 struct ethtool_rxfh_param
*rxfh
)
1800 struct bnxt_rss_ctx
*rss_ctx
= NULL
;
1801 struct bnxt
*bp
= netdev_priv(dev
);
1802 u32
*indir_tbl
= bp
->rss_indir_tbl
;
1803 struct bnxt_vnic_info
*vnic
;
1806 rxfh
->hfunc
= ETH_RSS_HASH_TOP
;
1811 vnic
= &bp
->vnic_info
[BNXT_VNIC_DEFAULT
];
1812 if (rxfh
->rss_context
) {
1813 struct ethtool_rxfh_context
*ctx
;
1815 ctx
= xa_load(&bp
->dev
->ethtool
->rss_ctx
, rxfh
->rss_context
);
1818 indir_tbl
= ethtool_rxfh_context_indir(ctx
);
1819 rss_ctx
= ethtool_rxfh_context_priv(ctx
);
1820 vnic
= &rss_ctx
->vnic
;
1823 if (rxfh
->indir
&& indir_tbl
) {
1824 tbl_size
= bnxt_get_rxfh_indir_size(dev
);
1825 for (i
= 0; i
< tbl_size
; i
++)
1826 rxfh
->indir
[i
] = indir_tbl
[i
];
1829 if (rxfh
->key
&& vnic
->rss_hash_key
)
1830 memcpy(rxfh
->key
, vnic
->rss_hash_key
, HW_HASH_KEY_SIZE
);
1835 static void bnxt_modify_rss(struct bnxt
*bp
, struct ethtool_rxfh_context
*ctx
,
1836 struct bnxt_rss_ctx
*rss_ctx
,
1837 const struct ethtool_rxfh_param
*rxfh
)
1841 memcpy(rss_ctx
->vnic
.rss_hash_key
, rxfh
->key
,
1844 memcpy(bp
->rss_hash_key
, rxfh
->key
, HW_HASH_KEY_SIZE
);
1845 bp
->rss_hash_key_updated
= true;
1849 u32 i
, pad
, tbl_size
= bnxt_get_rxfh_indir_size(bp
->dev
);
1850 u32
*indir_tbl
= bp
->rss_indir_tbl
;
1853 indir_tbl
= ethtool_rxfh_context_indir(ctx
);
1854 for (i
= 0; i
< tbl_size
; i
++)
1855 indir_tbl
[i
] = rxfh
->indir
[i
];
1856 pad
= bp
->rss_indir_tbl_entries
- tbl_size
;
1858 memset(&indir_tbl
[i
], 0, pad
* sizeof(*indir_tbl
));
1862 static int bnxt_rxfh_context_check(struct bnxt
*bp
,
1863 const struct ethtool_rxfh_param
*rxfh
,
1864 struct netlink_ext_ack
*extack
)
1866 if (rxfh
->hfunc
&& rxfh
->hfunc
!= ETH_RSS_HASH_TOP
) {
1867 NL_SET_ERR_MSG_MOD(extack
, "RSS hash function not supported");
1871 if (!BNXT_SUPPORTS_MULTI_RSS_CTX(bp
)) {
1872 NL_SET_ERR_MSG_MOD(extack
, "RSS contexts not supported");
1876 if (!netif_running(bp
->dev
)) {
1877 NL_SET_ERR_MSG_MOD(extack
, "Unable to set RSS contexts when interface is down");
1884 static int bnxt_create_rxfh_context(struct net_device
*dev
,
1885 struct ethtool_rxfh_context
*ctx
,
1886 const struct ethtool_rxfh_param
*rxfh
,
1887 struct netlink_ext_ack
*extack
)
1889 struct bnxt
*bp
= netdev_priv(dev
);
1890 struct bnxt_rss_ctx
*rss_ctx
;
1891 struct bnxt_vnic_info
*vnic
;
1894 rc
= bnxt_rxfh_context_check(bp
, rxfh
, extack
);
1898 if (bp
->num_rss_ctx
>= BNXT_MAX_ETH_RSS_CTX
) {
1899 NL_SET_ERR_MSG_FMT_MOD(extack
, "Out of RSS contexts, maximum %u",
1900 BNXT_MAX_ETH_RSS_CTX
);
1904 if (!bnxt_rfs_capable(bp
, true)) {
1905 NL_SET_ERR_MSG_MOD(extack
, "Out hardware resources");
1909 rss_ctx
= ethtool_rxfh_context_priv(ctx
);
1913 vnic
= &rss_ctx
->vnic
;
1914 vnic
->rss_ctx
= ctx
;
1915 vnic
->flags
|= BNXT_VNIC_RSSCTX_FLAG
;
1916 vnic
->vnic_id
= BNXT_VNIC_ID_INVALID
;
1917 rc
= bnxt_alloc_vnic_rss_table(bp
, vnic
);
1921 /* Populate defaults in the context */
1922 bnxt_set_dflt_rss_indir_tbl(bp
, ctx
);
1923 ctx
->hfunc
= ETH_RSS_HASH_TOP
;
1924 memcpy(vnic
->rss_hash_key
, bp
->rss_hash_key
, HW_HASH_KEY_SIZE
);
1925 memcpy(ethtool_rxfh_context_key(ctx
),
1926 bp
->rss_hash_key
, HW_HASH_KEY_SIZE
);
1928 rc
= bnxt_hwrm_vnic_alloc(bp
, vnic
, 0, bp
->rx_nr_rings
);
1930 NL_SET_ERR_MSG_MOD(extack
, "Unable to allocate VNIC");
1934 rc
= bnxt_hwrm_vnic_set_tpa(bp
, vnic
, bp
->flags
& BNXT_FLAG_TPA
);
1936 NL_SET_ERR_MSG_MOD(extack
, "Unable to setup TPA");
1939 bnxt_modify_rss(bp
, ctx
, rss_ctx
, rxfh
);
1941 rc
= __bnxt_setup_vnic_p5(bp
, vnic
);
1943 NL_SET_ERR_MSG_MOD(extack
, "Unable to setup TPA");
1947 rss_ctx
->index
= rxfh
->rss_context
;
1950 bnxt_del_one_rss_ctx(bp
, rss_ctx
, true);
1954 static int bnxt_modify_rxfh_context(struct net_device
*dev
,
1955 struct ethtool_rxfh_context
*ctx
,
1956 const struct ethtool_rxfh_param
*rxfh
,
1957 struct netlink_ext_ack
*extack
)
1959 struct bnxt
*bp
= netdev_priv(dev
);
1960 struct bnxt_rss_ctx
*rss_ctx
;
1963 rc
= bnxt_rxfh_context_check(bp
, rxfh
, extack
);
1967 rss_ctx
= ethtool_rxfh_context_priv(ctx
);
1969 bnxt_modify_rss(bp
, ctx
, rss_ctx
, rxfh
);
1971 return bnxt_hwrm_vnic_rss_cfg_p5(bp
, &rss_ctx
->vnic
);
1974 static int bnxt_remove_rxfh_context(struct net_device
*dev
,
1975 struct ethtool_rxfh_context
*ctx
,
1977 struct netlink_ext_ack
*extack
)
1979 struct bnxt
*bp
= netdev_priv(dev
);
1980 struct bnxt_rss_ctx
*rss_ctx
;
1982 rss_ctx
= ethtool_rxfh_context_priv(ctx
);
1984 bnxt_del_one_rss_ctx(bp
, rss_ctx
, true);
1988 static int bnxt_set_rxfh(struct net_device
*dev
,
1989 struct ethtool_rxfh_param
*rxfh
,
1990 struct netlink_ext_ack
*extack
)
1992 struct bnxt
*bp
= netdev_priv(dev
);
1995 if (rxfh
->hfunc
&& rxfh
->hfunc
!= ETH_RSS_HASH_TOP
)
1998 bnxt_modify_rss(bp
, NULL
, NULL
, rxfh
);
2000 if (netif_running(bp
->dev
)) {
2001 bnxt_close_nic(bp
, false, false);
2002 rc
= bnxt_open_nic(bp
, false, false);
2007 static void bnxt_get_drvinfo(struct net_device
*dev
,
2008 struct ethtool_drvinfo
*info
)
2010 struct bnxt
*bp
= netdev_priv(dev
);
2012 strscpy(info
->driver
, DRV_MODULE_NAME
, sizeof(info
->driver
));
2013 strscpy(info
->fw_version
, bp
->fw_ver_str
, sizeof(info
->fw_version
));
2014 strscpy(info
->bus_info
, pci_name(bp
->pdev
), sizeof(info
->bus_info
));
2015 info
->n_stats
= bnxt_get_num_stats(bp
);
2016 info
->testinfo_len
= bp
->num_tests
;
2017 /* TODO CHIMP_FW: eeprom dump details */
2018 info
->eedump_len
= 0;
2019 /* TODO CHIMP FW: reg dump details */
2020 info
->regdump_len
= 0;
2023 static int bnxt_get_regs_len(struct net_device
*dev
)
2025 struct bnxt
*bp
= netdev_priv(dev
);
2031 reg_len
= BNXT_PXP_REG_LEN
;
2033 if (bp
->fw_cap
& BNXT_FW_CAP_PCIE_STATS_SUPPORTED
)
2034 reg_len
+= sizeof(struct pcie_ctx_hw_stats
);
2039 static void bnxt_get_regs(struct net_device
*dev
, struct ethtool_regs
*regs
,
2042 struct pcie_ctx_hw_stats
*hw_pcie_stats
;
2043 struct hwrm_pcie_qstats_input
*req
;
2044 struct bnxt
*bp
= netdev_priv(dev
);
2045 dma_addr_t hw_pcie_stats_addr
;
2049 bnxt_dbg_hwrm_rd_reg(bp
, 0, BNXT_PXP_REG_LEN
/ 4, _p
);
2051 if (!(bp
->fw_cap
& BNXT_FW_CAP_PCIE_STATS_SUPPORTED
))
2054 if (hwrm_req_init(bp
, req
, HWRM_PCIE_QSTATS
))
2057 hw_pcie_stats
= hwrm_req_dma_slice(bp
, req
, sizeof(*hw_pcie_stats
),
2058 &hw_pcie_stats_addr
);
2059 if (!hw_pcie_stats
) {
2060 hwrm_req_drop(bp
, req
);
2065 hwrm_req_hold(bp
, req
); /* hold on to slice */
2066 req
->pcie_stat_size
= cpu_to_le16(sizeof(*hw_pcie_stats
));
2067 req
->pcie_stat_host_addr
= cpu_to_le64(hw_pcie_stats_addr
);
2068 rc
= hwrm_req_send(bp
, req
);
2070 __le64
*src
= (__le64
*)hw_pcie_stats
;
2071 u64
*dst
= (u64
*)(_p
+ BNXT_PXP_REG_LEN
);
2074 for (i
= 0; i
< sizeof(*hw_pcie_stats
) / sizeof(__le64
); i
++)
2075 dst
[i
] = le64_to_cpu(src
[i
]);
2077 hwrm_req_drop(bp
, req
);
2080 static void bnxt_get_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
2082 struct bnxt
*bp
= netdev_priv(dev
);
2086 memset(&wol
->sopass
, 0, sizeof(wol
->sopass
));
2087 if (bp
->flags
& BNXT_FLAG_WOL_CAP
) {
2088 wol
->supported
= WAKE_MAGIC
;
2090 wol
->wolopts
= WAKE_MAGIC
;
2094 static int bnxt_set_wol(struct net_device
*dev
, struct ethtool_wolinfo
*wol
)
2096 struct bnxt
*bp
= netdev_priv(dev
);
2098 if (wol
->wolopts
& ~WAKE_MAGIC
)
2101 if (wol
->wolopts
& WAKE_MAGIC
) {
2102 if (!(bp
->flags
& BNXT_FLAG_WOL_CAP
))
2105 if (bnxt_hwrm_alloc_wol_fltr(bp
))
2111 if (bnxt_hwrm_free_wol_fltr(bp
))
2119 /* TODO: support 25GB, 40GB, 50GB with different cable type */
2120 void _bnxt_fw_to_linkmode(unsigned long *mode
, u16 fw_speeds
)
2122 linkmode_zero(mode
);
2124 if (fw_speeds
& BNXT_LINK_SPEED_MSK_100MB
)
2125 linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT
, mode
);
2126 if (fw_speeds
& BNXT_LINK_SPEED_MSK_1GB
)
2127 linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT
, mode
);
2128 if (fw_speeds
& BNXT_LINK_SPEED_MSK_2_5GB
)
2129 linkmode_set_bit(ETHTOOL_LINK_MODE_2500baseX_Full_BIT
, mode
);
2130 if (fw_speeds
& BNXT_LINK_SPEED_MSK_10GB
)
2131 linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT
, mode
);
2132 if (fw_speeds
& BNXT_LINK_SPEED_MSK_40GB
)
2133 linkmode_set_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT
, mode
);
2136 enum bnxt_media_type
{
2137 BNXT_MEDIA_UNKNOWN
= 0,
2141 BNXT_MEDIA_LR_ER_FR
,
2148 static const enum bnxt_media_type bnxt_phy_types
[] = {
2149 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASECR
] = BNXT_MEDIA_CR
,
2150 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR4
] = BNXT_MEDIA_KR
,
2151 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASELR
] = BNXT_MEDIA_LR_ER_FR
,
2152 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASESR
] = BNXT_MEDIA_SR
,
2153 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR2
] = BNXT_MEDIA_KR
,
2154 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKX
] = BNXT_MEDIA_KX
,
2155 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASEKR
] = BNXT_MEDIA_KR
,
2156 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASET
] = BNXT_MEDIA_TP
,
2157 [PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE
] = BNXT_MEDIA_TP
,
2158 [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_L
] = BNXT_MEDIA_CR
,
2159 [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_S
] = BNXT_MEDIA_CR
,
2160 [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASECR_CA_N
] = BNXT_MEDIA_CR
,
2161 [PORT_PHY_QCFG_RESP_PHY_TYPE_25G_BASESR
] = BNXT_MEDIA_SR
,
2162 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR4
] = BNXT_MEDIA_CR
,
2163 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR4
] = BNXT_MEDIA_SR
,
2164 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR4
] = BNXT_MEDIA_LR_ER_FR
,
2165 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER4
] = BNXT_MEDIA_LR_ER_FR
,
2166 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR10
] = BNXT_MEDIA_SR
,
2167 [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASECR4
] = BNXT_MEDIA_CR
,
2168 [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASESR4
] = BNXT_MEDIA_SR
,
2169 [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASELR4
] = BNXT_MEDIA_LR_ER_FR
,
2170 [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_BASEER4
] = BNXT_MEDIA_LR_ER_FR
,
2171 [PORT_PHY_QCFG_RESP_PHY_TYPE_40G_ACTIVE_CABLE
] = BNXT_MEDIA_SR
,
2172 [PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASET
] = BNXT_MEDIA_TP
,
2173 [PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASESX
] = BNXT_MEDIA_X
,
2174 [PORT_PHY_QCFG_RESP_PHY_TYPE_1G_BASECX
] = BNXT_MEDIA_X
,
2175 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR4
] = BNXT_MEDIA_CR
,
2176 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR4
] = BNXT_MEDIA_SR
,
2177 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR4
] = BNXT_MEDIA_LR_ER_FR
,
2178 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER4
] = BNXT_MEDIA_LR_ER_FR
,
2179 [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASECR
] = BNXT_MEDIA_CR
,
2180 [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASESR
] = BNXT_MEDIA_SR
,
2181 [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASELR
] = BNXT_MEDIA_LR_ER_FR
,
2182 [PORT_PHY_QCFG_RESP_PHY_TYPE_50G_BASEER
] = BNXT_MEDIA_LR_ER_FR
,
2183 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR2
] = BNXT_MEDIA_CR
,
2184 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR2
] = BNXT_MEDIA_SR
,
2185 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR2
] = BNXT_MEDIA_LR_ER_FR
,
2186 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER2
] = BNXT_MEDIA_LR_ER_FR
,
2187 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASECR
] = BNXT_MEDIA_CR
,
2188 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASESR
] = BNXT_MEDIA_SR
,
2189 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASELR
] = BNXT_MEDIA_LR_ER_FR
,
2190 [PORT_PHY_QCFG_RESP_PHY_TYPE_100G_BASEER
] = BNXT_MEDIA_LR_ER_FR
,
2191 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASECR2
] = BNXT_MEDIA_CR
,
2192 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASESR2
] = BNXT_MEDIA_SR
,
2193 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASELR2
] = BNXT_MEDIA_LR_ER_FR
,
2194 [PORT_PHY_QCFG_RESP_PHY_TYPE_200G_BASEER2
] = BNXT_MEDIA_LR_ER_FR
,
2195 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR8
] = BNXT_MEDIA_CR
,
2196 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR8
] = BNXT_MEDIA_SR
,
2197 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR8
] = BNXT_MEDIA_LR_ER_FR
,
2198 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER8
] = BNXT_MEDIA_LR_ER_FR
,
2199 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASECR4
] = BNXT_MEDIA_CR
,
2200 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASESR4
] = BNXT_MEDIA_SR
,
2201 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASELR4
] = BNXT_MEDIA_LR_ER_FR
,
2202 [PORT_PHY_QCFG_RESP_PHY_TYPE_400G_BASEER4
] = BNXT_MEDIA_LR_ER_FR
,
2205 static enum bnxt_media_type
2206 bnxt_get_media(struct bnxt_link_info
*link_info
)
2208 switch (link_info
->media_type
) {
2209 case PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP
:
2210 return BNXT_MEDIA_TP
;
2211 case PORT_PHY_QCFG_RESP_MEDIA_TYPE_DAC
:
2212 return BNXT_MEDIA_CR
;
2214 if (link_info
->phy_type
< ARRAY_SIZE(bnxt_phy_types
))
2215 return bnxt_phy_types
[link_info
->phy_type
];
2216 return BNXT_MEDIA_UNKNOWN
;
/* Dense row indices for the bnxt_link_modes[] table: one slot per
 * distinct link speed, with signalling-mode variants of the same speed
 * sharing a slot.
 */
enum bnxt_link_speed_indices {
	BNXT_LINK_SPEED_UNKNOWN = 0,
	BNXT_LINK_SPEED_100MB_IDX,
	BNXT_LINK_SPEED_1GB_IDX,
	BNXT_LINK_SPEED_10GB_IDX,
	BNXT_LINK_SPEED_25GB_IDX,
	BNXT_LINK_SPEED_40GB_IDX,
	BNXT_LINK_SPEED_50GB_IDX,
	BNXT_LINK_SPEED_100GB_IDX,
	BNXT_LINK_SPEED_200GB_IDX,
	BNXT_LINK_SPEED_400GB_IDX,
	__BNXT_LINK_SPEED_END
};
2234 static enum bnxt_link_speed_indices
bnxt_fw_speed_idx(u16 speed
)
2237 case BNXT_LINK_SPEED_100MB
: return BNXT_LINK_SPEED_100MB_IDX
;
2238 case BNXT_LINK_SPEED_1GB
: return BNXT_LINK_SPEED_1GB_IDX
;
2239 case BNXT_LINK_SPEED_10GB
: return BNXT_LINK_SPEED_10GB_IDX
;
2240 case BNXT_LINK_SPEED_25GB
: return BNXT_LINK_SPEED_25GB_IDX
;
2241 case BNXT_LINK_SPEED_40GB
: return BNXT_LINK_SPEED_40GB_IDX
;
2242 case BNXT_LINK_SPEED_50GB
:
2243 case BNXT_LINK_SPEED_50GB_PAM4
:
2244 return BNXT_LINK_SPEED_50GB_IDX
;
2245 case BNXT_LINK_SPEED_100GB
:
2246 case BNXT_LINK_SPEED_100GB_PAM4
:
2247 case BNXT_LINK_SPEED_100GB_PAM4_112
:
2248 return BNXT_LINK_SPEED_100GB_IDX
;
2249 case BNXT_LINK_SPEED_200GB
:
2250 case BNXT_LINK_SPEED_200GB_PAM4
:
2251 case BNXT_LINK_SPEED_200GB_PAM4_112
:
2252 return BNXT_LINK_SPEED_200GB_IDX
;
2253 case BNXT_LINK_SPEED_400GB
:
2254 case BNXT_LINK_SPEED_400GB_PAM4
:
2255 case BNXT_LINK_SPEED_400GB_PAM4_112
:
2256 return BNXT_LINK_SPEED_400GB_IDX
;
2257 default: return BNXT_LINK_SPEED_UNKNOWN
;
2261 static const enum ethtool_link_mode_bit_indices
2262 bnxt_link_modes
[__BNXT_LINK_SPEED_END
][BNXT_SIG_MODE_MAX
][__BNXT_MEDIA_END
] = {
2263 [BNXT_LINK_SPEED_100MB_IDX
] = {
2265 [BNXT_MEDIA_TP
] = ETHTOOL_LINK_MODE_100baseT_Full_BIT
,
2268 [BNXT_LINK_SPEED_1GB_IDX
] = {
2270 [BNXT_MEDIA_TP
] = ETHTOOL_LINK_MODE_1000baseT_Full_BIT
,
2271 /* historically baseT, but DAC is more correctly baseX */
2272 [BNXT_MEDIA_CR
] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT
,
2273 [BNXT_MEDIA_KX
] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT
,
2274 [BNXT_MEDIA_X
] = ETHTOOL_LINK_MODE_1000baseX_Full_BIT
,
2275 [BNXT_MEDIA_KR
] = ETHTOOL_LINK_MODE_1000baseKX_Full_BIT
,
2278 [BNXT_LINK_SPEED_10GB_IDX
] = {
2280 [BNXT_MEDIA_TP
] = ETHTOOL_LINK_MODE_10000baseT_Full_BIT
,
2281 [BNXT_MEDIA_CR
] = ETHTOOL_LINK_MODE_10000baseCR_Full_BIT
,
2282 [BNXT_MEDIA_SR
] = ETHTOOL_LINK_MODE_10000baseSR_Full_BIT
,
2283 [BNXT_MEDIA_LR_ER_FR
] = ETHTOOL_LINK_MODE_10000baseLR_Full_BIT
,
2284 [BNXT_MEDIA_KR
] = ETHTOOL_LINK_MODE_10000baseKR_Full_BIT
,
2285 [BNXT_MEDIA_KX
] = ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT
,
2288 [BNXT_LINK_SPEED_25GB_IDX
] = {
2290 [BNXT_MEDIA_CR
] = ETHTOOL_LINK_MODE_25000baseCR_Full_BIT
,
2291 [BNXT_MEDIA_SR
] = ETHTOOL_LINK_MODE_25000baseSR_Full_BIT
,
2292 [BNXT_MEDIA_KR
] = ETHTOOL_LINK_MODE_25000baseKR_Full_BIT
,
2295 [BNXT_LINK_SPEED_40GB_IDX
] = {
2297 [BNXT_MEDIA_CR
] = ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT
,
2298 [BNXT_MEDIA_SR
] = ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT
,
2299 [BNXT_MEDIA_LR_ER_FR
] = ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT
,
2300 [BNXT_MEDIA_KR
] = ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT
,
2303 [BNXT_LINK_SPEED_50GB_IDX
] = {
2304 [BNXT_SIG_MODE_NRZ
] = {
2305 [BNXT_MEDIA_CR
] = ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT
,
2306 [BNXT_MEDIA_SR
] = ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT
,
2307 [BNXT_MEDIA_KR
] = ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT
,
2309 [BNXT_SIG_MODE_PAM4
] = {
2310 [BNXT_MEDIA_CR
] = ETHTOOL_LINK_MODE_50000baseCR_Full_BIT
,
2311 [BNXT_MEDIA_SR
] = ETHTOOL_LINK_MODE_50000baseSR_Full_BIT
,
2312 [BNXT_MEDIA_LR_ER_FR
] = ETHTOOL_LINK_MODE_50000baseLR_ER_FR_Full_BIT
,
2313 [BNXT_MEDIA_KR
] = ETHTOOL_LINK_MODE_50000baseKR_Full_BIT
,
2316 [BNXT_LINK_SPEED_100GB_IDX
] = {
2317 [BNXT_SIG_MODE_NRZ
] = {
2318 [BNXT_MEDIA_CR
] = ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT
,
2319 [BNXT_MEDIA_SR
] = ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT
,
2320 [BNXT_MEDIA_LR_ER_FR
] = ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT
,
2321 [BNXT_MEDIA_KR
] = ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT
,
2323 [BNXT_SIG_MODE_PAM4
] = {
2324 [BNXT_MEDIA_CR
] = ETHTOOL_LINK_MODE_100000baseCR2_Full_BIT
,
2325 [BNXT_MEDIA_SR
] = ETHTOOL_LINK_MODE_100000baseSR2_Full_BIT
,
2326 [BNXT_MEDIA_LR_ER_FR
] = ETHTOOL_LINK_MODE_100000baseLR2_ER2_FR2_Full_BIT
,
2327 [BNXT_MEDIA_KR
] = ETHTOOL_LINK_MODE_100000baseKR2_Full_BIT
,
2329 [BNXT_SIG_MODE_PAM4_112
] = {
2330 [BNXT_MEDIA_CR
] = ETHTOOL_LINK_MODE_100000baseCR_Full_BIT
,
2331 [BNXT_MEDIA_SR
] = ETHTOOL_LINK_MODE_100000baseSR_Full_BIT
,
2332 [BNXT_MEDIA_KR
] = ETHTOOL_LINK_MODE_100000baseKR_Full_BIT
,
2333 [BNXT_MEDIA_LR_ER_FR
] = ETHTOOL_LINK_MODE_100000baseLR_ER_FR_Full_BIT
,
2336 [BNXT_LINK_SPEED_200GB_IDX
] = {
2337 [BNXT_SIG_MODE_PAM4
] = {
2338 [BNXT_MEDIA_CR
] = ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT
,
2339 [BNXT_MEDIA_SR
] = ETHTOOL_LINK_MODE_200000baseSR4_Full_BIT
,
2340 [BNXT_MEDIA_LR_ER_FR
] = ETHTOOL_LINK_MODE_200000baseLR4_ER4_FR4_Full_BIT
,
2341 [BNXT_MEDIA_KR
] = ETHTOOL_LINK_MODE_200000baseKR4_Full_BIT
,
2343 [BNXT_SIG_MODE_PAM4_112
] = {
2344 [BNXT_MEDIA_CR
] = ETHTOOL_LINK_MODE_200000baseCR2_Full_BIT
,
2345 [BNXT_MEDIA_KR
] = ETHTOOL_LINK_MODE_200000baseKR2_Full_BIT
,
2346 [BNXT_MEDIA_SR
] = ETHTOOL_LINK_MODE_200000baseSR2_Full_BIT
,
2347 [BNXT_MEDIA_LR_ER_FR
] = ETHTOOL_LINK_MODE_200000baseLR2_ER2_FR2_Full_BIT
,
2350 [BNXT_LINK_SPEED_400GB_IDX
] = {
2351 [BNXT_SIG_MODE_PAM4
] = {
2352 [BNXT_MEDIA_CR
] = ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT
,
2353 [BNXT_MEDIA_KR
] = ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT
,
2354 [BNXT_MEDIA_SR
] = ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT
,
2355 [BNXT_MEDIA_LR_ER_FR
] = ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT
,
2357 [BNXT_SIG_MODE_PAM4_112
] = {
2358 [BNXT_MEDIA_CR
] = ETHTOOL_LINK_MODE_400000baseCR4_Full_BIT
,
2359 [BNXT_MEDIA_KR
] = ETHTOOL_LINK_MODE_400000baseKR4_Full_BIT
,
2360 [BNXT_MEDIA_SR
] = ETHTOOL_LINK_MODE_400000baseSR4_Full_BIT
,
2361 [BNXT_MEDIA_LR_ER_FR
] = ETHTOOL_LINK_MODE_400000baseLR4_ER4_FR4_Full_BIT
,
2366 #define BNXT_LINK_MODE_UNKNOWN -1
2368 static enum ethtool_link_mode_bit_indices
2369 bnxt_get_link_mode(struct bnxt_link_info
*link_info
)
2371 enum ethtool_link_mode_bit_indices link_mode
;
2372 enum bnxt_link_speed_indices speed
;
2373 enum bnxt_media_type media
;
2376 if (link_info
->phy_link_status
!= BNXT_LINK_LINK
)
2377 return BNXT_LINK_MODE_UNKNOWN
;
2379 media
= bnxt_get_media(link_info
);
2380 if (BNXT_AUTO_MODE(link_info
->auto_mode
)) {
2381 speed
= bnxt_fw_speed_idx(link_info
->link_speed
);
2382 sig_mode
= link_info
->active_fec_sig_mode
&
2383 PORT_PHY_QCFG_RESP_SIGNAL_MODE_MASK
;
2385 speed
= bnxt_fw_speed_idx(link_info
->req_link_speed
);
2386 sig_mode
= link_info
->req_signal_mode
;
2388 if (sig_mode
>= BNXT_SIG_MODE_MAX
)
2389 return BNXT_LINK_MODE_UNKNOWN
;
2391 /* Note ETHTOOL_LINK_MODE_10baseT_Half_BIT == 0 is a legal Linux
2392 * link mode, but since no such devices exist, the zeroes in the
2393 * map can be conveniently used to represent unknown link modes.
2395 link_mode
= bnxt_link_modes
[speed
][sig_mode
][media
];
2397 return BNXT_LINK_MODE_UNKNOWN
;
2399 switch (link_mode
) {
2400 case ETHTOOL_LINK_MODE_100baseT_Full_BIT
:
2401 if (~link_info
->duplex
& BNXT_LINK_DUPLEX_FULL
)
2402 link_mode
= ETHTOOL_LINK_MODE_100baseT_Half_BIT
;
2404 case ETHTOOL_LINK_MODE_1000baseT_Full_BIT
:
2405 if (~link_info
->duplex
& BNXT_LINK_DUPLEX_FULL
)
2406 link_mode
= ETHTOOL_LINK_MODE_1000baseT_Half_BIT
;
2415 static void bnxt_get_ethtool_modes(struct bnxt_link_info
*link_info
,
2416 struct ethtool_link_ksettings
*lk_ksettings
)
2418 struct bnxt
*bp
= container_of(link_info
, struct bnxt
, link_info
);
2420 if (!(bp
->phy_flags
& BNXT_PHY_FL_NO_PAUSE
)) {
2421 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT
,
2422 lk_ksettings
->link_modes
.supported
);
2423 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT
,
2424 lk_ksettings
->link_modes
.supported
);
2427 if (link_info
->support_auto_speeds
|| link_info
->support_auto_speeds2
||
2428 link_info
->support_pam4_auto_speeds
)
2429 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT
,
2430 lk_ksettings
->link_modes
.supported
);
2432 if (~link_info
->autoneg
& BNXT_AUTONEG_FLOW_CTRL
)
2435 if (link_info
->auto_pause_setting
& BNXT_LINK_PAUSE_RX
)
2436 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT
,
2437 lk_ksettings
->link_modes
.advertising
);
2438 if (hweight8(link_info
->auto_pause_setting
& BNXT_LINK_PAUSE_BOTH
) == 1)
2439 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT
,
2440 lk_ksettings
->link_modes
.advertising
);
2441 if (link_info
->lp_pause
& BNXT_LINK_PAUSE_RX
)
2442 linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT
,
2443 lk_ksettings
->link_modes
.lp_advertising
);
2444 if (hweight8(link_info
->lp_pause
& BNXT_LINK_PAUSE_BOTH
) == 1)
2445 linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT
,
2446 lk_ksettings
->link_modes
.lp_advertising
);
2449 static const u16 bnxt_nrz_speed_masks
[] = {
2450 [BNXT_LINK_SPEED_100MB_IDX
] = BNXT_LINK_SPEED_MSK_100MB
,
2451 [BNXT_LINK_SPEED_1GB_IDX
] = BNXT_LINK_SPEED_MSK_1GB
,
2452 [BNXT_LINK_SPEED_10GB_IDX
] = BNXT_LINK_SPEED_MSK_10GB
,
2453 [BNXT_LINK_SPEED_25GB_IDX
] = BNXT_LINK_SPEED_MSK_25GB
,
2454 [BNXT_LINK_SPEED_40GB_IDX
] = BNXT_LINK_SPEED_MSK_40GB
,
2455 [BNXT_LINK_SPEED_50GB_IDX
] = BNXT_LINK_SPEED_MSK_50GB
,
2456 [BNXT_LINK_SPEED_100GB_IDX
] = BNXT_LINK_SPEED_MSK_100GB
,
2457 [__BNXT_LINK_SPEED_END
- 1] = 0 /* make any legal speed a valid index */
2460 static const u16 bnxt_pam4_speed_masks
[] = {
2461 [BNXT_LINK_SPEED_50GB_IDX
] = BNXT_LINK_PAM4_SPEED_MSK_50GB
,
2462 [BNXT_LINK_SPEED_100GB_IDX
] = BNXT_LINK_PAM4_SPEED_MSK_100GB
,
2463 [BNXT_LINK_SPEED_200GB_IDX
] = BNXT_LINK_PAM4_SPEED_MSK_200GB
,
2464 [__BNXT_LINK_SPEED_END
- 1] = 0 /* make any legal speed a valid index */
2467 static const u16 bnxt_nrz_speeds2_masks
[] = {
2468 [BNXT_LINK_SPEED_1GB_IDX
] = BNXT_LINK_SPEEDS2_MSK_1GB
,
2469 [BNXT_LINK_SPEED_10GB_IDX
] = BNXT_LINK_SPEEDS2_MSK_10GB
,
2470 [BNXT_LINK_SPEED_25GB_IDX
] = BNXT_LINK_SPEEDS2_MSK_25GB
,
2471 [BNXT_LINK_SPEED_40GB_IDX
] = BNXT_LINK_SPEEDS2_MSK_40GB
,
2472 [BNXT_LINK_SPEED_50GB_IDX
] = BNXT_LINK_SPEEDS2_MSK_50GB
,
2473 [BNXT_LINK_SPEED_100GB_IDX
] = BNXT_LINK_SPEEDS2_MSK_100GB
,
2474 [__BNXT_LINK_SPEED_END
- 1] = 0 /* make any legal speed a valid index */
2477 static const u16 bnxt_pam4_speeds2_masks
[] = {
2478 [BNXT_LINK_SPEED_50GB_IDX
] = BNXT_LINK_SPEEDS2_MSK_50GB_PAM4
,
2479 [BNXT_LINK_SPEED_100GB_IDX
] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4
,
2480 [BNXT_LINK_SPEED_200GB_IDX
] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4
,
2481 [BNXT_LINK_SPEED_400GB_IDX
] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4
,
2484 static const u16 bnxt_pam4_112_speeds2_masks
[] = {
2485 [BNXT_LINK_SPEED_100GB_IDX
] = BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112
,
2486 [BNXT_LINK_SPEED_200GB_IDX
] = BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112
,
2487 [BNXT_LINK_SPEED_400GB_IDX
] = BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112
,
2490 static enum bnxt_link_speed_indices
2491 bnxt_encoding_speed_idx(u8 sig_mode
, u16 phy_flags
, u16 speed_msk
)
2497 case BNXT_SIG_MODE_NRZ
:
2498 if (phy_flags
& BNXT_PHY_FL_SPEEDS2
) {
2499 speeds
= bnxt_nrz_speeds2_masks
;
2500 len
= ARRAY_SIZE(bnxt_nrz_speeds2_masks
);
2502 speeds
= bnxt_nrz_speed_masks
;
2503 len
= ARRAY_SIZE(bnxt_nrz_speed_masks
);
2506 case BNXT_SIG_MODE_PAM4
:
2507 if (phy_flags
& BNXT_PHY_FL_SPEEDS2
) {
2508 speeds
= bnxt_pam4_speeds2_masks
;
2509 len
= ARRAY_SIZE(bnxt_pam4_speeds2_masks
);
2511 speeds
= bnxt_pam4_speed_masks
;
2512 len
= ARRAY_SIZE(bnxt_pam4_speed_masks
);
2515 case BNXT_SIG_MODE_PAM4_112
:
2516 speeds
= bnxt_pam4_112_speeds2_masks
;
2517 len
= ARRAY_SIZE(bnxt_pam4_112_speeds2_masks
);
2520 return BNXT_LINK_SPEED_UNKNOWN
;
2523 for (idx
= 0; idx
< len
; idx
++) {
2524 if (speeds
[idx
] == speed_msk
)
2528 return BNXT_LINK_SPEED_UNKNOWN
;
2531 #define BNXT_FW_SPEED_MSK_BITS 16
2534 __bnxt_get_ethtool_speeds(unsigned long fw_mask
, enum bnxt_media_type media
,
2535 u8 sig_mode
, u16 phy_flags
, unsigned long *et_mask
)
2537 enum ethtool_link_mode_bit_indices link_mode
;
2538 enum bnxt_link_speed_indices speed
;
2541 for_each_set_bit(bit
, &fw_mask
, BNXT_FW_SPEED_MSK_BITS
) {
2542 speed
= bnxt_encoding_speed_idx(sig_mode
, phy_flags
, 1 << bit
);
2546 link_mode
= bnxt_link_modes
[speed
][sig_mode
][media
];
2550 linkmode_set_bit(link_mode
, et_mask
);
2555 bnxt_get_ethtool_speeds(unsigned long fw_mask
, enum bnxt_media_type media
,
2556 u8 sig_mode
, u16 phy_flags
, unsigned long *et_mask
)
2559 __bnxt_get_ethtool_speeds(fw_mask
, media
, sig_mode
, phy_flags
,
2564 /* list speeds for all media if unknown */
2565 for (media
= 1; media
< __BNXT_MEDIA_END
; media
++)
2566 __bnxt_get_ethtool_speeds(fw_mask
, media
, sig_mode
, phy_flags
,
2571 bnxt_get_all_ethtool_support_speeds(struct bnxt_link_info
*link_info
,
2572 enum bnxt_media_type media
,
2573 struct ethtool_link_ksettings
*lk_ksettings
)
2575 struct bnxt
*bp
= container_of(link_info
, struct bnxt
, link_info
);
2576 u16 sp_nrz
, sp_pam4
, sp_pam4_112
= 0;
2577 u16 phy_flags
= bp
->phy_flags
;
2579 if (phy_flags
& BNXT_PHY_FL_SPEEDS2
) {
2580 sp_nrz
= link_info
->support_speeds2
;
2581 sp_pam4
= link_info
->support_speeds2
;
2582 sp_pam4_112
= link_info
->support_speeds2
;
2584 sp_nrz
= link_info
->support_speeds
;
2585 sp_pam4
= link_info
->support_pam4_speeds
;
2587 bnxt_get_ethtool_speeds(sp_nrz
, media
, BNXT_SIG_MODE_NRZ
, phy_flags
,
2588 lk_ksettings
->link_modes
.supported
);
2589 bnxt_get_ethtool_speeds(sp_pam4
, media
, BNXT_SIG_MODE_PAM4
, phy_flags
,
2590 lk_ksettings
->link_modes
.supported
);
2591 bnxt_get_ethtool_speeds(sp_pam4_112
, media
, BNXT_SIG_MODE_PAM4_112
,
2592 phy_flags
, lk_ksettings
->link_modes
.supported
);
2596 bnxt_get_all_ethtool_adv_speeds(struct bnxt_link_info
*link_info
,
2597 enum bnxt_media_type media
,
2598 struct ethtool_link_ksettings
*lk_ksettings
)
2600 struct bnxt
*bp
= container_of(link_info
, struct bnxt
, link_info
);
2601 u16 sp_nrz
, sp_pam4
, sp_pam4_112
= 0;
2602 u16 phy_flags
= bp
->phy_flags
;
2604 sp_nrz
= link_info
->advertising
;
2605 if (phy_flags
& BNXT_PHY_FL_SPEEDS2
) {
2606 sp_pam4
= link_info
->advertising
;
2607 sp_pam4_112
= link_info
->advertising
;
2609 sp_pam4
= link_info
->advertising_pam4
;
2611 bnxt_get_ethtool_speeds(sp_nrz
, media
, BNXT_SIG_MODE_NRZ
, phy_flags
,
2612 lk_ksettings
->link_modes
.advertising
);
2613 bnxt_get_ethtool_speeds(sp_pam4
, media
, BNXT_SIG_MODE_PAM4
, phy_flags
,
2614 lk_ksettings
->link_modes
.advertising
);
2615 bnxt_get_ethtool_speeds(sp_pam4_112
, media
, BNXT_SIG_MODE_PAM4_112
,
2616 phy_flags
, lk_ksettings
->link_modes
.advertising
);
2620 bnxt_get_all_ethtool_lp_speeds(struct bnxt_link_info
*link_info
,
2621 enum bnxt_media_type media
,
2622 struct ethtool_link_ksettings
*lk_ksettings
)
2624 struct bnxt
*bp
= container_of(link_info
, struct bnxt
, link_info
);
2625 u16 phy_flags
= bp
->phy_flags
;
2627 bnxt_get_ethtool_speeds(link_info
->lp_auto_link_speeds
, media
,
2628 BNXT_SIG_MODE_NRZ
, phy_flags
,
2629 lk_ksettings
->link_modes
.lp_advertising
);
2630 bnxt_get_ethtool_speeds(link_info
->lp_auto_pam4_link_speeds
, media
,
2631 BNXT_SIG_MODE_PAM4
, phy_flags
,
2632 lk_ksettings
->link_modes
.lp_advertising
);
2635 static void bnxt_update_speed(u32
*delta
, bool installed_media
, u16
*speeds
,
2636 u16 speed_msk
, const unsigned long *et_mask
,
2637 enum ethtool_link_mode_bit_indices mode
)
2639 bool mode_desired
= linkmode_test_bit(mode
, et_mask
);
2644 /* enabled speeds for installed media should override */
2645 if (installed_media
&& mode_desired
) {
2646 *speeds
|= speed_msk
;
2647 *delta
|= speed_msk
;
2651 /* many to one mapping, only allow one change per fw_speed bit */
2652 if (!(*delta
& speed_msk
) && (mode_desired
== !(*speeds
& speed_msk
))) {
2653 *speeds
^= speed_msk
;
2654 *delta
|= speed_msk
;
2658 static void bnxt_set_ethtool_speeds(struct bnxt_link_info
*link_info
,
2659 const unsigned long *et_mask
)
2661 struct bnxt
*bp
= container_of(link_info
, struct bnxt
, link_info
);
2662 u16
const *sp_msks
, *sp_pam4_msks
, *sp_pam4_112_msks
;
2663 enum bnxt_media_type media
= bnxt_get_media(link_info
);
2664 u16
*adv
, *adv_pam4
, *adv_pam4_112
= NULL
;
2665 u32 delta_pam4_112
= 0;
2670 adv
= &link_info
->advertising
;
2671 if (bp
->phy_flags
& BNXT_PHY_FL_SPEEDS2
) {
2672 adv_pam4
= &link_info
->advertising
;
2673 adv_pam4_112
= &link_info
->advertising
;
2674 sp_msks
= bnxt_nrz_speeds2_masks
;
2675 sp_pam4_msks
= bnxt_pam4_speeds2_masks
;
2676 sp_pam4_112_msks
= bnxt_pam4_112_speeds2_masks
;
2678 adv_pam4
= &link_info
->advertising_pam4
;
2679 sp_msks
= bnxt_nrz_speed_masks
;
2680 sp_pam4_msks
= bnxt_pam4_speed_masks
;
2682 for (i
= 1; i
< __BNXT_LINK_SPEED_END
; i
++) {
2683 /* accept any legal media from user */
2684 for (m
= 1; m
< __BNXT_MEDIA_END
; m
++) {
2685 bnxt_update_speed(&delta_nrz
, m
== media
,
2686 adv
, sp_msks
[i
], et_mask
,
2687 bnxt_link_modes
[i
][BNXT_SIG_MODE_NRZ
][m
]);
2688 bnxt_update_speed(&delta_pam4
, m
== media
,
2689 adv_pam4
, sp_pam4_msks
[i
], et_mask
,
2690 bnxt_link_modes
[i
][BNXT_SIG_MODE_PAM4
][m
]);
2694 bnxt_update_speed(&delta_pam4_112
, m
== media
,
2695 adv_pam4_112
, sp_pam4_112_msks
[i
], et_mask
,
2696 bnxt_link_modes
[i
][BNXT_SIG_MODE_PAM4_112
][m
]);
2701 static void bnxt_fw_to_ethtool_advertised_fec(struct bnxt_link_info
*link_info
,
2702 struct ethtool_link_ksettings
*lk_ksettings
)
2704 u16 fec_cfg
= link_info
->fec_cfg
;
2706 if ((fec_cfg
& BNXT_FEC_NONE
) || !(fec_cfg
& BNXT_FEC_AUTONEG
)) {
2707 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT
,
2708 lk_ksettings
->link_modes
.advertising
);
2711 if (fec_cfg
& BNXT_FEC_ENC_BASE_R
)
2712 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT
,
2713 lk_ksettings
->link_modes
.advertising
);
2714 if (fec_cfg
& BNXT_FEC_ENC_RS
)
2715 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT
,
2716 lk_ksettings
->link_modes
.advertising
);
2717 if (fec_cfg
& BNXT_FEC_ENC_LLRS
)
2718 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT
,
2719 lk_ksettings
->link_modes
.advertising
);
2722 static void bnxt_fw_to_ethtool_support_fec(struct bnxt_link_info
*link_info
,
2723 struct ethtool_link_ksettings
*lk_ksettings
)
2725 u16 fec_cfg
= link_info
->fec_cfg
;
2727 if (fec_cfg
& BNXT_FEC_NONE
) {
2728 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_NONE_BIT
,
2729 lk_ksettings
->link_modes
.supported
);
2732 if (fec_cfg
& BNXT_FEC_ENC_BASE_R_CAP
)
2733 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_BASER_BIT
,
2734 lk_ksettings
->link_modes
.supported
);
2735 if (fec_cfg
& BNXT_FEC_ENC_RS_CAP
)
2736 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_RS_BIT
,
2737 lk_ksettings
->link_modes
.supported
);
2738 if (fec_cfg
& BNXT_FEC_ENC_LLRS_CAP
)
2739 linkmode_set_bit(ETHTOOL_LINK_MODE_FEC_LLRS_BIT
,
2740 lk_ksettings
->link_modes
.supported
);
2743 u32
bnxt_fw_to_ethtool_speed(u16 fw_link_speed
)
2745 switch (fw_link_speed
) {
2746 case BNXT_LINK_SPEED_100MB
:
2748 case BNXT_LINK_SPEED_1GB
:
2750 case BNXT_LINK_SPEED_2_5GB
:
2752 case BNXT_LINK_SPEED_10GB
:
2754 case BNXT_LINK_SPEED_20GB
:
2756 case BNXT_LINK_SPEED_25GB
:
2758 case BNXT_LINK_SPEED_40GB
:
2760 case BNXT_LINK_SPEED_50GB
:
2761 case BNXT_LINK_SPEED_50GB_PAM4
:
2763 case BNXT_LINK_SPEED_100GB
:
2764 case BNXT_LINK_SPEED_100GB_PAM4
:
2765 case BNXT_LINK_SPEED_100GB_PAM4_112
:
2766 return SPEED_100000
;
2767 case BNXT_LINK_SPEED_200GB
:
2768 case BNXT_LINK_SPEED_200GB_PAM4
:
2769 case BNXT_LINK_SPEED_200GB_PAM4_112
:
2770 return SPEED_200000
;
2771 case BNXT_LINK_SPEED_400GB
:
2772 case BNXT_LINK_SPEED_400GB_PAM4
:
2773 case BNXT_LINK_SPEED_400GB_PAM4_112
:
2774 return SPEED_400000
;
2776 return SPEED_UNKNOWN
;
2780 static void bnxt_get_default_speeds(struct ethtool_link_ksettings
*lk_ksettings
,
2781 struct bnxt_link_info
*link_info
)
2783 struct ethtool_link_settings
*base
= &lk_ksettings
->base
;
2785 if (link_info
->link_state
== BNXT_LINK_STATE_UP
) {
2786 base
->speed
= bnxt_fw_to_ethtool_speed(link_info
->link_speed
);
2787 base
->duplex
= DUPLEX_HALF
;
2788 if (link_info
->duplex
& BNXT_LINK_DUPLEX_FULL
)
2789 base
->duplex
= DUPLEX_FULL
;
2790 lk_ksettings
->lanes
= link_info
->active_lanes
;
2791 } else if (!link_info
->autoneg
) {
2792 base
->speed
= bnxt_fw_to_ethtool_speed(link_info
->req_link_speed
);
2793 base
->duplex
= DUPLEX_HALF
;
2794 if (link_info
->req_duplex
== BNXT_LINK_DUPLEX_FULL
)
2795 base
->duplex
= DUPLEX_FULL
;
2799 static int bnxt_get_link_ksettings(struct net_device
*dev
,
2800 struct ethtool_link_ksettings
*lk_ksettings
)
2802 struct ethtool_link_settings
*base
= &lk_ksettings
->base
;
2803 enum ethtool_link_mode_bit_indices link_mode
;
2804 struct bnxt
*bp
= netdev_priv(dev
);
2805 struct bnxt_link_info
*link_info
;
2806 enum bnxt_media_type media
;
2808 ethtool_link_ksettings_zero_link_mode(lk_ksettings
, lp_advertising
);
2809 ethtool_link_ksettings_zero_link_mode(lk_ksettings
, advertising
);
2810 ethtool_link_ksettings_zero_link_mode(lk_ksettings
, supported
);
2811 base
->duplex
= DUPLEX_UNKNOWN
;
2812 base
->speed
= SPEED_UNKNOWN
;
2813 link_info
= &bp
->link_info
;
2815 mutex_lock(&bp
->link_lock
);
2816 bnxt_get_ethtool_modes(link_info
, lk_ksettings
);
2817 media
= bnxt_get_media(link_info
);
2818 bnxt_get_all_ethtool_support_speeds(link_info
, media
, lk_ksettings
);
2819 bnxt_fw_to_ethtool_support_fec(link_info
, lk_ksettings
);
2820 link_mode
= bnxt_get_link_mode(link_info
);
2821 if (link_mode
!= BNXT_LINK_MODE_UNKNOWN
)
2822 ethtool_params_from_link_mode(lk_ksettings
, link_mode
);
2824 bnxt_get_default_speeds(lk_ksettings
, link_info
);
2826 if (link_info
->autoneg
) {
2827 bnxt_fw_to_ethtool_advertised_fec(link_info
, lk_ksettings
);
2828 linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT
,
2829 lk_ksettings
->link_modes
.advertising
);
2830 base
->autoneg
= AUTONEG_ENABLE
;
2831 bnxt_get_all_ethtool_adv_speeds(link_info
, media
, lk_ksettings
);
2832 if (link_info
->phy_link_status
== BNXT_LINK_LINK
)
2833 bnxt_get_all_ethtool_lp_speeds(link_info
, media
,
2836 base
->autoneg
= AUTONEG_DISABLE
;
2839 base
->port
= PORT_NONE
;
2840 if (media
== BNXT_MEDIA_TP
) {
2841 base
->port
= PORT_TP
;
2842 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT
,
2843 lk_ksettings
->link_modes
.supported
);
2844 linkmode_set_bit(ETHTOOL_LINK_MODE_TP_BIT
,
2845 lk_ksettings
->link_modes
.advertising
);
2846 } else if (media
== BNXT_MEDIA_KR
) {
2847 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT
,
2848 lk_ksettings
->link_modes
.supported
);
2849 linkmode_set_bit(ETHTOOL_LINK_MODE_Backplane_BIT
,
2850 lk_ksettings
->link_modes
.advertising
);
2852 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT
,
2853 lk_ksettings
->link_modes
.supported
);
2854 linkmode_set_bit(ETHTOOL_LINK_MODE_FIBRE_BIT
,
2855 lk_ksettings
->link_modes
.advertising
);
2857 if (media
== BNXT_MEDIA_CR
)
2858 base
->port
= PORT_DA
;
2860 base
->port
= PORT_FIBRE
;
2862 base
->phy_address
= link_info
->phy_addr
;
2863 mutex_unlock(&bp
->link_lock
);
2869 bnxt_force_link_speed(struct net_device
*dev
, u32 ethtool_speed
, u32 lanes
)
2871 struct bnxt
*bp
= netdev_priv(dev
);
2872 struct bnxt_link_info
*link_info
= &bp
->link_info
;
2873 u16 support_pam4_spds
= link_info
->support_pam4_speeds
;
2874 u16 support_spds2
= link_info
->support_speeds2
;
2875 u16 support_spds
= link_info
->support_speeds
;
2876 u8 sig_mode
= BNXT_SIG_MODE_NRZ
;
2877 u32 lanes_needed
= 1;
2880 switch (ethtool_speed
) {
2882 if (support_spds
& BNXT_LINK_SPEED_MSK_100MB
)
2883 fw_speed
= PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100MB
;
2886 if ((support_spds
& BNXT_LINK_SPEED_MSK_1GB
) ||
2887 (support_spds2
& BNXT_LINK_SPEEDS2_MSK_1GB
))
2888 fw_speed
= PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB
;
2891 if (support_spds
& BNXT_LINK_SPEED_MSK_2_5GB
)
2892 fw_speed
= PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_2_5GB
;
2895 if ((support_spds
& BNXT_LINK_SPEED_MSK_10GB
) ||
2896 (support_spds2
& BNXT_LINK_SPEEDS2_MSK_10GB
))
2897 fw_speed
= PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB
;
2900 if (support_spds
& BNXT_LINK_SPEED_MSK_20GB
) {
2901 fw_speed
= PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_20GB
;
2906 if ((support_spds
& BNXT_LINK_SPEED_MSK_25GB
) ||
2907 (support_spds2
& BNXT_LINK_SPEEDS2_MSK_25GB
))
2908 fw_speed
= PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB
;
2911 if ((support_spds
& BNXT_LINK_SPEED_MSK_40GB
) ||
2912 (support_spds2
& BNXT_LINK_SPEEDS2_MSK_40GB
)) {
2913 fw_speed
= PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB
;
2918 if (((support_spds
& BNXT_LINK_SPEED_MSK_50GB
) ||
2919 (support_spds2
& BNXT_LINK_SPEEDS2_MSK_50GB
)) &&
2921 fw_speed
= PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB
;
2923 } else if (support_pam4_spds
& BNXT_LINK_PAM4_SPEED_MSK_50GB
) {
2924 fw_speed
= PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_50GB
;
2925 sig_mode
= BNXT_SIG_MODE_PAM4
;
2926 } else if (support_spds2
& BNXT_LINK_SPEEDS2_MSK_50GB_PAM4
) {
2927 fw_speed
= BNXT_LINK_SPEED_50GB_PAM4
;
2928 sig_mode
= BNXT_SIG_MODE_PAM4
;
2932 if (((support_spds
& BNXT_LINK_SPEED_MSK_100GB
) ||
2933 (support_spds2
& BNXT_LINK_SPEEDS2_MSK_100GB
)) &&
2934 lanes
!= 2 && lanes
!= 1) {
2935 fw_speed
= PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_100GB
;
2937 } else if (support_pam4_spds
& BNXT_LINK_PAM4_SPEED_MSK_100GB
) {
2938 fw_speed
= PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_100GB
;
2939 sig_mode
= BNXT_SIG_MODE_PAM4
;
2941 } else if ((support_spds2
& BNXT_LINK_SPEEDS2_MSK_100GB_PAM4
) &&
2943 fw_speed
= BNXT_LINK_SPEED_100GB_PAM4
;
2944 sig_mode
= BNXT_SIG_MODE_PAM4
;
2946 } else if (support_spds2
& BNXT_LINK_SPEEDS2_MSK_100GB_PAM4_112
) {
2947 fw_speed
= BNXT_LINK_SPEED_100GB_PAM4_112
;
2948 sig_mode
= BNXT_SIG_MODE_PAM4_112
;
2952 if (support_pam4_spds
& BNXT_LINK_PAM4_SPEED_MSK_200GB
) {
2953 fw_speed
= PORT_PHY_CFG_REQ_FORCE_PAM4_LINK_SPEED_200GB
;
2954 sig_mode
= BNXT_SIG_MODE_PAM4
;
2956 } else if ((support_spds2
& BNXT_LINK_SPEEDS2_MSK_200GB_PAM4
) &&
2958 fw_speed
= BNXT_LINK_SPEED_200GB_PAM4
;
2959 sig_mode
= BNXT_SIG_MODE_PAM4
;
2961 } else if (support_spds2
& BNXT_LINK_SPEEDS2_MSK_200GB_PAM4_112
) {
2962 fw_speed
= BNXT_LINK_SPEED_200GB_PAM4_112
;
2963 sig_mode
= BNXT_SIG_MODE_PAM4_112
;
2968 if ((support_spds2
& BNXT_LINK_SPEEDS2_MSK_400GB_PAM4
) &&
2970 fw_speed
= BNXT_LINK_SPEED_400GB_PAM4
;
2971 sig_mode
= BNXT_SIG_MODE_PAM4
;
2973 } else if (support_spds2
& BNXT_LINK_SPEEDS2_MSK_400GB_PAM4_112
) {
2974 fw_speed
= BNXT_LINK_SPEED_400GB_PAM4_112
;
2975 sig_mode
= BNXT_SIG_MODE_PAM4_112
;
2982 netdev_err(dev
, "unsupported speed!\n");
2986 if (lanes
&& lanes
!= lanes_needed
) {
2987 netdev_err(dev
, "unsupported number of lanes for speed\n");
2991 if (link_info
->req_link_speed
== fw_speed
&&
2992 link_info
->req_signal_mode
== sig_mode
&&
2993 link_info
->autoneg
== 0)
2996 link_info
->req_link_speed
= fw_speed
;
2997 link_info
->req_signal_mode
= sig_mode
;
2998 link_info
->req_duplex
= BNXT_LINK_DUPLEX_FULL
;
2999 link_info
->autoneg
= 0;
3000 link_info
->advertising
= 0;
3001 link_info
->advertising_pam4
= 0;
3006 u16
bnxt_get_fw_auto_link_speeds(const unsigned long *mode
)
3008 u16 fw_speed_mask
= 0;
3010 if (linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT
, mode
) ||
3011 linkmode_test_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT
, mode
))
3012 fw_speed_mask
|= BNXT_LINK_SPEED_MSK_100MB
;
3014 if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT
, mode
) ||
3015 linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT
, mode
))
3016 fw_speed_mask
|= BNXT_LINK_SPEED_MSK_1GB
;
3018 if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT
, mode
))
3019 fw_speed_mask
|= BNXT_LINK_SPEED_MSK_10GB
;
3021 if (linkmode_test_bit(ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT
, mode
))
3022 fw_speed_mask
|= BNXT_LINK_SPEED_MSK_40GB
;
3024 return fw_speed_mask
;
3027 static int bnxt_set_link_ksettings(struct net_device
*dev
,
3028 const struct ethtool_link_ksettings
*lk_ksettings
)
3030 struct bnxt
*bp
= netdev_priv(dev
);
3031 struct bnxt_link_info
*link_info
= &bp
->link_info
;
3032 const struct ethtool_link_settings
*base
= &lk_ksettings
->base
;
3033 bool set_pause
= false;
3034 u32 speed
, lanes
= 0;
3037 if (!BNXT_PHY_CFG_ABLE(bp
))
3040 mutex_lock(&bp
->link_lock
);
3041 if (base
->autoneg
== AUTONEG_ENABLE
) {
3042 bnxt_set_ethtool_speeds(link_info
,
3043 lk_ksettings
->link_modes
.advertising
);
3044 link_info
->autoneg
|= BNXT_AUTONEG_SPEED
;
3045 if (!link_info
->advertising
&& !link_info
->advertising_pam4
) {
3046 link_info
->advertising
= link_info
->support_auto_speeds
;
3047 link_info
->advertising_pam4
=
3048 link_info
->support_pam4_auto_speeds
;
3050 /* any change to autoneg will cause link change, therefore the
3051 * driver should put back the original pause setting in autoneg
3053 if (!(bp
->phy_flags
& BNXT_PHY_FL_NO_PAUSE
))
3056 u8 phy_type
= link_info
->phy_type
;
3058 if (phy_type
== PORT_PHY_QCFG_RESP_PHY_TYPE_BASET
||
3059 phy_type
== PORT_PHY_QCFG_RESP_PHY_TYPE_BASETE
||
3060 link_info
->media_type
== PORT_PHY_QCFG_RESP_MEDIA_TYPE_TP
) {
3061 netdev_err(dev
, "10GBase-T devices must autoneg\n");
3063 goto set_setting_exit
;
3065 if (base
->duplex
== DUPLEX_HALF
) {
3066 netdev_err(dev
, "HALF DUPLEX is not supported!\n");
3068 goto set_setting_exit
;
3070 speed
= base
->speed
;
3071 lanes
= lk_ksettings
->lanes
;
3072 rc
= bnxt_force_link_speed(dev
, speed
, lanes
);
3074 if (rc
== -EALREADY
)
3076 goto set_setting_exit
;
3080 if (netif_running(dev
))
3081 rc
= bnxt_hwrm_set_link_setting(bp
, set_pause
, false);
3084 mutex_unlock(&bp
->link_lock
);
3088 static int bnxt_get_fecparam(struct net_device
*dev
,
3089 struct ethtool_fecparam
*fec
)
3091 struct bnxt
*bp
= netdev_priv(dev
);
3092 struct bnxt_link_info
*link_info
;
3096 link_info
= &bp
->link_info
;
3097 fec_cfg
= link_info
->fec_cfg
;
3098 active_fec
= link_info
->active_fec_sig_mode
&
3099 PORT_PHY_QCFG_RESP_ACTIVE_FEC_MASK
;
3100 if (fec_cfg
& BNXT_FEC_NONE
) {
3101 fec
->fec
= ETHTOOL_FEC_NONE
;
3102 fec
->active_fec
= ETHTOOL_FEC_NONE
;
3105 if (fec_cfg
& BNXT_FEC_AUTONEG
)
3106 fec
->fec
|= ETHTOOL_FEC_AUTO
;
3107 if (fec_cfg
& BNXT_FEC_ENC_BASE_R
)
3108 fec
->fec
|= ETHTOOL_FEC_BASER
;
3109 if (fec_cfg
& BNXT_FEC_ENC_RS
)
3110 fec
->fec
|= ETHTOOL_FEC_RS
;
3111 if (fec_cfg
& BNXT_FEC_ENC_LLRS
)
3112 fec
->fec
|= ETHTOOL_FEC_LLRS
;
3114 switch (active_fec
) {
3115 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE74_ACTIVE
:
3116 fec
->active_fec
|= ETHTOOL_FEC_BASER
;
3118 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_CLAUSE91_ACTIVE
:
3119 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_1XN_ACTIVE
:
3120 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS544_IEEE_ACTIVE
:
3121 fec
->active_fec
|= ETHTOOL_FEC_RS
;
3123 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_1XN_ACTIVE
:
3124 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_RS272_IEEE_ACTIVE
:
3125 fec
->active_fec
|= ETHTOOL_FEC_LLRS
;
3127 case PORT_PHY_QCFG_RESP_ACTIVE_FEC_FEC_NONE_ACTIVE
:
3128 fec
->active_fec
|= ETHTOOL_FEC_OFF
;
3134 static void bnxt_get_fec_stats(struct net_device
*dev
,
3135 struct ethtool_fec_stats
*fec_stats
)
3137 struct bnxt
*bp
= netdev_priv(dev
);
3140 if (BNXT_VF(bp
) || !(bp
->flags
& BNXT_FLAG_PORT_STATS_EXT
))
3143 rx
= bp
->rx_port_stats_ext
.sw_stats
;
3144 fec_stats
->corrected_bits
.total
=
3145 *(rx
+ BNXT_RX_STATS_EXT_OFFSET(rx_corrected_bits
));
3147 if (bp
->fw_rx_stats_ext_size
<= BNXT_RX_STATS_EXT_NUM_LEGACY
)
3150 fec_stats
->corrected_blocks
.total
=
3151 *(rx
+ BNXT_RX_STATS_EXT_OFFSET(rx_fec_corrected_blocks
));
3152 fec_stats
->uncorrectable_blocks
.total
=
3153 *(rx
+ BNXT_RX_STATS_EXT_OFFSET(rx_fec_uncorrectable_blocks
));
3156 static u32
bnxt_ethtool_forced_fec_to_fw(struct bnxt_link_info
*link_info
,
3159 u32 fw_fec
= PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE
;
3161 if (fec
& ETHTOOL_FEC_BASER
)
3162 fw_fec
|= BNXT_FEC_BASE_R_ON(link_info
);
3163 else if (fec
& ETHTOOL_FEC_RS
)
3164 fw_fec
|= BNXT_FEC_RS_ON(link_info
);
3165 else if (fec
& ETHTOOL_FEC_LLRS
)
3166 fw_fec
|= BNXT_FEC_LLRS_ON
;
3170 static int bnxt_set_fecparam(struct net_device
*dev
,
3171 struct ethtool_fecparam
*fecparam
)
3173 struct hwrm_port_phy_cfg_input
*req
;
3174 struct bnxt
*bp
= netdev_priv(dev
);
3175 struct bnxt_link_info
*link_info
;
3176 u32 new_cfg
, fec
= fecparam
->fec
;
3180 link_info
= &bp
->link_info
;
3181 fec_cfg
= link_info
->fec_cfg
;
3182 if (fec_cfg
& BNXT_FEC_NONE
)
3185 if (fec
& ETHTOOL_FEC_OFF
) {
3186 new_cfg
= PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_DISABLE
|
3187 BNXT_FEC_ALL_OFF(link_info
);
3190 if (((fec
& ETHTOOL_FEC_AUTO
) && !(fec_cfg
& BNXT_FEC_AUTONEG_CAP
)) ||
3191 ((fec
& ETHTOOL_FEC_RS
) && !(fec_cfg
& BNXT_FEC_ENC_RS_CAP
)) ||
3192 ((fec
& ETHTOOL_FEC_LLRS
) && !(fec_cfg
& BNXT_FEC_ENC_LLRS_CAP
)) ||
3193 ((fec
& ETHTOOL_FEC_BASER
) && !(fec_cfg
& BNXT_FEC_ENC_BASE_R_CAP
)))
3196 if (fec
& ETHTOOL_FEC_AUTO
) {
3197 if (!link_info
->autoneg
)
3199 new_cfg
= PORT_PHY_CFG_REQ_FLAGS_FEC_AUTONEG_ENABLE
;
3201 new_cfg
= bnxt_ethtool_forced_fec_to_fw(link_info
, fec
);
3205 rc
= hwrm_req_init(bp
, req
, HWRM_PORT_PHY_CFG
);
3208 req
->flags
= cpu_to_le32(new_cfg
| PORT_PHY_CFG_REQ_FLAGS_RESET_PHY
);
3209 rc
= hwrm_req_send(bp
, req
);
3210 /* update current settings */
3212 mutex_lock(&bp
->link_lock
);
3213 bnxt_update_link(bp
, false);
3214 mutex_unlock(&bp
->link_lock
);
3219 static void bnxt_get_pauseparam(struct net_device
*dev
,
3220 struct ethtool_pauseparam
*epause
)
3222 struct bnxt
*bp
= netdev_priv(dev
);
3223 struct bnxt_link_info
*link_info
= &bp
->link_info
;
3227 epause
->autoneg
= !!(link_info
->autoneg
& BNXT_AUTONEG_FLOW_CTRL
);
3228 epause
->rx_pause
= !!(link_info
->req_flow_ctrl
& BNXT_LINK_PAUSE_RX
);
3229 epause
->tx_pause
= !!(link_info
->req_flow_ctrl
& BNXT_LINK_PAUSE_TX
);
3232 static void bnxt_get_pause_stats(struct net_device
*dev
,
3233 struct ethtool_pause_stats
*epstat
)
3235 struct bnxt
*bp
= netdev_priv(dev
);
3238 if (BNXT_VF(bp
) || !(bp
->flags
& BNXT_FLAG_PORT_STATS
))
3241 rx
= bp
->port_stats
.sw_stats
;
3242 tx
= bp
->port_stats
.sw_stats
+ BNXT_TX_PORT_STATS_BYTE_OFFSET
/ 8;
3244 epstat
->rx_pause_frames
= BNXT_GET_RX_PORT_STATS64(rx
, rx_pause_frames
);
3245 epstat
->tx_pause_frames
= BNXT_GET_TX_PORT_STATS64(tx
, tx_pause_frames
);
3248 static int bnxt_set_pauseparam(struct net_device
*dev
,
3249 struct ethtool_pauseparam
*epause
)
3252 struct bnxt
*bp
= netdev_priv(dev
);
3253 struct bnxt_link_info
*link_info
= &bp
->link_info
;
3255 if (!BNXT_PHY_CFG_ABLE(bp
) || (bp
->phy_flags
& BNXT_PHY_FL_NO_PAUSE
))
3258 mutex_lock(&bp
->link_lock
);
3259 if (epause
->autoneg
) {
3260 if (!(link_info
->autoneg
& BNXT_AUTONEG_SPEED
)) {
3265 link_info
->autoneg
|= BNXT_AUTONEG_FLOW_CTRL
;
3266 link_info
->req_flow_ctrl
= 0;
3268 /* when transition from auto pause to force pause,
3269 * force a link change
3271 if (link_info
->autoneg
& BNXT_AUTONEG_FLOW_CTRL
)
3272 link_info
->force_link_chng
= true;
3273 link_info
->autoneg
&= ~BNXT_AUTONEG_FLOW_CTRL
;
3274 link_info
->req_flow_ctrl
= 0;
3276 if (epause
->rx_pause
)
3277 link_info
->req_flow_ctrl
|= BNXT_LINK_PAUSE_RX
;
3279 if (epause
->tx_pause
)
3280 link_info
->req_flow_ctrl
|= BNXT_LINK_PAUSE_TX
;
3282 if (netif_running(dev
))
3283 rc
= bnxt_hwrm_set_pause(bp
);
3286 mutex_unlock(&bp
->link_lock
);
3290 static u32
bnxt_get_link(struct net_device
*dev
)
3292 struct bnxt
*bp
= netdev_priv(dev
);
3294 /* TODO: handle MF, VF, driver close case */
3295 return BNXT_LINK_IS_UP(bp
);
3298 int bnxt_hwrm_nvm_get_dev_info(struct bnxt
*bp
,
3299 struct hwrm_nvm_get_dev_info_output
*nvm_dev_info
)
3301 struct hwrm_nvm_get_dev_info_output
*resp
;
3302 struct hwrm_nvm_get_dev_info_input
*req
;
3308 rc
= hwrm_req_init(bp
, req
, HWRM_NVM_GET_DEV_INFO
);
3312 resp
= hwrm_req_hold(bp
, req
);
3313 rc
= hwrm_req_send(bp
, req
);
3315 memcpy(nvm_dev_info
, resp
, sizeof(*resp
));
3316 hwrm_req_drop(bp
, req
);
3320 static void bnxt_print_admin_err(struct bnxt
*bp
)
3322 netdev_info(bp
->dev
, "PF does not have admin privileges to flash or reset the device\n");
3325 int bnxt_find_nvram_item(struct net_device
*dev
, u16 type
, u16 ordinal
,
3326 u16 ext
, u16
*index
, u32
*item_length
,
3329 int bnxt_flash_nvram(struct net_device
*dev
, u16 dir_type
,
3330 u16 dir_ordinal
, u16 dir_ext
, u16 dir_attr
,
3331 u32 dir_item_len
, const u8
*data
,
3334 struct bnxt
*bp
= netdev_priv(dev
);
3335 struct hwrm_nvm_write_input
*req
;
3338 rc
= hwrm_req_init(bp
, req
, HWRM_NVM_WRITE
);
3342 if (data_len
&& data
) {
3343 dma_addr_t dma_handle
;
3346 kmem
= hwrm_req_dma_slice(bp
, req
, data_len
, &dma_handle
);
3348 hwrm_req_drop(bp
, req
);
3352 req
->dir_data_length
= cpu_to_le32(data_len
);
3354 memcpy(kmem
, data
, data_len
);
3355 req
->host_src_addr
= cpu_to_le64(dma_handle
);
3358 hwrm_req_timeout(bp
, req
, bp
->hwrm_cmd_max_timeout
);
3359 req
->dir_type
= cpu_to_le16(dir_type
);
3360 req
->dir_ordinal
= cpu_to_le16(dir_ordinal
);
3361 req
->dir_ext
= cpu_to_le16(dir_ext
);
3362 req
->dir_attr
= cpu_to_le16(dir_attr
);
3363 req
->dir_item_length
= cpu_to_le32(dir_item_len
);
3364 rc
= hwrm_req_send(bp
, req
);
3367 bnxt_print_admin_err(bp
);
3371 int bnxt_hwrm_firmware_reset(struct net_device
*dev
, u8 proc_type
,
3372 u8 self_reset
, u8 flags
)
3374 struct bnxt
*bp
= netdev_priv(dev
);
3375 struct hwrm_fw_reset_input
*req
;
3378 if (!bnxt_hwrm_reset_permitted(bp
)) {
3379 netdev_warn(bp
->dev
, "Reset denied by firmware, it may be inhibited by remote driver");
3383 rc
= hwrm_req_init(bp
, req
, HWRM_FW_RESET
);
3387 req
->embedded_proc_type
= proc_type
;
3388 req
->selfrst_status
= self_reset
;
3391 if (proc_type
== FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP
) {
3392 rc
= hwrm_req_send_silent(bp
, req
);
3394 rc
= hwrm_req_send(bp
, req
);
3396 bnxt_print_admin_err(bp
);
3401 static int bnxt_firmware_reset(struct net_device
*dev
,
3402 enum bnxt_nvm_directory_type dir_type
)
3404 u8 self_reset
= FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE
;
3405 u8 proc_type
, flags
= 0;
3407 /* TODO: Address self-reset of APE/KONG/BONO/TANG or ungraceful reset */
3408 /* (e.g. when firmware isn't already running) */
3410 case BNX_DIR_TYPE_CHIMP_PATCH
:
3411 case BNX_DIR_TYPE_BOOTCODE
:
3412 case BNX_DIR_TYPE_BOOTCODE_2
:
3413 proc_type
= FW_RESET_REQ_EMBEDDED_PROC_TYPE_BOOT
;
3414 /* Self-reset ChiMP upon next PCIe reset: */
3415 self_reset
= FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST
;
3417 case BNX_DIR_TYPE_APE_FW
:
3418 case BNX_DIR_TYPE_APE_PATCH
:
3419 proc_type
= FW_RESET_REQ_EMBEDDED_PROC_TYPE_MGMT
;
3420 /* Self-reset APE upon next PCIe reset: */
3421 self_reset
= FW_RESET_REQ_SELFRST_STATUS_SELFRSTPCIERST
;
3423 case BNX_DIR_TYPE_KONG_FW
:
3424 case BNX_DIR_TYPE_KONG_PATCH
:
3425 proc_type
= FW_RESET_REQ_EMBEDDED_PROC_TYPE_NETCTRL
;
3427 case BNX_DIR_TYPE_BONO_FW
:
3428 case BNX_DIR_TYPE_BONO_PATCH
:
3429 proc_type
= FW_RESET_REQ_EMBEDDED_PROC_TYPE_ROCE
;
3435 return bnxt_hwrm_firmware_reset(dev
, proc_type
, self_reset
, flags
);
3438 static int bnxt_firmware_reset_chip(struct net_device
*dev
)
3440 struct bnxt
*bp
= netdev_priv(dev
);
3443 if (bp
->fw_cap
& BNXT_FW_CAP_HOT_RESET
)
3444 flags
= FW_RESET_REQ_FLAGS_RESET_GRACEFUL
;
3446 return bnxt_hwrm_firmware_reset(dev
,
3447 FW_RESET_REQ_EMBEDDED_PROC_TYPE_CHIP
,
3448 FW_RESET_REQ_SELFRST_STATUS_SELFRSTASAP
,
3452 static int bnxt_firmware_reset_ap(struct net_device
*dev
)
3454 return bnxt_hwrm_firmware_reset(dev
, FW_RESET_REQ_EMBEDDED_PROC_TYPE_AP
,
3455 FW_RESET_REQ_SELFRST_STATUS_SELFRSTNONE
,
3459 static int bnxt_flash_firmware(struct net_device
*dev
,
3468 struct bnxt_fw_header
*header
= (struct bnxt_fw_header
*)fw_data
;
3471 case BNX_DIR_TYPE_BOOTCODE
:
3472 case BNX_DIR_TYPE_BOOTCODE_2
:
3473 code_type
= CODE_BOOT
;
3475 case BNX_DIR_TYPE_CHIMP_PATCH
:
3476 code_type
= CODE_CHIMP_PATCH
;
3478 case BNX_DIR_TYPE_APE_FW
:
3479 code_type
= CODE_MCTP_PASSTHRU
;
3481 case BNX_DIR_TYPE_APE_PATCH
:
3482 code_type
= CODE_APE_PATCH
;
3484 case BNX_DIR_TYPE_KONG_FW
:
3485 code_type
= CODE_KONG_FW
;
3487 case BNX_DIR_TYPE_KONG_PATCH
:
3488 code_type
= CODE_KONG_PATCH
;
3490 case BNX_DIR_TYPE_BONO_FW
:
3491 code_type
= CODE_BONO_FW
;
3493 case BNX_DIR_TYPE_BONO_PATCH
:
3494 code_type
= CODE_BONO_PATCH
;
3497 netdev_err(dev
, "Unsupported directory entry type: %u\n",
3501 if (fw_size
< sizeof(struct bnxt_fw_header
)) {
3502 netdev_err(dev
, "Invalid firmware file size: %u\n",
3503 (unsigned int)fw_size
);
3506 if (header
->signature
!= cpu_to_le32(BNXT_FIRMWARE_BIN_SIGNATURE
)) {
3507 netdev_err(dev
, "Invalid firmware signature: %08X\n",
3508 le32_to_cpu(header
->signature
));
3511 if (header
->code_type
!= code_type
) {
3512 netdev_err(dev
, "Expected firmware type: %d, read: %d\n",
3513 code_type
, header
->code_type
);
3516 if (header
->device
!= DEVICE_CUMULUS_FAMILY
) {
3517 netdev_err(dev
, "Expected firmware device family %d, read: %d\n",
3518 DEVICE_CUMULUS_FAMILY
, header
->device
);
3521 /* Confirm the CRC32 checksum of the file: */
3522 stored_crc
= le32_to_cpu(*(__le32
*)(fw_data
+ fw_size
-
3523 sizeof(stored_crc
)));
3524 calculated_crc
= ~crc32(~0, fw_data
, fw_size
- sizeof(stored_crc
));
3525 if (calculated_crc
!= stored_crc
) {
3526 netdev_err(dev
, "Firmware file CRC32 checksum (%08lX) does not match calculated checksum (%08lX)\n",
3527 (unsigned long)stored_crc
,
3528 (unsigned long)calculated_crc
);
3531 rc
= bnxt_flash_nvram(dev
, dir_type
, BNX_DIR_ORDINAL_FIRST
,
3532 0, 0, 0, fw_data
, fw_size
);
3533 if (rc
== 0) /* Firmware update successful */
3534 rc
= bnxt_firmware_reset(dev
, dir_type
);
3539 static int bnxt_flash_microcode(struct net_device
*dev
,
3544 struct bnxt_ucode_trailer
*trailer
;
3549 if (fw_size
< sizeof(struct bnxt_ucode_trailer
)) {
3550 netdev_err(dev
, "Invalid microcode file size: %u\n",
3551 (unsigned int)fw_size
);
3554 trailer
= (struct bnxt_ucode_trailer
*)(fw_data
+ (fw_size
-
3556 if (trailer
->sig
!= cpu_to_le32(BNXT_UCODE_TRAILER_SIGNATURE
)) {
3557 netdev_err(dev
, "Invalid microcode trailer signature: %08X\n",
3558 le32_to_cpu(trailer
->sig
));
3561 if (le16_to_cpu(trailer
->dir_type
) != dir_type
) {
3562 netdev_err(dev
, "Expected microcode type: %d, read: %d\n",
3563 dir_type
, le16_to_cpu(trailer
->dir_type
));
3566 if (le16_to_cpu(trailer
->trailer_length
) <
3567 sizeof(struct bnxt_ucode_trailer
)) {
3568 netdev_err(dev
, "Invalid microcode trailer length: %d\n",
3569 le16_to_cpu(trailer
->trailer_length
));
3573 /* Confirm the CRC32 checksum of the file: */
3574 stored_crc
= le32_to_cpu(*(__le32
*)(fw_data
+ fw_size
-
3575 sizeof(stored_crc
)));
3576 calculated_crc
= ~crc32(~0, fw_data
, fw_size
- sizeof(stored_crc
));
3577 if (calculated_crc
!= stored_crc
) {
3579 "CRC32 (%08lX) does not match calculated: %08lX\n",
3580 (unsigned long)stored_crc
,
3581 (unsigned long)calculated_crc
);
3584 rc
= bnxt_flash_nvram(dev
, dir_type
, BNX_DIR_ORDINAL_FIRST
,
3585 0, 0, 0, fw_data
, fw_size
);
3590 static bool bnxt_dir_type_is_ape_bin_format(u16 dir_type
)
3593 case BNX_DIR_TYPE_CHIMP_PATCH
:
3594 case BNX_DIR_TYPE_BOOTCODE
:
3595 case BNX_DIR_TYPE_BOOTCODE_2
:
3596 case BNX_DIR_TYPE_APE_FW
:
3597 case BNX_DIR_TYPE_APE_PATCH
:
3598 case BNX_DIR_TYPE_KONG_FW
:
3599 case BNX_DIR_TYPE_KONG_PATCH
:
3600 case BNX_DIR_TYPE_BONO_FW
:
3601 case BNX_DIR_TYPE_BONO_PATCH
:
3608 static bool bnxt_dir_type_is_other_exec_format(u16 dir_type
)
3611 case BNX_DIR_TYPE_AVS
:
3612 case BNX_DIR_TYPE_EXP_ROM_MBA
:
3613 case BNX_DIR_TYPE_PCIE
:
3614 case BNX_DIR_TYPE_TSCF_UCODE
:
3615 case BNX_DIR_TYPE_EXT_PHY
:
3616 case BNX_DIR_TYPE_CCM
:
3617 case BNX_DIR_TYPE_ISCSI_BOOT
:
3618 case BNX_DIR_TYPE_ISCSI_BOOT_IPV6
:
3619 case BNX_DIR_TYPE_ISCSI_BOOT_IPV4N6
:
3626 static bool bnxt_dir_type_is_executable(u16 dir_type
)
3628 return bnxt_dir_type_is_ape_bin_format(dir_type
) ||
3629 bnxt_dir_type_is_other_exec_format(dir_type
);
3632 static int bnxt_flash_firmware_from_file(struct net_device
*dev
,
3634 const char *filename
)
3636 const struct firmware
*fw
;
3639 rc
= request_firmware(&fw
, filename
, &dev
->dev
);
3641 netdev_err(dev
, "Error %d requesting firmware file: %s\n",
3645 if (bnxt_dir_type_is_ape_bin_format(dir_type
))
3646 rc
= bnxt_flash_firmware(dev
, dir_type
, fw
->data
, fw
->size
);
3647 else if (bnxt_dir_type_is_other_exec_format(dir_type
))
3648 rc
= bnxt_flash_microcode(dev
, dir_type
, fw
->data
, fw
->size
);
3650 rc
= bnxt_flash_nvram(dev
, dir_type
, BNX_DIR_ORDINAL_FIRST
,
3651 0, 0, 0, fw
->data
, fw
->size
);
3652 release_firmware(fw
);
3656 #define MSG_INTEGRITY_ERR "PKG install error : Data integrity on NVM"
3657 #define MSG_INVALID_PKG "PKG install error : Invalid package"
3658 #define MSG_AUTHENTICATION_ERR "PKG install error : Authentication error"
3659 #define MSG_INVALID_DEV "PKG install error : Invalid device"
3660 #define MSG_INTERNAL_ERR "PKG install error : Internal error"
3661 #define MSG_NO_PKG_UPDATE_AREA_ERR "PKG update area not created in nvram"
3662 #define MSG_NO_SPACE_ERR "PKG insufficient update area in nvram"
3663 #define MSG_RESIZE_UPDATE_ERR "Resize UPDATE entry error"
3664 #define MSG_ANTI_ROLLBACK_ERR "HWRM_NVM_INSTALL_UPDATE failure due to Anti-rollback detected"
3665 #define MSG_GENERIC_FAILURE_ERR "HWRM_NVM_INSTALL_UPDATE failure"
3667 static int nvm_update_err_to_stderr(struct net_device
*dev
, u8 result
,
3668 struct netlink_ext_ack
*extack
)
3671 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TYPE_PARAMETER
:
3672 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_INDEX_PARAMETER
:
3673 case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_DATA_ERROR
:
3674 case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_CHECKSUM_ERROR
:
3675 case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_NOT_FOUND
:
3676 case NVM_INSTALL_UPDATE_RESP_RESULT_ITEM_LOCKED
:
3677 BNXT_NVM_ERR_MSG(dev
, extack
, MSG_INTEGRITY_ERR
);
3679 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PREREQUISITE
:
3680 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_FILE_HEADER
:
3681 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_SIGNATURE
:
3682 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_STREAM
:
3683 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_PROP_LENGTH
:
3684 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_MANIFEST
:
3685 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_TRAILER
:
3686 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_CHECKSUM
:
3687 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_ITEM_CHECKSUM
:
3688 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DATA_LENGTH
:
3689 case NVM_INSTALL_UPDATE_RESP_RESULT_INVALID_DIRECTIVE
:
3690 case NVM_INSTALL_UPDATE_RESP_RESULT_DUPLICATE_ITEM
:
3691 case NVM_INSTALL_UPDATE_RESP_RESULT_ZERO_LENGTH_ITEM
:
3692 BNXT_NVM_ERR_MSG(dev
, extack
, MSG_INVALID_PKG
);
3694 case NVM_INSTALL_UPDATE_RESP_RESULT_INSTALL_AUTHENTICATION_ERROR
:
3695 BNXT_NVM_ERR_MSG(dev
, extack
, MSG_AUTHENTICATION_ERR
);
3697 case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_CHIP_REV
:
3698 case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_DEVICE_ID
:
3699 case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_VENDOR
:
3700 case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_SUBSYS_ID
:
3701 case NVM_INSTALL_UPDATE_RESP_RESULT_UNSUPPORTED_PLATFORM
:
3702 BNXT_NVM_ERR_MSG(dev
, extack
, MSG_INVALID_DEV
);
3705 BNXT_NVM_ERR_MSG(dev
, extack
, MSG_INTERNAL_ERR
);
3710 #define BNXT_PKG_DMA_SIZE 0x40000
3711 #define BNXT_NVM_MORE_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_MODE))
3712 #define BNXT_NVM_LAST_FLAG (cpu_to_le16(NVM_MODIFY_REQ_FLAGS_BATCH_LAST))
3714 static int bnxt_resize_update_entry(struct net_device
*dev
, size_t fw_size
,
3715 struct netlink_ext_ack
*extack
)
3720 rc
= bnxt_find_nvram_item(dev
, BNX_DIR_TYPE_UPDATE
,
3721 BNX_DIR_ORDINAL_FIRST
, BNX_DIR_EXT_NONE
, NULL
,
3724 BNXT_NVM_ERR_MSG(dev
, extack
, MSG_NO_PKG_UPDATE_AREA_ERR
);
3728 if (fw_size
> item_len
) {
3729 rc
= bnxt_flash_nvram(dev
, BNX_DIR_TYPE_UPDATE
,
3730 BNX_DIR_ORDINAL_FIRST
, 0, 1,
3731 round_up(fw_size
, 4096), NULL
, 0);
3733 BNXT_NVM_ERR_MSG(dev
, extack
, MSG_RESIZE_UPDATE_ERR
);
3740 int bnxt_flash_package_from_fw_obj(struct net_device
*dev
, const struct firmware
*fw
,
3741 u32 install_type
, struct netlink_ext_ack
*extack
)
3743 struct hwrm_nvm_install_update_input
*install
;
3744 struct hwrm_nvm_install_update_output
*resp
;
3745 struct hwrm_nvm_modify_input
*modify
;
3746 struct bnxt
*bp
= netdev_priv(dev
);
3747 bool defrag_attempted
= false;
3748 dma_addr_t dma_handle
;
3756 /* resize before flashing larger image than available space */
3757 rc
= bnxt_resize_update_entry(dev
, fw
->size
, extack
);
3761 bnxt_hwrm_fw_set_time(bp
);
3763 rc
= hwrm_req_init(bp
, modify
, HWRM_NVM_MODIFY
);
3767 /* Try allocating a large DMA buffer first. Older fw will
3768 * cause excessive NVRAM erases when using small blocks.
3770 modify_len
= roundup_pow_of_two(fw
->size
);
3771 modify_len
= min_t(u32
, modify_len
, BNXT_PKG_DMA_SIZE
);
3773 kmem
= hwrm_req_dma_slice(bp
, modify
, modify_len
, &dma_handle
);
3774 if (!kmem
&& modify_len
> PAGE_SIZE
)
3780 hwrm_req_drop(bp
, modify
);
3784 rc
= hwrm_req_init(bp
, install
, HWRM_NVM_INSTALL_UPDATE
);
3786 hwrm_req_drop(bp
, modify
);
3790 hwrm_req_timeout(bp
, modify
, bp
->hwrm_cmd_max_timeout
);
3791 hwrm_req_timeout(bp
, install
, bp
->hwrm_cmd_max_timeout
);
3793 hwrm_req_hold(bp
, modify
);
3794 modify
->host_src_addr
= cpu_to_le64(dma_handle
);
3796 resp
= hwrm_req_hold(bp
, install
);
3797 if ((install_type
& 0xffff) == 0)
3798 install_type
>>= 16;
3799 install
->install_type
= cpu_to_le32(install_type
);
3802 u32 copied
= 0, len
= modify_len
;
3804 rc
= bnxt_find_nvram_item(dev
, BNX_DIR_TYPE_UPDATE
,
3805 BNX_DIR_ORDINAL_FIRST
,
3807 &index
, &item_len
, NULL
);
3809 BNXT_NVM_ERR_MSG(dev
, extack
, MSG_NO_PKG_UPDATE_AREA_ERR
);
3812 if (fw
->size
> item_len
) {
3813 BNXT_NVM_ERR_MSG(dev
, extack
, MSG_NO_SPACE_ERR
);
3818 modify
->dir_idx
= cpu_to_le16(index
);
3820 if (fw
->size
> modify_len
)
3821 modify
->flags
= BNXT_NVM_MORE_FLAG
;
3822 while (copied
< fw
->size
) {
3823 u32 balance
= fw
->size
- copied
;
3825 if (balance
<= modify_len
) {
3828 modify
->flags
|= BNXT_NVM_LAST_FLAG
;
3830 memcpy(kmem
, fw
->data
+ copied
, len
);
3831 modify
->len
= cpu_to_le32(len
);
3832 modify
->offset
= cpu_to_le32(copied
);
3833 rc
= hwrm_req_send(bp
, modify
);
3839 rc
= hwrm_req_send_silent(bp
, install
);
3843 if (defrag_attempted
) {
3844 /* We have tried to defragment already in the previous
3845 * iteration. Return with the result for INSTALL_UPDATE
3850 cmd_err
= ((struct hwrm_err_output
*)resp
)->cmd_err
;
3853 case NVM_INSTALL_UPDATE_CMD_ERR_CODE_ANTI_ROLLBACK
:
3854 BNXT_NVM_ERR_MSG(dev
, extack
, MSG_ANTI_ROLLBACK_ERR
);
3857 case NVM_INSTALL_UPDATE_CMD_ERR_CODE_FRAG_ERR
:
3859 cpu_to_le16(NVM_INSTALL_UPDATE_REQ_FLAGS_ALLOWED_TO_DEFRAG
);
3861 rc
= hwrm_req_send_silent(bp
, install
);
3865 cmd_err
= ((struct hwrm_err_output
*)resp
)->cmd_err
;
3867 if (cmd_err
== NVM_INSTALL_UPDATE_CMD_ERR_CODE_NO_SPACE
) {
3868 /* FW has cleared NVM area, driver will create
3869 * UPDATE directory and try the flash again
3871 defrag_attempted
= true;
3873 rc
= bnxt_flash_nvram(bp
->dev
,
3874 BNX_DIR_TYPE_UPDATE
,
3875 BNX_DIR_ORDINAL_FIRST
,
3876 0, 0, item_len
, NULL
, 0);
3882 BNXT_NVM_ERR_MSG(dev
, extack
, MSG_GENERIC_FAILURE_ERR
);
3884 } while (defrag_attempted
&& !rc
);
3887 hwrm_req_drop(bp
, modify
);
3888 hwrm_req_drop(bp
, install
);
3891 netdev_err(dev
, "PKG install error = %d, problem_item = %d\n",
3892 (s8
)resp
->result
, (int)resp
->problem_item
);
3893 rc
= nvm_update_err_to_stderr(dev
, resp
->result
, extack
);
3896 bnxt_print_admin_err(bp
);
3900 static int bnxt_flash_package_from_file(struct net_device
*dev
, const char *filename
,
3901 u32 install_type
, struct netlink_ext_ack
*extack
)
3903 const struct firmware
*fw
;
3906 rc
= request_firmware(&fw
, filename
, &dev
->dev
);
3908 netdev_err(dev
, "PKG error %d requesting file: %s\n",
3913 rc
= bnxt_flash_package_from_fw_obj(dev
, fw
, install_type
, extack
);
3915 release_firmware(fw
);
3920 static int bnxt_flash_device(struct net_device
*dev
,
3921 struct ethtool_flash
*flash
)
3923 if (!BNXT_PF((struct bnxt
*)netdev_priv(dev
))) {
3924 netdev_err(dev
, "flashdev not supported from a virtual function\n");
3928 if (flash
->region
== ETHTOOL_FLASH_ALL_REGIONS
||
3929 flash
->region
> 0xffff)
3930 return bnxt_flash_package_from_file(dev
, flash
->data
,
3931 flash
->region
, NULL
);
3933 return bnxt_flash_firmware_from_file(dev
, flash
->region
, flash
->data
);
3936 static int nvm_get_dir_info(struct net_device
*dev
, u32
*entries
, u32
*length
)
3938 struct hwrm_nvm_get_dir_info_output
*output
;
3939 struct hwrm_nvm_get_dir_info_input
*req
;
3940 struct bnxt
*bp
= netdev_priv(dev
);
3943 rc
= hwrm_req_init(bp
, req
, HWRM_NVM_GET_DIR_INFO
);
3947 output
= hwrm_req_hold(bp
, req
);
3948 rc
= hwrm_req_send(bp
, req
);
3950 *entries
= le32_to_cpu(output
->entries
);
3951 *length
= le32_to_cpu(output
->entry_length
);
3953 hwrm_req_drop(bp
, req
);
3957 static int bnxt_get_eeprom_len(struct net_device
*dev
)
3959 struct bnxt
*bp
= netdev_priv(dev
);
3964 /* The -1 return value allows the entire 32-bit range of offsets to be
3965 * passed via the ethtool command-line utility.
3970 static int bnxt_get_nvram_directory(struct net_device
*dev
, u32 len
, u8
*data
)
3972 struct bnxt
*bp
= netdev_priv(dev
);
3978 dma_addr_t dma_handle
;
3979 struct hwrm_nvm_get_dir_entries_input
*req
;
3981 rc
= nvm_get_dir_info(dev
, &dir_entries
, &entry_length
);
3985 if (!dir_entries
|| !entry_length
)
3988 /* Insert 2 bytes of directory info (count and size of entries) */
3992 *data
++ = dir_entries
;
3993 *data
++ = entry_length
;
3995 memset(data
, 0xff, len
);
3997 rc
= hwrm_req_init(bp
, req
, HWRM_NVM_GET_DIR_ENTRIES
);
4001 buflen
= mul_u32_u32(dir_entries
, entry_length
);
4002 buf
= hwrm_req_dma_slice(bp
, req
, buflen
, &dma_handle
);
4004 hwrm_req_drop(bp
, req
);
4007 req
->host_dest_addr
= cpu_to_le64(dma_handle
);
4009 hwrm_req_hold(bp
, req
); /* hold the slice */
4010 rc
= hwrm_req_send(bp
, req
);
4012 memcpy(data
, buf
, len
> buflen
? buflen
: len
);
4013 hwrm_req_drop(bp
, req
);
4017 int bnxt_get_nvram_item(struct net_device
*dev
, u32 index
, u32 offset
,
4018 u32 length
, u8
*data
)
4020 struct bnxt
*bp
= netdev_priv(dev
);
4023 dma_addr_t dma_handle
;
4024 struct hwrm_nvm_read_input
*req
;
4029 rc
= hwrm_req_init(bp
, req
, HWRM_NVM_READ
);
4033 buf
= hwrm_req_dma_slice(bp
, req
, length
, &dma_handle
);
4035 hwrm_req_drop(bp
, req
);
4039 req
->host_dest_addr
= cpu_to_le64(dma_handle
);
4040 req
->dir_idx
= cpu_to_le16(index
);
4041 req
->offset
= cpu_to_le32(offset
);
4042 req
->len
= cpu_to_le32(length
);
4044 hwrm_req_hold(bp
, req
); /* hold the slice */
4045 rc
= hwrm_req_send(bp
, req
);
4047 memcpy(data
, buf
, length
);
4048 hwrm_req_drop(bp
, req
);
4052 int bnxt_find_nvram_item(struct net_device
*dev
, u16 type
, u16 ordinal
,
4053 u16 ext
, u16
*index
, u32
*item_length
,
4056 struct hwrm_nvm_find_dir_entry_output
*output
;
4057 struct hwrm_nvm_find_dir_entry_input
*req
;
4058 struct bnxt
*bp
= netdev_priv(dev
);
4061 rc
= hwrm_req_init(bp
, req
, HWRM_NVM_FIND_DIR_ENTRY
);
4067 req
->dir_type
= cpu_to_le16(type
);
4068 req
->dir_ordinal
= cpu_to_le16(ordinal
);
4069 req
->dir_ext
= cpu_to_le16(ext
);
4070 req
->opt_ordinal
= NVM_FIND_DIR_ENTRY_REQ_OPT_ORDINAL_EQ
;
4071 output
= hwrm_req_hold(bp
, req
);
4072 rc
= hwrm_req_send_silent(bp
, req
);
4075 *index
= le16_to_cpu(output
->dir_idx
);
4077 *item_length
= le32_to_cpu(output
->dir_item_length
);
4079 *data_length
= le32_to_cpu(output
->dir_data_length
);
4081 hwrm_req_drop(bp
, req
);
4085 static char *bnxt_parse_pkglog(int desired_field
, u8
*data
, size_t datalen
)
4087 char *retval
= NULL
;
4094 /* null-terminate the log data (removing last '\n'): */
4095 data
[datalen
- 1] = 0;
4096 for (p
= data
; *p
!= 0; p
++) {
4099 while (*p
!= 0 && *p
!= '\n') {
4101 while (*p
!= 0 && *p
!= '\t' && *p
!= '\n')
4103 if (field
== desired_field
)
4118 int bnxt_get_pkginfo(struct net_device
*dev
, char *ver
, int size
)
4120 struct bnxt
*bp
= netdev_priv(dev
);
4127 rc
= bnxt_find_nvram_item(dev
, BNX_DIR_TYPE_PKG_LOG
,
4128 BNX_DIR_ORDINAL_FIRST
, BNX_DIR_EXT_NONE
,
4129 &index
, NULL
, &pkglen
);
4133 pkgbuf
= kzalloc(pkglen
, GFP_KERNEL
);
4135 dev_err(&bp
->pdev
->dev
, "Unable to allocate memory for pkg version, length = %u\n",
4140 rc
= bnxt_get_nvram_item(dev
, index
, 0, pkglen
, pkgbuf
);
4144 pkgver
= bnxt_parse_pkglog(BNX_PKG_LOG_FIELD_IDX_PKG_VERSION
, pkgbuf
,
4146 if (pkgver
&& *pkgver
!= 0 && isdigit(*pkgver
))
4147 strscpy(ver
, pkgver
, size
);
4157 static void bnxt_get_pkgver(struct net_device
*dev
)
4159 struct bnxt
*bp
= netdev_priv(dev
);
4160 char buf
[FW_VER_STR_LEN
];
4163 if (!bnxt_get_pkginfo(dev
, buf
, sizeof(buf
))) {
4164 len
= strlen(bp
->fw_ver_str
);
4165 snprintf(bp
->fw_ver_str
+ len
, FW_VER_STR_LEN
- len
,
4170 static int bnxt_get_eeprom(struct net_device
*dev
,
4171 struct ethtool_eeprom
*eeprom
,
4177 if (eeprom
->offset
== 0) /* special offset value to get directory */
4178 return bnxt_get_nvram_directory(dev
, eeprom
->len
, data
);
4180 index
= eeprom
->offset
>> 24;
4181 offset
= eeprom
->offset
& 0xffffff;
4184 netdev_err(dev
, "unsupported index value: %d\n", index
);
4188 return bnxt_get_nvram_item(dev
, index
- 1, offset
, eeprom
->len
, data
);
4191 static int bnxt_erase_nvram_directory(struct net_device
*dev
, u8 index
)
4193 struct hwrm_nvm_erase_dir_entry_input
*req
;
4194 struct bnxt
*bp
= netdev_priv(dev
);
4197 rc
= hwrm_req_init(bp
, req
, HWRM_NVM_ERASE_DIR_ENTRY
);
4201 req
->dir_idx
= cpu_to_le16(index
);
4202 return hwrm_req_send(bp
, req
);
4205 static int bnxt_set_eeprom(struct net_device
*dev
,
4206 struct ethtool_eeprom
*eeprom
,
4209 struct bnxt
*bp
= netdev_priv(dev
);
4211 u16 type
, ext
, ordinal
, attr
;
4214 netdev_err(dev
, "NVM write not supported from a virtual function\n");
4218 type
= eeprom
->magic
>> 16;
4220 if (type
== 0xffff) { /* special value for directory operations */
4221 index
= eeprom
->magic
& 0xff;
4222 dir_op
= eeprom
->magic
>> 8;
4226 case 0x0e: /* erase */
4227 if (eeprom
->offset
!= ~eeprom
->magic
)
4229 return bnxt_erase_nvram_directory(dev
, index
- 1);
4235 /* Create or re-write an NVM item: */
4236 if (bnxt_dir_type_is_executable(type
))
4238 ext
= eeprom
->magic
& 0xffff;
4239 ordinal
= eeprom
->offset
>> 16;
4240 attr
= eeprom
->offset
& 0xffff;
4242 return bnxt_flash_nvram(dev
, type
, ordinal
, ext
, attr
, 0, data
,
4246 static int bnxt_set_eee(struct net_device
*dev
, struct ethtool_keee
*edata
)
4248 __ETHTOOL_DECLARE_LINK_MODE_MASK(advertising
);
4249 __ETHTOOL_DECLARE_LINK_MODE_MASK(tmp
);
4250 struct bnxt
*bp
= netdev_priv(dev
);
4251 struct ethtool_keee
*eee
= &bp
->eee
;
4252 struct bnxt_link_info
*link_info
= &bp
->link_info
;
4255 if (!BNXT_PHY_CFG_ABLE(bp
))
4258 if (!(bp
->phy_flags
& BNXT_PHY_FL_EEE_CAP
))
4261 mutex_lock(&bp
->link_lock
);
4262 _bnxt_fw_to_linkmode(advertising
, link_info
->advertising
);
4263 if (!edata
->eee_enabled
)
4266 if (!(link_info
->autoneg
& BNXT_AUTONEG_SPEED
)) {
4267 netdev_warn(dev
, "EEE requires autoneg\n");
4271 if (edata
->tx_lpi_enabled
) {
4272 if (bp
->lpi_tmr_hi
&& (edata
->tx_lpi_timer
> bp
->lpi_tmr_hi
||
4273 edata
->tx_lpi_timer
< bp
->lpi_tmr_lo
)) {
4274 netdev_warn(dev
, "Valid LPI timer range is %d and %d microsecs\n",
4275 bp
->lpi_tmr_lo
, bp
->lpi_tmr_hi
);
4278 } else if (!bp
->lpi_tmr_hi
) {
4279 edata
->tx_lpi_timer
= eee
->tx_lpi_timer
;
4282 if (linkmode_empty(edata
->advertised
)) {
4283 linkmode_and(edata
->advertised
, advertising
, eee
->supported
);
4284 } else if (linkmode_andnot(tmp
, edata
->advertised
, advertising
)) {
4285 netdev_warn(dev
, "EEE advertised must be a subset of autoneg advertised speeds\n");
4290 linkmode_copy(eee
->advertised
, edata
->advertised
);
4291 eee
->tx_lpi_enabled
= edata
->tx_lpi_enabled
;
4292 eee
->tx_lpi_timer
= edata
->tx_lpi_timer
;
4294 eee
->eee_enabled
= edata
->eee_enabled
;
4296 if (netif_running(dev
))
4297 rc
= bnxt_hwrm_set_link_setting(bp
, false, true);
4300 mutex_unlock(&bp
->link_lock
);
4304 static int bnxt_get_eee(struct net_device
*dev
, struct ethtool_keee
*edata
)
4306 struct bnxt
*bp
= netdev_priv(dev
);
4308 if (!(bp
->phy_flags
& BNXT_PHY_FL_EEE_CAP
))
4312 if (!bp
->eee
.eee_enabled
) {
4313 /* Preserve tx_lpi_timer so that the last value will be used
4314 * by default when it is re-enabled.
4316 linkmode_zero(edata
->advertised
);
4317 edata
->tx_lpi_enabled
= 0;
4320 if (!bp
->eee
.eee_active
)
4321 linkmode_zero(edata
->lp_advertised
);
4326 static int bnxt_read_sfp_module_eeprom_info(struct bnxt
*bp
, u16 i2c_addr
,
4327 u16 page_number
, u8 bank
,
4328 u16 start_addr
, u16 data_length
,
4331 struct hwrm_port_phy_i2c_read_output
*output
;
4332 struct hwrm_port_phy_i2c_read_input
*req
;
4333 int rc
, byte_offset
= 0;
4335 rc
= hwrm_req_init(bp
, req
, HWRM_PORT_PHY_I2C_READ
);
4339 output
= hwrm_req_hold(bp
, req
);
4340 req
->i2c_slave_addr
= i2c_addr
;
4341 req
->page_number
= cpu_to_le16(page_number
);
4342 req
->port_id
= cpu_to_le16(bp
->pf
.port_id
);
4346 xfer_size
= min_t(u16
, data_length
, BNXT_MAX_PHY_I2C_RESP_SIZE
);
4347 data_length
-= xfer_size
;
4348 req
->page_offset
= cpu_to_le16(start_addr
+ byte_offset
);
4349 req
->data_length
= xfer_size
;
4351 cpu_to_le32((start_addr
+ byte_offset
?
4352 PORT_PHY_I2C_READ_REQ_ENABLES_PAGE_OFFSET
:
4355 PORT_PHY_I2C_READ_REQ_ENABLES_BANK_NUMBER
:
4357 rc
= hwrm_req_send(bp
, req
);
4359 memcpy(buf
+ byte_offset
, output
->data
, xfer_size
);
4360 byte_offset
+= xfer_size
;
4361 } while (!rc
&& data_length
> 0);
4362 hwrm_req_drop(bp
, req
);
4367 static int bnxt_get_module_info(struct net_device
*dev
,
4368 struct ethtool_modinfo
*modinfo
)
4370 u8 data
[SFF_DIAG_SUPPORT_OFFSET
+ 1];
4371 struct bnxt
*bp
= netdev_priv(dev
);
4374 /* No point in going further if phy status indicates
4375 * module is not inserted or if it is powered down or
4376 * if it is of type 10GBase-T
4378 if (bp
->link_info
.module_status
>
4379 PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG
)
4382 /* This feature is not supported in older firmware versions */
4383 if (bp
->hwrm_spec_code
< 0x10202)
4386 rc
= bnxt_read_sfp_module_eeprom_info(bp
, I2C_DEV_ADDR_A0
, 0, 0, 0,
4387 SFF_DIAG_SUPPORT_OFFSET
+ 1,
4390 u8 module_id
= data
[0];
4391 u8 diag_supported
= data
[SFF_DIAG_SUPPORT_OFFSET
];
4393 switch (module_id
) {
4394 case SFF_MODULE_ID_SFP
:
4395 modinfo
->type
= ETH_MODULE_SFF_8472
;
4396 modinfo
->eeprom_len
= ETH_MODULE_SFF_8472_LEN
;
4397 if (!diag_supported
)
4398 modinfo
->eeprom_len
= ETH_MODULE_SFF_8436_LEN
;
4400 case SFF_MODULE_ID_QSFP
:
4401 case SFF_MODULE_ID_QSFP_PLUS
:
4402 modinfo
->type
= ETH_MODULE_SFF_8436
;
4403 modinfo
->eeprom_len
= ETH_MODULE_SFF_8436_LEN
;
4405 case SFF_MODULE_ID_QSFP28
:
4406 modinfo
->type
= ETH_MODULE_SFF_8636
;
4407 modinfo
->eeprom_len
= ETH_MODULE_SFF_8636_LEN
;
4417 static int bnxt_get_module_eeprom(struct net_device
*dev
,
4418 struct ethtool_eeprom
*eeprom
,
4421 struct bnxt
*bp
= netdev_priv(dev
);
4422 u16 start
= eeprom
->offset
, length
= eeprom
->len
;
4425 memset(data
, 0, eeprom
->len
);
4427 /* Read A0 portion of the EEPROM */
4428 if (start
< ETH_MODULE_SFF_8436_LEN
) {
4429 if (start
+ eeprom
->len
> ETH_MODULE_SFF_8436_LEN
)
4430 length
= ETH_MODULE_SFF_8436_LEN
- start
;
4431 rc
= bnxt_read_sfp_module_eeprom_info(bp
, I2C_DEV_ADDR_A0
, 0, 0,
4432 start
, length
, data
);
4437 length
= eeprom
->len
- length
;
4440 /* Read A2 portion of the EEPROM */
4442 start
-= ETH_MODULE_SFF_8436_LEN
;
4443 rc
= bnxt_read_sfp_module_eeprom_info(bp
, I2C_DEV_ADDR_A2
, 0, 0,
4444 start
, length
, data
);
4449 static int bnxt_get_module_status(struct bnxt
*bp
, struct netlink_ext_ack
*extack
)
4451 if (bp
->link_info
.module_status
<=
4452 PORT_PHY_QCFG_RESP_MODULE_STATUS_WARNINGMSG
)
4455 switch (bp
->link_info
.module_status
) {
4456 case PORT_PHY_QCFG_RESP_MODULE_STATUS_PWRDOWN
:
4457 NL_SET_ERR_MSG_MOD(extack
, "Transceiver module is powering down");
4459 case PORT_PHY_QCFG_RESP_MODULE_STATUS_NOTINSERTED
:
4460 NL_SET_ERR_MSG_MOD(extack
, "Transceiver module not inserted");
4462 case PORT_PHY_QCFG_RESP_MODULE_STATUS_CURRENTFAULT
:
4463 NL_SET_ERR_MSG_MOD(extack
, "Transceiver module disabled due to current fault");
4466 NL_SET_ERR_MSG_MOD(extack
, "Unknown error");
4472 static int bnxt_get_module_eeprom_by_page(struct net_device
*dev
,
4473 const struct ethtool_module_eeprom
*page_data
,
4474 struct netlink_ext_ack
*extack
)
4476 struct bnxt
*bp
= netdev_priv(dev
);
4479 rc
= bnxt_get_module_status(bp
, extack
);
4483 if (bp
->hwrm_spec_code
< 0x10202) {
4484 NL_SET_ERR_MSG_MOD(extack
, "Firmware version too old");
4488 if (page_data
->bank
&& !(bp
->phy_flags
& BNXT_PHY_FL_BANK_SEL
)) {
4489 NL_SET_ERR_MSG_MOD(extack
, "Firmware not capable for bank selection");
4493 rc
= bnxt_read_sfp_module_eeprom_info(bp
, page_data
->i2c_address
<< 1,
4494 page_data
->page
, page_data
->bank
,
4499 NL_SET_ERR_MSG_MOD(extack
, "Module`s eeprom read failed");
4502 return page_data
->length
;
4505 static int bnxt_nway_reset(struct net_device
*dev
)
4509 struct bnxt
*bp
= netdev_priv(dev
);
4510 struct bnxt_link_info
*link_info
= &bp
->link_info
;
4512 if (!BNXT_PHY_CFG_ABLE(bp
))
4515 if (!(link_info
->autoneg
& BNXT_AUTONEG_SPEED
))
4518 if (netif_running(dev
))
4519 rc
= bnxt_hwrm_set_link_setting(bp
, true, false);
4524 static int bnxt_set_phys_id(struct net_device
*dev
,
4525 enum ethtool_phys_id_state state
)
4527 struct hwrm_port_led_cfg_input
*req
;
4528 struct bnxt
*bp
= netdev_priv(dev
);
4529 struct bnxt_pf_info
*pf
= &bp
->pf
;
4530 struct bnxt_led_cfg
*led_cfg
;
4535 if (!bp
->num_leds
|| BNXT_VF(bp
))
4538 if (state
== ETHTOOL_ID_ACTIVE
) {
4539 led_state
= PORT_LED_CFG_REQ_LED0_STATE_BLINKALT
;
4540 duration
= cpu_to_le16(500);
4541 } else if (state
== ETHTOOL_ID_INACTIVE
) {
4542 led_state
= PORT_LED_CFG_REQ_LED1_STATE_DEFAULT
;
4543 duration
= cpu_to_le16(0);
4547 rc
= hwrm_req_init(bp
, req
, HWRM_PORT_LED_CFG
);
4551 req
->port_id
= cpu_to_le16(pf
->port_id
);
4552 req
->num_leds
= bp
->num_leds
;
4553 led_cfg
= (struct bnxt_led_cfg
*)&req
->led0_id
;
4554 for (i
= 0; i
< bp
->num_leds
; i
++, led_cfg
++) {
4555 req
->enables
|= BNXT_LED_DFLT_ENABLES(i
);
4556 led_cfg
->led_id
= bp
->leds
[i
].led_id
;
4557 led_cfg
->led_state
= led_state
;
4558 led_cfg
->led_blink_on
= duration
;
4559 led_cfg
->led_blink_off
= duration
;
4560 led_cfg
->led_group_id
= bp
->leds
[i
].led_group_id
;
4562 return hwrm_req_send(bp
, req
);
4565 static int bnxt_hwrm_selftest_irq(struct bnxt
*bp
, u16 cmpl_ring
)
4567 struct hwrm_selftest_irq_input
*req
;
4570 rc
= hwrm_req_init(bp
, req
, HWRM_SELFTEST_IRQ
);
4574 req
->cmpl_ring
= cpu_to_le16(cmpl_ring
);
4575 return hwrm_req_send(bp
, req
);
4578 static int bnxt_test_irq(struct bnxt
*bp
)
4582 for (i
= 0; i
< bp
->cp_nr_rings
; i
++) {
4583 u16 cmpl_ring
= bp
->grp_info
[i
].cp_fw_ring_id
;
4586 rc
= bnxt_hwrm_selftest_irq(bp
, cmpl_ring
);
4593 static int bnxt_hwrm_mac_loopback(struct bnxt
*bp
, bool enable
)
4595 struct hwrm_port_mac_cfg_input
*req
;
4598 rc
= hwrm_req_init(bp
, req
, HWRM_PORT_MAC_CFG
);
4602 req
->enables
= cpu_to_le32(PORT_MAC_CFG_REQ_ENABLES_LPBK
);
4604 req
->lpbk
= PORT_MAC_CFG_REQ_LPBK_LOCAL
;
4606 req
->lpbk
= PORT_MAC_CFG_REQ_LPBK_NONE
;
4607 return hwrm_req_send(bp
, req
);
4610 static int bnxt_query_force_speeds(struct bnxt
*bp
, u16
*force_speeds
)
4612 struct hwrm_port_phy_qcaps_output
*resp
;
4613 struct hwrm_port_phy_qcaps_input
*req
;
4616 rc
= hwrm_req_init(bp
, req
, HWRM_PORT_PHY_QCAPS
);
4620 resp
= hwrm_req_hold(bp
, req
);
4621 rc
= hwrm_req_send(bp
, req
);
4623 *force_speeds
= le16_to_cpu(resp
->supported_speeds_force_mode
);
4625 hwrm_req_drop(bp
, req
);
4629 static int bnxt_disable_an_for_lpbk(struct bnxt
*bp
,
4630 struct hwrm_port_phy_cfg_input
*req
)
4632 struct bnxt_link_info
*link_info
= &bp
->link_info
;
4637 if (!link_info
->autoneg
||
4638 (bp
->phy_flags
& BNXT_PHY_FL_AN_PHY_LPBK
))
4641 rc
= bnxt_query_force_speeds(bp
, &fw_advertising
);
4645 fw_speed
= PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_1GB
;
4646 if (BNXT_LINK_IS_UP(bp
))
4647 fw_speed
= bp
->link_info
.link_speed
;
4648 else if (fw_advertising
& BNXT_LINK_SPEED_MSK_10GB
)
4649 fw_speed
= PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_10GB
;
4650 else if (fw_advertising
& BNXT_LINK_SPEED_MSK_25GB
)
4651 fw_speed
= PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_25GB
;
4652 else if (fw_advertising
& BNXT_LINK_SPEED_MSK_40GB
)
4653 fw_speed
= PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_40GB
;
4654 else if (fw_advertising
& BNXT_LINK_SPEED_MSK_50GB
)
4655 fw_speed
= PORT_PHY_CFG_REQ_FORCE_LINK_SPEED_50GB
;
4657 req
->force_link_speed
= cpu_to_le16(fw_speed
);
4658 req
->flags
|= cpu_to_le32(PORT_PHY_CFG_REQ_FLAGS_FORCE
|
4659 PORT_PHY_CFG_REQ_FLAGS_RESET_PHY
);
4660 rc
= hwrm_req_send(bp
, req
);
4662 req
->force_link_speed
= cpu_to_le16(0);
4666 static int bnxt_hwrm_phy_loopback(struct bnxt
*bp
, bool enable
, bool ext
)
4668 struct hwrm_port_phy_cfg_input
*req
;
4671 rc
= hwrm_req_init(bp
, req
, HWRM_PORT_PHY_CFG
);
4675 /* prevent bnxt_disable_an_for_lpbk() from consuming the request */
4676 hwrm_req_hold(bp
, req
);
4679 bnxt_disable_an_for_lpbk(bp
, req
);
4681 req
->lpbk
= PORT_PHY_CFG_REQ_LPBK_EXTERNAL
;
4683 req
->lpbk
= PORT_PHY_CFG_REQ_LPBK_LOCAL
;
4685 req
->lpbk
= PORT_PHY_CFG_REQ_LPBK_NONE
;
4687 req
->enables
= cpu_to_le32(PORT_PHY_CFG_REQ_ENABLES_LPBK
);
4688 rc
= hwrm_req_send(bp
, req
);
4689 hwrm_req_drop(bp
, req
);
4693 static int bnxt_rx_loopback(struct bnxt
*bp
, struct bnxt_cp_ring_info
*cpr
,
4694 u32 raw_cons
, int pkt_size
)
4696 struct bnxt_napi
*bnapi
= cpr
->bnapi
;
4697 struct bnxt_rx_ring_info
*rxr
;
4698 struct bnxt_sw_rx_bd
*rx_buf
;
4699 struct rx_cmp
*rxcmp
;
4705 rxr
= bnapi
->rx_ring
;
4706 cp_cons
= RING_CMP(raw_cons
);
4707 rxcmp
= (struct rx_cmp
*)
4708 &cpr
->cp_desc_ring
[CP_RING(cp_cons
)][CP_IDX(cp_cons
)];
4709 cons
= rxcmp
->rx_cmp_opaque
;
4710 rx_buf
= &rxr
->rx_buf_ring
[cons
];
4711 data
= rx_buf
->data_ptr
;
4712 len
= le32_to_cpu(rxcmp
->rx_cmp_len_flags_type
) >> RX_CMP_LEN_SHIFT
;
4713 if (len
!= pkt_size
)
4716 if (!ether_addr_equal(data
+ i
, bnapi
->bp
->dev
->dev_addr
))
4719 for ( ; i
< pkt_size
; i
++) {
4720 if (data
[i
] != (u8
)(i
& 0xff))
4726 static int bnxt_poll_loopback(struct bnxt
*bp
, struct bnxt_cp_ring_info
*cpr
,
4729 struct tx_cmp
*txcmp
;
4735 raw_cons
= cpr
->cp_raw_cons
;
4736 for (i
= 0; i
< 200; i
++) {
4737 cons
= RING_CMP(raw_cons
);
4738 txcmp
= &cpr
->cp_desc_ring
[CP_RING(cons
)][CP_IDX(cons
)];
4740 if (!TX_CMP_VALID(txcmp
, raw_cons
)) {
4745 /* The valid test of the entry must be done first before
4746 * reading any further.
4749 if (TX_CMP_TYPE(txcmp
) == CMP_TYPE_RX_L2_CMP
||
4750 TX_CMP_TYPE(txcmp
) == CMP_TYPE_RX_L2_V3_CMP
) {
4751 rc
= bnxt_rx_loopback(bp
, cpr
, raw_cons
, pkt_size
);
4752 raw_cons
= NEXT_RAW_CMP(raw_cons
);
4753 raw_cons
= NEXT_RAW_CMP(raw_cons
);
4756 raw_cons
= NEXT_RAW_CMP(raw_cons
);
4758 cpr
->cp_raw_cons
= raw_cons
;
4762 static int bnxt_run_loopback(struct bnxt
*bp
)
4764 struct bnxt_tx_ring_info
*txr
= &bp
->tx_ring
[0];
4765 struct bnxt_rx_ring_info
*rxr
= &bp
->rx_ring
[0];
4766 struct bnxt_cp_ring_info
*cpr
;
4767 int pkt_size
, i
= 0;
4768 struct sk_buff
*skb
;
4773 cpr
= &rxr
->bnapi
->cp_ring
;
4774 if (bp
->flags
& BNXT_FLAG_CHIP_P5_PLUS
)
4776 pkt_size
= min(bp
->dev
->mtu
+ ETH_HLEN
, bp
->rx_copy_thresh
);
4777 skb
= netdev_alloc_skb(bp
->dev
, pkt_size
);
4780 data
= skb_put(skb
, pkt_size
);
4781 ether_addr_copy(&data
[i
], bp
->dev
->dev_addr
);
4783 ether_addr_copy(&data
[i
], bp
->dev
->dev_addr
);
4785 for ( ; i
< pkt_size
; i
++)
4786 data
[i
] = (u8
)(i
& 0xff);
4788 map
= dma_map_single(&bp
->pdev
->dev
, skb
->data
, pkt_size
,
4790 if (dma_mapping_error(&bp
->pdev
->dev
, map
)) {
4794 bnxt_xmit_bd(bp
, txr
, map
, pkt_size
, NULL
);
4796 /* Sync BD data before updating doorbell */
4799 bnxt_db_write(bp
, &txr
->tx_db
, txr
->tx_prod
);
4800 rc
= bnxt_poll_loopback(bp
, cpr
, pkt_size
);
4802 dma_unmap_single(&bp
->pdev
->dev
, map
, pkt_size
, DMA_TO_DEVICE
);
4807 static int bnxt_run_fw_tests(struct bnxt
*bp
, u8 test_mask
, u8
*test_results
)
4809 struct hwrm_selftest_exec_output
*resp
;
4810 struct hwrm_selftest_exec_input
*req
;
4813 rc
= hwrm_req_init(bp
, req
, HWRM_SELFTEST_EXEC
);
4817 hwrm_req_timeout(bp
, req
, bp
->test_info
->timeout
);
4818 req
->flags
= test_mask
;
4820 resp
= hwrm_req_hold(bp
, req
);
4821 rc
= hwrm_req_send(bp
, req
);
4822 *test_results
= resp
->test_success
;
4823 hwrm_req_drop(bp
, req
);
/* Driver-implemented tests appended after the firmware-reported tests;
 * their indices are relative to bp->num_tests.
 */
#define BNXT_DRV_TESTS			4
#define BNXT_MACLPBK_TEST_IDX	(bp->num_tests - BNXT_DRV_TESTS)
#define BNXT_PHYLPBK_TEST_IDX	(BNXT_MACLPBK_TEST_IDX + 1)
#define BNXT_EXTLPBK_TEST_IDX	(BNXT_MACLPBK_TEST_IDX + 2)
#define BNXT_IRQ_TEST_IDX	(BNXT_MACLPBK_TEST_IDX + 3)
4833 static void bnxt_self_test(struct net_device
*dev
, struct ethtool_test
*etest
,
4836 struct bnxt
*bp
= netdev_priv(dev
);
4837 bool do_ext_lpbk
= false;
4838 bool offline
= false;
4839 u8 test_results
= 0;
4843 if (!bp
->num_tests
|| !BNXT_PF(bp
))
4846 if (etest
->flags
& ETH_TEST_FL_OFFLINE
&&
4847 bnxt_ulp_registered(bp
->edev
)) {
4848 etest
->flags
|= ETH_TEST_FL_FAILED
;
4849 netdev_warn(dev
, "Offline tests cannot be run with RoCE driver loaded\n");
4853 memset(buf
, 0, sizeof(u64
) * bp
->num_tests
);
4854 if (!netif_running(dev
)) {
4855 etest
->flags
|= ETH_TEST_FL_FAILED
;
4859 if ((etest
->flags
& ETH_TEST_FL_EXTERNAL_LB
) &&
4860 (bp
->phy_flags
& BNXT_PHY_FL_EXT_LPBK
))
4863 if (etest
->flags
& ETH_TEST_FL_OFFLINE
) {
4864 if (bp
->pf
.active_vfs
|| !BNXT_SINGLE_PF(bp
)) {
4865 etest
->flags
|= ETH_TEST_FL_FAILED
;
4866 netdev_warn(dev
, "Offline tests cannot be run with active VFs or on shared PF\n");
4872 for (i
= 0; i
< bp
->num_tests
- BNXT_DRV_TESTS
; i
++) {
4873 u8 bit_val
= 1 << i
;
4875 if (!(bp
->test_info
->offline_mask
& bit_val
))
4876 test_mask
|= bit_val
;
4878 test_mask
|= bit_val
;
4881 bnxt_run_fw_tests(bp
, test_mask
, &test_results
);
4883 bnxt_close_nic(bp
, true, false);
4884 bnxt_run_fw_tests(bp
, test_mask
, &test_results
);
4886 buf
[BNXT_MACLPBK_TEST_IDX
] = 1;
4887 bnxt_hwrm_mac_loopback(bp
, true);
4889 rc
= bnxt_half_open_nic(bp
);
4891 bnxt_hwrm_mac_loopback(bp
, false);
4892 etest
->flags
|= ETH_TEST_FL_FAILED
;
4895 if (bnxt_run_loopback(bp
))
4896 etest
->flags
|= ETH_TEST_FL_FAILED
;
4898 buf
[BNXT_MACLPBK_TEST_IDX
] = 0;
4900 bnxt_hwrm_mac_loopback(bp
, false);
4901 bnxt_hwrm_phy_loopback(bp
, true, false);
4903 if (bnxt_run_loopback(bp
)) {
4904 buf
[BNXT_PHYLPBK_TEST_IDX
] = 1;
4905 etest
->flags
|= ETH_TEST_FL_FAILED
;
4908 etest
->flags
|= ETH_TEST_FL_EXTERNAL_LB_DONE
;
4909 bnxt_hwrm_phy_loopback(bp
, true, true);
4911 if (bnxt_run_loopback(bp
)) {
4912 buf
[BNXT_EXTLPBK_TEST_IDX
] = 1;
4913 etest
->flags
|= ETH_TEST_FL_FAILED
;
4916 bnxt_hwrm_phy_loopback(bp
, false, false);
4917 bnxt_half_close_nic(bp
);
4918 rc
= bnxt_open_nic(bp
, true, true);
4920 if (rc
|| bnxt_test_irq(bp
)) {
4921 buf
[BNXT_IRQ_TEST_IDX
] = 1;
4922 etest
->flags
|= ETH_TEST_FL_FAILED
;
4924 for (i
= 0; i
< bp
->num_tests
- BNXT_DRV_TESTS
; i
++) {
4925 u8 bit_val
= 1 << i
;
4927 if ((test_mask
& bit_val
) && !(test_results
& bit_val
)) {
4929 etest
->flags
|= ETH_TEST_FL_FAILED
;
4934 static int bnxt_reset(struct net_device
*dev
, u32
*flags
)
4936 struct bnxt
*bp
= netdev_priv(dev
);
4937 bool reload
= false;
4944 netdev_err(dev
, "Reset is not supported from a VF\n");
4948 if (pci_vfs_assigned(bp
->pdev
) &&
4949 !(bp
->fw_cap
& BNXT_FW_CAP_HOT_RESET
)) {
4951 "Reset not allowed when VFs are assigned to VMs\n");
4955 if ((req
& BNXT_FW_RESET_CHIP
) == BNXT_FW_RESET_CHIP
) {
4956 /* This feature is not supported in older firmware versions */
4957 if (bp
->hwrm_spec_code
>= 0x10803) {
4958 if (!bnxt_firmware_reset_chip(dev
)) {
4959 netdev_info(dev
, "Firmware reset request successful.\n");
4960 if (!(bp
->fw_cap
& BNXT_FW_CAP_HOT_RESET
))
4962 *flags
&= ~BNXT_FW_RESET_CHIP
;
4964 } else if (req
== BNXT_FW_RESET_CHIP
) {
4965 return -EOPNOTSUPP
; /* only request, fail hard */
4969 if (!BNXT_CHIP_P4_PLUS(bp
) && (req
& BNXT_FW_RESET_AP
)) {
4970 /* This feature is not supported in older firmware versions */
4971 if (bp
->hwrm_spec_code
>= 0x10803) {
4972 if (!bnxt_firmware_reset_ap(dev
)) {
4973 netdev_info(dev
, "Reset application processor successful.\n");
4975 *flags
&= ~BNXT_FW_RESET_AP
;
4977 } else if (req
== BNXT_FW_RESET_AP
) {
4978 return -EOPNOTSUPP
; /* only request, fail hard */
4983 netdev_info(dev
, "Reload driver to complete reset\n");
4988 static int bnxt_set_dump(struct net_device
*dev
, struct ethtool_dump
*dump
)
4990 struct bnxt
*bp
= netdev_priv(dev
);
4992 if (dump
->flag
> BNXT_DUMP_DRIVER
) {
4993 netdev_info(dev
, "Supports only Live(0), Crash(1), Driver(2) dumps.\n");
4997 if (dump
->flag
== BNXT_DUMP_CRASH
) {
4998 if (bp
->fw_dbg_cap
& DBG_QCAPS_RESP_FLAGS_CRASHDUMP_SOC_DDR
&&
4999 (!IS_ENABLED(CONFIG_TEE_BNXT_FW
))) {
5001 "Cannot collect crash dump as TEE_BNXT_FW config option is not enabled.\n");
5003 } else if (!(bp
->fw_dbg_cap
& DBG_QCAPS_RESP_FLAGS_CRASHDUMP_HOST_DDR
)) {
5004 netdev_info(dev
, "Crash dump collection from host memory is not supported on this interface.\n");
5009 bp
->dump_flag
= dump
->flag
;
5013 static int bnxt_get_dump_flag(struct net_device
*dev
, struct ethtool_dump
*dump
)
5015 struct bnxt
*bp
= netdev_priv(dev
);
5017 if (bp
->hwrm_spec_code
< 0x10801)
5020 dump
->version
= bp
->ver_resp
.hwrm_fw_maj_8b
<< 24 |
5021 bp
->ver_resp
.hwrm_fw_min_8b
<< 16 |
5022 bp
->ver_resp
.hwrm_fw_bld_8b
<< 8 |
5023 bp
->ver_resp
.hwrm_fw_rsvd_8b
;
5025 dump
->flag
= bp
->dump_flag
;
5026 dump
->len
= bnxt_get_coredump_length(bp
, bp
->dump_flag
);
5030 static int bnxt_get_dump_data(struct net_device
*dev
, struct ethtool_dump
*dump
,
5033 struct bnxt
*bp
= netdev_priv(dev
);
5035 if (bp
->hwrm_spec_code
< 0x10801)
5038 memset(buf
, 0, dump
->len
);
5040 dump
->flag
= bp
->dump_flag
;
5041 return bnxt_get_coredump(bp
, dump
->flag
, buf
, &dump
->len
);
5044 static int bnxt_get_ts_info(struct net_device
*dev
,
5045 struct kernel_ethtool_ts_info
*info
)
5047 struct bnxt
*bp
= netdev_priv(dev
);
5048 struct bnxt_ptp_cfg
*ptp
;
5051 info
->so_timestamping
= SOF_TIMESTAMPING_TX_SOFTWARE
;
5056 info
->so_timestamping
|= SOF_TIMESTAMPING_TX_HARDWARE
|
5057 SOF_TIMESTAMPING_RX_HARDWARE
|
5058 SOF_TIMESTAMPING_RAW_HARDWARE
;
5060 info
->phc_index
= ptp_clock_index(ptp
->ptp_clock
);
5062 info
->tx_types
= (1 << HWTSTAMP_TX_OFF
) | (1 << HWTSTAMP_TX_ON
);
5064 info
->rx_filters
= (1 << HWTSTAMP_FILTER_NONE
) |
5065 (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT
) |
5066 (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT
);
5068 if (bp
->fw_cap
& BNXT_FW_CAP_RX_ALL_PKT_TS
)
5069 info
->rx_filters
|= (1 << HWTSTAMP_FILTER_ALL
);
5073 void bnxt_ethtool_init(struct bnxt
*bp
)
5075 struct hwrm_selftest_qlist_output
*resp
;
5076 struct hwrm_selftest_qlist_input
*req
;
5077 struct bnxt_test_info
*test_info
;
5078 struct net_device
*dev
= bp
->dev
;
5081 if (!(bp
->fw_cap
& BNXT_FW_CAP_PKG_VER
))
5082 bnxt_get_pkgver(dev
);
5085 if (bp
->hwrm_spec_code
< 0x10704 || !BNXT_PF(bp
))
5088 test_info
= bp
->test_info
;
5090 test_info
= kzalloc(sizeof(*bp
->test_info
), GFP_KERNEL
);
5093 bp
->test_info
= test_info
;
5096 if (hwrm_req_init(bp
, req
, HWRM_SELFTEST_QLIST
))
5099 resp
= hwrm_req_hold(bp
, req
);
5100 rc
= hwrm_req_send_silent(bp
, req
);
5102 goto ethtool_init_exit
;
5104 bp
->num_tests
= resp
->num_tests
+ BNXT_DRV_TESTS
;
5105 if (bp
->num_tests
> BNXT_MAX_TEST
)
5106 bp
->num_tests
= BNXT_MAX_TEST
;
5108 test_info
->offline_mask
= resp
->offline_tests
;
5109 test_info
->timeout
= le16_to_cpu(resp
->test_timeout
);
5110 if (!test_info
->timeout
)
5111 test_info
->timeout
= HWRM_CMD_TIMEOUT
;
5112 for (i
= 0; i
< bp
->num_tests
; i
++) {
5113 char *str
= test_info
->string
[i
];
5114 char *fw_str
= resp
->test_name
[i
];
5116 if (i
== BNXT_MACLPBK_TEST_IDX
) {
5117 strcpy(str
, "Mac loopback test (offline)");
5118 } else if (i
== BNXT_PHYLPBK_TEST_IDX
) {
5119 strcpy(str
, "Phy loopback test (offline)");
5120 } else if (i
== BNXT_EXTLPBK_TEST_IDX
) {
5121 strcpy(str
, "Ext loopback test (offline)");
5122 } else if (i
== BNXT_IRQ_TEST_IDX
) {
5123 strcpy(str
, "Interrupt_test (offline)");
5125 snprintf(str
, ETH_GSTRING_LEN
, "%s test (%s)",
5126 fw_str
, test_info
->offline_mask
& (1 << i
) ?
5127 "offline" : "online");
5132 hwrm_req_drop(bp
, req
);
5135 static void bnxt_get_eth_phy_stats(struct net_device
*dev
,
5136 struct ethtool_eth_phy_stats
*phy_stats
)
5138 struct bnxt
*bp
= netdev_priv(dev
);
5141 if (BNXT_VF(bp
) || !(bp
->flags
& BNXT_FLAG_PORT_STATS_EXT
))
5144 rx
= bp
->rx_port_stats_ext
.sw_stats
;
5145 phy_stats
->SymbolErrorDuringCarrier
=
5146 *(rx
+ BNXT_RX_STATS_EXT_OFFSET(rx_pcs_symbol_err
));
5149 static void bnxt_get_eth_mac_stats(struct net_device
*dev
,
5150 struct ethtool_eth_mac_stats
*mac_stats
)
5152 struct bnxt
*bp
= netdev_priv(dev
);
5155 if (BNXT_VF(bp
) || !(bp
->flags
& BNXT_FLAG_PORT_STATS
))
5158 rx
= bp
->port_stats
.sw_stats
;
5159 tx
= bp
->port_stats
.sw_stats
+ BNXT_TX_PORT_STATS_BYTE_OFFSET
/ 8;
5161 mac_stats
->FramesReceivedOK
=
5162 BNXT_GET_RX_PORT_STATS64(rx
, rx_good_frames
);
5163 mac_stats
->FramesTransmittedOK
=
5164 BNXT_GET_TX_PORT_STATS64(tx
, tx_good_frames
);
5165 mac_stats
->FrameCheckSequenceErrors
=
5166 BNXT_GET_RX_PORT_STATS64(rx
, rx_fcs_err_frames
);
5167 mac_stats
->AlignmentErrors
=
5168 BNXT_GET_RX_PORT_STATS64(rx
, rx_align_err_frames
);
5169 mac_stats
->OutOfRangeLengthField
=
5170 BNXT_GET_RX_PORT_STATS64(rx
, rx_oor_len_frames
);
5173 static void bnxt_get_eth_ctrl_stats(struct net_device
*dev
,
5174 struct ethtool_eth_ctrl_stats
*ctrl_stats
)
5176 struct bnxt
*bp
= netdev_priv(dev
);
5179 if (BNXT_VF(bp
) || !(bp
->flags
& BNXT_FLAG_PORT_STATS
))
5182 rx
= bp
->port_stats
.sw_stats
;
5183 ctrl_stats
->MACControlFramesReceived
=
5184 BNXT_GET_RX_PORT_STATS64(rx
, rx_ctrl_frames
);
5187 static const struct ethtool_rmon_hist_range bnxt_rmon_ranges
[] = {
5201 static void bnxt_get_rmon_stats(struct net_device
*dev
,
5202 struct ethtool_rmon_stats
*rmon_stats
,
5203 const struct ethtool_rmon_hist_range
**ranges
)
5205 struct bnxt
*bp
= netdev_priv(dev
);
5208 if (BNXT_VF(bp
) || !(bp
->flags
& BNXT_FLAG_PORT_STATS
))
5211 rx
= bp
->port_stats
.sw_stats
;
5212 tx
= bp
->port_stats
.sw_stats
+ BNXT_TX_PORT_STATS_BYTE_OFFSET
/ 8;
5214 rmon_stats
->jabbers
=
5215 BNXT_GET_RX_PORT_STATS64(rx
, rx_jbr_frames
);
5216 rmon_stats
->oversize_pkts
=
5217 BNXT_GET_RX_PORT_STATS64(rx
, rx_ovrsz_frames
);
5218 rmon_stats
->undersize_pkts
=
5219 BNXT_GET_RX_PORT_STATS64(rx
, rx_undrsz_frames
);
5221 rmon_stats
->hist
[0] = BNXT_GET_RX_PORT_STATS64(rx
, rx_64b_frames
);
5222 rmon_stats
->hist
[1] = BNXT_GET_RX_PORT_STATS64(rx
, rx_65b_127b_frames
);
5223 rmon_stats
->hist
[2] = BNXT_GET_RX_PORT_STATS64(rx
, rx_128b_255b_frames
);
5224 rmon_stats
->hist
[3] = BNXT_GET_RX_PORT_STATS64(rx
, rx_256b_511b_frames
);
5225 rmon_stats
->hist
[4] =
5226 BNXT_GET_RX_PORT_STATS64(rx
, rx_512b_1023b_frames
);
5227 rmon_stats
->hist
[5] =
5228 BNXT_GET_RX_PORT_STATS64(rx
, rx_1024b_1518b_frames
);
5229 rmon_stats
->hist
[6] =
5230 BNXT_GET_RX_PORT_STATS64(rx
, rx_1519b_2047b_frames
);
5231 rmon_stats
->hist
[7] =
5232 BNXT_GET_RX_PORT_STATS64(rx
, rx_2048b_4095b_frames
);
5233 rmon_stats
->hist
[8] =
5234 BNXT_GET_RX_PORT_STATS64(rx
, rx_4096b_9216b_frames
);
5235 rmon_stats
->hist
[9] =
5236 BNXT_GET_RX_PORT_STATS64(rx
, rx_9217b_16383b_frames
);
5238 rmon_stats
->hist_tx
[0] =
5239 BNXT_GET_TX_PORT_STATS64(tx
, tx_64b_frames
);
5240 rmon_stats
->hist_tx
[1] =
5241 BNXT_GET_TX_PORT_STATS64(tx
, tx_65b_127b_frames
);
5242 rmon_stats
->hist_tx
[2] =
5243 BNXT_GET_TX_PORT_STATS64(tx
, tx_128b_255b_frames
);
5244 rmon_stats
->hist_tx
[3] =
5245 BNXT_GET_TX_PORT_STATS64(tx
, tx_256b_511b_frames
);
5246 rmon_stats
->hist_tx
[4] =
5247 BNXT_GET_TX_PORT_STATS64(tx
, tx_512b_1023b_frames
);
5248 rmon_stats
->hist_tx
[5] =
5249 BNXT_GET_TX_PORT_STATS64(tx
, tx_1024b_1518b_frames
);
5250 rmon_stats
->hist_tx
[6] =
5251 BNXT_GET_TX_PORT_STATS64(tx
, tx_1519b_2047b_frames
);
5252 rmon_stats
->hist_tx
[7] =
5253 BNXT_GET_TX_PORT_STATS64(tx
, tx_2048b_4095b_frames
);
5254 rmon_stats
->hist_tx
[8] =
5255 BNXT_GET_TX_PORT_STATS64(tx
, tx_4096b_9216b_frames
);
5256 rmon_stats
->hist_tx
[9] =
5257 BNXT_GET_TX_PORT_STATS64(tx
, tx_9217b_16383b_frames
);
5259 *ranges
= bnxt_rmon_ranges
;
5262 static void bnxt_get_ptp_stats(struct net_device
*dev
,
5263 struct ethtool_ts_stats
*ts_stats
)
5265 struct bnxt
*bp
= netdev_priv(dev
);
5266 struct bnxt_ptp_cfg
*ptp
= bp
->ptp_cfg
;
5269 ts_stats
->pkts
= ptp
->stats
.ts_pkts
;
5270 ts_stats
->lost
= ptp
->stats
.ts_lost
;
5271 ts_stats
->err
= atomic64_read(&ptp
->stats
.ts_err
);
5275 static void bnxt_get_link_ext_stats(struct net_device
*dev
,
5276 struct ethtool_link_ext_stats
*stats
)
5278 struct bnxt
*bp
= netdev_priv(dev
);
5281 if (BNXT_VF(bp
) || !(bp
->flags
& BNXT_FLAG_PORT_STATS_EXT
))
5284 rx
= bp
->rx_port_stats_ext
.sw_stats
;
5285 stats
->link_down_events
=
5286 *(rx
+ BNXT_RX_STATS_EXT_OFFSET(link_down_events
));
5289 void bnxt_ethtool_free(struct bnxt
*bp
)
5291 kfree(bp
->test_info
);
5292 bp
->test_info
= NULL
;
5295 const struct ethtool_ops bnxt_ethtool_ops
= {
5296 .cap_link_lanes_supported
= 1,
5297 .rxfh_per_ctx_key
= 1,
5298 .rxfh_max_num_contexts
= BNXT_MAX_ETH_RSS_CTX
+ 1,
5299 .rxfh_indir_space
= BNXT_MAX_RSS_TABLE_ENTRIES_P5
,
5300 .rxfh_priv_size
= sizeof(struct bnxt_rss_ctx
),
5301 .supported_coalesce_params
= ETHTOOL_COALESCE_USECS
|
5302 ETHTOOL_COALESCE_MAX_FRAMES
|
5303 ETHTOOL_COALESCE_USECS_IRQ
|
5304 ETHTOOL_COALESCE_MAX_FRAMES_IRQ
|
5305 ETHTOOL_COALESCE_STATS_BLOCK_USECS
|
5306 ETHTOOL_COALESCE_USE_ADAPTIVE_RX
|
5307 ETHTOOL_COALESCE_USE_CQE
,
5308 .get_link_ksettings
= bnxt_get_link_ksettings
,
5309 .set_link_ksettings
= bnxt_set_link_ksettings
,
5310 .get_fec_stats
= bnxt_get_fec_stats
,
5311 .get_fecparam
= bnxt_get_fecparam
,
5312 .set_fecparam
= bnxt_set_fecparam
,
5313 .get_pause_stats
= bnxt_get_pause_stats
,
5314 .get_pauseparam
= bnxt_get_pauseparam
,
5315 .set_pauseparam
= bnxt_set_pauseparam
,
5316 .get_drvinfo
= bnxt_get_drvinfo
,
5317 .get_regs_len
= bnxt_get_regs_len
,
5318 .get_regs
= bnxt_get_regs
,
5319 .get_wol
= bnxt_get_wol
,
5320 .set_wol
= bnxt_set_wol
,
5321 .get_coalesce
= bnxt_get_coalesce
,
5322 .set_coalesce
= bnxt_set_coalesce
,
5323 .get_msglevel
= bnxt_get_msglevel
,
5324 .set_msglevel
= bnxt_set_msglevel
,
5325 .get_sset_count
= bnxt_get_sset_count
,
5326 .get_strings
= bnxt_get_strings
,
5327 .get_ethtool_stats
= bnxt_get_ethtool_stats
,
5328 .set_ringparam
= bnxt_set_ringparam
,
5329 .get_ringparam
= bnxt_get_ringparam
,
5330 .get_channels
= bnxt_get_channels
,
5331 .set_channels
= bnxt_set_channels
,
5332 .get_rxnfc
= bnxt_get_rxnfc
,
5333 .set_rxnfc
= bnxt_set_rxnfc
,
5334 .get_rxfh_indir_size
= bnxt_get_rxfh_indir_size
,
5335 .get_rxfh_key_size
= bnxt_get_rxfh_key_size
,
5336 .get_rxfh
= bnxt_get_rxfh
,
5337 .set_rxfh
= bnxt_set_rxfh
,
5338 .create_rxfh_context
= bnxt_create_rxfh_context
,
5339 .modify_rxfh_context
= bnxt_modify_rxfh_context
,
5340 .remove_rxfh_context
= bnxt_remove_rxfh_context
,
5341 .flash_device
= bnxt_flash_device
,
5342 .get_eeprom_len
= bnxt_get_eeprom_len
,
5343 .get_eeprom
= bnxt_get_eeprom
,
5344 .set_eeprom
= bnxt_set_eeprom
,
5345 .get_link
= bnxt_get_link
,
5346 .get_link_ext_stats
= bnxt_get_link_ext_stats
,
5347 .get_eee
= bnxt_get_eee
,
5348 .set_eee
= bnxt_set_eee
,
5349 .get_module_info
= bnxt_get_module_info
,
5350 .get_module_eeprom
= bnxt_get_module_eeprom
,
5351 .get_module_eeprom_by_page
= bnxt_get_module_eeprom_by_page
,
5352 .nway_reset
= bnxt_nway_reset
,
5353 .set_phys_id
= bnxt_set_phys_id
,
5354 .self_test
= bnxt_self_test
,
5355 .get_ts_info
= bnxt_get_ts_info
,
5356 .reset
= bnxt_reset
,
5357 .set_dump
= bnxt_set_dump
,
5358 .get_dump_flag
= bnxt_get_dump_flag
,
5359 .get_dump_data
= bnxt_get_dump_data
,
5360 .get_eth_phy_stats
= bnxt_get_eth_phy_stats
,
5361 .get_eth_mac_stats
= bnxt_get_eth_mac_stats
,
5362 .get_eth_ctrl_stats
= bnxt_get_eth_ctrl_stats
,
5363 .get_rmon_stats
= bnxt_get_rmon_stats
,
5364 .get_ts_stats
= bnxt_get_ptp_stats
,