// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
/* QLogic qede NIC Driver
 * Copyright (c) 2015-2017 QLogic Corporation
 * Copyright (c) 2019-2020 Marvell International Ltd.
 */
#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/capability.h>
#include <linux/vmalloc.h>
#include <linux/phylink.h>

#include "qede.h"
#include "qede_ptp.h"
#define QEDE_RQSTAT_OFFSET(stat_name) \
	(offsetof(struct qede_rx_queue, stat_name))
#define QEDE_RQSTAT_STRING(stat_name) (#stat_name)
#define QEDE_RQSTAT(stat_name) \
	{QEDE_RQSTAT_OFFSET(stat_name), QEDE_RQSTAT_STRING(stat_name)}

#define QEDE_SELFTEST_POLL_COUNT 100
#define QEDE_DUMP_VERSION	0x1
#define QEDE_DUMP_NVM_ARG_COUNT	2
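
/* The per-queue statistics tables below pair a queue-struct member offset
 * with the name reported through 'ethtool -S'; they are walked once when
 * building the string set and again when copying the counter values.
 */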
static const struct {
	u64 offset;
	char string[ETH_GSTRING_LEN];
} qede_rqstats_arr[] = {
	QEDE_RQSTAT(rcv_pkts),
	QEDE_RQSTAT(rx_hw_errors),
	QEDE_RQSTAT(rx_alloc_errors),
	QEDE_RQSTAT(rx_ip_frags),
	QEDE_RQSTAT(xdp_no_pass),
};
#define QEDE_NUM_RQSTATS ARRAY_SIZE(qede_rqstats_arr)

#define QEDE_TQSTAT_OFFSET(stat_name) \
	(offsetof(struct qede_tx_queue, stat_name))
#define QEDE_TQSTAT_STRING(stat_name) (#stat_name)
#define QEDE_TQSTAT(stat_name) \
	{QEDE_TQSTAT_OFFSET(stat_name), QEDE_TQSTAT_STRING(stat_name)}
#define QEDE_NUM_TQSTATS ARRAY_SIZE(qede_tqstats_arr)
static const struct {
	u64 offset;
	char string[ETH_GSTRING_LEN];
} qede_tqstats_arr[] = {
	QEDE_TQSTAT(xmit_pkts),
	QEDE_TQSTAT(stopped_cnt),
	QEDE_TQSTAT(tx_mem_alloc_err),
};
#define QEDE_STAT_OFFSET(stat_name, type, base) \
	(offsetof(type, stat_name) + (base))
#define QEDE_STAT_STRING(stat_name)	(#stat_name)
#define _QEDE_STAT(stat_name, type, base, attr) \
	{QEDE_STAT_OFFSET(stat_name, type, base), \
	 QEDE_STAT_STRING(stat_name), \
	 attr}
#define QEDE_STAT(stat_name) \
	_QEDE_STAT(stat_name, struct qede_stats_common, 0, 0x0)
#define QEDE_PF_STAT(stat_name) \
	_QEDE_STAT(stat_name, struct qede_stats_common, 0, \
		   BIT(QEDE_STAT_PF_ONLY))
#define QEDE_PF_BB_STAT(stat_name) \
	_QEDE_STAT(stat_name, struct qede_stats_bb, \
		   offsetof(struct qede_stats, bb), \
		   BIT(QEDE_STAT_PF_ONLY) | BIT(QEDE_STAT_BB_ONLY))
#define QEDE_PF_AH_STAT(stat_name) \
	_QEDE_STAT(stat_name, struct qede_stats_ah, \
		   offsetof(struct qede_stats, ah), \
		   BIT(QEDE_STAT_PF_ONLY) | BIT(QEDE_STAT_AH_ONLY))
static struct {
	u64 offset;
	char string[ETH_GSTRING_LEN];
	unsigned long attr;
#define QEDE_STAT_PF_ONLY	0
#define QEDE_STAT_BB_ONLY	1
#define QEDE_STAT_AH_ONLY	2
} qede_stats_arr[] = {
	QEDE_STAT(rx_ucast_bytes),
	QEDE_STAT(rx_mcast_bytes),
	QEDE_STAT(rx_bcast_bytes),
	QEDE_STAT(rx_ucast_pkts),
	QEDE_STAT(rx_mcast_pkts),
	QEDE_STAT(rx_bcast_pkts),

	QEDE_STAT(tx_ucast_bytes),
	QEDE_STAT(tx_mcast_bytes),
	QEDE_STAT(tx_bcast_bytes),
	QEDE_STAT(tx_ucast_pkts),
	QEDE_STAT(tx_mcast_pkts),
	QEDE_STAT(tx_bcast_pkts),

	QEDE_PF_STAT(rx_64_byte_packets),
	QEDE_PF_STAT(rx_65_to_127_byte_packets),
	QEDE_PF_STAT(rx_128_to_255_byte_packets),
	QEDE_PF_STAT(rx_256_to_511_byte_packets),
	QEDE_PF_STAT(rx_512_to_1023_byte_packets),
	QEDE_PF_STAT(rx_1024_to_1518_byte_packets),
	QEDE_PF_BB_STAT(rx_1519_to_1522_byte_packets),
	QEDE_PF_BB_STAT(rx_1519_to_2047_byte_packets),
	QEDE_PF_BB_STAT(rx_2048_to_4095_byte_packets),
	QEDE_PF_BB_STAT(rx_4096_to_9216_byte_packets),
	QEDE_PF_BB_STAT(rx_9217_to_16383_byte_packets),
	QEDE_PF_AH_STAT(rx_1519_to_max_byte_packets),
	QEDE_PF_STAT(tx_64_byte_packets),
	QEDE_PF_STAT(tx_65_to_127_byte_packets),
	QEDE_PF_STAT(tx_128_to_255_byte_packets),
	QEDE_PF_STAT(tx_256_to_511_byte_packets),
	QEDE_PF_STAT(tx_512_to_1023_byte_packets),
	QEDE_PF_STAT(tx_1024_to_1518_byte_packets),
	QEDE_PF_BB_STAT(tx_1519_to_2047_byte_packets),
	QEDE_PF_BB_STAT(tx_2048_to_4095_byte_packets),
	QEDE_PF_BB_STAT(tx_4096_to_9216_byte_packets),
	QEDE_PF_BB_STAT(tx_9217_to_16383_byte_packets),
	QEDE_PF_AH_STAT(tx_1519_to_max_byte_packets),
	QEDE_PF_STAT(rx_mac_crtl_frames),
	QEDE_PF_STAT(tx_mac_ctrl_frames),
	QEDE_PF_STAT(rx_pause_frames),
	QEDE_PF_STAT(tx_pause_frames),
	QEDE_PF_STAT(rx_pfc_frames),
	QEDE_PF_STAT(tx_pfc_frames),

	QEDE_PF_STAT(rx_crc_errors),
	QEDE_PF_STAT(rx_align_errors),
	QEDE_PF_STAT(rx_carrier_errors),
	QEDE_PF_STAT(rx_oversize_packets),
	QEDE_PF_STAT(rx_jabbers),
	QEDE_PF_STAT(rx_undersize_packets),
	QEDE_PF_STAT(rx_fragments),
	QEDE_PF_BB_STAT(tx_lpi_entry_count),
	QEDE_PF_BB_STAT(tx_total_collisions),
	QEDE_PF_STAT(brb_truncates),
	QEDE_PF_STAT(brb_discards),
	QEDE_STAT(no_buff_discards),
	QEDE_PF_STAT(mftag_filter_discards),
	QEDE_PF_STAT(mac_filter_discards),
	QEDE_PF_STAT(gft_filter_drop),
	QEDE_STAT(tx_err_drop_pkts),
	QEDE_STAT(ttl0_discard),
	QEDE_STAT(packet_too_big_discard),

	QEDE_STAT(coalesced_pkts),
	QEDE_STAT(coalesced_events),
	QEDE_STAT(coalesced_aborts_num),
	QEDE_STAT(non_coalesced_pkts),
	QEDE_STAT(coalesced_bytes),

	QEDE_STAT(link_change_count),
	QEDE_STAT(ptp_skip_txts),
};
#define QEDE_NUM_STATS	ARRAY_SIZE(qede_stats_arr)
#define QEDE_STAT_IS_PF_ONLY(i) \
	test_bit(QEDE_STAT_PF_ONLY, &qede_stats_arr[i].attr)
#define QEDE_STAT_IS_BB_ONLY(i) \
	test_bit(QEDE_STAT_BB_ONLY, &qede_stats_arr[i].attr)
#define QEDE_STAT_IS_AH_ONLY(i) \
	test_bit(QEDE_STAT_AH_ONLY, &qede_stats_arr[i].attr)
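
/* The attr bits above restrict where a counter is exposed: PF-only counters
 * are hidden from VFs, and the BB-only/AH-only bits limit a counter to the
 * matching ASIC family (see qede_is_irrelevant_stat()).
 */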
enum {
	QEDE_PRI_FLAG_CMT,
	QEDE_PRI_FLAG_SMART_AN_SUPPORT, /* MFW supports SmartAN */
	QEDE_PRI_FLAG_RECOVER_ON_ERROR,
	QEDE_PRI_FLAG_ESL_SUPPORT, /* MFW supports Enhanced System Lockdown */
	QEDE_PRI_FLAG_ESL_ACTIVE, /* Enhanced System Lockdown Active status */
	QEDE_PRI_FLAG_LEN,
};
static const char qede_private_arr[QEDE_PRI_FLAG_LEN][ETH_GSTRING_LEN] = {
	"Coupled-Function",
	"SmartAN capable",
	"Recover on error",
	"ESL capable",
	"ESL active",
};
enum qede_ethtool_tests {
	QEDE_ETHTOOL_INT_LOOPBACK,
	QEDE_ETHTOOL_INTERRUPT_TEST,
	QEDE_ETHTOOL_MEMORY_TEST,
	QEDE_ETHTOOL_REGISTER_TEST,
	QEDE_ETHTOOL_CLOCK_TEST,
	QEDE_ETHTOOL_NVRAM_TEST,
	QEDE_ETHTOOL_TEST_MAX,
};
static const char qede_tests_str_arr[QEDE_ETHTOOL_TEST_MAX][ETH_GSTRING_LEN] = {
	"Internal loopback (offline)",
	"Interrupt (online)\t",
	"Memory (online)\t\t",
	"Register (online)\t",
	"Clock (online)\t\t",
	"Nvram (online)\t\t",
};
/* Forced speed capabilities maps */

static const u32 qede_forced_speed_1000[] __initconst = {
	ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseKX_Full_BIT,
	ETHTOOL_LINK_MODE_1000baseX_Full_BIT,
};

static const u32 qede_forced_speed_10000[] __initconst = {
	ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseKX4_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseR_FEC_BIT,
	ETHTOOL_LINK_MODE_10000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLR_Full_BIT,
	ETHTOOL_LINK_MODE_10000baseLRM_Full_BIT,
};

static const u32 qede_forced_speed_20000[] __initconst = {
	ETHTOOL_LINK_MODE_20000baseKR2_Full_BIT,
};

static const u32 qede_forced_speed_25000[] __initconst = {
	ETHTOOL_LINK_MODE_25000baseKR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
	ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
};

static const u32 qede_forced_speed_40000[] __initconst = {
	ETHTOOL_LINK_MODE_40000baseLR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_40000baseSR4_Full_BIT,
};

static const u32 qede_forced_speed_50000[] __initconst = {
	ETHTOOL_LINK_MODE_50000baseKR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseCR2_Full_BIT,
	ETHTOOL_LINK_MODE_50000baseSR2_Full_BIT,
};

static const u32 qede_forced_speed_100000[] __initconst = {
	ETHTOOL_LINK_MODE_100000baseKR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseSR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseCR4_Full_BIT,
	ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT,
};
static struct ethtool_forced_speed_map
qede_forced_speed_maps[] __ro_after_init = {
	ETHTOOL_FORCED_SPEED_MAP(qede_forced_speed, 1000),
	ETHTOOL_FORCED_SPEED_MAP(qede_forced_speed, 10000),
	ETHTOOL_FORCED_SPEED_MAP(qede_forced_speed, 20000),
	ETHTOOL_FORCED_SPEED_MAP(qede_forced_speed, 25000),
	ETHTOOL_FORCED_SPEED_MAP(qede_forced_speed, 40000),
	ETHTOOL_FORCED_SPEED_MAP(qede_forced_speed, 50000),
	ETHTOOL_FORCED_SPEED_MAP(qede_forced_speed, 100000),
};

void __init qede_forced_speed_maps_init(void)
{
	ethtool_forced_speed_maps_init(qede_forced_speed_maps,
				       ARRAY_SIZE(qede_forced_speed_maps));
}
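
/* qede_set_link_ksettings() walks these maps to turn a forced ethtool speed
 * into the subset of supported link modes that is then advertised to the
 * management firmware.
 */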
/* Ethtool callbacks */

static void qede_get_strings_stats_txq(struct qede_dev *edev,
				       struct qede_tx_queue *txq, u8 **buf)
{
	int i;

	for (i = 0; i < QEDE_NUM_TQSTATS; i++) {
		if (txq->is_xdp)
			ethtool_sprintf(buf, "%d [XDP]: %s",
					QEDE_TXQ_XDP_TO_IDX(edev, txq),
					qede_tqstats_arr[i].string);
		else
			ethtool_sprintf(buf, "%d_%d: %s", txq->index, txq->cos,
					qede_tqstats_arr[i].string);
	}
}
static void qede_get_strings_stats_rxq(struct qede_dev *edev,
				       struct qede_rx_queue *rxq, u8 **buf)
{
	int i;

	for (i = 0; i < QEDE_NUM_RQSTATS; i++)
		ethtool_sprintf(buf, "%d: %s", rxq->rxq_id,
				qede_rqstats_arr[i].string);
}
static bool qede_is_irrelevant_stat(struct qede_dev *edev, int stat_index)
{
	return (IS_VF(edev) && QEDE_STAT_IS_PF_ONLY(stat_index)) ||
	       (QEDE_IS_BB(edev) && QEDE_STAT_IS_AH_ONLY(stat_index)) ||
	       (QEDE_IS_AH(edev) && QEDE_STAT_IS_BB_ONLY(stat_index));
}
static void qede_get_strings_stats(struct qede_dev *edev, u8 *buf)
{
	struct qede_fastpath *fp;
	int i;

	/* Account for queue statistics */
	for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
		fp = &edev->fp_array[i];

		if (fp->type & QEDE_FASTPATH_RX)
			qede_get_strings_stats_rxq(edev, fp->rxq, &buf);

		if (fp->type & QEDE_FASTPATH_XDP)
			qede_get_strings_stats_txq(edev, fp->xdp_tx, &buf);

		if (fp->type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos)
				qede_get_strings_stats_txq(edev,
							   &fp->txq[cos], &buf);
		}
	}

	/* Account for non-queue statistics */
	for (i = 0; i < QEDE_NUM_STATS; i++) {
		if (qede_is_irrelevant_stat(edev, i))
			continue;
		ethtool_puts(&buf, qede_stats_arr[i].string);
	}
}
static void qede_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct qede_dev *edev = netdev_priv(dev);
	int i;

	switch (stringset) {
	case ETH_SS_STATS:
		qede_get_strings_stats(edev, buf);
		break;
	case ETH_SS_PRIV_FLAGS:
		for (i = 0; i < QEDE_PRI_FLAG_LEN; i++)
			ethtool_puts(&buf, qede_private_arr[i]);
		break;
	case ETH_SS_TEST:
		for (i = 0; i < QEDE_ETHTOOL_TEST_MAX; i++)
			ethtool_puts(&buf, qede_tests_str_arr[i]);
		break;
	default:
		DP_VERBOSE(edev, QED_MSG_DEBUG,
			   "Unsupported stringset 0x%08x\n", stringset);
	}
}
static void qede_get_ethtool_stats_txq(struct qede_tx_queue *txq, u64 **buf)
{
	int i;

	for (i = 0; i < QEDE_NUM_TQSTATS; i++) {
		**buf = *((u64 *)(((void *)txq) + qede_tqstats_arr[i].offset));
		(*buf)++;
	}
}
static void qede_get_ethtool_stats_rxq(struct qede_rx_queue *rxq, u64 **buf)
{
	int i;

	for (i = 0; i < QEDE_NUM_RQSTATS; i++) {
		**buf = *((u64 *)(((void *)rxq) + qede_rqstats_arr[i].offset));
		(*buf)++;
	}
}
static void qede_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *buf)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_fastpath *fp;
	int i;

	qede_fill_by_demand_stats(edev);

	/* Need to protect the access to the fastpath array */
	__qede_lock(edev);

	for (i = 0; i < QEDE_QUEUE_CNT(edev); i++) {
		fp = &edev->fp_array[i];

		if (fp->type & QEDE_FASTPATH_RX)
			qede_get_ethtool_stats_rxq(fp->rxq, &buf);

		if (fp->type & QEDE_FASTPATH_XDP)
			qede_get_ethtool_stats_txq(fp->xdp_tx, &buf);

		if (fp->type & QEDE_FASTPATH_TX) {
			int cos;

			for_each_cos_in_txq(edev, cos)
				qede_get_ethtool_stats_txq(&fp->txq[cos], &buf);
		}
	}

	spin_lock(&edev->stats_lock);

	for (i = 0; i < QEDE_NUM_STATS; i++) {
		if (qede_is_irrelevant_stat(edev, i))
			continue;
		*buf = *((u64 *)(((void *)&edev->stats) +
				 qede_stats_arr[i].offset));
		buf++;
	}

	spin_unlock(&edev->stats_lock);

	__qede_unlock(edev);
}
static int qede_get_sset_count(struct net_device *dev, int stringset)
{
	struct qede_dev *edev = netdev_priv(dev);
	int num_stats = QEDE_NUM_STATS, i;

	switch (stringset) {
	case ETH_SS_STATS:
		for (i = 0; i < QEDE_NUM_STATS; i++)
			if (qede_is_irrelevant_stat(edev, i))
				num_stats--;

		/* Account for the Regular Tx statistics */
		num_stats += QEDE_TSS_COUNT(edev) * QEDE_NUM_TQSTATS *
				edev->dev_info.num_tc;

		/* Account for the Regular Rx statistics */
		num_stats += QEDE_RSS_COUNT(edev) * QEDE_NUM_RQSTATS;

		/* Account for XDP statistics [if needed] */
		if (edev->xdp_prog)
			num_stats += QEDE_RSS_COUNT(edev) * QEDE_NUM_TQSTATS;
		return num_stats;

	case ETH_SS_PRIV_FLAGS:
		return QEDE_PRI_FLAG_LEN;
	case ETH_SS_TEST:
		return QEDE_ETHTOOL_TEST_MAX;
	default:
		DP_VERBOSE(edev, QED_MSG_DEBUG,
			   "Unsupported stringset 0x%08x\n", stringset);
		return -EINVAL;
	}
}
static u32 qede_get_priv_flags(struct net_device *dev)
{
	struct qede_dev *edev = netdev_priv(dev);
	bool esl_active;
	u32 flags = 0;

	if (edev->dev_info.common.num_hwfns > 1)
		flags |= BIT(QEDE_PRI_FLAG_CMT);

	if (edev->dev_info.common.smart_an)
		flags |= BIT(QEDE_PRI_FLAG_SMART_AN_SUPPORT);

	if (edev->err_flags & BIT(QEDE_ERR_IS_RECOVERABLE))
		flags |= BIT(QEDE_PRI_FLAG_RECOVER_ON_ERROR);

	if (edev->dev_info.common.esl)
		flags |= BIT(QEDE_PRI_FLAG_ESL_SUPPORT);

	edev->ops->common->get_esl_status(edev->cdev, &esl_active);

	if (esl_active)
		flags |= BIT(QEDE_PRI_FLAG_ESL_ACTIVE);

	return flags;
}
static int qede_set_priv_flags(struct net_device *dev, u32 flags)
{
	struct qede_dev *edev = netdev_priv(dev);
	u32 cflags = qede_get_priv_flags(dev);
	u32 dflags = flags ^ cflags;

	/* can only change RECOVER_ON_ERROR flag */
	if (dflags & ~BIT(QEDE_PRI_FLAG_RECOVER_ON_ERROR))
		return -EINVAL;

	if (flags & BIT(QEDE_PRI_FLAG_RECOVER_ON_ERROR))
		set_bit(QEDE_ERR_IS_RECOVERABLE, &edev->err_flags);
	else
		clear_bit(QEDE_ERR_IS_RECOVERABLE, &edev->err_flags);

	return 0;
}
static int qede_get_link_ksettings(struct net_device *dev,
				   struct ethtool_link_ksettings *cmd)
{
	typeof(cmd->link_modes) *link_modes = &cmd->link_modes;
	struct ethtool_link_settings *base = &cmd->base;
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_output current_link;

	__qede_lock(edev);

	memset(&current_link, 0, sizeof(current_link));
	edev->ops->common->get_link(edev->cdev, &current_link);

	linkmode_copy(link_modes->supported, current_link.supported_caps);
	linkmode_copy(link_modes->advertising, current_link.advertised_caps);
	linkmode_copy(link_modes->lp_advertising, current_link.lp_caps);

	if ((edev->state == QEDE_STATE_OPEN) && (current_link.link_up)) {
		base->speed = current_link.speed;
		base->duplex = current_link.duplex;
	} else {
		base->speed = SPEED_UNKNOWN;
		base->duplex = DUPLEX_UNKNOWN;
	}

	__qede_unlock(edev);

	base->port = current_link.port;
	base->autoneg = (current_link.autoneg) ? AUTONEG_ENABLE :
						 AUTONEG_DISABLE;

	return 0;
}
static int qede_set_link_ksettings(struct net_device *dev,
				   const struct ethtool_link_ksettings *cmd)
{
	const struct ethtool_link_settings *base = &cmd->base;
	const struct ethtool_forced_speed_map *map;
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_output current_link;
	struct qed_link_params params;
	u32 i;

	if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
		DP_INFO(edev, "Link settings are not allowed to be changed\n");
		return -EOPNOTSUPP;
	}

	memset(&current_link, 0, sizeof(current_link));
	memset(&params, 0, sizeof(params));
	edev->ops->common->get_link(edev->cdev, &current_link);

	params.override_flags |= QED_LINK_OVERRIDE_SPEED_ADV_SPEEDS;
	params.override_flags |= QED_LINK_OVERRIDE_SPEED_AUTONEG;

	if (base->autoneg == AUTONEG_ENABLE) {
		if (!phylink_test(current_link.supported_caps, Autoneg)) {
			DP_INFO(edev, "Auto negotiation is not supported\n");
			return -EOPNOTSUPP;
		}

		params.autoneg = true;
		params.forced_speed = 0;

		linkmode_copy(params.adv_speeds, cmd->link_modes.advertising);
	} else {	/* forced speed */
		params.override_flags |= QED_LINK_OVERRIDE_SPEED_FORCED_SPEED;
		params.autoneg = false;
		params.forced_speed = base->speed;

		for (i = 0; i < ARRAY_SIZE(qede_forced_speed_maps); i++) {
			map = qede_forced_speed_maps + i;

			if (base->speed != map->speed ||
			    !linkmode_intersects(current_link.supported_caps,
						 map->caps))
				continue;

			linkmode_and(params.adv_speeds,
				     current_link.supported_caps, map->caps);
			goto out;
		}

		DP_INFO(edev, "Unsupported speed %u\n", base->speed);
		return -EINVAL;
	}

out:
	params.link_up = true;
	edev->ops->common->set_link(edev->cdev, &params);

	return 0;
}
static void qede_get_drvinfo(struct net_device *ndev,
			     struct ethtool_drvinfo *info)
{
	char mfw[ETHTOOL_FWVERS_LEN], storm[ETHTOOL_FWVERS_LEN];
	struct qede_dev *edev = netdev_priv(ndev);
	char mbi[ETHTOOL_FWVERS_LEN];

	strscpy(info->driver, "qede", sizeof(info->driver));

	snprintf(storm, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
		 edev->dev_info.common.fw_major,
		 edev->dev_info.common.fw_minor,
		 edev->dev_info.common.fw_rev,
		 edev->dev_info.common.fw_eng);

	snprintf(mfw, ETHTOOL_FWVERS_LEN, "%d.%d.%d.%d",
		 (edev->dev_info.common.mfw_rev >> 24) & 0xFF,
		 (edev->dev_info.common.mfw_rev >> 16) & 0xFF,
		 (edev->dev_info.common.mfw_rev >> 8) & 0xFF,
		 edev->dev_info.common.mfw_rev & 0xFF);

	if ((strlen(storm) + strlen("[storm]")) < sizeof(info->version))
		snprintf(info->version, sizeof(info->version),
			 "[storm %s]", storm);
	else
		snprintf(info->version, sizeof(info->version),
			 "%s", storm);

	if (edev->dev_info.common.mbi_version) {
		snprintf(mbi, ETHTOOL_FWVERS_LEN, "%d.%d.%d",
			 (edev->dev_info.common.mbi_version &
			  QED_MBI_VERSION_2_MASK) >> QED_MBI_VERSION_2_OFFSET,
			 (edev->dev_info.common.mbi_version &
			  QED_MBI_VERSION_1_MASK) >> QED_MBI_VERSION_1_OFFSET,
			 (edev->dev_info.common.mbi_version &
			  QED_MBI_VERSION_0_MASK) >> QED_MBI_VERSION_0_OFFSET);
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "mbi %s [mfw %s]", mbi, mfw);
	} else {
		snprintf(info->fw_version, sizeof(info->fw_version),
			 "mfw %s", mfw);
	}

	strscpy(info->bus_info, pci_name(edev->pdev), sizeof(info->bus_info));
}
static void qede_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct qede_dev *edev = netdev_priv(ndev);

	if (edev->dev_info.common.wol_support) {
		wol->supported = WAKE_MAGIC;
		wol->wolopts = edev->wol_enabled ? WAKE_MAGIC : 0;
	}
}
static int qede_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
{
	struct qede_dev *edev = netdev_priv(ndev);
	bool wol_requested;
	int rc = 0;

	if (wol->wolopts & ~WAKE_MAGIC) {
		DP_INFO(edev,
			"Can't support WoL options other than magic-packet\n");
		return -EINVAL;
	}

	wol_requested = !!(wol->wolopts & WAKE_MAGIC);
	if (wol_requested == edev->wol_enabled)
		return 0;

	/* Need to actually change configuration */
	if (!edev->dev_info.common.wol_support) {
		DP_INFO(edev, "Device doesn't support WoL\n");
		return -EINVAL;
	}

	rc = edev->ops->common->update_wol(edev->cdev, wol_requested);
	if (!rc)
		edev->wol_enabled = wol_requested;

	return rc;
}
static u32 qede_get_msglevel(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	return ((u32)edev->dp_level << QED_LOG_LEVEL_SHIFT) | edev->dp_module;
}
static void qede_set_msglevel(struct net_device *ndev, u32 level)
{
	struct qede_dev *edev = netdev_priv(ndev);
	u32 dp_module = 0;
	u8 dp_level = 0;

	qede_config_debug(level, &dp_module, &dp_level);

	edev->dp_level = dp_level;
	edev->dp_module = dp_module;
	edev->ops->common->update_msglvl(edev->cdev,
					 dp_module, dp_level);
}
static int qede_nway_reset(struct net_device *dev)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_output current_link;
	struct qed_link_params link_params;

	if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
		DP_INFO(edev, "Link settings are not allowed to be changed\n");
		return -EOPNOTSUPP;
	}

	if (!netif_running(dev))
		return 0;

	memset(&current_link, 0, sizeof(current_link));
	edev->ops->common->get_link(edev->cdev, &current_link);
	if (!current_link.link_up)
		return 0;

	/* Toggle the link */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = false;
	edev->ops->common->set_link(edev->cdev, &link_params);
	link_params.link_up = true;
	edev->ops->common->set_link(edev->cdev, &link_params);

	return 0;
}
static u32 qede_get_link(struct net_device *dev)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_output current_link;

	memset(&current_link, 0, sizeof(current_link));
	edev->ops->common->get_link(edev->cdev, &current_link);

	return current_link.link_up;
}
static int qede_flash_device(struct net_device *dev,
			     struct ethtool_flash *flash)
{
	struct qede_dev *edev = netdev_priv(dev);

	return edev->ops->common->nvm_flash(edev->cdev, flash->data);
}
static int qede_get_coalesce(struct net_device *dev,
			     struct ethtool_coalesce *coal,
			     struct kernel_ethtool_coalesce *kernel_coal,
			     struct netlink_ext_ack *extack)
{
	void *rx_handle = NULL, *tx_handle = NULL;
	struct qede_dev *edev = netdev_priv(dev);
	u16 rx_coal, tx_coal, i, rc = 0;
	struct qede_fastpath *fp;

	rx_coal = QED_DEFAULT_RX_USECS;
	tx_coal = QED_DEFAULT_TX_USECS;

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	__qede_lock(edev);
	if (edev->state == QEDE_STATE_OPEN) {
		for_each_queue(i) {
			fp = &edev->fp_array[i];

			if (fp->type & QEDE_FASTPATH_RX) {
				rx_handle = fp->rxq->handle;
				break;
			}
		}

		rc = edev->ops->get_coalesce(edev->cdev, &rx_coal, rx_handle);
		if (rc) {
			DP_INFO(edev, "Read Rx coalesce error\n");
			goto out;
		}

		for_each_queue(i) {
			struct qede_tx_queue *txq;

			fp = &edev->fp_array[i];

			/* All TX queues of given fastpath uses same
			 * coalescing value, so no need to iterate over
			 * all TCs, TC0 txq should suffice.
			 */
			if (fp->type & QEDE_FASTPATH_TX) {
				txq = QEDE_FP_TC0_TXQ(fp);
				tx_handle = txq->handle;
				break;
			}
		}

		rc = edev->ops->get_coalesce(edev->cdev, &tx_coal, tx_handle);
		if (rc)
			DP_INFO(edev, "Read Tx coalesce error\n");
	}

out:
	__qede_unlock(edev);

	coal->rx_coalesce_usecs = rx_coal;
	coal->tx_coalesce_usecs = tx_coal;
	coal->stats_block_coalesce_usecs = edev->stats_coal_usecs;

	return rc;
}
int qede_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal,
		      struct kernel_ethtool_coalesce *kernel_coal,
		      struct netlink_ext_ack *extack)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_fastpath *fp;
	int i, rc = 0;
	u16 rxc, txc;

	if (edev->stats_coal_usecs != coal->stats_block_coalesce_usecs) {
		edev->stats_coal_usecs = coal->stats_block_coalesce_usecs;
		if (edev->stats_coal_usecs) {
			edev->stats_coal_ticks = usecs_to_jiffies(edev->stats_coal_usecs);
			schedule_delayed_work(&edev->periodic_task, 0);

			DP_INFO(edev, "Configured stats coal ticks=%lu jiffies\n",
				edev->stats_coal_ticks);
		} else {
			cancel_delayed_work_sync(&edev->periodic_task);
		}
	}

	if (!netif_running(dev)) {
		DP_INFO(edev, "Interface is down\n");
		return -EINVAL;
	}

	if (coal->rx_coalesce_usecs > QED_COALESCE_MAX ||
	    coal->tx_coalesce_usecs > QED_COALESCE_MAX) {
		DP_INFO(edev,
			"Can't support requested %s coalesce value [max supported value %d]\n",
			coal->rx_coalesce_usecs > QED_COALESCE_MAX ? "rx" :
			"tx", QED_COALESCE_MAX);
		return -EINVAL;
	}

	rxc = (u16)coal->rx_coalesce_usecs;
	txc = (u16)coal->tx_coalesce_usecs;
	for_each_queue(i) {
		fp = &edev->fp_array[i];

		if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
			rc = edev->ops->common->set_coalesce(edev->cdev,
							     rxc, 0,
							     fp->rxq->handle);
			if (rc) {
				DP_INFO(edev,
					"Set RX coalesce error, rc = %d\n", rc);
				return rc;
			}
			edev->coal_entry[i].rxc = rxc;
			edev->coal_entry[i].isvalid = true;
		}

		if (edev->fp_array[i].type & QEDE_FASTPATH_TX) {
			struct qede_tx_queue *txq;

			/* All TX queues of given fastpath uses same
			 * coalescing value, so no need to iterate over
			 * all TCs, TC0 txq should suffice.
			 */
			txq = QEDE_FP_TC0_TXQ(fp);

			rc = edev->ops->common->set_coalesce(edev->cdev,
							     0, txc,
							     txq->handle);
			if (rc) {
				DP_INFO(edev,
					"Set TX coalesce error, rc = %d\n", rc);
				return rc;
			}
			edev->coal_entry[i].txc = txc;
			edev->coal_entry[i].isvalid = true;
		}
	}

	return rc;
}
static void qede_get_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering,
			       struct kernel_ethtool_ringparam *kernel_ering,
			       struct netlink_ext_ack *extack)
{
	struct qede_dev *edev = netdev_priv(dev);

	ering->rx_max_pending = NUM_RX_BDS_MAX;
	ering->rx_pending = edev->q_num_rx_buffers;
	ering->tx_max_pending = NUM_TX_BDS_MAX;
	ering->tx_pending = edev->q_num_tx_buffers;
}
static int qede_set_ringparam(struct net_device *dev,
			      struct ethtool_ringparam *ering,
			      struct kernel_ethtool_ringparam *kernel_ering,
			      struct netlink_ext_ack *extack)
{
	struct qede_dev *edev = netdev_priv(dev);

	DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
		   "Set ring params command parameters: rx_pending = %d, tx_pending = %d\n",
		   ering->rx_pending, ering->tx_pending);

	/* Validate legality of configuration */
	if (ering->rx_pending > NUM_RX_BDS_MAX ||
	    ering->rx_pending < NUM_RX_BDS_MIN ||
	    ering->tx_pending > NUM_TX_BDS_MAX ||
	    ering->tx_pending < NUM_TX_BDS_MIN) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "Can only support Rx Buffer size [0x%08x,...,0x%08x] and Tx Buffer size [0x%08x,...,0x%08x]\n",
			   NUM_RX_BDS_MIN, NUM_RX_BDS_MAX,
			   NUM_TX_BDS_MIN, NUM_TX_BDS_MAX);
		return -EINVAL;
	}

	/* Change ring size and re-load */
	edev->q_num_rx_buffers = ering->rx_pending;
	edev->q_num_tx_buffers = ering->tx_pending;

	qede_reload(edev, NULL, false);

	return 0;
}
static void qede_get_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_output current_link;

	memset(&current_link, 0, sizeof(current_link));
	edev->ops->common->get_link(edev->cdev, &current_link);

	if (current_link.pause_config & QED_LINK_PAUSE_AUTONEG_ENABLE)
		epause->autoneg = true;
	if (current_link.pause_config & QED_LINK_PAUSE_RX_ENABLE)
		epause->rx_pause = true;
	if (current_link.pause_config & QED_LINK_PAUSE_TX_ENABLE)
		epause->tx_pause = true;

	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "ethtool_pauseparam: cmd %d autoneg %d rx_pause %d tx_pause %d\n",
		   epause->cmd, epause->autoneg, epause->rx_pause,
		   epause->tx_pause);
}
*dev
,
962 struct ethtool_pauseparam
*epause
)
964 struct qede_dev
*edev
= netdev_priv(dev
);
965 struct qed_link_params params
;
966 struct qed_link_output current_link
;
968 if (!edev
->ops
|| !edev
->ops
->common
->can_link_change(edev
->cdev
)) {
970 "Pause settings are not allowed to be changed\n");
974 memset(¤t_link
, 0, sizeof(current_link
));
975 edev
->ops
->common
->get_link(edev
->cdev
, ¤t_link
);
977 memset(¶ms
, 0, sizeof(params
));
978 params
.override_flags
|= QED_LINK_OVERRIDE_PAUSE_CONFIG
;
980 if (epause
->autoneg
) {
981 if (!phylink_test(current_link
.supported_caps
, Autoneg
)) {
982 DP_INFO(edev
, "autoneg not supported\n");
986 params
.pause_config
|= QED_LINK_PAUSE_AUTONEG_ENABLE
;
989 if (epause
->rx_pause
)
990 params
.pause_config
|= QED_LINK_PAUSE_RX_ENABLE
;
991 if (epause
->tx_pause
)
992 params
.pause_config
|= QED_LINK_PAUSE_TX_ENABLE
;
994 params
.link_up
= true;
995 edev
->ops
->common
->set_link(edev
->cdev
, ¶ms
);
static void qede_get_regs(struct net_device *ndev,
			  struct ethtool_regs *regs, void *buffer)
{
	struct qede_dev *edev = netdev_priv(ndev);

	memset(buffer, 0, regs->len);

	if (edev->ops && edev->ops->common)
		edev->ops->common->dbg_all_data(edev->cdev, buffer);
}
static int qede_get_regs_len(struct net_device *ndev)
{
	struct qede_dev *edev = netdev_priv(ndev);

	if (edev->ops && edev->ops->common)
		return edev->ops->common->dbg_all_data_size(edev->cdev);
	else
		return -EINVAL;
}
static void qede_update_mtu(struct qede_dev *edev,
			    struct qede_reload_args *args)
{
	WRITE_ONCE(edev->ndev->mtu, args->u.mtu);
}
/* Netdevice NDOs */
int qede_change_mtu(struct net_device *ndev, int new_mtu)
{
	struct qede_dev *edev = netdev_priv(ndev);
	struct qede_reload_args args;

	DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
		   "Configuring MTU size of %d\n", new_mtu);

	if (new_mtu > PAGE_SIZE)
		ndev->features &= ~NETIF_F_GRO_HW;

	/* Set the mtu field and re-start the interface if needed */
	args.u.mtu = new_mtu;
	args.func = &qede_update_mtu;
	qede_reload(edev, &args, false);
#if IS_ENABLED(CONFIG_QED_RDMA)
	qede_rdma_event_change_mtu(edev);
#endif
	edev->ops->common->update_mtu(edev->cdev, new_mtu);

	return 0;
}
static void qede_get_channels(struct net_device *dev,
			      struct ethtool_channels *channels)
{
	struct qede_dev *edev = netdev_priv(dev);

	channels->max_combined = QEDE_MAX_RSS_CNT(edev);
	channels->max_rx = QEDE_MAX_RSS_CNT(edev);
	channels->max_tx = QEDE_MAX_RSS_CNT(edev);
	channels->combined_count = QEDE_QUEUE_CNT(edev) - edev->fp_num_tx -
				   edev->fp_num_rx;
	channels->tx_count = edev->fp_num_tx;
	channels->rx_count = edev->fp_num_rx;
}
static int qede_set_channels(struct net_device *dev,
			     struct ethtool_channels *channels)
{
	struct qede_dev *edev = netdev_priv(dev);
	u32 count;

	DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
		   "set-channels command parameters: rx = %d, tx = %d, other = %d, combined = %d\n",
		   channels->rx_count, channels->tx_count,
		   channels->other_count, channels->combined_count);

	count = channels->rx_count + channels->tx_count +
		channels->combined_count;

	/* We don't support `other' channels */
	if (channels->other_count) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "command parameters not supported\n");
		return -EINVAL;
	}

	if (!(channels->combined_count || (channels->rx_count &&
					   channels->tx_count))) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "need to request at least one transmit and one receive channel\n");
		return -EINVAL;
	}

	if (count > QEDE_MAX_RSS_CNT(edev)) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "requested channels = %d max supported channels = %d\n",
			   count, QEDE_MAX_RSS_CNT(edev));
		return -EINVAL;
	}

	/* Check if there was a change in the active parameters */
	if ((count == QEDE_QUEUE_CNT(edev)) &&
	    (channels->tx_count == edev->fp_num_tx) &&
	    (channels->rx_count == edev->fp_num_rx)) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "No change in active parameters\n");
		return 0;
	}

	/* We need the number of queues to be divisible between the hwfns */
	if ((count % edev->dev_info.common.num_hwfns) ||
	    (channels->tx_count % edev->dev_info.common.num_hwfns) ||
	    (channels->rx_count % edev->dev_info.common.num_hwfns)) {
		DP_VERBOSE(edev, (NETIF_MSG_IFUP | NETIF_MSG_IFDOWN),
			   "Number of channels must be divisible by %04x\n",
			   edev->dev_info.common.num_hwfns);
		return -EINVAL;
	}

	/* Set number of queues and reload if necessary */
	edev->req_queues = count;
	edev->req_num_tx = channels->tx_count;
	edev->req_num_rx = channels->rx_count;
	/* Reset the indirection table if rx queue count is updated */
	if ((edev->req_queues - edev->req_num_tx) != QEDE_RSS_COUNT(edev)) {
		edev->rss_params_inited &= ~QEDE_RSS_INDIR_INITED;
		memset(edev->rss_ind_table, 0, sizeof(edev->rss_ind_table));
	}

	qede_reload(edev, NULL, false);

	return 0;
}
static int qede_get_ts_info(struct net_device *dev,
			    struct kernel_ethtool_ts_info *info)
{
	struct qede_dev *edev = netdev_priv(dev);

	return qede_ptp_get_ts_info(edev, info);
}
static int qede_set_phys_id(struct net_device *dev,
			    enum ethtool_phys_id_state state)
{
	struct qede_dev *edev = netdev_priv(dev);
	u8 led_state = 0;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		return 1;	/* cycle on/off once per second */

	case ETHTOOL_ID_ON:
		led_state = QED_LED_MODE_ON;
		break;

	case ETHTOOL_ID_OFF:
		led_state = QED_LED_MODE_OFF;
		break;

	case ETHTOOL_ID_INACTIVE:
		led_state = QED_LED_MODE_RESTORE;
		break;
	}

	edev->ops->common->set_led(edev->cdev, led_state);

	return 0;
}
static int qede_get_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
{
	info->data = RXH_IP_SRC | RXH_IP_DST;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case UDP_V4_FLOW:
		if (edev->rss_caps & QED_RSS_IPV4_UDP)
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	case UDP_V6_FLOW:
		if (edev->rss_caps & QED_RSS_IPV6_UDP)
			info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		break;
	default:
		info->data = 0;
		break;
	}

	return 0;
}
static int qede_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
			  u32 *rule_locs)
{
	struct qede_dev *edev = netdev_priv(dev);
	int rc = 0;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = QEDE_RSS_COUNT(edev);
		break;
	case ETHTOOL_GRXFH:
		rc = qede_get_rss_flags(edev, info);
		break;
	case ETHTOOL_GRXCLSRLCNT:
		info->rule_cnt = qede_get_arfs_filter_count(edev);
		info->data = QEDE_RFS_MAX_FLTR;
		break;
	case ETHTOOL_GRXCLSRULE:
		rc = qede_get_cls_rule_entry(edev, info);
		break;
	case ETHTOOL_GRXCLSRLALL:
		rc = qede_get_cls_rule_all(edev, info, rule_locs);
		break;
	default:
		DP_ERR(edev, "Command parameters not supported\n");
		rc = -EOPNOTSUPP;
	}

	return rc;
}
static int qede_set_rss_flags(struct qede_dev *edev, struct ethtool_rxnfc *info)
{
	struct qed_update_vport_params *vport_update_params;
	u8 set_caps = 0, clr_caps = 0;
	int rc = 0;

	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "Set rss flags command parameters: flow type = %d, data = %llu\n",
		   info->flow_type, info->data);

	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		/* For TCP only 4-tuple hash is supported */
		if (info->data ^ (RXH_IP_SRC | RXH_IP_DST |
				  RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
			DP_INFO(edev, "Command parameters not supported\n");
			return -EINVAL;
		}
		return 0;
	case UDP_V4_FLOW:
		/* For UDP either 2-tuple hash or 4-tuple hash is supported */
		if (info->data == (RXH_IP_SRC | RXH_IP_DST |
				   RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
			set_caps = QED_RSS_IPV4_UDP;
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "UDP 4-tuple enabled\n");
		} else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
			clr_caps = QED_RSS_IPV4_UDP;
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "UDP 4-tuple disabled\n");
		} else {
			return -EINVAL;
		}
		break;
	case UDP_V6_FLOW:
		/* For UDP either 2-tuple hash or 4-tuple hash is supported */
		if (info->data == (RXH_IP_SRC | RXH_IP_DST |
				   RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
			set_caps = QED_RSS_IPV6_UDP;
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "UDP 4-tuple enabled\n");
		} else if (info->data == (RXH_IP_SRC | RXH_IP_DST)) {
			clr_caps = QED_RSS_IPV6_UDP;
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "UDP 4-tuple disabled\n");
		} else {
			return -EINVAL;
		}
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		/* For IP only 2-tuple hash is supported */
		if (info->data ^ (RXH_IP_SRC | RXH_IP_DST)) {
			DP_INFO(edev, "Command parameters not supported\n");
			return -EINVAL;
		}
		return 0;
	case AH_ESP_V4_FLOW:
	case AH_ESP_V6_FLOW:
		/* RSS is not supported for these protocols */
		return 0;
	default:
		DP_INFO(edev, "Command parameters not supported\n");
		return -EINVAL;
	}

	/* No action is needed if there is no change in the rss capability */
	if (edev->rss_caps == ((edev->rss_caps & ~clr_caps) | set_caps))
		return 0;

	/* Update internal configuration */
	edev->rss_caps = ((edev->rss_caps & ~clr_caps) | set_caps);
	edev->rss_params_inited |= QEDE_RSS_CAPS_INITED;

	/* Re-configure if possible */
	__qede_lock(edev);
	if (edev->state == QEDE_STATE_OPEN) {
		vport_update_params = vzalloc(sizeof(*vport_update_params));
		if (!vport_update_params) {
			__qede_unlock(edev);
			return -ENOMEM;
		}
		qede_fill_rss_params(edev, &vport_update_params->rss_params,
				     &vport_update_params->update_rss_flg);
		rc = edev->ops->vport_update(edev->cdev, vport_update_params);
		vfree(vport_update_params);
	}
	__qede_unlock(edev);

	return rc;
}
static int qede_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
{
	struct qede_dev *edev = netdev_priv(dev);
	int rc;

	switch (info->cmd) {
	case ETHTOOL_SRXFH:
		rc = qede_set_rss_flags(edev, info);
		break;
	case ETHTOOL_SRXCLSRLINS:
		rc = qede_add_cls_rule(edev, info);
		break;
	case ETHTOOL_SRXCLSRLDEL:
		rc = qede_delete_flow_filter(edev, info->fs.location);
		break;
	default:
		DP_INFO(edev, "Command parameters not supported\n");
		rc = -EOPNOTSUPP;
	}

	return rc;
}
static u32 qede_get_rxfh_indir_size(struct net_device *dev)
{
	return QED_RSS_IND_TABLE_SIZE;
}
static u32 qede_get_rxfh_key_size(struct net_device *dev)
{
	struct qede_dev *edev = netdev_priv(dev);

	return sizeof(edev->rss_key);
}
static int qede_get_rxfh(struct net_device *dev,
			 struct ethtool_rxfh_param *rxfh)
{
	struct qede_dev *edev = netdev_priv(dev);
	int i;

	rxfh->hfunc = ETH_RSS_HASH_TOP;

	if (rxfh->indir)
		for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
			rxfh->indir[i] = edev->rss_ind_table[i];

	if (rxfh->key)
		memcpy(rxfh->key, edev->rss_key, qede_get_rxfh_key_size(dev));

	return 0;
}
static int qede_set_rxfh(struct net_device *dev,
			 struct ethtool_rxfh_param *rxfh,
			 struct netlink_ext_ack *extack)
{
	struct qed_update_vport_params *vport_update_params;
	struct qede_dev *edev = netdev_priv(dev);
	int i, rc = 0;

	if (edev->dev_info.common.num_hwfns > 1) {
		DP_INFO(edev,
			"RSS configuration is not supported for 100G devices\n");
		return -EOPNOTSUPP;
	}

	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    rxfh->hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	if (!rxfh->indir && !rxfh->key)
		return 0;

	if (rxfh->indir) {
		for (i = 0; i < QED_RSS_IND_TABLE_SIZE; i++)
			edev->rss_ind_table[i] = rxfh->indir[i];
		edev->rss_params_inited |= QEDE_RSS_INDIR_INITED;
	}

	if (rxfh->key) {
		memcpy(&edev->rss_key, rxfh->key, qede_get_rxfh_key_size(dev));
		edev->rss_params_inited |= QEDE_RSS_KEY_INITED;
	}

	__qede_lock(edev);
	if (edev->state == QEDE_STATE_OPEN) {
		vport_update_params = vzalloc(sizeof(*vport_update_params));
		if (!vport_update_params) {
			__qede_unlock(edev);
			return -ENOMEM;
		}
		qede_fill_rss_params(edev, &vport_update_params->rss_params,
				     &vport_update_params->update_rss_flg);
		rc = edev->ops->vport_update(edev->cdev, vport_update_params);
		vfree(vport_update_params);
	}
	__qede_unlock(edev);

	return rc;
}
/* This function enables the interrupt generation and the NAPI on the device */
static void qede_netif_start(struct qede_dev *edev)
{
	int i;

	if (!netif_running(edev->ndev))
		return;

	for_each_queue(i) {
		/* Update and reenable interrupts */
		qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_ENABLE, 1);
		napi_enable(&edev->fp_array[i].napi);
	}
}
/* This function disables the NAPI and the interrupt generation on the device */
static void qede_netif_stop(struct qede_dev *edev)
{
	int i;

	for_each_queue(i) {
		napi_disable(&edev->fp_array[i].napi);
		/* Disable interrupts */
		qed_sb_ack(edev->fp_array[i].sb_info, IGU_INT_DISABLE, 0);
	}
}
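
/* The loopback self-test below hand-crafts a single frame, posts it on the
 * TC0 Tx queue, polls for its completion and then verifies the frame payload
 * on the first Rx queue.
 */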
static int qede_selftest_transmit_traffic(struct qede_dev *edev,
					  struct sk_buff *skb)
{
	struct qede_tx_queue *txq = NULL;
	struct eth_tx_1st_bd *first_bd;
	dma_addr_t mapping;
	int i, idx;
	u16 val;

	for_each_queue(i) {
		struct qede_fastpath *fp = &edev->fp_array[i];

		if (fp->type & QEDE_FASTPATH_TX) {
			txq = QEDE_FP_TC0_TXQ(fp);
			break;
		}
	}

	if (!txq) {
		DP_NOTICE(edev, "Tx path is not available\n");
		return -1;
	}

	/* Fill the entry in the SW ring and the BDs in the FW ring */
	idx = txq->sw_tx_prod;
	txq->sw_tx_ring.skbs[idx].skb = skb;
	first_bd = qed_chain_produce(&txq->tx_pbl);
	memset(first_bd, 0, sizeof(*first_bd));
	val = 1 << ETH_TX_1ST_BD_FLAGS_START_BD_SHIFT;
	first_bd->data.bd_flags.bitfields = val;
	val = skb->len & ETH_TX_DATA_1ST_BD_PKT_LEN_MASK;
	val = val << ETH_TX_DATA_1ST_BD_PKT_LEN_SHIFT;
	first_bd->data.bitfields |= cpu_to_le16(val);

	/* Map skb linear data for DMA and set in the first BD */
	mapping = dma_map_single(&edev->pdev->dev, skb->data,
				 skb_headlen(skb), DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(&edev->pdev->dev, mapping))) {
		DP_NOTICE(edev, "SKB mapping failed\n");
		return -ENOMEM;
	}
	BD_SET_UNMAP_ADDR_LEN(first_bd, mapping, skb_headlen(skb));

	/* update the first BD with the actual num BDs */
	first_bd->data.nbds = 1;
	txq->sw_tx_prod = (txq->sw_tx_prod + 1) % txq->num_tx_buffers;
	/* 'next page' entries are counted in the producer value */
	val = qed_chain_get_prod_idx(&txq->tx_pbl);
	txq->tx_db.data.bd_prod = cpu_to_le16(val);

	/* wmb makes sure that the BDs data is updated before updating the
	 * producer, otherwise FW may read old data from the BDs.
	 */
	wmb();
	writel(txq->tx_db.raw, txq->doorbell_addr);

	for (i = 0; i < QEDE_SELFTEST_POLL_COUNT; i++) {
		if (qede_txq_has_work(txq))
			break;
		usleep_range(100, 200);
	}

	if (!qede_txq_has_work(txq)) {
		DP_NOTICE(edev, "Tx completion didn't happen\n");
		return -1;
	}

	first_bd = (struct eth_tx_1st_bd *)qed_chain_consume(&txq->tx_pbl);
	dma_unmap_single(&edev->pdev->dev, BD_UNMAP_ADDR(first_bd),
			 BD_UNMAP_LEN(first_bd), DMA_TO_DEVICE);
	txq->sw_tx_cons = (txq->sw_tx_cons + 1) % txq->num_tx_buffers;
	txq->sw_tx_ring.skbs[idx].skb = NULL;

	return 0;
}
static int qede_selftest_receive_traffic(struct qede_dev *edev)
{
	u16 sw_rx_index, len;
	struct eth_fast_path_rx_reg_cqe *fp_cqe;
	struct qede_rx_queue *rxq = NULL;
	struct sw_rx_data *sw_rx_data;
	union eth_rx_cqe *cqe;
	int i, iter, rc = 0;
	u8 *data_ptr;

	for_each_queue(i) {
		if (edev->fp_array[i].type & QEDE_FASTPATH_RX) {
			rxq = edev->fp_array[i].rxq;
			break;
		}
	}

	if (!rxq) {
		DP_NOTICE(edev, "Rx path is not available\n");
		return -1;
	}

	/* The packet is expected to receive on rx-queue 0 even though RSS is
	 * enabled. This is because the queue 0 is configured as the default
	 * queue and that the loopback traffic is not IP.
	 */
	for (iter = 0; iter < QEDE_SELFTEST_POLL_COUNT; iter++) {
		if (!qede_has_rx_work(rxq)) {
			usleep_range(100, 200);
			continue;
		}

		/* Get the CQE from the completion ring */
		cqe = (union eth_rx_cqe *)qed_chain_consume(&rxq->rx_comp_ring);

		/* Get the data from the SW ring */
		sw_rx_index = rxq->sw_rx_cons & NUM_RX_BDS_MAX;
		sw_rx_data = &rxq->sw_rx_ring[sw_rx_index];
		fp_cqe = &cqe->fast_path_regular;
		len = le16_to_cpu(fp_cqe->len_on_first_bd);
		data_ptr = (u8 *)(page_address(sw_rx_data->data) +
				  fp_cqe->placement_offset +
				  sw_rx_data->page_offset +
				  rxq->rx_headroom);
		if (ether_addr_equal(data_ptr, edev->ndev->dev_addr) &&
		    ether_addr_equal(data_ptr + ETH_ALEN,
				     edev->ndev->dev_addr)) {
			for (i = ETH_HLEN; i < len; i++)
				if (data_ptr[i] != (unsigned char)(i & 0xff)) {
					rc = -1;
					break;
				}

			qede_recycle_rx_bd_ring(rxq, 1);
			qed_chain_recycle_consumed(&rxq->rx_comp_ring);
			break;
		}

		DP_INFO(edev, "Not the transmitted packet\n");
		qede_recycle_rx_bd_ring(rxq, 1);
		qed_chain_recycle_consumed(&rxq->rx_comp_ring);
	}

	if (iter == QEDE_SELFTEST_POLL_COUNT) {
		DP_NOTICE(edev, "Failed to receive the traffic\n");
		return -1;
	}

	qede_update_rx_prod(edev, rxq);

	return rc;
}
static int qede_selftest_run_loopback(struct qede_dev *edev, u32 loopback_mode)
{
	struct qed_link_params link_params;
	struct sk_buff *skb = NULL;
	int rc = 0, i;
	u32 pkt_size;
	u8 *packet;

	if (!netif_running(edev->ndev)) {
		DP_NOTICE(edev, "Interface is down\n");
		return -EINVAL;
	}

	qede_netif_stop(edev);

	/* Bring up the link in Loopback mode */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = true;
	link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
	link_params.loopback_mode = loopback_mode;
	edev->ops->common->set_link(edev->cdev, &link_params);

	/* Wait for loopback configuration to apply */
	msleep_interruptible(500);

	/* Setting max packet size to 1.5K to avoid data being split over
	 * multiple BDs in cases where MTU > PAGE_SIZE.
	 */
	pkt_size = (((edev->ndev->mtu < ETH_DATA_LEN) ?
		     edev->ndev->mtu : ETH_DATA_LEN) + ETH_HLEN);

	skb = netdev_alloc_skb(edev->ndev, pkt_size);
	if (!skb) {
		DP_INFO(edev, "Can't allocate skb\n");
		rc = -ENOMEM;
		goto test_loopback_exit;
	}

	packet = skb_put(skb, pkt_size);
	ether_addr_copy(packet, edev->ndev->dev_addr);
	ether_addr_copy(packet + ETH_ALEN, edev->ndev->dev_addr);
	memset(packet + (2 * ETH_ALEN), 0x77, (ETH_HLEN - (2 * ETH_ALEN)));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char)(i & 0xff);

	rc = qede_selftest_transmit_traffic(edev, skb);
	if (rc)
		goto test_loopback_exit;

	rc = qede_selftest_receive_traffic(edev);
	if (rc)
		goto test_loopback_exit;

	DP_VERBOSE(edev, NETIF_MSG_RX_STATUS, "Loopback test successful\n");

test_loopback_exit:
	dev_kfree_skb(skb);

	/* Bring up the link in Normal mode */
	memset(&link_params, 0, sizeof(link_params));
	link_params.link_up = true;
	link_params.override_flags = QED_LINK_OVERRIDE_LOOPBACK_MODE;
	link_params.loopback_mode = QED_LINK_LOOPBACK_NONE;
	edev->ops->common->set_link(edev->cdev, &link_params);

	/* Wait for loopback configuration to apply */
	msleep_interruptible(500);

	qede_netif_start(edev);

	return rc;
}
static void qede_self_test(struct net_device *dev,
			   struct ethtool_test *etest, u64 *buf)
{
	struct qede_dev *edev = netdev_priv(dev);

	DP_VERBOSE(edev, QED_MSG_DEBUG,
		   "Self-test command parameters: offline = %d, external_lb = %d\n",
		   (etest->flags & ETH_TEST_FL_OFFLINE),
		   (etest->flags & ETH_TEST_FL_EXTERNAL_LB) >> 2);

	memset(buf, 0, sizeof(u64) * QEDE_ETHTOOL_TEST_MAX);

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		if (qede_selftest_run_loopback(edev,
					       QED_LINK_LOOPBACK_INT_PHY)) {
			buf[QEDE_ETHTOOL_INT_LOOPBACK] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
	}

	if (edev->ops->common->selftest->selftest_interrupt(edev->cdev)) {
		buf[QEDE_ETHTOOL_INTERRUPT_TEST] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (edev->ops->common->selftest->selftest_memory(edev->cdev)) {
		buf[QEDE_ETHTOOL_MEMORY_TEST] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (edev->ops->common->selftest->selftest_register(edev->cdev)) {
		buf[QEDE_ETHTOOL_REGISTER_TEST] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (edev->ops->common->selftest->selftest_clock(edev->cdev)) {
		buf[QEDE_ETHTOOL_CLOCK_TEST] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (edev->ops->common->selftest->selftest_nvram(edev->cdev)) {
		buf[QEDE_ETHTOOL_NVRAM_TEST] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
}
static int qede_set_tunable(struct net_device *dev,
			    const struct ethtool_tunable *tuna,
			    const void *data)
{
	struct qede_dev *edev = netdev_priv(dev);
	u32 val;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		val = *(u32 *)data;
		if (val < QEDE_MIN_PKT_LEN || val > QEDE_RX_HDR_SIZE) {
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "Invalid rx copy break value, range is [%u, %u]",
				   QEDE_MIN_PKT_LEN, QEDE_RX_HDR_SIZE);
			return -EINVAL;
		}

		edev->rx_copybreak = *(u32 *)data;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
static int qede_get_tunable(struct net_device *dev,
			    const struct ethtool_tunable *tuna, void *data)
{
	struct qede_dev *edev = netdev_priv(dev);

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = edev->rx_copybreak;
		break;
	default:
		return -EOPNOTSUPP;
	}

	return 0;
}
static int qede_get_eee(struct net_device *dev, struct ethtool_keee *edata)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_output current_link;

	memset(&current_link, 0, sizeof(current_link));
	edev->ops->common->get_link(edev->cdev, &current_link);

	if (!current_link.eee_supported) {
		DP_INFO(edev, "EEE is not supported\n");
		return -EOPNOTSUPP;
	}

	linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
			 edata->advertised,
			 current_link.eee.adv_caps & QED_EEE_1G_ADV);
	linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
			 edata->advertised,
			 current_link.eee.adv_caps & QED_EEE_10G_ADV);

	linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
			 edata->supported,
			 current_link.sup_caps & QED_EEE_1G_ADV);
	linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
			 edata->supported,
			 current_link.sup_caps & QED_EEE_10G_ADV);

	linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
			 edata->lp_advertised,
			 current_link.eee.lp_adv_caps & QED_EEE_1G_ADV);
	linkmode_mod_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
			 edata->lp_advertised,
			 current_link.eee.lp_adv_caps & QED_EEE_10G_ADV);

	edata->tx_lpi_timer = current_link.eee.tx_lpi_timer;
	edata->eee_enabled = current_link.eee.enable;
	edata->tx_lpi_enabled = current_link.eee.tx_lpi_enable;
	edata->eee_active = current_link.eee_active;

	return 0;
}
static int qede_set_eee(struct net_device *dev, struct ethtool_keee *edata)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = {};
	__ETHTOOL_DECLARE_LINK_MODE_MASK(tmp) = {};
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_output current_link;
	struct qed_link_params params;
	bool unsupp;

	if (!edev->ops->common->can_link_change(edev->cdev)) {
		DP_INFO(edev, "Link settings are not allowed to be changed\n");
		return -EOPNOTSUPP;
	}

	memset(&current_link, 0, sizeof(current_link));
	edev->ops->common->get_link(edev->cdev, &current_link);

	if (!current_link.eee_supported) {
		DP_INFO(edev, "EEE is not supported\n");
		return -EOPNOTSUPP;
	}

	memset(&params, 0, sizeof(params));
	params.override_flags |= QED_LINK_OVERRIDE_EEE_CONFIG;

	linkmode_set_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
			 supported);
	linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
			 supported);

	unsupp = linkmode_andnot(tmp, edata->advertised, supported);
	if (unsupp) {
		DP_VERBOSE(edev, QED_MSG_DEBUG,
			   "Invalid advertised capabilities %*pb\n",
			   __ETHTOOL_LINK_MODE_MASK_NBITS, edata->advertised);
		return -EINVAL;
	}

	if (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
			      edata->advertised))
		params.eee.adv_caps = QED_EEE_1G_ADV;
	if (linkmode_test_bit(ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
			      edata->advertised))
		params.eee.adv_caps |= QED_EEE_10G_ADV;

	params.eee.enable = edata->eee_enabled;
	params.eee.tx_lpi_enable = edata->tx_lpi_enabled;
	params.eee.tx_lpi_timer = edata->tx_lpi_timer;

	params.link_up = true;
	edev->ops->common->set_link(edev->cdev, &params);

	return 0;
}
static u32 qede_link_to_ethtool_fec(u32 link_fec)
{
	u32 eth_fec = 0;

	if (link_fec & QED_FEC_MODE_NONE)
		eth_fec |= ETHTOOL_FEC_OFF;
	if (link_fec & QED_FEC_MODE_FIRECODE)
		eth_fec |= ETHTOOL_FEC_BASER;
	if (link_fec & QED_FEC_MODE_RS)
		eth_fec |= ETHTOOL_FEC_RS;
	if (link_fec & QED_FEC_MODE_AUTO)
		eth_fec |= ETHTOOL_FEC_AUTO;
	if (link_fec & QED_FEC_MODE_UNSUPPORTED)
		eth_fec |= ETHTOOL_FEC_NONE;

	return eth_fec;
}
static u32 qede_ethtool_to_link_fec(u32 eth_fec)
{
	u32 link_fec = 0;

	if (eth_fec & ETHTOOL_FEC_OFF)
		link_fec |= QED_FEC_MODE_NONE;
	if (eth_fec & ETHTOOL_FEC_BASER)
		link_fec |= QED_FEC_MODE_FIRECODE;
	if (eth_fec & ETHTOOL_FEC_RS)
		link_fec |= QED_FEC_MODE_RS;
	if (eth_fec & ETHTOOL_FEC_AUTO)
		link_fec |= QED_FEC_MODE_AUTO;
	if (eth_fec & ETHTOOL_FEC_NONE)
		link_fec |= QED_FEC_MODE_UNSUPPORTED;

	return link_fec;
}
static int qede_get_fecparam(struct net_device *dev,
			     struct ethtool_fecparam *fecparam)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_output curr_link;

	memset(&curr_link, 0, sizeof(curr_link));
	edev->ops->common->get_link(edev->cdev, &curr_link);

	fecparam->active_fec = qede_link_to_ethtool_fec(curr_link.active_fec);
	fecparam->fec = qede_link_to_ethtool_fec(curr_link.sup_fec);

	return 0;
}
static int qede_set_fecparam(struct net_device *dev,
			     struct ethtool_fecparam *fecparam)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qed_link_params params;

	if (!edev->ops || !edev->ops->common->can_link_change(edev->cdev)) {
		DP_INFO(edev, "Link settings are not allowed to be changed\n");
		return -EOPNOTSUPP;
	}

	memset(&params, 0, sizeof(params));
	params.override_flags |= QED_LINK_OVERRIDE_FEC_CONFIG;
	params.fec = qede_ethtool_to_link_fec(fecparam->fec);
	params.link_up = true;

	edev->ops->common->set_link(edev->cdev, &params);

	return 0;
}
static int qede_get_module_info(struct net_device *dev,
				struct ethtool_modinfo *modinfo)
{
	struct qede_dev *edev = netdev_priv(dev);
	u8 buf[4];
	int rc;

	/* Read first 4 bytes to find the sfp type */
	rc = edev->ops->common->read_module_eeprom(edev->cdev, buf,
						   QED_I2C_DEV_ADDR_A0, 0, 4);
	if (rc) {
		DP_ERR(edev, "Failed reading EEPROM data %d\n", rc);
		return rc;
	}

	switch (buf[0]) {
	case 0x3: /* SFP, SFP+, SFP-28 */
		modinfo->type = ETH_MODULE_SFF_8472;
		modinfo->eeprom_len = ETH_MODULE_SFF_8472_LEN;
		break;
	case 0xc: /* QSFP */
	case 0xd: /* QSFP+ */
		modinfo->type = ETH_MODULE_SFF_8436;
		modinfo->eeprom_len = ETH_MODULE_SFF_8436_LEN;
		break;
	case 0x11: /* QSFP-28 */
		modinfo->type = ETH_MODULE_SFF_8636;
		modinfo->eeprom_len = ETH_MODULE_SFF_8636_LEN;
		break;
	default:
		DP_ERR(edev, "Unknown transceiver type 0x%x\n", buf[0]);
		return -EINVAL;
	}

	return 0;
}
static int qede_get_module_eeprom(struct net_device *dev,
				  struct ethtool_eeprom *ee, u8 *data)
{
	struct qede_dev *edev = netdev_priv(dev);
	u32 start_addr = ee->offset, size = 0;
	u8 *buf = data;
	int rc = 0;

	/* Read A0 section */
	if (ee->offset < ETH_MODULE_SFF_8079_LEN) {
		/* Limit transfer size to the A0 section boundary */
		if (ee->offset + ee->len > ETH_MODULE_SFF_8079_LEN)
			size = ETH_MODULE_SFF_8079_LEN - ee->offset;
		else
			size = ee->len;

		rc = edev->ops->common->read_module_eeprom(edev->cdev, buf,
							   QED_I2C_DEV_ADDR_A0,
							   ee->offset, size);
		if (rc) {
			DP_ERR(edev, "Failed reading A0 section %d\n", rc);
			return rc;
		}

		buf += size;
		start_addr += size;
	}

	/* Read A2 section */
	if (start_addr >= ETH_MODULE_SFF_8079_LEN &&
	    start_addr < ETH_MODULE_SFF_8472_LEN) {
		size = ee->len - size;
		/* Limit transfer size to the A2 section boundary */
		if (start_addr + size > ETH_MODULE_SFF_8472_LEN)
			size = ETH_MODULE_SFF_8472_LEN - start_addr;
		start_addr -= ETH_MODULE_SFF_8079_LEN;
		rc = edev->ops->common->read_module_eeprom(edev->cdev, buf,
							   QED_I2C_DEV_ADDR_A2,
							   start_addr, size);
		if (rc) {
			DP_VERBOSE(edev, QED_MSG_DEBUG,
				   "Failed reading A2 section %d\n", rc);
			return 0;
		}
	}

	return rc;
}
static int qede_set_dump(struct net_device *dev, struct ethtool_dump *val)
{
	struct qede_dev *edev = netdev_priv(dev);
	int rc = 0;

	if (edev->dump_info.cmd == QEDE_DUMP_CMD_NONE) {
		if (val->flag > QEDE_DUMP_CMD_MAX) {
			DP_ERR(edev, "Invalid command %d\n", val->flag);
			return -EINVAL;
		}
		edev->dump_info.cmd = val->flag;
		edev->dump_info.num_args = 0;
		return 0;
	}

	if (edev->dump_info.num_args == QEDE_DUMP_MAX_ARGS) {
		DP_ERR(edev, "Arg count = %d\n", edev->dump_info.num_args);
		return -EINVAL;
	}

	switch (edev->dump_info.cmd) {
	case QEDE_DUMP_CMD_NVM_CFG:
		edev->dump_info.args[edev->dump_info.num_args] = val->flag;
		edev->dump_info.num_args++;
		break;
	case QEDE_DUMP_CMD_GRCDUMP:
		rc = edev->ops->common->set_grc_config(edev->cdev,
						       val->flag, 1);
		break;
	default:
		break;
	}

	return rc;
}
*dev
,
2067 struct ethtool_dump
*dump
)
2069 struct qede_dev
*edev
= netdev_priv(dev
);
2071 if (!edev
->ops
|| !edev
->ops
->common
) {
2072 DP_ERR(edev
, "Edev ops not populated\n");
2076 dump
->version
= QEDE_DUMP_VERSION
;
2077 switch (edev
->dump_info
.cmd
) {
2078 case QEDE_DUMP_CMD_NVM_CFG
:
2079 dump
->flag
= QEDE_DUMP_CMD_NVM_CFG
;
2080 dump
->len
= edev
->ops
->common
->read_nvm_cfg_len(edev
->cdev
,
2081 edev
->dump_info
.args
[0]);
2083 case QEDE_DUMP_CMD_GRCDUMP
:
2084 dump
->flag
= QEDE_DUMP_CMD_GRCDUMP
;
2085 dump
->len
= edev
->ops
->common
->dbg_all_data_size(edev
->cdev
);
2088 DP_ERR(edev
, "Invalid cmd = %d\n", edev
->dump_info
.cmd
);
2092 DP_VERBOSE(edev
, QED_MSG_DEBUG
,
2093 "dump->version = 0x%x dump->flag = %d dump->len = %d\n",
2094 dump
->version
, dump
->flag
, dump
->len
);
static int qede_get_dump_data(struct net_device *dev,
			      struct ethtool_dump *dump, void *buf)
{
	struct qede_dev *edev = netdev_priv(dev);
	int rc = 0;

	if (!edev->ops || !edev->ops->common) {
		DP_ERR(edev, "Edev ops not populated\n");
		rc = -EINVAL;
		goto err;
	}

	switch (edev->dump_info.cmd) {
	case QEDE_DUMP_CMD_NVM_CFG:
		if (edev->dump_info.num_args != QEDE_DUMP_NVM_ARG_COUNT) {
			DP_ERR(edev, "Arg count = %d required = %d\n",
			       edev->dump_info.num_args,
			       QEDE_DUMP_NVM_ARG_COUNT);
			rc = -EINVAL;
			goto err;
		}
		rc = edev->ops->common->read_nvm_cfg(edev->cdev, (u8 **)&buf,
						     edev->dump_info.args[0],
						     edev->dump_info.args[1]);
		break;
	case QEDE_DUMP_CMD_GRCDUMP:
		memset(buf, 0, dump->len);
		rc = edev->ops->common->dbg_all_data(edev->cdev, buf);
		break;
	default:
		DP_ERR(edev, "Invalid cmd = %d\n", edev->dump_info.cmd);
		rc = -EINVAL;
		break;
	}

err:
	edev->dump_info.cmd = QEDE_DUMP_CMD_NONE;
	edev->dump_info.num_args = 0;
	memset(edev->dump_info.args, 0, sizeof(edev->dump_info.args));

	return rc;
}
int qede_set_per_coalesce(struct net_device *dev, u32 queue,
			  struct ethtool_coalesce *coal)
{
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_fastpath *fp;
	u16 rxc, txc;
	int rc = 0;

	if (coal->rx_coalesce_usecs > QED_COALESCE_MAX ||
	    coal->tx_coalesce_usecs > QED_COALESCE_MAX) {
		DP_INFO(edev,
			"Can't support requested %s coalesce value [max supported value %d]\n",
			coal->rx_coalesce_usecs > QED_COALESCE_MAX ? "rx"
								   : "tx",
			QED_COALESCE_MAX);
		return -EINVAL;
	}

	rxc = (u16)coal->rx_coalesce_usecs;
	txc = (u16)coal->tx_coalesce_usecs;

	if (queue >= edev->num_queues) {
		DP_INFO(edev, "Invalid queue\n");
		return -EINVAL;
	}

	__qede_lock(edev);
	if (edev->state != QEDE_STATE_OPEN) {
		rc = -EINVAL;
		goto out;
	}

	fp = &edev->fp_array[queue];

	if (edev->fp_array[queue].type & QEDE_FASTPATH_RX) {
		rc = edev->ops->common->set_coalesce(edev->cdev,
						     rxc, 0,
						     fp->rxq->handle);
		if (rc) {
			DP_INFO(edev,
				"Set RX coalesce error, rc = %d\n", rc);
			goto out;
		}
		edev->coal_entry[queue].rxc = rxc;
		edev->coal_entry[queue].isvalid = true;
	}

	if (edev->fp_array[queue].type & QEDE_FASTPATH_TX) {
		rc = edev->ops->common->set_coalesce(edev->cdev,
						     0, txc,
						     fp->txq->handle);
		if (rc) {
			DP_INFO(edev,
				"Set TX coalesce error, rc = %d\n", rc);
			goto out;
		}
		edev->coal_entry[queue].txc = txc;
		edev->coal_entry[queue].isvalid = true;
	}
out:
	__qede_unlock(edev);

	return rc;
}
static int qede_get_per_coalesce(struct net_device *dev,
				 u32 queue,
				 struct ethtool_coalesce *coal)
{
	void *rx_handle = NULL, *tx_handle = NULL;
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_fastpath *fp;
	u16 rx_coal, tx_coal;
	int rc = 0;

	rx_coal = QED_DEFAULT_RX_USECS;
	tx_coal = QED_DEFAULT_TX_USECS;

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	__qede_lock(edev);
	if (queue >= edev->num_queues) {
		DP_INFO(edev, "Invalid queue\n");
		rc = -EINVAL;
		goto out;
	}

	if (edev->state != QEDE_STATE_OPEN) {
		rc = -EINVAL;
		goto out;
	}

	fp = &edev->fp_array[queue];

	if (fp->type & QEDE_FASTPATH_RX)
		rx_handle = fp->rxq->handle;

	rc = edev->ops->get_coalesce(edev->cdev, &rx_coal,
				     rx_handle);
	if (rc) {
		DP_INFO(edev, "Read Rx coalesce error\n");
		goto out;
	}

	fp = &edev->fp_array[queue];
	if (fp->type & QEDE_FASTPATH_TX)
		tx_handle = fp->txq->handle;

	rc = edev->ops->get_coalesce(edev->cdev, &tx_coal,
				     tx_handle);
	if (rc)
		DP_INFO(edev, "Read Tx coalesce error\n");

out:
	__qede_unlock(edev);

	coal->rx_coalesce_usecs = rx_coal;
	coal->tx_coalesce_usecs = tx_coal;

	return rc;
}
static const struct ethtool_ops qede_ethtool_ops = {
	.supported_coalesce_params	= ETHTOOL_COALESCE_USECS |
					  ETHTOOL_COALESCE_STATS_BLOCK_USECS,
	.get_link_ksettings		= qede_get_link_ksettings,
	.set_link_ksettings		= qede_set_link_ksettings,
	.get_drvinfo			= qede_get_drvinfo,
	.get_regs_len			= qede_get_regs_len,
	.get_regs			= qede_get_regs,
	.get_wol			= qede_get_wol,
	.set_wol			= qede_set_wol,
	.get_msglevel			= qede_get_msglevel,
	.set_msglevel			= qede_set_msglevel,
	.nway_reset			= qede_nway_reset,
	.get_link			= qede_get_link,
	.get_coalesce			= qede_get_coalesce,
	.set_coalesce			= qede_set_coalesce,
	.get_ringparam			= qede_get_ringparam,
	.set_ringparam			= qede_set_ringparam,
	.get_pauseparam			= qede_get_pauseparam,
	.set_pauseparam			= qede_set_pauseparam,
	.get_strings			= qede_get_strings,
	.set_phys_id			= qede_set_phys_id,
	.get_ethtool_stats		= qede_get_ethtool_stats,
	.get_priv_flags			= qede_get_priv_flags,
	.set_priv_flags			= qede_set_priv_flags,
	.get_sset_count			= qede_get_sset_count,
	.get_rxnfc			= qede_get_rxnfc,
	.set_rxnfc			= qede_set_rxnfc,
	.get_rxfh_indir_size		= qede_get_rxfh_indir_size,
	.get_rxfh_key_size		= qede_get_rxfh_key_size,
	.get_rxfh			= qede_get_rxfh,
	.set_rxfh			= qede_set_rxfh,
	.get_ts_info			= qede_get_ts_info,
	.get_channels			= qede_get_channels,
	.set_channels			= qede_set_channels,
	.self_test			= qede_self_test,
	.get_module_info		= qede_get_module_info,
	.get_module_eeprom		= qede_get_module_eeprom,
	.get_eee			= qede_get_eee,
	.set_eee			= qede_set_eee,
	.get_fecparam			= qede_get_fecparam,
	.set_fecparam			= qede_set_fecparam,
	.get_tunable			= qede_get_tunable,
	.set_tunable			= qede_set_tunable,
	.get_per_queue_coalesce		= qede_get_per_coalesce,
	.set_per_queue_coalesce		= qede_set_per_coalesce,
	.flash_device			= qede_flash_device,
	.get_dump_flag			= qede_get_dump_flag,
	.get_dump_data			= qede_get_dump_data,
	.set_dump			= qede_set_dump,
};
static const struct ethtool_ops qede_vf_ethtool_ops = {
	.supported_coalesce_params	= ETHTOOL_COALESCE_USECS |
					  ETHTOOL_COALESCE_STATS_BLOCK_USECS,
	.get_link_ksettings		= qede_get_link_ksettings,
	.get_drvinfo			= qede_get_drvinfo,
	.get_msglevel			= qede_get_msglevel,
	.set_msglevel			= qede_set_msglevel,
	.get_link			= qede_get_link,
	.get_coalesce			= qede_get_coalesce,
	.set_coalesce			= qede_set_coalesce,
	.get_ringparam			= qede_get_ringparam,
	.set_ringparam			= qede_set_ringparam,
	.get_strings			= qede_get_strings,
	.get_ethtool_stats		= qede_get_ethtool_stats,
	.get_priv_flags			= qede_get_priv_flags,
	.get_sset_count			= qede_get_sset_count,
	.get_rxnfc			= qede_get_rxnfc,
	.set_rxnfc			= qede_set_rxnfc,
	.get_rxfh_indir_size		= qede_get_rxfh_indir_size,
	.get_rxfh_key_size		= qede_get_rxfh_key_size,
	.get_rxfh			= qede_get_rxfh,
	.set_rxfh			= qede_set_rxfh,
	.get_channels			= qede_get_channels,
	.set_channels			= qede_set_channels,
	.get_per_queue_coalesce		= qede_get_per_coalesce,
	.set_per_queue_coalesce		= qede_set_per_coalesce,
	.get_tunable			= qede_get_tunable,
	.set_tunable			= qede_set_tunable,
};
void qede_set_ethtool_ops(struct net_device *dev)
{
	struct qede_dev *edev = netdev_priv(dev);

	if (IS_VF(edev))
		dev->ethtool_ops = &qede_vf_ethtool_ops;
	else
		dev->ethtool_ops = &qede_ethtool_ops;
}