// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Cavium, Inc.
 */

/* ETHTOOL Support for VNIC_VF Device */

#include <linux/ethtool.h>
#include <linux/pci.h>
#include <linux/net_tstamp.h>

#include "nic_reg.h"
#include "nic.h"
#include "nicvf_queues.h"
#include "q_struct.h"
#include "thunder_bgx.h"
#include "../common/cavium_ptp.h"

#define DRV_NAME        "nicvf"

struct nicvf_stat {
        char name[ETH_GSTRING_LEN];
        unsigned int index;
};

#define NICVF_HW_STAT(stat) { \
        .name = #stat, \
        .index = offsetof(struct nicvf_hw_stats, stat) / sizeof(u64), \
}

#define NICVF_DRV_STAT(stat) { \
        .name = #stat, \
        .index = offsetof(struct nicvf_drv_stats, stat) / sizeof(u64), \
}

static const struct nicvf_stat nicvf_hw_stats[] = {
        NICVF_HW_STAT(rx_bytes),
        NICVF_HW_STAT(rx_frames),
        NICVF_HW_STAT(rx_ucast_frames),
        NICVF_HW_STAT(rx_bcast_frames),
        NICVF_HW_STAT(rx_mcast_frames),
        NICVF_HW_STAT(rx_drops),
        NICVF_HW_STAT(rx_drop_red),
        NICVF_HW_STAT(rx_drop_red_bytes),
        NICVF_HW_STAT(rx_drop_overrun),
        NICVF_HW_STAT(rx_drop_overrun_bytes),
        NICVF_HW_STAT(rx_drop_bcast),
        NICVF_HW_STAT(rx_drop_mcast),
        NICVF_HW_STAT(rx_drop_l3_bcast),
        NICVF_HW_STAT(rx_drop_l3_mcast),
        NICVF_HW_STAT(rx_fcs_errors),
        NICVF_HW_STAT(rx_l2_errors),
        NICVF_HW_STAT(tx_bytes),
        NICVF_HW_STAT(tx_frames),
        NICVF_HW_STAT(tx_ucast_frames),
        NICVF_HW_STAT(tx_bcast_frames),
        NICVF_HW_STAT(tx_mcast_frames),
        NICVF_HW_STAT(tx_drops),
};

static const struct nicvf_stat nicvf_drv_stats[] = {
        NICVF_DRV_STAT(rx_bgx_truncated_pkts),
        NICVF_DRV_STAT(rx_jabber_errs),
        NICVF_DRV_STAT(rx_fcs_errs),
        NICVF_DRV_STAT(rx_bgx_errs),
        NICVF_DRV_STAT(rx_prel2_errs),
        NICVF_DRV_STAT(rx_l2_hdr_malformed),
        NICVF_DRV_STAT(rx_oversize),
        NICVF_DRV_STAT(rx_undersize),
        NICVF_DRV_STAT(rx_l2_len_mismatch),
        NICVF_DRV_STAT(rx_l2_pclp),
        NICVF_DRV_STAT(rx_ip_ver_errs),
        NICVF_DRV_STAT(rx_ip_csum_errs),
        NICVF_DRV_STAT(rx_ip_hdr_malformed),
        NICVF_DRV_STAT(rx_ip_payload_malformed),
        NICVF_DRV_STAT(rx_ip_ttl_errs),
        NICVF_DRV_STAT(rx_l3_pclp),
        NICVF_DRV_STAT(rx_l4_malformed),
        NICVF_DRV_STAT(rx_l4_csum_errs),
        NICVF_DRV_STAT(rx_udp_len_errs),
        NICVF_DRV_STAT(rx_l4_port_errs),
        NICVF_DRV_STAT(rx_tcp_flag_errs),
        NICVF_DRV_STAT(rx_tcp_offset_errs),
        NICVF_DRV_STAT(rx_l4_pclp),
        NICVF_DRV_STAT(rx_truncated_pkts),

        NICVF_DRV_STAT(tx_desc_fault),
        NICVF_DRV_STAT(tx_hdr_cons_err),
        NICVF_DRV_STAT(tx_subdesc_err),
        NICVF_DRV_STAT(tx_max_size_exceeded),
        NICVF_DRV_STAT(tx_imm_size_oflow),
        NICVF_DRV_STAT(tx_data_seq_err),
        NICVF_DRV_STAT(tx_mem_seq_err),
        NICVF_DRV_STAT(tx_lock_viol),
        NICVF_DRV_STAT(tx_data_fault),
        NICVF_DRV_STAT(tx_tstmp_conflict),
        NICVF_DRV_STAT(tx_tstmp_timeout),
        NICVF_DRV_STAT(tx_mem_fault),
        NICVF_DRV_STAT(tx_csum_overlap),
        NICVF_DRV_STAT(tx_csum_overflow),

        NICVF_DRV_STAT(tx_tso),
        NICVF_DRV_STAT(tx_timeout),
        NICVF_DRV_STAT(txq_stop),
        NICVF_DRV_STAT(txq_wake),
        NICVF_DRV_STAT(rcv_buffer_alloc_failures),
        NICVF_DRV_STAT(page_alloc),
};

static const struct nicvf_stat nicvf_queue_stats[] = {
        { "bytes", 0 },
        { "frames", 1 },
};

static const unsigned int nicvf_n_hw_stats = ARRAY_SIZE(nicvf_hw_stats);
static const unsigned int nicvf_n_drv_stats = ARRAY_SIZE(nicvf_drv_stats);
static const unsigned int nicvf_n_queue_stats = ARRAY_SIZE(nicvf_queue_stats);

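/* Report link state plus the legacy supported/advertised link modes for
 * the current BGX LMAC speed. With the link down, only speed and duplex
 * are reported (as unknown).
 */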
static int nicvf_get_link_ksettings(struct net_device *netdev,
                                    struct ethtool_link_ksettings *cmd)
{
        struct nicvf *nic = netdev_priv(netdev);
        u32 supported, advertising;

        supported = 0;
        advertising = 0;

        if (!nic->link_up) {
                cmd->base.duplex = DUPLEX_UNKNOWN;
                cmd->base.speed = SPEED_UNKNOWN;
                return 0;
        }

        switch (nic->speed) {
        case SPEED_1000:
                cmd->base.port = PORT_MII | PORT_TP;
                cmd->base.autoneg = AUTONEG_ENABLE;
                supported |= SUPPORTED_MII | SUPPORTED_TP;
                supported |= SUPPORTED_1000baseT_Full |
                             SUPPORTED_1000baseT_Half |
                             SUPPORTED_100baseT_Full |
                             SUPPORTED_100baseT_Half |
                             SUPPORTED_10baseT_Full |
                             SUPPORTED_10baseT_Half;
                supported |= SUPPORTED_Autoneg;
                advertising |= ADVERTISED_1000baseT_Full |
                               ADVERTISED_1000baseT_Half |
                               ADVERTISED_100baseT_Full |
                               ADVERTISED_100baseT_Half |
                               ADVERTISED_10baseT_Full |
                               ADVERTISED_10baseT_Half;
                break;
        case SPEED_10000:
                if (nic->mac_type == BGX_MODE_RXAUI) {
                        cmd->base.port = PORT_TP;
                        supported |= SUPPORTED_TP;
                } else {
                        cmd->base.port = PORT_FIBRE;
                        supported |= SUPPORTED_FIBRE;
                }
                cmd->base.autoneg = AUTONEG_DISABLE;
                supported |= SUPPORTED_10000baseT_Full;
                break;
        case SPEED_40000:
                cmd->base.port = PORT_FIBRE;
                cmd->base.autoneg = AUTONEG_DISABLE;
                supported |= SUPPORTED_FIBRE;
                supported |= SUPPORTED_40000baseCR4_Full;
                break;
        }
        cmd->base.duplex = nic->duplex;
        cmd->base.speed = nic->speed;

        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
                                                supported);
        ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
                                                advertising);

        return 0;
}

static u32 nicvf_get_link(struct net_device *netdev)
{
        struct nicvf *nic = netdev_priv(netdev);

        return nic->link_up;
}

static void nicvf_get_drvinfo(struct net_device *netdev,
                              struct ethtool_drvinfo *info)
{
        struct nicvf *nic = netdev_priv(netdev);

        strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
        strlcpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info));
}

static u32 nicvf_get_msglevel(struct net_device *netdev)
{
        struct nicvf *nic = netdev_priv(netdev);

        return nic->msg_enable;
}

static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl)
{
        struct nicvf *nic = netdev_priv(netdev);

        nic->msg_enable = lvl;
}

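/* Stat string helpers. The string layout built here must match, entry for
 * entry, the value layout produced by nicvf_get_ethtool_stats(): HW stats,
 * driver stats, per-queue stats for the primary and any secondary qsets,
 * then the BGX LMAC Rx/Tx stats.
 */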
static void nicvf_get_qset_strings(struct nicvf *nic, u8 **data, int qset)
{
        int stats, qidx;
        int start_qidx = qset * MAX_RCV_QUEUES_PER_QS;

        for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
                for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
                        sprintf(*data, "rxq%d: %s", qidx + start_qidx,
                                nicvf_queue_stats[stats].name);
                        *data += ETH_GSTRING_LEN;
                }
        }

        for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
                for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
                        sprintf(*data, "txq%d: %s", qidx + start_qidx,
                                nicvf_queue_stats[stats].name);
                        *data += ETH_GSTRING_LEN;
                }
        }
}

static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
        struct nicvf *nic = netdev_priv(netdev);
        int stats;
        int sqs;

        if (sset != ETH_SS_STATS)
                return;

        for (stats = 0; stats < nicvf_n_hw_stats; stats++) {
                memcpy(data, nicvf_hw_stats[stats].name, ETH_GSTRING_LEN);
                data += ETH_GSTRING_LEN;
        }

        for (stats = 0; stats < nicvf_n_drv_stats; stats++) {
                memcpy(data, nicvf_drv_stats[stats].name, ETH_GSTRING_LEN);
                data += ETH_GSTRING_LEN;
        }

        nicvf_get_qset_strings(nic, &data, 0);

        for (sqs = 0; sqs < nic->sqs_count; sqs++) {
                if (!nic->snicvf[sqs])
                        continue;
                nicvf_get_qset_strings(nic->snicvf[sqs], &data, sqs + 1);
        }

        for (stats = 0; stats < BGX_RX_STATS_COUNT; stats++) {
                sprintf(data, "bgx_rxstat%d: ", stats);
                data += ETH_GSTRING_LEN;
        }

        for (stats = 0; stats < BGX_TX_STATS_COUNT; stats++) {
                sprintf(data, "bgx_txstat%d: ", stats);
                data += ETH_GSTRING_LEN;
        }
}

static int nicvf_get_sset_count(struct net_device *netdev, int sset)
{
        struct nicvf *nic = netdev_priv(netdev);
        int qstats_count;
        int sqs;

        if (sset != ETH_SS_STATS)
                return -EINVAL;

        qstats_count = nicvf_n_queue_stats *
                       (nic->qs->rq_cnt + nic->qs->sq_cnt);
        for (sqs = 0; sqs < nic->sqs_count; sqs++) {
                struct nicvf *snic;

                snic = nic->snicvf[sqs];
                if (!snic)
                        continue;
                qstats_count += nicvf_n_queue_stats *
                                (snic->qs->rq_cnt + snic->qs->sq_cnt);
        }

        return nicvf_n_hw_stats + nicvf_n_drv_stats +
               qstats_count +
               BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT;
}

static void nicvf_get_qset_stats(struct nicvf *nic,
                                 struct ethtool_stats *stats, u64 **data)
{
        int stat, qidx;

        if (!nic)
                return;

        for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
                nicvf_update_rq_stats(nic, qidx);
                for (stat = 0; stat < nicvf_n_queue_stats; stat++)
                        *((*data)++) = ((u64 *)&nic->qs->rq[qidx].stats)
                                       [nicvf_queue_stats[stat].index];
        }

        for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
                nicvf_update_sq_stats(nic, qidx);
                for (stat = 0; stat < nicvf_n_queue_stats; stat++)
                        *((*data)++) = ((u64 *)&nic->qs->sq[qidx].stats)
                                       [nicvf_queue_stats[stat].index];
        }
}

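/* Driver stats are kept per-CPU; sum them over all possible CPUs before
 * reporting. Value ordering must match nicvf_get_strings() above.
 */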
static void nicvf_get_ethtool_stats(struct net_device *netdev,
                                    struct ethtool_stats *stats, u64 *data)
{
        struct nicvf *nic = netdev_priv(netdev);
        u64 tmp_stats;
        int stat;
        int sqs, cpu;

        nicvf_update_stats(nic);

        /* Update LMAC stats */
        nicvf_update_lmac_stats(nic);

        for (stat = 0; stat < nicvf_n_hw_stats; stat++)
                *(data++) = ((u64 *)&nic->hw_stats)
                            [nicvf_hw_stats[stat].index];
        for (stat = 0; stat < nicvf_n_drv_stats; stat++) {
                tmp_stats = 0;
                for_each_possible_cpu(cpu)
                        tmp_stats += ((u64 *)per_cpu_ptr(nic->drv_stats, cpu))
                                     [nicvf_drv_stats[stat].index];
                *(data++) = tmp_stats;
        }

        nicvf_get_qset_stats(nic, stats, &data);

        for (sqs = 0; sqs < nic->sqs_count; sqs++) {
                if (!nic->snicvf[sqs])
                        continue;
                nicvf_get_qset_stats(nic->snicvf[sqs], stats, &data);
        }

        for (stat = 0; stat < BGX_RX_STATS_COUNT; stat++)
                *(data++) = nic->bgx_stats.rx_stats[stat];
        for (stat = 0; stat < BGX_TX_STATS_COUNT; stat++)
                *(data++) = nic->bgx_stats.tx_stats[stat];
}

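/* ethtool register dump: nicvf_get_regs_len() sizes the buffer as
 * NIC_VF_REG_COUNT u64 words, and nicvf_get_regs() fills it with a flat
 * array of register reads covering the VNIC config, PF mailbox, interrupt,
 * RSS and statistics registers, then the per-queue CQ/RQ/SQ/RBDR sets.
 */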
static int nicvf_get_regs_len(struct net_device *dev)
{
        return sizeof(u64) * NIC_VF_REG_COUNT;
}

static void nicvf_get_regs(struct net_device *dev,
                           struct ethtool_regs *regs, void *reg)
{
        struct nicvf *nic = netdev_priv(dev);
        u64 *p = (u64 *)reg;
        u64 reg_offset;
        int mbox, key, stat, q;
        int i = 0;

        regs->version = 0;
        memset(p, 0, NIC_VF_REG_COUNT * sizeof(u64));

        p[i++] = nicvf_reg_read(nic, NIC_VNIC_CFG);
        /* Mailbox registers */
        for (mbox = 0; mbox < NIC_PF_VF_MAILBOX_SIZE; mbox++)
                p[i++] = nicvf_reg_read(nic,
                                        NIC_VF_PF_MAILBOX_0_1 | (mbox << 3));

        p[i++] = nicvf_reg_read(nic, NIC_VF_INT);
        p[i++] = nicvf_reg_read(nic, NIC_VF_INT_W1S);
        p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1C);
        p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
        p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);

        for (key = 0; key < RSS_HASH_KEY_SIZE; key++)
                p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_KEY_0_4 | (key << 3));

        /* Tx/Rx statistics */
        for (stat = 0; stat < TX_STATS_ENUM_LAST; stat++)
                p[i++] = nicvf_reg_read(nic,
                                        NIC_VNIC_TX_STAT_0_4 | (stat << 3));

        for (stat = 0; stat < RX_STATS_ENUM_LAST; stat++)
                p[i++] = nicvf_reg_read(nic,
                                        NIC_VNIC_RX_STAT_0_13 | (stat << 3));

        p[i++] = nicvf_reg_read(nic, NIC_QSET_RQ_GEN_CFG);

        /* All completion queues' registers */
        for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++) {
                p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG, q);
                p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG2, q);
                p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_THRESH, q);
                p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_BASE, q);
                p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, q);
                p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, q);
                p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DOOR, q);
                p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, q);
                p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS2, q);
                p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DEBUG, q);
        }

        /* All receive queues' registers */
        for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++) {
                p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_CFG, q);
                p[i++] = nicvf_queue_reg_read(nic,
                                              NIC_QSET_RQ_0_7_STAT_0_1, q);
                reg_offset = NIC_QSET_RQ_0_7_STAT_0_1 | (1 << 3);
                p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
        }

        /* All send queues' registers */
        for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++) {
                p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, q);
                p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_THRESH, q);
                p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_BASE, q);
                p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, q);
                p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, q);
                p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q);
                p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q);
                p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q);
                /* Padding, was NIC_QSET_SQ_0_7_CNM_CHG, which
                 * produces bus errors when read
                 */
                p[i++] = 0;
                p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q);
                reg_offset = NIC_QSET_SQ_0_7_STAT_0_1 | (1 << 3);
                p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
        }

        /* All RBDR registers */
        for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++) {
                p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_CFG, q);
                p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_THRESH, q);
                p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_BASE, q);
                p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, q);
                p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, q);
                p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_DOOR, q);
                p[i++] = nicvf_queue_reg_read(nic,
                                              NIC_QSET_RBDR_0_1_STATUS0, q);
                p[i++] = nicvf_queue_reg_read(nic,
                                              NIC_QSET_RBDR_0_1_STATUS1, q);
                reg_offset = NIC_QSET_RBDR_0_1_PREFETCH_STATUS;
                p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
        }
}

static int nicvf_get_coalesce(struct net_device *netdev,
                              struct ethtool_coalesce *cmd)
{
        struct nicvf *nic = netdev_priv(netdev);

        cmd->rx_coalesce_usecs = nic->cq_coalesce_usecs;
        return 0;
}

static void nicvf_get_ringparam(struct net_device *netdev,
                                struct ethtool_ringparam *ring)
{
        struct nicvf *nic = netdev_priv(netdev);
        struct queue_set *qs = nic->qs;

        ring->rx_max_pending = MAX_CMP_QUEUE_LEN;
        ring->rx_pending = qs->cq_len;
        ring->tx_max_pending = MAX_SND_QUEUE_LEN;
        ring->tx_pending = qs->sq_len;
}

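/* Resize the completion (Rx) and send (Tx) queues. Requested counts are
 * clamped to the supported range and rounded down to a power of two, and
 * a running interface is restarted for the new lengths to take effect.
 */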
static int nicvf_set_ringparam(struct net_device *netdev,
                               struct ethtool_ringparam *ring)
{
        struct nicvf *nic = netdev_priv(netdev);
        struct queue_set *qs = nic->qs;
        u32 rx_count, tx_count;

        /* Due to HW errata this is not supported on T88 pass 1.x silicon */
        if (pass1_silicon(nic->pdev))
                return -EINVAL;

        if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
                return -EINVAL;

        tx_count = clamp_t(u32, ring->tx_pending,
                           MIN_SND_QUEUE_LEN, MAX_SND_QUEUE_LEN);
        rx_count = clamp_t(u32, ring->rx_pending,
                           MIN_CMP_QUEUE_LEN, MAX_CMP_QUEUE_LEN);

        if ((tx_count == qs->sq_len) && (rx_count == qs->cq_len))
                return 0;

        /* Permitted lengths are 1K, 2K, 4K, 8K, 16K, 32K, 64K */
        qs->sq_len = rounddown_pow_of_two(tx_count);
        qs->cq_len = rounddown_pow_of_two(rx_count);

        if (netif_running(netdev)) {
                nicvf_stop(netdev);
                nicvf_open(netdev);
        }

        return 0;
}

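/* Map ethtool RXH_* hash-field flags onto the hardware RSS configuration.
 * IP source/destination hashing is reported for every supported flow type;
 * L4 source/destination port hashing can be toggled per protocol.
 */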
static int nicvf_get_rss_hash_opts(struct nicvf *nic,
                                   struct ethtool_rxnfc *info)
{
        info->data = 0;

        switch (info->flow_type) {
        case TCP_V4_FLOW:
        case TCP_V6_FLOW:
        case UDP_V4_FLOW:
        case UDP_V6_FLOW:
        case SCTP_V4_FLOW:
        case SCTP_V6_FLOW:
                info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
                fallthrough;
        case IPV4_FLOW:
        case IPV6_FLOW:
                info->data |= RXH_IP_SRC | RXH_IP_DST;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

static int nicvf_get_rxnfc(struct net_device *dev,
                           struct ethtool_rxnfc *info, u32 *rules)
{
        struct nicvf *nic = netdev_priv(dev);
        int ret = -EOPNOTSUPP;

        switch (info->cmd) {
        case ETHTOOL_GRXRINGS:
                info->data = nic->rx_queues;
                ret = 0;
                break;
        case ETHTOOL_GRXFH:
                return nicvf_get_rss_hash_opts(nic, info);
        default:
                break;
        }
        return ret;
}

static int nicvf_set_rss_hash_opts(struct nicvf *nic,
                                   struct ethtool_rxnfc *info)
{
        struct nicvf_rss_info *rss = &nic->rss_info;
        u64 rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);

        if (!rss->enable)
                netdev_err(nic->netdev,
                           "RSS is disabled, hash cannot be set\n");

        netdev_info(nic->netdev, "Set RSS flow type = %d, data = %lld\n",
                    info->flow_type, info->data);

        if (!(info->data & RXH_IP_SRC) || !(info->data & RXH_IP_DST))
                return -EINVAL;

        switch (info->flow_type) {
        case TCP_V4_FLOW:
        case TCP_V6_FLOW:
                switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
                        rss_cfg &= ~(1ULL << RSS_HASH_TCP);
                        break;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
                        rss_cfg |= (1ULL << RSS_HASH_TCP);
                        break;
                default:
                        return -EINVAL;
                }
                break;
        case UDP_V4_FLOW:
        case UDP_V6_FLOW:
                switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
                        rss_cfg &= ~(1ULL << RSS_HASH_UDP);
                        break;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
                        rss_cfg |= (1ULL << RSS_HASH_UDP);
                        break;
                default:
                        return -EINVAL;
                }
                break;
        case SCTP_V4_FLOW:
        case SCTP_V6_FLOW:
                switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
                case 0:
                        rss_cfg &= ~(1ULL << RSS_HASH_L4ETC);
                        break;
                case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
                        rss_cfg |= (1ULL << RSS_HASH_L4ETC);
                        break;
                default:
                        return -EINVAL;
                }
                break;
        case IPV4_FLOW:
        case IPV6_FLOW:
                rss_cfg = RSS_HASH_IP;
                break;
        default:
                return -EINVAL;
        }

        nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss_cfg);
        return 0;
}

static int nicvf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
{
        struct nicvf *nic = netdev_priv(dev);

        switch (info->cmd) {
        case ETHTOOL_SRXFH:
                return nicvf_set_rss_hash_opts(nic, info);
        default:
                break;
        }
        return -EOPNOTSUPP;
}

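/* RSS key and indirection-table accessors. The hash key is stored as
 * RSS_HASH_KEY_SIZE u64 words and the indirection table has
 * rss_info.rss_size entries; only Toeplitz (ETH_RSS_HASH_TOP) hashing
 * is supported.
 */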
static u32 nicvf_get_rxfh_key_size(struct net_device *netdev)
{
        return RSS_HASH_KEY_SIZE * sizeof(u64);
}

static u32 nicvf_get_rxfh_indir_size(struct net_device *dev)
{
        struct nicvf *nic = netdev_priv(dev);

        return nic->rss_info.rss_size;
}

static int nicvf_get_rxfh(struct net_device *dev, u32 *indir, u8 *hkey,
                          u8 *hfunc)
{
        struct nicvf *nic = netdev_priv(dev);
        struct nicvf_rss_info *rss = &nic->rss_info;
        int idx;

        if (indir) {
                for (idx = 0; idx < rss->rss_size; idx++)
                        indir[idx] = rss->ind_tbl[idx];
        }

        if (hkey)
                memcpy(hkey, rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));

        if (hfunc)
                *hfunc = ETH_RSS_HASH_TOP;

        return 0;
}

static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir,
                          const u8 *hkey, const u8 hfunc)
{
        struct nicvf *nic = netdev_priv(dev);
        struct nicvf_rss_info *rss = &nic->rss_info;
        int idx;

        if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
                return -EOPNOTSUPP;

        if (!rss->enable) {
                netdev_err(nic->netdev,
                           "RSS is disabled, cannot change settings\n");
                return -EIO;
        }

        if (indir) {
                for (idx = 0; idx < rss->rss_size; idx++)
                        rss->ind_tbl[idx] = indir[idx];
        }

        if (hkey) {
                memcpy(rss->key, hkey, RSS_HASH_KEY_SIZE * sizeof(u64));
                nicvf_set_rss_key(nic);
        }

        nicvf_config_rss(nic);
        return 0;
}

/* Report the number of queues the device supports and the counts
 * currently in use.
 */
static void nicvf_get_channels(struct net_device *dev,
                               struct ethtool_channels *channel)
{
        struct nicvf *nic = netdev_priv(dev);

        memset(channel, 0, sizeof(*channel));

        channel->max_rx = nic->max_queues;
        channel->max_tx = nic->max_queues;

        channel->rx_count = nic->rx_queues;
        channel->tx_count = nic->tx_queues;
}

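/* A single qset provides up to MAX_CMP_QUEUES_PER_QS completion queues.
 * When more are needed (e.g. with XDP Tx queues stacked on top of the
 * regular Tx queues), additional secondary qsets are enabled to carry
 * the remainder.
 */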
/* Set the number of Tx and Rx queues to be used */
static int nicvf_set_channels(struct net_device *dev,
                              struct ethtool_channels *channel)
{
        struct nicvf *nic = netdev_priv(dev);
        int err = 0;
        bool if_up = netif_running(dev);
        u8 cqcount, txq_count;

        if (!channel->rx_count || !channel->tx_count)
                return -EINVAL;
        if (channel->rx_count > nic->max_queues)
                return -EINVAL;
        if (channel->tx_count > nic->max_queues)
                return -EINVAL;

        if (nic->xdp_prog &&
            ((channel->tx_count + channel->rx_count) > nic->max_queues)) {
                netdev_err(nic->netdev,
                           "XDP mode, RXQs + TXQs > Max %d\n",
                           nic->max_queues);
                return -EINVAL;
        }

        if (if_up)
                nicvf_stop(dev);

        nic->rx_queues = channel->rx_count;
        nic->tx_queues = channel->tx_count;
        if (!nic->xdp_prog)
                nic->xdp_tx_queues = 0;
        else
                nic->xdp_tx_queues = channel->rx_count;

        txq_count = nic->xdp_tx_queues + nic->tx_queues;
        cqcount = max(nic->rx_queues, txq_count);

        if (cqcount > MAX_CMP_QUEUES_PER_QS) {
                nic->sqs_count = roundup(cqcount, MAX_CMP_QUEUES_PER_QS);
                nic->sqs_count = (nic->sqs_count / MAX_CMP_QUEUES_PER_QS) - 1;
        } else {
                nic->sqs_count = 0;
        }

        nic->qs->rq_cnt = min_t(u8, nic->rx_queues, MAX_RCV_QUEUES_PER_QS);
        nic->qs->sq_cnt = min_t(u8, txq_count, MAX_SND_QUEUES_PER_QS);
        nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);

        err = nicvf_set_real_num_queues(dev, nic->tx_queues, nic->rx_queues);
        if (err)
                return err;

        if (if_up)
                nicvf_open(dev);

        netdev_info(dev, "Set num Tx rings to %d, Rx rings to %d\n",
                    nic->tx_queues, nic->rx_queues);

        return err;
}

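/* Flow-control (pause frame) settings are owned by the PF, so both the
 * get and set paths go through the VF-to-PF mailbox. Not available on
 * SGMII/QSGMII/RGMII links, and pause autonegotiation is not supported.
 */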
static void nicvf_get_pauseparam(struct net_device *dev,
                                 struct ethtool_pauseparam *pause)
{
        struct nicvf *nic = netdev_priv(dev);
        union nic_mbx mbx = {};

        /* Supported only for 10G/40G interfaces */
        if ((nic->mac_type == BGX_MODE_SGMII) ||
            (nic->mac_type == BGX_MODE_QSGMII) ||
            (nic->mac_type == BGX_MODE_RGMII))
                return;

        mbx.pfc.msg = NIC_MBOX_MSG_PFC;
        mbx.pfc.get = 1;
        if (!nicvf_send_msg_to_pf(nic, &mbx)) {
                pause->autoneg = nic->pfc.autoneg;
                pause->rx_pause = nic->pfc.fc_rx;
                pause->tx_pause = nic->pfc.fc_tx;
        }
}

static int nicvf_set_pauseparam(struct net_device *dev,
                                struct ethtool_pauseparam *pause)
{
        struct nicvf *nic = netdev_priv(dev);
        union nic_mbx mbx = {};

        /* Supported only for 10G/40G interfaces */
        if ((nic->mac_type == BGX_MODE_SGMII) ||
            (nic->mac_type == BGX_MODE_QSGMII) ||
            (nic->mac_type == BGX_MODE_RGMII))
                return -EOPNOTSUPP;

        if (pause->autoneg)
                return -EOPNOTSUPP;

        mbx.pfc.msg = NIC_MBOX_MSG_PFC;
        mbx.pfc.get = 0;
        mbx.pfc.fc_rx = pause->rx_pause;
        mbx.pfc.fc_tx = pause->tx_pause;
        if (nicvf_send_msg_to_pf(nic, &mbx))
                return -EAGAIN;

        nic->pfc.fc_rx = pause->rx_pause;
        nic->pfc.fc_tx = pause->tx_pause;

        return 0;
}

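/* Advertise timestamping capabilities. Without a PTP clock, fall back to
 * the stock software-only implementation; with one, report hardware Tx/Rx
 * timestamping and the PHC index supplied by the cavium_ptp driver.
 */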
static int nicvf_get_ts_info(struct net_device *netdev,
                             struct ethtool_ts_info *info)
{
        struct nicvf *nic = netdev_priv(netdev);

        if (!nic->ptp_clock)
                return ethtool_op_get_ts_info(netdev, info);

        info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
                                SOF_TIMESTAMPING_RX_SOFTWARE |
                                SOF_TIMESTAMPING_SOFTWARE |
                                SOF_TIMESTAMPING_TX_HARDWARE |
                                SOF_TIMESTAMPING_RX_HARDWARE |
                                SOF_TIMESTAMPING_RAW_HARDWARE;

        info->phc_index = cavium_ptp_clock_index(nic->ptp_clock);

        info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);

        info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
                           (1 << HWTSTAMP_FILTER_ALL);

        return 0;
}

static const struct ethtool_ops nicvf_ethtool_ops = {
        .get_link = nicvf_get_link,
        .get_drvinfo = nicvf_get_drvinfo,
        .get_msglevel = nicvf_get_msglevel,
        .set_msglevel = nicvf_set_msglevel,
        .get_strings = nicvf_get_strings,
        .get_sset_count = nicvf_get_sset_count,
        .get_ethtool_stats = nicvf_get_ethtool_stats,
        .get_regs_len = nicvf_get_regs_len,
        .get_regs = nicvf_get_regs,
        .get_coalesce = nicvf_get_coalesce,
        .get_ringparam = nicvf_get_ringparam,
        .set_ringparam = nicvf_set_ringparam,
        .get_rxnfc = nicvf_get_rxnfc,
        .set_rxnfc = nicvf_set_rxnfc,
        .get_rxfh_key_size = nicvf_get_rxfh_key_size,
        .get_rxfh_indir_size = nicvf_get_rxfh_indir_size,
        .get_rxfh = nicvf_get_rxfh,
        .set_rxfh = nicvf_set_rxfh,
        .get_channels = nicvf_get_channels,
        .set_channels = nicvf_set_channels,
        .get_pauseparam = nicvf_get_pauseparam,
        .set_pauseparam = nicvf_set_pauseparam,
        .get_ts_info = nicvf_get_ts_info,
        .get_link_ksettings = nicvf_get_link_ksettings,
};

void nicvf_set_ethtool_ops(struct net_device *netdev)
{
        netdev->ethtool_ops = &nicvf_ethtool_ops;
}