/*
 * Copyright (C) 2015 Cavium, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 */

/* ETHTOOL Support for VNIC_VF Device */
#include <linux/pci.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

#include "nic_reg.h"
#include "nic.h"
#include "q_struct.h"
#include "nicvf_queues.h"
#include "thunder_bgx.h"

#define DRV_NAME	"thunder-nicvf"
#define DRV_VERSION	"1.0"
struct nicvf_stat {
	char name[ETH_GSTRING_LEN];
	unsigned int index;
};

#define NICVF_HW_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct nicvf_hw_stats, stat) / sizeof(u64), \
}

#define NICVF_DRV_STAT(stat) { \
	.name = #stat, \
	.index = offsetof(struct nicvf_drv_stats, stat) / sizeof(u64), \
}
static const struct nicvf_stat nicvf_hw_stats[] = {
	NICVF_HW_STAT(rx_bytes),
	NICVF_HW_STAT(rx_frames),
	NICVF_HW_STAT(rx_ucast_frames),
	NICVF_HW_STAT(rx_bcast_frames),
	NICVF_HW_STAT(rx_mcast_frames),
	NICVF_HW_STAT(rx_drops),
	NICVF_HW_STAT(rx_drop_red),
	NICVF_HW_STAT(rx_drop_red_bytes),
	NICVF_HW_STAT(rx_drop_overrun),
	NICVF_HW_STAT(rx_drop_overrun_bytes),
	NICVF_HW_STAT(rx_drop_bcast),
	NICVF_HW_STAT(rx_drop_mcast),
	NICVF_HW_STAT(rx_drop_l3_bcast),
	NICVF_HW_STAT(rx_drop_l3_mcast),
	NICVF_HW_STAT(rx_fcs_errors),
	NICVF_HW_STAT(rx_l2_errors),
	NICVF_HW_STAT(tx_bytes),
	NICVF_HW_STAT(tx_frames),
	NICVF_HW_STAT(tx_ucast_frames),
	NICVF_HW_STAT(tx_bcast_frames),
	NICVF_HW_STAT(tx_mcast_frames),
	NICVF_HW_STAT(tx_drops),
};
static const struct nicvf_stat nicvf_drv_stats[] = {
	NICVF_DRV_STAT(rx_bgx_truncated_pkts),
	NICVF_DRV_STAT(rx_jabber_errs),
	NICVF_DRV_STAT(rx_fcs_errs),
	NICVF_DRV_STAT(rx_bgx_errs),
	NICVF_DRV_STAT(rx_prel2_errs),
	NICVF_DRV_STAT(rx_l2_hdr_malformed),
	NICVF_DRV_STAT(rx_oversize),
	NICVF_DRV_STAT(rx_undersize),
	NICVF_DRV_STAT(rx_l2_len_mismatch),
	NICVF_DRV_STAT(rx_l2_pclp),
	NICVF_DRV_STAT(rx_ip_ver_errs),
	NICVF_DRV_STAT(rx_ip_csum_errs),
	NICVF_DRV_STAT(rx_ip_hdr_malformed),
	NICVF_DRV_STAT(rx_ip_payload_malformed),
	NICVF_DRV_STAT(rx_ip_ttl_errs),
	NICVF_DRV_STAT(rx_l3_pclp),
	NICVF_DRV_STAT(rx_l4_malformed),
	NICVF_DRV_STAT(rx_l4_csum_errs),
	NICVF_DRV_STAT(rx_udp_len_errs),
	NICVF_DRV_STAT(rx_l4_port_errs),
	NICVF_DRV_STAT(rx_tcp_flag_errs),
	NICVF_DRV_STAT(rx_tcp_offset_errs),
	NICVF_DRV_STAT(rx_l4_pclp),
	NICVF_DRV_STAT(rx_truncated_pkts),

	NICVF_DRV_STAT(tx_desc_fault),
	NICVF_DRV_STAT(tx_hdr_cons_err),
	NICVF_DRV_STAT(tx_subdesc_err),
	NICVF_DRV_STAT(tx_max_size_exceeded),
	NICVF_DRV_STAT(tx_imm_size_oflow),
	NICVF_DRV_STAT(tx_data_seq_err),
	NICVF_DRV_STAT(tx_mem_seq_err),
	NICVF_DRV_STAT(tx_lock_viol),
	NICVF_DRV_STAT(tx_data_fault),
	NICVF_DRV_STAT(tx_tstmp_conflict),
	NICVF_DRV_STAT(tx_tstmp_timeout),
	NICVF_DRV_STAT(tx_mem_fault),
	NICVF_DRV_STAT(tx_csum_overlap),
	NICVF_DRV_STAT(tx_csum_overflow),

	NICVF_DRV_STAT(rcv_buffer_alloc_failures),
	NICVF_DRV_STAT(tx_tso),
	NICVF_DRV_STAT(tx_timeout),
	NICVF_DRV_STAT(txq_stop),
	NICVF_DRV_STAT(txq_wake),
};
static const struct nicvf_stat nicvf_queue_stats[] = {
	{ "bytes", 0 },
	{ "frames", 1 },
};
static const unsigned int nicvf_n_hw_stats = ARRAY_SIZE(nicvf_hw_stats);
static const unsigned int nicvf_n_drv_stats = ARRAY_SIZE(nicvf_drv_stats);
static const unsigned int nicvf_n_queue_stats = ARRAY_SIZE(nicvf_queue_stats);
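/* Link state (speed, duplex, MAC type) is assumed to be kept up to date
 * by the PF via mailbox notifications; this handler only translates the
 * values cached in struct nicvf into the ethtool link_ksettings format.
 */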
static int nicvf_get_link_ksettings(struct net_device *netdev,
				    struct ethtool_link_ksettings *cmd)
{
	struct nicvf *nic = netdev_priv(netdev);
	u32 supported, advertising;

	supported = 0;
	advertising = 0;

	if (!nic->link_up) {
		cmd->base.duplex = DUPLEX_UNKNOWN;
		cmd->base.speed = SPEED_UNKNOWN;
		return 0;
	}

	switch (nic->speed) {
	case SPEED_1000:
		cmd->base.port = PORT_MII | PORT_TP;
		cmd->base.autoneg = AUTONEG_ENABLE;
		supported |= SUPPORTED_MII | SUPPORTED_TP;
		supported |= SUPPORTED_1000baseT_Full |
			     SUPPORTED_1000baseT_Half |
			     SUPPORTED_100baseT_Full |
			     SUPPORTED_100baseT_Half |
			     SUPPORTED_10baseT_Full |
			     SUPPORTED_10baseT_Half;
		supported |= SUPPORTED_Autoneg;
		advertising |= ADVERTISED_1000baseT_Full |
			       ADVERTISED_1000baseT_Half |
			       ADVERTISED_100baseT_Full |
			       ADVERTISED_100baseT_Half |
			       ADVERTISED_10baseT_Full |
			       ADVERTISED_10baseT_Half;
		break;
	case SPEED_10000:
		if (nic->mac_type == BGX_MODE_RXAUI) {
			cmd->base.port = PORT_TP;
			supported |= SUPPORTED_TP;
		} else {
			cmd->base.port = PORT_FIBRE;
			supported |= SUPPORTED_FIBRE;
		}
		cmd->base.autoneg = AUTONEG_DISABLE;
		supported |= SUPPORTED_10000baseT_Full;
		break;
	case SPEED_40000:
		cmd->base.port = PORT_FIBRE;
		cmd->base.autoneg = AUTONEG_DISABLE;
		supported |= SUPPORTED_FIBRE;
		supported |= SUPPORTED_40000baseCR4_Full;
		break;
	}
	cmd->base.duplex = nic->duplex;
	cmd->base.speed = nic->speed;

	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported,
						supported);
	ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising,
						advertising);

	return 0;
}
static u32 nicvf_get_link(struct net_device *netdev)
{
	struct nicvf *nic = netdev_priv(netdev);

	return nic->link_up;
}
static void nicvf_get_drvinfo(struct net_device *netdev,
			      struct ethtool_drvinfo *info)
{
	struct nicvf *nic = netdev_priv(netdev);

	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->bus_info, pci_name(nic->pdev), sizeof(info->bus_info));
}
static u32 nicvf_get_msglevel(struct net_device *netdev)
{
	struct nicvf *nic = netdev_priv(netdev);

	return nic->msg_enable;
}
static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl)
{
	struct nicvf *nic = netdev_priv(netdev);

	nic->msg_enable = lvl;
}
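/* Per-queue stat names are emitted as "rxqN: <stat>" / "txqN: <stat>",
 * where N is made globally unique across the primary and any secondary
 * Qsets by offsetting with qset * MAX_RCV_QUEUES_PER_QS.
 */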
static void nicvf_get_qset_strings(struct nicvf *nic, u8 **data, int qset)
{
	int stats, qidx;
	int start_qidx = qset * MAX_RCV_QUEUES_PER_QS;

	for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
		for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
			sprintf(*data, "rxq%d: %s", qidx + start_qidx,
				nicvf_queue_stats[stats].name);
			*data += ETH_GSTRING_LEN;
		}
	}

	for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
		for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
			sprintf(*data, "txq%d: %s", qidx + start_qidx,
				nicvf_queue_stats[stats].name);
			*data += ETH_GSTRING_LEN;
		}
	}
}
static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
	struct nicvf *nic = netdev_priv(netdev);
	int stats;
	int sqs;

	if (sset != ETH_SS_STATS)
		return;

	for (stats = 0; stats < nicvf_n_hw_stats; stats++) {
		memcpy(data, nicvf_hw_stats[stats].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	for (stats = 0; stats < nicvf_n_drv_stats; stats++) {
		memcpy(data, nicvf_drv_stats[stats].name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	nicvf_get_qset_strings(nic, &data, 0);

	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
		if (!nic->snicvf[sqs])
			continue;
		nicvf_get_qset_strings(nic->snicvf[sqs], &data, sqs + 1);
	}

	for (stats = 0; stats < BGX_RX_STATS_COUNT; stats++) {
		sprintf(data, "bgx_rxstat%d: ", stats);
		data += ETH_GSTRING_LEN;
	}

	for (stats = 0; stats < BGX_TX_STATS_COUNT; stats++) {
		sprintf(data, "bgx_txstat%d: ", stats);
		data += ETH_GSTRING_LEN;
	}
}
static int nicvf_get_sset_count(struct net_device *netdev, int sset)
{
	struct nicvf *nic = netdev_priv(netdev);
	int qstats_count;
	int sqs;

	if (sset != ETH_SS_STATS)
		return -EINVAL;

	qstats_count = nicvf_n_queue_stats *
		       (nic->qs->rq_cnt + nic->qs->sq_cnt);
	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
		struct nicvf *snic;

		snic = nic->snicvf[sqs];
		if (!snic)
			continue;
		qstats_count += nicvf_n_queue_stats *
				(snic->qs->rq_cnt + snic->qs->sq_cnt);
	}
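	/* Total = HW stats + per-CPU driver stats + per-queue stats for the
	 * primary and secondary Qsets + BGX LMAC Rx/Tx stats.
	 */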
	return nicvf_n_hw_stats + nicvf_n_drv_stats +
	       qstats_count +
	       BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT;
}
static void nicvf_get_qset_stats(struct nicvf *nic,
				 struct ethtool_stats *stats, u64 **data)
{
	int stat, qidx;

	if (!nic)
		return;

	for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
		nicvf_update_rq_stats(nic, qidx);
		for (stat = 0; stat < nicvf_n_queue_stats; stat++)
			*((*data)++) = ((u64 *)&nic->qs->rq[qidx].stats)
					[nicvf_queue_stats[stat].index];
	}

	for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
		nicvf_update_sq_stats(nic, qidx);
		for (stat = 0; stat < nicvf_n_queue_stats; stat++)
			*((*data)++) = ((u64 *)&nic->qs->sq[qidx].stats)
					[nicvf_queue_stats[stat].index];
	}
}
static void nicvf_get_ethtool_stats(struct net_device *netdev,
				    struct ethtool_stats *stats, u64 *data)
{
	struct nicvf *nic = netdev_priv(netdev);
	u64 tmp_stats;
	int stat, sqs, cpu;

	nicvf_update_stats(nic);

	/* Update LMAC stats */
	nicvf_update_lmac_stats(nic);

	for (stat = 0; stat < nicvf_n_hw_stats; stat++)
		*(data++) = ((u64 *)&nic->hw_stats)
				[nicvf_hw_stats[stat].index];
	for (stat = 0; stat < nicvf_n_drv_stats; stat++) {
		tmp_stats = 0;
		for_each_possible_cpu(cpu)
			tmp_stats += ((u64 *)per_cpu_ptr(nic->drv_stats, cpu))
				     [nicvf_drv_stats[stat].index];
		*(data++) = tmp_stats;
	}

	nicvf_get_qset_stats(nic, stats, &data);

	for (sqs = 0; sqs < nic->sqs_count; sqs++) {
		if (!nic->snicvf[sqs])
			continue;
		nicvf_get_qset_stats(nic->snicvf[sqs], stats, &data);
	}

	for (stat = 0; stat < BGX_RX_STATS_COUNT; stat++)
		*(data++) = nic->bgx_stats.rx_stats[stat];
	for (stat = 0; stat < BGX_TX_STATS_COUNT; stat++)
		*(data++) = nic->bgx_stats.tx_stats[stat];
}
static int nicvf_get_regs_len(struct net_device *dev)
{
	return sizeof(u64) * NIC_VF_REG_COUNT;
}
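/* Dump the VF-accessible CSRs in a fixed order: VNIC config, mailbox,
 * interrupt and RSS registers first, then the per-queue CQ, RQ, SQ and
 * RBDR registers for every queue of the Qset.
 */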
static void nicvf_get_regs(struct net_device *dev,
			   struct ethtool_regs *regs, void *reg)
{
	struct nicvf *nic = netdev_priv(dev);
	u64 *p = (u64 *)reg;
	u64 reg_offset;
	int mbox, key, stat, q;
	int i = 0;

	regs->version = 0;
	memset(p, 0, NIC_VF_REG_COUNT);

	p[i++] = nicvf_reg_read(nic, NIC_VNIC_CFG);
	/* Mailbox registers */
	for (mbox = 0; mbox < NIC_PF_VF_MAILBOX_SIZE; mbox++)
		p[i++] = nicvf_reg_read(nic,
					NIC_VF_PF_MAILBOX_0_1 | (mbox << 3));

	p[i++] = nicvf_reg_read(nic, NIC_VF_INT);
	p[i++] = nicvf_reg_read(nic, NIC_VF_INT_W1S);
	p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1C);
	p[i++] = nicvf_reg_read(nic, NIC_VF_ENA_W1S);
	p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);

	for (key = 0; key < RSS_HASH_KEY_SIZE; key++)
		p[i++] = nicvf_reg_read(nic, NIC_VNIC_RSS_KEY_0_4 | (key << 3));

	/* Tx/Rx statistics */
	for (stat = 0; stat < TX_STATS_ENUM_LAST; stat++)
		p[i++] = nicvf_reg_read(nic,
					NIC_VNIC_TX_STAT_0_4 | (stat << 3));

	for (stat = 0; stat < RX_STATS_ENUM_LAST; stat++)
		p[i++] = nicvf_reg_read(nic,
					NIC_VNIC_RX_STAT_0_13 | (stat << 3));

	p[i++] = nicvf_reg_read(nic, NIC_QSET_RQ_GEN_CFG);
	/* All completion queue's registers */
	for (q = 0; q < MAX_CMP_QUEUES_PER_QS; q++) {
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_CFG2, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_THRESH, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_BASE, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_TAIL, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DOOR, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_STATUS2, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_DEBUG, q);
	}
	/* All receive queue's registers */
	for (q = 0; q < MAX_RCV_QUEUES_PER_QS; q++) {
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RQ_0_7_CFG, q);
		p[i++] = nicvf_queue_reg_read(nic,
					      NIC_QSET_RQ_0_7_STAT_0_1, q);
		reg_offset = NIC_QSET_RQ_0_7_STAT_0_1 | (1 << 3);
		p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
	}
	/* All send queue's registers */
	for (q = 0; q < MAX_SND_QUEUES_PER_QS; q++) {
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_CFG, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_THRESH, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_BASE, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_HEAD, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_TAIL, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DOOR, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STATUS, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_DEBUG, q);
		/* Padding, was NIC_QSET_SQ_0_7_CNM_CHG, which
		 * produces bus errors when read
		 */
		p[i++] = 0;
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_SQ_0_7_STAT_0_1, q);
		reg_offset = NIC_QSET_SQ_0_7_STAT_0_1 | (1 << 3);
		p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
	}
	/* All receive buffer descriptor ring registers */
	for (q = 0; q < MAX_RCV_BUF_DESC_RINGS_PER_QS; q++) {
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_CFG, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_THRESH, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_BASE, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_HEAD, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_TAIL, q);
		p[i++] = nicvf_queue_reg_read(nic, NIC_QSET_RBDR_0_1_DOOR, q);
		p[i++] = nicvf_queue_reg_read(nic,
					      NIC_QSET_RBDR_0_1_STATUS0, q);
		p[i++] = nicvf_queue_reg_read(nic,
					      NIC_QSET_RBDR_0_1_STATUS1, q);
		reg_offset = NIC_QSET_RBDR_0_1_PREFETCH_STATUS;
		p[i++] = nicvf_queue_reg_read(nic, reg_offset, q);
	}
}
static int nicvf_get_coalesce(struct net_device *netdev,
			      struct ethtool_coalesce *cmd)
{
	struct nicvf *nic = netdev_priv(netdev);

	cmd->rx_coalesce_usecs = nic->cq_coalesce_usecs;
	return 0;
}
static void nicvf_get_ringparam(struct net_device *netdev,
				struct ethtool_ringparam *ring)
{
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;

	ring->rx_max_pending = MAX_CMP_QUEUE_LEN;
	ring->rx_pending = qs->cq_len;
	ring->tx_max_pending = MAX_SND_QUEUE_LEN;
	ring->tx_pending = qs->sq_len;
}
static int nicvf_set_ringparam(struct net_device *netdev,
			       struct ethtool_ringparam *ring)
{
	struct nicvf *nic = netdev_priv(netdev);
	struct queue_set *qs = nic->qs;
	u32 rx_count, tx_count;

	/* Due to HW errata this is not supported on T88 pass 1.x silicon */
	if (pass1_silicon(nic->pdev))
		return -EINVAL;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	tx_count = clamp_t(u32, ring->tx_pending,
			   MIN_SND_QUEUE_LEN, MAX_SND_QUEUE_LEN);
	rx_count = clamp_t(u32, ring->rx_pending,
			   MIN_CMP_QUEUE_LEN, MAX_CMP_QUEUE_LEN);

	if ((tx_count == qs->sq_len) && (rx_count == qs->cq_len))
		return 0;

	/* Permitted lengths are 1K, 2K, 4K, 8K, 16K, 32K, 64K */
	qs->sq_len = rounddown_pow_of_two(tx_count);
	qs->cq_len = rounddown_pow_of_two(rx_count);
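	/* Applying new ring sizes requires the queues to be torn down and
	 * reallocated, so restart the interface if it is currently up.
	 */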
	if (netif_running(netdev)) {
		nicvf_stop(netdev);
		nicvf_open(netdev);
	}

	return 0;
}
static int nicvf_get_rss_hash_opts(struct nicvf *nic,
				   struct ethtool_rxnfc *info)
{
	info->data = 0;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		info->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3;
		/* Fall through */
	case IPV4_FLOW:
	case IPV6_FLOW:
		info->data |= RXH_IP_SRC | RXH_IP_DST;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int nicvf_get_rxnfc(struct net_device *dev,
			   struct ethtool_rxnfc *info, u32 *rules)
{
	struct nicvf *nic = netdev_priv(dev);
	int ret = -EOPNOTSUPP;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = nic->rx_queues;
		ret = 0;
		break;
	case ETHTOOL_GRXFH:
		return nicvf_get_rss_hash_opts(nic, info);
	default:
		break;
	}
	return ret;
}
static int nicvf_set_rss_hash_opts(struct nicvf *nic,
				   struct ethtool_rxnfc *info)
{
	struct nicvf_rss_info *rss = &nic->rss_info;
	u64 rss_cfg = nicvf_reg_read(nic, NIC_VNIC_RSS_CFG);

	if (!rss->enable) {
		netdev_err(nic->netdev,
			   "RSS is disabled, hash cannot be set\n");
		return -EIO;
	}

	netdev_info(nic->netdev, "Set RSS flow type = %d, data = %lld\n",
		    info->flow_type, info->data);

	if (!(info->data & RXH_IP_SRC) || !(info->data & RXH_IP_DST))
		return -EINVAL;

	switch (info->flow_type) {
	case TCP_V4_FLOW:
	case TCP_V6_FLOW:
		switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			rss_cfg &= ~(1ULL << RSS_HASH_TCP);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_cfg |= (1ULL << RSS_HASH_TCP);
			break;
		default:
			return -EINVAL;
		}
		break;
	case UDP_V4_FLOW:
	case UDP_V6_FLOW:
		switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			rss_cfg &= ~(1ULL << RSS_HASH_UDP);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_cfg |= (1ULL << RSS_HASH_UDP);
			break;
		default:
			return -EINVAL;
		}
		break;
	case SCTP_V4_FLOW:
	case SCTP_V6_FLOW:
		switch (info->data & (RXH_L4_B_0_1 | RXH_L4_B_2_3)) {
		case 0:
			rss_cfg &= ~(1ULL << RSS_HASH_L4ETC);
			break;
		case (RXH_L4_B_0_1 | RXH_L4_B_2_3):
			rss_cfg |= (1ULL << RSS_HASH_L4ETC);
			break;
		default:
			return -EINVAL;
		}
		break;
	case IPV4_FLOW:
	case IPV6_FLOW:
		rss_cfg = RSS_HASH_IP;
		break;
	default:
		return -EINVAL;
	}

	nicvf_reg_write(nic, NIC_VNIC_RSS_CFG, rss_cfg);
	return 0;
}
static int nicvf_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info)
{
	struct nicvf *nic = netdev_priv(dev);

	switch (info->cmd) {
	case ETHTOOL_SRXFH:
		return nicvf_set_rss_hash_opts(nic, info);
	default:
		break;
	}
	return -EOPNOTSUPP;
}
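/* The RSS hash key is stored as RSS_HASH_KEY_SIZE 64-bit words, so the
 * key length reported to ethtool is that count times sizeof(u64).
 */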
static u32 nicvf_get_rxfh_key_size(struct net_device *netdev)
{
	return RSS_HASH_KEY_SIZE * sizeof(u64);
}
static u32 nicvf_get_rxfh_indir_size(struct net_device *dev)
{
	struct nicvf *nic = netdev_priv(dev);

	return nic->rss_info.rss_size;
}
static int nicvf_get_rxfh(struct net_device *dev, u32 *indir, u8 *hkey,
			  u8 *hfunc)
{
	struct nicvf *nic = netdev_priv(dev);
	struct nicvf_rss_info *rss = &nic->rss_info;
	int idx;

	if (indir) {
		for (idx = 0; idx < rss->rss_size; idx++)
			indir[idx] = rss->ind_tbl[idx];
	}

	if (hkey)
		memcpy(hkey, rss->key, RSS_HASH_KEY_SIZE * sizeof(u64));

	if (hfunc)
		*hfunc = ETH_RSS_HASH_TOP;

	return 0;
}
static int nicvf_set_rxfh(struct net_device *dev, const u32 *indir,
			  const u8 *hkey, const u8 hfunc)
{
	struct nicvf *nic = netdev_priv(dev);
	struct nicvf_rss_info *rss = &nic->rss_info;
	int idx;

	if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	if (!rss->enable) {
		netdev_err(nic->netdev,
			   "RSS is disabled, cannot change settings\n");
		return -EIO;
	}

	if (indir) {
		for (idx = 0; idx < rss->rss_size; idx++)
			rss->ind_tbl[idx] = indir[idx];
	}

	if (hkey) {
		memcpy(rss->key, hkey, RSS_HASH_KEY_SIZE * sizeof(u64));
		nicvf_set_rss_key(nic);
	}

	nicvf_config_rss(nic);
	return 0;
}
/* Get the number of queues the device supports and the current queue counts */
static void nicvf_get_channels(struct net_device *dev,
			       struct ethtool_channels *channel)
{
	struct nicvf *nic = netdev_priv(dev);

	memset(channel, 0, sizeof(*channel));

	channel->max_rx = nic->max_queues;
	channel->max_tx = nic->max_queues;

	channel->rx_count = nic->rx_queues;
	channel->tx_count = nic->tx_queues;
}
/* Set the number of Tx and Rx queues to be used */
static int nicvf_set_channels(struct net_device *dev,
			      struct ethtool_channels *channel)
{
	struct nicvf *nic = netdev_priv(dev);
	int err = 0;
	bool if_up = netif_running(dev);
	u32 cqcount;

	if (!channel->rx_count || !channel->tx_count)
		return -EINVAL;
	if (channel->rx_count > nic->max_queues)
		return -EINVAL;
	if (channel->tx_count > nic->max_queues)
		return -EINVAL;

	if (if_up)
		nicvf_stop(dev);

	cqcount = max(channel->rx_count, channel->tx_count);

	if (cqcount > MAX_CMP_QUEUES_PER_QS) {
		nic->sqs_count = roundup(cqcount, MAX_CMP_QUEUES_PER_QS);
		nic->sqs_count = (nic->sqs_count / MAX_CMP_QUEUES_PER_QS) - 1;
	} else {
		nic->sqs_count = 0;
	}

	nic->qs->rq_cnt = min_t(u32, channel->rx_count, MAX_RCV_QUEUES_PER_QS);
	nic->qs->sq_cnt = min_t(u32, channel->tx_count, MAX_SND_QUEUES_PER_QS);
	nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);

	nic->rx_queues = channel->rx_count;
	nic->tx_queues = channel->tx_count;
	err = nicvf_set_real_num_queues(dev, nic->tx_queues, nic->rx_queues);
	if (err)
		return err;

	if (if_up)
		nicvf_open(dev);

	netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
		    nic->tx_queues, nic->rx_queues);

	return err;
}
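/* Pause/PFC state is owned by the PF; both handlers below exchange a
 * NIC_MBOX_MSG_PFC mailbox message with it rather than touching the
 * hardware directly.
 */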
static void nicvf_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *pause)
{
	struct nicvf *nic = netdev_priv(dev);
	union nic_mbx mbx = {};

	/* Supported only for 10G/40G interfaces */
	if ((nic->mac_type == BGX_MODE_SGMII) ||
	    (nic->mac_type == BGX_MODE_QSGMII) ||
	    (nic->mac_type == BGX_MODE_RGMII))
		return;

	mbx.pfc.msg = NIC_MBOX_MSG_PFC;
	mbx.pfc.get = 1;
	if (!nicvf_send_msg_to_pf(nic, &mbx)) {
		pause->autoneg = nic->pfc.autoneg;
		pause->rx_pause = nic->pfc.fc_rx;
		pause->tx_pause = nic->pfc.fc_tx;
	}
}
static int nicvf_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *pause)
{
	struct nicvf *nic = netdev_priv(dev);
	union nic_mbx mbx = {};

	/* Supported only for 10G/40G interfaces */
	if ((nic->mac_type == BGX_MODE_SGMII) ||
	    (nic->mac_type == BGX_MODE_QSGMII) ||
	    (nic->mac_type == BGX_MODE_RGMII))
		return -EOPNOTSUPP;

	if (pause->autoneg)
		return -EOPNOTSUPP;

	mbx.pfc.msg = NIC_MBOX_MSG_PFC;
	mbx.pfc.get = 0;
	mbx.pfc.fc_rx = pause->rx_pause;
	mbx.pfc.fc_tx = pause->tx_pause;
	if (nicvf_send_msg_to_pf(nic, &mbx))
		return -EAGAIN;

	nic->pfc.fc_rx = pause->rx_pause;
	nic->pfc.fc_tx = pause->tx_pause;

	return 0;
}
static const struct ethtool_ops nicvf_ethtool_ops = {
	.get_link		= nicvf_get_link,
	.get_drvinfo		= nicvf_get_drvinfo,
	.get_msglevel		= nicvf_get_msglevel,
	.set_msglevel		= nicvf_set_msglevel,
	.get_strings		= nicvf_get_strings,
	.get_sset_count		= nicvf_get_sset_count,
	.get_ethtool_stats	= nicvf_get_ethtool_stats,
	.get_regs_len		= nicvf_get_regs_len,
	.get_regs		= nicvf_get_regs,
	.get_coalesce		= nicvf_get_coalesce,
	.get_ringparam		= nicvf_get_ringparam,
	.set_ringparam		= nicvf_set_ringparam,
	.get_rxnfc		= nicvf_get_rxnfc,
	.set_rxnfc		= nicvf_set_rxnfc,
	.get_rxfh_key_size	= nicvf_get_rxfh_key_size,
	.get_rxfh_indir_size	= nicvf_get_rxfh_indir_size,
	.get_rxfh		= nicvf_get_rxfh,
	.set_rxfh		= nicvf_set_rxfh,
	.get_channels		= nicvf_get_channels,
	.set_channels		= nicvf_set_channels,
	.get_pauseparam		= nicvf_get_pauseparam,
	.set_pauseparam		= nicvf_set_pauseparam,
	.get_ts_info		= ethtool_op_get_ts_info,
	.get_link_ksettings	= nicvf_get_link_ksettings,
};
void nicvf_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &nicvf_ethtool_ops;
}