// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright 2015-2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#include <linux/ethtool.h>
#include <linux/pci.h>

#include "ena_netdev.h"

struct ena_stats {
	char name[ETH_GSTRING_LEN];
	int stat_offset; /* offset from the stats struct base, in u64 units */
};

#define ENA_STAT_ENA_COM_ENTRY(stat) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_com_stats_admin, stat) / sizeof(u64) \
}

#define ENA_STAT_ENTRY(stat, stat_type) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_stats_##stat_type, stat) / sizeof(u64) \
}

#define ENA_STAT_HW_ENTRY(stat, stat_type) { \
	.name = #stat, \
	.stat_offset = offsetof(struct ena_admin_##stat_type, stat) / sizeof(u64) \
}

#define ENA_STAT_RX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, rx)

#define ENA_STAT_TX_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, tx)

#define ENA_STAT_GLOBAL_ENTRY(stat) \
	ENA_STAT_ENTRY(stat, dev)

#define ENA_STAT_ENI_ENTRY(stat) \
	ENA_STAT_HW_ENTRY(stat, eni_stats)

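/*
 * Statistics tables: each entry pairs a printable name with the u64 offset of
 * the matching counter inside its stats structure. The ethtool string and
 * value arrays below are produced by walking these tables, so exporting a new
 * counter normally means adding its field to the relevant stats struct and a
 * matching entry here.
 */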
static const struct ena_stats ena_stats_global_strings[] = {
	ENA_STAT_GLOBAL_ENTRY(tx_timeout),
	ENA_STAT_GLOBAL_ENTRY(suspend),
	ENA_STAT_GLOBAL_ENTRY(resume),
	ENA_STAT_GLOBAL_ENTRY(wd_expired),
	ENA_STAT_GLOBAL_ENTRY(interface_up),
	ENA_STAT_GLOBAL_ENTRY(interface_down),
	ENA_STAT_GLOBAL_ENTRY(admin_q_pause),
};

static const struct ena_stats ena_stats_eni_strings[] = {
	ENA_STAT_ENI_ENTRY(bw_in_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(bw_out_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(pps_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(conntrack_allowance_exceeded),
	ENA_STAT_ENI_ENTRY(linklocal_allowance_exceeded),
};

static const struct ena_stats ena_stats_tx_strings[] = {
	ENA_STAT_TX_ENTRY(cnt),
	ENA_STAT_TX_ENTRY(bytes),
	ENA_STAT_TX_ENTRY(queue_stop),
	ENA_STAT_TX_ENTRY(queue_wakeup),
	ENA_STAT_TX_ENTRY(dma_mapping_err),
	ENA_STAT_TX_ENTRY(linearize),
	ENA_STAT_TX_ENTRY(linearize_failed),
	ENA_STAT_TX_ENTRY(napi_comp),
	ENA_STAT_TX_ENTRY(tx_poll),
	ENA_STAT_TX_ENTRY(doorbells),
	ENA_STAT_TX_ENTRY(prepare_ctx_err),
	ENA_STAT_TX_ENTRY(bad_req_id),
	ENA_STAT_TX_ENTRY(llq_buffer_copy),
	ENA_STAT_TX_ENTRY(missed_tx),
	ENA_STAT_TX_ENTRY(unmask_interrupt),
};

static const struct ena_stats ena_stats_rx_strings[] = {
	ENA_STAT_RX_ENTRY(cnt),
	ENA_STAT_RX_ENTRY(bytes),
	ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
	ENA_STAT_RX_ENTRY(csum_good),
	ENA_STAT_RX_ENTRY(refil_partial),
	ENA_STAT_RX_ENTRY(bad_csum),
	ENA_STAT_RX_ENTRY(page_alloc_fail),
	ENA_STAT_RX_ENTRY(skb_alloc_fail),
	ENA_STAT_RX_ENTRY(dma_mapping_err),
	ENA_STAT_RX_ENTRY(bad_desc_num),
	ENA_STAT_RX_ENTRY(bad_req_id),
	ENA_STAT_RX_ENTRY(empty_rx_ring),
	ENA_STAT_RX_ENTRY(csum_unchecked),
	ENA_STAT_RX_ENTRY(xdp_aborted),
	ENA_STAT_RX_ENTRY(xdp_drop),
	ENA_STAT_RX_ENTRY(xdp_pass),
	ENA_STAT_RX_ENTRY(xdp_tx),
	ENA_STAT_RX_ENTRY(xdp_invalid),
	ENA_STAT_RX_ENTRY(xdp_redirect),
};

static const struct ena_stats ena_stats_ena_com_strings[] = {
	ENA_STAT_ENA_COM_ENTRY(aborted_cmd),
	ENA_STAT_ENA_COM_ENTRY(submitted_cmd),
	ENA_STAT_ENA_COM_ENTRY(completed_cmd),
	ENA_STAT_ENA_COM_ENTRY(out_of_space),
	ENA_STAT_ENA_COM_ENTRY(no_completion),
};

#define ENA_STATS_ARRAY_GLOBAL		ARRAY_SIZE(ena_stats_global_strings)
#define ENA_STATS_ARRAY_TX		ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX		ARRAY_SIZE(ena_stats_rx_strings)
#define ENA_STATS_ARRAY_ENA_COM		ARRAY_SIZE(ena_stats_ena_com_strings)
#define ENA_STATS_ARRAY_ENI(adapter)	\
	(ARRAY_SIZE(ena_stats_eni_strings) * (adapter)->eni_stats_supported)

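/*
 * Copy one u64 counter from @src to @dst. The read is retried through the
 * u64_stats_sync sequence counter so that readers (notably on 32-bit
 * architectures) observe a consistent value while the datapath keeps
 * updating the counter.
 */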
static void ena_safe_update_stat(u64 *src, u64 *dst,
				 struct u64_stats_sync *syncp)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_irq(syncp);
		*(dst) = *src;
	} while (u64_stats_fetch_retry_irq(syncp, start));
}

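/*
 * Gather the per-queue Tx counters (and, for non-XDP queues, the Rx
 * counters) into the caller-supplied buffer, advancing *data as entries are
 * written. The ordering must match ena_queue_strings() below.
 */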
static void ena_queue_stats(struct ena_adapter *adapter, u64 **data)
{
	const struct ena_stats *ena_stats;
	struct ena_ring *ring;
	u64 *ptr;
	int i, j;

	for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
		/* Tx stats */
		ring = &adapter->tx_ring[i];

		for (j = 0; j < ENA_STATS_ARRAY_TX; j++) {
			ena_stats = &ena_stats_tx_strings[j];

			ptr = (u64 *)&ring->tx_stats + ena_stats->stat_offset;

			ena_safe_update_stat(ptr, (*data)++, &ring->syncp);
		}
		/* XDP TX queues don't have a RX queue counterpart */
		if (!ENA_IS_XDP_INDEX(adapter, i)) {
			/* Rx stats */
			ring = &adapter->rx_ring[i];

			for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
				ena_stats = &ena_stats_rx_strings[j];

				ptr = (u64 *)&ring->rx_stats +
					ena_stats->stat_offset;

				ena_safe_update_stat(ptr, (*data)++,
						     &ring->syncp);
			}
		}
	}
}

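/* Collect the admin queue (ena_com) counters into the caller's buffer. */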
static void ena_dev_admin_queue_stats(struct ena_adapter *adapter, u64 **data)
{
	const struct ena_stats *ena_stats;
	u64 *ptr;
	int i;

	for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) {
		ena_stats = &ena_stats_ena_com_strings[i];

		ptr = (u64 *)&adapter->ena_dev->admin_queue.stats +
			ena_stats->stat_offset;

		ena_safe_update_stat(ptr, (*data)++, &adapter->syncp);
	}
}

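/*
 * Fill @data with every exported counter: global driver stats, optionally
 * the ENI hardware (allowance) stats, then the per-queue and admin queue
 * stats. The layout must stay in sync with ena_get_strings().
 */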
static void ena_get_stats(struct ena_adapter *adapter,
			  u64 *data,
			  bool eni_stats_needed)
{
	const struct ena_stats *ena_stats;
	u64 *ptr;
	int i;

	for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
		ena_stats = &ena_stats_global_strings[i];

		ptr = (u64 *)&adapter->dev_stats + ena_stats->stat_offset;

		ena_safe_update_stat(ptr, data++, &adapter->syncp);
	}

	if (eni_stats_needed) {
		ena_update_hw_stats(adapter);
		for (i = 0; i < ENA_STATS_ARRAY_ENI(adapter); i++) {
			ena_stats = &ena_stats_eni_strings[i];

			ptr = (u64 *)&adapter->eni_stats +
				ena_stats->stat_offset;

			ena_safe_update_stat(ptr, data++, &adapter->syncp);
		}
	}

	ena_queue_stats(adapter, &data);
	ena_dev_admin_queue_stats(adapter, &data);
}

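/* ethtool .get_ethtool_stats handler, typically reached via "ethtool -S <iface>". */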
static void ena_get_ethtool_stats(struct net_device *netdev,
				  struct ethtool_stats *stats,
				  u64 *data)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	ena_get_stats(adapter, data, adapter->eni_stats_supported);
}

static int ena_get_sw_stats_count(struct ena_adapter *adapter)
{
	return adapter->num_io_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
		+ adapter->xdp_num_queues * ENA_STATS_ARRAY_TX
		+ ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
}

static int ena_get_hw_stats_count(struct ena_adapter *adapter)
{
	return ENA_STATS_ARRAY_ENI(adapter);
}

int ena_get_sset_count(struct net_device *netdev, int sset)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	if (sset != ETH_SS_STATS)
		return -EOPNOTSUPP;

	return ena_get_sw_stats_count(adapter) + ena_get_hw_stats_count(adapter);
}

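/*
 * Emit the per-queue stat names, e.g. "queue_0_tx_cnt", "queue_0_rx_bytes"
 * or "queue_2_xdp_tx_cnt" for XDP Tx queues, advancing *data by
 * ETH_GSTRING_LEN per string.
 */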
static void ena_queue_strings(struct ena_adapter *adapter, u8 **data)
{
	const struct ena_stats *ena_stats;
	bool is_xdp;
	int i, j;

	for (i = 0; i < adapter->num_io_queues + adapter->xdp_num_queues; i++) {
		is_xdp = ENA_IS_XDP_INDEX(adapter, i);
		/* Tx stats */
		for (j = 0; j < ENA_STATS_ARRAY_TX; j++) {
			ena_stats = &ena_stats_tx_strings[j];

			snprintf(*data, ETH_GSTRING_LEN,
				 "queue_%u_%s_%s", i,
				 is_xdp ? "xdp_tx" : "tx", ena_stats->name);
			(*data) += ETH_GSTRING_LEN;
		}

		if (!is_xdp) {
			/* RX stats, in XDP there isn't a RX queue
			 * counterpart
			 */
			for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
				ena_stats = &ena_stats_rx_strings[j];

				snprintf(*data, ETH_GSTRING_LEN,
					 "queue_%u_rx_%s", i, ena_stats->name);
				(*data) += ETH_GSTRING_LEN;
			}
		}
	}
}

static void ena_com_dev_strings(u8 **data)
{
	const struct ena_stats *ena_stats;
	int i;

	for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) {
		ena_stats = &ena_stats_ena_com_strings[i];

		snprintf(*data, ETH_GSTRING_LEN,
			 "ena_admin_q_%s", ena_stats->name);
		(*data) += ETH_GSTRING_LEN;
	}
}

static void ena_get_strings(struct ena_adapter *adapter,
			    u8 *data,
			    bool eni_stats_needed)
{
	const struct ena_stats *ena_stats;
	int i;

	for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
		ena_stats = &ena_stats_global_strings[i];
		memcpy(data, ena_stats->name, ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	if (eni_stats_needed) {
		for (i = 0; i < ENA_STATS_ARRAY_ENI(adapter); i++) {
			ena_stats = &ena_stats_eni_strings[i];
			memcpy(data, ena_stats->name, ETH_GSTRING_LEN);
			data += ETH_GSTRING_LEN;
		}
	}

	ena_queue_strings(adapter, &data);
	ena_com_dev_strings(&data);
}

static void ena_get_ethtool_strings(struct net_device *netdev,
				    u32 sset,
				    u8 *data)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	if (sset != ETH_SS_STATS)
		return;

	ena_get_strings(adapter, data, adapter->eni_stats_supported);
}

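/*
 * Report link speed and autoneg capability as queried from the device via
 * ena_com_get_link_params(); the driver always reports full duplex.
 */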
static int ena_get_link_ksettings(struct net_device *netdev,
				  struct ethtool_link_ksettings *link_ksettings)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	struct ena_admin_get_feature_link_desc *link;
	struct ena_admin_get_feat_resp feat_resp;
	int rc;

	rc = ena_com_get_link_params(ena_dev, &feat_resp);
	if (rc)
		return rc;

	link = &feat_resp.u.link;
	link_ksettings->base.speed = link->speed;

	if (link->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK) {
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(link_ksettings,
						     advertising, Autoneg);
	}

	link_ksettings->base.autoneg =
		(link->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK) ?
		AUTONEG_ENABLE : AUTONEG_DISABLE;

	link_ksettings->base.duplex = DUPLEX_FULL;

	return 0;
}

static int ena_get_coalesce(struct net_device *net_dev,
			    struct ethtool_coalesce *coalesce)
{
	struct ena_adapter *adapter = netdev_priv(net_dev);
	struct ena_com_dev *ena_dev = adapter->ena_dev;

	if (!ena_com_interrupt_moderation_supported(ena_dev))
		return -EOPNOTSUPP;

	coalesce->tx_coalesce_usecs =
		ena_com_get_nonadaptive_moderation_interval_tx(ena_dev) *
			ena_dev->intr_delay_resolution;

	coalesce->rx_coalesce_usecs =
		ena_com_get_nonadaptive_moderation_interval_rx(ena_dev)
		* ena_dev->intr_delay_resolution;

	coalesce->use_adaptive_rx_coalesce =
		ena_com_get_adaptive_moderation_enabled(ena_dev);

	return 0;
}

static void ena_update_tx_rings_nonadaptive_intr_moderation(struct ena_adapter *adapter)
{
	unsigned int val;
	int i;

	val = ena_com_get_nonadaptive_moderation_interval_tx(adapter->ena_dev);

	for (i = 0; i < adapter->num_io_queues; i++)
		adapter->tx_ring[i].smoothed_interval = val;
}

static void ena_update_rx_rings_nonadaptive_intr_moderation(struct ena_adapter *adapter)
{
	unsigned int val;
	int i;

	val = ena_com_get_nonadaptive_moderation_interval_rx(adapter->ena_dev);

	for (i = 0; i < adapter->num_io_queues; i++)
		adapter->rx_ring[i].smoothed_interval = val;
}

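/*
 * ethtool .set_coalesce handler (e.g. "ethtool -C <iface> rx-usecs 64
 * adaptive-rx on"): program the non-adaptive Tx/Rx intervals in the device,
 * propagate them to the rings, and toggle adaptive Rx moderation.
 */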
static int ena_set_coalesce(struct net_device *net_dev,
			    struct ethtool_coalesce *coalesce)
{
	struct ena_adapter *adapter = netdev_priv(net_dev);
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int rc;

	if (!ena_com_interrupt_moderation_supported(ena_dev))
		return -EOPNOTSUPP;

	rc = ena_com_update_nonadaptive_moderation_interval_tx(ena_dev,
								coalesce->tx_coalesce_usecs);
	if (rc)
		return rc;

	ena_update_tx_rings_nonadaptive_intr_moderation(adapter);

	rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev,
								coalesce->rx_coalesce_usecs);
	if (rc)
		return rc;

	ena_update_rx_rings_nonadaptive_intr_moderation(adapter);

	if (coalesce->use_adaptive_rx_coalesce &&
	    !ena_com_get_adaptive_moderation_enabled(ena_dev))
		ena_com_enable_adaptive_moderation(ena_dev);

	if (!coalesce->use_adaptive_rx_coalesce &&
	    ena_com_get_adaptive_moderation_enabled(ena_dev))
		ena_com_disable_adaptive_moderation(ena_dev);

	return 0;
}

static u32 ena_get_msglevel(struct net_device *netdev)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}

static void ena_set_msglevel(struct net_device *netdev, u32 value)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	adapter->msg_enable = value;
}

static void ena_get_drvinfo(struct net_device *dev,
			    struct ethtool_drvinfo *info)
{
	struct ena_adapter *adapter = netdev_priv(dev);

	strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
	strlcpy(info->bus_info, pci_name(adapter->pdev),
		sizeof(info->bus_info));
}

static void ena_get_ringparam(struct net_device *netdev,
			      struct ethtool_ringparam *ring)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	ring->tx_max_pending = adapter->max_tx_ring_size;
	ring->rx_max_pending = adapter->max_rx_ring_size;
	ring->tx_pending = adapter->tx_ring[0].ring_size;
	ring->rx_pending = adapter->rx_ring[0].ring_size;
}

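/*
 * ethtool .set_ringparam handler (e.g. "ethtool -G <iface> rx 2048 tx 1024"):
 * requested sizes are clamped to ENA_MIN_RING_SIZE and rounded down to a
 * power of two before the queues are resized.
 */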
static int ena_set_ringparam(struct net_device *netdev,
			     struct ethtool_ringparam *ring)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	u32 new_tx_size, new_rx_size;

	new_tx_size = ring->tx_pending < ENA_MIN_RING_SIZE ?
			ENA_MIN_RING_SIZE : ring->tx_pending;
	new_tx_size = rounddown_pow_of_two(new_tx_size);

	new_rx_size = ring->rx_pending < ENA_MIN_RING_SIZE ?
			ENA_MIN_RING_SIZE : ring->rx_pending;
	new_rx_size = rounddown_pow_of_two(new_rx_size);

	if (new_tx_size == adapter->requested_tx_ring_size &&
	    new_rx_size == adapter->requested_rx_ring_size)
		return 0;

	return ena_update_queue_sizes(adapter, new_tx_size, new_rx_size);
}

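/*
 * Translate between the device's ENA_ADMIN_RSS_* hash-field bits and the
 * ethtool RXH_* flow-hash bits; the two helpers below are inverses of each
 * other.
 */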
static u32 ena_flow_hash_to_flow_type(u16 hash_fields)
{
	u32 data = 0;

	if (hash_fields & ENA_ADMIN_RSS_L2_DA)
		data |= RXH_L2DA;

	if (hash_fields & ENA_ADMIN_RSS_L3_DA)
		data |= RXH_IP_DST;

	if (hash_fields & ENA_ADMIN_RSS_L3_SA)
		data |= RXH_IP_SRC;

	if (hash_fields & ENA_ADMIN_RSS_L4_DP)
		data |= RXH_L4_B_2_3;

	if (hash_fields & ENA_ADMIN_RSS_L4_SP)
		data |= RXH_L4_B_0_1;

	return data;
}

static u16 ena_flow_data_to_flow_hash(u32 hash_fields)
{
	u16 data = 0;

	if (hash_fields & RXH_L2DA)
		data |= ENA_ADMIN_RSS_L2_DA;

	if (hash_fields & RXH_IP_DST)
		data |= ENA_ADMIN_RSS_L3_DA;

	if (hash_fields & RXH_IP_SRC)
		data |= ENA_ADMIN_RSS_L3_SA;

	if (hash_fields & RXH_L4_B_2_3)
		data |= ENA_ADMIN_RSS_L4_DP;

	if (hash_fields & RXH_L4_B_0_1)
		data |= ENA_ADMIN_RSS_L4_SP;

	return data;
}

static int ena_get_rss_hash(struct ena_com_dev *ena_dev,
			    struct ethtool_rxnfc *cmd)
{
	enum ena_admin_flow_hash_proto proto;
	u16 hash_fields;
	int rc;

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		proto = ENA_ADMIN_RSS_TCP4;
		break;
	case UDP_V4_FLOW:
		proto = ENA_ADMIN_RSS_UDP4;
		break;
	case TCP_V6_FLOW:
		proto = ENA_ADMIN_RSS_TCP6;
		break;
	case UDP_V6_FLOW:
		proto = ENA_ADMIN_RSS_UDP6;
		break;
	case IPV4_FLOW:
		proto = ENA_ADMIN_RSS_IP4;
		break;
	case IPV6_FLOW:
		proto = ENA_ADMIN_RSS_IP6;
		break;
	case ETHER_FLOW:
		proto = ENA_ADMIN_RSS_NOT_IP;
		break;
	default:
		return -EINVAL;
	}

	rc = ena_com_get_hash_ctrl(ena_dev, proto, &hash_fields);
	if (rc)
		return rc;

	cmd->data = ena_flow_hash_to_flow_type(hash_fields);

	return 0;
}

static int ena_set_rss_hash(struct ena_com_dev *ena_dev,
			    struct ethtool_rxnfc *cmd)
{
	enum ena_admin_flow_hash_proto proto;
	u16 hash_fields;

	switch (cmd->flow_type) {
	case TCP_V4_FLOW:
		proto = ENA_ADMIN_RSS_TCP4;
		break;
	case UDP_V4_FLOW:
		proto = ENA_ADMIN_RSS_UDP4;
		break;
	case TCP_V6_FLOW:
		proto = ENA_ADMIN_RSS_TCP6;
		break;
	case UDP_V6_FLOW:
		proto = ENA_ADMIN_RSS_UDP6;
		break;
	case IPV4_FLOW:
		proto = ENA_ADMIN_RSS_IP4;
		break;
	case IPV6_FLOW:
		proto = ENA_ADMIN_RSS_IP6;
		break;
	case ETHER_FLOW:
		proto = ENA_ADMIN_RSS_NOT_IP;
		break;
	default:
		return -EINVAL;
	}

	hash_fields = ena_flow_data_to_flow_hash(cmd->data);

	return ena_com_fill_hash_ctrl(ena_dev, proto, hash_fields);
}

static int ena_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	switch (info->cmd) {
	case ETHTOOL_SRXFH:
		rc = ena_set_rss_hash(adapter->ena_dev, info);
		break;
	case ETHTOOL_SRXCLSRLDEL:
	case ETHTOOL_SRXCLSRLINS:
	default:
		netif_err(adapter, drv, netdev,
			  "Command parameter %d is not supported\n", info->cmd);
		rc = -EOPNOTSUPP;
	}

	return rc;
}

static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
			 u32 *rules)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int rc = 0;

	switch (info->cmd) {
	case ETHTOOL_GRXRINGS:
		info->data = adapter->num_io_queues;
		break;
	case ETHTOOL_GRXFH:
		rc = ena_get_rss_hash(adapter->ena_dev, info);
		break;
	case ETHTOOL_GRXCLSRLCNT:
	case ETHTOOL_GRXCLSRULE:
	case ETHTOOL_GRXCLSRLALL:
	default:
		netif_err(adapter, drv, netdev,
			  "Command parameter %d is not supported\n", info->cmd);
		rc = -EOPNOTSUPP;
	}

	return rc;
}

static u32 ena_get_rxfh_indir_size(struct net_device *netdev)
{
	return ENA_RX_RSS_TABLE_SIZE;
}

static u32 ena_get_rxfh_key_size(struct net_device *netdev)
{
	return ENA_HASH_KEY_SIZE;
}

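/*
 * Push a new RSS indirection table to the device. User-visible queue
 * indices are converted to device Rx queue indices with ENA_IO_RXQ_IDX()
 * before each entry is written, then the whole table is committed.
 */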
static int ena_indirection_table_set(struct ena_adapter *adapter,
				     const u32 *indir)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int i, rc;

	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
		rc = ena_com_indirect_table_fill_entry(ena_dev, i,
						       ENA_IO_RXQ_IDX(indir[i]));
		if (unlikely(rc)) {
			netif_err(adapter, drv, adapter->netdev,
				  "Cannot fill indirect table (index is too large)\n");
			return rc;
		}
	}

	rc = ena_com_indirect_table_set(ena_dev);
	if (rc) {
		netif_err(adapter, drv, adapter->netdev,
			  "Cannot set indirect table\n");
		return rc == -EPERM ? -EOPNOTSUPP : rc;
	}

	return rc;
}

static int ena_indirection_table_get(struct ena_adapter *adapter, u32 *indir)
{
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	int i, rc;

	if (!indir)
		return 0;

	rc = ena_com_indirect_table_get(ena_dev, indir);
	if (rc)
		return rc;

	/* Our internal representation of the indices is: even indices
	 * for Tx and uneven indices for Rx. We need to convert the Rx
	 * indices to be consecutive
	 */
	for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++)
		indir[i] = ENA_IO_RXQ_IDX_TO_COMBINED_IDX(indir[i]);

	return rc;
}

static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
			u8 *hfunc)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	enum ena_admin_hash_functions ena_func;
	u8 func;
	int rc;

	rc = ena_indirection_table_get(adapter, indir);
	if (rc)
		return rc;

	/* We call this function in order to check if the device
	 * supports getting/setting the hash function.
	 */
	rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func);
	if (rc) {
		if (rc == -EOPNOTSUPP)
			rc = 0;

		return rc;
	}

	rc = ena_com_get_hash_key(adapter->ena_dev, key);
	if (rc)
		return rc;

	switch (ena_func) {
	case ENA_ADMIN_TOEPLITZ:
		func = ETH_RSS_HASH_TOP;
		break;
	case ENA_ADMIN_CRC32:
		func = ETH_RSS_HASH_CRC32;
		break;
	default:
		netif_err(adapter, drv, netdev,
			  "Command parameter is not supported\n");
		return -EOPNOTSUPP;
	}

	if (hfunc)
		*hfunc = func;

	return 0;
}

static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
			const u8 *key, const u8 hfunc)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	struct ena_com_dev *ena_dev = adapter->ena_dev;
	enum ena_admin_hash_functions func = 0;
	int rc;

	if (indir) {
		rc = ena_indirection_table_set(adapter, indir);
		if (rc)
			return rc;
	}

	switch (hfunc) {
	case ETH_RSS_HASH_NO_CHANGE:
		func = ena_com_get_current_hash_function(ena_dev);
		break;
	case ETH_RSS_HASH_TOP:
		func = ENA_ADMIN_TOEPLITZ;
		break;
	case ETH_RSS_HASH_CRC32:
		func = ENA_ADMIN_CRC32;
		break;
	default:
		netif_err(adapter, drv, netdev, "Unsupported hfunc %d\n",
			  hfunc);
		return -EOPNOTSUPP;
	}

	if (key || func) {
		rc = ena_com_fill_hash_function(ena_dev, func, key,
						ENA_HASH_KEY_SIZE,
						0xFFFFFFFF);
		if (unlikely(rc)) {
			netif_err(adapter, drv, netdev, "Cannot fill key\n");
			return rc == -EPERM ? -EOPNOTSUPP : rc;
		}
	}

	return 0;
}

static void ena_get_channels(struct net_device *netdev,
			     struct ethtool_channels *channels)
{
	struct ena_adapter *adapter = netdev_priv(netdev);

	channels->max_combined = adapter->max_num_io_queues;
	channels->combined_count = adapter->num_io_queues;
}

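/*
 * ethtool .set_channels handler (e.g. "ethtool -L <iface> combined 8"):
 * reject counts below ENA_MIN_NUM_IO_QUEUES or counts an attached XDP
 * program cannot support, then resize the IO queue set.
 */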
static int ena_set_channels(struct net_device *netdev,
			    struct ethtool_channels *channels)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	u32 count = channels->combined_count;

	/* The check for max value is already done in ethtool */
	if (count < ENA_MIN_NUM_IO_QUEUES ||
	    (ena_xdp_present(adapter) &&
	    !ena_xdp_legal_queue_count(adapter, count)))
		return -EINVAL;

	return ena_update_queue_count(adapter, count);
}

static int ena_get_tunable(struct net_device *netdev,
			   const struct ethtool_tunable *tuna, void *data)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int ret = 0;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		*(u32 *)data = adapter->rx_copybreak;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static int ena_set_tunable(struct net_device *netdev,
			   const struct ethtool_tunable *tuna,
			   const void *data)
{
	struct ena_adapter *adapter = netdev_priv(netdev);
	int ret = 0;
	u32 len;

	switch (tuna->id) {
	case ETHTOOL_RX_COPYBREAK:
		len = *(u32 *)data;
		if (len > adapter->netdev->mtu) {
			ret = -EINVAL;
			break;
		}
		adapter->rx_copybreak = len;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static const struct ethtool_ops ena_ethtool_ops = {
	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
				     ETHTOOL_COALESCE_USE_ADAPTIVE_RX,
	.get_link_ksettings	= ena_get_link_ksettings,
	.get_drvinfo		= ena_get_drvinfo,
	.get_msglevel		= ena_get_msglevel,
	.set_msglevel		= ena_set_msglevel,
	.get_link		= ethtool_op_get_link,
	.get_coalesce		= ena_get_coalesce,
	.set_coalesce		= ena_set_coalesce,
	.get_ringparam		= ena_get_ringparam,
	.set_ringparam		= ena_set_ringparam,
	.get_sset_count		= ena_get_sset_count,
	.get_strings		= ena_get_ethtool_strings,
	.get_ethtool_stats	= ena_get_ethtool_stats,
	.get_rxnfc		= ena_get_rxnfc,
	.set_rxnfc		= ena_set_rxnfc,
	.get_rxfh_indir_size	= ena_get_rxfh_indir_size,
	.get_rxfh_key_size	= ena_get_rxfh_key_size,
	.get_rxfh		= ena_get_rxfh,
	.set_rxfh		= ena_set_rxfh,
	.get_channels		= ena_get_channels,
	.set_channels		= ena_set_channels,
	.get_tunable		= ena_get_tunable,
	.set_tunable		= ena_set_tunable,
	.get_ts_info		= ethtool_op_get_ts_info,
};

void ena_set_ethtool_ops(struct net_device *netdev)
{
	netdev->ethtool_ops = &ena_ethtool_ops;
}

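/*
 * Render every software statistic as name/value pairs, either into a
 * caller-provided buffer or, when @buf is NULL, to the kernel log. Backs
 * ena_dump_stats_to_buf() and ena_dump_stats_to_dmesg() below.
 */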
static void ena_dump_stats_ex(struct ena_adapter *adapter, u8 *buf)
{
	struct net_device *netdev = adapter->netdev;
	u8 *strings_buf;
	u64 *data_buf;
	int strings_num;
	int i, rc;

	strings_num = ena_get_sw_stats_count(adapter);
	if (strings_num <= 0) {
		netif_err(adapter, drv, netdev, "Can't get stats num\n");
		return;
	}

	strings_buf = devm_kcalloc(&adapter->pdev->dev,
				   ETH_GSTRING_LEN, strings_num,
				   GFP_ATOMIC);
	if (!strings_buf) {
		netif_err(adapter, drv, netdev,
			  "Failed to allocate strings_buf\n");
		return;
	}

	data_buf = devm_kcalloc(&adapter->pdev->dev,
				strings_num, sizeof(u64),
				GFP_ATOMIC);
	if (!data_buf) {
		netif_err(adapter, drv, netdev,
			  "Failed to allocate data buf\n");
		devm_kfree(&adapter->pdev->dev, strings_buf);
		return;
	}

	ena_get_strings(adapter, strings_buf, false);
	ena_get_stats(adapter, data_buf, false);

	/* If there is a buffer, dump stats, otherwise print them to dmesg */
	if (buf)
		for (i = 0; i < strings_num; i++) {
			rc = snprintf(buf, ETH_GSTRING_LEN + sizeof(u64),
				      "%s %llu\n",
				      strings_buf + i * ETH_GSTRING_LEN,
				      data_buf[i]);
			buf += rc;
		}
	else
		for (i = 0; i < strings_num; i++)
			netif_err(adapter, drv, netdev, "%s: %llu\n",
				  strings_buf + i * ETH_GSTRING_LEN,
				  data_buf[i]);

	devm_kfree(&adapter->pdev->dev, strings_buf);
	devm_kfree(&adapter->pdev->dev, data_buf);
}

void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf)
{
	if (!buf)
		return;

	ena_dump_stats_ex(adapter, buf);
}

void ena_dump_stats_to_dmesg(struct ena_adapter *adapter)
{
	ena_dump_stats_ex(adapter, NULL);
}