/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/pci.h>

#include "ena_netdev.h"

struct ena_stats {
        char name[ETH_GSTRING_LEN];
        int stat_offset;
};

#define ENA_STAT_ENA_COM_ENTRY(stat) { \
        .name = #stat, \
        .stat_offset = offsetof(struct ena_com_stats_admin, stat) \
}

#define ENA_STAT_ENTRY(stat, stat_type) { \
        .name = #stat, \
        .stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
}

#define ENA_STAT_RX_ENTRY(stat) \
        ENA_STAT_ENTRY(stat, rx)

#define ENA_STAT_TX_ENTRY(stat) \
        ENA_STAT_ENTRY(stat, tx)

#define ENA_STAT_GLOBAL_ENTRY(stat) \
        ENA_STAT_ENTRY(stat, dev)

static const struct ena_stats ena_stats_global_strings[] = {
        ENA_STAT_GLOBAL_ENTRY(tx_timeout),
        ENA_STAT_GLOBAL_ENTRY(suspend),
        ENA_STAT_GLOBAL_ENTRY(resume),
        ENA_STAT_GLOBAL_ENTRY(wd_expired),
        ENA_STAT_GLOBAL_ENTRY(interface_up),
        ENA_STAT_GLOBAL_ENTRY(interface_down),
        ENA_STAT_GLOBAL_ENTRY(admin_q_pause),
};

static const struct ena_stats ena_stats_tx_strings[] = {
        ENA_STAT_TX_ENTRY(cnt),
        ENA_STAT_TX_ENTRY(bytes),
        ENA_STAT_TX_ENTRY(queue_stop),
        ENA_STAT_TX_ENTRY(queue_wakeup),
        ENA_STAT_TX_ENTRY(dma_mapping_err),
        ENA_STAT_TX_ENTRY(linearize),
        ENA_STAT_TX_ENTRY(linearize_failed),
        ENA_STAT_TX_ENTRY(napi_comp),
        ENA_STAT_TX_ENTRY(tx_poll),
        ENA_STAT_TX_ENTRY(doorbells),
        ENA_STAT_TX_ENTRY(prepare_ctx_err),
        ENA_STAT_TX_ENTRY(bad_req_id),
        ENA_STAT_TX_ENTRY(missed_tx),
};

static const struct ena_stats ena_stats_rx_strings[] = {
        ENA_STAT_RX_ENTRY(cnt),
        ENA_STAT_RX_ENTRY(bytes),
        ENA_STAT_RX_ENTRY(refil_partial),
        ENA_STAT_RX_ENTRY(bad_csum),
        ENA_STAT_RX_ENTRY(page_alloc_fail),
        ENA_STAT_RX_ENTRY(skb_alloc_fail),
        ENA_STAT_RX_ENTRY(dma_mapping_err),
        ENA_STAT_RX_ENTRY(bad_desc_num),
        ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
        ENA_STAT_RX_ENTRY(bad_req_id),
        ENA_STAT_RX_ENTRY(empty_rx_ring),
};

static const struct ena_stats ena_stats_ena_com_strings[] = {
        ENA_STAT_ENA_COM_ENTRY(aborted_cmd),
        ENA_STAT_ENA_COM_ENTRY(submitted_cmd),
        ENA_STAT_ENA_COM_ENTRY(completed_cmd),
        ENA_STAT_ENA_COM_ENTRY(out_of_space),
        ENA_STAT_ENA_COM_ENTRY(no_completion),
};

#define ENA_STATS_ARRAY_GLOBAL  ARRAY_SIZE(ena_stats_global_strings)
#define ENA_STATS_ARRAY_TX      ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX      ARRAY_SIZE(ena_stats_rx_strings)
#define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings)

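/* Copy a single u64 counter into *dst, retrying the read until a consistent
 * snapshot is obtained through the associated u64_stats_sync.
 */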
static void ena_safe_update_stat(u64 *src, u64 *dst,
                                 struct u64_stats_sync *syncp)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin_irq(syncp);
                *(dst) = *src;
        } while (u64_stats_fetch_retry_irq(syncp, start));
}

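/* Append the per-queue Tx and Rx counters of every I/O queue to the caller's
 * buffer, advancing *data as each value is written.
 */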
static void ena_queue_stats(struct ena_adapter *adapter, u64 **data)
{
        const struct ena_stats *ena_stats;
        struct ena_ring *ring;
        u64 *ptr;
        int i, j;

        for (i = 0; i < adapter->num_queues; i++) {
                /* Tx stats */
                ring = &adapter->tx_ring[i];

                for (j = 0; j < ENA_STATS_ARRAY_TX; j++) {
                        ena_stats = &ena_stats_tx_strings[j];

                        ptr = (u64 *)((uintptr_t)&ring->tx_stats +
                                (uintptr_t)ena_stats->stat_offset);

                        ena_safe_update_stat(ptr, (*data)++, &ring->syncp);
                }

                /* Rx stats */
                ring = &adapter->rx_ring[i];

                for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
                        ena_stats = &ena_stats_rx_strings[j];

                        ptr = (u64 *)((uintptr_t)&ring->rx_stats +
                                (uintptr_t)ena_stats->stat_offset);

                        ena_safe_update_stat(ptr, (*data)++, &ring->syncp);
                }
        }
}

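/* Append the admin-queue (ena_com) counters.  These are plain u32 values
 * maintained by the low-level communication layer, so they are copied
 * directly (widened to u64) without u64_stats_sync protection.
 */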
static void ena_dev_admin_queue_stats(struct ena_adapter *adapter, u64 **data)
{
        const struct ena_stats *ena_stats;
        u32 *ptr;
        int i;

        for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) {
                ena_stats = &ena_stats_ena_com_strings[i];

                ptr = (u32 *)((uintptr_t)&adapter->ena_dev->admin_queue.stats +
                        (uintptr_t)ena_stats->stat_offset);

                *(*data)++ = *ptr;
        }
}

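/* ethtool -S entry point: emit the global (device-wide) counters first,
 * then the per-queue counters and finally the admin-queue counters.
 */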
static void ena_get_ethtool_stats(struct net_device *netdev,
                                  struct ethtool_stats *stats,
                                  u64 *data)
{
        struct ena_adapter *adapter = netdev_priv(netdev);
        const struct ena_stats *ena_stats;
        u64 *ptr;
        int i;

        for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
                ena_stats = &ena_stats_global_strings[i];

                ptr = (u64 *)((uintptr_t)&adapter->dev_stats +
                        (uintptr_t)ena_stats->stat_offset);

                ena_safe_update_stat(ptr, data++, &adapter->syncp);
        }

        ena_queue_stats(adapter, &data);
        ena_dev_admin_queue_stats(adapter, &data);
}

int ena_get_sset_count(struct net_device *netdev, int sset)
{
        struct ena_adapter *adapter = netdev_priv(netdev);

        if (sset != ETH_SS_STATS)
                return -EOPNOTSUPP;

        return adapter->num_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
                + ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
}

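/* Build the "queue_<i>_tx_<name>" and "queue_<i>_rx_<name>" strings that
 * label the per-queue counters reported by ena_queue_stats().
 */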
static void ena_queue_strings(struct ena_adapter *adapter, u8 **data)
{
        const struct ena_stats *ena_stats;
        int i, j;

        for (i = 0; i < adapter->num_queues; i++) {
                /* Tx stats */
                for (j = 0; j < ENA_STATS_ARRAY_TX; j++) {
                        ena_stats = &ena_stats_tx_strings[j];

                        snprintf(*data, ETH_GSTRING_LEN,
                                 "queue_%u_tx_%s", i, ena_stats->name);
                        (*data) += ETH_GSTRING_LEN;
                }
                /* Rx stats */
                for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
                        ena_stats = &ena_stats_rx_strings[j];

                        snprintf(*data, ETH_GSTRING_LEN,
                                 "queue_%u_rx_%s", i, ena_stats->name);
                        (*data) += ETH_GSTRING_LEN;
                }
        }
}

static void ena_com_dev_strings(u8 **data)
{
        const struct ena_stats *ena_stats;
        int i;

        for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) {
                ena_stats = &ena_stats_ena_com_strings[i];

                snprintf(*data, ETH_GSTRING_LEN,
                         "ena_admin_q_%s", ena_stats->name);
                (*data) += ETH_GSTRING_LEN;
        }
}

static void ena_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
        struct ena_adapter *adapter = netdev_priv(netdev);
        const struct ena_stats *ena_stats;
        int i;

        if (sset != ETH_SS_STATS)
                return;

        for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
                ena_stats = &ena_stats_global_strings[i];

                memcpy(data, ena_stats->name, ETH_GSTRING_LEN);
                data += ETH_GSTRING_LEN;
        }

        ena_queue_strings(adapter, &data);
        ena_com_dev_strings(&data);
}

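/* Report link speed, duplex and autonegotiation state as returned by the
 * device through ena_com_get_link_params().
 */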
static int ena_get_link_ksettings(struct net_device *netdev,
                                  struct ethtool_link_ksettings *link_ksettings)
{
        struct ena_adapter *adapter = netdev_priv(netdev);
        struct ena_com_dev *ena_dev = adapter->ena_dev;
        struct ena_admin_get_feature_link_desc *link;
        struct ena_admin_get_feat_resp feat_resp;
        int rc;

        rc = ena_com_get_link_params(ena_dev, &feat_resp);
        if (rc)
                return rc;

        link = &feat_resp.u.link;
        link_ksettings->base.speed = link->speed;

        if (link->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK) {
                ethtool_link_ksettings_add_link_mode(link_ksettings,
                                                     supported, Autoneg);
                ethtool_link_ksettings_add_link_mode(link_ksettings,
                                                     advertising, Autoneg);
        }

        link_ksettings->base.autoneg =
                (link->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK) ?
                AUTONEG_ENABLE : AUTONEG_DISABLE;

        link_ksettings->base.duplex = DUPLEX_FULL;

        return 0;
}

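/* Report the current interrupt moderation (coalescing) settings.  Tx always
 * uses a fixed interval; Rx uses either a fixed interval or, when adaptive
 * moderation is enabled, the per-level table kept by ena_com.
 */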
static int ena_get_coalesce(struct net_device *net_dev,
                            struct ethtool_coalesce *coalesce)
{
        struct ena_adapter *adapter = netdev_priv(net_dev);
        struct ena_com_dev *ena_dev = adapter->ena_dev;
        struct ena_intr_moder_entry intr_moder_entry;

        if (!ena_com_interrupt_moderation_supported(ena_dev)) {
                /* the device doesn't support interrupt moderation */
                return -EOPNOTSUPP;
        }

        coalesce->tx_coalesce_usecs =
                ena_com_get_nonadaptive_moderation_interval_tx(ena_dev) /
                        ena_dev->intr_delay_resolution;

        if (!ena_com_get_adaptive_moderation_enabled(ena_dev)) {
                coalesce->rx_coalesce_usecs =
                        ena_com_get_nonadaptive_moderation_interval_rx(ena_dev)
                        / ena_dev->intr_delay_resolution;
        } else {
                ena_com_get_intr_moderation_entry(adapter->ena_dev, ENA_INTR_MODER_LOWEST, &intr_moder_entry);
                coalesce->rx_coalesce_usecs_low = intr_moder_entry.intr_moder_interval;
                coalesce->rx_max_coalesced_frames_low = intr_moder_entry.pkts_per_interval;

                ena_com_get_intr_moderation_entry(adapter->ena_dev, ENA_INTR_MODER_MID, &intr_moder_entry);
                coalesce->rx_coalesce_usecs = intr_moder_entry.intr_moder_interval;
                coalesce->rx_max_coalesced_frames = intr_moder_entry.pkts_per_interval;

                ena_com_get_intr_moderation_entry(adapter->ena_dev, ENA_INTR_MODER_HIGHEST, &intr_moder_entry);
                coalesce->rx_coalesce_usecs_high = intr_moder_entry.intr_moder_interval;
                coalesce->rx_max_coalesced_frames_high = intr_moder_entry.pkts_per_interval;
        }

        coalesce->use_adaptive_rx_coalesce =
                ena_com_get_adaptive_moderation_enabled(ena_dev);

        return 0;
}

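/* Propagate the non-adaptive Tx moderation interval to every Tx ring's
 * smoothed_interval so the datapath picks up the new value.
 */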
static void ena_update_tx_rings_intr_moderation(struct ena_adapter *adapter)
{
        unsigned int val;
        int i;

        val = ena_com_get_nonadaptive_moderation_interval_tx(adapter->ena_dev);

        for (i = 0; i < adapter->num_queues; i++)
                adapter->tx_ring[i].smoothed_interval = val;
}

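/* Apply new coalescing settings.  Parameters the device cannot honour are
 * rejected up front; the Tx interval is always updated, while the Rx side
 * switches between adaptive and non-adaptive moderation as requested.
 */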
static int ena_set_coalesce(struct net_device *net_dev,
                            struct ethtool_coalesce *coalesce)
{
        struct ena_adapter *adapter = netdev_priv(net_dev);
        struct ena_com_dev *ena_dev = adapter->ena_dev;
        struct ena_intr_moder_entry intr_moder_entry;
        int rc;

        if (!ena_com_interrupt_moderation_supported(ena_dev)) {
                /* the device doesn't support interrupt moderation */
                return -EOPNOTSUPP;
        }

        if (coalesce->rx_coalesce_usecs_irq ||
            coalesce->rx_max_coalesced_frames_irq ||
            coalesce->tx_coalesce_usecs_irq ||
            coalesce->tx_max_coalesced_frames ||
            coalesce->tx_max_coalesced_frames_irq ||
            coalesce->stats_block_coalesce_usecs ||
            coalesce->use_adaptive_tx_coalesce ||
            coalesce->pkt_rate_low ||
            coalesce->tx_coalesce_usecs_low ||
            coalesce->tx_max_coalesced_frames_low ||
            coalesce->pkt_rate_high ||
            coalesce->tx_coalesce_usecs_high ||
            coalesce->tx_max_coalesced_frames_high ||
            coalesce->rate_sample_interval)
                return -EINVAL;

        rc = ena_com_update_nonadaptive_moderation_interval_tx(ena_dev,
                                                               coalesce->tx_coalesce_usecs);
        if (rc)
                return rc;

        ena_update_tx_rings_intr_moderation(adapter);

        if (ena_com_get_adaptive_moderation_enabled(ena_dev)) {
                if (!coalesce->use_adaptive_rx_coalesce) {
                        ena_com_disable_adaptive_moderation(ena_dev);
                        rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev,
                                                                               coalesce->rx_coalesce_usecs);
                        return rc;
                }
        } else { /* was in non-adaptive mode */
                if (coalesce->use_adaptive_rx_coalesce) {
                        ena_com_enable_adaptive_moderation(ena_dev);
                } else {
                        rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev,
                                                                               coalesce->rx_coalesce_usecs);
                        return rc;
                }
        }

        intr_moder_entry.intr_moder_interval = coalesce->rx_coalesce_usecs_low;
        intr_moder_entry.pkts_per_interval = coalesce->rx_max_coalesced_frames_low;
        intr_moder_entry.bytes_per_interval = ENA_INTR_BYTE_COUNT_NOT_SUPPORTED;
        ena_com_init_intr_moderation_entry(adapter->ena_dev, ENA_INTR_MODER_LOWEST, &intr_moder_entry);

        intr_moder_entry.intr_moder_interval = coalesce->rx_coalesce_usecs;
        intr_moder_entry.pkts_per_interval = coalesce->rx_max_coalesced_frames;
        intr_moder_entry.bytes_per_interval = ENA_INTR_BYTE_COUNT_NOT_SUPPORTED;
        ena_com_init_intr_moderation_entry(adapter->ena_dev, ENA_INTR_MODER_MID, &intr_moder_entry);

        intr_moder_entry.intr_moder_interval = coalesce->rx_coalesce_usecs_high;
        intr_moder_entry.pkts_per_interval = coalesce->rx_max_coalesced_frames_high;
        intr_moder_entry.bytes_per_interval = ENA_INTR_BYTE_COUNT_NOT_SUPPORTED;
        ena_com_init_intr_moderation_entry(adapter->ena_dev, ENA_INTR_MODER_HIGHEST, &intr_moder_entry);

        return 0;
}

static u32 ena_get_msglevel(struct net_device *netdev)
{
        struct ena_adapter *adapter = netdev_priv(netdev);

        return adapter->msg_enable;
}

static void ena_set_msglevel(struct net_device *netdev, u32 value)
{
        struct ena_adapter *adapter = netdev_priv(netdev);

        adapter->msg_enable = value;
}

static void ena_get_drvinfo(struct net_device *dev,
                            struct ethtool_drvinfo *info)
{
        struct ena_adapter *adapter = netdev_priv(dev);

        strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
        strlcpy(info->bus_info, pci_name(adapter->pdev),
                sizeof(info->bus_info));
}

static void ena_get_ringparam(struct net_device *netdev,
                              struct ethtool_ringparam *ring)
{
        struct ena_adapter *adapter = netdev_priv(netdev);
        struct ena_ring *tx_ring = &adapter->tx_ring[0];
        struct ena_ring *rx_ring = &adapter->rx_ring[0];

        ring->rx_max_pending = rx_ring->ring_size;
        ring->tx_max_pending = tx_ring->ring_size;
        ring->rx_pending = rx_ring->ring_size;
        ring->tx_pending = tx_ring->ring_size;
}

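/* Translate between the device's ENA_ADMIN_RSS_* hash-field bits and the
 * RXH_* bits used by the ethtool RSS (rxnfc) interface, in both directions.
 */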
static u32 ena_flow_hash_to_flow_type(u16 hash_fields)
{
        u32 data = 0;

        if (hash_fields & ENA_ADMIN_RSS_L2_DA)
                data |= RXH_L2DA;

        if (hash_fields & ENA_ADMIN_RSS_L3_DA)
                data |= RXH_IP_DST;

        if (hash_fields & ENA_ADMIN_RSS_L3_SA)
                data |= RXH_IP_SRC;

        if (hash_fields & ENA_ADMIN_RSS_L4_DP)
                data |= RXH_L4_B_2_3;

        if (hash_fields & ENA_ADMIN_RSS_L4_SP)
                data |= RXH_L4_B_0_1;

        return data;
}

static u16 ena_flow_data_to_flow_hash(u32 hash_fields)
{
        u16 data = 0;

        if (hash_fields & RXH_L2DA)
                data |= ENA_ADMIN_RSS_L2_DA;

        if (hash_fields & RXH_IP_DST)
                data |= ENA_ADMIN_RSS_L3_DA;

        if (hash_fields & RXH_IP_SRC)
                data |= ENA_ADMIN_RSS_L3_SA;

        if (hash_fields & RXH_L4_B_2_3)
                data |= ENA_ADMIN_RSS_L4_DP;

        if (hash_fields & RXH_L4_B_0_1)
                data |= ENA_ADMIN_RSS_L4_SP;

        return data;
}

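/* ETHTOOL_GRXFH helper: map the requested flow type to the matching ENA
 * protocol and report which header fields currently feed its RSS hash.
 */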
static int ena_get_rss_hash(struct ena_com_dev *ena_dev,
                            struct ethtool_rxnfc *cmd)
{
        enum ena_admin_flow_hash_proto proto;
        u16 hash_fields;
        int rc;

        switch (cmd->flow_type) {
        case TCP_V4_FLOW:
                proto = ENA_ADMIN_RSS_TCP4;
                break;
        case UDP_V4_FLOW:
                proto = ENA_ADMIN_RSS_UDP4;
                break;
        case TCP_V6_FLOW:
                proto = ENA_ADMIN_RSS_TCP6;
                break;
        case UDP_V6_FLOW:
                proto = ENA_ADMIN_RSS_UDP6;
                break;
        case IPV4_FLOW:
                proto = ENA_ADMIN_RSS_IP4;
                break;
        case IPV6_FLOW:
                proto = ENA_ADMIN_RSS_IP6;
                break;
        case ETHER_FLOW:
                proto = ENA_ADMIN_RSS_NOT_IP;
                break;
        default:
                return -EOPNOTSUPP;
        }

        rc = ena_com_get_hash_ctrl(ena_dev, proto, &hash_fields);
        if (rc)
                return rc;

        cmd->data = ena_flow_hash_to_flow_type(hash_fields);

        return 0;
}

static int ena_set_rss_hash(struct ena_com_dev *ena_dev,
                            struct ethtool_rxnfc *cmd)
{
        enum ena_admin_flow_hash_proto proto;
        u16 hash_fields;

        switch (cmd->flow_type) {
        case TCP_V4_FLOW:
                proto = ENA_ADMIN_RSS_TCP4;
                break;
        case UDP_V4_FLOW:
                proto = ENA_ADMIN_RSS_UDP4;
                break;
        case TCP_V6_FLOW:
                proto = ENA_ADMIN_RSS_TCP6;
                break;
        case UDP_V6_FLOW:
                proto = ENA_ADMIN_RSS_UDP6;
                break;
        case IPV4_FLOW:
                proto = ENA_ADMIN_RSS_IP4;
                break;
        case IPV6_FLOW:
                proto = ENA_ADMIN_RSS_IP6;
                break;
        case ETHER_FLOW:
                proto = ENA_ADMIN_RSS_NOT_IP;
                break;
        default:
                return -EOPNOTSUPP;
        }

        hash_fields = ena_flow_data_to_flow_hash(cmd->data);

        return ena_com_fill_hash_ctrl(ena_dev, proto, hash_fields);
}

static int ena_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
{
        struct ena_adapter *adapter = netdev_priv(netdev);
        int rc = 0;

        switch (info->cmd) {
        case ETHTOOL_SRXFH:
                rc = ena_set_rss_hash(adapter->ena_dev, info);
                break;
        case ETHTOOL_SRXCLSRLDEL:
        case ETHTOOL_SRXCLSRLINS:
        default:
                netif_err(adapter, drv, netdev,
                          "Command parameter %d is not supported\n", info->cmd);
                rc = -EOPNOTSUPP;
        }

        return rc;
}

static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
                         u32 *rules)
{
        struct ena_adapter *adapter = netdev_priv(netdev);
        int rc = 0;

        switch (info->cmd) {
        case ETHTOOL_GRXRINGS:
                info->data = adapter->num_queues;
                rc = 0;
                break;
        case ETHTOOL_GRXFH:
                rc = ena_get_rss_hash(adapter->ena_dev, info);
                break;
        case ETHTOOL_GRXCLSRLCNT:
        case ETHTOOL_GRXCLSRULE:
        case ETHTOOL_GRXCLSRLALL:
        default:
                netif_err(adapter, drv, netdev,
                          "Command parameter %d is not supported\n", info->cmd);
                rc = -EOPNOTSUPP;
        }

        return rc;
}

static u32 ena_get_rxfh_indir_size(struct net_device *netdev)
{
        return ENA_RX_RSS_TABLE_SIZE;
}

static u32 ena_get_rxfh_key_size(struct net_device *netdev)
{
        return ENA_HASH_KEY_SIZE;
}

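/* Read back the RSS indirection table, hash key and hash function from the
 * device, translating the ENA hash function into the ETH_RSS_HASH_* value.
 */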
static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
                        u8 *hfunc)
{
        struct ena_adapter *adapter = netdev_priv(netdev);
        enum ena_admin_hash_functions ena_func;
        u8 func;
        int rc;

        rc = ena_com_indirect_table_get(adapter->ena_dev, indir);
        if (rc)
                return rc;

        rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func, key);
        if (rc)
                return rc;

        switch (ena_func) {
        case ENA_ADMIN_TOEPLITZ:
                func = ETH_RSS_HASH_TOP;
                break;
        case ENA_ADMIN_CRC32:
                func = ETH_RSS_HASH_XOR;
                break;
        default:
                netif_err(adapter, drv, netdev,
                          "Command parameter is not supported\n");
                return -EOPNOTSUPP;
        }

        if (hfunc)
                *hfunc = func;

        return rc;
}

static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
                        const u8 *key, const u8 hfunc)
{
        struct ena_adapter *adapter = netdev_priv(netdev);
        struct ena_com_dev *ena_dev = adapter->ena_dev;
        enum ena_admin_hash_functions func;
        int rc, i;

        if (indir) {
                for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
                        rc = ena_com_indirect_table_fill_entry(ena_dev,
                                                               ENA_IO_RXQ_IDX(indir[i]),
                                                               i);
                        if (unlikely(rc)) {
                                netif_err(adapter, drv, netdev,
                                          "Cannot fill indirect table (index is too large)\n");
                                return rc;
                        }
                }

                rc = ena_com_indirect_table_set(ena_dev);
                if (rc) {
                        netif_err(adapter, drv, netdev,
                                  "Cannot set indirect table\n");
                        return rc == -EPERM ? -EOPNOTSUPP : rc;
                }
        }

        switch (hfunc) {
        case ETH_RSS_HASH_TOP:
                func = ENA_ADMIN_TOEPLITZ;
                break;
        case ETH_RSS_HASH_XOR:
                func = ENA_ADMIN_CRC32;
                break;
        default:
                netif_err(adapter, drv, netdev, "Unsupported hfunc %d\n",
                          hfunc);
                return -EOPNOTSUPP;
        }

        if (key) {
                rc = ena_com_fill_hash_function(ena_dev, func, key,
                                                ENA_HASH_KEY_SIZE,
                                                0xFFFFFFFF);
                if (unlikely(rc)) {
                        netif_err(adapter, drv, netdev, "Cannot fill key\n");
                        return rc == -EPERM ? -EOPNOTSUPP : rc;
                }
        }

        return 0;
}

static void ena_get_channels(struct net_device *netdev,
                             struct ethtool_channels *channels)
{
        struct ena_adapter *adapter = netdev_priv(netdev);

        channels->max_rx = adapter->num_queues;
        channels->max_tx = adapter->num_queues;
        channels->max_other = 0;
        channels->max_combined = 0;
        channels->rx_count = adapter->num_queues;
        channels->tx_count = adapter->num_queues;
        channels->other_count = 0;
        channels->combined_count = 0;
}

static int ena_get_tunable(struct net_device *netdev,
                           const struct ethtool_tunable *tuna, void *data)
{
        struct ena_adapter *adapter = netdev_priv(netdev);
        int ret = 0;

        switch (tuna->id) {
        case ETHTOOL_RX_COPYBREAK:
                *(u32 *)data = adapter->rx_copybreak;
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int ena_set_tunable(struct net_device *netdev,
                           const struct ethtool_tunable *tuna,
                           const void *data)
{
        struct ena_adapter *adapter = netdev_priv(netdev);
        int ret = 0;
        u32 len;

        switch (tuna->id) {
        case ETHTOOL_RX_COPYBREAK:
                len = *(u32 *)data;
                if (len > adapter->netdev->mtu) {
                        ret = -EINVAL;
                        break;
                }
                adapter->rx_copybreak = len;
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

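/* ethtool operations exported by the ENA driver; hooked up to the netdev in
 * ena_set_ethtool_ops() below.
 */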
static const struct ethtool_ops ena_ethtool_ops = {
        .get_link_ksettings     = ena_get_link_ksettings,
        .get_drvinfo            = ena_get_drvinfo,
        .get_msglevel           = ena_get_msglevel,
        .set_msglevel           = ena_set_msglevel,
        .get_link               = ethtool_op_get_link,
        .get_coalesce           = ena_get_coalesce,
        .set_coalesce           = ena_set_coalesce,
        .get_ringparam          = ena_get_ringparam,
        .get_sset_count         = ena_get_sset_count,
        .get_strings            = ena_get_strings,
        .get_ethtool_stats      = ena_get_ethtool_stats,
        .get_rxnfc              = ena_get_rxnfc,
        .set_rxnfc              = ena_set_rxnfc,
        .get_rxfh_indir_size    = ena_get_rxfh_indir_size,
        .get_rxfh_key_size      = ena_get_rxfh_key_size,
        .get_rxfh               = ena_get_rxfh,
        .set_rxfh               = ena_set_rxfh,
        .get_channels           = ena_get_channels,
        .get_tunable            = ena_get_tunable,
        .set_tunable            = ena_set_tunable,
};

void ena_set_ethtool_ops(struct net_device *netdev)
{
        netdev->ethtool_ops = &ena_ethtool_ops;
}

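/* Render all ethtool statistics as "name value" lines, either into the
 * caller-supplied buffer or, when buf is NULL, straight to the kernel log.
 */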
static void ena_dump_stats_ex(struct ena_adapter *adapter, u8 *buf)
{
        struct net_device *netdev = adapter->netdev;
        u8 *strings_buf;
        u64 *data_buf;
        int strings_num;
        int i, rc;

        strings_num = ena_get_sset_count(netdev, ETH_SS_STATS);
        if (strings_num <= 0) {
                netif_err(adapter, drv, netdev, "Can't get stats num\n");
                return;
        }

        strings_buf = devm_kzalloc(&adapter->pdev->dev,
                                   strings_num * ETH_GSTRING_LEN,
                                   GFP_ATOMIC);
        if (!strings_buf) {
                netif_err(adapter, drv, netdev,
                          "failed to alloc strings_buf\n");
                return;
        }

        data_buf = devm_kzalloc(&adapter->pdev->dev,
                                strings_num * sizeof(u64),
                                GFP_ATOMIC);
        if (!data_buf) {
                netif_err(adapter, drv, netdev,
                          "failed to allocate data buf\n");
                devm_kfree(&adapter->pdev->dev, strings_buf);
                return;
        }

        ena_get_strings(netdev, ETH_SS_STATS, strings_buf);
        ena_get_ethtool_stats(netdev, NULL, data_buf);

        /* If there is a buffer, dump stats, otherwise print them to dmesg */
        if (buf)
                for (i = 0; i < strings_num; i++) {
                        rc = snprintf(buf, ETH_GSTRING_LEN + sizeof(u64),
                                      "%s %llu\n",
                                      strings_buf + i * ETH_GSTRING_LEN,
                                      data_buf[i]);
                        buf += rc;
                }
        else
                for (i = 0; i < strings_num; i++)
                        netif_err(adapter, drv, netdev, "%s: %llu\n",
                                  strings_buf + i * ETH_GSTRING_LEN,
                                  data_buf[i]);

        devm_kfree(&adapter->pdev->dev, strings_buf);
        devm_kfree(&adapter->pdev->dev, data_buf);
}

void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf)
{
        if (!buf)
                return;

        ena_dump_stats_ex(adapter, buf);
}

void ena_dump_stats_to_dmesg(struct ena_adapter *adapter)
{
        ena_dump_stats_ex(adapter, NULL);
}