/*
 * Copyright 2015 Amazon.com, Inc. or its affiliates.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 * - Redistributions of source code must retain the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer.
 *
 * - Redistributions in binary form must reproduce the above
 *   copyright notice, this list of conditions and the following
 *   disclaimer in the documentation and/or other materials
 *   provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/pci.h>

#include "ena_netdev.h"

struct ena_stats {
        char name[ETH_GSTRING_LEN];
        int stat_offset;
};
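/* Each ENA_STAT_*_ENTRY() macro below builds an ena_stats element pairing a
 * counter's printable name with its byte offset inside the matching
 * statistics structure, so the stat tables can be walked generically.
 */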
#define ENA_STAT_ENA_COM_ENTRY(stat) { \
        .name = #stat, \
        .stat_offset = offsetof(struct ena_com_stats_admin, stat) \
}

#define ENA_STAT_ENTRY(stat, stat_type) { \
        .name = #stat, \
        .stat_offset = offsetof(struct ena_stats_##stat_type, stat) \
}

#define ENA_STAT_RX_ENTRY(stat) \
        ENA_STAT_ENTRY(stat, rx)

#define ENA_STAT_TX_ENTRY(stat) \
        ENA_STAT_ENTRY(stat, tx)

#define ENA_STAT_GLOBAL_ENTRY(stat) \
        ENA_STAT_ENTRY(stat, dev)
static const struct ena_stats ena_stats_global_strings[] = {
        ENA_STAT_GLOBAL_ENTRY(tx_timeout),
        ENA_STAT_GLOBAL_ENTRY(suspend),
        ENA_STAT_GLOBAL_ENTRY(resume),
        ENA_STAT_GLOBAL_ENTRY(wd_expired),
        ENA_STAT_GLOBAL_ENTRY(interface_up),
        ENA_STAT_GLOBAL_ENTRY(interface_down),
        ENA_STAT_GLOBAL_ENTRY(admin_q_pause),
};
static const struct ena_stats ena_stats_tx_strings[] = {
        ENA_STAT_TX_ENTRY(cnt),
        ENA_STAT_TX_ENTRY(bytes),
        ENA_STAT_TX_ENTRY(queue_stop),
        ENA_STAT_TX_ENTRY(queue_wakeup),
        ENA_STAT_TX_ENTRY(dma_mapping_err),
        ENA_STAT_TX_ENTRY(linearize),
        ENA_STAT_TX_ENTRY(linearize_failed),
        ENA_STAT_TX_ENTRY(napi_comp),
        ENA_STAT_TX_ENTRY(tx_poll),
        ENA_STAT_TX_ENTRY(doorbells),
        ENA_STAT_TX_ENTRY(prepare_ctx_err),
        ENA_STAT_TX_ENTRY(bad_req_id),
        ENA_STAT_TX_ENTRY(llq_buffer_copy),
        ENA_STAT_TX_ENTRY(missed_tx),
};
static const struct ena_stats ena_stats_rx_strings[] = {
        ENA_STAT_RX_ENTRY(cnt),
        ENA_STAT_RX_ENTRY(bytes),
        ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
        ENA_STAT_RX_ENTRY(csum_good),
        ENA_STAT_RX_ENTRY(refil_partial),
        ENA_STAT_RX_ENTRY(bad_csum),
        ENA_STAT_RX_ENTRY(page_alloc_fail),
        ENA_STAT_RX_ENTRY(skb_alloc_fail),
        ENA_STAT_RX_ENTRY(dma_mapping_err),
        ENA_STAT_RX_ENTRY(bad_desc_num),
        ENA_STAT_RX_ENTRY(bad_req_id),
        ENA_STAT_RX_ENTRY(empty_rx_ring),
        ENA_STAT_RX_ENTRY(csum_unchecked),
};
static const struct ena_stats ena_stats_ena_com_strings[] = {
        ENA_STAT_ENA_COM_ENTRY(aborted_cmd),
        ENA_STAT_ENA_COM_ENTRY(submitted_cmd),
        ENA_STAT_ENA_COM_ENTRY(completed_cmd),
        ENA_STAT_ENA_COM_ENTRY(out_of_space),
        ENA_STAT_ENA_COM_ENTRY(no_completion),
};
#define ENA_STATS_ARRAY_GLOBAL  ARRAY_SIZE(ena_stats_global_strings)
#define ENA_STATS_ARRAY_TX      ARRAY_SIZE(ena_stats_tx_strings)
#define ENA_STATS_ARRAY_RX      ARRAY_SIZE(ena_stats_rx_strings)
#define ENA_STATS_ARRAY_ENA_COM ARRAY_SIZE(ena_stats_ena_com_strings)
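/* Copy one u64 counter under the ring's u64_stats_sync, retrying the read if
 * the writer updated the counter concurrently.
 */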
static void ena_safe_update_stat(u64 *src, u64 *dst,
                                 struct u64_stats_sync *syncp)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin_irq(syncp);
                *(dst) = *src;
        } while (u64_stats_fetch_retry_irq(syncp, start));
}
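/* Append the Tx and Rx ring counters of every IO queue to the ethtool data
 * buffer, advancing the caller's cursor as values are written.
 */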
static void ena_queue_stats(struct ena_adapter *adapter, u64 **data)
{
        const struct ena_stats *ena_stats;
        struct ena_ring *ring;
        u64 *ptr;
        int i, j;

        for (i = 0; i < adapter->num_io_queues; i++) {
                /* Tx stats */
                ring = &adapter->tx_ring[i];

                for (j = 0; j < ENA_STATS_ARRAY_TX; j++) {
                        ena_stats = &ena_stats_tx_strings[j];

                        ptr = (u64 *)((uintptr_t)&ring->tx_stats +
                                (uintptr_t)ena_stats->stat_offset);

                        ena_safe_update_stat(ptr, (*data)++, &ring->syncp);
                }

                /* Rx stats */
                ring = &adapter->rx_ring[i];

                for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
                        ena_stats = &ena_stats_rx_strings[j];

                        ptr = (u64 *)((uintptr_t)&ring->rx_stats +
                                (uintptr_t)ena_stats->stat_offset);

                        ena_safe_update_stat(ptr, (*data)++, &ring->syncp);
                }
        }
}
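/* Append the admin queue counters maintained by the ena_com layer. */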
static void ena_dev_admin_queue_stats(struct ena_adapter *adapter, u64 **data)
{
        const struct ena_stats *ena_stats;
        u32 *ptr;
        int i;

        for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) {
                ena_stats = &ena_stats_ena_com_strings[i];

                ptr = (u32 *)((uintptr_t)&adapter->ena_dev->admin_queue.stats +
                        (uintptr_t)ena_stats->stat_offset);

                *(*data)++ = *ptr;
        }
}
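/* ethtool -S entry point: emit global device stats first, then per-queue and
 * admin queue stats, in the same order as the strings reported below.
 */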
static void ena_get_ethtool_stats(struct net_device *netdev,
                                  struct ethtool_stats *stats,
                                  u64 *data)
{
        struct ena_adapter *adapter = netdev_priv(netdev);
        const struct ena_stats *ena_stats;
        u64 *ptr;
        int i;

        for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
                ena_stats = &ena_stats_global_strings[i];

                ptr = (u64 *)((uintptr_t)&adapter->dev_stats +
                        (uintptr_t)ena_stats->stat_offset);

                ena_safe_update_stat(ptr, data++, &adapter->syncp);
        }

        ena_queue_stats(adapter, &data);
        ena_dev_admin_queue_stats(adapter, &data);
}
int ena_get_sset_count(struct net_device *netdev, int sset)
{
        struct ena_adapter *adapter = netdev_priv(netdev);

        if (sset != ETH_SS_STATS)
                return -EOPNOTSUPP;

        return adapter->num_io_queues * (ENA_STATS_ARRAY_TX + ENA_STATS_ARRAY_RX)
                + ENA_STATS_ARRAY_GLOBAL + ENA_STATS_ARRAY_ENA_COM;
}
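/* Emit the per-queue stat names in the "queue_<i>_tx_<name>" and
 * "queue_<i>_rx_<name>" format matching the order used by
 * ena_get_ethtool_stats().
 */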
static void ena_queue_strings(struct ena_adapter *adapter, u8 **data)
{
        const struct ena_stats *ena_stats;
        int i, j;

        for (i = 0; i < adapter->num_io_queues; i++) {
                /* Tx stats */
                for (j = 0; j < ENA_STATS_ARRAY_TX; j++) {
                        ena_stats = &ena_stats_tx_strings[j];

                        snprintf(*data, ETH_GSTRING_LEN,
                                 "queue_%u_tx_%s", i, ena_stats->name);
                        (*data) += ETH_GSTRING_LEN;
                }

                /* Rx stats */
                for (j = 0; j < ENA_STATS_ARRAY_RX; j++) {
                        ena_stats = &ena_stats_rx_strings[j];

                        snprintf(*data, ETH_GSTRING_LEN,
                                 "queue_%u_rx_%s", i, ena_stats->name);
                        (*data) += ETH_GSTRING_LEN;
                }
        }
}
static void ena_com_dev_strings(u8 **data)
{
        const struct ena_stats *ena_stats;
        int i;

        for (i = 0; i < ENA_STATS_ARRAY_ENA_COM; i++) {
                ena_stats = &ena_stats_ena_com_strings[i];

                snprintf(*data, ETH_GSTRING_LEN,
                         "ena_admin_q_%s", ena_stats->name);
                (*data) += ETH_GSTRING_LEN;
        }
}
static void ena_get_strings(struct net_device *netdev, u32 sset, u8 *data)
{
        struct ena_adapter *adapter = netdev_priv(netdev);
        const struct ena_stats *ena_stats;
        int i;

        if (sset != ETH_SS_STATS)
                return;

        for (i = 0; i < ENA_STATS_ARRAY_GLOBAL; i++) {
                ena_stats = &ena_stats_global_strings[i];

                memcpy(data, ena_stats->name, ETH_GSTRING_LEN);
                data += ETH_GSTRING_LEN;
        }

        ena_queue_strings(adapter, &data);
        ena_com_dev_strings(&data);
}
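/* Report link speed, duplex and autonegotiation as queried from the device
 * through an admin command.
 */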
static int ena_get_link_ksettings(struct net_device *netdev,
                                  struct ethtool_link_ksettings *link_ksettings)
{
        struct ena_adapter *adapter = netdev_priv(netdev);
        struct ena_com_dev *ena_dev = adapter->ena_dev;
        struct ena_admin_get_feature_link_desc *link;
        struct ena_admin_get_feat_resp feat_resp;
        int rc;

        rc = ena_com_get_link_params(ena_dev, &feat_resp);
        if (rc)
                return rc;

        link = &feat_resp.u.link;
        link_ksettings->base.speed = link->speed;

        if (link->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK) {
                ethtool_link_ksettings_add_link_mode(link_ksettings,
                                                     supported, Autoneg);
                ethtool_link_ksettings_add_link_mode(link_ksettings,
                                                     advertising, Autoneg);
        }

        link_ksettings->base.autoneg =
                (link->flags & ENA_ADMIN_GET_FEATURE_LINK_DESC_AUTONEG_MASK) ?
                AUTONEG_ENABLE : AUTONEG_DISABLE;

        link_ksettings->base.duplex = DUPLEX_FULL;

        return 0;
}
static int ena_get_coalesce(struct net_device *net_dev,
                            struct ethtool_coalesce *coalesce)
{
        struct ena_adapter *adapter = netdev_priv(net_dev);
        struct ena_com_dev *ena_dev = adapter->ena_dev;

        if (!ena_com_interrupt_moderation_supported(ena_dev)) {
                /* the device doesn't support interrupt moderation */
                return -EOPNOTSUPP;
        }

        coalesce->tx_coalesce_usecs =
                ena_com_get_nonadaptive_moderation_interval_tx(ena_dev) *
                ena_dev->intr_delay_resolution;

        coalesce->rx_coalesce_usecs =
                ena_com_get_nonadaptive_moderation_interval_rx(ena_dev) *
                ena_dev->intr_delay_resolution;

        coalesce->use_adaptive_rx_coalesce =
                ena_com_get_adaptive_moderation_enabled(ena_dev);

        return 0;
}
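/* Propagate the current non-adaptive Tx/Rx moderation interval to every
 * ring so the per-ring smoothed interval matches the device setting.
 */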
static void ena_update_tx_rings_intr_moderation(struct ena_adapter *adapter)
{
        unsigned int val;
        int i;

        val = ena_com_get_nonadaptive_moderation_interval_tx(adapter->ena_dev);

        for (i = 0; i < adapter->num_io_queues; i++)
                adapter->tx_ring[i].smoothed_interval = val;
}
static void ena_update_rx_rings_intr_moderation(struct ena_adapter *adapter)
{
        unsigned int val;
        int i;

        val = ena_com_get_nonadaptive_moderation_interval_rx(adapter->ena_dev);

        for (i = 0; i < adapter->num_io_queues; i++)
                adapter->rx_ring[i].smoothed_interval = val;
}
static int ena_set_coalesce(struct net_device *net_dev,
                            struct ethtool_coalesce *coalesce)
{
        struct ena_adapter *adapter = netdev_priv(net_dev);
        struct ena_com_dev *ena_dev = adapter->ena_dev;
        int rc;

        if (!ena_com_interrupt_moderation_supported(ena_dev)) {
                /* the device doesn't support interrupt moderation */
                return -EOPNOTSUPP;
        }

        rc = ena_com_update_nonadaptive_moderation_interval_tx(ena_dev,
                                                                coalesce->tx_coalesce_usecs);
        if (rc)
                return rc;

        ena_update_tx_rings_intr_moderation(adapter);

        rc = ena_com_update_nonadaptive_moderation_interval_rx(ena_dev,
                                                                coalesce->rx_coalesce_usecs);
        if (rc)
                return rc;

        ena_update_rx_rings_intr_moderation(adapter);

        if (coalesce->use_adaptive_rx_coalesce &&
            !ena_com_get_adaptive_moderation_enabled(ena_dev))
                ena_com_enable_adaptive_moderation(ena_dev);

        if (!coalesce->use_adaptive_rx_coalesce &&
            ena_com_get_adaptive_moderation_enabled(ena_dev))
                ena_com_disable_adaptive_moderation(ena_dev);

        return 0;
}
static u32 ena_get_msglevel(struct net_device *netdev)
{
        struct ena_adapter *adapter = netdev_priv(netdev);

        return adapter->msg_enable;
}
static void ena_set_msglevel(struct net_device *netdev, u32 value)
{
        struct ena_adapter *adapter = netdev_priv(netdev);

        adapter->msg_enable = value;
}
static void ena_get_drvinfo(struct net_device *dev,
                            struct ethtool_drvinfo *info)
{
        struct ena_adapter *adapter = netdev_priv(dev);

        strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
        strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
        strlcpy(info->bus_info, pci_name(adapter->pdev),
                sizeof(info->bus_info));
}
static void ena_get_ringparam(struct net_device *netdev,
                              struct ethtool_ringparam *ring)
{
        struct ena_adapter *adapter = netdev_priv(netdev);

        ring->tx_max_pending = adapter->max_tx_ring_size;
        ring->rx_max_pending = adapter->max_rx_ring_size;
        ring->tx_pending = adapter->tx_ring[0].ring_size;
        ring->rx_pending = adapter->rx_ring[0].ring_size;
}
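/* Clamp the requested ring sizes to at least ENA_MIN_RING_SIZE and round
 * them down to a power of two before applying the new queue sizes.
 */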
static int ena_set_ringparam(struct net_device *netdev,
                             struct ethtool_ringparam *ring)
{
        struct ena_adapter *adapter = netdev_priv(netdev);
        u32 new_tx_size, new_rx_size;

        new_tx_size = ring->tx_pending < ENA_MIN_RING_SIZE ?
                      ENA_MIN_RING_SIZE : ring->tx_pending;
        new_tx_size = rounddown_pow_of_two(new_tx_size);

        new_rx_size = ring->rx_pending < ENA_MIN_RING_SIZE ?
                      ENA_MIN_RING_SIZE : ring->rx_pending;
        new_rx_size = rounddown_pow_of_two(new_rx_size);

        if (new_tx_size == adapter->requested_tx_ring_size &&
            new_rx_size == adapter->requested_rx_ring_size)
                return 0;

        return ena_update_queue_sizes(adapter, new_tx_size, new_rx_size);
}
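/* Translate the device's RSS hash-field flags to the ethtool RXH_* bits;
 * ena_flow_data_to_flow_hash() below performs the inverse mapping.
 */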
static u32 ena_flow_hash_to_flow_type(u16 hash_fields)
{
        u32 data = 0;

        if (hash_fields & ENA_ADMIN_RSS_L2_DA)
                data |= RXH_L2DA;

        if (hash_fields & ENA_ADMIN_RSS_L3_DA)
                data |= RXH_IP_DST;

        if (hash_fields & ENA_ADMIN_RSS_L3_SA)
                data |= RXH_IP_SRC;

        if (hash_fields & ENA_ADMIN_RSS_L4_DP)
                data |= RXH_L4_B_2_3;

        if (hash_fields & ENA_ADMIN_RSS_L4_SP)
                data |= RXH_L4_B_0_1;

        return data;
}
static u16 ena_flow_data_to_flow_hash(u32 hash_fields)
{
        u16 data = 0;

        if (hash_fields & RXH_L2DA)
                data |= ENA_ADMIN_RSS_L2_DA;

        if (hash_fields & RXH_IP_DST)
                data |= ENA_ADMIN_RSS_L3_DA;

        if (hash_fields & RXH_IP_SRC)
                data |= ENA_ADMIN_RSS_L3_SA;

        if (hash_fields & RXH_L4_B_2_3)
                data |= ENA_ADMIN_RSS_L4_DP;

        if (hash_fields & RXH_L4_B_0_1)
                data |= ENA_ADMIN_RSS_L4_SP;

        return data;
}
static int ena_get_rss_hash(struct ena_com_dev *ena_dev,
                            struct ethtool_rxnfc *cmd)
{
        enum ena_admin_flow_hash_proto proto;
        u16 hash_fields;
        int rc;

        switch (cmd->flow_type) {
        case TCP_V4_FLOW:
                proto = ENA_ADMIN_RSS_TCP4;
                break;
        case UDP_V4_FLOW:
                proto = ENA_ADMIN_RSS_UDP4;
                break;
        case TCP_V6_FLOW:
                proto = ENA_ADMIN_RSS_TCP6;
                break;
        case UDP_V6_FLOW:
                proto = ENA_ADMIN_RSS_UDP6;
                break;
        case IPV4_FLOW:
                proto = ENA_ADMIN_RSS_IP4;
                break;
        case IPV6_FLOW:
                proto = ENA_ADMIN_RSS_IP6;
                break;
        case ETHER_FLOW:
                proto = ENA_ADMIN_RSS_NOT_IP;
                break;
        default:
                return -EOPNOTSUPP;
        }

        rc = ena_com_get_hash_ctrl(ena_dev, proto, &hash_fields);
        if (rc)
                return rc;

        cmd->data = ena_flow_hash_to_flow_type(hash_fields);

        return 0;
}
static int ena_set_rss_hash(struct ena_com_dev *ena_dev,
                            struct ethtool_rxnfc *cmd)
{
        enum ena_admin_flow_hash_proto proto;
        u16 hash_fields;

        switch (cmd->flow_type) {
        case TCP_V4_FLOW:
                proto = ENA_ADMIN_RSS_TCP4;
                break;
        case UDP_V4_FLOW:
                proto = ENA_ADMIN_RSS_UDP4;
                break;
        case TCP_V6_FLOW:
                proto = ENA_ADMIN_RSS_TCP6;
                break;
        case UDP_V6_FLOW:
                proto = ENA_ADMIN_RSS_UDP6;
                break;
        case IPV4_FLOW:
                proto = ENA_ADMIN_RSS_IP4;
                break;
        case IPV6_FLOW:
                proto = ENA_ADMIN_RSS_IP6;
                break;
        case ETHER_FLOW:
                proto = ENA_ADMIN_RSS_NOT_IP;
                break;
        default:
                return -EOPNOTSUPP;
        }

        hash_fields = ena_flow_data_to_flow_hash(cmd->data);

        return ena_com_fill_hash_ctrl(ena_dev, proto, hash_fields);
}
static int ena_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info)
{
        struct ena_adapter *adapter = netdev_priv(netdev);
        int rc = 0;

        switch (info->cmd) {
        case ETHTOOL_SRXFH:
                rc = ena_set_rss_hash(adapter->ena_dev, info);
                break;
        case ETHTOOL_SRXCLSRLDEL:
        case ETHTOOL_SRXCLSRLINS:
        default:
                netif_err(adapter, drv, netdev,
                          "Command parameter %d is not supported\n", info->cmd);
                rc = -EOPNOTSUPP;
        }

        return rc;
}
static int ena_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
                         u32 *rules)
{
        struct ena_adapter *adapter = netdev_priv(netdev);
        int rc = 0;

        switch (info->cmd) {
        case ETHTOOL_GRXRINGS:
                info->data = adapter->num_io_queues;
                break;
        case ETHTOOL_GRXFH:
                rc = ena_get_rss_hash(adapter->ena_dev, info);
                break;
        case ETHTOOL_GRXCLSRLCNT:
        case ETHTOOL_GRXCLSRULE:
        case ETHTOOL_GRXCLSRLALL:
        default:
                netif_err(adapter, drv, netdev,
                          "Command parameter %d is not supported\n", info->cmd);
                rc = -EOPNOTSUPP;
        }

        return rc;
}
static u32 ena_get_rxfh_indir_size(struct net_device *netdev)
{
        return ENA_RX_RSS_TABLE_SIZE;
}

static u32 ena_get_rxfh_key_size(struct net_device *netdev)
{
        return ENA_HASH_KEY_SIZE;
}
static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
                        u8 *hfunc)
{
        struct ena_adapter *adapter = netdev_priv(netdev);
        enum ena_admin_hash_functions ena_func;
        u8 func;
        int rc;

        rc = ena_com_indirect_table_get(adapter->ena_dev, indir);
        if (rc)
                return rc;

        rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func, key);
        if (rc)
                return rc;

        switch (ena_func) {
        case ENA_ADMIN_TOEPLITZ:
                func = ETH_RSS_HASH_TOP;
                break;
        case ENA_ADMIN_CRC32:
                func = ETH_RSS_HASH_XOR;
                break;
        default:
                netif_err(adapter, drv, netdev,
                          "Command parameter is not supported\n");
                return -EOPNOTSUPP;
        }

        if (hfunc)
                *hfunc = func;

        return 0;
}
static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
                        const u8 *key, const u8 hfunc)
{
        struct ena_adapter *adapter = netdev_priv(netdev);
        struct ena_com_dev *ena_dev = adapter->ena_dev;
        enum ena_admin_hash_functions func;
        int rc, i;

        for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++) {
                rc = ena_com_indirect_table_fill_entry(ena_dev, i,
                                                       ENA_IO_RXQ_IDX(indir[i]));
                if (unlikely(rc)) {
                        netif_err(adapter, drv, netdev,
                                  "Cannot fill indirect table (index is too large)\n");
                        return rc;
                }
        }

        rc = ena_com_indirect_table_set(ena_dev);
        if (rc) {
                netif_err(adapter, drv, netdev,
                          "Cannot set indirect table\n");
                return rc == -EPERM ? -EOPNOTSUPP : rc;
        }

        switch (hfunc) {
        case ETH_RSS_HASH_TOP:
                func = ENA_ADMIN_TOEPLITZ;
                break;
        case ETH_RSS_HASH_XOR:
                func = ENA_ADMIN_CRC32;
                break;
        default:
                netif_err(adapter, drv, netdev, "Unsupported hfunc %d\n",
                          hfunc);
                return -EOPNOTSUPP;
        }

        rc = ena_com_fill_hash_function(ena_dev, func, key, ENA_HASH_KEY_SIZE,
                                        0xFFFFFFFF);
        if (unlikely(rc)) {
                netif_err(adapter, drv, netdev, "Cannot fill key\n");
                return rc == -EPERM ? -EOPNOTSUPP : rc;
        }

        return 0;
}
static void ena_get_channels(struct net_device *netdev,
                             struct ethtool_channels *channels)
{
        struct ena_adapter *adapter = netdev_priv(netdev);

        channels->max_combined = adapter->max_num_io_queues;
        channels->combined_count = adapter->num_io_queues;
}
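/* Reject queue counts below ENA_MIN_NUM_IO_QUEUES, or counts that would not
 * leave room for the extra Tx queues XDP needs while a program is attached.
 */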
static int ena_set_channels(struct net_device *netdev,
                            struct ethtool_channels *channels)
{
        struct ena_adapter *adapter = netdev_priv(netdev);
        u32 count = channels->combined_count;

        /* The check for max value is already done in ethtool */
        if (count < ENA_MIN_NUM_IO_QUEUES ||
            (ena_xdp_present(adapter) &&
             !ena_xdp_legal_queue_count(adapter, channels->combined_count)))
                return -EINVAL;

        return ena_update_queue_count(adapter, count);
}
static int ena_get_tunable(struct net_device *netdev,
                           const struct ethtool_tunable *tuna, void *data)
{
        struct ena_adapter *adapter = netdev_priv(netdev);
        int ret = 0;

        switch (tuna->id) {
        case ETHTOOL_RX_COPYBREAK:
                *(u32 *)data = adapter->rx_copybreak;
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}
static int ena_set_tunable(struct net_device *netdev,
                           const struct ethtool_tunable *tuna,
                           const void *data)
{
        struct ena_adapter *adapter = netdev_priv(netdev);
        int ret = 0;
        u32 len;

        switch (tuna->id) {
        case ETHTOOL_RX_COPYBREAK:
                len = *(u32 *)data;
                if (len > adapter->netdev->mtu) {
                        ret = -EINVAL;
                        break;
                }
                adapter->rx_copybreak = len;
                break;
        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}
static const struct ethtool_ops ena_ethtool_ops = {
        .get_link_ksettings  = ena_get_link_ksettings,
        .get_drvinfo         = ena_get_drvinfo,
        .get_msglevel        = ena_get_msglevel,
        .set_msglevel        = ena_set_msglevel,
        .get_link            = ethtool_op_get_link,
        .get_coalesce        = ena_get_coalesce,
        .set_coalesce        = ena_set_coalesce,
        .get_ringparam       = ena_get_ringparam,
        .set_ringparam       = ena_set_ringparam,
        .get_sset_count      = ena_get_sset_count,
        .get_strings         = ena_get_strings,
        .get_ethtool_stats   = ena_get_ethtool_stats,
        .get_rxnfc           = ena_get_rxnfc,
        .set_rxnfc           = ena_set_rxnfc,
        .get_rxfh_indir_size = ena_get_rxfh_indir_size,
        .get_rxfh_key_size   = ena_get_rxfh_key_size,
        .get_rxfh            = ena_get_rxfh,
        .set_rxfh            = ena_set_rxfh,
        .get_channels        = ena_get_channels,
        .set_channels        = ena_set_channels,
        .get_tunable         = ena_get_tunable,
        .set_tunable         = ena_set_tunable,
};
void ena_set_ethtool_ops(struct net_device *netdev)
{
        netdev->ethtool_ops = &ena_ethtool_ops;
}
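/* Gather stat names and values through the ethtool callbacks and either
 * format them into @buf or, when @buf is NULL, print them to dmesg.
 */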
static void ena_dump_stats_ex(struct ena_adapter *adapter, u8 *buf)
{
        struct net_device *netdev = adapter->netdev;
        u8 *strings_buf;
        u64 *data_buf;
        int strings_num;
        int i, rc;

        strings_num = ena_get_sset_count(netdev, ETH_SS_STATS);
        if (strings_num <= 0) {
                netif_err(adapter, drv, netdev, "Can't get stats num\n");
                return;
        }

        strings_buf = devm_kcalloc(&adapter->pdev->dev,
                                   ETH_GSTRING_LEN, strings_num,
                                   GFP_ATOMIC);
        if (!strings_buf) {
                netif_err(adapter, drv, netdev,
                          "failed to alloc strings_buf\n");
                return;
        }

        data_buf = devm_kcalloc(&adapter->pdev->dev,
                                strings_num, sizeof(u64),
                                GFP_ATOMIC);
        if (!data_buf) {
                netif_err(adapter, drv, netdev,
                          "failed to allocate data buf\n");
                devm_kfree(&adapter->pdev->dev, strings_buf);
                return;
        }

        ena_get_strings(netdev, ETH_SS_STATS, strings_buf);
        ena_get_ethtool_stats(netdev, NULL, data_buf);

        /* If there is a buffer, dump stats, otherwise print them to dmesg */
        if (buf) {
                for (i = 0; i < strings_num; i++) {
                        rc = snprintf(buf, ETH_GSTRING_LEN + sizeof(u64),
                                      "%s %llu\n",
                                      strings_buf + i * ETH_GSTRING_LEN,
                                      data_buf[i]);
                        buf += rc;
                }
        } else {
                for (i = 0; i < strings_num; i++)
                        netif_err(adapter, drv, netdev, "%s: %llu\n",
                                  strings_buf + i * ETH_GSTRING_LEN,
                                  data_buf[i]);
        }

        devm_kfree(&adapter->pdev->dev, strings_buf);
        devm_kfree(&adapter->pdev->dev, data_buf);
}
void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf)
{
        if (!buf)
                return;

        ena_dump_stats_ex(adapter, buf);
}
void ena_dump_stats_to_dmesg(struct ena_adapter *adapter)
{
        ena_dump_stats_ex(adapter, NULL);
}