// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2021, Microsoft Corporation. */

#include <linux/inetdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>

#include <net/mana/mana.h>

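/* Per-port counters exposed through "ethtool -S"; offset locates the
 * matching field inside struct mana_ethtool_stats.
 */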
static const struct {
	char name[ETH_GSTRING_LEN];
	u16 offset;
} mana_eth_stats[] = {
14 {"stop_queue", offsetof(struct mana_ethtool_stats
, stop_queue
)},
15 {"wake_queue", offsetof(struct mana_ethtool_stats
, wake_queue
)},
16 {"hc_rx_discards_no_wqe", offsetof(struct mana_ethtool_stats
,
17 hc_rx_discards_no_wqe
)},
18 {"hc_rx_err_vport_disabled", offsetof(struct mana_ethtool_stats
,
19 hc_rx_err_vport_disabled
)},
20 {"hc_rx_bytes", offsetof(struct mana_ethtool_stats
, hc_rx_bytes
)},
21 {"hc_rx_ucast_pkts", offsetof(struct mana_ethtool_stats
,
23 {"hc_rx_ucast_bytes", offsetof(struct mana_ethtool_stats
,
25 {"hc_rx_bcast_pkts", offsetof(struct mana_ethtool_stats
,
27 {"hc_rx_bcast_bytes", offsetof(struct mana_ethtool_stats
,
29 {"hc_rx_mcast_pkts", offsetof(struct mana_ethtool_stats
,
31 {"hc_rx_mcast_bytes", offsetof(struct mana_ethtool_stats
,
33 {"hc_tx_err_gf_disabled", offsetof(struct mana_ethtool_stats
,
34 hc_tx_err_gf_disabled
)},
35 {"hc_tx_err_vport_disabled", offsetof(struct mana_ethtool_stats
,
36 hc_tx_err_vport_disabled
)},
37 {"hc_tx_err_inval_vportoffset_pkt",
38 offsetof(struct mana_ethtool_stats
,
39 hc_tx_err_inval_vportoffset_pkt
)},
40 {"hc_tx_err_vlan_enforcement", offsetof(struct mana_ethtool_stats
,
41 hc_tx_err_vlan_enforcement
)},
42 {"hc_tx_err_eth_type_enforcement",
43 offsetof(struct mana_ethtool_stats
, hc_tx_err_eth_type_enforcement
)},
44 {"hc_tx_err_sa_enforcement", offsetof(struct mana_ethtool_stats
,
45 hc_tx_err_sa_enforcement
)},
46 {"hc_tx_err_sqpdid_enforcement",
47 offsetof(struct mana_ethtool_stats
, hc_tx_err_sqpdid_enforcement
)},
48 {"hc_tx_err_cqpdid_enforcement",
49 offsetof(struct mana_ethtool_stats
, hc_tx_err_cqpdid_enforcement
)},
50 {"hc_tx_err_mtu_violation", offsetof(struct mana_ethtool_stats
,
51 hc_tx_err_mtu_violation
)},
52 {"hc_tx_err_inval_oob", offsetof(struct mana_ethtool_stats
,
53 hc_tx_err_inval_oob
)},
54 {"hc_tx_err_gdma", offsetof(struct mana_ethtool_stats
,
56 {"hc_tx_bytes", offsetof(struct mana_ethtool_stats
, hc_tx_bytes
)},
57 {"hc_tx_ucast_pkts", offsetof(struct mana_ethtool_stats
,
59 {"hc_tx_ucast_bytes", offsetof(struct mana_ethtool_stats
,
61 {"hc_tx_bcast_pkts", offsetof(struct mana_ethtool_stats
,
63 {"hc_tx_bcast_bytes", offsetof(struct mana_ethtool_stats
,
65 {"hc_tx_mcast_pkts", offsetof(struct mana_ethtool_stats
,
67 {"hc_tx_mcast_bytes", offsetof(struct mana_ethtool_stats
,
69 {"tx_cq_err", offsetof(struct mana_ethtool_stats
, tx_cqe_err
)},
70 {"tx_cqe_unknown_type", offsetof(struct mana_ethtool_stats
,
71 tx_cqe_unknown_type
)},
72 {"rx_coalesced_err", offsetof(struct mana_ethtool_stats
,
74 {"rx_cqe_unknown_type", offsetof(struct mana_ethtool_stats
,
75 rx_cqe_unknown_type
)},
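/* The reported stats count is the fixed per-port list above plus
 * MANA_STATS_RX_COUNT rx and MANA_STATS_TX_COUNT tx counters per queue.
 */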
static int mana_get_sset_count(struct net_device *ndev, int stringset)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;

	if (stringset != ETH_SS_STATS)
		return -EINVAL;

	return ARRAY_SIZE(mana_eth_stats) + num_queues *
				(MANA_STATS_RX_COUNT + MANA_STATS_TX_COUNT);
}

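/* Stat names must be emitted in the same order in which
 * mana_get_ethtool_stats() writes the corresponding values.
 */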
static void mana_get_strings(struct net_device *ndev, u32 stringset, u8 *data)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;
	int i;

	if (stringset != ETH_SS_STATS)
		return;

	for (i = 0; i < ARRAY_SIZE(mana_eth_stats); i++)
		ethtool_puts(&data, mana_eth_stats[i].name);

	for (i = 0; i < num_queues; i++) {
		ethtool_sprintf(&data, "rx_%d_packets", i);
		ethtool_sprintf(&data, "rx_%d_bytes", i);
		ethtool_sprintf(&data, "rx_%d_xdp_drop", i);
		ethtool_sprintf(&data, "rx_%d_xdp_tx", i);
		ethtool_sprintf(&data, "rx_%d_xdp_redirect", i);
	}

	for (i = 0; i < num_queues; i++) {
		ethtool_sprintf(&data, "tx_%d_packets", i);
		ethtool_sprintf(&data, "tx_%d_bytes", i);
		ethtool_sprintf(&data, "tx_%d_xdp_xmit", i);
		ethtool_sprintf(&data, "tx_%d_tso_packets", i);
		ethtool_sprintf(&data, "tx_%d_tso_bytes", i);
		ethtool_sprintf(&data, "tx_%d_tso_inner_packets", i);
		ethtool_sprintf(&data, "tx_%d_tso_inner_bytes", i);
		ethtool_sprintf(&data, "tx_%d_long_pkt_fmt", i);
		ethtool_sprintf(&data, "tx_%d_short_pkt_fmt", i);
		ethtool_sprintf(&data, "tx_%d_csum_partial", i);
		ethtool_sprintf(&data, "tx_%d_mana_map_err", i);
	}
}

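/* Per-queue counters are snapshotted under the queue's u64_stats syncp so
 * each set of values is read consistently even on 32-bit kernels.
 */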
static void mana_get_ethtool_stats(struct net_device *ndev,
				   struct ethtool_stats *e_stats, u64 *data)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int num_queues = apc->num_queues;
	void *eth_stats = &apc->eth_stats;
	struct mana_stats_rx *rx_stats;
	struct mana_stats_tx *tx_stats;
	unsigned int start;
	u64 packets, bytes;
	u64 xdp_redirect;
	u64 xdp_xmit;
	u64 xdp_drop;
	u64 xdp_tx;
	u64 tso_packets;
	u64 tso_bytes;
	u64 tso_inner_packets;
	u64 tso_inner_bytes;
	u64 long_pkt_fmt;
	u64 short_pkt_fmt;
	u64 csum_partial;
	u64 mana_map_err;
	int q, i = 0;

	if (!apc->port_is_up)
		return;
	/* we call mana function to update stats from GDMA */
	mana_query_gf_stats(apc);

	for (q = 0; q < ARRAY_SIZE(mana_eth_stats); q++)
		data[i++] = *(u64 *)(eth_stats + mana_eth_stats[q].offset);

	for (q = 0; q < num_queues; q++) {
		rx_stats = &apc->rxqs[q]->stats;

		do {
			start = u64_stats_fetch_begin(&rx_stats->syncp);
			packets = rx_stats->packets;
			bytes = rx_stats->bytes;
			xdp_drop = rx_stats->xdp_drop;
			xdp_tx = rx_stats->xdp_tx;
			xdp_redirect = rx_stats->xdp_redirect;
		} while (u64_stats_fetch_retry(&rx_stats->syncp, start));

		data[i++] = packets;
		data[i++] = bytes;
		data[i++] = xdp_drop;
		data[i++] = xdp_tx;
		data[i++] = xdp_redirect;
	}

	for (q = 0; q < num_queues; q++) {
		tx_stats = &apc->tx_qp[q].txq.stats;

		do {
			start = u64_stats_fetch_begin(&tx_stats->syncp);
			packets = tx_stats->packets;
			bytes = tx_stats->bytes;
			xdp_xmit = tx_stats->xdp_xmit;
			tso_packets = tx_stats->tso_packets;
			tso_bytes = tx_stats->tso_bytes;
			tso_inner_packets = tx_stats->tso_inner_packets;
			tso_inner_bytes = tx_stats->tso_inner_bytes;
			long_pkt_fmt = tx_stats->long_pkt_fmt;
			short_pkt_fmt = tx_stats->short_pkt_fmt;
			csum_partial = tx_stats->csum_partial;
			mana_map_err = tx_stats->mana_map_err;
		} while (u64_stats_fetch_retry(&tx_stats->syncp, start));

		data[i++] = packets;
		data[i++] = bytes;
		data[i++] = xdp_xmit;
		data[i++] = tso_packets;
		data[i++] = tso_bytes;
		data[i++] = tso_inner_packets;
		data[i++] = tso_inner_bytes;
		data[i++] = long_pkt_fmt;
		data[i++] = short_pkt_fmt;
		data[i++] = csum_partial;
		data[i++] = mana_map_err;
	}
}

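/* Only ETHTOOL_GRXRINGS (query the number of RX rings) is supported. */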
static int mana_get_rxnfc(struct net_device *ndev, struct ethtool_rxnfc *cmd,
			  u32 *rules)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	switch (cmd->cmd) {
	case ETHTOOL_GRXRINGS:
		cmd->data = apc->num_queues;
		return 0;
	}

	return -EOPNOTSUPP;
}

static u32 mana_get_rxfh_key_size(struct net_device *ndev)
{
	return MANA_HASH_KEY_SIZE;
}

static u32 mana_rss_indir_size(struct net_device *ndev)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	return apc->indir_table_sz;
}

static int mana_get_rxfh(struct net_device *ndev,
			 struct ethtool_rxfh_param *rxfh)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	int i;

	rxfh->hfunc = ETH_RSS_HASH_TOP; /* Toeplitz */

	if (rxfh->indir) {
		for (i = 0; i < apc->indir_table_sz; i++)
			rxfh->indir[i] = apc->indir_table[i];
	}

	if (rxfh->key)
		memcpy(rxfh->key, apc->hashkey, MANA_HASH_KEY_SIZE);

	return 0;
}

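/* Save the current RSS key and indirection table first, so a failed
 * mana_config_rss() can be rolled back to the previous settings.
 */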
static int mana_set_rxfh(struct net_device *ndev,
			 struct ethtool_rxfh_param *rxfh,
			 struct netlink_ext_ack *extack)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	bool update_hash = false, update_table = false;
	u8 save_key[MANA_HASH_KEY_SIZE];
	u32 *save_table;
	int i, err = 0;

	if (!apc->port_is_up)
		return -EOPNOTSUPP;

	if (rxfh->hfunc != ETH_RSS_HASH_NO_CHANGE &&
	    rxfh->hfunc != ETH_RSS_HASH_TOP)
		return -EOPNOTSUPP;

	save_table = kcalloc(apc->indir_table_sz, sizeof(u32), GFP_KERNEL);
	if (!save_table)
		return -ENOMEM;

	if (rxfh->indir) {
		for (i = 0; i < apc->indir_table_sz; i++)
			if (rxfh->indir[i] >= apc->num_queues) {
				err = -EINVAL;
				goto cleanup;
			}

		update_table = true;
		for (i = 0; i < apc->indir_table_sz; i++) {
			save_table[i] = apc->indir_table[i];
			apc->indir_table[i] = rxfh->indir[i];
		}
	}

	if (rxfh->key) {
		update_hash = true;
		memcpy(save_key, apc->hashkey, MANA_HASH_KEY_SIZE);
		memcpy(apc->hashkey, rxfh->key, MANA_HASH_KEY_SIZE);
	}

	err = mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);

	if (err) { /* recover to original values */
		if (update_table) {
			for (i = 0; i < apc->indir_table_sz; i++)
				apc->indir_table[i] = save_table[i];
		}

		if (update_hash)
			memcpy(apc->hashkey, save_key, MANA_HASH_KEY_SIZE);

		mana_config_rss(apc, TRI_STATE_TRUE, update_hash, update_table);
	}

cleanup:
	kfree(save_table);

	return err;
}

static void mana_get_channels(struct net_device *ndev,
			      struct ethtool_channels *channel)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	channel->max_combined = apc->max_queues;
	channel->combined_count = apc->num_queues;
}

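/* Changing the queue count needs a full detach/attach cycle; RX buffers for
 * the new queue count are pre-allocated first to reduce the chance that
 * mana_attach() fails afterwards.
 */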
static int mana_set_channels(struct net_device *ndev,
			     struct ethtool_channels *channels)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	unsigned int new_count = channels->combined_count;
	unsigned int old_count = apc->num_queues;
	int err;

	err = mana_pre_alloc_rxbufs(apc, ndev->mtu, new_count);
	if (err) {
		netdev_err(ndev, "Insufficient memory for new allocations");
		return err;
	}

	err = mana_detach(ndev, false);
	if (err) {
		netdev_err(ndev, "mana_detach failed: %d\n", err);
		goto out;
	}

	apc->num_queues = new_count;
	err = mana_attach(ndev);
	if (err) {
		apc->num_queues = old_count;
		netdev_err(ndev, "mana_attach failed: %d\n", err);
	}

out:
	mana_pre_dealloc_rxbufs(apc);
	return err;
}

static void mana_get_ringparam(struct net_device *ndev,
			       struct ethtool_ringparam *ring,
			       struct kernel_ethtool_ringparam *kernel_ring,
			       struct netlink_ext_ack *extack)
{
	struct mana_port_context *apc = netdev_priv(ndev);

	ring->rx_pending = apc->rx_queue_size;
	ring->tx_pending = apc->tx_queue_size;
	ring->rx_max_pending = MAX_RX_BUFFERS_PER_QUEUE;
	ring->tx_max_pending = MAX_TX_BUFFERS_PER_QUEUE;
}

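/* Ring sizes are validated against the minimums and rounded up to the next
 * power of two before being applied via a detach/attach cycle.
 */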
static int mana_set_ringparam(struct net_device *ndev,
			      struct ethtool_ringparam *ring,
			      struct kernel_ethtool_ringparam *kernel_ring,
			      struct netlink_ext_ack *extack)
{
	struct mana_port_context *apc = netdev_priv(ndev);
	u32 new_tx, new_rx;
	u32 old_tx, old_rx;
	int err;

	old_tx = apc->tx_queue_size;
	old_rx = apc->rx_queue_size;

	if (ring->tx_pending < MIN_TX_BUFFERS_PER_QUEUE) {
		NL_SET_ERR_MSG_FMT(extack, "tx:%d less than the min:%d", ring->tx_pending,
				   MIN_TX_BUFFERS_PER_QUEUE);
		return -EINVAL;
	}

	if (ring->rx_pending < MIN_RX_BUFFERS_PER_QUEUE) {
		NL_SET_ERR_MSG_FMT(extack, "rx:%d less than the min:%d", ring->rx_pending,
				   MIN_RX_BUFFERS_PER_QUEUE);
		return -EINVAL;
	}

	new_rx = roundup_pow_of_two(ring->rx_pending);
	new_tx = roundup_pow_of_two(ring->tx_pending);
	netdev_info(ndev, "Using nearest power of 2 values for Txq:%d Rxq:%d\n",
		    new_tx, new_rx);

	/* pre-allocating new buffers to prevent failures in mana_attach() later */
	apc->rx_queue_size = new_rx;
	err = mana_pre_alloc_rxbufs(apc, ndev->mtu, apc->num_queues);
	apc->rx_queue_size = old_rx;
	if (err) {
		netdev_err(ndev, "Insufficient memory for new allocations\n");
		return err;
	}

	err = mana_detach(ndev, false);
	if (err) {
		netdev_err(ndev, "mana_detach failed: %d\n", err);
		goto out;
	}

	apc->tx_queue_size = new_tx;
	apc->rx_queue_size = new_rx;

	err = mana_attach(ndev);
	if (err) {
		netdev_err(ndev, "mana_attach failed: %d\n", err);
		apc->tx_queue_size = old_tx;
		apc->rx_queue_size = old_rx;
	}

out:
	mana_pre_dealloc_rxbufs(apc);
	return err;
}

static int mana_get_link_ksettings(struct net_device *ndev,
				   struct ethtool_link_ksettings *cmd)
{
	cmd->base.duplex = DUPLEX_FULL;
	cmd->base.port = PORT_OTHER;

	return 0;
}

const struct ethtool_ops mana_ethtool_ops = {
	.get_ethtool_stats = mana_get_ethtool_stats,
	.get_sset_count = mana_get_sset_count,
	.get_strings = mana_get_strings,
	.get_rxnfc = mana_get_rxnfc,
	.get_rxfh_key_size = mana_get_rxfh_key_size,
	.get_rxfh_indir_size = mana_rss_indir_size,
	.get_rxfh = mana_get_rxfh,
	.set_rxfh = mana_set_rxfh,
	.get_channels = mana_get_channels,
	.set_channels = mana_set_channels,
	.get_ringparam = mana_get_ringparam,
	.set_ringparam = mana_set_ringparam,
	.get_link_ksettings = mana_get_link_ksettings,
	.get_link = ethtool_op_get_link,
};