// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/etherdevice.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/lockdep.h>
#include <net/dst_metadata.h>

#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nsp.h"
#include "nfp_app.h"
#include "nfp_main.h"
#include "nfp_net.h"
#include "nfp_net_ctrl.h"
#include "nfp_net_repr.h"
#include "nfp_net_sriov.h"
#include "nfp_port.h"
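/* Look up a representor netdev by index.  The reprs table is RCU-protected;
 * callers must hold the app's pf->lock, which is what permits the use of
 * rcu_dereference_protected() here.
 */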
struct net_device *
nfp_repr_get_locked(struct nfp_app *app, struct nfp_reprs *set, unsigned int id)
{
	return rcu_dereference_protected(set->reprs[id],
					 lockdep_is_held(&app->pf->lock));
}
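/* Account a transmitted skb against the representor's per-cpu stats.
 * NET_XMIT_SUCCESS and NET_XMIT_CN both count as a successful transmit;
 * any other status from dev_queue_xmit() is recorded as a TX drop.
 */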
static void
nfp_repr_inc_tx_stats(struct net_device *netdev, unsigned int len,
		      int tx_status)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_repr_pcpu_stats *stats;

	if (unlikely(tx_status != NET_XMIT_SUCCESS &&
		     tx_status != NET_XMIT_CN)) {
		this_cpu_inc(repr->stats->tx_drops);
		return;
	}

	stats = this_cpu_ptr(repr->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->tx_packets++;
	stats->tx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}
void nfp_repr_inc_rx_stats(struct net_device *netdev, unsigned int len)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_repr_pcpu_stats *stats;

	stats = this_cpu_ptr(repr->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}
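/* Read physical port statistics straight from the MAC stats area mapped at
 * port->eth_stats.  readq() is available even on 32-bit builds thanks to
 * the <linux/io-64-nonatomic-hi-lo.h> include above.
 */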
static void
nfp_repr_phy_port_get_stats64(struct nfp_port *port,
			      struct rtnl_link_stats64 *stats)
{
	u8 __iomem *mem = port->eth_stats;

	stats->tx_packets = readq(mem + NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK);
	stats->tx_bytes = readq(mem + NFP_MAC_STATS_TX_OUT_OCTETS);
	stats->tx_dropped = readq(mem + NFP_MAC_STATS_TX_OUT_ERRORS);

	stats->rx_packets = readq(mem + NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK);
	stats->rx_bytes = readq(mem + NFP_MAC_STATS_RX_IN_OCTETS);
	stats->rx_dropped = readq(mem + NFP_MAC_STATS_RX_IN_ERRORS);
}
static void
nfp_repr_vnic_get_stats64(struct nfp_port *port,
			  struct rtnl_link_stats64 *stats)
{
	/* TX and RX stats are flipped as we are returning the stats as seen
	 * at the switch port corresponding to the VF.
	 */
	stats->tx_packets = readq(port->vnic + NFP_NET_CFG_STATS_RX_FRAMES);
	stats->tx_bytes = readq(port->vnic + NFP_NET_CFG_STATS_RX_OCTETS);
	stats->tx_dropped = readq(port->vnic + NFP_NET_CFG_STATS_RX_DISCARDS);

	stats->rx_packets = readq(port->vnic + NFP_NET_CFG_STATS_TX_FRAMES);
	stats->rx_bytes = readq(port->vnic + NFP_NET_CFG_STATS_TX_OCTETS);
	stats->rx_dropped = readq(port->vnic + NFP_NET_CFG_STATS_TX_DISCARDS);
}
static void
nfp_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct nfp_repr *repr = netdev_priv(netdev);

	if (WARN_ON(!repr->port))
		return;

	switch (repr->port->type) {
	case NFP_PORT_PHYS_PORT:
		if (!__nfp_port_get_eth_port(repr->port))
			break;
		nfp_repr_phy_port_get_stats64(repr->port, stats);
		break;
	case NFP_PORT_PF_PORT:
	case NFP_PORT_VF_PORT:
		nfp_repr_vnic_get_stats64(repr->port, stats);
		break;
	default:
		break;
	}
}
static bool
nfp_repr_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}
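/* Sum the per-cpu software stats into @stats.  Each per-cpu snapshot is
 * read under the u64_stats seqcount so 64-bit counters stay consistent
 * even on 32-bit architectures; the fetch is retried if a writer updated
 * the counters mid-read.
 */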
static int
nfp_repr_get_host_stats64(const struct net_device *netdev,
			  struct rtnl_link_stats64 *stats)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	int i;

	for_each_possible_cpu(i) {
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		struct nfp_repr_pcpu_stats *repr_stats;
		unsigned int start;

		repr_stats = per_cpu_ptr(repr->stats, i);
		do {
			start = u64_stats_fetch_begin_irq(&repr_stats->syncp);
			tbytes = repr_stats->tx_bytes;
			tpkts = repr_stats->tx_packets;
			tdrops = repr_stats->tx_drops;
			rbytes = repr_stats->rx_bytes;
			rpkts = repr_stats->rx_packets;
		} while (u64_stats_fetch_retry_irq(&repr_stats->syncp, start));

		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}

	return 0;
}
static int
nfp_repr_get_offload_stats(int attr_id, const struct net_device *dev,
			   void *stats)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return nfp_repr_get_host_stats64(dev, stats);
	}

	return -EINVAL;
}
static int nfp_repr_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	int err;

	err = nfp_app_check_mtu(repr->app, netdev, new_mtu);
	if (err)
		return err;

	err = nfp_app_repr_change_mtu(repr->app, netdev, new_mtu);
	if (err)
		return err;

	netdev->mtu = new_mtu;

	return 0;
}
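/* Representor transmit: attach the pre-allocated METADATA_HW_PORT_MUX dst,
 * which carries the firmware port ID, and hand the skb to the lower PF
 * netdev.  The lower device's xmit path uses that metadata to steer the
 * frame to the right switch port.
 */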
static netdev_tx_t
nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	unsigned int len = skb->len;
	int ret;

	skb_dst_drop(skb);
	dst_hold((struct dst_entry *)repr->dst);
	skb_dst_set(skb, (struct dst_entry *)repr->dst);
	skb->dev = repr->dst->u.port_info.lower_dev;

	ret = dev_queue_xmit(skb);
	nfp_repr_inc_tx_stats(netdev, len, ret);

	return NETDEV_TX_OK;
}
static int nfp_repr_stop(struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	int err;

	err = nfp_app_repr_stop(repr->app, repr);
	if (err)
		return err;

	nfp_port_configure(netdev, false);
	return 0;
}
static int nfp_repr_open(struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	int err;

	err = nfp_port_configure(netdev, true);
	if (err)
		return err;

	err = nfp_app_repr_open(repr->app, repr);
	if (err)
		goto err_port_disable;

	return 0;

err_port_disable:
	nfp_port_configure(netdev, false);
	return err;
}
static netdev_features_t
nfp_repr_fix_features(struct net_device *netdev, netdev_features_t features)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	netdev_features_t old_features = features;
	netdev_features_t lower_features;
	struct net_device *lower_dev;

	lower_dev = repr->dst->u.port_info.lower_dev;

	lower_features = lower_dev->features;
	if (lower_features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		lower_features |= NETIF_F_HW_CSUM;

	features = netdev_intersect_features(features, lower_features);
	features |= old_features & (NETIF_F_SOFT_FEATURES | NETIF_F_HW_TC);
	features |= NETIF_F_LLTX;

	return features;
}
const struct net_device_ops nfp_repr_netdev_ops = {
	.ndo_init		= nfp_app_ndo_init,
	.ndo_uninit		= nfp_app_ndo_uninit,
	.ndo_open		= nfp_repr_open,
	.ndo_stop		= nfp_repr_stop,
	.ndo_start_xmit		= nfp_repr_xmit,
	.ndo_change_mtu		= nfp_repr_change_mtu,
	.ndo_get_stats64	= nfp_repr_get_stats64,
	.ndo_has_offload_stats	= nfp_repr_has_offload_stats,
	.ndo_get_offload_stats	= nfp_repr_get_offload_stats,
	.ndo_get_phys_port_name	= nfp_port_get_phys_port_name,
	.ndo_setup_tc		= nfp_port_setup_tc,
	.ndo_set_vf_mac		= nfp_app_set_vf_mac,
	.ndo_set_vf_vlan	= nfp_app_set_vf_vlan,
	.ndo_set_vf_spoofchk	= nfp_app_set_vf_spoofchk,
	.ndo_set_vf_trust	= nfp_app_set_vf_trust,
	.ndo_get_vf_config	= nfp_app_get_vf_config,
	.ndo_set_vf_link_state	= nfp_app_set_vf_link_state,
	.ndo_fix_features	= nfp_repr_fix_features,
	.ndo_set_features	= nfp_port_set_features,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_get_port_parent_id	= nfp_port_get_port_parent_id,
	.ndo_get_devlink_port	= nfp_devlink_get_devlink_port,
};
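/* Mirror the lower device's GSO limits onto the representor and let the
 * core re-evaluate features.  Called when the lower PF netdev's
 * capabilities change; a mismatched @lower is ignored.
 */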
void
nfp_repr_transfer_features(struct net_device *netdev, struct net_device *lower)
{
	struct nfp_repr *repr = netdev_priv(netdev);

	if (repr->dst->u.port_info.lower_dev != lower)
		return;

	netdev->gso_max_size = lower->gso_max_size;
	netdev->gso_max_segs = lower->gso_max_segs;

	netdev_update_features(netdev);
}
static void nfp_repr_clean(struct nfp_repr *repr)
{
	unregister_netdev(repr->netdev);
	nfp_app_repr_clean(repr->app, repr->netdev);
	dst_release((struct dst_entry *)repr->dst);
	nfp_port_free(repr->port);
}
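/* Representors transmit through the lower PF netdev, so their TX queue
 * locks nest inside the lower device's.  Give them a lockdep class of
 * their own to avoid false-positive lock recursion reports.
 */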
static struct lock_class_key nfp_repr_netdev_xmit_lock_key;

static void nfp_repr_set_lockdep_class_one(struct net_device *dev,
					   struct netdev_queue *txq,
					   void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock, &nfp_repr_netdev_xmit_lock_key);
}

static void nfp_repr_set_lockdep_class(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, nfp_repr_set_lockdep_class_one, NULL);
}
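/**
 * nfp_repr_init() - initialize and register a representor netdev
 * @app:	app to which the representor belongs
 * @netdev:	representor netdev, allocated with nfp_repr_alloc_mqs()
 * @cmsg_port_id:	firmware port ID used to mux traffic on the PF
 * @port:	nfp_port backing this representor
 * @pf_netdev:	lower PF netdev the representor transmits through
 *
 * Sets up the metadata dst, derives the feature set from the representor
 * capabilities advertised in the PF vNIC's TLV caps, and registers the
 * netdev.  Return: 0 on success, negative errno on failure.
 */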
int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
		  u32 cmsg_port_id, struct nfp_port *port,
		  struct net_device *pf_netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_net *nn = netdev_priv(pf_netdev);
	u32 repr_cap = nn->tlv_caps.repr_cap;
	int err;

	nfp_repr_set_lockdep_class(netdev);

	repr->port = port;
	repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL);
	if (!repr->dst)
		return -ENOMEM;
	repr->dst->u.port_info.port_id = cmsg_port_id;
	repr->dst->u.port_info.lower_dev = pf_netdev;

	netdev->netdev_ops = &nfp_repr_netdev_ops;
	netdev->ethtool_ops = &nfp_port_ethtool_ops;

	netdev->max_mtu = pf_netdev->max_mtu;

	/* Set features the lower device can support with representors */
	if (repr_cap & NFP_NET_CFG_CTRL_LIVE_ADDR)
		netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	netdev->hw_features = NETIF_F_HIGHDMA;
	if (repr_cap & NFP_NET_CFG_CTRL_RXCSUM_ANY)
		netdev->hw_features |= NETIF_F_RXCSUM;
	if (repr_cap & NFP_NET_CFG_CTRL_TXCSUM)
		netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (repr_cap & NFP_NET_CFG_CTRL_GATHER)
		netdev->hw_features |= NETIF_F_SG;
	if ((repr_cap & NFP_NET_CFG_CTRL_LSO && nn->fw_ver.major > 2) ||
	    repr_cap & NFP_NET_CFG_CTRL_LSO2)
		netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
	if (repr_cap & NFP_NET_CFG_CTRL_RSS_ANY)
		netdev->hw_features |= NETIF_F_RXHASH;
	if (repr_cap & NFP_NET_CFG_CTRL_VXLAN) {
		if (repr_cap & NFP_NET_CFG_CTRL_LSO)
			netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	}
	if (repr_cap & NFP_NET_CFG_CTRL_NVGRE) {
		if (repr_cap & NFP_NET_CFG_CTRL_LSO)
			netdev->hw_features |= NETIF_F_GSO_GRE;
	}
	if (repr_cap & (NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE))
		netdev->hw_enc_features = netdev->hw_features;

	netdev->vlan_features = netdev->hw_features;

	if (repr_cap & NFP_NET_CFG_CTRL_RXVLAN)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	if (repr_cap & NFP_NET_CFG_CTRL_TXVLAN) {
		if (repr_cap & NFP_NET_CFG_CTRL_LSO2)
			netdev_warn(netdev, "Device advertises both TSO2 and TXVLAN. Refusing to enable TXVLAN.\n");
		else
			netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	}
	if (repr_cap & NFP_NET_CFG_CTRL_CTAG_FILTER)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;

	netdev->features = netdev->hw_features;

	/* Advertise but disable TSO by default. */
	netdev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
	netdev->gso_max_segs = NFP_NET_LSO_MAX_SEGS;

	netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
	netdev->features |= NETIF_F_LLTX;

	if (nfp_app_has_tc(app)) {
		netdev->features |= NETIF_F_HW_TC;
		netdev->hw_features |= NETIF_F_HW_TC;
	}

	err = nfp_app_repr_init(app, netdev);
	if (err)
		goto err_clean;

	err = register_netdev(netdev);
	if (err)
		goto err_repr_clean;

	return 0;

err_repr_clean:
	nfp_app_repr_clean(app, netdev);
err_clean:
	dst_release((struct dst_entry *)repr->dst);
	return err;
}
static void __nfp_repr_free(struct nfp_repr *repr)
{
	free_percpu(repr->stats);
	free_netdev(repr->netdev);
}
void nfp_repr_free(struct net_device *netdev)
{
	__nfp_repr_free(netdev_priv(netdev));
}
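/* Allocate a representor netdev with the requested queue counts along with
 * its per-cpu stats.  The caller still has to call nfp_repr_init() to make
 * the device functional, or nfp_repr_free() to undo the allocation.
 */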
struct net_device *
nfp_repr_alloc_mqs(struct nfp_app *app, unsigned int txqs, unsigned int rxqs)
{
	struct net_device *netdev;
	struct nfp_repr *repr;

	netdev = alloc_etherdev_mqs(sizeof(*repr), txqs, rxqs);
	if (!netdev)
		return NULL;

	netif_carrier_off(netdev);

	repr = netdev_priv(netdev);
	repr->netdev = netdev;
	repr->app = app;

	repr->stats = netdev_alloc_pcpu_stats(struct nfp_repr_pcpu_stats);
	if (!repr->stats)
		goto err_free_netdev;

	return netdev;

err_free_netdev:
	free_netdev(netdev);
	return NULL;
}
void nfp_repr_clean_and_free(struct nfp_repr *repr)
{
	nfp_info(repr->app->cpp, "Destroying Representor(%s)\n",
		 repr->netdev->name);
	nfp_repr_clean(repr);
	__nfp_repr_free(repr);
}
void nfp_reprs_clean_and_free(struct nfp_app *app, struct nfp_reprs *reprs)
{
	struct net_device *netdev;
	unsigned int i;

	for (i = 0; i < reprs->num_reprs; i++) {
		netdev = nfp_repr_get_locked(app, reprs, i);
		if (netdev)
			nfp_repr_clean_and_free(netdev_priv(netdev));
	}

	kfree(reprs);
}
void
nfp_reprs_clean_and_free_by_type(struct nfp_app *app, enum nfp_repr_type type)
{
	struct net_device *netdev;
	struct nfp_reprs *reprs;
	int i;

	reprs = rcu_dereference_protected(app->reprs[type],
					  lockdep_is_held(&app->pf->lock));
	if (!reprs)
		return;

	/* Preclean must happen before we remove the reprs reference from the
	 * app below.
	 */
	for (i = 0; i < reprs->num_reprs; i++) {
		netdev = nfp_repr_get_locked(app, reprs, i);
		if (netdev)
			nfp_app_repr_preclean(app, netdev);
	}

	reprs = nfp_app_reprs_set(app, type, NULL);

	synchronize_rcu();
	nfp_reprs_clean_and_free(app, reprs);
}
struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs)
{
	struct nfp_reprs *reprs;

	reprs = kzalloc(sizeof(*reprs) +
			num_reprs * sizeof(struct net_device *), GFP_KERNEL);
	if (!reprs)
		return NULL;
	reprs->num_reprs = num_reprs;

	return reprs;
}
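/* Remove physical port representors whose low-level port has become
 * invalid (e.g. after the port table changed underneath us), leaving the
 * rest of the reprs table intact.
 */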
int nfp_reprs_resync_phys_ports(struct nfp_app *app)
{
	struct net_device *netdev;
	struct nfp_reprs *reprs;
	struct nfp_repr *repr;
	int i;

	reprs = nfp_reprs_get_locked(app, NFP_REPR_TYPE_PHYS_PORT);
	if (!reprs)
		return 0;

	for (i = 0; i < reprs->num_reprs; i++) {
		netdev = nfp_repr_get_locked(app, reprs, i);
		if (!netdev)
			continue;

		repr = netdev_priv(netdev);
		if (repr->port->type != NFP_PORT_INVALID)
			continue;

		nfp_app_repr_preclean(app, netdev);
		rtnl_lock();
		rcu_assign_pointer(reprs->reprs[i], NULL);
		rtnl_unlock();
		synchronize_rcu();
		nfp_repr_clean(repr);
	}

	return 0;
}