// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/etherdevice.h>
#include <linux/io-64-nonatomic-hi-lo.h>
#include <linux/lockdep.h>
#include <net/dst_metadata.h>

#include "nfpcore/nfp_cpp.h"
#include "nfpcore/nfp_nsp.h"
#include "nfp_app.h"
#include "nfp_main.h"
#include "nfp_net.h"
#include "nfp_net_ctrl.h"
#include "nfp_net_repr.h"
#include "nfp_net_sriov.h"
#include "nfp_port.h"

struct net_device *
nfp_repr_get_locked(struct nfp_app *app, struct nfp_reprs *set, unsigned int id)
{
	return rcu_dereference_protected(set->reprs[id],
					 nfp_app_is_locked(app));
}

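/* Per-representor counters live in per-CPU structures protected by a
 * u64_stats_sync, so 64-bit counters can be updated from the datapath
 * and read consistently on 32-bit architectures without atomics.
 */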
static void
nfp_repr_inc_tx_stats(struct net_device *netdev, unsigned int len,
		      int tx_status)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_repr_pcpu_stats *stats;

	if (unlikely(tx_status != NET_XMIT_SUCCESS &&
		     tx_status != NET_XMIT_CN)) {
		this_cpu_inc(repr->stats->tx_drops);
		return;
	}

	stats = this_cpu_ptr(repr->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->tx_packets++;
	stats->tx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

void nfp_repr_inc_rx_stats(struct net_device *netdev, unsigned int len)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_repr_pcpu_stats *stats;

	stats = this_cpu_ptr(repr->stats);
	u64_stats_update_begin(&stats->syncp);
	stats->rx_packets++;
	stats->rx_bytes += len;
	u64_stats_update_end(&stats->syncp);
}

static void
nfp_repr_phy_port_get_stats64(struct nfp_port *port,
			      struct rtnl_link_stats64 *stats)
{
	u8 __iomem *mem = port->eth_stats;

	stats->tx_packets = readq(mem + NFP_MAC_STATS_TX_FRAMES_TRANSMITTED_OK);
	stats->tx_bytes = readq(mem + NFP_MAC_STATS_TX_OUT_OCTETS);
	stats->tx_dropped = readq(mem + NFP_MAC_STATS_TX_OUT_ERRORS);

	stats->rx_packets = readq(mem + NFP_MAC_STATS_RX_FRAMES_RECEIVED_OK);
	stats->rx_bytes = readq(mem + NFP_MAC_STATS_RX_IN_OCTETS);
	stats->rx_dropped = readq(mem + NFP_MAC_STATS_RX_IN_ERRORS);
}

static void
nfp_repr_vnic_get_stats64(struct nfp_port *port,
			  struct rtnl_link_stats64 *stats)
{
	/* TX and RX stats are flipped as we are returning the stats as seen
	 * at the switch port corresponding to the VF.
	 */
	stats->tx_packets = readq(port->vnic + NFP_NET_CFG_STATS_RX_FRAMES);
	stats->tx_bytes = readq(port->vnic + NFP_NET_CFG_STATS_RX_OCTETS);
	stats->tx_dropped = readq(port->vnic + NFP_NET_CFG_STATS_RX_DISCARDS);

	stats->rx_packets = readq(port->vnic + NFP_NET_CFG_STATS_TX_FRAMES);
	stats->rx_bytes = readq(port->vnic + NFP_NET_CFG_STATS_TX_OCTETS);
	stats->rx_dropped = readq(port->vnic + NFP_NET_CFG_STATS_TX_DISCARDS);
}

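/* Which counters back a representor depends on the port type: physical
 * ports read the MAC statistics BAR, PF/VF ports read the counters of
 * the corresponding vNIC.
 */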
static void
nfp_repr_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
{
	struct nfp_repr *repr = netdev_priv(netdev);

	if (WARN_ON(!repr->port))
		return;

	switch (repr->port->type) {
	case NFP_PORT_PHYS_PORT:
		if (!__nfp_port_get_eth_port(repr->port))
			break;
		nfp_repr_phy_port_get_stats64(repr->port, stats);
		break;
	case NFP_PORT_PF_PORT:
	case NFP_PORT_VF_PORT:
		nfp_repr_vnic_get_stats64(repr->port, stats);
		break;
	default:
		break;
	}
}

static bool
nfp_repr_has_offload_stats(const struct net_device *dev, int attr_id)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return true;
	}

	return false;
}

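/* Aggregate the per-CPU counters; the fetch_begin/fetch_retry pair
 * re-reads a CPU's snapshot if a writer updated it mid-read.
 */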
static int
nfp_repr_get_host_stats64(const struct net_device *netdev,
			  struct rtnl_link_stats64 *stats)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	int i;

	for_each_possible_cpu(i) {
		u64 tbytes, tpkts, tdrops, rbytes, rpkts;
		struct nfp_repr_pcpu_stats *repr_stats;
		unsigned int start;

		repr_stats = per_cpu_ptr(repr->stats, i);
		do {
			start = u64_stats_fetch_begin(&repr_stats->syncp);
			tbytes = repr_stats->tx_bytes;
			tpkts = repr_stats->tx_packets;
			tdrops = repr_stats->tx_drops;
			rbytes = repr_stats->rx_bytes;
			rpkts = repr_stats->rx_packets;
		} while (u64_stats_fetch_retry(&repr_stats->syncp, start));

		stats->tx_bytes += tbytes;
		stats->tx_packets += tpkts;
		stats->tx_dropped += tdrops;
		stats->rx_bytes += rbytes;
		stats->rx_packets += rpkts;
	}

	return 0;
}

static int
nfp_repr_get_offload_stats(int attr_id, const struct net_device *dev,
			   void *stats)
{
	switch (attr_id) {
	case IFLA_OFFLOAD_XSTATS_CPU_HIT:
		return nfp_repr_get_host_stats64(dev, stats);
	}

	return -EINVAL;
}

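/* MTU changes are validated by the app first, then handed to the app to
 * update the firmware, and only committed to the netdev if both succeed.
 */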
static int nfp_repr_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	int err;

	err = nfp_app_check_mtu(repr->app, netdev, new_mtu);
	if (err)
		return err;

	err = nfp_app_repr_change_mtu(repr->app, netdev, new_mtu);
	if (err)
		return err;

	WRITE_ONCE(netdev->mtu, new_mtu);

	return 0;
}

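/* Representors do not transmit directly. Each packet gets the repr's
 * metadata dst, which carries the firmware port ID, and is then queued
 * on the lower PF device for the actual transmission.
 */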
static netdev_tx_t
nfp_repr_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	unsigned int len = skb->len;
	int ret;

	skb_dst_drop(skb);
	dst_hold((struct dst_entry *)repr->dst);
	skb_dst_set(skb, (struct dst_entry *)repr->dst);
	skb->dev = repr->dst->u.port_info.lower_dev;

	ret = dev_queue_xmit(skb);
	nfp_repr_inc_tx_stats(netdev, len, ret);

	return NETDEV_TX_OK;
}

static int nfp_repr_stop(struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	int err;

	err = nfp_app_repr_stop(repr->app, repr);
	if (err)
		return err;

	nfp_port_configure(netdev, false);
	return 0;
}

static int nfp_repr_open(struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	int err;

	err = nfp_port_configure(netdev, true);
	if (err)
		return err;

	err = nfp_app_repr_open(repr->app, repr);
	if (err)
		goto err_port_disable;

	return 0;

err_port_disable:
	nfp_port_configure(netdev, false);
	return err;
}

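/* A representor can only offer what its lower device supports; intersect
 * with the lower device's features while preserving software features
 * and TC offload, which do not depend on the lower device.
 */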
static netdev_features_t
nfp_repr_fix_features(struct net_device *netdev, netdev_features_t features)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	netdev_features_t old_features = features;
	netdev_features_t lower_features;
	struct net_device *lower_dev;

	lower_dev = repr->dst->u.port_info.lower_dev;

	lower_features = lower_dev->features;
	if (lower_features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))
		lower_features |= NETIF_F_HW_CSUM;

	features = netdev_intersect_features(features, lower_features);
	features |= old_features & (NETIF_F_SOFT_FEATURES | NETIF_F_HW_TC);

	return features;
}

const struct net_device_ops nfp_repr_netdev_ops = {
	.ndo_init		= nfp_app_ndo_init,
	.ndo_uninit		= nfp_app_ndo_uninit,
	.ndo_open		= nfp_repr_open,
	.ndo_stop		= nfp_repr_stop,
	.ndo_start_xmit		= nfp_repr_xmit,
	.ndo_change_mtu		= nfp_repr_change_mtu,
	.ndo_get_stats64	= nfp_repr_get_stats64,
	.ndo_has_offload_stats	= nfp_repr_has_offload_stats,
	.ndo_get_offload_stats	= nfp_repr_get_offload_stats,
	.ndo_get_phys_port_name	= nfp_port_get_phys_port_name,
	.ndo_setup_tc		= nfp_port_setup_tc,
	.ndo_set_vf_mac		= nfp_app_set_vf_mac,
	.ndo_set_vf_vlan	= nfp_app_set_vf_vlan,
	.ndo_set_vf_spoofchk	= nfp_app_set_vf_spoofchk,
	.ndo_set_vf_trust	= nfp_app_set_vf_trust,
	.ndo_get_vf_config	= nfp_app_get_vf_config,
	.ndo_set_vf_link_state	= nfp_app_set_vf_link_state,
	.ndo_fix_features	= nfp_repr_fix_features,
	.ndo_set_features	= nfp_port_set_features,
	.ndo_set_mac_address	= eth_mac_addr,
	.ndo_get_port_parent_id	= nfp_port_get_port_parent_id,
};

void
nfp_repr_transfer_features(struct net_device *netdev, struct net_device *lower)
{
	struct nfp_repr *repr = netdev_priv(netdev);

	if (repr->dst->u.port_info.lower_dev != lower)
		return;

	netif_inherit_tso_max(netdev, lower);

	netdev_update_features(netdev);
}

static void nfp_repr_clean(struct nfp_repr *repr)
{
	unregister_netdev(repr->netdev);
	nfp_app_repr_clean(repr->app, repr->netdev);
	dst_release((struct dst_entry *)repr->dst);
	nfp_port_free(repr->port);
}

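/* Representor TX nests inside the lower device's transmit path; giving
 * repr TX queue locks their own lockdep class prevents lockdep from
 * flagging that nesting as recursive locking.
 */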
static struct lock_class_key nfp_repr_netdev_xmit_lock_key;

static void nfp_repr_set_lockdep_class_one(struct net_device *dev,
					   struct netdev_queue *txq,
					   void *_unused)
{
	lockdep_set_class(&txq->_xmit_lock, &nfp_repr_netdev_xmit_lock_key);
}

static void nfp_repr_set_lockdep_class(struct net_device *dev)
{
	netdev_for_each_tx_queue(dev, nfp_repr_set_lockdep_class_one, NULL);
}

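/* Initialize a representor netdev prior to registration. The advertised
 * feature set is derived from the PF vNIC's repr capability TLV
 * (repr_cap), so the repr never claims an offload the lower device
 * cannot deliver.
 */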
int nfp_repr_init(struct nfp_app *app, struct net_device *netdev,
		  u32 cmsg_port_id, struct nfp_port *port,
		  struct net_device *pf_netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_net *nn = netdev_priv(pf_netdev);
	u32 repr_cap = nn->tlv_caps.repr_cap;
	int err;

	nfp_repr_set_lockdep_class(netdev);

	repr->port = port;
	repr->dst = metadata_dst_alloc(0, METADATA_HW_PORT_MUX, GFP_KERNEL);
	if (!repr->dst)
		return -ENOMEM;
	repr->dst->u.port_info.port_id = cmsg_port_id;
	repr->dst->u.port_info.lower_dev = pf_netdev;

	netdev->netdev_ops = &nfp_repr_netdev_ops;
	netdev->ethtool_ops = &nfp_port_ethtool_ops;

	netdev->max_mtu = pf_netdev->max_mtu;

	/* Set features the lower device can support with representors */
	if (repr_cap & NFP_NET_CFG_CTRL_LIVE_ADDR)
		netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	netdev->hw_features = NETIF_F_HIGHDMA;
	if (repr_cap & NFP_NET_CFG_CTRL_RXCSUM_ANY)
		netdev->hw_features |= NETIF_F_RXCSUM;
	if (repr_cap & NFP_NET_CFG_CTRL_TXCSUM)
		netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (repr_cap & NFP_NET_CFG_CTRL_GATHER)
		netdev->hw_features |= NETIF_F_SG;
	if ((repr_cap & NFP_NET_CFG_CTRL_LSO && nn->fw_ver.major > 2) ||
	    repr_cap & NFP_NET_CFG_CTRL_LSO2)
		netdev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
	if (repr_cap & NFP_NET_CFG_CTRL_RSS_ANY)
		netdev->hw_features |= NETIF_F_RXHASH;
	if (repr_cap & NFP_NET_CFG_CTRL_VXLAN) {
		if (repr_cap & NFP_NET_CFG_CTRL_LSO)
			netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
	}
	if (repr_cap & NFP_NET_CFG_CTRL_NVGRE) {
		if (repr_cap & NFP_NET_CFG_CTRL_LSO)
			netdev->hw_features |= NETIF_F_GSO_GRE;
	}
	if (repr_cap & (NFP_NET_CFG_CTRL_VXLAN | NFP_NET_CFG_CTRL_NVGRE))
		netdev->hw_enc_features = netdev->hw_features;

	netdev->vlan_features = netdev->hw_features;

	if (repr_cap & NFP_NET_CFG_CTRL_RXVLAN_ANY)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
	if (repr_cap & NFP_NET_CFG_CTRL_TXVLAN_ANY) {
		if (repr_cap & NFP_NET_CFG_CTRL_LSO2)
			netdev_warn(netdev, "Device advertises both TSO2 and TXVLAN. Refusing to enable TXVLAN.\n");
		else
			netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
	}
	if (repr_cap & NFP_NET_CFG_CTRL_CTAG_FILTER)
		netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
	if (repr_cap & NFP_NET_CFG_CTRL_RXQINQ)
		netdev->hw_features |= NETIF_F_HW_VLAN_STAG_RX;

	netdev->features = netdev->hw_features;

	/* C-Tag strip and S-Tag strip can't be supported simultaneously,
	 * so enable C-Tag strip and disable S-Tag strip by default.
	 */
	netdev->features &= ~NETIF_F_HW_VLAN_STAG_RX;
	netif_set_tso_max_segs(netdev, NFP_NET_LSO_MAX_SEGS);

	netdev->priv_flags |= IFF_NO_QUEUE | IFF_DISABLE_NETPOLL;
	netdev->lltx = true;

	if (nfp_app_has_tc(app)) {
		netdev->features |= NETIF_F_HW_TC;
		netdev->hw_features |= NETIF_F_HW_TC;
	}

	err = nfp_app_repr_init(app, netdev);
	if (err)
		goto err_clean;

	err = register_netdev(netdev);
	if (err)
		goto err_repr_clean;

	return 0;

err_repr_clean:
	nfp_app_repr_clean(app, netdev);
err_clean:
	dst_release((struct dst_entry *)repr->dst);
	return err;
}

static void __nfp_repr_free(struct nfp_repr *repr)
{
	free_percpu(repr->stats);
	free_netdev(repr->netdev);
}

void nfp_repr_free(struct net_device *netdev)
{
	__nfp_repr_free(netdev_priv(netdev));
}

struct net_device *
nfp_repr_alloc_mqs(struct nfp_app *app, unsigned int txqs, unsigned int rxqs)
{
	struct net_device *netdev;
	struct nfp_repr *repr;

	netdev = alloc_etherdev_mqs(sizeof(*repr), txqs, rxqs);
	if (!netdev)
		return NULL;

	netif_carrier_off(netdev);

	repr = netdev_priv(netdev);
	repr->netdev = netdev;
	repr->app = app;

	repr->stats = netdev_alloc_pcpu_stats(struct nfp_repr_pcpu_stats);
	if (!repr->stats)
		goto err_free_netdev;

	return netdev;

err_free_netdev:
	free_netdev(netdev);
	return NULL;
}

void nfp_repr_clean_and_free(struct nfp_repr *repr)
{
	nfp_info(repr->app->cpp, "Destroying Representor(%s)\n",
		 repr->netdev->name);
	nfp_repr_clean(repr);
	__nfp_repr_free(repr);
}

void nfp_reprs_clean_and_free(struct nfp_app *app, struct nfp_reprs *reprs)
{
	struct net_device *netdev;
	unsigned int i;

	for (i = 0; i < reprs->num_reprs; i++) {
		netdev = nfp_repr_get_locked(app, reprs, i);
		if (netdev)
			nfp_repr_clean_and_free(netdev_priv(netdev));
	}

	kfree(reprs);
}

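/* The reprs array is RCU-protected: clear the app's pointer first, wait
 * for a grace period so no datapath reader still sees the array, and
 * only then free the representors.
 */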
void
nfp_reprs_clean_and_free_by_type(struct nfp_app *app, enum nfp_repr_type type)
{
	struct net_device *netdev;
	struct nfp_reprs *reprs;
	int i;

	reprs = rcu_dereference_protected(app->reprs[type],
					  nfp_app_is_locked(app));
	if (!reprs)
		return;

	/* Preclean must happen before we remove the reprs reference from the
	 * app below.
	 */
	for (i = 0; i < reprs->num_reprs; i++) {
		netdev = nfp_repr_get_locked(app, reprs, i);
		if (netdev)
			nfp_app_repr_preclean(app, netdev);
	}

	reprs = nfp_app_reprs_set(app, type, NULL);

	synchronize_rcu();
	nfp_reprs_clean_and_free(app, reprs);
}

struct nfp_reprs *nfp_reprs_alloc(unsigned int num_reprs)
{
	struct nfp_reprs *reprs;

	reprs = kzalloc(struct_size(reprs, reprs, num_reprs), GFP_KERNEL);
	if (!reprs)
		return NULL;
	reprs->num_reprs = num_reprs;

	return reprs;
}

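/* Tear down representors whose physical port has disappeared (port type
 * NFP_PORT_INVALID), e.g. after a port reconfiguration, leaving the
 * rest of the array in place.
 */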
int nfp_reprs_resync_phys_ports(struct nfp_app *app)
{
	struct net_device *netdev;
	struct nfp_reprs *reprs;
	struct nfp_repr *repr;
	int i;

	reprs = nfp_reprs_get_locked(app, NFP_REPR_TYPE_PHYS_PORT);
	if (!reprs)
		return 0;

	for (i = 0; i < reprs->num_reprs; i++) {
		netdev = nfp_repr_get_locked(app, reprs, i);
		if (!netdev)
			continue;

		repr = netdev_priv(netdev);
		if (repr->port->type != NFP_PORT_INVALID)
			continue;

		nfp_app_repr_preclean(app, netdev);
		rtnl_lock();
		rcu_assign_pointer(reprs->reprs[i], NULL);
		rtnl_unlock();
		synchronize_rcu();
		nfp_repr_clean(repr);
	}

	return 0;
}
);