// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/etherdevice.h>
#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/devlink.h>
#include <net/dst_metadata.h>

#include "cmsg.h"
#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nffw.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_net_repr.h"
#include "../nfp_port.h"

#define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL

#define NFP_MIN_INT_PORT_ID	1
#define NFP_MAX_INT_PORT_ID	256

static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn)
{
	return "FLOWER";
}

static enum devlink_eswitch_mode eswitch_mode_get(struct nfp_app *app)
{
	return DEVLINK_ESWITCH_MODE_SWITCHDEV;
}

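/* Internal ports (e.g. tunnel decap netdevs) are tracked in an IDR so the
 * firmware can address them by a small integer id; id 0 is reserved to
 * mean "no id allocated".
 */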
static int
nfp_flower_lookup_internal_port_id(struct nfp_flower_priv *priv,
				   struct net_device *netdev)
{
	struct net_device *entry;
	int i, id = 0;

	rcu_read_lock();
	idr_for_each_entry(&priv->internal_ports.port_ids, entry, i)
		if (entry == netdev) {
			id = i;
			break;
		}
	rcu_read_unlock();

	return id;
}

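/* Return the existing internal port id for a netdev, or allocate one in
 * [NFP_MIN_INT_PORT_ID, NFP_MAX_INT_PORT_ID). The spinlock serializes
 * writers; readers of the IDR go through RCU.
 */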
static int
nfp_flower_get_internal_port_id(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	int id;

	id = nfp_flower_lookup_internal_port_id(priv, netdev);
	if (id > 0)
		return id;

	idr_preload(GFP_ATOMIC);
	spin_lock_bh(&priv->internal_ports.lock);
	id = idr_alloc(&priv->internal_ports.port_ids, netdev,
		       NFP_MIN_INT_PORT_ID, NFP_MAX_INT_PORT_ID, GFP_ATOMIC);
	spin_unlock_bh(&priv->internal_ports.lock);
	idr_preload_end();

	return id;
}

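/* Translate any offloadable netdev into the firmware's 32-bit port id:
 * reprs carry their own id, internal ports are mapped through the IDR
 * above, and bonds become LAG output groups when the firmware supports
 * NFP_FL_FEATS_TUNNEL_NEIGH_LAG. Returns 0 for netdevs that cannot be
 * expressed as an NFP port.
 */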
u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
				       struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	int ext_port;
	int gid;

	if (nfp_netdev_is_nfp_repr(netdev)) {
		return nfp_repr_get_port_id(netdev);
	} else if (nfp_flower_internal_port_can_offload(app, netdev)) {
		ext_port = nfp_flower_get_internal_port_id(app, netdev);
		if (ext_port < 0)
			return 0;

		return nfp_flower_internal_port_get_port_id(ext_port);
	} else if (netif_is_lag_master(netdev) &&
		   priv->flower_ext_feats & NFP_FL_FEATS_TUNNEL_NEIGH_LAG) {
		gid = nfp_flower_lag_get_output_id(app, netdev);
		if (gid < 0)
			return 0;

		return (NFP_FL_LAG_OUT | gid);
	}

	return 0;
}

static struct net_device *
nfp_flower_get_netdev_from_internal_port_id(struct nfp_app *app, int port_id)
{
	struct nfp_flower_priv *priv = app->priv;
	struct net_device *netdev;

	rcu_read_lock();
	netdev = idr_find(&priv->internal_ports.port_ids, port_id);
	rcu_read_unlock();

	return netdev;
}

static void
nfp_flower_free_internal_port_id(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	int id;

	id = nfp_flower_lookup_internal_port_id(priv, netdev);
	if (!id)
		return;

	spin_lock_bh(&priv->internal_ports.lock);
	idr_remove(&priv->internal_ports.port_ids, id);
	spin_unlock_bh(&priv->internal_ports.lock);
}

static int
nfp_flower_internal_port_event_handler(struct nfp_app *app,
				       struct net_device *netdev,
				       unsigned long event)
{
	if (event == NETDEV_UNREGISTER &&
	    nfp_flower_internal_port_can_offload(app, netdev))
		nfp_flower_free_internal_port_id(app, netdev);

	return NOTIFY_OK;
}

static void nfp_flower_internal_port_init(struct nfp_flower_priv *priv)
{
	spin_lock_init(&priv->internal_ports.lock);
	idr_init(&priv->internal_ports.port_ids);
}

static void nfp_flower_internal_port_cleanup(struct nfp_flower_priv *priv)
{
	idr_destroy(&priv->internal_ports.port_ids);
}

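/* Netdevs that are not reprs but still take part in offload (tunnel
 * devices, for example) get a refcounted entry on the non_repr_priv
 * list. Lookups walk the list and rely on RTNL for consistency.
 */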
static struct nfp_flower_non_repr_priv *
nfp_flower_non_repr_priv_lookup(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_flower_non_repr_priv *entry;

	ASSERT_RTNL();

	list_for_each_entry(entry, &priv->non_repr_priv, list)
		if (entry->netdev == netdev)
			return entry;

	return NULL;
}

void
__nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv)
{
	non_repr_priv->ref_count++;
}

struct nfp_flower_non_repr_priv *
nfp_flower_non_repr_priv_get(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_priv *priv = app->priv;
	struct nfp_flower_non_repr_priv *entry;

	entry = nfp_flower_non_repr_priv_lookup(app, netdev);
	if (entry)
		goto inc_ref;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;

	entry->netdev = netdev;
	list_add(&entry->list, &priv->non_repr_priv);

inc_ref:
	__nfp_flower_non_repr_priv_get(entry);
	return entry;
}

void
__nfp_flower_non_repr_priv_put(struct nfp_flower_non_repr_priv *non_repr_priv)
{
	if (--non_repr_priv->ref_count)
		return;

	list_del(&non_repr_priv->list);
	kfree(non_repr_priv);
}

void
nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_flower_non_repr_priv *entry;

	entry = nfp_flower_non_repr_priv_lookup(app, netdev);
	if (!entry)
		return;

	__nfp_flower_non_repr_priv_put(entry);
}

static enum nfp_repr_type
nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
{
	switch (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id)) {
	case NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT:
		*port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM,
				  port_id);
		return NFP_REPR_TYPE_PHYS_PORT;

	case NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT:
		*port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port_id);
		if (FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC_TYPE, port_id) ==
		    NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF)
			return NFP_REPR_TYPE_PF;
		else
			return NFP_REPR_TYPE_VF;
	}

	return __NFP_REPR_TYPE_MAX;
}

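/* Map a firmware port id back to its netdev. Callers are expected to
 * hold rcu_read_lock(), matching the rcu_dereference() of the repr
 * tables below.
 */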
static struct net_device *
nfp_flower_dev_get(struct nfp_app *app, u32 port_id, bool *redir_egress)
{
	enum nfp_repr_type repr_type;
	struct nfp_reprs *reprs;
	u8 port = 0;

	/* Check if the port is internal. */
	if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id) ==
	    NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT) {
		if (redir_egress)
			*redir_egress = true;
		port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, port_id);
		return nfp_flower_get_netdev_from_internal_port_id(app, port);
	}

	repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port);
	if (repr_type > NFP_REPR_TYPE_MAX)
		return NULL;

	reprs = rcu_dereference(app->reprs[repr_type]);
	if (!reprs)
		return NULL;

	if (port >= reprs->num_reprs)
		return NULL;

	return rcu_dereference(reprs->reprs[port]);
}

static int
nfp_flower_reprs_reify(struct nfp_app *app, enum nfp_repr_type type,
		       bool exists)
{
	struct nfp_reprs *reprs;
	int i, err, count = 0;

	reprs = rcu_dereference_protected(app->reprs[type],
					  nfp_app_is_locked(app));
	if (!reprs)
		return 0;

	for (i = 0; i < reprs->num_reprs; i++) {
		struct net_device *netdev;

		netdev = nfp_repr_get_locked(app, reprs, i);
		if (netdev) {
			struct nfp_repr *repr = netdev_priv(netdev);

			err = nfp_flower_cmsg_portreify(repr, exists);
			if (err)
				return err;
			count++;
		}
	}

	return count;
}

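/* Block until the reify reply counter reaches the number of REIFY
 * messages sent, or NFP_FL_REPLY_TIMEOUT expires. The control message
 * handler wakes reify_wait_queue as firmware replies come in.
 */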
static int
nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!tot_repl)
		return 0;

	assert_nfp_app_locked(app);
	if (!wait_event_timeout(priv->reify_wait_queue,
				atomic_read(replies) >= tot_repl,
				NFP_FL_REPLY_TIMEOUT)) {
		nfp_warn(app->cpp, "Not all reprs responded to reify\n");
		return -EIO;
	}

	return 0;
}

static int
nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
{
	int err;

	err = nfp_flower_cmsg_portmod(repr, true, repr->netdev->mtu, false);
	if (err)
		return err;

	netif_tx_wake_all_queues(repr->netdev);

	return 0;
}

static int
nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
{
	netif_tx_disable(repr->netdev);

	return nfp_flower_cmsg_portmod(repr, false, repr->netdev->mtu, false);
}

static void
nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);

	kfree(repr->app_priv);
}

static void
nfp_flower_repr_netdev_preclean(struct nfp_app *app, struct net_device *netdev)
{
	struct nfp_repr *repr = netdev_priv(netdev);
	struct nfp_flower_priv *priv = app->priv;
	atomic_t *replies = &priv->reify_replies;
	int err;

	atomic_set(replies, 0);
	err = nfp_flower_cmsg_portreify(repr, false);
	if (err) {
		nfp_warn(app->cpp, "Failed to notify firmware about repr destruction\n");
		return;
	}

	nfp_flower_wait_repr_reify(app, replies, 1);
}

static void nfp_flower_sriov_disable(struct nfp_app *app)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!priv->nn)
		return;

	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
}

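/* Create one repr netdev per PF/VF vNIC: allocate the netdev, its
 * nfp_port and per-repr private data, register the set, then REIFY all
 * reprs and wait for the firmware to acknowledge each one. Any failure
 * unwinds through nfp_reprs_clean_and_free().
 */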
static int
nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
			    enum nfp_flower_cmsg_port_vnic_type vnic_type,
			    enum nfp_repr_type repr_type, unsigned int cnt)
{
	u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp);
	struct nfp_flower_priv *priv = app->priv;
	atomic_t *replies = &priv->reify_replies;
	struct nfp_flower_repr_priv *repr_priv;
	enum nfp_port_type port_type;
	struct nfp_repr *nfp_repr;
	struct nfp_reprs *reprs;
	int i, err, reify_cnt;
	const u8 queue = 0;

	port_type = repr_type == NFP_REPR_TYPE_PF ? NFP_PORT_PF_PORT :
						    NFP_PORT_VF_PORT;

	reprs = nfp_reprs_alloc(cnt);
	if (!reprs)
		return -ENOMEM;

	for (i = 0; i < cnt; i++) {
		struct net_device *repr;
		struct nfp_port *port;
		u32 port_id;

		repr = nfp_repr_alloc(app);
		if (!repr) {
			err = -ENOMEM;
			goto err_reprs_clean;
		}

		repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
		if (!repr_priv) {
			err = -ENOMEM;
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		nfp_repr = netdev_priv(repr);
		nfp_repr->app_priv = repr_priv;
		repr_priv->nfp_repr = nfp_repr;

		/* For now we only support 1 PF */
		WARN_ON(repr_type == NFP_REPR_TYPE_PF && i);

		port = nfp_port_alloc(app, port_type, repr);
		if (IS_ERR(port)) {
			err = PTR_ERR(port);
			kfree(repr_priv);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}
		if (repr_type == NFP_REPR_TYPE_PF) {
			port->pf_id = i;
			port->vnic = priv->nn->dp.ctrl_bar;
		} else {
			port->pf_id = 0;
			port->vf_id = i;
			port->vnic =
				app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ;
		}

		eth_hw_addr_random(repr);

		port_id = nfp_flower_cmsg_pcie_port(nfp_pcie, vnic_type,
						    i, queue);
		err = nfp_repr_init(app, repr,
				    port_id, port, priv->nn->dp.netdev);
		if (err) {
			kfree(repr_priv);
			nfp_port_free(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		RCU_INIT_POINTER(reprs->reprs[i], repr);
		nfp_info(app->cpp, "%s%d Representor(%s) created\n",
			 repr_type == NFP_REPR_TYPE_PF ? "PF" : "VF", i,
			 repr->name);
	}

	nfp_app_reprs_set(app, repr_type, reprs);

	atomic_set(replies, 0);
	reify_cnt = nfp_flower_reprs_reify(app, repr_type, true);
	if (reify_cnt < 0) {
		err = reify_cnt;
		nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
		goto err_reprs_remove;
	}

	err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
	if (err)
		goto err_reprs_remove;

	return 0;
err_reprs_remove:
	reprs = nfp_app_reprs_set(app, repr_type, NULL);
err_reprs_clean:
	nfp_reprs_clean_and_free(app, reprs);
	return err;
}

static int nfp_flower_sriov_enable(struct nfp_app *app, int num_vfs)
{
	struct nfp_flower_priv *priv = app->priv;

	if (!priv->nn)
		return 0;

	return nfp_flower_spawn_vnic_reprs(app,
					   NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
					   NFP_REPR_TYPE_VF, num_vfs);
}

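/* Physical port reprs follow the same pattern as the vNIC reprs above,
 * with one repr per NSP eth table entry indexed by physical port, plus
 * a MAC_REPR control message describing every port.
 */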
static int
nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
{
	struct nfp_eth_table *eth_tbl = app->pf->eth_tbl;
	atomic_t *replies = &priv->reify_replies;
	struct nfp_flower_repr_priv *repr_priv;
	struct nfp_repr *nfp_repr;
	struct sk_buff *ctrl_skb;
	struct nfp_reprs *reprs;
	int err, reify_cnt;
	unsigned int i;

	ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count);
	if (!ctrl_skb)
		return -ENOMEM;

	reprs = nfp_reprs_alloc(eth_tbl->max_index + 1);
	if (!reprs) {
		err = -ENOMEM;
		goto err_free_ctrl_skb;
	}

	for (i = 0; i < eth_tbl->count; i++) {
		unsigned int phys_port = eth_tbl->ports[i].index;
		struct net_device *repr;
		struct nfp_port *port;
		u32 cmsg_port_id;

		repr = nfp_repr_alloc(app);
		if (!repr) {
			err = -ENOMEM;
			goto err_reprs_clean;
		}

		repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
		if (!repr_priv) {
			err = -ENOMEM;
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		nfp_repr = netdev_priv(repr);
		nfp_repr->app_priv = repr_priv;
		repr_priv->nfp_repr = nfp_repr;

		port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
		if (IS_ERR(port)) {
			err = PTR_ERR(port);
			kfree(repr_priv);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}
		err = nfp_port_init_phy_port(app->pf, app, port, i);
		if (err) {
			kfree(repr_priv);
			nfp_port_free(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		SET_NETDEV_DEV(repr, &priv->nn->pdev->dev);
		nfp_net_get_mac_addr(app->pf, repr, port);

		cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port);
		err = nfp_repr_init(app, repr,
				    cmsg_port_id, port, priv->nn->dp.netdev);
		if (err) {
			kfree(repr_priv);
			nfp_port_free(port);
			nfp_repr_free(repr);
			goto err_reprs_clean;
		}

		nfp_flower_cmsg_mac_repr_add(ctrl_skb, i,
					     eth_tbl->ports[i].nbi,
					     eth_tbl->ports[i].base,
					     phys_port);

		RCU_INIT_POINTER(reprs->reprs[phys_port], repr);
		nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n",
			 phys_port, repr->name);
	}

	nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);

	/* The REIFY/MAC_REPR control messages should be sent after the MAC
	 * representors are registered using nfp_app_reprs_set(). This is
	 * because the firmware may respond with control messages for the
	 * MAC representors, e.g. to provide the driver with information
	 * about their state, and without registration the driver will drop
	 * any replies for unknown ports.
	 */
	atomic_set(replies, 0);
	reify_cnt = nfp_flower_reprs_reify(app, NFP_REPR_TYPE_PHYS_PORT, true);
	if (reify_cnt < 0) {
		err = reify_cnt;
		nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
		goto err_reprs_remove;
	}

	err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
	if (err)
		goto err_reprs_remove;

	nfp_ctrl_tx(app->ctrl, ctrl_skb);

	return 0;
err_reprs_remove:
	reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, NULL);
err_reprs_clean:
	nfp_reprs_clean_and_free(app, reprs);
err_free_ctrl_skb:
	kfree_skb(ctrl_skb);
	return err;
}

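/* Flower drives a single data vNIC; any additional vNIC is given an
 * invalid port type rather than being wired up as a real port.
 */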
static int nfp_flower_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
				 unsigned int id)
{
	if (id > 0) {
		nfp_warn(app->cpp, "FlowerNIC doesn't support more than one data vNIC\n");
		goto err_invalid_port;
	}

	eth_hw_addr_random(nn->dp.netdev);
	netif_keep_dst(nn->dp.netdev);
	nn->vnic_no_name = true;

	return 0;

err_invalid_port:
	nn->port = nfp_port_alloc(app, NFP_PORT_INVALID, nn->dp.netdev);
	return PTR_ERR_OR_ZERO(nn->port);
}

static void nfp_flower_vnic_clean(struct nfp_app *app, struct nfp_net *nn)
{
	struct nfp_flower_priv *priv = app->priv;

	if (app->pf->num_vfs)
		nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);

	priv->nn = NULL;
}

static int nfp_flower_vnic_init(struct nfp_app *app, struct nfp_net *nn)
{
	struct nfp_flower_priv *priv = app->priv;
	int err;

	priv->nn = nn;

	err = nfp_flower_spawn_phy_reprs(app, app->priv);
	if (err)
		goto err_clear_nn;

	err = nfp_flower_spawn_vnic_reprs(app,
					  NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF,
					  NFP_REPR_TYPE_PF, 1);
	if (err)
		goto err_destroy_reprs_phy;

	if (app->pf->num_vfs) {
		err = nfp_flower_spawn_vnic_reprs(app,
						  NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
						  NFP_REPR_TYPE_VF,
						  app->pf->num_vfs);
		if (err)
			goto err_destroy_reprs_pf;
	}

	return 0;

err_destroy_reprs_pf:
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
err_destroy_reprs_phy:
	nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
err_clear_nn:
	priv->nn = NULL;
	return err;
}

static void nfp_flower_wait_host_bit(struct nfp_app *app)
{
	unsigned long err_at;
	u64 feat;
	int err;

	/* Wait for HOST_ACK flag bit to propagate */
	err_at = jiffies + msecs_to_jiffies(100);
	do {
		feat = nfp_rtsym_read_le(app->pf->rtbl,
					 "_abi_flower_combined_features_global",
					 &err);
		if (time_is_before_eq_jiffies(err_at)) {
			nfp_warn(app->cpp,
				 "HOST_ACK bit not propagated in FW.\n");
			break;
		}
		usleep_range(1000, 2000);
	} while (!err && !(feat & NFP_FL_FEATS_HOST_ACK));

	if (err)
		nfp_warn(app->cpp,
			 "Could not read global features entry from FW\n");
}

static int nfp_flower_sync_feature_bits(struct nfp_app *app)
{
	struct nfp_flower_priv *app_priv = app->priv;
	int err;

	/* Tell the firmware of the host supported features. */
	err = nfp_rtsym_write_le(app->pf->rtbl, "_abi_flower_host_mask",
				 app_priv->flower_ext_feats |
				 NFP_FL_FEATS_HOST_ACK);
	if (!err)
		nfp_flower_wait_host_bit(app);
	else if (err != -ENOENT)
		return err;

	/* Tell the firmware that the driver supports lag. */
	err = nfp_rtsym_write_le(app->pf->rtbl,
				 "_abi_flower_balance_sync_enable", 1);
	if (!err) {
		app_priv->flower_en_feats |= NFP_FL_ENABLE_LAG;
		nfp_flower_lag_init(&app_priv->nfp_lag);
	} else if (err == -ENOENT) {
		nfp_warn(app->cpp, "LAG not supported by FW.\n");
	} else {
		return err;
	}

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MOD) {
		/* Tell the firmware that the driver supports flow merging. */
		err = nfp_rtsym_write_le(app->pf->rtbl,
					 "_abi_flower_merge_hint_enable", 1);
		if (!err) {
			app_priv->flower_en_feats |= NFP_FL_ENABLE_FLOW_MERGE;
			nfp_flower_internal_port_init(app_priv);
		} else if (err == -ENOENT) {
			nfp_warn(app->cpp,
				 "Flow merge not supported by FW.\n");
		} else {
			return err;
		}
	} else {
		nfp_warn(app->cpp, "Flow mod/merge not supported by FW.\n");
	}

	return 0;
}

static int nfp_flower_init(struct nfp_app *app)
{
	u64 version, features, ctx_count, num_mems;
	const struct nfp_pf *pf = app->pf;
	struct nfp_flower_priv *app_priv;
	int err;

	if (!pf->eth_tbl) {
		nfp_warn(app->cpp, "FlowerNIC requires eth table\n");
		return -EINVAL;
	}

	if (!pf->mac_stats_bar) {
		nfp_warn(app->cpp, "FlowerNIC requires mac_stats BAR\n");
		return -EINVAL;
	}

	if (!pf->vf_cfg_bar) {
		nfp_warn(app->cpp, "FlowerNIC requires vf_cfg BAR\n");
		return -EINVAL;
	}

	version = nfp_rtsym_read_le(app->pf->rtbl, "hw_flower_version", &err);
	if (err) {
		nfp_warn(app->cpp, "FlowerNIC requires hw_flower_version memory symbol\n");
		return err;
	}

	num_mems = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_SPLIT",
				     &err);
	if (err) {
		nfp_warn(app->cpp,
			 "FlowerNIC: unsupported host context memory: %d\n",
			 err);
		err = 0;
		num_mems = 1;
	}

	if (!FIELD_FIT(NFP_FL_STAT_ID_MU_NUM, num_mems) || !num_mems) {
		nfp_warn(app->cpp,
			 "FlowerNIC: invalid host context memory: %llu\n",
			 num_mems);
		return -EINVAL;
	}

	ctx_count = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_COUNT",
				      &err);
	if (err) {
		nfp_warn(app->cpp,
			 "FlowerNIC: unsupported host context count: %d\n",
			 err);
		err = 0;
		ctx_count = BIT(17);
	}

	/* We need to ensure hardware has enough flower capabilities. */
	if (version != NFP_FLOWER_ALLOWED_VER) {
		nfp_warn(app->cpp, "FlowerNIC: unsupported firmware version\n");
		return -EINVAL;
	}

	app_priv = vzalloc(sizeof(struct nfp_flower_priv));
	if (!app_priv)
		return -ENOMEM;

	app_priv->total_mem_units = num_mems;
	app_priv->active_mem_unit = 0;
	app_priv->stats_ring_size = roundup_pow_of_two(ctx_count);
	app->priv = app_priv;
	app_priv->app = app;
	skb_queue_head_init(&app_priv->cmsg_skbs_high);
	skb_queue_head_init(&app_priv->cmsg_skbs_low);
	INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx);
	init_waitqueue_head(&app_priv->reify_wait_queue);

	init_waitqueue_head(&app_priv->mtu_conf.wait_q);
	spin_lock_init(&app_priv->mtu_conf.lock);

	err = nfp_flower_metadata_init(app, ctx_count, num_mems);
	if (err)
		goto err_free_app_priv;

	/* Extract the extra features supported by the firmware. */
	features = nfp_rtsym_read_le(app->pf->rtbl,
				     "_abi_flower_extra_features", &err);
	if (err)
		app_priv->flower_ext_feats = 0;
	else
		app_priv->flower_ext_feats = features & NFP_FL_FEATS_HOST;

	err = nfp_flower_sync_feature_bits(app);
	if (err)
		goto err_cleanup;

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
		nfp_flower_qos_init(app);

	INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
	INIT_LIST_HEAD(&app_priv->non_repr_priv);
	app_priv->pre_tun_rule_cnt = 0;

	return 0;

err_cleanup:
	if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG)
		nfp_flower_lag_cleanup(&app_priv->nfp_lag);
	nfp_flower_metadata_cleanup(app);
err_free_app_priv:
	vfree(app->priv);
	return err;
}

static void nfp_flower_clean(struct nfp_app *app)
{
	struct nfp_flower_priv *app_priv = app->priv;

	skb_queue_purge(&app_priv->cmsg_skbs_high);
	skb_queue_purge(&app_priv->cmsg_skbs_low);
	flush_work(&app_priv->cmsg_work);

	if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
		nfp_flower_qos_cleanup(app);

	if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG)
		nfp_flower_lag_cleanup(&app_priv->nfp_lag);

	if (app_priv->flower_en_feats & NFP_FL_ENABLE_FLOW_MERGE)
		nfp_flower_internal_port_cleanup(app_priv);

	nfp_flower_metadata_cleanup(app);
	vfree(app->priv);
	app->priv = NULL;
}

static bool nfp_flower_check_ack(struct nfp_flower_priv *app_priv)
{
	bool ret;

	spin_lock_bh(&app_priv->mtu_conf.lock);
	ret = app_priv->mtu_conf.ack;
	spin_unlock_bh(&app_priv->mtu_conf.lock);

	return ret;
}

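/* Physical port MTU changes must be accepted by the firmware: record
 * the requested value, send a PORTMOD and sleep until the ack arrives
 * or NFP_FL_REPLY_TIMEOUT expires.
 */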
static int
nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev,
			   int new_mtu)
{
	struct nfp_flower_priv *app_priv = app->priv;
	struct nfp_repr *repr = netdev_priv(netdev);
	int err;

	/* Only need to config FW for physical port MTU change. */
	if (repr->port->type != NFP_PORT_PHYS_PORT)
		return 0;

	if (!(app_priv->flower_ext_feats & NFP_FL_NBI_MTU_SETTING)) {
		nfp_err(app->cpp, "Physical port MTU setting not supported\n");
		return -EINVAL;
	}

	spin_lock_bh(&app_priv->mtu_conf.lock);
	app_priv->mtu_conf.ack = false;
	app_priv->mtu_conf.requested_val = new_mtu;
	app_priv->mtu_conf.portnum = repr->dst->u.port_info.port_id;
	spin_unlock_bh(&app_priv->mtu_conf.lock);

	err = nfp_flower_cmsg_portmod(repr, netif_carrier_ok(netdev), new_mtu,
				      true);
	if (err) {
		spin_lock_bh(&app_priv->mtu_conf.lock);
		app_priv->mtu_conf.requested_val = 0;
		spin_unlock_bh(&app_priv->mtu_conf.lock);
		return err;
	}

	/* Wait for fw to ack the change. */
	if (!wait_event_timeout(app_priv->mtu_conf.wait_q,
				nfp_flower_check_ack(app_priv),
				NFP_FL_REPLY_TIMEOUT)) {
		spin_lock_bh(&app_priv->mtu_conf.lock);
		app_priv->mtu_conf.requested_val = 0;
		spin_unlock_bh(&app_priv->mtu_conf.lock);
		nfp_warn(app->cpp, "MTU change not verified with fw\n");
		return -EIO;
	}

	return 0;
}

static int nfp_flower_start(struct nfp_app *app)
{
	struct nfp_flower_priv *app_priv = app->priv;
	int err;

	if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
		err = nfp_flower_lag_reset(&app_priv->nfp_lag);
		if (err)
			return err;
	}

	err = flow_indr_dev_register(nfp_flower_indr_setup_tc_cb, app);
	if (err)
		return err;

	err = nfp_tunnel_config_start(app);
	if (err)
		goto err_tunnel_config;

	return 0;

err_tunnel_config:
	flow_indr_dev_unregister(nfp_flower_indr_setup_tc_cb, app,
				 nfp_flower_setup_indr_tc_release);
	return err;
}

static void nfp_flower_stop(struct nfp_app *app)
{
	nfp_tunnel_config_stop(app);

	flow_indr_dev_unregister(nfp_flower_indr_setup_tc_cb, app,
				 nfp_flower_setup_indr_tc_release);
}

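/* Fan netdev notifier events out to the LAG, internal port and tunnel
 * MAC handlers in turn, stopping as soon as one consumes the event.
 */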
static int
nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev,
			unsigned long event, void *ptr)
{
	struct nfp_flower_priv *app_priv = app->priv;
	int ret;

	if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
		ret = nfp_flower_lag_netdev_event(app_priv, netdev, event, ptr);
		if (ret & NOTIFY_STOP_MASK)
			return ret;
	}

	ret = nfp_flower_internal_port_event_handler(app, netdev, event);
	if (ret & NOTIFY_STOP_MASK)
		return ret;

	return nfp_tunnel_mac_event_handler(app, netdev, event, ptr);
}

const struct nfp_app_type app_flower = {
	.id		= NFP_APP_FLOWER_NIC,
	.name		= "flower",

	.ctrl_cap_mask	= ~0U,
	.ctrl_has_meta	= true,

	.extra_cap	= nfp_flower_extra_cap,

	.init		= nfp_flower_init,
	.clean		= nfp_flower_clean,

	.repr_change_mtu  = nfp_flower_repr_change_mtu,

	.vnic_alloc	= nfp_flower_vnic_alloc,
	.vnic_init	= nfp_flower_vnic_init,
	.vnic_clean	= nfp_flower_vnic_clean,

	.repr_preclean	= nfp_flower_repr_netdev_preclean,
	.repr_clean	= nfp_flower_repr_netdev_clean,

	.repr_open	= nfp_flower_repr_netdev_open,
	.repr_stop	= nfp_flower_repr_netdev_stop,

	.start		= nfp_flower_start,
	.stop		= nfp_flower_stop,

	.netdev_event	= nfp_flower_netdev_event,

	.ctrl_msg_rx	= nfp_flower_cmsg_rx,

	.sriov_enable	= nfp_flower_sriov_enable,
	.sriov_disable	= nfp_flower_sriov_disable,

	.eswitch_mode_get  = eswitch_mode_get,
	.dev_get	= nfp_flower_dev_get,

	.setup_tc	= nfp_flower_setup_tc,
};