// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/etherdevice.h>
#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/devlink.h>
#include <net/dst_metadata.h>

#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nffw.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_net_repr.h"
#include "../nfp_port.h"

#define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL

#define NFP_MIN_INT_PORT_ID     1
#define NFP_MAX_INT_PORT_ID     256

static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn)
{
        return "FLOWER";
}

static enum devlink_eswitch_mode eswitch_mode_get(struct nfp_app *app)
{
        return DEVLINK_ESWITCH_MODE_SWITCHDEV;
}
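
/* Internal (non-repr) ports, e.g. OVS internal ports, are tracked in an
 * IDR so the allocated index can double as the firmware-facing port ID.
 * Note that idr_alloc() treats the end bound as exclusive, so IDs land
 * in [NFP_MIN_INT_PORT_ID, NFP_MAX_INT_PORT_ID).
 */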
static int
nfp_flower_lookup_internal_port_id(struct nfp_flower_priv *priv,
                                   struct net_device *netdev)
{
        struct net_device *entry;
        int i, id = 0;

        rcu_read_lock();
        idr_for_each_entry(&priv->internal_ports.port_ids, entry, i)
                if (entry == netdev) {
                        id = i;
                        break;
                }
        rcu_read_unlock();

        return id;
}
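
/* Return the existing internal port ID for @netdev, or allocate a new
 * one. Allocation runs under a BH spinlock, hence GFP_ATOMIC with
 * idr_preload() to improve the odds of the atomic allocation succeeding.
 */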
static int
nfp_flower_get_internal_port_id(struct nfp_app *app, struct net_device *netdev)
{
        struct nfp_flower_priv *priv = app->priv;
        int id;

        id = nfp_flower_lookup_internal_port_id(priv, netdev);
        if (id > 0)
                return id;

        idr_preload(GFP_ATOMIC);
        spin_lock_bh(&priv->internal_ports.lock);
        id = idr_alloc(&priv->internal_ports.port_ids, netdev,
                       NFP_MIN_INT_PORT_ID, NFP_MAX_INT_PORT_ID, GFP_ATOMIC);
        spin_unlock_bh(&priv->internal_ports.lock);
        idr_preload_end();

        return id;
}
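
/* Resolve any offloadable netdev to the 32-bit port ID used in control
 * messages: reprs carry their ID directly, while other offloadable
 * netdevs are mapped through the internal-port IDR.
 */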
u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
                                       struct net_device *netdev)
{
        int ext_port;

        if (nfp_netdev_is_nfp_repr(netdev)) {
                return nfp_repr_get_port_id(netdev);
        } else if (nfp_flower_internal_port_can_offload(app, netdev)) {
                ext_port = nfp_flower_get_internal_port_id(app, netdev);
                if (ext_port < 0)
                        return 0;

                return nfp_flower_internal_port_get_port_id(ext_port);
        }

        return 0;
}

static struct net_device *
nfp_flower_get_netdev_from_internal_port_id(struct nfp_app *app, int port_id)
{
        struct nfp_flower_priv *priv = app->priv;
        struct net_device *netdev;

        rcu_read_lock();
        netdev = idr_find(&priv->internal_ports.port_ids, port_id);
        rcu_read_unlock();

        return netdev;
}
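
/* Drop a netdev's internal port ID mapping, if one exists; invoked from
 * the netdev event handler below on NETDEV_UNREGISTER.
 */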
static void
nfp_flower_free_internal_port_id(struct nfp_app *app, struct net_device *netdev)
{
        struct nfp_flower_priv *priv = app->priv;
        int id;

        id = nfp_flower_lookup_internal_port_id(priv, netdev);
        if (!id)
                return;

        spin_lock_bh(&priv->internal_ports.lock);
        idr_remove(&priv->internal_ports.port_ids, id);
        spin_unlock_bh(&priv->internal_ports.lock);
}

static int
nfp_flower_internal_port_event_handler(struct nfp_app *app,
                                       struct net_device *netdev,
                                       unsigned long event)
{
        if (event == NETDEV_UNREGISTER &&
            nfp_flower_internal_port_can_offload(app, netdev))
                nfp_flower_free_internal_port_id(app, netdev);

        return NOTIFY_OK;
}

static void nfp_flower_internal_port_init(struct nfp_flower_priv *priv)
{
        spin_lock_init(&priv->internal_ports.lock);
        idr_init(&priv->internal_ports.port_ids);
}

static void nfp_flower_internal_port_cleanup(struct nfp_flower_priv *priv)
{
        idr_destroy(&priv->internal_ports.port_ids);
}
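
/* Per-netdev private state for non-repr devices (e.g. tunnel netdevs) is
 * kept on a refcounted list; lookups walk the list under RTNL.
 */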
static struct nfp_flower_non_repr_priv *
nfp_flower_non_repr_priv_lookup(struct nfp_app *app, struct net_device *netdev)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_flower_non_repr_priv *entry;

        ASSERT_RTNL();

        list_for_each_entry(entry, &priv->non_repr_priv, list)
                if (entry->netdev == netdev)
                        return entry;

        return NULL;
}

void
__nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv)
{
        non_repr_priv->ref_count++;
}

struct nfp_flower_non_repr_priv *
nfp_flower_non_repr_priv_get(struct nfp_app *app, struct net_device *netdev)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_flower_non_repr_priv *entry;

        entry = nfp_flower_non_repr_priv_lookup(app, netdev);
        if (entry)
                goto inc_ref;

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return NULL;

        entry->netdev = netdev;
        list_add(&entry->list, &priv->non_repr_priv);

inc_ref:
        __nfp_flower_non_repr_priv_get(entry);
        return entry;
}

void
__nfp_flower_non_repr_priv_put(struct nfp_flower_non_repr_priv *non_repr_priv)
{
        if (--non_repr_priv->ref_count)
                return;

        list_del(&non_repr_priv->list);
        kfree(non_repr_priv);
}

void
nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev)
{
        struct nfp_flower_non_repr_priv *entry;

        entry = nfp_flower_non_repr_priv_lookup(app, netdev);
        if (!entry)
                return;

        __nfp_flower_non_repr_priv_put(entry);
}

static enum nfp_repr_type
nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
{
        switch (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id)) {
        case NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT:
                *port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM,
                                  port_id);
                return NFP_REPR_TYPE_PHYS_PORT;

        case NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT:
                *port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port_id);
                if (FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC_TYPE, port_id) ==
                    NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF)
                        return NFP_REPR_TYPE_PF;
                else
                        return NFP_REPR_TYPE_VF;
        }

        return __NFP_REPR_TYPE_MAX;
}
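
/* Translate a control-message port ID back to a netdev. Runs under RCU:
 * internal ports resolve through the IDR, everything else through the
 * repr tables.
 */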
static struct net_device *
nfp_flower_dev_get(struct nfp_app *app, u32 port_id, bool *redir_egress)
{
        enum nfp_repr_type repr_type;
        struct nfp_reprs *reprs;
        u8 port = 0;

        /* Check if the port is internal. */
        if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id) ==
            NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT) {
                if (redir_egress)
                        *redir_egress = true;
                port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, port_id);
                return nfp_flower_get_netdev_from_internal_port_id(app, port);
        }

        repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port);
        if (repr_type > NFP_REPR_TYPE_MAX)
                return NULL;

        reprs = rcu_dereference(app->reprs[repr_type]);
        if (!reprs)
                return NULL;

        if (port >= reprs->num_reprs)
                return NULL;

        return rcu_dereference(reprs->reprs[port]);
}
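
/* Send a REIFY message for every repr of @type. Returns the number of
 * reprs notified (so the caller knows how many replies to wait for), or
 * a negative errno.
 */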
static int
nfp_flower_reprs_reify(struct nfp_app *app, enum nfp_repr_type type,
                       bool exists)
{
        struct nfp_reprs *reprs;
        int i, err, count = 0;

        reprs = rcu_dereference_protected(app->reprs[type],
                                          lockdep_is_held(&app->pf->lock));
        if (!reprs)
                return 0;

        for (i = 0; i < reprs->num_reprs; i++) {
                struct net_device *netdev;

                netdev = nfp_repr_get_locked(app, reprs, i);
                if (netdev) {
                        struct nfp_repr *repr = netdev_priv(netdev);

                        err = nfp_flower_cmsg_portreify(repr, exists);
                        if (err)
                                return err;
                        count++;
                }
        }

        return count;
}

static int
nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl)
{
        struct nfp_flower_priv *priv = app->priv;

        if (!tot_repl)
                return 0;

        lockdep_assert_held(&app->pf->lock);
        if (!wait_event_timeout(priv->reify_wait_queue,
                                atomic_read(replies) >= tot_repl,
                                NFP_FL_REPLY_TIMEOUT)) {
                nfp_warn(app->cpp, "Not all reprs responded to reify\n");
                return -EIO;
        }

        return 0;
}
static int
nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
{
        int err;

        err = nfp_flower_cmsg_portmod(repr, true, repr->netdev->mtu, false);
        if (err)
                return err;

        netif_tx_wake_all_queues(repr->netdev);

        return 0;
}

static int
nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
{
        netif_tx_disable(repr->netdev);

        return nfp_flower_cmsg_portmod(repr, false, repr->netdev->mtu, false);
}

static void
nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
{
        struct nfp_repr *repr = netdev_priv(netdev);

        kfree(repr->app_priv);
}

static void
nfp_flower_repr_netdev_preclean(struct nfp_app *app, struct net_device *netdev)
{
        struct nfp_repr *repr = netdev_priv(netdev);
        struct nfp_flower_priv *priv = app->priv;
        atomic_t *replies = &priv->reify_replies;
        int err;

        atomic_set(replies, 0);
        err = nfp_flower_cmsg_portreify(repr, false);
        if (err) {
                nfp_warn(app->cpp, "Failed to notify firmware about repr destruction\n");
                return;
        }

        nfp_flower_wait_repr_reify(app, replies, 1);
}

static void nfp_flower_sriov_disable(struct nfp_app *app)
{
        struct nfp_flower_priv *priv = app->priv;

        if (!priv->nn)
                return;

        nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
}
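
/* Create PF or VF representors: allocate the netdevs and ports, register
 * them via nfp_app_reprs_set(), then REIFY them with the firmware and
 * wait for the replies.
 */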
static int
nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
                            enum nfp_flower_cmsg_port_vnic_type vnic_type,
                            enum nfp_repr_type repr_type, unsigned int cnt)
{
        u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp);
        struct nfp_flower_priv *priv = app->priv;
        atomic_t *replies = &priv->reify_replies;
        struct nfp_flower_repr_priv *repr_priv;
        enum nfp_port_type port_type;
        struct nfp_repr *nfp_repr;
        struct nfp_reprs *reprs;
        int i, err, reify_cnt;
        const u8 queue = 0;

        port_type = repr_type == NFP_REPR_TYPE_PF ? NFP_PORT_PF_PORT :
                                                    NFP_PORT_VF_PORT;

        reprs = nfp_reprs_alloc(cnt);
        if (!reprs)
                return -ENOMEM;

        for (i = 0; i < cnt; i++) {
                struct net_device *repr;
                struct nfp_port *port;
                u32 port_id;

                repr = nfp_repr_alloc(app);
                if (!repr) {
                        err = -ENOMEM;
                        goto err_reprs_clean;
                }

                repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
                if (!repr_priv) {
                        err = -ENOMEM;
                        nfp_repr_free(repr);
                        goto err_reprs_clean;
                }

                nfp_repr = netdev_priv(repr);
                nfp_repr->app_priv = repr_priv;
                repr_priv->nfp_repr = nfp_repr;

                /* For now we only support 1 PF */
                WARN_ON(repr_type == NFP_REPR_TYPE_PF && i);

                port = nfp_port_alloc(app, port_type, repr);
                if (IS_ERR(port)) {
                        err = PTR_ERR(port);
                        kfree(repr_priv);
                        nfp_repr_free(repr);
                        goto err_reprs_clean;
                }
                if (repr_type == NFP_REPR_TYPE_PF) {
                        port->pf_id = i;
                        port->vnic = priv->nn->dp.ctrl_bar;
                } else {
                        port->pf_id = 0;
                        port->vf_id = i;
                        port->vnic =
                                app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ;
                }

                eth_hw_addr_random(repr);

                port_id = nfp_flower_cmsg_pcie_port(nfp_pcie, vnic_type,
                                                    i, queue);
                err = nfp_repr_init(app, repr,
                                    port_id, port, priv->nn->dp.netdev);
                if (err) {
                        kfree(repr_priv);
                        nfp_port_free(port);
                        nfp_repr_free(repr);
                        goto err_reprs_clean;
                }

                RCU_INIT_POINTER(reprs->reprs[i], repr);
                nfp_info(app->cpp, "%s%d Representor(%s) created\n",
                         repr_type == NFP_REPR_TYPE_PF ? "PF" : "VF", i,
                         repr->name);
        }

        nfp_app_reprs_set(app, repr_type, reprs);

        atomic_set(replies, 0);
        reify_cnt = nfp_flower_reprs_reify(app, repr_type, true);
        if (reify_cnt < 0) {
                err = reify_cnt;
                nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
                goto err_reprs_remove;
        }

        err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
        if (err)
                goto err_reprs_remove;

        return 0;
err_reprs_remove:
        reprs = nfp_app_reprs_set(app, repr_type, NULL);
err_reprs_clean:
        nfp_reprs_clean_and_free(app, reprs);
        return err;
}

static int nfp_flower_sriov_enable(struct nfp_app *app, int num_vfs)
{
        struct nfp_flower_priv *priv = app->priv;

        if (!priv->nn)
                return 0;

        return nfp_flower_spawn_vnic_reprs(app,
                                           NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
                                           NFP_REPR_TYPE_VF, num_vfs);
}
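
/* Create physical port representors from the NSP eth table. The MAC_REPR
 * control message describing the ports is only sent once every repr has
 * been registered and REIFYed.
 */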
static int
nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
{
        struct nfp_eth_table *eth_tbl = app->pf->eth_tbl;
        atomic_t *replies = &priv->reify_replies;
        struct nfp_flower_repr_priv *repr_priv;
        struct nfp_repr *nfp_repr;
        struct sk_buff *ctrl_skb;
        struct nfp_reprs *reprs;
        int err, reify_cnt;
        unsigned int i;

        ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count);
        if (!ctrl_skb)
                return -ENOMEM;

        reprs = nfp_reprs_alloc(eth_tbl->max_index + 1);
        if (!reprs) {
                err = -ENOMEM;
                goto err_free_ctrl_skb;
        }

        for (i = 0; i < eth_tbl->count; i++) {
                unsigned int phys_port = eth_tbl->ports[i].index;
                struct net_device *repr;
                struct nfp_port *port;
                u32 cmsg_port_id;

                repr = nfp_repr_alloc(app);
                if (!repr) {
                        err = -ENOMEM;
                        goto err_reprs_clean;
                }

                repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
                if (!repr_priv) {
                        err = -ENOMEM;
                        nfp_repr_free(repr);
                        goto err_reprs_clean;
                }

                nfp_repr = netdev_priv(repr);
                nfp_repr->app_priv = repr_priv;
                repr_priv->nfp_repr = nfp_repr;

                port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
                if (IS_ERR(port)) {
                        err = PTR_ERR(port);
                        kfree(repr_priv);
                        nfp_repr_free(repr);
                        goto err_reprs_clean;
                }
                err = nfp_port_init_phy_port(app->pf, app, port, i);
                if (err) {
                        kfree(repr_priv);
                        nfp_port_free(port);
                        nfp_repr_free(repr);
                        goto err_reprs_clean;
                }

                SET_NETDEV_DEV(repr, &priv->nn->pdev->dev);
                nfp_net_get_mac_addr(app->pf, repr, port);

                cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port);
                err = nfp_repr_init(app, repr,
                                    cmsg_port_id, port, priv->nn->dp.netdev);
                if (err) {
                        kfree(repr_priv);
                        nfp_port_free(port);
                        nfp_repr_free(repr);
                        goto err_reprs_clean;
                }

                nfp_flower_cmsg_mac_repr_add(ctrl_skb, i,
                                             eth_tbl->ports[i].nbi,
                                             eth_tbl->ports[i].base,
                                             phys_port);

                RCU_INIT_POINTER(reprs->reprs[phys_port], repr);
                nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n",
                         phys_port, repr->name);
        }

        nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);

        /* The REIFY/MAC_REPR control messages should be sent after the MAC
         * representors are registered using nfp_app_reprs_set(). This is
         * because the firmware may respond with control messages for the
         * MAC representors, f.e. to provide the driver with information
         * about their state, and without registration the driver will drop
         * any replies.
         */
        atomic_set(replies, 0);
        reify_cnt = nfp_flower_reprs_reify(app, NFP_REPR_TYPE_PHYS_PORT, true);
        if (reify_cnt < 0) {
                err = reify_cnt;
                nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
                goto err_reprs_remove;
        }

        err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
        if (err)
                goto err_reprs_remove;

        nfp_ctrl_tx(app->ctrl, ctrl_skb);

        return 0;
err_reprs_remove:
        reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, NULL);
err_reprs_clean:
        nfp_reprs_clean_and_free(app, reprs);
err_free_ctrl_skb:
        kfree_skb(ctrl_skb);
        return err;
}

static int nfp_flower_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
                                 unsigned int id)
{
        if (id > 0) {
                nfp_warn(app->cpp, "FlowerNIC doesn't support more than one data vNIC\n");
                goto err_invalid_port;
        }

        eth_hw_addr_random(nn->dp.netdev);
        netif_keep_dst(nn->dp.netdev);
        nn->vnic_no_name = true;

        return 0;

err_invalid_port:
        nn->port = nfp_port_alloc(app, NFP_PORT_INVALID, nn->dp.netdev);
        return PTR_ERR_OR_ZERO(nn->port);
}

static void nfp_flower_vnic_clean(struct nfp_app *app, struct nfp_net *nn)
{
        struct nfp_flower_priv *priv = app->priv;

        if (app->pf->num_vfs)
                nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
        nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
        nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);

        priv->nn = NULL;
}
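
/* Bring up all representor types for the data vNIC: physical ports
 * first, then the PF repr, then VF reprs if SR-IOV VFs already exist.
 * Each later stage unwinds the earlier ones on failure.
 */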
static int nfp_flower_vnic_init(struct nfp_app *app, struct nfp_net *nn)
{
        struct nfp_flower_priv *priv = app->priv;
        int err;

        priv->nn = nn;

        err = nfp_flower_spawn_phy_reprs(app, app->priv);
        if (err)
                goto err_clear_nn;

        err = nfp_flower_spawn_vnic_reprs(app,
                                          NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF,
                                          NFP_REPR_TYPE_PF, 1);
        if (err)
                goto err_destroy_reprs_phy;

        if (app->pf->num_vfs) {
                err = nfp_flower_spawn_vnic_reprs(app,
                                                  NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
                                                  NFP_REPR_TYPE_VF,
                                                  app->pf->num_vfs);
                if (err)
                        goto err_destroy_reprs_pf;
        }

        return 0;

err_destroy_reprs_pf:
        nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
err_destroy_reprs_phy:
        nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
err_clear_nn:
        priv->nn = NULL;
        return err;
}

static void nfp_flower_wait_host_bit(struct nfp_app *app)
{
        unsigned long err_at;
        u64 feat;
        int err;

        /* Wait for HOST_ACK flag bit to propagate */
        err_at = jiffies + msecs_to_jiffies(100);
        do {
                feat = nfp_rtsym_read_le(app->pf->rtbl,
                                         "_abi_flower_combined_features_global",
                                         &err);
                if (time_is_before_eq_jiffies(err_at)) {
                        nfp_warn(app->cpp,
                                 "HOST_ACK bit not propagated in FW.\n");
                        break;
                }
                usleep_range(1000, 2000);
        } while (!err && !(feat & NFP_FL_FEATS_HOST_ACK));

        if (err)
                nfp_warn(app->cpp,
                         "Could not read global features entry from FW\n");
}
static int nfp_flower_sync_feature_bits(struct nfp_app *app)
{
        struct nfp_flower_priv *app_priv = app->priv;
        int err;

        /* Tell the firmware of the host supported features. */
        err = nfp_rtsym_write_le(app->pf->rtbl, "_abi_flower_host_mask",
                                 app_priv->flower_ext_feats |
                                 NFP_FL_FEATS_HOST_ACK);
        if (!err)
                nfp_flower_wait_host_bit(app);
        else if (err != -ENOENT)
                return err;

        /* Tell the firmware that the driver supports lag. */
        err = nfp_rtsym_write_le(app->pf->rtbl,
                                 "_abi_flower_balance_sync_enable", 1);
        if (!err) {
                app_priv->flower_en_feats |= NFP_FL_ENABLE_LAG;
                nfp_flower_lag_init(&app_priv->nfp_lag);
        } else if (err == -ENOENT) {
                nfp_warn(app->cpp, "LAG not supported by FW.\n");
        } else {
                return err;
        }

        if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MOD) {
                /* Tell the firmware that the driver supports flow merging. */
                err = nfp_rtsym_write_le(app->pf->rtbl,
                                         "_abi_flower_merge_hint_enable", 1);
                if (!err) {
                        app_priv->flower_en_feats |= NFP_FL_ENABLE_FLOW_MERGE;
                        nfp_flower_internal_port_init(app_priv);
                } else if (err == -ENOENT) {
                        nfp_warn(app->cpp,
                                 "Flow merge not supported by FW.\n");
                } else {
                        return err;
                }
        } else {
                nfp_warn(app->cpp, "Flow mod/merge not supported by FW.\n");
        }

        return 0;
}
static int nfp_flower_init(struct nfp_app *app)
{
        u64 version, features, ctx_count, num_mems;
        const struct nfp_pf *pf = app->pf;
        struct nfp_flower_priv *app_priv;
        int err;

        if (!pf->eth_tbl) {
                nfp_warn(app->cpp, "FlowerNIC requires eth table\n");
                return -EINVAL;
        }

        if (!pf->mac_stats_bar) {
                nfp_warn(app->cpp, "FlowerNIC requires mac_stats BAR\n");
                return -EINVAL;
        }

        if (!pf->vf_cfg_bar) {
                nfp_warn(app->cpp, "FlowerNIC requires vf_cfg BAR\n");
                return -EINVAL;
        }

        version = nfp_rtsym_read_le(app->pf->rtbl, "hw_flower_version", &err);
        if (err) {
                nfp_warn(app->cpp, "FlowerNIC requires hw_flower_version memory symbol\n");
                return err;
        }

        num_mems = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_SPLIT",
                                     &err);
        if (err) {
                nfp_warn(app->cpp,
                         "FlowerNIC: unsupported host context memory: %d\n",
                         err);
                err = 0;
                num_mems = 1;
        }

        if (!FIELD_FIT(NFP_FL_STAT_ID_MU_NUM, num_mems) || !num_mems) {
                nfp_warn(app->cpp,
                         "FlowerNIC: invalid host context memory: %llu\n",
                         num_mems);
                return -EINVAL;
        }

        ctx_count = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_COUNT",
                                      &err);
        if (err) {
                nfp_warn(app->cpp,
                         "FlowerNIC: unsupported host context count: %d\n",
                         err);
                err = 0;
                ctx_count = BIT(17);
        }

        /* We need to ensure hardware has enough flower capabilities. */
        if (version != NFP_FLOWER_ALLOWED_VER) {
                nfp_warn(app->cpp, "FlowerNIC: unsupported firmware version\n");
                return -EINVAL;
        }

        app_priv = vzalloc(sizeof(struct nfp_flower_priv));
        if (!app_priv)
                return -ENOMEM;

        app_priv->total_mem_units = num_mems;
        app_priv->active_mem_unit = 0;
        app_priv->stats_ring_size = roundup_pow_of_two(ctx_count);
        app->priv = app_priv;
        app_priv->app = app;
        skb_queue_head_init(&app_priv->cmsg_skbs_high);
        skb_queue_head_init(&app_priv->cmsg_skbs_low);
        INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx);
        init_waitqueue_head(&app_priv->reify_wait_queue);

        init_waitqueue_head(&app_priv->mtu_conf.wait_q);
        spin_lock_init(&app_priv->mtu_conf.lock);

        err = nfp_flower_metadata_init(app, ctx_count, num_mems);
        if (err)
                goto err_free_app_priv;

        /* Extract the extra features supported by the firmware. */
        features = nfp_rtsym_read_le(app->pf->rtbl,
                                     "_abi_flower_extra_features", &err);
        if (err)
                app_priv->flower_ext_feats = 0;
        else
                app_priv->flower_ext_feats = features & NFP_FL_FEATS_HOST;

        err = nfp_flower_sync_feature_bits(app);
        if (err)
                goto err_cleanup;

        err = flow_indr_dev_register(nfp_flower_indr_setup_tc_cb, app);
        if (err)
                goto err_cleanup;

        if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
                nfp_flower_qos_init(app);

        INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
        INIT_LIST_HEAD(&app_priv->non_repr_priv);
        app_priv->pre_tun_rule_cnt = 0;

        return 0;

err_cleanup:
        if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG)
                nfp_flower_lag_cleanup(&app_priv->nfp_lag);
        nfp_flower_metadata_cleanup(app);
err_free_app_priv:
        vfree(app->priv);
        return err;
}

static void nfp_flower_clean(struct nfp_app *app)
{
        struct nfp_flower_priv *app_priv = app->priv;

        skb_queue_purge(&app_priv->cmsg_skbs_high);
        skb_queue_purge(&app_priv->cmsg_skbs_low);
        flush_work(&app_priv->cmsg_work);

        if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
                nfp_flower_qos_cleanup(app);

        if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG)
                nfp_flower_lag_cleanup(&app_priv->nfp_lag);

        if (app_priv->flower_en_feats & NFP_FL_ENABLE_FLOW_MERGE)
                nfp_flower_internal_port_cleanup(app_priv);

        nfp_flower_metadata_cleanup(app);
        vfree(app->priv);
        app->priv = NULL;
}

static bool nfp_flower_check_ack(struct nfp_flower_priv *app_priv)
{
        bool ret;

        spin_lock_bh(&app_priv->mtu_conf.lock);
        ret = app_priv->mtu_conf.ack;
        spin_unlock_bh(&app_priv->mtu_conf.lock);

        return ret;
}
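
/* Physical port MTU changes must be acknowledged by the firmware: record
 * the requested value, send a PORTMOD carrying the new MTU, then sleep
 * until the ack arrives or NFP_FL_REPLY_TIMEOUT expires.
 */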
static int
nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev,
                           int new_mtu)
{
        struct nfp_flower_priv *app_priv = app->priv;
        struct nfp_repr *repr = netdev_priv(netdev);
        int err;

        /* Only need to config FW for physical port MTU change. */
        if (repr->port->type != NFP_PORT_PHYS_PORT)
                return 0;

        if (!(app_priv->flower_ext_feats & NFP_FL_NBI_MTU_SETTING)) {
                nfp_err(app->cpp, "Physical port MTU setting not supported\n");
                return -EINVAL;
        }

        spin_lock_bh(&app_priv->mtu_conf.lock);
        app_priv->mtu_conf.ack = false;
        app_priv->mtu_conf.requested_val = new_mtu;
        app_priv->mtu_conf.portnum = repr->dst->u.port_info.port_id;
        spin_unlock_bh(&app_priv->mtu_conf.lock);

        err = nfp_flower_cmsg_portmod(repr, netif_carrier_ok(netdev), new_mtu,
                                      true);
        if (err) {
                spin_lock_bh(&app_priv->mtu_conf.lock);
                app_priv->mtu_conf.requested_val = 0;
                spin_unlock_bh(&app_priv->mtu_conf.lock);
                return err;
        }

        /* Wait for fw to ack the change. */
        if (!wait_event_timeout(app_priv->mtu_conf.wait_q,
                                nfp_flower_check_ack(app_priv),
                                NFP_FL_REPLY_TIMEOUT)) {
                spin_lock_bh(&app_priv->mtu_conf.lock);
                app_priv->mtu_conf.requested_val = 0;
                spin_unlock_bh(&app_priv->mtu_conf.lock);
                nfp_warn(app->cpp, "MTU change not verified with fw\n");
                return -EIO;
        }

        return 0;
}

static int nfp_flower_start(struct nfp_app *app)
{
        struct nfp_flower_priv *app_priv = app->priv;
        int err;

        if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
                err = nfp_flower_lag_reset(&app_priv->nfp_lag);
                if (err)
                        return err;
        }

        return nfp_tunnel_config_start(app);
}

static void nfp_flower_stop(struct nfp_app *app)
{
        nfp_tunnel_config_stop(app);

        flow_indr_dev_unregister(nfp_flower_indr_setup_tc_cb, app,
                                 nfp_flower_setup_indr_tc_release);
}

static int
nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev,
                        unsigned long event, void *ptr)
{
        struct nfp_flower_priv *app_priv = app->priv;
        int ret;

        if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
                ret = nfp_flower_lag_netdev_event(app_priv, netdev, event, ptr);
                if (ret & NOTIFY_STOP_MASK)
                        return ret;
        }

        ret = nfp_flower_internal_port_event_handler(app, netdev, event);
        if (ret & NOTIFY_STOP_MASK)
                return ret;

        return nfp_tunnel_mac_event_handler(app, netdev, event, ptr);
}

const struct nfp_app_type app_flower = {
        .id             = NFP_APP_FLOWER_NIC,
        .name           = "flower",

        .ctrl_cap_mask  = ~0U,
        .ctrl_has_meta  = true,

        .extra_cap      = nfp_flower_extra_cap,

        .init           = nfp_flower_init,
        .clean          = nfp_flower_clean,

        .repr_change_mtu  = nfp_flower_repr_change_mtu,

        .vnic_alloc     = nfp_flower_vnic_alloc,
        .vnic_init      = nfp_flower_vnic_init,
        .vnic_clean     = nfp_flower_vnic_clean,

        .repr_preclean  = nfp_flower_repr_netdev_preclean,
        .repr_clean     = nfp_flower_repr_netdev_clean,

        .repr_open      = nfp_flower_repr_netdev_open,
        .repr_stop      = nfp_flower_repr_netdev_stop,

        .start          = nfp_flower_start,
        .stop           = nfp_flower_stop,

        .netdev_event   = nfp_flower_netdev_event,

        .ctrl_msg_rx    = nfp_flower_cmsg_rx,

        .sriov_enable   = nfp_flower_sriov_enable,
        .sriov_disable  = nfp_flower_sriov_disable,

        .eswitch_mode_get  = eswitch_mode_get,
        .dev_get        = nfp_flower_dev_get,

        .setup_tc       = nfp_flower_setup_tc,
};