drivers/net/ethernet/netronome/nfp/flower/main.c

// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017-2018 Netronome Systems, Inc. */

#include <linux/etherdevice.h>
#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <net/devlink.h>
#include <net/dst_metadata.h>

#include "main.h"
#include "../nfpcore/nfp_cpp.h"
#include "../nfpcore/nfp_nffw.h"
#include "../nfpcore/nfp_nsp.h"
#include "../nfp_app.h"
#include "../nfp_main.h"
#include "../nfp_net.h"
#include "../nfp_net_repr.h"
#include "../nfp_port.h"
#include "./cmsg.h"

#define NFP_FLOWER_ALLOWED_VER 0x0001000000010000UL

#define NFP_MIN_INT_PORT_ID 1
#define NFP_MAX_INT_PORT_ID 256

static const char *nfp_flower_extra_cap(struct nfp_app *app, struct nfp_net *nn)
{
        return "FLOWER";
}

static enum devlink_eswitch_mode eswitch_mode_get(struct nfp_app *app)
{
        return DEVLINK_ESWITCH_MODE_SWITCHDEV;
}

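/* Walk the internal-port IDR under RCU and return the ID previously assigned
 * to @netdev, or 0 if the device has no entry.
 */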
static int
nfp_flower_lookup_internal_port_id(struct nfp_flower_priv *priv,
                                   struct net_device *netdev)
{
        struct net_device *entry;
        int i, id = 0;

        rcu_read_lock();
        idr_for_each_entry(&priv->internal_ports.port_ids, entry, i)
                if (entry == netdev) {
                        id = i;
                        break;
                }
        rcu_read_unlock();

        return id;
}

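/* Return the existing internal port ID for @netdev, or allocate a new one
 * from the IDR in the range [NFP_MIN_INT_PORT_ID, NFP_MAX_INT_PORT_ID).
 */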
static int
nfp_flower_get_internal_port_id(struct nfp_app *app, struct net_device *netdev)
{
        struct nfp_flower_priv *priv = app->priv;
        int id;

        id = nfp_flower_lookup_internal_port_id(priv, netdev);
        if (id > 0)
                return id;

        idr_preload(GFP_ATOMIC);
        spin_lock_bh(&priv->internal_ports.lock);
        id = idr_alloc(&priv->internal_ports.port_ids, netdev,
                       NFP_MIN_INT_PORT_ID, NFP_MAX_INT_PORT_ID, GFP_ATOMIC);
        spin_unlock_bh(&priv->internal_ports.lock);
        idr_preload_end();

        return id;
}

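/* Translate a netdev into the port ID used in control messages: representors
 * report their own port ID, offloadable internal ports are assigned an
 * internal port ID, and anything else yields 0.
 */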
u32 nfp_flower_get_port_id_from_netdev(struct nfp_app *app,
                                       struct net_device *netdev)
{
        int ext_port;

        if (nfp_netdev_is_nfp_repr(netdev)) {
                return nfp_repr_get_port_id(netdev);
        } else if (nfp_flower_internal_port_can_offload(app, netdev)) {
                ext_port = nfp_flower_get_internal_port_id(app, netdev);
                if (ext_port < 0)
                        return 0;

                return nfp_flower_internal_port_get_port_id(ext_port);
        }

        return 0;
}

static struct net_device *
nfp_flower_get_netdev_from_internal_port_id(struct nfp_app *app, int port_id)
{
        struct nfp_flower_priv *priv = app->priv;
        struct net_device *netdev;

        rcu_read_lock();
        netdev = idr_find(&priv->internal_ports.port_ids, port_id);
        rcu_read_unlock();

        return netdev;
}

static void
nfp_flower_free_internal_port_id(struct nfp_app *app, struct net_device *netdev)
{
        struct nfp_flower_priv *priv = app->priv;
        int id;

        id = nfp_flower_lookup_internal_port_id(priv, netdev);
        if (!id)
                return;

        spin_lock_bh(&priv->internal_ports.lock);
        idr_remove(&priv->internal_ports.port_ids, id);
        spin_unlock_bh(&priv->internal_ports.lock);
}

static int
nfp_flower_internal_port_event_handler(struct nfp_app *app,
                                       struct net_device *netdev,
                                       unsigned long event)
{
        if (event == NETDEV_UNREGISTER &&
            nfp_flower_internal_port_can_offload(app, netdev))
                nfp_flower_free_internal_port_id(app, netdev);

        return NOTIFY_OK;
}

static void nfp_flower_internal_port_init(struct nfp_flower_priv *priv)
{
        spin_lock_init(&priv->internal_ports.lock);
        idr_init(&priv->internal_ports.port_ids);
}

static void nfp_flower_internal_port_cleanup(struct nfp_flower_priv *priv)
{
        idr_destroy(&priv->internal_ports.port_ids);
}

static struct nfp_flower_non_repr_priv *
nfp_flower_non_repr_priv_lookup(struct nfp_app *app, struct net_device *netdev)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_flower_non_repr_priv *entry;

        ASSERT_RTNL();

        list_for_each_entry(entry, &priv->non_repr_priv, list)
                if (entry->netdev == netdev)
                        return entry;

        return NULL;
}

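/* Reference helpers for per-netdev state tracked for non-representor devices.
 * Entries live on priv->non_repr_priv and are freed when the last reference
 * is dropped.
 */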
void
__nfp_flower_non_repr_priv_get(struct nfp_flower_non_repr_priv *non_repr_priv)
{
        non_repr_priv->ref_count++;
}

struct nfp_flower_non_repr_priv *
nfp_flower_non_repr_priv_get(struct nfp_app *app, struct net_device *netdev)
{
        struct nfp_flower_priv *priv = app->priv;
        struct nfp_flower_non_repr_priv *entry;

        entry = nfp_flower_non_repr_priv_lookup(app, netdev);
        if (entry)
                goto inc_ref;

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return NULL;

        entry->netdev = netdev;
        list_add(&entry->list, &priv->non_repr_priv);

inc_ref:
        __nfp_flower_non_repr_priv_get(entry);
        return entry;
}

void
__nfp_flower_non_repr_priv_put(struct nfp_flower_non_repr_priv *non_repr_priv)
{
        if (--non_repr_priv->ref_count)
                return;

        list_del(&non_repr_priv->list);
        kfree(non_repr_priv);
}

void
nfp_flower_non_repr_priv_put(struct nfp_app *app, struct net_device *netdev)
{
        struct nfp_flower_non_repr_priv *entry;

        entry = nfp_flower_non_repr_priv_lookup(app, netdev);
        if (!entry)
                return;

        __nfp_flower_non_repr_priv_put(entry);
}

static enum nfp_repr_type
nfp_flower_repr_get_type_and_port(struct nfp_app *app, u32 port_id, u8 *port)
{
        switch (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id)) {
        case NFP_FLOWER_CMSG_PORT_TYPE_PHYS_PORT:
                *port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM,
                                  port_id);
                return NFP_REPR_TYPE_PHYS_PORT;

        case NFP_FLOWER_CMSG_PORT_TYPE_PCIE_PORT:
                *port = FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC, port_id);
                if (FIELD_GET(NFP_FLOWER_CMSG_PORT_VNIC_TYPE, port_id) ==
                    NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF)
                        return NFP_REPR_TYPE_PF;
                else
                        return NFP_REPR_TYPE_VF;
        }

        return __NFP_REPR_TYPE_MAX;
}

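/* Resolve a firmware port ID back to a net_device: internal (OTHER) ports are
 * looked up in the internal-port IDR, everything else maps to a representor.
 */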
static struct net_device *
nfp_flower_dev_get(struct nfp_app *app, u32 port_id, bool *redir_egress)
{
        enum nfp_repr_type repr_type;
        struct nfp_reprs *reprs;
        u8 port = 0;

        /* Check if the port is internal. */
        if (FIELD_GET(NFP_FLOWER_CMSG_PORT_TYPE, port_id) ==
            NFP_FLOWER_CMSG_PORT_TYPE_OTHER_PORT) {
                if (redir_egress)
                        *redir_egress = true;
                port = FIELD_GET(NFP_FLOWER_CMSG_PORT_PHYS_PORT_NUM, port_id);
                return nfp_flower_get_netdev_from_internal_port_id(app, port);
        }

        repr_type = nfp_flower_repr_get_type_and_port(app, port_id, &port);
        if (repr_type > NFP_REPR_TYPE_MAX)
                return NULL;

        reprs = rcu_dereference(app->reprs[repr_type]);
        if (!reprs)
                return NULL;

        if (port >= reprs->num_reprs)
                return NULL;

        return rcu_dereference(reprs->reprs[port]);
}

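/* Send a REIFY control message for every representor of @type and return the
 * number of messages sent, so the caller knows how many replies to wait for.
 */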
static int
nfp_flower_reprs_reify(struct nfp_app *app, enum nfp_repr_type type,
                       bool exists)
{
        struct nfp_reprs *reprs;
        int i, err, count = 0;

        reprs = rcu_dereference_protected(app->reprs[type],
                                          lockdep_is_held(&app->pf->lock));
        if (!reprs)
                return 0;

        for (i = 0; i < reprs->num_reprs; i++) {
                struct net_device *netdev;

                netdev = nfp_repr_get_locked(app, reprs, i);
                if (netdev) {
                        struct nfp_repr *repr = netdev_priv(netdev);

                        err = nfp_flower_cmsg_portreify(repr, exists);
                        if (err)
                                return err;
                        count++;
                }
        }

        return count;
}

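/* Block until the firmware has acknowledged @tot_repl REIFY messages or the
 * reply timeout expires.
 */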
static int
nfp_flower_wait_repr_reify(struct nfp_app *app, atomic_t *replies, int tot_repl)
{
        struct nfp_flower_priv *priv = app->priv;

        if (!tot_repl)
                return 0;

        lockdep_assert_held(&app->pf->lock);
        if (!wait_event_timeout(priv->reify_wait_queue,
                                atomic_read(replies) >= tot_repl,
                                NFP_FL_REPLY_TIMEOUT)) {
                nfp_warn(app->cpp, "Not all reprs responded to reify\n");
                return -EIO;
        }

        return 0;
}

static int
nfp_flower_repr_netdev_open(struct nfp_app *app, struct nfp_repr *repr)
{
        int err;

        err = nfp_flower_cmsg_portmod(repr, true, repr->netdev->mtu, false);
        if (err)
                return err;

        netif_tx_wake_all_queues(repr->netdev);

        return 0;
}

static int
nfp_flower_repr_netdev_stop(struct nfp_app *app, struct nfp_repr *repr)
{
        netif_tx_disable(repr->netdev);

        return nfp_flower_cmsg_portmod(repr, false, repr->netdev->mtu, false);
}

static void
nfp_flower_repr_netdev_clean(struct nfp_app *app, struct net_device *netdev)
{
        struct nfp_repr *repr = netdev_priv(netdev);

        kfree(repr->app_priv);
}

static void
nfp_flower_repr_netdev_preclean(struct nfp_app *app, struct net_device *netdev)
{
        struct nfp_repr *repr = netdev_priv(netdev);
        struct nfp_flower_priv *priv = app->priv;
        atomic_t *replies = &priv->reify_replies;
        int err;

        atomic_set(replies, 0);
        err = nfp_flower_cmsg_portreify(repr, false);
        if (err) {
                nfp_warn(app->cpp, "Failed to notify firmware about repr destruction\n");
                return;
        }

        nfp_flower_wait_repr_reify(app, replies, 1);
}

static void nfp_flower_sriov_disable(struct nfp_app *app)
{
        struct nfp_flower_priv *priv = app->priv;

        if (!priv->nn)
                return;

        nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
}

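/* Allocate and register @cnt PF or VF representor netdevs, then notify the
 * firmware of their creation via REIFY and wait for its replies.
 */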
static int
nfp_flower_spawn_vnic_reprs(struct nfp_app *app,
                            enum nfp_flower_cmsg_port_vnic_type vnic_type,
                            enum nfp_repr_type repr_type, unsigned int cnt)
{
        u8 nfp_pcie = nfp_cppcore_pcie_unit(app->pf->cpp);
        struct nfp_flower_priv *priv = app->priv;
        atomic_t *replies = &priv->reify_replies;
        struct nfp_flower_repr_priv *repr_priv;
        enum nfp_port_type port_type;
        struct nfp_repr *nfp_repr;
        struct nfp_reprs *reprs;
        int i, err, reify_cnt;
        const u8 queue = 0;

        port_type = repr_type == NFP_REPR_TYPE_PF ? NFP_PORT_PF_PORT :
                                                    NFP_PORT_VF_PORT;

        reprs = nfp_reprs_alloc(cnt);
        if (!reprs)
                return -ENOMEM;

        for (i = 0; i < cnt; i++) {
                struct net_device *repr;
                struct nfp_port *port;
                u32 port_id;

                repr = nfp_repr_alloc(app);
                if (!repr) {
                        err = -ENOMEM;
                        goto err_reprs_clean;
                }

                repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
                if (!repr_priv) {
                        err = -ENOMEM;
                        nfp_repr_free(repr);
                        goto err_reprs_clean;
                }

                nfp_repr = netdev_priv(repr);
                nfp_repr->app_priv = repr_priv;
                repr_priv->nfp_repr = nfp_repr;

                /* For now we only support 1 PF */
                WARN_ON(repr_type == NFP_REPR_TYPE_PF && i);

                port = nfp_port_alloc(app, port_type, repr);
                if (IS_ERR(port)) {
                        err = PTR_ERR(port);
                        kfree(repr_priv);
                        nfp_repr_free(repr);
                        goto err_reprs_clean;
                }
                if (repr_type == NFP_REPR_TYPE_PF) {
                        port->pf_id = i;
                        port->vnic = priv->nn->dp.ctrl_bar;
                } else {
                        port->pf_id = 0;
                        port->vf_id = i;
                        port->vnic =
                                app->pf->vf_cfg_mem + i * NFP_NET_CFG_BAR_SZ;
                }

                eth_hw_addr_random(repr);

                port_id = nfp_flower_cmsg_pcie_port(nfp_pcie, vnic_type,
                                                    i, queue);
                err = nfp_repr_init(app, repr,
                                    port_id, port, priv->nn->dp.netdev);
                if (err) {
                        kfree(repr_priv);
                        nfp_port_free(port);
                        nfp_repr_free(repr);
                        goto err_reprs_clean;
                }

                RCU_INIT_POINTER(reprs->reprs[i], repr);
                nfp_info(app->cpp, "%s%d Representor(%s) created\n",
                         repr_type == NFP_REPR_TYPE_PF ? "PF" : "VF", i,
                         repr->name);
        }

        nfp_app_reprs_set(app, repr_type, reprs);

        atomic_set(replies, 0);
        reify_cnt = nfp_flower_reprs_reify(app, repr_type, true);
        if (reify_cnt < 0) {
                err = reify_cnt;
                nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
                goto err_reprs_remove;
        }

        err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
        if (err)
                goto err_reprs_remove;

        return 0;
err_reprs_remove:
        reprs = nfp_app_reprs_set(app, repr_type, NULL);
err_reprs_clean:
        nfp_reprs_clean_and_free(app, reprs);
        return err;
}

static int nfp_flower_sriov_enable(struct nfp_app *app, int num_vfs)
{
        struct nfp_flower_priv *priv = app->priv;

        if (!priv->nn)
                return 0;

        return nfp_flower_spawn_vnic_reprs(app,
                                           NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
                                           NFP_REPR_TYPE_VF, num_vfs);
}

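/* Create a representor for each physical port in the eth table and announce
 * them to the firmware with MAC_REPR and REIFY control messages.
 */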
static int
nfp_flower_spawn_phy_reprs(struct nfp_app *app, struct nfp_flower_priv *priv)
{
        struct nfp_eth_table *eth_tbl = app->pf->eth_tbl;
        atomic_t *replies = &priv->reify_replies;
        struct nfp_flower_repr_priv *repr_priv;
        struct nfp_repr *nfp_repr;
        struct sk_buff *ctrl_skb;
        struct nfp_reprs *reprs;
        int err, reify_cnt;
        unsigned int i;

        ctrl_skb = nfp_flower_cmsg_mac_repr_start(app, eth_tbl->count);
        if (!ctrl_skb)
                return -ENOMEM;

        reprs = nfp_reprs_alloc(eth_tbl->max_index + 1);
        if (!reprs) {
                err = -ENOMEM;
                goto err_free_ctrl_skb;
        }

        for (i = 0; i < eth_tbl->count; i++) {
                unsigned int phys_port = eth_tbl->ports[i].index;
                struct net_device *repr;
                struct nfp_port *port;
                u32 cmsg_port_id;

                repr = nfp_repr_alloc(app);
                if (!repr) {
                        err = -ENOMEM;
                        goto err_reprs_clean;
                }

                repr_priv = kzalloc(sizeof(*repr_priv), GFP_KERNEL);
                if (!repr_priv) {
                        err = -ENOMEM;
                        nfp_repr_free(repr);
                        goto err_reprs_clean;
                }

                nfp_repr = netdev_priv(repr);
                nfp_repr->app_priv = repr_priv;
                repr_priv->nfp_repr = nfp_repr;

                port = nfp_port_alloc(app, NFP_PORT_PHYS_PORT, repr);
                if (IS_ERR(port)) {
                        err = PTR_ERR(port);
                        kfree(repr_priv);
                        nfp_repr_free(repr);
                        goto err_reprs_clean;
                }
                err = nfp_port_init_phy_port(app->pf, app, port, i);
                if (err) {
                        kfree(repr_priv);
                        nfp_port_free(port);
                        nfp_repr_free(repr);
                        goto err_reprs_clean;
                }

                SET_NETDEV_DEV(repr, &priv->nn->pdev->dev);
                nfp_net_get_mac_addr(app->pf, repr, port);

                cmsg_port_id = nfp_flower_cmsg_phys_port(phys_port);
                err = nfp_repr_init(app, repr,
                                    cmsg_port_id, port, priv->nn->dp.netdev);
                if (err) {
                        kfree(repr_priv);
                        nfp_port_free(port);
                        nfp_repr_free(repr);
                        goto err_reprs_clean;
                }

                nfp_flower_cmsg_mac_repr_add(ctrl_skb, i,
                                             eth_tbl->ports[i].nbi,
                                             eth_tbl->ports[i].base,
                                             phys_port);

                RCU_INIT_POINTER(reprs->reprs[phys_port], repr);
                nfp_info(app->cpp, "Phys Port %d Representor(%s) created\n",
                         phys_port, repr->name);
        }

        nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, reprs);

        /* The REIFY/MAC_REPR control messages should be sent after the MAC
         * representors are registered using nfp_app_reprs_set(). This is
         * because the firmware may respond with control messages for the
         * MAC representors, f.e. to provide the driver with information
         * about their state, and without registration the driver will drop
         * any such messages.
         */
        atomic_set(replies, 0);
        reify_cnt = nfp_flower_reprs_reify(app, NFP_REPR_TYPE_PHYS_PORT, true);
        if (reify_cnt < 0) {
                err = reify_cnt;
                nfp_warn(app->cpp, "Failed to notify firmware about repr creation\n");
                goto err_reprs_remove;
        }

        err = nfp_flower_wait_repr_reify(app, replies, reify_cnt);
        if (err)
                goto err_reprs_remove;

        nfp_ctrl_tx(app->ctrl, ctrl_skb);

        return 0;
err_reprs_remove:
        reprs = nfp_app_reprs_set(app, NFP_REPR_TYPE_PHYS_PORT, NULL);
err_reprs_clean:
        nfp_reprs_clean_and_free(app, reprs);
err_free_ctrl_skb:
        kfree_skb(ctrl_skb);
        return err;
}

static int nfp_flower_vnic_alloc(struct nfp_app *app, struct nfp_net *nn,
                                 unsigned int id)
{
        if (id > 0) {
                nfp_warn(app->cpp, "FlowerNIC doesn't support more than one data vNIC\n");
                goto err_invalid_port;
        }

        eth_hw_addr_random(nn->dp.netdev);
        netif_keep_dst(nn->dp.netdev);
        nn->vnic_no_name = true;

        return 0;

err_invalid_port:
        nn->port = nfp_port_alloc(app, NFP_PORT_INVALID, nn->dp.netdev);
        return PTR_ERR_OR_ZERO(nn->port);
}

static void nfp_flower_vnic_clean(struct nfp_app *app, struct nfp_net *nn)
{
        struct nfp_flower_priv *priv = app->priv;

        if (app->pf->num_vfs)
                nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_VF);
        nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
        nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);

        priv->nn = NULL;
}

static int nfp_flower_vnic_init(struct nfp_app *app, struct nfp_net *nn)
{
        struct nfp_flower_priv *priv = app->priv;
        int err;

        priv->nn = nn;

        err = nfp_flower_spawn_phy_reprs(app, app->priv);
        if (err)
                goto err_clear_nn;

        err = nfp_flower_spawn_vnic_reprs(app,
                                          NFP_FLOWER_CMSG_PORT_VNIC_TYPE_PF,
                                          NFP_REPR_TYPE_PF, 1);
        if (err)
                goto err_destroy_reprs_phy;

        if (app->pf->num_vfs) {
                err = nfp_flower_spawn_vnic_reprs(app,
                                                  NFP_FLOWER_CMSG_PORT_VNIC_TYPE_VF,
                                                  NFP_REPR_TYPE_VF,
                                                  app->pf->num_vfs);
                if (err)
                        goto err_destroy_reprs_pf;
        }

        return 0;

err_destroy_reprs_pf:
        nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PF);
err_destroy_reprs_phy:
        nfp_reprs_clean_and_free_by_type(app, NFP_REPR_TYPE_PHYS_PORT);
err_clear_nn:
        priv->nn = NULL;
        return err;
}

static void nfp_flower_wait_host_bit(struct nfp_app *app)
{
        unsigned long err_at;
        u64 feat;
        int err;

        /* Wait for HOST_ACK flag bit to propagate */
        err_at = jiffies + msecs_to_jiffies(100);
        do {
                feat = nfp_rtsym_read_le(app->pf->rtbl,
                                         "_abi_flower_combined_features_global",
                                         &err);
                if (time_is_before_eq_jiffies(err_at)) {
                        nfp_warn(app->cpp,
                                 "HOST_ACK bit not propagated in FW.\n");
                        break;
                }
                usleep_range(1000, 2000);
        } while (!err && !(feat & NFP_FL_FEATS_HOST_ACK));

        if (err)
                nfp_warn(app->cpp,
                         "Could not read global features entry from FW\n");
}

static int nfp_flower_sync_feature_bits(struct nfp_app *app)
{
        struct nfp_flower_priv *app_priv = app->priv;
        int err;

        /* Tell the firmware of the host supported features. */
        err = nfp_rtsym_write_le(app->pf->rtbl, "_abi_flower_host_mask",
                                 app_priv->flower_ext_feats |
                                 NFP_FL_FEATS_HOST_ACK);
        if (!err)
                nfp_flower_wait_host_bit(app);
        else if (err != -ENOENT)
                return err;

        /* Tell the firmware that the driver supports lag. */
        err = nfp_rtsym_write_le(app->pf->rtbl,
                                 "_abi_flower_balance_sync_enable", 1);
        if (!err) {
                app_priv->flower_en_feats |= NFP_FL_ENABLE_LAG;
                nfp_flower_lag_init(&app_priv->nfp_lag);
        } else if (err == -ENOENT) {
                nfp_warn(app->cpp, "LAG not supported by FW.\n");
        } else {
                return err;
        }

        if (app_priv->flower_ext_feats & NFP_FL_FEATS_FLOW_MOD) {
                /* Tell the firmware that the driver supports flow merging. */
                err = nfp_rtsym_write_le(app->pf->rtbl,
                                         "_abi_flower_merge_hint_enable", 1);
                if (!err) {
                        app_priv->flower_en_feats |= NFP_FL_ENABLE_FLOW_MERGE;
                        nfp_flower_internal_port_init(app_priv);
                } else if (err == -ENOENT) {
                        nfp_warn(app->cpp,
                                 "Flow merge not supported by FW.\n");
                } else {
                        return err;
                }
        } else {
                nfp_warn(app->cpp, "Flow mod/merge not supported by FW.\n");
        }

        return 0;
}

static int nfp_flower_init(struct nfp_app *app)
{
        u64 version, features, ctx_count, num_mems;
        const struct nfp_pf *pf = app->pf;
        struct nfp_flower_priv *app_priv;
        int err;

        if (!pf->eth_tbl) {
                nfp_warn(app->cpp, "FlowerNIC requires eth table\n");
                return -EINVAL;
        }

        if (!pf->mac_stats_bar) {
                nfp_warn(app->cpp, "FlowerNIC requires mac_stats BAR\n");
                return -EINVAL;
        }

        if (!pf->vf_cfg_bar) {
                nfp_warn(app->cpp, "FlowerNIC requires vf_cfg BAR\n");
                return -EINVAL;
        }

        version = nfp_rtsym_read_le(app->pf->rtbl, "hw_flower_version", &err);
        if (err) {
                nfp_warn(app->cpp, "FlowerNIC requires hw_flower_version memory symbol\n");
                return err;
        }

        num_mems = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_SPLIT",
                                     &err);
        if (err) {
                nfp_warn(app->cpp,
                         "FlowerNIC: unsupported host context memory: %d\n",
                         err);
                err = 0;
                num_mems = 1;
        }

        if (!FIELD_FIT(NFP_FL_STAT_ID_MU_NUM, num_mems) || !num_mems) {
                nfp_warn(app->cpp,
                         "FlowerNIC: invalid host context memory: %llu\n",
                         num_mems);
                return -EINVAL;
        }

        ctx_count = nfp_rtsym_read_le(app->pf->rtbl, "CONFIG_FC_HOST_CTX_COUNT",
                                      &err);
        if (err) {
                nfp_warn(app->cpp,
                         "FlowerNIC: unsupported host context count: %d\n",
                         err);
                err = 0;
                ctx_count = BIT(17);
        }

        /* We need to ensure hardware has enough flower capabilities. */
        if (version != NFP_FLOWER_ALLOWED_VER) {
                nfp_warn(app->cpp, "FlowerNIC: unsupported firmware version\n");
                return -EINVAL;
        }

        app_priv = vzalloc(sizeof(struct nfp_flower_priv));
        if (!app_priv)
                return -ENOMEM;

        app_priv->total_mem_units = num_mems;
        app_priv->active_mem_unit = 0;
        app_priv->stats_ring_size = roundup_pow_of_two(ctx_count);
        app->priv = app_priv;
        app_priv->app = app;
        skb_queue_head_init(&app_priv->cmsg_skbs_high);
        skb_queue_head_init(&app_priv->cmsg_skbs_low);
        INIT_WORK(&app_priv->cmsg_work, nfp_flower_cmsg_process_rx);
        init_waitqueue_head(&app_priv->reify_wait_queue);

        init_waitqueue_head(&app_priv->mtu_conf.wait_q);
        spin_lock_init(&app_priv->mtu_conf.lock);

        err = nfp_flower_metadata_init(app, ctx_count, num_mems);
        if (err)
                goto err_free_app_priv;

        /* Extract the extra features supported by the firmware. */
        features = nfp_rtsym_read_le(app->pf->rtbl,
                                     "_abi_flower_extra_features", &err);
        if (err)
                app_priv->flower_ext_feats = 0;
        else
                app_priv->flower_ext_feats = features & NFP_FL_FEATS_HOST;

        err = nfp_flower_sync_feature_bits(app);
        if (err)
                goto err_cleanup;

        err = flow_indr_dev_register(nfp_flower_indr_setup_tc_cb, app);
        if (err)
                goto err_cleanup;

        if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
                nfp_flower_qos_init(app);

        INIT_LIST_HEAD(&app_priv->indr_block_cb_priv);
        INIT_LIST_HEAD(&app_priv->non_repr_priv);
        app_priv->pre_tun_rule_cnt = 0;

        return 0;

err_cleanup:
        if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG)
                nfp_flower_lag_cleanup(&app_priv->nfp_lag);
        nfp_flower_metadata_cleanup(app);
err_free_app_priv:
        vfree(app->priv);
        return err;
}

static void nfp_flower_clean(struct nfp_app *app)
{
        struct nfp_flower_priv *app_priv = app->priv;

        skb_queue_purge(&app_priv->cmsg_skbs_high);
        skb_queue_purge(&app_priv->cmsg_skbs_low);
        flush_work(&app_priv->cmsg_work);

        if (app_priv->flower_ext_feats & NFP_FL_FEATS_VF_RLIM)
                nfp_flower_qos_cleanup(app);

        if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG)
                nfp_flower_lag_cleanup(&app_priv->nfp_lag);

        if (app_priv->flower_en_feats & NFP_FL_ENABLE_FLOW_MERGE)
                nfp_flower_internal_port_cleanup(app_priv);

        nfp_flower_metadata_cleanup(app);
        vfree(app->priv);
        app->priv = NULL;
}

static bool nfp_flower_check_ack(struct nfp_flower_priv *app_priv)
{
        bool ret;

        spin_lock_bh(&app_priv->mtu_conf.lock);
        ret = app_priv->mtu_conf.ack;
        spin_unlock_bh(&app_priv->mtu_conf.lock);

        return ret;
}

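/* Physical port MTU changes must be acknowledged by the firmware: send a
 * PORTMOD message carrying the new MTU and wait for the ack before accepting
 * the change.
 */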
static int
nfp_flower_repr_change_mtu(struct nfp_app *app, struct net_device *netdev,
                           int new_mtu)
{
        struct nfp_flower_priv *app_priv = app->priv;
        struct nfp_repr *repr = netdev_priv(netdev);
        int err;

        /* Only need to config FW for physical port MTU change. */
        if (repr->port->type != NFP_PORT_PHYS_PORT)
                return 0;

        if (!(app_priv->flower_ext_feats & NFP_FL_NBI_MTU_SETTING)) {
                nfp_err(app->cpp, "Physical port MTU setting not supported\n");
                return -EINVAL;
        }

        spin_lock_bh(&app_priv->mtu_conf.lock);
        app_priv->mtu_conf.ack = false;
        app_priv->mtu_conf.requested_val = new_mtu;
        app_priv->mtu_conf.portnum = repr->dst->u.port_info.port_id;
        spin_unlock_bh(&app_priv->mtu_conf.lock);

        err = nfp_flower_cmsg_portmod(repr, netif_carrier_ok(netdev), new_mtu,
                                      true);
        if (err) {
                spin_lock_bh(&app_priv->mtu_conf.lock);
                app_priv->mtu_conf.requested_val = 0;
                spin_unlock_bh(&app_priv->mtu_conf.lock);
                return err;
        }

        /* Wait for fw to ack the change. */
        if (!wait_event_timeout(app_priv->mtu_conf.wait_q,
                                nfp_flower_check_ack(app_priv),
                                NFP_FL_REPLY_TIMEOUT)) {
                spin_lock_bh(&app_priv->mtu_conf.lock);
                app_priv->mtu_conf.requested_val = 0;
                spin_unlock_bh(&app_priv->mtu_conf.lock);
                nfp_warn(app->cpp, "MTU change not verified with fw\n");
                return -EIO;
        }

        return 0;
}

static int nfp_flower_start(struct nfp_app *app)
{
        struct nfp_flower_priv *app_priv = app->priv;
        int err;

        if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
                err = nfp_flower_lag_reset(&app_priv->nfp_lag);
                if (err)
                        return err;
        }

        return nfp_tunnel_config_start(app);
}

static void nfp_flower_stop(struct nfp_app *app)
{
        nfp_tunnel_config_stop(app);

        flow_indr_dev_unregister(nfp_flower_indr_setup_tc_cb, app,
                                 nfp_flower_setup_indr_tc_release);
}

static int
nfp_flower_netdev_event(struct nfp_app *app, struct net_device *netdev,
                        unsigned long event, void *ptr)
{
        struct nfp_flower_priv *app_priv = app->priv;
        int ret;

        if (app_priv->flower_en_feats & NFP_FL_ENABLE_LAG) {
                ret = nfp_flower_lag_netdev_event(app_priv, netdev, event, ptr);
                if (ret & NOTIFY_STOP_MASK)
                        return ret;
        }

        ret = nfp_flower_internal_port_event_handler(app, netdev, event);
        if (ret & NOTIFY_STOP_MASK)
                return ret;

        return nfp_tunnel_mac_event_handler(app, netdev, event, ptr);
}

const struct nfp_app_type app_flower = {
        .id = NFP_APP_FLOWER_NIC,
        .name = "flower",

        .ctrl_cap_mask = ~0U,
        .ctrl_has_meta = true,

        .extra_cap = nfp_flower_extra_cap,

        .init = nfp_flower_init,
        .clean = nfp_flower_clean,

        .repr_change_mtu = nfp_flower_repr_change_mtu,

        .vnic_alloc = nfp_flower_vnic_alloc,
        .vnic_init = nfp_flower_vnic_init,
        .vnic_clean = nfp_flower_vnic_clean,

        .repr_preclean = nfp_flower_repr_netdev_preclean,
        .repr_clean = nfp_flower_repr_netdev_clean,

        .repr_open = nfp_flower_repr_netdev_open,
        .repr_stop = nfp_flower_repr_netdev_stop,

        .start = nfp_flower_start,
        .stop = nfp_flower_stop,

        .netdev_event = nfp_flower_netdev_event,

        .ctrl_msg_rx = nfp_flower_cmsg_rx,

        .sriov_enable = nfp_flower_sriov_enable,
        .sriov_disable = nfp_flower_sriov_disable,

        .eswitch_mode_get = eswitch_mode_get,
        .dev_get = nfp_flower_dev_get,

        .setup_tc = nfp_flower_setup_tc,
};