1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
 * nfp_net_main.c
 * Netronome network device driver: Main entry point
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Alejandro Lucero <alejandro.lucero@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 */
13 #include <linux/etherdevice.h>
14 #include <linux/kernel.h>
15 #include <linux/init.h>
16 #include <linux/lockdep.h>
17 #include <linux/pci.h>
18 #include <linux/pci_regs.h>
19 #include <linux/msi.h>
20 #include <linux/random.h>
21 #include <linux/rtnetlink.h>
23 #include "nfpcore/nfp.h"
24 #include "nfpcore/nfp_cpp.h"
25 #include "nfpcore/nfp_nffw.h"
26 #include "nfpcore/nfp_nsp.h"
27 #include "nfpcore/nfp6000_pcie.h"
29 #include "nfp_net_ctrl.h"
30 #include "nfp_net_sriov.h"
35 #define NFP_PF_CSR_SLICE_SIZE (32 * 1024)
38 * nfp_net_get_mac_addr() - Get the MAC address.
40 * @netdev: net_device to set MAC address on
41 * @port: NFP port structure
43 * First try to get the MAC address from NSP ETH table. If that
44 * fails generate a random address.
47 nfp_net_get_mac_addr(struct nfp_pf
*pf
, struct net_device
*netdev
,
48 struct nfp_port
*port
)
50 struct nfp_eth_table_port
*eth_port
;
52 eth_port
= __nfp_port_get_eth_port(port
);
54 eth_hw_addr_random(netdev
);
58 ether_addr_copy(netdev
->dev_addr
, eth_port
->mac_addr
);
59 ether_addr_copy(netdev
->perm_addr
, eth_port
->mac_addr
);
62 static struct nfp_eth_table_port
*
63 nfp_net_find_port(struct nfp_eth_table
*eth_tbl
, unsigned int index
)
67 for (i
= 0; eth_tbl
&& i
< eth_tbl
->count
; i
++)
68 if (eth_tbl
->ports
[i
].index
== index
)
69 return ð_tbl
->ports
[i
];
/* Read the number of PF ports from the FW run-time symbol table;
 * defaults to 1 when the symbol is absent.
 */
static int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
{
	return nfp_pf_rtsym_read_optional(pf, "nfd_cfg_pf%u_num_ports", 1);
}
79 static int nfp_net_pf_get_app_id(struct nfp_pf
*pf
)
81 return nfp_pf_rtsym_read_optional(pf
, "_pf%u_net_app_id",
85 static void nfp_net_pf_free_vnic(struct nfp_pf
*pf
, struct nfp_net
*nn
)
87 if (nfp_net_is_data_vnic(nn
))
88 nfp_app_vnic_free(pf
->app
, nn
);
89 nfp_port_free(nn
->port
);
90 list_del(&nn
->vnic_list
);
95 static void nfp_net_pf_free_vnics(struct nfp_pf
*pf
)
97 struct nfp_net
*nn
, *next
;
99 list_for_each_entry_safe(nn
, next
, &pf
->vnics
, vnic_list
)
100 if (nfp_net_is_data_vnic(nn
))
101 nfp_net_pf_free_vnic(pf
, nn
);
104 static struct nfp_net
*
105 nfp_net_pf_alloc_vnic(struct nfp_pf
*pf
, bool needs_netdev
,
106 void __iomem
*ctrl_bar
, void __iomem
*qc_bar
,
107 int stride
, unsigned int id
)
109 u32 tx_base
, rx_base
, n_tx_rings
, n_rx_rings
;
113 tx_base
= readl(ctrl_bar
+ NFP_NET_CFG_START_TXQ
);
114 rx_base
= readl(ctrl_bar
+ NFP_NET_CFG_START_RXQ
);
115 n_tx_rings
= readl(ctrl_bar
+ NFP_NET_CFG_MAX_TXRINGS
);
116 n_rx_rings
= readl(ctrl_bar
+ NFP_NET_CFG_MAX_RXRINGS
);
118 /* Allocate and initialise the vNIC */
119 nn
= nfp_net_alloc(pf
->pdev
, ctrl_bar
, needs_netdev
,
120 n_tx_rings
, n_rx_rings
);
125 nfp_net_get_fw_version(&nn
->fw_ver
, ctrl_bar
);
126 nn
->tx_bar
= qc_bar
+ tx_base
* NFP_QCP_QUEUE_ADDR_SZ
;
127 nn
->rx_bar
= qc_bar
+ rx_base
* NFP_QCP_QUEUE_ADDR_SZ
;
129 nn
->stride_rx
= stride
;
130 nn
->stride_tx
= stride
;
133 err
= nfp_app_vnic_alloc(pf
->app
, nn
, id
);
141 list_add_tail(&nn
->vnic_list
, &pf
->vnics
);
147 nfp_net_pf_init_vnic(struct nfp_pf
*pf
, struct nfp_net
*nn
, unsigned int id
)
154 err
= nfp_devlink_port_register(pf
->app
, nn
->port
);
159 err
= nfp_net_init(nn
);
161 goto err_devlink_port_clean
;
163 nfp_net_debugfs_vnic_add(nn
, pf
->ddir
);
166 nfp_devlink_port_type_eth_set(nn
->port
);
170 if (nfp_net_is_data_vnic(nn
)) {
171 err
= nfp_app_vnic_init(pf
->app
, nn
);
173 goto err_devlink_port_type_clean
;
178 err_devlink_port_type_clean
:
180 nfp_devlink_port_type_clear(nn
->port
);
181 nfp_net_debugfs_dir_clean(&nn
->debugfs_dir
);
183 err_devlink_port_clean
:
185 nfp_devlink_port_unregister(nn
->port
);
190 nfp_net_pf_alloc_vnics(struct nfp_pf
*pf
, void __iomem
*ctrl_bar
,
191 void __iomem
*qc_bar
, int stride
)
197 for (i
= 0; i
< pf
->max_data_vnics
; i
++) {
198 nn
= nfp_net_pf_alloc_vnic(pf
, true, ctrl_bar
, qc_bar
,
205 ctrl_bar
+= NFP_PF_CSR_SLICE_SIZE
;
207 /* Kill the vNIC if app init marked it as invalid */
208 if (nn
->port
&& nn
->port
->type
== NFP_PORT_INVALID
)
209 nfp_net_pf_free_vnic(pf
, nn
);
212 if (list_empty(&pf
->vnics
))
218 nfp_net_pf_free_vnics(pf
);
222 static void nfp_net_pf_clean_vnic(struct nfp_pf
*pf
, struct nfp_net
*nn
)
224 if (nfp_net_is_data_vnic(nn
))
225 nfp_app_vnic_clean(pf
->app
, nn
);
227 nfp_devlink_port_type_clear(nn
->port
);
228 nfp_net_debugfs_dir_clean(&nn
->debugfs_dir
);
231 nfp_devlink_port_unregister(nn
->port
);
234 static int nfp_net_pf_alloc_irqs(struct nfp_pf
*pf
)
236 unsigned int wanted_irqs
, num_irqs
, vnics_left
, irqs_left
;
239 /* Get MSI-X vectors */
241 list_for_each_entry(nn
, &pf
->vnics
, vnic_list
)
242 wanted_irqs
+= NFP_NET_NON_Q_VECTORS
+ nn
->dp
.num_r_vecs
;
243 pf
->irq_entries
= kcalloc(wanted_irqs
, sizeof(*pf
->irq_entries
),
245 if (!pf
->irq_entries
)
248 num_irqs
= nfp_net_irqs_alloc(pf
->pdev
, pf
->irq_entries
,
249 NFP_NET_MIN_VNIC_IRQS
* pf
->num_vnics
,
252 nfp_warn(pf
->cpp
, "Unable to allocate MSI-X vectors\n");
253 kfree(pf
->irq_entries
);
257 /* Distribute IRQs to vNICs */
258 irqs_left
= num_irqs
;
259 vnics_left
= pf
->num_vnics
;
260 list_for_each_entry(nn
, &pf
->vnics
, vnic_list
) {
263 n
= min(NFP_NET_NON_Q_VECTORS
+ nn
->dp
.num_r_vecs
,
264 DIV_ROUND_UP(irqs_left
, vnics_left
));
265 nfp_net_irqs_assign(nn
, &pf
->irq_entries
[num_irqs
- irqs_left
],
274 static void nfp_net_pf_free_irqs(struct nfp_pf
*pf
)
276 nfp_net_irqs_disable(pf
->pdev
);
277 kfree(pf
->irq_entries
);
280 static int nfp_net_pf_init_vnics(struct nfp_pf
*pf
)
286 /* Finish vNIC init and register */
288 list_for_each_entry(nn
, &pf
->vnics
, vnic_list
) {
289 if (!nfp_net_is_data_vnic(nn
))
291 err
= nfp_net_pf_init_vnic(pf
, nn
, id
);
293 goto err_prev_deinit
;
301 list_for_each_entry_continue_reverse(nn
, &pf
->vnics
, vnic_list
)
302 if (nfp_net_is_data_vnic(nn
))
303 nfp_net_pf_clean_vnic(pf
, nn
);
308 nfp_net_pf_app_init(struct nfp_pf
*pf
, u8 __iomem
*qc_bar
, unsigned int stride
)
310 u8 __iomem
*ctrl_bar
;
313 pf
->app
= nfp_app_alloc(pf
, nfp_net_pf_get_app_id(pf
));
315 return PTR_ERR(pf
->app
);
317 mutex_lock(&pf
->lock
);
318 err
= nfp_app_init(pf
->app
);
319 mutex_unlock(&pf
->lock
);
323 if (!nfp_app_needs_ctrl_vnic(pf
->app
))
326 ctrl_bar
= nfp_pf_map_rtsym(pf
, "net.ctrl", "_pf%u_net_ctrl_bar",
327 NFP_PF_CSR_SLICE_SIZE
, &pf
->ctrl_vnic_bar
);
328 if (IS_ERR(ctrl_bar
)) {
329 nfp_err(pf
->cpp
, "Failed to find ctrl vNIC memory symbol\n");
330 err
= PTR_ERR(ctrl_bar
);
334 pf
->ctrl_vnic
= nfp_net_pf_alloc_vnic(pf
, false, ctrl_bar
, qc_bar
,
336 if (IS_ERR(pf
->ctrl_vnic
)) {
337 err
= PTR_ERR(pf
->ctrl_vnic
);
344 nfp_cpp_area_release_free(pf
->ctrl_vnic_bar
);
346 mutex_lock(&pf
->lock
);
347 nfp_app_clean(pf
->app
);
348 mutex_unlock(&pf
->lock
);
350 nfp_app_free(pf
->app
);
355 static void nfp_net_pf_app_clean(struct nfp_pf
*pf
)
358 nfp_net_pf_free_vnic(pf
, pf
->ctrl_vnic
);
359 nfp_cpp_area_release_free(pf
->ctrl_vnic_bar
);
362 mutex_lock(&pf
->lock
);
363 nfp_app_clean(pf
->app
);
364 mutex_unlock(&pf
->lock
);
366 nfp_app_free(pf
->app
);
370 static int nfp_net_pf_app_start_ctrl(struct nfp_pf
*pf
)
376 err
= nfp_net_pf_init_vnic(pf
, pf
->ctrl_vnic
, 0);
380 err
= nfp_ctrl_open(pf
->ctrl_vnic
);
387 nfp_net_pf_clean_vnic(pf
, pf
->ctrl_vnic
);
391 static void nfp_net_pf_app_stop_ctrl(struct nfp_pf
*pf
)
395 nfp_ctrl_close(pf
->ctrl_vnic
);
396 nfp_net_pf_clean_vnic(pf
, pf
->ctrl_vnic
);
399 static int nfp_net_pf_app_start(struct nfp_pf
*pf
)
403 err
= nfp_net_pf_app_start_ctrl(pf
);
407 err
= nfp_app_start(pf
->app
, pf
->ctrl_vnic
);
412 err
= nfp_app_sriov_enable(pf
->app
, pf
->num_vfs
);
420 nfp_app_stop(pf
->app
);
422 nfp_net_pf_app_stop_ctrl(pf
);
426 static void nfp_net_pf_app_stop(struct nfp_pf
*pf
)
429 nfp_app_sriov_disable(pf
->app
);
430 nfp_app_stop(pf
->app
);
431 nfp_net_pf_app_stop_ctrl(pf
);
434 static void nfp_net_pci_unmap_mem(struct nfp_pf
*pf
)
436 if (pf
->vfcfg_tbl2_area
)
437 nfp_cpp_area_release_free(pf
->vfcfg_tbl2_area
);
439 nfp_cpp_area_release_free(pf
->vf_cfg_bar
);
440 if (pf
->mac_stats_bar
)
441 nfp_cpp_area_release_free(pf
->mac_stats_bar
);
442 nfp_cpp_area_release_free(pf
->qc_area
);
443 nfp_cpp_area_release_free(pf
->data_vnic_bar
);
446 static int nfp_net_pci_map_mem(struct nfp_pf
*pf
)
448 u32 min_size
, cpp_id
;
452 min_size
= pf
->max_data_vnics
* NFP_PF_CSR_SLICE_SIZE
;
453 mem
= nfp_pf_map_rtsym(pf
, "net.bar0", "_pf%d_net_bar0",
454 min_size
, &pf
->data_vnic_bar
);
456 nfp_err(pf
->cpp
, "Failed to find data vNIC memory symbol\n");
461 min_size
= NFP_MAC_STATS_SIZE
* (pf
->eth_tbl
->max_index
+ 1);
462 pf
->mac_stats_mem
= nfp_rtsym_map(pf
->rtbl
, "_mac_stats",
463 "net.macstats", min_size
,
465 if (IS_ERR(pf
->mac_stats_mem
)) {
466 if (PTR_ERR(pf
->mac_stats_mem
) != -ENOENT
) {
467 err
= PTR_ERR(pf
->mac_stats_mem
);
470 pf
->mac_stats_mem
= NULL
;
474 pf
->vf_cfg_mem
= nfp_pf_map_rtsym(pf
, "net.vfcfg", "_pf%d_net_vf_bar",
475 NFP_NET_CFG_BAR_SZ
* pf
->limit_vfs
,
477 if (IS_ERR(pf
->vf_cfg_mem
)) {
478 if (PTR_ERR(pf
->vf_cfg_mem
) != -ENOENT
) {
479 err
= PTR_ERR(pf
->vf_cfg_mem
);
480 goto err_unmap_mac_stats
;
482 pf
->vf_cfg_mem
= NULL
;
485 min_size
= NFP_NET_VF_CFG_SZ
* pf
->limit_vfs
+ NFP_NET_VF_CFG_MB_SZ
;
486 pf
->vfcfg_tbl2
= nfp_pf_map_rtsym(pf
, "net.vfcfg_tbl2",
488 min_size
, &pf
->vfcfg_tbl2_area
);
489 if (IS_ERR(pf
->vfcfg_tbl2
)) {
490 if (PTR_ERR(pf
->vfcfg_tbl2
) != -ENOENT
) {
491 err
= PTR_ERR(pf
->vfcfg_tbl2
);
492 goto err_unmap_vf_cfg
;
494 pf
->vfcfg_tbl2
= NULL
;
497 cpp_id
= NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW
, 0, 0);
498 mem
= nfp_cpp_map_area(pf
->cpp
, "net.qc", cpp_id
, NFP_PCIE_QUEUE(0),
499 NFP_QCP_QUEUE_AREA_SZ
, &pf
->qc_area
);
501 nfp_err(pf
->cpp
, "Failed to map Queue Controller area.\n");
503 goto err_unmap_vfcfg_tbl2
;
508 err_unmap_vfcfg_tbl2
:
509 if (pf
->vfcfg_tbl2_area
)
510 nfp_cpp_area_release_free(pf
->vfcfg_tbl2_area
);
513 nfp_cpp_area_release_free(pf
->vf_cfg_bar
);
515 if (pf
->mac_stats_bar
)
516 nfp_cpp_area_release_free(pf
->mac_stats_bar
);
518 nfp_cpp_area_release_free(pf
->data_vnic_bar
);
523 nfp_net_eth_port_update(struct nfp_cpp
*cpp
, struct nfp_port
*port
,
524 struct nfp_eth_table
*eth_table
)
526 struct nfp_eth_table_port
*eth_port
;
530 eth_port
= nfp_net_find_port(eth_table
, port
->eth_id
);
532 set_bit(NFP_PORT_CHANGED
, &port
->flags
);
533 nfp_warn(cpp
, "Warning: port #%d not present after reconfig\n",
537 if (eth_port
->override_changed
) {
538 nfp_warn(cpp
, "Port #%d config changed, unregistering. Driver reload required before port will be operational again.\n", port
->eth_id
);
539 port
->type
= NFP_PORT_INVALID
;
542 memcpy(port
->eth_port
, eth_port
, sizeof(*eth_port
));
547 int nfp_net_refresh_port_table_sync(struct nfp_pf
*pf
)
549 struct nfp_eth_table
*eth_table
;
550 struct nfp_net
*nn
, *next
;
551 struct nfp_port
*port
;
554 lockdep_assert_held(&pf
->lock
);
556 /* Check for nfp_net_pci_remove() racing against us */
557 if (list_empty(&pf
->vnics
))
560 /* Update state of all ports */
562 list_for_each_entry(port
, &pf
->ports
, port_list
)
563 clear_bit(NFP_PORT_CHANGED
, &port
->flags
);
565 eth_table
= nfp_eth_read_ports(pf
->cpp
);
567 list_for_each_entry(port
, &pf
->ports
, port_list
)
568 if (__nfp_port_get_eth_port(port
))
569 set_bit(NFP_PORT_CHANGED
, &port
->flags
);
571 nfp_err(pf
->cpp
, "Error refreshing port config!\n");
575 list_for_each_entry(port
, &pf
->ports
, port_list
)
576 if (__nfp_port_get_eth_port(port
))
577 nfp_net_eth_port_update(pf
->cpp
, port
, eth_table
);
582 /* Resync repr state. This may cause reprs to be removed. */
583 err
= nfp_reprs_resync_phys_ports(pf
->app
);
587 /* Shoot off the ports which became invalid */
588 list_for_each_entry_safe(nn
, next
, &pf
->vnics
, vnic_list
) {
589 if (!nn
->port
|| nn
->port
->type
!= NFP_PORT_INVALID
)
592 nfp_net_pf_clean_vnic(pf
, nn
);
593 nfp_net_pf_free_vnic(pf
, nn
);
599 static void nfp_net_refresh_vnics(struct work_struct
*work
)
601 struct nfp_pf
*pf
= container_of(work
, struct nfp_pf
,
604 mutex_lock(&pf
->lock
);
605 nfp_net_refresh_port_table_sync(pf
);
606 mutex_unlock(&pf
->lock
);
609 void nfp_net_refresh_port_table(struct nfp_port
*port
)
611 struct nfp_pf
*pf
= port
->app
->pf
;
613 set_bit(NFP_PORT_CHANGED
, &port
->flags
);
615 queue_work(pf
->wq
, &pf
->port_refresh_work
);
618 int nfp_net_refresh_eth_port(struct nfp_port
*port
)
620 struct nfp_cpp
*cpp
= port
->app
->cpp
;
621 struct nfp_eth_table
*eth_table
;
624 clear_bit(NFP_PORT_CHANGED
, &port
->flags
);
626 eth_table
= nfp_eth_read_ports(cpp
);
628 set_bit(NFP_PORT_CHANGED
, &port
->flags
);
629 nfp_err(cpp
, "Error refreshing port state table!\n");
633 ret
= nfp_net_eth_port_update(cpp
, port
, eth_table
);
/*
 * PCI device functions
 */
643 int nfp_net_pci_probe(struct nfp_pf
*pf
)
645 struct devlink
*devlink
= priv_to_devlink(pf
);
646 struct nfp_net_fw_version fw_ver
;
647 u8 __iomem
*ctrl_bar
, *qc_bar
;
651 INIT_WORK(&pf
->port_refresh_work
, nfp_net_refresh_vnics
);
654 nfp_err(pf
->cpp
, "No %s, giving up.\n",
655 pf
->fw_loaded
? "symbol table" : "firmware found");
659 pf
->max_data_vnics
= nfp_net_pf_get_num_ports(pf
);
660 if ((int)pf
->max_data_vnics
< 0)
661 return pf
->max_data_vnics
;
663 err
= nfp_net_pci_map_mem(pf
);
667 ctrl_bar
= nfp_cpp_area_iomem(pf
->data_vnic_bar
);
668 qc_bar
= nfp_cpp_area_iomem(pf
->qc_area
);
669 if (!ctrl_bar
|| !qc_bar
) {
674 nfp_net_get_fw_version(&fw_ver
, ctrl_bar
);
675 if (fw_ver
.resv
|| fw_ver
.class != NFP_NET_CFG_VERSION_CLASS_GENERIC
) {
676 nfp_err(pf
->cpp
, "Unknown Firmware ABI %d.%d.%d.%d\n",
677 fw_ver
.resv
, fw_ver
.class, fw_ver
.major
, fw_ver
.minor
);
682 /* Determine stride */
683 if (nfp_net_fw_ver_eq(&fw_ver
, 0, 0, 0, 1)) {
685 nfp_warn(pf
->cpp
, "OBSOLETE Firmware detected - VF isolation not available\n");
687 switch (fw_ver
.major
) {
692 nfp_err(pf
->cpp
, "Unsupported Firmware ABI %d.%d.%d.%d\n",
693 fw_ver
.resv
, fw_ver
.class,
694 fw_ver
.major
, fw_ver
.minor
);
700 err
= nfp_net_pf_app_init(pf
, qc_bar
, stride
);
704 err
= devlink_register(devlink
, &pf
->pdev
->dev
);
708 err
= nfp_shared_buf_register(pf
);
710 goto err_devlink_unreg
;
712 err
= nfp_devlink_params_register(pf
);
714 goto err_shared_buf_unreg
;
716 mutex_lock(&pf
->lock
);
717 pf
->ddir
= nfp_net_debugfs_device_add(pf
->pdev
);
719 /* Allocate the vnics and do basic init */
720 err
= nfp_net_pf_alloc_vnics(pf
, ctrl_bar
, qc_bar
, stride
);
724 err
= nfp_net_pf_alloc_irqs(pf
);
728 err
= nfp_net_pf_app_start(pf
);
732 err
= nfp_net_pf_init_vnics(pf
);
736 mutex_unlock(&pf
->lock
);
741 nfp_net_pf_app_stop(pf
);
743 nfp_net_pf_free_irqs(pf
);
745 nfp_net_pf_free_vnics(pf
);
747 nfp_net_debugfs_dir_clean(&pf
->ddir
);
748 mutex_unlock(&pf
->lock
);
749 nfp_devlink_params_unregister(pf
);
750 err_shared_buf_unreg
:
751 nfp_shared_buf_unregister(pf
);
753 cancel_work_sync(&pf
->port_refresh_work
);
754 devlink_unregister(devlink
);
756 nfp_net_pf_app_clean(pf
);
758 nfp_net_pci_unmap_mem(pf
);
762 void nfp_net_pci_remove(struct nfp_pf
*pf
)
764 struct nfp_net
*nn
, *next
;
766 mutex_lock(&pf
->lock
);
767 list_for_each_entry_safe(nn
, next
, &pf
->vnics
, vnic_list
) {
768 if (!nfp_net_is_data_vnic(nn
))
770 nfp_net_pf_clean_vnic(pf
, nn
);
771 nfp_net_pf_free_vnic(pf
, nn
);
774 nfp_net_pf_app_stop(pf
);
775 /* stop app first, to avoid double free of ctrl vNIC's ddir */
776 nfp_net_debugfs_dir_clean(&pf
->ddir
);
778 mutex_unlock(&pf
->lock
);
780 nfp_devlink_params_unregister(pf
);
781 nfp_shared_buf_unregister(pf
);
782 devlink_unregister(priv_to_devlink(pf
));
784 nfp_net_pf_free_irqs(pf
);
785 nfp_net_pf_app_clean(pf
);
786 nfp_net_pci_unmap_mem(pf
);
788 cancel_work_sync(&pf
->port_refresh_work
);