1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/*
 * Netronome network device driver: Main entry point
 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Alejandro Lucero <alejandro.lucero@netronome.com>
 *          Jason McMullan <jason.mcmullan@netronome.com>
 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
 */
13 #include <linux/etherdevice.h>
14 #include <linux/kernel.h>
15 #include <linux/init.h>
16 #include <linux/lockdep.h>
17 #include <linux/pci.h>
18 #include <linux/pci_regs.h>
19 #include <linux/random.h>
20 #include <linux/rtnetlink.h>
22 #include "nfpcore/nfp.h"
23 #include "nfpcore/nfp_cpp.h"
24 #include "nfpcore/nfp_dev.h"
25 #include "nfpcore/nfp_nffw.h"
26 #include "nfpcore/nfp_nsp.h"
27 #include "nfpcore/nfp6000_pcie.h"
29 #include "nfp_net_ctrl.h"
30 #include "nfp_net_sriov.h"
35 #define NFP_PF_CSR_SLICE_SIZE (32 * 1024)
38 * nfp_net_get_mac_addr() - Get the MAC address.
40 * @netdev: net_device to set MAC address on
41 * @port: NFP port structure
43 * First try to get the MAC address from NSP ETH table. If that
44 * fails generate a random address.
47 nfp_net_get_mac_addr(struct nfp_pf
*pf
, struct net_device
*netdev
,
48 struct nfp_port
*port
)
50 struct nfp_eth_table_port
*eth_port
;
52 eth_port
= __nfp_port_get_eth_port(port
);
54 eth_hw_addr_random(netdev
);
58 eth_hw_addr_set(netdev
, eth_port
->mac_addr
);
59 ether_addr_copy(netdev
->perm_addr
, eth_port
->mac_addr
);
62 static struct nfp_eth_table_port
*
63 nfp_net_find_port(struct nfp_eth_table
*eth_tbl
, unsigned int index
)
67 for (i
= 0; eth_tbl
&& i
< eth_tbl
->count
; i
++)
68 if (eth_tbl
->ports
[i
].index
== index
)
69 return ð_tbl
->ports
[i
];
/* Read the number of PF ports from the firmware run-time symbol table;
 * default to 1 when the optional symbol is absent.
 */
static int nfp_net_pf_get_num_ports(struct nfp_pf *pf)
{
	return nfp_pf_rtsym_read_optional(pf, "nfd_cfg_pf%u_num_ports", 1);
}
79 static void nfp_net_pf_free_vnic(struct nfp_pf
*pf
, struct nfp_net
*nn
)
81 if (nfp_net_is_data_vnic(nn
))
82 nfp_app_vnic_free(pf
->app
, nn
);
83 nfp_port_free(nn
->port
);
84 list_del(&nn
->vnic_list
);
89 static void nfp_net_pf_free_vnics(struct nfp_pf
*pf
)
91 struct nfp_net
*nn
, *next
;
93 list_for_each_entry_safe(nn
, next
, &pf
->vnics
, vnic_list
)
94 if (nfp_net_is_data_vnic(nn
))
95 nfp_net_pf_free_vnic(pf
, nn
);
98 static struct nfp_net
*
99 nfp_net_pf_alloc_vnic(struct nfp_pf
*pf
, bool needs_netdev
,
100 void __iomem
*ctrl_bar
, void __iomem
*qc_bar
,
101 int stride
, unsigned int id
)
103 u32 tx_base
, rx_base
, n_tx_rings
, n_rx_rings
;
107 tx_base
= readl(ctrl_bar
+ NFP_NET_CFG_START_TXQ
);
108 rx_base
= readl(ctrl_bar
+ NFP_NET_CFG_START_RXQ
);
109 n_tx_rings
= readl(ctrl_bar
+ NFP_NET_CFG_MAX_TXRINGS
);
110 n_rx_rings
= readl(ctrl_bar
+ NFP_NET_CFG_MAX_RXRINGS
);
112 /* Allocate and initialise the vNIC */
113 nn
= nfp_net_alloc(pf
->pdev
, pf
->dev_info
, ctrl_bar
, needs_netdev
,
114 n_tx_rings
, n_rx_rings
);
119 nn
->tx_bar
= qc_bar
+ tx_base
* NFP_QCP_QUEUE_ADDR_SZ
;
120 nn
->rx_bar
= qc_bar
+ rx_base
* NFP_QCP_QUEUE_ADDR_SZ
;
122 nn
->stride_rx
= stride
;
123 nn
->stride_tx
= stride
;
126 err
= nfp_app_vnic_alloc(pf
->app
, nn
, id
);
134 list_add_tail(&nn
->vnic_list
, &pf
->vnics
);
140 nfp_net_pf_init_vnic(struct nfp_pf
*pf
, struct nfp_net
*nn
, unsigned int id
)
147 err
= nfp_devlink_port_register(pf
->app
, nn
->port
);
152 err
= nfp_net_init(nn
);
154 goto err_devlink_port_clean
;
156 nfp_net_debugfs_vnic_add(nn
, pf
->ddir
);
160 if (nfp_net_is_data_vnic(nn
)) {
161 err
= nfp_app_vnic_init(pf
->app
, nn
);
163 goto err_debugfs_vnic_clean
;
168 err_debugfs_vnic_clean
:
169 nfp_net_debugfs_dir_clean(&nn
->debugfs_dir
);
171 err_devlink_port_clean
:
173 nfp_devlink_port_unregister(nn
->port
);
178 nfp_net_pf_alloc_vnics(struct nfp_pf
*pf
, void __iomem
*ctrl_bar
,
179 void __iomem
*qc_bar
, int stride
)
185 for (i
= 0; i
< pf
->max_data_vnics
; i
++) {
186 nn
= nfp_net_pf_alloc_vnic(pf
, true, ctrl_bar
, qc_bar
,
194 nn
->port
->link_cb
= nfp_net_refresh_port_table
;
196 ctrl_bar
+= NFP_PF_CSR_SLICE_SIZE
;
198 /* Kill the vNIC if app init marked it as invalid */
199 if (nn
->port
&& nn
->port
->type
== NFP_PORT_INVALID
)
200 nfp_net_pf_free_vnic(pf
, nn
);
203 if (list_empty(&pf
->vnics
))
209 nfp_net_pf_free_vnics(pf
);
213 static void nfp_net_pf_clean_vnic(struct nfp_pf
*pf
, struct nfp_net
*nn
)
215 if (nfp_net_is_data_vnic(nn
))
216 nfp_app_vnic_clean(pf
->app
, nn
);
217 nfp_net_debugfs_dir_clean(&nn
->debugfs_dir
);
220 nfp_devlink_port_unregister(nn
->port
);
223 static int nfp_net_pf_alloc_irqs(struct nfp_pf
*pf
)
225 unsigned int wanted_irqs
, num_irqs
, vnics_left
, irqs_left
;
228 /* Get MSI-X vectors */
230 list_for_each_entry(nn
, &pf
->vnics
, vnic_list
)
231 wanted_irqs
+= NFP_NET_NON_Q_VECTORS
+ nn
->dp
.num_r_vecs
;
232 pf
->irq_entries
= kcalloc(wanted_irqs
, sizeof(*pf
->irq_entries
),
234 if (!pf
->irq_entries
)
237 num_irqs
= nfp_net_irqs_alloc(pf
->pdev
, pf
->irq_entries
,
238 NFP_NET_MIN_VNIC_IRQS
* pf
->num_vnics
,
241 nfp_warn(pf
->cpp
, "Unable to allocate MSI-X vectors\n");
242 kfree(pf
->irq_entries
);
246 /* Distribute IRQs to vNICs */
247 irqs_left
= num_irqs
;
248 vnics_left
= pf
->num_vnics
;
249 list_for_each_entry(nn
, &pf
->vnics
, vnic_list
) {
252 n
= min(NFP_NET_NON_Q_VECTORS
+ nn
->dp
.num_r_vecs
,
253 DIV_ROUND_UP(irqs_left
, vnics_left
));
254 nfp_net_irqs_assign(nn
, &pf
->irq_entries
[num_irqs
- irqs_left
],
263 static void nfp_net_pf_free_irqs(struct nfp_pf
*pf
)
265 nfp_net_irqs_disable(pf
->pdev
);
266 kfree(pf
->irq_entries
);
269 static int nfp_net_pf_init_vnics(struct nfp_pf
*pf
)
275 /* Finish vNIC init and register */
277 list_for_each_entry(nn
, &pf
->vnics
, vnic_list
) {
278 if (!nfp_net_is_data_vnic(nn
))
280 err
= nfp_net_pf_init_vnic(pf
, nn
, id
);
282 goto err_prev_deinit
;
290 list_for_each_entry_continue_reverse(nn
, &pf
->vnics
, vnic_list
)
291 if (nfp_net_is_data_vnic(nn
))
292 nfp_net_pf_clean_vnic(pf
, nn
);
297 nfp_net_pf_app_init(struct nfp_pf
*pf
, u8 __iomem
*qc_bar
, unsigned int stride
)
299 struct devlink
*devlink
= priv_to_devlink(pf
);
300 u8 __iomem
*ctrl_bar
;
303 pf
->app
= nfp_app_alloc(pf
, nfp_net_pf_get_app_id(pf
));
305 return PTR_ERR(pf
->app
);
308 err
= nfp_app_init(pf
->app
);
309 devl_unlock(devlink
);
313 if (!nfp_app_needs_ctrl_vnic(pf
->app
))
316 ctrl_bar
= nfp_pf_map_rtsym(pf
, "net.ctrl", "_pf%u_net_ctrl_bar",
317 NFP_PF_CSR_SLICE_SIZE
, &pf
->ctrl_vnic_bar
);
318 if (IS_ERR(ctrl_bar
)) {
319 nfp_err(pf
->cpp
, "Failed to find ctrl vNIC memory symbol\n");
320 err
= PTR_ERR(ctrl_bar
);
324 pf
->ctrl_vnic
= nfp_net_pf_alloc_vnic(pf
, false, ctrl_bar
, qc_bar
,
326 if (IS_ERR(pf
->ctrl_vnic
)) {
327 err
= PTR_ERR(pf
->ctrl_vnic
);
334 nfp_cpp_area_release_free(pf
->ctrl_vnic_bar
);
337 nfp_app_clean(pf
->app
);
338 devl_unlock(devlink
);
340 nfp_app_free(pf
->app
);
345 static void nfp_net_pf_app_clean(struct nfp_pf
*pf
)
347 struct devlink
*devlink
= priv_to_devlink(pf
);
350 nfp_net_pf_free_vnic(pf
, pf
->ctrl_vnic
);
351 nfp_cpp_area_release_free(pf
->ctrl_vnic_bar
);
355 nfp_app_clean(pf
->app
);
356 devl_unlock(devlink
);
358 nfp_app_free(pf
->app
);
362 static int nfp_net_pf_app_start_ctrl(struct nfp_pf
*pf
)
368 err
= nfp_net_pf_init_vnic(pf
, pf
->ctrl_vnic
, 0);
372 err
= nfp_ctrl_open(pf
->ctrl_vnic
);
379 nfp_net_pf_clean_vnic(pf
, pf
->ctrl_vnic
);
383 static void nfp_net_pf_app_stop_ctrl(struct nfp_pf
*pf
)
387 nfp_ctrl_close(pf
->ctrl_vnic
);
388 nfp_net_pf_clean_vnic(pf
, pf
->ctrl_vnic
);
391 static int nfp_net_pf_app_start(struct nfp_pf
*pf
)
395 err
= nfp_net_pf_app_start_ctrl(pf
);
399 err
= nfp_app_start(pf
->app
, pf
->ctrl_vnic
);
404 err
= nfp_app_sriov_enable(pf
->app
, pf
->num_vfs
);
412 nfp_app_stop(pf
->app
);
414 nfp_net_pf_app_stop_ctrl(pf
);
418 static void nfp_net_pf_app_stop(struct nfp_pf
*pf
)
421 nfp_app_sriov_disable(pf
->app
);
422 nfp_app_stop(pf
->app
);
423 nfp_net_pf_app_stop_ctrl(pf
);
426 static void nfp_net_pci_unmap_mem(struct nfp_pf
*pf
)
428 if (pf
->vfcfg_tbl2_area
)
429 nfp_cpp_area_release_free(pf
->vfcfg_tbl2_area
);
431 nfp_cpp_area_release_free(pf
->vf_cfg_bar
);
432 if (pf
->mac_stats_bar
)
433 nfp_cpp_area_release_free(pf
->mac_stats_bar
);
434 nfp_cpp_area_release_free(pf
->qc_area
);
435 nfp_cpp_area_release_free(pf
->data_vnic_bar
);
438 static int nfp_net_pci_map_mem(struct nfp_pf
*pf
)
440 u32 min_size
, cpp_id
;
444 min_size
= pf
->max_data_vnics
* NFP_PF_CSR_SLICE_SIZE
;
445 mem
= nfp_pf_map_rtsym(pf
, "net.bar0", "_pf%d_net_bar0",
446 min_size
, &pf
->data_vnic_bar
);
448 nfp_err(pf
->cpp
, "Failed to find data vNIC memory symbol\n");
453 min_size
= NFP_MAC_STATS_SIZE
* (pf
->eth_tbl
->max_index
+ 1);
454 pf
->mac_stats_mem
= nfp_rtsym_map(pf
->rtbl
, "_mac_stats",
455 "net.macstats", min_size
,
457 if (IS_ERR(pf
->mac_stats_mem
)) {
458 if (PTR_ERR(pf
->mac_stats_mem
) != -ENOENT
) {
459 err
= PTR_ERR(pf
->mac_stats_mem
);
462 pf
->mac_stats_mem
= NULL
;
466 pf
->vf_cfg_mem
= nfp_pf_map_rtsym(pf
, "net.vfcfg", "_pf%d_net_vf_bar",
467 NFP_NET_CFG_BAR_SZ
* pf
->limit_vfs
,
469 if (IS_ERR(pf
->vf_cfg_mem
)) {
470 if (PTR_ERR(pf
->vf_cfg_mem
) != -ENOENT
) {
471 err
= PTR_ERR(pf
->vf_cfg_mem
);
472 goto err_unmap_mac_stats
;
474 pf
->vf_cfg_mem
= NULL
;
477 min_size
= NFP_NET_VF_CFG_SZ
* pf
->limit_vfs
+ NFP_NET_VF_CFG_MB_SZ
;
478 pf
->vfcfg_tbl2
= nfp_pf_map_rtsym(pf
, "net.vfcfg_tbl2",
480 min_size
, &pf
->vfcfg_tbl2_area
);
481 if (IS_ERR(pf
->vfcfg_tbl2
)) {
482 if (PTR_ERR(pf
->vfcfg_tbl2
) != -ENOENT
) {
483 err
= PTR_ERR(pf
->vfcfg_tbl2
);
484 goto err_unmap_vf_cfg
;
486 pf
->vfcfg_tbl2
= NULL
;
489 cpp_id
= NFP_CPP_ISLAND_ID(0, NFP_CPP_ACTION_RW
, 0, 0);
490 mem
= nfp_cpp_map_area(pf
->cpp
, "net.qc", cpp_id
,
491 nfp_qcp_queue_offset(pf
->dev_info
, 0),
492 pf
->dev_info
->qc_area_sz
, &pf
->qc_area
);
494 nfp_err(pf
->cpp
, "Failed to map Queue Controller area.\n");
496 goto err_unmap_vfcfg_tbl2
;
501 err_unmap_vfcfg_tbl2
:
502 if (pf
->vfcfg_tbl2_area
)
503 nfp_cpp_area_release_free(pf
->vfcfg_tbl2_area
);
506 nfp_cpp_area_release_free(pf
->vf_cfg_bar
);
508 if (pf
->mac_stats_bar
)
509 nfp_cpp_area_release_free(pf
->mac_stats_bar
);
511 nfp_cpp_area_release_free(pf
->data_vnic_bar
);
515 static const unsigned int lr_to_speed
[] = {
516 [NFP_NET_CFG_STS_LINK_RATE_UNSUPPORTED
] = 0,
517 [NFP_NET_CFG_STS_LINK_RATE_UNKNOWN
] = SPEED_UNKNOWN
,
518 [NFP_NET_CFG_STS_LINK_RATE_1G
] = SPEED_1000
,
519 [NFP_NET_CFG_STS_LINK_RATE_10G
] = SPEED_10000
,
520 [NFP_NET_CFG_STS_LINK_RATE_25G
] = SPEED_25000
,
521 [NFP_NET_CFG_STS_LINK_RATE_40G
] = SPEED_40000
,
522 [NFP_NET_CFG_STS_LINK_RATE_50G
] = SPEED_50000
,
523 [NFP_NET_CFG_STS_LINK_RATE_100G
] = SPEED_100000
,
526 unsigned int nfp_net_lr2speed(unsigned int linkrate
)
528 if (linkrate
< ARRAY_SIZE(lr_to_speed
))
529 return lr_to_speed
[linkrate
];
531 return SPEED_UNKNOWN
;
534 unsigned int nfp_net_speed2lr(unsigned int speed
)
538 for (i
= 0; i
< ARRAY_SIZE(lr_to_speed
); i
++) {
539 if (speed
== lr_to_speed
[i
])
543 return NFP_NET_CFG_STS_LINK_RATE_UNKNOWN
;
546 static void nfp_net_notify_port_speed(struct nfp_port
*port
)
548 struct net_device
*netdev
= port
->netdev
;
552 if (!nfp_netdev_is_nfp_net(netdev
))
555 nn
= netdev_priv(netdev
);
556 sts
= nn_readw(nn
, NFP_NET_CFG_STS
);
558 if (!(sts
& NFP_NET_CFG_STS_LINK
)) {
559 nn_writew(nn
, NFP_NET_CFG_STS_NSP_LINK_RATE
, NFP_NET_CFG_STS_LINK_RATE_UNKNOWN
);
563 nn_writew(nn
, NFP_NET_CFG_STS_NSP_LINK_RATE
, nfp_net_speed2lr(port
->eth_port
->speed
));
567 nfp_net_eth_port_update(struct nfp_cpp
*cpp
, struct nfp_port
*port
,
568 struct nfp_eth_table
*eth_table
)
570 struct nfp_eth_table_port
*eth_port
;
574 eth_port
= nfp_net_find_port(eth_table
, port
->eth_id
);
576 set_bit(NFP_PORT_CHANGED
, &port
->flags
);
577 nfp_warn(cpp
, "Warning: port #%d not present after reconfig\n",
581 if (eth_port
->override_changed
) {
582 nfp_warn(cpp
, "Port #%d config changed, unregistering. Driver reload required before port will be operational again.\n", port
->eth_id
);
583 port
->type
= NFP_PORT_INVALID
;
586 memcpy(port
->eth_port
, eth_port
, sizeof(*eth_port
));
587 nfp_net_notify_port_speed(port
);
592 int nfp_net_refresh_port_table_sync(struct nfp_pf
*pf
)
594 struct devlink
*devlink
= priv_to_devlink(pf
);
595 struct nfp_eth_table
*eth_table
;
596 struct nfp_net
*nn
, *next
;
597 struct nfp_port
*port
;
600 devl_assert_locked(devlink
);
602 /* Check for nfp_net_pci_remove() racing against us */
603 if (list_empty(&pf
->vnics
))
606 /* Update state of all ports */
608 list_for_each_entry(port
, &pf
->ports
, port_list
)
609 clear_bit(NFP_PORT_CHANGED
, &port
->flags
);
611 eth_table
= nfp_eth_read_ports(pf
->cpp
);
613 list_for_each_entry(port
, &pf
->ports
, port_list
)
614 if (__nfp_port_get_eth_port(port
))
615 set_bit(NFP_PORT_CHANGED
, &port
->flags
);
617 nfp_err(pf
->cpp
, "Error refreshing port config!\n");
621 list_for_each_entry(port
, &pf
->ports
, port_list
)
622 if (__nfp_port_get_eth_port(port
))
623 nfp_net_eth_port_update(pf
->cpp
, port
, eth_table
);
628 /* Resync repr state. This may cause reprs to be removed. */
629 err
= nfp_reprs_resync_phys_ports(pf
->app
);
633 /* Shoot off the ports which became invalid */
634 list_for_each_entry_safe(nn
, next
, &pf
->vnics
, vnic_list
) {
635 if (!nn
->port
|| nn
->port
->type
!= NFP_PORT_INVALID
)
638 nfp_net_pf_clean_vnic(pf
, nn
);
639 nfp_net_pf_free_vnic(pf
, nn
);
645 static void nfp_net_refresh_vnics(struct work_struct
*work
)
647 struct nfp_pf
*pf
= container_of(work
, struct nfp_pf
,
649 struct devlink
*devlink
= priv_to_devlink(pf
);
652 nfp_net_refresh_port_table_sync(pf
);
653 devl_unlock(devlink
);
656 void nfp_net_refresh_port_table(struct nfp_port
*port
)
658 struct nfp_pf
*pf
= port
->app
->pf
;
660 set_bit(NFP_PORT_CHANGED
, &port
->flags
);
662 queue_work(pf
->wq
, &pf
->port_refresh_work
);
665 int nfp_net_refresh_eth_port(struct nfp_port
*port
)
667 struct nfp_cpp
*cpp
= port
->app
->cpp
;
668 struct nfp_eth_table
*eth_table
;
671 clear_bit(NFP_PORT_CHANGED
, &port
->flags
);
673 eth_table
= nfp_eth_read_ports(cpp
);
675 set_bit(NFP_PORT_CHANGED
, &port
->flags
);
676 nfp_err(cpp
, "Error refreshing port state table!\n");
680 ret
= nfp_net_eth_port_update(cpp
, port
, eth_table
);
688 * PCI device functions
690 int nfp_net_pci_probe(struct nfp_pf
*pf
)
692 struct devlink
*devlink
= priv_to_devlink(pf
);
693 struct nfp_net_fw_version fw_ver
;
694 u8 __iomem
*ctrl_bar
, *qc_bar
;
698 INIT_WORK(&pf
->port_refresh_work
, nfp_net_refresh_vnics
);
701 nfp_err(pf
->cpp
, "No %s, giving up.\n",
702 pf
->fw_loaded
? "symbol table" : "firmware found");
706 pf
->max_data_vnics
= nfp_net_pf_get_num_ports(pf
);
707 if ((int)pf
->max_data_vnics
< 0)
708 return pf
->max_data_vnics
;
710 err
= nfp_net_pci_map_mem(pf
);
714 ctrl_bar
= nfp_cpp_area_iomem(pf
->data_vnic_bar
);
715 qc_bar
= nfp_cpp_area_iomem(pf
->qc_area
);
716 if (!ctrl_bar
|| !qc_bar
) {
721 nfp_net_get_fw_version(&fw_ver
, ctrl_bar
);
722 if (fw_ver
.extend
& NFP_NET_CFG_VERSION_RESERVED_MASK
||
723 fw_ver
.class != NFP_NET_CFG_VERSION_CLASS_GENERIC
) {
724 nfp_err(pf
->cpp
, "Unknown Firmware ABI %d.%d.%d.%d\n",
725 fw_ver
.extend
, fw_ver
.class,
726 fw_ver
.major
, fw_ver
.minor
);
731 /* Determine stride */
732 if (nfp_net_fw_ver_eq(&fw_ver
, 0, 0, 0, 1)) {
734 nfp_warn(pf
->cpp
, "OBSOLETE Firmware detected - VF isolation not available\n");
736 switch (fw_ver
.major
) {
741 nfp_err(pf
->cpp
, "Unsupported Firmware ABI %d.%d.%d.%d\n",
742 fw_ver
.extend
, fw_ver
.class,
743 fw_ver
.major
, fw_ver
.minor
);
749 err
= nfp_net_pf_app_init(pf
, qc_bar
, stride
);
753 err
= nfp_shared_buf_register(pf
);
755 goto err_devlink_unreg
;
758 err
= nfp_devlink_params_register(pf
);
760 goto err_shared_buf_unreg
;
762 pf
->ddir
= nfp_net_debugfs_device_add(pf
->pdev
);
764 /* Allocate the vnics and do basic init */
765 err
= nfp_net_pf_alloc_vnics(pf
, ctrl_bar
, qc_bar
, stride
);
769 err
= nfp_net_pf_alloc_irqs(pf
);
773 err
= nfp_net_pf_app_start(pf
);
777 err
= nfp_net_pf_init_vnics(pf
);
781 devl_unlock(devlink
);
782 devlink_register(devlink
);
787 nfp_net_pf_app_stop(pf
);
789 nfp_net_pf_free_irqs(pf
);
791 nfp_net_pf_free_vnics(pf
);
793 nfp_net_debugfs_dir_clean(&pf
->ddir
);
794 nfp_devlink_params_unregister(pf
);
795 err_shared_buf_unreg
:
796 devl_unlock(devlink
);
797 nfp_shared_buf_unregister(pf
);
799 cancel_work_sync(&pf
->port_refresh_work
);
800 nfp_net_pf_app_clean(pf
);
802 nfp_net_pci_unmap_mem(pf
);
806 void nfp_net_pci_remove(struct nfp_pf
*pf
)
808 struct devlink
*devlink
= priv_to_devlink(pf
);
809 struct nfp_net
*nn
, *next
;
811 devlink_unregister(priv_to_devlink(pf
));
813 list_for_each_entry_safe(nn
, next
, &pf
->vnics
, vnic_list
) {
814 if (!nfp_net_is_data_vnic(nn
))
816 nfp_net_pf_clean_vnic(pf
, nn
);
817 nfp_net_pf_free_vnic(pf
, nn
);
820 nfp_net_pf_app_stop(pf
);
821 /* stop app first, to avoid double free of ctrl vNIC's ddir */
822 nfp_net_debugfs_dir_clean(&pf
->ddir
);
824 nfp_devlink_params_unregister(pf
);
826 devl_unlock(devlink
);
828 nfp_shared_buf_unregister(pf
);
830 nfp_net_pf_free_irqs(pf
);
831 nfp_net_pf_app_clean(pf
);
832 nfp_net_pci_unmap_mem(pf
);
834 cancel_work_sync(&pf
->port_refresh_work
);