// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2018 Netronome Systems, Inc. */

/*
 * Netronome virtual function network device driver: Main entry point
 * Author: Jason McMullan <jason.mcmullan@netronome.com>
 *         Rolf Neugebauer <rolf.neugebauer@netronome.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>

#include "nfpcore/nfp_dev.h"
#include "nfp_net_ctrl.h"
#include "nfp_net.h"
#include "nfp_main.h"

/**
 * struct nfp_net_vf - NFP VF-specific device structure
 * @nn:		NFP Net structure for this device
 * @irq_entries: Pre-allocated array of MSI-X entries
 * @q_bar:	Pointer to mapped QC memory (NULL if TX/RX mapped directly)
 * @ddir:	Per-device debugfs directory
 */
struct nfp_net_vf {
	struct nfp_net *nn;

	struct msix_entry irq_entries[NFP_NET_NON_Q_VECTORS +
				      NFP_NET_MAX_TX_RINGS];

	void __iomem *q_bar;

	struct dentry *ddir;
};

static const char nfp_net_driver_name[] = "nfp_netvf";

static const struct pci_device_id nfp_netvf_pci_device_ids[] = {
	{ PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NFP3800_VF,
	  PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
	  PCI_ANY_ID, 0, NFP_DEV_NFP3800_VF,
	},
	{ PCI_VENDOR_ID_NETRONOME, PCI_DEVICE_ID_NFP6000_VF,
	  PCI_VENDOR_ID_NETRONOME, PCI_ANY_ID,
	  PCI_ANY_ID, 0, NFP_DEV_NFP6000_VF,
	},
	{ PCI_VENDOR_ID_CORIGINE, PCI_DEVICE_ID_NFP3800_VF,
	  PCI_VENDOR_ID_CORIGINE, PCI_ANY_ID,
	  PCI_ANY_ID, 0, NFP_DEV_NFP3800_VF,
	},
	{ PCI_VENDOR_ID_CORIGINE, PCI_DEVICE_ID_NFP6000_VF,
	  PCI_VENDOR_ID_CORIGINE, PCI_ANY_ID,
	  PCI_ANY_ID, 0, NFP_DEV_NFP6000_VF,
	},
	{ 0, } /* Required last entry. */
};
MODULE_DEVICE_TABLE(pci, nfp_netvf_pci_device_ids);
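
/* Read the MAC address the firmware advertises for this vNIC from the
 * control BAR; fall back to a random address if it is not valid.
 */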
static void nfp_netvf_get_mac_addr(struct nfp_net *nn)
{
	u8 mac_addr[ETH_ALEN];

	put_unaligned_be32(nn_readl(nn, NFP_NET_CFG_MACADDR + 0), &mac_addr[0]);
	put_unaligned_be16(nn_readw(nn, NFP_NET_CFG_MACADDR + 6), &mac_addr[4]);

	if (!is_valid_ether_addr(mac_addr)) {
		eth_hw_addr_random(nn->dp.netdev);
		return;
	}

	eth_hw_addr_set(nn->dp.netdev, mac_addr);
	ether_addr_copy(nn->dp.netdev->perm_addr, mac_addr);
}
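
/* PCI probe: map the control BAR, validate the firmware ABI, map the
 * queue controller memory and set up the vNIC netdev.
 */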
static int nfp_netvf_pci_probe(struct pci_dev *pdev,
			       const struct pci_device_id *pci_id)
{
	const struct nfp_dev_info *dev_info;
	struct nfp_net_fw_version fw_ver;
	int max_tx_rings, max_rx_rings;
	u32 tx_bar_off, rx_bar_off;
	u32 tx_bar_sz, rx_bar_sz;
	int tx_bar_no, rx_bar_no;
	struct nfp_net_vf *vf;
	unsigned int num_irqs;
	u8 __iomem *ctrl_bar;
	struct nfp_net *nn;
	u32 startq;
	int stride;
	int err;

	dev_info = &nfp_dev_info[pci_id->driver_data];

	vf = kzalloc(sizeof(*vf), GFP_KERNEL);
	if (!vf)
		return -ENOMEM;
	pci_set_drvdata(pdev, vf);

	err = pci_enable_device_mem(pdev);
	if (err)
		goto err_free_vf;

	err = pci_request_regions(pdev, nfp_net_driver_name);
	if (err) {
		dev_err(&pdev->dev, "Unable to allocate device memory.\n");
		goto err_pci_disable;
	}

	pci_set_master(pdev);

	err = dma_set_mask_and_coherent(&pdev->dev, dev_info->dma_mask);
	if (err)
		goto err_pci_regions;

	/* Map the Control BAR.
	 *
	 * Irrespective of the advertised BAR size we only map the
	 * first NFP_NET_CFG_BAR_SZ of the BAR.  This keeps the code
	 * identical for PF and VF drivers.
	 */
	ctrl_bar = ioremap(pci_resource_start(pdev, NFP_NET_CTRL_BAR),
			   NFP_NET_CFG_BAR_SZ);
	if (!ctrl_bar) {
		dev_err(&pdev->dev,
			"Failed to map resource %d\n", NFP_NET_CTRL_BAR);
		err = -EIO;
		goto err_pci_regions;
	}

	nfp_net_get_fw_version(&fw_ver, ctrl_bar);
	if (fw_ver.extend & NFP_NET_CFG_VERSION_RESERVED_MASK ||
	    fw_ver.class != NFP_NET_CFG_VERSION_CLASS_GENERIC) {
		dev_err(&pdev->dev, "Unknown Firmware ABI %d.%d.%d.%d\n",
			fw_ver.extend, fw_ver.class,
			fw_ver.major, fw_ver.minor);
		err = -EINVAL;
		goto err_ctrl_unmap;
	}

	/* Determine stride */
	if (nfp_net_fw_ver_eq(&fw_ver, 0, 0, 0, 1)) {
		stride = 2;
		tx_bar_no = NFP_NET_Q0_BAR;
		rx_bar_no = NFP_NET_Q1_BAR;
		dev_warn(&pdev->dev, "OBSOLETE Firmware detected - VF isolation not available\n");
	} else {
		switch (fw_ver.major) {
		case 1 ... 5:
			stride = 4;
			tx_bar_no = NFP_NET_Q0_BAR;
			rx_bar_no = tx_bar_no;
			break;
		default:
			dev_err(&pdev->dev, "Unsupported Firmware ABI %d.%d.%d.%d\n",
				fw_ver.extend, fw_ver.class,
				fw_ver.major, fw_ver.minor);
			err = -EINVAL;
			goto err_ctrl_unmap;
		}
	}

	/* Find out how many rings are supported */
	max_tx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_TXRINGS);
	max_rx_rings = readl(ctrl_bar + NFP_NET_CFG_MAX_RXRINGS);
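
	/* The queue BAR must cover NFP_QCP_QUEUE_ADDR_SZ * stride bytes
	 * for every ring advertised by the firmware.
	 */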
	tx_bar_sz = NFP_QCP_QUEUE_ADDR_SZ * max_tx_rings * stride;
	rx_bar_sz = NFP_QCP_QUEUE_ADDR_SZ * max_rx_rings * stride;

	/* Sanity checks */
	if (tx_bar_sz > pci_resource_len(pdev, tx_bar_no)) {
		dev_warn(&pdev->dev,
			 "TX BAR too small for number of TX rings. Adjusting\n");
		tx_bar_sz = pci_resource_len(pdev, tx_bar_no);
		max_tx_rings = (tx_bar_sz / NFP_QCP_QUEUE_ADDR_SZ) / 2;
	}
	if (rx_bar_sz > pci_resource_len(pdev, rx_bar_no)) {
		dev_warn(&pdev->dev,
			 "RX BAR too small for number of RX rings. Adjusting\n");
		rx_bar_sz = pci_resource_len(pdev, rx_bar_no);
		max_rx_rings = (rx_bar_sz / NFP_QCP_QUEUE_ADDR_SZ) / 2;
	}
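
	/* The firmware reports the first TX/RX queue as an absolute queue
	 * number; nfp_qcp_queue_offset() converts it into a byte offset
	 * within the queue controller BAR.
	 */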
	startq = readl(ctrl_bar + NFP_NET_CFG_START_TXQ);
	tx_bar_off = nfp_qcp_queue_offset(dev_info, startq);
	startq = readl(ctrl_bar + NFP_NET_CFG_START_RXQ);
	rx_bar_off = nfp_qcp_queue_offset(dev_info, startq);

	/* Allocate and initialise the netdev */
	nn = nfp_net_alloc(pdev, dev_info, ctrl_bar, true,
			   max_tx_rings, max_rx_rings);
	if (IS_ERR(nn)) {
		err = PTR_ERR(nn);
		goto err_ctrl_unmap;
	}
	vf->nn = nn;

	nn->dp.is_vf = 1;
	nn->stride_tx = stride;
	nn->stride_rx = stride;
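
	/* Map the queue memory.  When TX and RX pointers share one PCI BAR
	 * a single overlapping mapping covers both ranges; otherwise each
	 * range is mapped individually.
	 */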
	if (rx_bar_no == tx_bar_no) {
		u32 bar_off, bar_sz;
		resource_size_t map_addr;

		/* Make a single overlapping BAR mapping */
		if (tx_bar_off < rx_bar_off)
			bar_off = tx_bar_off;
		else
			bar_off = rx_bar_off;

		if ((tx_bar_off + tx_bar_sz) > (rx_bar_off + rx_bar_sz))
			bar_sz = (tx_bar_off + tx_bar_sz) - bar_off;
		else
			bar_sz = (rx_bar_off + rx_bar_sz) - bar_off;

		map_addr = pci_resource_start(pdev, tx_bar_no) + bar_off;
		vf->q_bar = ioremap(map_addr, bar_sz);
		if (!vf->q_bar) {
			nn_err(nn, "Failed to map resource %d\n", tx_bar_no);
			err = -EIO;
			goto err_netdev_free;
		}

		/* TX queues */
		nn->tx_bar = vf->q_bar + (tx_bar_off - bar_off);
		/* RX queues */
		nn->rx_bar = vf->q_bar + (rx_bar_off - bar_off);
	} else {
		resource_size_t map_addr;

		/* TX queues */
		map_addr = pci_resource_start(pdev, tx_bar_no) + tx_bar_off;
		nn->tx_bar = ioremap(map_addr, tx_bar_sz);
		if (!nn->tx_bar) {
			nn_err(nn, "Failed to map resource %d\n", tx_bar_no);
			err = -EIO;
			goto err_netdev_free;
		}

		/* RX queues */
		map_addr = pci_resource_start(pdev, rx_bar_no) + rx_bar_off;
		nn->rx_bar = ioremap(map_addr, rx_bar_sz);
		if (!nn->rx_bar) {
			nn_err(nn, "Failed to map resource %d\n", rx_bar_no);
			err = -EIO;
			goto err_unmap_tx;
		}
	}

	nfp_netvf_get_mac_addr(nn);

	num_irqs = nfp_net_irqs_alloc(pdev, vf->irq_entries,
				      NFP_NET_MIN_VNIC_IRQS,
				      NFP_NET_NON_Q_VECTORS +
				      nn->dp.num_r_vecs);
	if (!num_irqs) {
		nn_warn(nn, "Unable to allocate MSI-X Vectors. Exiting\n");
		err = -EIO;
		goto err_unmap_rx;
	}
	nfp_net_irqs_assign(nn, vf->irq_entries, num_irqs);

	err = nfp_net_init(nn);
	if (err)
		goto err_irqs_disable;

	nfp_net_info(nn);
	vf->ddir = nfp_net_debugfs_device_add(pdev);
	nfp_net_debugfs_vnic_add(nn, vf->ddir);

	return 0;
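
	/* Error unwind: release resources in the reverse order in which
	 * they were acquired above.
	 */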
err_irqs_disable:
	nfp_net_irqs_disable(pdev);
err_unmap_rx:
	if (!vf->q_bar)
		iounmap(nn->rx_bar);
err_unmap_tx:
	if (!vf->q_bar)
		iounmap(nn->tx_bar);
	else
		iounmap(vf->q_bar);
err_netdev_free:
	nfp_net_free(nn);
err_ctrl_unmap:
	iounmap(ctrl_bar);
err_pci_regions:
	pci_release_regions(pdev);
err_pci_disable:
	pci_disable_device(pdev);
err_free_vf:
	pci_set_drvdata(pdev, NULL);
	kfree(vf);
	return err;
}

static void nfp_netvf_pci_remove(struct pci_dev *pdev)
{
	struct nfp_net_vf *vf;
	struct nfp_net *nn;

	vf = pci_get_drvdata(pdev);
	if (!vf)
		return;

	nn = vf->nn;

	/* Note, the order is slightly different from above as we need
	 * to keep the nn pointer around till we have freed everything.
	 */
	nfp_net_debugfs_dir_clean(&nn->debugfs_dir);
	nfp_net_debugfs_dir_clean(&vf->ddir);

	nfp_net_clean(nn);

	nfp_net_irqs_disable(pdev);

	if (!vf->q_bar) {
		iounmap(nn->rx_bar);
		iounmap(nn->tx_bar);
	} else {
		iounmap(vf->q_bar);
	}
	iounmap(nn->dp.ctrl_bar);

	nfp_net_free(nn);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	pci_set_drvdata(pdev, NULL);
	kfree(vf);
}
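
/* Not static: the common nfp module entry code registers this driver,
 * which is why both probe and shutdown route through the same remove
 * handler above.
 */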
struct pci_driver nfp_netvf_pci_driver = {
	.name        = nfp_net_driver_name,
	.id_table    = nfp_netvf_pci_device_ids,
	.probe       = nfp_netvf_pci_probe,
	.remove      = nfp_netvf_pci_remove,
	.shutdown    = nfp_netvf_pci_remove,
};