1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /* Copyright (C) 2015-2018 Netronome Systems, Inc. */
/* Netronome virtual function network device driver: Main entry point
 * Author: Jason McMullan <jason.mcmullan@netronome.com>
 *         Rolf Neugebauer <rolf.neugebauer@netronome.com>
 */
11 #include <linux/module.h>
12 #include <linux/kernel.h>
13 #include <linux/init.h>
14 #include <linux/etherdevice.h>
16 #include "nfp_net_ctrl.h"
/**
 * struct nfp_net_vf - NFP VF-specific device structure
 * @nn:          NFP Net structure for this device
 * @irq_entries: Pre-allocated array of MSI-X entries
 * @q_bar:       Pointer to mapped QC memory (NULL if TX/RX mapped directly)
 * @ddir:        Per-device debugfs directory
 */
/* NOTE(review): only the irq_entries member is visible in this chunk; the
 * struct's opening line and the @nn/@q_bar/@ddir members documented above
 * appear to have been lost in extraction -- restore from the original file.
 */
/* One MSI-X entry per non-queue vector plus one per possible TX ring. */
30 struct msix_entry irq_entries
[NFP_NET_NON_Q_VECTORS
+
31 NFP_NET_MAX_TX_RINGS
];
/* Driver name: used for the PCI region request below and as the
 * pci_driver .name.
 */
37 static const char nfp_net_driver_name
[] = "nfp_netvf";
/* PCI device ID of the NFP6000 virtual function. */
39 #define PCI_DEVICE_NFP6000VF 0x6003
/* PCI IDs this driver binds to.
 * NOTE(review): the first table entry looks truncated in this chunk (no
 * closing brace, no subdevice/class fields) -- verify against the
 * original file.
 */
40 static const struct pci_device_id nfp_netvf_pci_device_ids
[] = {
41 { PCI_VENDOR_ID_NETRONOME
, PCI_DEVICE_NFP6000VF
,
42 PCI_VENDOR_ID_NETRONOME
, PCI_ANY_ID
,
45 { 0, } /* Required last entry. */
/* Export the ID table so udev/modprobe can autoload this module. */
47 MODULE_DEVICE_TABLE(pci
, nfp_netvf_pci_device_ids
);
/* nfp_netvf_get_mac_addr() - Read the vNIC's MAC address
 * @nn: NFP Net device whose netdev should receive the address
 *
 * Assembles the 6-byte MAC from the control BAR: 4 bytes read as a
 * big-endian word at NFP_NET_CFG_MACADDR + 0 and 2 bytes read at
 * offset + 6.  If the result is not a valid Ethernet address a random
 * one is generated instead; otherwise the address is copied to both
 * dev_addr and perm_addr of the netdev.
 *
 * NOTE(review): the opening/closing braces are not visible in this
 * chunk, and a `return` presumably follows the random-address fallback
 * (the copies below would otherwise overwrite it) -- lines appear lost
 * in extraction; verify against the original file.
 */
49 static void nfp_netvf_get_mac_addr(struct nfp_net
*nn
)
51 u8 mac_addr
[ETH_ALEN
];
/* Upper four MAC bytes come from the first config word... */
53 put_unaligned_be32(nn_readl(nn
, NFP_NET_CFG_MACADDR
+ 0), &mac_addr
[0]);
/* ...lower two bytes from the half-word at offset 6. */
54 put_unaligned_be16(nn_readw(nn
, NFP_NET_CFG_MACADDR
+ 6), &mac_addr
[4]);
/* Fall back to a random MAC when firmware provided an invalid one. */
56 if (!is_valid_ether_addr(mac_addr
)) {
57 eth_hw_addr_random(nn
->dp
.netdev
);
61 ether_addr_copy(nn
->dp
.netdev
->dev_addr
, mac_addr
);
62 ether_addr_copy(nn
->dp
.netdev
->perm_addr
, mac_addr
);
/* nfp_netvf_pci_probe() - Device initialization routine
 * @pdev:   PCI device
 * @pci_id: entry of nfp_netvf_pci_device_ids that matched @pdev
 *
 * Enables the PCI device, maps the control BAR, validates the firmware
 * ABI version, sizes and maps the TX/RX queue BAR(s), allocates the
 * vNIC, sets up MSI-X vectors, initializes the vNIC and adds debugfs
 * entries.  Return: presumably 0 on success, negative errno on failure
 * (return statements not visible in this chunk).
 *
 * NOTE(review): many lines of this function were lost in extraction --
 * error-return checks after each call, declarations of ctrl_bar,
 * stride, startq, err and nn, the error-unwind labels, and most
 * braces.  It cannot compile as shown; restore the dropped lines from
 * the original file before building.
 */
65 static int nfp_netvf_pci_probe(struct pci_dev
*pdev
,
66 const struct pci_device_id
*pci_id
)
68 struct nfp_net_fw_version fw_ver
;
69 int max_tx_rings
, max_rx_rings
;
70 u32 tx_bar_off
, rx_bar_off
;
71 u32 tx_bar_sz
, rx_bar_sz
;
72 int tx_bar_no
, rx_bar_no
;
73 struct nfp_net_vf
*vf
;
74 unsigned int num_irqs
;
/* Per-VF driver state; stashed in drvdata so remove() can find it. */
81 vf
= kzalloc(sizeof(*vf
), GFP_KERNEL
);
84 pci_set_drvdata(pdev
, vf
);
/* Wake the device and claim its memory regions for this driver. */
86 err
= pci_enable_device_mem(pdev
);
90 err
= pci_request_regions(pdev
, nfp_net_driver_name
);
92 dev_err(&pdev
->dev
, "Unable to allocate device memory.\n");
/* One mask for both streaming and coherent DMA mappings. */
98 err
= dma_set_mask_and_coherent(&pdev
->dev
,
99 DMA_BIT_MASK(NFP_NET_MAX_DMA_BITS
));
101 goto err_pci_regions
;
103 /* Map the Control BAR.
105 * Irrespective of the advertised BAR size we only map the
106 * first NFP_NET_CFG_BAR_SZ of the BAR. This keeps the code
107 * the identical for PF and VF drivers.
 */
109 ctrl_bar
= ioremap(pci_resource_start(pdev
, NFP_NET_CTRL_BAR
),
113 "Failed to map resource %d\n", NFP_NET_CTRL_BAR
);
115 goto err_pci_regions
;
/* Reject firmware whose ABI class/reserved fields we don't understand. */
118 nfp_net_get_fw_version(&fw_ver
, ctrl_bar
);
119 if (fw_ver
.resv
|| fw_ver
.class != NFP_NET_CFG_VERSION_CLASS_GENERIC
) {
120 dev_err(&pdev
->dev
, "Unknown Firmware ABI %d.%d.%d.%d\n",
121 fw_ver
.resv
, fw_ver
.class, fw_ver
.major
, fw_ver
.minor
);
126 /* Determine stride */
/* ABI 0.0.0.1: TX and RX queues live in two separate BARs. */
127 if (nfp_net_fw_ver_eq(&fw_ver
, 0, 0, 0, 1)) {
129 tx_bar_no
= NFP_NET_Q0_BAR
;
130 rx_bar_no
= NFP_NET_Q1_BAR
;
131 dev_warn(&pdev
->dev
, "OBSOLETE Firmware detected - VF isolation not available\n");
/* Newer ABIs share a single queue BAR; unsupported majors bail out. */
133 switch (fw_ver
.major
) {
136 tx_bar_no
= NFP_NET_Q0_BAR
;
137 rx_bar_no
= tx_bar_no
;
140 dev_err(&pdev
->dev
, "Unsupported Firmware ABI %d.%d.%d.%d\n",
141 fw_ver
.resv
, fw_ver
.class,
142 fw_ver
.major
, fw_ver
.minor
);
148 /* Find out how many rings are supported */
149 max_tx_rings
= readl(ctrl_bar
+ NFP_NET_CFG_MAX_TXRINGS
);
150 max_rx_rings
= readl(ctrl_bar
+ NFP_NET_CFG_MAX_RXRINGS
);
/* Queue BAR space needed: one QCP queue slot (times stride) per ring. */
152 tx_bar_sz
= NFP_QCP_QUEUE_ADDR_SZ
* max_tx_rings
* stride
;
153 rx_bar_sz
= NFP_QCP_QUEUE_ADDR_SZ
* max_rx_rings
* stride
;
/* Clamp ring counts to what actually fits in the advertised BARs. */
156 if (tx_bar_sz
> pci_resource_len(pdev
, tx_bar_no
)) {
158 "TX BAR too small for number of TX rings. Adjusting\n");
159 tx_bar_sz
= pci_resource_len(pdev
, tx_bar_no
);
160 max_tx_rings
= (tx_bar_sz
/ NFP_QCP_QUEUE_ADDR_SZ
) / 2;
162 if (rx_bar_sz
> pci_resource_len(pdev
, rx_bar_no
)) {
164 "RX BAR too small for number of RX rings. Adjusting\n");
165 rx_bar_sz
= pci_resource_len(pdev
, rx_bar_no
);
166 max_rx_rings
= (rx_bar_sz
/ NFP_QCP_QUEUE_ADDR_SZ
) / 2;
/* Translate the firmware's first TX/RX queue numbers into BAR offsets. */
169 startq
= readl(ctrl_bar
+ NFP_NET_CFG_START_TXQ
);
170 tx_bar_off
= NFP_PCIE_QUEUE(startq
);
171 startq
= readl(ctrl_bar
+ NFP_NET_CFG_START_RXQ
);
172 rx_bar_off
= NFP_PCIE_QUEUE(startq
);
174 /* Allocate and initialise the netdev */
175 nn
= nfp_net_alloc(pdev
, ctrl_bar
, true, max_tx_rings
, max_rx_rings
);
184 nn
->stride_tx
= stride
;
185 nn
->stride_rx
= stride
;
/* Shared queue BAR: build one mapping covering the union of the TX and
 * RX ranges, then point tx_bar/rx_bar into it.
 */
187 if (rx_bar_no
== tx_bar_no
) {
189 resource_size_t map_addr
;
191 /* Make a single overlapping BAR mapping */
192 if (tx_bar_off
< rx_bar_off
)
193 bar_off
= tx_bar_off
;
195 bar_off
= rx_bar_off
;
197 if ((tx_bar_off
+ tx_bar_sz
) > (rx_bar_off
+ rx_bar_sz
))
198 bar_sz
= (tx_bar_off
+ tx_bar_sz
) - bar_off
;
200 bar_sz
= (rx_bar_off
+ rx_bar_sz
) - bar_off
;
202 map_addr
= pci_resource_start(pdev
, tx_bar_no
) + bar_off
;
203 vf
->q_bar
= ioremap(map_addr
, bar_sz
);
205 nn_err(nn
, "Failed to map resource %d\n", tx_bar_no
);
207 goto err_netdev_free
;
/* TX/RX pointers are offsets into the single shared mapping. */
211 nn
->tx_bar
= vf
->q_bar
+ (tx_bar_off
- bar_off
);
213 nn
->rx_bar
= vf
->q_bar
+ (rx_bar_off
- bar_off
);
/* Separate BARs: map the TX and RX queue memory independently. */
215 resource_size_t map_addr
;
218 map_addr
= pci_resource_start(pdev
, tx_bar_no
) + tx_bar_off
;
219 nn
->tx_bar
= ioremap(map_addr
, tx_bar_sz
);
221 nn_err(nn
, "Failed to map resource %d\n", tx_bar_no
);
223 goto err_netdev_free
;
227 map_addr
= pci_resource_start(pdev
, rx_bar_no
) + rx_bar_off
;
228 nn
->rx_bar
= ioremap(map_addr
, rx_bar_sz
);
230 nn_err(nn
, "Failed to map resource %d\n", rx_bar_no
);
/* Install the firmware-provided (or random) MAC on the netdev. */
236 nfp_netvf_get_mac_addr(nn
);
/* Reserve MSI-X vectors for the non-queue vectors plus the rings. */
238 num_irqs
= nfp_net_irqs_alloc(pdev
, vf
->irq_entries
,
239 NFP_NET_MIN_VNIC_IRQS
,
240 NFP_NET_NON_Q_VECTORS
+
243 nn_warn(nn
, "Unable to allocate MSI-X Vectors. Exiting\n");
247 nfp_net_irqs_assign(nn
, vf
->irq_entries
, num_irqs
);
/* Finish vNIC setup and expose it through debugfs. */
249 err
= nfp_net_init(nn
);
251 goto err_irqs_disable
;
254 vf
->ddir
= nfp_net_debugfs_device_add(pdev
);
255 nfp_net_debugfs_vnic_add(nn
, vf
->ddir
);
/* Error unwind (labels lost in extraction): undo in reverse order. */
260 nfp_net_irqs_disable(pdev
);
274 pci_release_regions(pdev
);
276 pci_disable_device(pdev
);
278 pci_set_drvdata(pdev
, NULL
);
/* nfp_netvf_pci_remove() - Device removal routine
 * @pdev: PCI device being removed
 *
 * Unwinds what probe set up: debugfs directories, MSI-X vectors, the
 * control BAR mapping, PCI regions and the device itself, finally
 * clearing the drvdata pointer.
 *
 * NOTE(review): lines appear missing from this chunk as well (e.g. the
 * declaration/lookup of nn, tear-down of the vNIC, unmapping of the
 * queue BARs, freeing of vf, braces) -- restore from the original file
 * before building.
 */
283 static void nfp_netvf_pci_remove(struct pci_dev
*pdev
)
285 struct nfp_net_vf
*vf
;
288 vf
= pci_get_drvdata(pdev
);
294 /* Note, the order is slightly different from above as we need
295 * to keep the nn pointer around till we have freed everything.
 */
/* Remove debugfs entries before tearing the device down. */
297 nfp_net_debugfs_dir_clean(&nn
->debugfs_dir
);
298 nfp_net_debugfs_dir_clean(&vf
->ddir
);
302 nfp_net_irqs_disable(pdev
);
/* Unmap the control BAR, then release the PCI resources. */
310 iounmap(nn
->dp
.ctrl_bar
);
314 pci_release_regions(pdev
);
315 pci_disable_device(pdev
);
317 pci_set_drvdata(pdev
, NULL
);
321 struct pci_driver nfp_netvf_pci_driver
= {
322 .name
= nfp_net_driver_name
,
323 .id_table
= nfp_netvf_pci_device_ids
,
324 .probe
= nfp_netvf_pci_probe
,
325 .remove
= nfp_netvf_pci_remove
,
326 .shutdown
= nfp_netvf_pci_remove
,