// SPDX-License-Identifier: GPL-2.0-only
/*
 * aQuantia Corporation Network Driver
 * Copyright (C) 2014-2019 aQuantia Corporation. All rights reserved
 */

/* File aq_pci_func.c: Definition of PCI functions. */
#include <linux/interrupt.h>
#include <linux/module.h>

#include "aq_main.h"
#include "aq_nic.h"
#include "aq_vec.h"
#include "aq_hw.h"
#include "aq_pci_func.h"
#include "hw_atl/hw_atl_a0.h"
#include "hw_atl/hw_atl_b0.h"
#include "aq_filters.h"
#include "aq_drvinfo.h"
#include "aq_macsec.h"
static const struct pci_device_id aq_pci_tbl[] = {
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_0001), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D100), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D107), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D108), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_D109), },

	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112), },

	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC100S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC107S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC108S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC109S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC111S), },
	{ PCI_VDEVICE(AQUANTIA, AQ_DEVICE_ID_AQC112S), },

	{}
};
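
/* Board lookup table: each supported device ID is paired with the hardware
 * revision it shipped as and the ops/caps tables that drive it.
 * AQ_HWREV_ANY entries match any PCI revision.
 */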
static const struct aq_board_revision_s hw_atl_boards[] = {
	{ AQ_DEVICE_ID_0001,	AQ_HWREV_1,	&hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, },
	{ AQ_DEVICE_ID_D100,	AQ_HWREV_1,	&hw_atl_ops_a0, &hw_atl_a0_caps_aqc100, },
	{ AQ_DEVICE_ID_D107,	AQ_HWREV_1,	&hw_atl_ops_a0, &hw_atl_a0_caps_aqc107, },
	{ AQ_DEVICE_ID_D108,	AQ_HWREV_1,	&hw_atl_ops_a0, &hw_atl_a0_caps_aqc108, },
	{ AQ_DEVICE_ID_D109,	AQ_HWREV_1,	&hw_atl_ops_a0, &hw_atl_a0_caps_aqc109, },

	{ AQ_DEVICE_ID_0001,	AQ_HWREV_2,	&hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, },
	{ AQ_DEVICE_ID_D100,	AQ_HWREV_2,	&hw_atl_ops_b0, &hw_atl_b0_caps_aqc100, },
	{ AQ_DEVICE_ID_D107,	AQ_HWREV_2,	&hw_atl_ops_b0, &hw_atl_b0_caps_aqc107, },
	{ AQ_DEVICE_ID_D108,	AQ_HWREV_2,	&hw_atl_ops_b0, &hw_atl_b0_caps_aqc108, },
	{ AQ_DEVICE_ID_D109,	AQ_HWREV_2,	&hw_atl_ops_b0, &hw_atl_b0_caps_aqc109, },

	{ AQ_DEVICE_ID_AQC100,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
	{ AQ_DEVICE_ID_AQC107,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc107, },
	{ AQ_DEVICE_ID_AQC108,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc108, },
	{ AQ_DEVICE_ID_AQC109,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc109, },
	{ AQ_DEVICE_ID_AQC111,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc111, },
	{ AQ_DEVICE_ID_AQC112,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc112, },

	{ AQ_DEVICE_ID_AQC100S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc100s, },
	{ AQ_DEVICE_ID_AQC107S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc107s, },
	{ AQ_DEVICE_ID_AQC108S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc108s, },
	{ AQ_DEVICE_ID_AQC109S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc109s, },
	{ AQ_DEVICE_ID_AQC111S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc111s, },
	{ AQ_DEVICE_ID_AQC112S,	AQ_HWREV_ANY,	&hw_atl_ops_b1, &hw_atl_b0_caps_aqc112s, },
};

MODULE_DEVICE_TABLE(pci, aq_pci_tbl);
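
/* Resolve the hw_ops/caps pair for this device by scanning hw_atl_boards[]
 * for an entry matching the PCI device ID and revision.
 */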
static int aq_pci_probe_get_hw_by_id(struct pci_dev *pdev,
				     const struct aq_hw_ops **ops,
				     const struct aq_hw_caps_s **caps)
{
	int i;

	if (pdev->vendor != PCI_VENDOR_ID_AQUANTIA)
		return -EINVAL;

	for (i = 0; i < ARRAY_SIZE(hw_atl_boards); i++) {
		if (hw_atl_boards[i].devid == pdev->device &&
		    (hw_atl_boards[i].revision == AQ_HWREV_ANY ||
		     hw_atl_boards[i].revision == pdev->revision)) {
			*ops = hw_atl_boards[i].ops;
			*caps = hw_atl_boards[i].caps;
			break;
		}
	}

	if (i == ARRAY_SIZE(hw_atl_boards))
		return -EINVAL;

	return 0;
}
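
/* Set up DMA masks and claim the MMIO region: a 64-bit DMA mask is tried
 * first, falling back to 32 bits if the platform cannot address that range.
 */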
int aq_pci_func_init(struct pci_dev *pdev)
{
	int err;

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));

	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
	}
	if (err != 0) {
		err = -ENOSR;
		goto err_exit;
	}

	err = pci_request_regions(pdev, AQ_CFG_DRV_NAME "_mmio");
	if (err < 0)
		goto err_exit;

	pci_set_master(pdev);

	return 0;

err_exit:
	return err;
}
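
/* Request one IRQ vector. Under MSI/MSI-X the caller's handler is used
 * as-is; in legacy INTx mode the shared aq_vec_isr_legacy trampoline is
 * installed with IRQF_SHARED instead.
 */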
int aq_pci_func_alloc_irq(struct aq_nic_s *self, unsigned int i,
			  char *name, irq_handler_t irq_handler,
			  void *irq_arg, cpumask_t *affinity_mask)
{
	struct pci_dev *pdev = self->pdev;
	int err;

	if (pdev->msix_enabled || pdev->msi_enabled)
		err = request_irq(pci_irq_vector(pdev, i), irq_handler, 0,
				  name, irq_arg);
	else
		err = request_irq(pci_irq_vector(pdev, i), aq_vec_isr_legacy,
				  IRQF_SHARED, name, irq_arg);

	if (err >= 0) {
		self->msix_entry_mask |= (1 << i);

		if (pdev->msix_enabled && affinity_mask)
			irq_set_affinity_hint(pci_irq_vector(pdev, i),
					      affinity_mask);
	}

	return err;
}
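
/* Free every vector recorded in msix_entry_mask. The link/service vector
 * is owned by the nic object itself; data vectors below AQ_CFG_VECS_MAX
 * are owned by their aq_vec.
 */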
void aq_pci_func_free_irqs(struct aq_nic_s *self)
{
	struct pci_dev *pdev = self->pdev;
	unsigned int i;
	void *irq_data;

	for (i = 32U; i--;) {
		if (!((1U << i) & self->msix_entry_mask))
			continue;
		if (self->aq_nic_cfg.link_irq_vec &&
		    i == self->aq_nic_cfg.link_irq_vec)
			irq_data = self;
		else if (i < AQ_CFG_VECS_MAX)
			irq_data = self->aq_vec[i];
		else
			continue;

		if (pdev->msix_enabled)
			irq_set_affinity_hint(pci_irq_vector(pdev, i), NULL);
		free_irq(pci_irq_vector(pdev, i), irq_data);
		self->msix_entry_mask &= ~(1U << i);
	}
}
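
/* Report which interrupt mode the device actually came up in. */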
unsigned int aq_pci_func_get_irq_type(struct aq_nic_s *self)
{
	if (self->pdev->msix_enabled)
		return AQ_HW_IRQ_MSIX;
	if (self->pdev->msi_enabled)
		return AQ_HW_IRQ_MSI;

	return AQ_HW_IRQ_LEGACY;
}

static void aq_pci_free_irq_vectors(struct aq_nic_s *self)
{
	pci_free_irq_vectors(self->pdev);
}
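
/* Probe: enable the device, pick hw ops by ID/revision, map the first
 * memory BAR, allocate interrupt vectors (one extra for PTP plus
 * AQ_HW_SERVICE_IRQS), then bring up and register the net device.
 * The error labels unwind in reverse order of setup.
 */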
static int aq_pci_probe(struct pci_dev *pdev,
			const struct pci_device_id *pci_id)
{
	struct net_device *ndev;
	resource_size_t mmio_pa;
	struct aq_nic_s *self;
	u32 numvecs;
	u32 bar;
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	err = aq_pci_func_init(pdev);
	if (err)
		goto err_pci_func;

	ndev = aq_ndev_alloc();
	if (!ndev) {
		err = -ENOMEM;
		goto err_ndev;
	}

	self = netdev_priv(ndev);
	self->pdev = pdev;
	SET_NETDEV_DEV(ndev, &pdev->dev);
	pci_set_drvdata(pdev, self);

	mutex_init(&self->fwreq_mutex);

	err = aq_pci_probe_get_hw_by_id(pdev, &self->aq_hw_ops,
					&aq_nic_get_cfg(self)->aq_hw_caps);
	if (err)
		goto err_ioremap;

	self->aq_hw = kzalloc(sizeof(*self->aq_hw), GFP_KERNEL);
	if (!self->aq_hw) {
		err = -ENOMEM;
		goto err_ioremap;
	}
	self->aq_hw->aq_nic_cfg = aq_nic_get_cfg(self);

	for (bar = 0; bar < 4; ++bar) {
		if (IORESOURCE_MEM & pci_resource_flags(pdev, bar)) {
			resource_size_t reg_sz;

			mmio_pa = pci_resource_start(pdev, bar);
			if (mmio_pa == 0U) {
				err = -EIO;
				goto err_free_aq_hw;
			}

			reg_sz = pci_resource_len(pdev, bar);
			if (reg_sz <= 24 /*ATL_REGS_SIZE*/) {
				err = -EIO;
				goto err_free_aq_hw;
			}

			self->aq_hw->mmio = ioremap(mmio_pa, reg_sz);
			if (!self->aq_hw->mmio) {
				err = -EIO;
				goto err_free_aq_hw;
			}
			break;
		}
	}

	if (bar == 4) {
		err = -EIO;
		goto err_free_aq_hw;
	}

	numvecs = min((u8)AQ_CFG_VECS_DEF,
		      aq_nic_get_cfg(self)->aq_hw_caps->msix_irqs);
	numvecs = min(numvecs, num_online_cpus());
	/* Request IRQ vector for PTP */
	numvecs += 1;

	numvecs += AQ_HW_SERVICE_IRQS;
	/* Enable interrupts */
#if !AQ_CFG_FORCE_LEGACY_INT
	err = pci_alloc_irq_vectors(self->pdev, 1, numvecs,
				    PCI_IRQ_MSIX | PCI_IRQ_MSI |
				    PCI_IRQ_LEGACY);

	if (err < 0)
		goto err_hwinit;
	numvecs = err;
#endif
	self->irqvecs = numvecs;

	/* net device init */
	aq_nic_cfg_start(self);

	aq_nic_ndev_init(self);

	err = aq_nic_ndev_register(self);
	if (err < 0)
		goto err_register;

	aq_drvinfo_init(ndev);

	return 0;

err_register:
	aq_nic_free_vectors(self);
	aq_pci_free_irq_vectors(self);
err_hwinit:
	iounmap(self->aq_hw->mmio);
err_free_aq_hw:
	kfree(self->aq_hw);
err_ioremap:
	free_netdev(ndev);
err_ndev:
	pci_release_regions(pdev);
err_pci_func:
	pci_disable_device(pdev);

	return err;
}
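
/* Teardown in reverse order of probe. */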
static void aq_pci_remove(struct pci_dev *pdev)
{
	struct aq_nic_s *self = pci_get_drvdata(pdev);

	if (self->ndev) {
		aq_clear_rxnfc_all_rules(self);
		if (self->ndev->reg_state == NETREG_REGISTERED)
			unregister_netdev(self->ndev);

#if IS_ENABLED(CONFIG_MACSEC)
		aq_macsec_free(self);
#endif
		aq_nic_free_vectors(self);
		aq_pci_free_irq_vectors(self);
		iounmap(self->aq_hw->mmio);
		kfree(self->aq_hw);
		pci_release_regions(pdev);
		free_netdev(self->ndev);
	}

	pci_disable_device(pdev);
}
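
/* Quiesce the nic on shutdown; when the system is powering off,
 * additionally disable wake-from-D3 and drop the device into D3hot.
 */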
static void aq_pci_shutdown(struct pci_dev *pdev)
{
	struct aq_nic_s *self = pci_get_drvdata(pdev);

	aq_nic_shutdown(self);

	pci_disable_device(pdev);

	if (system_state == SYSTEM_POWER_OFF) {
		pci_wake_from_d3(pdev, false);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}
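
/* Common suspend path: detach and stop the interface, then, for the
 * "deep" (poweroff/hibernate) variant, deinit the nic and set its power
 * state; the WoL setting controls the deinit mode.
 */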
static int aq_suspend_common(struct device *dev, bool deep)
{
	struct aq_nic_s *nic = pci_get_drvdata(to_pci_dev(dev));

	rtnl_lock();

	nic->power_state = AQ_HW_POWER_STATE_D3;
	netif_device_detach(nic->ndev);
	netif_tx_stop_all_queues(nic->ndev);

	if (netif_running(nic->ndev))
		aq_nic_stop(nic);

	if (deep) {
		aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
		aq_nic_set_power(nic);
	}

	rtnl_unlock();

	return 0;
}

static int atl_resume_common(struct device *dev, bool deep)
{
	struct pci_dev *pdev = to_pci_dev(dev);
	struct aq_nic_s *nic;
	int ret = 0;

	nic = pci_get_drvdata(pdev);

	rtnl_lock();

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	if (deep) {
		ret = aq_nic_init(nic);
		if (ret)
			goto err_exit;
	}

	if (netif_running(nic->ndev)) {
		ret = aq_nic_start(nic);
		if (ret)
			goto err_exit;
	}

	netif_device_attach(nic->ndev);
	netif_tx_start_all_queues(nic->ndev);

err_exit:
	rtnl_unlock();

	return ret;
}
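
/* dev_pm_ops glue: freeze/thaw take the shallow path, while suspend,
 * poweroff, resume and restore take the deep one.
 */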
static int aq_pm_freeze(struct device *dev)
{
	return aq_suspend_common(dev, false);
}

static int aq_pm_suspend_poweroff(struct device *dev)
{
	return aq_suspend_common(dev, true);
}

static int aq_pm_thaw(struct device *dev)
{
	return atl_resume_common(dev, false);
}

static int aq_pm_resume_restore(struct device *dev)
{
	return atl_resume_common(dev, true);
}

static const struct dev_pm_ops aq_pm_ops = {
	.suspend = aq_pm_suspend_poweroff,
	.poweroff = aq_pm_suspend_poweroff,
	.freeze = aq_pm_freeze,
	.resume = aq_pm_resume_restore,
	.restore = aq_pm_resume_restore,
	.thaw = aq_pm_thaw,
};
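
/* PCI driver glue: probe/remove/shutdown plus PM hooks (CONFIG_PM only). */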
static struct pci_driver aq_pci_ops = {
	.name = AQ_CFG_DRV_NAME,
	.id_table = aq_pci_tbl,
	.probe = aq_pci_probe,
	.remove = aq_pci_remove,
	.shutdown = aq_pci_shutdown,
#ifdef CONFIG_PM
	.driver.pm = &aq_pm_ops,
#endif
};

int aq_pci_func_register_driver(void)
{
	return pci_register_driver(&aq_pci_ops);
}

void aq_pci_func_unregister_driver(void)
{
	pci_unregister_driver(&aq_pci_ops);
}