// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "adf_cfg_common.h"
#include "adf_transport_access_macros.h"
#include "adf_transport_internal.h"
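
/*
 * PF interrupt handling: one MSI-X vector is allocated per ring bank plus
 * a single vector for the AE (acceleration engine) cluster. When SR-IOV is
 * enabled the ring banks belong to the VFs, so the PF uses only the AE
 * vector, which then also carries VF2PF interrupts.
 */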
static int adf_enable_msix(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 msix_num_entries = 1;

	if (hw_data->set_msix_rttable)
		hw_data->set_msix_rttable(accel_dev);

	/* If SR-IOV is disabled, add entries for each bank */
	if (!accel_dev->pf.vf_info) {
		int i;

		msix_num_entries += hw_data->num_banks;
		for (i = 0; i < msix_num_entries; i++)
			pci_dev_info->msix_entries.entries[i].entry = i;
	} else {
		pci_dev_info->msix_entries.entries[0].entry =
			hw_data->num_banks;
	}

	if (pci_enable_msix_exact(pci_dev_info->pci_dev,
				  pci_dev_info->msix_entries.entries,
				  msix_num_entries)) {
		dev_err(&GET_DEV(accel_dev), "Failed to enable MSI-X IRQ(s)\n");
		return -EFAULT;
	}
	return 0;
}

static void adf_disable_msix(struct adf_accel_pci *pci_dev_info)
{
	pci_disable_msix(pci_dev_info->pci_dev);
}
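
/*
 * Top half for ring bank interrupts: write 0 to the bank's interrupt
 * flag-and-coalescing CSR to silence it, then defer response processing
 * to the bank's tasklet, which re-enables the bank once responses are
 * drained.
 */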
static irqreturn_t adf_msix_isr_bundle(int irq, void *bank_ptr)
{
	struct adf_etr_bank_data *bank = bank_ptr;
	struct adf_hw_csr_ops *csr_ops = GET_CSR_OPS(bank->accel_dev);

	csr_ops->write_csr_int_flag_and_col(bank->csr_addr, bank->bank_number,
					    0);
	tasklet_hi_schedule(&bank->resp_handler);
	return IRQ_HANDLED;
}
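
/*
 * Top half for the AE cluster vector. On a PF with SR-IOV enabled this
 * vector also carries VF2PF interrupts: the pending sources are read from
 * ERRSOU3/ERRSOU5, masked off, and handed to per-VF tasklets. Anything
 * else is reported as a spurious AE interrupt.
 */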
static irqreturn_t adf_msix_isr_ae(int irq, void *dev_ptr)
{
	struct adf_accel_dev *accel_dev = dev_ptr;

#ifdef CONFIG_PCI_IOV
	/* If SR-IOV is enabled (vf_info is non-NULL), check for VF->PF ints */
	if (accel_dev->pf.vf_info) {
		struct adf_hw_device_data *hw_data = accel_dev->hw_device;
		struct adf_bar *pmisc =
			&GET_BARS(accel_dev)[hw_data->get_misc_bar_id(hw_data)];
		void __iomem *pmisc_bar_addr = pmisc->virt_addr;
		u32 vf_mask;

		/* Get the interrupt sources triggered by VFs */
		vf_mask = ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU5) &
			    0x0000FFFF) << 16) |
			  ((ADF_CSR_RD(pmisc_bar_addr, ADF_ERRSOU3) &
			    0x01FFFE00) >> 9);

		if (vf_mask) {
			struct adf_accel_vf_info *vf_info;
			bool irq_handled = false;
			int i;

			/* Disable VF2PF interrupts for VFs with pending ints */
			adf_disable_vf2pf_interrupts(accel_dev, vf_mask);

			/*
			 * Schedule tasklets to handle VF2PF interrupt BHs
			 * unless the VF is malicious and is attempting to
			 * flood the host OS with VF2PF interrupts.
			 */
			for_each_set_bit(i, (const unsigned long *)&vf_mask,
					 (sizeof(vf_mask) * BITS_PER_BYTE)) {
				vf_info = accel_dev->pf.vf_info + i;

				if (!__ratelimit(&vf_info->vf2pf_ratelimit)) {
					dev_info(&GET_DEV(accel_dev),
						 "Too many ints from VF%d\n",
						 vf_info->vf_nr + 1);
					continue;
				}

				/* Tasklet will re-enable ints from this VF */
				tasklet_hi_schedule(&vf_info->vf2pf_bh_tasklet);
				irq_handled = true;
			}

			if (irq_handled)
				return IRQ_HANDLED;
		}
	}
#endif /* CONFIG_PCI_IOV */

	dev_dbg(&GET_DEV(accel_dev), "qat_dev%d spurious AE interrupt\n",
		accel_dev->accel_id);

	return IRQ_NONE;
}
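
/*
 * Request the MSI-X vectors: one per ring bank (PF without SR-IOV only),
 * each given an affinity hint that spreads banks round-robin over the
 * online CPUs, offset by accel_id so multiple devices do not stack their
 * banks on the same CPUs, plus the final vector for the AE cluster.
 */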
static int adf_request_irqs(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
	struct adf_etr_data *etr_data = accel_dev->transport;
	int ret, i = 0;
	char *name;

	/* Request msix irq for all banks unless SR-IOV enabled */
	if (!accel_dev->pf.vf_info) {
		for (i = 0; i < hw_data->num_banks; i++) {
			struct adf_etr_bank_data *bank = &etr_data->banks[i];
			unsigned int cpu, cpus = num_online_cpus();

			name = *(pci_dev_info->msix_entries.names + i);
			snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
				 "qat%d-bundle%d", accel_dev->accel_id, i);
			ret = request_irq(msixe[i].vector,
					  adf_msix_isr_bundle, 0, name, bank);
			if (ret) {
				dev_err(&GET_DEV(accel_dev),
					"failed to enable irq %d for %s\n",
					msixe[i].vector, name);
				return ret;
			}

			cpu = ((accel_dev->accel_id * hw_data->num_banks) +
			       i) % cpus;
			irq_set_affinity_hint(msixe[i].vector,
					      get_cpu_mask(cpu));
		}
	}

	/* Request msix irq for AE */
	name = *(pci_dev_info->msix_entries.names + i);
	snprintf(name, ADF_MAX_MSIX_VECTOR_NAME,
		 "qat%d-ae-cluster", accel_dev->accel_id);
	ret = request_irq(msixe[i].vector, adf_msix_isr_ae, 0, name, accel_dev);
	if (ret) {
		dev_err(&GET_DEV(accel_dev),
			"failed to enable irq %d for %s\n",
			msixe[i].vector, name);
		return ret;
	}
	return ret;
}
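
/*
 * Release the IRQs: if more than one entry was allocated (PF without
 * SR-IOV) the bank vectors are freed first; the remaining entry is always
 * the AE cluster vector.
 */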
static void adf_free_irqs(struct adf_accel_dev *accel_dev)
{
	struct adf_accel_pci *pci_dev_info = &accel_dev->accel_pci_dev;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	struct msix_entry *msixe = pci_dev_info->msix_entries.entries;
	struct adf_etr_data *etr_data = accel_dev->transport;
	int i = 0;

	if (pci_dev_info->msix_entries.num_entries > 1) {
		for (i = 0; i < hw_data->num_banks; i++) {
			irq_set_affinity_hint(msixe[i].vector, NULL);
			free_irq(msixe[i].vector, &etr_data->banks[i]);
		}
	}
	irq_set_affinity_hint(msixe[i].vector, NULL);
	free_irq(msixe[i].vector, accel_dev);
}
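
/*
 * Allocate the msix_entry array (node-local to the device) and one
 * ADF_MAX_MSIX_VECTOR_NAME buffer per vector for the IRQ names used by
 * adf_request_irqs().
 */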
static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
{
	int i;
	char **names;
	struct msix_entry *entries;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	u32 msix_num_entries = 1;

	/* If SR-IOV is disabled (vf_info is NULL), add entries for each bank */
	if (!accel_dev->pf.vf_info)
		msix_num_entries += hw_data->num_banks;

	entries = kcalloc_node(msix_num_entries, sizeof(*entries),
			       GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
	if (!entries)
		return -ENOMEM;

	names = kcalloc(msix_num_entries, sizeof(char *), GFP_KERNEL);
	if (!names) {
		kfree(entries);
		return -ENOMEM;
	}
	for (i = 0; i < msix_num_entries; i++) {
		*(names + i) = kzalloc(ADF_MAX_MSIX_VECTOR_NAME, GFP_KERNEL);
		if (!(*(names + i)))
			goto err;
	}
	accel_dev->accel_pci_dev.msix_entries.num_entries = msix_num_entries;
	accel_dev->accel_pci_dev.msix_entries.entries = entries;
	accel_dev->accel_pci_dev.msix_entries.names = names;
	return 0;
err:
	for (i = 0; i < msix_num_entries; i++)
		kfree(*(names + i));
	kfree(entries);
	kfree(names);
	return -ENOMEM;
}

static void adf_isr_free_msix_entry_table(struct adf_accel_dev *accel_dev)
{
	char **names = accel_dev->accel_pci_dev.msix_entries.names;
	int i;

	kfree(accel_dev->accel_pci_dev.msix_entries.entries);
	for (i = 0; i < accel_dev->accel_pci_dev.msix_entries.num_entries; i++)
		kfree(*(names + i));
	kfree(names);
}
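
/*
 * The response tasklets are the bottom halves scheduled from
 * adf_msix_isr_bundle(); one per ring bank.
 */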
static int adf_setup_bh(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *priv_data = accel_dev->transport;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int i;

	for (i = 0; i < hw_data->num_banks; i++)
		tasklet_init(&priv_data->banks[i].resp_handler,
			     adf_response_handler,
			     (unsigned long)&priv_data->banks[i]);
	return 0;
}

static void adf_cleanup_bh(struct adf_accel_dev *accel_dev)
{
	struct adf_etr_data *priv_data = accel_dev->transport;
	struct adf_hw_device_data *hw_data = accel_dev->hw_device;
	int i;

	for (i = 0; i < hw_data->num_banks; i++) {
		tasklet_disable(&priv_data->banks[i].resp_handler);
		tasklet_kill(&priv_data->banks[i].resp_handler);
	}
}

/**
 * adf_isr_resource_free() - Free IRQ for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function frees interrupts for acceleration device.
 */
void adf_isr_resource_free(struct adf_accel_dev *accel_dev)
{
	adf_free_irqs(accel_dev);
	adf_cleanup_bh(accel_dev);
	adf_disable_msix(&accel_dev->accel_pci_dev);
	adf_isr_free_msix_entry_table(accel_dev);
}
EXPORT_SYMBOL_GPL(adf_isr_resource_free);

/**
 * adf_isr_resource_alloc() - Allocate IRQ for acceleration device
 * @accel_dev:  Pointer to acceleration device.
 *
 * Function allocates interrupts for acceleration device.
 *
 * Return: 0 on success, error code otherwise.
 */
int adf_isr_resource_alloc(struct adf_accel_dev *accel_dev)
{
	int ret;

	ret = adf_isr_alloc_msix_entry_table(accel_dev);
	if (ret)
		return ret;
	if (adf_enable_msix(accel_dev))
		goto err_out;

	if (adf_setup_bh(accel_dev))
		goto err_out;

	if (adf_request_irqs(accel_dev))
		goto err_out;

	return 0;
err_out:
	adf_isr_resource_free(accel_dev);
	return -EFAULT;
}
EXPORT_SYMBOL_GPL(adf_isr_resource_alloc);
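
/*
 * Note: these two exports are wired up as the alloc_irq / free_irq
 * callbacks in struct adf_hw_device_data and invoked from the common
 * device init/shutdown flow (assumption based on how the QAT common code
 * uses them elsewhere, e.g. adf_init.c; not part of this file).
 */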