// SPDX-License-Identifier: GPL-2.0
#include <linux/pci.h>
#include <linux/printk.h>
#include <linux/slab.h>

#include "nitrox_dev.h"
#include "nitrox_csr.h"
#include "nitrox_common.h"
#include "nitrox_hal.h"
#include "nitrox_mbx.h"
/*
 * One vector for each type of ring
 * - NPS packet ring, AQMQ ring and ZQMQ ring
 */
#define NR_RING_VECTORS 3
#define NR_NON_RING_VECTORS 1
/* base entry for packet ring/port */
#define PKT_RING_MSIX_BASE 0
#define NON_RING_MSIX_BASE 192
23 * nps_pkt_slc_isr - IRQ handler for NPS solicit port
27 static irqreturn_t
nps_pkt_slc_isr(int irq
, void *data
)
29 struct nitrox_q_vector
*qvec
= data
;
30 union nps_pkt_slc_cnts slc_cnts
;
31 struct nitrox_cmdq
*cmdq
= qvec
->cmdq
;
33 slc_cnts
.value
= readq(cmdq
->compl_cnt_csr_addr
);
34 /* New packet on SLC output port */
35 if (slc_cnts
.s
.slc_int
)
36 tasklet_hi_schedule(&qvec
->resp_tasklet
);
41 static void clear_nps_core_err_intr(struct nitrox_device
*ndev
)
45 /* Write 1 to clear */
46 value
= nitrox_read_csr(ndev
, NPS_CORE_INT
);
47 nitrox_write_csr(ndev
, NPS_CORE_INT
, value
);
49 dev_err_ratelimited(DEV(ndev
), "NSP_CORE_INT 0x%016llx\n", value
);
52 static void clear_nps_pkt_err_intr(struct nitrox_device
*ndev
)
54 union nps_pkt_int pkt_int
;
55 unsigned long value
, offset
;
58 pkt_int
.value
= nitrox_read_csr(ndev
, NPS_PKT_INT
);
59 dev_err_ratelimited(DEV(ndev
), "NPS_PKT_INT 0x%016llx\n",
62 if (pkt_int
.s
.slc_err
) {
63 offset
= NPS_PKT_SLC_ERR_TYPE
;
64 value
= nitrox_read_csr(ndev
, offset
);
65 nitrox_write_csr(ndev
, offset
, value
);
66 dev_err_ratelimited(DEV(ndev
),
67 "NPS_PKT_SLC_ERR_TYPE 0x%016lx\n", value
);
69 offset
= NPS_PKT_SLC_RERR_LO
;
70 value
= nitrox_read_csr(ndev
, offset
);
71 nitrox_write_csr(ndev
, offset
, value
);
72 /* enable the solicit ports */
73 for_each_set_bit(i
, &value
, BITS_PER_LONG
)
74 enable_pkt_solicit_port(ndev
, i
);
76 dev_err_ratelimited(DEV(ndev
),
77 "NPS_PKT_SLC_RERR_LO 0x%016lx\n", value
);
79 offset
= NPS_PKT_SLC_RERR_HI
;
80 value
= nitrox_read_csr(ndev
, offset
);
81 nitrox_write_csr(ndev
, offset
, value
);
82 dev_err_ratelimited(DEV(ndev
),
83 "NPS_PKT_SLC_RERR_HI 0x%016lx\n", value
);
86 if (pkt_int
.s
.in_err
) {
87 offset
= NPS_PKT_IN_ERR_TYPE
;
88 value
= nitrox_read_csr(ndev
, offset
);
89 nitrox_write_csr(ndev
, offset
, value
);
90 dev_err_ratelimited(DEV(ndev
),
91 "NPS_PKT_IN_ERR_TYPE 0x%016lx\n", value
);
92 offset
= NPS_PKT_IN_RERR_LO
;
93 value
= nitrox_read_csr(ndev
, offset
);
94 nitrox_write_csr(ndev
, offset
, value
);
95 /* enable the input ring */
96 for_each_set_bit(i
, &value
, BITS_PER_LONG
)
97 enable_pkt_input_ring(ndev
, i
);
99 dev_err_ratelimited(DEV(ndev
),
100 "NPS_PKT_IN_RERR_LO 0x%016lx\n", value
);
102 offset
= NPS_PKT_IN_RERR_HI
;
103 value
= nitrox_read_csr(ndev
, offset
);
104 nitrox_write_csr(ndev
, offset
, value
);
105 dev_err_ratelimited(DEV(ndev
),
106 "NPS_PKT_IN_RERR_HI 0x%016lx\n", value
);
110 static void clear_pom_err_intr(struct nitrox_device
*ndev
)
114 value
= nitrox_read_csr(ndev
, POM_INT
);
115 nitrox_write_csr(ndev
, POM_INT
, value
);
116 dev_err_ratelimited(DEV(ndev
), "POM_INT 0x%016llx\n", value
);
119 static void clear_pem_err_intr(struct nitrox_device
*ndev
)
123 value
= nitrox_read_csr(ndev
, PEM0_INT
);
124 nitrox_write_csr(ndev
, PEM0_INT
, value
);
125 dev_err_ratelimited(DEV(ndev
), "PEM(0)_INT 0x%016llx\n", value
);
128 static void clear_lbc_err_intr(struct nitrox_device
*ndev
)
130 union lbc_int lbc_int
;
134 lbc_int
.value
= nitrox_read_csr(ndev
, LBC_INT
);
135 dev_err_ratelimited(DEV(ndev
), "LBC_INT 0x%016llx\n", lbc_int
.value
);
137 if (lbc_int
.s
.dma_rd_err
) {
138 for (i
= 0; i
< NR_CLUSTERS
; i
++) {
139 offset
= EFL_CORE_VF_ERR_INT0X(i
);
140 value
= nitrox_read_csr(ndev
, offset
);
141 nitrox_write_csr(ndev
, offset
, value
);
142 offset
= EFL_CORE_VF_ERR_INT1X(i
);
143 value
= nitrox_read_csr(ndev
, offset
);
144 nitrox_write_csr(ndev
, offset
, value
);
148 if (lbc_int
.s
.cam_soft_err
) {
149 dev_err_ratelimited(DEV(ndev
), "CAM_SOFT_ERR, invalidating LBC\n");
150 invalidate_lbc(ndev
);
153 if (lbc_int
.s
.pref_dat_len_mismatch_err
) {
154 offset
= LBC_PLM_VF1_64_INT
;
155 value
= nitrox_read_csr(ndev
, offset
);
156 nitrox_write_csr(ndev
, offset
, value
);
157 offset
= LBC_PLM_VF65_128_INT
;
158 value
= nitrox_read_csr(ndev
, offset
);
159 nitrox_write_csr(ndev
, offset
, value
);
162 if (lbc_int
.s
.rd_dat_len_mismatch_err
) {
163 offset
= LBC_ELM_VF1_64_INT
;
164 value
= nitrox_read_csr(ndev
, offset
);
165 nitrox_write_csr(ndev
, offset
, value
);
166 offset
= LBC_ELM_VF65_128_INT
;
167 value
= nitrox_read_csr(ndev
, offset
);
168 nitrox_write_csr(ndev
, offset
, value
);
170 nitrox_write_csr(ndev
, LBC_INT
, lbc_int
.value
);
173 static void clear_efl_err_intr(struct nitrox_device
*ndev
)
177 for (i
= 0; i
< NR_CLUSTERS
; i
++) {
178 union efl_core_int core_int
;
181 offset
= EFL_CORE_INTX(i
);
182 core_int
.value
= nitrox_read_csr(ndev
, offset
);
183 nitrox_write_csr(ndev
, offset
, core_int
.value
);
184 dev_err_ratelimited(DEV(ndev
), "ELF_CORE(%d)_INT 0x%016llx\n",
186 if (core_int
.s
.se_err
) {
187 offset
= EFL_CORE_SE_ERR_INTX(i
);
188 value
= nitrox_read_csr(ndev
, offset
);
189 nitrox_write_csr(ndev
, offset
, value
);
194 static void clear_bmi_err_intr(struct nitrox_device
*ndev
)
198 value
= nitrox_read_csr(ndev
, BMI_INT
);
199 nitrox_write_csr(ndev
, BMI_INT
, value
);
200 dev_err_ratelimited(DEV(ndev
), "BMI_INT 0x%016llx\n", value
);
203 static void nps_core_int_tasklet(unsigned long data
)
205 struct nitrox_q_vector
*qvec
= (void *)(uintptr_t)(data
);
206 struct nitrox_device
*ndev
= qvec
->ndev
;
208 /* if pf mode do queue recovery */
209 if (ndev
->mode
== __NDEV_MODE_PF
) {
212 * if VF(s) enabled communicate the error information
219 * nps_core_int_isr - interrupt handler for NITROX errors and
220 * mailbox communication
222 static irqreturn_t
nps_core_int_isr(int irq
, void *data
)
224 struct nitrox_q_vector
*qvec
= data
;
225 struct nitrox_device
*ndev
= qvec
->ndev
;
226 union nps_core_int_active core_int
;
228 core_int
.value
= nitrox_read_csr(ndev
, NPS_CORE_INT_ACTIVE
);
230 if (core_int
.s
.nps_core
)
231 clear_nps_core_err_intr(ndev
);
233 if (core_int
.s
.nps_pkt
)
234 clear_nps_pkt_err_intr(ndev
);
237 clear_pom_err_intr(ndev
);
240 clear_pem_err_intr(ndev
);
243 clear_lbc_err_intr(ndev
);
246 clear_efl_err_intr(ndev
);
249 clear_bmi_err_intr(ndev
);
251 /* Mailbox interrupt */
253 nitrox_pf2vf_mbox_handler(ndev
);
255 /* If more work callback the ISR, set resend */
256 core_int
.s
.resend
= 1;
257 nitrox_write_csr(ndev
, NPS_CORE_INT_ACTIVE
, core_int
.value
);
262 void nitrox_unregister_interrupts(struct nitrox_device
*ndev
)
264 struct pci_dev
*pdev
= ndev
->pdev
;
267 for (i
= 0; i
< ndev
->num_vecs
; i
++) {
268 struct nitrox_q_vector
*qvec
;
271 qvec
= ndev
->qvec
+ i
;
275 /* get the vector number */
276 vec
= pci_irq_vector(pdev
, i
);
277 irq_set_affinity_hint(vec
, NULL
);
280 tasklet_disable(&qvec
->resp_tasklet
);
281 tasklet_kill(&qvec
->resp_tasklet
);
286 pci_free_irq_vectors(pdev
);
289 int nitrox_register_interrupts(struct nitrox_device
*ndev
)
291 struct pci_dev
*pdev
= ndev
->pdev
;
292 struct nitrox_q_vector
*qvec
;
293 int nr_vecs
, vec
, cpu
;
299 * Entry 0: NPS PKT ring 0
300 * Entry 1: AQMQ ring 0
301 * Entry 2: ZQM ring 0
302 * Entry 3: NPS PKT ring 1
303 * Entry 4: AQMQ ring 1
304 * Entry 5: ZQM ring 1
306 * Entry 192: NPS_CORE_INT_ACTIVE
308 nr_vecs
= pci_msix_vec_count(pdev
);
311 ret
= pci_alloc_irq_vectors(pdev
, nr_vecs
, nr_vecs
, PCI_IRQ_MSIX
);
313 dev_err(DEV(ndev
), "msix vectors %d alloc failed\n", nr_vecs
);
316 ndev
->num_vecs
= nr_vecs
;
318 ndev
->qvec
= kcalloc(nr_vecs
, sizeof(*qvec
), GFP_KERNEL
);
320 pci_free_irq_vectors(pdev
);
324 /* request irqs for packet rings/ports */
325 for (i
= PKT_RING_MSIX_BASE
; i
< (nr_vecs
- 1); i
+= NR_RING_VECTORS
) {
326 qvec
= &ndev
->qvec
[i
];
328 qvec
->ring
= i
/ NR_RING_VECTORS
;
329 if (qvec
->ring
>= ndev
->nr_queues
)
332 qvec
->cmdq
= &ndev
->pkt_inq
[qvec
->ring
];
333 snprintf(qvec
->name
, IRQ_NAMESZ
, "nitrox-pkt%d", qvec
->ring
);
334 /* get the vector number */
335 vec
= pci_irq_vector(pdev
, i
);
336 ret
= request_irq(vec
, nps_pkt_slc_isr
, 0, qvec
->name
, qvec
);
338 dev_err(DEV(ndev
), "irq failed for pkt ring/port%d\n",
342 cpu
= qvec
->ring
% num_online_cpus();
343 irq_set_affinity_hint(vec
, get_cpu_mask(cpu
));
345 tasklet_init(&qvec
->resp_tasklet
, pkt_slc_resp_tasklet
,
346 (unsigned long)qvec
);
350 /* request irqs for non ring vectors */
351 i
= NON_RING_MSIX_BASE
;
352 qvec
= &ndev
->qvec
[i
];
355 snprintf(qvec
->name
, IRQ_NAMESZ
, "nitrox-core-int%d", i
);
356 /* get the vector number */
357 vec
= pci_irq_vector(pdev
, i
);
358 ret
= request_irq(vec
, nps_core_int_isr
, 0, qvec
->name
, qvec
);
360 dev_err(DEV(ndev
), "irq failed for nitrox-core-int%d\n", i
);
363 cpu
= num_online_cpus();
364 irq_set_affinity_hint(vec
, get_cpu_mask(cpu
));
366 tasklet_init(&qvec
->resp_tasklet
, nps_core_int_tasklet
,
367 (unsigned long)qvec
);
373 nitrox_unregister_interrupts(ndev
);
377 void nitrox_sriov_unregister_interrupts(struct nitrox_device
*ndev
)
379 struct pci_dev
*pdev
= ndev
->pdev
;
382 for (i
= 0; i
< ndev
->num_vecs
; i
++) {
383 struct nitrox_q_vector
*qvec
;
386 qvec
= ndev
->qvec
+ i
;
390 vec
= ndev
->iov
.msix
.vector
;
391 irq_set_affinity_hint(vec
, NULL
);
394 tasklet_disable(&qvec
->resp_tasklet
);
395 tasklet_kill(&qvec
->resp_tasklet
);
400 pci_disable_msix(pdev
);
403 int nitrox_sriov_register_interupts(struct nitrox_device
*ndev
)
405 struct pci_dev
*pdev
= ndev
->pdev
;
406 struct nitrox_q_vector
*qvec
;
411 * only non ring vectors i.e Entry 192 is available
412 * for PF in SR-IOV mode.
414 ndev
->iov
.msix
.entry
= NON_RING_MSIX_BASE
;
415 ret
= pci_enable_msix_exact(pdev
, &ndev
->iov
.msix
, NR_NON_RING_VECTORS
);
417 dev_err(DEV(ndev
), "failed to allocate nps-core-int%d\n",
422 qvec
= kcalloc(NR_NON_RING_VECTORS
, sizeof(*qvec
), GFP_KERNEL
);
424 pci_disable_msix(pdev
);
430 ndev
->num_vecs
= NR_NON_RING_VECTORS
;
431 snprintf(qvec
->name
, IRQ_NAMESZ
, "nitrox-core-int%d",
434 vec
= ndev
->iov
.msix
.vector
;
435 ret
= request_irq(vec
, nps_core_int_isr
, 0, qvec
->name
, qvec
);
437 dev_err(DEV(ndev
), "irq failed for nitrox-core-int%d\n",
441 cpu
= num_online_cpus();
442 irq_set_affinity_hint(vec
, get_cpu_mask(cpu
));
444 tasklet_init(&qvec
->resp_tasklet
, nps_core_int_tasklet
,
445 (unsigned long)qvec
);
451 nitrox_sriov_unregister_interrupts(ndev
);