// SPDX-License-Identifier: GPL-2.0
#include <linux/pci.h>
#include <linux/printk.h>
#include <linux/slab.h>

#include "nitrox_dev.h"
#include "nitrox_csr.h"
#include "nitrox_common.h"
#include "nitrox_hal.h"
#include "nitrox_isr.h"
#include "nitrox_mbx.h"
/*
 * One vector for each type of ring
 *  - NPS packet ring, AQMQ ring and ZQMQ ring
 */
#define NR_RING_VECTORS 3
#define NR_NON_RING_VECTORS 1
/* base entry for packet ring/port */
#define PKT_RING_MSIX_BASE 0
#define NON_RING_MSIX_BASE 192
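
/*
 * With three vectors per ring, packet ring N uses MSI-X entry
 * N * NR_RING_VECTORS (the reverse mapping, entry / NR_RING_VECTORS,
 * is used in nitrox_register_interrupts() below); entry
 * NON_RING_MSIX_BASE carries the NPS core error/mailbox interrupt.
 */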
/**
 * nps_pkt_slc_isr - IRQ handler for NPS solicit port
 * @irq: irq number
 * @data: argument
 */
static irqreturn_t nps_pkt_slc_isr(int irq, void *data)
{
	struct nitrox_q_vector *qvec = data;
	union nps_pkt_slc_cnts slc_cnts;
	struct nitrox_cmdq *cmdq = qvec->cmdq;

	slc_cnts.value = readq(cmdq->compl_cnt_csr_addr);
	/* New packet on SLC output port */
	if (slc_cnts.s.slc_int)
		tasklet_hi_schedule(&qvec->resp_tasklet);

	return IRQ_HANDLED;
}
static void clear_nps_core_err_intr(struct nitrox_device *ndev)
{
	u64 value;

	/* Write 1 to clear */
	value = nitrox_read_csr(ndev, NPS_CORE_INT);
	nitrox_write_csr(ndev, NPS_CORE_INT, value);

	dev_err_ratelimited(DEV(ndev), "NPS_CORE_INT 0x%016llx\n", value);
}
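
/*
 * clear_nps_pkt_err_intr - clear NPS packet interface errors and re-enable
 * any solicit ports or input rings reported in the RERR registers.
 */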
static void clear_nps_pkt_err_intr(struct nitrox_device *ndev)
{
	union nps_pkt_int pkt_int;
	unsigned long value, offset;
	int i;

	pkt_int.value = nitrox_read_csr(ndev, NPS_PKT_INT);
	dev_err_ratelimited(DEV(ndev), "NPS_PKT_INT 0x%016llx\n",
			    pkt_int.value);

	if (pkt_int.s.slc_err) {
		offset = NPS_PKT_SLC_ERR_TYPE;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_SLC_ERR_TYPE 0x%016lx\n", value);

		offset = NPS_PKT_SLC_RERR_LO;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		/* enable the solicit ports */
		for_each_set_bit(i, &value, BITS_PER_LONG)
			enable_pkt_solicit_port(ndev, i);

		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_SLC_RERR_LO 0x%016lx\n", value);

		offset = NPS_PKT_SLC_RERR_HI;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_SLC_RERR_HI 0x%016lx\n", value);
	}

	if (pkt_int.s.in_err) {
		offset = NPS_PKT_IN_ERR_TYPE;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_IN_ERR_TYPE 0x%016lx\n", value);

		offset = NPS_PKT_IN_RERR_LO;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		/* enable the input ring */
		for_each_set_bit(i, &value, BITS_PER_LONG)
			enable_pkt_input_ring(ndev, i);

		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_IN_RERR_LO 0x%016lx\n", value);

		offset = NPS_PKT_IN_RERR_HI;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		dev_err_ratelimited(DEV(ndev),
				    "NPS_PKT_IN_RERR_HI 0x%016lx\n", value);
	}
}
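
/* clear_pom_err_intr - clear pending POM error interrupts (write-1-to-clear) */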
static void clear_pom_err_intr(struct nitrox_device *ndev)
{
	u64 value;

	value = nitrox_read_csr(ndev, POM_INT);
	nitrox_write_csr(ndev, POM_INT, value);
	dev_err_ratelimited(DEV(ndev), "POM_INT 0x%016llx\n", value);
}
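
/* clear_pem_err_intr - clear pending PEM(0) error interrupts (write-1-to-clear) */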
static void clear_pem_err_intr(struct nitrox_device *ndev)
{
	u64 value;

	value = nitrox_read_csr(ndev, PEM0_INT);
	nitrox_write_csr(ndev, PEM0_INT, value);
	dev_err_ratelimited(DEV(ndev), "PEM(0)_INT 0x%016llx\n", value);
}
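
/*
 * clear_lbc_err_intr - clear LBC error interrupts; a CAM soft error also
 * invalidates the LBC, the other causes only have their status cleared.
 */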
static void clear_lbc_err_intr(struct nitrox_device *ndev)
{
	union lbc_int lbc_int;
	u64 value, offset;
	int i;

	lbc_int.value = nitrox_read_csr(ndev, LBC_INT);
	dev_err_ratelimited(DEV(ndev), "LBC_INT 0x%016llx\n", lbc_int.value);

	if (lbc_int.s.dma_rd_err) {
		for (i = 0; i < NR_CLUSTERS; i++) {
			offset = EFL_CORE_VF_ERR_INT0X(i);
			value = nitrox_read_csr(ndev, offset);
			nitrox_write_csr(ndev, offset, value);
			offset = EFL_CORE_VF_ERR_INT1X(i);
			value = nitrox_read_csr(ndev, offset);
			nitrox_write_csr(ndev, offset, value);
		}
	}

	if (lbc_int.s.cam_soft_err) {
		dev_err_ratelimited(DEV(ndev), "CAM_SOFT_ERR, invalidating LBC\n");
		invalidate_lbc(ndev);
	}

	if (lbc_int.s.pref_dat_len_mismatch_err) {
		offset = LBC_PLM_VF1_64_INT;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		offset = LBC_PLM_VF65_128_INT;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
	}

	if (lbc_int.s.rd_dat_len_mismatch_err) {
		offset = LBC_ELM_VF1_64_INT;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
		offset = LBC_ELM_VF65_128_INT;
		value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, value);
	}

	nitrox_write_csr(ndev, LBC_INT, lbc_int.value);
}
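
/* clear_efl_err_intr - clear per-cluster EFL core error interrupts */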
static void clear_efl_err_intr(struct nitrox_device *ndev)
{
	int i;

	for (i = 0; i < NR_CLUSTERS; i++) {
		union efl_core_int core_int;
		u64 value, offset;

		offset = EFL_CORE_INTX(i);
		core_int.value = nitrox_read_csr(ndev, offset);
		nitrox_write_csr(ndev, offset, core_int.value);
		dev_err_ratelimited(DEV(ndev), "EFL_CORE(%d)_INT 0x%016llx\n",
				    i, core_int.value);

		if (core_int.s.se_err) {
			offset = EFL_CORE_SE_ERR_INTX(i);
			value = nitrox_read_csr(ndev, offset);
			nitrox_write_csr(ndev, offset, value);
		}
	}
}
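
/* clear_bmi_err_intr - clear pending BMI error interrupts (write-1-to-clear) */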
static void clear_bmi_err_intr(struct nitrox_device *ndev)
{
	u64 value;

	value = nitrox_read_csr(ndev, BMI_INT);
	nitrox_write_csr(ndev, BMI_INT, value);
	dev_err_ratelimited(DEV(ndev), "BMI_INT 0x%016llx\n", value);
}
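
/*
 * nps_core_int_tasklet - bottom half for the NPS core interrupt; PF-mode
 * queue recovery and VF error notification are left as stubs here.
 */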
static void nps_core_int_tasklet(unsigned long data)
{
	struct nitrox_q_vector *qvec = (void *)(uintptr_t)(data);
	struct nitrox_device *ndev = qvec->ndev;

	/* if pf mode do queue recovery */
	if (ndev->mode == __NDEV_MODE_PF) {
	} else {
		/*
		 * if VF(s) enabled communicate the error information
		 * to VF(s)
		 */
	}
}
/*
 * nps_core_int_isr - interrupt handler for NITROX errors and
 *	mailbox communication
 */
static irqreturn_t nps_core_int_isr(int irq, void *data)
{
	struct nitrox_q_vector *qvec = data;
	struct nitrox_device *ndev = qvec->ndev;
	union nps_core_int_active core_int;

	core_int.value = nitrox_read_csr(ndev, NPS_CORE_INT_ACTIVE);

	if (core_int.s.nps_core)
		clear_nps_core_err_intr(ndev);

	if (core_int.s.nps_pkt)
		clear_nps_pkt_err_intr(ndev);

	if (core_int.s.pom)
		clear_pom_err_intr(ndev);

	if (core_int.s.pem)
		clear_pem_err_intr(ndev);

	if (core_int.s.lbc)
		clear_lbc_err_intr(ndev);

	if (core_int.s.efl)
		clear_efl_err_intr(ndev);

	if (core_int.s.bmi)
		clear_bmi_err_intr(ndev);

	/* Mailbox interrupt */
	if (core_int.s.mbox)
		nitrox_pf2vf_mbox_handler(ndev);

	/* If more work callback the ISR, set resend */
	core_int.s.resend = 1;
	nitrox_write_csr(ndev, NPS_CORE_INT_ACTIVE, core_int.value);

	return IRQ_HANDLED;
}
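
/**
 * nitrox_unregister_interrupts - release PF MSI-X vectors, IRQs and tasklets
 * @ndev: NITROX device
 */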
void nitrox_unregister_interrupts(struct nitrox_device *ndev)
{
	struct pci_dev *pdev = ndev->pdev;
	int i;

	for (i = 0; i < ndev->num_vecs; i++) {
		struct nitrox_q_vector *qvec;
		int vec;

		qvec = ndev->qvec + i;
		if (!qvec->valid)
			continue;

		/* get the vector number */
		vec = pci_irq_vector(pdev, i);
		irq_set_affinity_hint(vec, NULL);
		free_irq(vec, qvec);

		tasklet_disable(&qvec->resp_tasklet);
		tasklet_kill(&qvec->resp_tasklet);
		qvec->valid = false;
	}
	kfree(ndev->qvec);
	ndev->qvec = NULL;
	pci_free_irq_vectors(pdev);
}
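
/**
 * nitrox_register_interrupts - allocate PF MSI-X vectors and request IRQs
 *   for the packet rings and the NPS core error/mailbox vector
 * @ndev: NITROX device
 *
 * Returns 0 on success, a negative errno otherwise.
 */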
int nitrox_register_interrupts(struct nitrox_device *ndev)
{
	struct pci_dev *pdev = ndev->pdev;
	struct nitrox_q_vector *qvec;
	int nr_vecs, vec, cpu;
	int ret, i;

	/*
	 * PF MSI-X vectors
	 *
	 * Entry 0: NPS PKT ring 0
	 * Entry 1: AQMQ ring 0
	 * Entry 2: ZQM ring 0
	 * Entry 3: NPS PKT ring 1
	 * Entry 4: AQMQ ring 1
	 * Entry 5: ZQM ring 1
	 * ....
	 * Entry 192: NPS_CORE_INT_ACTIVE
	 */
	nr_vecs = pci_msix_vec_count(pdev);

	/* Enable MSI-X */
	ret = pci_alloc_irq_vectors(pdev, nr_vecs, nr_vecs, PCI_IRQ_MSIX);
	if (ret < 0) {
		dev_err(DEV(ndev), "msix vectors %d alloc failed\n", nr_vecs);
		return ret;
	}
	ndev->num_vecs = nr_vecs;

	ndev->qvec = kcalloc(nr_vecs, sizeof(*qvec), GFP_KERNEL);
	if (!ndev->qvec) {
		pci_free_irq_vectors(pdev);
		return -ENOMEM;
	}

	/* request irqs for packet rings/ports */
	for (i = PKT_RING_MSIX_BASE; i < (nr_vecs - 1); i += NR_RING_VECTORS) {
		qvec = &ndev->qvec[i];

		qvec->ring = i / NR_RING_VECTORS;
		if (qvec->ring >= ndev->nr_queues)
			break;

		qvec->cmdq = &ndev->pkt_inq[qvec->ring];
		snprintf(qvec->name, IRQ_NAMESZ, "nitrox-pkt%d", qvec->ring);
		/* get the vector number */
		vec = pci_irq_vector(pdev, i);
		ret = request_irq(vec, nps_pkt_slc_isr, 0, qvec->name, qvec);
		if (ret) {
			dev_err(DEV(ndev), "irq failed for pkt ring/port%d\n",
				qvec->ring);
			goto irq_fail;
		}
		cpu = qvec->ring % num_online_cpus();
		irq_set_affinity_hint(vec, get_cpu_mask(cpu));

		tasklet_init(&qvec->resp_tasklet, pkt_slc_resp_tasklet,
			     (unsigned long)qvec);
		qvec->valid = true;
	}

	/* request irqs for non ring vectors */
	i = NON_RING_MSIX_BASE;
	qvec = &ndev->qvec[i];
	qvec->ndev = ndev;

	snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d", i);
	/* get the vector number */
	vec = pci_irq_vector(pdev, i);
	ret = request_irq(vec, nps_core_int_isr, 0, qvec->name, qvec);
	if (ret) {
		dev_err(DEV(ndev), "irq failed for nitrox-core-int%d\n", i);
		goto irq_fail;
	}
	cpu = num_online_cpus();
	irq_set_affinity_hint(vec, get_cpu_mask(cpu));

	tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet,
		     (unsigned long)qvec);
	qvec->valid = true;

	return 0;

irq_fail:
	nitrox_unregister_interrupts(ndev);
	return ret;
}
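
/**
 * nitrox_sriov_unregister_interrupts - release the non-ring vector used by
 *   the PF while SR-IOV is enabled
 * @ndev: NITROX device
 */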
void nitrox_sriov_unregister_interrupts(struct nitrox_device *ndev)
{
	struct pci_dev *pdev = ndev->pdev;
	int i;

	for (i = 0; i < ndev->num_vecs; i++) {
		struct nitrox_q_vector *qvec;
		int vec;

		qvec = ndev->qvec + i;
		if (!qvec->valid)
			continue;

		vec = ndev->iov.msix.vector;
		irq_set_affinity_hint(vec, NULL);
		free_irq(vec, qvec);

		tasklet_disable(&qvec->resp_tasklet);
		tasklet_kill(&qvec->resp_tasklet);
		qvec->valid = false;
	}
	kfree(ndev->qvec);
	ndev->qvec = NULL;
	pci_disable_msix(pdev);
}
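
/**
 * nitrox_sriov_register_interupts - request the non-ring vector (entry 192)
 *   for the PF in SR-IOV mode
 * @ndev: NITROX device
 *
 * Returns 0 on success, a negative errno otherwise.
 */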
int nitrox_sriov_register_interupts(struct nitrox_device *ndev)
{
	struct pci_dev *pdev = ndev->pdev;
	struct nitrox_q_vector *qvec;
	int vec, cpu;
	int ret;

	/*
	 * only non ring vectors i.e Entry 192 is available
	 * for PF in SR-IOV mode.
	 */
	ndev->iov.msix.entry = NON_RING_MSIX_BASE;
	ret = pci_enable_msix_exact(pdev, &ndev->iov.msix, NR_NON_RING_VECTORS);
	if (ret) {
		dev_err(DEV(ndev), "failed to allocate nps-core-int%d\n",
			NON_RING_MSIX_BASE);
		return ret;
	}

	qvec = kcalloc(NR_NON_RING_VECTORS, sizeof(*qvec), GFP_KERNEL);
	if (!qvec) {
		pci_disable_msix(pdev);
		return -ENOMEM;
	}
	qvec->ndev = ndev;

	ndev->qvec = qvec;
	ndev->num_vecs = NR_NON_RING_VECTORS;
	snprintf(qvec->name, IRQ_NAMESZ, "nitrox-core-int%d",
		 NON_RING_MSIX_BASE);

	vec = ndev->iov.msix.vector;
	ret = request_irq(vec, nps_core_int_isr, 0, qvec->name, qvec);
	if (ret) {
		dev_err(DEV(ndev), "irq failed for nitrox-core-int%d\n",
			NON_RING_MSIX_BASE);
		goto iov_irq_fail;
	}
	cpu = num_online_cpus();
	irq_set_affinity_hint(vec, get_cpu_mask(cpu));

	tasklet_init(&qvec->resp_tasklet, nps_core_int_tasklet,
		     (unsigned long)qvec);
	qvec->valid = true;

	return 0;

iov_irq_fail:
	nitrox_sriov_unregister_interrupts(ndev);
	return ret;
}