/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/string.h>

#include "csio_init.h"
#include "csio_hw.h"

static irqreturn_t
csio_nondata_isr(int irq, void *dev_id)
{
	struct csio_hw *hw = (struct csio_hw *) dev_id;
	int rv;
	unsigned long flags;

	if (unlikely(!hw))
		return IRQ_NONE;

	if (unlikely(pci_channel_offline(hw->pdev))) {
		CSIO_INC_STATS(hw, n_pcich_offline);
		return IRQ_NONE;
	}

	spin_lock_irqsave(&hw->lock, flags);
	csio_hw_slow_intr_handler(hw);
	rv = csio_mb_isr_handler(hw);

	if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
		hw->flags |= CSIO_HWF_FWEVT_PENDING;
		spin_unlock_irqrestore(&hw->lock, flags);
		schedule_work(&hw->evtq_work);
		return IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&hw->lock, flags);
	return IRQ_HANDLED;
}

/*
 * csio_fwevt_handler - Common FW event handler routine.
 * @hw: HW module.
 *
 * This is the ISR for FW events. It is shared b/w MSIX
 * and INTx handlers.
 */
static void
csio_fwevt_handler(struct csio_hw *hw)
{
	int rv;
	unsigned long flags;

	rv = csio_fwevtq_handler(hw);

	spin_lock_irqsave(&hw->lock, flags);
	if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
		hw->flags |= CSIO_HWF_FWEVT_PENDING;
		spin_unlock_irqrestore(&hw->lock, flags);
		schedule_work(&hw->evtq_work);
		return;
	}
	spin_unlock_irqrestore(&hw->lock, flags);

} /* csio_fwevt_handler */

/*
 * csio_fwevt_isr() - FW events MSIX ISR
 * @irq: Interrupt number.
 * @dev_id: HW module.
 *
 * Process WRs on the FW event queue.
 */
static irqreturn_t
csio_fwevt_isr(int irq, void *dev_id)
{
	struct csio_hw *hw = (struct csio_hw *) dev_id;

	if (unlikely(!hw))
		return IRQ_NONE;

	if (unlikely(pci_channel_offline(hw->pdev))) {
		CSIO_INC_STATS(hw, n_pcich_offline);
		return IRQ_NONE;
	}

	csio_fwevt_handler(hw);

	return IRQ_HANDLED;
}

/*
 * csio_fwevt_intx_handler() - INTx wrapper for handling FW events.
 */
void
csio_fwevt_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
			struct csio_fl_dma_buf *flb, void *priv)
{
	csio_fwevt_handler(hw);
} /* csio_fwevt_intx_handler */
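
/*
 * In INTx mode there is no dedicated FW-event vector: FW events arrive
 * as work requests on the forward-interrupt ingress queue, and this
 * wrapper is invoked from that queue's processing path. Its signature
 * therefore matches the IQ-handler callback shape (hw, wr, len, flb,
 * priv) rather than an irq handler's.
 */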

/*
 * csio_process_scsi_cmpl - Process a SCSI WR completion.
 * @hw: HW module.
 * @wr: The completed WR from the ingress queue.
 * @len: Length of the WR.
 * @flb: Freelist buffer array.
 * @cbfn_q: Completion queue.
 */
static void
csio_process_scsi_cmpl(struct csio_hw *hw, void *wr, uint32_t len,
			struct csio_fl_dma_buf *flb, void *cbfn_q)
{
	struct csio_ioreq *ioreq;
	uint8_t *scsiwr;
	uint8_t subop;
	void *cmnd;
	unsigned long flags;

	ioreq = csio_scsi_cmpl_handler(hw, wr, len, flb, NULL, &scsiwr);
	if (likely(ioreq)) {
		if (unlikely(*scsiwr == FW_SCSI_ABRT_CLS_WR)) {
			subop = FW_SCSI_ABRT_CLS_WR_SUB_OPCODE_GET(
					((struct fw_scsi_abrt_cls_wr *)
					 scsiwr)->sub_opcode_to_chk_all_io);

			csio_dbg(hw, "%s cmpl recvd ioreq:%p status:%d\n",
				 subop ? "Close" : "Abort",
				 ioreq, ioreq->wr_status);

			spin_lock_irqsave(&hw->lock, flags);
			if (subop)
				csio_scsi_closed(ioreq,
						 (struct list_head *)cbfn_q);
			else
				csio_scsi_aborted(ioreq,
						  (struct list_head *)cbfn_q);

			/*
			 * We call scsi_done for I/Os that driver thinks aborts
			 * have timed out. If there is a race caused by FW
			 * completing abort at the exact same time that the
			 * driver has detected the abort timeout, the following
			 * check prevents calling of scsi_done twice for the
			 * same command: once from the eh_abort_handler, another
			 * from csio_scsi_isr_handler(). This also avoids the
			 * need to check if csio_scsi_cmnd(req) is NULL in the
			 * fast path.
			 */
			cmnd = csio_scsi_cmnd(ioreq);
			if (unlikely(cmnd == NULL))
				list_del_init(&ioreq->sm.sm_list);

			spin_unlock_irqrestore(&hw->lock, flags);

			if (unlikely(cmnd == NULL))
				csio_put_scsi_ioreq_lock(hw,
						csio_hw_to_scsim(hw), ioreq);
		} else {
			spin_lock_irqsave(&hw->lock, flags);
			csio_scsi_completed(ioreq, (struct list_head *)cbfn_q);
			spin_unlock_irqrestore(&hw->lock, flags);
		}
	}
}

/*
 * csio_scsi_isr_handler() - Common SCSI ISR handler.
 * @iq: Ingress queue pointer.
 *
 * Processes SCSI completions on the SCSI IQ indicated by scm->iq_idx
 * by calling csio_wr_process_iq_idx. If there are completions on the
 * isr_cbfn_q, yank them out into a local queue and call their io_cbfns.
 * Once done, add these completions onto the freelist.
 * This routine is shared b/w MSIX and INTx.
 */
static inline irqreturn_t
csio_scsi_isr_handler(struct csio_q *iq)
{
	struct csio_hw *hw = (struct csio_hw *)iq->owner;
	LIST_HEAD(cbfn_q);
	struct list_head *tmp;
	struct csio_scsim *scm;
	struct csio_ioreq *ioreq;
	int isr_completions = 0;

	scm = csio_hw_to_scsim(hw);

	if (unlikely(csio_wr_process_iq(hw, iq, csio_process_scsi_cmpl,
					&cbfn_q) != 0))
		return IRQ_NONE;

	/* Call back the completion routines */
	list_for_each(tmp, &cbfn_q) {
		ioreq = (struct csio_ioreq *)tmp;
		isr_completions++;
		ioreq->io_cbfn(hw, ioreq);
		/* Release ddp buffer if used for this req */
		if (unlikely(ioreq->dcopy))
			csio_put_scsi_ddp_list_lock(hw, scm, &ioreq->gen_list,
						    ioreq->nsge);
	}

	if (isr_completions) {
		/* Return the ioreqs back to ioreq->freelist */
		csio_put_scsi_ioreq_list_lock(hw, scm, &cbfn_q,
					      isr_completions);
	}

	return IRQ_HANDLED;
}
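
/*
 * Design note: completions are first gathered onto the local cbfn_q by
 * csio_process_scsi_cmpl() (which takes hw->lock only briefly per WR),
 * and the io_cbfn callbacks then run here with no hw->lock held. This
 * keeps midlayer completion work off the driver's hot lock.
 */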

/*
 * csio_scsi_isr() - SCSI MSIX handler
 * @irq: Interrupt number.
 * @dev_id: Ingress queue pointer.
 *
 * This is the top level SCSI MSIX handler. Calls csio_scsi_isr_handler()
 * for handling SCSI completions.
 */
static irqreturn_t
csio_scsi_isr(int irq, void *dev_id)
{
	struct csio_q *iq = (struct csio_q *) dev_id;
	struct csio_hw *hw;

	if (unlikely(!iq))
		return IRQ_NONE;

	hw = (struct csio_hw *)iq->owner;

	if (unlikely(pci_channel_offline(hw->pdev))) {
		CSIO_INC_STATS(hw, n_pcich_offline);
		return IRQ_NONE;
	}

	csio_scsi_isr_handler(iq);

	return IRQ_HANDLED;
}

/*
 * csio_scsi_intx_handler() - SCSI INTx handler
 * @hw: HW module.
 * @wr: The work request.
 * @len: Length of the work request.
 * @flb: Freelist buffer array.
 * @priv: Ingress queue pointer.
 *
 * This is the top level SCSI INTx handler. Calls csio_scsi_isr_handler()
 * for handling SCSI completions.
 */
void
csio_scsi_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
			struct csio_fl_dma_buf *flb, void *priv)
{
	struct csio_q *iq = priv;

	csio_scsi_isr_handler(iq);

} /* csio_scsi_intx_handler */

/*
 * csio_fcoe_isr() - INTx/MSI interrupt service routine for FCoE.
 * @irq: Interrupt number.
 * @dev_id: HW module.
 *
 * Handles slow-path interrupts, the INTx forward interrupt queue and
 * mailbox completions, and defers FW event processing to the worker.
 */
static irqreturn_t
csio_fcoe_isr(int irq, void *dev_id)
{
	struct csio_hw *hw = (struct csio_hw *) dev_id;
	struct csio_q *intx_q = NULL;
	int rv;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;

	if (unlikely(!hw))
		return IRQ_NONE;

	if (unlikely(pci_channel_offline(hw->pdev))) {
		CSIO_INC_STATS(hw, n_pcich_offline);
		return IRQ_NONE;
	}

	/* Disable the interrupt for this PCI function. */
	if (hw->intr_mode == CSIO_IM_INTX)
		csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI_A));

	/*
	 * The read in the following function will flush the
	 * above write.
	 */
	if (csio_hw_slow_intr_handler(hw))
		ret = IRQ_HANDLED;

	/* Get the INTx Forward interrupt IQ. */
	intx_q = csio_get_q(hw, hw->intr_iq_idx);

	CSIO_DB_ASSERT(intx_q);

	/* IQ handler is not possible for intx_q, hence pass in NULL */
	if (likely(csio_wr_process_iq(hw, intx_q, NULL, NULL) == 0))
		ret = IRQ_HANDLED;

	spin_lock_irqsave(&hw->lock, flags);
	rv = csio_mb_isr_handler(hw);
	if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
		hw->flags |= CSIO_HWF_FWEVT_PENDING;
		spin_unlock_irqrestore(&hw->lock, flags);
		schedule_work(&hw->evtq_work);
		return IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&hw->lock, flags);

	return ret;
}
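
/*
 * INTx flow summary: writing 0 to MYPF_REG(PCIE_PF_CLI_A) de-asserts
 * the line for this PCI function; the register read inside
 * csio_hw_slow_intr_handler() then flushes that posted write before the
 * forward-interrupt IQ and the mailbox are polled. ret stays IRQ_NONE
 * only if none of the sources had work, which lets the kernel's
 * spurious-interrupt accounting see unhandled shared-line interrupts.
 */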

static void
csio_add_msix_desc(struct csio_hw *hw)
{
	int i;
	struct csio_msix_entries *entryp = &hw->msix_entries[0];
	int k = CSIO_EXTRA_VECS;
	int len = sizeof(entryp->desc) - 1;
	int cnt = hw->num_sqsets + k;

	/* Non-data vector */
	memset(entryp->desc, 0, len + 1);
	snprintf(entryp->desc, len, "csio-%02x:%02x:%x-nondata",
		 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));

	entryp++;
	memset(entryp->desc, 0, len + 1);
	snprintf(entryp->desc, len, "csio-%02x:%02x:%x-fwevt",
		 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));
	entryp++;

	/* Name SCSI vecs */
	for (i = k; i < cnt; i++, entryp++) {
		memset(entryp->desc, 0, len + 1);
		snprintf(entryp->desc, len, "csio-%02x:%02x:%x-scsi%d",
			 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw),
			 CSIO_PCI_FUNC(hw), i - CSIO_EXTRA_VECS);
	}
}
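
/*
 * Example of the generated descriptions for a function at 02:00.3 with
 * two SCSI queue sets, as they would appear in /proc/interrupts
 * (CSIO_EXTRA_VECS is 2: non-data plus FW event):
 *
 *   vector 0: "csio-02:00:3-nondata"
 *   vector 1: "csio-02:00:3-fwevt"
 *   vector 2: "csio-02:00:3-scsi0"
 *   vector 3: "csio-02:00:3-scsi1"
 */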

int
csio_request_irqs(struct csio_hw *hw)
{
	int rv, i, j, k = 0;
	struct csio_msix_entries *entryp = &hw->msix_entries[0];
	struct csio_scsi_cpu_info *info;
	struct pci_dev *pdev = hw->pdev;

	if (hw->intr_mode != CSIO_IM_MSIX) {
		rv = request_irq(pci_irq_vector(pdev, 0), csio_fcoe_isr,
				hw->intr_mode == CSIO_IM_MSI ? 0 : IRQF_SHARED,
				KBUILD_MODNAME, hw);
		if (rv) {
			csio_err(hw, "Failed to allocate interrupt line.\n");
			goto out_free_irqs;
		}
		goto out;
	}

	/* Add the MSIX vector descriptions */
	csio_add_msix_desc(hw);

	rv = request_irq(pci_irq_vector(pdev, k), csio_nondata_isr, 0,
			 entryp[k].desc, hw);
	if (rv) {
		csio_err(hw, "IRQ request failed for vec %d err:%d\n",
			 pci_irq_vector(pdev, k), rv);
		goto out_free_irqs;
	}

	entryp[k++].dev_id = hw;

	rv = request_irq(pci_irq_vector(pdev, k), csio_fwevt_isr, 0,
			 entryp[k].desc, hw);
	if (rv) {
		csio_err(hw, "IRQ request failed for vec %d err:%d\n",
			 pci_irq_vector(pdev, k), rv);
		goto out_free_irqs;
	}

	entryp[k++].dev_id = (void *)hw;

	/* Allocate IRQs for SCSI */
	for (i = 0; i < hw->num_pports; i++) {
		info = &hw->scsi_cpu_info[i];
		for (j = 0; j < info->max_cpus; j++, k++) {
			struct csio_scsi_qset *sqset = &hw->sqset[i][j];
			struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx];

			rv = request_irq(pci_irq_vector(pdev, k),
					 csio_scsi_isr, 0,
					 entryp[k].desc, q);
			if (rv) {
				csio_err(hw,
				    "IRQ request failed for vec %d err:%d\n",
				    pci_irq_vector(pdev, k), rv);
				goto out_free_irqs;
			}

			entryp[k].dev_id = q;

		} /* for all scsi cpus */
	} /* for all ports */

out:
	hw->flags |= CSIO_HWF_HOST_INTR_ENABLED;
	return 0;

out_free_irqs:
	for (i = 0; i < k; i++)
		free_irq(pci_irq_vector(pdev, i), hw->msix_entries[i].dev_id);
	pci_free_irq_vectors(hw->pdev);
	return -EINVAL;
}
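
/*
 * Resulting vector layout in MSI-X mode (k counts vectors as they are
 * requested): vector 0 -> csio_nondata_isr with dev_id == hw, vector 1
 * -> csio_fwevt_isr with dev_id == hw, vectors 2..num_sqsets+1 ->
 * csio_scsi_isr, each with dev_id pointing at its own SCSI ingress
 * queue. dev_id is stashed in msix_entries[] so the same pointer can be
 * handed back to free_irq() on teardown.
 */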

/* Reduce per-port max possible CPUs */
static void
csio_reduce_sqsets(struct csio_hw *hw, int cnt)
{
	int i;
	struct csio_scsi_cpu_info *info;

	while (cnt < hw->num_sqsets) {
		for (i = 0; i < hw->num_pports; i++) {
			info = &hw->scsi_cpu_info[i];
			if (info->max_cpus > 1) {
				info->max_cpus--;
				hw->num_sqsets--;
				if (hw->num_sqsets <= cnt)
					break;
			}
		}
	}

	csio_dbg(hw, "Reduced sqsets to %d\n", hw->num_sqsets);
}
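
/*
 * The reduction is round-robin across ports so no single port is
 * starved: e.g. with 2 ports at max_cpus = 4 each (8 sqsets) and a
 * target of 5, the walk trims the ports alternately, ending with
 * max_cpus of 2 and 3. Since max_cpus never drops below 1, every port
 * keeps at least one queue set.
 */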

static int
csio_enable_msix(struct csio_hw *hw)
{
	int i, j, k, n, min, cnt;
	int extra = CSIO_EXTRA_VECS;
	struct csio_scsi_cpu_info *info;
	struct irq_affinity desc = { .pre_vectors = 2 };

	min = hw->num_pports + extra;
	cnt = hw->num_sqsets + extra;

	/* Max vectors required based on #niqs configured in fw */
	if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || !csio_is_hw_master(hw))
		cnt = min_t(uint8_t, hw->cfg_niq, cnt);

	csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt);

	cnt = pci_alloc_irq_vectors_affinity(hw->pdev, min, cnt,
			PCI_IRQ_MSIX | PCI_IRQ_AFFINITY, &desc);
	if (cnt < 0)
		return cnt;

	if (cnt < (hw->num_sqsets + extra)) {
		csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra);
		csio_reduce_sqsets(hw, cnt - extra);
	}

	/* Distribute vectors */
	k = 0;
	csio_set_nondata_intr_idx(hw, k);
	csio_set_mb_intr_idx(csio_hw_to_mbm(hw), k++);
	csio_set_fwevt_intr_idx(hw, k++);

	for (i = 0; i < hw->num_pports; i++) {
		info = &hw->scsi_cpu_info[i];

		for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
			n = (j % info->max_cpus) + k;
			hw->sqset[i][j].intr_idx = n;
		}

		k += info->max_cpus;
	}

	return 0;
}
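
/*
 * Two details worth calling out:
 *
 * - desc.pre_vectors = 2 tells pci_alloc_irq_vectors_affinity() that
 *   the first two vectors (non-data and FW event) are not per-queue,
 *   so only the SCSI vectors get spread across CPUs by the core.
 *
 * - The (j % info->max_cpus) + k mapping wraps queue sets onto the
 *   port's possibly reduced vector range. With max_cpus = 2 and k = 2,
 *   sqsets j = 0,1,2,3 map to intr_idx 2,3,2,3: queue sets share
 *   vectors once there are fewer vectors than configured SCSI CPUs.
 */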

void
csio_intr_enable(struct csio_hw *hw)
{
	hw->intr_mode = CSIO_IM_NONE;
	hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;

	/* Try MSIX, then MSI or fall back to INTx */
	if ((csio_msi == 2) && !csio_enable_msix(hw))
		hw->intr_mode = CSIO_IM_MSIX;
	else {
		/* Max iqs required based on #niqs configured in fw */
		if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS ||
		    !csio_is_hw_master(hw)) {
			int extra = CSIO_EXTRA_MSI_IQS;

			if (hw->cfg_niq < (hw->num_sqsets + extra)) {
				csio_dbg(hw, "Reducing sqsets to %d\n",
					 hw->cfg_niq - extra);
				csio_reduce_sqsets(hw, hw->cfg_niq - extra);
			}
		}

		if ((csio_msi == 1) && !pci_enable_msi(hw->pdev))
			hw->intr_mode = CSIO_IM_MSI;
		else
			hw->intr_mode = CSIO_IM_INTX;
	}

	csio_dbg(hw, "Using %s interrupt mode.\n",
		 (hw->intr_mode == CSIO_IM_MSIX) ? "MSIX" :
		 ((hw->intr_mode == CSIO_IM_MSI) ? "MSI" : "INTx"));
}

void
csio_intr_disable(struct csio_hw *hw, bool free)
{
	csio_hw_intr_disable(hw);

	if (free) {
		int i;

		switch (hw->intr_mode) {
		case CSIO_IM_MSIX:
			for (i = 0; i < hw->num_sqsets + CSIO_EXTRA_VECS;
			     i++) {
				free_irq(pci_irq_vector(hw->pdev, i),
					 hw->msix_entries[i].dev_id);
			}
			break;
		case CSIO_IM_MSI:
		case CSIO_IM_INTX:
			free_irq(pci_irq_vector(hw->pdev, 0), hw);
			break;
		default:
			break;
		}
	}

	pci_free_irq_vectors(hw->pdev);
	hw->intr_mode = CSIO_IM_NONE;
	hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;
}