/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/string.h>

#include "csio_init.h"
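/*
 * Interrupt handling for the csiostor driver. The routines below cover
 * all three modes the HW can be brought up in (MSI-X, MSI and legacy
 * INTx), along with the helpers that request, describe and tear down
 * the vectors themselves.
 */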
/*
 * csio_nondata_isr() - MSIX ISR for the non-data vector (mailbox and
 * slow-path HW interrupts).
 * @irq: IRQ number.
 * @dev_id: Opaque HW module pointer.
 */
static irqreturn_t
csio_nondata_isr(int irq, void *dev_id)
{
	struct csio_hw *hw = (struct csio_hw *) dev_id;
	int rv;
	unsigned long flags;

	if (unlikely(!hw))
		return IRQ_NONE;

	if (unlikely(pci_channel_offline(hw->pdev))) {
		CSIO_INC_STATS(hw, n_pcich_offline);
		return IRQ_NONE;
	}

	spin_lock_irqsave(&hw->lock, flags);
	csio_hw_slow_intr_handler(hw);
	rv = csio_mb_isr_handler(hw);

	if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
		hw->flags |= CSIO_HWF_FWEVT_PENDING;
		spin_unlock_irqrestore(&hw->lock, flags);
		schedule_work(&hw->evtq_work);
		return IRQ_HANDLED;
	}

	spin_unlock_irqrestore(&hw->lock, flags);
	return IRQ_HANDLED;
}
/*
 * csio_fwevt_handler - Common FW event handler routine.
 * @hw: HW module.
 *
 * This is the ISR for FW events. It is shared b/w MSIX
 * and INTx handlers.
 */
static void
csio_fwevt_handler(struct csio_hw *hw)
{
	int rv;
	unsigned long flags;

	rv = csio_fwevtq_handler(hw);

	spin_lock_irqsave(&hw->lock, flags);
	if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
		hw->flags |= CSIO_HWF_FWEVT_PENDING;
		spin_unlock_irqrestore(&hw->lock, flags);
		schedule_work(&hw->evtq_work);
		return;
	}
	spin_unlock_irqrestore(&hw->lock, flags);

} /* csio_fwevt_handler */
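/*
 * Note: the heavy lifting for FW events is deferred to hw->evtq_work in
 * worker context; the CSIO_HWF_FWEVT_PENDING flag set above appears to
 * gate re-scheduling of that work while an earlier pass is still
 * outstanding.
 */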
/*
 * csio_fwevt_isr() - FW events MSIX ISR
 * @irq: IRQ number.
 * @dev_id: Opaque HW module pointer.
 *
 * Process WRs on the FW event queue.
 */
static irqreturn_t
csio_fwevt_isr(int irq, void *dev_id)
{
	struct csio_hw *hw = (struct csio_hw *) dev_id;

	if (unlikely(!hw))
		return IRQ_NONE;

	if (unlikely(pci_channel_offline(hw->pdev))) {
		CSIO_INC_STATS(hw, n_pcich_offline);
		return IRQ_NONE;
	}

	csio_fwevt_handler(hw);

	return IRQ_HANDLED;
}
/*
 * csio_fwevt_intx_handler() - INTx wrapper for handling FW events.
 * @hw: HW module.
 */
void
csio_fwevt_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
			struct csio_fl_dma_buf *flb, void *priv)
{
	csio_fwevt_handler(hw);
} /* csio_fwevt_intx_handler */
/*
 * csio_process_scsi_cmpl - Process a SCSI WR completion.
 * @hw: HW module.
 * @wr: The completed WR from the ingress queue.
 * @len: Length of the WR.
 * @flb: Freelist buffer array.
 * @cbfn_q: Completion queue to park completed ioreqs on.
 */
static void
csio_process_scsi_cmpl(struct csio_hw *hw, void *wr, uint32_t len,
			struct csio_fl_dma_buf *flb, void *cbfn_q)
{
	struct csio_ioreq *ioreq;
	uint8_t *scsiwr;
	uint8_t subop;
	void *cmnd;
	unsigned long flags;

	ioreq = csio_scsi_cmpl_handler(hw, wr, len, flb, NULL, &scsiwr);
	if (likely(ioreq)) {
		if (unlikely(*scsiwr == FW_SCSI_ABRT_CLS_WR)) {
			subop = FW_SCSI_ABRT_CLS_WR_SUB_OPCODE_GET(
					((struct fw_scsi_abrt_cls_wr *)
					    scsiwr)->sub_opcode_to_chk_all_io);

			csio_dbg(hw, "%s cmpl recvd ioreq:%p status:%d\n",
				 subop ? "Close" : "Abort",
				 ioreq, ioreq->wr_status);

			spin_lock_irqsave(&hw->lock, flags);
			if (subop)
				csio_scsi_closed(ioreq,
						 (struct list_head *)cbfn_q);
			else
				csio_scsi_aborted(ioreq,
						  (struct list_head *)cbfn_q);
			/*
			 * We call scsi_done for I/Os whose aborts the driver
			 * thinks have timed out. If there is a race caused by
			 * FW completing the abort at the exact same time that
			 * the driver has detected the abort timeout, the
			 * following check prevents calling scsi_done twice
			 * for the same command: once from the
			 * eh_abort_handler, another from
			 * csio_scsi_isr_handler(). This also avoids the need
			 * to check if csio_scsi_cmnd(req) is NULL in the
			 * fast path.
			 */
			cmnd = csio_scsi_cmnd(ioreq);
			if (unlikely(cmnd == NULL))
				list_del_init(&ioreq->sm.sm_list);

			spin_unlock_irqrestore(&hw->lock, flags);

			if (unlikely(cmnd == NULL))
				csio_put_scsi_ioreq_lock(hw,
						csio_hw_to_scsim(hw), ioreq);
		} else {
			spin_lock_irqsave(&hw->lock, flags);
			csio_scsi_completed(ioreq, (struct list_head *)cbfn_q);
			spin_unlock_irqrestore(&hw->lock, flags);
		}
	}
}
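/*
 * csio_process_scsi_cmpl() is the per-WR callback that
 * csio_scsi_isr_handler() below passes to csio_wr_process_iq();
 * completed ioreqs are parked on the caller's cbfn_q so their io_cbfn
 * routines can be run in one batch afterwards.
 */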
/*
 * csio_scsi_isr_handler() - Common SCSI ISR handler.
 * @iq: Ingress queue pointer.
 *
 * Processes SCSI completions on the SCSI IQ indicated by scm->iq_idx
 * by calling csio_wr_process_iq_idx. If there are completions on the
 * isr_cbfn_q, yank them out into a local queue and call their io_cbfns.
 * Once done, add these completions onto the freelist.
 * This routine is shared b/w MSIX and INTx.
 */
static inline irqreturn_t
csio_scsi_isr_handler(struct csio_q *iq)
{
	struct csio_hw *hw = (struct csio_hw *)iq->owner;
	LIST_HEAD(cbfn_q);
	struct list_head *tmp;
	struct csio_scsim *scm;
	struct csio_ioreq *ioreq;
	int isr_completions = 0;

	scm = csio_hw_to_scsim(hw);

	if (unlikely(csio_wr_process_iq(hw, iq, csio_process_scsi_cmpl,
					&cbfn_q) != 0))
		return IRQ_NONE;

	/* Call back the completion routines */
	list_for_each(tmp, &cbfn_q) {
		ioreq = (struct csio_ioreq *)tmp;
		isr_completions++;
		ioreq->io_cbfn(hw, ioreq);
		/* Release ddp buffer if used for this req */
		if (unlikely(ioreq->dcopy))
			csio_put_scsi_ddp_list_lock(hw, scm, &ioreq->gen_list,
						    ioreq->nsge);
	}

	if (isr_completions) {
		/* Return the ioreqs back to ioreq->freelist */
		csio_put_scsi_ioreq_list_lock(hw, scm, &cbfn_q,
					      isr_completions);
	}

	return IRQ_HANDLED;
}
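/*
 * The io_cbfn() callbacks above run without hw->lock held; the lock is
 * only taken inside csio_process_scsi_cmpl() while each ioreq is moved
 * onto cbfn_q, keeping per-completion work off the contended lock.
 */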
/*
 * csio_scsi_isr() - SCSI MSIX handler
 * @irq: IRQ number.
 * @dev_id: Opaque ingress queue pointer.
 *
 * This is the top level SCSI MSIX handler. Calls csio_scsi_isr_handler()
 * for handling SCSI completions.
 */
static irqreturn_t
csio_scsi_isr(int irq, void *dev_id)
{
	struct csio_q *iq = (struct csio_q *) dev_id;
	struct csio_hw *hw;

	if (unlikely(!iq))
		return IRQ_NONE;

	hw = (struct csio_hw *)iq->owner;

	if (unlikely(pci_channel_offline(hw->pdev))) {
		CSIO_INC_STATS(hw, n_pcich_offline);
		return IRQ_NONE;
	}

	csio_scsi_isr_handler(iq);

	return IRQ_HANDLED;
}
/*
 * csio_scsi_intx_handler() - SCSI INTx handler
 * @hw: HW module.
 * @priv: Opaque ingress queue pointer.
 *
 * This is the top level SCSI INTx handler. Calls csio_scsi_isr_handler()
 * for handling SCSI completions.
 */
void
csio_scsi_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
			struct csio_fl_dma_buf *flb, void *priv)
{
	struct csio_q *iq = priv;

	csio_scsi_isr_handler(iq);

} /* csio_scsi_intx_handler */
/*
 * csio_fcoe_isr() - INTx/MSI interrupt service routine for FCoE.
 * @irq: IRQ number.
 * @dev_id: Opaque HW module pointer.
 */
static irqreturn_t
csio_fcoe_isr(int irq, void *dev_id)
{
	struct csio_hw *hw = (struct csio_hw *) dev_id;
	struct csio_q *intx_q = NULL;
	int rv;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;

	if (unlikely(!hw))
		return IRQ_NONE;

	if (unlikely(pci_channel_offline(hw->pdev))) {
		CSIO_INC_STATS(hw, n_pcich_offline);
		return IRQ_NONE;
	}

	/* Disable the interrupt for this PCI function. */
	if (hw->intr_mode == CSIO_IM_INTX)
		csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI_A));

	/*
	 * The read in the following function will flush the
	 * above write.
	 */
	if (csio_hw_slow_intr_handler(hw))
		ret = IRQ_HANDLED;

	/* Get the INTx Forward interrupt IQ. */
	intx_q = csio_get_q(hw, hw->intr_iq_idx);

	CSIO_DB_ASSERT(intx_q);

	/* IQ handler is not possible for intx_q, hence pass in NULL */
	if (likely(csio_wr_process_iq(hw, intx_q, NULL, NULL) == 0))
		ret = IRQ_HANDLED;

	spin_lock_irqsave(&hw->lock, flags);
	rv = csio_mb_isr_handler(hw);
	if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
		hw->flags |= CSIO_HWF_FWEVT_PENDING;
		spin_unlock_irqrestore(&hw->lock, flags);
		schedule_work(&hw->evtq_work);
		return IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&hw->lock, flags);

	return ret;
}
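/*
 * In INTx/MSI mode this one handler fields every interrupt source:
 * slow-path HW interrupts, the forwarded interrupt IQ and the mailbox.
 * It therefore polls each source and reports IRQ_HANDLED if any of
 * them had work.
 */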
static void
csio_add_msix_desc(struct csio_hw *hw)
{
	int i;
	struct csio_msix_entries *entryp = &hw->msix_entries[0];
	int k = CSIO_EXTRA_VECS;
	int len = sizeof(entryp->desc) - 1;
	int cnt = hw->num_sqsets + k;

	/* Non-data vector */
	memset(entryp->desc, 0, len + 1);
	snprintf(entryp->desc, len, "csio-%02x:%02x:%x-nondata",
		 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));
	entryp++;

	/* Firmware event vector */
	memset(entryp->desc, 0, len + 1);
	snprintf(entryp->desc, len, "csio-%02x:%02x:%x-fwevt",
		 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));
	entryp++;

	/* SCSI vectors */
	for (i = k; i < cnt; i++, entryp++) {
		memset(entryp->desc, 0, len + 1);
		snprintf(entryp->desc, len, "csio-%02x:%02x:%x-scsi%d",
			 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw),
			 CSIO_PCI_FUNC(hw), i - CSIO_EXTRA_VECS);
	}
}
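/*
 * With the format strings above, a function at PCI 02:00.0 would end up
 * with names such as "csio-02:00:0-nondata", "csio-02:00:0-fwevt" and
 * "csio-02:00:0-scsi0"; these are the names passed to request_irq() and
 * hence what shows up in /proc/interrupts.
 */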
int
csio_request_irqs(struct csio_hw *hw)
{
	int rv, i, j, k = 0;
	struct csio_msix_entries *entryp = &hw->msix_entries[0];
	struct csio_scsi_cpu_info *info;

	if (hw->intr_mode != CSIO_IM_MSIX) {
		rv = request_irq(hw->pdev->irq, csio_fcoe_isr,
				 (hw->intr_mode == CSIO_IM_MSI) ?
							0 : IRQF_SHARED,
				 KBUILD_MODNAME, hw);
		if (rv) {
			if (hw->intr_mode == CSIO_IM_MSI)
				pci_disable_msi(hw->pdev);
			csio_err(hw, "Failed to allocate interrupt line.\n");
			return -EINVAL;
		}

		goto out;
	}

	/* Add the MSIX vector descriptions */
	csio_add_msix_desc(hw);

	rv = request_irq(entryp[k].vector, csio_nondata_isr, 0,
			 entryp[k].desc, hw);
	if (rv) {
		csio_err(hw, "IRQ request failed for vec %d err:%d\n",
			 entryp[k].vector, rv);
		goto err;
	}

	entryp[k++].dev_id = (void *)hw;

	rv = request_irq(entryp[k].vector, csio_fwevt_isr, 0,
			 entryp[k].desc, hw);
	if (rv) {
		csio_err(hw, "IRQ request failed for vec %d err:%d\n",
			 entryp[k].vector, rv);
		goto err;
	}

	entryp[k++].dev_id = (void *)hw;

	/* Allocate IRQs for SCSI */
	for (i = 0; i < hw->num_pports; i++) {
		info = &hw->scsi_cpu_info[i];
		for (j = 0; j < info->max_cpus; j++, k++) {
			struct csio_scsi_qset *sqset = &hw->sqset[i][j];
			struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx];

			rv = request_irq(entryp[k].vector, csio_scsi_isr, 0,
					 entryp[k].desc, q);
			if (rv) {
				csio_err(hw,
				       "IRQ request failed for vec %d err:%d\n",
				       entryp[k].vector, rv);
				goto err;
			}

			entryp[k].dev_id = (void *)q;

		} /* for all scsi cpus */
	} /* for all ports */

out:
	hw->flags |= CSIO_HWF_HOST_INTR_ENABLED;
	return 0;

err:
	for (i = 0; i < k; i++) {
		entryp = &hw->msix_entries[i];
		free_irq(entryp->vector, entryp->dev_id);
	}
	pci_disable_msix(hw->pdev);

	return -EINVAL;
}
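/*
 * On any MSI-X request_irq() failure above, the err label unwinds only
 * the k vectors requested so far and then disables MSI-X for the
 * function altogether.
 */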
static void
csio_disable_msix(struct csio_hw *hw, bool free)
{
	int i;
	struct csio_msix_entries *entryp;
	int cnt = hw->num_sqsets + CSIO_EXTRA_VECS;

	if (free) {
		for (i = 0; i < cnt; i++) {
			entryp = &hw->msix_entries[i];
			free_irq(entryp->vector, entryp->dev_id);
		}
	}
	pci_disable_msix(hw->pdev);
}
/* Reduce per-port max possible CPUs */
static void
csio_reduce_sqsets(struct csio_hw *hw, int cnt)
{
	int i;
	struct csio_scsi_cpu_info *info;

	while (cnt < hw->num_sqsets) {
		for (i = 0; i < hw->num_pports; i++) {
			info = &hw->scsi_cpu_info[i];
			if (info->max_cpus > 1) {
				info->max_cpus--;
				hw->num_sqsets--;
				if (hw->num_sqsets <= cnt)
					break;
			}
		}
	}

	csio_dbg(hw, "Reduced sqsets to %d\n", hw->num_sqsets);
}
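/*
 * The reduction walks the ports round-robin, shaving one CPU (one SCSI
 * qset) off a port per step, so the surviving qsets stay as evenly
 * spread across ports as possible.
 */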
static int
csio_enable_msix(struct csio_hw *hw)
{
	int i, j, k, n, min, cnt;
	struct csio_msix_entries *entryp;
	struct msix_entry *entries;
	int extra = CSIO_EXTRA_VECS;
	struct csio_scsi_cpu_info *info;

	min = hw->num_pports + extra;
	cnt = hw->num_sqsets + extra;

	/* Max vectors required based on #niqs configured in fw */
	if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || !csio_is_hw_master(hw))
		cnt = min_t(uint8_t, hw->cfg_niq, cnt);

	entries = kzalloc(sizeof(struct msix_entry) * cnt, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	for (i = 0; i < cnt; i++)
		entries[i].entry = (uint16_t)i;

	csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt);

	cnt = pci_enable_msix_range(hw->pdev, entries, min, cnt);
	if (cnt < 0) {
		kfree(entries);
		return cnt;
	}

	if (cnt < (hw->num_sqsets + extra)) {
		csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra);
		csio_reduce_sqsets(hw, cnt - extra);
	}

	/* Save off vectors */
	for (i = 0; i < cnt; i++) {
		entryp = &hw->msix_entries[i];
		entryp->vector = entries[i].vector;
	}

	/* Distribute vectors */
	k = 0;
	csio_set_nondata_intr_idx(hw, entries[k].entry);
	csio_set_mb_intr_idx(csio_hw_to_mbm(hw), entries[k++].entry);
	csio_set_fwevt_intr_idx(hw, entries[k++].entry);

	for (i = 0; i < hw->num_pports; i++) {
		info = &hw->scsi_cpu_info[i];

		for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
			n = (j % info->max_cpus) + k;
			hw->sqset[i][j].intr_idx = entries[n].entry;
		}

		k += info->max_cpus;
	}

	kfree(entries);
	return 0;
}
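/*
 * Resulting MSI-X vector layout:
 *
 *	entry 0	 - non-data (slow path) interrupts and mailbox
 *	entry 1	 - firmware events
 *	entry 2+ - SCSI qsets, grouped per port, with queues beyond a
 *		   port's max_cpus wrapping onto its existing vectors
 */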
void
csio_intr_enable(struct csio_hw *hw)
{
	hw->intr_mode = CSIO_IM_NONE;
	hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;

	/* Try MSIX, then MSI or fall back to INTx */
	if ((csio_msi == 2) && !csio_enable_msix(hw))
		hw->intr_mode = CSIO_IM_MSIX;
	else {
		/* Max iqs required based on #niqs configured in fw */
		if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS ||
		    !csio_is_hw_master(hw)) {
			int extra = CSIO_EXTRA_MSI_IQS;

			if (hw->cfg_niq < (hw->num_sqsets + extra)) {
				csio_dbg(hw, "Reducing sqsets to %d\n",
					 hw->cfg_niq - extra);
				csio_reduce_sqsets(hw, hw->cfg_niq - extra);
			}
		}

		if ((csio_msi == 1) && !pci_enable_msi(hw->pdev))
			hw->intr_mode = CSIO_IM_MSI;
		else
			hw->intr_mode = CSIO_IM_INTX;
	}

	csio_dbg(hw, "Using %s interrupt mode.\n",
		 (hw->intr_mode == CSIO_IM_MSIX) ? "MSIX" :
		 ((hw->intr_mode == CSIO_IM_MSI) ? "MSI" : "INTx"));
}
void
csio_intr_disable(struct csio_hw *hw, bool free)
{
	csio_hw_intr_disable(hw);

	switch (hw->intr_mode) {
	case CSIO_IM_MSIX:
		csio_disable_msix(hw, free);
		break;
	case CSIO_IM_MSI:
		if (free)
			free_irq(hw->pdev->irq, hw);
		pci_disable_msi(hw->pdev);
		break;
	case CSIO_IM_INTX:
		if (free)
			free_irq(hw->pdev->irq, hw);
		break;
	default:
		break;
	}

	hw->intr_mode = CSIO_IM_NONE;
	hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;
}