/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/cpumask.h>
#include <linux/string.h>

#include "csio_init.h"
#include "csio_hw.h"
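
/*
 * csio_nondata_isr() - MSIX ISR for non-data (slow path/mailbox) interrupts.
 * @irq: Interrupt number.
 * @dev_id: The HW module.
 *
 * Services slow-path HW interrupts and mailbox completions, deferring any
 * resulting firmware-event processing to the event worker.
 */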
static irqreturn_t
csio_nondata_isr(int irq, void *dev_id)
{
	struct csio_hw *hw = (struct csio_hw *) dev_id;
	int rv;
	unsigned long flags;

	if (unlikely(!hw))
		return IRQ_NONE;

	if (unlikely(pci_channel_offline(hw->pdev))) {
		CSIO_INC_STATS(hw, n_pcich_offline);
		return IRQ_NONE;
	}

	spin_lock_irqsave(&hw->lock, flags);
	csio_hw_slow_intr_handler(hw);
	rv = csio_mb_isr_handler(hw);
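
	/*
	 * If the mailbox handler completed without error and no firmware
	 * event work is already pending, mark it pending and kick the
	 * event worker; the heavy lifting happens outside hard-IRQ context.
	 */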
	if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
		hw->flags |= CSIO_HWF_FWEVT_PENDING;
		spin_unlock_irqrestore(&hw->lock, flags);
		schedule_work(&hw->evtq_work);
		return IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&hw->lock, flags);
	return IRQ_HANDLED;
}

/*
 * csio_fwevt_handler - Common FW event handler routine.
 * @hw: HW module.
 *
 * This is the handler for FW events. It is shared between the MSIX
 * and INTx handlers.
 */
static void
csio_fwevt_handler(struct csio_hw *hw)
{
	int rv;
	unsigned long flags;

	rv = csio_fwevtq_handler(hw);

	spin_lock_irqsave(&hw->lock, flags);
	if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
		hw->flags |= CSIO_HWF_FWEVT_PENDING;
		spin_unlock_irqrestore(&hw->lock, flags);
		schedule_work(&hw->evtq_work);
		return;
	}
	spin_unlock_irqrestore(&hw->lock, flags);

} /* csio_fwevt_handler */

/*
 * csio_fwevt_isr() - FW events MSIX ISR
 * @irq: Interrupt number.
 * @dev_id: The HW module.
 *
 * Process WRs on the FW event queue.
 */
static irqreturn_t
csio_fwevt_isr(int irq, void *dev_id)
{
	struct csio_hw *hw = (struct csio_hw *) dev_id;

	if (unlikely(!hw))
		return IRQ_NONE;

	if (unlikely(pci_channel_offline(hw->pdev))) {
		CSIO_INC_STATS(hw, n_pcich_offline);
		return IRQ_NONE;
	}

	csio_fwevt_handler(hw);

	return IRQ_HANDLED;
}

/*
 * csio_fwevt_intx_handler() - INTx wrapper for handling FW events.
 * @hw: HW module.
 * @wr: The work request (ignored).
 * @len: Length of the WR (ignored).
 * @flb: Freelist buffer array (ignored).
 * @priv: Private data (ignored).
 */
void
csio_fwevt_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
			struct csio_fl_dma_buf *flb, void *priv)
{
	csio_fwevt_handler(hw);
} /* csio_fwevt_intx_handler */

/*
 * csio_process_scsi_cmpl - Process a SCSI WR completion.
 * @hw: HW module.
 * @wr: The completed WR from the ingress queue.
 * @len: Length of the WR.
 * @flb: Freelist buffer array.
 * @cbfn_q: Completion queue onto which finished ioreqs are chained.
 */
static void
csio_process_scsi_cmpl(struct csio_hw *hw, void *wr, uint32_t len,
			struct csio_fl_dma_buf *flb, void *cbfn_q)
{
	struct csio_ioreq *ioreq;
	uint8_t *scsiwr;
	uint8_t subop;
	void *cmnd;
	unsigned long flags;

	ioreq = csio_scsi_cmpl_handler(hw, wr, len, flb, NULL, &scsiwr);
	if (likely(ioreq)) {
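		/*
		 * Abort/close completions take the slow path below; ordinary
		 * read/write completions are simply chained onto cbfn_q in
		 * the else branch.
		 */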
		if (unlikely(*scsiwr == FW_SCSI_ABRT_CLS_WR)) {
			subop = FW_SCSI_ABRT_CLS_WR_SUB_OPCODE_GET(
					((struct fw_scsi_abrt_cls_wr *)
					    scsiwr)->sub_opcode_to_chk_all_io);

			csio_dbg(hw, "%s cmpl recvd ioreq:%p status:%d\n",
				    subop ? "Close" : "Abort",
				    ioreq, ioreq->wr_status);

			spin_lock_irqsave(&hw->lock, flags);
			if (subop)
				csio_scsi_closed(ioreq,
						 (struct list_head *)cbfn_q);
			else
				csio_scsi_aborted(ioreq,
						  (struct list_head *)cbfn_q);
			/*
			 * We call scsi_done for I/Os whose aborts the driver
			 * believes have timed out. If there is a race caused
			 * by FW completing the abort at the exact same time
			 * that the driver has detected the abort timeout, the
			 * following check prevents calling scsi_done twice
			 * for the same command: once from eh_abort_handler,
			 * another from csio_scsi_isr_handler(). This also
			 * avoids the need to check if csio_scsi_cmnd(req) is
			 * NULL in the fast path.
			 */
			cmnd = csio_scsi_cmnd(ioreq);
			if (unlikely(cmnd == NULL))
				list_del_init(&ioreq->sm.sm_list);

			spin_unlock_irqrestore(&hw->lock, flags);

			if (unlikely(cmnd == NULL))
				csio_put_scsi_ioreq_lock(hw,
						csio_hw_to_scsim(hw), ioreq);
		} else {
			spin_lock_irqsave(&hw->lock, flags);
			csio_scsi_completed(ioreq, (struct list_head *)cbfn_q);
			spin_unlock_irqrestore(&hw->lock, flags);
		}
	}
}

/*
 * csio_scsi_isr_handler() - Common SCSI ISR handler.
 * @iq: Ingress queue pointer.
 *
 * Processes SCSI completions on the given SCSI IQ by calling
 * csio_wr_process_iq(). Completed requests are gathered onto a local
 * callback queue (cbfn_q); their io_cbfns are invoked and, once done,
 * the ioreqs are returned to the freelist.
 * This routine is shared between the MSIX and INTx handlers.
 */
static inline irqreturn_t
csio_scsi_isr_handler(struct csio_q *iq)
{
	struct csio_hw *hw = (struct csio_hw *)iq->owner;
	LIST_HEAD(cbfn_q);
	struct list_head *tmp;
	struct csio_scsim *scm;
	struct csio_ioreq *ioreq;
	int isr_completions = 0;

	scm = csio_hw_to_scsim(hw);
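
	/*
	 * Drain the ingress queue; csio_process_scsi_cmpl() chains each
	 * completed ioreq onto the local cbfn_q for the callbacks below.
	 */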
	if (unlikely(csio_wr_process_iq(hw, iq, csio_process_scsi_cmpl,
					&cbfn_q) != 0))
		return IRQ_NONE;

	/* Call back the completion routines */
	list_for_each(tmp, &cbfn_q) {
		ioreq = (struct csio_ioreq *)tmp;
		isr_completions++;
		ioreq->io_cbfn(hw, ioreq);
		/* Release ddp buffer if used for this req */
		if (unlikely(ioreq->dcopy))
			csio_put_scsi_ddp_list_lock(hw, scm, &ioreq->gen_list,
						    ioreq->nsge);
	}

	if (isr_completions) {
		/* Return the ioreqs back to ioreq->freelist */
		csio_put_scsi_ioreq_list_lock(hw, scm, &cbfn_q,
					      isr_completions);
	}

	return IRQ_HANDLED;
}

/*
 * csio_scsi_isr() - SCSI MSIX handler
 * @irq: Interrupt number.
 * @dev_id: The ingress queue.
 *
 * This is the top level SCSI MSIX handler. Calls csio_scsi_isr_handler()
 * for handling SCSI completions.
 */
static irqreturn_t
csio_scsi_isr(int irq, void *dev_id)
{
	struct csio_q *iq = (struct csio_q *) dev_id;
	struct csio_hw *hw;

	if (unlikely(!iq))
		return IRQ_NONE;

	hw = (struct csio_hw *)iq->owner;

	if (unlikely(pci_channel_offline(hw->pdev))) {
		CSIO_INC_STATS(hw, n_pcich_offline);
		return IRQ_NONE;
	}

	csio_scsi_isr_handler(iq);

	return IRQ_HANDLED;
}

/*
 * csio_scsi_intx_handler() - SCSI INTx handler
 * @hw: HW module.
 * @wr: The work request (ignored).
 * @len: Length of the WR (ignored).
 * @flb: Freelist buffer array (ignored).
 * @priv: The ingress queue.
 *
 * This is the top level SCSI INTx handler. Calls csio_scsi_isr_handler()
 * for handling SCSI completions.
 */
void
csio_scsi_intx_handler(struct csio_hw *hw, void *wr, uint32_t len,
			struct csio_fl_dma_buf *flb, void *priv)
{
	struct csio_q *iq = priv;

	csio_scsi_isr_handler(iq);

} /* csio_scsi_intx_handler */

/*
 * csio_fcoe_isr() - INTx/MSI interrupt service routine for FCoE.
 * @irq: Interrupt number.
 * @dev_id: The HW module.
 *
 * Services slow-path interrupts, the INTx forward interrupt queue and
 * mailbox completions from a single shared vector.
 */
static irqreturn_t
csio_fcoe_isr(int irq, void *dev_id)
{
	struct csio_hw *hw = (struct csio_hw *) dev_id;
	struct csio_q *intx_q = NULL;
	int rv;
	irqreturn_t ret = IRQ_NONE;
	unsigned long flags;

	if (unlikely(!hw))
		return IRQ_NONE;

	if (unlikely(pci_channel_offline(hw->pdev))) {
		CSIO_INC_STATS(hw, n_pcich_offline);
		return IRQ_NONE;
	}

	/* Disable the interrupt for this PCI function. */
	if (hw->intr_mode == CSIO_IM_INTX)
		csio_wr_reg32(hw, 0, MYPF_REG(PCIE_PF_CLI_A));

	/*
	 * The read in the following function will flush the
	 * above write.
	 */
	if (csio_hw_slow_intr_handler(hw))
		ret = IRQ_HANDLED;

	/* Get the INTx Forward interrupt IQ. */
	intx_q = csio_get_q(hw, hw->intr_iq_idx);

	CSIO_DB_ASSERT(intx_q);

	/* IQ handler is not possible for intx_q, hence pass in NULL */
	if (likely(csio_wr_process_iq(hw, intx_q, NULL, NULL) == 0))
		ret = IRQ_HANDLED;

	spin_lock_irqsave(&hw->lock, flags);
	rv = csio_mb_isr_handler(hw);
	if (rv == 0 && !(hw->flags & CSIO_HWF_FWEVT_PENDING)) {
		hw->flags |= CSIO_HWF_FWEVT_PENDING;
		spin_unlock_irqrestore(&hw->lock, flags);
		schedule_work(&hw->evtq_work);
		return IRQ_HANDLED;
	}
	spin_unlock_irqrestore(&hw->lock, flags);

	return ret;
}
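
/*
 * csio_add_msix_desc - Fill in descriptions for the MSIX vectors.
 * @hw: HW module.
 *
 * The names built here (csio-<bus>:<dev>:<fn>-nondata/-fwevt/-scsiN) are
 * what request_irq() registers, so this is what shows up in
 * /proc/interrupts.
 */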
static void
csio_add_msix_desc(struct csio_hw *hw)
{
	int i;
	struct csio_msix_entries *entryp = &hw->msix_entries[0];
	int k = CSIO_EXTRA_VECS;
	int len = sizeof(entryp->desc) - 1;
	int cnt = hw->num_sqsets + k;

	/* Non-data vector */
	memset(entryp->desc, 0, len + 1);
	snprintf(entryp->desc, len, "csio-%02x:%02x:%x-nondata",
		 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));

	/* FW event vector */
	entryp++;
	memset(entryp->desc, 0, len + 1);
	snprintf(entryp->desc, len, "csio-%02x:%02x:%x-fwevt",
		 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw), CSIO_PCI_FUNC(hw));
	entryp++;

	/* Name SCSI vecs */
	for (i = k; i < cnt; i++, entryp++) {
		memset(entryp->desc, 0, len + 1);
		snprintf(entryp->desc, len, "csio-%02x:%02x:%x-scsi%d",
			 CSIO_PCI_BUS(hw), CSIO_PCI_DEV(hw),
			 CSIO_PCI_FUNC(hw), i - CSIO_EXTRA_VECS);
	}
}

int
csio_request_irqs(struct csio_hw *hw)
{
	int rv, i, j, k = 0;
	struct csio_msix_entries *entryp = &hw->msix_entries[0];
	struct csio_scsi_cpu_info *info;

	if (hw->intr_mode != CSIO_IM_MSIX) {
		rv = request_irq(hw->pdev->irq, csio_fcoe_isr,
				 (hw->intr_mode == CSIO_IM_MSI) ?
							0 : IRQF_SHARED,
				 KBUILD_MODNAME, hw);
		if (rv) {
			if (hw->intr_mode == CSIO_IM_MSI)
				pci_disable_msi(hw->pdev);
			csio_err(hw, "Failed to allocate interrupt line.\n");
			return -EINVAL;
		}

		goto out;
	}

	/* Add the MSIX vector descriptions */
	csio_add_msix_desc(hw);

	rv = request_irq(entryp[k].vector, csio_nondata_isr, 0,
			 entryp[k].desc, hw);
	if (rv) {
		csio_err(hw, "IRQ request failed for vec %d err:%d\n",
			 entryp[k].vector, rv);
		goto err;
	}

	entryp[k++].dev_id = (void *)hw;

	rv = request_irq(entryp[k].vector, csio_fwevt_isr, 0,
			 entryp[k].desc, hw);
	if (rv) {
		csio_err(hw, "IRQ request failed for vec %d err:%d\n",
			 entryp[k].vector, rv);
		goto err;
	}

	entryp[k++].dev_id = (void *)hw;

	/* Allocate IRQs for SCSI */
	for (i = 0; i < hw->num_pports; i++) {
		info = &hw->scsi_cpu_info[i];
		for (j = 0; j < info->max_cpus; j++, k++) {
			struct csio_scsi_qset *sqset = &hw->sqset[i][j];
			struct csio_q *q = hw->wrm.q_arr[sqset->iq_idx];

			rv = request_irq(entryp[k].vector, csio_scsi_isr, 0,
					 entryp[k].desc, q);
			if (rv) {
				csio_err(hw,
				    "IRQ request failed for vec %d err:%d\n",
				    entryp[k].vector, rv);
				goto err;
			}

			entryp[k].dev_id = (void *)q;

		} /* for all scsi cpus */
	} /* for all ports */

out:
	hw->flags |= CSIO_HWF_HOST_INTR_ENABLED;

	return 0;

err:
	for (i = 0; i < k; i++) {
		entryp = &hw->msix_entries[i];
		free_irq(entryp->vector, entryp->dev_id);
	}

	pci_disable_msix(hw->pdev);

	return -EINVAL;
}
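
/*
 * A minimal sketch of the expected calling sequence, assuming the init
 * path (csio_init.c) drives these routines; the error label below is
 * hypothetical:
 *
 *	csio_intr_enable(hw);			// pick MSI-X/MSI/INTx
 *	if (csio_request_irqs(hw) != 0)		// wire ISRs to vectors
 *		goto out_disable_intr;
 *	...
 *	csio_intr_disable(hw, true);		// teardown: free IRQs too
 */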

static void
csio_disable_msix(struct csio_hw *hw, bool free)
{
	int i;
	struct csio_msix_entries *entryp;
	int cnt = hw->num_sqsets + CSIO_EXTRA_VECS;

	if (free) {
		for (i = 0; i < cnt; i++) {
			entryp = &hw->msix_entries[i];
			free_irq(entryp->vector, entryp->dev_id);
		}
	}

	pci_disable_msix(hw->pdev);
}

/* Reduce per-port max possible CPUs */
static void
csio_reduce_sqsets(struct csio_hw *hw, int cnt)
{
	int i;
	struct csio_scsi_cpu_info *info;
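
	/*
	 * Trim one queue set from each port in round-robin fashion until
	 * the total number of queue sets fits within cnt.
	 */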
	while (cnt < hw->num_sqsets) {
		for (i = 0; i < hw->num_pports; i++) {
			info = &hw->scsi_cpu_info[i];
			if (info->max_cpus > 1) {
				info->max_cpus--;
				hw->num_sqsets--;
				if (hw->num_sqsets <= cnt)
					break;
			}
		}
	}

	csio_dbg(hw, "Reduced sqsets to %d\n", hw->num_sqsets);
}

static int
csio_enable_msix(struct csio_hw *hw)
{
	int i, j, k, n, min, cnt;
	struct csio_msix_entries *entryp;
	struct msix_entry *entries;
	int extra = CSIO_EXTRA_VECS;
	struct csio_scsi_cpu_info *info;

	min = hw->num_pports + extra;
	cnt = hw->num_sqsets + extra;

	/* Max vectors required based on #niqs configured in fw */
	if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS || !csio_is_hw_master(hw))
		cnt = min_t(uint8_t, hw->cfg_niq, cnt);

	entries = kzalloc(sizeof(struct msix_entry) * cnt, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	for (i = 0; i < cnt; i++)
		entries[i].entry = (uint16_t)i;

	csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt);

	cnt = pci_enable_msix_range(hw->pdev, entries, min, cnt);
	if (cnt < 0) {
		kfree(entries);
		return cnt;
	}

	if (cnt < (hw->num_sqsets + extra)) {
		csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra);
		csio_reduce_sqsets(hw, cnt - extra);
	}

	/* Save off vectors */
	for (i = 0; i < cnt; i++) {
		entryp = &hw->msix_entries[i];
		entryp->vector = entries[i].vector;
	}

	/* Distribute vectors */
	k = 0;
	csio_set_nondata_intr_idx(hw, entries[k].entry);
	csio_set_mb_intr_idx(csio_hw_to_mbm(hw), entries[k++].entry);
	csio_set_fwevt_intr_idx(hw, entries[k++].entry);
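
	/*
	 * Map each port's SCSI queue sets onto that port's share of the
	 * remaining vectors; when there are more queue sets than vectors,
	 * the modulo below makes the queue sets share vectors.
	 */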
	for (i = 0; i < hw->num_pports; i++) {
		info = &hw->scsi_cpu_info[i];

		for (j = 0; j < hw->num_scsi_msix_cpus; j++) {
			n = (j % info->max_cpus) + k;
			hw->sqset[i][j].intr_idx = entries[n].entry;
		}

		k += info->max_cpus;
	}

	kfree(entries);
	return 0;
}
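
/*
 * csio_intr_enable - Enable interrupts for the HW module.
 * @hw: HW module.
 *
 * Picks the best available interrupt mode: MSI-X, then MSI, then INTx.
 */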
void
csio_intr_enable(struct csio_hw *hw)
{
	hw->intr_mode = CSIO_IM_NONE;
	hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;

	/* Try MSIX, then MSI or fall back to INTx */
	if ((csio_msi == 2) && !csio_enable_msix(hw))
		hw->intr_mode = CSIO_IM_MSIX;
	else {
		/* Max iqs required based on #niqs configured in fw */
		if (hw->flags & CSIO_HWF_USING_SOFT_PARAMS ||
			!csio_is_hw_master(hw)) {
			int extra = CSIO_EXTRA_MSI_IQS;

			if (hw->cfg_niq < (hw->num_sqsets + extra)) {
				csio_dbg(hw, "Reducing sqsets to %d\n",
					 hw->cfg_niq - extra);
				csio_reduce_sqsets(hw, hw->cfg_niq - extra);
			}
		}

		if ((csio_msi == 1) && !pci_enable_msi(hw->pdev))
			hw->intr_mode = CSIO_IM_MSI;
		else
			hw->intr_mode = CSIO_IM_INTX;
	}

	csio_dbg(hw, "Using %s interrupt mode.\n",
		    (hw->intr_mode == CSIO_IM_MSIX) ? "MSIX" :
		    ((hw->intr_mode == CSIO_IM_MSI) ? "MSI" : "INTx"));
}
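
/*
 * csio_intr_disable - Disable interrupts for the HW module.
 * @hw: HW module.
 * @free: If true, also free the registered IRQ handlers.
 */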
void
csio_intr_disable(struct csio_hw *hw, bool free)
{
	csio_hw_intr_disable(hw);

	switch (hw->intr_mode) {
	case CSIO_IM_MSIX:
		csio_disable_msix(hw, free);
		break;
	case CSIO_IM_MSI:
		if (free)
			free_irq(hw->pdev->irq, hw);
		pci_disable_msi(hw->pdev);
		break;
	case CSIO_IM_INTX:
		if (free)
			free_irq(hw->pdev->irq, hw);
		break;
	default:
		break;
	}
	hw->intr_mode = CSIO_IM_NONE;
	hw->flags &= ~CSIO_HWF_HOST_INTR_ENABLED;
}