/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2005 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>

#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
/*
 * Define macro to log: Mailbox command x%x cannot issue Data
 * This allows multiple uses of lpfc_msgBlk0311
 * w/o perturbing log msg utility.
 */
#define LOG_MBOX_CANNOT_ISSUE_DATA(phba, mb, psli, flag) \
	lpfc_printf_log(phba, \
			KERN_INFO, \
			LOG_MBOX | LOG_SLI, \
			"%d:0311 Mailbox command x%x cannot issue " \
			"Data: x%x x%x x%x\n", \
			phba->brd_no, \
			mb->mbxCommand, \
			phba->hba_state, \
			psli->sli_flag, \
			flag)
/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;

/*
 * Translate the iocb command to an iocb command type used to decide the final
 * disposition of each completed IOCB.
 */
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;
	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return type;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}
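
/*
 * Configure each SLI ring with a CONFIG_RING mailbox command issued in
 * polled mode.  Any mailbox failure here is treated as fatal and leaves
 * the HBA in the LPFC_HBA_ERROR state.
 */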
static int
lpfc_sli_ring_map(struct lpfc_hba * phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_sli *psli = &phba->sli;
	MAILBOX_t *pmbox = &pmb->mb;
	int i, rc;

	for (i = 0; i < psli->num_rings; i++) {
		phba->hba_state = LPFC_INIT_MBX_CMDS;
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_INIT,
					"%d:0446 Adapter failed to init, "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					phba->brd_no,
					pmbox->mbxCommand,
					pmbox->mbxStatus,
					i);
			phba->hba_state = LPFC_HBA_ERROR;
			return -ENXIO;
		}
	}
	return 0;
}
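
/*
 * Queue a command iocb on the txcmplq to wait for its completion.  If
 * the iotag fits within the ring's fast_lookup table, the iocb is also
 * recorded there so the response handler can find it without a list
 * walk.
 */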
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba,
			struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb)
{
	uint32_t iotag;

	list_add_tail(&piocb->list, &pring->txcmplq);
	pring->txcmplq_cnt++;
	if (unlikely(pring->ringno == LPFC_ELS_RING))
		mod_timer(&phba->els_tmofunc,
			  jiffies + HZ * (phba->fc_ratov << 1));

	if (pring->fast_lookup) {
		/* Setup fast lookup based on iotag for completion */
		iotag = piocb->iocb.ulpIoTag;
		if (iotag && (iotag < pring->fast_iotag))
			*(pring->fast_lookup + iotag) = piocb;
		else {
			/* Cmd ring <ringno> put: iotag <iotag> greater than
			   configured max <fast_iotag> wd0 <icmd> */
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"%d:0316 Cmd ring %d put: iotag x%x "
					"greater than configured max x%x "
					"wd0 x%x\n",
					phba->brd_no,
					pring->ringno, iotag,
					pring->fast_iotag,
					*(((uint32_t *)(&piocb->iocb)) + 7));
		}
	}
	return 0;
}
static struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
{
	struct list_head *dlp;
	struct lpfc_iocbq *cmd_iocb;

	dlp = &pring->txq;
	list_remove_head((&pring->txq), cmd_iocb,
			 struct lpfc_iocbq, list);
	if (cmd_iocb) {
		/* If the first ptr is not equal to the list header,
		 * deque the IOCBQ_t and return it.
		 */
		pring->txq_cnt--;
	}

	return cmd_iocb;
}
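
/*
 * Return the next free command slot in the ring, or NULL if the ring
 * is full (the port's cmdGetInx is re-read once before giving up).  A
 * portCmdGet value beyond the ring size is treated as an adapter error
 * and handed to the worker thread as an error attention.
 */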
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
	uint32_t max_cmd_idx = pring->numCiocb;
	IOCB_t *iocb = NULL;

	if ((pring->next_cmdidx == pring->cmdidx) &&
	    (++pring->next_cmdidx >= max_cmd_idx))
		pring->next_cmdidx = 0;

	if (unlikely(pring->local_getidx == pring->next_cmdidx)) {

		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"%d:0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					phba->brd_no, pring->ringno,
					pring->local_getidx, max_cmd_idx);

			phba->hba_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;
			if (phba->work_wait)
				wake_up(phba->work_wait);

			return NULL;
		}

		if (pring->local_getidx == pring->next_cmdidx)
			return NULL;
	}

	iocb = IOCB_ENTRY(pring->cmdringaddr, pring->cmdidx);

	return iocb;
}
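
/*
 * Allocate the next free iotag for this ring.  Rings without a
 * fast_lookup table simply cycle a counter; rings with one scan for an
 * unused table slot and return 0 when every iotag is outstanding.
 */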
static uint32_t
lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
{
	uint32_t search_start;

	if (pring->fast_lookup == NULL) {
		pring->iotag_ctr++;
		if (pring->iotag_ctr >= pring->iotag_max)
			pring->iotag_ctr = 1;
		return pring->iotag_ctr;
	}

	search_start = pring->iotag_ctr;

	do {
		pring->iotag_ctr++;
		if (pring->iotag_ctr >= pring->fast_iotag)
			pring->iotag_ctr = 1;

		if (*(pring->fast_lookup + pring->iotag_ctr) == NULL)
			return pring->iotag_ctr;

	} while (pring->iotag_ctr != search_start);

	/*
	 * Outstanding I/O count for ring <ringno> is at max <fast_iotag>
	 */
	lpfc_printf_log(phba,
			KERN_ERR,
			LOG_SLI,
			"%d:0318 Outstanding I/O count for ring %d is at max x%x\n",
			phba->brd_no,
			pring->ringno,
			pring->fast_iotag);
	return (0);
}
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		     IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	/*
	 * Allocate and set up an iotag
	 */
	nextiocb->iocb.ulpIoTag =
		lpfc_sli_next_iotag(phba, &phba->sli.ring[phba->sli.fcp_ring]);

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, sizeof (IOCB_t));
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now.  For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		list_add_tail(&nextiocb->list, &phba->lpfc_iocb_list);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->cmdidx = pring->next_cmdidx;
	writeb(pring->cmdidx, phba->MBslimaddr
	       + (SLIMOFF + (pring->ringno * 2)) * 4);
}
static void
lpfc_sli_update_full_ring(struct lpfc_hba * phba,
			  struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}
static void
lpfc_sli_update_ring(struct lpfc_hba * phba,
		     struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */
}
static void
lpfc_sli_resume_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	/*
	 * Check to see if:
	 *  (a) there is anything on the txq to send
	 *  (b) link is up
	 *  (c) link attention events can be processed (fcp ring only)
	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
	 */
	if (pring->txq_cnt &&
	    (phba->hba_state > LPFC_LINK_DOWN) &&
	    (pring->ringno != phba->sli.fcp_ring ||
	     phba->sli.sli_flag & LPFC_PROCESS_LA) &&
	    !(pring->flag & LPFC_STOP_IOCB_MBX)) {

		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}
}
/* lpfc_sli_turn_on_ring is only called by lpfc_sli_handle_mb_event below */
static void
lpfc_sli_turn_on_ring(struct lpfc_hba * phba, int ringno)
{
	struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[ringno];

	/* If the ring is active, flag it */
	if (phba->sli.ring[ringno].cmdringaddr) {
		if (phba->sli.ring[ringno].flag & LPFC_STOP_IOCB_MBX) {
			phba->sli.ring[ringno].flag &= ~LPFC_STOP_IOCB_MBX;
			/*
			 * Force update of the local copy of cmdGetInx
			 */
			phba->sli.ring[ringno].local_getidx
				= le32_to_cpu(pgp->cmdGetInx);
			spin_lock_irq(phba->host->host_lock);
			lpfc_sli_resume_iocb(phba, &phba->sli.ring[ringno]);
			spin_unlock_irq(phba->host->host_lock);
		}
	}
}
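
/*
 * Validate a mailbox command code.  Known commands are echoed back;
 * anything else maps to MBX_SHUTDOWN so the completion handler can
 * treat it as fatal.
 */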
static int
lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
{
	uint8_t ret;

	switch (mbxCommand) {
	case MBX_RUN_BIU_DIAG:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_STATUS:
	case MBX_READ_LNK_STAT:
	case MBX_UNREG_LOGIN:
	case MBX_DUMP_MEMORY:
	case MBX_DUMP_CONTEXT:
	case MBX_DEL_LD_ENTRY:
	case MBX_RUN_PROGRAM:
	case MBX_CONFIG_FARP:
	case MBX_RUN_BIU_DIAG64:
	case MBX_CONFIG_PORT:
	case MBX_READ_SPARM64:
	case MBX_REG_LOGIN64:
	case MBX_FLASH_WR_ULA:
	case MBX_LOAD_EXP_ROM:
		ret = mbxCommand;
		break;
	default:
		ret = MBX_SHUTDOWN;
		break;
	}
	return (ret);
}
static void
lpfc_sli_wake_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	wait_queue_head_t *pdone_q;

	/*
	 * If pdone_q is empty, the driver thread gave up waiting and
	 * continued running.
	 */
	pdone_q = (wait_queue_head_t *) pmboxq->context1;
	if (pdone_q)
		wake_up_interruptible(pdone_q);
}
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_dmabuf *mp;

	mp = (struct lpfc_dmabuf *) (pmb->context1);
	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
	mempool_free( pmb, phba->mbox_mem_pool);
}
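
/*
 * Process a mailbox completion interrupt: retire the active mailbox
 * command, invoke its completion routine, then issue any queued mailbox
 * commands or, when the queue is empty, re-enable IOCB processing and
 * free buffers parked on freebufList.
 */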
int
lpfc_sli_handle_mb_event(struct lpfc_hba * phba)
{
	MAILBOX_t *mbox;
	MAILBOX_t *pmbox;
	LPFC_MBOXQ_t *pmb;
	struct lpfc_sli *psli;
	int i, rc;
	uint32_t process_next;

	psli = &phba->sli;
	/* We should only get here if we are in SLI2 mode */
	if (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE)) {
		return (1);
	}

	phba->sli.slistat.mbox_event++;

	/* Get a Mailbox buffer to setup mailbox commands for callback */
	if ((pmb = phba->sli.mbox_active)) {
		pmbox = &pmb->mb;
		mbox = &phba->slim2p->mbx;

		/* First check out the status word */
		lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof (uint32_t));

		/* Sanity check to ensure the host owns the mailbox */
		if (pmbox->mbxOwner != OWN_HOST) {
			/* Let's try for a while */
			for (i = 0; i < 10240; i++) {
				/* First copy command data */
				lpfc_sli_pcimem_bcopy(mbox, pmbox,
						      sizeof (uint32_t));
				if (pmbox->mbxOwner == OWN_HOST)
					goto mbout;
			}
			/* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus
			   <status> */
			lpfc_printf_log(phba,
					KERN_WARNING,
					LOG_MBOX | LOG_SLI,
					"%d:0304 Stray Mailbox Interrupt "
					"mbxCommand x%x mbxStatus x%x\n",
					phba->brd_no,
					pmbox->mbxCommand,
					pmbox->mbxStatus);

			spin_lock_irq(phba->host->host_lock);
			phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
			spin_unlock_irq(phba->host->host_lock);
			return (1);
		}

	      mbout:
		del_timer_sync(&phba->sli.mbox_tmo);
		phba->work_hba_events &= ~WORKER_MBOX_TMO;

		/*
		 * It is a fatal error if unknown mbox command completion.
		 */
		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
		    MBX_SHUTDOWN) {
			/* Unknown mailbox command compl */
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_MBOX | LOG_SLI,
					"%d:0323 Unknown Mailbox command %x Cmpl\n",
					phba->brd_no,
					pmbox->mbxCommand);
			phba->hba_state = LPFC_HBA_ERROR;
			phba->work_hs = HS_FFER3;
			lpfc_handle_eratt(phba);
			return (0);
		}

		phba->sli.mbox_active = NULL;
		if (pmbox->mbxStatus) {
			phba->sli.slistat.mbox_stat_err++;
			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
				/* Mbox cmd cmpl error - RETRYing */
				lpfc_printf_log(phba,
						KERN_INFO,
						LOG_MBOX | LOG_SLI,
						"%d:0305 Mbox cmd cmpl error - "
						"RETRYing Data: x%x x%x x%x x%x\n",
						phba->brd_no,
						pmbox->mbxCommand,
						pmbox->mbxStatus,
						pmbox->un.varWords[0],
						phba->hba_state);
				pmbox->mbxStatus = 0;
				pmbox->mbxOwner = OWN_HOST;
				spin_lock_irq(phba->host->host_lock);
				phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
				spin_unlock_irq(phba->host->host_lock);
				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
				if (rc == MBX_SUCCESS)
					return (0);
			}
		}

		/* Mailbox cmd <cmd> Cmpl <cmpl> */
		lpfc_printf_log(phba,
				KERN_INFO,
				LOG_MBOX | LOG_SLI,
				"%d:0307 Mailbox cmd x%x Cmpl x%p "
				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
				phba->brd_no,
				pmbox->mbxCommand,
				pmb->mbox_cmpl,
				*((uint32_t *) pmbox),
				pmbox->un.varWords[0],
				pmbox->un.varWords[1],
				pmbox->un.varWords[2],
				pmbox->un.varWords[3],
				pmbox->un.varWords[4],
				pmbox->un.varWords[5],
				pmbox->un.varWords[6],
				pmbox->un.varWords[7]);

		if (pmb->mbox_cmpl) {
			lpfc_sli_pcimem_bcopy(mbox, pmbox, MAILBOX_CMD_SIZE);
			pmb->mbox_cmpl(phba,pmb);
		}
	}

	do {
		process_next = 0;	/* by default don't loop */
		spin_lock_irq(phba->host->host_lock);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;

		/* Process next mailbox command if there is one */
		if ((pmb = lpfc_mbox_get(phba))) {
			spin_unlock_irq(phba->host->host_lock);
			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED) {
				pmb->mb.mbxStatus = MBX_NOT_FINISHED;
				pmb->mbox_cmpl(phba,pmb);
				process_next = 1;
				continue;	/* loop back */
			}
		} else {
			spin_unlock_irq(phba->host->host_lock);
			/* Turn on IOCB processing */
			for (i = 0; i < phba->sli.num_rings; i++) {
				lpfc_sli_turn_on_ring(phba, i);
			}

			/* Free any lpfc_dmabuf's waiting for mbox cmd cmpls */
			while (!list_empty(&phba->freebufList)) {
				struct lpfc_dmabuf *mp;

				mp = NULL;
				list_remove_head((&phba->freebufList),
						 mp,
						 struct lpfc_dmabuf,
						 list);
				if (mp) {
					lpfc_mbuf_free(phba, mp->virt,
						       mp->phys);
					kfree(mp);
				}
			}
		}
	} while (process_next);

	return (0);
}
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    struct lpfc_iocbq *saveq)
{
	IOCB_t *irsp;
	WORD5 *w5p;
	uint32_t Rctl, Type;
	uint32_t match, i;

	match = 0;
	irsp = &(saveq->iocb);
	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX)
	    || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)) {
		Rctl = FC_ELS_REQ;
		Type = FC_ELS_DATA;
	} else {
		w5p = (WORD5 *) & (saveq->iocb.un.ulpWord[5]);
		Rctl = w5p->hcsw.Rctl;
		Type = w5p->hcsw.Type;

		/* Firmware Workaround */
		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
		    (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX)) {
			Rctl = FC_ELS_REQ;
			Type = FC_ELS_DATA;
			w5p->hcsw.Rctl = Rctl;
			w5p->hcsw.Type = Type;
		}
	}

	/* Unsolicited Responses */
	if (pring->prt[0].profile) {
		(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, saveq);
		match = 1;
	} else {
		/* We must search, based on rctl / type
		   for the right routine */
		for (i = 0; i < pring->num_mask; i++) {
			if ((pring->prt[i].rctl == Rctl)
			    && (pring->prt[i].type == Type)) {
				(pring->prt[i].lpfc_sli_rcv_unsol_event)
					(phba, pring, saveq);
				match = 1;
			}
		}
	}
	if (match == 0) {
		/* Unexpected Rctl / Type received */
		/* Ring <ringno> handler: unexpected
		   Rctl <Rctl> Type <Type> received */
		lpfc_printf_log(phba,
				KERN_WARNING,
				LOG_SLI,
				"%d:0313 Ring %d handler: unexpected Rctl x%x "
				"Type x%x received\n",
				phba->brd_no,
				pring->ringno,
				Rctl,
				Type);
	}
	return (1);
}
static struct lpfc_iocbq *
lpfc_sli_txcmpl_ring_search_slow(struct lpfc_sli_ring * pring,
				 struct lpfc_iocbq * prspiocb)
{
	IOCB_t *irsp;
	IOCB_t *icmd;
	struct lpfc_iocbq *cmd_iocb = NULL;
	struct lpfc_iocbq *iocb, *next_iocb;
	uint16_t iotag;

	irsp = &prspiocb->iocb;
	iotag = irsp->ulpIoTag;

	/* Search through txcmplq from the beginning */
	list_for_each_entry_safe(iocb, next_iocb, &(pring->txcmplq), list) {
		icmd = &iocb->iocb;
		if (iotag == icmd->ulpIoTag) {
			/* Found a match. */
			cmd_iocb = iocb;
			list_del(&iocb->list);
			pring->txcmplq_cnt--;
			break;
		}
	}

	return cmd_iocb;
}
static struct lpfc_iocbq *
lpfc_sli_txcmpl_ring_iotag_lookup(struct lpfc_hba * phba,
				  struct lpfc_sli_ring * pring,
				  struct lpfc_iocbq * prspiocb)
{
	IOCB_t *irsp;
	struct lpfc_iocbq *cmd_iocb = NULL;
	uint16_t iotag;

	if (unlikely(pring->fast_lookup == NULL))
		return NULL;

	/* Use fast lookup based on iotag for completion */
	irsp = &prspiocb->iocb;
	iotag = irsp->ulpIoTag;
	if (iotag < pring->fast_iotag) {
		cmd_iocb = *(pring->fast_lookup + iotag);
		*(pring->fast_lookup + iotag) = NULL;
		if (cmd_iocb) {
			list_del(&cmd_iocb->list);
			pring->txcmplq_cnt--;
			return cmd_iocb;
		}

		/*
		 * This is clearly an error.  A ring that uses iotags
		 * should never have an interrupt for a completion that
		 * is not on the ring.  Return NULL and log an error.
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"%d:0327 Rsp ring %d error - command "
				"completion for iotag x%x not found\n",
				phba->brd_no, pring->ringno, iotag);
		return NULL;
	}

	/*
	 * Rsp ring <ringno> get: iotag <iotag> greater than
	 * configured max <fast_iotag> wd0 <irsp>.  This is an
	 * error.  Just return NULL.
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"%d:0317 Rsp ring %d get: iotag x%x greater than "
			"configured max x%x wd0 x%x\n",
			phba->brd_no, pring->ringno, iotag, pring->fast_iotag,
			*(((uint32_t *) irsp) + 7));
	return NULL;
}
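
/*
 * Match a solicited response against its originating command on the
 * txcmplq and invoke the command's completion routine, dropping
 * host_lock around the callback.
 */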
static int
lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
			  struct lpfc_iocbq *saveq)
{
	struct lpfc_iocbq *cmdiocbp;
	int rc = 1;
	unsigned long iflag;

	/* Based on the iotag field, get the cmd IOCB from the txcmplq */
	spin_lock_irqsave(phba->host->host_lock, iflag);
	cmdiocbp = lpfc_sli_txcmpl_ring_search_slow(pring, saveq);
	if (cmdiocbp) {
		if (cmdiocbp->iocb_cmpl) {
			/*
			 * Post all ELS completions to the worker thread.
			 * All others are passed to the completion callback.
			 */
			if (pring->ringno == LPFC_ELS_RING) {
				spin_unlock_irqrestore(phba->host->host_lock,
						       iflag);
				(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
				spin_lock_irqsave(phba->host->host_lock, iflag);
			} else {
				if (cmdiocbp->iocb_flag & LPFC_IO_POLL)
					rc = 0;

				spin_unlock_irqrestore(phba->host->host_lock,
						       iflag);
				(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
				spin_lock_irqsave(phba->host->host_lock, iflag);
			}
		} else {
			list_add_tail(&cmdiocbp->list, &phba->lpfc_iocb_list);
		}
	} else {
		/*
		 * Unknown initiating command based on the response iotag.
		 * This could be the case on the ELS ring because of
		 * a prior driver abort.
		 */
		if (pring->ringno != LPFC_ELS_RING) {
			/*
			 * Ring <ringno> handler: unexpected completion IoTag
			 * <IoTag>
			 */
			lpfc_printf_log(phba,
					KERN_WARNING,
					LOG_SLI,
					"%d:0322 Ring %d handler: unexpected "
					"completion IoTag x%x Data: x%x x%x x%x x%x\n",
					phba->brd_no,
					pring->ringno,
					saveq->iocb.ulpIoTag,
					saveq->iocb.ulpStatus,
					saveq->iocb.un.ulpWord[4],
					saveq->iocb.ulpCommand,
					saveq->iocb.ulpContext);
		}
	}
	spin_unlock_irqrestore(phba->host->host_lock, iflag);
	return rc;
}
/*
 * This routine presumes LPFC_FCP_RING handling and doesn't bother
 * to check it explicitly.
 */
static int
lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
				struct lpfc_sli_ring * pring, uint32_t mask)
{
	struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
	IOCB_t *irsp = NULL;
	IOCB_t *entry = NULL;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq rspiocbq;
	uint32_t status;
	uint32_t portRspPut, portRspMax;
	int rc = 1;
	lpfc_iocb_type type;
	unsigned long iflag;
	uint32_t rsp_cmpl = 0;
	void __iomem *to_slim;

	spin_lock_irqsave(phba->host->host_lock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (unlikely(portRspPut >= portRspMax)) {
		/*
		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
		 * rsp ring <portRspMax>
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"%d:0312 Ring %d handler: portRspPut %d "
				"is bigger than rsp ring %d\n",
				phba->brd_no, pring->ringno, portRspPut,
				portRspMax);

		phba->hba_state = LPFC_HBA_ERROR;

		/* All error attention handlers are posted to worker thread */
		phba->work_ha |= HA_ERATT;
		phba->work_hs = HS_FFER3;
		if (phba->work_wait)
			wake_up(phba->work_wait);

		spin_unlock_irqrestore(phba->host->host_lock, iflag);
		return 1;
	}

	while (pring->rspidx != portRspPut) {
		/*
		 * Fetch an entry off the ring and copy it into a local data
		 * structure.  The copy involves a byte-swap since the
		 * network byte order and pci byte orders are different.
		 */
		entry = (IOCB_t *) IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
				      (uint32_t *) &rspiocbq.iocb,
				      sizeof (IOCB_t));
		irsp = &rspiocbq.iocb;

		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
		pring->stats.iocb_rsp++;
		rsp_cmpl++;

		if (unlikely(irsp->ulpStatus)) {
			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"%d:0326 Rsp Ring %d error: IOCB Data: "
					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
					phba->brd_no, pring->ringno,
					irsp->un.ulpWord[0], irsp->un.ulpWord[1],
					irsp->un.ulpWord[2], irsp->un.ulpWord[3],
					irsp->un.ulpWord[4], irsp->un.ulpWord[5],
					*(((uint32_t *) irsp) + 6),
					*(((uint32_t *) irsp) + 7));
		}

		switch (type) {
		case LPFC_ABORT_IOCB:
		case LPFC_SOL_IOCB:
			/*
			 * Idle exchange closed via ABTS from port.  No iocb
			 * resources need to be recovered.
			 */
			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
				printk(KERN_INFO "%s: IOCB cmd 0x%x processed. "
				       "Skipping completion\n", __FUNCTION__,
				       irsp->ulpCommand);
				break;
			}

			cmdiocbq = lpfc_sli_txcmpl_ring_iotag_lookup(phba,
								     pring,
								     &rspiocbq);
			if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
				spin_unlock_irqrestore(
					phba->host->host_lock, iflag);
				(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
						      &rspiocbq);
				spin_lock_irqsave(phba->host->host_lock,
						  iflag);
			}
			break;
		default:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *) irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"%d:0321 Unknown IOCB command "
						"Data: x%x, x%x x%x x%x x%x\n",
						phba->brd_no, type,
						irsp->ulpCommand,
						irsp->ulpStatus, irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		/*
		 * The response IOCB has been processed.  Update the ring
		 * pointer in SLIM.  If the port response put pointer has not
		 * been updated, sync the pgp->rspPutInx and fetch the new port
		 * response put pointer.
		 */
		if (++pring->rspidx >= portRspMax)
			pring->rspidx = 0;

		to_slim = phba->MBslimaddr +
			(SLIMOFF + (pring->ringno * 2) + 1) * 4;
		writeb(pring->rspidx, to_slim);

		if (pring->rspidx == portRspPut)
			portRspPut = le32_to_cpu(pgp->rspPutInx);
	}

	if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
		pring->stats.iocb_rsp_full++;
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr);
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);
	}

	spin_unlock_irqrestore(phba->host->host_lock, iflag);
	return rc;
}
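
/*
 * Slow-path (ELS/CT) response handling: responses are copied out of the
 * ring into free iocbs and collected on iocb_continueq until ulpLe
 * marks the final entry, then the whole command is dispatched by IOCB
 * type.
 */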
int
lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
				struct lpfc_sli_ring * pring, uint32_t mask)
{
	IOCB_t *entry;
	IOCB_t *irsp = NULL;
	struct lpfc_iocbq *rspiocbp = NULL;
	struct lpfc_iocbq *next_iocb;
	struct lpfc_iocbq *cmdiocbp;
	struct lpfc_iocbq *saveq;
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
	uint8_t iocb_cmd_type;
	lpfc_iocb_type type;
	uint32_t status, free_saveq;
	uint32_t portRspPut, portRspMax;
	int rc = 1;
	unsigned long iflag;
	void __iomem *to_slim;

	spin_lock_irqsave(phba->host->host_lock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (portRspPut >= portRspMax) {
		/*
		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
		 * rsp ring <portRspMax>
		 */
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_SLI,
				"%d:0312 Ring %d handler: portRspPut %d "
				"is bigger than rsp ring %d\n",
				phba->brd_no,
				pring->ringno, portRspPut, portRspMax);

		phba->hba_state = LPFC_HBA_ERROR;
		spin_unlock_irqrestore(phba->host->host_lock, iflag);

		phba->work_hs = HS_FFER3;
		lpfc_handle_eratt(phba);

		return 1;
	}

	lpfc_iocb_list = &phba->lpfc_iocb_list;
	while (pring->rspidx != portRspPut) {
		/*
		 * Build a completion list and call the appropriate handler.
		 * The process is to get the next available response iocb, get
		 * a free iocb from the list, copy the response data into the
		 * free iocb, insert to the continuation list, and update the
		 * next response index to slim.  This process makes response
		 * iocb's in the ring available to DMA as fast as possible but
		 * pays a penalty for a copy operation.  Since the iocb is
		 * only 32 bytes, this penalty is considered small relative to
		 * the PCI reads for register values and a slim write.  When
		 * the ulpLe field is set, the entire Command has been
		 * received.
		 */
		entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
		list_remove_head(lpfc_iocb_list, rspiocbp, struct lpfc_iocbq,
				 list);
		if (rspiocbp == NULL) {
			printk(KERN_ERR "%s: out of buffers! Failing "
			       "completion.\n", __FUNCTION__);
			break;
		}

		lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, sizeof (IOCB_t));
		irsp = &rspiocbp->iocb;

		if (++pring->rspidx >= portRspMax)
			pring->rspidx = 0;

		to_slim = phba->MBslimaddr + (SLIMOFF + (pring->ringno * 2)
					      + 1) * 4;
		writeb(pring->rspidx, to_slim);

		if (list_empty(&(pring->iocb_continueq))) {
			list_add(&rspiocbp->list, &(pring->iocb_continueq));
		} else {
			list_add_tail(&rspiocbp->list,
				      &(pring->iocb_continueq));
		}

		pring->iocb_continueq_cnt++;
		if (irsp->ulpLe) {
			/*
			 * By default, the driver expects to free all resources
			 * associated with this iocb completion.
			 */
			free_saveq = 1;
			saveq = list_get_first(&pring->iocb_continueq,
					       struct lpfc_iocbq, list);
			irsp = &(saveq->iocb);
			list_del_init(&pring->iocb_continueq);
			pring->iocb_continueq_cnt = 0;

			pring->stats.iocb_rsp++;

			if (irsp->ulpStatus) {
				/* Rsp ring <ringno> error: IOCB */
				lpfc_printf_log(phba,
						KERN_WARNING,
						LOG_SLI,
						"%d:0328 Rsp Ring %d error: IOCB Data: "
						"x%x x%x x%x x%x x%x x%x x%x x%x\n",
						phba->brd_no,
						pring->ringno,
						irsp->un.ulpWord[0],
						irsp->un.ulpWord[1],
						irsp->un.ulpWord[2],
						irsp->un.ulpWord[3],
						irsp->un.ulpWord[4],
						irsp->un.ulpWord[5],
						*(((uint32_t *) irsp) + 6),
						*(((uint32_t *) irsp) + 7));
			}

			/*
			 * Fetch the IOCB command type and call the correct
			 * completion routine.  Solicited and Unsolicited
			 * IOCBs on the ELS ring get freed back to the
			 * lpfc_iocb_list by the discovery kernel thread.
			 */
			iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
			type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
			if (type == LPFC_SOL_IOCB) {
				spin_unlock_irqrestore(phba->host->host_lock,
						       iflag);
				rc = lpfc_sli_process_sol_iocb(phba, pring,
							       saveq);
				spin_lock_irqsave(phba->host->host_lock, iflag);
			} else if (type == LPFC_UNSOL_IOCB) {
				spin_unlock_irqrestore(phba->host->host_lock,
						       iflag);
				rc = lpfc_sli_process_unsol_iocb(phba, pring,
								 saveq);
				spin_lock_irqsave(phba->host->host_lock, iflag);
			} else if (type == LPFC_ABORT_IOCB) {
				if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
				    ((cmdiocbp =
				      lpfc_sli_txcmpl_ring_search_slow(pring,
								       saveq)))) {
					/* Call the specified completion
					   routine */
					if (cmdiocbp->iocb_cmpl) {
						spin_unlock_irqrestore(
							phba->host->host_lock,
							iflag);
						(cmdiocbp->iocb_cmpl) (phba,
							cmdiocbp, saveq);
						spin_lock_irqsave(
							phba->host->host_lock,
							iflag);
					} else {
						list_add_tail(&cmdiocbp->list,
							      lpfc_iocb_list);
					}
				}
			} else if (type == LPFC_UNKNOWN_IOCB) {
				if (irsp->ulpCommand == CMD_ADAPTER_MSG) {

					char adaptermsg[LPFC_MAX_ADPTMSG];

					memset(adaptermsg, 0,
					       LPFC_MAX_ADPTMSG);
					memcpy(&adaptermsg[0], (uint8_t *) irsp,
					       MAX_MSG_DATA);
					dev_warn(&((phba->pcidev)->dev),
						 "lpfc%d: %s",
						 phba->brd_no, adaptermsg);
				} else {
					/* Unknown IOCB command */
					lpfc_printf_log(phba,
							KERN_ERR,
							LOG_SLI,
							"%d:0321 Unknown IOCB command "
							"Data: x%x x%x x%x x%x\n",
							phba->brd_no,
							irsp->ulpCommand,
							irsp->ulpStatus,
							irsp->ulpIoTag,
							irsp->ulpContext);
				}
			}

			if (free_saveq) {
				if (!list_empty(&saveq->list)) {
					list_for_each_entry_safe(rspiocbp,
								 next_iocb,
								 &saveq->list,
								 list) {
						list_add_tail(&rspiocbp->list,
							      lpfc_iocb_list);
					}
				}
				list_add_tail(&saveq->list, lpfc_iocb_list);
			}
		}

		/*
		 * If the port response put pointer has not been updated, sync
		 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
		 * response put pointer.
		 */
		if (pring->rspidx == portRspPut) {
			portRspPut = le32_to_cpu(pgp->rspPutInx);
		}
	} /* while (pring->rspidx != portRspPut) */

	if ((rspiocbp != NULL) && (mask & HA_R0RE_REQ)) {
		/* At least one response entry has been freed */
		pring->stats.iocb_rsp_full++;
		/* SET RxRE_RSP in Chip Att register */
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);
	}

	spin_unlock_irqrestore(phba->host->host_lock, iflag);
	return rc;
}
int
lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *iocb, *next_iocb;
	IOCB_t *icmd = NULL, *cmd = NULL;
	int errcnt = 0;
	uint16_t iotag;

	/* Error everything on txq and txcmplq
	 * First do the txq.
	 */
	spin_lock_irq(phba->host->host_lock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		list_del_init(&iocb->list);
		if (iocb->iocb_cmpl) {
			icmd = &iocb->iocb;
			icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			spin_unlock_irq(phba->host->host_lock);
			(iocb->iocb_cmpl) (phba, iocb, iocb);
			spin_lock_irq(phba->host->host_lock);
		} else {
			list_add_tail(&iocb->list, &phba->lpfc_iocb_list);
		}
	}
	INIT_LIST_HEAD(&(pring->txq));

	/* Next issue ABTS for everything on the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		cmd = &iocb->iocb;

		/*
		 * Immediate abort of IOCB, clear fast_lookup entry,
		 * if any, deque and call compl
		 */
		iotag = cmd->ulpIoTag;
		if (iotag && pring->fast_lookup &&
		    (iotag < pring->fast_iotag))
			pring->fast_lookup[iotag] = NULL;

		list_del_init(&iocb->list);
		pring->txcmplq_cnt--;

		if (iocb->iocb_cmpl) {
			cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			spin_unlock_irq(phba->host->host_lock);
			(iocb->iocb_cmpl) (phba, iocb, iocb);
			spin_lock_irq(phba->host->host_lock);
		} else {
			list_add_tail(&iocb->list, &phba->lpfc_iocb_list);
		}
	}

	INIT_LIST_HEAD(&pring->txcmplq);
	pring->txcmplq_cnt = 0;
	spin_unlock_irq(phba->host->host_lock);

	return errcnt;
}
/******************************************************************************
 * lpfc_sli_send_reset
 *
 * Note: After returning from this function, the HBA cannot be accessed for
 * 1 ms. Since we do not wish to delay in interrupt context, it is the
 * responsibility of the caller to perform the mdelay(1) and flush via readl().
 ******************************************************************************/
static int
lpfc_sli_send_reset(struct lpfc_hba * phba, uint16_t skip_post)
{
	MAILBOX_t *swpmb;
	volatile uint32_t word0;
	void __iomem *to_slim;
	unsigned long flags = 0;

	spin_lock_irqsave(phba->host->host_lock, flags);

	/* A board reset must use REAL SLIM. */
	phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;

	word0 = 0;
	swpmb = (MAILBOX_t *) & word0;
	swpmb->mbxCommand = MBX_RESTART;

	to_slim = phba->MBslimaddr;
	writel(*(uint32_t *) swpmb, to_slim);
	readl(to_slim); /* flush */

	/* Only skip post after fc_ffinit is completed */
	if (skip_post)
		word0 = 1;	/* This is really setting up word1 */
	else
		word0 = 0;	/* This is really setting up word1 */
	to_slim = phba->MBslimaddr + sizeof (uint32_t);
	writel(*(uint32_t *) swpmb, to_slim);
	readl(to_slim); /* flush */

	/* Turn off parity checking and serr during the physical reset */
	pci_read_config_word(phba->pcidev, PCI_COMMAND, &phba->pci_cfg_value);
	pci_write_config_word(phba->pcidev, PCI_COMMAND,
			      (phba->pci_cfg_value &
			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));

	writel(HC_INITFF, phba->HCregaddr);

	phba->hba_state = LPFC_INIT_START;
	spin_unlock_irqrestore(phba->host->host_lock, flags);

	return 0;
}
static void
lpfc_sli_brdreset(struct lpfc_hba * phba, uint16_t skip_post)
{
	struct lpfc_sli_ring *pring;
	int i;
	struct lpfc_dmabuf *mp, *next_mp;
	unsigned long flags = 0;

	lpfc_sli_send_reset(phba, skip_post);
	mdelay(1);

	spin_lock_irqsave(phba->host->host_lock, flags);
	/* Risk the write on flush case ie no delay after the readl */
	readl(phba->HCregaddr); /* flush */
	/* Now toggle INITFF bit set by lpfc_sli_send_reset */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Restore PCI cmd register */
	pci_write_config_word(phba->pcidev, PCI_COMMAND, phba->pci_cfg_value);

	/* perform board reset */
	phba->fc_eventTag = 0;
	phba->fc_prevDID = Mask_DID;

	/* Reset HBA */
	lpfc_printf_log(phba,
			KERN_INFO,
			LOG_SLI,
			"%d:0325 Reset HBA Data: x%x x%x x%x\n",
			phba->brd_no,
			phba->hba_state,
			phba->sli.sli_flag,
			skip_post);

	/* Initialize relevant SLI info */
	for (i = 0; i < phba->sli.num_rings; i++) {
		pring = &phba->sli.ring[i];
		pring->flag = 0;
		pring->rspidx = 0;
		pring->next_cmdidx = 0;
		pring->local_getidx = 0;
		pring->cmdidx = 0;
		pring->missbufcnt = 0;
	}
	spin_unlock_irqrestore(phba->host->host_lock, flags);

	spin_lock_irqsave(phba->host->host_lock, flags);
	/* Cleanup preposted buffers on the ELS ring */
	pring = &phba->sli.ring[LPFC_ELS_RING];
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		list_del(&mp->list);
		pring->postbufq_cnt--;
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
	spin_unlock_irqrestore(phba->host->host_lock, flags);

	for (i = 0; i < phba->sli.num_rings; i++)
		lpfc_sli_abort_iocb_ring(phba, &phba->sli.ring[i]);
}
static int
lpfc_sli_chipset_init(struct lpfc_hba *phba)
{
	uint32_t status, i = 0;

	/* Read the HBA Host Status Register */
	status = readl(phba->HSregaddr);

	/* Check status register to see what current state is */
	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {

		/* Check every 100ms for 5 retries, then every 500ms for 5, then
		 * every 2.5 sec for 5, then reset board and every 2.5 sec for
		 * 4.
		 */
		if (i++ >= 20) {
			/* Adapter failed to init, timeout, status reg
			   <status> */
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_INIT,
					"%d:0436 Adapter failed to init, "
					"timeout, status reg x%x\n",
					phba->brd_no,
					status);
			phba->hba_state = LPFC_HBA_ERROR;
			return -ETIMEDOUT;
		}

		/* Check to see if any errors occurred during init */
		if (status & HS_FFERM) {
			/* ERROR: During chipset initialization */
			/* Adapter failed to init, chipset, status reg
			   <status> */
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_INIT,
					"%d:0437 Adapter failed to init, "
					"chipset, status reg x%x\n",
					phba->brd_no,
					status);
			phba->hba_state = LPFC_HBA_ERROR;
			return -EIO;
		}

		/* Delays follow the retry schedule described above */
		if (i <= 5) {
			msleep(100);
		} else if (i <= 10) {
			msleep(500);
		} else {
			msleep(2500);
		}

		if (i == 15) {
			lpfc_sli_brdreset(phba, 0);
		}

		/* Read the HBA Host Status Register */
		status = readl(phba->HSregaddr);
	}

	/* Check to see if any errors occurred during init */
	if (status & HS_FFERM) {
		/* ERROR: During chipset initialization */
		/* Adapter failed to init, chipset, status reg <status> */
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"%d:0438 Adapter failed to init, chipset, "
				"status reg x%x\n",
				phba->brd_no,
				status);
		phba->hba_state = LPFC_HBA_ERROR;
		return -EIO;
	}

	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* setup host attn register */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	return 0;
}
int
lpfc_sli_hba_setup(struct lpfc_hba * phba)
{
	LPFC_MBOXQ_t *pmb;
	uint32_t resetcount = 0, rc = 0, done = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->hba_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	while (resetcount < 2 && !done) {
		phba->hba_state = 0;
		lpfc_sli_brdreset(phba, 0);
		rc = lpfc_sli_chipset_init(phba);
		if (rc)
			break;

		resetcount++;

		/* Call pre CONFIG_PORT mailbox command initialization.  A
		 * value of 0 means the call was successful.  Any other
		 * nonzero value is a failure, but if ERESTART is returned,
		 * the driver may reset the HBA and try again.
		 */
		rc = lpfc_config_port_prep(phba);
		if (rc == -ERESTART) {
			phba->hba_state = 0;
			continue;
		} else if (rc) {
			break;
		}

		phba->hba_state = LPFC_INIT_MBX_CMDS;
		lpfc_config_port(phba, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc == MBX_SUCCESS)
			done = 1;
		else {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
					"%d:0442 Adapter failed to init, mbxCmd x%x "
					"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
					phba->brd_no, pmb->mb.mbxCommand,
					pmb->mb.mbxStatus, 0);
			phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
		}
	}

	if (!done)
		goto lpfc_sli_hba_setup_error;

	rc = lpfc_sli_ring_map(phba, pmb);
	if (rc)
		goto lpfc_sli_hba_setup_error;

	phba->sli.sli_flag |= LPFC_PROCESS_LA;

	rc = lpfc_config_port_post(phba);
	if (rc)
		goto lpfc_sli_hba_setup_error;

	goto lpfc_sli_hba_setup_exit;

lpfc_sli_hba_setup_error:
	phba->hba_state = LPFC_HBA_ERROR;
lpfc_sli_hba_setup_exit:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}
static void
lpfc_mbox_abort(struct lpfc_hba * phba)
{
	LPFC_MBOXQ_t *pmbox;
	MAILBOX_t *mb;

	if (phba->sli.mbox_active) {
		del_timer_sync(&phba->sli.mbox_tmo);
		phba->work_hba_events &= ~WORKER_MBOX_TMO;
		pmbox = phba->sli.mbox_active;
		mb = &pmbox->mb;
		phba->sli.mbox_active = NULL;
		if (pmbox->mbox_cmpl) {
			mb->mbxStatus = MBX_NOT_FINISHED;
			(pmbox->mbox_cmpl) (phba, pmbox);
		}
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	}

	/* Abort all the non active mailbox commands. */
	spin_lock_irq(phba->host->host_lock);
	pmbox = lpfc_mbox_get(phba);
	while (pmbox) {
		mb = &pmbox->mb;
		if (pmbox->mbox_cmpl) {
			mb->mbxStatus = MBX_NOT_FINISHED;
			spin_unlock_irq(phba->host->host_lock);
			(pmbox->mbox_cmpl) (phba, pmbox);
			spin_lock_irq(phba->host->host_lock);
		}
		pmbox = lpfc_mbox_get(phba);
	}
	spin_unlock_irq(phba->host->host_lock);
}
/*! lpfc_mbox_timeout
 *
 * \param hba Pointer to per struct lpfc_hba structure
 * \param l1  Pointer to the driver's mailbox queue.
 *
 * \b Description:
 *
 * This routine handles mailbox timeout events at timer interrupt context.
 */
void
lpfc_mbox_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;
	spin_lock_irqsave(phba->host->host_lock, iflag);
	if (!(phba->work_hba_events & WORKER_MBOX_TMO)) {
		phba->work_hba_events |= WORKER_MBOX_TMO;
		if (phba->work_wait)
			wake_up(phba->work_wait);
	}
	spin_unlock_irqrestore(phba->host->host_lock, iflag);
}
void
lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmbox;
	MAILBOX_t *mb;

	spin_lock_irq(phba->host->host_lock);
	if (!(phba->work_hba_events & WORKER_MBOX_TMO)) {
		spin_unlock_irq(phba->host->host_lock);
		return;
	}

	phba->work_hba_events &= ~WORKER_MBOX_TMO;

	pmbox = phba->sli.mbox_active;
	mb = &pmbox->mb;

	/* Mbox cmd <mbxCommand> timeout */
	lpfc_printf_log(phba,
			KERN_ERR,
			LOG_MBOX | LOG_SLI,
			"%d:0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
			phba->brd_no,
			mb->mbxCommand,
			phba->hba_state,
			phba->sli.sli_flag,
			phba->sli.mbox_active);

	phba->sli.mbox_active = NULL;
	if (pmbox->mbox_cmpl) {
		mb->mbxStatus = MBX_NOT_FINISHED;
		spin_unlock_irq(phba->host->host_lock);
		(pmbox->mbox_cmpl) (phba, pmbox);
		spin_lock_irq(phba->host->host_lock);
	}
	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;

	spin_unlock_irq(phba->host->host_lock);
	lpfc_mbox_abort(phba);
}
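
/*
 * Issue a mailbox command in MBX_POLL or MBX_NOWAIT mode.  A typical
 * polled caller in this file (see lpfc_sli_ring_map above) follows this
 * pattern:
 *
 *	lpfc_config_ring(phba, i, pmb);
 *	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
 *	if (rc != MBX_SUCCESS)
 *		... treat the adapter as failed ...
 */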
int
lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
{
	MAILBOX_t *mb;
	struct lpfc_sli *psli;
	uint32_t status, evtctr;
	uint32_t ha_copy;
	int i;
	unsigned long drvr_flag = 0;
	volatile uint32_t word0, ldata;
	void __iomem *to_slim;

	psli = &phba->sli;

	spin_lock_irqsave(phba->host->host_lock, drvr_flag);

	mb = &pmbox->mb;
	status = MBX_SUCCESS;

	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		/* Polling for a mbox command when another one is already active
		 * is not allowed in SLI. Also, the driver must have established
		 * SLI2 mode to queue and process multiple mbox commands.
		 */

		if (flag & MBX_POLL) {
			spin_unlock_irqrestore(phba->host->host_lock,
					       drvr_flag);

			/* Mbox command <mbxCommand> cannot issue */
			LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag);
			return (MBX_NOT_FINISHED);
		}

		if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
			spin_unlock_irqrestore(phba->host->host_lock,
					       drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag);
			return (MBX_NOT_FINISHED);
		}

		/* Handle STOP IOCB processing flag. This is only meaningful
		 * if we are not polling for mbox completion.
		 */
		if (flag & MBX_STOP_IOCB) {
			flag &= ~MBX_STOP_IOCB;
			/* Now flag each ring */
			for (i = 0; i < psli->num_rings; i++) {
				/* If the ring is active, flag it */
				if (psli->ring[i].cmdringaddr) {
					psli->ring[i].flag |=
						LPFC_STOP_IOCB_MBX;
				}
			}
		}

		/* Another mailbox command is still being processed, queue this
		 * command to be processed later.
		 */
		lpfc_mbox_put(phba, pmbox);

		/* Mbox cmd issue - BUSY */
		lpfc_printf_log(phba,
				KERN_INFO,
				LOG_MBOX | LOG_SLI,
				"%d:0308 Mbox cmd issue - BUSY Data: x%x x%x x%x x%x\n",
				phba->brd_no,
				mb->mbxCommand,
				phba->hba_state,
				psli->sli_flag,
				flag);

		psli->slistat.mbox_busy++;
		spin_unlock_irqrestore(phba->host->host_lock,
				       drvr_flag);

		return (MBX_BUSY);
	}

	/* Handle STOP IOCB processing flag. This is only meaningful
	 * if we are not polling for mbox completion.
	 */
	if (flag & MBX_STOP_IOCB) {
		flag &= ~MBX_STOP_IOCB;
		if (flag == MBX_NOWAIT) {
			/* Now flag each ring */
			for (i = 0; i < psli->num_rings; i++) {
				/* If the ring is active, flag it */
				if (psli->ring[i].cmdringaddr) {
					psli->ring[i].flag |=
						LPFC_STOP_IOCB_MBX;
				}
			}
		}
	}

	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* If we are not polling, we MUST be in SLI2 mode */
	if (flag != MBX_POLL) {
		if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
			psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
			spin_unlock_irqrestore(phba->host->host_lock,
					       drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag);
			return (MBX_NOT_FINISHED);
		}
		/* timeout active mbox command */
		mod_timer(&psli->mbox_tmo, jiffies + HZ * LPFC_MBOX_TMO);
	}

	/* Mailbox cmd <cmd> issue */
	lpfc_printf_log(phba,
			KERN_INFO,
			LOG_MBOX | LOG_SLI,
			"%d:0309 Mailbox cmd x%x issue Data: x%x x%x x%x\n",
			phba->brd_no,
			mb->mbxCommand,
			phba->hba_state,
			psli->sli_flag,
			flag);

	psli->slistat.mbox_cmd++;
	evtctr = psli->slistat.mbox_event;

	/* next set own bit for the adapter and copy over command word */
	mb->mbxOwner = OWN_CHIP;

	if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
		/* First copy command data to host SLIM area */
		lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, MAILBOX_CMD_SIZE);
	} else {
		if (mb->mbxCommand == MBX_CONFIG_PORT) {
			/* copy command data into host mbox for cmpl */
			lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx,
					      MAILBOX_CMD_SIZE);
		}

		/* First copy mbox command data to HBA SLIM, skip past first
		   word */
		to_slim = phba->MBslimaddr + sizeof (uint32_t);
		lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
				    MAILBOX_CMD_SIZE - sizeof (uint32_t));

		/* Next copy over first word, with mbxOwner set */
		ldata = *((volatile uint32_t *)mb);
		to_slim = phba->MBslimaddr;
		writel(ldata, to_slim);
		readl(to_slim); /* flush */

		if (mb->mbxCommand == MBX_CONFIG_PORT) {
			/* switch over to host mailbox */
			psli->sli_flag |= LPFC_SLI2_ACTIVE;
		}
	}

	/* interrupt board to do it right away */
	writel(CA_MBATT, phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	switch (flag) {
	case MBX_NOWAIT:
		/* Don't wait for it to finish, just return */
		psli->mbox_active = pmbox;
		break;

	case MBX_POLL:
		i = 0;
		psli->mbox_active = NULL;
		if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
			/* First read mbox status word */
			word0 = *((volatile uint32_t *)&phba->slim2p->mbx);
			word0 = le32_to_cpu(word0);
		} else {
			/* First read mbox status word */
			word0 = readl(phba->MBslimaddr);
		}

		/* Read the HBA Host Attention Register */
		ha_copy = readl(phba->HAregaddr);

		/* Wait for command to complete */
		while (((word0 & OWN_CHIP) == OWN_CHIP)
		       || !(ha_copy & HA_MBATT)) {
			if (i++ >= 100) {
				psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
				spin_unlock_irqrestore(phba->host->host_lock,
						       drvr_flag);
				return (MBX_NOT_FINISHED);
			}

			/* Check if we took a mbox interrupt while we were
			   polling */
			if (((word0 & OWN_CHIP) != OWN_CHIP)
			    && (evtctr != psli->slistat.mbox_event))
				break;

			spin_unlock_irqrestore(phba->host->host_lock,
					       drvr_flag);

			/* Can be in interrupt context, do not sleep */
			/* (or might be called with interrupts disabled) */
			mdelay(i);

			spin_lock_irqsave(phba->host->host_lock, drvr_flag);

			if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
				/* First copy command data */
				word0 = *((volatile uint32_t *)
					  &phba->slim2p->mbx);
				word0 = le32_to_cpu(word0);
				if (mb->mbxCommand == MBX_CONFIG_PORT) {
					MAILBOX_t *slimmb;
					volatile uint32_t slimword0;
					/* Check real SLIM for any errors */
					slimword0 = readl(phba->MBslimaddr);
					slimmb = (MAILBOX_t *) & slimword0;
					if (((slimword0 & OWN_CHIP) != OWN_CHIP)
					    && slimmb->mbxStatus) {
						psli->sli_flag &=
							~LPFC_SLI2_ACTIVE;
						word0 = slimword0;
					}
				}
			} else {
				/* First copy command data */
				word0 = readl(phba->MBslimaddr);
			}
			/* Read the HBA Host Attention Register */
			ha_copy = readl(phba->HAregaddr);
		}

		if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
			/* copy results back to user */
			lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb,
					      MAILBOX_CMD_SIZE);
		} else {
			/* First copy command data */
			lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
					      MAILBOX_CMD_SIZE);
			if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
			    pmbox->context2) {
				lpfc_memcpy_from_slim((void *)pmbox->context2,
						      phba->MBslimaddr + DMP_RSP_OFFSET,
						      mb->un.varDmp.word_cnt);
			}
		}

		writel(HA_MBATT, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */

		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		status = mb->mbxStatus;
		break;
	}

	spin_unlock_irqrestore(phba->host->host_lock, drvr_flag);
	return (status);
}
static int
lpfc_sli_ringtx_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
		    struct lpfc_iocbq * piocb)
{
	/* Insert the caller's iocb in the txq tail for later processing. */
	list_add_tail(&piocb->list, &pring->txq);
	pring->txq_cnt++;
	return (0);
}
static struct lpfc_iocbq *
lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		   struct lpfc_iocbq **piocb)
{
	struct lpfc_iocbq *nextiocb;

	nextiocb = lpfc_sli_ringtx_get(phba, pring);
	if (!nextiocb) {
		nextiocb = *piocb;
		*piocb = NULL;
	}

	return nextiocb;
}
int
lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		    struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_iocbq *nextiocb;
	IOCB_t *iocb;

	/*
	 * We should never get an IOCB if we are in a < LINK_DOWN state
	 */
	if (unlikely(phba->hba_state < LPFC_LINK_DOWN))
		return IOCB_ERROR;

	/*
	 * Check to see if we are blocking IOCB processing because of an
	 * outstanding mbox command.
	 */
	if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX))
		goto iocb_busy;

	if (unlikely(phba->hba_state == LPFC_LINK_DOWN)) {
		/*
		 * Only CREATE_XRI, CLOSE_XRI, ABORT_XRI, and QUE_RING_BUF
		 * can be issued if the link is not up.
		 */
		switch (piocb->iocb.ulpCommand) {
		case CMD_QUE_RING_BUF_CN:
		case CMD_QUE_RING_BUF64_CN:
			/*
			 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
			 * completion, iocb_cmpl MUST be 0.
			 */
			if (piocb->iocb_cmpl)
				piocb->iocb_cmpl = NULL;
			/*FALLTHROUGH*/
		case CMD_CREATE_XRI_CR:
			break;
		default:
			goto iocb_busy;
		}

	/*
	 * For FCP commands, we must be in a state where we can process link
	 * attention events.
	 */
	} else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
			    !(phba->sli.sli_flag & LPFC_PROCESS_LA)))
		goto iocb_busy;

	/*
	 * Check to see if this is a high priority command.
	 * If so bypass tx queue processing.
	 */
	if (unlikely((flag & SLI_IOCB_HIGH_PRIORITY) &&
		     (iocb = lpfc_sli_next_iocb_slot(phba, pring)))) {
		lpfc_sli_submit_iocb(phba, pring, iocb, piocb);
		piocb = NULL;
	}

	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

	if (iocb)
		lpfc_sli_update_ring(phba, pring);
	else
		lpfc_sli_update_full_ring(phba, pring);

	if (!piocb)
		return IOCB_SUCCESS;

	goto out_busy;

 iocb_busy:
	pring->stats.iocb_cmd_delay++;

 out_busy:
	if (!(flag & SLI_IOCB_RET_IOCB)) {
		lpfc_sli_ringtx_put(phba, pring, piocb);
		return IOCB_SUCCESS;
	}

	return IOCB_BUSY;
}
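
/*
 * Set per-ring command/response entry counts and, for the ELS ring, the
 * four rctl/type masks that route unsolicited ELS and CT events to
 * their handlers.
 */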
int
lpfc_sli_setup(struct lpfc_hba *phba)
{
	int i, totiocb = 0;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	psli->num_rings = MAX_CONFIGURED_RINGS;
	psli->fcp_ring = LPFC_FCP_RING;
	psli->next_ring = LPFC_FCP_NEXT_RING;
	psli->ip_ring = LPFC_IP_RING;

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		switch (i) {
		case LPFC_FCP_RING:	/* ring 0 - FCP */
			/* numCiocb and numRiocb are used in config_port */
			pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
			pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
			pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
			pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
			pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
			pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
			pring->iotag_ctr = 0;
			pring->iotag_max =
				(phba->cfg_hba_queue_depth * 2);
			pring->fast_iotag = pring->iotag_max;
			pring->num_mask = 0;
			break;
		case LPFC_IP_RING:	/* ring 1 - IP */
			/* numCiocb and numRiocb are used in config_port */
			pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
			pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
			pring->num_mask = 0;
			break;
		case LPFC_ELS_RING:	/* ring 2 - ELS / CT */
			/* numCiocb and numRiocb are used in config_port */
			pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
			pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
			pring->fast_iotag = 0;
			pring->iotag_ctr = 0;
			pring->iotag_max = 4096;
			pring->num_mask = 4;
			pring->prt[0].profile = 0;	/* Mask 0 */
			pring->prt[0].rctl = FC_ELS_REQ;
			pring->prt[0].type = FC_ELS_DATA;
			pring->prt[0].lpfc_sli_rcv_unsol_event =
				lpfc_els_unsol_event;
			pring->prt[1].profile = 0;	/* Mask 1 */
			pring->prt[1].rctl = FC_ELS_RSP;
			pring->prt[1].type = FC_ELS_DATA;
			pring->prt[1].lpfc_sli_rcv_unsol_event =
				lpfc_els_unsol_event;
			pring->prt[2].profile = 0;	/* Mask 2 */
			/* NameServer Inquiry */
			pring->prt[2].rctl = FC_UNSOL_CTL;
			pring->prt[2].type = FC_COMMON_TRANSPORT_ULP;
			pring->prt[2].lpfc_sli_rcv_unsol_event =
				lpfc_ct_unsol_event;
			pring->prt[3].profile = 0;	/* Mask 3 */
			/* NameServer response */
			pring->prt[3].rctl = FC_SOL_CTL;
			pring->prt[3].type = FC_COMMON_TRANSPORT_ULP;
			pring->prt[3].lpfc_sli_rcv_unsol_event =
				lpfc_ct_unsol_event;
			break;
		}
		totiocb += (pring->numCiocb + pring->numRiocb);
	}
	if (totiocb > MAX_SLI2_IOCB) {
		/* Too many cmd / rsp ring entries in SLI2 SLIM */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"%d:0462 Too many cmd / rsp ring entries in "
				"SLI2 SLIM Data: x%x x%x\n",
				phba->brd_no, totiocb, MAX_SLI2_IOCB);
	}

	return 0;
}
int
lpfc_sli_queue_setup(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	int i, cnt;

	psli = &phba->sli;
	spin_lock_irq(phba->host->host_lock);
	INIT_LIST_HEAD(&psli->mboxq);
	/* Initialize list headers for txq and txcmplq as double linked lists */
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		pring->ringno = i;
		pring->next_cmdidx = 0;
		pring->local_getidx = 0;
		pring->cmdidx = 0;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		INIT_LIST_HEAD(&pring->iocb_continueq);
		INIT_LIST_HEAD(&pring->postbufq);
		cnt = pring->fast_iotag;
		spin_unlock_irq(phba->host->host_lock);
		if (cnt) {
			pring->fast_lookup =
				kmalloc(cnt * sizeof (struct lpfc_iocbq *),
					GFP_KERNEL);
			if (pring->fast_lookup == NULL) {
				return (0);
			}
			memset((char *)pring->fast_lookup, 0,
			       cnt * sizeof (struct lpfc_iocbq *));
		}
		spin_lock_irq(phba->host->host_lock);
	}
	spin_unlock_irq(phba->host->host_lock);
	return (1);
}
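
/*
 * Take the SLI layer down: fail everything on the txqs, free the
 * fast_lookup tables, complete or flush all mailbox commands, and reset
 * the board unless it is already in an error state.
 */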
int
lpfc_sli_hba_down(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	LPFC_MBOXQ_t *pmb;
	struct lpfc_iocbq *iocb, *next_iocb;
	IOCB_t *icmd = NULL;
	int i;
	unsigned long flags = 0;

	psli = &phba->sli;
	lpfc_hba_down_prep(phba);

	spin_lock_irqsave(phba->host->host_lock, flags);

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		pring->flag |= LPFC_DEFERRED_RING_EVENT;

		/*
		 * Error everything on the txq since these iocbs have not been
		 * given to the FW yet.
		 */
		pring->txq_cnt = 0;

		list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
			list_del_init(&iocb->list);
			if (iocb->iocb_cmpl) {
				icmd = &iocb->iocb;
				icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
				icmd->un.ulpWord[4] = IOERR_SLI_DOWN;
				spin_unlock_irqrestore(phba->host->host_lock,
						       flags);
				(iocb->iocb_cmpl) (phba, iocb, iocb);
				spin_lock_irqsave(phba->host->host_lock, flags);
			} else {
				list_add_tail(&iocb->list,
					      &phba->lpfc_iocb_list);
			}
		}

		INIT_LIST_HEAD(&(pring->txq));

		if (pring->fast_lookup) {
			kfree(pring->fast_lookup);
			pring->fast_lookup = NULL;
		}
	}

	spin_unlock_irqrestore(phba->host->host_lock, flags);

	/* Return any active mbox cmds */
	del_timer_sync(&psli->mbox_tmo);
	spin_lock_irqsave(phba->host->host_lock, flags);
	phba->work_hba_events &= ~WORKER_MBOX_TMO;
	if (psli->mbox_active) {
		pmb = psli->mbox_active;
		pmb->mb.mbxStatus = MBX_NOT_FINISHED;
		if (pmb->mbox_cmpl) {
			spin_unlock_irqrestore(phba->host->host_lock, flags);
			pmb->mbox_cmpl(phba,pmb);
			spin_lock_irqsave(phba->host->host_lock, flags);
		}
	}
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	psli->mbox_active = NULL;

	/* Return any pending mbox cmds */
	while ((pmb = lpfc_mbox_get(phba)) != NULL) {
		pmb->mb.mbxStatus = MBX_NOT_FINISHED;
		if (pmb->mbox_cmpl) {
			spin_unlock_irqrestore(phba->host->host_lock, flags);
			pmb->mbox_cmpl(phba,pmb);
			spin_lock_irqsave(phba->host->host_lock, flags);
		}
	}

	INIT_LIST_HEAD(&psli->mboxq);

	spin_unlock_irqrestore(phba->host->host_lock, flags);

	/*
	 * Provided the hba is not in an error state, reset it.  It is not
	 * capable of IO anymore.
	 */
	if (phba->hba_state != LPFC_HBA_ERROR) {
		phba->hba_state = LPFC_INIT_START;
		lpfc_sli_brdreset(phba, 1);
	}

	return 1;
}
void
lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
		ldata = *src;
		ldata = le32_to_cpu(ldata);
		*dest = ldata;
		src++;
		dest++;
	}
}
int
lpfc_sli_ringpostbuf_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
			 struct lpfc_dmabuf * mp)
{
	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
	   later */
	list_add_tail(&mp->list, &pring->postbufq);
	pring->postbufq_cnt++;
	return 0;
}
struct lpfc_dmabuf *
lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 dma_addr_t phys)
{
	struct lpfc_dmabuf *mp, *next_mp;
	struct list_head *slp = &pring->postbufq;

	/* Search postbufq, from the beginning, looking for a match on phys */
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		if (mp->phys == phys) {
			list_del_init(&mp->list);
			pring->postbufq_cnt--;
			return mp;
		}
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"%d:0410 Cannot find virtual addr for mapped buf on "
			"ring %d Data x%llx x%p x%p x%x\n",
			phba->brd_no, pring->ringno, (unsigned long long)phys,
			slp->next, slp->prev, pring->postbufq_cnt);
	return NULL;
}
static void
lpfc_sli_abort_elsreq_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
			   struct lpfc_iocbq * rspiocb)
{
	struct lpfc_dmabuf *buf_ptr, *buf_ptr1;
	/* Free the resources associated with the ELS_REQUEST64 IOCB the driver
	 * just aborted.
	 * In this case, context2 = cmd, context2->next = rsp, context3 = bpl.
	 */
	if (cmdiocb->context2) {
		buf_ptr1 = (struct lpfc_dmabuf *) cmdiocb->context2;

		/* Free the response IOCB before completing the abort
		   command. */
		list_remove_head((&buf_ptr1->list), buf_ptr,
				 struct lpfc_dmabuf, list);
		if (buf_ptr) {
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
		lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
		kfree(buf_ptr1);
	}

	if (cmdiocb->context3) {
		buf_ptr = (struct lpfc_dmabuf *) cmdiocb->context3;
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
	}

	list_add_tail(&cmdiocb->list, &phba->lpfc_iocb_list);
	return;
}
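
/*
 * lpfc_sli_issue_abort_iotag32: build and issue an ABORT_MXRI64_CN
 * iocb to abort the exchange identified by cmdiocb's 32-bit I/O tag.
 * Only CMD_ELS_REQUEST64_CR commands are aborted here; for any other
 * command type the abort iocb is returned to the free list unused.
 * Returns 1 if the abort was issued, else 0.
 */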
int
lpfc_sli_issue_abort_iotag32(struct lpfc_hba * phba,
			     struct lpfc_sli_ring * pring,
			     struct lpfc_iocbq * cmdiocb)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *abtsiocbp = NULL;
	IOCB_t *icmd = NULL;
	IOCB_t *iabt = NULL;

	/* issue ABTS for this IOCB based on iotag */
	list_remove_head(lpfc_iocb_list, abtsiocbp, struct lpfc_iocbq, list);
	if (abtsiocbp == NULL)
		return 0;
	memset(abtsiocbp, 0, sizeof (struct lpfc_iocbq));

	iabt = &abtsiocbp->iocb;
	icmd = &cmdiocb->iocb;
	switch (icmd->ulpCommand) {
	case CMD_ELS_REQUEST64_CR:
		/* Even though we abort the ELS command, the firmware may
		 * access the BPL or other resources before it processes our
		 * ABORT_MXRI64.  Thus we must delay reusing the cmdiocb
		 * resources till the actual abort request completes.
		 */
		abtsiocbp->context1 = (void *)((unsigned long)icmd->ulpCommand);
		abtsiocbp->context2 = cmdiocb->context2;
		abtsiocbp->context3 = cmdiocb->context3;
		cmdiocb->context2 = NULL;
		cmdiocb->context3 = NULL;
		abtsiocbp->iocb_cmpl = lpfc_sli_abort_elsreq_cmpl;
		break;
	default:
		list_add_tail(&abtsiocbp->list, lpfc_iocb_list);
		return 0;
	}

	iabt->un.amxri.abortType = ABORT_TYPE_ABTS;
	iabt->un.amxri.iotag32 = icmd->un.elsreq64.bdl.ulpIoTag32;

	iabt->ulpLe = 1;
	iabt->ulpClass = CLASS3;
	iabt->ulpCommand = CMD_ABORT_MXRI64_CN;

	if (lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0) == IOCB_ERROR) {
		list_add_tail(&abtsiocbp->list, lpfc_iocb_list);
		return 0;
	}

	return 1;
}
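
/*
 * lpfc_sli_validate_iocb_cmd: decide whether an active FCP iocb
 * matches the caller's search context.  Returns 0 on a match:
 * LPFC_CTX_LUN requires both target and LUN to match, LPFC_CTX_TGT
 * only the target, LPFC_CTX_CTX the iocb's ulpContext, and
 * LPFC_CTX_HOST matches any command.  Returns nonzero otherwise.
 */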
static int
lpfc_sli_validate_iocb_cmd(struct lpfc_scsi_buf *lpfc_cmd, uint16_t tgt_id,
			   uint64_t lun_id, struct lpfc_iocbq *iocb,
			   uint32_t ctx, lpfc_ctx_cmd ctx_cmd)
{
	int rc = 1;

	if (lpfc_cmd == NULL)
		return rc;

	switch (ctx_cmd) {
	case LPFC_CTX_LUN:
		if ((lpfc_cmd->pCmd->device->id == tgt_id) &&
		    (lpfc_cmd->pCmd->device->lun == lun_id))
			rc = 0;
		break;
	case LPFC_CTX_TGT:
		if (lpfc_cmd->pCmd->device->id == tgt_id)
			rc = 0;
		break;
	case LPFC_CTX_CTX:
		if (iocb->iocb.ulpContext == ctx)
			rc = 0;
		break;
	case LPFC_CTX_HOST:
		rc = 0;
		break;
	default:
		printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
			__FUNCTION__, ctx_cmd);
		break;
	}

	return rc;
}
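
/*
 * lpfc_sli_sum_iocb: walk the ring's txcmplq and count the active FCP
 * commands matching the given target/LUN/context criteria.  Callers
 * can poll this count to wait for outstanding I/O to drain after an
 * abort or reset.
 */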
int
lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		  uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd)
{
	struct lpfc_iocbq *iocb, *next_iocb;
	IOCB_t *cmd = NULL;
	struct lpfc_scsi_buf *lpfc_cmd;
	int sum = 0, ret_val = 0;

	/* Next check the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		cmd = &iocb->iocb;

		/* Must be a FCP command */
		if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
		    (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
		    (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
			continue;
		}

		/* context1 MUST be a struct lpfc_scsi_buf */
		lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
		ret_val = lpfc_sli_validate_iocb_cmd(lpfc_cmd, tgt_id, lun_id,
						     NULL, 0, ctx_cmd);
		if (ret_val != 0)
			continue;
		sum++;
	}
	return sum;
}
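
/*
 * lpfc_sli_abort_fcp_cmpl: completion handler for the abort iocbs
 * issued by lpfc_sli_abort_iocb below; it simply returns the abort
 * iocb to the driver's free list under the host lock.
 */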
void
lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
			struct lpfc_iocbq * rspiocb)
{
	spin_lock_irq(phba->host->host_lock);
	list_add_tail(&cmdiocb->list, &phba->lpfc_iocb_list);
	spin_unlock_irq(phba->host->host_lock);
	return;
}
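
/*
 * lpfc_sli_abort_iocb: walk the ring's txcmplq and issue an abort for
 * every active FCP command matching the target/LUN/context criteria.
 * ABORT_XRI_CN is used while the link is up so the exchange is aborted
 * on the wire; CLOSE_XRI_CN is used otherwise to clean up the exchange
 * locally.  Returns the number of commands that could not be aborted.
 */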
int
lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		    uint16_t tgt_id, uint64_t lun_id, uint32_t ctx,
		    lpfc_ctx_cmd abort_cmd)
{
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_iocbq *abtsiocb = NULL;
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	IOCB_t *cmd = NULL;
	struct lpfc_scsi_buf *lpfc_cmd;
	int errcnt = 0, ret_val = 0;

	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		cmd = &iocb->iocb;

		/* Must be a FCP command */
		if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
		    (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
		    (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
			continue;
		}

		/* context1 MUST be a struct lpfc_scsi_buf */
		lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
		ret_val = lpfc_sli_validate_iocb_cmd(lpfc_cmd, tgt_id, lun_id,
						     iocb, ctx, abort_cmd);
		if (ret_val != 0)
			continue;

		/* issue ABTS for this IOCB based on iotag */
		list_remove_head(lpfc_iocb_list, abtsiocb, struct lpfc_iocbq,
				 list);
		if (abtsiocb == NULL) {
			errcnt++;
			continue;
		}

		memset(abtsiocb, 0, sizeof (struct lpfc_iocbq));

		abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
		abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
		abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
		abtsiocb->iocb.ulpLe = 1;
		abtsiocb->iocb.ulpClass = cmd->ulpClass;

		if (phba->hba_state >= LPFC_LINK_UP)
			abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
		else
			abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;

		/* Setup callback routine and issue the command. */
		abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
		ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0);
		if (ret_val == IOCB_ERROR) {
			list_add_tail(&abtsiocb->list, lpfc_iocb_list);
			errcnt++;
			continue;
		}
	}

	return errcnt;
}
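
/*
 * lpfc_sli_wake_iocb_high_priority: completion handler for high
 * priority iocbs.  Copies the response into the waiter's buffer (if
 * one was supplied in context2) and sets LPFC_IO_HIPRI, the flag the
 * polling loop in lpfc_sli_issue_iocb_wait_high_priority watches for.
 */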
static void
lpfc_sli_wake_iocb_high_priority(struct lpfc_hba * phba,
				 struct lpfc_iocbq * queue1,
				 struct lpfc_iocbq * queue2)
{
	if (queue1->context2 && queue2)
		memcpy(queue1->context2, queue2, sizeof (struct lpfc_iocbq));

	/* The waiter is looking for the LPFC_IO_HIPRI bit to be set
	   as a signal to wake up */
	queue1->iocb_flag |= LPFC_IO_HIPRI;
	return;
}
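
/*
 * lpfc_sli_issue_iocb_wait_high_priority: issue an iocb out-of-band
 * with SLI_IOCB_HIGH_PRIORITY and synchronously poll for its
 * completion instead of sleeping on a wait queue.  The caller holds
 * the host lock on entry; it is dropped around the polling loop so
 * the interrupt handler can complete the iocb.  Returns IOCB_SUCCESS
 * once the completion handler sets LPFC_IO_HIPRI, or IOCB_ERROR on a
 * bad argument, an issue failure, or a timeout.
 */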
int
lpfc_sli_issue_iocb_wait_high_priority(struct lpfc_hba * phba,
				       struct lpfc_sli_ring * pring,
				       struct lpfc_iocbq * piocb,
				       uint32_t flag,
				       struct lpfc_iocbq * prspiocbq,
				       uint32_t timeout)
{
	int j, delay_time, retval = IOCB_ERROR;

	/* The caller must leave context1 empty. */
	if (piocb->context_un.hipri_wait_queue != 0) {
		return IOCB_ERROR;
	}

	/*
	 * If the caller has provided a response iocbq buffer, context2 must
	 * be NULL or it's an error.
	 */
	if (prspiocbq && piocb->context2) {
		return IOCB_ERROR;
	}

	piocb->context2 = prspiocbq;

	/* Setup callback routine and issue the command. */
	piocb->iocb_cmpl = lpfc_sli_wake_iocb_high_priority;
	retval = lpfc_sli_issue_iocb(phba, pring, piocb,
				     flag | SLI_IOCB_HIGH_PRIORITY);
	if (retval != IOCB_SUCCESS) {
		piocb->context2 = NULL;
		return IOCB_ERROR;
	}

	/*
	 * This high-priority iocb was sent out-of-band.  Poll for its
	 * completion rather than wait for a signal.  Note that the host_lock
	 * is held by the midlayer and must be released here to allow the
	 * interrupt handlers to complete the IO and signal this routine via
	 * the iocb_flag.
	 * Also, the delay_time is computed to be one second longer than
	 * the scsi command timeout to give the FW time to abort on
	 * timeout rather than the driver just giving up.  Typically,
	 * the midlayer does not specify a time for this command so the
	 * driver is free to enforce its own timeout.  The total wait of
	 * (timeout + 1) seconds is split evenly across the 64 polling
	 * passes below: ">> 6" divides the millisecond total by 64.
	 */

	delay_time = ((timeout + 1) * 1000) >> 6;
	retval = IOCB_ERROR;
	spin_unlock_irq(phba->host->host_lock);
	for (j = 0; j < 64; j++) {
		msleep(delay_time);
		if (piocb->iocb_flag & LPFC_IO_HIPRI) {
			piocb->iocb_flag &= ~LPFC_IO_HIPRI;
			retval = IOCB_SUCCESS;
			break;
		}
	}

	spin_lock_irq(phba->host->host_lock);
	piocb->context2 = NULL;
	return retval;
}
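
/*
 * lpfc_sli_issue_mbox_wait: synchronous wrapper around
 * lpfc_sli_issue_mbox().  Installs lpfc_sli_wake_mbox_wait as the
 * completion handler, issues the mailbox command with MBX_NOWAIT, and
 * sleeps on a local wait queue until the completion wakes it or the
 * timeout (in seconds) expires.  The wait-queue head is passed to the
 * wake function through context1, which is why the caller must leave
 * context1 empty.  A caller might use it roughly like this (sketch;
 * assumes the usual mailbox prep helpers such as lpfc_read_config()):
 *
 *	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	lpfc_read_config(phba, pmb);
 *	if (lpfc_sli_issue_mbox_wait(phba, pmb, 30) != MBX_SUCCESS)
 *		(handle MBX_TIMEOUT / MBX_NOT_FINISHED here)
 */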
int
lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD(done_q);
	DECLARE_WAITQUEUE(wq_entry, current);
	uint32_t timeleft = 0;
	int retval;

	/* The caller must leave context1 empty. */
	if (pmboxq->context1 != 0) {
		return (MBX_NOT_FINISHED);
	}

	/* setup wake call as mailbox completion callback */
	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
	/* setup context field to pass wait_queue pointer to wake function */
	pmboxq->context1 = &done_q;

	/* start to sleep before we wait, to avoid races */
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&done_q, &wq_entry);

	/* now issue the command */
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
		timeleft = schedule_timeout(timeout * HZ);
		pmboxq->context1 = NULL;
		/* if schedule_timeout returns 0, we timed out and were not
		   woken up */
		if (timeleft == 0) {
			retval = MBX_TIMEOUT;
		} else {
			retval = MBX_SUCCESS;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&done_q, &wq_entry);
	return retval;
}
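
/*
 * lpfc_intr_handler: top-level interrupt handler for the HBA.  Reads
 * and clears the Host Attention register, defers link attention, slow
 * ring, mailbox, and error attention events to the worker thread, and
 * services FCP ring completions directly in interrupt context via the
 * fast-path handler.
 */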
irqreturn_t
lpfc_intr_handler(int irq, void *dev_id, struct pt_regs * regs)
{
	struct lpfc_hba *phba;
	uint32_t ha_copy;
	uint32_t work_ha_copy;
	unsigned long status;
	uint32_t control;
	int i;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	phba->sli.slistat.sli_intr++;

	/*
	 * Call the HBA to see if it is interrupting.  If not, don't claim
	 * the interrupt.
	 */

	/* Ignore all interrupts during initialization. */
	if (unlikely(phba->hba_state < LPFC_LINK_DOWN))
		return IRQ_NONE;

	/*
	 * Read host attention register to determine interrupt source.
	 * Clear Attention Sources, except Error Attention (to
	 * preserve status) and Link Attention.
	 */
	spin_lock(phba->host->host_lock);
	ha_copy = readl(phba->HAregaddr);
	writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock(phba->host->host_lock);

	if (unlikely(!ha_copy))
		return IRQ_NONE;

	work_ha_copy = ha_copy & phba->work_ha_mask;

	if (unlikely(work_ha_copy)) {
		if (work_ha_copy & HA_LATT) {
			if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
				/*
				 * Turn off Link Attention interrupts
				 * until CLEAR_LA done
				 */
				spin_lock(phba->host->host_lock);
				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
				control = readl(phba->HCregaddr);
				control &= ~HC_LAINT_ENA;
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
				spin_unlock(phba->host->host_lock);
			}
			else
				work_ha_copy &= ~HA_LATT;
		}

		if (work_ha_copy & ~(HA_ERATT|HA_MBATT|HA_LATT)) {
			for (i = 0; i < phba->sli.num_rings; i++) {
				if (work_ha_copy & (HA_RXATT << (4*i))) {
					/*
					 * Turn off Slow Rings interrupts
					 */
					spin_lock(phba->host->host_lock);
					control = readl(phba->HCregaddr);
					control &= ~(HC_R0INT_ENA << i);
					writel(control, phba->HCregaddr);
					readl(phba->HCregaddr); /* flush */
					spin_unlock(phba->host->host_lock);
				}
			}
		}

		if (work_ha_copy & HA_ERATT) {
			phba->hba_state = LPFC_HBA_ERROR;
			/*
			 * There was a link/board error.  Read the
			 * status register to retrieve the error event
			 * and process it.
			 */
			phba->sli.slistat.err_attn_event++;
			/* Save status info */
			phba->work_hs = readl(phba->HSregaddr);
			phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
			phba->work_status[1] = readl(phba->MBslimaddr + 0xac);

			/* Clear Chip error bit */
			writel(HA_ERATT, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */

			/*
			 * Resetting the HBA is the only reliable way
			 * to shut down interrupts when there is a
			 * board error.
			 */
			lpfc_sli_send_reset(phba, phba->hba_state);
		}

		spin_lock(phba->host->host_lock);
		phba->work_ha |= work_ha_copy;
		if (phba->work_wait)
			wake_up(phba->work_wait);
		spin_unlock(phba->host->host_lock);
	}

	ha_copy &= ~(phba->work_ha_mask);

	/*
	 * Process all events on FCP ring.  Take the optimized path for
	 * FCP IO.  Any other IO is slow path and is handled by
	 * the worker thread.
	 */
	status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status >>= (4*LPFC_FCP_RING);
	if (status & HA_RXATT)
		lpfc_sli_handle_fast_ring_event(phba,
						&phba->sli.ring[LPFC_FCP_RING],
						status);

	return IRQ_HANDLED;

} /* lpfc_intr_handler */