/*******************************************************************
 * This file is part of the Emulex Linux Device Driver for         *
 * Fibre Channel Host Bus Adapters.                                *
 * Copyright (C) 2004-2005 Emulex.  All rights reserved.           *
 * EMULEX and SLI are trademarks of Emulex.                        *
 * www.emulex.com                                                  *
 * Portions Copyright (C) 2004-2005 Christoph Hellwig              *
 *                                                                 *
 * This program is free software; you can redistribute it and/or   *
 * modify it under the terms of version 2 of the GNU General       *
 * Public License as published by the Free Software Foundation.    *
 * This program is distributed in the hope that it will be useful. *
 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
 * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
 * more details, a copy of which can be found in the file COPYING  *
 * included with this package.                                     *
 *******************************************************************/
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_transport_fc.h>

#include "lpfc_hw.h"
#include "lpfc_sli.h"
#include "lpfc_disc.h"
#include "lpfc_scsi.h"
#include "lpfc.h"
#include "lpfc_crtn.h"
#include "lpfc_logmsg.h"
#include "lpfc_compat.h"
/*
 * Define macro to log: Mailbox command x%x cannot issue Data
 * This allows multiple uses of lpfc_msgBlk0311
 * w/o perturbing log msg utility.
 */
#define LOG_MBOX_CANNOT_ISSUE_DATA(phba, mb, psli, flag) \
	lpfc_printf_log(phba, \
		KERN_INFO, \
		LOG_MBOX | LOG_SLI, \
		"%d:0311 Mailbox command x%x cannot issue " \
		"Data: x%x x%x x%x\n", \
		phba->brd_no, \
		mb->mbxCommand, \
		phba->hba_state, \
		psli->sli_flag, \
		flag);
/* There are only four IOCB completion types. */
typedef enum _lpfc_iocb_type {
	LPFC_UNKNOWN_IOCB,
	LPFC_UNSOL_IOCB,
	LPFC_SOL_IOCB,
	LPFC_ABORT_IOCB
} lpfc_iocb_type;

/*
 * Translate the iocb command to an iocb command type used to decide the final
 * disposition of each completed IOCB.
 */
static lpfc_iocb_type
lpfc_sli_iocb_cmd_type(uint8_t iocb_cmnd)
{
	lpfc_iocb_type type = LPFC_UNKNOWN_IOCB;

	if (iocb_cmnd > CMD_MAX_IOCB_CMD)
		return LPFC_UNKNOWN_IOCB;

	switch (iocb_cmnd) {
	case CMD_XMIT_SEQUENCE_CR:
	case CMD_XMIT_SEQUENCE_CX:
	case CMD_XMIT_BCAST_CN:
	case CMD_XMIT_BCAST_CX:
	case CMD_ELS_REQUEST_CR:
	case CMD_ELS_REQUEST_CX:
	case CMD_CREATE_XRI_CR:
	case CMD_CREATE_XRI_CX:
	case CMD_XMIT_ELS_RSP_CX:
	case CMD_FCP_IWRITE_CR:
	case CMD_FCP_IWRITE_CX:
	case CMD_FCP_IREAD_CR:
	case CMD_FCP_IREAD_CX:
	case CMD_FCP_ICMND_CR:
	case CMD_FCP_ICMND_CX:
	case CMD_ADAPTER_DUMP:
	case CMD_XMIT_SEQUENCE64_CR:
	case CMD_XMIT_SEQUENCE64_CX:
	case CMD_XMIT_BCAST64_CN:
	case CMD_XMIT_BCAST64_CX:
	case CMD_ELS_REQUEST64_CR:
	case CMD_ELS_REQUEST64_CX:
	case CMD_FCP_IWRITE64_CR:
	case CMD_FCP_IWRITE64_CX:
	case CMD_FCP_IREAD64_CR:
	case CMD_FCP_IREAD64_CX:
	case CMD_FCP_ICMND64_CR:
	case CMD_FCP_ICMND64_CX:
	case CMD_GEN_REQUEST64_CR:
	case CMD_GEN_REQUEST64_CX:
	case CMD_XMIT_ELS_RSP64_CX:
		type = LPFC_SOL_IOCB;
		break;
	case CMD_ABORT_XRI_CN:
	case CMD_ABORT_XRI_CX:
	case CMD_CLOSE_XRI_CN:
	case CMD_CLOSE_XRI_CX:
	case CMD_XRI_ABORTED_CX:
	case CMD_ABORT_MXRI64_CN:
		type = LPFC_ABORT_IOCB;
		break;
	case CMD_RCV_SEQUENCE_CX:
	case CMD_RCV_ELS_REQ_CX:
	case CMD_RCV_SEQUENCE64_CX:
	case CMD_RCV_ELS_REQ64_CX:
		type = LPFC_UNSOL_IOCB;
		break;
	default:
		type = LPFC_UNKNOWN_IOCB;
		break;
	}

	return type;
}
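
/*
 * Informal note on usage (not from the original source): the type
 * returned here drives the dispatch in the ring event handlers below,
 * roughly:
 *
 *	type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
 *	switch (type) {
 *	case LPFC_SOL_IOCB:   match the response to txcmplq by iotag
 *	case LPFC_UNSOL_IOCB: route by Rctl/Type to an unsol handler
 *	case LPFC_ABORT_IOCB: complete or drop the aborted command
 *	default:              log an unknown IOCB
 *	}
 */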
static int
lpfc_sli_ring_map(struct lpfc_hba * phba, LPFC_MBOXQ_t *pmb)
{
	struct lpfc_sli *psli = &phba->sli;
	MAILBOX_t *pmbox = &pmb->mb;
	int i, rc;

	for (i = 0; i < psli->num_rings; i++) {
		phba->hba_state = LPFC_INIT_MBX_CMDS;
		lpfc_config_ring(phba, i, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc != MBX_SUCCESS) {
			/* Adapter failed to init, mbxCmd <cmd> CFG_RING,
			   mbxStatus <status>, ring <num> */
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_INIT,
					"%d:0446 Adapter failed to init, "
					"mbxCmd x%x CFG_RING, mbxStatus x%x, "
					"ring %d\n",
					phba->brd_no,
					pmbox->mbxCommand,
					pmbox->mbxStatus,
					i);
			phba->hba_state = LPFC_HBA_ERROR;
			return -ENXIO;
		}
	}
	return 0;
}
static int
lpfc_sli_ringtxcmpl_put(struct lpfc_hba * phba,
			struct lpfc_sli_ring * pring, struct lpfc_iocbq * piocb)
{
	uint16_t iotag;

	list_add_tail(&piocb->list, &pring->txcmplq);
	pring->txcmplq_cnt++;
	if (unlikely(pring->ringno == LPFC_ELS_RING))
		mod_timer(&phba->els_tmofunc,
			  jiffies + HZ * (phba->fc_ratov << 1));

	if (pring->fast_lookup) {
		/* Setup fast lookup based on iotag for completion */
		iotag = piocb->iocb.ulpIoTag;
		if (iotag && (iotag < pring->fast_iotag))
			*(pring->fast_lookup + iotag) = piocb;
		else {
			/* Cmd ring <ringno> put: iotag <iotag> greater than
			   configured max <fast_iotag> wd0 <icmd> */
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_SLI,
					"%d:0316 Cmd ring %d put: iotag x%x "
					"greater than configured max x%x "
					"wd0 x%x\n",
					phba->brd_no,
					pring->ringno, iotag,
					pring->fast_iotag,
					*(((uint32_t *)(&piocb->iocb)) + 7));
		}
	}
	return 0;
}
static struct lpfc_iocbq *
lpfc_sli_ringtx_get(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
{
	struct list_head *dlp;
	struct lpfc_iocbq *cmd_iocb = NULL;

	dlp = &pring->txq;
	list_remove_head((&pring->txq), cmd_iocb,
			 struct lpfc_iocbq, list);
	if (cmd_iocb) {
		/* If the first ptr is not equal to the list header,
		 * dequeue the IOCBQ_t and return it.
		 */
		pring->txq_cnt--;
	}
	return cmd_iocb;
}
static IOCB_t *
lpfc_sli_next_iocb_slot (struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
	uint32_t max_cmd_idx = pring->numCiocb;
	IOCB_t *iocb = NULL;

	if ((pring->next_cmdidx == pring->cmdidx) &&
	    (++pring->next_cmdidx >= max_cmd_idx))
		pring->next_cmdidx = 0;

	if (unlikely(pring->local_getidx == pring->next_cmdidx)) {

		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);

		if (unlikely(pring->local_getidx >= max_cmd_idx)) {
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"%d:0315 Ring %d issue: portCmdGet %d "
					"is bigger than cmd ring %d\n",
					phba->brd_no, pring->ringno,
					pring->local_getidx, max_cmd_idx);

			phba->hba_state = LPFC_HBA_ERROR;
			/*
			 * All error attention handlers are posted to
			 * worker thread
			 */
			phba->work_ha |= HA_ERATT;
			phba->work_hs = HS_FFER3;
			if (phba->work_wait)
				wake_up(phba->work_wait);

			return NULL;
		}

		if (pring->local_getidx == pring->next_cmdidx)
			return NULL;
	}

	iocb = IOCB_ENTRY(pring->cmdringaddr, pring->cmdidx);

	return iocb;
}
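
/*
 * Informal note: next_cmdidx is the host's producer index into the cmd
 * ring and pgp->cmdGetInx is the port's consumer index, cached in
 * local_getidx so SLIM is not re-read on every submission.  The ring is
 * treated as full when advancing next_cmdidx would land on
 * local_getidx; only then is the shared cmdGetInx re-fetched.  For
 * example, with numCiocb == 4 and cmdidx == 3, next_cmdidx wraps to 0
 * and a slot is free only if the port's get index has moved past 0.
 */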
static uint32_t
lpfc_sli_next_iotag(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
{
	uint32_t search_start;

	if (pring->fast_lookup == NULL) {
		pring->iotag_ctr++;
		if (pring->iotag_ctr >= pring->iotag_max)
			pring->iotag_ctr = 1;
		return pring->iotag_ctr;
	}

	search_start = pring->iotag_ctr;

	do {
		pring->iotag_ctr++;
		if (pring->iotag_ctr >= pring->fast_iotag)
			pring->iotag_ctr = 1;

		if (*(pring->fast_lookup + pring->iotag_ctr) == NULL)
			return pring->iotag_ctr;

	} while (pring->iotag_ctr != search_start);

	/*
	 * Outstanding I/O count for ring <ringno> is at max <fast_iotag>
	 */
	lpfc_printf_log(phba,
		KERN_ERR,
		LOG_SLI,
		"%d:0318 Outstanding I/O count for ring %d is at max x%x\n",
		phba->brd_no,
		pring->ringno,
		pring->fast_iotag);
	return 0;
}
static void
lpfc_sli_submit_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		IOCB_t *iocb, struct lpfc_iocbq *nextiocb)
{
	/*
	 * Allocate and set up an iotag
	 */
	nextiocb->iocb.ulpIoTag =
		lpfc_sli_next_iotag(phba, &phba->sli.ring[phba->sli.fcp_ring]);

	/*
	 * Issue iocb command to adapter
	 */
	lpfc_sli_pcimem_bcopy(&nextiocb->iocb, iocb, sizeof (IOCB_t));
	pring->stats.iocb_cmd++;

	/*
	 * If there is no completion routine to call, we can release the
	 * IOCB buffer back right now.  For IOCBs, like QUE_RING_BUF,
	 * that have no rsp ring completion, iocb_cmpl MUST be NULL.
	 */
	if (nextiocb->iocb_cmpl)
		lpfc_sli_ringtxcmpl_put(phba, pring, nextiocb);
	else
		list_add_tail(&nextiocb->list, &phba->lpfc_iocb_list);

	/*
	 * Let the HBA know what IOCB slot will be the next one the
	 * driver will put a command into.
	 */
	pring->cmdidx = pring->next_cmdidx;
	writeb(pring->cmdidx, phba->MBslimaddr
	       + (SLIMOFF + (pring->ringno * 2)) * 4);
}
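
/*
 * Informal reading of the code above: the iotag is always drawn from the
 * FCP ring's allocator, whichever ring the IOCB is submitted on, so one
 * iotag space backs the fast-lookup completion table.  The writeb()
 * publishes the new cmdidx to SLIM at byte offset
 * (SLIMOFF + ringno * 2) * 4, the per-ring host put index.
 */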
static void
lpfc_sli_update_full_ring(struct lpfc_hba * phba,
			  struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	pring->flag |= LPFC_CALL_RING_AVAILABLE;

	/*
	 * Set ring 'ringno' to SET R0CE_REQ in Chip Att register.
	 * The HBA will tell us when an IOCB entry is available.
	 */
	writel((CA_R0ATT|CA_R0CE_REQ) << (ringno*4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	pring->stats.iocb_cmd_full++;
}
static void
lpfc_sli_update_ring(struct lpfc_hba * phba,
		     struct lpfc_sli_ring *pring)
{
	int ringno = pring->ringno;

	/*
	 * Tell the HBA that there is work to do in this ring.
	 */
	writel(CA_R0ATT << (ringno * 4), phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */
}
static void
lpfc_sli_resume_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring)
{
	IOCB_t *iocb;
	struct lpfc_iocbq *nextiocb;

	/*
	 * Check to see if:
	 *  (a) there is anything on the txq to send
	 *  (b) link is up
	 *  (c) link attention events can be processed (fcp ring only)
	 *  (d) IOCB processing is not blocked by the outstanding mbox command.
	 */
	if (pring->txq_cnt &&
	    (phba->hba_state > LPFC_LINK_DOWN) &&
	    (pring->ringno != phba->sli.fcp_ring ||
	     phba->sli.sli_flag & LPFC_PROCESS_LA) &&
	    !(pring->flag & LPFC_STOP_IOCB_MBX)) {

		while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
		       (nextiocb = lpfc_sli_ringtx_get(phba, pring)))
			lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

		if (iocb)
			lpfc_sli_update_ring(phba, pring);
		else
			lpfc_sli_update_full_ring(phba, pring);
	}

	return;
}
/* lpfc_sli_turn_on_ring is only called by lpfc_sli_handle_mb_event below */
static void
lpfc_sli_turn_on_ring(struct lpfc_hba * phba, int ringno)
{
	struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[ringno];

	/* If the ring is active, flag it */
	if (phba->sli.ring[ringno].cmdringaddr) {
		if (phba->sli.ring[ringno].flag & LPFC_STOP_IOCB_MBX) {
			phba->sli.ring[ringno].flag &= ~LPFC_STOP_IOCB_MBX;
			/*
			 * Force update of the local copy of cmdGetInx
			 */
			phba->sli.ring[ringno].local_getidx
				= le32_to_cpu(pgp->cmdGetInx);
			spin_lock_irq(phba->host->host_lock);
			lpfc_sli_resume_iocb(phba, &phba->sli.ring[ringno]);
			spin_unlock_irq(phba->host->host_lock);
		}
	}
}
static int
lpfc_sli_chk_mbx_command(uint8_t mbxCommand)
{
	uint8_t ret;

	switch (mbxCommand) {
	case MBX_RUN_BIU_DIAG:
	case MBX_CONFIG_LINK:
	case MBX_CONFIG_RING:
	case MBX_READ_CONFIG:
	case MBX_READ_RCONFIG:
	case MBX_READ_STATUS:
	case MBX_READ_LNK_STAT:
	case MBX_UNREG_LOGIN:
	case MBX_DUMP_MEMORY:
	case MBX_DUMP_CONTEXT:
	case MBX_DEL_LD_ENTRY:
	case MBX_RUN_PROGRAM:
	case MBX_CONFIG_FARP:
	case MBX_RUN_BIU_DIAG64:
	case MBX_CONFIG_PORT:
	case MBX_READ_SPARM64:
	case MBX_REG_LOGIN64:
	case MBX_FLASH_WR_ULA:
	case MBX_LOAD_EXP_ROM:
		ret = mbxCommand;
		break;
	default:
		ret = MBX_SHUTDOWN;
		break;
	}
	return ret;
}
static void
lpfc_sli_wake_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
{
	wait_queue_head_t *pdone_q;

	/*
	 * If pdone_q is empty, the driver thread gave up waiting and
	 * continued running.
	 */
	pdone_q = (wait_queue_head_t *) pmboxq->context1;
	if (pdone_q)
		wake_up_interruptible(pdone_q);
	return;
}
void
lpfc_sli_def_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb)
{
	struct lpfc_dmabuf *mp;

	mp = (struct lpfc_dmabuf *) (pmb->context1);
	if (mp) {
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
	mempool_free(pmb, phba->mbox_mem_pool);
	return;
}
int
lpfc_sli_handle_mb_event(struct lpfc_hba * phba)
{
	MAILBOX_t *mbox;
	MAILBOX_t *pmbox;
	LPFC_MBOXQ_t *pmb;
	struct lpfc_sli *psli;
	int i, rc;
	uint32_t process_next;

	psli = &phba->sli;
	/* We should only get here if we are in SLI2 mode */
	if (!(phba->sli.sli_flag & LPFC_SLI2_ACTIVE)) {
		return (1);
	}

	phba->sli.slistat.mbox_event++;

	/* Get a Mailbox buffer to setup mailbox commands for callback */
	if ((pmb = phba->sli.mbox_active)) {
		pmbox = &pmb->mb;
		mbox = &phba->slim2p->mbx;

		/* First check out the status word */
		lpfc_sli_pcimem_bcopy(mbox, pmbox, sizeof (uint32_t));

		/* Sanity check to ensure the host owns the mailbox */
		if (pmbox->mbxOwner != OWN_HOST) {
			/* Lets try for a while */
			for (i = 0; i < 10240; i++) {
				/* First copy command data */
				lpfc_sli_pcimem_bcopy(mbox, pmbox,
						      sizeof (uint32_t));
				if (pmbox->mbxOwner == OWN_HOST)
					goto mbout;
			}
			/* Stray Mailbox Interrupt, mbxCommand <cmd> mbxStatus
			   <status> */
			lpfc_printf_log(phba,
					KERN_WARNING,
					LOG_MBOX | LOG_SLI,
					"%d:0304 Stray Mailbox Interrupt "
					"mbxCommand x%x mbxStatus x%x\n",
					phba->brd_no,
					pmbox->mbxCommand,
					pmbox->mbxStatus);

			spin_lock_irq(phba->host->host_lock);
			phba->sli.sli_flag |= LPFC_SLI_MBOX_ACTIVE;
			spin_unlock_irq(phba->host->host_lock);
			return (1);
		}

	      mbout:
		del_timer_sync(&phba->sli.mbox_tmo);
		phba->work_hba_events &= ~WORKER_MBOX_TMO;

		/*
		 * It is a fatal error if unknown mbox command completion.
		 */
		if (lpfc_sli_chk_mbx_command(pmbox->mbxCommand) ==
		    MBX_SHUTDOWN) {
			/* Unknown mailbox command compl */
			lpfc_printf_log(phba,
				KERN_ERR,
				LOG_MBOX | LOG_SLI,
				"%d:0323 Unknown Mailbox command %x Cmpl\n",
				phba->brd_no,
				pmbox->mbxCommand);
			phba->hba_state = LPFC_HBA_ERROR;
			phba->work_hs = HS_FFER3;
			lpfc_handle_eratt(phba);
			return (0);
		}

		phba->sli.mbox_active = NULL;
		if (pmbox->mbxStatus) {
			phba->sli.slistat.mbox_stat_err++;
			if (pmbox->mbxStatus == MBXERR_NO_RESOURCES) {
				/* Mbox cmd cmpl error - RETRYing */
				lpfc_printf_log(phba,
					KERN_INFO,
					LOG_MBOX | LOG_SLI,
					"%d:0305 Mbox cmd cmpl error - "
					"RETRYing Data: x%x x%x x%x x%x\n",
					phba->brd_no,
					pmbox->mbxCommand,
					pmbox->mbxStatus,
					pmbox->un.varWords[0],
					phba->hba_state);
				pmbox->mbxStatus = 0;
				pmbox->mbxOwner = OWN_HOST;
				spin_lock_irq(phba->host->host_lock);
				phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
				spin_unlock_irq(phba->host->host_lock);
				rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
				if (rc == MBX_SUCCESS)
					return (0);
			}
		}

		/* Mailbox cmd <cmd> Cmpl <cmpl> */
		lpfc_printf_log(phba,
				KERN_INFO,
				LOG_MBOX | LOG_SLI,
				"%d:0307 Mailbox cmd x%x Cmpl x%p "
				"Data: x%x x%x x%x x%x x%x x%x x%x x%x x%x\n",
				phba->brd_no,
				pmbox->mbxCommand,
				pmb->mbox_cmpl,
				*((uint32_t *) pmbox),
				pmbox->un.varWords[0],
				pmbox->un.varWords[1],
				pmbox->un.varWords[2],
				pmbox->un.varWords[3],
				pmbox->un.varWords[4],
				pmbox->un.varWords[5],
				pmbox->un.varWords[6],
				pmbox->un.varWords[7]);

		if (pmb->mbox_cmpl) {
			lpfc_sli_pcimem_bcopy(mbox, pmbox, MAILBOX_CMD_SIZE);
			pmb->mbox_cmpl(phba,pmb);
		}
	}

	do {
		process_next = 0;	/* by default don't loop */
		spin_lock_irq(phba->host->host_lock);
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;

		/* Process next mailbox command if there is one */
		if ((pmb = lpfc_mbox_get(phba))) {
			spin_unlock_irq(phba->host->host_lock);
			rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
			if (rc == MBX_NOT_FINISHED) {
				pmb->mb.mbxStatus = MBX_NOT_FINISHED;
				pmb->mbox_cmpl(phba,pmb);
				process_next = 1;
				continue;	/* loop back */
			}
		} else {
			spin_unlock_irq(phba->host->host_lock);
			/* Turn on IOCB processing */
			for (i = 0; i < phba->sli.num_rings; i++) {
				lpfc_sli_turn_on_ring(phba, i);
			}

			/* Free any lpfc_dmabuf's waiting for mbox cmd cmpls */
			while (!list_empty(&phba->freebufList)) {
				struct lpfc_dmabuf *mp;

				mp = NULL;
				list_remove_head((&phba->freebufList),
						 mp, struct lpfc_dmabuf,
						 list);
				if (mp) {
					lpfc_mbuf_free(phba, mp->virt,
						       mp->phys);
					kfree(mp);
				}
			}
		}
	} while (process_next);

	return (0);
}
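
/*
 * Informal note: the handler above is two-phase.  Phase one retires the
 * active mailbox: it confirms host ownership of the SLIM mailbox
 * (polling up to 10240 reads), cancels the timeout, and invokes
 * mbox_cmpl.  Phase two drains the pending queue via lpfc_mbox_get(),
 * and only when nothing is queued restarts IOCB processing on each ring
 * and frees any lpfc_dmabuf buffers deferred to freebufList.
 */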
static int
lpfc_sli_process_unsol_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			    struct lpfc_iocbq *saveq)
{
	IOCB_t *irsp;
	WORD5 *w5p;
	uint32_t Rctl, Type;
	uint32_t match, i;

	match = 0;
	irsp = &(saveq->iocb);
	if ((irsp->ulpCommand == CMD_RCV_ELS_REQ64_CX)
	    || (irsp->ulpCommand == CMD_RCV_ELS_REQ_CX)) {
		Rctl = FC_ELS_REQ;
		Type = FC_ELS_DATA;
	} else {
		w5p = (WORD5 *) & (saveq->iocb.un.ulpWord[5]);
		Rctl = w5p->hcsw.Rctl;
		Type = w5p->hcsw.Type;

		/* Firmware Workaround */
		if ((Rctl == 0) && (pring->ringno == LPFC_ELS_RING) &&
		    (irsp->ulpCommand == CMD_RCV_SEQUENCE64_CX)) {
			Rctl = FC_ELS_REQ;
			Type = FC_ELS_DATA;
			w5p->hcsw.Rctl = Rctl;
			w5p->hcsw.Type = Type;
		}
	}
	/* Unsolicited Responses */
	if (pring->prt[0].profile) {
		(pring->prt[0].lpfc_sli_rcv_unsol_event) (phba, pring, saveq);
		match = 1;
	} else {
		/* We must search, based on rctl / type
		   for the right routine */
		for (i = 0; i < pring->num_mask; i++) {
			if ((pring->prt[i].rctl == Rctl)
			    && (pring->prt[i].type == Type)) {
				(pring->prt[i].lpfc_sli_rcv_unsol_event)
					(phba, pring, saveq);
				match = 1;
			}
		}
	}
	if (match == 0) {
		/* Unexpected Rctl / Type received */
		/* Ring <ringno> handler: unexpected
		   Rctl <Rctl> Type <Type> received */
		lpfc_printf_log(phba,
				KERN_WARNING,
				LOG_SLI,
				"%d:0313 Ring %d handler: unexpected Rctl x%x "
				"Type x%x received\n",
				phba->brd_no,
				pring->ringno,
				Rctl,
				Type);
	}
	return (1);
}
static struct lpfc_iocbq *
lpfc_sli_txcmpl_ring_search_slow(struct lpfc_sli_ring * pring,
				 struct lpfc_iocbq * prspiocb)
{
	IOCB_t *icmd = NULL;
	IOCB_t *irsp = NULL;
	struct lpfc_iocbq *cmd_iocb;
	struct lpfc_iocbq *iocb, *next_iocb;
	uint16_t iotag;

	irsp = &prspiocb->iocb;
	iotag = irsp->ulpIoTag;
	cmd_iocb = NULL;

	/* Search through txcmpl from the beginning */
	list_for_each_entry_safe(iocb, next_iocb, &(pring->txcmplq), list) {
		icmd = &iocb->iocb;
		if (iotag == icmd->ulpIoTag) {
			/* Found a match. */
			cmd_iocb = iocb;
			list_del(&iocb->list);
			pring->txcmplq_cnt--;
			break;
		}
	}

	return cmd_iocb;
}
static struct lpfc_iocbq *
lpfc_sli_txcmpl_ring_iotag_lookup(struct lpfc_hba * phba,
				  struct lpfc_sli_ring * pring,
				  struct lpfc_iocbq * prspiocb)
{
	IOCB_t *irsp = NULL;
	struct lpfc_iocbq *cmd_iocb = NULL;
	uint16_t iotag;

	if (unlikely(pring->fast_lookup == NULL))
		return NULL;

	/* Use fast lookup based on iotag for completion */
	irsp = &prspiocb->iocb;
	iotag = irsp->ulpIoTag;
	if (iotag < pring->fast_iotag) {
		cmd_iocb = *(pring->fast_lookup + iotag);
		*(pring->fast_lookup + iotag) = NULL;
		if (cmd_iocb) {
			list_del(&cmd_iocb->list);
			pring->txcmplq_cnt--;
			return cmd_iocb;
		} else {
			/*
			 * This is clearly an error.  A ring that uses iotags
			 * should never have an interrupt for a completion that
			 * is not on the ring.  Return NULL and log an error.
			 */
			lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
					"%d:0327 Rsp ring %d error - command "
					"completion for iotag x%x not found\n",
					phba->brd_no, pring->ringno, iotag);
			return NULL;
		}
	}

	/*
	 * Rsp ring <ringno> get: iotag <iotag> greater than
	 * configured max <fast_iotag> wd0 <irsp>.  This is an
	 * error.  Just return NULL.
	 */
	lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
			"%d:0317 Rsp ring %d get: iotag x%x greater than "
			"configured max x%x wd0 x%x\n",
			phba->brd_no, pring->ringno, iotag, pring->fast_iotag,
			*(((uint32_t *) irsp) + 7));
	return NULL;
}
static int
lpfc_sli_process_sol_iocb(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
			  struct lpfc_iocbq *saveq)
{
	struct lpfc_iocbq * cmdiocbp;
	int rc = 1;
	unsigned long iflag;

	/* Based on the iotag field, get the cmd IOCB from the txcmplq */
	spin_lock_irqsave(phba->host->host_lock, iflag);
	cmdiocbp = lpfc_sli_txcmpl_ring_search_slow(pring, saveq);
	if (cmdiocbp) {
		if (cmdiocbp->iocb_cmpl) {
			/*
			 * Post all ELS completions to the worker thread.
			 * All other are passed to the completion callback.
			 */
			if (pring->ringno == LPFC_ELS_RING) {
				spin_unlock_irqrestore(phba->host->host_lock,
						       iflag);
				(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
				spin_lock_irqsave(phba->host->host_lock, iflag);
			} else {
				if (cmdiocbp->iocb_flag & LPFC_IO_POLL)
					rc = 0;

				spin_unlock_irqrestore(phba->host->host_lock,
						       iflag);
				(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
				spin_lock_irqsave(phba->host->host_lock, iflag);
			}
		} else {
			list_add_tail(&cmdiocbp->list, &phba->lpfc_iocb_list);
		}
	} else {
		/*
		 * Unknown initiating command based on the response iotag.
		 * This could be the case on the ELS ring because of
		 * lpfc_els_abort().
		 */
		if (pring->ringno != LPFC_ELS_RING) {
			/*
			 * Ring <ringno> handler: unexpected completion IoTag
			 * <IoTag>
			 */
			lpfc_printf_log(phba,
				KERN_WARNING,
				LOG_SLI,
				"%d:0322 Ring %d handler: unexpected "
				"completion IoTag x%x Data: x%x x%x x%x x%x\n",
				phba->brd_no,
				pring->ringno,
				saveq->iocb.ulpIoTag,
				saveq->iocb.ulpStatus,
				saveq->iocb.un.ulpWord[4],
				saveq->iocb.ulpCommand,
				saveq->iocb.ulpContext);
		}
	}
	spin_unlock_irqrestore(phba->host->host_lock, iflag);
	return rc;
}
/*
 * This routine presumes LPFC_FCP_RING handling and doesn't bother
 * to check it explicitly.
 */
static int
lpfc_sli_handle_fast_ring_event(struct lpfc_hba * phba,
				struct lpfc_sli_ring * pring, uint32_t mask)
{
	struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
	IOCB_t *irsp = NULL;
	IOCB_t *entry = NULL;
	struct lpfc_iocbq *cmdiocbq = NULL;
	struct lpfc_iocbq rspiocbq;
	uint32_t status;
	uint32_t portRspPut, portRspMax;
	int rc = 1;
	lpfc_iocb_type type;
	unsigned long iflag;
	uint32_t rsp_cmpl = 0;
	void __iomem *to_slim;

	spin_lock_irqsave(phba->host->host_lock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (unlikely(portRspPut >= portRspMax)) {
		/*
		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
		 * rsp ring <portRspMax>
		 */
		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
				"%d:0312 Ring %d handler: portRspPut %d "
				"is bigger than rsp ring %d\n",
				phba->brd_no, pring->ringno, portRspPut,
				portRspMax);

		phba->hba_state = LPFC_HBA_ERROR;

		/* All error attention handlers are posted to worker thread */
		phba->work_ha |= HA_ERATT;
		phba->work_hs = HS_FFER3;
		if (phba->work_wait)
			wake_up(phba->work_wait);

		spin_unlock_irqrestore(phba->host->host_lock, iflag);
		return 1;
	}

	while (pring->rspidx != portRspPut) {
		/*
		 * Fetch an entry off the ring and copy it into a local data
		 * structure.  The copy involves a byte-swap since the
		 * network byte order and pci byte orders are different.
		 */
		entry = (IOCB_t *) IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
		lpfc_sli_pcimem_bcopy((uint32_t *) entry,
				      (uint32_t *) &rspiocbq.iocb,
				      sizeof (IOCB_t));
		irsp = &rspiocbq.iocb;

		type = lpfc_sli_iocb_cmd_type(irsp->ulpCommand & CMD_IOCB_MASK);
		pring->stats.iocb_rsp++;
		rsp_cmpl++;

		if (unlikely(irsp->ulpStatus)) {
			/* Rsp ring <ringno> error: IOCB */
			lpfc_printf_log(phba, KERN_WARNING, LOG_SLI,
					"%d:0326 Rsp Ring %d error: IOCB Data: "
					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
					phba->brd_no, pring->ringno,
					irsp->un.ulpWord[0], irsp->un.ulpWord[1],
					irsp->un.ulpWord[2], irsp->un.ulpWord[3],
					irsp->un.ulpWord[4], irsp->un.ulpWord[5],
					*(((uint32_t *) irsp) + 6),
					*(((uint32_t *) irsp) + 7));
		}

		switch (type) {
		case LPFC_SOL_IOCB:
		case LPFC_ABORT_IOCB:
			/*
			 * Idle exchange closed via ABTS from port.  No iocb
			 * resources need to be recovered.
			 */
			if (unlikely(irsp->ulpCommand == CMD_XRI_ABORTED_CX)) {
				printk(KERN_INFO "%s: IOCB cmd 0x%x processed. "
				       "Skipping completion\n", __FUNCTION__,
				       irsp->ulpCommand);
				break;
			}

			cmdiocbq = lpfc_sli_txcmpl_ring_iotag_lookup(phba,
								     pring,
								     &rspiocbq);
			if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
				spin_unlock_irqrestore(
				       phba->host->host_lock, iflag);
				(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
						      &rspiocbq);
				spin_lock_irqsave(phba->host->host_lock,
						  iflag);
			}
			break;
		default:
			if (irsp->ulpCommand == CMD_ADAPTER_MSG) {
				char adaptermsg[LPFC_MAX_ADPTMSG];
				memset(adaptermsg, 0, LPFC_MAX_ADPTMSG);
				memcpy(&adaptermsg[0], (uint8_t *) irsp,
				       MAX_MSG_DATA);
				dev_warn(&((phba->pcidev)->dev), "lpfc%d: %s",
					 phba->brd_no, adaptermsg);
			} else {
				/* Unknown IOCB command */
				lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
						"%d:0321 Unknown IOCB command "
						"Data: x%x, x%x x%x x%x x%x\n",
						phba->brd_no, type, irsp->ulpCommand,
						irsp->ulpStatus, irsp->ulpIoTag,
						irsp->ulpContext);
			}
			break;
		}

		/*
		 * The response IOCB has been processed.  Update the ring
		 * pointer in SLIM.  If the port response put pointer has not
		 * been updated, sync the pgp->rspPutInx and fetch the new port
		 * response put pointer.
		 */
		if (++pring->rspidx >= portRspMax)
			pring->rspidx = 0;

		to_slim = phba->MBslimaddr +
			(SLIMOFF + (pring->ringno * 2) + 1) * 4;
		writeb(pring->rspidx, to_slim);

		if (pring->rspidx == portRspPut)
			portRspPut = le32_to_cpu(pgp->rspPutInx);
	}

	if ((rsp_cmpl > 0) && (mask & HA_R0RE_REQ)) {
		pring->stats.iocb_rsp_full++;
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr);
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);
	}

	spin_unlock_irqrestore(phba->host->host_lock, iflag);
	return rc;
}
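
/*
 * Informal note: the loop above consumes response entries from rspidx up
 * to the port's rspPutInx, echoing each new rspidx back to SLIM at byte
 * offset (SLIMOFF + ringno * 2 + 1) * 4 so the port can reuse freed
 * entries.  rspPutInx is re-read only when rspidx catches up to it,
 * keeping PCI reads off the fast path.
 */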
int
lpfc_sli_handle_slow_ring_event(struct lpfc_hba * phba,
				struct lpfc_sli_ring * pring, uint32_t mask)
{
	IOCB_t *entry;
	IOCB_t *irsp = NULL;
	struct lpfc_iocbq *rspiocbp = NULL;
	struct lpfc_iocbq *next_iocb;
	struct lpfc_iocbq *cmdiocbp;
	struct lpfc_iocbq *saveq;
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_pgp *pgp = &phba->slim2p->mbx.us.s2.port[pring->ringno];
	uint8_t iocb_cmd_type;
	lpfc_iocb_type type;
	uint32_t status, free_saveq;
	uint32_t portRspPut, portRspMax;
	int rc = 1;
	unsigned long iflag;
	void __iomem *to_slim;

	spin_lock_irqsave(phba->host->host_lock, iflag);
	pring->stats.iocb_event++;

	/*
	 * The next available response entry should never exceed the maximum
	 * entries.  If it does, treat it as an adapter hardware error.
	 */
	portRspMax = pring->numRiocb;
	portRspPut = le32_to_cpu(pgp->rspPutInx);
	if (portRspPut >= portRspMax) {
		/*
		 * Ring <ringno> handler: portRspPut <portRspPut> is bigger than
		 * rsp ring <portRspMax>
		 */
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_SLI,
				"%d:0312 Ring %d handler: portRspPut %d "
				"is bigger than rsp ring %d\n",
				phba->brd_no,
				pring->ringno, portRspPut, portRspMax);

		phba->hba_state = LPFC_HBA_ERROR;
		spin_unlock_irqrestore(phba->host->host_lock, iflag);

		phba->work_hs = HS_FFER3;
		lpfc_handle_eratt(phba);

		return 1;
	}

	lpfc_iocb_list = &phba->lpfc_iocb_list;
	while (pring->rspidx != portRspPut) {
		/*
		 * Build a completion list and call the appropriate handler.
		 * The process is to get the next available response iocb, get
		 * a free iocb from the list, copy the response data into the
		 * free iocb, insert to the continuation list, and update the
		 * next response index to slim.  This process makes response
		 * iocb's in the ring available to DMA as fast as possible but
		 * pays a penalty for a copy operation.  Since the iocb is
		 * only 32 bytes, this penalty is considered small relative to
		 * the PCI reads for register values and a slim write.  When
		 * the ulpLe field is set, the entire Command has been
		 * received.
		 */
		entry = IOCB_ENTRY(pring->rspringaddr, pring->rspidx);
		list_remove_head(lpfc_iocb_list, rspiocbp, struct lpfc_iocbq,
				 list);
		if (rspiocbp == NULL) {
			printk(KERN_ERR "%s: out of buffers! Failing "
			       "completion.\n", __FUNCTION__);
			break;
		}

		lpfc_sli_pcimem_bcopy(entry, &rspiocbp->iocb, sizeof (IOCB_t));
		irsp = &rspiocbp->iocb;

		if (++pring->rspidx >= portRspMax)
			pring->rspidx = 0;

		to_slim = phba->MBslimaddr + (SLIMOFF + (pring->ringno * 2)
					      + 1) * 4;
		writeb(pring->rspidx, to_slim);

		if (list_empty(&(pring->iocb_continueq))) {
			list_add(&rspiocbp->list, &(pring->iocb_continueq));
		} else {
			list_add_tail(&rspiocbp->list,
				      &(pring->iocb_continueq));
		}

		pring->iocb_continueq_cnt++;
		if (irsp->ulpLe) {
			/*
			 * By default, the driver expects to free all resources
			 * associated with this iocb completion.
			 */
			free_saveq = 1;
			saveq = list_get_first(&pring->iocb_continueq,
					       struct lpfc_iocbq, list);
			irsp = &(saveq->iocb);
			list_del_init(&pring->iocb_continueq);
			pring->iocb_continueq_cnt = 0;

			pring->stats.iocb_rsp++;

			if (irsp->ulpStatus) {
				/* Rsp ring <ringno> error: IOCB */
				lpfc_printf_log(phba,
					KERN_WARNING,
					LOG_SLI,
					"%d:0328 Rsp Ring %d error: IOCB Data: "
					"x%x x%x x%x x%x x%x x%x x%x x%x\n",
					phba->brd_no,
					pring->ringno,
					irsp->un.ulpWord[0],
					irsp->un.ulpWord[1],
					irsp->un.ulpWord[2],
					irsp->un.ulpWord[3],
					irsp->un.ulpWord[4],
					irsp->un.ulpWord[5],
					*(((uint32_t *) irsp) + 6),
					*(((uint32_t *) irsp) + 7));
			}

			/*
			 * Fetch the IOCB command type and call the correct
			 * completion routine.  Solicited and Unsolicited
			 * IOCBs on the ELS ring get freed back to the
			 * lpfc_iocb_list by the discovery kernel thread.
			 */
			iocb_cmd_type = irsp->ulpCommand & CMD_IOCB_MASK;
			type = lpfc_sli_iocb_cmd_type(iocb_cmd_type);
			if (type == LPFC_SOL_IOCB) {
				spin_unlock_irqrestore(phba->host->host_lock,
						       iflag);
				rc = lpfc_sli_process_sol_iocb(phba, pring,
							       saveq);
				spin_lock_irqsave(phba->host->host_lock, iflag);
			} else if (type == LPFC_UNSOL_IOCB) {
				spin_unlock_irqrestore(phba->host->host_lock,
						       iflag);
				rc = lpfc_sli_process_unsol_iocb(phba, pring,
								 saveq);
				spin_lock_irqsave(phba->host->host_lock, iflag);
			} else if (type == LPFC_ABORT_IOCB) {
				if ((irsp->ulpCommand != CMD_XRI_ABORTED_CX) &&
				    ((cmdiocbp =
				      lpfc_sli_txcmpl_ring_search_slow(pring,
								saveq)))) {
					/* Call the specified completion
					   routine */
					if (cmdiocbp->iocb_cmpl) {
						spin_unlock_irqrestore(
						       phba->host->host_lock,
						       iflag);
						(cmdiocbp->iocb_cmpl) (phba,
							     cmdiocbp, saveq);
						spin_lock_irqsave(
							  phba->host->host_lock,
							  iflag);
					} else {
						list_add_tail(&cmdiocbp->list,
							      lpfc_iocb_list);
					}
				}
			} else if (type == LPFC_UNKNOWN_IOCB) {
				if (irsp->ulpCommand == CMD_ADAPTER_MSG) {

					char adaptermsg[LPFC_MAX_ADPTMSG];

					memset(adaptermsg, 0,
					       LPFC_MAX_ADPTMSG);
					memcpy(&adaptermsg[0], (uint8_t *) irsp,
					       MAX_MSG_DATA);
					dev_warn(&((phba->pcidev)->dev),
						 "lpfc%d: %s",
						 phba->brd_no, adaptermsg);
				} else {
					/* Unknown IOCB command */
					lpfc_printf_log(phba,
						KERN_ERR,
						LOG_SLI,
						"%d:0321 Unknown IOCB command "
						"Data: x%x x%x x%x x%x\n",
						phba->brd_no,
						irsp->ulpCommand,
						irsp->ulpStatus,
						irsp->ulpIoTag,
						irsp->ulpContext);
				}
			}

			if (free_saveq) {
				if (!list_empty(&saveq->list)) {
					list_for_each_entry_safe(rspiocbp,
								 next_iocb,
								 &saveq->list,
								 list) {
						list_add_tail(&rspiocbp->list,
							      lpfc_iocb_list);
					}
				}

				list_add_tail(&saveq->list, lpfc_iocb_list);
			}
		}

		/*
		 * If the port response put pointer has not been updated, sync
		 * the pgp->rspPutInx in the MAILBOX_t and fetch the new port
		 * response put pointer.
		 */
		if (pring->rspidx == portRspPut) {
			portRspPut = le32_to_cpu(pgp->rspPutInx);
		}
	} /* while (pring->rspidx != portRspPut) */

	if ((rspiocbp != 0) && (mask & HA_R0RE_REQ)) {
		/* At least one response entry has been freed */
		pring->stats.iocb_rsp_full++;
		/* SET RxRE_RSP in Chip Att register */
		status = ((CA_R0ATT | CA_R0RE_RSP) << (pring->ringno * 4));
		writel(status, phba->CAregaddr);
		readl(phba->CAregaddr); /* flush */
	}
	if ((mask & HA_R0CE_RSP) && (pring->flag & LPFC_CALL_RING_AVAILABLE)) {
		pring->flag &= ~LPFC_CALL_RING_AVAILABLE;
		pring->stats.iocb_cmd_empty++;

		/* Force update of the local copy of cmdGetInx */
		pring->local_getidx = le32_to_cpu(pgp->cmdGetInx);
		lpfc_sli_resume_iocb(phba, pring);

		if ((pring->lpfc_sli_cmd_available))
			(pring->lpfc_sli_cmd_available) (phba, pring);
	}

	spin_unlock_irqrestore(phba->host->host_lock, iflag);
	return rc;
}
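
/*
 * Informal note: unlike the fast-ring handler, entries here are copied
 * into driver-owned iocbq buffers and chained on iocb_continueq until an
 * entry with ulpLe set marks the end of the command.  Multi-entry
 * responses are therefore handed to the completion routines as a single
 * saveq chain, at the cost of one 32-byte copy per entry.
 */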
static void
lpfc_sli_abort_iocb_ring(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
{
	struct lpfc_iocbq *iocb, *next_iocb;
	IOCB_t *icmd = NULL, *cmd = NULL;
	uint16_t iotag;

	/* Error everything on txq and txcmplq
	 * First do the txq.
	 */
	spin_lock_irq(phba->host->host_lock);
	list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
		list_del_init(&iocb->list);
		if (iocb->iocb_cmpl) {
			icmd = &iocb->iocb;
			icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			icmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			spin_unlock_irq(phba->host->host_lock);
			(iocb->iocb_cmpl) (phba, iocb, iocb);
			spin_lock_irq(phba->host->host_lock);
		} else {
			list_add_tail(&iocb->list, &phba->lpfc_iocb_list);
		}
	}
	pring->txq_cnt = 0;
	INIT_LIST_HEAD(&(pring->txq));

	/* Next issue ABTS for everything on the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		cmd = &iocb->iocb;

		/*
		 * Immediate abort of IOCB, clear fast_lookup entry,
		 * if any, dequeue and call compl
		 */
		iotag = cmd->ulpIoTag;
		if (iotag && pring->fast_lookup &&
		    (iotag < pring->fast_iotag))
			pring->fast_lookup[iotag] = NULL;

		list_del_init(&iocb->list);
		pring->txcmplq_cnt--;

		if (iocb->iocb_cmpl) {
			cmd->ulpStatus = IOSTAT_LOCAL_REJECT;
			cmd->un.ulpWord[4] = IOERR_SLI_ABORTED;
			spin_unlock_irq(phba->host->host_lock);
			(iocb->iocb_cmpl) (phba, iocb, iocb);
			spin_lock_irq(phba->host->host_lock);
		} else {
			list_add_tail(&iocb->list, &phba->lpfc_iocb_list);
		}
	}

	INIT_LIST_HEAD(&pring->txcmplq);
	pring->txcmplq_cnt = 0;
	spin_unlock_irq(phba->host->host_lock);
}
/******************************************************************************
 * lpfc_sli_send_reset
 *
 * Note: After returning from this function, the HBA cannot be accessed for
 * 1 ms. Since we do not wish to delay in interrupt context, it is the
 * responsibility of the caller to perform the mdelay(1) and flush via readl().
 ******************************************************************************/
static int
lpfc_sli_send_reset(struct lpfc_hba * phba, uint16_t skip_post)
{
	MAILBOX_t *swpmb;
	volatile uint32_t word0;
	void __iomem *to_slim;
	unsigned long flags = 0;

	spin_lock_irqsave(phba->host->host_lock, flags);

	/* A board reset must use REAL SLIM. */
	phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;

	word0 = 0;
	swpmb = (MAILBOX_t *) & word0;
	swpmb->mbxCommand = MBX_RESTART;
	swpmb->mbxHc = 1;

	to_slim = phba->MBslimaddr;
	writel(*(uint32_t *) swpmb, to_slim);
	readl(to_slim); /* flush */

	/* Only skip post after fc_ffinit is completed */
	if (skip_post)
		word0 = 1;	/* This is really setting up word1 */
	else
		word0 = 0;	/* This is really setting up word1 */
	to_slim = phba->MBslimaddr + sizeof (uint32_t);
	writel(*(uint32_t *) swpmb, to_slim);
	readl(to_slim); /* flush */

	/* Turn off parity checking and serr during the physical reset */
	pci_read_config_word(phba->pcidev, PCI_COMMAND, &phba->pci_cfg_value);
	pci_write_config_word(phba->pcidev, PCI_COMMAND,
			      (phba->pci_cfg_value &
			       ~(PCI_COMMAND_PARITY | PCI_COMMAND_SERR)));

	writel(HC_INITFF, phba->HCregaddr);

	phba->hba_state = LPFC_INIT_START;
	spin_unlock_irqrestore(phba->host->host_lock, flags);

	return 0;
}
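
/*
 * Minimal caller sketch (assumed usage, per the note above): issue the
 * reset, wait out the 1 ms access blackout outside any lock, then flush:
 *
 *	lpfc_sli_send_reset(phba, skip_post);
 *	mdelay(1);
 *	readl(phba->HCregaddr);		(flush)
 *
 * lpfc_sli_brdreset() below follows this pattern.
 */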
int
lpfc_sli_brdreset(struct lpfc_hba * phba, uint16_t skip_post)
{
	struct lpfc_sli_ring *pring;
	int i;
	struct lpfc_dmabuf *mp, *next_mp;
	unsigned long flags = 0;

	lpfc_sli_send_reset(phba, skip_post);
	mdelay(1);

	spin_lock_irqsave(phba->host->host_lock, flags);
	/* Risk the write on flush case ie no delay after the readl */
	readl(phba->HCregaddr); /* flush */
	/* Now toggle INITFF bit set by lpfc_sli_send_reset */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* Restore PCI cmd register */
	pci_write_config_word(phba->pcidev, PCI_COMMAND, phba->pci_cfg_value);

	/* perform board reset */
	phba->fc_eventTag = 0;
	phba->fc_myDID = 0;
	phba->fc_prevDID = Mask_DID;

	/* Reset HBA */
	lpfc_printf_log(phba,
			KERN_INFO,
			LOG_SLI,
			"%d:0325 Reset HBA Data: x%x x%x x%x\n",
			phba->brd_no,
			phba->hba_state,
			phba->sli.sli_flag,
			skip_post);

	/* Initialize relevant SLI info */
	for (i = 0; i < phba->sli.num_rings; i++) {
		pring = &phba->sli.ring[i];
		pring->flag = 0;
		pring->rspidx = 0;
		pring->next_cmdidx = 0;
		pring->local_getidx = 0;
		pring->cmdidx = 0;
		pring->missbufcnt = 0;
	}
	spin_unlock_irqrestore(phba->host->host_lock, flags);

	if (skip_post) {
		mdelay(100);
	} else {
		mdelay(2000);
	}

	spin_lock_irqsave(phba->host->host_lock, flags);
	/* Cleanup preposted buffers on the ELS ring */
	pring = &phba->sli.ring[LPFC_ELS_RING];
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		list_del(&mp->list);
		pring->postbufq_cnt--;
		lpfc_mbuf_free(phba, mp->virt, mp->phys);
		kfree(mp);
	}
	spin_unlock_irqrestore(phba->host->host_lock, flags);

	for (i = 0; i < phba->sli.num_rings; i++)
		lpfc_sli_abort_iocb_ring(phba, &phba->sli.ring[i]);

	return 0;
}
static int
lpfc_sli_chipset_init(struct lpfc_hba *phba)
{
	uint32_t status, i = 0;

	/* Read the HBA Host Status Register */
	status = readl(phba->HSregaddr);

	/* Check status register to see what current state is */
	i = 0;
	while ((status & (HS_FFRDY | HS_MBRDY)) != (HS_FFRDY | HS_MBRDY)) {

		/* Check every 100ms for 5 retries, then every 500ms for 5, then
		 * every 2.5 sec for 5, then reset board and every 2.5 sec for
		 * 4.
		 */
		if (i++ >= 20) {
			/* Adapter failed to init, timeout, status reg
			   <status> */
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_INIT,
					"%d:0436 Adapter failed to init, "
					"timeout, status reg x%x\n",
					phba->brd_no,
					status);
			phba->hba_state = LPFC_HBA_ERROR;
			return -ETIMEDOUT;
		}

		/* Check to see if any errors occurred during init */
		if (status & HS_FFERM) {
			/* ERROR: During chipset initialization */
			/* Adapter failed to init, chipset, status reg
			   <status> */
			lpfc_printf_log(phba,
					KERN_ERR,
					LOG_INIT,
					"%d:0437 Adapter failed to init, "
					"chipset, status reg x%x\n",
					phba->brd_no,
					status);
			phba->hba_state = LPFC_HBA_ERROR;
			return -EIO;
		}

		if (i <= 5) {
			msleep(100);
		} else if (i <= 10) {
			msleep(500);
		} else {
			msleep(2500);
		}

		if (i == 15) {
			/* Do post */
			lpfc_sli_brdreset(phba, 0);
		}
		/* Read the HBA Host Status Register */
		status = readl(phba->HSregaddr);
	}

	/* Check to see if any errors occurred during init */
	if (status & HS_FFERM) {
		/* ERROR: During chipset initialization */
		/* Adapter failed to init, chipset, status reg <status> */
		lpfc_printf_log(phba,
				KERN_ERR,
				LOG_INIT,
				"%d:0438 Adapter failed to init, chipset, "
				"status reg x%x\n",
				phba->brd_no,
				status);
		phba->hba_state = LPFC_HBA_ERROR;
		return -EIO;
	}

	/* Clear all interrupt enable conditions */
	writel(0, phba->HCregaddr);
	readl(phba->HCregaddr); /* flush */

	/* setup host attn register */
	writel(0xffffffff, phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	return 0;
}
int
lpfc_sli_hba_setup(struct lpfc_hba * phba)
{
	LPFC_MBOXQ_t *pmb;
	uint32_t resetcount = 0, rc = 0, done = 0;

	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
	if (!pmb) {
		phba->hba_state = LPFC_HBA_ERROR;
		return -ENOMEM;
	}

	while (resetcount < 2 && !done) {
		phba->hba_state = 0;
		lpfc_sli_brdreset(phba, 0);

		rc = lpfc_sli_chipset_init(phba);
		if (rc)
			break;

		resetcount++;

		/* Call pre CONFIG_PORT mailbox command initialization. A value of 0
		 * means the call was successful. Any other nonzero value is a failure,
		 * but if ERESTART is returned, the driver may reset the HBA and try
		 * again.
		 */
		rc = lpfc_config_port_prep(phba);
		if (rc == -ERESTART) {
			phba->hba_state = 0;
			continue;
		} else if (rc) {
			break;
		}

		phba->hba_state = LPFC_INIT_MBX_CMDS;
		lpfc_config_port(phba, pmb);
		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
		if (rc == MBX_SUCCESS)
			done = 1;
		else {
			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"%d:0442 Adapter failed to init, mbxCmd x%x "
				"CONFIG_PORT, mbxStatus x%x Data: x%x\n",
				phba->brd_no, pmb->mb.mbxCommand,
				pmb->mb.mbxStatus, 0);
			phba->sli.sli_flag &= ~LPFC_SLI2_ACTIVE;
		}
	}
	if (!done)
		goto lpfc_sli_hba_setup_error;

	rc = lpfc_sli_ring_map(phba, pmb);
	if (rc)
		goto lpfc_sli_hba_setup_error;

	phba->sli.sli_flag |= LPFC_PROCESS_LA;

	rc = lpfc_config_port_post(phba);
	if (rc)
		goto lpfc_sli_hba_setup_error;

	goto lpfc_sli_hba_setup_exit;
lpfc_sli_hba_setup_error:
	phba->hba_state = LPFC_HBA_ERROR;
lpfc_sli_hba_setup_exit:
	mempool_free(pmb, phba->mbox_mem_pool);
	return rc;
}
static void
lpfc_mbox_abort(struct lpfc_hba * phba)
{
	LPFC_MBOXQ_t *pmbox;
	MAILBOX_t *mb;

	if (phba->sli.mbox_active) {
		del_timer_sync(&phba->sli.mbox_tmo);
		phba->work_hba_events &= ~WORKER_MBOX_TMO;
		pmbox = phba->sli.mbox_active;
		mb = &pmbox->mb;
		phba->sli.mbox_active = NULL;
		if (pmbox->mbox_cmpl) {
			mb->mbxStatus = MBX_NOT_FINISHED;
			(pmbox->mbox_cmpl) (phba, pmbox);
		}
		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	}

	/* Abort all the non active mailbox commands. */
	spin_lock_irq(phba->host->host_lock);
	pmbox = lpfc_mbox_get(phba);
	while (pmbox) {
		mb = &pmbox->mb;
		if (pmbox->mbox_cmpl) {
			mb->mbxStatus = MBX_NOT_FINISHED;
			spin_unlock_irq(phba->host->host_lock);
			(pmbox->mbox_cmpl) (phba, pmbox);
			spin_lock_irq(phba->host->host_lock);
		}
		pmbox = lpfc_mbox_get(phba);
	}
	spin_unlock_irq(phba->host->host_lock);
	return;
}
/*! lpfc_mbox_timeout
 *
 * \param hba Pointer to per struct lpfc_hba structure
 * \param l1  Pointer to the driver's mailbox queue.
 *
 * \b Description:
 * This routine handles mailbox timeout events at timer interrupt context.
 */
void
lpfc_mbox_timeout(unsigned long ptr)
{
	struct lpfc_hba *phba;
	unsigned long iflag;

	phba = (struct lpfc_hba *)ptr;
	spin_lock_irqsave(phba->host->host_lock, iflag);
	if (!(phba->work_hba_events & WORKER_MBOX_TMO)) {
		phba->work_hba_events |= WORKER_MBOX_TMO;
		if (phba->work_wait)
			wake_up(phba->work_wait);
	}
	spin_unlock_irqrestore(phba->host->host_lock, iflag);
}
void
lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
{
	LPFC_MBOXQ_t *pmbox;
	MAILBOX_t *mb;

	spin_lock_irq(phba->host->host_lock);
	if (!(phba->work_hba_events & WORKER_MBOX_TMO)) {
		spin_unlock_irq(phba->host->host_lock);
		return;
	}

	phba->work_hba_events &= ~WORKER_MBOX_TMO;

	pmbox = phba->sli.mbox_active;
	mb = &pmbox->mb;

	/* Mbox cmd <mbxCommand> timeout */
	lpfc_printf_log(phba,
		KERN_ERR,
		LOG_MBOX | LOG_SLI,
		"%d:0310 Mailbox command x%x timeout Data: x%x x%x x%p\n",
		phba->brd_no,
		mb->mbxCommand,
		phba->hba_state,
		phba->sli.sli_flag,
		phba->sli.mbox_active);

	phba->sli.mbox_active = NULL;
	if (pmbox->mbox_cmpl) {
		mb->mbxStatus = MBX_NOT_FINISHED;
		spin_unlock_irq(phba->host->host_lock);
		(pmbox->mbox_cmpl) (phba, pmbox);
		spin_lock_irq(phba->host->host_lock);
	}
	phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;

	spin_unlock_irq(phba->host->host_lock);
	lpfc_mbox_abort(phba);
	return;
}
int
lpfc_sli_issue_mbox(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmbox, uint32_t flag)
{
	MAILBOX_t *mb;
	struct lpfc_sli *psli;
	uint32_t status, evtctr;
	uint32_t ha_copy;
	int i;
	unsigned long drvr_flag = 0;
	volatile uint32_t word0, ldata;
	void __iomem *to_slim;

	psli = &phba->sli;

	spin_lock_irqsave(phba->host->host_lock, drvr_flag);

	mb = &pmbox->mb;
	status = MBX_SUCCESS;

	if (psli->sli_flag & LPFC_SLI_MBOX_ACTIVE) {
		/* Polling for a mbox command when another one is already active
		 * is not allowed in SLI. Also, the driver must have established
		 * SLI2 mode to queue and process multiple mbox commands.
		 */
		if (flag & MBX_POLL) {
			spin_unlock_irqrestore(phba->host->host_lock,
					       drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
			return (MBX_NOT_FINISHED);
		}

		if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
			spin_unlock_irqrestore(phba->host->host_lock,
					       drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag)
			return (MBX_NOT_FINISHED);
		}

		/* Handle STOP IOCB processing flag. This is only meaningful
		 * if we are not polling for mbox completion.
		 */
		if (flag & MBX_STOP_IOCB) {
			flag &= ~MBX_STOP_IOCB;
			/* Now flag each ring */
			for (i = 0; i < psli->num_rings; i++) {
				/* If the ring is active, flag it */
				if (psli->ring[i].cmdringaddr) {
					psli->ring[i].flag |=
						LPFC_STOP_IOCB_MBX;
				}
			}
		}

		/* Another mailbox command is still being processed, queue this
		 * command to be processed later.
		 */
		lpfc_mbox_put(phba, pmbox);

		/* Mbox cmd issue - BUSY */
		lpfc_printf_log(phba,
			KERN_INFO,
			LOG_MBOX | LOG_SLI,
			"%d:0308 Mbox cmd issue - BUSY Data: x%x x%x x%x x%x\n",
			phba->brd_no,
			mb->mbxCommand,
			phba->hba_state,
			psli->sli_flag,
			flag);

		psli->slistat.mbox_busy++;
		spin_unlock_irqrestore(phba->host->host_lock,
				       drvr_flag);

		return (MBX_BUSY);
	}

	/* Handle STOP IOCB processing flag. This is only meaningful
	 * if we are not polling for mbox completion.
	 */
	if (flag & MBX_STOP_IOCB) {
		flag &= ~MBX_STOP_IOCB;
		if (flag == MBX_NOWAIT) {
			/* Now flag each ring */
			for (i = 0; i < psli->num_rings; i++) {
				/* If the ring is active, flag it */
				if (psli->ring[i].cmdringaddr) {
					psli->ring[i].flag |=
						LPFC_STOP_IOCB_MBX;
				}
			}
		}
	}

	psli->sli_flag |= LPFC_SLI_MBOX_ACTIVE;

	/* If we are not polling, we MUST be in SLI2 mode */
	if (flag != MBX_POLL) {
		if (!(psli->sli_flag & LPFC_SLI2_ACTIVE)) {
			psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
			spin_unlock_irqrestore(phba->host->host_lock,
					       drvr_flag);
			/* Mbox command <mbxCommand> cannot issue */
			LOG_MBOX_CANNOT_ISSUE_DATA( phba, mb, psli, flag);
			return (MBX_NOT_FINISHED);
		}
		/* timeout active mbox command */
		mod_timer(&psli->mbox_tmo, jiffies + HZ * LPFC_MBOX_TMO);
	}

	/* Mailbox cmd <cmd> issue */
	lpfc_printf_log(phba,
		KERN_INFO,
		LOG_MBOX | LOG_SLI,
		"%d:0309 Mailbox cmd x%x issue Data: x%x x%x x%x\n",
		phba->brd_no,
		mb->mbxCommand,
		phba->hba_state,
		psli->sli_flag,
		flag);

	psli->slistat.mbox_cmd++;
	evtctr = psli->slistat.mbox_event;

	/* next set own bit for the adapter and copy over command word */
	mb->mbxOwner = OWN_CHIP;

	if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
		/* First copy command data to host SLIM area */
		lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx, MAILBOX_CMD_SIZE);
	} else {
		if (mb->mbxCommand == MBX_CONFIG_PORT) {
			/* copy command data into host mbox for cmpl */
			lpfc_sli_pcimem_bcopy(mb, &phba->slim2p->mbx,
					      MAILBOX_CMD_SIZE);
		}

		/* First copy mbox command data to HBA SLIM, skip past first
		   word */
		to_slim = phba->MBslimaddr + sizeof (uint32_t);
		lpfc_memcpy_to_slim(to_slim, &mb->un.varWords[0],
				    MAILBOX_CMD_SIZE - sizeof (uint32_t));

		/* Next copy over first word, with mbxOwner set */
		ldata = *((volatile uint32_t *)mb);
		to_slim = phba->MBslimaddr;
		writel(ldata, to_slim);
		readl(to_slim); /* flush */

		if (mb->mbxCommand == MBX_CONFIG_PORT) {
			/* switch over to host mailbox */
			psli->sli_flag |= LPFC_SLI2_ACTIVE;
		}
	}

	/* interrupt board to doit right away */
	writel(CA_MBATT, phba->CAregaddr);
	readl(phba->CAregaddr); /* flush */

	switch (flag) {
	case MBX_NOWAIT:
		/* Don't wait for it to finish, just return */
		psli->mbox_active = pmbox;
		break;

	case MBX_POLL:
		i = 0;
		psli->mbox_active = NULL;
		if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
			/* First read mbox status word */
			word0 = *((volatile uint32_t *)&phba->slim2p->mbx);
			word0 = le32_to_cpu(word0);
		} else {
			/* First read mbox status word */
			word0 = readl(phba->MBslimaddr);
		}

		/* Read the HBA Host Attention Register */
		ha_copy = readl(phba->HAregaddr);

		/* Wait for command to complete */
		while (((word0 & OWN_CHIP) == OWN_CHIP)
		       || !(ha_copy & HA_MBATT)) {
			if (i++ >= 100) {
				psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
				spin_unlock_irqrestore(phba->host->host_lock,
						       drvr_flag);
				return (MBX_NOT_FINISHED);
			}

			/* Check if we took a mbox interrupt while we were
			   polling */
			if (((word0 & OWN_CHIP) != OWN_CHIP)
			    && (evtctr != psli->slistat.mbox_event))
				break;

			spin_unlock_irqrestore(phba->host->host_lock,
					       drvr_flag);

			/* Can be in interrupt context, do not sleep */
			/* (or might be called with interrupts disabled) */
			mdelay(i);

			spin_lock_irqsave(phba->host->host_lock, drvr_flag);

			if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
				/* First copy command data */
				word0 = *((volatile uint32_t *)
					  &phba->slim2p->mbx);
				word0 = le32_to_cpu(word0);
				if (mb->mbxCommand == MBX_CONFIG_PORT) {
					MAILBOX_t *slimmb;
					volatile uint32_t slimword0;
					/* Check real SLIM for any errors */
					slimword0 = readl(phba->MBslimaddr);
					slimmb = (MAILBOX_t *) & slimword0;
					if (((slimword0 & OWN_CHIP) != OWN_CHIP)
					    && slimmb->mbxStatus) {
						psli->sli_flag &=
							~LPFC_SLI2_ACTIVE;
						word0 = slimword0;
					}
				}
			} else {
				/* First copy command data */
				word0 = readl(phba->MBslimaddr);
			}
			/* Read the HBA Host Attention Register */
			ha_copy = readl(phba->HAregaddr);
		}

		if (psli->sli_flag & LPFC_SLI2_ACTIVE) {
			/* copy results back to user */
			lpfc_sli_pcimem_bcopy(&phba->slim2p->mbx, mb,
					      MAILBOX_CMD_SIZE);
		} else {
			/* First copy command data */
			lpfc_memcpy_from_slim(mb, phba->MBslimaddr,
					      MAILBOX_CMD_SIZE);
			if ((mb->mbxCommand == MBX_DUMP_MEMORY) &&
			    pmbox->context2) {
				lpfc_memcpy_from_slim((void *)pmbox->context2,
					phba->MBslimaddr + DMP_RSP_OFFSET,
					mb->un.varDmp.word_cnt);
			}
		}

		writel(HA_MBATT, phba->HAregaddr);
		readl(phba->HAregaddr); /* flush */

		psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
		status = mb->mbxStatus;
		break;
	}

	spin_unlock_irqrestore(phba->host->host_lock, drvr_flag);
	return (status);
}
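
/*
 * Minimal caller sketch (assumed usage, mirroring lpfc_sli_ring_map()
 * above): allocate a mailbox from the pool, build the command, poll for
 * completion, and treat anything but MBX_SUCCESS as an adapter error:
 *
 *	LPFC_MBOXQ_t *pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 *	lpfc_config_ring(phba, ringno, pmb);
 *	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS)
 *		phba->hba_state = LPFC_HBA_ERROR;
 *	mempool_free(pmb, phba->mbox_mem_pool);
 */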
static int
lpfc_sli_ringtx_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
		    struct lpfc_iocbq * piocb)
{
	/* Insert the caller's iocb in the txq tail for later processing. */
	list_add_tail(&piocb->list, &pring->txq);
	pring->txq_cnt++;
	return 0;
}
static struct lpfc_iocbq *
lpfc_sli_next_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		   struct lpfc_iocbq ** piocb)
{
	struct lpfc_iocbq * nextiocb;

	nextiocb = lpfc_sli_ringtx_get(phba, pring);
	if (!nextiocb) {
		nextiocb = *piocb;
		*piocb = NULL;
	}

	return nextiocb;
}
int
lpfc_sli_issue_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		    struct lpfc_iocbq *piocb, uint32_t flag)
{
	struct lpfc_iocbq *nextiocb;
	IOCB_t *iocb;

	/*
	 * We should never get an IOCB if we are in a < LINK_DOWN state
	 */
	if (unlikely(phba->hba_state < LPFC_LINK_DOWN))
		return IOCB_ERROR;

	/*
	 * Check to see if we are blocking IOCB processing because of an
	 * outstanding mbox command.
	 */
	if (unlikely(pring->flag & LPFC_STOP_IOCB_MBX))
		goto iocb_busy;

	if (unlikely(phba->hba_state == LPFC_LINK_DOWN)) {
		/*
		 * Only CREATE_XRI, CLOSE_XRI, ABORT_XRI, and QUE_RING_BUF
		 * can be issued if the link is not up.
		 */
		switch (piocb->iocb.ulpCommand) {
		case CMD_QUE_RING_BUF_CN:
		case CMD_QUE_RING_BUF64_CN:
			/*
			 * For IOCBs, like QUE_RING_BUF, that have no rsp ring
			 * completion, iocb_cmpl MUST be 0.
			 */
			if (piocb->iocb_cmpl)
				piocb->iocb_cmpl = NULL;
			/*FALLTHROUGH*/
		case CMD_CREATE_XRI_CR:
			break;
		default:
			goto iocb_busy;
		}

	/*
	 * For FCP commands, we must be in a state where we can process link
	 * attention events.
	 */
	} else if (unlikely(pring->ringno == phba->sli.fcp_ring &&
			    !(phba->sli.sli_flag & LPFC_PROCESS_LA)))
		goto iocb_busy;

	/*
	 * Check to see if this is a high priority command.
	 * If so bypass tx queue processing.
	 */
	if (unlikely((flag & SLI_IOCB_HIGH_PRIORITY) &&
		     (iocb = lpfc_sli_next_iocb_slot(phba, pring)))) {
		lpfc_sli_submit_iocb(phba, pring, iocb, piocb);
		piocb = NULL;
	}

	while ((iocb = lpfc_sli_next_iocb_slot(phba, pring)) &&
	       (nextiocb = lpfc_sli_next_iocb(phba, pring, &piocb)))
		lpfc_sli_submit_iocb(phba, pring, iocb, nextiocb);

	if (iocb)
		lpfc_sli_update_ring(phba, pring);
	else
		lpfc_sli_update_full_ring(phba, pring);

	if (!piocb)
		return IOCB_SUCCESS;

	goto out_busy;

 iocb_busy:
	pring->stats.iocb_cmd_delay++;

 out_busy:

	if (!(flag & SLI_IOCB_RET_IOCB)) {
		lpfc_sli_ringtx_put(phba, pring, piocb);
		return IOCB_SUCCESS;
	}

	return IOCB_BUSY;
}
int
lpfc_sli_setup(struct lpfc_hba *phba)
{
	int i, totiocb = 0;
	struct lpfc_sli *psli = &phba->sli;
	struct lpfc_sli_ring *pring;

	psli->num_rings = MAX_CONFIGURED_RINGS;
	psli->sli_flag = 0;
	psli->fcp_ring = LPFC_FCP_RING;
	psli->next_ring = LPFC_FCP_NEXT_RING;
	psli->ip_ring = LPFC_IP_RING;

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		switch (i) {
		case LPFC_FCP_RING:	/* ring 0 - FCP */
			/* numCiocb and numRiocb are used in config_port */
			pring->numCiocb = SLI2_IOCB_CMD_R0_ENTRIES;
			pring->numRiocb = SLI2_IOCB_RSP_R0_ENTRIES;
			pring->numCiocb += SLI2_IOCB_CMD_R1XTRA_ENTRIES;
			pring->numRiocb += SLI2_IOCB_RSP_R1XTRA_ENTRIES;
			pring->numCiocb += SLI2_IOCB_CMD_R3XTRA_ENTRIES;
			pring->numRiocb += SLI2_IOCB_RSP_R3XTRA_ENTRIES;
			pring->iotag_ctr = 0;
			pring->iotag_max =
				(phba->cfg_hba_queue_depth * 2);
			pring->fast_iotag = pring->iotag_max;
			pring->num_mask = 0;
			break;
		case LPFC_IP_RING:	/* ring 1 - IP */
			/* numCiocb and numRiocb are used in config_port */
			pring->numCiocb = SLI2_IOCB_CMD_R1_ENTRIES;
			pring->numRiocb = SLI2_IOCB_RSP_R1_ENTRIES;
			pring->num_mask = 0;
			break;
		case LPFC_ELS_RING:	/* ring 2 - ELS / CT */
			/* numCiocb and numRiocb are used in config_port */
			pring->numCiocb = SLI2_IOCB_CMD_R2_ENTRIES;
			pring->numRiocb = SLI2_IOCB_RSP_R2_ENTRIES;
			pring->fast_iotag = 0;
			pring->iotag_ctr = 0;
			pring->iotag_max = 4096;
			pring->num_mask = 4;
			pring->prt[0].profile = 0;	/* Mask 0 */
			pring->prt[0].rctl = FC_ELS_REQ;
			pring->prt[0].type = FC_ELS_DATA;
			pring->prt[0].lpfc_sli_rcv_unsol_event =
				lpfc_els_unsol_event;
			pring->prt[1].profile = 0;	/* Mask 1 */
			pring->prt[1].rctl = FC_ELS_RSP;
			pring->prt[1].type = FC_ELS_DATA;
			pring->prt[1].lpfc_sli_rcv_unsol_event =
				lpfc_els_unsol_event;
			pring->prt[2].profile = 0;	/* Mask 2 */
			/* NameServer Inquiry */
			pring->prt[2].rctl = FC_UNSOL_CTL;
			pring->prt[2].type = FC_COMMON_TRANSPORT_ULP;
			pring->prt[2].lpfc_sli_rcv_unsol_event =
				lpfc_ct_unsol_event;
			pring->prt[3].profile = 0;	/* Mask 3 */
			/* NameServer response */
			pring->prt[3].rctl = FC_SOL_CTL;
			pring->prt[3].type = FC_COMMON_TRANSPORT_ULP;
			pring->prt[3].lpfc_sli_rcv_unsol_event =
				lpfc_ct_unsol_event;
			break;
		}
		totiocb += (pring->numCiocb + pring->numRiocb);
	}
	if (totiocb > MAX_SLI2_IOCB) {
		/* Too many cmd / rsp ring entries in SLI2 SLIM */
		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
				"%d:0462 Too many cmd / rsp ring entries in "
				"SLI2 SLIM Data: x%x x%x\n",
				phba->brd_no, totiocb, MAX_SLI2_IOCB);
	}

	return 0;
}
int
lpfc_sli_queue_setup(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	int i, cnt;

	psli = &phba->sli;
	spin_lock_irq(phba->host->host_lock);
	INIT_LIST_HEAD(&psli->mboxq);
	/* Initialize list headers for txq and txcmplq as double linked lists */
	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		pring->ringno = i;
		pring->next_cmdidx = 0;
		pring->local_getidx = 0;
		pring->cmdidx = 0;
		INIT_LIST_HEAD(&pring->txq);
		INIT_LIST_HEAD(&pring->txcmplq);
		INIT_LIST_HEAD(&pring->iocb_continueq);
		INIT_LIST_HEAD(&pring->postbufq);
		cnt = pring->fast_iotag;
		spin_unlock_irq(phba->host->host_lock);
		if (cnt) {
			pring->fast_lookup =
				kmalloc(cnt * sizeof (struct lpfc_iocbq *),
					GFP_KERNEL);
			if (pring->fast_lookup == NULL) {
				return (0);
			}
			memset((char *)pring->fast_lookup, 0,
			       cnt * sizeof (struct lpfc_iocbq *));
		}
		spin_lock_irq(phba->host->host_lock);
	}
	spin_unlock_irq(phba->host->host_lock);
	return (1);
}
int
lpfc_sli_hba_down(struct lpfc_hba * phba)
{
	struct lpfc_sli *psli;
	struct lpfc_sli_ring *pring;
	LPFC_MBOXQ_t *pmb;
	struct lpfc_iocbq *iocb, *next_iocb;
	IOCB_t *icmd = NULL;
	int i;
	unsigned long flags = 0;

	psli = &phba->sli;
	lpfc_hba_down_prep(phba);

	spin_lock_irqsave(phba->host->host_lock, flags);

	for (i = 0; i < psli->num_rings; i++) {
		pring = &psli->ring[i];
		pring->flag |= LPFC_DEFERRED_RING_EVENT;

		/*
		 * Error everything on the txq since these iocbs have not been
		 * given to the FW yet.
		 */
		pring->txq_cnt = 0;

		list_for_each_entry_safe(iocb, next_iocb, &pring->txq, list) {
			list_del_init(&iocb->list);
			if (iocb->iocb_cmpl) {
				icmd = &iocb->iocb;
				icmd->ulpStatus = IOSTAT_LOCAL_REJECT;
				icmd->un.ulpWord[4] = IOERR_SLI_DOWN;
				spin_unlock_irqrestore(phba->host->host_lock,
						       flags);
				(iocb->iocb_cmpl) (phba, iocb, iocb);
				spin_lock_irqsave(phba->host->host_lock, flags);
			} else {
				list_add_tail(&iocb->list,
					      &phba->lpfc_iocb_list);
			}
		}

		INIT_LIST_HEAD(&(pring->txq));

		if (pring->fast_lookup) {
			kfree(pring->fast_lookup);
			pring->fast_lookup = NULL;
		}
	}

	spin_unlock_irqrestore(phba->host->host_lock, flags);

	/* Return any active mbox cmds */
	del_timer_sync(&psli->mbox_tmo);
	spin_lock_irqsave(phba->host->host_lock, flags);
	phba->work_hba_events &= ~WORKER_MBOX_TMO;
	if (psli->mbox_active) {
		pmb = psli->mbox_active;
		pmb->mb.mbxStatus = MBX_NOT_FINISHED;
		if (pmb->mbox_cmpl) {
			spin_unlock_irqrestore(phba->host->host_lock, flags);
			pmb->mbox_cmpl(phba,pmb);
			spin_lock_irqsave(phba->host->host_lock, flags);
		}
	}
	psli->sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
	psli->mbox_active = NULL;

	/* Return any pending mbox cmds */
	while ((pmb = lpfc_mbox_get(phba)) != NULL) {
		pmb->mb.mbxStatus = MBX_NOT_FINISHED;
		if (pmb->mbox_cmpl) {
			spin_unlock_irqrestore(phba->host->host_lock, flags);
			pmb->mbox_cmpl(phba,pmb);
			spin_lock_irqsave(phba->host->host_lock, flags);
		}
	}

	INIT_LIST_HEAD(&psli->mboxq);

	spin_unlock_irqrestore(phba->host->host_lock, flags);

	/*
	 * Provided the hba is not in an error state, reset it.  It is not
	 * capable of IO anymore.
	 */
	if (phba->hba_state != LPFC_HBA_ERROR) {
		phba->hba_state = LPFC_INIT_START;
		lpfc_sli_brdreset(phba, 1);
	}

	return 1;
}
void
lpfc_sli_pcimem_bcopy(void *srcp, void *destp, uint32_t cnt)
{
	uint32_t *src = srcp;
	uint32_t *dest = destp;
	uint32_t ldata;
	int i;

	for (i = 0; i < (int)cnt; i += sizeof (uint32_t)) {
		ldata = *src;
		ldata = le32_to_cpu(ldata);
		*dest = ldata;
		src++;
		dest++;
	}
}
int
lpfc_sli_ringpostbuf_put(struct lpfc_hba * phba, struct lpfc_sli_ring * pring,
			 struct lpfc_dmabuf * mp)
{
	/* Stick struct lpfc_dmabuf at end of postbufq so driver can look it up
	   later */
	list_add_tail(&mp->list, &pring->postbufq);

	pring->postbufq_cnt++;
	return 0;
}
struct lpfc_dmabuf *
lpfc_sli_ringpostbuf_get(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
			 dma_addr_t phys)
{
	struct lpfc_dmabuf *mp, *next_mp;
	struct list_head *slp = &pring->postbufq;

	/* Search postbufq, from the beginning, looking for a match on phys */
	list_for_each_entry_safe(mp, next_mp, &pring->postbufq, list) {
		if (mp->phys == phys) {
			list_del_init(&mp->list);
			pring->postbufq_cnt--;
			return mp;
		}
	}

	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
			"%d:0410 Cannot find virtual addr for mapped buf on "
			"ring %d Data x%llx x%p x%p x%x\n",
			phba->brd_no, pring->ringno, (unsigned long long)phys,
			slp->next, slp->prev, pring->postbufq_cnt);
	return NULL;
}
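
/*
 * Usage sketch for lpfc_sli_ringpostbuf_get() above (illustrative, not
 * from the original source): an unsolicited-event handler would rebuild
 * the 64-bit DMA address from the receive IOCB and look the buffer up,
 * e.g.
 *
 *	mp = lpfc_sli_ringpostbuf_get(phba, pring,
 *			getPaddr(icmd->un.cont64[0].addrHigh,
 *				 icmd->un.cont64[0].addrLow));
 *
 * assuming getPaddr() is the driver's high/low-to-dma_addr_t helper.  A
 * NULL return means no matching buffer was ever posted, and the error
 * message above is logged.
 */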
static void
lpfc_sli_abort_elsreq_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
			   struct lpfc_iocbq * rspiocb)
{
	struct lpfc_dmabuf *buf_ptr, *buf_ptr1;

	/* Free the resources associated with the ELS_REQUEST64 IOCB the driver
	 * just aborted.
	 * In this case, context2 = cmd, context2->next = rsp, context3 = bpl.
	 */
	if (cmdiocb->context2) {
		buf_ptr1 = (struct lpfc_dmabuf *) cmdiocb->context2;

		/* Free the response IOCB before completing the abort
		   command. */
		buf_ptr = NULL;
		list_remove_head((&buf_ptr1->list), buf_ptr,
				 struct lpfc_dmabuf, list);
		if (buf_ptr) {
			lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
			kfree(buf_ptr);
		}
		lpfc_mbuf_free(phba, buf_ptr1->virt, buf_ptr1->phys);
		kfree(buf_ptr1);
	}

	if (cmdiocb->context3) {
		buf_ptr = (struct lpfc_dmabuf *) cmdiocb->context3;
		lpfc_mbuf_free(phba, buf_ptr->virt, buf_ptr->phys);
		kfree(buf_ptr);
	}

	list_add_tail(&cmdiocb->list, &phba->lpfc_iocb_list);
	return;
}
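
/*
 * lpfc_sli_issue_abort_iotag32 below asks the firmware to abort an
 * outstanding exchange identified by its 32-bit I/O tag (ABORT_MXRI64)
 * rather than by XRI.  Only CMD_ELS_REQUEST64_CR commands are handled;
 * for any other command the scratch iocbq goes straight back to the free
 * list and no abort is issued.
 */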
int
lpfc_sli_issue_abort_iotag32(struct lpfc_hba * phba,
			     struct lpfc_sli_ring * pring,
			     struct lpfc_iocbq * cmdiocb)
{
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	struct lpfc_iocbq *abtsiocbp = NULL;
	IOCB_t *icmd = NULL;
	IOCB_t *iabt = NULL;

	/* issue ABTS for this IOCB based on iotag */
	list_remove_head(lpfc_iocb_list, abtsiocbp, struct lpfc_iocbq, list);
	if (abtsiocbp == NULL)
		return 0;
	memset(abtsiocbp, 0, sizeof (struct lpfc_iocbq));

	iabt = &abtsiocbp->iocb;
	icmd = &cmdiocb->iocb;
	switch (icmd->ulpCommand) {
	case CMD_ELS_REQUEST64_CR:
		/* Even though we abort the ELS command, the firmware may access
		 * the BPL or other resources before it processes our
		 * ABORT_MXRI64.  Thus we must delay reusing the cmdiocb
		 * resources till the actual abort request completes.
		 */
		abtsiocbp->context1 = (void *)((unsigned long)icmd->ulpCommand);
		abtsiocbp->context2 = cmdiocb->context2;
		abtsiocbp->context3 = cmdiocb->context3;
		cmdiocb->context2 = NULL;
		cmdiocb->context3 = NULL;
		abtsiocbp->iocb_cmpl = lpfc_sli_abort_elsreq_cmpl;
		break;
	default:
		list_add_tail(&abtsiocbp->list, lpfc_iocb_list);
		return 0;
	}

	iabt->un.amxri.abortType = ABORT_TYPE_ABTS;
	iabt->un.amxri.iotag32 = icmd->un.elsreq64.bdl.ulpIoTag32;

	iabt->ulpLe = 1;
	iabt->ulpClass = CLASS3;
	iabt->ulpCommand = CMD_ABORT_MXRI64_CN;

	if (lpfc_sli_issue_iocb(phba, pring, abtsiocbp, 0) == IOCB_ERROR) {
		list_add_tail(&abtsiocbp->list, lpfc_iocb_list);
		return 0;
	}

	return 1;
}
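
/*
 * lpfc_sli_validate_iocb_cmd below is the match predicate shared by the
 * sum and abort walkers: it returns 0 when the command matches the
 * caller's criteria and nonzero otherwise.  LPFC_CTX_LUN matches on
 * target id plus LUN, LPFC_CTX_TGT on target id alone, LPFC_CTX_CTX on
 * the iocb's ulpContext, and LPFC_CTX_HOST matches every command.
 */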
static int
lpfc_sli_validate_iocb_cmd(struct lpfc_scsi_buf *lpfc_cmd, uint16_t tgt_id,
			   uint64_t lun_id, struct lpfc_iocbq *iocb,
			   uint32_t ctx, lpfc_ctx_cmd ctx_cmd)
{
	int rc = 1;

	if (lpfc_cmd == NULL)
		return rc;

	switch (ctx_cmd) {
	case LPFC_CTX_LUN:
		if ((lpfc_cmd->pCmd->device->id == tgt_id) &&
		    (lpfc_cmd->pCmd->device->lun == lun_id))
			rc = 0;
		break;
	case LPFC_CTX_TGT:
		if (lpfc_cmd->pCmd->device->id == tgt_id)
			rc = 0;
		break;
	case LPFC_CTX_CTX:
		if (iocb->iocb.ulpContext == ctx)
			rc = 0;
		break;
	case LPFC_CTX_HOST:
		rc = 0;
		break;
	default:
		printk(KERN_ERR "%s: Unknown context cmd type, value %d\n",
		       __FUNCTION__, ctx_cmd);
		break;
	}

	return rc;
}
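
/*
 * Usage sketch for lpfc_sli_sum_iocb() below (illustrative, not from the
 * original source): an error handler can poll until the outstanding FCP
 * commands for a just-reset LUN have drained, e.g.
 *
 *	while (lpfc_sli_sum_iocb(phba, pring, tgt_id, lun_id, LPFC_CTX_LUN))
 *		msleep(10);
 *
 * Note the routine only walks the txcmplq, i.e. commands already handed
 * to the firmware.
 */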
int
lpfc_sli_sum_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		  uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd ctx_cmd)
{
	struct lpfc_iocbq *iocb, *next_iocb;
	IOCB_t *cmd = NULL;
	struct lpfc_scsi_buf *lpfc_cmd;
	int sum = 0, ret_val = 0;

	/* Next check the txcmplq */
	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		cmd = &iocb->iocb;

		/* Must be a FCP command */
		if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
		    (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
		    (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
			continue;
		}

		/* context1 MUST be a struct lpfc_scsi_buf */
		lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
		ret_val = lpfc_sli_validate_iocb_cmd(lpfc_cmd, tgt_id, lun_id,
						     NULL, 0, ctx_cmd);
		if (ret_val != 0)
			continue;
		sum++;
	}
	return sum;
}
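
/*
 * lpfc_sli_abort_fcp_cmpl below is the completion handler for the ABTS
 * iocbs issued by lpfc_sli_abort_iocb(); its only job is to return the
 * abort iocbq to the driver's free list under the host lock.
 */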
void
lpfc_sli_abort_fcp_cmpl(struct lpfc_hba * phba, struct lpfc_iocbq * cmdiocb,
			struct lpfc_iocbq * rspiocb)
{
	spin_lock_irq(phba->host->host_lock);
	list_add_tail(&cmdiocb->list, &phba->lpfc_iocb_list);
	spin_unlock_irq(phba->host->host_lock);
	return;
}
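
/*
 * lpfc_sli_abort_iocb below walks the ring's txcmplq and aborts every
 * FCP command matching the tgt/lun/ctx criteria.  With the link up it
 * issues CMD_ABORT_XRI_CN so the exchange is aborted on the wire via
 * ABTS; with the link down CMD_CLOSE_XRI_CN is enough, since the
 * exchange only needs local cleanup.  The return value is the number of
 * commands that could not be aborted.
 */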
int
lpfc_sli_abort_iocb(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
		    uint16_t tgt_id, uint64_t lun_id, uint32_t ctx,
		    lpfc_ctx_cmd abort_cmd)
{
	struct lpfc_iocbq *iocb, *next_iocb;
	struct lpfc_iocbq *abtsiocb = NULL;
	struct list_head *lpfc_iocb_list = &phba->lpfc_iocb_list;
	IOCB_t *cmd = NULL;
	struct lpfc_scsi_buf *lpfc_cmd;
	int errcnt = 0, ret_val = 0;

	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list) {
		cmd = &iocb->iocb;

		/* Must be a FCP command */
		if ((cmd->ulpCommand != CMD_FCP_ICMND64_CR) &&
		    (cmd->ulpCommand != CMD_FCP_IWRITE64_CR) &&
		    (cmd->ulpCommand != CMD_FCP_IREAD64_CR)) {
			continue;
		}

		/* context1 MUST be a struct lpfc_scsi_buf */
		lpfc_cmd = (struct lpfc_scsi_buf *) (iocb->context1);
		ret_val = lpfc_sli_validate_iocb_cmd(lpfc_cmd, tgt_id, lun_id,
						     iocb, ctx, abort_cmd);
		if (ret_val != 0)
			continue;

		/* issue ABTS for this IOCB based on iotag */
		list_remove_head(lpfc_iocb_list, abtsiocb, struct lpfc_iocbq,
				 list);
		if (abtsiocb == NULL) {
			errcnt++;
			continue;
		}
		memset(abtsiocb, 0, sizeof (struct lpfc_iocbq));

		abtsiocb->iocb.un.acxri.abortType = ABORT_TYPE_ABTS;
		abtsiocb->iocb.un.acxri.abortContextTag = cmd->ulpContext;
		abtsiocb->iocb.un.acxri.abortIoTag = cmd->ulpIoTag;
		abtsiocb->iocb.ulpLe = 1;
		abtsiocb->iocb.ulpClass = cmd->ulpClass;

		if (phba->hba_state >= LPFC_LINK_UP)
			abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
		else
			abtsiocb->iocb.ulpCommand = CMD_CLOSE_XRI_CN;

		/* Setup callback routine and issue the command. */
		abtsiocb->iocb_cmpl = lpfc_sli_abort_fcp_cmpl;
		ret_val = lpfc_sli_issue_iocb(phba, pring, abtsiocb, 0);
		if (ret_val == IOCB_ERROR) {
			list_add_tail(&abtsiocb->list, lpfc_iocb_list);
			errcnt++;
			continue;
		}
	}

	return errcnt;
}
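
/*
 * The two routines below implement a polled "high priority" issue/wait
 * handshake: the completion side, running in interrupt context, copies
 * the response into the caller-supplied iocbq and sets LPFC_IO_HIPRI;
 * the issuing side drops the host lock and spins on that flag instead of
 * sleeping on a wait queue.
 */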
static void
lpfc_sli_wake_iocb_high_priority(struct lpfc_hba * phba,
				 struct lpfc_iocbq * queue1,
				 struct lpfc_iocbq * queue2)
{
	if (queue1->context2 && queue2)
		memcpy(queue1->context2, queue2, sizeof (struct lpfc_iocbq));

	/* The waiter is looking for LPFC_IO_HIPRI bit to be set
	   as a signal to wake up */
	queue1->iocb_flag |= LPFC_IO_HIPRI;
	return;
}
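
/*
 * Timing note for the polling loop below (worked example):
 * delay_time = ((timeout + 1) * 1000) >> 6 is the per-iteration sleep in
 * milliseconds, roughly (timeout + 1) * 1000 / 64, so 64 iterations wait
 * about (timeout + 1) seconds in total.  For a 30 second SCSI timeout
 * each poll sleeps ~484 ms and the loop gives up after ~31 seconds, one
 * second past the command timeout as the comment in the function
 * explains.
 */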
int
lpfc_sli_issue_iocb_wait_high_priority(struct lpfc_hba * phba,
				       struct lpfc_sli_ring * pring,
				       struct lpfc_iocbq * piocb,
				       uint32_t flag,
				       struct lpfc_iocbq * prspiocbq,
				       uint32_t timeout)
{
	int j, delay_time, retval = IOCB_ERROR;

	/* The caller must leave context1 empty. */
	if (piocb->context_un.hipri_wait_queue != 0) {
		return IOCB_ERROR;
	}

	/*
	 * If the caller has provided a response iocbq buffer, context2 must
	 * be NULL or it's an error.
	 */
	if (prspiocbq && piocb->context2) {
		return IOCB_ERROR;
	}

	piocb->context2 = prspiocbq;

	/* Setup callback routine and issue the command. */
	piocb->iocb_cmpl = lpfc_sli_wake_iocb_high_priority;
	retval = lpfc_sli_issue_iocb(phba, pring, piocb,
				     flag | SLI_IOCB_HIGH_PRIORITY);
	if (retval != IOCB_SUCCESS) {
		piocb->context2 = NULL;
		return IOCB_ERROR;
	}

	/*
	 * This high-priority iocb was sent out-of-band.  Poll for its
	 * completion rather than wait for a signal.  Note that the host_lock
	 * is held by the midlayer and must be released here to allow the
	 * interrupt handlers to complete the IO and signal this routine via
	 * the iocb_flag.
	 * Also, the delay_time is computed to be one second longer than
	 * the scsi command timeout to give the FW time to abort on
	 * timeout rather than the driver just giving up.  Typically,
	 * the midlayer does not specify a time for this command so the
	 * driver is free to enforce its own timeout.
	 */

	delay_time = ((timeout + 1) * 1000) >> 6;
	retval = IOCB_ERROR;
	spin_unlock_irq(phba->host->host_lock);
	for (j = 0; j < 64; j++) {
		msleep(delay_time);
		if (piocb->iocb_flag & LPFC_IO_HIPRI) {
			piocb->iocb_flag &= ~LPFC_IO_HIPRI;
			retval = IOCB_SUCCESS;
			break;
		}
	}

	spin_lock_irq(phba->host->host_lock);
	piocb->context2 = NULL;
	return retval;
}
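
/*
 * lpfc_sli_issue_mbox_wait below uses the classic prepare-to-wait
 * pattern: the task state is set to TASK_INTERRUPTIBLE and the waiter is
 * queued on done_q *before* the mailbox command is issued, so a
 * completion that fires between issue and schedule_timeout() still wakes
 * the sleeper instead of being lost.  context1 carries the wait queue
 * head to the wake-up callback.
 */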
int
lpfc_sli_issue_mbox_wait(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq,
			 uint32_t timeout)
{
	DECLARE_WAIT_QUEUE_HEAD(done_q);
	DECLARE_WAITQUEUE(wq_entry, current);
	uint32_t timeleft = 0;
	int retval;

	/* The caller must leave context1 empty. */
	if (pmboxq->context1 != 0) {
		return (MBX_NOT_FINISHED);
	}

	/* setup wake call as IOCB callback */
	pmboxq->mbox_cmpl = lpfc_sli_wake_mbox_wait;
	/* setup context field to pass wait_queue pointer to wake function */
	pmboxq->context1 = &done_q;

	/* start to sleep before we wait, to avoid races */
	set_current_state(TASK_INTERRUPTIBLE);
	add_wait_queue(&done_q, &wq_entry);

	/* now issue the command */
	retval = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);

	if (retval == MBX_BUSY || retval == MBX_SUCCESS) {
		timeleft = schedule_timeout(timeout * HZ);
		pmboxq->context1 = NULL;
		/* if schedule_timeout returns 0, we timed out and were not
		   woken up */
		if (timeleft == 0) {
			retval = MBX_TIMEOUT;
		} else {
			retval = MBX_SUCCESS;
		}
	}

	set_current_state(TASK_RUNNING);
	remove_wait_queue(&done_q, &wq_entry);
	return retval;
}
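
/*
 * Layout note for the host attention (HA) register tests in the
 * interrupt handler below (inferred from the code): each ring owns a
 * 4-bit nibble of attention bits, so ring i's receive attention is
 * tested with HA_RXATT << (4 * i) and masked with HA_RXMASK << (4 * i),
 * while HA_LATT, HA_MBATT and HA_ERATT sit above the ring nibbles and
 * flag link, mailbox and error attention respectively.
 */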
irqreturn_t
lpfc_intr_handler(int irq, void *dev_id, struct pt_regs * regs)
{
	struct lpfc_hba *phba;
	uint32_t ha_copy;
	uint32_t work_ha_copy;
	unsigned long status;
	int i;
	uint32_t control;

	/*
	 * Get the driver's phba structure from the dev_id and
	 * assume the HBA is not interrupting.
	 */
	phba = (struct lpfc_hba *) dev_id;

	if (unlikely(!phba))
		return IRQ_NONE;

	phba->sli.slistat.sli_intr++;

	/*
	 * Call the HBA to see if it is interrupting.  If not, don't claim
	 * the interrupt.
	 */

	/* Ignore all interrupts during initialization. */
	if (unlikely(phba->hba_state < LPFC_LINK_DOWN))
		return IRQ_NONE;

	/*
	 * Read host attention register to determine interrupt source.
	 * Clear Attention Sources, except Error Attention (to
	 * preserve status) and Link Attention.
	 */
	spin_lock(phba->host->host_lock);
	ha_copy = readl(phba->HAregaddr);
	writel((ha_copy & ~(HA_LATT | HA_ERATT)), phba->HAregaddr);
	readl(phba->HAregaddr); /* flush */
	spin_unlock(phba->host->host_lock);

	if (unlikely(!ha_copy))
		return IRQ_NONE;

	work_ha_copy = ha_copy & phba->work_ha_mask;

	if (unlikely(work_ha_copy)) {
		if (work_ha_copy & HA_LATT) {
			if (phba->sli.sli_flag & LPFC_PROCESS_LA) {
				/*
				 * Turn off Link Attention interrupts
				 * until CLEAR_LA done
				 */
				spin_lock(phba->host->host_lock);
				phba->sli.sli_flag &= ~LPFC_PROCESS_LA;
				control = readl(phba->HCregaddr);
				control &= ~HC_LAINT_ENA;
				writel(control, phba->HCregaddr);
				readl(phba->HCregaddr); /* flush */
				spin_unlock(phba->host->host_lock);
			} else
				work_ha_copy &= ~HA_LATT;
		}

		if (work_ha_copy & ~(HA_ERATT | HA_MBATT | HA_LATT)) {
			for (i = 0; i < phba->sli.num_rings; i++) {
				if (work_ha_copy & (HA_RXATT << (4*i))) {
					/*
					 * Turn off Slow Rings interrupts
					 */
					spin_lock(phba->host->host_lock);
					control = readl(phba->HCregaddr);
					control &= ~(HC_R0INT_ENA << i);
					writel(control, phba->HCregaddr);
					readl(phba->HCregaddr); /* flush */
					spin_unlock(phba->host->host_lock);
				}
			}
		}

		if (work_ha_copy & HA_ERATT) {
			phba->hba_state = LPFC_HBA_ERROR;
			/*
			 * There was a link/board error.  Read the
			 * status register to retrieve the error event
			 * and process it.
			 */
			phba->sli.slistat.err_attn_event++;
			/* Save status info */
			phba->work_hs = readl(phba->HSregaddr);
			phba->work_status[0] = readl(phba->MBslimaddr + 0xa8);
			phba->work_status[1] = readl(phba->MBslimaddr + 0xac);

			/* Clear Chip error bit */
			writel(HA_ERATT, phba->HAregaddr);
			readl(phba->HAregaddr); /* flush */

			/*
			 * Resetting the HBA is the only reliable way
			 * to shut down interrupts when there is an
			 * ERROR.
			 */
			lpfc_sli_send_reset(phba, phba->hba_state);
		}

		spin_lock(phba->host->host_lock);
		phba->work_ha |= work_ha_copy;
		if (phba->work_wait)
			wake_up(phba->work_wait);
		spin_unlock(phba->host->host_lock);
	}

	ha_copy &= ~(phba->work_ha_mask);

	/*
	 * Process all events on FCP ring.  Take the optimized path for
	 * FCP IO.  Any other IO is slow path and is handled by
	 * the worker thread.
	 */
	status = (ha_copy & (HA_RXMASK << (4*LPFC_FCP_RING)));
	status >>= (4*LPFC_FCP_RING);
	if (status & HA_RXATT)
		lpfc_sli_handle_fast_ring_event(phba,
						&phba->sli.ring[LPFC_FCP_RING],
						status);

	return IRQ_HANDLED;

} /* lpfc_intr_handler */