/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at
 * http://www.opensource.org/licenses/cddl1.txt.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2004-2012 Emulex. All rights reserved.
 * Use is subject to license terms.
 */
/* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
EMLXS_MSG_DEF(EMLXS_FCP_C);

#define	EMLXS_GET_VADDR(hba, rp, icmd) emlxs_mem_get_vaddr(hba, rp, \
	PADDR(icmd->un.cont64[i].addrHigh, icmd->un.cont64[i].addrLow));
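/*
 * Illustrative expansion (a sketch, not additional driver code): the
 * macro implicitly references a loop counter "i" at its call site, e.g.
 *
 *	for (i = 0; i < icmd->ULPBDECOUNT; i++) {
 *		mp = EMLXS_GET_VADDR(hba, rp, icmd);
 *		if (mp) {
 *			emlxs_mem_put(hba, seg, (void *)mp);
 *		}
 *	}
 *
 * as in the BDE cleanup loops later in this file.
 */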
static void	emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp,
			Q *abort, uint8_t *flag, emlxs_buf_t *fpkt);
#define	SCSI3_PERSISTENT_RESERVE_IN	0x5e
#define	SCSI_INQUIRY			0x12
#define	SCSI_RX_DIAG			0x1C
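/*
 * Note (derived from the code below): SCSI_INQUIRY and SCSI_RX_DIAG are
 * matched against scsi_opcode in the underrun heuristics of
 * emlxs_handle_fcp_event(); the opcode itself is read from byte 12 of the
 * FCP command payload (scsi_cmd[12]).
 */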
/*
 * emlxs_handle_fcp_event
 *
 * Description: Process an FCP Rsp Ring completion
 */
extern void
emlxs_handle_fcp_event(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	fc_packet_t *pkt = NULL;
#ifdef SAN_DIAG_SUPPORT
	NODELIST *ndlp;
#endif /* SAN_DIAG_SUPPORT */
	uint32_t rsp_data_resid;
	uint32_t check_underrun;
	/* Initialize the status */
	iostat = cmd->ULPSTATUS;

	sbp = (emlxs_buf_t *)iocbq->sbp;

	/* completion with missing xmit command */
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_fcp_completion_msg,
	    "cmd=%x iotag=%d", cmd->ULPCOMMAND, cmd->ULPIOTAG);

	HBASTATS.FcpCompleted++;

#ifdef SAN_DIAG_SUPPORT
	emlxs_update_sd_bucket(sbp);
#endif /* SAN_DIAG_SUPPORT */

	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
	scsi_cmd = (uint8_t *)pkt->pkt_cmd;
	scsi_opcode = scsi_cmd[12];
	/* Sync data in data buffer only on FC_PKT_FCP_READ */
	if (pkt->pkt_datalen && (pkt->pkt_tran_type == FC_PKT_FCP_READ)) {
		EMLXS_MPDATA_SYNC(pkt->pkt_data_dma, 0, pkt->pkt_datalen,
		    DDI_DMA_SYNC_FORKERNEL);

#ifdef TEST_SUPPORT
		if (hba->underrun_counter && (iostat == IOSTAT_SUCCESS) &&
		    (pkt->pkt_datalen >= 512)) {
			hba->underrun_counter--;
			iostat = IOSTAT_FCP_RSP_ERROR;

			/* Report 512 bytes missing by adapter */
			cmd->un.fcpi.fcpi_parm = pkt->pkt_datalen - 512;

			/* Corrupt 512 bytes of Data buffer */
			bzero((uint8_t *)pkt->pkt_data, 512);

			/* Set FCP response to STATUS_GOOD */
			bzero((uint8_t *)pkt->pkt_resp, pkt->pkt_rsplen);
		}
#endif /* TEST_SUPPORT */
	}
	/* Process the pkt */
	mutex_enter(&sbp->mtx);

	/* Check for immediate return */
	if ((iostat == IOSTAT_SUCCESS) &&
	    (pkt->pkt_comp) &&
	    !(sbp->pkt_flags &
	    (PACKET_ULP_OWNED | PACKET_COMPLETED |
	    PACKET_IN_COMPLETION | PACKET_IN_TXQ | PACKET_IN_CHIPQ |
	    PACKET_IN_DONEQ | PACKET_IN_TIMEOUT | PACKET_IN_FLUSH |
	    PACKET_IN_ABORT | PACKET_POLLED))) {
		sbp->pkt_flags |=
		    (PACKET_STATE_VALID | PACKET_IN_COMPLETION |
		    PACKET_COMPLETED | PACKET_ULP_OWNED);
		mutex_exit(&sbp->mtx);

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
		emlxs_unswap_pkt(sbp);
#endif /* EMLXS_MODREV2X */

#ifdef FMA_SUPPORT
		emlxs_check_dma(hba, sbp);
#endif /* FMA_SUPPORT */

		(*pkt->pkt_comp) (pkt);

#ifdef FMA_SUPPORT
		if (hba->flag & FC_DMA_CHECK_ERROR) {
			emlxs_thread_spawn(hba, emlxs_restart_thread,
			    NULL, NULL);
		}
#endif /* FMA_SUPPORT */

		return;
	}
	/*
	 * A response is only placed in the resp buffer if IOSTAT_FCP_RSP_ERROR
	 * is reported.
	 */

	/* Check if a response buffer was not provided */
	if ((iostat != IOSTAT_FCP_RSP_ERROR) || (pkt->pkt_rsplen == 0)) {
		goto done;
	}

	EMLXS_MPDATA_SYNC(pkt->pkt_resp_dma, 0, pkt->pkt_rsplen,
	    DDI_DMA_SYNC_FORKERNEL);

	/* Get the response buffer pointer */
	rsp = (fcp_rsp_t *)pkt->pkt_resp;

	/* Validate the response payload */
	if (!rsp->fcp_u.fcp_status.resid_under &&
	    !rsp->fcp_u.fcp_status.resid_over) {
		rsp->fcp_resid = 0;
	}

	if (!rsp->fcp_u.fcp_status.rsp_len_set) {
		rsp->fcp_response_len = 0;
	}

	if (!rsp->fcp_u.fcp_status.sense_len_set) {
		rsp->fcp_sense_len = 0;
	}

	length = sizeof (fcp_rsp_t) + LE_SWAP32(rsp->fcp_response_len) +
	    LE_SWAP32(rsp->fcp_sense_len);

	if (length > pkt->pkt_rsplen) {
		iostat = IOSTAT_RSP_INVALID;
		pkt->pkt_data_resid = pkt->pkt_datalen;
		goto done;
	}
	/* Set the valid response flag */
	sbp->pkt_flags |= PACKET_FCP_RSP_VALID;

	scsi_status = rsp->fcp_u.fcp_status.scsi_status;

#ifdef SAN_DIAG_SUPPORT
	ndlp = (NODELIST *)iocbq->node;
	if (scsi_status == SCSI_STAT_QUE_FULL) {
		emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_QFULL,
		    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun);
	} else if (scsi_status == SCSI_STAT_BUSY) {
		emlxs_log_sd_scsi_event(port,
		    SD_SCSI_SUBCATEGORY_DEVBSY,
		    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun);
	}
#endif /* SAN_DIAG_SUPPORT */
	/*
	 * Convert a task abort to a check condition with no data
	 * transferred. We saw a data corruption when Solaris received
	 * a Task Abort from a tape.
	 */
	if (scsi_status == SCSI_STAT_TASK_ABORT) {
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_fcp_completion_error_msg,
		    "Task Abort. "
		    "Fixed. did=0x%06x sbp=%p cmd=%02x dl=%d",
		    did, sbp, scsi_opcode, pkt->pkt_datalen);

		rsp->fcp_u.fcp_status.scsi_status =
		    SCSI_STAT_CHECK_COND;
		rsp->fcp_u.fcp_status.rsp_len_set = 0;
		rsp->fcp_u.fcp_status.sense_len_set = 0;
		rsp->fcp_u.fcp_status.resid_over = 0;

		if (pkt->pkt_datalen) {
			rsp->fcp_u.fcp_status.resid_under = 1;
			rsp->fcp_resid =
			    LE_SWAP32(pkt->pkt_datalen);
		} else {
			rsp->fcp_u.fcp_status.resid_under = 0;
			rsp->fcp_resid = 0;
		}

		scsi_status = SCSI_STAT_CHECK_COND;
	}
	/*
	 * We only need to check underrun if data could
	 * have been sent
	 */

	/* Always check underrun if status is good */
	if (scsi_status == SCSI_STAT_GOOD) {
		check_underrun = 1;
	}
	/* Check the sense codes if this is a check condition */
	else if (scsi_status == SCSI_STAT_CHECK_COND) {
		check_underrun = 1;

		/* Check if sense data was provided */
		if (LE_SWAP32(rsp->fcp_sense_len) >= 14) {
			sense = *((uint8_t *)rsp + 32 + 2);
			asc = *((uint8_t *)rsp + 32 + 12);
			ascq = *((uint8_t *)rsp + 32 + 13);
		}

#ifdef SAN_DIAG_SUPPORT
		emlxs_log_sd_scsi_check_event(port,
		    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
		    scsi_opcode, sense, asc, ascq);
#endif /* SAN_DIAG_SUPPORT */
	}
	/* Status is not good and this is not a check condition */
	/* No data should have been sent */
	else {
		check_underrun = 0;
	}
	/* Initialize the resids */
	pkt->pkt_resp_resid = 0;
	pkt->pkt_data_resid = 0;

	/* Check if no data was to be transferred */
	if (pkt->pkt_datalen == 0) {
		goto done;
	}

	/* Get the residual underrun count reported by the SCSI reply */
	rsp_data_resid = (rsp->fcp_u.fcp_status.resid_under) ?
	    LE_SWAP32(rsp->fcp_resid) : 0;

	/* Set the pkt_data_resid to what the SCSI response reported */
	pkt->pkt_data_resid = rsp_data_resid;
	/* Adjust the pkt_data_resid field if needed */
	if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
		/*
		 * Get the residual underrun count reported by
		 * our adapter
		 */
		pkt->pkt_data_resid = cmd->un.fcpi.fcpi_parm;
#ifdef SAN_DIAG_SUPPORT
		if ((rsp_data_resid == 0) && (pkt->pkt_data_resid)) {
			emlxs_log_sd_fc_rdchk_event(port,
			    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
			    scsi_opcode, pkt->pkt_data_resid);
		}
#endif /* SAN_DIAG_SUPPORT */

		/* Get the actual amount of data transferred */
		data_rx = pkt->pkt_datalen - pkt->pkt_data_resid;

		/*
		 * If the residual being reported by the adapter is
		 * greater than the residual being reported in the
		 * reply, then we have a true underrun.
		 */
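		/*
		 * Worked example (illustrative numbers, not from the
		 * original source): with pkt_datalen = 4096, an adapter
		 * report of fcpi_parm = 1024 (so pkt_data_resid = 1024),
		 * and an FCP reply of fcp_resid = 0, only 3072 bytes
		 * arrived while the target claims all 4096 were sent.
		 * That is a true underrun, which the patch blocks below
		 * either fix up or surface as IOSTAT_DATA_UNDERRUN.
		 */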
		if (check_underrun && (pkt->pkt_data_resid > rsp_data_resid)) {
			switch (scsi_opcode) {
			case SCSI_INQUIRY:
				scsi_dl = scsi_cmd[16];
				break;

			case SCSI_RX_DIAG:
				scsi_dl =
				    (scsi_cmd[15] * 0x100) +
				    scsi_cmd[16];
				break;

			default:
				scsi_dl = pkt->pkt_datalen;
			}

#ifdef FCP_UNDERRUN_PATCH1
			if (cfg[CFG_ENABLE_PATCH].current &
			    FCP_UNDERRUN_PATCH1) {
				/*
				 * If status is not good and no data was
				 * actually transferred, then we must fix
				 * the issue
				 */
				if ((scsi_status != SCSI_STAT_GOOD) &&
				    (data_rx == 0)) {
					fix_it = 1;

					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_fcp_completion_error_msg,
					    "Underrun(1). Fixed. "
					    "did=0x%06x sbp=%p cmd=%02x "
					    "dl=%d,%d rx=%d rsp=%d",
					    did, sbp, scsi_opcode,
					    pkt->pkt_datalen, scsi_dl,
					    (pkt->pkt_datalen -
					    pkt->pkt_data_resid),
					    rsp_data_resid);
				}
			}
#endif /* FCP_UNDERRUN_PATCH1 */
#ifdef FCP_UNDERRUN_PATCH2
			if (cfg[CFG_ENABLE_PATCH].current &
			    FCP_UNDERRUN_PATCH2) {
				if (scsi_status == SCSI_STAT_GOOD) {
					msg = &emlxs_fcp_completion_error_msg;

					/*
					 * If status is good and this is an
					 * inquiry request and the amount of
					 * data requested <= data received,
					 * then we must fix the issue.
					 */
					if ((scsi_opcode == SCSI_INQUIRY) &&
					    (pkt->pkt_datalen >= data_rx) &&
					    (scsi_dl <= data_rx)) {
						fix_it = 1;

						EMLXS_MSGF(EMLXS_CONTEXT, msg,
						    "Underrun(2). Fixed. "
						    "did=0x%06x sbp=%p "
						    "cmd=%02x dl=%d,%d "
						    "rx=%d rsp=%d",
						    did, sbp, scsi_opcode,
						    pkt->pkt_datalen, scsi_dl,
						    data_rx, rsp_data_resid);
					}

					/*
					 * If status is good and this is an
					 * inquiry request and the amount of
					 * data requested >= 128 bytes, but
					 * only 128 bytes were received,
					 * then we must fix the issue.
					 */
					else if ((scsi_opcode ==
					    SCSI_INQUIRY) &&
					    (pkt->pkt_datalen >= 128) &&
					    (scsi_dl >= 128) &&
					    (data_rx == 128)) {
						fix_it = 1;

						EMLXS_MSGF(EMLXS_CONTEXT, msg,
						    "Underrun(3). Fixed. "
						    "did=0x%06x sbp=%p "
						    "cmd=%02x dl=%d,%d "
						    "rx=%d rsp=%d",
						    did, sbp, scsi_opcode,
						    pkt->pkt_datalen, scsi_dl,
						    data_rx, rsp_data_resid);
					}
				}
			}
#endif /* FCP_UNDERRUN_PATCH2 */
			/*
			 * Check if SCSI response payload should be
			 * fixed or if a DATA_UNDERRUN should be
			 * reported.
			 */
			if (fix_it) {
				/*
				 * Fix the SCSI response payload itself
				 */
				rsp->fcp_u.fcp_status.resid_under = 1;
				rsp->fcp_resid =
				    LE_SWAP32(pkt->pkt_data_resid);
			} else {
				/*
				 * Change the status from
				 * IOSTAT_FCP_RSP_ERROR to
				 * IOSTAT_DATA_UNDERRUN
				 */
				iostat = IOSTAT_DATA_UNDERRUN;
				pkt->pkt_data_resid =
				    pkt->pkt_datalen;
			}
		}
		/*
		 * If the residual being reported by the adapter is
		 * less than the residual being reported in the reply,
		 * then we have a true overrun. Since we don't know
		 * where the extra data came from or went to then we
		 * cannot trust anything we received.
		 */
		else if (rsp_data_resid > pkt->pkt_data_resid) {
			/*
			 * Change the status from
			 * IOSTAT_FCP_RSP_ERROR to
			 * IOSTAT_DATA_OVERRUN
			 */
			iostat = IOSTAT_DATA_OVERRUN;
			pkt->pkt_data_resid = pkt->pkt_datalen;
		}
	} else if ((hba->sli_mode == EMLXS_HBA_SLI4_MODE) &&
	    (pkt->pkt_tran_type == FC_PKT_FCP_WRITE)) {
		/*
		 * Get the residual underrun count reported by
		 * our adapter
		 */
		pkt->pkt_data_resid = cmd->un.fcpi.fcpi_parm;

#ifdef SAN_DIAG_SUPPORT
		if ((rsp_data_resid == 0) && (pkt->pkt_data_resid)) {
			emlxs_log_sd_fc_rdchk_event(port,
			    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
			    scsi_opcode, pkt->pkt_data_resid);
		}
#endif /* SAN_DIAG_SUPPORT */

		/* Get the actual amount of data transferred */
		data_rx = pkt->pkt_datalen - pkt->pkt_data_resid;

		/*
		 * If the residual being reported by the adapter is
		 * greater than the residual being reported in the
		 * reply, then we have a true underrun.
		 */
		if (check_underrun && (pkt->pkt_data_resid > rsp_data_resid)) {
			scsi_dl = pkt->pkt_datalen;

#ifdef FCP_UNDERRUN_PATCH1
			if (cfg[CFG_ENABLE_PATCH].current &
			    FCP_UNDERRUN_PATCH1) {
				/*
				 * If status is not good and no data was
				 * actually transferred, then we must fix
				 * the issue
				 */
				if ((scsi_status != SCSI_STAT_GOOD) &&
				    (data_rx == 0)) {
					fix_it = 1;

					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_fcp_completion_error_msg,
					    "Underrun(1). Fixed. "
					    "did=0x%06x sbp=%p cmd=%02x "
					    "dl=%d,%d rx=%d rsp=%d",
					    did, sbp, scsi_opcode,
					    pkt->pkt_datalen, scsi_dl,
					    (pkt->pkt_datalen -
					    pkt->pkt_data_resid),
					    rsp_data_resid);
				}
			}
#endif /* FCP_UNDERRUN_PATCH1 */

			/*
			 * Check if SCSI response payload should be
			 * fixed or if a DATA_UNDERRUN should be
			 * reported.
			 */
			if (fix_it) {
				/*
				 * Fix the SCSI response payload itself
				 */
				rsp->fcp_u.fcp_status.resid_under = 1;
				rsp->fcp_resid =
				    LE_SWAP32(pkt->pkt_data_resid);
			} else {
				/*
				 * Change the status from
				 * IOSTAT_FCP_RSP_ERROR to
				 * IOSTAT_DATA_UNDERRUN
				 */
				iostat = IOSTAT_DATA_UNDERRUN;
				pkt->pkt_data_resid =
				    pkt->pkt_datalen;
			}
		}
		/*
		 * If the residual being reported by the adapter is
		 * less than the residual being reported in the reply,
		 * then we have a true overrun. Since we don't know
		 * where the extra data came from or went to then we
		 * cannot trust anything we received.
		 */
		else if (rsp_data_resid > pkt->pkt_data_resid) {
			/*
			 * Change the status from
			 * IOSTAT_FCP_RSP_ERROR to
			 * IOSTAT_DATA_OVERRUN
			 */
			iostat = IOSTAT_DATA_OVERRUN;
			pkt->pkt_data_resid = pkt->pkt_datalen;
		}
	}
done:

	/* Print completion message */
	switch (iostat) {
	case IOSTAT_SUCCESS:
		/* Build SCSI GOOD status */
		if (pkt->pkt_rsplen) {
			bzero((uint8_t *)pkt->pkt_resp, pkt->pkt_rsplen);
		}
		break;
	case IOSTAT_FCP_RSP_ERROR:
		break;

	case IOSTAT_REMOTE_STOP:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Remote Stop. did=0x%06x sbp=%p cmd=%02x", did, sbp,
		    scsi_opcode);
		break;

	case IOSTAT_LOCAL_REJECT:
		localstat = cmd->un.grsp.perr.statLocalError;

		switch (localstat) {
		case IOERR_SEQUENCE_TIMEOUT:
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_fcp_completion_error_msg,
			    "Local reject. "
			    "%s did=0x%06x sbp=%p cmd=%02x tmo=%d ",
			    emlxs_error_xlate(localstat), did, sbp,
			    scsi_opcode, pkt->pkt_timeout);
			break;

		default:
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_fcp_completion_error_msg,
			    "Local reject. %s 0x%06x %p %02x (%x)(%x)",
			    emlxs_error_xlate(localstat), did, sbp,
			    scsi_opcode, (uint16_t)cmd->ULPIOTAG,
			    (uint16_t)cmd->ULPCONTEXT);
		}

		break;
	case IOSTAT_NPORT_RJT:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Nport reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
		    scsi_opcode);
		break;

	case IOSTAT_FABRIC_RJT:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Fabric reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
		    scsi_opcode);
		break;

	case IOSTAT_NPORT_BSY:
#ifdef SAN_DIAG_SUPPORT
		ndlp = (NODELIST *)iocbq->node;
		emlxs_log_sd_fc_bsy_event(port, (HBA_WWN *)&ndlp->nlp_portname);
#endif /* SAN_DIAG_SUPPORT */

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Nport busy. did=0x%06x sbp=%p cmd=%02x", did, sbp,
		    scsi_opcode);
		break;

	case IOSTAT_FABRIC_BSY:
#ifdef SAN_DIAG_SUPPORT
		ndlp = (NODELIST *)iocbq->node;
		emlxs_log_sd_fc_bsy_event(port, NULL);
#endif /* SAN_DIAG_SUPPORT */

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Fabric busy. did=0x%06x sbp=%p cmd=%02x", did, sbp,
		    scsi_opcode);
		break;

	case IOSTAT_INTERMED_RSP:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Intermediate response. did=0x%06x sbp=%p cmd=%02x", did,
		    sbp, scsi_opcode);
		break;

	case IOSTAT_LS_RJT:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "LS Reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
		    scsi_opcode);
		break;
	case IOSTAT_DATA_UNDERRUN:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Underrun. did=0x%06x sbp=%p cmd=%02x "
		    "dl=%d,%d rx=%d rsp=%d (%02x,%02x,%02x,%02x)",
		    did, sbp, scsi_opcode, pkt->pkt_datalen, scsi_dl, data_rx,
		    rsp_data_resid, scsi_status, sense, asc, ascq);
		break;

	case IOSTAT_DATA_OVERRUN:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Overrun. did=0x%06x sbp=%p cmd=%02x "
		    "dl=%d,%d rx=%d rsp=%d (%02x,%02x,%02x,%02x)",
		    did, sbp, scsi_opcode, pkt->pkt_datalen, scsi_dl, data_rx,
		    rsp_data_resid, scsi_status, sense, asc, ascq);
		break;

	case IOSTAT_RSP_INVALID:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Rsp Invalid. did=0x%06x sbp=%p cmd=%02x dl=%d rl=%d"
		    "(%d, %d, %d)",
		    did, sbp, scsi_opcode, pkt->pkt_datalen, pkt->pkt_rsplen,
		    LE_SWAP32(rsp->fcp_resid),
		    LE_SWAP32(rsp->fcp_sense_len),
		    LE_SWAP32(rsp->fcp_response_len));
		break;
	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Unknown status=%x reason=%x did=0x%06x sbp=%p cmd=%02x",
		    iostat, cmd->un.grsp.perr.statLocalError, did, sbp,
		    scsi_opcode);
		break;
	}

	if (iostat == IOSTAT_SUCCESS) {
		HBASTATS.FcpGood++;
	} else {
		HBASTATS.FcpError++;
	}

	mutex_exit(&sbp->mtx);

	emlxs_pkt_complete(sbp, iostat, localstat, 0);

	return;

} /* emlxs_handle_fcp_event() */
/*
 * This routine will post count buffers to the
 * ring with the QUE_RING_BUF_CN command. This
 * allows 2 buffers / command to be posted.
 * Returns the number of buffers NOT posted.
 */
extern int
emlxs_post_buffer(emlxs_hba_t *hba, RING *rp, int16_t cnt)
{
	emlxs_port_t *port = &PPORT;
	IOCB *icmd;
	IOCBQ *iocbq;
	MATCHMAP *mp;
	uint16_t tag;
	uint32_t maxqbuf;
	int32_t i;
	int32_t j;
	uint32_t seg;
	uint32_t size;

	mp = 0;
	maxqbuf = 2;
	tag = (uint16_t)cnt;
	cnt += rp->fc_missbufcnt;

	if (rp->ringno == hba->channel_els) {
		seg = MEM_ELSBUF;
		size = MEM_ELSBUF_SIZE;
	} else if (rp->ringno == hba->channel_ip) {
		seg = MEM_IPBUF;
		size = MEM_IPBUF_SIZE;
	} else if (rp->ringno == hba->channel_ct) {
		seg = MEM_CTBUF;
		size = MEM_CTBUF_SIZE;
	}
#ifdef SFCT_SUPPORT
	else if (rp->ringno == hba->CHANNEL_FCT) {
		seg = MEM_FCTBUF;
		size = MEM_FCTBUF_SIZE;
	}
#endif /* SFCT_SUPPORT */
	else {
		return (0);
	}

	/*
	 * While there are buffers to post
	 */
	while (cnt) {
		if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == 0) {
			rp->fc_missbufcnt = cnt;
			return (cnt);
		}

		iocbq->channel = (void *)&hba->chan[rp->ringno];
		iocbq->port = (void *)port;
		iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);

		icmd = &iocbq->iocb;

		/*
		 * Max buffers can be posted per command
		 */
		for (i = 0; i < maxqbuf; i++) {
			if (cnt <= 0) {
				break;
			}

			/* fill in BDEs for command */
			if ((mp = (MATCHMAP *)emlxs_mem_get(hba, seg))
			    == 0) {
				icmd->ULPBDECOUNT = i;
				for (j = 0; j < i; j++) {
					mp = EMLXS_GET_VADDR(hba, rp, icmd);
					if (mp) {
						emlxs_mem_put(hba, seg,
						    (void *)mp);
					}
				}

				rp->fc_missbufcnt = cnt + i;

				emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);

				return (cnt + i);
			}

			/*
			 * map that page and save the address pair for lookup
			 * later
			 */
			emlxs_mem_map_vaddr(hba,
			    rp,
			    mp,
			    (uint32_t *)&icmd->un.cont64[i].addrHigh,
			    (uint32_t *)&icmd->un.cont64[i].addrLow);

			icmd->un.cont64[i].tus.f.bdeSize = size;
			icmd->ULPCOMMAND = CMD_QUE_RING_BUF64_CN;

			/*
			 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			 * "UB Post: ring=%d addr=%08x%08x size=%d",
			 * rp->ringno, icmd->un.cont64[i].addrHigh,
			 * icmd->un.cont64[i].addrLow, size);
			 */

			cnt--;
		}

		icmd->ULPIOTAG = tag;
		icmd->ULPBDECOUNT = i;
		icmd->ULPOWNER = OWN_CHIP;
		/* used for delimiter between commands */
		iocbq->bp = (void *)mp;

		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[rp->ringno], iocbq);
	}

	rp->fc_missbufcnt = 0;

	return (0);

} /* emlxs_post_buffer() */
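/*
 * Usage sketch (illustrative, not part of this file): an unsolicited
 * buffer consumer on the ELS ring would repost buffers after use, e.g.
 *
 *	RING *rp = &hba->sli.sli3.ring[hba->channel_els];
 *	(void) emlxs_post_buffer(hba, rp, 2);
 *
 * Any buffers that could not be posted are remembered in
 * rp->fc_missbufcnt and folded back into the count on the next call.
 */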
static void
emlxs_fcp_tag_nodes(emlxs_port_t *port)
{
	NODELIST *nlp;
	uint32_t i;

	/* We will process all nodes with this tag later */
	rw_enter(&port->node_rwlock, RW_READER);
	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
		nlp = port->node_table[i];
		while (nlp != NULL) {
			nlp->nlp_tag = 1;
			nlp = nlp->nlp_list_next;
		}
	}
	rw_exit(&port->node_rwlock);

} /* emlxs_fcp_tag_nodes() */
static NODELIST *
emlxs_find_tagged_node(emlxs_port_t *port)
{
	NODELIST *nlp;
	NODELIST *tagged;
	uint32_t i;

	/* Find first node */
	rw_enter(&port->node_rwlock, RW_READER);
	tagged = 0;
	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
		nlp = port->node_table[i];
		while (nlp != NULL) {
			if (!nlp->nlp_tag) {
				nlp = nlp->nlp_list_next;
				continue;
			}
			nlp->nlp_tag = 0;

			if (nlp->nlp_Rpi == FABRIC_RPI) {
				nlp = nlp->nlp_list_next;
				continue;
			}

			tagged = nlp;
			break;
		}
		if (tagged) {
			break;
		}
	}
	rw_exit(&port->node_rwlock);
	return (tagged);

} /* emlxs_find_tagged_node() */
extern void
emlxs_port_offline(emlxs_port_t *port, uint32_t scope)
{
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg;
	NODELIST *nlp;
	fc_affected_id_t *aid;
	uint32_t mask;
	uint32_t aff_d_id;
	uint32_t linkdown;
	uint32_t vlinkdown;
	uint32_t action;
	uint32_t i;
	uint32_t unreg_vpi;
	uint32_t update;
	uint32_t adisc_support;
	uint32_t clear_all;
	uint8_t format;

	/* Target mode only uses this routine for linkdowns */
	if ((port->mode == MODE_TARGET) && (scope != 0xffffffff) &&
	    (scope != 0xfeffffff) && (scope != 0xfdffffff)) {
		return;
	}

	cfg = &CFG;
	aid = (fc_affected_id_t *)&scope;
	linkdown = 0;
	vlinkdown = 0;
	unreg_vpi = 0;
	update = 0;
	clear_all = 0;

	if (!(port->flag & EMLXS_PORT_BOUND)) {
		return;
	}

	format = aid->aff_format;

	switch (format) {
	case 3:	/* Network */
		mask = 0x00000000;
		break;

#ifdef DHCHAP_SUPPORT
	case 0xfe:	/* Virtual link down */
		mask = 0x00000000;
		vlinkdown = 1;
		break;
#endif /* DHCHAP_SUPPORT */

	case 0xff:	/* link is down */
		mask = 0x00000000;
		linkdown = 1;
		break;

	case 0xfd:	/* New fabric */
	default:
		mask = 0x00000000;
		linkdown = 1;
		clear_all = 1;
		break;
	}

	aff_d_id = aid->aff_d_id & mask;
	/*
	 * If link is down then this is a hard shutdown and flush
	 * If link not down then this is a soft shutdown and flush
	 */
	if (linkdown) {
		mutex_enter(&EMLXS_PORT_LOCK);

		port->flag &= EMLXS_PORT_LINKDOWN_MASK;

		if (port->ulp_statec != FC_STATE_OFFLINE) {
			port->ulp_statec = FC_STATE_OFFLINE;

			port->prev_did = port->did;
			port->did = 0;

			bcopy(&port->fabric_sparam, &port->prev_fabric_sparam,
			    sizeof (SERV_PARM));
			bzero(&port->fabric_sparam, sizeof (SERV_PARM));

			update = 1;
		}

		mutex_exit(&EMLXS_PORT_LOCK);

		emlxs_timer_cancel_clean_address(port);

		/* Tell ULP about it */
		if (update) {
			if (port->flag & EMLXS_PORT_BOUND) {
				if (port->vpi == 0) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_link_down_msg, NULL);
				}

				if (port->mode == MODE_INITIATOR) {
					emlxs_fca_link_down(port);
				}
#ifdef SFCT_SUPPORT
				else if (port->mode == MODE_TARGET) {
					emlxs_fct_link_down(port);
				}
#endif /* SFCT_SUPPORT */
			} else {
				if (port->vpi == 0) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_link_down_msg, "*");
				}
			}
		}

		unreg_vpi = 1;

#ifdef DHCHAP_SUPPORT
		/* Stop authentication with all nodes */
		emlxs_dhc_auth_stop(port, NULL);
#endif /* DHCHAP_SUPPORT */

		/* Flush the base node */
		(void) emlxs_tx_node_flush(port, &port->node_base, 0, 0, 0);
		(void) emlxs_chipq_node_flush(port, 0, &port->node_base, 0);

		/* Flush any pending ub buffers */
		emlxs_ub_flush(port);
	}
#ifdef DHCHAP_SUPPORT
	/* virtual link down */
	else if (vlinkdown) {
		mutex_enter(&EMLXS_PORT_LOCK);

		if (port->ulp_statec != FC_STATE_OFFLINE) {
			port->ulp_statec = FC_STATE_OFFLINE;
			update = 1;
		}

		mutex_exit(&EMLXS_PORT_LOCK);

		emlxs_timer_cancel_clean_address(port);

		/* Tell ULP about it */
		if (update) {
			if (port->flag & EMLXS_PORT_BOUND) {
				if (port->vpi == 0) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_link_down_msg,
					    "Switch authentication failed.");
				}

				if (port->mode == MODE_INITIATOR) {
					emlxs_fca_link_down(port);
				}
#ifdef SFCT_SUPPORT
				else if (port->mode == MODE_TARGET) {
					emlxs_fct_link_down(port);
				}
#endif /* SFCT_SUPPORT */
			} else {
				if (port->vpi == 0) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_link_down_msg,
					    "Switch authentication failed. *");
				}
			}
		}

		/* Flush the base node */
		(void) emlxs_tx_node_flush(port, &port->node_base, 0, 0, 0);
		(void) emlxs_chipq_node_flush(port, 0, &port->node_base, 0);
	}
#endif /* DHCHAP_SUPPORT */
	else {
		emlxs_timer_cancel_clean_address(port);
	}

	if (port->mode == MODE_TARGET) {
		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
			/* Set the node tags */
			emlxs_fcp_tag_nodes(port);
			unreg_vpi = 0;
			while ((nlp = emlxs_find_tagged_node(port))) {
				(void) emlxs_rpi_pause_notify(port,
				    nlp->rpip);
				/*
				 * In port_online we need to resume
				 * these RPIs before we can use them.
				 */
			}
		}
		goto done;
	}

	/* Set the node tags */
	emlxs_fcp_tag_nodes(port);

	if (!clear_all && (hba->flag & FC_ONLINE_MODE)) {
		adisc_support = cfg[CFG_ADISC_SUPPORT].current;
	} else {
		adisc_support = 0;
	}

	/* Check ADISC support level */
	switch (adisc_support) {
	case 0:	/* No support - Flush all IO to all matching nodes */

		while (1) {
			/*
			 * We need to hold the locks this way because
			 * EMLXS_SLI_UNREG_NODE and the flush routines enter
			 * the same locks. Also, when we release the lock the
			 * list can change out from under us.
			 */

			/* Find first node */
			rw_enter(&port->node_rwlock, RW_READER);
			action = 0;
			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
				nlp = port->node_table[i];
				while (nlp != NULL) {
					if (!nlp->nlp_tag) {
						nlp = nlp->nlp_list_next;
						continue;
					}
					nlp->nlp_tag = 0;

					/*
					 * Check for any device that matches
					 * our mask
					 */
					if ((nlp->nlp_DID & mask) ==
					    aff_d_id) {
						if (linkdown) {
							action = 1;
						} else { /* Must be an RSCN */
							action = 2;
						}
						break;
					}

					nlp = nlp->nlp_list_next;
				}

				if (action) {
					break;
				}
			}
			rw_exit(&port->node_rwlock);

			/* Check if nothing was found */
			if (action == 0) {
				break;
			} else if (action == 1) {
				(void) EMLXS_SLI_UNREG_NODE(port, nlp,
				    NULL, NULL, NULL);
			} else if (action == 2) {
				EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);

#ifdef DHCHAP_SUPPORT
				emlxs_dhc_auth_stop(port, nlp);
#endif /* DHCHAP_SUPPORT */

				/*
				 * Close the node for any further normal IO
				 * A PLOGI will reopen the node
				 */
				emlxs_node_close(port, nlp,
				    hba->channel_fcp, 60);
				emlxs_node_close(port, nlp,
				    hba->channel_ip, 60);

				/* Flush tx queue */
				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);

				/* Flush chip queue */
				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);
			}
		}

		break;
	case 1:	/* Partial support - Flush IO for non-FCP2 matching nodes */

		while (1) {
			/*
			 * We need to hold the locks this way because
			 * EMLXS_SLI_UNREG_NODE and the flush routines enter
			 * the same locks. Also, when we release the lock the
			 * list can change out from under us.
			 */
			rw_enter(&port->node_rwlock, RW_READER);
			action = 0;
			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
				nlp = port->node_table[i];
				while (nlp != NULL) {
					if (!nlp->nlp_tag) {
						nlp = nlp->nlp_list_next;
						continue;
					}
					nlp->nlp_tag = 0;

					/*
					 * Check for special FCP2 target device
					 * that matches our mask
					 */
					if ((nlp->nlp_fcp_info &
					    NLP_FCP_TGT_DEVICE) &&
					    (nlp->nlp_fcp_info &
					    NLP_FCP_2_DEVICE) &&
					    (nlp->nlp_DID & mask) ==
					    aff_d_id) {
						action = 3;
						break;
					}

					/*
					 * Check for any other device that
					 * matches our mask
					 */
					else if ((nlp->nlp_DID & mask) ==
					    aff_d_id) {
						if (linkdown) {
							action = 1;
						} else { /* Must be an RSCN */
							action = 2;
						}
						break;
					}

					nlp = nlp->nlp_list_next;
				}

				if (action) {
					break;
				}
			}
			rw_exit(&port->node_rwlock);

			/* Check if nothing was found */
			if (action == 0) {
				break;
			} else if (action == 1) {
				(void) EMLXS_SLI_UNREG_NODE(port, nlp,
				    NULL, NULL, NULL);
			} else if (action == 2) {
				EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);

#ifdef DHCHAP_SUPPORT
				emlxs_dhc_auth_stop(port, nlp);
#endif /* DHCHAP_SUPPORT */

				/*
				 * Close the node for any further normal IO
				 * A PLOGI will reopen the node
				 */
				emlxs_node_close(port, nlp,
				    hba->channel_fcp, 60);
				emlxs_node_close(port, nlp,
				    hba->channel_ip, 60);

				/* Flush tx queue */
				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);

				/* Flush chip queue */
				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);

			} else if (action == 3) {	/* FCP2 devices */
				EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);

				unreg_vpi = 0;

				if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
					(void) emlxs_rpi_pause_notify(port,
					    nlp->rpip);
				}

#ifdef DHCHAP_SUPPORT
				emlxs_dhc_auth_stop(port, nlp);
#endif /* DHCHAP_SUPPORT */

				/*
				 * Close the node for any further normal IO
				 * An ADISC or a PLOGI will reopen the node
				 */
				emlxs_node_close(port, nlp,
				    hba->channel_fcp, -1);
				emlxs_node_close(port, nlp, hba->channel_ip,
				    ((linkdown) ? 0 : 60));

				/* Flush tx queues except for FCP ring */
				(void) emlxs_tx_node_flush(port, nlp,
				    &hba->chan[hba->channel_ct], 0, 0);
				(void) emlxs_tx_node_flush(port, nlp,
				    &hba->chan[hba->channel_els], 0, 0);
				(void) emlxs_tx_node_flush(port, nlp,
				    &hba->chan[hba->channel_ip], 0, 0);

				/* Flush chip queues except for FCP ring */
				(void) emlxs_chipq_node_flush(port,
				    &hba->chan[hba->channel_ct], nlp, 0);
				(void) emlxs_chipq_node_flush(port,
				    &hba->chan[hba->channel_els], nlp, 0);
				(void) emlxs_chipq_node_flush(port,
				    &hba->chan[hba->channel_ip], nlp, 0);
			}
		}

		break;
	case 2:	/* Full support - Hold FCP IO to FCP target matching nodes */

		if (!linkdown && !vlinkdown) {
			break;
		}

		while (1) {
			/*
			 * We need to hold the locks this way because
			 * EMLXS_SLI_UNREG_NODE and the flush routines enter
			 * the same locks. Also, when we release the lock the
			 * list can change out from under us.
			 */
			rw_enter(&port->node_rwlock, RW_READER);
			action = 0;
			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
				nlp = port->node_table[i];
				while (nlp != NULL) {
					if (!nlp->nlp_tag) {
						nlp = nlp->nlp_list_next;
						continue;
					}
					nlp->nlp_tag = 0;

					/*
					 * Check for FCP target device that
					 * matches our mask
					 */
					if ((nlp->nlp_fcp_info &
					    NLP_FCP_TGT_DEVICE) &&
					    (nlp->nlp_DID & mask) ==
					    aff_d_id) {
						action = 3;
						break;
					}

					/*
					 * Check for any other device that
					 * matches our mask
					 */
					else if ((nlp->nlp_DID & mask) ==
					    aff_d_id) {
						if (linkdown) {
							action = 1;
						} else { /* Must be an RSCN */
							action = 2;
						}
						break;
					}

					nlp = nlp->nlp_list_next;
				}

				if (action) {
					break;
				}
			}
			rw_exit(&port->node_rwlock);

			/* Check if nothing was found */
			if (action == 0) {
				break;
			} else if (action == 1) {
				(void) EMLXS_SLI_UNREG_NODE(port, nlp,
				    NULL, NULL, NULL);
			} else if (action == 2) {
				EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);

				/*
				 * Close the node for any further normal IO
				 * A PLOGI will reopen the node
				 */
				emlxs_node_close(port, nlp,
				    hba->channel_fcp, 60);
				emlxs_node_close(port, nlp,
				    hba->channel_ip, 60);

				/* Flush tx queue */
				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);

				/* Flush chip queue */
				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);

			} else if (action == 3) {	/* FCP2 devices */
				EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);

				unreg_vpi = 0;

				if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
					(void) emlxs_rpi_pause_notify(port,
					    nlp->rpip);
				}

				/*
				 * Close the node for any further normal IO
				 * An ADISC or a PLOGI will reopen the node
				 */
				emlxs_node_close(port, nlp,
				    hba->channel_fcp, -1);
				emlxs_node_close(port, nlp, hba->channel_ip,
				    ((linkdown) ? 0 : 60));

				/* Flush tx queues except for FCP ring */
				(void) emlxs_tx_node_flush(port, nlp,
				    &hba->chan[hba->channel_ct], 0, 0);
				(void) emlxs_tx_node_flush(port, nlp,
				    &hba->chan[hba->channel_els], 0, 0);
				(void) emlxs_tx_node_flush(port, nlp,
				    &hba->chan[hba->channel_ip], 0, 0);

				/* Flush chip queues except for FCP ring */
				(void) emlxs_chipq_node_flush(port,
				    &hba->chan[hba->channel_ct], nlp, 0);
				(void) emlxs_chipq_node_flush(port,
				    &hba->chan[hba->channel_els], nlp, 0);
				(void) emlxs_chipq_node_flush(port,
				    &hba->chan[hba->channel_ip], nlp, 0);
			}
		}

		break;
	}

	if (unreg_vpi) {
		(void) emlxs_mb_unreg_vpi(port);
	}

done:

	return;

} /* emlxs_port_offline() */
extern void
emlxs_port_online(emlxs_port_t *vport)
{
	emlxs_hba_t *hba = vport->hba;
	emlxs_port_t *port = &PPORT;
	NODELIST *nlp;
	uint32_t state;
	uint32_t update;
	uint32_t npiv_linkup;
	char topology[32];
	char linkspeed[32];
	char mode[32];

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
	 * "linkup_callback. vpi=%d fc_flag=%x", vport->vpi, hba->flag);
	 */

	if ((vport->vpi > 0) &&
	    (!(hba->flag & FC_NPIV_ENABLED) ||
	    !(hba->flag & FC_NPIV_SUPPORTED))) {
		return;
	}

	if (!(vport->flag & EMLXS_PORT_BOUND) ||
	    !(vport->flag & EMLXS_PORT_ENABLED)) {
		return;
	}

	/* Check for mode */
	if (port->mode == MODE_TARGET) {
		(void) strlcpy(mode, ", target", sizeof (mode));

		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
			/* Set the node tags */
			emlxs_fcp_tag_nodes(vport);
			while ((nlp = emlxs_find_tagged_node(vport))) {
				/* The RPI was paused in port_offline */
				(void) emlxs_rpi_resume_notify(vport,
				    nlp->rpip, 0);
			}
		}
	} else if (port->mode == MODE_INITIATOR) {
		(void) strlcpy(mode, ", initiator", sizeof (mode));
	} else {
		(void) strlcpy(mode, "unknown", sizeof (mode));
	}

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Check for loop topology */
	if (hba->topology == TOPOLOGY_LOOP) {
		state = FC_STATE_LOOP;
		(void) strlcpy(topology, ", loop", sizeof (topology));
	} else {
		state = FC_STATE_ONLINE;
		(void) strlcpy(topology, ", fabric", sizeof (topology));
	}

	/* Set the link speed */
	switch (hba->linkspeed) {
	case 0:
		(void) strlcpy(linkspeed, "Gb", sizeof (linkspeed));
		state |= FC_STATE_1GBIT_SPEED;
		break;

	case LA_1GHZ_LINK:
		(void) strlcpy(linkspeed, "1Gb", sizeof (linkspeed));
		state |= FC_STATE_1GBIT_SPEED;
		break;

	case LA_2GHZ_LINK:
		(void) strlcpy(linkspeed, "2Gb", sizeof (linkspeed));
		state |= FC_STATE_2GBIT_SPEED;
		break;

	case LA_4GHZ_LINK:
		(void) strlcpy(linkspeed, "4Gb", sizeof (linkspeed));
		state |= FC_STATE_4GBIT_SPEED;
		break;

	case LA_8GHZ_LINK:
		(void) strlcpy(linkspeed, "8Gb", sizeof (linkspeed));
		state |= FC_STATE_8GBIT_SPEED;
		break;

	case LA_10GHZ_LINK:
		(void) strlcpy(linkspeed, "10Gb", sizeof (linkspeed));
		state |= FC_STATE_10GBIT_SPEED;
		break;

	case LA_16GHZ_LINK:
		(void) strlcpy(linkspeed, "16Gb", sizeof (linkspeed));
		state |= FC_STATE_16GBIT_SPEED;
		break;

	default:
		(void) snprintf(linkspeed, sizeof (linkspeed), "unknown(0x%x)",
		    hba->linkspeed);
		break;
	}

	npiv_linkup = 0;
	update = 0;

	if ((hba->state >= FC_LINK_UP) &&
	    !(hba->flag & FC_LOOPBACK_MODE) && (vport->ulp_statec != state)) {
		update = 1;
		vport->ulp_statec = state;

		if ((vport->vpi > 0) && !(hba->flag & FC_NPIV_LINKUP)) {
			hba->flag |= FC_NPIV_LINKUP;
			npiv_linkup = 1;
		}
	}

	mutex_exit(&EMLXS_PORT_LOCK);

	if (update) {
		if (vport->flag & EMLXS_PORT_BOUND) {
			if (vport->vpi == 0) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
				    "%s%s%s", linkspeed, topology, mode);
			} else if (npiv_linkup) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_npiv_link_up_msg, "%s%s%s",
				    linkspeed, topology, mode);
			}

			if (vport->mode == MODE_INITIATOR) {
				emlxs_fca_link_up(vport);
			}
#ifdef SFCT_SUPPORT
			else if (vport->mode == MODE_TARGET) {
				emlxs_fct_link_up(vport);
			}
#endif /* SFCT_SUPPORT */
		} else {
			if (vport->vpi == 0) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
				    "%s%s%s *", linkspeed, topology, mode);
			} else if (npiv_linkup) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_npiv_link_up_msg, "%s%s%s *",
				    linkspeed, topology, mode);
			}
		}

		/* Check for waiting threads */
		if (vport->vpi == 0) {
			mutex_enter(&EMLXS_LINKUP_LOCK);
			if (hba->linkup_wait_flag == TRUE) {
				hba->linkup_wait_flag = FALSE;
				cv_broadcast(&EMLXS_LINKUP_CV);
			}
			mutex_exit(&EMLXS_LINKUP_LOCK);
		}

		/* Flush any pending ub buffers */
		emlxs_ub_flush(vport);
	}

	return;

} /* emlxs_port_online() */
extern void
emlxs_linkdown(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	int i;
	uint32_t scope;

	mutex_enter(&EMLXS_PORT_LOCK);

	if (hba->state > FC_LINK_DOWN) {
		HBASTATS.LinkDown++;
		EMLXS_STATE_CHANGE_LOCKED(hba, FC_LINK_DOWN);
	}

	/* Set scope */
	scope = (hba->flag & FC_NEW_FABRIC) ? 0xFDFFFFFF : 0xFFFFFFFF;

	/* Filter hba flags */
	hba->flag &= FC_LINKDOWN_MASK;
	hba->discovery_timer = 0;
	hba->linkup_timer = 0;

	mutex_exit(&EMLXS_PORT_LOCK);

	for (i = 0; i < MAX_VPORTS; i++) {
		port = &VPORT(i);

		if (!(port->flag & EMLXS_PORT_BOUND)) {
			continue;
		}

		(void) emlxs_port_offline(port, scope);
	}

	emlxs_log_link_event(port);

	return;

} /* emlxs_linkdown() */
extern void
emlxs_linkup(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Check for any mode changes */
	emlxs_mode_set(hba);

	HBASTATS.LinkUp++;
	EMLXS_STATE_CHANGE_LOCKED(hba, FC_LINK_UP);

#ifdef MENLO_SUPPORT
	if (hba->flag & FC_MENLO_MODE) {
		mutex_exit(&EMLXS_PORT_LOCK);

		/*
		 * Trigger linkup CV and don't start linkup & discovery
		 * timers
		 */
		mutex_enter(&EMLXS_LINKUP_LOCK);
		cv_broadcast(&EMLXS_LINKUP_CV);
		mutex_exit(&EMLXS_LINKUP_LOCK);

		emlxs_log_link_event(port);

		return;
	}
#endif /* MENLO_SUPPORT */

	/* Set the linkup & discovery timers */
	hba->linkup_timer = hba->timer_tics + cfg[CFG_LINKUP_TIMEOUT].current;
	hba->discovery_timer =
	    hba->timer_tics + cfg[CFG_LINKUP_TIMEOUT].current +
	    cfg[CFG_DISC_TIMEOUT].current;

	mutex_exit(&EMLXS_PORT_LOCK);

	emlxs_log_link_event(port);

	return;

} /* emlxs_linkup() */
/*
 * Called to reset the link with an init_link
 */
extern int
emlxs_reset_link(emlxs_hba_t *hba, uint32_t linkup, uint32_t wait)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg;
	MAILBOXQ *mbq = NULL;
	MAILBOX *mb = NULL;
	int rval = 0;
	int i;
	int rc;

	/*
	 * Get a buffer to use for the mailbox command
	 */
	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))
	    == NULL) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_failed_msg,
		    "Unable to allocate mailbox buffer.");
		rval = 1;
		goto reset_link_fail;
	}

	if (linkup) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
		    "Resetting link...");
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
		    "Disabling link...");
	}

	mb = (MAILBOX *)mbq;

	/* Bring link down first */
	emlxs_mb_down_link(hba, mbq);

#define	MBXERR_LINK_DOWN	0x33

	rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, wait, 0);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS) &&
	    (rc != MBXERR_LINK_DOWN)) {
		rval = 1;
		goto reset_link_fail;
	}

	/* Wait for link down */
	i = 0;
	do {
		delay(drv_usectohz(500000));

		if (i++ > 10) {
			rval = 1;

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
			    "Linkdown timeout.");

			goto reset_link_fail;
		}
	} while ((hba->state >= FC_LINK_UP) && (hba->state != FC_ERROR));

	if (linkup) {
		/*
		 * Setup and issue mailbox INITIALIZE LINK command
		 */
		if (wait == MBX_NOWAIT) {
			if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))
			    == NULL) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_link_reset_failed_msg,
				    "Unable to allocate mailbox buffer.");
				rval = 1;
				goto reset_link_fail;
			}
			mb = (MAILBOX *)mbq;
		} else {
			/* Reuse mbq from previous mbox */
			mb = (MAILBOX *)mbq;
		}

		cfg = &CFG;

		emlxs_mb_init_link(hba, mbq,
		    cfg[CFG_TOPOLOGY].current, cfg[CFG_LINK_SPEED].current);

		mb->un.varInitLnk.lipsr_AL_PA = 0;

		/* Clear the loopback mode */
		mutex_enter(&EMLXS_PORT_LOCK);
		hba->flag &= ~FC_LOOPBACK_MODE;
		hba->loopback_tics = 0;
		mutex_exit(&EMLXS_PORT_LOCK);

		rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, wait, 0);
		if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
			rval = 1;
			goto reset_link_fail;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg, NULL);
	}

reset_link_fail:

	if ((wait == MBX_WAIT) && mbq) {
		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
	}

	return (rval);

} /* emlxs_reset_link() */
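/*
 * Usage note (a sketch based on the code above, not original text): with
 * wait == MBX_WAIT the single mbq is reused for both the DOWN_LINK and
 * INIT_LINK steps and freed here; with MBX_NOWAIT a fresh mbq is
 * allocated for INIT_LINK and ownership passes to the mailbox layer, e.g.
 *
 *	(void) emlxs_reset_link(hba, 1, MBX_NOWAIT);
 */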
extern int
emlxs_online(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	int32_t rval = 0;
	uint32_t i = 0;

	/* Make sure adapter is offline or exit trying (30 seconds) */
	while (i++ < 30) {
		/* Check if adapter is already going online */
		if (hba->flag & (FC_ONLINE_MODE | FC_ONLINING_MODE)) {
			return (0);
		}

		mutex_enter(&EMLXS_PORT_LOCK);

		/* Check again */
		if (hba->flag & (FC_ONLINE_MODE | FC_ONLINING_MODE)) {
			mutex_exit(&EMLXS_PORT_LOCK);
			return (0);
		}

		/* Check if adapter is offline */
		if (hba->flag & FC_OFFLINE_MODE) {
			/* Mark it going online */
			hba->flag &= ~FC_OFFLINE_MODE;
			hba->flag |= FC_ONLINING_MODE;

			/* Currently !FC_ONLINE_MODE and !FC_OFFLINE_MODE */
			mutex_exit(&EMLXS_PORT_LOCK);
			break;
		}

		mutex_exit(&EMLXS_PORT_LOCK);

		ddi_sleep(1);
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
	    "Going online...");

	if (rval = EMLXS_SLI_ONLINE(hba)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, "status=%x",
		    rval);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_offline_msg, NULL);

		/* Set FC_OFFLINE_MODE */
		mutex_enter(&EMLXS_PORT_LOCK);
		hba->flag |= FC_OFFLINE_MODE;
		hba->flag &= ~FC_ONLINING_MODE;
		mutex_exit(&EMLXS_PORT_LOCK);

		return (rval);
	}

	/* Start the timer */
	emlxs_timer_start(hba);

	/* Set FC_ONLINE_MODE */
	mutex_enter(&EMLXS_PORT_LOCK);
	hba->flag |= FC_ONLINE_MODE;
	hba->flag &= ~FC_ONLINING_MODE;
	mutex_exit(&EMLXS_PORT_LOCK);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_online_msg, NULL);

#ifdef SFCT_SUPPORT
	if (port->flag & EMLXS_TGT_ENABLED) {
		(void) emlxs_fct_port_initialize(port);
	}
#endif /* SFCT_SUPPORT */

	return (rval);

} /* emlxs_online() */
extern int
emlxs_offline(emlxs_hba_t *hba, uint32_t reset_requested)
{
	emlxs_port_t *port = &PPORT;
	uint32_t i = 0;
	int rval = 0;

	/* Make sure adapter is online or exit trying (30 seconds) */
	while (i++ < 30) {
		/* Check if adapter is already going offline */
		if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
			return (0);
		}

		mutex_enter(&EMLXS_PORT_LOCK);

		/* Check again */
		if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
			mutex_exit(&EMLXS_PORT_LOCK);
			return (0);
		}

		/* Check if adapter is online */
		if (hba->flag & FC_ONLINE_MODE) {
			/* Mark it going offline */
			hba->flag &= ~FC_ONLINE_MODE;
			hba->flag |= FC_OFFLINING_MODE;

			/* Currently !FC_ONLINE_MODE and !FC_OFFLINE_MODE */
			mutex_exit(&EMLXS_PORT_LOCK);
			break;
		}

		mutex_exit(&EMLXS_PORT_LOCK);

		ddi_sleep(1);
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
	    "Going offline...");

	/* Declare link down */
	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
		(void) emlxs_fcf_shutdown_notify(port, 1);
	} else {
		emlxs_linkdown(hba);
	}

#ifdef SFCT_SUPPORT
	if (port->flag & EMLXS_TGT_ENABLED) {
		(void) emlxs_fct_port_shutdown(port);
	}
#endif /* SFCT_SUPPORT */

	/* Check if adapter was shutdown */
	if (hba->flag & FC_HARDWARE_ERROR) {
		/*
		 * Force mailbox cleanup
		 * This will wake any sleeping or polling threads
		 */
		emlxs_mb_fini(hba, NULL, MBX_HARDWARE_ERROR);
	}

	/* Pause here for the IO to settle */
	ddi_sleep(1);	/* 1 sec */

	/* Unregister all nodes */
	emlxs_ffcleanup(hba);

	if (hba->bus_type == SBUS_FC) {
		WRITE_SBUS_CSR_REG(hba, FC_SHS_REG(hba), 0x9A);
#ifdef FMA_SUPPORT
		/* Access handle validation */
		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.sbus_csr_handle);
#endif /* FMA_SUPPORT */
	}

	/* Stop the timer */
	emlxs_timer_stop(hba);

	/* For safety flush every iotag list */
	if (emlxs_iotag_flush(hba)) {
		/* Pause here for the IO to flush */
		ddi_sleep(1);
	}

	/* Wait for poll command request to settle */
	while (hba->io_poll_count > 0) {
		delay(drv_usectohz(2000000));	/* 2 sec */
	}

	/* Shutdown the adapter interface */
	EMLXS_SLI_OFFLINE(hba, reset_requested);

	mutex_enter(&EMLXS_PORT_LOCK);
	hba->flag |= FC_OFFLINE_MODE;
	hba->flag &= ~FC_OFFLINING_MODE;
	mutex_exit(&EMLXS_PORT_LOCK);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_offline_msg, NULL);

	return (rval);

} /* emlxs_offline() */
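/*
 * Note on the flag protocol above (an observation on this code, not new
 * behavior): FC_ONLINING_MODE and FC_OFFLINING_MODE act as transition
 * guards. While a transition is in progress neither FC_ONLINE_MODE nor
 * FC_OFFLINE_MODE is set, so a concurrent emlxs_online()/emlxs_offline()
 * caller keeps polling in its 30-second loop until the transition
 * completes or it gives up.
 */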
extern int
emlxs_power_down(emlxs_hba_t *hba)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif /* FMA_SUPPORT */
	int32_t rval = 0;

	if ((rval = emlxs_offline(hba, 0))) {
		return (rval);
	}
	EMLXS_SLI_HBA_RESET(hba, 1, 1, 0);

#ifdef FMA_SUPPORT
	if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
	    != DDI_FM_OK) {
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_invalid_access_handle_msg, NULL);
		return (1);
	}
#endif /* FMA_SUPPORT */

	return (0);

} /* End emlxs_power_down */
extern int
emlxs_power_up(emlxs_hba_t *hba)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif /* FMA_SUPPORT */
	int32_t rval = 0;

#ifdef FMA_SUPPORT
	if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
	    != DDI_FM_OK) {
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_invalid_access_handle_msg, NULL);
		return (1);
	}
#endif /* FMA_SUPPORT */

	/* Bring adapter online */
	if ((rval = emlxs_online(hba))) {
		if (hba->pci_cap_offset[PCI_CAP_ID_PM]) {
			/* Put chip in D3 state */
			(void) ddi_put8(hba->pci_acc_handle,
			    (uint8_t *)(hba->pci_addr +
			    hba->pci_cap_offset[PCI_CAP_ID_PM] +
			    PCI_PMCSR),
			    (uint8_t)PCI_PMCSR_D3HOT);
		}
		return (rval);
	}

	return (rval);

} /* emlxs_power_up() */
/*
 * NAME:     emlxs_ffcleanup
 *
 * FUNCTION: Cleanup all the Firefly resources used by configuring the adapter
 *
 * EXECUTION ENVIRONMENT: process only
 *
 * CALLED FROM: CFG_TERM
 *
 * INPUT: hba - pointer to the dev_ctl area.
 */
extern void
emlxs_ffcleanup(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t i;

	/* Disable all but the mailbox interrupt */
	EMLXS_SLI_DISABLE_INTR(hba, HC_MBINT_ENA);

	/* Make sure all port nodes are destroyed */
	for (i = 0; i < MAX_VPORTS; i++) {
		port = &VPORT(i);

		if (port->node_count) {
			(void) EMLXS_SLI_UNREG_NODE(port, 0, 0, 0, 0);
		}
	}

	/* Clear all interrupt enable conditions */
	EMLXS_SLI_DISABLE_INTR(hba, 0);

	return;

} /* emlxs_ffcleanup() */
extern uint16_t
emlxs_register_pkt(CHANNEL *cp, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba;
	uint16_t iotag;
	uint32_t i;

	hba = cp->hba;

	mutex_enter(&EMLXS_FCTAB_LOCK);

	if (sbp->iotag != 0) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Pkt already registered! channel=%d iotag=%d sbp=%p",
		    sbp->channel, sbp->iotag, sbp);
	}

	iotag = 0;
	for (i = 0; i < hba->max_iotag; i++) {
		if (!hba->fc_iotag || hba->fc_iotag >= hba->max_iotag) {
			hba->fc_iotag = 1;
		}
		iotag = hba->fc_iotag++;

		if (hba->fc_table[iotag] == 0 ||
		    hba->fc_table[iotag] == STALE_PACKET) {
			hba->io_count++;
			hba->fc_table[iotag] = sbp;

			sbp->iotag = iotag;
			sbp->channel = cp;

			break;
		}
		iotag = 0;
	}

	mutex_exit(&EMLXS_FCTAB_LOCK);

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	 * "register_pkt: channel=%d iotag=%d sbp=%p",
	 * cp->channelno, iotag, sbp);
	 */

	return (iotag);

} /* emlxs_register_pkt() */
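/*
 * Design note (derived from the allocator above, illustrative numbers):
 * fc_iotag is a rotor holding the next candidate tag, and tag 0 is
 * reserved to mean "no tag". With max_iotag = 4096 and fc_iotag = 4095,
 * the probe order is 4095, 1, 2, ... until a slot that is empty or holds
 * STALE_PACKET is found; at most max_iotag slots are probed before giving
 * up and returning 0. emlxs_unregister_pkt() below releases the slot.
 */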
extern emlxs_buf_t *
emlxs_unregister_pkt(CHANNEL *cp, uint16_t iotag, uint32_t forced)
{
	emlxs_hba_t *hba;
	emlxs_buf_t *sbp;

	sbp = NULL;
	hba = cp->hba;

	/* Check the iotag range */
	if ((iotag == 0) || (iotag >= hba->max_iotag)) {
		return (NULL);
	}

	/* Remove the sbp from the table */
	mutex_enter(&EMLXS_FCTAB_LOCK);
	sbp = hba->fc_table[iotag];

	if (!sbp || (sbp == STALE_PACKET)) {
		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (sbp);
	}

	hba->fc_table[iotag] = ((forced) ? STALE_PACKET : NULL);
	hba->io_count--;
	sbp->iotag = 0;

	mutex_exit(&EMLXS_FCTAB_LOCK);

	/* Clean up the sbp */
	mutex_enter(&sbp->mtx);

	if (sbp->pkt_flags & PACKET_IN_TXQ) {
		sbp->pkt_flags &= ~PACKET_IN_TXQ;
		hba->channel_tx_count--;
	}

	if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
		sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
	}

	if (sbp->bmp) {
		emlxs_mem_put(hba, MEM_BPL, (void *)sbp->bmp);
		sbp->bmp = 0;
	}

	mutex_exit(&sbp->mtx);

	return (sbp);

} /* emlxs_unregister_pkt() */
/* Flush all IO's to all nodes for a given IO Channel */
extern uint32_t
emlxs_tx_channel_flush(emlxs_hba_t *hba, CHANNEL *cp, emlxs_buf_t *fpkt)
{
	emlxs_port_t *port = &PPORT;
	emlxs_buf_t *sbp;
	IOCBQ *iocbq;
	IOCBQ *next;
	IOCB *iocb;
	IOCB *icmd;
	Q abort;
	NODELIST *ndlp;
	uint32_t channelno;
	RING *rp;
	MATCHMAP *mp;
	uint32_t i;
	uint8_t flag[MAX_CHANNEL];

	channelno = cp->channelno;
	bzero((void *)&abort, sizeof (Q));
	bzero((void *)flag, MAX_CHANNEL * sizeof (uint8_t));

	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

	/* While a node needs servicing */
	while (cp->nodeq.q_first) {
		ndlp = (NODELIST *) cp->nodeq.q_first;

		/* Check if priority queue is not empty */
		if (ndlp->nlp_ptx[channelno].q_first) {
			/* Transfer all iocb's to local queue */
			if (abort.q_first == 0) {
				abort.q_first =
				    ndlp->nlp_ptx[channelno].q_first;
			} else {
				((IOCBQ *)abort.q_last)->next =
				    (IOCBQ *)ndlp->nlp_ptx[channelno].q_first;
			}

			flag[channelno] = 1;

			abort.q_last = ndlp->nlp_ptx[channelno].q_last;
			abort.q_cnt += ndlp->nlp_ptx[channelno].q_cnt;
		}

		/* Check if tx queue is not empty */
		if (ndlp->nlp_tx[channelno].q_first) {
			/* Transfer all iocb's to local queue */
			if (abort.q_first == 0) {
				abort.q_first =
				    ndlp->nlp_tx[channelno].q_first;
			} else {
				((IOCBQ *)abort.q_last)->next =
				    (IOCBQ *)ndlp->nlp_tx[channelno].q_first;
			}

			abort.q_last = ndlp->nlp_tx[channelno].q_last;
			abort.q_cnt += ndlp->nlp_tx[channelno].q_cnt;
		}

		/* Clear the queue pointers */
		ndlp->nlp_ptx[channelno].q_first = NULL;
		ndlp->nlp_ptx[channelno].q_last = NULL;
		ndlp->nlp_ptx[channelno].q_cnt = 0;

		ndlp->nlp_tx[channelno].q_first = NULL;
		ndlp->nlp_tx[channelno].q_last = NULL;
		ndlp->nlp_tx[channelno].q_cnt = 0;

		/* Remove node from service queue */

		/* If this is the last node on list */
		if (cp->nodeq.q_last == (void *)ndlp) {
			cp->nodeq.q_last = NULL;
			cp->nodeq.q_first = NULL;
			cp->nodeq.q_cnt = 0;
		} else {
			/* Remove node from head */
			cp->nodeq.q_first = ndlp->nlp_next[channelno];
			((NODELIST *)cp->nodeq.q_last)->nlp_next[channelno] =
			    cp->nodeq.q_first;
			cp->nodeq.q_cnt--;
		}

		/* Clear node */
		ndlp->nlp_next[channelno] = NULL;
	}

	/* First cleanup the iocb's while still holding the lock */
	iocbq = (IOCBQ *) abort.q_first;
	while (iocbq) {
		/* Free the IoTag and the bmp */
		iocb = &iocbq->iocb;

		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
			sbp = iocbq->sbp;
			if (sbp) {
				emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
			}
		} else {
			sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
			    iocb->ULPIOTAG, 0);
		}

		if (sbp && (sbp != STALE_PACKET)) {
			mutex_enter(&sbp->mtx);

			sbp->pkt_flags |= PACKET_IN_FLUSH;
			/*
			 * If the fpkt is already set, then we will leave it
			 * alone. This ensures that this pkt is only accounted
			 * for on one fpkt->flush_count
			 */
			if (!sbp->fpkt && fpkt) {
				mutex_enter(&fpkt->mtx);
				sbp->fpkt = fpkt;
				fpkt->flush_count++;
				mutex_exit(&fpkt->mtx);
			}

			mutex_exit(&sbp->mtx);
		}

		iocbq = (IOCBQ *)iocbq->next;

	}	/* end of while */

	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

	/* Now abort the iocb's */
	iocbq = (IOCBQ *)abort.q_first;
	while (iocbq) {
		/* Save the next iocbq for now */
		next = (IOCBQ *)iocbq->next;

		/* Unlink this iocbq */
		iocbq->next = NULL;

		/* Get the pkt */
		sbp = (emlxs_buf_t *)iocbq->sbp;

		if (sbp) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
			    "tx: sbp=%p node=%p", sbp, sbp->node);

			if (hba->state >= FC_LINK_UP) {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_ABORT_REQUESTED, 1);
			} else {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_LINK_DOWN, 1);
			}
		}
		/* Free the iocb and its associated buffers */
		else {
			icmd = &iocbq->iocb;

			/* SLI3 */
			if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
			    icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
			    icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
				if ((hba->flag &
				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) ==
				    0) {
					/* HBA is detaching or offlining */
					if (icmd->ULPCOMMAND !=
					    CMD_QUE_RING_LIST64_CN) {
						rp = &hba->sli.sli3.
						    ring[channelno];
						for (i = 0;
						    i < icmd->ULPBDECOUNT;
						    i++) {
							mp = EMLXS_GET_VADDR(
							    hba, rp, icmd);

							if (mp) {
								emlxs_mem_put(
								    hba,
								    MEM_BUF,
								    (void *)mp);
							}
						}
					}

					emlxs_mem_put(hba, MEM_IOCB,
					    (void *)iocbq);
				} else {
					/* repost the unsolicited buffer */
					EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp,
					    iocbq);
				}
			} else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
			    icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {
				emlxs_tx_put(iocbq, 1);
			}
		}

		iocbq = next;

	}	/* end of while */

	/* Now trigger channel service */
	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		if (!flag[channelno]) {
			continue;
		}

		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
	}

	return (abort.q_cnt);

} /* emlxs_tx_channel_flush() */
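/*
 * Design note (derived from the routine above): the flush is two-phase.
 * Phase one runs under EMLXS_TX_CHANNEL_LOCK and only moves iocbq's onto
 * the local "abort" queue, marking packets PACKET_IN_FLUSH. Phase two
 * completes each packet with IOSTAT_LOCAL_REJECT after the lock is
 * dropped, since emlxs_pkt_complete() can re-enter transmit paths that
 * take the same lock. emlxs_tx_node_flush() and emlxs_tx_lun_flush()
 * below follow the same pattern.
 */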
/* Flush all IO's on all or a given ring for a given node */
extern uint32_t
emlxs_tx_node_flush(emlxs_port_t *port, NODELIST *ndlp, CHANNEL *chan,
    uint32_t shutdown, emlxs_buf_t *fpkt)
{
	emlxs_hba_t *hba = HBA;
	emlxs_buf_t *sbp;
	uint32_t channelno;
	CHANNEL *cp;
	IOCB *icmd;
	IOCBQ *iocbq;
	NODELIST *prev;
	IOCBQ *next;
	IOCB *iocb;
	Q abort;
	uint32_t i;
	MATCHMAP *mp;
	uint8_t ch;
	RING *rp;
	uint8_t flag[MAX_CHANNEL];

	bzero((void *)&abort, sizeof (Q));

	/* Flush all I/O's on tx queue to this target */
	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

	if (!ndlp->nlp_base && shutdown) {
		ndlp->nlp_active = 0;
	}

	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		cp = &hba->chan[channelno];

		if (chan && cp != chan) {
			continue;
		}

		if (!ndlp->nlp_base || shutdown) {
			/* Check if priority queue is not empty */
			if (ndlp->nlp_ptx[channelno].q_first) {
				/* Transfer all iocb's to local queue */
				if (abort.q_first == 0) {
					abort.q_first =
					    ndlp->nlp_ptx[channelno].q_first;
				} else {
					((IOCBQ *)(abort.q_last))->next =
					    (IOCBQ *)ndlp->nlp_ptx[channelno].
					    q_first;
				}

				flag[channelno] = 1;

				abort.q_last = ndlp->nlp_ptx[channelno].q_last;
				abort.q_cnt += ndlp->nlp_ptx[channelno].q_cnt;
			}
		}

		/* Check if tx queue is not empty */
		if (ndlp->nlp_tx[channelno].q_first) {
			/* Transfer all iocb's to local queue */
			if (abort.q_first == 0) {
				abort.q_first =
				    ndlp->nlp_tx[channelno].q_first;
			} else {
				((IOCBQ *)abort.q_last)->next =
				    (IOCBQ *)ndlp->nlp_tx[channelno].q_first;
			}

			abort.q_last = ndlp->nlp_tx[channelno].q_last;
			abort.q_cnt += ndlp->nlp_tx[channelno].q_cnt;
		}

		/* Clear the queue pointers */
		ndlp->nlp_ptx[channelno].q_first = NULL;
		ndlp->nlp_ptx[channelno].q_last = NULL;
		ndlp->nlp_ptx[channelno].q_cnt = 0;

		ndlp->nlp_tx[channelno].q_first = NULL;
		ndlp->nlp_tx[channelno].q_last = NULL;
		ndlp->nlp_tx[channelno].q_cnt = 0;

		/* If this node was on the channel queue, remove it */
		if (ndlp->nlp_next[channelno]) {
			/* If this is the only node on list */
			if (cp->nodeq.q_first == (void *)ndlp &&
			    cp->nodeq.q_last == (void *)ndlp) {
				cp->nodeq.q_last = NULL;
				cp->nodeq.q_first = NULL;
				cp->nodeq.q_cnt = 0;
			} else if (cp->nodeq.q_first == (void *)ndlp) {
				cp->nodeq.q_first = ndlp->nlp_next[channelno];
				((NODELIST *) cp->nodeq.q_last)->
				    nlp_next[channelno] = cp->nodeq.q_first;
				cp->nodeq.q_cnt--;
			} else {
				/*
				 * This is a little more difficult: find the
				 * previous node in the circular channel queue
				 */
				prev = ndlp;
				while (prev->nlp_next[channelno] != ndlp) {
					prev = prev->nlp_next[channelno];
				}

				prev->nlp_next[channelno] =
				    ndlp->nlp_next[channelno];

				if (cp->nodeq.q_last == (void *)ndlp) {
					cp->nodeq.q_last = (void *)prev;
				}
				cp->nodeq.q_cnt--;
			}

			/* Clear node */
			ndlp->nlp_next[channelno] = NULL;
		}
	}

	/* First cleanup the iocb's while still holding the lock */
	iocbq = (IOCBQ *) abort.q_first;
	while (iocbq) {
		/* Free the IoTag and the bmp */
		iocb = &iocbq->iocb;

		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
			sbp = iocbq->sbp;
			if (sbp) {
				emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
			}
		} else {
			sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
			    iocb->ULPIOTAG, 0);
		}

		if (sbp && (sbp != STALE_PACKET)) {
			mutex_enter(&sbp->mtx);
			sbp->pkt_flags |= PACKET_IN_FLUSH;

			/*
			 * If the fpkt is already set, then we will leave it
			 * alone. This ensures that this pkt is only accounted
			 * for on one fpkt->flush_count
			 */
			if (!sbp->fpkt && fpkt) {
				mutex_enter(&fpkt->mtx);
				sbp->fpkt = fpkt;
				fpkt->flush_count++;
				mutex_exit(&fpkt->mtx);
			}

			mutex_exit(&sbp->mtx);
		}

		iocbq = (IOCBQ *) iocbq->next;

	}	/* end of while */

	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

	/* Now abort the iocb's outside the locks */
	iocbq = (IOCBQ *)abort.q_first;
	while (iocbq) {
		/* Save the next iocbq for now */
		next = (IOCBQ *)iocbq->next;

		/* Unlink this iocbq */
		iocbq->next = NULL;

		/* Get the pkt */
		sbp = (emlxs_buf_t *)iocbq->sbp;

		if (sbp) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
			    "tx: sbp=%p node=%p", sbp, sbp->node);

			if (hba->state >= FC_LINK_UP) {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_ABORT_REQUESTED, 1);
			} else {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_LINK_DOWN, 1);
			}
		}
		/* Free the iocb and its associated buffers */
		else {
			/* CMD_CLOSE_XRI_CN should also free the memory */
			icmd = &iocbq->iocb;

			/* SLI3 */
			if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
			    icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
			    icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
				if ((hba->flag &
				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) ==
				    0) {
					/* HBA is detaching or offlining */
					if (icmd->ULPCOMMAND !=
					    CMD_QUE_RING_LIST64_CN) {
						ch = ((CHANNEL *)
						    iocbq->channel)->channelno;
						rp = &hba->sli.sli3.ring[ch];
						for (i = 0;
						    i < icmd->ULPBDECOUNT;
						    i++) {
							mp = EMLXS_GET_VADDR(
							    hba, rp, icmd);

							if (mp) {
								emlxs_mem_put(
								    hba,
								    MEM_BUF,
								    (void *)mp);
							}
						}
					}

					emlxs_mem_put(hba, MEM_IOCB,
					    (void *)iocbq);
				} else {
					/* repost the unsolicited buffer */
					EMLXS_SLI_ISSUE_IOCB_CMD(hba,
					    (CHANNEL *)iocbq->channel, iocbq);
				}
			} else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
			    icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {
				/*
				 * Resend the abort iocbq if any
				 */
				emlxs_tx_put(iocbq, 1);
			}
		}

		iocbq = next;

	}	/* end of while */

	/* Now trigger channel service */
	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		if (!flag[channelno]) {
			continue;
		}

		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
	}

	return (abort.q_cnt);

} /* emlxs_tx_node_flush() */
/* Check for IO's on all or a given ring for a given node */
extern uint32_t
emlxs_tx_node_check(emlxs_port_t *port, NODELIST *ndlp, CHANNEL *chan)
{
	emlxs_hba_t *hba = HBA;
	uint32_t channelno;
	CHANNEL *cp;
	uint32_t count;

	count = 0;

	/* Flush all I/O's on tx queue to this target */
	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		cp = &hba->chan[channelno];

		if (chan && cp != chan) {
			continue;
		}

		/* Check if priority queue is not empty */
		if (ndlp->nlp_ptx[channelno].q_first) {
			count += ndlp->nlp_ptx[channelno].q_cnt;
		}

		/* Check if tx queue is not empty */
		if (ndlp->nlp_tx[channelno].q_first) {
			count += ndlp->nlp_tx[channelno].q_cnt;
		}
	}

	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

	return (count);

} /* emlxs_tx_node_check() */
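/*
 * Usage sketch (illustrative, not from the original source): a caller
 * deciding whether a node is idle before tearing it down could do
 *
 *	if (emlxs_tx_node_check(port, ndlp, NULL) == 0) {
 *		... no pending transmit IO on any channel ...
 *	}
 *
 * Passing a specific CHANNEL pointer restricts the count to that channel.
 */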

/* Flush all IO's on any ring for a given node's lun */
extern uint32_t
emlxs_tx_lun_flush(emlxs_port_t *port, NODELIST *ndlp, uint32_t lun,
    emlxs_buf_t *fpkt)
{
	emlxs_hba_t *hba = HBA;
	emlxs_buf_t *sbp;
	uint32_t channelno;
	IOCBQ *iocbq;
	IOCBQ *prev;
	IOCBQ *next;
	IOCB *iocb;
	IOCB *icmd;
	Q abort;
	uint32_t i;
	MATCHMAP *mp;
	uint8_t flag[MAX_CHANNEL];

	if (lun == EMLXS_LUN_NONE) {
		return (0);
	}

	bzero((void *)&abort, sizeof (Q));
	bzero((void *)flag, sizeof (flag));

	/* Flush I/O's on txQ to this target's lun */
	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

	for (channelno = 0; channelno < hba->chan_count; channelno++) {

		/* Scan the priority queue first */
		prev = NULL;
		iocbq = (IOCBQ *)ndlp->nlp_ptx[channelno].q_first;

		while (iocbq) {
			next = (IOCBQ *)iocbq->next;
			iocb = &iocbq->iocb;
			sbp = (emlxs_buf_t *)iocbq->sbp;

			/* Check if this IO is for our lun */
			if (sbp && (sbp->lun == lun)) {
				/* Remove iocb from the node's ptx queue */
				if (next == NULL) {
					ndlp->nlp_ptx[channelno].q_last =
					    (uint8_t *)prev;
				}

				if (prev == NULL) {
					ndlp->nlp_ptx[channelno].q_first =
					    (uint8_t *)next;
				} else {
					prev->next = next;
				}

				iocbq->next = NULL;
				ndlp->nlp_ptx[channelno].q_cnt--;

				/*
				 * Add this iocb to our local abort Q
				 */
				if (abort.q_first) {
					((IOCBQ *)abort.q_last)->next = iocbq;
					abort.q_last = (uint8_t *)iocbq;
					abort.q_cnt++;
				} else {
					abort.q_first = (uint8_t *)iocbq;
					abort.q_last = (uint8_t *)iocbq;
					abort.q_cnt = 1;
				}
				iocbq->node = (void *)ndlp;
				flag[channelno] = 1;
			} else {
				prev = iocbq;
			}

			iocbq = next;

		} /* while (iocbq) */

		/* Scan the regular queue */
		prev = NULL;
		iocbq = (IOCBQ *)ndlp->nlp_tx[channelno].q_first;

		while (iocbq) {
			next = (IOCBQ *)iocbq->next;
			iocb = &iocbq->iocb;
			sbp = (emlxs_buf_t *)iocbq->sbp;

			/* Check if this IO is for our lun */
			if (sbp && (sbp->lun == lun)) {
				/* Remove iocb from the node's tx queue */
				if (next == NULL) {
					ndlp->nlp_tx[channelno].q_last =
					    (uint8_t *)prev;
				}

				if (prev == NULL) {
					ndlp->nlp_tx[channelno].q_first =
					    (uint8_t *)next;
				} else {
					prev->next = next;
				}

				iocbq->next = NULL;
				ndlp->nlp_tx[channelno].q_cnt--;

				/*
				 * Add this iocb to our local abort Q
				 */
				if (abort.q_first) {
					((IOCBQ *)abort.q_last)->next = iocbq;
					abort.q_last = (uint8_t *)iocbq;
					abort.q_cnt++;
				} else {
					abort.q_first = (uint8_t *)iocbq;
					abort.q_last = (uint8_t *)iocbq;
					abort.q_cnt = 1;
				}
				iocbq->node = (void *)ndlp;
				flag[channelno] = 1;
			} else {
				prev = iocbq;
			}

			iocbq = next;

		} /* while (iocbq) */
	} /* for loop */

	/* First cleanup the iocb's while still holding the lock */
	iocbq = (IOCBQ *)abort.q_first;
	while (iocbq) {
		/* Free the IoTag and the bmp */
		iocb = &iocbq->iocb;

		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
			sbp = iocbq->sbp;
			if (sbp) {
				emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
			}
		} else {
			sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
			    iocb->ULPIOTAG, 0);
		}

		if (sbp && (sbp != STALE_PACKET)) {
			mutex_enter(&sbp->mtx);
			sbp->pkt_flags |= PACKET_IN_FLUSH;

			/*
			 * If the fpkt is already set, then we will leave it
			 * alone. This ensures that this pkt is only accounted
			 * for on one fpkt->flush_count
			 */
			if (!sbp->fpkt && fpkt) {
				mutex_enter(&fpkt->mtx);
				sbp->fpkt = fpkt;
				fpkt->flush_count++;
				mutex_exit(&fpkt->mtx);
			}

			mutex_exit(&sbp->mtx);
		}

		iocbq = (IOCBQ *)iocbq->next;

	} /* end of while */

	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

	/* Now abort the iocb's outside the locks */
	iocbq = (IOCBQ *)abort.q_first;
	while (iocbq) {
		/* Save the next iocbq for now */
		next = (IOCBQ *)iocbq->next;

		/* Unlink this iocbq */
		iocbq->next = NULL;

		/* Get the pkt */
		sbp = (emlxs_buf_t *)iocbq->sbp;

		if (sbp) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
			    "tx: sbp=%p node=%p", sbp, sbp->node);

			if (hba->state >= FC_LINK_UP) {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_ABORT_REQUESTED, 1);
			} else {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_LINK_DOWN, 1);
			}
		} else {
			/* Free the iocb and its associated buffers */
			/* Should never happen! */
			icmd = &iocbq->iocb;

			/* SLI3 */
			if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
			    icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
			    icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
				if ((hba->flag &
				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
					/* HBA is detaching or offlining */
					if (icmd->ULPCOMMAND !=
					    CMD_QUE_RING_LIST64_CN) {
						RING *rp;
						uint32_t ch;

						ch = ((CHANNEL *)
						    iocbq->channel)->channelno;
						rp = &hba->sli.sli3.ring[ch];

						for (i = 0;
						    i < icmd->ULPBDECOUNT;
						    i++) {
							mp = EMLXS_GET_VADDR(
							    hba, rp, icmd);

							if (mp) {
								emlxs_mem_put(
								    hba,
								    MEM_BUF,
								    (void *)mp);
							}
						}
					}

					emlxs_mem_put(hba, MEM_IOCB,
					    (void *)iocbq);
				} else {
					/* repost the unsolicited buffer */
					EMLXS_SLI_ISSUE_IOCB_CMD(hba,
					    (CHANNEL *)iocbq->channel, iocbq);
				}
			} else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
			    icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {
				/*
				 * Resend the abort iocbq if any
				 */
				emlxs_tx_put(iocbq, 1);
			}
		}

		iocbq = next;

	} /* end of while */

	/* Now trigger channel service */
	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		if (!flag[channelno]) {
			continue;
		}

		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
	}

	return (abort.q_cnt);

} /* emlxs_tx_lun_flush() */
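
/*
 * emlxs_tx_lun_flush() mirrors the node flush above but dequeues only
 * those iocbq's whose sbp->lun matches the given lun. The !sbp->fpkt
 * guard ensures each flushed pkt is accounted for on exactly one
 * fpkt->flush_count, even if multiple flush passes see the same pkt.
 */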

extern void
emlxs_tx_put(IOCBQ *iocbq, uint32_t lock)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	uint32_t channelno;
	NODELIST *nlp;
	CHANNEL *cp;
	emlxs_buf_t *sbp;

	port = (emlxs_port_t *)iocbq->port;
	hba = HBA;
	cp = (CHANNEL *)iocbq->channel;
	nlp = (NODELIST *)iocbq->node;
	channelno = cp->channelno;
	sbp = (emlxs_buf_t *)iocbq->sbp;

	if (nlp == NULL) {
		/* Set node to base node by default */
		nlp = &port->node_base;

		iocbq->node = (void *)nlp;

		if (sbp) {
			sbp->node = (void *)nlp;
		}
	}

	if (lock) {
		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
	}

	if (!nlp->nlp_active || (sbp && (sbp->pkt_flags & PACKET_IN_ABORT))) {
		if (sbp) {
			mutex_enter(&sbp->mtx);
			sbp->pkt_flags |= PACKET_IN_FLUSH;
			mutex_exit(&sbp->mtx);

			if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
				emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
			} else {
				(void) emlxs_unregister_pkt(cp, sbp->iotag, 0);
			}

			if (lock) {
				mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
			}

			if (hba->state >= FC_LINK_UP) {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_ABORT_REQUESTED, 1);
			} else {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_LINK_DOWN, 1);
			}
		} else {
			if (lock) {
				mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
			}

			emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
		}

		return;
	}

	if (sbp) {

		mutex_enter(&sbp->mtx);

		if (sbp->pkt_flags &
		    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ | PACKET_IN_TXQ)) {
			mutex_exit(&sbp->mtx);

			if (lock) {
				mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
			}

			return;
		}

		sbp->pkt_flags |= PACKET_IN_TXQ;
		hba->channel_tx_count++;

		mutex_exit(&sbp->mtx);
	}

	/* Check iocbq priority */
	/* Some IOCB has the high priority like reset/close xri etc */
	if (iocbq->flag & IOCB_PRIORITY) {
		/* Add the iocb to the bottom of the node's ptx queue */
		if (nlp->nlp_ptx[channelno].q_first) {
			((IOCBQ *)nlp->nlp_ptx[channelno].q_last)->next =
			    iocbq;
			nlp->nlp_ptx[channelno].q_last = (uint8_t *)iocbq;
			nlp->nlp_ptx[channelno].q_cnt++;
		} else {
			nlp->nlp_ptx[channelno].q_first = (uint8_t *)iocbq;
			nlp->nlp_ptx[channelno].q_last = (uint8_t *)iocbq;
			nlp->nlp_ptx[channelno].q_cnt = 1;
		}

		iocbq->next = NULL;
	} else {	/* Normal priority */

		/* Add the iocb to the bottom of the node's tx queue */
		if (nlp->nlp_tx[channelno].q_first) {
			((IOCBQ *)nlp->nlp_tx[channelno].q_last)->next =
			    iocbq;
			nlp->nlp_tx[channelno].q_last = (uint8_t *)iocbq;
			nlp->nlp_tx[channelno].q_cnt++;
		} else {
			nlp->nlp_tx[channelno].q_first = (uint8_t *)iocbq;
			nlp->nlp_tx[channelno].q_last = (uint8_t *)iocbq;
			nlp->nlp_tx[channelno].q_cnt = 1;
		}

		iocbq->next = NULL;
	}

	/*
	 * Check if the node is not already on channel queue and
	 * (is not closed or is a priority request)
	 */
	if (!nlp->nlp_next[channelno] &&
	    (!(nlp->nlp_flag[channelno] & NLP_CLOSED) ||
	    (iocbq->flag & IOCB_PRIORITY))) {
		/* If so, then add it to the channel queue */
		if (cp->nodeq.q_first) {
			((NODELIST *)cp->nodeq.q_last)->nlp_next[channelno] =
			    (uint8_t *)nlp;
			nlp->nlp_next[channelno] = cp->nodeq.q_first;

			/*
			 * If this is not the base node then add it
			 * to the tail
			 */
			if (!nlp->nlp_base) {
				cp->nodeq.q_last = (uint8_t *)nlp;
			} else {	/* Otherwise, add it to the head */

				/* The command node always gets priority */
				cp->nodeq.q_first = (uint8_t *)nlp;
			}

			cp->nodeq.q_cnt++;
		} else {
			cp->nodeq.q_first = (uint8_t *)nlp;
			cp->nodeq.q_last = (uint8_t *)nlp;
			nlp->nlp_next[channelno] = nlp;
			cp->nodeq.q_cnt = 1;
		}
	}

	HBASTATS.IocbTxPut[channelno]++;

	/* Adjust the channel timeout timer */
	cp->timeout = hba->timer_tics + 5;

	if (lock) {
		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
	}

	return;

} /* emlxs_tx_put() */
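
/*
 * Queueing model used by emlxs_tx_put(): each node keeps two queues per
 * channel, nlp_ptx (priority, e.g. reset/close XRI iocbs) and nlp_tx
 * (normal traffic). Nodes with pending work are linked into the
 * channel's nodeq; the base node is inserted at the head so command
 * traffic is serviced first, all others at the tail.
 */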

extern IOCBQ *
emlxs_tx_get(CHANNEL *cp, uint32_t lock)
{
	emlxs_hba_t *hba;
	uint32_t channelno;
	IOCBQ *iocbq;
	NODELIST *nlp;
	emlxs_buf_t *sbp;

	hba = cp->hba;
	channelno = cp->channelno;

	if (lock) {
		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
	}

begin:

	iocbq = NULL;

	/* Check if a node needs servicing */
	if (cp->nodeq.q_first) {
		nlp = (NODELIST *)cp->nodeq.q_first;

		/* Get next iocb from node's priority queue */
		if (nlp->nlp_ptx[channelno].q_first) {
			iocbq = (IOCBQ *)nlp->nlp_ptx[channelno].q_first;

			/* Check if this is last entry */
			if (nlp->nlp_ptx[channelno].q_last == (void *)iocbq) {
				nlp->nlp_ptx[channelno].q_first = NULL;
				nlp->nlp_ptx[channelno].q_last = NULL;
				nlp->nlp_ptx[channelno].q_cnt = 0;
			} else {
				/* Remove iocb from head */
				nlp->nlp_ptx[channelno].q_first =
				    (void *)iocbq->next;
				nlp->nlp_ptx[channelno].q_cnt--;
			}

			iocbq->next = NULL;
		}

		/* Get next iocb from node tx queue if node not closed */
		else if (nlp->nlp_tx[channelno].q_first &&
		    !(nlp->nlp_flag[channelno] & NLP_CLOSED)) {
			iocbq = (IOCBQ *)nlp->nlp_tx[channelno].q_first;

			/* Check if this is last entry */
			if (nlp->nlp_tx[channelno].q_last == (void *)iocbq) {
				nlp->nlp_tx[channelno].q_first = NULL;
				nlp->nlp_tx[channelno].q_last = NULL;
				nlp->nlp_tx[channelno].q_cnt = 0;
			} else {
				/* Remove iocb from head */
				nlp->nlp_tx[channelno].q_first =
				    (void *)iocbq->next;
				nlp->nlp_tx[channelno].q_cnt--;
			}

			iocbq->next = NULL;
		}

		/* Now deal with node itself */

		/* Check if node still needs servicing */
		if ((nlp->nlp_ptx[channelno].q_first) ||
		    (nlp->nlp_tx[channelno].q_first &&
		    !(nlp->nlp_flag[channelno] & NLP_CLOSED))) {

			/*
			 * If this is the base node, then don't shift the
			 * pointers. We want to drain the base node before
			 * moving on.
			 */
			if (!nlp->nlp_base) {
				/*
				 * Just shift channel queue pointers to next
				 * node
				 */
				cp->nodeq.q_last = (void *)nlp;
				cp->nodeq.q_first = nlp->nlp_next[channelno];
			}
		} else {
			/* Remove node from channel queue */

			/* If this is the last node on list */
			if (cp->nodeq.q_last == (void *)nlp) {
				cp->nodeq.q_last = NULL;
				cp->nodeq.q_first = NULL;
				cp->nodeq.q_cnt = 0;
			} else {
				/* Remove node from head */
				cp->nodeq.q_first = nlp->nlp_next[channelno];
				((NODELIST *)cp->nodeq.q_last)->
				    nlp_next[channelno] = cp->nodeq.q_first;
				cp->nodeq.q_cnt--;
			}

			/* Clear node */
			nlp->nlp_next[channelno] = NULL;
		}

		/*
		 * If no iocbq was found on this node, then it will have
		 * been removed. So try again.
		 */
		if (!iocbq) {
			goto begin;
		}

		sbp = (emlxs_buf_t *)iocbq->sbp;

		if (sbp) {
			/*
			 * Check flags before we enter mutex in case this
			 * has been flushed and destroyed
			 */
			if ((sbp->pkt_flags &
			    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) ||
			    !(sbp->pkt_flags & PACKET_IN_TXQ)) {
				goto begin;
			}

			mutex_enter(&sbp->mtx);

			if ((sbp->pkt_flags &
			    (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) ||
			    !(sbp->pkt_flags & PACKET_IN_TXQ)) {
				mutex_exit(&sbp->mtx);
				goto begin;
			}

			sbp->pkt_flags &= ~PACKET_IN_TXQ;
			hba->channel_tx_count--;

			mutex_exit(&sbp->mtx);
		}
	}

	if (iocbq) {
		HBASTATS.IocbTxGet[channelno]++;
	}

	/* Adjust the ring timeout timer */
	cp->timeout = (cp->nodeq.q_first) ? (hba->timer_tics + 5) : 0;

	if (lock) {
		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
	}

	return (iocbq);

} /* emlxs_tx_get() */
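
/*
 * emlxs_tx_get() services the nodeq round-robin: after each dequeue the
 * channel queue pointers shift to the next node (except for the base
 * node, which is drained first). Typical consumer pattern (illustrative
 * sketch only):
 *
 *	while ((iocbq = emlxs_tx_get(cp, 1)) != NULL) {
 *		(issue iocbq to the adapter)
 *	}
 */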
 * Remove all cmds from from_chan's txq to to_chan's txq for ndlp.
 * The old IoTag has to be released, the new one has to be
 * allocated.  Others no change
 * TX_CHANNEL lock is held
 */
extern void
emlxs_tx_move(NODELIST *ndlp, CHANNEL *from_chan, CHANNEL *to_chan,
    uint32_t cmd, emlxs_buf_t *fpkt, uint32_t lock)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	uint32_t fchanno, tchanno, i;
	IOCBQ *iocbq;
	IOCBQ *prev;
	IOCBQ *next;
	IOCB *iocb, *icmd;
	Q tbm;		/* To Be Moved Q */
	MATCHMAP *mp;
	NODELIST *nlp = ndlp;
	emlxs_buf_t *sbp;
	NODELIST *n_prev = NULL;
	NODELIST *n_next = NULL;
	uint16_t count = 0;

	hba = from_chan->hba;
	port = &PPORT;
	cmd = cmd; /* To pass lint */

	fchanno = from_chan->channelno;
	tchanno = to_chan->channelno;

	if (lock) {
		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
	}

	bzero((void *)&tbm, sizeof (Q));

	/* Scan the ndlp's fchanno txq to get the iocb of fcp cmd */
	prev = NULL;
	iocbq = (IOCBQ *)nlp->nlp_tx[fchanno].q_first;

	while (iocbq) {
		next = (IOCBQ *)iocbq->next;
		/* Check if this iocb is fcp cmd */
		iocb = &iocbq->iocb;

		switch (iocb->ULPCOMMAND) {

		/* FCP commands */
		case CMD_FCP_ICMND_CR:
		case CMD_FCP_ICMND_CX:
		case CMD_FCP_IREAD_CR:
		case CMD_FCP_IREAD_CX:
		case CMD_FCP_IWRITE_CR:
		case CMD_FCP_IWRITE_CX:
		case CMD_FCP_ICMND64_CR:
		case CMD_FCP_ICMND64_CX:
		case CMD_FCP_IREAD64_CR:
		case CMD_FCP_IREAD64_CX:
		case CMD_FCP_IWRITE64_CR:
		case CMD_FCP_IWRITE64_CX:
			/* We found a fcp cmd */
			break;
		default:
			/* this is not fcp cmd continue */
			prev = iocbq;
			iocbq = next;
			continue;
		}

		/* found a fcp cmd iocb in fchanno txq, now deque it */
		if (next == NULL) {
			/* This is the last iocbq */
			nlp->nlp_tx[fchanno].q_last =
			    (uint8_t *)prev;
		}

		if (prev == NULL) {
			/* This is the first one then remove it from head */
			nlp->nlp_tx[fchanno].q_first =
			    (uint8_t *)next;
		} else {
			prev->next = next;
		}

		iocbq->next = NULL;
		nlp->nlp_tx[fchanno].q_cnt--;

		/* Add this iocb to our local toberemovedq */
		/* This way we do not hold the TX_CHANNEL lock too long */

		if (tbm.q_first) {
			((IOCBQ *)tbm.q_last)->next = iocbq;
			tbm.q_last = (uint8_t *)iocbq;
			tbm.q_cnt++;
		} else {
			tbm.q_first = (uint8_t *)iocbq;
			tbm.q_last = (uint8_t *)iocbq;
			tbm.q_cnt = 1;
		}

		iocbq = next;

	} /* while (iocbq) */

	if ((tchanno == hba->channel_fcp) && (tbm.q_cnt != 0)) {

		/* from_chan->nodeq.q_first must be non NULL */
		if (from_chan->nodeq.q_first) {

			/* nodeq is not empty, now deal with the node itself */
			if ((nlp->nlp_tx[fchanno].q_first)) {

				if (!nlp->nlp_base) {
					from_chan->nodeq.q_last =
					    (void *)nlp;
					from_chan->nodeq.q_first =
					    nlp->nlp_next[fchanno];
				}
			} else {
				n_prev = (NODELIST *)from_chan->nodeq.q_first;
				count = from_chan->nodeq.q_cnt;

				if (n_prev == nlp) {

					/* If this is the only node on list */
					if (from_chan->nodeq.q_last ==
					    (void *)nlp) {
						from_chan->nodeq.q_last =
						    NULL;
						from_chan->nodeq.q_first =
						    NULL;
						from_chan->nodeq.q_cnt = 0;
					} else {
						from_chan->nodeq.q_first =
						    nlp->nlp_next[fchanno];
						((NODELIST *)from_chan->
						    nodeq.q_last)->
						    nlp_next[fchanno] =
						    from_chan->nodeq.q_first;
						from_chan->nodeq.q_cnt--;
					}

					/* Clear node */
					nlp->nlp_next[fchanno] = NULL;
				} else {
					count--;
					do {
						n_next =
						    n_prev->nlp_next[fchanno];
						if (n_next == nlp) {
							break;
						}
						n_prev = n_next;
					} while (count--);

					if (count != 0) {

						if (n_next ==
						    (NODELIST *)from_chan->
						    nodeq.q_last) {
							n_prev->
							    nlp_next[fchanno] =
							    nlp->
							    nlp_next[fchanno];
							from_chan->nodeq.q_last
							    = (uint8_t *)n_prev;
						} else {
							n_prev->
							    nlp_next[fchanno] =
							    nlp->
							    nlp_next[fchanno];
						}
						from_chan->nodeq.q_cnt--;

						/* Clear node */
						nlp->nlp_next[fchanno] = NULL;
					}
				}
			}
		}
	}

	/* Now cleanup the iocb's */
	iocbq = (IOCBQ *)tbm.q_first;
	while (iocbq) {

		next = (IOCBQ *)iocbq->next;

		/* Free the IoTag and the bmp */
		iocb = &iocbq->iocb;

		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
			sbp = iocbq->sbp;
			if (sbp) {
				emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
			}
		} else {
			sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
			    iocb->ULPIOTAG, 0);
		}

		if (sbp && (sbp != STALE_PACKET)) {
			mutex_enter(&sbp->mtx);
			sbp->pkt_flags |= PACKET_IN_FLUSH;

			/*
			 * If the fpkt is already set, then we will leave it
			 * alone. This ensures that this pkt is only accounted
			 * for on one fpkt->flush_count
			 */
			if (!sbp->fpkt && fpkt) {
				mutex_enter(&fpkt->mtx);
				sbp->fpkt = fpkt;
				fpkt->flush_count++;
				mutex_exit(&fpkt->mtx);
			}

			mutex_exit(&sbp->mtx);
		}

		iocbq = next;

	} /* end of while */

	iocbq = (IOCBQ *)tbm.q_first;
	while (iocbq) {
		/* Save the next iocbq for now */
		next = (IOCBQ *)iocbq->next;

		/* Unlink this iocbq */
		iocbq->next = NULL;

		/* Get the pkt */
		sbp = (emlxs_buf_t *)iocbq->sbp;

		if (sbp) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
			    "tx: sbp=%p node=%p", sbp, sbp->node);

			if (hba->state >= FC_LINK_UP) {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_ABORT_REQUESTED, 1);
			} else {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_LINK_DOWN, 1);
			}
		} else {
			/* Free the iocb and its associated buffers */
			icmd = &iocbq->iocb;

			/* SLI3 */
			if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
			    icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
			    icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
				if ((hba->flag &
				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
					/* HBA is detaching or offlining */
					if (icmd->ULPCOMMAND !=
					    CMD_QUE_RING_LIST64_CN) {
						RING *rp;
						uint32_t ch;

						ch = from_chan->channelno;
						rp = &hba->sli.sli3.ring[ch];

						for (i = 0;
						    i < icmd->ULPBDECOUNT;
						    i++) {
							mp = EMLXS_GET_VADDR(
							    hba, rp, icmd);

							if (mp) {
								emlxs_mem_put(
								    hba,
								    MEM_BUF,
								    (void *)mp);
							}
						}
					}

					emlxs_mem_put(hba, MEM_IOCB,
					    (void *)iocbq);
				} else {
					/* repost the unsolicited buffer */
					EMLXS_SLI_ISSUE_IOCB_CMD(hba,
					    from_chan, iocbq);
				}
			}
		}

		iocbq = next;

	} /* end of while */

	/* Now flush the chipq if any */
	if (!(nlp->nlp_flag[fchanno] & NLP_CLOSED)) {

		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

		(void) emlxs_chipq_node_flush(port, from_chan, nlp, 0);

		mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
	}

	if (lock) {
		mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
	}

	return;

} /* emlxs_tx_move */
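
/*
 * Only FCP command iocbs are migrated by emlxs_tx_move(); everything
 * else stays on the original channel. Note the lock dance at the end:
 * the TX_CHANNEL lock is dropped around emlxs_chipq_node_flush() to
 * preserve lock ordering with the FCTAB lock taken inside it.
 */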

extern uint32_t
emlxs_chipq_node_flush(emlxs_port_t *port, CHANNEL *chan, NODELIST *ndlp,
    emlxs_buf_t *fpkt)
{
	emlxs_hba_t *hba = HBA;
	emlxs_buf_t *sbp;
	IOCBQ *iocbq;
	IOCBQ *next;
	Q abort;
	uint32_t channelno;
	CHANNEL *cp;
	uint32_t iotag;
	uint8_t flag[MAX_CHANNEL];

	bzero((void *)&abort, sizeof (Q));
	bzero((void *)flag, sizeof (flag));

	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		cp = &hba->chan[channelno];

		if (chan && cp != chan) {
			continue;
		}

		mutex_enter(&EMLXS_FCTAB_LOCK);

		for (iotag = 1; iotag < hba->max_iotag; iotag++) {
			sbp = hba->fc_table[iotag];

			if (sbp && (sbp != STALE_PACKET) &&
			    (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
			    (sbp->node == ndlp) &&
			    (sbp->channel == cp) &&
			    !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
				emlxs_sbp_abort_add(port, sbp, &abort, flag,
				    fpkt);
			}
		}

		mutex_exit(&EMLXS_FCTAB_LOCK);

	} /* for */

	/* Now put the iocb's on the tx queue */
	iocbq = (IOCBQ *)abort.q_first;
	while (iocbq) {
		/* Save the next iocbq for now */
		next = (IOCBQ *)iocbq->next;

		/* Unlink this iocbq */
		iocbq->next = NULL;

		/* Send this iocbq */
		emlxs_tx_put(iocbq, 1);

		iocbq = next;
	}

	/* Now trigger channel service */
	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		if (!flag[channelno]) {
			continue;
		}

		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
	}

	return (abort.q_cnt);

} /* emlxs_chipq_node_flush() */
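
/*
 * The abort/close iocbs built by emlxs_sbp_abort_add() carry
 * IOCB_PRIORITY, so handing them to emlxs_tx_put() above places them on
 * the node's priority queue ahead of any normal traffic still queued.
 */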

/* Flush all IO's left on all iotag lists */
extern uint32_t
emlxs_iotag_flush(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_buf_t *sbp;
	IOCBQ *iocbq;
	IOCB *iocb;
	Q abort;
	CHANNEL *cp;
	uint32_t channelno;
	uint32_t iotag;
	uint32_t count;

	count = 0;
	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		cp = &hba->chan[channelno];

		bzero((void *)&abort, sizeof (Q));

		mutex_enter(&EMLXS_FCTAB_LOCK);

		for (iotag = 1; iotag < hba->max_iotag; iotag++) {
			sbp = hba->fc_table[iotag];

			/* Check if the slot is empty */
			if (!sbp || (sbp == STALE_PACKET)) {
				continue;
			}

			/* We are building an abort list per channel */
			if (sbp->channel != cp) {
				continue;
			}

			hba->fc_table[iotag] = STALE_PACKET;

			/* Check if IO is valid */
			if (!(sbp->pkt_flags & PACKET_VALID) ||
			    (sbp->pkt_flags & (PACKET_ULP_OWNED |
			    PACKET_COMPLETED | PACKET_IN_COMPLETION))) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
				    "iotag_flush: Invalid IO found. iotag=%d",
				    iotag);

				continue;
			}

			sbp->iotag = 0;

			/* Set IOCB status */
			iocbq = &sbp->iocbq;
			iocb = &iocbq->iocb;

			iocb->ULPSTATUS = IOSTAT_LOCAL_REJECT;
			iocb->un.grsp.perr.statLocalError = IOERR_LINK_DOWN;
			iocb->ULPLE = 1;
			iocbq->next = NULL;

			if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
				if (sbp->xrip) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_sli_debug_msg,
					    "iotag_flush: iotag=%d sbp=%p "
					    "xrip=%p state=%x flag=%x",
					    iotag, sbp, sbp->xrip,
					    sbp->xrip->state, sbp->xrip->flag);
				} else {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_sli_debug_msg,
					    "iotag_flush: iotag=%d sbp=%p "
					    "xrip=NULL", iotag, sbp);
				}

				emlxs_sli4_free_xri(port, sbp, sbp->xrip, 0);
			} else {
				/* Clean up the sbp */
				mutex_enter(&sbp->mtx);

				if (sbp->pkt_flags & PACKET_IN_TXQ) {
					sbp->pkt_flags &= ~PACKET_IN_TXQ;
					hba->channel_tx_count--;
				}

				if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
					sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
				}

				if (sbp->bmp) {
					emlxs_mem_put(hba, MEM_BPL,
					    (void *)sbp->bmp);
					sbp->bmp = 0;
				}

				mutex_exit(&sbp->mtx);
			}

			/* At this point all nodes are assumed destroyed */
			mutex_enter(&sbp->mtx);
			sbp->node = NULL;
			mutex_exit(&sbp->mtx);

			/* Add this iocb to our local abort Q */
			if (abort.q_first) {
				((IOCBQ *)abort.q_last)->next = iocbq;
				abort.q_last = (uint8_t *)iocbq;
				abort.q_cnt++;
			} else {
				abort.q_first = (uint8_t *)iocbq;
				abort.q_last = (uint8_t *)iocbq;
				abort.q_cnt = 1;
			}
		}

		mutex_exit(&EMLXS_FCTAB_LOCK);

		/* Trigger deferred completion */
		if (abort.q_first) {
			mutex_enter(&cp->rsp_lock);
			if (cp->rsp_head == NULL) {
				cp->rsp_head = (IOCBQ *)abort.q_first;
				cp->rsp_tail = (IOCBQ *)abort.q_last;
			} else {
				cp->rsp_tail->next = (IOCBQ *)abort.q_first;
				cp->rsp_tail = (IOCBQ *)abort.q_last;
			}
			mutex_exit(&cp->rsp_lock);

			emlxs_thread_trigger2(&cp->intr_thread,
			    emlxs_proc_channel, cp);

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
			    "iotag_flush: channel=%d count=%d",
			    channelno, abort.q_cnt);

			count += abort.q_cnt;
		}
	}

	return (count);

} /* emlxs_iotag_flush() */
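
/*
 * Rather than completing each flushed packet inline, emlxs_iotag_flush()
 * chains the abort list onto the channel's deferred response queue
 * (rsp_head/rsp_tail) and wakes the channel's intr_thread, so ULP
 * completion callbacks run in their usual thread context.
 */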

/* Checks for IO's on all or a given channel for a given node */
extern uint32_t
emlxs_chipq_node_check(emlxs_port_t *port, CHANNEL *chan, NODELIST *ndlp)
{
	emlxs_hba_t *hba = HBA;
	emlxs_buf_t *sbp;
	CHANNEL *cp;
	uint32_t channelno;
	uint32_t count;
	uint32_t iotag;

	count = 0;

	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		cp = &hba->chan[channelno];

		if (chan && cp != chan) {
			continue;
		}

		mutex_enter(&EMLXS_FCTAB_LOCK);

		for (iotag = 1; iotag < hba->max_iotag; iotag++) {
			sbp = hba->fc_table[iotag];

			if (sbp && (sbp != STALE_PACKET) &&
			    (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
			    (sbp->node == ndlp) &&
			    (sbp->channel == cp) &&
			    !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
				count++;
			}
		}

		mutex_exit(&EMLXS_FCTAB_LOCK);

	} /* for */

	return (count);

} /* emlxs_chipq_node_check() */

/* Flush all IO's for a given node's lun (on any channel) */
extern uint32_t
emlxs_chipq_lun_flush(emlxs_port_t *port, NODELIST *ndlp,
    uint32_t lun, emlxs_buf_t *fpkt)
{
	emlxs_hba_t *hba = HBA;
	emlxs_buf_t *sbp;
	IOCBQ *iocbq;
	IOCBQ *next;
	Q abort;
	uint32_t iotag;
	uint8_t flag[MAX_CHANNEL];
	uint32_t channelno;

	if (lun == EMLXS_LUN_NONE) {
		return (0);
	}

	bzero((void *)flag, sizeof (flag));
	bzero((void *)&abort, sizeof (Q));

	mutex_enter(&EMLXS_FCTAB_LOCK);
	for (iotag = 1; iotag < hba->max_iotag; iotag++) {
		sbp = hba->fc_table[iotag];

		if (sbp && (sbp != STALE_PACKET) &&
		    sbp->pkt_flags & PACKET_IN_CHIPQ &&
		    sbp->node == ndlp &&
		    sbp->lun == lun &&
		    !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
			emlxs_sbp_abort_add(port, sbp,
			    &abort, flag, fpkt);
		}
	}
	mutex_exit(&EMLXS_FCTAB_LOCK);

	/* Now put the iocb's on the tx queue */
	iocbq = (IOCBQ *)abort.q_first;
	while (iocbq) {
		/* Save the next iocbq for now */
		next = (IOCBQ *)iocbq->next;

		/* Unlink this iocbq */
		iocbq->next = NULL;

		/* Send this iocbq */
		emlxs_tx_put(iocbq, 1);

		iocbq = next;
	}

	/* Now trigger channel service */
	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		if (!flag[channelno]) {
			continue;
		}

		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
	}

	return (abort.q_cnt);

} /* emlxs_chipq_lun_flush() */
 * Issue an ABORT_XRI_CN iocb command to abort an FCP command already issued.
 * This must be called while holding the EMLXS_FCTAB_LOCK
 */
extern IOCBQ *
emlxs_create_abort_xri_cn(emlxs_port_t *port, NODELIST *ndlp,
    uint16_t iotag, CHANNEL *cp, uint8_t class, int32_t flag)
{
	emlxs_hba_t *hba = HBA;
	IOCBQ *iocbq;
	IOCB *iocb;
	emlxs_wqe_t *wqe;
	emlxs_buf_t *sbp;
	uint16_t abort_iotag;

	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
		return (NULL);
	}

	iocbq->channel = (void *)cp;
	iocbq->port = (void *)port;
	iocbq->node = (void *)ndlp;
	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);

	/*
	 * set up an iotag using special Abort iotags
	 */
	if ((hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG)) {
		hba->fc_oor_iotag = hba->max_iotag;
	}
	abort_iotag = hba->fc_oor_iotag++;

	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
		wqe = &iocbq->wqe;
		sbp = hba->fc_table[iotag];

		/* Try to issue abort by XRI if possible */
		if (sbp == NULL || sbp == STALE_PACKET || sbp->xrip == NULL) {
			wqe->un.Abort.Criteria = ABORT_REQ_TAG;
			wqe->AbortTag = iotag;
		} else {
			wqe->un.Abort.Criteria = ABORT_XRI_TAG;
			wqe->AbortTag = sbp->xrip->XRI;
		}
		wqe->un.Abort.IA = 0;
		wqe->RequestTag = abort_iotag;
		wqe->Command = CMD_ABORT_XRI_CX;
		wqe->Class = CLASS3;
		wqe->CQId = (uint16_t)0xffff; /* default CQ for response */
		wqe->CmdType = WQE_TYPE_ABORT;
	} else {
		iocb = &iocbq->iocb;
		iocb->ULPIOTAG = abort_iotag;
		iocb->un.acxri.abortType = flag;
		iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi;
		iocb->un.acxri.abortIoTag = iotag;
		iocb->ULPLE = 1;
		iocb->ULPCLASS = class;
		iocb->ULPCOMMAND = CMD_ABORT_XRI_CN;
		iocb->ULPOWNER = OWN_CHIP;
	}

	return (iocbq);

} /* emlxs_create_abort_xri_cn() */
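
/*
 * Abort vs. close semantics: the abort variants build requests with
 * IA=0 (SLI4) so an ABTS is sent on the wire, while the close variants
 * below use IA=1 to terminate the exchange without an ABTS. On SLI4 the
 * abort is issued by XRI when one is assigned, otherwise it falls back
 * to the request-tag criteria.
 */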

/* This must be called while holding the EMLXS_FCTAB_LOCK */
extern IOCBQ *
emlxs_create_abort_xri_cx(emlxs_port_t *port, NODELIST *ndlp, uint16_t xid,
    CHANNEL *cp, uint8_t class, int32_t flag)
{
	emlxs_hba_t *hba = HBA;
	IOCBQ *iocbq;
	IOCB *iocb;
	emlxs_wqe_t *wqe;
	uint16_t abort_iotag;

	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
		return (NULL);
	}

	iocbq->channel = (void *)cp;
	iocbq->port = (void *)port;
	iocbq->node = (void *)ndlp;
	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);

	/*
	 * set up an iotag using special Abort iotags
	 */
	if ((hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG)) {
		hba->fc_oor_iotag = hba->max_iotag;
	}
	abort_iotag = hba->fc_oor_iotag++;

	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
		wqe = &iocbq->wqe;
		wqe->un.Abort.Criteria = ABORT_XRI_TAG;
		wqe->un.Abort.IA = 0;
		wqe->RequestTag = abort_iotag;
		wqe->AbortTag = xid;
		wqe->Command = CMD_ABORT_XRI_CX;
		wqe->Class = CLASS3;
		wqe->CQId = (uint16_t)0xffff; /* default CQ for response */
		wqe->CmdType = WQE_TYPE_ABORT;
	} else {
		iocb = &iocbq->iocb;
		iocb->ULPCONTEXT = xid;
		iocb->ULPIOTAG = abort_iotag;
		iocb->un.acxri.abortType = flag;
		iocb->ULPLE = 1;
		iocb->ULPCLASS = class;
		iocb->ULPCOMMAND = CMD_ABORT_XRI_CX;
		iocb->ULPOWNER = OWN_CHIP;
	}

	return (iocbq);

} /* emlxs_create_abort_xri_cx() */

/* This must be called while holding the EMLXS_FCTAB_LOCK */
extern IOCBQ *
emlxs_create_close_xri_cn(emlxs_port_t *port, NODELIST *ndlp,
    uint16_t iotag, CHANNEL *cp)
{
	emlxs_hba_t *hba = HBA;
	IOCBQ *iocbq;
	IOCB *iocb;
	emlxs_wqe_t *wqe;
	emlxs_buf_t *sbp;
	uint16_t abort_iotag;

	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
		return (NULL);
	}

	iocbq->channel = (void *)cp;
	iocbq->port = (void *)port;
	iocbq->node = (void *)ndlp;
	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);

	/*
	 * set up an iotag using special Abort iotags
	 */
	if ((hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG)) {
		hba->fc_oor_iotag = hba->max_iotag;
	}
	abort_iotag = hba->fc_oor_iotag++;

	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
		wqe = &iocbq->wqe;
		sbp = hba->fc_table[iotag];

		/* Try to issue close by XRI if possible */
		if (sbp == NULL || sbp == STALE_PACKET || sbp->xrip == NULL) {
			wqe->un.Abort.Criteria = ABORT_REQ_TAG;
			wqe->AbortTag = iotag;
		} else {
			wqe->un.Abort.Criteria = ABORT_XRI_TAG;
			wqe->AbortTag = sbp->xrip->XRI;
		}
		wqe->un.Abort.IA = 1;
		wqe->RequestTag = abort_iotag;
		wqe->Command = CMD_ABORT_XRI_CX;
		wqe->Class = CLASS3;
		wqe->CQId = (uint16_t)0xffff; /* default CQ for response */
		wqe->CmdType = WQE_TYPE_ABORT;
	} else {
		iocb = &iocbq->iocb;
		iocb->ULPIOTAG = abort_iotag;
		iocb->un.acxri.abortType = 0;
		iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi;
		iocb->un.acxri.abortIoTag = iotag;
		iocb->ULPLE = 1;
		iocb->ULPCOMMAND = CMD_CLOSE_XRI_CN;
		iocb->ULPOWNER = OWN_CHIP;
	}

	return (iocbq);

} /* emlxs_create_close_xri_cn() */

/* This must be called while holding the EMLXS_FCTAB_LOCK */
extern IOCBQ *
emlxs_create_close_xri_cx(emlxs_port_t *port, NODELIST *ndlp, uint16_t xid,
    CHANNEL *cp)
{
	emlxs_hba_t *hba = HBA;
	IOCBQ *iocbq;
	IOCB *iocb;
	emlxs_wqe_t *wqe;
	uint16_t abort_iotag;

	if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
		return (NULL);
	}

	iocbq->channel = (void *)cp;
	iocbq->port = (void *)port;
	iocbq->node = (void *)ndlp;
	iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);

	/*
	 * set up an iotag using special Abort iotags
	 */
	if ((hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG)) {
		hba->fc_oor_iotag = hba->max_iotag;
	}
	abort_iotag = hba->fc_oor_iotag++;

	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
		wqe = &iocbq->wqe;
		wqe->un.Abort.Criteria = ABORT_XRI_TAG;
		wqe->un.Abort.IA = 1;
		wqe->RequestTag = abort_iotag;
		wqe->AbortTag = xid;
		wqe->Command = CMD_ABORT_XRI_CX;
		wqe->Class = CLASS3;
		wqe->CQId = (uint16_t)0xffff; /* default CQ for response */
		wqe->CmdType = WQE_TYPE_ABORT;
	} else {
		iocb = &iocbq->iocb;
		iocb->ULPCONTEXT = xid;
		iocb->ULPIOTAG = abort_iotag;
		iocb->ULPLE = 1;
		iocb->ULPCOMMAND = CMD_CLOSE_XRI_CX;
		iocb->ULPOWNER = OWN_CHIP;
	}

	return (iocbq);

} /* emlxs_create_close_xri_cx() */
extern void
emlxs_close_els_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
{
	CHANNEL *cp;
	IOCBQ *iocbq;
	IOCB *iocb;

	if (rxid == 0 || rxid == 0xFFFF) {
		return;
	}

	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
		    "Closing ELS exchange: xid=%x", rxid);

		if (emlxs_sli4_unreserve_xri(port, rxid, 1) == 0) {
			return;
		}
	}

	cp = &hba->chan[hba->channel_els];

	mutex_enter(&EMLXS_FCTAB_LOCK);

	/* Create the abort IOCB */
	iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);

	mutex_exit(&EMLXS_FCTAB_LOCK);

	if (iocbq) {
		iocb = &iocbq->iocb;
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
		    "Closing ELS exchange: xid=%x iotag=%d", rxid,
		    iocb->ULPIOTAG);

		EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
	}

} /* emlxs_close_els_exchange() */
extern void
emlxs_abort_els_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
{
	CHANNEL *cp;
	IOCBQ *iocbq;
	IOCB *iocb;

	if (rxid == 0 || rxid == 0xFFFF) {
		return;
	}

	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
		    "Aborting ELS exchange: xid=%x", rxid);

		if (emlxs_sli4_unreserve_xri(port, rxid, 1) == 0) {
			/* We have no way to abort unsolicited exchanges */
			/* that we have not responded to at this time */
			/* So we will return for now */
			return;
		}
	}

	cp = &hba->chan[hba->channel_els];

	mutex_enter(&EMLXS_FCTAB_LOCK);

	/* Create the abort IOCB */
	if (hba->state >= FC_LINK_UP) {
		iocbq = emlxs_create_abort_xri_cx(port, NULL, rxid, cp,
		    CLASS3, ABORT_TYPE_ABTS);
	} else {
		iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);
	}

	mutex_exit(&EMLXS_FCTAB_LOCK);

	if (iocbq) {
		iocb = &iocbq->iocb;
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
		    "Aborting ELS exchange: xid=%x iotag=%d", rxid,
		    iocb->ULPIOTAG);

		EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
	}

} /* emlxs_abort_els_exchange() */
extern void
emlxs_abort_ct_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
{
	CHANNEL *cp;
	IOCBQ *iocbq;
	IOCB *iocb;

	if (rxid == 0 || rxid == 0xFFFF) {
		return;
	}

	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_ct_msg,
		    "Aborting CT exchange: xid=%x", rxid);

		if (emlxs_sli4_unreserve_xri(port, rxid, 1) == 0) {
			/* We have no way to abort unsolicited exchanges */
			/* that we have not responded to at this time */
			/* So we will return for now */
			return;
		}
	}

	cp = &hba->chan[hba->channel_ct];

	mutex_enter(&EMLXS_FCTAB_LOCK);

	/* Create the abort IOCB */
	if (hba->state >= FC_LINK_UP) {
		iocbq = emlxs_create_abort_xri_cx(port, NULL, rxid, cp,
		    CLASS3, ABORT_TYPE_ABTS);
	} else {
		iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);
	}

	mutex_exit(&EMLXS_FCTAB_LOCK);

	if (iocbq) {
		iocb = &iocbq->iocb;
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
		    "Aborting CT exchange: xid=%x iotag=%d", rxid,
		    iocb->ULPIOTAG);

		EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
	}

} /* emlxs_abort_ct_exchange() */

/* This must be called while holding the EMLXS_FCTAB_LOCK */
static void
emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp, Q *abort,
    uint8_t *flag, emlxs_buf_t *fpkt)
{
	emlxs_hba_t *hba = HBA;
	IOCBQ *iocbq;
	CHANNEL *cp;
	NODELIST *ndlp;

	cp = (CHANNEL *)sbp->channel;
	ndlp = sbp->node;

	/* Create the close XRI IOCB */
	if (hba->state >= FC_LINK_UP) {
		iocbq = emlxs_create_abort_xri_cn(port, ndlp, sbp->iotag, cp,
		    CLASS3, ABORT_TYPE_ABTS);
	} else {
		iocbq = emlxs_create_close_xri_cn(port, ndlp, sbp->iotag, cp);
	}

	/*
	 * Add this iocb to our local abort Q
	 * This way we don't hold the CHIPQ lock too long
	 */
	if (iocbq) {
		if (abort->q_first) {
			((IOCBQ *)abort->q_last)->next = iocbq;
			abort->q_last = (uint8_t *)iocbq;
			abort->q_cnt++;
		} else {
			abort->q_first = (uint8_t *)iocbq;
			abort->q_last = (uint8_t *)iocbq;
			abort->q_cnt = 1;
		}
		iocbq->next = NULL;
	}

	/* set the flags */
	mutex_enter(&sbp->mtx);

	sbp->pkt_flags |= (PACKET_IN_FLUSH | PACKET_XRI_CLOSED);

	sbp->ticks = hba->timer_tics + 10;
	sbp->abort_attempts++;

	flag[cp->channelno] = 1;

	/*
	 * If the fpkt is already set, then we will leave it alone
	 * This ensures that this pkt is only accounted for on one
	 * fpkt->flush_count
	 */
	if (!sbp->fpkt && fpkt) {
		mutex_enter(&fpkt->mtx);
		sbp->fpkt = fpkt;
		fpkt->flush_count++;
		mutex_exit(&fpkt->mtx);
	}

	mutex_exit(&sbp->mtx);

	return;

} /* emlxs_sbp_abort_add() */
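
/*
 * Summary: emlxs_sbp_abort_add() picks an ABTS abort when the link is
 * up and a silent close otherwise, marks the sbp with PACKET_XRI_CLOSED,
 * and arms a 10-tick watchdog (sbp->ticks) within which the abort is
 * expected to complete; abort_attempts lets the driver's timer escalate
 * if it does not.
 */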