/* usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_fcp.c */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at
 * http://www.opensource.org/licenses/cddl1.txt.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2004-2012 Emulex. All rights reserved.
 * Use is subject to license terms.
 */

#include <emlxs.h>

/* Required for EMLXS_CONTEXT in EMLXS_MSGF calls */
EMLXS_MSG_DEF(EMLXS_FCP_C);

#define	EMLXS_GET_VADDR(hba, rp, icmd) emlxs_mem_get_vaddr(hba, rp, \
	PADDR(icmd->un.cont64[i].addrHigh, icmd->un.cont64[i].addrLow));

static void	emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp,
		    Q *abort, uint8_t *flag, emlxs_buf_t *fpkt);

#define	SCSI3_PERSISTENT_RESERVE_IN	0x5e
#define	SCSI_INQUIRY			0x12
#define	SCSI_RX_DIAG			0x1C
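
/*
 * Note: the completion handler below reads the SCSI CDB directly out of
 * the FCP_CMND payload using fixed byte offsets.  A minimal sketch of the
 * offsets it assumes, based on the standard fcp_cmd layout (illustrative
 * only, not compiled into the driver; the function name is hypothetical):
 */
#if 0
static uint8_t
example_cdb_opcode(fc_packet_t *pkt)
{
	uint8_t *scsi_cmd = (uint8_t *)pkt->pkt_cmd;

	/*
	 * The CDB starts at byte 12 of the FCP_CMND IU (8-byte LUN plus
	 * 4 bytes of control flags), so scsi_cmd[12] is the opcode and
	 * scsi_cmd[16] is CDB byte 4: the allocation length of a 6-byte
	 * INQUIRY CDB.  RECEIVE DIAGNOSTIC (0x1C) keeps its allocation
	 * length in CDB bytes 3-4, i.e. scsi_cmd[15] and scsi_cmd[16].
	 */
	return (scsi_cmd[12]);
}
#endif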

/*
 *  emlxs_handle_fcp_event
 *
 *  Description: Process an FCP Rsp Ring completion
 *
 */
/* ARGSUSED */
extern void
emlxs_handle_fcp_event(emlxs_hba_t *hba, CHANNEL *cp, IOCBQ *iocbq)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;
	IOCB *cmd;
	emlxs_buf_t *sbp;
	fc_packet_t *pkt = NULL;
#ifdef SAN_DIAG_SUPPORT
	NODELIST *ndlp;
#endif
	uint32_t iostat;
	uint8_t localstat;
	fcp_rsp_t *rsp;
	uint32_t rsp_data_resid;
	uint32_t check_underrun;
	uint8_t asc;
	uint8_t ascq;
	uint8_t scsi_status;
	uint8_t sense;
	uint32_t did;
	uint32_t fix_it;
	uint8_t *scsi_cmd;
	uint8_t scsi_opcode;
	uint16_t scsi_dl;
	uint32_t data_rx;
	uint32_t length;

	cmd = &iocbq->iocb;

	/* Initialize the status */
	iostat = cmd->ULPSTATUS;
	localstat = 0;
	scsi_status = 0;
	asc = 0;
	ascq = 0;
	sense = 0;
	check_underrun = 0;
	fix_it = 0;

	HBASTATS.FcpEvent++;

	sbp = (emlxs_buf_t *)iocbq->sbp;

	if (!sbp) {
		/* completion with missing xmit command */
		HBASTATS.FcpStray++;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_stray_fcp_completion_msg,
		    "cmd=%x iotag=%d", cmd->ULPCOMMAND, cmd->ULPIOTAG);

		return;
	}

	HBASTATS.FcpCompleted++;

#ifdef SAN_DIAG_SUPPORT
	emlxs_update_sd_bucket(sbp);
#endif /* SAN_DIAG_SUPPORT */

	pkt = PRIV2PKT(sbp);

	did = LE_SWAP24_LO(pkt->pkt_cmd_fhdr.d_id);
	scsi_cmd = (uint8_t *)pkt->pkt_cmd;
	scsi_opcode = scsi_cmd[12];
	data_rx = 0;

	/* Sync data in data buffer only on FC_PKT_FCP_READ */
	if (pkt->pkt_datalen && (pkt->pkt_tran_type == FC_PKT_FCP_READ)) {
		EMLXS_MPDATA_SYNC(pkt->pkt_data_dma, 0, pkt->pkt_datalen,
		    DDI_DMA_SYNC_FORKERNEL);

#ifdef TEST_SUPPORT
		if (hba->underrun_counter && (iostat == IOSTAT_SUCCESS) &&
		    (pkt->pkt_datalen >= 512)) {
			hba->underrun_counter--;
			iostat = IOSTAT_FCP_RSP_ERROR;

			/* Report 512 bytes missing by adapter */
			cmd->un.fcpi.fcpi_parm = pkt->pkt_datalen - 512;

			/* Corrupt 512 bytes of Data buffer */
			bzero((uint8_t *)pkt->pkt_data, 512);

			/* Set FCP response to STATUS_GOOD */
			bzero((uint8_t *)pkt->pkt_resp, pkt->pkt_rsplen);
		}
#endif /* TEST_SUPPORT */
	}

	/* Process the pkt */
	mutex_enter(&sbp->mtx);

	/* Check for immediate return */
	if ((iostat == IOSTAT_SUCCESS) &&
	    (pkt->pkt_comp) &&
	    !(sbp->pkt_flags &
	    (PACKET_ULP_OWNED | PACKET_COMPLETED |
	    PACKET_IN_COMPLETION | PACKET_IN_TXQ | PACKET_IN_CHIPQ |
	    PACKET_IN_DONEQ | PACKET_IN_TIMEOUT | PACKET_IN_FLUSH |
	    PACKET_IN_ABORT | PACKET_POLLED))) {
		HBASTATS.FcpGood++;

		sbp->pkt_flags |=
		    (PACKET_STATE_VALID | PACKET_IN_COMPLETION |
		    PACKET_COMPLETED | PACKET_ULP_OWNED);
		mutex_exit(&sbp->mtx);

#if (EMLXS_MODREVX == EMLXS_MODREV2X)
		emlxs_unswap_pkt(sbp);
#endif /* EMLXS_MODREV2X */

#ifdef FMA_SUPPORT
		emlxs_check_dma(hba, sbp);
#endif /* FMA_SUPPORT */

		cp->ulpCmplCmd++;
		(*pkt->pkt_comp) (pkt);

#ifdef FMA_SUPPORT
		if (hba->flag & FC_DMA_CHECK_ERROR) {
			emlxs_thread_spawn(hba, emlxs_restart_thread,
			    NULL, NULL);
		}
#endif /* FMA_SUPPORT */

		return;
	}

	/*
	 * A response is only placed in the resp buffer if IOSTAT_FCP_RSP_ERROR
	 * is reported.
	 */

	/* Check if a response buffer was not provided */
	if ((iostat != IOSTAT_FCP_RSP_ERROR) || (pkt->pkt_rsplen == 0)) {
		goto done;
	}

	EMLXS_MPDATA_SYNC(pkt->pkt_resp_dma, 0, pkt->pkt_rsplen,
	    DDI_DMA_SYNC_FORKERNEL);

	/* Get the response buffer pointer */
	rsp = (fcp_rsp_t *)pkt->pkt_resp;

	/* Validate the response payload */
	if (!rsp->fcp_u.fcp_status.resid_under &&
	    !rsp->fcp_u.fcp_status.resid_over) {
		rsp->fcp_resid = 0;
	}

	if (!rsp->fcp_u.fcp_status.rsp_len_set) {
		rsp->fcp_response_len = 0;
	}

	if (!rsp->fcp_u.fcp_status.sense_len_set) {
		rsp->fcp_sense_len = 0;
	}

	length = sizeof (fcp_rsp_t) + LE_SWAP32(rsp->fcp_response_len) +
	    LE_SWAP32(rsp->fcp_sense_len);

	if (length > pkt->pkt_rsplen) {
		iostat = IOSTAT_RSP_INVALID;
		pkt->pkt_data_resid = pkt->pkt_datalen;
		goto done;
	}

	/* Set the valid response flag */
	sbp->pkt_flags |= PACKET_FCP_RSP_VALID;

	scsi_status = rsp->fcp_u.fcp_status.scsi_status;

#ifdef SAN_DIAG_SUPPORT
	ndlp = (NODELIST *)iocbq->node;
	if (scsi_status == SCSI_STAT_QUE_FULL) {
		emlxs_log_sd_scsi_event(port, SD_SCSI_SUBCATEGORY_QFULL,
		    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun);
	} else if (scsi_status == SCSI_STAT_BUSY) {
		emlxs_log_sd_scsi_event(port,
		    SD_SCSI_SUBCATEGORY_DEVBSY,
		    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun);
	}
#endif

	/*
	 * Convert a task abort to a check condition with no data
	 * transferred. We saw a data corruption when Solaris received
	 * a Task Abort from a tape.
	 */

	if (scsi_status == SCSI_STAT_TASK_ABORT) {
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_fcp_completion_error_msg,
		    "Task Abort. "
		    "Fixed. did=0x%06x sbp=%p cmd=%02x dl=%d",
		    did, sbp, scsi_opcode, pkt->pkt_datalen);

		rsp->fcp_u.fcp_status.scsi_status =
		    SCSI_STAT_CHECK_COND;
		rsp->fcp_u.fcp_status.rsp_len_set = 0;
		rsp->fcp_u.fcp_status.sense_len_set = 0;
		rsp->fcp_u.fcp_status.resid_over = 0;

		if (pkt->pkt_datalen) {
			rsp->fcp_u.fcp_status.resid_under = 1;
			rsp->fcp_resid =
			    LE_SWAP32(pkt->pkt_datalen);
		} else {
			rsp->fcp_u.fcp_status.resid_under = 0;
			rsp->fcp_resid = 0;
		}

		scsi_status = SCSI_STAT_CHECK_COND;
	}

	/*
	 * We only need to check underrun if data could
	 * have been sent
	 */

	/* Always check underrun if status is good */
	if (scsi_status == SCSI_STAT_GOOD) {
		check_underrun = 1;
	}
	/* Check the sense codes if this is a check condition */
	else if (scsi_status == SCSI_STAT_CHECK_COND) {
		check_underrun = 1;

		/* Check if sense data was provided */
		if (LE_SWAP32(rsp->fcp_sense_len) >= 14) {
			sense = *((uint8_t *)rsp + 32 + 2);
			asc = *((uint8_t *)rsp + 32 + 12);
			ascq = *((uint8_t *)rsp + 32 + 13);
		}

#ifdef SAN_DIAG_SUPPORT
		emlxs_log_sd_scsi_check_event(port,
		    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
		    scsi_opcode, sense, asc, ascq);
#endif
	}
	/* Status is not good and this is not a check condition */
	/* No data should have been sent */
	else {
		check_underrun = 0;
	}

	/* Initialize the resids */
	pkt->pkt_resp_resid = 0;
	pkt->pkt_data_resid = 0;

	/* Check if no data was to be transferred */
	if (pkt->pkt_datalen == 0) {
		goto done;
	}

	/* Get the residual underrun count reported by the SCSI reply */
	rsp_data_resid = (rsp->fcp_u.fcp_status.resid_under) ?
	    LE_SWAP32(rsp->fcp_resid) : 0;

	/* Set pkt_data_resid to what the SCSI response reported */
	pkt->pkt_data_resid = rsp_data_resid;

	/* Adjust the pkt_data_resid field if needed */
	if (pkt->pkt_tran_type == FC_PKT_FCP_READ) {
		/*
		 * Get the residual underrun count reported by
		 * our adapter
		 */
		pkt->pkt_data_resid = cmd->un.fcpi.fcpi_parm;

#ifdef SAN_DIAG_SUPPORT
		if ((rsp_data_resid == 0) && (pkt->pkt_data_resid)) {
			emlxs_log_sd_fc_rdchk_event(port,
			    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
			    scsi_opcode, pkt->pkt_data_resid);
		}
#endif

		/* Get the actual amount of data transferred */
		data_rx = pkt->pkt_datalen - pkt->pkt_data_resid;

		/*
		 * If the residual being reported by the adapter is
		 * greater than the residual being reported in the
		 * reply, then we have a true underrun.
		 */
		if (check_underrun && (pkt->pkt_data_resid > rsp_data_resid)) {
			switch (scsi_opcode) {
			case SCSI_INQUIRY:
				scsi_dl = scsi_cmd[16];
				break;

			case SCSI_RX_DIAG:
				scsi_dl =
				    (scsi_cmd[15] * 0x100) +
				    scsi_cmd[16];
				break;

			default:
				scsi_dl = pkt->pkt_datalen;
			}

#ifdef FCP_UNDERRUN_PATCH1
if (cfg[CFG_ENABLE_PATCH].current & FCP_UNDERRUN_PATCH1) {
			/*
			 * If status is not good and no data was
			 * actually transferred, then we must fix
			 * the issue
			 */
			if ((scsi_status != SCSI_STAT_GOOD) &&
			    (data_rx == 0)) {
				fix_it = 1;

				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_fcp_completion_error_msg,
				    "Underrun(1). Fixed. "
				    "did=0x%06x sbp=%p cmd=%02x "
				    "dl=%d,%d rx=%d rsp=%d",
				    did, sbp, scsi_opcode,
				    pkt->pkt_datalen, scsi_dl,
				    (pkt->pkt_datalen -
				    pkt->pkt_data_resid),
				    rsp_data_resid);

			}
}
#endif /* FCP_UNDERRUN_PATCH1 */

#ifdef FCP_UNDERRUN_PATCH2
if (cfg[CFG_ENABLE_PATCH].current & FCP_UNDERRUN_PATCH2) {
			if (scsi_status == SCSI_STAT_GOOD) {
				emlxs_msg_t *msg;

				msg = &emlxs_fcp_completion_error_msg;

				/*
				 * If status is good and this is an
				 * inquiry request and the amount of
				 * data requested <= data received, then we
				 * must fix the issue.
				 */
				if ((scsi_opcode == SCSI_INQUIRY) &&
				    (pkt->pkt_datalen >= data_rx) &&
				    (scsi_dl <= data_rx)) {
					fix_it = 1;

					EMLXS_MSGF(EMLXS_CONTEXT, msg,
					    "Underrun(2). Fixed. "
					    "did=0x%06x sbp=%p "
					    "cmd=%02x dl=%d,%d "
					    "rx=%d rsp=%d",
					    did, sbp, scsi_opcode,
					    pkt->pkt_datalen, scsi_dl,
					    data_rx, rsp_data_resid);

				}

				/*
				 * If status is good and this is an
				 * inquiry request and the amount of
				 * data requested >= 128 bytes, but
				 * only 128 bytes were received,
				 * then we must fix the issue.
				 */
				else if ((scsi_opcode == SCSI_INQUIRY) &&
				    (pkt->pkt_datalen >= 128) &&
				    (scsi_dl >= 128) && (data_rx == 128)) {
					fix_it = 1;

					EMLXS_MSGF(EMLXS_CONTEXT, msg,
					    "Underrun(3). Fixed. "
					    "did=0x%06x sbp=%p "
					    "cmd=%02x dl=%d,%d "
					    "rx=%d rsp=%d",
					    did, sbp, scsi_opcode,
					    pkt->pkt_datalen, scsi_dl,
					    data_rx, rsp_data_resid);

				}
			}
}
#endif /* FCP_UNDERRUN_PATCH2 */

			/*
			 * Check if SCSI response payload should be
			 * fixed or if a DATA_UNDERRUN should be
			 * reported
			 */
			if (fix_it) {
				/*
				 * Fix the SCSI response payload itself
				 */
				rsp->fcp_u.fcp_status.resid_under = 1;
				rsp->fcp_resid =
				    LE_SWAP32(pkt->pkt_data_resid);
			} else {
				/*
				 * Change the status from
				 * IOSTAT_FCP_RSP_ERROR to
				 * IOSTAT_DATA_UNDERRUN
				 */
				iostat = IOSTAT_DATA_UNDERRUN;
				pkt->pkt_data_resid =
				    pkt->pkt_datalen;
			}
		}

		/*
		 * If the residual being reported by the adapter is
		 * less than the residual being reported in the reply,
		 * then we have a true overrun. Since we don't know
		 * where the extra data came from or went to, we
		 * cannot trust anything we received
		 */
		else if (rsp_data_resid > pkt->pkt_data_resid) {
			/*
			 * Change the status from
			 * IOSTAT_FCP_RSP_ERROR to
			 * IOSTAT_DATA_OVERRUN
			 */
			iostat = IOSTAT_DATA_OVERRUN;
			pkt->pkt_data_resid = pkt->pkt_datalen;
		}

	} else if ((hba->sli_mode == EMLXS_HBA_SLI4_MODE) &&
	    (pkt->pkt_tran_type == FC_PKT_FCP_WRITE)) {
		/*
		 * Get the residual underrun count reported by
		 * our adapter
		 */
		pkt->pkt_data_resid = cmd->un.fcpi.fcpi_parm;

#ifdef SAN_DIAG_SUPPORT
		if ((rsp_data_resid == 0) && (pkt->pkt_data_resid)) {
			emlxs_log_sd_fc_rdchk_event(port,
			    (HBA_WWN *)&ndlp->nlp_portname, sbp->lun,
			    scsi_opcode, pkt->pkt_data_resid);
		}
#endif /* SAN_DIAG_SUPPORT */

		/* Get the actual amount of data transferred */
		data_rx = pkt->pkt_datalen - pkt->pkt_data_resid;

		/*
		 * If the residual being reported by the adapter is
		 * greater than the residual being reported in the
		 * reply, then we have a true underrun.
		 */
		if (check_underrun && (pkt->pkt_data_resid > rsp_data_resid)) {

			scsi_dl = pkt->pkt_datalen;

#ifdef FCP_UNDERRUN_PATCH1
if (cfg[CFG_ENABLE_PATCH].current & FCP_UNDERRUN_PATCH1) {
			/*
			 * If status is not good and no data was
			 * actually transferred, then we must fix
			 * the issue
			 */
			if ((scsi_status != SCSI_STAT_GOOD) &&
			    (data_rx == 0)) {
				fix_it = 1;

				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_fcp_completion_error_msg,
				    "Underrun(1). Fixed. "
				    "did=0x%06x sbp=%p cmd=%02x "
				    "dl=%d,%d rx=%d rsp=%d",
				    did, sbp, scsi_opcode,
				    pkt->pkt_datalen, scsi_dl,
				    (pkt->pkt_datalen -
				    pkt->pkt_data_resid),
				    rsp_data_resid);

			}
}
#endif /* FCP_UNDERRUN_PATCH1 */

			/*
			 * Check if SCSI response payload should be
			 * fixed or if a DATA_UNDERRUN should be
			 * reported
			 */
			if (fix_it) {
				/*
				 * Fix the SCSI response payload itself
				 */
				rsp->fcp_u.fcp_status.resid_under = 1;
				rsp->fcp_resid =
				    LE_SWAP32(pkt->pkt_data_resid);
			} else {
				/*
				 * Change the status from
				 * IOSTAT_FCP_RSP_ERROR to
				 * IOSTAT_DATA_UNDERRUN
				 */
				iostat = IOSTAT_DATA_UNDERRUN;
				pkt->pkt_data_resid =
				    pkt->pkt_datalen;
			}
		}

		/*
		 * If the residual being reported by the adapter is
		 * less than the residual being reported in the reply,
		 * then we have a true overrun. Since we don't know
		 * where the extra data came from or went to, we
		 * cannot trust anything we received
		 */
		else if (rsp_data_resid > pkt->pkt_data_resid) {
			/*
			 * Change the status from
			 * IOSTAT_FCP_RSP_ERROR to
			 * IOSTAT_DATA_OVERRUN
			 */
			iostat = IOSTAT_DATA_OVERRUN;
			pkt->pkt_data_resid = pkt->pkt_datalen;
		}
	}

done:

	/* Print completion message */
	switch (iostat) {
	case IOSTAT_SUCCESS:
		/* Build SCSI GOOD status */
		if (pkt->pkt_rsplen) {
			bzero((uint8_t *)pkt->pkt_resp, pkt->pkt_rsplen);
		}
		break;

	case IOSTAT_FCP_RSP_ERROR:
		break;

	case IOSTAT_REMOTE_STOP:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Remote Stop. did=0x%06x sbp=%p cmd=%02x", did, sbp,
		    scsi_opcode);
		break;

	case IOSTAT_LOCAL_REJECT:
		localstat = cmd->un.grsp.perr.statLocalError;

		switch (localstat) {
		case IOERR_SEQUENCE_TIMEOUT:
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_fcp_completion_error_msg,
			    "Local reject. "
			    "%s did=0x%06x sbp=%p cmd=%02x tmo=%d ",
			    emlxs_error_xlate(localstat), did, sbp,
			    scsi_opcode, pkt->pkt_timeout);
			break;

		default:
			EMLXS_MSGF(EMLXS_CONTEXT,
			    &emlxs_fcp_completion_error_msg,
			    "Local reject. %s 0x%06x %p %02x (%x)(%x)",
			    emlxs_error_xlate(localstat), did, sbp,
			    scsi_opcode, (uint16_t)cmd->ULPIOTAG,
			    (uint16_t)cmd->ULPCONTEXT);
		}

		break;

	case IOSTAT_NPORT_RJT:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Nport reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
		    scsi_opcode);
		break;

	case IOSTAT_FABRIC_RJT:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Fabric reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
		    scsi_opcode);
		break;

	case IOSTAT_NPORT_BSY:
#ifdef SAN_DIAG_SUPPORT
		ndlp = (NODELIST *)iocbq->node;
		emlxs_log_sd_fc_bsy_event(port,
		    (HBA_WWN *)&ndlp->nlp_portname);
#endif

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Nport busy. did=0x%06x sbp=%p cmd=%02x", did, sbp,
		    scsi_opcode);
		break;

	case IOSTAT_FABRIC_BSY:
#ifdef SAN_DIAG_SUPPORT
		ndlp = (NODELIST *)iocbq->node;
		emlxs_log_sd_fc_bsy_event(port, NULL);
#endif

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Fabric busy. did=0x%06x sbp=%p cmd=%02x", did, sbp,
		    scsi_opcode);
		break;

	case IOSTAT_INTERMED_RSP:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Intermediate response. did=0x%06x sbp=%p cmd=%02x", did,
		    sbp, scsi_opcode);
		break;

	case IOSTAT_LS_RJT:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "LS Reject. did=0x%06x sbp=%p cmd=%02x", did, sbp,
		    scsi_opcode);
		break;

	case IOSTAT_DATA_UNDERRUN:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Underrun. did=0x%06x sbp=%p cmd=%02x "
		    "dl=%d,%d rx=%d rsp=%d (%02x,%02x,%02x,%02x)",
		    did, sbp, scsi_opcode, pkt->pkt_datalen, scsi_dl, data_rx,
		    rsp_data_resid, scsi_status, sense, asc, ascq);
		break;

	case IOSTAT_DATA_OVERRUN:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Overrun. did=0x%06x sbp=%p cmd=%02x "
		    "dl=%d,%d rx=%d rsp=%d (%02x,%02x,%02x,%02x)",
		    did, sbp, scsi_opcode, pkt->pkt_datalen, scsi_dl, data_rx,
		    rsp_data_resid, scsi_status, sense, asc, ascq);
		break;

	case IOSTAT_RSP_INVALID:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Rsp Invalid. did=0x%06x sbp=%p cmd=%02x dl=%d rl=%d "
		    "(%d, %d, %d)",
		    did, sbp, scsi_opcode, pkt->pkt_datalen, pkt->pkt_rsplen,
		    LE_SWAP32(rsp->fcp_resid),
		    LE_SWAP32(rsp->fcp_sense_len),
		    LE_SWAP32(rsp->fcp_response_len));
		break;

	default:
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_fcp_completion_error_msg,
		    "Unknown status=%x reason=%x did=0x%06x sbp=%p cmd=%02x",
		    iostat, cmd->un.grsp.perr.statLocalError, did, sbp,
		    scsi_opcode);
		break;
	}

	if (iostat == IOSTAT_SUCCESS) {
		HBASTATS.FcpGood++;
	} else {
		HBASTATS.FcpError++;
	}

	mutex_exit(&sbp->mtx);

	emlxs_pkt_complete(sbp, iostat, localstat, 0);

	return;

} /* emlxs_handle_fcp_event() */
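
/*
 * The underrun/overrun handling above boils down to comparing two
 * residuals: what the adapter counted (fcpi_parm) against what the
 * target's FCP_RSP claimed (fcp_resid).  A hedged sketch of the
 * classification, with the driver's "fix_it" patching for known-broken
 * targets left out (illustrative only; the function name is hypothetical):
 */
#if 0
static uint32_t
example_classify_resid(uint32_t adapter_resid, uint32_t rsp_resid)
{
	if (adapter_resid > rsp_resid) {
		/* True underrun: data really went missing. */
		return (IOSTAT_DATA_UNDERRUN);
	}
	if (rsp_resid > adapter_resid) {
		/* True overrun: extra data, nothing can be trusted. */
		return (IOSTAT_DATA_OVERRUN);
	}
	return (IOSTAT_SUCCESS);	/* residuals agree */
}
#endif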

/*
 *  emlxs_post_buffer
 *
 *  This routine will post count buffers to the
 *  ring with the QUE_RING_BUF_CN command. This
 *  allows 2 buffers / command to be posted.
 *  Returns the number of buffers NOT posted.
 */
/* SLI3 */
extern int
emlxs_post_buffer(emlxs_hba_t *hba, RING *rp, int16_t cnt)
{
	emlxs_port_t *port = &PPORT;
	IOCB *icmd;
	IOCBQ *iocbq;
	MATCHMAP *mp;
	uint16_t tag;
	uint32_t maxqbuf;
	int32_t i;
	int32_t j;
	uint32_t seg;
	uint32_t size;

	mp = 0;
	maxqbuf = 2;
	tag = (uint16_t)cnt;
	cnt += rp->fc_missbufcnt;

	if (rp->ringno == hba->channel_els) {
		seg = MEM_BUF;
		size = MEM_ELSBUF_SIZE;
	} else if (rp->ringno == hba->channel_ip) {
		seg = MEM_IPBUF;
		size = MEM_IPBUF_SIZE;
	} else if (rp->ringno == hba->channel_ct) {
		seg = MEM_CTBUF;
		size = MEM_CTBUF_SIZE;
	}
#ifdef SFCT_SUPPORT
	else if (rp->ringno == hba->CHANNEL_FCT) {
		seg = MEM_FCTBUF;
		size = MEM_FCTBUF_SIZE;
	}
#endif /* SFCT_SUPPORT */
	else {
		return (0);
	}

	/*
	 * While there are buffers to post
	 */
	while (cnt) {
		if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == 0) {
			rp->fc_missbufcnt = cnt;
			return (cnt);
		}

		iocbq->channel = (void *)&hba->chan[rp->ringno];
		iocbq->port = (void *)port;
		iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);

		icmd = &iocbq->iocb;

		/*
		 * Max buffers can be posted per command
		 */
		for (i = 0; i < maxqbuf; i++) {
			if (cnt <= 0)
				break;

			/* fill in BDEs for command */
			if ((mp = (MATCHMAP *)emlxs_mem_get(hba, seg))
			    == 0) {
				icmd->ULPBDECOUNT = i;
				for (j = 0; j < i; j++) {
					mp = EMLXS_GET_VADDR(hba, rp, icmd);
					if (mp) {
						emlxs_mem_put(hba, seg,
						    (void *)mp);
					}
				}

				rp->fc_missbufcnt = cnt + i;

				emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);

				return (cnt + i);
			}

			/*
			 * map that page and save the address pair for lookup
			 * later
			 */
			emlxs_mem_map_vaddr(hba,
			    rp,
			    mp,
			    (uint32_t *)&icmd->un.cont64[i].addrHigh,
			    (uint32_t *)&icmd->un.cont64[i].addrLow);

			icmd->un.cont64[i].tus.f.bdeSize = size;
			icmd->ULPCOMMAND = CMD_QUE_RING_BUF64_CN;

			/*
			 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
			 * "UB Post: ring=%d addr=%08x%08x size=%d",
			 * rp->ringno, icmd->un.cont64[i].addrHigh,
			 * icmd->un.cont64[i].addrLow, size);
			 */

			cnt--;
		}

		icmd->ULPIOTAG = tag;
		icmd->ULPBDECOUNT = i;
		icmd->ULPLE = 1;
		icmd->ULPOWNER = OWN_CHIP;
		/* used for delimiter between commands */
		iocbq->bp = (void *)mp;

		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[rp->ringno], iocbq);
	}

	rp->fc_missbufcnt = 0;

	return (0);

} /* emlxs_post_buffer() */
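
/*
 * emlxs_post_buffer() returns the number of buffers it could NOT post and
 * remembers that shortfall in rp->fc_missbufcnt, which is folded into the
 * count on the next attempt.  A hypothetical caller (illustrative only;
 * the function name is not part of the driver):
 */
#if 0
static void
example_repost_buffers(emlxs_hba_t *hba, RING *rp, int16_t needed)
{
	/* A nonzero return means we ran out of IOCBs or buffers; */
	/* the remainder is retried automatically on a later call. */
	if (emlxs_post_buffer(hba, rp, needed) != 0) {
		/* e.g. schedule a retry from the driver timer */
	}
}
#endif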

static void
emlxs_fcp_tag_nodes(emlxs_port_t *port)
{
	NODELIST *nlp;
	int i;

	/* We will process all nodes with this tag later */
	rw_enter(&port->node_rwlock, RW_READER);
	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
		nlp = port->node_table[i];
		while (nlp != NULL) {
			nlp->nlp_tag = 1;
			nlp = nlp->nlp_list_next;
		}
	}
	rw_exit(&port->node_rwlock);
}

static NODELIST *
emlxs_find_tagged_node(emlxs_port_t *port)
{
	NODELIST *nlp;
	NODELIST *tagged;
	int i;

	/* Find first node */
	rw_enter(&port->node_rwlock, RW_READER);
	tagged = 0;
	for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
		nlp = port->node_table[i];
		while (nlp != NULL) {
			if (!nlp->nlp_tag) {
				nlp = nlp->nlp_list_next;
				continue;
			}
			nlp->nlp_tag = 0;

			if (nlp->nlp_Rpi == FABRIC_RPI) {
				nlp = nlp->nlp_list_next;
				continue;
			}
			tagged = nlp;
			break;
		}
		if (tagged) {
			break;
		}
	}
	rw_exit(&port->node_rwlock);
	return (tagged);
}
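
/*
 * emlxs_fcp_tag_nodes()/emlxs_find_tagged_node() implement a restartable
 * walk of the node table: every node is tagged up front, and each call to
 * emlxs_find_tagged_node() clears and returns one tagged, non-fabric node.
 * Because node_rwlock is dropped between calls, the list can change without
 * invalidating the walk.  The usage pattern, as seen in port_offline and
 * port_online below (illustrative sketch only):
 */
#if 0
static void
example_node_walk(emlxs_port_t *port)
{
	NODELIST *nlp;

	emlxs_fcp_tag_nodes(port);
	while ((nlp = emlxs_find_tagged_node(port)) != NULL) {
		/* Act on nlp; it is safe to take other locks here. */
	}
}
#endif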

extern int
emlxs_port_offline(emlxs_port_t *port, uint32_t scope)
{
	emlxs_hba_t *hba = HBA;
	emlxs_config_t *cfg;
	NODELIST *nlp;
	fc_affected_id_t *aid;
	uint32_t mask;
	uint32_t aff_d_id;
	uint32_t linkdown;
	uint32_t vlinkdown;
	uint32_t action;
	int i;
	uint32_t unreg_vpi;
	uint32_t update;
	uint32_t adisc_support;
	uint32_t clear_all;
	uint8_t format;

	/* Target mode only uses this routine for linkdowns */
	if ((port->mode == MODE_TARGET) && (scope != 0xffffffff) &&
	    (scope != 0xfeffffff) && (scope != 0xfdffffff)) {
		return (0);
	}

	cfg = &CFG;
	aid = (fc_affected_id_t *)&scope;
	linkdown = 0;
	vlinkdown = 0;
	unreg_vpi = 0;
	update = 0;
	clear_all = 0;

	if (!(port->flag & EMLXS_PORT_BOUND)) {
		return (0);
	}

	format = aid->aff_format;

	switch (format) {
	case 0:	/* Port */
		mask = 0x00ffffff;
		break;

	case 1:	/* Area */
		mask = 0x00ffff00;
		break;

	case 2:	/* Domain */
		mask = 0x00ff0000;
		break;

	case 3:	/* Network */
		mask = 0x00000000;
		break;

#ifdef DHCHAP_SUPPORT
	case 0xfe:	/* Virtual link down */
		mask = 0x00000000;
		vlinkdown = 1;
		break;
#endif /* DHCHAP_SUPPORT */

	case 0xff:	/* link is down */
		mask = 0x00000000;
		linkdown = 1;
		break;

	case 0xfd:	/* New fabric */
	default:
		mask = 0x00000000;
		linkdown = 1;
		clear_all = 1;
		break;
	}

	aff_d_id = aid->aff_d_id & mask;

	/*
	 * If link is down then this is a hard shutdown and flush.
	 * If link is not down then this is a soft shutdown and flush
	 * (e.g. RSCN)
	 */
	if (linkdown) {
		mutex_enter(&EMLXS_PORT_LOCK);

		port->flag &= EMLXS_PORT_LINKDOWN_MASK;

		if (port->ulp_statec != FC_STATE_OFFLINE) {
			port->ulp_statec = FC_STATE_OFFLINE;

			port->prev_did = port->did;
			port->did = 0;
			port->rdid = 0;

			bcopy(&port->fabric_sparam, &port->prev_fabric_sparam,
			    sizeof (SERV_PARM));
			bzero(&port->fabric_sparam, sizeof (SERV_PARM));

			update = 1;
		}

		mutex_exit(&EMLXS_PORT_LOCK);

		emlxs_timer_cancel_clean_address(port);

		/* Tell ULP about it */
		if (update) {
			if (port->flag & EMLXS_PORT_BOUND) {
				if (port->vpi == 0) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_link_down_msg, NULL);
				}

				if (port->mode == MODE_INITIATOR) {
					emlxs_fca_link_down(port);
				}
#ifdef SFCT_SUPPORT
				else if (port->mode == MODE_TARGET) {
					emlxs_fct_link_down(port);
				}
#endif /* SFCT_SUPPORT */

			} else {
				if (port->vpi == 0) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_link_down_msg, "*");
				}
			}
		}

		unreg_vpi = 1;

#ifdef DHCHAP_SUPPORT
		/* Stop authentication with all nodes */
		emlxs_dhc_auth_stop(port, NULL);
#endif /* DHCHAP_SUPPORT */

		/* Flush the base node */
		(void) emlxs_tx_node_flush(port, &port->node_base, 0, 0, 0);
		(void) emlxs_chipq_node_flush(port, 0, &port->node_base, 0);

		/* Flush any pending ub buffers */
		emlxs_ub_flush(port);
	}
#ifdef DHCHAP_SUPPORT
	/* virtual link down */
	else if (vlinkdown) {
		mutex_enter(&EMLXS_PORT_LOCK);

		if (port->ulp_statec != FC_STATE_OFFLINE) {
			port->ulp_statec = FC_STATE_OFFLINE;
			update = 1;
		}

		mutex_exit(&EMLXS_PORT_LOCK);

		emlxs_timer_cancel_clean_address(port);

		/* Tell ULP about it */
		if (update) {
			if (port->flag & EMLXS_PORT_BOUND) {
				if (port->vpi == 0) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_link_down_msg,
					    "Switch authentication failed.");
				}

				if (port->mode == MODE_INITIATOR) {
					emlxs_fca_link_down(port);
				}
#ifdef SFCT_SUPPORT
				else if (port->mode == MODE_TARGET) {
					emlxs_fct_link_down(port);
				}
#endif /* SFCT_SUPPORT */
			} else {
				if (port->vpi == 0) {
					EMLXS_MSGF(EMLXS_CONTEXT,
					    &emlxs_link_down_msg,
					    "Switch authentication failed. *");
				}
			}
		}

		/* Flush the base node */
		(void) emlxs_tx_node_flush(port, &port->node_base, 0, 0, 0);
		(void) emlxs_chipq_node_flush(port, 0, &port->node_base, 0);
	}
#endif /* DHCHAP_SUPPORT */
	else {
		emlxs_timer_cancel_clean_address(port);
	}

	if (port->mode == MODE_TARGET) {
		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
			/* Set the node tags */
			emlxs_fcp_tag_nodes(port);
			unreg_vpi = 0;
			while ((nlp = emlxs_find_tagged_node(port))) {
				(void) emlxs_rpi_pause_notify(port,
				    nlp->rpip);
				/*
				 * In port_online we need to resume
				 * these RPIs before we can use them.
				 */
			}
		}
		goto done;
	}

	/* Set the node tags */
	emlxs_fcp_tag_nodes(port);

	if (!clear_all && (hba->flag & FC_ONLINE_MODE)) {
		adisc_support = cfg[CFG_ADISC_SUPPORT].current;
	} else {
		adisc_support = 0;
	}

	/* Check ADISC support level */
	switch (adisc_support) {
	case 0:	/* No support - Flush all IO to all matching nodes */

		for (;;) {
			/*
			 * We need to hold the locks this way because
			 * EMLXS_SLI_UNREG_NODE and the flush routines enter
			 * the same locks. Also, when we release the lock the
			 * list can change out from under us.
			 */

			/* Find first node */
			rw_enter(&port->node_rwlock, RW_READER);
			action = 0;
			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
				nlp = port->node_table[i];
				while (nlp != NULL) {
					if (!nlp->nlp_tag) {
						nlp = nlp->nlp_list_next;
						continue;
					}
					nlp->nlp_tag = 0;

					/*
					 * Check for any device that matches
					 * our mask
					 */
					if ((nlp->nlp_DID & mask) ==
					    aff_d_id) {
						if (linkdown) {
							action = 1;
							break;
						} else { /* Must be an RSCN */
							action = 2;
							break;
						}
					}
					nlp = nlp->nlp_list_next;
				}

				if (action) {
					break;
				}
			}
			rw_exit(&port->node_rwlock);


			/* Check if nothing was found */
			if (action == 0) {
				break;
			} else if (action == 1) {
				(void) EMLXS_SLI_UNREG_NODE(port, nlp,
				    NULL, NULL, NULL);
			} else if (action == 2) {
				EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);

#ifdef DHCHAP_SUPPORT
				emlxs_dhc_auth_stop(port, nlp);
#endif /* DHCHAP_SUPPORT */

				/*
				 * Close the node for any further normal IO.
				 * A PLOGI will reopen the node.
				 */
				emlxs_node_close(port, nlp,
				    hba->channel_fcp, 60);
				emlxs_node_close(port, nlp,
				    hba->channel_ip, 60);

				/* Flush tx queue */
				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);

				/* Flush chip queue */
				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);
			}
		}

		break;

	case 1:	/* Partial support - Flush IO for non-FCP2 matching nodes */

		for (;;) {
			/*
			 * We need to hold the locks this way because
			 * EMLXS_SLI_UNREG_NODE and the flush routines enter
			 * the same locks. Also, when we release the lock the
			 * list can change out from under us.
			 */
			rw_enter(&port->node_rwlock, RW_READER);
			action = 0;
			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
				nlp = port->node_table[i];
				while (nlp != NULL) {
					if (!nlp->nlp_tag) {
						nlp = nlp->nlp_list_next;
						continue;
					}
					nlp->nlp_tag = 0;

					/*
					 * Check for special FCP2 target device
					 * that matches our mask
					 */
					if ((nlp->nlp_fcp_info &
					    NLP_FCP_TGT_DEVICE) &&
					    (nlp->nlp_fcp_info &
					    NLP_FCP_2_DEVICE) &&
					    (nlp->nlp_DID & mask) ==
					    aff_d_id) {
						action = 3;
						break;
					}

					/*
					 * Check for any other device that
					 * matches our mask
					 */
					else if ((nlp->nlp_DID & mask) ==
					    aff_d_id) {
						if (linkdown) {
							action = 1;
							break;
						} else { /* Must be an RSCN */
							action = 2;
							break;
						}
					}

					nlp = nlp->nlp_list_next;
				}

				if (action) {
					break;
				}
			}
			rw_exit(&port->node_rwlock);

			/* Check if nothing was found */
			if (action == 0) {
				break;
			} else if (action == 1) {
				(void) EMLXS_SLI_UNREG_NODE(port, nlp,
				    NULL, NULL, NULL);
			} else if (action == 2) {
				EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);

#ifdef DHCHAP_SUPPORT
				emlxs_dhc_auth_stop(port, nlp);
#endif /* DHCHAP_SUPPORT */

				/*
				 * Close the node for any further normal IO.
				 * A PLOGI will reopen the node.
				 */
				emlxs_node_close(port, nlp,
				    hba->channel_fcp, 60);
				emlxs_node_close(port, nlp,
				    hba->channel_ip, 60);

				/* Flush tx queue */
				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);

				/* Flush chip queue */
				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);

			} else if (action == 3) {	/* FCP2 devices */
				EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);

				unreg_vpi = 0;

				if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
					(void) emlxs_rpi_pause_notify(port,
					    nlp->rpip);
				}

#ifdef DHCHAP_SUPPORT
				emlxs_dhc_auth_stop(port, nlp);
#endif /* DHCHAP_SUPPORT */

				/*
				 * Close the node for any further normal IO.
				 * An ADISC or a PLOGI will reopen the node.
				 */
				emlxs_node_close(port, nlp,
				    hba->channel_fcp, -1);
				emlxs_node_close(port, nlp, hba->channel_ip,
				    ((linkdown) ? 0 : 60));

				/* Flush tx queues except for FCP ring */
				(void) emlxs_tx_node_flush(port, nlp,
				    &hba->chan[hba->channel_ct], 0, 0);
				(void) emlxs_tx_node_flush(port, nlp,
				    &hba->chan[hba->channel_els], 0, 0);
				(void) emlxs_tx_node_flush(port, nlp,
				    &hba->chan[hba->channel_ip], 0, 0);

				/* Flush chip queues except for FCP ring */
				(void) emlxs_chipq_node_flush(port,
				    &hba->chan[hba->channel_ct], nlp, 0);
				(void) emlxs_chipq_node_flush(port,
				    &hba->chan[hba->channel_els], nlp, 0);
				(void) emlxs_chipq_node_flush(port,
				    &hba->chan[hba->channel_ip], nlp, 0);
			}
		}

		break;

	case 2:	/* Full support - Hold FCP IO to FCP target matching nodes */

		if (!linkdown && !vlinkdown) {
			break;
		}

		for (;;) {
			/*
			 * We need to hold the locks this way because
			 * EMLXS_SLI_UNREG_NODE and the flush routines enter
			 * the same locks. Also, when we release the lock the
			 * list can change out from under us.
			 */
			rw_enter(&port->node_rwlock, RW_READER);
			action = 0;
			for (i = 0; i < EMLXS_NUM_HASH_QUES; i++) {
				nlp = port->node_table[i];
				while (nlp != NULL) {
					if (!nlp->nlp_tag) {
						nlp = nlp->nlp_list_next;
						continue;
					}
					nlp->nlp_tag = 0;

					/*
					 * Check for FCP target device that
					 * matches our mask
					 */
					if ((nlp->nlp_fcp_info &
					    NLP_FCP_TGT_DEVICE) &&
					    (nlp->nlp_DID & mask) ==
					    aff_d_id) {
						action = 3;
						break;
					}

					/*
					 * Check for any other device that
					 * matches our mask
					 */
					else if ((nlp->nlp_DID & mask) ==
					    aff_d_id) {
						if (linkdown) {
							action = 1;
							break;
						} else { /* Must be an RSCN */
							action = 2;
							break;
						}
					}

					nlp = nlp->nlp_list_next;
				}
				if (action) {
					break;
				}
			}
			rw_exit(&port->node_rwlock);

			/* Check if nothing was found */
			if (action == 0) {
				break;
			} else if (action == 1) {
				(void) EMLXS_SLI_UNREG_NODE(port, nlp,
				    NULL, NULL, NULL);
			} else if (action == 2) {
				EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);

				/*
				 * Close the node for any further normal IO.
				 * A PLOGI will reopen the node.
				 */
				emlxs_node_close(port, nlp,
				    hba->channel_fcp, 60);
				emlxs_node_close(port, nlp,
				    hba->channel_ip, 60);

				/* Flush tx queue */
				(void) emlxs_tx_node_flush(port, nlp, 0, 0, 0);

				/* Flush chip queue */
				(void) emlxs_chipq_node_flush(port, 0, nlp, 0);

			} else if (action == 3) {	/* FCP2 devices */
				EMLXS_SET_DFC_STATE(nlp, NODE_LIMBO);

				unreg_vpi = 0;

				if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
					(void) emlxs_rpi_pause_notify(port,
					    nlp->rpip);
				}

				/*
				 * Close the node for any further normal IO.
				 * An ADISC or a PLOGI will reopen the node.
				 */
				emlxs_node_close(port, nlp,
				    hba->channel_fcp, -1);
				emlxs_node_close(port, nlp, hba->channel_ip,
				    ((linkdown) ? 0 : 60));

				/* Flush tx queues except for FCP ring */
				(void) emlxs_tx_node_flush(port, nlp,
				    &hba->chan[hba->channel_ct], 0, 0);
				(void) emlxs_tx_node_flush(port, nlp,
				    &hba->chan[hba->channel_els], 0, 0);
				(void) emlxs_tx_node_flush(port, nlp,
				    &hba->chan[hba->channel_ip], 0, 0);

				/* Flush chip queues except for FCP ring */
				(void) emlxs_chipq_node_flush(port,
				    &hba->chan[hba->channel_ct], nlp, 0);
				(void) emlxs_chipq_node_flush(port,
				    &hba->chan[hba->channel_els], nlp, 0);
				(void) emlxs_chipq_node_flush(port,
				    &hba->chan[hba->channel_ip], nlp, 0);
			}
		}

		break;

	}	/* switch() */

done:

	if (unreg_vpi) {
		(void) emlxs_mb_unreg_vpi(port);
	}

	return (0);

} /* emlxs_port_offline() */
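
/*
 * The 'scope' argument to emlxs_port_offline() is an fc_affected_id_t:
 * the top byte selects the format and the low 24 bits carry the affected
 * D_ID.  Summarizing the masks chosen by the switch above:
 *
 *   format 0x00 (port)     mask 0x00ffffff   flush one N_Port
 *   format 0x01 (area)     mask 0x00ffff00   flush one area
 *   format 0x02 (domain)   mask 0x00ff0000   flush one domain
 *   format 0x03 (network)  mask 0x00000000   flush everything
 *   format 0xfe            mask 0x00000000   virtual link down (DHCHAP)
 *   format 0xff            mask 0x00000000   physical link down
 *   format 0xfd            mask 0x00000000   new fabric: link down and
 *                                            clear_all
 *
 * This is why emlxs_linkdown() below passes 0xFFFFFFFF (or 0xFDFFFFFF for
 * a new fabric), and why target mode only accepts those special encodings.
 */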

extern void
emlxs_port_online(emlxs_port_t *vport)
{
	emlxs_hba_t *hba = vport->hba;
	emlxs_port_t *port = &PPORT;
	NODELIST *nlp;
	uint32_t state;
	uint32_t update;
	uint32_t npiv_linkup;
	char topology[32];
	char linkspeed[32];
	char mode[32];

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
	 * "linkup_callback. vpi=%d fc_flag=%x", vport->vpi, hba->flag);
	 */

	if ((vport->vpi > 0) &&
	    (!(hba->flag & FC_NPIV_ENABLED) ||
	    !(hba->flag & FC_NPIV_SUPPORTED))) {
		return;
	}

	if (!(vport->flag & EMLXS_PORT_BOUND) ||
	    !(vport->flag & EMLXS_PORT_ENABLED)) {
		return;
	}

	/* Check for mode */
	if (port->mode == MODE_TARGET) {
		(void) strlcpy(mode, ", target", sizeof (mode));

		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
			/* Set the node tags */
			emlxs_fcp_tag_nodes(vport);
			while ((nlp = emlxs_find_tagged_node(vport))) {
				/* The RPI was paused in port_offline */
				(void) emlxs_rpi_resume_notify(vport,
				    nlp->rpip, 0);
			}
		}
	} else if (port->mode == MODE_INITIATOR) {
		(void) strlcpy(mode, ", initiator", sizeof (mode));
	} else {
		(void) strlcpy(mode, "unknown", sizeof (mode));
	}
	mutex_enter(&EMLXS_PORT_LOCK);

	/* Check for loop topology */
	if (hba->topology == TOPOLOGY_LOOP) {
		state = FC_STATE_LOOP;
		(void) strlcpy(topology, ", loop", sizeof (topology));
	} else {
		state = FC_STATE_ONLINE;
		(void) strlcpy(topology, ", fabric", sizeof (topology));
	}

	/* Set the link speed */
	switch (hba->linkspeed) {
	case 0:
		(void) strlcpy(linkspeed, "Gb", sizeof (linkspeed));
		state |= FC_STATE_1GBIT_SPEED;
		break;

	case LA_1GHZ_LINK:
		(void) strlcpy(linkspeed, "1Gb", sizeof (linkspeed));
		state |= FC_STATE_1GBIT_SPEED;
		break;
	case LA_2GHZ_LINK:
		(void) strlcpy(linkspeed, "2Gb", sizeof (linkspeed));
		state |= FC_STATE_2GBIT_SPEED;
		break;
	case LA_4GHZ_LINK:
		(void) strlcpy(linkspeed, "4Gb", sizeof (linkspeed));
		state |= FC_STATE_4GBIT_SPEED;
		break;
	case LA_8GHZ_LINK:
		(void) strlcpy(linkspeed, "8Gb", sizeof (linkspeed));
		state |= FC_STATE_8GBIT_SPEED;
		break;
	case LA_10GHZ_LINK:
		(void) strlcpy(linkspeed, "10Gb", sizeof (linkspeed));
		state |= FC_STATE_10GBIT_SPEED;
		break;
	case LA_16GHZ_LINK:
		(void) strlcpy(linkspeed, "16Gb", sizeof (linkspeed));
		state |= FC_STATE_16GBIT_SPEED;
		break;
	default:
		(void) snprintf(linkspeed, sizeof (linkspeed), "unknown(0x%x)",
		    hba->linkspeed);
		break;
	}

	npiv_linkup = 0;
	update = 0;

	if ((hba->state >= FC_LINK_UP) &&
	    !(hba->flag & FC_LOOPBACK_MODE) && (vport->ulp_statec != state)) {
		update = 1;
		vport->ulp_statec = state;

		if ((vport->vpi > 0) && !(hba->flag & FC_NPIV_LINKUP)) {
			hba->flag |= FC_NPIV_LINKUP;
			npiv_linkup = 1;
		}
	}

	mutex_exit(&EMLXS_PORT_LOCK);

	if (update) {
		if (vport->flag & EMLXS_PORT_BOUND) {
			if (vport->vpi == 0) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
				    "%s%s%s", linkspeed, topology, mode);

			} else if (npiv_linkup) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_npiv_link_up_msg, "%s%s%s",
				    linkspeed, topology, mode);
			}

			if (vport->mode == MODE_INITIATOR) {
				emlxs_fca_link_up(vport);
			}
#ifdef SFCT_SUPPORT
			else if (vport->mode == MODE_TARGET) {
				emlxs_fct_link_up(vport);
			}
#endif /* SFCT_SUPPORT */
		} else {
			if (vport->vpi == 0) {
				EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_up_msg,
				    "%s%s%s *", linkspeed, topology, mode);

			} else if (npiv_linkup) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_npiv_link_up_msg, "%s%s%s *",
				    linkspeed, topology, mode);
			}
		}

		/* Check for waiting threads */
		if (vport->vpi == 0) {
			mutex_enter(&EMLXS_LINKUP_LOCK);
			if (hba->linkup_wait_flag == TRUE) {
				hba->linkup_wait_flag = FALSE;
				cv_broadcast(&EMLXS_LINKUP_CV);
			}
			mutex_exit(&EMLXS_LINKUP_LOCK);
		}

		/* Flush any pending ub buffers */
		emlxs_ub_flush(vport);
	}

	return;

} /* emlxs_port_online() */

/* SLI3 */
extern void
emlxs_linkdown(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	int i;
	uint32_t scope;

	mutex_enter(&EMLXS_PORT_LOCK);

	if (hba->state > FC_LINK_DOWN) {
		HBASTATS.LinkDown++;
		EMLXS_STATE_CHANGE_LOCKED(hba, FC_LINK_DOWN);
	}

	/* Set scope */
	scope = (hba->flag & FC_NEW_FABRIC)? 0xFDFFFFFF:0xFFFFFFFF;

	/* Filter hba flags */
	hba->flag &= FC_LINKDOWN_MASK;
	hba->discovery_timer = 0;
	hba->linkup_timer = 0;

	mutex_exit(&EMLXS_PORT_LOCK);

	for (i = 0; i < MAX_VPORTS; i++) {
		port = &VPORT(i);

		if (!(port->flag & EMLXS_PORT_BOUND)) {
			continue;
		}

		(void) emlxs_port_offline(port, scope);
	}

	emlxs_log_link_event(port);

	return;

} /* emlxs_linkdown() */

/* SLI3 */
extern void
emlxs_linkup(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg = &CFG;

	mutex_enter(&EMLXS_PORT_LOCK);

	/* Check for any mode changes */
	emlxs_mode_set(hba);

	HBASTATS.LinkUp++;
	EMLXS_STATE_CHANGE_LOCKED(hba, FC_LINK_UP);

#ifdef MENLO_SUPPORT
	if (hba->flag & FC_MENLO_MODE) {
		mutex_exit(&EMLXS_PORT_LOCK);

		/*
		 * Trigger linkup CV and don't start linkup & discovery
		 * timers
		 */
		mutex_enter(&EMLXS_LINKUP_LOCK);
		cv_broadcast(&EMLXS_LINKUP_CV);
		mutex_exit(&EMLXS_LINKUP_LOCK);

		emlxs_log_link_event(port);

		return;
	}
#endif /* MENLO_SUPPORT */

	/* Set the linkup & discovery timers */
	hba->linkup_timer = hba->timer_tics + cfg[CFG_LINKUP_TIMEOUT].current;
	hba->discovery_timer =
	    hba->timer_tics + cfg[CFG_LINKUP_TIMEOUT].current +
	    cfg[CFG_DISC_TIMEOUT].current;

	mutex_exit(&EMLXS_PORT_LOCK);

	emlxs_log_link_event(port);

	return;

} /* emlxs_linkup() */

/*
 *  emlxs_reset_link
 *
 *  Description:
 *  Called to reset the link with an init_link
 *
 *  Returns:
 *
 */
extern int
emlxs_reset_link(emlxs_hba_t *hba, uint32_t linkup, uint32_t wait)
{
	emlxs_port_t *port = &PPORT;
	emlxs_config_t *cfg;
	MAILBOXQ *mbq = NULL;
	MAILBOX *mb = NULL;
	int rval = 0;
	int tmo;
	int rc;

	/*
	 * Get a buffer to use for the mailbox command
	 */
	if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))
	    == NULL) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_failed_msg,
		    "Unable to allocate mailbox buffer.");
		rval = 1;
		goto reset_link_fail;
	}

	if (linkup) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
		    "Resetting link...");
	} else {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
		    "Disabling link...");
	}

	mb = (MAILBOX *)mbq;

	/* Bring link down first */
	emlxs_mb_down_link(hba, mbq);

#define	MBXERR_LINK_DOWN	0x33

	if (wait) {
		wait = MBX_WAIT;
	} else {
		wait = MBX_NOWAIT;
	}
	rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, wait, 0);
	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS) &&
	    (rc != MBXERR_LINK_DOWN)) {
		rval = 1;
		goto reset_link_fail;
	}

	tmo = 120;
	do {
		delay(drv_usectohz(500000));
		tmo--;

		if (!tmo) {
			rval = 1;

			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg,
			    "Linkdown timeout.");

			goto reset_link_fail;
		}
	} while ((hba->state >= FC_LINK_UP) && (hba->state != FC_ERROR));

	if (linkup) {
		/*
		 * Setup and issue mailbox INITIALIZE LINK command
		 */

		if (wait == MBX_NOWAIT) {
			if ((mbq = (MAILBOXQ *)emlxs_mem_get(hba, MEM_MBOX))
			    == NULL) {
				EMLXS_MSGF(EMLXS_CONTEXT,
				    &emlxs_link_reset_failed_msg,
				    "Unable to allocate mailbox buffer.");
				rval = 1;
				goto reset_link_fail;
			}
			mb = (MAILBOX *)mbq;
		} else {
			/* Reuse mbq from previous mbox */
			mb = (MAILBOX *)mbq;
		}
		cfg = &CFG;

		emlxs_mb_init_link(hba, mbq,
		    cfg[CFG_TOPOLOGY].current, cfg[CFG_LINK_SPEED].current);

		mb->un.varInitLnk.lipsr_AL_PA = 0;

		/* Clear the loopback mode */
		mutex_enter(&EMLXS_PORT_LOCK);
		hba->flag &= ~FC_LOOPBACK_MODE;
		hba->loopback_tics = 0;
		mutex_exit(&EMLXS_PORT_LOCK);

		rc = EMLXS_SLI_ISSUE_MBOX_CMD(hba, mbq, wait, 0);
		if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
			rval = 1;
			goto reset_link_fail;
		}

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_link_reset_msg, NULL);
	}

reset_link_fail:

	if ((wait == MBX_WAIT) && mbq) {
		emlxs_mem_put(hba, MEM_MBOX, (void *)mbq);
	}

	return (rval);

} /* emlxs_reset_link() */
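
/*
 * emlxs_reset_link() usage sketch: 'linkup' selects reset versus disable,
 * and 'wait' selects MBX_WAIT versus MBX_NOWAIT mailbox semantics
 * (illustrative only; the function name below is hypothetical):
 */
#if 0
static void
example_link_bounce(emlxs_hba_t *hba)
{
	/*
	 * Take the link down and bring it back up, waiting for the
	 * DOWN_LINK mailbox to complete before issuing INIT_LINK.
	 */
	if (emlxs_reset_link(hba, 1, 1) != 0) {
		/* mailbox allocation failure or linkdown timeout */
	}
}
#endif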

extern int
emlxs_online(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	int32_t rval = 0;
	uint32_t i = 0;

	/* Make sure adapter is offline or exit trying (30 seconds) */
	while (i++ < 30) {
		/* Check if adapter is already going online */
		if (hba->flag & (FC_ONLINE_MODE | FC_ONLINING_MODE)) {
			return (0);
		}

		mutex_enter(&EMLXS_PORT_LOCK);

		/* Check again */
		if (hba->flag & (FC_ONLINE_MODE | FC_ONLINING_MODE)) {
			mutex_exit(&EMLXS_PORT_LOCK);
			return (0);
		}

		/* Check if adapter is offline */
		if (hba->flag & FC_OFFLINE_MODE) {
			/* Mark it going online */
			hba->flag &= ~FC_OFFLINE_MODE;
			hba->flag |= FC_ONLINING_MODE;

			/* Currently !FC_ONLINE_MODE and !FC_OFFLINE_MODE */
			mutex_exit(&EMLXS_PORT_LOCK);
			break;
		}

		mutex_exit(&EMLXS_PORT_LOCK);

		BUSYWAIT_MS(1000);
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
	    "Going online...");

	if ((rval = EMLXS_SLI_ONLINE(hba))) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_init_failed_msg, "status=%x",
		    rval);
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_offline_msg, NULL);

		/* Set FC_OFFLINE_MODE */
		mutex_enter(&EMLXS_PORT_LOCK);
		hba->flag |= FC_OFFLINE_MODE;
		hba->flag &= ~FC_ONLINING_MODE;
		mutex_exit(&EMLXS_PORT_LOCK);

		return (rval);
	}

	/* Start the timer */
	emlxs_timer_start(hba);

	/* Set FC_ONLINE_MODE */
	mutex_enter(&EMLXS_PORT_LOCK);
	hba->flag |= FC_ONLINE_MODE;
	hba->flag &= ~FC_ONLINING_MODE;
	mutex_exit(&EMLXS_PORT_LOCK);

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_online_msg, NULL);

#ifdef SFCT_SUPPORT
	if (port->flag & EMLXS_TGT_ENABLED) {
		(void) emlxs_fct_port_initialize(port);
	}
#endif /* SFCT_SUPPORT */

	return (rval);

} /* emlxs_online() */

extern int
emlxs_offline(emlxs_hba_t *hba, uint32_t reset_requested)
{
	emlxs_port_t *port = &PPORT;
	uint32_t i = 0;
	int rval = 1;

	/* Make sure adapter is online or exit trying (30 seconds) */
	while (i++ < 30) {
		/* Check if adapter is already going offline */
		if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
			return (0);
		}

		mutex_enter(&EMLXS_PORT_LOCK);

		/* Check again */
		if (hba->flag & (FC_OFFLINE_MODE | FC_OFFLINING_MODE)) {
			mutex_exit(&EMLXS_PORT_LOCK);
			return (0);
		}

		/* Check if adapter is online */
		if (hba->flag & FC_ONLINE_MODE) {
			/* Mark it going offline */
			hba->flag &= ~FC_ONLINE_MODE;
			hba->flag |= FC_OFFLINING_MODE;

			/* Currently !FC_ONLINE_MODE and !FC_OFFLINE_MODE */
			mutex_exit(&EMLXS_PORT_LOCK);
			break;
		}

		mutex_exit(&EMLXS_PORT_LOCK);

		BUSYWAIT_MS(1000);
	}

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_adapter_trans_msg,
	    "Going offline...");

	/* Declare link down */
	if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
		(void) emlxs_fcf_shutdown_notify(port, 1);
	} else {
		emlxs_linkdown(hba);
	}

#ifdef SFCT_SUPPORT
	if (port->flag & EMLXS_TGT_ENABLED) {
		(void) emlxs_fct_port_shutdown(port);
	}
#endif /* SFCT_SUPPORT */

	/* Check if adapter was shutdown */
	if (hba->flag & FC_HARDWARE_ERROR) {
		/*
		 * Force mailbox cleanup
		 * This will wake any sleeping or polling threads
		 */
		emlxs_mb_fini(hba, NULL, MBX_HARDWARE_ERROR);
	}

	/* Pause here for the IO to settle */
	ddi_sleep(1);	/* 1 sec */

	/* Unregister all nodes */
	emlxs_ffcleanup(hba);

	if (hba->bus_type == SBUS_FC) {
		WRITE_SBUS_CSR_REG(hba, FC_SHS_REG(hba), 0x9A);
#ifdef FMA_SUPPORT
		/* Access handle validation */
		EMLXS_CHK_ACC_HANDLE(hba, hba->sli.sli3.sbus_csr_handle);
#endif /* FMA_SUPPORT */
	}

	/* Stop the timer */
	emlxs_timer_stop(hba);

	/* For safety flush every iotag list */
	if (emlxs_iotag_flush(hba)) {
		/* Pause here for the IO to flush */
		ddi_msleep(1);
	}

	/* Wait for poll command request to settle */
	while (hba->io_poll_count > 0) {
		delay(drv_usectohz(2000000));	/* 2 sec */
	}

	/* Shutdown the adapter interface */
	EMLXS_SLI_OFFLINE(hba, reset_requested);

	mutex_enter(&EMLXS_PORT_LOCK);
	hba->flag |= FC_OFFLINE_MODE;
	hba->flag &= ~FC_OFFLINING_MODE;
	mutex_exit(&EMLXS_PORT_LOCK);

	rval = 0;

	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_offline_msg, NULL);

done:

	return (rval);

} /* emlxs_offline() */

extern int
emlxs_power_down(emlxs_hba_t *hba)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif /* FMA_SUPPORT */
	int32_t rval = 0;

	if ((rval = emlxs_offline(hba, 0))) {
		return (rval);
	}
	EMLXS_SLI_HBA_RESET(hba, 1, 1, 0);


#ifdef FMA_SUPPORT
	if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
	    != DDI_FM_OK) {
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_invalid_access_handle_msg, NULL);
		return (1);
	}
#endif /* FMA_SUPPORT */

	return (0);

} /* End emlxs_power_down */


extern int
emlxs_power_up(emlxs_hba_t *hba)
{
#ifdef FMA_SUPPORT
	emlxs_port_t *port = &PPORT;
#endif /* FMA_SUPPORT */
	int32_t rval = 0;


#ifdef FMA_SUPPORT
	if (emlxs_fm_check_acc_handle(hba, hba->pci_acc_handle)
	    != DDI_FM_OK) {
		EMLXS_MSGF(EMLXS_CONTEXT,
		    &emlxs_invalid_access_handle_msg, NULL);
		return (1);
	}
#endif /* FMA_SUPPORT */

	/* Bring adapter online */
	if ((rval = emlxs_online(hba))) {
		if (hba->pci_cap_offset[PCI_CAP_ID_PM]) {
			/* Put chip in D3 state */
			(void) ddi_put8(hba->pci_acc_handle,
			    (uint8_t *)(hba->pci_addr +
			    hba->pci_cap_offset[PCI_CAP_ID_PM] +
			    PCI_PMCSR),
			    (uint8_t)PCI_PMCSR_D3HOT);
		}
		return (rval);
	}

	return (rval);

} /* emlxs_power_up() */

/*
 * NAME:     emlxs_ffcleanup
 *
 * FUNCTION: Cleanup all the Firefly resources used by configuring the adapter
 *
 * EXECUTION ENVIRONMENT: process only
 *
 * CALLED FROM: CFG_TERM
 *
 * INPUT: hba - pointer to the dev_ctl area.
 *
 * RETURNS: none
 */
extern void
emlxs_ffcleanup(emlxs_hba_t *hba)
{
	emlxs_port_t *port = &PPORT;
	uint32_t i;

	/* Disable all but the mailbox interrupt */
	EMLXS_SLI_DISABLE_INTR(hba, HC_MBINT_ENA);

	/* Make sure all port nodes are destroyed */
	for (i = 0; i < MAX_VPORTS; i++) {
		port = &VPORT(i);

		if (port->node_count) {
			(void) EMLXS_SLI_UNREG_NODE(port, 0, 0, 0, 0);
		}
	}

	/* Clear all interrupt enable conditions */
	EMLXS_SLI_DISABLE_INTR(hba, 0);

	return;

} /* emlxs_ffcleanup() */

extern uint16_t
emlxs_register_pkt(CHANNEL *cp, emlxs_buf_t *sbp)
{
	emlxs_hba_t *hba;
	emlxs_port_t *port;
	uint16_t iotag;
	uint32_t i;

	hba = cp->hba;

	mutex_enter(&EMLXS_FCTAB_LOCK);

	if (sbp->iotag != 0) {
		port = &PPORT;

		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
		    "Pkt already registered! channel=%d iotag=%d sbp=%p",
		    sbp->channel, sbp->iotag, sbp);
	}

	iotag = 0;
	for (i = 0; i < hba->max_iotag; i++) {
		if (!hba->fc_iotag || hba->fc_iotag >= hba->max_iotag) {
			hba->fc_iotag = 1;
		}
		iotag = hba->fc_iotag++;

		if (hba->fc_table[iotag] == 0 ||
		    hba->fc_table[iotag] == STALE_PACKET) {
			hba->io_count++;
			hba->fc_table[iotag] = sbp;

			sbp->iotag = iotag;
			sbp->channel = cp;

			break;
		}
		iotag = 0;
	}

	mutex_exit(&EMLXS_FCTAB_LOCK);

	/*
	 * EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_detail_msg,
	 * "register_pkt: channel=%d iotag=%d sbp=%p",
	 * cp->channelno, iotag, sbp);
	 */

	return (iotag);

} /* emlxs_register_pkt() */
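
/*
 * The iotag allocator above is a circular scan of fc_table[1..max_iotag):
 * hba->fc_iotag remembers where the previous search stopped, slot 0 is
 * reserved as the "no tag" value, and a slot is free if it holds NULL or
 * STALE_PACKET.  A distilled sketch of the same scan, outside the driver's
 * locking (illustrative only; the function name is hypothetical):
 */
#if 0
static uint16_t
example_alloc_iotag(emlxs_buf_t **table, uint16_t *next, uint16_t max,
    emlxs_buf_t *sbp)
{
	uint16_t iotag;
	uint32_t i;

	for (i = 0; i < max; i++) {
		if (*next == 0 || *next >= max) {
			*next = 1;	/* wrap around, skipping slot 0 */
		}
		iotag = (*next)++;

		if (table[iotag] == NULL || table[iotag] == STALE_PACKET) {
			table[iotag] = sbp;
			return (iotag);	/* claimed this slot */
		}
	}
	return (0);	/* table full: 0 means no tag was assigned */
}
#endif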

extern emlxs_buf_t *
emlxs_unregister_pkt(CHANNEL *cp, uint16_t iotag, uint32_t forced)
{
	emlxs_hba_t *hba;
	emlxs_buf_t *sbp;

	sbp = NULL;
	hba = cp->hba;

	/* Check the iotag range */
	if ((iotag == 0) || (iotag >= hba->max_iotag)) {
		return (NULL);
	}

	/* Remove the sbp from the table */
	mutex_enter(&EMLXS_FCTAB_LOCK);
	sbp = hba->fc_table[iotag];

	if (!sbp || (sbp == STALE_PACKET)) {
		mutex_exit(&EMLXS_FCTAB_LOCK);
		return (sbp);
	}

	hba->fc_table[iotag] = ((forced) ? STALE_PACKET : NULL);
	hba->io_count--;
	sbp->iotag = 0;

	mutex_exit(&EMLXS_FCTAB_LOCK);


	/* Clean up the sbp */
	mutex_enter(&sbp->mtx);

	if (sbp->pkt_flags & PACKET_IN_TXQ) {
		sbp->pkt_flags &= ~PACKET_IN_TXQ;
		hba->channel_tx_count--;
	}

	if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
		sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
	}

	if (sbp->bmp) {
		emlxs_mem_put(hba, MEM_BPL, (void *)sbp->bmp);
		sbp->bmp = 0;
	}

	mutex_exit(&sbp->mtx);

	return (sbp);

} /* emlxs_unregister_pkt() */

/* Flush all IO's to all nodes for a given IO Channel */
extern uint32_t
emlxs_tx_channel_flush(emlxs_hba_t *hba, CHANNEL *cp, emlxs_buf_t *fpkt)
{
	emlxs_port_t *port = &PPORT;
	emlxs_buf_t *sbp;
	IOCBQ *iocbq;
	IOCBQ *next;
	IOCB *iocb;
	uint32_t channelno;
	Q abort;
	NODELIST *ndlp;
	IOCB *icmd;
	MATCHMAP *mp;
	uint32_t i;
	uint8_t flag[MAX_CHANNEL];

	channelno = cp->channelno;
	bzero((void *)&abort, sizeof (Q));
	bzero((void *)flag, MAX_CHANNEL * sizeof (uint8_t));

	mutex_enter(&EMLXS_TX_CHANNEL_LOCK);

	/* While a node needs servicing */
	while (cp->nodeq.q_first) {
		ndlp = (NODELIST *) cp->nodeq.q_first;

		/* Check if priority queue is not empty */
		if (ndlp->nlp_ptx[channelno].q_first) {
			/* Transfer all iocb's to local queue */
			if (abort.q_first == 0) {
				abort.q_first =
				    ndlp->nlp_ptx[channelno].q_first;
			} else {
				((IOCBQ *)abort.q_last)->next =
				    (IOCBQ *)ndlp->nlp_ptx[channelno].q_first;
			}

			flag[channelno] = 1;

			abort.q_last = ndlp->nlp_ptx[channelno].q_last;
			abort.q_cnt += ndlp->nlp_ptx[channelno].q_cnt;
		}

		/* Check if tx queue is not empty */
		if (ndlp->nlp_tx[channelno].q_first) {
			/* Transfer all iocb's to local queue */
			if (abort.q_first == 0) {
				abort.q_first =
				    ndlp->nlp_tx[channelno].q_first;
			} else {
				((IOCBQ *)abort.q_last)->next =
				    (IOCBQ *)ndlp->nlp_tx[channelno].q_first;
			}

			abort.q_last = ndlp->nlp_tx[channelno].q_last;
			abort.q_cnt += ndlp->nlp_tx[channelno].q_cnt;
		}

		/* Clear the queue pointers */
		ndlp->nlp_ptx[channelno].q_first = NULL;
		ndlp->nlp_ptx[channelno].q_last = NULL;
		ndlp->nlp_ptx[channelno].q_cnt = 0;

		ndlp->nlp_tx[channelno].q_first = NULL;
		ndlp->nlp_tx[channelno].q_last = NULL;
		ndlp->nlp_tx[channelno].q_cnt = 0;

		/* Remove node from service queue */

		/* If this is the last node on list */
		if (cp->nodeq.q_last == (void *)ndlp) {
			cp->nodeq.q_last = NULL;
			cp->nodeq.q_first = NULL;
			cp->nodeq.q_cnt = 0;
		} else {
			/* Remove node from head */
			cp->nodeq.q_first = ndlp->nlp_next[channelno];
			((NODELIST *)cp->nodeq.q_last)->nlp_next[channelno] =
			    cp->nodeq.q_first;
			cp->nodeq.q_cnt--;
		}

		/* Clear node */
		ndlp->nlp_next[channelno] = NULL;
	}

	/* First cleanup the iocb's while still holding the lock */
	iocbq = (IOCBQ *) abort.q_first;
	while (iocbq) {
		/* Free the IoTag and the bmp */
		iocb = &iocbq->iocb;

		if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
			sbp = iocbq->sbp;
			if (sbp) {
				emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
			}
		} else {
			sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
			    iocb->ULPIOTAG, 0);
		}

		if (sbp && (sbp != STALE_PACKET)) {
			mutex_enter(&sbp->mtx);

			sbp->pkt_flags |= PACKET_IN_FLUSH;
			/*
			 * If the fpkt is already set, then we will leave it
			 * alone. This ensures that this pkt is only accounted
			 * for on one fpkt->flush_count
			 */
			if (!sbp->fpkt && fpkt) {
				mutex_enter(&fpkt->mtx);
				sbp->fpkt = fpkt;
				fpkt->flush_count++;
				mutex_exit(&fpkt->mtx);
			}

			mutex_exit(&sbp->mtx);
		}

		iocbq = (IOCBQ *)iocbq->next;
	}	/* end of while */

	mutex_exit(&EMLXS_TX_CHANNEL_LOCK);

	/* Now abort the iocb's */
	iocbq = (IOCBQ *)abort.q_first;
	while (iocbq) {
		/* Save the next iocbq for now */
		next = (IOCBQ *)iocbq->next;

		/* Unlink this iocbq */
		iocbq->next = NULL;

		/* Get the pkt */
		sbp = (emlxs_buf_t *)iocbq->sbp;

		if (sbp) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
			    "tx: sbp=%p node=%p", sbp, sbp->node);

			if (hba->state >= FC_LINK_UP) {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_ABORT_REQUESTED, 1);
			} else {
				emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
				    IOERR_LINK_DOWN, 1);
			}

		}
		/* Free the iocb and its associated buffers */
		else {
			icmd = &iocbq->iocb;

			/* SLI3 */
			if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
			    icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
			    icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
				if ((hba->flag &
				    (FC_ONLINE_MODE | FC_ONLINING_MODE)) ==
				    0) {
					/* HBA is detaching or offlining */
					if (icmd->ULPCOMMAND !=
					    CMD_QUE_RING_LIST64_CN) {
						void *tmp;
						RING *rp;

						rp = &hba->sli.sli3.
						    ring[channelno];
						for (i = 0;
						    i < icmd->ULPBDECOUNT;
						    i++) {
							mp = EMLXS_GET_VADDR(
							    hba, rp, icmd);

							tmp = (void *)mp;
							if (mp) {
								emlxs_mem_put(
								    hba,
								    MEM_BUF,
								    tmp);
							}
						}
					}

					emlxs_mem_put(hba, MEM_IOCB,
					    (void *)iocbq);
				} else {
					/* repost the unsolicited buffer */
					EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp,
					    iocbq);
				}
			} else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
			    icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {

				emlxs_tx_put(iocbq, 1);
			}
		}

		iocbq = next;

	}	/* end of while */

	/* Now trigger channel service */
	for (channelno = 0; channelno < hba->chan_count; channelno++) {
		if (!flag[channelno]) {
			continue;
		}

		EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
	}

	return (abort.q_cnt);

} /* emlxs_tx_channel_flush() */
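
/*
 * Flush accounting note: every packet swept onto the local abort queue is
 * marked PACKET_IN_FLUSH and, at most once, linked to the flushing packet
 * via sbp->fpkt with fpkt->flush_count incremented.  The count therefore
 * tells the flusher how many victim completions remain outstanding.  A
 * sketch of the invariant (illustrative only; the function name is
 * hypothetical):
 */
#if 0
static void
example_account_flush(emlxs_buf_t *sbp, emlxs_buf_t *fpkt)
{
	/* Account a victim only once, even if it is flushed twice. */
	if (!sbp->fpkt && fpkt) {
		mutex_enter(&fpkt->mtx);
		sbp->fpkt = fpkt;
		fpkt->flush_count++;
		mutex_exit(&fpkt->mtx);
	}
}
#endif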
2460 /* Flush all IO's on all or a given ring for a given node */
2461 extern uint32_t
2462 emlxs_tx_node_flush(emlxs_port_t *port, NODELIST *ndlp, CHANNEL *chan,
2463 uint32_t shutdown, emlxs_buf_t *fpkt)
2465 emlxs_hba_t *hba = HBA;
2466 emlxs_buf_t *sbp;
2467 uint32_t channelno;
2468 CHANNEL *cp;
2469 IOCB *icmd;
2470 IOCBQ *iocbq;
2471 NODELIST *prev;
2472 IOCBQ *next;
2473 IOCB *iocb;
2474 Q abort;
2475 uint32_t i;
2476 MATCHMAP *mp;
2477 uint8_t flag[MAX_CHANNEL];
2479 bzero((void *)&abort, sizeof (Q));
2481 /* Flush all I/O's on tx queue to this target */
2482 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2484 if (!ndlp->nlp_base && shutdown) {
2485 ndlp->nlp_active = 0;
2488 for (channelno = 0; channelno < hba->chan_count; channelno++) {
2489 cp = &hba->chan[channelno];
2491 if (chan && cp != chan) {
2492 continue;
2495 if (!ndlp->nlp_base || shutdown) {
2496 /* Check if priority queue is not empty */
2497 if (ndlp->nlp_ptx[channelno].q_first) {
2498 /* Transfer all iocb's to local queue */
2499 if (abort.q_first == 0) {
2500 abort.q_first =
2501 ndlp->nlp_ptx[channelno].q_first;
2502 } else {
2503 ((IOCBQ *)(abort.q_last))->next =
2504 (IOCBQ *)ndlp->nlp_ptx[channelno].
2505 q_first;
2508 flag[channelno] = 1;
2510 abort.q_last = ndlp->nlp_ptx[channelno].q_last;
2511 abort.q_cnt += ndlp->nlp_ptx[channelno].q_cnt;
2515 /* Check if tx queue is not empty */
2516 if (ndlp->nlp_tx[channelno].q_first) {
2518 /* Transfer all iocb's to local queue */
2519 if (abort.q_first == 0) {
2520 abort.q_first = ndlp->nlp_tx[channelno].q_first;
2521 } else {
2522 ((IOCBQ *)abort.q_last)->next =
2523 (IOCBQ *)ndlp->nlp_tx[channelno].q_first;
2526 abort.q_last = ndlp->nlp_tx[channelno].q_last;
2527 abort.q_cnt += ndlp->nlp_tx[channelno].q_cnt;
2530 /* Clear the queue pointers */
2531 ndlp->nlp_ptx[channelno].q_first = NULL;
2532 ndlp->nlp_ptx[channelno].q_last = NULL;
2533 ndlp->nlp_ptx[channelno].q_cnt = 0;
2535 ndlp->nlp_tx[channelno].q_first = NULL;
2536 ndlp->nlp_tx[channelno].q_last = NULL;
2537 ndlp->nlp_tx[channelno].q_cnt = 0;
2539 /* If this node was on the channel queue, remove it */
2540 if (ndlp->nlp_next[channelno]) {
2541 /* If this is the only node on list */
2542 if (cp->nodeq.q_first == (void *)ndlp &&
2543 cp->nodeq.q_last == (void *)ndlp) {
2544 cp->nodeq.q_last = NULL;
2545 cp->nodeq.q_first = NULL;
2546 cp->nodeq.q_cnt = 0;
2547 } else if (cp->nodeq.q_first == (void *)ndlp) {
2548 cp->nodeq.q_first = ndlp->nlp_next[channelno];
2549 ((NODELIST *) cp->nodeq.q_last)->
2550 nlp_next[channelno] = cp->nodeq.q_first;
2551 cp->nodeq.q_cnt--;
2552 } else {
2553 /*
2554 * This is a little more difficult: find the
2555 * previous node in the circular channel queue
2556 */
2557 prev = ndlp;
2558 while (prev->nlp_next[channelno] != ndlp) {
2559 prev = prev->nlp_next[channelno];
2562 prev->nlp_next[channelno] =
2563 ndlp->nlp_next[channelno];
2565 if (cp->nodeq.q_last == (void *)ndlp) {
2566 cp->nodeq.q_last = (void *)prev;
2568 cp->nodeq.q_cnt--;
2572 /* Clear node */
2573 ndlp->nlp_next[channelno] = NULL;
2578 /* First cleanup the iocb's while still holding the lock */
2579 iocbq = (IOCBQ *) abort.q_first;
2580 while (iocbq) {
2581 /* Free the IoTag and the bmp */
2582 iocb = &iocbq->iocb;
2584 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2585 sbp = iocbq->sbp;
2586 if (sbp) {
2587 emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
2589 } else {
2590 sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
2591 iocb->ULPIOTAG, 0);
2594 if (sbp && (sbp != STALE_PACKET)) {
2595 mutex_enter(&sbp->mtx);
2596 sbp->pkt_flags |= PACKET_IN_FLUSH;
2597 /*
2598 * If the fpkt is already set, then we will leave it
2599 * alone. This ensures that this pkt is only accounted
2600 * for on one fpkt->flush_count
2601 */
2602 if (!sbp->fpkt && fpkt) {
2603 mutex_enter(&fpkt->mtx);
2604 sbp->fpkt = fpkt;
2605 fpkt->flush_count++;
2606 mutex_exit(&fpkt->mtx);
2609 mutex_exit(&sbp->mtx);
2612 iocbq = (IOCBQ *) iocbq->next;
2614 } /* end of while */
2616 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2618 /* Now abort the iocb's outside the locks */
2619 iocbq = (IOCBQ *)abort.q_first;
2620 while (iocbq) {
2621 /* Save the next iocbq for now */
2622 next = (IOCBQ *)iocbq->next;
2624 /* Unlink this iocbq */
2625 iocbq->next = NULL;
2627 /* Get the pkt */
2628 sbp = (emlxs_buf_t *)iocbq->sbp;
2630 if (sbp) {
2631 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2632 "tx: sbp=%p node=%p", sbp, sbp->node);
2634 if (hba->state >= FC_LINK_UP) {
2635 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2636 IOERR_ABORT_REQUESTED, 1);
2637 } else {
2638 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2639 IOERR_LINK_DOWN, 1);
2640 }
2641 }
2643 /* Free the iocb and its associated buffers */
2644 else {
2645 /* CMD_CLOSE_XRI_CN should also free the memory */
2646 icmd = &iocbq->iocb;
2648 /* SLI3 */
2649 if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
2650 icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
2651 icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
2652 if ((hba->flag &
2653 (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2654 /* HBA is detaching or offlining */
2655 if (icmd->ULPCOMMAND !=
2656 CMD_QUE_RING_LIST64_CN) {
2657 void *tmp;
2658 RING *rp;
2659 int ch;
2661 ch = ((CHANNEL *)
2662 iocbq->channel)->channelno;
2663 rp = &hba->sli.sli3.ring[ch];
2664 for (i = 0;
2665 i < icmd->ULPBDECOUNT;
2666 i++) {
2667 mp = EMLXS_GET_VADDR(
2668 hba, rp, icmd);
2670 tmp = (void *)mp;
2671 if (mp) {
2672 emlxs_mem_put(
2673 hba, MEM_BUF, tmp);
2678 emlxs_mem_put(hba, MEM_IOCB,
2679 (void *)iocbq);
2680 } else {
2681 /* repost the unsolicited buffer */
2682 EMLXS_SLI_ISSUE_IOCB_CMD(hba,
2683 (CHANNEL *)iocbq->channel, iocbq);
2685 } else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
2686 icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {
2687 /*
2688 * Resend the abort iocbq if any
2689 */
2690 emlxs_tx_put(iocbq, 1);
2694 iocbq = next;
2696 } /* end of while */
2698 /* Now trigger channel service */
2699 for (channelno = 0; channelno < hba->chan_count; channelno++) {
2700 if (!flag[channelno]) {
2701 continue;
2704 EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
2707 return (abort.q_cnt);
2709 } /* emlxs_tx_node_flush() */
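/*
 * Illustrative sketch, not emlxs driver code: removing an arbitrary node
 * from a circular singly linked list needs a walk to find the
 * predecessor, exactly as the "prev" loop above does; head and tail
 * removal are special-cased. Stand-in types, guarded out of the build.
 */
#if 0
#include <stdio.h>

typedef struct cnode { struct cnode *next; int id; } cnode_t;

static void
circ_remove(cnode_t **first, cnode_t **last, cnode_t *n)
{
	cnode_t *prev;

	if (*first == n && *last == n) {	/* only element on the list */
		*first = *last = NULL;
	} else if (*first == n) {		/* head: relink tail to new head */
		*first = n->next;
		(*last)->next = *first;
	} else {				/* walk to the predecessor */
		prev = n;
		while (prev->next != n)
			prev = prev->next;
		prev->next = n->next;
		if (*last == n)
			*last = prev;
	}
	n->next = NULL;
}

int
main(void)
{
	cnode_t a = { &a, 1 }, b = { &a, 2 };
	cnode_t *first = &a, *last = &b;

	a.next = &b;			/* a -> b -> a (circular) */
	circ_remove(&first, &last, &b);
	printf("first=%d last=%d\n", first->id, last->id);	/* 1 1 */
	return (0);
}
#endif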
2712 /* Check for IO's on all or a given ring for a given node */
2713 extern uint32_t
2714 emlxs_tx_node_check(emlxs_port_t *port, NODELIST *ndlp, CHANNEL *chan)
2716 emlxs_hba_t *hba = HBA;
2717 uint32_t channelno;
2718 CHANNEL *cp;
2719 uint32_t count;
2721 count = 0;
2723 /* Count I/O's on tx queue to this target */
2724 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2726 for (channelno = 0; channelno < hba->chan_count; channelno++) {
2727 cp = &hba->chan[channelno];
2729 if (chan && cp != chan) {
2730 continue;
2733 /* Check if priority queue is not empty */
2734 if (ndlp->nlp_ptx[channelno].q_first) {
2735 count += ndlp->nlp_ptx[channelno].q_cnt;
2738 /* Check if tx queue is not empty */
2739 if (ndlp->nlp_tx[channelno].q_first) {
2740 count += ndlp->nlp_tx[channelno].q_cnt;
2745 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2747 return (count);
2749 } /* emlxs_tx_node_check() */
2753 /* Flush all IO's on any ring for a given node's lun */
2754 extern uint32_t
2755 emlxs_tx_lun_flush(emlxs_port_t *port, NODELIST *ndlp, uint32_t lun,
2756 emlxs_buf_t *fpkt)
2758 emlxs_hba_t *hba = HBA;
2759 emlxs_buf_t *sbp;
2760 uint32_t channelno;
2761 IOCBQ *iocbq;
2762 IOCBQ *prev;
2763 IOCBQ *next;
2764 IOCB *iocb;
2765 IOCB *icmd;
2766 Q abort;
2767 uint32_t i;
2768 MATCHMAP *mp;
2769 uint8_t flag[MAX_CHANNEL];
2771 if (lun == EMLXS_LUN_NONE) {
2772 return (0);
2775 bzero((void *)&abort, sizeof (Q));
2777 /* Flush I/O's on txQ to this target's lun */
2778 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
2780 for (channelno = 0; channelno < hba->chan_count; channelno++) {
2782 /* Scan the priority queue first */
2783 prev = NULL;
2784 iocbq = (IOCBQ *) ndlp->nlp_ptx[channelno].q_first;
2786 while (iocbq) {
2787 next = (IOCBQ *)iocbq->next;
2788 iocb = &iocbq->iocb;
2789 sbp = (emlxs_buf_t *)iocbq->sbp;
2791 /* Check if this IO is for our lun */
2792 if (sbp && (sbp->lun == lun)) {
2793 /* Remove iocb from the node's ptx queue */
2794 if (next == 0) {
2795 ndlp->nlp_ptx[channelno].q_last =
2796 (uint8_t *)prev;
2799 if (prev == 0) {
2800 ndlp->nlp_ptx[channelno].q_first =
2801 (uint8_t *)next;
2802 } else {
2803 prev->next = next;
2806 iocbq->next = NULL;
2807 ndlp->nlp_ptx[channelno].q_cnt--;
2809 /*
2810 * Add this iocb to our local abort Q
2811 */
2812 if (abort.q_first) {
2813 ((IOCBQ *)abort.q_last)->next = iocbq;
2814 abort.q_last = (uint8_t *)iocbq;
2815 abort.q_cnt++;
2816 } else {
2817 abort.q_first = (uint8_t *)iocbq;
2818 abort.q_last = (uint8_t *)iocbq;
2819 abort.q_cnt = 1;
2821 iocbq->next = NULL;
2822 flag[channelno] = 1;
2824 } else {
2825 prev = iocbq;
2828 iocbq = next;
2830 } /* while (iocbq) */
2833 /* Scan the regular queue */
2834 prev = NULL;
2835 iocbq = (IOCBQ *)ndlp->nlp_tx[channelno].q_first;
2837 while (iocbq) {
2838 next = (IOCBQ *)iocbq->next;
2839 iocb = &iocbq->iocb;
2840 sbp = (emlxs_buf_t *)iocbq->sbp;
2842 /* Check if this IO is for our lun */
2843 if (sbp && (sbp->lun == lun)) {
2844 /* Remove iocb from the node's tx queue */
2845 if (next == 0) {
2846 ndlp->nlp_tx[channelno].q_last =
2847 (uint8_t *)prev;
2850 if (prev == 0) {
2851 ndlp->nlp_tx[channelno].q_first =
2852 (uint8_t *)next;
2853 } else {
2854 prev->next = next;
2857 iocbq->next = NULL;
2858 ndlp->nlp_tx[channelno].q_cnt--;
2860 /*
2861 * Add this iocb to our local abort Q
2862 */
2863 if (abort.q_first) {
2864 ((IOCBQ *) abort.q_last)->next = iocbq;
2865 abort.q_last = (uint8_t *)iocbq;
2866 abort.q_cnt++;
2867 } else {
2868 abort.q_first = (uint8_t *)iocbq;
2869 abort.q_last = (uint8_t *)iocbq;
2870 abort.q_cnt = 1;
2872 iocbq->next = NULL;
2873 } else {
2874 prev = iocbq;
2877 iocbq = next;
2879 } /* while (iocbq) */
2880 } /* for loop */
2882 /* First cleanup the iocb's while still holding the lock */
2883 iocbq = (IOCBQ *)abort.q_first;
2884 while (iocbq) {
2885 /* Free the IoTag and the bmp */
2886 iocb = &iocbq->iocb;
2888 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
2889 sbp = iocbq->sbp;
2890 if (sbp) {
2891 emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
2893 } else {
2894 sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
2895 iocb->ULPIOTAG, 0);
2898 if (sbp && (sbp != STALE_PACKET)) {
2899 mutex_enter(&sbp->mtx);
2900 sbp->pkt_flags |= PACKET_IN_FLUSH;
2901 /*
2902 * If the fpkt is already set, then we will leave it
2903 * alone. This ensures that this pkt is only accounted
2904 * for on one fpkt->flush_count
2905 */
2906 if (!sbp->fpkt && fpkt) {
2907 mutex_enter(&fpkt->mtx);
2908 sbp->fpkt = fpkt;
2909 fpkt->flush_count++;
2910 mutex_exit(&fpkt->mtx);
2913 mutex_exit(&sbp->mtx);
2916 iocbq = (IOCBQ *) iocbq->next;
2918 } /* end of while */
2920 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
2922 /* Now abort the iocb's outside the locks */
2923 iocbq = (IOCBQ *)abort.q_first;
2924 while (iocbq) {
2925 /* Save the next iocbq for now */
2926 next = (IOCBQ *)iocbq->next;
2928 /* Unlink this iocbq */
2929 iocbq->next = NULL;
2931 /* Get the pkt */
2932 sbp = (emlxs_buf_t *)iocbq->sbp;
2934 if (sbp) {
2935 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
2936 "tx: sbp=%p node=%p", sbp, sbp->node);
2938 if (hba->state >= FC_LINK_UP) {
2939 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2940 IOERR_ABORT_REQUESTED, 1);
2941 } else {
2942 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
2943 IOERR_LINK_DOWN, 1);
2944 }
2945 }
2947 /* Free the iocb and its associated buffers */
2948 else {
2949 /* Should never happen! */
2950 icmd = &iocbq->iocb;
2952 /* SLI3 */
2953 if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
2954 icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
2955 icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
2956 if ((hba->flag &
2957 (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
2958 /* HBA is detaching or offlining */
2959 if (icmd->ULPCOMMAND !=
2960 CMD_QUE_RING_LIST64_CN) {
2961 void *tmp;
2962 RING *rp;
2963 int ch;
2965 ch = ((CHANNEL *)
2966 iocbq->channel)->channelno;
2967 rp = &hba->sli.sli3.ring[ch];
2968 for (i = 0;
2969 i < icmd->ULPBDECOUNT;
2970 i++) {
2971 mp = EMLXS_GET_VADDR(
2972 hba, rp, icmd);
2974 tmp = (void *)mp;
2975 if (mp) {
2976 emlxs_mem_put(
2977 hba, MEM_BUF, tmp);
2982 emlxs_mem_put(hba, MEM_IOCB,
2983 (void *)iocbq);
2984 } else {
2985 /* repost the unsolicited buffer */
2986 EMLXS_SLI_ISSUE_IOCB_CMD(hba,
2987 (CHANNEL *)iocbq->channel, iocbq);
2989 } else if (icmd->ULPCOMMAND == CMD_CLOSE_XRI_CN ||
2990 icmd->ULPCOMMAND == CMD_CLOSE_XRI_CX) {
2991 /*
2992 * Resend the abort iocbq if any
2993 */
2994 emlxs_tx_put(iocbq, 1);
2998 iocbq = next;
3000 } /* end of while */
3002 /* Now trigger channel service */
3003 for (channelno = 0; channelno < hba->chan_count; channelno++) {
3004 if (!flag[channelno]) {
3005 continue;
3008 EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
3011 return (abort.q_cnt);
3013 } /* emlxs_tx_lun_flush() */
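/*
 * Illustrative sketch, not emlxs driver code: the lun scan above is a
 * filtered unlink from a singly linked queue -- matching entries are
 * spliced out (maintaining first/last/prev) and appended to a local
 * abort queue. Simplified stand-in types, guarded out of the build.
 */
#if 0
#include <stdio.h>

typedef struct ent { struct ent *next; int lun; } ent_t;
typedef struct queue { ent_t *first; ent_t *last; int cnt; } queue_t;

/* Move every entry with a matching lun from src to dst. */
static void
steal_lun(queue_t *src, queue_t *dst, int lun)
{
	ent_t *prev = NULL, *e = src->first, *next;

	while (e) {
		next = e->next;
		if (e->lun == lun) {
			if (next == NULL)	/* removing the tail */
				src->last = prev;
			if (prev == NULL)	/* removing the head */
				src->first = next;
			else
				prev->next = next;
			src->cnt--;

			e->next = NULL;		/* append to dst tail */
			if (dst->first) {
				dst->last->next = e;
				dst->last = e;
			} else {
				dst->first = dst->last = e;
			}
			dst->cnt++;
		} else {
			prev = e;
		}
		e = next;
	}
}

int
main(void)
{
	ent_t c = { NULL, 7 }, b = { &c, 3 }, a = { &b, 7 };
	queue_t src = { &a, &c, 3 }, dst = { NULL, NULL, 0 };

	steal_lun(&src, &dst, 7);
	printf("src=%d dst=%d\n", src.cnt, dst.cnt);	/* src=1 dst=2 */
	return (0);
}
#endif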
3016 extern void
3017 emlxs_tx_put(IOCBQ *iocbq, uint32_t lock)
3019 emlxs_hba_t *hba;
3020 emlxs_port_t *port;
3021 uint32_t channelno;
3022 NODELIST *nlp;
3023 CHANNEL *cp;
3024 emlxs_buf_t *sbp;
3026 port = (emlxs_port_t *)iocbq->port;
3027 hba = HBA;
3028 cp = (CHANNEL *)iocbq->channel;
3029 nlp = (NODELIST *)iocbq->node;
3030 channelno = cp->channelno;
3031 sbp = (emlxs_buf_t *)iocbq->sbp;
3033 if (nlp == NULL) {
3034 /* Set node to base node by default */
3035 nlp = &port->node_base;
3037 iocbq->node = (void *)nlp;
3039 if (sbp) {
3040 sbp->node = (void *)nlp;
3044 if (lock) {
3045 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3048 if (!nlp->nlp_active || (sbp && (sbp->pkt_flags & PACKET_IN_ABORT))) {
3049 if (sbp) {
3050 mutex_enter(&sbp->mtx);
3051 sbp->pkt_flags |= PACKET_IN_FLUSH;
3052 mutex_exit(&sbp->mtx);
3054 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3055 emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
3056 } else {
3057 (void) emlxs_unregister_pkt(cp, sbp->iotag, 0);
3060 if (lock) {
3061 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3064 if (hba->state >= FC_LINK_UP) {
3065 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3066 IOERR_ABORT_REQUESTED, 1);
3067 } else {
3068 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3069 IOERR_LINK_DOWN, 1);
3071 return;
3072 } else {
3073 if (lock) {
3074 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3077 emlxs_mem_put(hba, MEM_IOCB, (void *)iocbq);
3080 return;
3083 if (sbp) {
3085 mutex_enter(&sbp->mtx);
3087 if (sbp->pkt_flags &
3088 (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ | PACKET_IN_TXQ)) {
3089 mutex_exit(&sbp->mtx);
3090 if (lock) {
3091 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3093 return;
3096 sbp->pkt_flags |= PACKET_IN_TXQ;
3097 hba->channel_tx_count++;
3099 mutex_exit(&sbp->mtx);
3103 /* Check iocbq priority */
3104 /* Some IOCBs have high priority, e.g. reset/close XRI */
3105 if (iocbq->flag & IOCB_PRIORITY) {
3106 /* Add the iocb to the bottom of the node's ptx queue */
3107 if (nlp->nlp_ptx[channelno].q_first) {
3108 ((IOCBQ *)nlp->nlp_ptx[channelno].q_last)->next = iocbq;
3109 nlp->nlp_ptx[channelno].q_last = (uint8_t *)iocbq;
3110 nlp->nlp_ptx[channelno].q_cnt++;
3111 } else {
3112 nlp->nlp_ptx[channelno].q_first = (uint8_t *)iocbq;
3113 nlp->nlp_ptx[channelno].q_last = (uint8_t *)iocbq;
3114 nlp->nlp_ptx[channelno].q_cnt = 1;
3117 iocbq->next = NULL;
3118 } else { /* Normal priority */
3121 /* Add the iocb to the bottom of the node's tx queue */
3122 if (nlp->nlp_tx[channelno].q_first) {
3123 ((IOCBQ *)nlp->nlp_tx[channelno].q_last)->next = iocbq;
3124 nlp->nlp_tx[channelno].q_last = (uint8_t *)iocbq;
3125 nlp->nlp_tx[channelno].q_cnt++;
3126 } else {
3127 nlp->nlp_tx[channelno].q_first = (uint8_t *)iocbq;
3128 nlp->nlp_tx[channelno].q_last = (uint8_t *)iocbq;
3129 nlp->nlp_tx[channelno].q_cnt = 1;
3132 iocbq->next = NULL;
3133 }
3136 /*
3137 * Check if the node is not already on channel queue and
3138 * (is not closed or is a priority request)
3139 */
3140 if (!nlp->nlp_next[channelno] &&
3141 (!(nlp->nlp_flag[channelno] & NLP_CLOSED) ||
3142 (iocbq->flag & IOCB_PRIORITY))) {
3143 /* If so, then add it to the channel queue */
3144 if (cp->nodeq.q_first) {
3145 ((NODELIST *)cp->nodeq.q_last)->nlp_next[channelno] =
3146 (uint8_t *)nlp;
3147 nlp->nlp_next[channelno] = cp->nodeq.q_first;
3149 /*
3150 * If this is not the base node then add it
3151 * to the tail
3152 */
3153 if (!nlp->nlp_base) {
3154 cp->nodeq.q_last = (uint8_t *)nlp;
3155 } else { /* Otherwise, add it to the head */
3157 /* The command node always gets priority */
3158 cp->nodeq.q_first = (uint8_t *)nlp;
3161 cp->nodeq.q_cnt++;
3162 } else {
3163 cp->nodeq.q_first = (uint8_t *)nlp;
3164 cp->nodeq.q_last = (uint8_t *)nlp;
3165 nlp->nlp_next[channelno] = nlp;
3166 cp->nodeq.q_cnt = 1;
3170 HBASTATS.IocbTxPut[channelno]++;
3172 /* Adjust the channel timeout timer */
3173 cp->timeout = hba->timer_tics + 5;
3175 if (lock) {
3176 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3179 return;
3181 } /* emlxs_tx_put() */
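/*
 * Illustrative sketch, not emlxs driver code: tx_put keeps two FIFOs per
 * node (priority and normal) and appends at the tail of whichever one the
 * request calls for; each queue is a plain first/last/count triple with
 * an intrusive next pointer. Stand-in types, guarded out of the build.
 */
#if 0
#include <stdio.h>

typedef struct req { struct req *next; int id; int priority; } req_t;
typedef struct fifo { req_t *first; req_t *last; int cnt; } fifo_t;

static void
fifo_append(fifo_t *q, req_t *r)
{
	r->next = NULL;
	if (q->first) {
		q->last->next = r;	/* non-empty: link after tail */
		q->last = r;
		q->cnt++;
	} else {
		q->first = q->last = r;	/* empty: single element */
		q->cnt = 1;
	}
}

int
main(void)
{
	fifo_t ptx = { 0 }, tx = { 0 };
	req_t a = { NULL, 1, 1 }, b = { NULL, 2, 0 };

	/* Priority requests (e.g. close/abort) go on the ptx queue */
	fifo_append(a.priority ? &ptx : &tx, &a);
	fifo_append(b.priority ? &ptx : &tx, &b);
	printf("ptx=%d tx=%d\n", ptx.cnt, tx.cnt);	/* ptx=1 tx=1 */
	return (0);
}
#endif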
3184 extern IOCBQ *
3185 emlxs_tx_get(CHANNEL *cp, uint32_t lock)
3187 emlxs_hba_t *hba;
3188 uint32_t channelno;
3189 IOCBQ *iocbq;
3190 NODELIST *nlp;
3191 emlxs_buf_t *sbp;
3193 hba = cp->hba;
3194 channelno = cp->channelno;
3196 if (lock) {
3197 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3200 begin:
3202 iocbq = NULL;
3204 /* Check if a node needs servicing */
3205 if (cp->nodeq.q_first) {
3206 nlp = (NODELIST *)cp->nodeq.q_first;
3208 /* Get next iocb from node's priority queue */
3210 if (nlp->nlp_ptx[channelno].q_first) {
3211 iocbq = (IOCBQ *)nlp->nlp_ptx[channelno].q_first;
3213 /* Check if this is last entry */
3214 if (nlp->nlp_ptx[channelno].q_last == (void *)iocbq) {
3215 nlp->nlp_ptx[channelno].q_first = NULL;
3216 nlp->nlp_ptx[channelno].q_last = NULL;
3217 nlp->nlp_ptx[channelno].q_cnt = 0;
3218 } else {
3219 /* Remove iocb from head */
3220 nlp->nlp_ptx[channelno].q_first =
3221 (void *)iocbq->next;
3222 nlp->nlp_ptx[channelno].q_cnt--;
3225 iocbq->next = NULL;
3228 /* Get next iocb from node tx queue if node not closed */
3229 else if (nlp->nlp_tx[channelno].q_first &&
3230 !(nlp->nlp_flag[channelno] & NLP_CLOSED)) {
3231 iocbq = (IOCBQ *)nlp->nlp_tx[channelno].q_first;
3233 /* Check if this is last entry */
3234 if (nlp->nlp_tx[channelno].q_last == (void *)iocbq) {
3235 nlp->nlp_tx[channelno].q_first = NULL;
3236 nlp->nlp_tx[channelno].q_last = NULL;
3237 nlp->nlp_tx[channelno].q_cnt = 0;
3238 } else {
3239 /* Remove iocb from head */
3240 nlp->nlp_tx[channelno].q_first =
3241 (void *)iocbq->next;
3242 nlp->nlp_tx[channelno].q_cnt--;
3245 iocbq->next = NULL;
3248 /* Now deal with node itself */
3250 /* Check if node still needs servicing */
3251 if ((nlp->nlp_ptx[channelno].q_first) ||
3252 (nlp->nlp_tx[channelno].q_first &&
3253 !(nlp->nlp_flag[channelno] & NLP_CLOSED))) {
3255 /*
3256 * If this is the base node, then don't shift the
3257 * pointers. We want to drain the base node before
3258 * moving on
3259 */
3260 if (!nlp->nlp_base) {
3261 /*
3262 * Just shift channel queue pointers to next
3263 * node
3264 */
3265 cp->nodeq.q_last = (void *)nlp;
3266 cp->nodeq.q_first = nlp->nlp_next[channelno];
3268 } else {
3269 /* Remove node from channel queue */
3271 /* If this is the last node on list */
3272 if (cp->nodeq.q_last == (void *)nlp) {
3273 cp->nodeq.q_last = NULL;
3274 cp->nodeq.q_first = NULL;
3275 cp->nodeq.q_cnt = 0;
3276 } else {
3277 /* Remove node from head */
3278 cp->nodeq.q_first = nlp->nlp_next[channelno];
3279 ((NODELIST *)cp->nodeq.q_last)->
3280 nlp_next[channelno] = cp->nodeq.q_first;
3281 cp->nodeq.q_cnt--;
3285 /* Clear node */
3286 nlp->nlp_next[channelno] = NULL;
3287 }
3289 /*
3290 * If no iocbq was found on this node, then it will have
3291 * been removed. So try again.
3292 */
3293 if (!iocbq) {
3294 goto begin;
3297 sbp = (emlxs_buf_t *)iocbq->sbp;
3299 if (sbp) {
3300 /*
3301 * Check flags before we enter mutex in case this
3302 * has been flushed and destroyed
3303 */
3304 if ((sbp->pkt_flags &
3305 (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) ||
3306 !(sbp->pkt_flags & PACKET_IN_TXQ)) {
3307 goto begin;
3310 mutex_enter(&sbp->mtx);
3312 if ((sbp->pkt_flags &
3313 (PACKET_IN_COMPLETION | PACKET_IN_CHIPQ)) ||
3314 !(sbp->pkt_flags & PACKET_IN_TXQ)) {
3315 mutex_exit(&sbp->mtx);
3316 goto begin;
3319 sbp->pkt_flags &= ~PACKET_IN_TXQ;
3320 hba->channel_tx_count--;
3322 mutex_exit(&sbp->mtx);
3326 if (iocbq) {
3327 HBASTATS.IocbTxGet[channelno]++;
3330 /* Adjust the ring timeout timer */
3331 cp->timeout = (cp->nodeq.q_first) ? (hba->timer_tics + 5) : 0;
3333 if (lock) {
3334 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3337 return (iocbq);
3339 } /* emlxs_tx_get() */
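/*
 * Illustrative sketch, not emlxs driver code: tx_get services the node at
 * the head of the circular channel ring, preferring its priority FIFO,
 * then rotates the ring (q_last = node, q_first = node->next) so nodes
 * are served round-robin. A reduced version of that rotation, with
 * stand-in types, guarded out of the build.
 */
#if 0
#include <stdio.h>

typedef struct rnode { struct rnode *next; int id; } rnode_t;

/* Rotate the ring one step: head becomes tail. */
static void
ring_rotate(rnode_t **first, rnode_t **last)
{
	*last = *first;
	*first = (*first)->next;
}

int
main(void)
{
	rnode_t b = { NULL, 2 }, a = { &b, 1 };
	rnode_t *first = &a, *last = &b;

	b.next = &a;			/* a -> b -> a (circular) */
	for (int i = 0; i < 4; i++) {
		printf("servicing node %d\n", first->id);	/* 1 2 1 2 */
		ring_rotate(&first, &last);
	}
	return (0);
}
#endif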
3342 /*
3343 * Move all cmds from from_chan's txq to to_chan's txq for ndlp.
3344 * The old IoTag has to be released and a new one allocated; nothing
3345 * else changes. The TX_CHANNEL lock is held.
3346 */
3348 extern void
3349 emlxs_tx_move(NODELIST *ndlp, CHANNEL *from_chan, CHANNEL *to_chan,
3350 uint32_t cmd, emlxs_buf_t *fpkt, uint32_t lock)
3352 emlxs_hba_t *hba;
3353 emlxs_port_t *port;
3354 uint32_t fchanno, tchanno, i;
3356 IOCBQ *iocbq;
3357 IOCBQ *prev;
3358 IOCBQ *next;
3359 IOCB *iocb, *icmd;
3360 Q tbm; /* To Be Moved Q */
3361 MATCHMAP *mp;
3363 NODELIST *nlp = ndlp;
3364 emlxs_buf_t *sbp;
3366 NODELIST *n_prev = NULL;
3367 NODELIST *n_next = NULL;
3368 uint16_t count = 0;
3370 hba = from_chan->hba;
3371 port = &PPORT;
3372 cmd = cmd; /* To pass lint */
3374 fchanno = from_chan->channelno;
3375 tchanno = to_chan->channelno;
3377 if (lock) {
3378 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3381 bzero((void *)&tbm, sizeof (Q));
3383 /* Scan the ndlp's fchanno txq to get the iocb of fcp cmd */
3384 prev = NULL;
3385 iocbq = (IOCBQ *)nlp->nlp_tx[fchanno].q_first;
3387 while (iocbq) {
3388 next = (IOCBQ *)iocbq->next;
3389 /* Check if this iocb is fcp cmd */
3390 iocb = &iocbq->iocb;
3392 switch (iocb->ULPCOMMAND) {
3393 /* FCP commands */
3394 case CMD_FCP_ICMND_CR:
3395 case CMD_FCP_ICMND_CX:
3396 case CMD_FCP_IREAD_CR:
3397 case CMD_FCP_IREAD_CX:
3398 case CMD_FCP_IWRITE_CR:
3399 case CMD_FCP_IWRITE_CX:
3400 case CMD_FCP_ICMND64_CR:
3401 case CMD_FCP_ICMND64_CX:
3402 case CMD_FCP_IREAD64_CR:
3403 case CMD_FCP_IREAD64_CX:
3404 case CMD_FCP_IWRITE64_CR:
3405 case CMD_FCP_IWRITE64_CX:
3406 /* We found a fcp cmd */
3407 break;
3408 default:
3409 /* not an fcp cmd, continue */
3410 prev = iocbq;
3411 iocbq = next;
3412 continue;
3415 /* found a fcp cmd iocb in fchanno txq, now dequeue it */
3416 if (next == NULL) {
3417 /* This is the last iocbq */
3418 nlp->nlp_tx[fchanno].q_last =
3419 (uint8_t *)prev;
3422 if (prev == NULL) {
3423 /* This is the first one then remove it from head */
3424 nlp->nlp_tx[fchanno].q_first =
3425 (uint8_t *)next;
3426 } else {
3427 prev->next = next;
3430 iocbq->next = NULL;
3431 nlp->nlp_tx[fchanno].q_cnt--;
3433 /* Add this iocb to our local to-be-removed queue */
3434 /* This way we do not hold the TX_CHANNEL lock too long */
3436 if (tbm.q_first) {
3437 ((IOCBQ *)tbm.q_last)->next = iocbq;
3438 tbm.q_last = (uint8_t *)iocbq;
3439 tbm.q_cnt++;
3440 } else {
3441 tbm.q_first = (uint8_t *)iocbq;
3442 tbm.q_last = (uint8_t *)iocbq;
3443 tbm.q_cnt = 1;
3446 iocbq = next;
3448 } /* While (iocbq) */
3450 if ((tchanno == hba->channel_fcp) && (tbm.q_cnt != 0)) {
3452 /* from_chan->nodeq.q_first must be non-NULL */
3453 if (from_chan->nodeq.q_first) {
3455 /* nodeq is not empty, now deal with the node itself */
3456 if ((nlp->nlp_tx[fchanno].q_first)) {
3458 if (!nlp->nlp_base) {
3459 from_chan->nodeq.q_last =
3460 (void *)nlp;
3461 from_chan->nodeq.q_first =
3462 nlp->nlp_next[fchanno];
3465 } else {
3466 n_prev = (NODELIST *)from_chan->nodeq.q_first;
3467 count = from_chan->nodeq.q_cnt;
3469 if (n_prev == nlp) {
3471 /* If this is the only node on list */
3472 if (from_chan->nodeq.q_last ==
3473 (void *)nlp) {
3474 from_chan->nodeq.q_last =
3475 NULL;
3476 from_chan->nodeq.q_first =
3477 NULL;
3478 from_chan->nodeq.q_cnt = 0;
3479 } else {
3480 from_chan->nodeq.q_first =
3481 nlp->nlp_next[fchanno];
3482 ((NODELIST *)from_chan->
3483 nodeq.q_last)->
3484 nlp_next[fchanno] =
3485 from_chan->nodeq.q_first;
3486 from_chan->nodeq.q_cnt--;
3488 /* Clear node */
3489 nlp->nlp_next[fchanno] = NULL;
3490 } else {
3491 count--;
3492 do {
3493 n_next =
3494 n_prev->nlp_next[fchanno];
3495 if (n_next == nlp) {
3496 break;
3498 n_prev = n_next;
3499 } while (count--);
3501 if (count != 0) {
3503 if (n_next ==
3504 (NODELIST *)from_chan->
3505 nodeq.q_last) {
3506 n_prev->
3507 nlp_next[fchanno]
3508 =
3509 ((NODELIST *)
3510 from_chan->
3511 nodeq.q_last)->
3512 nlp_next
3513 [fchanno];
3514 from_chan->nodeq.q_last
3515 = (uint8_t *)n_prev;
3516 } else {
3518 n_prev->
3519 nlp_next[fchanno]
3520 =
3521 n_next->nlp_next
3522 [fchanno];
3523 }
3524 from_chan->nodeq.q_cnt--;
3525 /* Clear node */
3526 nlp->nlp_next[fchanno] =
3527 NULL;
3534 /* Now cleanup the iocb's */
3535 prev = NULL;
3536 iocbq = (IOCBQ *)tbm.q_first;
3538 while (iocbq) {
3540 next = (IOCBQ *)iocbq->next;
3542 /* Free the IoTag and the bmp */
3543 iocb = &iocbq->iocb;
3545 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3546 sbp = iocbq->sbp;
3547 if (sbp) {
3548 emlxs_sli4_free_xri(port, sbp, sbp->xrip, 1);
3550 } else {
3551 sbp = emlxs_unregister_pkt((CHANNEL *)iocbq->channel,
3552 iocb->ULPIOTAG, 0);
3555 if (sbp && (sbp != STALE_PACKET)) {
3556 mutex_enter(&sbp->mtx);
3557 sbp->pkt_flags |= PACKET_IN_FLUSH;
3559 /*
3560 * If the fpkt is already set, then we will leave it
3561 * alone. This ensures that this pkt is only accounted
3562 * for on one fpkt->flush_count
3563 */
3564 if (!sbp->fpkt && fpkt) {
3565 mutex_enter(&fpkt->mtx);
3566 sbp->fpkt = fpkt;
3567 fpkt->flush_count++;
3568 mutex_exit(&fpkt->mtx);
3570 mutex_exit(&sbp->mtx);
3572 iocbq = next;
3574 } /* end of while */
3576 iocbq = (IOCBQ *)tbm.q_first;
3577 while (iocbq) {
3578 /* Save the next iocbq for now */
3579 next = (IOCBQ *)iocbq->next;
3581 /* Unlink this iocbq */
3582 iocbq->next = NULL;
3584 /* Get the pkt */
3585 sbp = (emlxs_buf_t *)iocbq->sbp;
3587 if (sbp) {
3588 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pkt_flush_msg,
3589 "tx: sbp=%p node=%p", sbp, sbp->node);
3591 if (hba->state >= FC_LINK_UP) {
3592 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3593 IOERR_ABORT_REQUESTED, 1);
3594 } else {
3595 emlxs_pkt_complete(sbp, IOSTAT_LOCAL_REJECT,
3596 IOERR_LINK_DOWN, 1);
3597 }
3598 }
3600 /* Free the iocb and its associated buffers */
3601 else {
3602 icmd = &iocbq->iocb;
3604 /* SLI3 */
3605 if (icmd->ULPCOMMAND == CMD_QUE_RING_BUF64_CN ||
3606 icmd->ULPCOMMAND == CMD_QUE_RING_BUF_CN ||
3607 icmd->ULPCOMMAND == CMD_QUE_RING_LIST64_CN) {
3608 if ((hba->flag &
3609 (FC_ONLINE_MODE | FC_ONLINING_MODE)) == 0) {
3610 /* HBA is detaching or offlining */
3611 if (icmd->ULPCOMMAND !=
3612 CMD_QUE_RING_LIST64_CN) {
3613 void *tmp;
3614 RING *rp;
3615 int ch;
3617 ch = from_chan->channelno;
3618 rp = &hba->sli.sli3.ring[ch];
3620 for (i = 0;
3621 i < icmd->ULPBDECOUNT;
3622 i++) {
3623 mp = EMLXS_GET_VADDR(
3624 hba, rp, icmd);
3626 tmp = (void *)mp;
3627 if (mp) {
3628 emlxs_mem_put(
3629 hba,
3630 MEM_BUF,
3631 tmp);
3637 emlxs_mem_put(hba, MEM_IOCB,
3638 (void *)iocbq);
3639 } else {
3640 /* repost the unsolicited buffer */
3641 EMLXS_SLI_ISSUE_IOCB_CMD(hba,
3642 from_chan, iocbq);
3647 iocbq = next;
3649 } /* end of while */
3651 /* Now flush the chipq if any */
3652 if (!(nlp->nlp_flag[fchanno] & NLP_CLOSED)) {
3654 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3656 (void) emlxs_chipq_node_flush(port, from_chan, nlp, 0);
3658 mutex_enter(&EMLXS_TX_CHANNEL_LOCK);
3661 if (lock) {
3662 mutex_exit(&EMLXS_TX_CHANNEL_LOCK);
3665 return;
3667 } /* emlxs_tx_move */
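/*
 * Illustrative sketch, not emlxs driver code: the do/while above walks
 * the circular node ring at most q_cnt steps to find the predecessor of
 * nlp, using the counter both as a bound and as a "was it found" flag.
 * A reduced version, stand-in types, guarded out of the build.
 */
#if 0
#include <stdio.h>

typedef struct lnode { struct lnode *next; int id; } lnode_t;

/* Return the predecessor of target within at most cnt steps, else NULL. */
static lnode_t *
bounded_prev(lnode_t *start, lnode_t *target, int cnt)
{
	lnode_t *prev = start;

	while (cnt-- > 0) {
		if (prev->next == target)
			return (prev);
		prev = prev->next;
	}
	return (NULL);		/* not on the ring: caller leaves it alone */
}

int
main(void)
{
	lnode_t c = { NULL, 3 }, b = { &c, 2 }, a = { &b, 1 };
	lnode_t *p;

	c.next = &a;		/* a -> b -> c -> a */
	p = bounded_prev(&a, &c, 3);
	printf("prev of 3 is %d\n", p ? p->id : -1);	/* 2 */
	return (0);
}
#endif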
3670 extern uint32_t
3671 emlxs_chipq_node_flush(emlxs_port_t *port, CHANNEL *chan, NODELIST *ndlp,
3672 emlxs_buf_t *fpkt)
3674 emlxs_hba_t *hba = HBA;
3675 emlxs_buf_t *sbp;
3676 IOCBQ *iocbq;
3677 IOCBQ *next;
3678 Q abort;
3679 CHANNEL *cp;
3680 uint32_t channelno;
3681 uint8_t flag[MAX_CHANNEL];
3682 uint32_t iotag;
3684 bzero((void *)&abort, sizeof (Q));
3685 bzero((void *)flag, sizeof (flag));
3687 for (channelno = 0; channelno < hba->chan_count; channelno++) {
3688 cp = &hba->chan[channelno];
3690 if (chan && cp != chan) {
3691 continue;
3694 mutex_enter(&EMLXS_FCTAB_LOCK);
3696 for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3697 sbp = hba->fc_table[iotag];
3699 if (sbp && (sbp != STALE_PACKET) &&
3700 (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3701 (sbp->node == ndlp) &&
3702 (sbp->channel == cp) &&
3703 !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3704 emlxs_sbp_abort_add(port, sbp, &abort, flag,
3705 fpkt);
3709 mutex_exit(&EMLXS_FCTAB_LOCK);
3711 } /* for */
3713 /* Now put the iocb's on the tx queue */
3714 iocbq = (IOCBQ *)abort.q_first;
3715 while (iocbq) {
3716 /* Save the next iocbq for now */
3717 next = (IOCBQ *)iocbq->next;
3719 /* Unlink this iocbq */
3720 iocbq->next = NULL;
3722 /* Send this iocbq */
3723 emlxs_tx_put(iocbq, 1);
3725 iocbq = next;
3728 /* Now trigger channel service */
3729 for (channelno = 0; channelno < hba->chan_count; channelno++) {
3730 if (!flag[channelno]) {
3731 continue;
3734 EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
3737 return (abort.q_cnt);
3739 } /* emlxs_chipq_node_flush() */
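/*
 * Illustrative sketch, not emlxs driver code: the chipq flush above is a
 * linear scan of the fc_table iotag array, picking out live entries that
 * match the node/channel and handing each to the abort-list builder.
 * A reduced version with stand-in types and a made-up table size,
 * guarded out of the build.
 */
#if 0
#include <stdio.h>
#include <stddef.h>

#define MAX_TAG		8	/* assumption: tiny table for the demo */
#define STALE		((struct io *)-1)

struct io { int node_id; int on_chip; };

static struct io *table[MAX_TAG];

static int
scan_for_node(int node_id)
{
	int tag, hits = 0;

	/* iotag 0 is unused, as in the driver's fc_table */
	for (tag = 1; tag < MAX_TAG; tag++) {
		struct io *io = table[tag];

		if (io && io != STALE && io->on_chip &&
		    io->node_id == node_id) {
			printf("would abort iotag %d\n", tag);
			hits++;
		}
	}
	return (hits);
}

int
main(void)
{
	struct io a = { 5, 1 }, b = { 9, 1 }, c = { 5, 0 };

	table[1] = &a;
	table[2] = &b;
	table[3] = STALE;
	table[4] = &c;
	printf("matched %d\n", scan_for_node(5));	/* tag 1 only */
	return (0);
}
#endif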
3742 /* Flush all IO's left on all iotag lists */
3743 extern uint32_t
3744 emlxs_iotag_flush(emlxs_hba_t *hba)
3746 emlxs_port_t *port = &PPORT;
3747 emlxs_buf_t *sbp;
3748 IOCBQ *iocbq;
3749 IOCB *iocb;
3750 Q abort;
3751 CHANNEL *cp;
3752 uint32_t channelno;
3753 uint32_t iotag;
3754 uint32_t count;
3756 count = 0;
3757 for (channelno = 0; channelno < hba->chan_count; channelno++) {
3758 cp = &hba->chan[channelno];
3760 bzero((void *)&abort, sizeof (Q));
3762 mutex_enter(&EMLXS_FCTAB_LOCK);
3764 for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3765 sbp = hba->fc_table[iotag];
3767 /* Check if the slot is empty */
3768 if (!sbp || (sbp == STALE_PACKET)) {
3769 continue;
3772 /* We are building an abort list per channel */
3773 if (sbp->channel != cp) {
3774 continue;
3777 hba->fc_table[iotag] = STALE_PACKET;
3778 hba->io_count--;
3780 /* Check if IO is valid */
3781 if (!(sbp->pkt_flags & PACKET_VALID) ||
3782 (sbp->pkt_flags & (PACKET_ULP_OWNED|
3783 PACKET_COMPLETED|PACKET_IN_COMPLETION))) {
3784 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
3785 "iotag_flush: Invalid IO found. iotag=%d",
3786 iotag);
3788 continue;
3791 sbp->iotag = 0;
3793 /* Set IOCB status */
3794 iocbq = &sbp->iocbq;
3795 iocb = &iocbq->iocb;
3797 iocb->ULPSTATUS = IOSTAT_LOCAL_REJECT;
3798 iocb->un.grsp.perr.statLocalError = IOERR_LINK_DOWN;
3799 iocb->ULPLE = 1;
3800 iocbq->next = NULL;
3802 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
3803 if (sbp->xrip) {
3804 EMLXS_MSGF(EMLXS_CONTEXT,
3805 &emlxs_sli_debug_msg,
3806 "iotag_flush: iotag=%d sbp=%p "
3807 "xrip=%p state=%x flag=%x",
3808 iotag, sbp, sbp->xrip,
3809 sbp->xrip->state, sbp->xrip->flag);
3810 } else {
3811 EMLXS_MSGF(EMLXS_CONTEXT,
3812 &emlxs_sli_debug_msg,
3813 "iotag_flush: iotag=%d sbp=%p "
3814 "xrip=NULL", iotag, sbp);
3817 emlxs_sli4_free_xri(port, sbp, sbp->xrip, 0);
3818 } else {
3819 /* Clean up the sbp */
3820 mutex_enter(&sbp->mtx);
3822 if (sbp->pkt_flags & PACKET_IN_TXQ) {
3823 sbp->pkt_flags &= ~PACKET_IN_TXQ;
3824 hba->channel_tx_count --;
3827 if (sbp->pkt_flags & PACKET_IN_CHIPQ) {
3828 sbp->pkt_flags &= ~PACKET_IN_CHIPQ;
3831 if (sbp->bmp) {
3832 emlxs_mem_put(hba, MEM_BPL,
3833 (void *)sbp->bmp);
3834 sbp->bmp = 0;
3837 mutex_exit(&sbp->mtx);
3840 /* At this point all nodes are assumed destroyed */
3841 mutex_enter(&sbp->mtx);
3842 sbp->node = 0;
3843 mutex_exit(&sbp->mtx);
3845 /* Add this iocb to our local abort Q */
3846 if (abort.q_first) {
3847 ((IOCBQ *)abort.q_last)->next = iocbq;
3848 abort.q_last = (uint8_t *)iocbq;
3849 abort.q_cnt++;
3850 } else {
3851 abort.q_first = (uint8_t *)iocbq;
3852 abort.q_last = (uint8_t *)iocbq;
3853 abort.q_cnt = 1;
3857 mutex_exit(&EMLXS_FCTAB_LOCK);
3859 /* Trigger deferred completion */
3860 if (abort.q_first) {
3861 mutex_enter(&cp->rsp_lock);
3862 if (cp->rsp_head == NULL) {
3863 cp->rsp_head = (IOCBQ *)abort.q_first;
3864 cp->rsp_tail = (IOCBQ *)abort.q_last;
3865 } else {
3866 cp->rsp_tail->next = (IOCBQ *)abort.q_first;
3867 cp->rsp_tail = (IOCBQ *)abort.q_last;
3869 mutex_exit(&cp->rsp_lock);
3871 emlxs_thread_trigger2(&cp->intr_thread,
3872 emlxs_proc_channel, cp);
3874 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_sli_debug_msg,
3875 "iotag_flush: channel=%d count=%d",
3876 channelno, abort.q_cnt);
3878 count += abort.q_cnt;
3882 return (count);
3884 } /* emlxs_iotag_flush() */
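/*
 * Illustrative sketch, not emlxs driver code: the flush above defers the
 * actual completions -- the whole abort batch is appended to the
 * channel's response list under rsp_lock and the interrupt thread is
 * triggered to drain it. A reduced single-threaded version of the batch
 * append, with a stand-in wakeup, guarded out of the build.
 */
#if 0
#include <pthread.h>
#include <stdio.h>

typedef struct ioq { struct ioq *next; int id; } ioq_t;

static pthread_mutex_t rsp_lock = PTHREAD_MUTEX_INITIALIZER;
static ioq_t *rsp_head, *rsp_tail;

static void
wake_worker(void)
{
	/* stand-in: the driver triggers cp->intr_thread here */
	printf("worker triggered\n");
}

/* Append a whole batch (first..last) to the response list atomically. */
static void
defer_batch(ioq_t *first, ioq_t *last)
{
	pthread_mutex_lock(&rsp_lock);
	if (rsp_head == NULL)
		rsp_head = first;
	else
		rsp_tail->next = first;
	rsp_tail = last;
	pthread_mutex_unlock(&rsp_lock);
	wake_worker();
}

int
main(void)
{
	ioq_t b = { NULL, 2 }, a = { &b, 1 };

	defer_batch(&a, &b);
	for (ioq_t *q = rsp_head; q; q = q->next)
		printf("deferred %d\n", q->id);
	return (0);
}
#endif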
3888 /* Checks for IO's on all or a given channel for a given node */
3889 extern uint32_t
3890 emlxs_chipq_node_check(emlxs_port_t *port, CHANNEL *chan, NODELIST *ndlp)
3892 emlxs_hba_t *hba = HBA;
3893 emlxs_buf_t *sbp;
3894 CHANNEL *cp;
3895 uint32_t channelno;
3896 uint32_t count;
3897 uint32_t iotag;
3899 count = 0;
3901 for (channelno = 0; channelno < hba->chan_count; channelno++) {
3902 cp = &hba->chan[channelno];
3904 if (chan && cp != chan) {
3905 continue;
3908 mutex_enter(&EMLXS_FCTAB_LOCK);
3910 for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3911 sbp = hba->fc_table[iotag];
3913 if (sbp && (sbp != STALE_PACKET) &&
3914 (sbp->pkt_flags & PACKET_IN_CHIPQ) &&
3915 (sbp->node == ndlp) &&
3916 (sbp->channel == cp) &&
3917 !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3918 count++;
3922 mutex_exit(&EMLXS_FCTAB_LOCK);
3924 } /* for */
3926 return (count);
3928 } /* emlxs_chipq_node_check() */
3932 /* Flush all IO's for a given node's lun (on any channel) */
3933 extern uint32_t
3934 emlxs_chipq_lun_flush(emlxs_port_t *port, NODELIST *ndlp,
3935 uint32_t lun, emlxs_buf_t *fpkt)
3937 emlxs_hba_t *hba = HBA;
3938 emlxs_buf_t *sbp;
3939 IOCBQ *iocbq;
3940 IOCBQ *next;
3941 Q abort;
3942 uint32_t iotag;
3943 uint8_t flag[MAX_CHANNEL];
3944 uint32_t channelno;
3946 if (lun == EMLXS_LUN_NONE) {
3947 return (0);
3950 bzero((void *)flag, sizeof (flag));
3951 bzero((void *)&abort, sizeof (Q));
3953 mutex_enter(&EMLXS_FCTAB_LOCK);
3954 for (iotag = 1; iotag < hba->max_iotag; iotag++) {
3955 sbp = hba->fc_table[iotag];
3957 if (sbp && (sbp != STALE_PACKET) &&
3958 sbp->pkt_flags & PACKET_IN_CHIPQ &&
3959 sbp->node == ndlp &&
3960 sbp->lun == lun &&
3961 !(sbp->pkt_flags & PACKET_XRI_CLOSED)) {
3962 emlxs_sbp_abort_add(port, sbp,
3963 &abort, flag, fpkt);
3966 mutex_exit(&EMLXS_FCTAB_LOCK);
3968 /* Now put the iocb's on the tx queue */
3969 iocbq = (IOCBQ *)abort.q_first;
3970 while (iocbq) {
3971 /* Save the next iocbq for now */
3972 next = (IOCBQ *)iocbq->next;
3974 /* Unlink this iocbq */
3975 iocbq->next = NULL;
3977 /* Send this iocbq */
3978 emlxs_tx_put(iocbq, 1);
3980 iocbq = next;
3983 /* Now trigger channel service */
3984 for (channelno = 0; channelno < hba->chan_count; channelno++) {
3985 if (!flag[channelno]) {
3986 continue;
3989 EMLXS_SLI_ISSUE_IOCB_CMD(hba, &hba->chan[channelno], 0);
3992 return (abort.q_cnt);
3994 } /* emlxs_chipq_lun_flush() */
3998 /*
3999 * Issue an ABORT_XRI_CN iocb command to abort an FCP command already issued.
4000 * This must be called while holding the EMLXS_FCTAB_LOCK
4001 */
4002 extern IOCBQ *
4003 emlxs_create_abort_xri_cn(emlxs_port_t *port, NODELIST *ndlp,
4004 uint16_t iotag, CHANNEL *cp, uint8_t class, int32_t flag)
4006 emlxs_hba_t *hba = HBA;
4007 IOCBQ *iocbq;
4008 IOCB *iocb;
4009 emlxs_wqe_t *wqe;
4010 emlxs_buf_t *sbp;
4011 uint16_t abort_iotag;
4013 if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
4014 return (NULL);
4017 iocbq->channel = (void *)cp;
4018 iocbq->port = (void *)port;
4019 iocbq->node = (void *)ndlp;
4020 iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
4022 /*
4023 * set up an iotag using special Abort iotags
4024 */
4025 if ((hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG)) {
4026 hba->fc_oor_iotag = hba->max_iotag;
4028 abort_iotag = hba->fc_oor_iotag++;
4031 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4032 wqe = &iocbq->wqe;
4033 sbp = hba->fc_table[iotag];
4035 /* Try to issue abort by XRI if possible */
4036 if (sbp == NULL || sbp == STALE_PACKET || sbp->xrip == NULL) {
4037 wqe->un.Abort.Criteria = ABORT_REQ_TAG;
4038 wqe->AbortTag = iotag;
4039 } else {
4040 wqe->un.Abort.Criteria = ABORT_XRI_TAG;
4041 wqe->AbortTag = sbp->xrip->XRI;
4043 wqe->un.Abort.IA = 0;
4044 wqe->RequestTag = abort_iotag;
4045 wqe->Command = CMD_ABORT_XRI_CX;
4046 wqe->Class = CLASS3;
4047 wqe->CQId = (uint16_t)0xffff; /* default CQ for response */
4048 wqe->CmdType = WQE_TYPE_ABORT;
4049 } else {
4050 iocb = &iocbq->iocb;
4051 iocb->ULPIOTAG = abort_iotag;
4052 iocb->un.acxri.abortType = flag;
4053 iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi;
4054 iocb->un.acxri.abortIoTag = iotag;
4055 iocb->ULPLE = 1;
4056 iocb->ULPCLASS = class;
4057 iocb->ULPCOMMAND = CMD_ABORT_XRI_CN;
4058 iocb->ULPOWNER = OWN_CHIP;
4061 return (iocbq);
4063 } /* emlxs_create_abort_xri_cn() */
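/*
 * Illustrative sketch, not emlxs driver code: the abort-tag allocation
 * above cycles a counter through a reserved "out of range" band, wrapping
 * back to the base of the band when it reaches the end. MAX_IOTAG and
 * MAX_ABORT_TAG below are made-up stand-in values, not the driver's.
 * Guarded out of the build.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

#define MAX_IOTAG	1024	/* assumption: first out-of-range tag */
#define MAX_ABORT_TAG	4096	/* assumption: one past the last tag */

static uint16_t oor_iotag = MAX_IOTAG;

static uint16_t
next_abort_tag(void)
{
	if (oor_iotag >= MAX_ABORT_TAG)
		oor_iotag = MAX_IOTAG;	/* wrap back to the band base */
	return (oor_iotag++);
}

int
main(void)
{
	for (int i = 0; i < 5; i++)
		printf("%u\n", next_abort_tag());	/* 1024..1028 */
	return (0);
}
#endif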
4066 /* This must be called while holding the EMLXS_FCTAB_LOCK */
4067 extern IOCBQ *
4068 emlxs_create_abort_xri_cx(emlxs_port_t *port, NODELIST *ndlp, uint16_t xid,
4069 CHANNEL *cp, uint8_t class, int32_t flag)
4071 emlxs_hba_t *hba = HBA;
4072 IOCBQ *iocbq;
4073 IOCB *iocb;
4074 emlxs_wqe_t *wqe;
4075 uint16_t abort_iotag;
4077 if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
4078 return (NULL);
4081 iocbq->channel = (void *)cp;
4082 iocbq->port = (void *)port;
4083 iocbq->node = (void *)ndlp;
4084 iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
4086 /*
4087 * set up an iotag using special Abort iotags
4088 */
4089 if ((hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG)) {
4090 hba->fc_oor_iotag = hba->max_iotag;
4092 abort_iotag = hba->fc_oor_iotag++;
4094 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4095 wqe = &iocbq->wqe;
4096 wqe->un.Abort.Criteria = ABORT_XRI_TAG;
4097 wqe->un.Abort.IA = 0;
4098 wqe->RequestTag = abort_iotag;
4099 wqe->AbortTag = xid;
4100 wqe->Command = CMD_ABORT_XRI_CX;
4101 wqe->Class = CLASS3;
4102 wqe->CQId = (uint16_t)0xffff; /* default CQ for response */
4103 wqe->CmdType = WQE_TYPE_ABORT;
4104 } else {
4105 iocb = &iocbq->iocb;
4106 iocb->ULPCONTEXT = xid;
4107 iocb->ULPIOTAG = abort_iotag;
4108 iocb->un.acxri.abortType = flag;
4109 iocb->ULPLE = 1;
4110 iocb->ULPCLASS = class;
4111 iocb->ULPCOMMAND = CMD_ABORT_XRI_CX;
4112 iocb->ULPOWNER = OWN_CHIP;
4115 return (iocbq);
4117 } /* emlxs_create_abort_xri_cx() */
4121 /* This must be called while holding the EMLXS_FCTAB_LOCK */
4122 extern IOCBQ *
4123 emlxs_create_close_xri_cn(emlxs_port_t *port, NODELIST *ndlp,
4124 uint16_t iotag, CHANNEL *cp)
4126 emlxs_hba_t *hba = HBA;
4127 IOCBQ *iocbq;
4128 IOCB *iocb;
4129 emlxs_wqe_t *wqe;
4130 emlxs_buf_t *sbp;
4131 uint16_t abort_iotag;
4133 if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
4134 return (NULL);
4137 iocbq->channel = (void *)cp;
4138 iocbq->port = (void *)port;
4139 iocbq->node = (void *)ndlp;
4140 iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
4142 /*
4143 * set up an iotag using special Abort iotags
4144 */
4145 if ((hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG)) {
4146 hba->fc_oor_iotag = hba->max_iotag;
4148 abort_iotag = hba->fc_oor_iotag++;
4150 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4151 wqe = &iocbq->wqe;
4152 sbp = hba->fc_table[iotag];
4154 /* Try to issue close by XRI if possible */
4155 if (sbp == NULL || sbp == STALE_PACKET || sbp->xrip == NULL) {
4156 wqe->un.Abort.Criteria = ABORT_REQ_TAG;
4157 wqe->AbortTag = iotag;
4158 } else {
4159 wqe->un.Abort.Criteria = ABORT_XRI_TAG;
4160 wqe->AbortTag = sbp->xrip->XRI;
4162 wqe->un.Abort.IA = 1;
4163 wqe->RequestTag = abort_iotag;
4164 wqe->Command = CMD_ABORT_XRI_CX;
4165 wqe->Class = CLASS3;
4166 wqe->CQId = (uint16_t)0xffff; /* default CQ for response */
4167 wqe->CmdType = WQE_TYPE_ABORT;
4168 } else {
4169 iocb = &iocbq->iocb;
4170 iocb->ULPIOTAG = abort_iotag;
4171 iocb->un.acxri.abortType = 0;
4172 iocb->un.acxri.abortContextTag = ndlp->nlp_Rpi;
4173 iocb->un.acxri.abortIoTag = iotag;
4174 iocb->ULPLE = 1;
4175 iocb->ULPCLASS = 0;
4176 iocb->ULPCOMMAND = CMD_CLOSE_XRI_CN;
4177 iocb->ULPOWNER = OWN_CHIP;
4180 return (iocbq);
4182 } /* emlxs_create_close_xri_cn() */
4185 /* This must be called while holding the EMLXS_FCTAB_LOCK */
4186 extern IOCBQ *
4187 emlxs_create_close_xri_cx(emlxs_port_t *port, NODELIST *ndlp, uint16_t xid,
4188 CHANNEL *cp)
4190 emlxs_hba_t *hba = HBA;
4191 IOCBQ *iocbq;
4192 IOCB *iocb;
4193 emlxs_wqe_t *wqe;
4194 uint16_t abort_iotag;
4196 if ((iocbq = (IOCBQ *)emlxs_mem_get(hba, MEM_IOCB)) == NULL) {
4197 return (NULL);
4200 iocbq->channel = (void *)cp;
4201 iocbq->port = (void *)port;
4202 iocbq->node = (void *)ndlp;
4203 iocbq->flag |= (IOCB_PRIORITY | IOCB_SPECIAL);
4205 /*
4206 * set up an iotag using special Abort iotags
4207 */
4208 if ((hba->fc_oor_iotag >= EMLXS_MAX_ABORT_TAG)) {
4209 hba->fc_oor_iotag = hba->max_iotag;
4211 abort_iotag = hba->fc_oor_iotag++;
4213 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4214 wqe = &iocbq->wqe;
4215 wqe->un.Abort.Criteria = ABORT_XRI_TAG;
4216 wqe->un.Abort.IA = 1;
4217 wqe->RequestTag = abort_iotag;
4218 wqe->AbortTag = xid;
4219 wqe->Command = CMD_ABORT_XRI_CX;
4220 wqe->Class = CLASS3;
4221 wqe->CQId = (uint16_t)0xffff; /* default CQ for response */
4222 wqe->CmdType = WQE_TYPE_ABORT;
4223 } else {
4224 iocb = &iocbq->iocb;
4225 iocb->ULPCONTEXT = xid;
4226 iocb->ULPIOTAG = abort_iotag;
4227 iocb->ULPLE = 1;
4228 iocb->ULPCLASS = 0;
4229 iocb->ULPCOMMAND = CMD_CLOSE_XRI_CX;
4230 iocb->ULPOWNER = OWN_CHIP;
4233 return (iocbq);
4235 } /* emlxs_create_close_xri_cx() */
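/*
 * Illustrative sketch, not emlxs driver code: as far as the builders
 * above show, the abort and close WQE variants differ mainly in the IA
 * bit -- the abort paths set IA = 0 and the close paths set IA = 1,
 * which reads as "tear the exchange down without sending an ABTS".
 * The struct below is a stand-in, not the real emlxs_wqe_t. Guarded
 * out of the build.
 */
#if 0
#include <stdint.h>
#include <stdio.h>

struct abort_req {
	uint16_t request_tag;	/* new tag for the abort request itself */
	uint16_t abort_tag;	/* XRI or iotag being taken down */
	uint8_t	 ia;		/* 1 = close silently, 0 = send ABTS */
};

static struct abort_req
make_req(uint16_t tag, uint16_t target, int close_only)
{
	struct abort_req r = { tag, target, close_only ? 1 : 0 };
	return (r);
}

int
main(void)
{
	struct abort_req a = make_req(1, 42, 0);	/* abort: ABTS */
	struct abort_req c = make_req(2, 42, 1);	/* close: no ABTS */

	printf("abort IA=%u close IA=%u\n", a.ia, c.ia);
	return (0);
}
#endif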
4238 void
4239 emlxs_close_els_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
4241 CHANNEL *cp;
4242 IOCBQ *iocbq;
4243 IOCB *iocb;
4245 if (rxid == 0 || rxid == 0xFFFF) {
4246 return;
4249 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4250 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
4251 "Closing ELS exchange: xid=%x", rxid);
4253 if (emlxs_sli4_unreserve_xri(port, rxid, 1) == 0) {
4254 return;
4258 cp = &hba->chan[hba->channel_els];
4260 mutex_enter(&EMLXS_FCTAB_LOCK);
4262 /* Create the abort IOCB */
4263 iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);
4265 mutex_exit(&EMLXS_FCTAB_LOCK);
4267 if (iocbq) {
4268 iocb = &iocbq->iocb;
4269 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
4270 "Closing ELS exchange: xid=%x iotag=%d", rxid,
4271 iocb->ULPIOTAG);
4273 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
4276 } /* emlxs_close_els_exchange() */
4279 void
4280 emlxs_abort_els_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
4282 CHANNEL *cp;
4283 IOCBQ *iocbq;
4284 IOCB *iocb;
4286 if (rxid == 0 || rxid == 0xFFFF) {
4287 return;
4290 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4292 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
4293 "Aborting ELS exchange: xid=%x", rxid);
4295 if (emlxs_sli4_unreserve_xri(port, rxid, 1) == 0) {
4296 /* We have no way to abort unsolicited exchanges */
4297 /* that we have not responded to at this time */
4298 /* So we will return for now */
4299 return;
4303 cp = &hba->chan[hba->channel_els];
4305 mutex_enter(&EMLXS_FCTAB_LOCK);
4307 /* Create the abort IOCB */
4308 if (hba->state >= FC_LINK_UP) {
4309 iocbq = emlxs_create_abort_xri_cx(port, NULL, rxid, cp,
4310 CLASS3, ABORT_TYPE_ABTS);
4311 } else {
4312 iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);
4315 mutex_exit(&EMLXS_FCTAB_LOCK);
4317 if (iocbq) {
4318 iocb = &iocbq->iocb;
4319 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
4320 "Aborting ELS exchange: xid=%x iotag=%d", rxid,
4321 iocb->ULPIOTAG);
4323 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
4326 } /* emlxs_abort_els_exchange() */
4329 void
4330 emlxs_abort_ct_exchange(emlxs_hba_t *hba, emlxs_port_t *port, uint32_t rxid)
4332 CHANNEL *cp;
4333 IOCBQ *iocbq;
4334 IOCB *iocb;
4336 if (rxid == 0 || rxid == 0xFFFF) {
4337 return;
4340 if (hba->sli_mode == EMLXS_HBA_SLI4_MODE) {
4341 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_ct_msg,
4342 "Aborting CT exchange: xid=%x", rxid);
4344 if (emlxs_sli4_unreserve_xri(port, rxid, 1) == 0) {
4345 /* We have no way to abort unsolicited exchanges */
4346 /* that we have not responded to at this time */
4347 /* So we will return for now */
4348 return;
4352 cp = &hba->chan[hba->channel_ct];
4354 mutex_enter(&EMLXS_FCTAB_LOCK);
4356 /* Create the abort IOCB */
4357 if (hba->state >= FC_LINK_UP) {
4358 iocbq = emlxs_create_abort_xri_cx(port, NULL, rxid, cp,
4359 CLASS3, ABORT_TYPE_ABTS);
4360 } else {
4361 iocbq = emlxs_create_close_xri_cx(port, NULL, rxid, cp);
4364 mutex_exit(&EMLXS_FCTAB_LOCK);
4366 if (iocbq) {
4367 iocb = &iocbq->iocb;
4368 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_unsol_els_msg,
4369 "Aborting CT exchange: xid=%x iotag=%d", rxid,
4370 iocb->ULPIOTAG);
4372 EMLXS_SLI_ISSUE_IOCB_CMD(hba, cp, iocbq);
4375 } /* emlxs_abort_ct_exchange() */
4378 /* This must be called while holding the EMLXS_FCTAB_LOCK */
4379 static void
4380 emlxs_sbp_abort_add(emlxs_port_t *port, emlxs_buf_t *sbp, Q *abort,
4381 uint8_t *flag, emlxs_buf_t *fpkt)
4383 emlxs_hba_t *hba = HBA;
4384 IOCBQ *iocbq;
4385 CHANNEL *cp;
4386 NODELIST *ndlp;
4388 cp = (CHANNEL *)sbp->channel;
4389 ndlp = sbp->node;
4391 /* Create the close XRI IOCB */
4392 if (hba->state >= FC_LINK_UP) {
4393 iocbq = emlxs_create_abort_xri_cn(port, ndlp, sbp->iotag, cp,
4394 CLASS3, ABORT_TYPE_ABTS);
4395 } else {
4396 iocbq = emlxs_create_close_xri_cn(port, ndlp, sbp->iotag, cp);
4397 }
4398 /*
4399 * Add this iocb to our local abort Q
4400 * This way we don't hold the CHIPQ lock too long
4401 */
4402 if (iocbq) {
4403 if (abort->q_first) {
4404 ((IOCBQ *)abort->q_last)->next = iocbq;
4405 abort->q_last = (uint8_t *)iocbq;
4406 abort->q_cnt++;
4407 } else {
4408 abort->q_first = (uint8_t *)iocbq;
4409 abort->q_last = (uint8_t *)iocbq;
4410 abort->q_cnt = 1;
4412 iocbq->next = NULL;
4415 /* set the flags */
4416 mutex_enter(&sbp->mtx);
4418 sbp->pkt_flags |= (PACKET_IN_FLUSH | PACKET_XRI_CLOSED);
4420 sbp->ticks = hba->timer_tics + 10;
4421 sbp->abort_attempts++;
4423 flag[cp->channelno] = 1;
4425 /*
4426 * If the fpkt is already set, then we will leave it alone
4427 * This ensures that this pkt is only accounted for on one
4428 * fpkt->flush_count
4429 */
4430 if (!sbp->fpkt && fpkt) {
4431 mutex_enter(&fpkt->mtx);
4432 sbp->fpkt = fpkt;
4433 fpkt->flush_count++;
4434 mutex_exit(&fpkt->mtx);
4437 mutex_exit(&sbp->mtx);
4439 return;
4441 } /* emlxs_sbp_abort_add() */