drivers/scsi/qla4xxx/ql4_isr.c
/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2003-2006 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */

#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"
/**
 * qla4xxx_status_entry - processes status IOCBs
 * @ha: Pointer to host adapter structure.
 * @sts_entry: Pointer to status entry structure.
 **/
static void qla4xxx_status_entry(struct scsi_qla_host *ha,
				 struct status_entry *sts_entry)
{
	uint8_t scsi_status;
	struct scsi_cmnd *cmd;
	struct srb *srb;
	struct ddb_entry *ddb_entry;
	uint32_t residual;
	uint16_t sensebytecnt;

	srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
	if (!srb) {
		/* FIXMEdg: Don't we need to reset ISP in this case??? */
		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Status Entry invalid "
			      "handle 0x%x, sp=%p. This cmd may have already "
			      "been completed.\n", ha->host_no, __func__,
			      le32_to_cpu(sts_entry->handle), srb));
		dev_warn(&ha->pdev->dev, "%s invalid status entry:"
			 " handle=0x%0x\n", __func__, sts_entry->handle);
		set_bit(DPC_RESET_HA, &ha->dpc_flags);
		return;
	}
	cmd = srb->cmd;
	if (cmd == NULL) {
		DEBUG2(printk("scsi%ld: %s: Command already returned back to "
			      "OS pkt->handle=%d srb=%p srb->state:%d\n",
			      ha->host_no, __func__, sts_entry->handle,
			      srb, srb->state));
		dev_warn(&ha->pdev->dev, "Command is NULL:"
			 " already returned to OS (srb=%p)\n", srb);
		return;
	}

	ddb_entry = srb->ddb;
	if (ddb_entry == NULL) {
		cmd->result = DID_NO_CONNECT << 16;
		goto status_entry_exit;
	}

	residual = le32_to_cpu(sts_entry->residualByteCnt);

	/* Translate ISP error to a Linux SCSI error. */
	scsi_status = sts_entry->scsiStatus;
	switch (sts_entry->completionStatus) {
	case SCS_COMPLETE:

		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) {
			cmd->result = DID_ERROR << 16;
			break;
		}

		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) {
			scsi_set_resid(cmd, residual);
			if (!scsi_status && ((scsi_bufflen(cmd) - residual) <
			    cmd->underflow)) {

				cmd->result = DID_ERROR << 16;

				DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
					      "Mid-layer Data underrun0, "
					      "xferlen = 0x%x, "
					      "residual = 0x%x\n", ha->host_no,
					      cmd->device->channel,
					      cmd->device->id,
					      cmd->device->lun, __func__,
					      scsi_bufflen(cmd), residual));
				break;
			}
		}

		cmd->result = DID_OK << 16 | scsi_status;

		if (scsi_status != SCSI_CHECK_CONDITION)
			break;

		/* Copy Sense Data into sense buffer. */
		memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

		sensebytecnt = le16_to_cpu(sts_entry->senseDataByteCnt);
		if (sensebytecnt == 0)
			break;

		memcpy(cmd->sense_buffer, sts_entry->senseData,
		       min_t(uint16_t, sensebytecnt, SCSI_SENSE_BUFFERSIZE));

		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
			      "ASC/ASCQ = %02x/%02x\n", ha->host_no,
			      cmd->device->channel, cmd->device->id,
			      cmd->device->lun, __func__,
			      sts_entry->senseData[2] & 0x0f,
			      sts_entry->senseData[12],
			      sts_entry->senseData[13]));

		srb->flags |= SRB_GOT_SENSE;
		break;
	case SCS_INCOMPLETE:
		/* Always set the status to DID_ERROR, since
		 * all conditions result in that status anyway */
		cmd->result = DID_ERROR << 16;
		break;

	case SCS_RESET_OCCURRED:
		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Device RESET occurred\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun, __func__));

		cmd->result = DID_RESET << 16;
		break;

	case SCS_ABORTED:
		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Abort occurred\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun, __func__));

		cmd->result = DID_RESET << 16;
		break;

	case SCS_TIMEOUT:
		DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: Timeout\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun));

		cmd->result = DID_BUS_BUSY << 16;

		/*
		 * Mark device missing so that we won't continue to send
		 * I/O to this device.  We should get a ddb state change
		 * AEN soon.
		 */
		if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
			qla4xxx_mark_device_missing(ha, ddb_entry);
		break;
	case SCS_DATA_UNDERRUN:
	case SCS_DATA_OVERRUN:
		if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) ||
		    (sts_entry->completionStatus == SCS_DATA_OVERRUN)) {
			DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Data overrun, "
				      "residual = 0x%x\n", ha->host_no,
				      cmd->device->channel, cmd->device->id,
				      cmd->device->lun, __func__, residual));

			cmd->result = DID_ERROR << 16;
			break;
		}

		scsi_set_resid(cmd, residual);

		/*
		 * If there is scsi_status, it takes precedence over
		 * underflow condition.
		 */
		if (scsi_status != 0) {
			cmd->result = DID_OK << 16 | scsi_status;

			if (scsi_status != SCSI_CHECK_CONDITION)
				break;

			/* Copy Sense Data into sense buffer. */
			memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

			sensebytecnt =
				le16_to_cpu(sts_entry->senseDataByteCnt);
			if (sensebytecnt == 0)
				break;

			memcpy(cmd->sense_buffer, sts_entry->senseData,
			       min_t(uint16_t, sensebytecnt,
				     SCSI_SENSE_BUFFERSIZE));

			DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
				      "ASC/ASCQ = %02x/%02x\n", ha->host_no,
				      cmd->device->channel, cmd->device->id,
				      cmd->device->lun, __func__,
				      sts_entry->senseData[2] & 0x0f,
				      sts_entry->senseData[12],
				      sts_entry->senseData[13]));
		} else {
			/*
			 * If the RISC reports an underrun but the target does
			 * not, we must have lost a frame, so tell the upper
			 * layer to retry by reporting bus busy.
			 */
			if ((sts_entry->iscsiFlags &
			     ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
				cmd->result = DID_BUS_BUSY << 16;
			} else if ((scsi_bufflen(cmd) - residual) <
				   cmd->underflow) {
				/*
				 * Handle mid-layer underflow???
				 *
				 * For kernels older than 2.4, the driver must
				 * return an error if an underflow is detected.
				 * For kernels 2.4 and above, the mid-layer
				 * will apparently handle the underflow by
				 * detecting the residual count --
				 * unfortunately, we do not see where this is
				 * actually being done.  In the interim, we
				 * will return DID_ERROR.
				 */
				DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
					      "Mid-layer Data underrun1, "
					      "xferlen = 0x%x, "
					      "residual = 0x%x\n", ha->host_no,
					      cmd->device->channel,
					      cmd->device->id,
					      cmd->device->lun, __func__,
					      scsi_bufflen(cmd), residual));

				cmd->result = DID_ERROR << 16;
			} else {
				cmd->result = DID_OK << 16;
			}
		}
		break;
	case SCS_DEVICE_LOGGED_OUT:
	case SCS_DEVICE_UNAVAILABLE:
		/*
		 * Mark device missing so that we won't continue to
		 * send I/O to this device.  We should get a ddb
		 * state change AEN soon.
		 */
		if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
			qla4xxx_mark_device_missing(ha, ddb_entry);

		cmd->result = DID_BUS_BUSY << 16;
		break;

	case SCS_QUEUE_FULL:
		/*
		 * SCSI Mid-Layer handles device queue full
		 */
		cmd->result = DID_OK << 16 | sts_entry->scsiStatus;
		DEBUG2(printk("scsi%ld:%d:%d: %s: QUEUE FULL detected "
			      "compl=%02x, scsi=%02x, state=%02x, iFlags=%02x,"
			      " iResp=%02x\n", ha->host_no, cmd->device->id,
			      cmd->device->lun, __func__,
			      sts_entry->completionStatus,
			      sts_entry->scsiStatus, sts_entry->state_flags,
			      sts_entry->iscsiFlags,
			      sts_entry->iscsiResponse));
		break;

	default:
		cmd->result = DID_ERROR << 16;
		break;
	}

status_entry_exit:

	/* complete the request */
	srb->cc_stat = sts_entry->completionStatus;
	qla4xxx_srb_compl(ha, srb);
}
/**
 * qla4xxx_process_response_queue - process response queue completions
 * @ha: Pointer to host adapter structure.
 *
 * This routine processes response queue completions in interrupt context.
 * Hardware_lock locked upon entry
 **/
static void qla4xxx_process_response_queue(struct scsi_qla_host * ha)
{
	uint32_t count = 0;
	struct srb *srb = NULL;
	struct status_entry *sts_entry;

	/* Process all responses from response queue */
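	/*
	 * Note: rsp_q_in below comes from the shadow register block, a
	 * host-memory copy of the firmware's queue-in pointer, so new
	 * entries can be detected without a register read on each pass.
	 */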
	while ((ha->response_in =
		(uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in)) !=
	       ha->response_out) {
		sts_entry = (struct status_entry *) ha->response_ptr;
		count++;

		/* Advance pointers for next entry */
		if (ha->response_out == (RESPONSE_QUEUE_DEPTH - 1)) {
			ha->response_out = 0;
			ha->response_ptr = ha->response_ring;
		} else {
			ha->response_out++;
			ha->response_ptr++;
		}

		/* process entry */
		switch (sts_entry->hdr.entryType) {
		case ET_STATUS:
			/*
			 * Common status - Single completion posted in single
			 * IOSB.
			 */
			qla4xxx_status_entry(ha, sts_entry);
			break;
		case ET_PASSTHRU_STATUS:
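			/* Nothing to do for pass-through status entries
			 * here; the entry is simply consumed. */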
			break;

		case ET_STATUS_CONTINUATION:
			/* Just throw away the status continuation entries */
			DEBUG2(printk("scsi%ld: %s: Status Continuation entry "
				      "- ignoring\n", ha->host_no, __func__));
			break;

		case ET_COMMAND:
			/* ISP device queue is full. Command not
			 * accepted by ISP. Queue command for
			 * later */
			srb = qla4xxx_del_from_active_array(ha,
						le32_to_cpu(sts_entry->handle));
			if (srb == NULL)
				goto exit_prq_invalid_handle;

			DEBUG2(printk("scsi%ld: %s: FW device queue full, "
				      "srb %p\n", ha->host_no, __func__, srb));

			/* Retry normally by sending it back with
			 * DID_BUS_BUSY */
			srb->cmd->result = DID_BUS_BUSY << 16;
			qla4xxx_srb_compl(ha, srb);
			break;

		case ET_CONTINUE:
			/* Just throw away the continuation entries */
			DEBUG2(printk("scsi%ld: %s: Continuation entry - "
				      "ignoring\n", ha->host_no, __func__));
			break;
		default:
			/*
			 * Invalid entry in response queue, reset RISC
			 * firmware.
			 */
			DEBUG2(printk("scsi%ld: %s: Invalid entry %x in "
				      "response queue\n", ha->host_no,
				      __func__,
				      sts_entry->hdr.entryType));
			goto exit_prq_error;
		}
	}

	/*
	 * Done with responses, update the ISP.  For QLA4010, this also
	 * clears the interrupt.
	 */
	writel(ha->response_out, &ha->reg->rsp_q_out);
	readl(&ha->reg->rsp_q_out);

	return;

exit_prq_invalid_handle:
	DEBUG2(printk("scsi%ld: %s: Invalid handle(srb)=%p type=%x IOCS=%x\n",
		      ha->host_no, __func__, srb, sts_entry->hdr.entryType,
		      sts_entry->completionStatus));

exit_prq_error:
	writel(ha->response_out, &ha->reg->rsp_q_out);
	readl(&ha->reg->rsp_q_out);

	set_bit(DPC_RESET_HA, &ha->dpc_flags);
}
/**
 * qla4xxx_isr_decode_mailbox - decodes mailbox status
 * @ha: Pointer to host adapter structure.
 * @mbox_status: Mailbox status.
 *
 * This routine decodes the mailbox status during the ISR.
 * Hardware_lock locked upon entry. runs in interrupt context.
 **/
static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
				       uint32_t mbox_status)
{
	int i;
	uint32_t mbox_stat2, mbox_stat3;

	if ((mbox_status == MBOX_STS_BUSY) ||
	    (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
	    (mbox_status >> 12 == MBOX_COMPLETION_STATUS)) {
		ha->mbox_status[0] = mbox_status;

		if (test_bit(AF_MBOX_COMMAND, &ha->flags)) {
			/*
			 * Copy all mailbox registers to a temporary
			 * location and set mailbox command done flag
			 */
			for (i = 1; i < ha->mbox_status_count; i++)
				ha->mbox_status[i] =
					readl(&ha->reg->mailbox[i]);

			set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
		}
	} else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
		/* Immediately process the AENs that don't require much work.
		 * Only queue the database_changed AENs */
		if (ha->aen_log.count < MAX_AEN_ENTRIES) {
			for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
				ha->aen_log.entry[ha->aen_log.count].mbox_sts[i] =
					readl(&ha->reg->mailbox[i]);
			ha->aen_log.count++;
		}
		switch (mbox_status) {
		case MBOX_ASTS_SYSTEM_ERROR:
			/* Log Mailbox registers */
			if (ql4xdontresethba) {
				DEBUG2(printk("%s: Don't Reset HBA\n",
					      __func__));
			} else {
				set_bit(AF_GET_CRASH_RECORD, &ha->flags);
				set_bit(DPC_RESET_HA, &ha->dpc_flags);
			}
			break;

		case MBOX_ASTS_REQUEST_TRANSFER_ERROR:
		case MBOX_ASTS_RESPONSE_TRANSFER_ERROR:
		case MBOX_ASTS_NVRAM_INVALID:
		case MBOX_ASTS_IP_ADDRESS_CHANGED:
		case MBOX_ASTS_DHCP_LEASE_EXPIRED:
			DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
				      "Reset HA\n", ha->host_no, mbox_status));
			set_bit(DPC_RESET_HA, &ha->dpc_flags);
			break;
		case MBOX_ASTS_LINK_UP:
			DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK UP\n",
				      ha->host_no, mbox_status));
			set_bit(AF_LINK_UP, &ha->flags);
			break;

		case MBOX_ASTS_LINK_DOWN:
			DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK DOWN\n",
				      ha->host_no, mbox_status));
			clear_bit(AF_LINK_UP, &ha->flags);
			break;

		case MBOX_ASTS_HEARTBEAT:
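			/* Firmware heartbeat AEN: clear the counter that
			 * tracks seconds since the last heartbeat. */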
			ha->seconds_since_last_heartbeat = 0;
			break;

		case MBOX_ASTS_DHCP_LEASE_ACQUIRED:
			DEBUG2(printk("scsi%ld: AEN %04x DHCP LEASE "
				      "ACQUIRED\n", ha->host_no, mbox_status));
			set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
			break;

		case MBOX_ASTS_PROTOCOL_STATISTIC_ALARM:
		case MBOX_ASTS_SCSI_COMMAND_PDU_REJECTED: /* Target mode only */
		case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED:  /* Connection mode */
		case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR:
		case MBOX_ASTS_SUBNET_STATE_CHANGE:
			/* No action */
			DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no,
				      mbox_status));
			break;
		case MBOX_ASTS_IP_ADDR_STATE_CHANGED:
			mbox_stat2 = readl(&ha->reg->mailbox[2]);
			mbox_stat3 = readl(&ha->reg->mailbox[3]);
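			/*
			 * mailbox[2]/mailbox[3] carry firmware-defined IP
			 * address state codes for this AEN; the transitions
			 * tested below decide whether to refresh the DHCP
			 * address or reset the adapter.
			 */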
			if ((mbox_stat3 == 5) && (mbox_stat2 == 3))
				set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
			else if ((mbox_stat3 == 2) && (mbox_stat2 == 5))
				set_bit(DPC_RESET_HA, &ha->dpc_flags);
			break;
		case MBOX_ASTS_MAC_ADDRESS_CHANGED:
		case MBOX_ASTS_DNS:
			/* No action */
			DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x, "
				      "mbox_sts[1]=%04x, mbox_sts[2]=%04x\n",
				      ha->host_no, mbox_status,
				      readl(&ha->reg->mailbox[1]),
				      readl(&ha->reg->mailbox[2])));
			break;

		case MBOX_ASTS_SELF_TEST_FAILED:
		case MBOX_ASTS_LOGIN_FAILED:
			/* No action */
			DEBUG2(printk("scsi%ld: AEN %04x, mbox_sts[1]=%04x, "
				      "mbox_sts[2]=%04x, mbox_sts[3]=%04x\n",
				      ha->host_no, mbox_status,
				      readl(&ha->reg->mailbox[1]),
				      readl(&ha->reg->mailbox[2]),
				      readl(&ha->reg->mailbox[3])));
			break;
		case MBOX_ASTS_DATABASE_CHANGED:
			/* Queue AEN information and process it in the DPC
			 * routine */
			if (ha->aen_q_count > 0) {
				/* decrement available counter */
				ha->aen_q_count--;

				for (i = 1; i < MBOX_AEN_REG_COUNT; i++)
					ha->aen_q[ha->aen_in].mbox_sts[i] =
						readl(&ha->reg->mailbox[i]);

				ha->aen_q[ha->aen_in].mbox_sts[0] = mbox_status;

				/* print debug message */
				DEBUG2(printk("scsi%ld: AEN[%d] %04x queued"
					      " mb1:0x%x mb2:0x%x mb3:0x%x mb4:0x%x\n",
					      ha->host_no, ha->aen_in,
					      mbox_status,
					      ha->aen_q[ha->aen_in].mbox_sts[1],
					      ha->aen_q[ha->aen_in].mbox_sts[2],
					      ha->aen_q[ha->aen_in].mbox_sts[3],
					      ha->aen_q[ha->aen_in].mbox_sts[4]));

				/* advance pointer */
				ha->aen_in++;
				if (ha->aen_in == MAX_AEN_ENTRIES)
					ha->aen_in = 0;

				/* The DPC routine will process the aen */
				set_bit(DPC_AEN, &ha->dpc_flags);
			} else {
				DEBUG2(printk("scsi%ld: %s: aen %04x, queue "
					      "overflowed! AEN LOST!!\n",
					      ha->host_no, __func__,
					      mbox_status));

				DEBUG2(printk("scsi%ld: DUMP AEN QUEUE\n",
					      ha->host_no));

				for (i = 0; i < MAX_AEN_ENTRIES; i++) {
					DEBUG2(printk("AEN[%d] %04x %04x %04x "
						      "%04x\n", i,
						      ha->aen_q[i].mbox_sts[0],
						      ha->aen_q[i].mbox_sts[1],
						      ha->aen_q[i].mbox_sts[2],
						      ha->aen_q[i].mbox_sts[3]));
				}
			}
			break;

		default:
			DEBUG2(printk(KERN_WARNING
				      "scsi%ld: AEN %04x UNKNOWN\n",
				      ha->host_no, mbox_status));
			break;
		}
	} else {
		DEBUG2(printk("scsi%ld: Unknown mailbox status %08X\n",
			      ha->host_no, mbox_status));

		ha->mbox_status[0] = mbox_status;
	}
}
/**
 * qla4xxx_interrupt_service_routine - isr
 * @ha: pointer to host adapter structure.
 * @intr_status: Interrupt status to process.
 *
 * This is the main interrupt service routine.
 * hardware_lock locked upon entry. runs in interrupt context.
 **/
void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
				       uint32_t intr_status)
{
	/* Process response queue interrupt. */
	if (intr_status & CSR_SCSI_COMPLETION_INTR)
		qla4xxx_process_response_queue(ha);

	/* Process mailbox/asynch event interrupt.*/
	if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
		qla4xxx_isr_decode_mailbox(ha,
					   readl(&ha->reg->mailbox[0]));

		/* Clear Mailbox Interrupt */
		writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
		       &ha->reg->ctrl_status);
		readl(&ha->reg->ctrl_status);
	}
}
/**
 * qla4xxx_intr_handler - hardware interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 **/
irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha;
	uint32_t intr_status;
	unsigned long flags = 0;
	uint8_t reqs_count = 0;

	ha = (struct scsi_qla_host *) dev_id;
	if (!ha) {
		DEBUG2(printk(KERN_INFO
			      "qla4xxx: Interrupt with NULL host ptr\n"));
		return IRQ_NONE;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);

	ha->isr_count++;
	/*
	 * Repeatedly service interrupts up to a maximum of
	 * MAX_REQS_SERVICED_PER_INTR
	 */
	while (1) {
		/*
		 * Read interrupt status
		 */
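		/*
		 * If the shadow response-queue in pointer shows pending
		 * completions, synthesize a completion interrupt status so
		 * the response queue is drained even if no new interrupt
		 * bit is latched; otherwise read the real status register.
		 */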
		if (le32_to_cpu(ha->shadow_regs->rsp_q_in) !=
		    ha->response_out)
			intr_status = CSR_SCSI_COMPLETION_INTR;
		else
			intr_status = readl(&ha->reg->ctrl_status);

		if ((intr_status &
		     (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) ==
		    0) {
			if (reqs_count == 0)
				ha->spurious_int_count++;
			break;
		}
		if (intr_status & CSR_FATAL_ERROR) {
			DEBUG2(printk(KERN_INFO "scsi%ld: Fatal Error, "
				      "Status 0x%04x\n", ha->host_no,
				      readl(isp_port_error_status(ha))));

			/* Issue Soft Reset to clear this error condition.
			 * This will prevent the RISC from repeatedly
			 * interrupting the driver; thus, allowing the DPC to
			 * get scheduled to continue error recovery.
			 * NOTE: Disabling RISC interrupts does not work in
			 * this case, as CSR_FATAL_ERROR overrides
			 * CSR_SCSI_INTR_ENABLE */
			if ((readl(&ha->reg->ctrl_status) &
			     CSR_SCSI_RESET_INTR) == 0) {
				writel(set_rmask(CSR_SOFT_RESET),
				       &ha->reg->ctrl_status);
				readl(&ha->reg->ctrl_status);
			}

			writel(set_rmask(CSR_FATAL_ERROR),
			       &ha->reg->ctrl_status);
			readl(&ha->reg->ctrl_status);

			__qla4xxx_disable_intrs(ha);

			set_bit(DPC_RESET_HA, &ha->dpc_flags);

			break;
		} else if (intr_status & CSR_SCSI_RESET_INTR) {
			clear_bit(AF_ONLINE, &ha->flags);
			__qla4xxx_disable_intrs(ha);

			writel(set_rmask(CSR_SCSI_RESET_INTR),
			       &ha->reg->ctrl_status);
			readl(&ha->reg->ctrl_status);

			if (!ql4_mod_unload)
				set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);

			break;
		} else if (intr_status & INTR_PENDING) {
			qla4xxx_interrupt_service_routine(ha, intr_status);
			ha->total_io_count++;
			if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
				break;

			intr_status = 0;
		}
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}
/**
 * qla4xxx_process_aen - processes AENs generated by firmware
 * @ha: pointer to host adapter structure.
 * @process_aen: type of AENs to process
 *
 * Processes specific types of Asynchronous Events generated by firmware.
 * The type of AENs to process is specified by process_aen and can be
 *	PROCESS_ALL_AENS	 0
 *	FLUSH_DDB_CHANGED_AENS	 1
 *	RELOGIN_DDB_CHANGED_AENS 2
 **/
void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
{
	uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
	struct aen *aen;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (ha->aen_out != ha->aen_in) {
		aen = &ha->aen_q[ha->aen_out];
		/* copy aen information to local structure */
		for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
			mbox_sts[i] = aen->mbox_sts[i];
		ha->aen_q_count++;
		ha->aen_out++;

		if (ha->aen_out == MAX_AEN_ENTRIES)
			ha->aen_out = 0;
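		/*
		 * The entry has been copied and consumed, so drop the
		 * hardware_lock while this AEN is processed; it is
		 * re-acquired at the bottom of the loop before the next
		 * entry is examined.
		 */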
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		DEBUG2(printk("qla4xxx(%ld): AEN[%d]=0x%08x, mbx1=0x%08x mbx2=0x%08x"
			      " mbx3=0x%08x mbx4=0x%08x\n", ha->host_no,
			      (ha->aen_out ? (ha->aen_out-1): (MAX_AEN_ENTRIES-1)),
			      mbox_sts[0], mbox_sts[1], mbox_sts[2],
			      mbox_sts[3], mbox_sts[4]));

		switch (mbox_sts[0]) {
		case MBOX_ASTS_DATABASE_CHANGED:
			if (process_aen == FLUSH_DDB_CHANGED_AENS) {
				DEBUG2(printk("scsi%ld: AEN[%d] %04x, index "
					      "[%d] state=%04x FLUSHED!\n",
					      ha->host_no, ha->aen_out,
					      mbox_sts[0], mbox_sts[2],
					      mbox_sts[3]));
				break;
			} else if (process_aen == RELOGIN_DDB_CHANGED_AENS) {
				/* for use during init time, we only want to
				 * relogin non-active ddbs */
				struct ddb_entry *ddb_entry;

				/* FIXME: name length? */
				ddb_entry =
					qla4xxx_lookup_ddb_by_fw_index(ha,
								       mbox_sts[2]);
				if (!ddb_entry)
					break;

				ddb_entry->dev_scan_wait_to_complete_relogin =
					0;
				ddb_entry->dev_scan_wait_to_start_relogin =
					jiffies +
					((ddb_entry->default_time2wait +
					  4) * HZ);

				DEBUG2(printk("scsi%ld: ddb index [%d] initiate"
					      " RELOGIN after %d seconds\n",
					      ha->host_no,
					      ddb_entry->fw_ddb_index,
					      ddb_entry->default_time2wait +
					      4));
				break;
			}

			if (mbox_sts[1] == 0) {	/* Global DB change. */
				qla4xxx_reinitialize_ddb_list(ha);
			} else if (mbox_sts[1] == 1) {	/* Specific device. */
				qla4xxx_process_ddb_changed(ha, mbox_sts[2],
							    mbox_sts[3]);
			}
			break;
		}
		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}