/*
 * QLogic iSCSI HBA Driver
 * Copyright (c) 2003-2006 QLogic Corporation
 *
 * See LICENSE.qla4xxx for copyright and licensing details.
 */

#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"

/**
 * qla4xxx_process_completed_request() - Process a Fast Post response.
 * @ha: SCSI driver HA context
 * @index: SRB index
 **/
static void qla4xxx_process_completed_request(struct scsi_qla_host *ha,
					      uint32_t index)
{
	struct srb *srb;

	srb = qla4xxx_del_from_active_array(ha, index);
	if (srb) {
		/* Save ISP completion status */
		srb->cmd->result = DID_OK << 16;
		qla4xxx_srb_compl(ha, srb);
	} else {
		DEBUG2(printk("scsi%ld: Invalid ISP SCSI completion handle = "
			      "%d\n", ha->host_no, index));
		set_bit(DPC_RESET_HA, &ha->dpc_flags);
	}
}

/**
 * qla4xxx_status_entry - processes status IOCBs
 * @ha: Pointer to host adapter structure.
 * @sts_entry: Pointer to status entry structure.
 **/
static void qla4xxx_status_entry(struct scsi_qla_host *ha,
				 struct status_entry *sts_entry)
{
	uint8_t scsi_status;
	struct scsi_cmnd *cmd;
	struct srb *srb;
	struct ddb_entry *ddb_entry;
	uint32_t residual;
	uint16_t sensebytecnt;

	if (sts_entry->completionStatus == SCS_COMPLETE &&
	    sts_entry->scsiStatus == 0) {
		qla4xxx_process_completed_request(ha,
						  le32_to_cpu(sts_entry->
							      handle));
		return;
	}
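
	/*
	 * Claim the srb for this handle from the active array; a miss here
	 * means the command was already completed or the handle is stale.
	 */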
	srb = qla4xxx_del_from_active_array(ha, le32_to_cpu(sts_entry->handle));
	if (!srb) {
		/* FIXMEdg: Don't we need to reset ISP in this case??? */
		DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Status Entry invalid "
			      "handle 0x%x, sp=%p. This cmd may have already "
			      "been completed.\n", ha->host_no, __func__,
			      le32_to_cpu(sts_entry->handle), srb));
		return;
	}

	cmd = srb->cmd;
	if (cmd == NULL) {
		DEBUG2(printk("scsi%ld: %s: Command already returned back to "
			      "OS pkt->handle=%d srb=%p srb->state:%d\n",
			      ha->host_no, __func__, sts_entry->handle,
			      srb, srb->state));
		dev_warn(&ha->pdev->dev, "Command is NULL:"
			 " already returned to OS (srb=%p)\n", srb);
		return;
	}

	ddb_entry = srb->ddb;
	if (ddb_entry == NULL) {
		cmd->result = DID_NO_CONNECT << 16;
		goto status_entry_exit;
	}

	residual = le32_to_cpu(sts_entry->residualByteCnt);

	/* Translate ISP error to a Linux SCSI error. */
	scsi_status = sts_entry->scsiStatus;
	switch (sts_entry->completionStatus) {
	case SCS_COMPLETE:
		if (scsi_status == 0) {
			cmd->result = DID_OK << 16;
			break;
		}

		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) {
			cmd->result = DID_ERROR << 16;
			break;
		}

		if (sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_UNDER) {
			scsi_set_resid(cmd, residual);
103 <<<<<<< HEAD:drivers/scsi/qla4xxx/ql4_isr.c
104 if (!scsi_status && ((scsi_bufflen(cmd) - residual) <
105 cmd->underflow)) {
106 =======
107 if ((scsi_bufflen(cmd) - residual) < cmd->underflow) {
108 >>>>>>> 264e3e889d86e552b4191d69bb60f4f3b383135a:drivers/scsi/qla4xxx/ql4_isr.c
				cmd->result = DID_ERROR << 16;

				DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
					      "Mid-layer Data underrun0, "
					      "xferlen = 0x%x, "
					      "residual = 0x%x\n", ha->host_no,
					      cmd->device->channel,
					      cmd->device->id,
					      cmd->device->lun, __func__,
					      scsi_bufflen(cmd), residual));
				break;
			}
		}

		cmd->result = DID_OK << 16 | scsi_status;

		if (scsi_status != SCSI_CHECK_CONDITION)
			break;

		/* Copy Sense Data into sense buffer. */
		memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

		sensebytecnt = le16_to_cpu(sts_entry->senseDataByteCnt);
		if (sensebytecnt == 0)
			break;

		memcpy(cmd->sense_buffer, sts_entry->senseData,
		       min_t(uint16_t, sensebytecnt, SCSI_SENSE_BUFFERSIZE));

		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
			      "ASC/ASCQ = %02x/%02x\n", ha->host_no,
			      cmd->device->channel, cmd->device->id,
			      cmd->device->lun, __func__,
			      sts_entry->senseData[2] & 0x0f,
			      sts_entry->senseData[12],
			      sts_entry->senseData[13]));

		srb->flags |= SRB_GOT_SENSE;
		break;

	case SCS_INCOMPLETE:
		/* Always set the status to DID_ERROR, since
		 * all conditions result in that status anyway */
		cmd->result = DID_ERROR << 16;
		break;

	case SCS_RESET_OCCURRED:
		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Device RESET occurred\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun, __func__));

		cmd->result = DID_RESET << 16;
		break;

	case SCS_ABORTED:
		DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Abort occurred\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun, __func__));

		cmd->result = DID_RESET << 16;
		break;

	case SCS_TIMEOUT:
		DEBUG2(printk(KERN_INFO "scsi%ld:%d:%d:%d: Timeout\n",
			      ha->host_no, cmd->device->channel,
			      cmd->device->id, cmd->device->lun));

		cmd->result = DID_BUS_BUSY << 16;

		/*
		 * Mark device missing so that we won't continue to send
		 * I/O to this device.  We should get a ddb state change
		 * AEN soon.
		 */
		if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
			qla4xxx_mark_device_missing(ha, ddb_entry);
		break;

	case SCS_DATA_UNDERRUN:
	case SCS_DATA_OVERRUN:
		if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) ||
		    (sts_entry->completionStatus == SCS_DATA_OVERRUN)) {
			DEBUG2(printk("scsi%ld:%d:%d:%d: %s: Data overrun, "
				      "residual = 0x%x\n", ha->host_no,
				      cmd->device->channel, cmd->device->id,
				      cmd->device->lun, __func__, residual));

			cmd->result = DID_ERROR << 16;
			break;
		}

		scsi_set_resid(cmd, residual);

		/*
		 * If there is scsi_status, it takes precedence over
		 * underflow condition.
		 */
		if (scsi_status != 0) {
			cmd->result = DID_OK << 16 | scsi_status;

			if (scsi_status != SCSI_CHECK_CONDITION)
				break;

			/* Copy Sense Data into sense buffer. */
			memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);

			sensebytecnt =
				le16_to_cpu(sts_entry->senseDataByteCnt);
			if (sensebytecnt == 0)
				break;

			memcpy(cmd->sense_buffer, sts_entry->senseData,
			       min_t(uint16_t, sensebytecnt,
				     SCSI_SENSE_BUFFERSIZE));

			DEBUG2(printk("scsi%ld:%d:%d:%d: %s: sense key = %x, "
				      "ASC/ASCQ = %02x/%02x\n", ha->host_no,
				      cmd->device->channel, cmd->device->id,
				      cmd->device->lun, __func__,
				      sts_entry->senseData[2] & 0x0f,
				      sts_entry->senseData[12],
				      sts_entry->senseData[13]));
		} else {
			/*
			 * If RISC reports underrun and target does not
			 * report it then we must have a lost frame, so
			 * tell upper layer to retry it by reporting a
			 * bus busy.
			 */
			if ((sts_entry->iscsiFlags &
			     ISCSI_FLAG_RESIDUAL_UNDER) == 0) {
				cmd->result = DID_BUS_BUSY << 16;
			} else if ((scsi_bufflen(cmd) - residual) <
				   cmd->underflow) {
				/*
				 * Handle mid-layer underflow???
				 *
				 * For kernels less than 2.4, the driver must
				 * return an error if an underflow is detected.
				 * For kernels equal-to and above 2.4, the
				 * mid-layer will apparently handle the
				 * underflow by detecting the residual count --
				 * unfortunately, we do not see where this is
				 * actually being done.  In the interim, we
				 * will return DID_ERROR.
				 */
				DEBUG2(printk("scsi%ld:%d:%d:%d: %s: "
					      "Mid-layer Data underrun1, "
					      "xferlen = 0x%x, "
					      "residual = 0x%x\n", ha->host_no,
					      cmd->device->channel,
					      cmd->device->id,
					      cmd->device->lun, __func__,
					      scsi_bufflen(cmd), residual));

				cmd->result = DID_ERROR << 16;
			} else {
				cmd->result = DID_OK << 16;
			}
		}
		break;

	case SCS_DEVICE_LOGGED_OUT:
	case SCS_DEVICE_UNAVAILABLE:
		/*
		 * Mark device missing so that we won't continue to
		 * send I/O to this device.  We should get a ddb
		 * state change AEN soon.
		 */
		if (atomic_read(&ddb_entry->state) == DDB_STATE_ONLINE)
			qla4xxx_mark_device_missing(ha, ddb_entry);

		cmd->result = DID_BUS_BUSY << 16;
		break;

	case SCS_QUEUE_FULL:
		/*
		 * SCSI Mid-Layer handles device queue full
		 */
		cmd->result = DID_OK << 16 | sts_entry->scsiStatus;
		DEBUG2(printk("scsi%ld:%d:%d: %s: QUEUE FULL detected "
			      "compl=%02x, scsi=%02x, state=%02x, iFlags=%02x,"
			      " iResp=%02x\n", ha->host_no, cmd->device->id,
			      cmd->device->lun, __func__,
			      sts_entry->completionStatus,
			      sts_entry->scsiStatus, sts_entry->state_flags,
			      sts_entry->iscsiFlags,
			      sts_entry->iscsiResponse));
		break;

	default:
		cmd->result = DID_ERROR << 16;
		break;
	}

status_entry_exit:

	/* complete the request */
	srb->cc_stat = sts_entry->completionStatus;
	qla4xxx_srb_compl(ha, srb);
}

/**
 * qla4xxx_process_response_queue - process response queue completions
 * @ha: Pointer to host adapter structure.
 *
 * This routine process response queue completions in interrupt context.
 * Hardware_lock locked upon entry
 **/
static void qla4xxx_process_response_queue(struct scsi_qla_host * ha)
{
	uint32_t count = 0;
	struct srb *srb = NULL;
	struct status_entry *sts_entry;

	/* Process all responses from response queue */
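	/*
	 * ha->shadow_regs->rsp_q_in is the producer index that the firmware
	 * DMAs to host memory; keep consuming entries until the driver's
	 * consumer index (ha->response_out) catches up with it.
	 */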
	while ((ha->response_in =
		(uint16_t)le32_to_cpu(ha->shadow_regs->rsp_q_in)) !=
	       ha->response_out) {
		sts_entry = (struct status_entry *) ha->response_ptr;
		count++;

		/* Advance pointers for next entry */
		if (ha->response_out == (RESPONSE_QUEUE_DEPTH - 1)) {
			ha->response_out = 0;
			ha->response_ptr = ha->response_ring;
		} else {
			ha->response_out++;
			ha->response_ptr++;
		}

		/* process entry */
		switch (sts_entry->hdr.entryType) {
		case ET_STATUS:
			/*
			 * Common status - Single completion posted in single
			 * IOSB.
			 */
			qla4xxx_status_entry(ha, sts_entry);
			break;

		case ET_PASSTHRU_STATUS:
			break;

		case ET_STATUS_CONTINUATION:
			/* Just throw away the status continuation entries */
			DEBUG2(printk("scsi%ld: %s: Status Continuation entry "
				      "- ignoring\n", ha->host_no, __func__));
			break;

		case ET_COMMAND:
			/* ISP device queue is full. Command not
			 * accepted by ISP. Queue command for
			 * later */
			srb = qla4xxx_del_from_active_array(ha,
						le32_to_cpu(sts_entry->
							    handle));
			if (srb == NULL)
				goto exit_prq_invalid_handle;

			DEBUG2(printk("scsi%ld: %s: FW device queue full, "
				      "srb %p\n", ha->host_no, __func__, srb));

			/* Retry normally by sending it back with
			 * DID_BUS_BUSY */
			srb->cmd->result = DID_BUS_BUSY << 16;
			qla4xxx_srb_compl(ha, srb);
			break;

		case ET_CONTINUE:
			/* Just throw away the continuation entries */
			DEBUG2(printk("scsi%ld: %s: Continuation entry - "
				      "ignoring\n", ha->host_no, __func__));
			break;

		default:
			/*
			 * Invalid entry in response queue, reset RISC
			 * firmware.
			 */
			DEBUG2(printk("scsi%ld: %s: Invalid entry %x in "
				      "response queue\n", ha->host_no,
				      __func__,
				      sts_entry->hdr.entryType));
			goto exit_prq_error;
		}
	}

	/*
	 * Done with responses, update the ISP.  For QLA4010, this also clears
	 * the interrupt.
	 */
	writel(ha->response_out, &ha->reg->rsp_q_out);
	readl(&ha->reg->rsp_q_out);

	return;

exit_prq_invalid_handle:
	DEBUG2(printk("scsi%ld: %s: Invalid handle(srb)=%p type=%x IOCS=%x\n",
		      ha->host_no, __func__, srb, sts_entry->hdr.entryType,
		      sts_entry->completionStatus));

exit_prq_error:
	writel(ha->response_out, &ha->reg->rsp_q_out);
	readl(&ha->reg->rsp_q_out);

	set_bit(DPC_RESET_HA, &ha->dpc_flags);
}

/**
 * qla4xxx_isr_decode_mailbox - decodes mailbox status
 * @ha: Pointer to host adapter structure.
 * @mailbox_status: Mailbox status.
 *
 * This routine decodes the mailbox status during the ISR.
 * Hardware_lock locked upon entry. runs in interrupt context.
 **/
static void qla4xxx_isr_decode_mailbox(struct scsi_qla_host * ha,
				       uint32_t mbox_status)
{
	int i;
	uint32_t mbox_stat2, mbox_stat3;
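
	/*
	 * The upper nibble of the mailbox status distinguishes command
	 * completion statuses (MBOX_COMPLETION_STATUS) from asynchronous
	 * event statuses (MBOX_ASYNC_EVENT_STATUS).
	 */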
	if ((mbox_status == MBOX_STS_BUSY) ||
	    (mbox_status == MBOX_STS_INTERMEDIATE_COMPLETION) ||
	    (mbox_status >> 12 == MBOX_COMPLETION_STATUS)) {
		ha->mbox_status[0] = mbox_status;

		if (test_bit(AF_MBOX_COMMAND, &ha->flags)) {
			/*
			 * Copy all mailbox registers to a temporary
			 * location and set mailbox command done flag
			 */
			for (i = 1; i < ha->mbox_status_count; i++)
				ha->mbox_status[i] =
					readl(&ha->reg->mailbox[i]);

			set_bit(AF_MBOX_COMMAND_DONE, &ha->flags);
		}
	} else if (mbox_status >> 12 == MBOX_ASYNC_EVENT_STATUS) {
		/* Immediately process the AENs that don't require much work.
		 * Only queue the database_changed AENs */
		if (ha->aen_log.count < MAX_AEN_ENTRIES) {
			for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
				ha->aen_log.entry[ha->aen_log.count].mbox_sts[i] =
					readl(&ha->reg->mailbox[i]);
			ha->aen_log.count++;
		}
		switch (mbox_status) {
		case MBOX_ASTS_SYSTEM_ERROR:
			/* Log Mailbox registers */
			if (ql4xdontresethba) {
				DEBUG2(printk("%s: Don't Reset HBA\n",
					      __func__));
			} else {
				set_bit(AF_GET_CRASH_RECORD, &ha->flags);
				set_bit(DPC_RESET_HA, &ha->dpc_flags);
			}
			break;

		case MBOX_ASTS_REQUEST_TRANSFER_ERROR:
		case MBOX_ASTS_RESPONSE_TRANSFER_ERROR:
		case MBOX_ASTS_NVRAM_INVALID:
		case MBOX_ASTS_IP_ADDRESS_CHANGED:
		case MBOX_ASTS_DHCP_LEASE_EXPIRED:
			DEBUG2(printk("scsi%ld: AEN %04x, ERROR Status, "
				      "Reset HA\n", ha->host_no, mbox_status));
			set_bit(DPC_RESET_HA, &ha->dpc_flags);
			break;

		case MBOX_ASTS_LINK_UP:
			DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK UP\n",
				      ha->host_no, mbox_status));
			set_bit(AF_LINK_UP, &ha->flags);
			break;

		case MBOX_ASTS_LINK_DOWN:
			DEBUG2(printk("scsi%ld: AEN %04x Adapter LINK DOWN\n",
				      ha->host_no, mbox_status));
			clear_bit(AF_LINK_UP, &ha->flags);
			break;

		case MBOX_ASTS_HEARTBEAT:
			ha->seconds_since_last_heartbeat = 0;
			break;

		case MBOX_ASTS_DHCP_LEASE_ACQUIRED:
			DEBUG2(printk("scsi%ld: AEN %04x DHCP LEASE "
				      "ACQUIRED\n", ha->host_no, mbox_status));
			set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
			break;

		case MBOX_ASTS_PROTOCOL_STATISTIC_ALARM:
		case MBOX_ASTS_SCSI_COMMAND_PDU_REJECTED: /* Target
							    * mode
							    * only */
		case MBOX_ASTS_UNSOLICITED_PDU_RECEIVED:  /* Connection mode */
		case MBOX_ASTS_IPSEC_SYSTEM_FATAL_ERROR:
		case MBOX_ASTS_SUBNET_STATE_CHANGE:
			/* No action */
			DEBUG2(printk("scsi%ld: AEN %04x\n", ha->host_no,
				      mbox_status));
			break;

		case MBOX_ASTS_IP_ADDR_STATE_CHANGED:
			mbox_stat2 = readl(&ha->reg->mailbox[2]);
			mbox_stat3 = readl(&ha->reg->mailbox[3]);
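
			/*
			 * mailbox[2]/[3] carry firmware-defined state codes
			 * for the IP address change: one transition means a
			 * new DHCP address should be fetched, the other
			 * requires an adapter reset.
			 */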
			if ((mbox_stat3 == 5) && (mbox_stat2 == 3))
				set_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags);
			else if ((mbox_stat3 == 2) && (mbox_stat2 == 5))
				set_bit(DPC_RESET_HA, &ha->dpc_flags);
			break;

		case MBOX_ASTS_MAC_ADDRESS_CHANGED:
		case MBOX_ASTS_DNS:
			/* No action */
			DEBUG2(printk(KERN_INFO "scsi%ld: AEN %04x, "
				      "mbox_sts[1]=%04x, mbox_sts[2]=%04x\n",
				      ha->host_no, mbox_status,
				      readl(&ha->reg->mailbox[1]),
				      readl(&ha->reg->mailbox[2])));
			break;

		case MBOX_ASTS_SELF_TEST_FAILED:
		case MBOX_ASTS_LOGIN_FAILED:
			/* No action */
			DEBUG2(printk("scsi%ld: AEN %04x, mbox_sts[1]=%04x, "
				      "mbox_sts[2]=%04x, mbox_sts[3]=%04x\n",
				      ha->host_no, mbox_status,
				      readl(&ha->reg->mailbox[1]),
				      readl(&ha->reg->mailbox[2]),
				      readl(&ha->reg->mailbox[3])));
			break;

		case MBOX_ASTS_DATABASE_CHANGED:
			/* Queue AEN information and process it in the DPC
			 * routine */
			if (ha->aen_q_count > 0) {

				/* decrement available counter */
				ha->aen_q_count--;

				for (i = 1; i < MBOX_AEN_REG_COUNT; i++)
					ha->aen_q[ha->aen_in].mbox_sts[i] =
						readl(&ha->reg->mailbox[i]);

				ha->aen_q[ha->aen_in].mbox_sts[0] = mbox_status;

				/* print debug message */
				DEBUG2(printk("scsi%ld: AEN[%d] %04x queued"
					      " mb1:0x%x mb2:0x%x mb3:0x%x mb4:0x%x\n",
					      ha->host_no, ha->aen_in,
					      mbox_status,
					      ha->aen_q[ha->aen_in].mbox_sts[1],
					      ha->aen_q[ha->aen_in].mbox_sts[2],
					      ha->aen_q[ha->aen_in].mbox_sts[3],
					      ha->aen_q[ha->aen_in].mbox_sts[4]));

				/* advance pointer */
				ha->aen_in++;
				if (ha->aen_in == MAX_AEN_ENTRIES)
					ha->aen_in = 0;

				/* The DPC routine will process the aen */
				set_bit(DPC_AEN, &ha->dpc_flags);
			} else {
				DEBUG2(printk("scsi%ld: %s: aen %04x, queue "
					      "overflowed!  AEN LOST!!\n",
					      ha->host_no, __func__,
					      mbox_status));

				DEBUG2(printk("scsi%ld: DUMP AEN QUEUE\n",
					      ha->host_no));

				for (i = 0; i < MAX_AEN_ENTRIES; i++) {
					DEBUG2(printk("AEN[%d] %04x %04x %04x "
						      "%04x\n", i,
						      ha->aen_q[i].mbox_sts[0],
						      ha->aen_q[i].mbox_sts[1],
						      ha->aen_q[i].mbox_sts[2],
						      ha->aen_q[i].mbox_sts[3]));
				}
			}
			break;

		default:
			DEBUG2(printk(KERN_WARNING
				      "scsi%ld: AEN %04x UNKNOWN\n",
				      ha->host_no, mbox_status));
			break;
		}
	} else {
		DEBUG2(printk("scsi%ld: Unknown mailbox status %08X\n",
			      ha->host_no, mbox_status));

		ha->mbox_status[0] = mbox_status;
	}
}

/**
 * qla4xxx_interrupt_service_routine - isr
 * @ha: pointer to host adapter structure.
 *
 * This is the main interrupt service routine.
 * hardware_lock locked upon entry. runs in interrupt context.
 **/
void qla4xxx_interrupt_service_routine(struct scsi_qla_host * ha,
				       uint32_t intr_status)
{
	/* Process response queue interrupt. */
	if (intr_status & CSR_SCSI_COMPLETION_INTR)
		qla4xxx_process_response_queue(ha);

	/* Process mailbox/asynch event interrupt.*/
	if (intr_status & CSR_SCSI_PROCESSOR_INTR) {
		qla4xxx_isr_decode_mailbox(ha,
					   readl(&ha->reg->mailbox[0]));

		/* Clear Mailbox Interrupt */
		writel(set_rmask(CSR_SCSI_PROCESSOR_INTR),
		       &ha->reg->ctrl_status);
		readl(&ha->reg->ctrl_status);
	}
}

/**
 * qla4xxx_intr_handler - hardware interrupt handler.
 * @irq: Unused
 * @dev_id: Pointer to host adapter structure
 **/
irqreturn_t qla4xxx_intr_handler(int irq, void *dev_id)
{
	struct scsi_qla_host *ha;
	uint32_t intr_status;
	unsigned long flags = 0;
	uint8_t reqs_count = 0;

	ha = (struct scsi_qla_host *) dev_id;
	if (!ha) {
		DEBUG2(printk(KERN_INFO
			      "qla4xxx: Interrupt with NULL host ptr\n"));
		return IRQ_NONE;
	}

	spin_lock_irqsave(&ha->hardware_lock, flags);

	ha->isr_count++;
	/*
	 * Repeatedly service interrupts up to a maximum of
	 * MAX_REQS_SERVICED_PER_INTR
	 */
	while (1) {
		/*
		 * Read interrupt status
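		 * (checking the shadow rsp_q_in index first lets us treat
		 * pending response-queue completions as an interrupt without
		 * an extra MMIO read of ctrl_status)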
		 */
		if (le32_to_cpu(ha->shadow_regs->rsp_q_in) !=
		    ha->response_out)
			intr_status = CSR_SCSI_COMPLETION_INTR;
		else
			intr_status = readl(&ha->reg->ctrl_status);

		if ((intr_status &
		     (CSR_SCSI_RESET_INTR|CSR_FATAL_ERROR|INTR_PENDING)) ==
		    0) {
			if (reqs_count == 0)
				ha->spurious_int_count++;
			break;
		}

		if (intr_status & CSR_FATAL_ERROR) {
			DEBUG2(printk(KERN_INFO "scsi%ld: Fatal Error, "
				      "Status 0x%04x\n", ha->host_no,
				      readl(isp_port_error_status (ha))));

			/* Issue Soft Reset to clear this error condition.
			 * This will prevent the RISC from repeatedly
			 * interrupting the driver; thus, allowing the DPC to
			 * get scheduled to continue error recovery.
			 * NOTE: Disabling RISC interrupts does not work in
			 * this case, as CSR_FATAL_ERROR overrides
			 * CSR_SCSI_INTR_ENABLE */
			if ((readl(&ha->reg->ctrl_status) &
			     CSR_SCSI_RESET_INTR) == 0) {
				writel(set_rmask(CSR_SOFT_RESET),
				       &ha->reg->ctrl_status);
				readl(&ha->reg->ctrl_status);
			}

			writel(set_rmask(CSR_FATAL_ERROR),
			       &ha->reg->ctrl_status);
			readl(&ha->reg->ctrl_status);

			__qla4xxx_disable_intrs(ha);

			set_bit(DPC_RESET_HA, &ha->dpc_flags);

			break;
		} else if (intr_status & CSR_SCSI_RESET_INTR) {
			clear_bit(AF_ONLINE, &ha->flags);
			__qla4xxx_disable_intrs(ha);

			writel(set_rmask(CSR_SCSI_RESET_INTR),
			       &ha->reg->ctrl_status);
			readl(&ha->reg->ctrl_status);

			if (!ql4_mod_unload)
				set_bit(DPC_RESET_HA_INTR, &ha->dpc_flags);

			break;
		} else if (intr_status & INTR_PENDING) {
			qla4xxx_interrupt_service_routine(ha, intr_status);
			ha->total_io_count++;
			if (++reqs_count == MAX_REQS_SERVICED_PER_INTR)
				break;

			intr_status = 0;
		}
	}

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

/**
 * qla4xxx_process_aen - processes AENs generated by firmware
 * @ha: pointer to host adapter structure.
 * @process_aen: type of AENs to process
 *
 * Processes specific types of Asynchronous Events generated by firmware.
 * The type of AENs to process is specified by process_aen and can be
 *	PROCESS_ALL_AENS	 0
 *	FLUSH_DDB_CHANGED_AENS	 1
 *	RELOGIN_DDB_CHANGED_AENS 2
 **/
void qla4xxx_process_aen(struct scsi_qla_host * ha, uint8_t process_aen)
{
	uint32_t mbox_sts[MBOX_AEN_REG_COUNT];
	struct aen *aen;
	int i;
	unsigned long flags;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	while (ha->aen_out != ha->aen_in) {
		aen = &ha->aen_q[ha->aen_out];
		/* copy aen information to local structure */
		for (i = 0; i < MBOX_AEN_REG_COUNT; i++)
			mbox_sts[i] = aen->mbox_sts[i];

		ha->aen_q_count++;
		ha->aen_out++;

		if (ha->aen_out == MAX_AEN_ENTRIES)
			ha->aen_out = 0;
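
		/*
		 * Drop hardware_lock while this AEN is handled; it is
		 * re-acquired at the bottom of the loop before the queue
		 * indices are examined again.
		 */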
		spin_unlock_irqrestore(&ha->hardware_lock, flags);

		DEBUG2(printk("qla4xxx(%ld): AEN[%d]=0x%08x, mbx1=0x%08x mbx2=0x%08x"
			      " mbx3=0x%08x mbx4=0x%08x\n", ha->host_no,
			      (ha->aen_out ? (ha->aen_out-1): (MAX_AEN_ENTRIES-1)),
			      mbox_sts[0], mbox_sts[1], mbox_sts[2],
			      mbox_sts[3], mbox_sts[4]));

		switch (mbox_sts[0]) {
		case MBOX_ASTS_DATABASE_CHANGED:
			if (process_aen == FLUSH_DDB_CHANGED_AENS) {
				DEBUG2(printk("scsi%ld: AEN[%d] %04x, index "
					      "[%d] state=%04x FLUSHED!\n",
					      ha->host_no, ha->aen_out,
					      mbox_sts[0], mbox_sts[2],
					      mbox_sts[3]));
				break;
			} else if (process_aen == RELOGIN_DDB_CHANGED_AENS) {
				/* for use during init time, we only want to
				 * relogin non-active ddbs */
				struct ddb_entry *ddb_entry;

				ddb_entry =
					/* FIXME: name length? */
					qla4xxx_lookup_ddb_by_fw_index(ha,
								       mbox_sts[2]);
				if (!ddb_entry)
					break;

				ddb_entry->dev_scan_wait_to_complete_relogin =
					0;
				ddb_entry->dev_scan_wait_to_start_relogin =
					jiffies +
					((ddb_entry->default_time2wait +
					  4) * HZ);

				DEBUG2(printk("scsi%ld: ddb index [%d] initiate"
					      " RELOGIN after %d seconds\n",
					      ha->host_no,
					      ddb_entry->fw_ddb_index,
					      ddb_entry->default_time2wait +
					      4));
				break;
			}

			if (mbox_sts[1] == 0) {	/* Global DB change. */
				qla4xxx_reinitialize_ddb_list(ha);
			} else if (mbox_sts[1] == 1) {	/* Specific device. */
				qla4xxx_process_ddb_changed(ha, mbox_sts[2],
							    mbox_sts[3]);
			}
			break;
		}

		spin_lock_irqsave(&ha->hardware_lock, flags);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
}