/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/delay.h>
#include <scsi/scsi_tcq.h>
static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_process_completed_request(struct scsi_qla_host *,
    struct req_que *, uint32_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
    sts_entry_t *);
/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
    scsi_qla_host_t *vha;
    struct qla_hw_data *ha;
    struct device_reg_2xxx __iomem *reg;
    int status;
    unsigned long iter;
    uint16_t hccr;
    uint16_t mb[4];
    struct rsp_que *rsp;
    unsigned long flags;

    rsp = (struct rsp_que *) dev_id;
    if (!rsp) {
        printk(KERN_INFO
            "%s(): NULL response queue pointer\n", __func__);
        return (IRQ_NONE);
    }
    ha = rsp->hw;

    reg = &ha->iobase->isp;
    status = 0;

    spin_lock_irqsave(&ha->hardware_lock, flags);
    vha = pci_get_drvdata(ha->pdev);
    for (iter = 50; iter--; ) {
        hccr = RD_REG_WORD(&reg->hccr);
        if (hccr & HCCR_RISC_PAUSE) {
            if (pci_channel_offline(ha->pdev))
                break;

            /*
             * Issue a "HARD" reset in order for the RISC interrupt
             * bit to be cleared.  Schedule a big hammer to get
             * out of the RISC PAUSED state.
             */
            WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
            RD_REG_WORD(&reg->hccr);

            ha->isp_ops->fw_dump(vha, 1);
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
            break;
        } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
            break;

        if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
            WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
            RD_REG_WORD(&reg->hccr);

            /* Get mailbox data. */
            mb[0] = RD_MAILBOX_REG(ha, reg, 0);
            if (mb[0] > 0x3fff && mb[0] < 0x8000) {
                qla2x00_mbx_completion(vha, mb[0]);
                status |= MBX_INTERRUPT;
            } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
                mb[1] = RD_MAILBOX_REG(ha, reg, 1);
                mb[2] = RD_MAILBOX_REG(ha, reg, 2);
                mb[3] = RD_MAILBOX_REG(ha, reg, 3);
                qla2x00_async_event(vha, rsp, mb);
            } else {
                DEBUG2(printk("scsi(%ld): Unrecognized "
                    "interrupt type (%d).\n",
                    vha->host_no, mb[0]));
            }
            /* Release mailbox registers. */
            WRT_REG_WORD(&reg->semaphore, 0);
            RD_REG_WORD(&reg->semaphore);
        } else {
            qla2x00_process_response_queue(rsp);

            WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
            RD_REG_WORD(&reg->hccr);
        }
    }
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
        (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
        set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
        complete(&ha->mbx_intr_comp);
    }

    return (IRQ_HANDLED);
}
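/*
 * The MBX_INTR_WAIT/complete() sequence at the tail of the handler is the
 * hand-off to a mailbox-command issuer: the ISR latches MBX_INTERRUPT in
 * ha->mbx_cmd_flags and signals ha->mbx_intr_comp, which the issuer is
 * presumably sleeping on until the registers captured in ha->mailbox_out[]
 * are available to it.
 */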
/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
    scsi_qla_host_t *vha;
    struct device_reg_2xxx __iomem *reg;
    int status;
    unsigned long iter;
    uint32_t stat;
    uint16_t hccr;
    uint16_t mb[4];
    struct rsp_que *rsp;
    struct qla_hw_data *ha;
    unsigned long flags;

    rsp = (struct rsp_que *) dev_id;
    if (!rsp) {
        printk(KERN_INFO
            "%s(): NULL response queue pointer\n", __func__);
        return (IRQ_NONE);
    }
    ha = rsp->hw;

    reg = &ha->iobase->isp;
    status = 0;

    spin_lock_irqsave(&ha->hardware_lock, flags);
    vha = pci_get_drvdata(ha->pdev);
    for (iter = 50; iter--; ) {
        stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
        if (stat & HSR_RISC_PAUSED) {
            if (pci_channel_offline(ha->pdev))
                break;

            hccr = RD_REG_WORD(&reg->hccr);
            if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
                qla_printk(KERN_INFO, ha, "Parity error -- "
                    "HCCR=%x, Dumping firmware!\n", hccr);
            else
                qla_printk(KERN_INFO, ha, "RISC paused -- "
                    "HCCR=%x, Dumping firmware!\n", hccr);

            /*
             * Issue a "HARD" reset in order for the RISC
             * interrupt bit to be cleared.  Schedule a big
             * hammer to get out of the RISC PAUSED state.
             */
            WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
            RD_REG_WORD(&reg->hccr);

            ha->isp_ops->fw_dump(vha, 1);
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
            break;
        } else if ((stat & HSR_RISC_INT) == 0)
            break;

        switch (stat & 0xff) {
        case 0x1:
        case 0x2:
        case 0x10:
        case 0x11:
            qla2x00_mbx_completion(vha, MSW(stat));
            status |= MBX_INTERRUPT;

            /* Release mailbox registers. */
            WRT_REG_WORD(&reg->semaphore, 0);
            break;
        case 0x12:
            mb[0] = MSW(stat);
            mb[1] = RD_MAILBOX_REG(ha, reg, 1);
            mb[2] = RD_MAILBOX_REG(ha, reg, 2);
            mb[3] = RD_MAILBOX_REG(ha, reg, 3);
            qla2x00_async_event(vha, rsp, mb);
            break;
        case 0x13:
            qla2x00_process_response_queue(rsp);
            break;
        case 0x15:
            mb[0] = MBA_CMPLT_1_16BIT;
            mb[1] = MSW(stat);
            qla2x00_async_event(vha, rsp, mb);
            break;
        case 0x16:
            mb[0] = MBA_SCSI_COMPLETION;
            mb[1] = MSW(stat);
            mb[2] = RD_MAILBOX_REG(ha, reg, 2);
            qla2x00_async_event(vha, rsp, mb);
            break;
        default:
            DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
                "(%d).\n",
                vha->host_no, stat & 0xff));
            break;
        }
        WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
        RD_REG_WORD_RELAXED(&reg->hccr);
    }
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
        (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
        set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
        complete(&ha->mbx_intr_comp);
    }

    return (IRQ_HANDLED);
}
/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
    uint16_t cnt;
    uint16_t __iomem *wptr;
    struct qla_hw_data *ha = vha->hw;
    struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

    /* Load return mailbox registers. */
    ha->flags.mbox_int = 1;
    ha->mailbox_out[0] = mb0;
    wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);

    for (cnt = 1; cnt < ha->mbx_count; cnt++) {
        if (IS_QLA2200(ha) && cnt == 8)
            wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
        if (cnt == 4 || cnt == 5)
            ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
        else
            ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

        wptr++;
    }

    if (ha->mcp) {
        DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
            __func__, vha->host_no, ha->mcp->mb[0]));
    } else {
        DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
            __func__, vha->host_no));
    }
}
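/*
 * Output mailboxes 4 and 5 are read through qla2x00_debounce_register(),
 * presumably because those registers may still be settling when the
 * completion interrupt fires; the rest are read directly.  The pointer
 * reload at index 8 reflects the ISP2200's non-contiguous mailbox layout.
 */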
static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
    static char *event[] =
        { "Complete", "Request Notification", "Time Extension" };
    int rval;
    struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
    uint16_t __iomem *wptr;
    uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

    /* Seed data -- mailbox1 -> mailbox7. */
    wptr = (uint16_t __iomem *)&reg24->mailbox1;
    for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
        mb[cnt] = RD_REG_WORD(wptr);

    DEBUG2(printk("scsi(%ld): Inter-Driver Communication %s -- "
        "%04x %04x %04x %04x %04x %04x %04x.\n", vha->host_no,
        event[aen & 0xff],
        mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6]));

    /* Acknowledgement needed? [Notify && non-zero timeout]. */
    timeout = (descr >> 8) & 0xf;
    if (aen != MBA_IDC_NOTIFY || !timeout)
        return;

    DEBUG2(printk("scsi(%ld): Inter-Driver Communication %s -- "
        "ACK timeout=%d.\n", vha->host_no, event[aen & 0xff], timeout));

    rval = qla2x00_post_idc_ack_work(vha, mb);
    if (rval != QLA_SUCCESS)
        qla_printk(KERN_WARNING, vha->hw,
            "IDC failed to post ACK.\n");
}
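/*
 * An Inter-Driver Communication event only requires an explicit ACK when
 * the firmware sent MBA_IDC_NOTIFY with a non-zero timeout (encoded in
 * bits 8-11 of the descriptor).  The ACK itself is deferred to process
 * context via qla2x00_post_idc_ack_work() rather than issued from the
 * interrupt path.
 */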
/**
 * qla2x00_async_event() - Process asynchronous events.
 * @ha: SCSI driver HA context
 * @mb: Mailbox registers (0 - 3)
 */
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
#define LS_UNKNOWN	2
    static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
    char *link_speed;
    uint16_t handle_cnt;
    uint16_t cnt;
    uint32_t handles[5];
    struct qla_hw_data *ha = vha->hw;
    struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
    uint32_t rscn_entry, host_pid;
    uint8_t rscn_queue_index;
    unsigned long flags;

    /* Setup to process RIO completion. */
    handle_cnt = 0;
    if (IS_QLA81XX(ha))
        goto skip_rio;
    switch (mb[0]) {
    case MBA_SCSI_COMPLETION:
        handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
        handle_cnt = 1;
        break;
    case MBA_CMPLT_1_16BIT:
        handles[0] = mb[1];
        handle_cnt = 1;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    case MBA_CMPLT_2_16BIT:
        handles[0] = mb[1];
        handles[1] = mb[2];
        handle_cnt = 2;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    case MBA_CMPLT_3_16BIT:
        handles[0] = mb[1];
        handles[1] = mb[2];
        handles[2] = mb[3];
        handle_cnt = 3;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    case MBA_CMPLT_4_16BIT:
        handles[0] = mb[1];
        handles[1] = mb[2];
        handles[2] = mb[3];
        handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
        handle_cnt = 4;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    case MBA_CMPLT_5_16BIT:
        handles[0] = mb[1];
        handles[1] = mb[2];
        handles[2] = mb[3];
        handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
        handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
        handle_cnt = 5;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    case MBA_CMPLT_2_32BIT:
        handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
        handles[1] = le32_to_cpu(
            ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
            RD_MAILBOX_REG(ha, reg, 6));
        handle_cnt = 2;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    default:
        break;
    }
skip_rio:
    switch (mb[0]) {
    case MBA_SCSI_COMPLETION:	/* Fast Post */
        if (!vha->flags.online)
            break;

        for (cnt = 0; cnt < handle_cnt; cnt++)
            qla2x00_process_completed_request(vha, rsp->req,
                handles[cnt]);
        break;

    case MBA_RESET:			/* Reset */
        DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n",
            vha->host_no));

        set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
        break;

    case MBA_SYSTEM_ERR:		/* System Error */
        qla_printk(KERN_INFO, ha,
            "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
            mb[1], mb[2], mb[3]);

        ha->isp_ops->fw_dump(vha, 1);

        if (IS_FWI2_CAPABLE(ha)) {
            if (mb[1] == 0 && mb[2] == 0) {
                qla_printk(KERN_ERR, ha,
                    "Unrecoverable Hardware Error: adapter "
                    "marked OFFLINE!\n");
                vha->flags.online = 0;
            } else
                set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        } else if (mb[1] == 0) {
            qla_printk(KERN_INFO, ha,
                "Unrecoverable Hardware Error: adapter marked "
                "OFFLINE!\n");
            vha->flags.online = 0;
        } else
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        break;

    case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
        DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n",
            vha->host_no));
        qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");

        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        break;

    case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
        DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
            vha->host_no));
        qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");

        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        break;

    case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
        DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
            vha->host_no));
        break;

    case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
        DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", vha->host_no,
            mb[1]));
        qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);

        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
            atomic_set(&vha->loop_state, LOOP_DOWN);
            atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
            qla2x00_mark_all_devices_lost(vha, 1);
        }

        if (vha->vp_idx) {
            atomic_set(&vha->vp_state, VP_FAILED);
            fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
        }

        set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
        set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

        vha->flags.management_server_logged_in = 0;
        qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
        break;

    case MBA_LOOP_UP:		/* Loop Up Event */
        if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
            link_speed = link_speeds[0];
            ha->link_data_rate = PORT_SPEED_1GB;
        } else {
            link_speed = link_speeds[LS_UNKNOWN];
            if (mb[1] < 5)
                link_speed = link_speeds[mb[1]];
            else if (mb[1] == 0x13)
                link_speed = link_speeds[5];
            ha->link_data_rate = mb[1];
        }

        DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
            vha->host_no, link_speed));
        qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
            link_speed);

        vha->flags.management_server_logged_in = 0;
        qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
        break;

    case MBA_LOOP_DOWN:		/* Loop Down Event */
        DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN "
            "(%x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3]));
        qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n",
            mb[1], mb[2], mb[3]);

        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
            atomic_set(&vha->loop_state, LOOP_DOWN);
            atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
            vha->device_flags |= DFLG_NO_CABLE;
            qla2x00_mark_all_devices_lost(vha, 1);
        }

        if (vha->vp_idx) {
            atomic_set(&vha->vp_state, VP_FAILED);
            fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
        }

        vha->flags.management_server_logged_in = 0;
        ha->link_data_rate = PORT_SPEED_UNKNOWN;
        qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
        break;

    case MBA_LIP_RESET:		/* LIP reset occurred */
        DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
            vha->host_no, mb[1]));
        qla_printk(KERN_INFO, ha,
            "LIP reset occurred (%x).\n", mb[1]);

        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
            atomic_set(&vha->loop_state, LOOP_DOWN);
            atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
            qla2x00_mark_all_devices_lost(vha, 1);
        }

        if (vha->vp_idx) {
            atomic_set(&vha->vp_state, VP_FAILED);
            fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
        }

        set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

        ha->operating_mode = LOOP;
        vha->flags.management_server_logged_in = 0;
        qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
        break;

    /* case MBA_DCBX_COMPLETE: */
    case MBA_POINT_TO_POINT:	/* Point-to-Point */
        if (IS_QLA2100(ha))
            break;

        if (IS_QLA81XX(ha))
            DEBUG2(printk("scsi(%ld): DCBX Completed -- %04x %04x "
                "%04x\n", vha->host_no, mb[1], mb[2], mb[3]));
        else
            DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE "
                "received.\n", vha->host_no));

        /*
         * Until there's a transition from loop down to loop up, treat
         * this as loop down only.
         */
        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
            atomic_set(&vha->loop_state, LOOP_DOWN);
            if (!atomic_read(&vha->loop_down_timer))
                atomic_set(&vha->loop_down_timer,
                    LOOP_DOWN_TIME);
            qla2x00_mark_all_devices_lost(vha, 1);
        }

        if (vha->vp_idx) {
            atomic_set(&vha->vp_state, VP_FAILED);
            fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
        }

        if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
            set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

        set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
        set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

        ha->flags.gpsc_supported = 1;
        vha->flags.management_server_logged_in = 0;
        break;

    case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
        if (IS_QLA2100(ha))
            break;

        DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
            "received.\n",
            vha->host_no));
        qla_printk(KERN_INFO, ha,
            "Configuration change detected: value=%x.\n", mb[1]);

        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
            atomic_set(&vha->loop_state, LOOP_DOWN);
            if (!atomic_read(&vha->loop_down_timer))
                atomic_set(&vha->loop_down_timer,
                    LOOP_DOWN_TIME);
            qla2x00_mark_all_devices_lost(vha, 1);
        }

        if (vha->vp_idx) {
            atomic_set(&vha->vp_state, VP_FAILED);
            fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
        }

        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
        set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
        break;

    case MBA_PORT_UPDATE:		/* Port database update */
        /*
         * Handle only global and vn-port update events
         *
         * Relevant inputs:
         * mb[1] = N_Port handle of changed port
         * OR 0xffff for global event
         * mb[2] = New login state
         * 7 = Port logged out
         * mb[3] = LSB is vp_idx, 0xff = all vps
         *
         * Skip processing if:
         *       Event is global, vp_idx is NOT all vps,
         *           vp_idx does not match
         *       Event is not global, vp_idx does not match
         */
        if ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff)
            || (mb[1] != 0xffff)) {
            if (vha->vp_idx != (mb[3] & 0xff))
                break;
        }

        /* Global event -- port logout or port unavailable. */
        if (mb[1] == 0xffff && mb[2] == 0x7) {
            DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
                vha->host_no));
            DEBUG(printk(KERN_INFO
                "scsi(%ld): Port unavailable %04x %04x %04x.\n",
                vha->host_no, mb[1], mb[2], mb[3]));

            if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
                atomic_set(&vha->loop_state, LOOP_DOWN);
                atomic_set(&vha->loop_down_timer,
                    LOOP_DOWN_TIME);
                vha->device_flags |= DFLG_NO_CABLE;
                qla2x00_mark_all_devices_lost(vha, 1);
            }

            if (vha->vp_idx) {
                atomic_set(&vha->vp_state, VP_FAILED);
                fc_vport_set_state(vha->fc_vport,
                    FC_VPORT_FAILED);
                qla2x00_mark_all_devices_lost(vha, 1);
            }

            vha->flags.management_server_logged_in = 0;
            ha->link_data_rate = PORT_SPEED_UNKNOWN;
            break;
        }

        /*
         * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
         * event etc. earlier indicating loop is down) then process
         * it.  Otherwise ignore it and Wait for RSCN to come in.
         */
        atomic_set(&vha->loop_down_timer, 0);
        if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
            atomic_read(&vha->loop_state) != LOOP_DEAD) {
            DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
                "ignored %04x/%04x/%04x.\n", vha->host_no, mb[1],
                mb[2], mb[3]));
            break;
        }

        DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
            vha->host_no));
        DEBUG(printk(KERN_INFO
            "scsi(%ld): Port database changed %04x %04x %04x.\n",
            vha->host_no, mb[1], mb[2], mb[3]));

        /*
         * Mark all devices as missing so we will login again.
         */
        atomic_set(&vha->loop_state, LOOP_UP);

        qla2x00_mark_all_devices_lost(vha, 1);

        vha->flags.rscn_queue_overflow = 1;

        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
        set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
        break;

    case MBA_RSCN_UPDATE:		/* State Change Registration */
        /* Check if the Vport has issued a SCR */
        if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
            break;
        /* Only handle SCNs for our Vport index. */
        if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
            break;

        DEBUG2(printk("scsi(%ld): Asynchronous RSCN UPDATE.\n",
            vha->host_no));
        DEBUG(printk(KERN_INFO
            "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
            vha->host_no, mb[1], mb[2], mb[3]));

        rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
        host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
            | vha->d_id.b.al_pa;
        if (rscn_entry == host_pid) {
            DEBUG(printk(KERN_INFO
                "scsi(%ld): Ignoring RSCN update to local host "
                "port ID (%06x)\n",
                vha->host_no, host_pid));
            break;
        }

        /* Ignore reserved bits from RSCN-payload. */
        rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
        rscn_queue_index = vha->rscn_in_ptr + 1;
        if (rscn_queue_index == MAX_RSCN_COUNT)
            rscn_queue_index = 0;
        if (rscn_queue_index != vha->rscn_out_ptr) {
            vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry;
            vha->rscn_in_ptr = rscn_queue_index;
        } else {
            vha->flags.rscn_queue_overflow = 1;
        }

        atomic_set(&vha->loop_down_timer, 0);
        vha->flags.management_server_logged_in = 0;

        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
        set_bit(RSCN_UPDATE, &vha->dpc_flags);
        qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
        break;

    /* case MBA_RIO_RESPONSE: */
    case MBA_ZIO_RESPONSE:
        DEBUG3(printk("scsi(%ld): [R|Z]IO update completion.\n",
            vha->host_no));

        if (IS_FWI2_CAPABLE(ha))
            qla24xx_process_response_queue(vha, rsp);
        else
            qla2x00_process_response_queue(rsp);
        break;

    case MBA_DISCARD_RND_FRAME:
        DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
            "%04x.\n", vha->host_no, mb[1], mb[2], mb[3]));
        break;

    case MBA_TRACE_NOTIFICATION:
        DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
            vha->host_no, mb[1], mb[2]));
        break;

    case MBA_ISP84XX_ALERT:
        DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- "
            "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));

        spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
        switch (mb[1]) {
        case A84_PANIC_RECOVERY:
            qla_printk(KERN_INFO, ha, "Alert 84XX: panic recovery "
                "%04x %04x\n", mb[2], mb[3]);
            break;
        case A84_OP_LOGIN_COMPLETE:
            ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
            DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:"
                "firmware version %x\n", ha->cs84xx->op_fw_version));
            break;
        case A84_DIAG_LOGIN_COMPLETE:
            ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
            DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:"
                "diagnostic firmware version %x\n",
                ha->cs84xx->diag_fw_version));
            break;
        case A84_GOLD_LOGIN_COMPLETE:
            ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
            ha->cs84xx->fw_update = 1;
            DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: gold "
                "firmware version %x\n",
                ha->cs84xx->gold_fw_version));
            break;
        default:
            qla_printk(KERN_ERR, ha,
                "Alert 84xx: Invalid Alert %04x %04x %04x\n",
                mb[1], mb[2], mb[3]);
        }
        spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
        break;
    case MBA_DCBX_START:
        DEBUG2(printk("scsi(%ld): DCBX Started -- %04x %04x %04x\n",
            vha->host_no, mb[1], mb[2], mb[3]));
        break;
    case MBA_DCBX_PARAM_UPDATE:
        DEBUG2(printk("scsi(%ld): DCBX Parameters Updated -- "
            "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
        break;
    case MBA_FCF_CONF_ERR:
        DEBUG2(printk("scsi(%ld): FCF Configuration Error -- "
            "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
        break;
    case MBA_IDC_COMPLETE:
    case MBA_IDC_NOTIFY:
    case MBA_IDC_TIME_EXT:
        qla81xx_idc_event(vha, mb[0], mb[1]);
        break;
    }

    if (!vha->vp_idx && ha->num_vhosts)
        qla2x00_alert_all_vps(rsp, mb);
}
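/*
 * The various 16-bit RIO completion forms are normalized in the first
 * switch above by rewriting mb[0] to MBA_SCSI_COMPLETION, so the fast-post
 * path only has to walk a single handles[] array.  At the end of the
 * routine, base-port events are also propagated to any NPIV vports via
 * qla2x00_alert_all_vps().
 */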
static void
qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
{
    fc_port_t *fcport = data;
    struct scsi_qla_host *vha = fcport->vha;
    struct qla_hw_data *ha = vha->hw;
    struct req_que *req = NULL;

    if (!ql2xqfulltracking)
        return;

    req = vha->req;
    if (!req)
        return;
    if (req->max_q_depth <= sdev->queue_depth)
        return;

    if (sdev->ordered_tags)
        scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
            sdev->queue_depth + 1);
    else
        scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
            sdev->queue_depth + 1);

    fcport->last_ramp_up = jiffies;

    DEBUG2(qla_printk(KERN_INFO, ha,
        "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
        fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
        sdev->queue_depth));
}
static void
qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
{
    fc_port_t *fcport = data;

    if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
        return;

    DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
        "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
        fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
        sdev->queue_depth));
}
static inline void
qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
    srb_t *sp)
{
    fc_port_t *fcport;
    struct scsi_device *sdev;

    if (!ql2xqfulltracking)
        return;

    sdev = sp->cmd->device;
    if (sdev->queue_depth >= req->max_q_depth)
        return;

    fcport = sp->fcport;
    if (time_before(jiffies,
        fcport->last_ramp_up + ql2xqfullrampup * HZ))
        return;
    if (time_before(jiffies,
        fcport->last_queue_full + ql2xqfullrampup * HZ))
        return;

    starget_for_each_device(sdev->sdev_target, fcport,
        qla2x00_adjust_sdev_qdepth_up);
}
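/*
 * Queue-depth ramp-up is deliberately rate limited: a device's depth is
 * only increased once at least ql2xqfullrampup seconds have elapsed since
 * both the last ramp-up and the last QUEUE FULL seen on the port, and the
 * whole mechanism can be disabled via the ql2xqfulltracking module
 * parameter.
 */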
/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @ha: SCSI driver HA context
 * @index: SRB index
 */
static void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
    struct req_que *req, uint32_t index)
{
    srb_t *sp;
    struct qla_hw_data *ha = vha->hw;

    /* Validate handle. */
    if (index >= MAX_OUTSTANDING_COMMANDS) {
        DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
            vha->host_no, index));
        qla_printk(KERN_WARNING, ha,
            "Invalid SCSI completion handle %d.\n", index);

        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        return;
    }

    sp = req->outstanding_cmds[index];
    if (sp) {
        /* Free outstanding command slot. */
        req->outstanding_cmds[index] = NULL;

        /* Save ISP completion status */
        sp->cmd->result = DID_OK << 16;

        qla2x00_ramp_up_queue_depth(vha, req, sp);
        qla2x00_sp_compl(ha, sp);
    } else {
        DEBUG2(printk("scsi(%ld) Req:%d: Invalid ISP SCSI completion"
            " handle(%d)\n", vha->host_no, req->id, index));
        qla_printk(KERN_WARNING, ha,
            "Invalid ISP SCSI completion handle\n");

        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
    }
}
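/*
 * Both the fast-post path above and the IOCB helper below treat the
 * firmware-supplied completion handle as untrusted input: an out-of-range
 * or stale handle is logged and answered with an ISP abort request rather
 * than a blind array access, since a corrupted handle would otherwise
 * index past req->outstanding_cmds[].
 */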
static srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
    struct req_que *req, void *iocb)
{
    struct qla_hw_data *ha = vha->hw;
    sts_entry_t *pkt = iocb;
    srb_t *sp = NULL;
    uint16_t index;

    index = LSW(pkt->handle);
    if (index >= MAX_OUTSTANDING_COMMANDS) {
        qla_printk(KERN_WARNING, ha,
            "%s: Invalid completion handle (%x).\n", func, index);
        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        goto done;
    }
    sp = req->outstanding_cmds[index];
    if (!sp) {
        qla_printk(KERN_WARNING, ha,
            "%s: Invalid completion handle (%x) -- timed-out.\n", func,
            index);
        return sp;
    }
    if (sp->handle != index) {
        qla_printk(KERN_WARNING, ha,
            "%s: SRB handle (%x) mismatch %x.\n", func, sp->handle,
            index);
        return NULL;
    }
    req->outstanding_cmds[index] = NULL;
done:
    return sp;
}
static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
    const char func[] = "MBX-IOCB";
    const char *type;
    struct qla_hw_data *ha = vha->hw;
    fc_port_t *fcport;
    srb_t *sp;
    struct srb_logio *lio;
    uint16_t data[2];

    sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
    if (!sp)
        return;

    type = NULL;
    lio = sp->ctx;
    switch (lio->ctx.type) {
    case SRB_LOGIN_CMD:
        type = "login";
        break;
    case SRB_LOGOUT_CMD:
        type = "logout";
        break;
    default:
        qla_printk(KERN_WARNING, ha,
            "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
            lio->ctx.type);
        return;
    }

    del_timer(&lio->ctx.timer);
    fcport = sp->fcport;

    data[0] = data[1] = 0;
    if (mbx->entry_status) {
        DEBUG2(printk(KERN_WARNING
            "scsi(%ld:%x): Async-%s error entry - entry-status=%x "
            "status=%x state-flag=%x status-flags=%x.\n",
            fcport->vha->host_no, sp->handle, type,
            mbx->entry_status, le16_to_cpu(mbx->status),
            le16_to_cpu(mbx->state_flags),
            le16_to_cpu(mbx->status_flags)));
        DEBUG2(qla2x00_dump_buffer((uint8_t *)mbx, sizeof(*mbx)));

        data[0] = MBS_COMMAND_ERROR;
        data[1] = lio->flags & SRB_LOGIN_RETRIED ?
            QLA_LOGIO_LOGIN_RETRIED : 0;
        goto done_post_logio_done_work;
    }

    if (!mbx->status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
        DEBUG2(printk(KERN_DEBUG
            "scsi(%ld:%x): Async-%s complete - mbx1=%x.\n",
            fcport->vha->host_no, sp->handle, type,
            le16_to_cpu(mbx->mb1)));

        data[0] = MBS_COMMAND_COMPLETE;
        if (lio->ctx.type == SRB_LOGIN_CMD && le16_to_cpu(mbx->mb1) & BIT_1)
            fcport->flags |= FCF_FCP2_DEVICE;

        goto done_post_logio_done_work;
    }

    data[0] = le16_to_cpu(mbx->mb0);
    switch (data[0]) {
    case MBS_PORT_ID_USED:
        data[1] = le16_to_cpu(mbx->mb1);
        break;
    case MBS_LOOP_ID_USED:
        break;
    default:
        data[0] = MBS_COMMAND_ERROR;
        data[1] = lio->flags & SRB_LOGIN_RETRIED ?
            QLA_LOGIO_LOGIN_RETRIED : 0;
        break;
    }

    DEBUG2(printk(KERN_WARNING
        "scsi(%ld:%x): Async-%s failed - status=%x mb0=%x mb1=%x mb2=%x "
        "mb6=%x mb7=%x.\n",
        fcport->vha->host_no, sp->handle, type, le16_to_cpu(mbx->status),
        le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
        le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
        le16_to_cpu(mbx->mb7)));

done_post_logio_done_work:
    lio->ctx.type == SRB_LOGIN_CMD ?
        qla2x00_post_async_login_done_work(fcport->vha, fcport, data) :
        qla2x00_post_async_logout_done_work(fcport->vha, fcport, data);
}
static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct logio_entry_24xx *logio)
{
    const char func[] = "LOGIO-IOCB";
    const char *type;
    struct qla_hw_data *ha = vha->hw;
    fc_port_t *fcport;
    srb_t *sp;
    struct srb_logio *lio;
    uint16_t data[2];
    uint32_t iop[2];

    sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
    if (!sp)
        return;

    type = NULL;
    lio = sp->ctx;
    switch (lio->ctx.type) {
    case SRB_LOGIN_CMD:
        type = "login";
        break;
    case SRB_LOGOUT_CMD:
        type = "logout";
        break;
    default:
        qla_printk(KERN_WARNING, ha,
            "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
            lio->ctx.type);
        return;
    }

    del_timer(&lio->ctx.timer);
    fcport = sp->fcport;

    data[0] = data[1] = 0;
    if (logio->entry_status) {
        DEBUG2(printk(KERN_WARNING
            "scsi(%ld:%x): Async-%s error entry - entry-status=%x.\n",
            fcport->vha->host_no, sp->handle, type,
            logio->entry_status));
        DEBUG2(qla2x00_dump_buffer((uint8_t *)logio, sizeof(*logio)));

        data[0] = MBS_COMMAND_ERROR;
        data[1] = lio->flags & SRB_LOGIN_RETRIED ?
            QLA_LOGIO_LOGIN_RETRIED : 0;
        goto done_post_logio_done_work;
    }

    if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
        DEBUG2(printk(KERN_DEBUG
            "scsi(%ld:%x): Async-%s complete - iop0=%x.\n",
            fcport->vha->host_no, sp->handle, type,
            le32_to_cpu(logio->io_parameter[0])));

        data[0] = MBS_COMMAND_COMPLETE;
        if (lio->ctx.type == SRB_LOGOUT_CMD)
            goto done_post_logio_done_work;

        iop[0] = le32_to_cpu(logio->io_parameter[0]);
        if (iop[0] & BIT_4) {
            fcport->port_type = FCT_TARGET;
            if (iop[0] & BIT_8)
                fcport->flags |= FCF_FCP2_DEVICE;
        }
        if (iop[0] & BIT_5)
            fcport->port_type = FCT_INITIATOR;
        if (logio->io_parameter[7] || logio->io_parameter[8])
            fcport->supported_classes |= FC_COS_CLASS2;
        if (logio->io_parameter[9] || logio->io_parameter[10])
            fcport->supported_classes |= FC_COS_CLASS3;

        goto done_post_logio_done_work;
    }

    iop[0] = le32_to_cpu(logio->io_parameter[0]);
    iop[1] = le32_to_cpu(logio->io_parameter[1]);
    switch (iop[0]) {
    case LSC_SCODE_PORTID_USED:
        data[0] = MBS_PORT_ID_USED;
        data[1] = LSW(iop[1]);
        break;
    case LSC_SCODE_NPORT_USED:
        data[0] = MBS_LOOP_ID_USED;
        break;
    case LSC_SCODE_CMD_FAILED:
        if ((iop[1] & 0xff) == 0x05) {
            data[0] = MBS_NOT_LOGGED_IN;
            break;
        }
        /* Fall through. */
    default:
        data[0] = MBS_COMMAND_ERROR;
        data[1] = lio->flags & SRB_LOGIN_RETRIED ?
            QLA_LOGIO_LOGIN_RETRIED : 0;
        break;
    }

    DEBUG2(printk(KERN_WARNING
        "scsi(%ld:%x): Async-%s failed - comp=%x iop0=%x iop1=%x.\n",
        fcport->vha->host_no, sp->handle, type,
        le16_to_cpu(logio->comp_status),
        le32_to_cpu(logio->io_parameter[0]),
        le32_to_cpu(logio->io_parameter[1])));

done_post_logio_done_work:
    lio->ctx.type == SRB_LOGIN_CMD ?
        qla2x00_post_async_login_done_work(fcport->vha, fcport, data) :
        qla2x00_post_async_logout_done_work(fcport->vha, fcport, data);
}
/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @ha: SCSI driver HA context
 */
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
    struct scsi_qla_host *vha;
    struct qla_hw_data *ha = rsp->hw;
    struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
    sts_entry_t *pkt;
    uint16_t handle_cnt;
    uint16_t cnt;

    vha = pci_get_drvdata(ha->pdev);

    if (!vha->flags.online)
        return;

    while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
        pkt = (sts_entry_t *)rsp->ring_ptr;

        rsp->ring_index++;
        if (rsp->ring_index == rsp->length) {
            rsp->ring_index = 0;
            rsp->ring_ptr = rsp->ring;
        } else {
            rsp->ring_ptr++;
        }

        if (pkt->entry_status != 0) {
            DEBUG3(printk(KERN_INFO
                "scsi(%ld): Process error entry.\n", vha->host_no));

            qla2x00_error_entry(vha, rsp, pkt);
            ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
            wmb();
            continue;
        }

        switch (pkt->entry_type) {
        case STATUS_TYPE:
            qla2x00_status_entry(vha, rsp, pkt);
            break;
        case STATUS_TYPE_21:
            handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
            for (cnt = 0; cnt < handle_cnt; cnt++) {
                qla2x00_process_completed_request(vha, rsp->req,
                    ((sts21_entry_t *)pkt)->handle[cnt]);
            }
            break;
        case STATUS_TYPE_22:
            handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
            for (cnt = 0; cnt < handle_cnt; cnt++) {
                qla2x00_process_completed_request(vha, rsp->req,
                    ((sts22_entry_t *)pkt)->handle[cnt]);
            }
            break;
        case STATUS_CONT_TYPE:
            qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
            break;
        case MBX_IOCB_TYPE:
            qla2x00_mbx_iocb_entry(vha, rsp->req,
                (struct mbx_entry *)pkt);
            break;
        default:
            /* Type Not Supported. */
            DEBUG4(printk(KERN_WARNING
                "scsi(%ld): Received unknown response pkt type %x "
                "entry status=%x.\n",
                vha->host_no, pkt->entry_type, pkt->entry_status));
            break;
        }
        ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
        wmb();
    }

    /* Adjust ring index */
    WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}
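/*
 * Each consumed entry is stamped with RESPONSE_PROCESSED so a wrapped ring
 * slot can be distinguished from a freshly posted entry, and the response
 * queue out-pointer register is written only once after the loop, keeping
 * the number of MMIO writes per interrupt low.
 */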
static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len,
    struct rsp_que *rsp)
{
    struct scsi_cmnd *cp = sp->cmd;

    if (sense_len >= SCSI_SENSE_BUFFERSIZE)
        sense_len = SCSI_SENSE_BUFFERSIZE;

    sp->request_sense_length = sense_len;
    sp->request_sense_ptr = cp->sense_buffer;
    if (sp->request_sense_length > 32)
        sense_len = 32;

    memcpy(cp->sense_buffer, sense_data, sense_len);

    sp->request_sense_ptr += sense_len;
    sp->request_sense_length -= sense_len;
    if (sp->request_sense_length != 0)
        rsp->status_srb = sp;

    DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
        "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
        cp->device->channel, cp->device->id, cp->device->lun, cp,
        cp->serial_number));
    DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, sense_len));
}
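/*
 * Only the first chunk of sense data arrives with the status IOCB; if
 * request_sense_length is still non-zero after the copy, the SRB is parked
 * in rsp->status_srb and the remainder is gathered from STATUS_CONT_TYPE
 * entries by qla2x00_status_cont_entry() below.
 */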
/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
{
    srb_t *sp;
    fc_port_t *fcport;
    struct scsi_cmnd *cp;
    sts_entry_t *sts;
    struct sts_entry_24xx *sts24;
    uint16_t comp_status;
    uint16_t scsi_status;
    uint8_t lscsi_status;
    int32_t resid;
    uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len;
    uint8_t *rsp_info, *sense_data;
    struct qla_hw_data *ha = vha->hw;
    uint32_t handle;
    uint16_t que;
    struct req_que *req;

    sts = (sts_entry_t *) pkt;
    sts24 = (struct sts_entry_24xx *) pkt;
    if (IS_FWI2_CAPABLE(ha)) {
        comp_status = le16_to_cpu(sts24->comp_status);
        scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
    } else {
        comp_status = le16_to_cpu(sts->comp_status);
        scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
    }
    handle = (uint32_t) LSW(sts->handle);
    que = MSW(sts->handle);
    req = ha->req_q_map[que];
    /* Fast path completion. */
    if (comp_status == CS_COMPLETE && scsi_status == 0) {
        qla2x00_process_completed_request(vha, req, handle);

        return;
    }

    /* Validate handle. */
    if (handle < MAX_OUTSTANDING_COMMANDS) {
        sp = req->outstanding_cmds[handle];
        req->outstanding_cmds[handle] = NULL;
    } else
        sp = NULL;

    if (sp == NULL) {
        DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n",
            vha->host_no));
        qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n");

        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        qla2xxx_wake_dpc(vha);
        return;
    }
    cp = sp->cmd;
    if (cp == NULL) {
        DEBUG2(printk("scsi(%ld): Command already returned back to OS "
            "pkt->handle=%d sp=%p.\n", vha->host_no, handle, sp));
        qla_printk(KERN_WARNING, ha,
            "Command is NULL: already returned to OS (sp=%p)\n", sp);

        return;
    }

    lscsi_status = scsi_status & STATUS_MASK;

    fcport = sp->fcport;

    sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
    if (IS_FWI2_CAPABLE(ha)) {
        if (scsi_status & SS_SENSE_LEN_VALID)
            sense_len = le32_to_cpu(sts24->sense_len);
        if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
            rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
        if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
            resid_len = le32_to_cpu(sts24->rsp_residual_count);
        if (comp_status == CS_DATA_UNDERRUN)
            fw_resid_len = le32_to_cpu(sts24->residual_len);
        rsp_info = sts24->data;
        sense_data = sts24->data;
        host_to_fcp_swap(sts24->data, sizeof(sts24->data));
    } else {
        if (scsi_status & SS_SENSE_LEN_VALID)
            sense_len = le16_to_cpu(sts->req_sense_length);
        if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
            rsp_info_len = le16_to_cpu(sts->rsp_info_len);
        resid_len = le32_to_cpu(sts->residual_length);
        rsp_info = sts->rsp_info;
        sense_data = sts->req_sense_data;
    }

    /* Check for any FCP transport errors. */
    if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
        /* Sense data lies beyond any FCP RESPONSE data. */
        if (IS_FWI2_CAPABLE(ha))
            sense_data += rsp_info_len;
        if (rsp_info_len > 3 && rsp_info[3]) {
            DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
                "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..."
                "retrying command\n", vha->host_no,
                cp->device->channel, cp->device->id,
                cp->device->lun, rsp_info_len, rsp_info[0],
                rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4],
                rsp_info[5], rsp_info[6], rsp_info[7]));

            cp->result = DID_BUS_BUSY << 16;
            qla2x00_sp_compl(ha, sp);
            return;
        }
    }

    /* Check for overrun. */
    if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
        scsi_status & SS_RESIDUAL_OVER)
        comp_status = CS_DATA_OVERRUN;

    /*
     * Based on Host and scsi status generate status code for Linux
     */
    switch (comp_status) {
    case CS_COMPLETE:
    case CS_QUEUE_FULL:
        if (scsi_status == 0) {
            cp->result = DID_OK << 16;
            break;
        }
        if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
            resid = resid_len;
            scsi_set_resid(cp, resid);

            if (!lscsi_status &&
                ((unsigned)(scsi_bufflen(cp) - resid) <
                 cp->underflow)) {
                qla_printk(KERN_INFO, ha,
                    "scsi(%ld:%d:%d:%d): Mid-layer underflow "
                    "detected (%x of %x bytes)...returning "
                    "error status.\n", vha->host_no,
                    cp->device->channel, cp->device->id,
                    cp->device->lun, resid,
                    scsi_bufflen(cp));

                cp->result = DID_ERROR << 16;
                break;
            }
        }
        cp->result = DID_OK << 16 | lscsi_status;

        if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
            DEBUG2(printk(KERN_INFO
                "scsi(%ld): QUEUE FULL status detected "
                "0x%x-0x%x.\n", vha->host_no, comp_status,
                scsi_status));

            /* Adjust queue depth for all luns on the port. */
            if (!ql2xqfulltracking)
                break;
            fcport->last_queue_full = jiffies;
            starget_for_each_device(cp->device->sdev_target,
                fcport, qla2x00_adjust_sdev_qdepth_down);
            break;
        }
        if (lscsi_status != SS_CHECK_CONDITION)
            break;

        memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
        if (!(scsi_status & SS_SENSE_LEN_VALID))
            break;

        qla2x00_handle_sense(sp, sense_data, sense_len, rsp);
        break;

    case CS_DATA_UNDERRUN:
        DEBUG2(printk(KERN_INFO
            "scsi(%ld:%d:%d) UNDERRUN status detected 0x%x-0x%x. "
            "resid=0x%x fw_resid=0x%x cdb=0x%x os_underflow=0x%x\n",
            vha->host_no, cp->device->id, cp->device->lun, comp_status,
            scsi_status, resid_len, fw_resid_len, cp->cmnd[0],
            cp->underflow));

        /* Use F/W calculated residual length. */
        resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
        scsi_set_resid(cp, resid);
        if (scsi_status & SS_RESIDUAL_UNDER) {
            if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
                DEBUG2(printk(
                    "scsi(%ld:%d:%d:%d) Dropped frame(s) "
                    "detected (%x of %x bytes)...residual "
                    "length mismatch...retrying command.\n",
                    vha->host_no, cp->device->channel,
                    cp->device->id, cp->device->lun, resid,
                    scsi_bufflen(cp)));

                cp->result = DID_ERROR << 16 | lscsi_status;
                break;
            }

            if (!lscsi_status &&
                ((unsigned)(scsi_bufflen(cp) - resid) <
                cp->underflow)) {
                qla_printk(KERN_INFO, ha,
                    "scsi(%ld:%d:%d:%d): Mid-layer underflow "
                    "detected (%x of %x bytes)...returning "
                    "error status.\n", vha->host_no,
                    cp->device->channel, cp->device->id,
                    cp->device->lun, resid, scsi_bufflen(cp));

                cp->result = DID_ERROR << 16;
                break;
            }
        } else if (!lscsi_status) {
            DEBUG2(printk(
                "scsi(%ld:%d:%d:%d) Dropped frame(s) detected "
                "(%x of %x bytes)...firmware reported underrun..."
                "retrying command.\n", vha->host_no,
                cp->device->channel, cp->device->id,
                cp->device->lun, resid, scsi_bufflen(cp)));

            cp->result = DID_ERROR << 16;
            break;
        }

        cp->result = DID_OK << 16 | lscsi_status;

        /*
         * Check to see if SCSI Status is non zero. If so report SCSI
         * Status.
         */
        if (lscsi_status != 0) {
            if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
                DEBUG2(printk(KERN_INFO
                    "scsi(%ld): QUEUE FULL status detected "
                    "0x%x-0x%x.\n", vha->host_no, comp_status,
                    scsi_status));

                /*
                 * Adjust queue depth for all luns on the
                 * port.
                 */
                if (!ql2xqfulltracking)
                    break;
                fcport->last_queue_full = jiffies;
                starget_for_each_device(
                    cp->device->sdev_target, fcport,
                    qla2x00_adjust_sdev_qdepth_down);
                break;
            }
            if (lscsi_status != SS_CHECK_CONDITION)
                break;

            memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
            if (!(scsi_status & SS_SENSE_LEN_VALID))
                break;

            qla2x00_handle_sense(sp, sense_data, sense_len, rsp);
        }
        break;

    case CS_DATA_OVERRUN:
        DEBUG2(printk(KERN_INFO
            "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n",
            vha->host_no, cp->device->id, cp->device->lun, comp_status,
            scsi_status));
        DEBUG2(printk(KERN_INFO
            "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
            cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
            cp->cmnd[4], cp->cmnd[5]));
        DEBUG2(printk(KERN_INFO
            "PID=0x%lx req=0x%x xtra=0x%x -- returning DID_ERROR "
            "status!\n",
            cp->serial_number, scsi_bufflen(cp), resid_len));

        cp->result = DID_ERROR << 16;
        break;

    case CS_PORT_LOGGED_OUT:
    case CS_PORT_CONFIG_CHG:
    case CS_PORT_BUSY:
    case CS_INCOMPLETE:
    case CS_PORT_UNAVAILABLE:
        /*
         * If the port is in Target Down state, return all IOs for this
         * Target with DID_NO_CONNECT ELSE Queue the IOs in the
         * retry_queue.
         */
        DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down "
            "pid=%ld, compl status=0x%x, port state=0x%x\n",
            vha->host_no, cp->device->id, cp->device->lun,
            cp->serial_number, comp_status,
            atomic_read(&fcport->state)));

        /*
         * We are going to have the fc class block the rport
         * while we try to recover so instruct the mid layer
         * to requeue until the class decides how to handle this.
         */
        cp->result = DID_TRANSPORT_DISRUPTED << 16;
        if (atomic_read(&fcport->state) == FCS_ONLINE)
            qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
        break;

    case CS_RESET:
        DEBUG2(printk(KERN_INFO
            "scsi(%ld): RESET status detected 0x%x-0x%x.\n",
            vha->host_no, comp_status, scsi_status));

        cp->result = DID_RESET << 16;
        break;

    case CS_ABORTED:
        /*
         * hv2.19.12 - DID_ABORT does not retry the request if we
         * aborted this request then abort otherwise it must be a
         * reset.
         */
        DEBUG2(printk(KERN_INFO
            "scsi(%ld): ABORT status detected 0x%x-0x%x.\n",
            vha->host_no, comp_status, scsi_status));

        cp->result = DID_RESET << 16;
        break;

    case CS_TIMEOUT:
        /*
         * We are going to have the fc class block the rport
         * while we try to recover so instruct the mid layer
         * to requeue until the class decides how to handle this.
         */
        cp->result = DID_TRANSPORT_DISRUPTED << 16;

        if (IS_FWI2_CAPABLE(ha)) {
            DEBUG2(printk(KERN_INFO
                "scsi(%ld:%d:%d:%d): TIMEOUT status detected "
                "0x%x-0x%x\n", vha->host_no, cp->device->channel,
                cp->device->id, cp->device->lun, comp_status,
                scsi_status));
            break;
        }
        DEBUG2(printk(KERN_INFO
            "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x "
            "sflags=%x.\n", vha->host_no, cp->device->channel,
            cp->device->id, cp->device->lun, comp_status, scsi_status,
            le16_to_cpu(sts->status_flags)));

        /* Check to see if logout occurred. */
        if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
            qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
        break;

    default:
        DEBUG3(printk("scsi(%ld): Error detected (unknown status) "
            "0x%x-0x%x.\n", vha->host_no, comp_status, scsi_status));
        qla_printk(KERN_INFO, ha,
            "Unknown status detected 0x%x-0x%x.\n",
            comp_status, scsi_status);

        cp->result = DID_ERROR << 16;
        break;
    }

    /* Place command on done queue. */
    if (rsp->status_srb == NULL)
        qla2x00_sp_compl(ha, sp);
}
/**
 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 *
 * Extended sense data.
 */
static void
qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
{
    uint8_t sense_sz = 0;
    struct qla_hw_data *ha = rsp->hw;
    srb_t *sp = rsp->status_srb;
    struct scsi_cmnd *cp;

    if (sp != NULL && sp->request_sense_length != 0) {
        cp = sp->cmd;
        if (cp == NULL) {
            DEBUG2(printk("%s(): Cmd already returned back to OS "
                "sp=%p.\n", __func__, sp));
            qla_printk(KERN_INFO, ha,
                "cmd is NULL: already returned to OS (sp=%p)\n",
                sp);

            rsp->status_srb = NULL;
            return;
        }

        if (sp->request_sense_length > sizeof(pkt->data)) {
            sense_sz = sizeof(pkt->data);
        } else {
            sense_sz = sp->request_sense_length;
        }

        /* Move sense data. */
        if (IS_FWI2_CAPABLE(ha))
            host_to_fcp_swap(pkt->data, sizeof(pkt->data));
        memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
        DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz));

        sp->request_sense_ptr += sense_sz;
        sp->request_sense_length -= sense_sz;

        /* Place command on done queue. */
        if (sp->request_sense_length == 0) {
            rsp->status_srb = NULL;
            qla2x00_sp_compl(ha, sp);
        }
    }
}
/**
 * qla2x00_error_entry() - Process an error entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
{
    srb_t *sp;
    struct qla_hw_data *ha = vha->hw;
    uint32_t handle = LSW(pkt->handle);
    uint16_t que = MSW(pkt->handle);
    struct req_que *req = ha->req_q_map[que];
#if defined(QL_DEBUG_LEVEL_2)
    if (pkt->entry_status & RF_INV_E_ORDER)
        qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
    else if (pkt->entry_status & RF_INV_E_COUNT)
        qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__);
    else if (pkt->entry_status & RF_INV_E_PARAM)
        qla_printk(KERN_ERR, ha,
            "%s: Invalid Entry Parameter\n", __func__);
    else if (pkt->entry_status & RF_INV_E_TYPE)
        qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__);
    else if (pkt->entry_status & RF_BUSY)
        qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__);
    else
        qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__);
#endif

    /* Validate handle. */
    if (handle < MAX_OUTSTANDING_COMMANDS)
        sp = req->outstanding_cmds[handle];
    else
        sp = NULL;

    if (sp) {
        /* Free outstanding command slot. */
        req->outstanding_cmds[handle] = NULL;

        /* Bad payload or header */
        if (pkt->entry_status &
            (RF_INV_E_ORDER | RF_INV_E_COUNT |
             RF_INV_E_PARAM | RF_INV_E_TYPE)) {
            sp->cmd->result = DID_ERROR << 16;
        } else if (pkt->entry_status & RF_BUSY) {
            sp->cmd->result = DID_BUS_BUSY << 16;
        } else {
            sp->cmd->result = DID_ERROR << 16;
        }
        qla2x00_sp_compl(ha, sp);

    } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
        COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) {
        DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
            vha->host_no));
        qla_printk(KERN_WARNING, ha,
            "Error entry - invalid handle\n");

        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        qla2xxx_wake_dpc(vha);
    }
}
/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
    uint16_t cnt;
    uint16_t __iomem *wptr;
    struct qla_hw_data *ha = vha->hw;
    struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

    /* Load return mailbox registers. */
    ha->flags.mbox_int = 1;
    ha->mailbox_out[0] = mb0;
    wptr = (uint16_t __iomem *)&reg->mailbox1;

    for (cnt = 1; cnt < ha->mbx_count; cnt++) {
        ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
        wptr++;
    }

    if (ha->mcp) {
        DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
            __func__, vha->host_no, ha->mcp->mb[0]));
    } else {
        DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
            __func__, vha->host_no));
    }
}
/**
 * qla24xx_process_response_queue() - Process response queue entries.
 * @ha: SCSI driver HA context
 */
void qla24xx_process_response_queue(struct scsi_qla_host *vha,
    struct rsp_que *rsp)
{
    struct sts_entry_24xx *pkt;

    if (!vha->flags.online)
        return;

    while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
        pkt = (struct sts_entry_24xx *)rsp->ring_ptr;

        rsp->ring_index++;
        if (rsp->ring_index == rsp->length) {
            rsp->ring_index = 0;
            rsp->ring_ptr = rsp->ring;
        } else {
            rsp->ring_ptr++;
        }

        if (pkt->entry_status != 0) {
            DEBUG3(printk(KERN_INFO
                "scsi(%ld): Process error entry.\n", vha->host_no));

            qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
            ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
            wmb();
            continue;
        }

        switch (pkt->entry_type) {
        case STATUS_TYPE:
            qla2x00_status_entry(vha, rsp, pkt);
            break;
        case STATUS_CONT_TYPE:
            qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
            break;
        case VP_RPT_ID_IOCB_TYPE:
            qla24xx_report_id_acquisition(vha,
                (struct vp_rpt_id_entry_24xx *)pkt);
            break;
        case LOGINOUT_PORT_IOCB_TYPE:
            qla24xx_logio_entry(vha, rsp->req,
                (struct logio_entry_24xx *)pkt);
            break;
        default:
            /* Type Not Supported. */
            DEBUG4(printk(KERN_WARNING
                "scsi(%ld): Received unknown response pkt type %x "
                "entry status=%x.\n",
                vha->host_no, pkt->entry_type, pkt->entry_status));
            break;
        }
        ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
        wmb();
    }

    /* Adjust ring index */
    WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
}
static void
qla2xxx_check_risc_status(scsi_qla_host_t *vha)
{
    int rval;
    uint32_t cnt;
    struct qla_hw_data *ha = vha->hw;
    struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

    if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
        return;

    rval = QLA_SUCCESS;
    WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
    RD_REG_DWORD(&reg->iobase_addr);
    WRT_REG_DWORD(&reg->iobase_window, 0x0001);
    for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
        rval == QLA_SUCCESS; cnt--) {
        if (cnt) {
            WRT_REG_DWORD(&reg->iobase_window, 0x0001);
            udelay(10);
        } else
            rval = QLA_FUNCTION_TIMEOUT;
    }
    if (rval == QLA_SUCCESS)
        goto next_test;

    WRT_REG_DWORD(&reg->iobase_window, 0x0003);
    for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
        rval == QLA_SUCCESS; cnt--) {
        if (cnt) {
            WRT_REG_DWORD(&reg->iobase_window, 0x0003);
            udelay(10);
        } else
            rval = QLA_FUNCTION_TIMEOUT;
    }
    if (rval != QLA_SUCCESS)
        goto done;

next_test:
    if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
        qla_printk(KERN_INFO, ha, "Additional code -- 0x55AA.\n");

done:
    WRT_REG_DWORD(&reg->iobase_window, 0x0000);
    RD_REG_DWORD(&reg->iobase_window);
}
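/*
 * This diagnostic pokes the ISP25xx/ISP81xx I/O base window registers to
 * see whether the paused RISC left an additional status code behind; it is
 * purely informational and always restores the window selection to 0
 * before returning.
 */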
/**
 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla24xx_intr_handler(int irq, void *dev_id)
{
    scsi_qla_host_t *vha;
    struct qla_hw_data *ha;
    struct device_reg_24xx __iomem *reg;
    int status;
    unsigned long iter;
    uint32_t stat;
    uint32_t hccr;
    uint16_t mb[4];
    struct rsp_que *rsp;
    unsigned long flags;

    rsp = (struct rsp_que *) dev_id;
    if (!rsp) {
        printk(KERN_INFO
            "%s(): NULL response queue pointer\n", __func__);
        return IRQ_NONE;
    }

    ha = rsp->hw;
    reg = &ha->iobase->isp24;
    status = 0;

    spin_lock_irqsave(&ha->hardware_lock, flags);
    vha = pci_get_drvdata(ha->pdev);
    for (iter = 50; iter--; ) {
        stat = RD_REG_DWORD(&reg->host_status);
        if (stat & HSRX_RISC_PAUSED) {
            if (pci_channel_offline(ha->pdev))
                break;

            hccr = RD_REG_DWORD(&reg->hccr);

            qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
                "Dumping firmware!\n", hccr);

            qla2xxx_check_risc_status(vha);

            ha->isp_ops->fw_dump(vha, 1);
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
            break;
        } else if ((stat & HSRX_RISC_INT) == 0)
            break;

        switch (stat & 0xff) {
        case 0x1:
        case 0x2:
        case 0x10:
        case 0x11:
            qla24xx_mbx_completion(vha, MSW(stat));
            status |= MBX_INTERRUPT;
            break;
        case 0x12:
            mb[0] = MSW(stat);
            mb[1] = RD_REG_WORD(&reg->mailbox1);
            mb[2] = RD_REG_WORD(&reg->mailbox2);
            mb[3] = RD_REG_WORD(&reg->mailbox3);
            qla2x00_async_event(vha, rsp, mb);
            break;
        case 0x13:
        case 0x14:
            qla24xx_process_response_queue(vha, rsp);
            break;
        default:
            DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
                "(%d).\n",
                vha->host_no, stat & 0xff));
            break;
        }
        WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
        RD_REG_DWORD_RELAXED(&reg->hccr);
    }
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
        (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
        set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
        complete(&ha->mbx_intr_comp);
    }

    return IRQ_HANDLED;
}
static irqreturn_t
qla24xx_msix_rsp_q(int irq, void *dev_id)
{
    struct qla_hw_data *ha;
    struct rsp_que *rsp;
    struct device_reg_24xx __iomem *reg;
    struct scsi_qla_host *vha;

    rsp = (struct rsp_que *) dev_id;
    if (!rsp) {
        printk(KERN_INFO
            "%s(): NULL response queue pointer\n", __func__);
        return IRQ_NONE;
    }
    ha = rsp->hw;
    reg = &ha->iobase->isp24;

    spin_lock_irq(&ha->hardware_lock);

    vha = pci_get_drvdata(ha->pdev);
    qla24xx_process_response_queue(vha, rsp);
    if (!ha->mqenable) {
        WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
        RD_REG_DWORD_RELAXED(&reg->hccr);
    }
    spin_unlock_irq(&ha->hardware_lock);

    return IRQ_HANDLED;
}
static irqreturn_t
qla25xx_msix_rsp_q(int irq, void *dev_id)
{
    struct qla_hw_data *ha;
    struct rsp_que *rsp;

    rsp = (struct rsp_que *) dev_id;
    if (!rsp) {
        printk(KERN_INFO
            "%s(): NULL response queue pointer\n", __func__);
        return IRQ_NONE;
    }
    ha = rsp->hw;

    queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);

    return IRQ_HANDLED;
}
static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
    scsi_qla_host_t *vha;
    struct qla_hw_data *ha;
    struct rsp_que *rsp;
    struct device_reg_24xx __iomem *reg;
    int status;
    uint32_t stat;
    uint32_t hccr;
    uint16_t mb[4];

    rsp = (struct rsp_que *) dev_id;
    if (!rsp) {
        DEBUG(printk(
            "%s(): NULL response queue pointer\n", __func__));
        return IRQ_NONE;
    }
    ha = rsp->hw;
    reg = &ha->iobase->isp24;
    status = 0;

    spin_lock_irq(&ha->hardware_lock);
    vha = pci_get_drvdata(ha->pdev);
    do {
        stat = RD_REG_DWORD(&reg->host_status);
        if (stat & HSRX_RISC_PAUSED) {
            if (pci_channel_offline(ha->pdev))
                break;

            hccr = RD_REG_DWORD(&reg->hccr);

            qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
                "Dumping firmware!\n", hccr);

            qla2xxx_check_risc_status(vha);

            ha->isp_ops->fw_dump(vha, 1);
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
            break;
        } else if ((stat & HSRX_RISC_INT) == 0)
            break;

        switch (stat & 0xff) {
        case 0x1:
        case 0x2:
        case 0x10:
        case 0x11:
            qla24xx_mbx_completion(vha, MSW(stat));
            status |= MBX_INTERRUPT;
            break;
        case 0x12:
            mb[0] = MSW(stat);
            mb[1] = RD_REG_WORD(&reg->mailbox1);
            mb[2] = RD_REG_WORD(&reg->mailbox2);
            mb[3] = RD_REG_WORD(&reg->mailbox3);
            qla2x00_async_event(vha, rsp, mb);
            break;
        case 0x13:
        case 0x14:
            qla24xx_process_response_queue(vha, rsp);
            break;
        default:
            DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
                "(%d).\n",
                vha->host_no, stat & 0xff));
            break;
        }
        WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
    } while (0);
    spin_unlock_irq(&ha->hardware_lock);

    if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
        (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
        set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
        complete(&ha->mbx_intr_comp);
    }

    return IRQ_HANDLED;
}
/* Interrupt handling helpers. */

struct qla_init_msix_entry {
    const char *name;
    irq_handler_t handler;
};

static struct qla_init_msix_entry msix_entries[3] = {
    { "qla2xxx (default)", qla24xx_msix_default },
    { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
    { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
};
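/*
 * Vector 0 ("default") carries mailbox completions and asynchronous
 * events, vector 1 ("rsp_q") services the base response queue, and the
 * "multiq" entry is used for additional response queues created when
 * multiqueue mode is enabled (see qla25xx_request_irq() below).
 */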
static void
qla24xx_disable_msix(struct qla_hw_data *ha)
{
    int i;
    struct qla_msix_entry *qentry;

    for (i = 0; i < ha->msix_count; i++) {
        qentry = &ha->msix_entries[i];
        if (qentry->have_irq)
            free_irq(qentry->vector, qentry->rsp);
    }
    pci_disable_msix(ha->pdev);
    kfree(ha->msix_entries);
    ha->msix_entries = NULL;
    ha->flags.msix_enabled = 0;
}
static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
#define MIN_MSIX_COUNT	2
    int i, ret;
    struct msix_entry *entries;
    struct qla_msix_entry *qentry;

    entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
        GFP_KERNEL);
    if (!entries)
        return -ENOMEM;

    for (i = 0; i < ha->msix_count; i++)
        entries[i].entry = i;

    ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
    if (ret) {
        if (ret < MIN_MSIX_COUNT)
            goto msix_failed;

        qla_printk(KERN_WARNING, ha,
            "MSI-X: Failed to enable support -- %d/%d\n"
            " Retry with %d vectors\n", ha->msix_count, ret, ret);
        ha->msix_count = ret;
        ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
        if (ret) {
msix_failed:
            qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable"
                " support, giving up -- %d/%d\n",
                ha->msix_count, ret);
            goto msix_out;
        }
        ha->max_rsp_queues = ha->msix_count - 1;
    }
    ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
        ha->msix_count, GFP_KERNEL);
    if (!ha->msix_entries) {
        ret = -ENOMEM;
        goto msix_out;
    }
    ha->flags.msix_enabled = 1;

    for (i = 0; i < ha->msix_count; i++) {
        qentry = &ha->msix_entries[i];
        qentry->vector = entries[i].vector;
        qentry->entry = entries[i].entry;
        qentry->have_irq = 0;
        qentry->rsp = NULL;
    }

    /* Enable MSI-X vectors for the base queue */
    for (i = 0; i < 2; i++) {
        qentry = &ha->msix_entries[i];
        ret = request_irq(qentry->vector, msix_entries[i].handler,
            0, msix_entries[i].name, rsp);
        if (ret) {
            qla_printk(KERN_WARNING, ha,
                "MSI-X: Unable to register handler -- %x/%d.\n",
                qentry->vector, ret);
            qla24xx_disable_msix(ha);
            goto msix_out;
        }
        qentry->have_irq = 1;
        qentry->rsp = rsp;
        rsp->msix = qentry;
    }

    /* Enable MSI-X vector for response queue update for queue 0 */
    if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
        ha->mqenable = 1;

msix_out:
    kfree(entries);
    return ret;
}
int
qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
    int ret;
    device_reg_t __iomem *reg = ha->iobase;

    /* If possible, enable MSI-X. */
    if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
        !IS_QLA8432(ha) && !IS_QLA8001(ha))
        goto skip_msix;

    if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
        (ha->pdev->subsystem_device == 0x7040 ||
        ha->pdev->subsystem_device == 0x7041 ||
        ha->pdev->subsystem_device == 0x1705)) {
        DEBUG2(qla_printk(KERN_WARNING, ha,
            "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X,0x%X).\n",
            ha->pdev->subsystem_vendor,
            ha->pdev->subsystem_device));

        goto skip_msi;
    }

    if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
        !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
        DEBUG2(qla_printk(KERN_WARNING, ha,
            "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
            ha->pdev->revision, ha->fw_attributes));

        goto skip_msix;
    }

    ret = qla24xx_enable_msix(ha, rsp);
    if (!ret) {
        DEBUG2(qla_printk(KERN_INFO, ha,
            "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
            ha->fw_attributes));
        goto clear_risc_ints;
    }
    qla_printk(KERN_WARNING, ha,
        "MSI-X: Falling back-to INTa mode -- %d.\n", ret);
skip_msix:

    if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
        !IS_QLA8001(ha))
        goto skip_msi;

    ret = pci_enable_msi(ha->pdev);
    if (!ret) {
        DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
        ha->flags.msi_enabled = 1;
    }
skip_msi:

    ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
        IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp);
    if (ret) {
        qla_printk(KERN_WARNING, ha,
            "Failed to reserve interrupt %d already in use.\n",
            ha->pdev->irq);
        goto fail;
    }
    ha->flags.inta_enabled = 1;
clear_risc_ints:

    /*
     * FIXME: Noted that 8014s were being dropped during NK testing.
     * Timing deltas during MSI-X/INTa transitions?
     */
    if (IS_QLA81XX(ha))
        goto fail;
    spin_lock_irq(&ha->hardware_lock);
    if (IS_FWI2_CAPABLE(ha)) {
        WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
        WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
    } else {
        WRT_REG_WORD(&reg->isp.semaphore, 0);
        WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
        WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
    }
    spin_unlock_irq(&ha->hardware_lock);

fail:
    return ret;
}
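/*
 * Interrupt setup degrades gracefully: MSI-X is attempted first on the
 * ISPs that support it, then plain MSI, and finally a shared INTx line;
 * whichever mode succeeds, any latched RISC/host interrupt status is
 * cleared under the hardware lock before returning.
 */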
void
qla2x00_free_irqs(scsi_qla_host_t *vha)
{
    struct qla_hw_data *ha = vha->hw;
    struct rsp_que *rsp = ha->rsp_q_map[0];

    if (ha->flags.msix_enabled)
        qla24xx_disable_msix(ha);
    else if (ha->flags.inta_enabled) {
        free_irq(ha->pdev->irq, rsp);
        pci_disable_msi(ha->pdev);
    }
}
int qla25xx_request_irq(struct rsp_que *rsp)
{
    struct qla_hw_data *ha = rsp->hw;
    struct qla_init_msix_entry *intr = &msix_entries[2];
    struct qla_msix_entry *msix = rsp->msix;
    int ret;

    ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
    if (ret) {
        qla_printk(KERN_WARNING, ha,
            "MSI-X: Unable to register handler -- %x/%d.\n",