/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2008 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */

#include <linux/delay.h>
#include <scsi/scsi_tcq.h>
static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_process_completed_request(struct scsi_qla_host *,
    struct req_que *, uint32_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(scsi_qla_host_t *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
    sts_entry_t *);
static struct scsi_qla_host *qla2x00_get_rsp_host(struct rsp_que *);
/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
    scsi_qla_host_t *vha;
    struct qla_hw_data *ha;
    struct device_reg_2xxx __iomem *reg;
    int status;
    unsigned long iter;
    uint16_t hccr;
    uint16_t mb[4];
    struct rsp_que *rsp;

    rsp = (struct rsp_que *) dev_id;
    if (!rsp) {
        printk(KERN_INFO
            "%s(): NULL response queue pointer\n", __func__);
        return (IRQ_NONE);
    }

    ha = rsp->hw;
    reg = &ha->iobase->isp;
    status = 0;

    spin_lock(&ha->hardware_lock);
    vha = qla2x00_get_rsp_host(rsp);
    for (iter = 50; iter--; ) {
        hccr = RD_REG_WORD(&reg->hccr);
        if (hccr & HCCR_RISC_PAUSE) {
            if (pci_channel_offline(ha->pdev))
                break;

            /*
             * Issue a "HARD" reset in order for the RISC interrupt
             * bit to be cleared.  Schedule a big hammer to get
             * out of the RISC PAUSED state.
             */
            WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
            RD_REG_WORD(&reg->hccr);

            ha->isp_ops->fw_dump(vha, 1);
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
            break;
        } else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
            break;

        if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
            WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
            RD_REG_WORD(&reg->hccr);

            /* Get mailbox data. */
            mb[0] = RD_MAILBOX_REG(ha, reg, 0);
            if (mb[0] > 0x3fff && mb[0] < 0x8000) {
                qla2x00_mbx_completion(vha, mb[0]);
                status |= MBX_INTERRUPT;
            } else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
                mb[1] = RD_MAILBOX_REG(ha, reg, 1);
                mb[2] = RD_MAILBOX_REG(ha, reg, 2);
                mb[3] = RD_MAILBOX_REG(ha, reg, 3);
                qla2x00_async_event(vha, rsp, mb);
            } else {
                /*EMPTY*/
                DEBUG2(printk("scsi(%ld): Unrecognized "
                    "interrupt type (%d).\n",
                    vha->host_no, mb[0]));
            }
            /* Release mailbox registers. */
            WRT_REG_WORD(&reg->semaphore, 0);
            RD_REG_WORD(&reg->semaphore);
        } else {
            qla2x00_process_response_queue(rsp);

            WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
            RD_REG_WORD(&reg->hccr);
        }
    }
    spin_unlock(&ha->hardware_lock);

    if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
        (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
        set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
        complete(&ha->mbx_intr_comp);
    }

    return (IRQ_HANDLED);
}
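/*
 * Note on the legacy (INTx) handlers above and below: each pass of the
 * service loop is bounded at 50 iterations, and every interrupt source is
 * classified either as a mailbox/asynchronous-event interrupt (dispatched
 * to qla2x00_mbx_completion()/qla2x00_async_event()) or as a response-queue
 * update handed to qla2x00_process_response_queue().  A waiter blocked on a
 * mailbox command is woken through ha->mbx_intr_comp once MBX_INTERRUPT has
 * been recorded.
 */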
/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
    scsi_qla_host_t *vha;
    struct device_reg_2xxx __iomem *reg;
    int status;
    unsigned long iter;
    uint32_t stat;
    uint16_t hccr;
    uint16_t mb[4];
    struct rsp_que *rsp;
    struct qla_hw_data *ha;

    rsp = (struct rsp_que *) dev_id;
    if (!rsp) {
        printk(KERN_INFO
            "%s(): NULL response queue pointer\n", __func__);
        return (IRQ_NONE);
    }

    ha = rsp->hw;
    reg = &ha->iobase->isp;
    status = 0;

    spin_lock(&ha->hardware_lock);
    vha = qla2x00_get_rsp_host(rsp);
    for (iter = 50; iter--; ) {
        stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
        if (stat & HSR_RISC_PAUSED) {
            if (pci_channel_offline(ha->pdev))
                break;

            hccr = RD_REG_WORD(&reg->hccr);
            if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
                qla_printk(KERN_INFO, ha, "Parity error -- "
                    "HCCR=%x, Dumping firmware!\n", hccr);
            else
                qla_printk(KERN_INFO, ha, "RISC paused -- "
                    "HCCR=%x, Dumping firmware!\n", hccr);

            /*
             * Issue a "HARD" reset in order for the RISC
             * interrupt bit to be cleared.  Schedule a big
             * hammer to get out of the RISC PAUSED state.
             */
            WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
            RD_REG_WORD(&reg->hccr);

            ha->isp_ops->fw_dump(vha, 1);
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
            break;
        } else if ((stat & HSR_RISC_INT) == 0)
            break;

        switch (stat & 0xff) {
        case 0x1:
        case 0x2:
        case 0x10:
        case 0x11:
            qla2x00_mbx_completion(vha, MSW(stat));
            status |= MBX_INTERRUPT;

            /* Release mailbox registers. */
            WRT_REG_WORD(&reg->semaphore, 0);
            break;
        case 0x12:
            mb[0] = MSW(stat);
            mb[1] = RD_MAILBOX_REG(ha, reg, 1);
            mb[2] = RD_MAILBOX_REG(ha, reg, 2);
            mb[3] = RD_MAILBOX_REG(ha, reg, 3);
            qla2x00_async_event(vha, rsp, mb);
            break;
        case 0x13:
            qla2x00_process_response_queue(rsp);
            break;
        case 0x15:
            mb[0] = MBA_CMPLT_1_16BIT;
            mb[1] = MSW(stat);
            qla2x00_async_event(vha, rsp, mb);
            break;
        case 0x16:
            mb[0] = MBA_SCSI_COMPLETION;
            mb[1] = MSW(stat);
            mb[2] = RD_MAILBOX_REG(ha, reg, 2);
            qla2x00_async_event(vha, rsp, mb);
            break;
        default:
            DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
                "(%d).\n",
                vha->host_no, stat & 0xff));
            break;
        }
        WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
        RD_REG_WORD_RELAXED(&reg->hccr);
    }
    spin_unlock(&ha->hardware_lock);

    if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
        (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
        set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
        complete(&ha->mbx_intr_comp);
    }

    return (IRQ_HANDLED);
}
/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
    uint16_t cnt;
    uint16_t __iomem *wptr;
    struct qla_hw_data *ha = vha->hw;
    struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

    /* Load return mailbox registers. */
    ha->flags.mbox_int = 1;
    ha->mailbox_out[0] = mb0;
    wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);

    for (cnt = 1; cnt < ha->mbx_count; cnt++) {
        if (IS_QLA2200(ha) && cnt == 8)
            wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
        if (cnt == 4 || cnt == 5)
            ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
        else
            ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

        wptr++;
    }

    if (ha->mcp) {
        DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
            __func__, vha->host_no, ha->mcp->mb[0]));
    } else {
        DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
            __func__, vha->host_no));
    }
}
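/*
 * qla81xx_idc_event() below snapshots mailbox registers 1-7 for an
 * Inter-Driver Communication (IDC) asynchronous event.  An acknowledgement
 * is posted (via qla2x00_post_idc_ack_work()) only when the event is an
 * MBA_IDC_NOTIFY and the firmware supplied a non-zero ACK timeout in the
 * event descriptor.
 */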
static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
    static char *event[] =
        { "Complete", "Request Notification", "Time Extension" };
    int rval;
    struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
    uint16_t __iomem *wptr;
    uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

    /* Seed data -- mailbox1 -> mailbox7. */
    wptr = (uint16_t __iomem *)&reg24->mailbox1;
    for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
        mb[cnt] = RD_REG_WORD(wptr);

    DEBUG2(printk("scsi(%ld): Inter-Driver Communication %s -- "
        "%04x %04x %04x %04x %04x %04x %04x.\n", vha->host_no,
        event[aen & 0xff],
        mb[0], mb[1], mb[2], mb[3], mb[4], mb[5], mb[6]));

    /* Acknowledgement needed? [Notify && non-zero timeout]. */
    timeout = (descr >> 8) & 0xf;
    if (aen != MBA_IDC_NOTIFY || !timeout)
        return;

    DEBUG2(printk("scsi(%ld): Inter-Driver Communication %s -- "
        "ACK timeout=%d.\n", vha->host_no, event[aen & 0xff], timeout));

    rval = qla2x00_post_idc_ack_work(vha, mb);
    if (rval != QLA_SUCCESS)
        qla_printk(KERN_WARNING, vha->hw,
            "IDC failed to post ACK.\n");
}
/**
 * qla2x00_async_event() - Process asynchronous events.
 * @ha: SCSI driver HA context
 * @mb: Mailbox registers (0 - 3)
 */
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
#define LS_UNKNOWN	2
    static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
    char *link_speed;
    uint16_t handle_cnt;
    uint16_t cnt;
    uint32_t handles[5];
    struct qla_hw_data *ha = vha->hw;
    struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
    uint32_t rscn_entry, host_pid;
    uint8_t rscn_queue_index;
    unsigned long flags;

    /* Setup to process RIO completion. */
    handle_cnt = 0;
    switch (mb[0]) {
    case MBA_SCSI_COMPLETION:
        handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
        handle_cnt = 1;
        break;
    case MBA_CMPLT_1_16BIT:
        handles[0] = mb[1];
        handle_cnt = 1;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    case MBA_CMPLT_2_16BIT:
        handles[0] = mb[1];
        handles[1] = mb[2];
        handle_cnt = 2;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    case MBA_CMPLT_3_16BIT:
        handles[0] = mb[1];
        handles[1] = mb[2];
        handles[2] = mb[3];
        handle_cnt = 3;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    case MBA_CMPLT_4_16BIT:
        handles[0] = mb[1];
        handles[1] = mb[2];
        handles[2] = mb[3];
        handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
        handle_cnt = 4;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    case MBA_CMPLT_5_16BIT:
        handles[0] = mb[1];
        handles[1] = mb[2];
        handles[2] = mb[3];
        handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
        handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
        handle_cnt = 5;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    case MBA_CMPLT_2_32BIT:
        handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
        handles[1] = le32_to_cpu(
            ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
            RD_MAILBOX_REG(ha, reg, 6));
        handle_cnt = 2;
        mb[0] = MBA_SCSI_COMPLETION;
        break;
    default:
        break;
    }

    switch (mb[0]) {
    case MBA_SCSI_COMPLETION:	/* Fast Post */
        if (!vha->flags.online)
            break;

        for (cnt = 0; cnt < handle_cnt; cnt++)
            qla2x00_process_completed_request(vha, rsp->req,
                handles[cnt]);
        break;

    case MBA_RESET:			/* Reset */
        DEBUG2(printk("scsi(%ld): Asynchronous RESET.\n",
            vha->host_no));

        set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
        break;

    case MBA_SYSTEM_ERR:		/* System Error */
        qla_printk(KERN_INFO, ha,
            "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh.\n",
            mb[1], mb[2], mb[3]);

        ha->isp_ops->fw_dump(vha, 1);

        if (IS_FWI2_CAPABLE(ha)) {
            if (mb[1] == 0 && mb[2] == 0) {
                qla_printk(KERN_ERR, ha,
                    "Unrecoverable Hardware Error: adapter "
                    "marked OFFLINE!\n");
                vha->flags.online = 0;
            } else
                set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        } else if (mb[1] == 0) {
            qla_printk(KERN_INFO, ha,
                "Unrecoverable Hardware Error: adapter marked "
                "OFFLINE!\n");
            vha->flags.online = 0;
        } else
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        break;

    case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
        DEBUG2(printk("scsi(%ld): ISP Request Transfer Error.\n",
            vha->host_no));
        qla_printk(KERN_WARNING, ha, "ISP Request Transfer Error.\n");

        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        break;

    case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
        DEBUG2(printk("scsi(%ld): ISP Response Transfer Error.\n",
            vha->host_no));
        qla_printk(KERN_WARNING, ha, "ISP Response Transfer Error.\n");

        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        break;

    case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
        DEBUG2(printk("scsi(%ld): Asynchronous WAKEUP_THRES.\n",
            vha->host_no));
        break;

    case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
        DEBUG2(printk("scsi(%ld): LIP occurred (%x).\n", vha->host_no,
            mb[1]));
        qla_printk(KERN_INFO, ha, "LIP occurred (%x).\n", mb[1]);

        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
            atomic_set(&vha->loop_state, LOOP_DOWN);
            atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
            qla2x00_mark_all_devices_lost(vha, 1);
        }

        if (vha->vp_idx) {
            atomic_set(&vha->vp_state, VP_FAILED);
            fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
        }

        set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
        set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

        vha->flags.management_server_logged_in = 0;
        qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
        break;

    case MBA_LOOP_UP:		/* Loop Up Event */
        if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
            link_speed = link_speeds[0];
            ha->link_data_rate = PORT_SPEED_1GB;
        } else {
            link_speed = link_speeds[LS_UNKNOWN];
            if (mb[1] < 5)
                link_speed = link_speeds[mb[1]];
            else if (mb[1] == 0x13)
                link_speed = link_speeds[5];
            ha->link_data_rate = mb[1];
        }

        DEBUG2(printk("scsi(%ld): Asynchronous LOOP UP (%s Gbps).\n",
            vha->host_no, link_speed));
        qla_printk(KERN_INFO, ha, "LOOP UP detected (%s Gbps).\n",
            link_speed);

        vha->flags.management_server_logged_in = 0;
        qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
        break;

    case MBA_LOOP_DOWN:		/* Loop Down Event */
        DEBUG2(printk("scsi(%ld): Asynchronous LOOP DOWN "
            "(%x %x %x).\n", vha->host_no, mb[1], mb[2], mb[3]));
        qla_printk(KERN_INFO, ha, "LOOP DOWN detected (%x %x %x).\n",
            mb[1], mb[2], mb[3]);

        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
            atomic_set(&vha->loop_state, LOOP_DOWN);
            atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
            vha->device_flags |= DFLG_NO_CABLE;
            qla2x00_mark_all_devices_lost(vha, 1);
        }

        if (vha->vp_idx) {
            atomic_set(&vha->vp_state, VP_FAILED);
            fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
        }

        vha->flags.management_server_logged_in = 0;
        ha->link_data_rate = PORT_SPEED_UNKNOWN;
        qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
        break;

    case MBA_LIP_RESET:		/* LIP reset occurred */
        DEBUG2(printk("scsi(%ld): Asynchronous LIP RESET (%x).\n",
            vha->host_no, mb[1]));
        qla_printk(KERN_INFO, ha,
            "LIP reset occurred (%x).\n", mb[1]);

        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
            atomic_set(&vha->loop_state, LOOP_DOWN);
            atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
            qla2x00_mark_all_devices_lost(vha, 1);
        }

        if (vha->vp_idx) {
            atomic_set(&vha->vp_state, VP_FAILED);
            fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
        }

        set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

        ha->operating_mode = LOOP;
        vha->flags.management_server_logged_in = 0;
        qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
        break;

    /* case MBA_DCBX_COMPLETE: */
    case MBA_POINT_TO_POINT:	/* Point-to-Point */
        if (IS_QLA81XX(ha))
            DEBUG2(printk("scsi(%ld): DCBX Completed -- %04x %04x "
                "%04x\n", vha->host_no, mb[1], mb[2], mb[3]));
        else
            DEBUG2(printk("scsi(%ld): Asynchronous P2P MODE "
                "received.\n", vha->host_no));

        /*
         * Until there's a transition from loop down to loop up, treat
         * this as loop down only.
         */
        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
            atomic_set(&vha->loop_state, LOOP_DOWN);
            if (!atomic_read(&vha->loop_down_timer))
                atomic_set(&vha->loop_down_timer,
                    LOOP_DOWN_TIME);
            qla2x00_mark_all_devices_lost(vha, 1);
        }

        if (vha->vp_idx) {
            atomic_set(&vha->vp_state, VP_FAILED);
            fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
        }

        if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
            set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

        set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
        set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

        ha->flags.gpsc_supported = 1;
        vha->flags.management_server_logged_in = 0;
        break;

    case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
        DEBUG2(printk("scsi(%ld): Asynchronous Change In Connection "
            "received.\n", vha->host_no));
        qla_printk(KERN_INFO, ha,
            "Configuration change detected: value=%x.\n", mb[1]);

        if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
            atomic_set(&vha->loop_state, LOOP_DOWN);
            if (!atomic_read(&vha->loop_down_timer))
                atomic_set(&vha->loop_down_timer,
                    LOOP_DOWN_TIME);
            qla2x00_mark_all_devices_lost(vha, 1);
        }

        if (vha->vp_idx) {
            atomic_set(&vha->vp_state, VP_FAILED);
            fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
        }

        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
        set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
        break;

    case MBA_PORT_UPDATE:		/* Port database update */
        /* Only handle SCNs for our Vport index. */
        if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff))
            break;

        /*
         * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
         * event etc. earlier indicating loop is down) then process
         * it.  Otherwise ignore it and Wait for RSCN to come in.
         */
        atomic_set(&vha->loop_down_timer, 0);
        if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
            atomic_read(&vha->loop_state) != LOOP_DEAD) {
            DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE "
                "ignored %04x/%04x/%04x.\n", vha->host_no, mb[1],
                mb[2], mb[3]));
            break;
        }

        DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
            vha->host_no));
        DEBUG(printk(KERN_INFO
            "scsi(%ld): Port database changed %04x %04x %04x.\n",
            vha->host_no, mb[1], mb[2], mb[3]));

        /*
         * Mark all devices as missing so we will login again.
         */
        atomic_set(&vha->loop_state, LOOP_UP);

        qla2x00_mark_all_devices_lost(vha, 1);

        vha->flags.rscn_queue_overflow = 1;

        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
        set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
        break;

    case MBA_RSCN_UPDATE:		/* State Change Registration */
        /* Check if the Vport has issued a SCR */
        if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
            break;
        /* Only handle SCNs for our Vport index. */
        if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff))
            break;

        DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n",
            vha->host_no));
        DEBUG(printk(KERN_INFO
            "scsi(%ld): RSCN database changed -- %04x %04x %04x.\n",
            vha->host_no, mb[1], mb[2], mb[3]));

        rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
        host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
            | vha->d_id.b.al_pa;
        if (rscn_entry == host_pid) {
            DEBUG(printk(KERN_INFO
                "scsi(%ld): Ignoring RSCN update to local host "
                "port ID (%06x)\n",
                vha->host_no, host_pid));
            break;
        }

        /* Ignore reserved bits from RSCN-payload. */
        rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
        rscn_queue_index = vha->rscn_in_ptr + 1;
        if (rscn_queue_index == MAX_RSCN_COUNT)
            rscn_queue_index = 0;
        if (rscn_queue_index != vha->rscn_out_ptr) {
            vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry;
            vha->rscn_in_ptr = rscn_queue_index;
        } else {
            vha->flags.rscn_queue_overflow = 1;
        }

        atomic_set(&vha->loop_state, LOOP_UPDATE);
        atomic_set(&vha->loop_down_timer, 0);
        vha->flags.management_server_logged_in = 0;

        set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
        set_bit(RSCN_UPDATE, &vha->dpc_flags);
        qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
        break;

    /* case MBA_RIO_RESPONSE: */
    case MBA_ZIO_RESPONSE:
        DEBUG3(printk("scsi(%ld): [R|Z]IO update completion.\n",
            vha->host_no));

        if (IS_FWI2_CAPABLE(ha))
            qla24xx_process_response_queue(rsp);
        else
            qla2x00_process_response_queue(rsp);
        break;

    case MBA_DISCARD_RND_FRAME:
        DEBUG2(printk("scsi(%ld): Discard RND Frame -- %04x %04x "
            "%04x.\n", vha->host_no, mb[1], mb[2], mb[3]));
        break;

    case MBA_TRACE_NOTIFICATION:
        DEBUG2(printk("scsi(%ld): Trace Notification -- %04x %04x.\n",
            vha->host_no, mb[1], mb[2]));
        break;

    case MBA_ISP84XX_ALERT:
        DEBUG2(printk("scsi(%ld): ISP84XX Alert Notification -- "
            "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));

        spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
        switch (mb[1]) {
        case A84_PANIC_RECOVERY:
            qla_printk(KERN_INFO, ha, "Alert 84XX: panic recovery "
                "%04x %04x\n", mb[2], mb[3]);
            break;
        case A84_OP_LOGIN_COMPLETE:
            ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
            DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:"
                "firmware version %x\n", ha->cs84xx->op_fw_version));
            break;
        case A84_DIAG_LOGIN_COMPLETE:
            ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
            DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX:"
                "diagnostic firmware version %x\n",
                ha->cs84xx->diag_fw_version));
            break;
        case A84_GOLD_LOGIN_COMPLETE:
            ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
            ha->cs84xx->fw_update = 1;
            DEBUG2(qla_printk(KERN_INFO, ha, "Alert 84XX: gold "
                "firmware version %x\n",
                ha->cs84xx->gold_fw_version));
            break;
        default:
            qla_printk(KERN_ERR, ha,
                "Alert 84xx: Invalid Alert %04x %04x %04x\n",
                mb[1], mb[2], mb[3]);
        }
        spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
        break;
    case MBA_DCBX_START:
        DEBUG2(printk("scsi(%ld): DCBX Started -- %04x %04x %04x\n",
            vha->host_no, mb[1], mb[2], mb[3]));
        break;
    case MBA_DCBX_PARAM_UPDATE:
        DEBUG2(printk("scsi(%ld): DCBX Parameters Updated -- "
            "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
        break;
    case MBA_FCF_CONF_ERR:
        DEBUG2(printk("scsi(%ld): FCF Configuration Error -- "
            "%04x %04x %04x\n", vha->host_no, mb[1], mb[2], mb[3]));
        break;
    case MBA_IDC_COMPLETE:
    case MBA_IDC_NOTIFY:
    case MBA_IDC_TIME_EXT:
        qla81xx_idc_event(vha, mb[0], mb[1]);
        break;
    }

    if (!vha->vp_idx && ha->num_vhosts)
        qla2x00_alert_all_vps(rsp, mb);
}
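/*
 * Queue-depth management helpers: qla2x00_adjust_sdev_qdepth_up() and
 * qla2x00_adjust_sdev_qdepth_down() walk every LUN on a target and step the
 * SCSI queue depth up or down by one.  qla2x00_ramp_up_queue_depth() only
 * triggers the ramp-up once ql2xqfullrampup seconds have elapsed since the
 * last ramp-up and since the last QUEUE FULL condition on the port.
 */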
static void
qla2x00_adjust_sdev_qdepth_up(struct scsi_device *sdev, void *data)
{
    fc_port_t *fcport = data;
    struct scsi_qla_host *vha = fcport->vha;
    struct qla_hw_data *ha = vha->hw;
    struct req_que *req = NULL;

    req = ha->req_q_map[vha->req_ques[0]];
    if (req->max_q_depth <= sdev->queue_depth)
        return;

    if (sdev->ordered_tags)
        scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG,
            sdev->queue_depth + 1);
    else
        scsi_adjust_queue_depth(sdev, MSG_SIMPLE_TAG,
            sdev->queue_depth + 1);

    fcport->last_ramp_up = jiffies;

    DEBUG2(qla_printk(KERN_INFO, ha,
        "scsi(%ld:%d:%d:%d): Queue depth adjusted-up to %d.\n",
        fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
        sdev->queue_depth));
}
static void
qla2x00_adjust_sdev_qdepth_down(struct scsi_device *sdev, void *data)
{
    fc_port_t *fcport = data;

    if (!scsi_track_queue_full(sdev, sdev->queue_depth - 1))
        return;

    DEBUG2(qla_printk(KERN_INFO, fcport->vha->hw,
        "scsi(%ld:%d:%d:%d): Queue depth adjusted-down to %d.\n",
        fcport->vha->host_no, sdev->channel, sdev->id, sdev->lun,
        sdev->queue_depth));
}
static inline void
qla2x00_ramp_up_queue_depth(scsi_qla_host_t *vha, struct req_que *req,
    srb_t *sp)
{
    fc_port_t *fcport;
    struct scsi_device *sdev;

    sdev = sp->cmd->device;
    if (sdev->queue_depth >= req->max_q_depth)
        return;

    fcport = sp->fcport;
    if (time_before(jiffies,
        fcport->last_ramp_up + ql2xqfullrampup * HZ))
        return;
    if (time_before(jiffies,
        fcport->last_queue_full + ql2xqfullrampup * HZ))
        return;

    starget_for_each_device(sdev->sdev_target, fcport,
        qla2x00_adjust_sdev_qdepth_up);
}
/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @ha: SCSI driver HA context
 */
static void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
    struct req_que *req, uint32_t index)
{
    srb_t *sp;
    struct qla_hw_data *ha = vha->hw;

    /* Validate handle. */
    if (index >= MAX_OUTSTANDING_COMMANDS) {
        DEBUG2(printk("scsi(%ld): Invalid SCSI completion handle %d.\n",
            vha->host_no, index));
        qla_printk(KERN_WARNING, ha,
            "Invalid SCSI completion handle %d.\n", index);

        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        return;
    }

    sp = req->outstanding_cmds[index];
    if (sp) {
        /* Free outstanding command slot. */
        req->outstanding_cmds[index] = NULL;

        /* Save ISP completion status */
        sp->cmd->result = DID_OK << 16;

        qla2x00_ramp_up_queue_depth(vha, req, sp);
        qla2x00_sp_compl(ha, sp);
    } else {
        DEBUG2(printk("scsi(%ld): Invalid ISP SCSI completion handle\n",
            vha->host_no));
        qla_printk(KERN_WARNING, ha,
            "Invalid ISP SCSI completion handle\n");

        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
    }
}
/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @ha: SCSI driver HA context
 */
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
    struct scsi_qla_host *vha;
    struct qla_hw_data *ha = rsp->hw;
    struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
    sts_entry_t *pkt;
    uint16_t handle_cnt;
    uint16_t cnt;

    vha = qla2x00_get_rsp_host(rsp);

    if (!vha->flags.online)
        return;

    while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
        pkt = (sts_entry_t *)rsp->ring_ptr;

        rsp->ring_index++;
        if (rsp->ring_index == rsp->length) {
            rsp->ring_index = 0;
            rsp->ring_ptr = rsp->ring;
        } else {
            rsp->ring_ptr++;
        }

        if (pkt->entry_status != 0) {
            DEBUG3(printk(KERN_INFO
                "scsi(%ld): Process error entry.\n", vha->host_no));

            qla2x00_error_entry(vha, rsp, pkt);
            ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
            wmb();
            continue;
        }

        switch (pkt->entry_type) {
        case STATUS_TYPE:
            qla2x00_status_entry(vha, rsp, pkt);
            break;
        case STATUS_TYPE_21:
            handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
            for (cnt = 0; cnt < handle_cnt; cnt++) {
                qla2x00_process_completed_request(vha, rsp->req,
                    ((sts21_entry_t *)pkt)->handle[cnt]);
            }
            break;
        case STATUS_TYPE_22:
            handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
            for (cnt = 0; cnt < handle_cnt; cnt++) {
                qla2x00_process_completed_request(vha, rsp->req,
                    ((sts22_entry_t *)pkt)->handle[cnt]);
            }
            break;
        case STATUS_CONT_TYPE:
            qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
            break;
        default:
            /* Type Not Supported. */
            DEBUG4(printk(KERN_WARNING
                "scsi(%ld): Received unknown response pkt type %x "
                "entry status=%x.\n",
                vha->host_no, pkt->entry_type, pkt->entry_status));
            break;
        }
        ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
        wmb();
    }

    /* Adjust ring index */
    WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}
static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t sense_len)
{
    struct scsi_cmnd *cp = sp->cmd;

    if (sense_len >= SCSI_SENSE_BUFFERSIZE)
        sense_len = SCSI_SENSE_BUFFERSIZE;

    sp->request_sense_length = sense_len;
    sp->request_sense_ptr = cp->sense_buffer;
    if (sp->request_sense_length > 32)
        sense_len = 32;

    memcpy(cp->sense_buffer, sense_data, sense_len);

    sp->request_sense_ptr += sense_len;
    sp->request_sense_length -= sense_len;
    if (sp->request_sense_length != 0)
        sp->fcport->vha->status_srb = sp;

    DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
        "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
        cp->device->channel, cp->device->id, cp->device->lun, cp,
        cp->serial_number));
    DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, sense_len));
}
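/*
 * Sense data handling: qla2x00_handle_sense() above copies the first chunk
 * of sense data (at most 32 bytes arrive with the Status IOCB) into the
 * midlayer sense buffer and, when request_sense_length remains non-zero,
 * parks the srb in vha->status_srb so the remainder can be gathered from
 * subsequent Status Continuation entries by qla2x00_status_cont_entry().
 */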
/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
{
    srb_t *sp;
    fc_port_t *fcport;
    struct scsi_cmnd *cp;
    sts_entry_t *sts;
    struct sts_entry_24xx *sts24;
    uint16_t comp_status;
    uint16_t scsi_status;
    uint8_t lscsi_status;
    int32_t resid;
    uint32_t sense_len, rsp_info_len, resid_len, fw_resid_len;
    uint8_t *rsp_info, *sense_data;
    struct qla_hw_data *ha = vha->hw;
    struct req_que *req = rsp->req;

    sts = (sts_entry_t *) pkt;
    sts24 = (struct sts_entry_24xx *) pkt;
    if (IS_FWI2_CAPABLE(ha)) {
        comp_status = le16_to_cpu(sts24->comp_status);
        scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
    } else {
        comp_status = le16_to_cpu(sts->comp_status);
        scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
    }

    /* Fast path completion. */
    if (comp_status == CS_COMPLETE && scsi_status == 0) {
        qla2x00_process_completed_request(vha, req, sts->handle);

        return;
    }

    /* Validate handle. */
    if (sts->handle < MAX_OUTSTANDING_COMMANDS) {
        sp = req->outstanding_cmds[sts->handle];
        req->outstanding_cmds[sts->handle] = NULL;
    } else
        sp = NULL;

    if (sp == NULL) {
        DEBUG2(printk("scsi(%ld): Status Entry invalid handle.\n",
            vha->host_no));
        qla_printk(KERN_WARNING, ha, "Status Entry invalid handle.\n");

        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        qla2xxx_wake_dpc(vha);
        return;
    }
    cp = sp->cmd;
    if (cp == NULL) {
        DEBUG2(printk("scsi(%ld): Command already returned back to OS "
            "pkt->handle=%d sp=%p.\n", vha->host_no, sts->handle, sp));
        qla_printk(KERN_WARNING, ha,
            "Command is NULL: already returned to OS (sp=%p)\n", sp);

        return;
    }

    lscsi_status = scsi_status & STATUS_MASK;

    fcport = sp->fcport;

    sense_len = rsp_info_len = resid_len = fw_resid_len = 0;
    if (IS_FWI2_CAPABLE(ha)) {
        sense_len = le32_to_cpu(sts24->sense_len);
        rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
        resid_len = le32_to_cpu(sts24->rsp_residual_count);
        fw_resid_len = le32_to_cpu(sts24->residual_len);
        rsp_info = sts24->data;
        sense_data = sts24->data;
        host_to_fcp_swap(sts24->data, sizeof(sts24->data));
    } else {
        sense_len = le16_to_cpu(sts->req_sense_length);
        rsp_info_len = le16_to_cpu(sts->rsp_info_len);
        resid_len = le32_to_cpu(sts->residual_length);
        rsp_info = sts->rsp_info;
        sense_data = sts->req_sense_data;
    }

    /* Check for any FCP transport errors. */
    if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
        /* Sense data lies beyond any FCP RESPONSE data. */
        if (IS_FWI2_CAPABLE(ha))
            sense_data += rsp_info_len;
        if (rsp_info_len > 3 && rsp_info[3]) {
            DEBUG2(printk("scsi(%ld:%d:%d:%d) FCP I/O protocol "
                "failure (%x/%02x%02x%02x%02x%02x%02x%02x%02x)..."
                "retrying command\n", vha->host_no,
                cp->device->channel, cp->device->id,
                cp->device->lun, rsp_info_len, rsp_info[0],
                rsp_info[1], rsp_info[2], rsp_info[3], rsp_info[4],
                rsp_info[5], rsp_info[6], rsp_info[7]));

            cp->result = DID_BUS_BUSY << 16;
            qla2x00_sp_compl(ha, sp);
            return;
        }
    }

    /* Check for overrun. */
    if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
        scsi_status & SS_RESIDUAL_OVER)
        comp_status = CS_DATA_OVERRUN;

    /*
     * Based on Host and scsi status generate status code for Linux
     */
    switch (comp_status) {
    case CS_COMPLETE:
    case CS_QUEUE_FULL:
        if (scsi_status == 0) {
            cp->result = DID_OK << 16;
            break;
        }
        if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
            resid = resid_len;
            scsi_set_resid(cp, resid);

            if (!lscsi_status &&
                ((unsigned)(scsi_bufflen(cp) - resid) <
                 cp->underflow)) {
                qla_printk(KERN_INFO, ha,
                    "scsi(%ld:%d:%d:%d): Mid-layer underflow "
                    "detected (%x of %x bytes)...returning "
                    "error status.\n", vha->host_no,
                    cp->device->channel, cp->device->id,
                    cp->device->lun, resid,
                    scsi_bufflen(cp));

                cp->result = DID_ERROR << 16;
                break;
            }
        }
        cp->result = DID_OK << 16 | lscsi_status;

        if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
            DEBUG2(printk(KERN_INFO
                "scsi(%ld): QUEUE FULL status detected "
                "0x%x-0x%x.\n", vha->host_no, comp_status,
                scsi_status));

            /* Adjust queue depth for all luns on the port. */
            fcport->last_queue_full = jiffies;
            starget_for_each_device(cp->device->sdev_target,
                fcport, qla2x00_adjust_sdev_qdepth_down);
            break;
        }
        if (lscsi_status != SS_CHECK_CONDITION)
            break;

        memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
        if (!(scsi_status & SS_SENSE_LEN_VALID))
            break;

        qla2x00_handle_sense(sp, sense_data, sense_len);
        break;

    case CS_DATA_UNDERRUN:
        resid = resid_len;
        /* Use F/W calculated residual length. */
        if (IS_FWI2_CAPABLE(ha)) {
            if (!(scsi_status & SS_RESIDUAL_UNDER)) {
                lscsi_status = 0;
            } else if (resid != fw_resid_len) {
                scsi_status &= ~SS_RESIDUAL_UNDER;
                lscsi_status = 0;
            }
            resid = fw_resid_len;
        }

        if (scsi_status & SS_RESIDUAL_UNDER) {
            scsi_set_resid(cp, resid);
        } else {
            DEBUG2(printk(KERN_INFO
                "scsi(%ld:%d:%d) UNDERRUN status detected "
                "0x%x-0x%x. resid=0x%x fw_resid=0x%x cdb=0x%x "
                "os_underflow=0x%x\n", vha->host_no,
                cp->device->id, cp->device->lun, comp_status,
                scsi_status, resid_len, resid, cp->cmnd[0],
                cp->underflow));
        }

        /*
         * Check to see if SCSI Status is non zero. If so report SCSI
         * Status.
         */
        if (lscsi_status != 0) {
            cp->result = DID_OK << 16 | lscsi_status;

            if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
                DEBUG2(printk(KERN_INFO
                    "scsi(%ld): QUEUE FULL status detected "
                    "0x%x-0x%x.\n", vha->host_no, comp_status,
                    scsi_status));

                /*
                 * Adjust queue depth for all luns on the
                 * port.
                 */
                fcport->last_queue_full = jiffies;
                starget_for_each_device(
                    cp->device->sdev_target, fcport,
                    qla2x00_adjust_sdev_qdepth_down);
                break;
            }
            if (lscsi_status != SS_CHECK_CONDITION)
                break;

            memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
            if (!(scsi_status & SS_SENSE_LEN_VALID))
                break;

            qla2x00_handle_sense(sp, sense_data, sense_len);
        } else {
            /*
             * If RISC reports underrun and target does not report
             * it then we must have a lost frame, so tell upper
             * layer to retry it by reporting a bus busy.
             */
            if (!(scsi_status & SS_RESIDUAL_UNDER)) {
                DEBUG2(printk("scsi(%ld:%d:%d:%d) Dropped "
                    "frame(s) detected (%x of %x bytes)..."
                    "retrying command.\n",
                    vha->host_no, cp->device->channel,
                    cp->device->id, cp->device->lun, resid,
                    scsi_bufflen(cp)));

                cp->result = DID_BUS_BUSY << 16;
                break;
            }

            /* Handle mid-layer underflow */
            if ((unsigned)(scsi_bufflen(cp) - resid) <
                cp->underflow) {
                qla_printk(KERN_INFO, ha,
                    "scsi(%ld:%d:%d:%d): Mid-layer underflow "
                    "detected (%x of %x bytes)...returning "
                    "error status.\n", vha->host_no,
                    cp->device->channel, cp->device->id,
                    cp->device->lun, resid,
                    scsi_bufflen(cp));

                cp->result = DID_ERROR << 16;
                break;
            }

            /* Everybody online, looking good... */
            cp->result = DID_OK << 16;
        }
        break;

    case CS_DATA_OVERRUN:
        DEBUG2(printk(KERN_INFO
            "scsi(%ld:%d:%d): OVERRUN status detected 0x%x-0x%x\n",
            vha->host_no, cp->device->id, cp->device->lun, comp_status,
            scsi_status));
        DEBUG2(printk(KERN_INFO
            "CDB: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
            cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
            cp->cmnd[4], cp->cmnd[5]));
        DEBUG2(printk(KERN_INFO
            "PID=0x%lx req=0x%x xtra=0x%x -- returning DID_ERROR "
            "status!\n",
            cp->serial_number, scsi_bufflen(cp), resid_len));

        cp->result = DID_ERROR << 16;
        break;

    case CS_PORT_LOGGED_OUT:
    case CS_PORT_CONFIG_CHG:
    case CS_PORT_BUSY:
    case CS_INCOMPLETE:
    case CS_PORT_UNAVAILABLE:
        /*
         * If the port is in Target Down state, return all IOs for this
         * Target with DID_NO_CONNECT ELSE Queue the IOs in the
         * retry queue.
         */
        DEBUG2(printk("scsi(%ld:%d:%d): status_entry: Port Down "
            "pid=%ld, compl status=0x%x, port state=0x%x\n",
            vha->host_no, cp->device->id, cp->device->lun,
            cp->serial_number, comp_status,
            atomic_read(&fcport->state)));

        /*
         * We are going to have the fc class block the rport
         * while we try to recover so instruct the mid layer
         * to requeue until the class decides how to handle this.
         */
        cp->result = DID_TRANSPORT_DISRUPTED << 16;
        if (atomic_read(&fcport->state) == FCS_ONLINE)
            qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
        break;

    case CS_RESET:
        DEBUG2(printk(KERN_INFO
            "scsi(%ld): RESET status detected 0x%x-0x%x.\n",
            vha->host_no, comp_status, scsi_status));

        cp->result = DID_RESET << 16;
        break;

    case CS_ABORTED:
        /*
         * hv2.19.12 - DID_ABORT does not retry the request if we
         * aborted this request then abort otherwise it must be a
         * reset.
         */
        DEBUG2(printk(KERN_INFO
            "scsi(%ld): ABORT status detected 0x%x-0x%x.\n",
            vha->host_no, comp_status, scsi_status));

        cp->result = DID_RESET << 16;
        break;

    case CS_TIMEOUT:
        /*
         * We are going to have the fc class block the rport
         * while we try to recover so instruct the mid layer
         * to requeue until the class decides how to handle this.
         */
        cp->result = DID_TRANSPORT_DISRUPTED << 16;

        if (IS_FWI2_CAPABLE(ha)) {
            DEBUG2(printk(KERN_INFO
                "scsi(%ld:%d:%d:%d): TIMEOUT status detected "
                "0x%x-0x%x\n", vha->host_no, cp->device->channel,
                cp->device->id, cp->device->lun, comp_status,
                scsi_status));
            break;
        }
        DEBUG2(printk(KERN_INFO
            "scsi(%ld:%d:%d:%d): TIMEOUT status detected 0x%x-0x%x "
            "sflags=%x.\n", vha->host_no, cp->device->channel,
            cp->device->id, cp->device->lun, comp_status, scsi_status,
            le16_to_cpu(sts->status_flags)));

        /* Check to see if logout occurred. */
        if ((le16_to_cpu(sts->status_flags) & SF_LOGOUT_SENT))
            qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
        break;

    default:
        DEBUG3(printk("scsi(%ld): Error detected (unknown status) "
            "0x%x-0x%x.\n", vha->host_no, comp_status, scsi_status));
        qla_printk(KERN_INFO, ha,
            "Unknown status detected 0x%x-0x%x.\n",
            comp_status, scsi_status);

        cp->result = DID_ERROR << 16;
        break;
    }

    /* Place command on done queue. */
    if (vha->status_srb == NULL)
        qla2x00_sp_compl(ha, sp);
}
/**
 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 *
 * Extended sense data.
 */
static void
qla2x00_status_cont_entry(scsi_qla_host_t *vha, sts_cont_entry_t *pkt)
{
    uint8_t sense_sz = 0;
    struct qla_hw_data *ha = vha->hw;
    srb_t *sp = vha->status_srb;
    struct scsi_cmnd *cp;

    if (sp != NULL && sp->request_sense_length != 0) {
        cp = sp->cmd;
        if (cp == NULL) {
            DEBUG2(printk("%s(): Cmd already returned back to OS "
                "sp=%p.\n", __func__, sp));
            qla_printk(KERN_INFO, ha,
                "cmd is NULL: already returned to OS (sp=%p)\n",
                sp);

            vha->status_srb = NULL;
            return;
        }

        if (sp->request_sense_length > sizeof(pkt->data)) {
            sense_sz = sizeof(pkt->data);
        } else {
            sense_sz = sp->request_sense_length;
        }

        /* Move sense data. */
        if (IS_FWI2_CAPABLE(ha))
            host_to_fcp_swap(pkt->data, sizeof(pkt->data));
        memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
        DEBUG5(qla2x00_dump_buffer(sp->request_sense_ptr, sense_sz));

        sp->request_sense_ptr += sense_sz;
        sp->request_sense_length -= sense_sz;

        /* Place command on done queue. */
        if (sp->request_sense_length == 0) {
            vha->status_srb = NULL;
            qla2x00_sp_compl(ha, sp);
        }
    }
}
/**
 * qla2x00_error_entry() - Process an error entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
{
    srb_t *sp;
    struct qla_hw_data *ha = vha->hw;
    struct req_que *req = rsp->req;
#if defined(QL_DEBUG_LEVEL_2)
    if (pkt->entry_status & RF_INV_E_ORDER)
        qla_printk(KERN_ERR, ha, "%s: Invalid Entry Order\n", __func__);
    else if (pkt->entry_status & RF_INV_E_COUNT)
        qla_printk(KERN_ERR, ha, "%s: Invalid Entry Count\n", __func__);
    else if (pkt->entry_status & RF_INV_E_PARAM)
        qla_printk(KERN_ERR, ha,
            "%s: Invalid Entry Parameter\n", __func__);
    else if (pkt->entry_status & RF_INV_E_TYPE)
        qla_printk(KERN_ERR, ha, "%s: Invalid Entry Type\n", __func__);
    else if (pkt->entry_status & RF_BUSY)
        qla_printk(KERN_ERR, ha, "%s: Busy\n", __func__);
    else
        qla_printk(KERN_ERR, ha, "%s: UNKNOWN flag error\n", __func__);
#endif

    /* Validate handle. */
    if (pkt->handle < MAX_OUTSTANDING_COMMANDS)
        sp = req->outstanding_cmds[pkt->handle];
    else
        sp = NULL;

    if (sp) {
        /* Free outstanding command slot. */
        req->outstanding_cmds[pkt->handle] = NULL;

        /* Bad payload or header */
        if (pkt->entry_status &
            (RF_INV_E_ORDER | RF_INV_E_COUNT |
             RF_INV_E_PARAM | RF_INV_E_TYPE)) {
            sp->cmd->result = DID_ERROR << 16;
        } else if (pkt->entry_status & RF_BUSY) {
            sp->cmd->result = DID_BUS_BUSY << 16;
        } else {
            sp->cmd->result = DID_ERROR << 16;
        }
        qla2x00_sp_compl(ha, sp);

    } else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
        COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7) {
        DEBUG2(printk("scsi(%ld): Error entry - invalid handle\n",
            vha->host_no));
        qla_printk(KERN_WARNING, ha,
            "Error entry - invalid handle\n");

        set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
        qla2xxx_wake_dpc(vha);
    }
}
/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
    uint16_t cnt;
    uint16_t __iomem *wptr;
    struct qla_hw_data *ha = vha->hw;
    struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

    /* Load return mailbox registers. */
    ha->flags.mbox_int = 1;
    ha->mailbox_out[0] = mb0;
    wptr = (uint16_t __iomem *)&reg->mailbox1;

    for (cnt = 1; cnt < ha->mbx_count; cnt++) {
        ha->mailbox_out[cnt] = RD_REG_WORD(wptr);
        wptr++;
    }

    if (ha->mcp) {
        DEBUG3(printk("%s(%ld): Got mailbox completion. cmd=%x.\n",
            __func__, vha->host_no, ha->mcp->mb[0]));
    } else {
        DEBUG2_3(printk("%s(%ld): MBX pointer ERROR!\n",
            __func__, vha->host_no));
    }
}
/**
 * qla24xx_process_response_queue() - Process response queue entries.
 * @ha: SCSI driver HA context
 */
void
qla24xx_process_response_queue(struct rsp_que *rsp)
{
    struct sts_entry_24xx *pkt;
    struct scsi_qla_host *vha;

    vha = qla2x00_get_rsp_host(rsp);

    if (!vha->flags.online)
        return;

    while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
        pkt = (struct sts_entry_24xx *)rsp->ring_ptr;

        rsp->ring_index++;
        if (rsp->ring_index == rsp->length) {
            rsp->ring_index = 0;
            rsp->ring_ptr = rsp->ring;
        } else {
            rsp->ring_ptr++;
        }

        if (pkt->entry_status != 0) {
            DEBUG3(printk(KERN_INFO
                "scsi(%ld): Process error entry.\n", vha->host_no));

            qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
            ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
            wmb();
            continue;
        }

        switch (pkt->entry_type) {
        case STATUS_TYPE:
            qla2x00_status_entry(vha, rsp, pkt);
            break;
        case STATUS_CONT_TYPE:
            qla2x00_status_cont_entry(vha, (sts_cont_entry_t *)pkt);
            break;
        case VP_RPT_ID_IOCB_TYPE:
            qla24xx_report_id_acquisition(vha,
                (struct vp_rpt_id_entry_24xx *)pkt);
            break;
        default:
            /* Type Not Supported. */
            DEBUG4(printk(KERN_WARNING
                "scsi(%ld): Received unknown response pkt type %x "
                "entry status=%x.\n",
                vha->host_no, pkt->entry_type, pkt->entry_status));
            break;
        }
        ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
        wmb();
    }

    /* Adjust ring index */
    WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
}
static void
qla2xxx_check_risc_status(scsi_qla_host_t *vha)
{
    int rval;
    uint32_t cnt;
    struct qla_hw_data *ha = vha->hw;
    struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

    if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
        return;

    rval = QLA_SUCCESS;
    WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
    RD_REG_DWORD(&reg->iobase_addr);
    WRT_REG_DWORD(&reg->iobase_window, 0x0001);
    for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
        rval == QLA_SUCCESS; cnt--) {
        if (cnt)
            WRT_REG_DWORD(&reg->iobase_window, 0x0001);
        else
            rval = QLA_FUNCTION_TIMEOUT;
    }
    if (rval == QLA_SUCCESS)
        goto next_test;

    WRT_REG_DWORD(&reg->iobase_window, 0x0003);
    for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
        rval == QLA_SUCCESS; cnt--) {
        if (cnt)
            WRT_REG_DWORD(&reg->iobase_window, 0x0003);
        else
            rval = QLA_FUNCTION_TIMEOUT;
    }
    if (rval != QLA_SUCCESS)
        goto done;

next_test:
    if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
        qla_printk(KERN_INFO, ha, "Additional code -- 0x55AA.\n");

done:
    WRT_REG_DWORD(&reg->iobase_window, 0x0000);
    RD_REG_DWORD(&reg->iobase_window);
}
/**
 * qla24xx_intr_handler() - Process interrupts for the ISP24xx.
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla24xx_intr_handler(int irq, void *dev_id)
{
    scsi_qla_host_t *vha;
    struct qla_hw_data *ha;
    struct device_reg_24xx __iomem *reg;
    int status;
    unsigned long iter;
    uint32_t stat;
    uint32_t hccr;
    uint16_t mb[4];
    struct rsp_que *rsp;

    rsp = (struct rsp_que *) dev_id;
    if (!rsp) {
        printk(KERN_INFO
            "%s(): NULL response queue pointer\n", __func__);
        return IRQ_NONE;
    }

    ha = rsp->hw;
    reg = &ha->iobase->isp24;
    status = 0;

    spin_lock(&ha->hardware_lock);
    vha = qla2x00_get_rsp_host(rsp);
    for (iter = 50; iter--; ) {
        stat = RD_REG_DWORD(&reg->host_status);
        if (stat & HSRX_RISC_PAUSED) {
            if (pci_channel_offline(ha->pdev))
                break;

            hccr = RD_REG_DWORD(&reg->hccr);

            qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
                "Dumping firmware!\n", hccr);

            qla2xxx_check_risc_status(vha);

            ha->isp_ops->fw_dump(vha, 1);
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
            break;
        } else if ((stat & HSRX_RISC_INT) == 0)
            break;

        switch (stat & 0xff) {
        case 0x1:
        case 0x2:
        case 0x10:
        case 0x11:
            qla24xx_mbx_completion(vha, MSW(stat));
            status |= MBX_INTERRUPT;

            break;
        case 0x12:
            mb[0] = MSW(stat);
            mb[1] = RD_REG_WORD(&reg->mailbox1);
            mb[2] = RD_REG_WORD(&reg->mailbox2);
            mb[3] = RD_REG_WORD(&reg->mailbox3);
            qla2x00_async_event(vha, rsp, mb);
            break;
        case 0x13:
        case 0x14:
            qla24xx_process_response_queue(rsp);
            break;
        default:
            DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
                "(%d).\n",
                vha->host_no, stat & 0xff));
            break;
        }
        WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
        RD_REG_DWORD_RELAXED(&reg->hccr);
    }
    spin_unlock(&ha->hardware_lock);

    if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
        (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
        set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
        complete(&ha->mbx_intr_comp);
    }

    return IRQ_HANDLED;
}
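/*
 * The MSI-X handlers below are per-vector: qla24xx_msix_rsp_q() and
 * qla25xx_msix_rsp_q() only drain a response queue, while
 * qla24xx_msix_default() (vector 0) also fields mailbox completions and
 * asynchronous events, mirroring the line-interrupt handler above.
 */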
static irqreturn_t
qla24xx_msix_rsp_q(int irq, void *dev_id)
{
    struct qla_hw_data *ha;
    struct rsp_que *rsp;
    struct device_reg_24xx __iomem *reg;

    rsp = (struct rsp_que *) dev_id;
    if (!rsp) {
        printk(KERN_INFO
            "%s(): NULL response queue pointer\n", __func__);
        return IRQ_NONE;
    }
    ha = rsp->hw;
    reg = &ha->iobase->isp24;

    spin_lock_irq(&ha->hardware_lock);

    qla24xx_process_response_queue(rsp);
    WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);

    spin_unlock_irq(&ha->hardware_lock);

    return IRQ_HANDLED;
}
static irqreturn_t
qla25xx_msix_rsp_q(int irq, void *dev_id)
{
    struct qla_hw_data *ha;
    struct rsp_que *rsp;
    struct device_reg_24xx __iomem *reg;

    rsp = (struct rsp_que *) dev_id;
    if (!rsp) {
        printk(KERN_INFO
            "%s(): NULL response queue pointer\n", __func__);
        return IRQ_NONE;
    }
    ha = rsp->hw;
    reg = &ha->iobase->isp24;

    spin_lock_irq(&ha->hardware_lock);

    qla24xx_process_response_queue(rsp);

    spin_unlock_irq(&ha->hardware_lock);

    return IRQ_HANDLED;
}
static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
    scsi_qla_host_t *vha;
    struct qla_hw_data *ha;
    struct rsp_que *rsp;
    struct device_reg_24xx __iomem *reg;
    int status;
    uint32_t stat;
    uint32_t hccr;
    uint16_t mb[4];

    rsp = (struct rsp_que *) dev_id;
    if (!rsp) {
        DEBUG(printk(
            "%s(): NULL response queue pointer\n", __func__));
        return IRQ_NONE;
    }
    ha = rsp->hw;
    reg = &ha->iobase->isp24;
    status = 0;

    spin_lock_irq(&ha->hardware_lock);
    vha = qla2x00_get_rsp_host(rsp);
    do {
        stat = RD_REG_DWORD(&reg->host_status);
        if (stat & HSRX_RISC_PAUSED) {
            if (pci_channel_offline(ha->pdev))
                break;

            hccr = RD_REG_DWORD(&reg->hccr);

            qla_printk(KERN_INFO, ha, "RISC paused -- HCCR=%x, "
                "Dumping firmware!\n", hccr);

            qla2xxx_check_risc_status(vha);

            ha->isp_ops->fw_dump(vha, 1);
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
            break;
        } else if ((stat & HSRX_RISC_INT) == 0)
            break;

        switch (stat & 0xff) {
        case 0x1:
        case 0x2:
        case 0x10:
        case 0x11:
            qla24xx_mbx_completion(vha, MSW(stat));
            status |= MBX_INTERRUPT;

            break;
        case 0x12:
            mb[0] = MSW(stat);
            mb[1] = RD_REG_WORD(&reg->mailbox1);
            mb[2] = RD_REG_WORD(&reg->mailbox2);
            mb[3] = RD_REG_WORD(&reg->mailbox3);
            qla2x00_async_event(vha, rsp, mb);
            break;
        case 0x13:
        case 0x14:
            qla24xx_process_response_queue(rsp);
            break;
        default:
            DEBUG2(printk("scsi(%ld): Unrecognized interrupt type "
                "(%d).\n",
                vha->host_no, stat & 0xff));
            break;
        }
        WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
    } while (0);
    spin_unlock_irq(&ha->hardware_lock);

    if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
        (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
        set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
        complete(&ha->mbx_intr_comp);
    }

    return IRQ_HANDLED;
}
/* Interrupt handling helpers. */

struct qla_init_msix_entry {
    const char *name;
    irq_handler_t handler;
};

static struct qla_init_msix_entry base_queue = {
    .name = "qla2xxx (default)",
    .handler = qla24xx_msix_default,
};

static struct qla_init_msix_entry base_rsp_queue = {
    .name = "qla2xxx (rsp_q)",
    .handler = qla24xx_msix_rsp_q,
};

static struct qla_init_msix_entry multi_rsp_queue = {
    .name = "qla2xxx (multi_q)",
    .handler = qla25xx_msix_rsp_q,
};
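/*
 * Vector assignment: MSI-X entry 0 is always registered with base_queue
 * (qla24xx_msix_default) for AENs and mailbox completions; entry 1 uses
 * multi_rsp_queue when multiqueue resources are available (ha->mqiobase
 * mapped and more than one queue granted), otherwise base_rsp_queue.
 */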
static void
qla24xx_disable_msix(struct qla_hw_data *ha)
{
    int i;
    struct qla_msix_entry *qentry;

    for (i = 0; i < ha->msix_count; i++) {
        qentry = &ha->msix_entries[i];
        if (qentry->have_irq)
            free_irq(qentry->vector, qentry->rsp);
    }
    pci_disable_msix(ha->pdev);
    kfree(ha->msix_entries);
    ha->msix_entries = NULL;
    ha->flags.msix_enabled = 0;
}
static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
#define MIN_MSIX_COUNT	2
    int i, ret;
    struct msix_entry *entries;
    struct qla_msix_entry *qentry;
    struct qla_init_msix_entry *msix_queue;

    entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
        GFP_KERNEL);
    if (!entries)
        return -ENOMEM;

    for (i = 0; i < ha->msix_count; i++)
        entries[i].entry = i;

    ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
    if (ret) {
        if (ret < MIN_MSIX_COUNT)
            goto msix_failed;

        qla_printk(KERN_WARNING, ha,
            "MSI-X: Failed to enable support -- %d/%d\n"
            " Retry with %d vectors\n", ha->msix_count, ret, ret);
        ha->msix_count = ret;
        ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
        if (ret) {
msix_failed:
            qla_printk(KERN_WARNING, ha, "MSI-X: Failed to enable"
                " support, giving up -- %d/%d\n",
                ha->msix_count, ret);
            goto msix_out;
        }
        ha->max_queues = ha->msix_count - 1;
    }
    ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
        ha->msix_count, GFP_KERNEL);
    if (!ha->msix_entries) {
        ret = -ENOMEM;
        goto msix_out;
    }
    ha->flags.msix_enabled = 1;

    for (i = 0; i < ha->msix_count; i++) {
        qentry = &ha->msix_entries[i];
        qentry->vector = entries[i].vector;
        qentry->entry = entries[i].entry;
        qentry->have_irq = 0;
        qentry->rsp = NULL;
    }

    /* Enable MSI-X for AENs for queue 0 */
    qentry = &ha->msix_entries[0];
    ret = request_irq(qentry->vector, base_queue.handler, 0,
        base_queue.name, rsp);
    if (ret) {
        qla_printk(KERN_WARNING, ha,
            "MSI-X: Unable to register handler -- %x/%d.\n",
            qentry->vector, ret);
        qla24xx_disable_msix(ha);
        goto msix_out;
    }
    qentry->have_irq = 1;
    qentry->rsp = rsp;

    /* Enable MSI-X vector for response queue update for queue 0 */
    if (ha->max_queues > 1 && ha->mqiobase) {
        msix_queue = &multi_rsp_queue;
        qla_printk(KERN_INFO, ha,
            "MQ enabled, Number of Queue Resources: %d \n",
            ha->max_queues);
    } else {
        msix_queue = &base_rsp_queue;
    }

    qentry = &ha->msix_entries[1];
    ret = request_irq(qentry->vector, msix_queue->handler, 0,
        msix_queue->name, rsp);
    if (ret) {
        qla_printk(KERN_WARNING, ha,
            "MSI-X: Unable to register handler -- %x/%d.\n",
            qentry->vector, ret);
        qla24xx_disable_msix(ha);
        goto msix_out;
    }
    qentry->have_irq = 1;
    qentry->rsp = rsp;

msix_out:
    kfree(entries);
    return ret;
}
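/*
 * qla2x00_request_irqs() tries interrupt modes in order: MSI-X on ISPs that
 * support it (subject to the ISP2432 revision/firmware-mode and HP
 * subsystem-ID quirks below), then MSI, and finally a shared INTx line, and
 * it finishes by clearing any stale RISC/host interrupt state.
 */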
int
qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
    int ret;
    device_reg_t __iomem *reg = ha->iobase;

    /* If possible, enable MSI-X. */
    if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
        !IS_QLA8432(ha) && !IS_QLA8001(ha))
        goto skip_msix;

    if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX ||
        !QLA_MSIX_FW_MODE_1(ha->fw_attributes))) {
        DEBUG2(qla_printk(KERN_WARNING, ha,
            "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
            ha->pdev->revision, ha->fw_attributes));

        goto skip_msix;
    }

    if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
        (ha->pdev->subsystem_device == 0x7040 ||
         ha->pdev->subsystem_device == 0x7041 ||
         ha->pdev->subsystem_device == 0x1705)) {
        DEBUG2(qla_printk(KERN_WARNING, ha,
            "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X, 0x%X).\n",
            ha->pdev->subsystem_vendor,
            ha->pdev->subsystem_device));

        goto skip_msi;
    }

    ret = qla24xx_enable_msix(ha, rsp);
    if (!ret) {
        DEBUG2(qla_printk(KERN_INFO, ha,
            "MSI-X: Enabled (0x%X, 0x%X).\n", ha->chip_revision,
            ha->fw_attributes));
        goto clear_risc_ints;
    }
    qla_printk(KERN_WARNING, ha,
        "MSI-X: Falling back-to INTa mode -- %d.\n", ret);
skip_msix:

    if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
        !IS_QLA8001(ha))
        goto skip_msi;

    ret = pci_enable_msi(ha->pdev);
    if (!ret) {
        DEBUG2(qla_printk(KERN_INFO, ha, "MSI: Enabled.\n"));
        ha->flags.msi_enabled = 1;
    }
skip_msi:

    ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
        IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp);
    if (ret) {
        qla_printk(KERN_WARNING, ha,
            "Failed to reserve interrupt %d already in use.\n",
            ha->pdev->irq);
        goto fail;
    }
    ha->flags.inta_enabled = 1;
clear_risc_ints:

    /*
     * FIXME: Noted that 8014s were being dropped during NK testing.
     * Timing deltas during MSI-X/INTa transitions?
     */
    spin_lock_irq(&ha->hardware_lock);
    if (IS_FWI2_CAPABLE(ha)) {
        WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
        WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
    } else {
        WRT_REG_WORD(&reg->isp.semaphore, 0);
        WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
        WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
    }
    spin_unlock_irq(&ha->hardware_lock);

fail:
    return ret;
}
void
qla2x00_free_irqs(scsi_qla_host_t *vha)
{
    struct qla_hw_data *ha = vha->hw;
    struct rsp_que *rsp = ha->rsp_q_map[0];

    if (ha->flags.msix_enabled)
        qla24xx_disable_msix(ha);
    else if (ha->flags.inta_enabled) {
        free_irq(ha->pdev->irq, rsp);
        pci_disable_msi(ha->pdev);
    }
}
static struct scsi_qla_host *
qla2x00_get_rsp_host(struct rsp_que *rsp)
{
    srb_t *sp;
    struct qla_hw_data *ha = rsp->hw;
    struct scsi_qla_host *vha = NULL;
    struct sts_entry_24xx *pkt;
    struct req_que *req;

    if (rsp->id) {
        pkt = (struct sts_entry_24xx *) rsp->ring_ptr;
        req = rsp->req;
        if (pkt && pkt->handle < MAX_OUTSTANDING_COMMANDS) {
            sp = req->outstanding_cmds[pkt->handle];
            if (sp)
                vha = sp->fcport->vha;
        }
    }
    if (!vha)
        /* handle it in base queue */
        vha = pci_get_drvdata(ha->pdev);

    return vha;
}
int qla25xx_request_irq(struct rsp_que *rsp)
{
    struct qla_hw_data *ha = rsp->hw;
    struct qla_init_msix_entry *intr = &multi_rsp_queue;
    struct qla_msix_entry *msix = rsp->msix;
    int ret;

    ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
    if (ret)
        qla_printk(KERN_WARNING, ha,
            "MSI-X: Unable to register handler -- %x/%d.\n",