/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>
static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_process_completed_request(struct scsi_qla_host *,
	struct req_que *, uint32_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
	sts_entry_t *);
/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint16_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		printk(KERN_INFO
		    "%s(): NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		hccr = RD_REG_WORD(&reg->hccr);
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

			/*
			 * Issue a "HARD" reset in order for the RISC interrupt
			 * bit to be cleared.  Schedule a big hammer to get
			 * out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
			break;
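
		/*
		 * Semaphore bit 0 set means the RISC has posted mailbox or
		 * asynchronous-event data; clear means the interrupt only
		 * signals response-queue work.
		 */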
		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(vha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(vha, rsp, mb);
			} else {
				ql_dbg(ql_dbg_async, vha, 0x5025,
				    "Unrecognized interrupt type (%d).\n",
				    mb[0]);
			}
			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			RD_REG_WORD(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(rsp);

			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return (IRQ_HANDLED);
}
/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint16_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	struct qla_hw_data *ha;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		printk(KERN_INFO
		    "%s(): NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (stat & HSR_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_WORD(&reg->hccr);
			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				ql_log(ql_log_warn, vha, 0x5026,
				    "Parity error -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);
			else
				ql_log(ql_log_warn, vha, 0x5027,
				    "RISC paused -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;
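
		/*
		 * The low byte of host_status identifies the interrupt
		 * source; for mailbox-style interrupts the upper word of
		 * the same read already holds mailbox0, so MSW(stat) is
		 * used instead of a separate register read.
		 */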
		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(rsp);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(vha, rsp, mb);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5028,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return (IRQ_HANDLED);
}
/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint32_t	mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Read all mbox registers? */
	mboxes = (1 << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
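
		/*
		 * Mailboxes 4 and 5 go through qla2x00_debounce_register(),
		 * which re-reads the register until two consecutive reads
		 * agree, since these can still be settling when the
		 * interrupt is serviced.
		 */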
		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		wptr++;
		mboxes >>= 1;
	}
}
static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
	static char *event[] =
		{ "Complete", "Request Notification", "Time Extension" };
	int rval;
	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
	uint16_t __iomem *wptr;
	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

	/* Seed data -- mailbox1 -> mailbox7. */
	wptr = (uint16_t __iomem *)&reg24->mailbox1;
	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
		mb[cnt] = RD_REG_WORD(wptr);

	ql_dbg(ql_dbg_async, vha, 0x5021,
	    "Inter-Driver Communication %s -- "
	    "%04x %04x %04x %04x %04x %04x %04x.\n",
	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
	    mb[4], mb[5], mb[6]);

	/* Acknowledgement needed? [Notify && non-zero timeout]. */
	timeout = (descr >> 8) & 0xf;
	if (aen != MBA_IDC_NOTIFY || !timeout)
		return;
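
	/*
	 * A notification carrying a non-zero timeout (bits 8-11 of the
	 * descriptor) requires an acknowledgement; the mailbox snapshot is
	 * handed to the IDC-ACK work item rather than answered from
	 * interrupt context.
	 */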
	ql_dbg(ql_dbg_async, vha, 0x5022,
	    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
	    vha->host_no, event[aen & 0xff], timeout);

	rval = qla2x00_post_idc_ack_work(vha, mb);
	if (rval != QLA_SUCCESS)
		ql_log(ql_log_warn, vha, 0x5023,
		    "IDC failed to post ACK.\n");
}
/**
 * qla2x00_async_event() - Process asynchronous events.
 * @ha: SCSI driver HA context
 * @mb: Mailbox registers (0 - 3)
 */
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
	static char	*link_speeds[] = { "1", "2", "?", "4", "8", "10" };
	char		*link_speed;
	uint16_t	handle_cnt;
	uint16_t	cnt, mbx;
	uint32_t	handles[5];
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
	uint32_t	rscn_entry, host_pid;
	uint8_t		rscn_queue_index;
	unsigned long	flags;

	/* Setup to process RIO completion. */
	handle_cnt = 0;
	if (IS_QLA8XXX_TYPE(ha))
		goto skip_rio;
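
	/*
	 * Reduced Interrupt Operation: a single interrupt may report
	 * several command completions packed into the mailboxes.  Each
	 * RIO variant below is normalized to a handles[]/handle_cnt pair
	 * and then handled as an ordinary MBA_SCSI_COMPLETION.
	 */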
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
		handles[0] = mb[1];
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handles[1] = le32_to_cpu(
		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
		    RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}
skip_rio:
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:	/* Fast Post */
		if (!vha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
			    handles[cnt]);
		break;

	case MBA_RESET:			/* Reset */
		ql_dbg(ql_dbg_async, vha, 0x5002,
		    "Asynchronous RESET.\n");

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		break;

	case MBA_SYSTEM_ERR:		/* System Error */
		mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox7) : 0;
		ql_log(ql_log_warn, vha, 0x5003,
		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
		    "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);

		ha->isp_ops->fw_dump(vha, 1);

		if (IS_FWI2_CAPABLE(ha)) {
			if (mb[1] == 0 && mb[2] == 0) {
				ql_log(ql_log_fatal, vha, 0x5004,
				    "Unrecoverable Hardware Error: adapter "
				    "marked OFFLINE!\n");
				vha->flags.online = 0;
			} else {
				/* Check to see if MPI timeout occurred */
				if ((mbx & MBX_3) && (ha->flags.port0))
					set_bit(MPI_RESET_NEEDED,
					    &vha->dpc_flags);

				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			}
		} else if (mb[1] == 0) {
			ql_log(ql_log_fatal, vha, 0x5005,
			    "Unrecoverable Hardware Error: adapter marked "
			    "OFFLINE!\n");
			vha->flags.online = 0;
		} else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
		ql_log(ql_log_warn, vha, 0x5006,
		    "ISP Request Transfer Error (%x).\n", mb[1]);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_log(ql_log_warn, vha, 0x5007,
		    "ISP Response Transfer Error.\n");

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
		ql_dbg(ql_dbg_async, vha, 0x5008,
		    "Asynchronous WAKEUP_THRES.\n");
		break;

	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
		ql_dbg(ql_dbg_async, vha, 0x5009,
		    "LIP occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
		break;
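
	/*
	 * link_speeds[] is indexed by the firmware rate code reported in
	 * mb[1]; the out-of-range code 0x13 (seen on 10-Gbps parts) is
	 * mapped by hand to the last ("10") entry below.
	 */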
	case MBA_LOOP_UP:		/* Loop Up Event */
		if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
			link_speed = link_speeds[0];
			ha->link_data_rate = PORT_SPEED_1GB;
		} else {
			link_speed = link_speeds[LS_UNKNOWN];
			if (mb[1] < 5)
				link_speed = link_speeds[mb[1]];
			else if (mb[1] == 0x13)
				link_speed = link_speeds[5];
			ha->link_data_rate = mb[1];
		}

		ql_dbg(ql_dbg_async, vha, 0x500a,
		    "LOOP UP detected (%s Gbps).\n", link_speed);

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
		break;

	case MBA_LOOP_DOWN:		/* Loop Down Event */
		mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0;
		mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
		ql_dbg(ql_dbg_async, vha, 0x500b,
		    "LOOP DOWN detected (%x %x %x %x).\n",
		    mb[1], mb[2], mb[3], mbx);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			vha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		vha->flags.management_server_logged_in = 0;
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
		break;

	case MBA_LIP_RESET:		/* LIP reset occurred */
		ql_dbg(ql_dbg_async, vha, 0x500c,
		    "LIP reset occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		ha->operating_mode = LOOP;
		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
		break;

	/* case MBA_DCBX_COMPLETE: */
	case MBA_POINT_TO_POINT:	/* Point-to-Point */
		if (IS_QLA2100(ha))
			break;

		if (IS_QLA8XXX_TYPE(ha)) {
			ql_dbg(ql_dbg_async, vha, 0x500d,
			    "DCBX Completed -- %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
			if (ha->notify_dcbx_comp)
				complete(&ha->dcbx_comp);
		} else
			ql_dbg(ql_dbg_async, vha, 0x500e,
			    "Asynchronous P2P MODE received.\n");

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
			set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		ha->flags.gpsc_supported = 1;
		vha->flags.management_server_logged_in = 0;
		break;

	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
		if (IS_QLA2100(ha))
			break;

		ql_dbg(ql_dbg_async, vha, 0x500f,
		    "Configuration change detected: value=%x.\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		break;

	case MBA_PORT_UPDATE:		/* Port database update */
		/*
		 * Handle only global and vn-port update events
		 *
		 * Relevant inputs:
		 * mb[1] = N_Port handle of changed port
		 * OR 0xffff for global event
		 * mb[2] = New login state
		 * 7 = Port logged out
		 * mb[3] = LSB is vp_idx, 0xff = all vps
		 *
		 * Skip processing if:
		 *       Event is global, vp_idx is NOT all vps,
		 *           vp_idx does not match
		 *       Event is not global, vp_idx does not match
		 */
		if (IS_QLA2XXX_MIDTYPE(ha) &&
		    ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
		    (mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
			break;

		/* Global event -- port logout or port unavailable. */
		if (mb[1] == 0xffff && mb[2] == 0x7) {
			ql_dbg(ql_dbg_async, vha, 0x5010,
			    "Port unavailable %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);

			if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
				atomic_set(&vha->loop_state, LOOP_DOWN);
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
				vha->device_flags |= DFLG_NO_CABLE;
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			if (vha->vp_idx) {
				atomic_set(&vha->vp_state, VP_FAILED);
				fc_vport_set_state(vha->fc_vport,
				    FC_VPORT_FAILED);
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			vha->flags.management_server_logged_in = 0;
			ha->link_data_rate = PORT_SPEED_UNKNOWN;
			break;
		}

		/*
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
		 * event etc. earlier indicating loop is down) then process
		 * it.  Otherwise ignore it and Wait for RSCN to come in.
		 */
		atomic_set(&vha->loop_down_timer, 0);
		if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
		    atomic_read(&vha->loop_state) != LOOP_DEAD) {
			ql_dbg(ql_dbg_async, vha, 0x5011,
			    "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
			    mb[1], mb[2], mb[3]);
			break;
		}

		ql_dbg(ql_dbg_async, vha, 0x5012,
		    "Port database changed %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		/*
		 * Mark all devices as missing so we will login again.
		 */
		atomic_set(&vha->loop_state, LOOP_UP);

		qla2x00_mark_all_devices_lost(vha, 1);

		vha->flags.rscn_queue_overflow = 1;

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		break;

	case MBA_RSCN_UPDATE:		/* State Change Registration */
		/* Check if the Vport has issued a SCR */
		if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
			break;
		/* Only handle SCNs for our Vport index. */
		if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
			break;

		ql_dbg(ql_dbg_async, vha, 0x5013,
		    "RSCN database changed -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
		host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
		    | vha->d_id.b.al_pa;
		if (rscn_entry == host_pid) {
			ql_dbg(ql_dbg_async, vha, 0x5014,
			    "Ignoring RSCN update to local host "
			    "port ID (%06x).\n", host_pid);
			break;
		}

		/* Ignore reserved bits from RSCN-payload. */
		rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
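
		/*
		 * The RSCN queue is a fixed-size ring; rather than
		 * overwrite unconsumed entries, a full ring only raises
		 * rscn_queue_overflow and lets the loop-resync path
		 * rediscover the fabric state.
		 */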
		rscn_queue_index = vha->rscn_in_ptr + 1;
		if (rscn_queue_index == MAX_RSCN_COUNT)
			rscn_queue_index = 0;
		if (rscn_queue_index != vha->rscn_out_ptr) {
			vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry;
			vha->rscn_in_ptr = rscn_queue_index;
		} else {
			vha->flags.rscn_queue_overflow = 1;
		}

		atomic_set(&vha->loop_down_timer, 0);
		vha->flags.management_server_logged_in = 0;

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(RSCN_UPDATE, &vha->dpc_flags);
		qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
		break;

	/* case MBA_RIO_RESPONSE: */
	case MBA_ZIO_RESPONSE:
		ql_dbg(ql_dbg_async, vha, 0x5015,
		    "[R|Z]IO update completion.\n");

		if (IS_FWI2_CAPABLE(ha))
			qla24xx_process_response_queue(vha, rsp);
		else
			qla2x00_process_response_queue(rsp);
		break;

	case MBA_DISCARD_RND_FRAME:
		ql_dbg(ql_dbg_async, vha, 0x5016,
		    "Discard RND Frame -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;

	case MBA_TRACE_NOTIFICATION:
		ql_dbg(ql_dbg_async, vha, 0x5017,
		    "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
		break;

	case MBA_ISP84XX_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x5018,
		    "ISP84XX Alert Notification -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
		switch (mb[1]) {
		case A84_PANIC_RECOVERY:
			ql_log(ql_log_info, vha, 0x5019,
			    "Alert 84XX: panic recovery %04x %04x.\n",
			    mb[2], mb[3]);
			break;
		case A84_OP_LOGIN_COMPLETE:
			ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501a,
			    "Alert 84XX: firmware version %x.\n",
			    ha->cs84xx->op_fw_version);
			break;
		case A84_DIAG_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501b,
			    "Alert 84XX: diagnostic firmware version %x.\n",
			    ha->cs84xx->diag_fw_version);
			break;
		case A84_GOLD_LOGIN_COMPLETE:
			ha->cs84xx->gold_fw_version = mb[3] << 16 | mb[2];
			ha->cs84xx->fw_update = 1;
			ql_log(ql_log_info, vha, 0x501c,
			    "Alert 84XX: gold firmware version %x.\n",
			    ha->cs84xx->gold_fw_version);
			break;
		default:
			ql_log(ql_log_warn, vha, 0x501d,
			    "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
		}
		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
		break;
	case MBA_DCBX_START:
		ql_dbg(ql_dbg_async, vha, 0x501e,
		    "DCBX Started -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_DCBX_PARAM_UPDATE:
		ql_dbg(ql_dbg_async, vha, 0x501f,
		    "DCBX Parameters Updated -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_FCF_CONF_ERR:
		ql_dbg(ql_dbg_async, vha, 0x5020,
		    "FCF Configuration Error -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_IDC_COMPLETE:
	case MBA_IDC_NOTIFY:
	case MBA_IDC_TIME_EXT:
		qla81xx_idc_event(vha, mb[0], mb[1]);
		break;
	}

	if (!vha->vp_idx && ha->num_vhosts)
		qla2x00_alert_all_vps(rsp, mb);
}
/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @ha: SCSI driver HA context
 * @index: SRB index
 */
static void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
    struct req_que *req, uint32_t index)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;

	/* Validate handle. */
	if (index >= MAX_OUTSTANDING_COMMANDS) {
		ql_log(ql_log_warn, vha, 0x3014,
		    "Invalid SCSI command index (%x).\n", index);

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot. */
		req->outstanding_cmds[index] = NULL;

		/* Save ISP completion status */
		sp->cmd->result = DID_OK << 16;
		qla2x00_sp_compl(ha, sp);
	} else {
		ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}
}
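
/*
 * qla2x00_get_sp_from_handle() - Validate a completion handle and claim
 * the SRB it references.
 *
 * Returns the outstanding SRB and clears its request-queue slot, or NULL
 * when the handle is out of range, already completed, or does not match
 * the handle recorded in the SRB.
 */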
static srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
    struct req_que *req, void *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	sts_entry_t *pkt = iocb;
	srb_t *sp = NULL;
	uint16_t index;

	index = LSW(pkt->handle);
	if (index >= MAX_OUTSTANDING_COMMANDS) {
		ql_log(ql_log_warn, vha, 0x5031,
		    "Invalid command index (%x).\n", index);
		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		goto done;
	}
	sp = req->outstanding_cmds[index];
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x5032,
		    "Invalid completion handle (%x) -- timed-out.\n", index);
		return sp;
	}
	if (sp->handle != index) {
		ql_log(ql_log_warn, vha, 0x5033,
		    "SRB handle (%x) mismatch %x.\n", sp->handle, index);
		return NULL;
	}

	req->outstanding_cmds[index] = NULL;

done:
	return sp;
}
static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
	const char func[] = "MBX-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	struct srb_ctx *ctx;
	uint16_t *data;
	uint16_t status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
	if (!sp)
		return;

	ctx = sp->ctx;
	lio = ctx->u.iocb_cmd;
	type = ctx->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (mbx->entry_status) {
		ql_dbg(ql_dbg_async, vha, 0x5043,
		    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
		    "entry-status=%x status=%x state-flag=%x "
		    "status-flags=%x.\n", type, sp->handle,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mbx->entry_status,
		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
		    le16_to_cpu(mbx->status_flags));

		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
		    (uint8_t *)mbx, sizeof(*mbx));

		goto logio_done;
	}

	status = le16_to_cpu(mbx->status);
	if (status == 0x30 && ctx->type == SRB_LOGIN_CMD &&
	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
		status = 0;
	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5045,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le16_to_cpu(mbx->mb1));

		data[0] = MBS_COMMAND_COMPLETE;
		if (ctx->type == SRB_LOGIN_CMD) {
			fcport->port_type = FCT_TARGET;
			if (le16_to_cpu(mbx->mb1) & BIT_0)
				fcport->port_type = FCT_INITIATOR;
			else if (le16_to_cpu(mbx->mb1) & BIT_1)
				fcport->flags |= FCF_FCP2_DEVICE;
		}
		goto logio_done;
	}

	data[0] = le16_to_cpu(mbx->mb0);
	switch (data[0]) {
	case MBS_PORT_ID_USED:
		data[1] = le16_to_cpu(mbx->mb1);
		break;
	case MBS_LOOP_ID_USED:
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_log(ql_log_warn, vha, 0x5046,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
	    le16_to_cpu(mbx->mb7));

logio_done:
	lio->done(sp);
}
static void
qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    sts_entry_t *pkt, int iocb_type)
{
	const char func[] = "CT_IOCB";
	const char *type;
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	struct srb_ctx *sp_bsg;
	struct fc_bsg_job *bsg_job;
	uint16_t comp_status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	sp_bsg = (struct srb_ctx *)sp->ctx;
	bsg_job = sp_bsg->u.bsg_job;

	type = NULL;
	switch (sp_bsg->type) {
	case SRB_CT_CMD:
		type = "ct pass-through";
		break;
	default:
		ql_log(ql_log_warn, vha, 0x5047,
		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
		return;
	}

	comp_status = le16_to_cpu(pkt->comp_status);

	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload to the caller
	 */
	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			bsg_job->reply->result = DID_OK << 16;
			bsg_job->reply->reply_payload_rcv_len =
			    le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);

			ql_log(ql_log_warn, vha, 0x5048,
			    "CT pass-through-%s error "
			    "comp_status-status=0x%x total_byte = 0x%x.\n",
			    type, comp_status,
			    bsg_job->reply->reply_payload_rcv_len);
		} else {
			ql_log(ql_log_warn, vha, 0x5049,
			    "CT pass-through-%s error "
			    "comp_status-status=0x%x.\n", type, comp_status);
			bsg_job->reply->result = DID_ERROR << 16;
			bsg_job->reply->reply_payload_rcv_len = 0;
		}
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		bsg_job->reply->result = DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	if (sp_bsg->type == SRB_ELS_CMD_HST || sp_bsg->type == SRB_CT_CMD)
		kfree(sp->ctx);

	mempool_free(sp, ha->srb_mempool);
	bsg_job->job_done(bsg_job);
}
static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct sts_entry_24xx *pkt, int iocb_type)
{
	const char func[] = "ELS_CT_IOCB";
	const char *type;
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	struct srb_ctx *sp_bsg;
	struct fc_bsg_job *bsg_job;
	uint16_t comp_status;
	uint32_t fw_status[3];
	uint8_t* fw_sts_ptr;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	sp_bsg = (struct srb_ctx *)sp->ctx;
	bsg_job = sp_bsg->u.bsg_job;

	type = NULL;
	switch (sp_bsg->type) {
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		type = "els";
		break;
	case SRB_CT_CMD:
		type = "ct pass-through";
		break;
	default:
		ql_log(ql_log_warn, vha, 0x503e,
		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
		return;
	}

	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
	fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
	fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);

	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload to the caller
	 */
	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			bsg_job->reply->result = DID_OK << 16;
			bsg_job->reply->reply_payload_rcv_len =
			    le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);

			ql_log(ql_log_info, vha, 0x503f,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
			    type, sp->handle, comp_status, fw_status[1], fw_status[2],
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->total_byte_count));
			fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
			memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
		} else {
			ql_log(ql_log_info, vha, 0x5040,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
			    type, sp->handle, comp_status,
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_1),
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_2));
			bsg_job->reply->result = DID_ERROR << 16;
			bsg_job->reply->reply_payload_rcv_len = 0;
			fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
			memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
		}
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5056,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		bsg_job->reply->result = DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	if ((sp_bsg->type == SRB_ELS_CMD_HST) ||
	    (sp_bsg->type == SRB_CT_CMD))
		kfree(sp->ctx);

	mempool_free(sp, ha->srb_mempool);
	bsg_job->job_done(bsg_job);
}
static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct logio_entry_24xx *logio)
{
	const char func[] = "LOGIO-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	struct srb_ctx *ctx;
	uint16_t *data;
	uint32_t iop[2];

	sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
	if (!sp)
		return;

	ctx = sp->ctx;
	lio = ctx->u.iocb_cmd;
	type = ctx->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (logio->entry_status) {
		ql_log(ql_log_warn, vha, 0x5034,
		    "Async-%s error entry - hdl=%x"
		    "portid=%02x%02x%02x entry-status=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    logio->entry_status);
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
		    (uint8_t *)logio, sizeof(*logio));

		goto logio_done;
	}

	if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5036,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x "
		    "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le32_to_cpu(logio->io_parameter[0]));

		data[0] = MBS_COMMAND_COMPLETE;
		if (ctx->type != SRB_LOGIN_CMD)
			goto logio_done;

		iop[0] = le32_to_cpu(logio->io_parameter[0]);
		if (iop[0] & BIT_4) {
			fcport->port_type = FCT_TARGET;
			if (iop[0] & BIT_8)
				fcport->flags |= FCF_FCP2_DEVICE;
		} else if (iop[0] & BIT_5)
			fcport->port_type = FCT_INITIATOR;

		if (logio->io_parameter[7] || logio->io_parameter[8])
			fcport->supported_classes |= FC_COS_CLASS2;
		if (logio->io_parameter[9] || logio->io_parameter[10])
			fcport->supported_classes |= FC_COS_CLASS3;

		goto logio_done;
	}

	iop[0] = le32_to_cpu(logio->io_parameter[0]);
	iop[1] = le32_to_cpu(logio->io_parameter[1]);
	switch (iop[0]) {
	case LSC_SCODE_PORTID_USED:
		data[0] = MBS_PORT_ID_USED;
		data[1] = LSW(iop[1]);
		break;
	case LSC_SCODE_NPORT_USED:
		data[0] = MBS_LOOP_ID_USED;
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_dbg(ql_dbg_async, vha, 0x5037,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
	    "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    le16_to_cpu(logio->comp_status),
	    le32_to_cpu(logio->io_parameter[0]),
	    le32_to_cpu(logio->io_parameter[1]));

logio_done:
	lio->done(sp);
}
static void
qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct tsk_mgmt_entry *tsk)
{
	const char func[] = "TMF-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *iocb;
	struct srb_ctx *ctx;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
	int error = 1;

	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
	if (!sp)
		return;

	ctx = sp->ctx;
	iocb = ctx->u.iocb_cmd;
	type = ctx->name;
	fcport = sp->fcport;

	if (sts->entry_status) {
		ql_log(ql_log_warn, vha, 0x5038,
		    "Async-%s error - hdl=%x entry-status(%x).\n",
		    type, sp->handle, sts->entry_status);
	} else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
		ql_log(ql_log_warn, vha, 0x5039,
		    "Async-%s error - hdl=%x completion status(%x).\n",
		    type, sp->handle, sts->comp_status);
	} else if (!(le16_to_cpu(sts->scsi_status) &
	    SS_RESPONSE_INFO_LEN_VALID)) {
		ql_log(ql_log_warn, vha, 0x503a,
		    "Async-%s error - hdl=%x no response info(%x).\n",
		    type, sp->handle, sts->scsi_status);
	} else if (le32_to_cpu(sts->rsp_data_len) < 4) {
		ql_log(ql_log_warn, vha, 0x503b,
		    "Async-%s error - hdl=%x not enough response(%d).\n",
		    type, sp->handle, sts->rsp_data_len);
	} else if (sts->data[3]) {
		ql_log(ql_log_warn, vha, 0x503c,
		    "Async-%s error - hdl=%x response(%x).\n",
		    type, sp->handle, sts->data[3]);
	} else {
		error = 0;
	}

	if (error) {
		iocb->u.tmf.data = error;
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
		    (uint8_t *)sts, sizeof(*sts));
	}

	iocb->done(sp);
}
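
/*
 * Response-queue entries are consumed in place: each handled entry is
 * stamped RESPONSE_PROCESSED, so the processing loops below can scan
 * until they reach the first entry the firmware has not yet replaced,
 * without tracking a producer index.
 */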
/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @ha: SCSI driver HA context
 */
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t	*pkt;
	uint16_t	handle_cnt;
	uint16_t	cnt;

	vha = pci_get_drvdata(ha->pdev);

	if (!vha->flags.online)
		return;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_TYPE_21:
			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts21_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_TYPE_22:
			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts22_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case MBX_IOCB_TYPE:
			qla2x00_mbx_iocb_entry(vha, rsp->req,
			    (struct mbx_entry *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		default:
			/* Type Not Supported. */
			ql_log(ql_log_warn, vha, 0x504a,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}
static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
    uint32_t sense_len, struct rsp_que *rsp)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cp = sp->cmd;

	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	sp->request_sense_length = sense_len;
	sp->request_sense_ptr = cp->sense_buffer;
	if (sp->request_sense_length > par_sense_len)
		sense_len = par_sense_len;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	sp->request_sense_ptr += sense_len;
	sp->request_sense_length -= sense_len;
	if (sp->request_sense_length != 0)
		rsp->status_srb = sp;

	ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
	    "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
	    sp->fcport->vha->host_no, cp->device->id, cp->device->lun, cp);
	if (sense_len)
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
		    cp->sense_buffer, sense_len);
}
struct scsi_dif_tuple {
	__be16 guard;	/* Checksum */
	__be16 app_tag;	/* APPL identifier */
	__be32 ref_tag;	/* Target LBA or indirect LBA */
};

/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA. In case of errors, we set the
 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
 * to indicate to the kernel that the HBA detected error.
 */
static inline int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cmd = sp->cmd;
	uint8_t		*ap = &sts24->data[12];
	uint8_t		*ep = &sts24->data[20];
	uint32_t	e_ref_tag, a_ref_tag;
	uint16_t	e_app_tag, a_app_tag;
	uint16_t	e_guard, a_guard;

	/*
	 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
	 * would make guard field appear at offset 2
	 */
	a_guard   = le16_to_cpu(*(uint16_t *)(ap + 2));
	a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
	a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
	e_guard   = le16_to_cpu(*(uint16_t *)(ep + 2));
	e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
	e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));

	ql_dbg(ql_dbg_io, vha, 0x3023,
	    "iocb(s) %p Returned STATUS.\n", sts24);

	ql_dbg(ql_dbg_io, vha, 0x3024,
	    "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
	    " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
	    " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
	    cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
	    a_app_tag, e_app_tag, a_guard, e_guard);
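
	/*
	 * The status IOCB returns the actual DIF tuple (at data[12]) and
	 * the expected tuple (at data[20]) side by side; the comparisons
	 * below decide which class of protection error to report.
	 */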
	/*
	 * Ignore sector if:
	 * For type     3: ref & app tag is all 'f's
	 * For type 0,1,2: app tag is all 'f's
	 */
	if ((a_app_tag == 0xffff) &&
	    ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
	    (a_ref_tag == 0xffffffff))) {
		uint32_t blocks_done, resid;
		sector_t lba_s = scsi_get_lba(cmd);

		/* 2TB boundary case covered automatically with this */
		blocks_done = e_ref_tag - (uint32_t)lba_s + 1;

		resid = scsi_bufflen(cmd) - (blocks_done *
		    cmd->device->sector_size);

		scsi_set_resid(cmd, resid);
		cmd->result = DID_OK << 16;

		/* Update protection tag */
		if (scsi_prot_sg_count(cmd)) {
			uint32_t i, j = 0, k = 0, num_ent;
			struct scatterlist *sg;
			struct sd_dif_tuple *spt;

			/* Patch the corresponding protection tags */
			scsi_for_each_prot_sg(cmd, sg,
			    scsi_prot_sg_count(cmd), i) {
				num_ent = sg_dma_len(sg) / 8;
				if (k + num_ent < blocks_done) {
					k += num_ent;
					continue;
				}
				j = blocks_done - k - 1;
				k = blocks_done;
				break;
			}

			if (k != blocks_done) {
				ql_log(ql_log_warn, vha, 0x302f,
				    "unexpected tag values tag:lba=%x:%llx)\n",
				    e_ref_tag, (unsigned long long)lba_s);
				return 1;
			}

			spt = page_address(sg_page(sg)) + sg->offset;
			spt += j;

			spt->app_tag = 0xffff;
			if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
				spt->ref_tag = 0xffffffff;
		}

		return 0;
	}

	/* check guard */
	if (e_guard != a_guard) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x1);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	/* check ref tag */
	if (e_ref_tag != a_ref_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x3);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	/* check appl tag */
	if (e_app_tag != a_app_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x2);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	return 1;
}
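
/*
 * Status IOCB handles pack the originating request-queue number into the
 * upper word and the command index into the lower word;
 * qla2x00_status_entry() splits the handle accordingly to locate the
 * outstanding command.
 */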
/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
{
	srb_t		*sp;
	fc_port_t	*fcport;
	struct scsi_cmnd *cp;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;
	uint16_t	comp_status;
	uint16_t	scsi_status;
	uint16_t	ox_id;
	uint8_t		lscsi_status;
	int32_t		resid;
	uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
	    fw_resid_len;
	uint8_t		*rsp_info, *sense_data;
	struct qla_hw_data *ha = vha->hw;
	uint32_t handle;
	uint16_t que;
	struct req_que *req;
	int logit = 1;

	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;
	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}
	handle = (uint32_t) LSW(sts->handle);
	que = MSW(sts->handle);
	req = ha->req_q_map[que];

	/* Fast path completion. */
	if (comp_status == CS_COMPLETE && scsi_status == 0) {
		qla2x00_process_completed_request(vha, req, handle);

		return;
	}

	/* Validate handle. */
	if (handle < MAX_OUTSTANDING_COMMANDS) {
		sp = req->outstanding_cmds[handle];
		req->outstanding_cmds[handle] = NULL;
	} else
		sp = NULL;

	if (sp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3017,
		    "Invalid status handle (0x%x).\n", sts->handle);

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		return;
	}
	cp = sp->cmd;
	if (cp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3018,
		    "Command already returned (0x%x/%p).\n",
		    sts->handle, sp);

		return;
	}

	lscsi_status = scsi_status & STATUS_MASK;

	fcport = sp->fcport;

	ox_id = 0;
	sense_len = par_sense_len = rsp_info_len = resid_len =
	    fw_resid_len = 0;
	if (IS_FWI2_CAPABLE(ha)) {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le32_to_cpu(sts24->sense_len);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
			resid_len = le32_to_cpu(sts24->rsp_residual_count);
		if (comp_status == CS_DATA_UNDERRUN)
			fw_resid_len = le32_to_cpu(sts24->residual_len);
		rsp_info = sts24->data;
		sense_data = sts24->data;
		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
		ox_id = le16_to_cpu(sts24->ox_id);
		par_sense_len = sizeof(sts24->data);
	} else {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le16_to_cpu(sts->req_sense_length);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le16_to_cpu(sts->rsp_info_len);
		resid_len = le32_to_cpu(sts->residual_length);
		rsp_info = sts->rsp_info;
		sense_data = sts->req_sense_data;
		par_sense_len = sizeof(sts->req_sense_data);
	}

	/* Check for any FCP transport errors. */
	if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
		/* Sense data lies beyond any FCP RESPONSE data. */
		if (IS_FWI2_CAPABLE(ha)) {
			sense_data += rsp_info_len;
			par_sense_len -= rsp_info_len;
		}
		if (rsp_info_len > 3 && rsp_info[3]) {
			ql_dbg(ql_dbg_io, vha, 0x3019,
			    "FCP I/O protocol failure (0x%x/0x%x).\n",
			    rsp_info_len, rsp_info[3]);

			cp->result = DID_BUS_BUSY << 16;
			goto out;
		}
	}

	/* Check for overrun. */
	if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
	    scsi_status & SS_RESIDUAL_OVER)
		comp_status = CS_DATA_OVERRUN;

	/*
	 * Based on Host and scsi status generate status code for Linux
	 */
	switch (comp_status) {
	case CS_COMPLETE:
	case CS_QUEUE_FULL:
		if (scsi_status == 0) {
			cp->result = DID_OK << 16;
			break;
		}
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
			resid = resid_len;
			scsi_set_resid(cp, resid);

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			    cp->underflow)) {
				ql_dbg(ql_dbg_io, vha, 0x301a,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				cp->result = DID_ERROR << 16;
				break;
			}
		}
		cp->result = DID_OK << 16 | lscsi_status;

		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
			ql_dbg(ql_dbg_io, vha, 0x301b,
			    "QUEUE FULL detected.\n");
			break;
		}
		logit = 0;
		if (lscsi_status != SS_CHECK_CONDITION)
			break;

		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (!(scsi_status & SS_SENSE_LEN_VALID))
			break;

		qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
		    rsp);
		break;

	case CS_DATA_UNDERRUN:
		/* Use F/W calculated residual length. */
		resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
		scsi_set_resid(cp, resid);
		if (scsi_status & SS_RESIDUAL_UNDER) {
			if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
				ql_dbg(ql_dbg_io, vha, 0x301d,
				    "Dropped frame(s) detected "
				    "(0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				cp->result = DID_ERROR << 16 | lscsi_status;
				goto check_scsi_status;
			}

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			    cp->underflow)) {
				ql_dbg(ql_dbg_io, vha, 0x301e,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				cp->result = DID_ERROR << 16;
				break;
			}
		} else {
			ql_dbg(ql_dbg_io, vha, 0x301f,
			    "Dropped frame(s) detected (0x%x "
			    "of 0x%x bytes).\n", resid, scsi_bufflen(cp));

			cp->result = DID_ERROR << 16 | lscsi_status;
			goto check_scsi_status;
		}

		cp->result = DID_OK << 16 | lscsi_status;
		logit = 0;

check_scsi_status:
		/*
		 * Check to see if SCSI Status is non zero. If so report SCSI
		 * Status.
		 */
		if (lscsi_status != 0) {
			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
				ql_dbg(ql_dbg_io, vha, 0x3020,
				    "QUEUE FULL detected.\n");
				logit = 1;
				break;
			}
			if (lscsi_status != SS_CHECK_CONDITION)
				break;

			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
			if (!(scsi_status & SS_SENSE_LEN_VALID))
				break;

			qla2x00_handle_sense(sp, sense_data, par_sense_len,
			    sense_len, rsp);
		}
		break;

	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
	case CS_TIMEOUT:
	case CS_RESET:

		/*
		 * We are going to have the fc class block the rport
		 * while we try to recover so instruct the mid layer
		 * to requeue until the class decides how to handle this.
		 */
		cp->result = DID_TRANSPORT_DISRUPTED << 16;

		if (comp_status == CS_TIMEOUT) {
			if (IS_FWI2_CAPABLE(ha))
				break;
			else if ((le16_to_cpu(sts->status_flags) &
			    SF_LOGOUT_SENT) == 0)
				break;
		}

		ql_dbg(ql_dbg_io, vha, 0x3021,
		    "Port down status: port-state=0x%x.\n",
		    atomic_read(&fcport->state));

		if (atomic_read(&fcport->state) == FCS_ONLINE)
			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
		break;

	case CS_ABORTED:
		cp->result = DID_RESET << 16;
		break;

	case CS_DIF_ERROR:
		logit = qla2x00_handle_dif_error(sp, sts24);
		break;
	default:
		cp->result = DID_ERROR << 16;
		break;
	}

out:
	if (logit)
		ql_dbg(ql_dbg_io, vha, 0x3022,
		    "FCP command status: 0x%x-0x%x (0x%x) "
		    "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
		    "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
		    comp_status, scsi_status, cp->result, vha->host_no,
		    cp->device->id, cp->device->lun, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
		    cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
		    cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7],
		    cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
		    resid_len, fw_resid_len);

	if (rsp->status_srb == NULL)
		qla2x00_sp_compl(ha, sp);
}
/**
 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 *
 * Extended sense data.
 */
static void
qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
{
	uint8_t	sense_sz = 0;
	struct qla_hw_data *ha = rsp->hw;
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	srb_t *sp = rsp->status_srb;
	struct scsi_cmnd *cp;

	if (sp != NULL && sp->request_sense_length != 0) {
		cp = sp->cmd;
		if (cp == NULL) {
			ql_log(ql_log_warn, vha, 0x3025,
			    "cmd is NULL: already returned to OS (sp=%p).\n",
			    sp);

			rsp->status_srb = NULL;
			return;
		}

		if (sp->request_sense_length > sizeof(pkt->data)) {
			sense_sz = sizeof(pkt->data);
		} else {
			sense_sz = sp->request_sense_length;
		}

		/* Move sense data. */
		if (IS_FWI2_CAPABLE(ha))
			host_to_fcp_swap(pkt->data, sizeof(pkt->data));
		memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
		    sp->request_sense_ptr, sense_sz);

		sp->request_sense_ptr += sense_sz;
		sp->request_sense_length -= sense_sz;

		/* Place command on done queue. */
		if (sp->request_sense_length == 0) {
			rsp->status_srb = NULL;
			qla2x00_sp_compl(ha, sp);
		}
	}
}
static int
qla2x00_free_sp_ctx(scsi_qla_host_t *vha, srb_t *sp)
{
	struct qla_hw_data *ha = vha->hw;
	struct srb_ctx *ctx;

	if (!sp->ctx)
		return 1;

	ctx = sp->ctx;

	if (ctx->type == SRB_LOGIN_CMD ||
	    ctx->type == SRB_LOGOUT_CMD ||
	    ctx->type == SRB_TM_CMD) {
		ctx->u.iocb_cmd->done(sp);
		return 0;
	} else if (ctx->type == SRB_ADISC_CMD) {
		ctx->u.iocb_cmd->free(sp);
		return 0;
	} else {
		struct fc_bsg_job *bsg_job;

		bsg_job = ctx->u.bsg_job;
		if (ctx->type == SRB_ELS_CMD_HST ||
		    ctx->type == SRB_CT_CMD)
			kfree(sp->ctx);

		bsg_job->reply->reply_data.ctels_reply.status =
		    FC_CTELS_STATUS_OK;
		bsg_job->reply->result = DID_ERROR << 16;
		bsg_job->reply->reply_payload_rcv_len = 0;
		mempool_free(sp, ha->srb_mempool);
		bsg_job->job_done(bsg_job);
	}
	return 0;
}
/**
 * qla2x00_error_entry() - Process an error entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;
	const char func[] = "ERROR-IOCB";
	uint16_t que = MSW(pkt->handle);
	struct req_que *req = ha->req_q_map[que];

	if (pkt->entry_status & RF_INV_E_ORDER)
		ql_dbg(ql_dbg_async, vha, 0x502a,
		    "Invalid Entry Order.\n");
	else if (pkt->entry_status & RF_INV_E_COUNT)
		ql_dbg(ql_dbg_async, vha, 0x502b,
		    "Invalid Entry Count.\n");
	else if (pkt->entry_status & RF_INV_E_PARAM)
		ql_dbg(ql_dbg_async, vha, 0x502c,
		    "Invalid Entry Parameter.\n");
	else if (pkt->entry_status & RF_INV_E_TYPE)
		ql_dbg(ql_dbg_async, vha, 0x502d,
		    "Invalid Entry Type.\n");
	else if (pkt->entry_status & RF_BUSY)
		ql_dbg(ql_dbg_async, vha, 0x502e,
		    "Busy.\n");
	else
		ql_dbg(ql_dbg_async, vha, 0x502f,
		    "UNKNOWN flag error.\n");

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (sp) {
		if (qla2x00_free_sp_ctx(vha, sp)) {
			if (pkt->entry_status &
			    (RF_INV_E_ORDER | RF_INV_E_COUNT |
			    RF_INV_E_PARAM | RF_INV_E_TYPE)) {
				sp->cmd->result = DID_ERROR << 16;
			} else if (pkt->entry_status & RF_BUSY) {
				sp->cmd->result = DID_BUS_BUSY << 16;
			} else {
				sp->cmd->result = DID_ERROR << 16;
			}
			qla2x00_sp_compl(ha, sp);
		}
	} else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
	    COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7
	    || pkt->entry_type == COMMAND_TYPE_6) {
		ql_log(ql_log_warn, vha, 0x5030,
		    "Error entry - invalid handle.\n");

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
	}
}
/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint32_t	mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Read all mbox registers? */
	mboxes = (1 << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)&reg->mailbox1;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		mboxes >>= 1;
		wptr++;
	}
}
/**
 * qla24xx_process_response_queue() - Process response queue entries.
 * @ha: SCSI driver HA context
 */
void qla24xx_process_response_queue(struct scsi_qla_host *vha,
	struct rsp_que *rsp)
{
	struct sts_entry_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;

	if (!vha->flags.online)
		return;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (struct sts_entry_24xx *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case VP_RPT_ID_IOCB_TYPE:
			qla24xx_report_id_acquisition(vha,
			    (struct vp_rpt_id_entry_24xx *)pkt);
			break;
		case LOGINOUT_PORT_IOCB_TYPE:
			qla24xx_logio_entry(vha, rsp->req,
			    (struct logio_entry_24xx *)pkt);
			break;
		case TSK_MGMT_IOCB_TYPE:
			qla24xx_tm_iocb_entry(vha, rsp->req,
			    (struct tsk_mgmt_entry *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		case ELS_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
			break;
		case MARKER_TYPE:
			/* Do nothing in this case, this check is to prevent it
			 * from falling into default case
			 */
			break;
		default:
			/* Type Not Supported. */
			ql_dbg(ql_dbg_async, vha, 0x5042,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}
	/* Adjust ring index */
	if (IS_QLA82XX(ha)) {
		struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
		WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
	} else
		WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
}
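
/*
 * qla2xxx_check_risc_status() is only a diagnostic aid: after a RISC
 * pause on ISP25xx/ISP81xx parts it peeks at a debug register window
 * and logs an additional code, without changing the recovery path
 * chosen by the caller.
 */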
static void
qla2xxx_check_risc_status(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
		return;

	rval = QLA_SUCCESS;
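
	/*
	 * Select the 0x7C00 diagnostic register window and poll BIT_0 of
	 * iobase_window for readiness; if the first select (0x0001) times
	 * out, retry once with 0x0003 before giving up.
	 */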
	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_window, 0x0001);
	for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0001);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		goto next_test;

	rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_window, 0x0003);
	for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0003);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval != QLA_SUCCESS)
		goto done;

next_test:
	if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
		ql_log(ql_log_info, vha, 0x504c,
		    "Additional code -- 0x55AA.\n");

done:
	WRT_REG_DWORD(&reg->iobase_window, 0x0000);
	RD_REG_DWORD(&reg->iobase_window);
}
/**
 * qla24xx_intr_handler() - Process interrupts for the ISP24xx.
 *
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla24xx_intr_handler(int irq, void *dev_id)
{
    scsi_qla_host_t *vha;
    struct qla_hw_data *ha;
    struct device_reg_24xx __iomem *reg;
    int status;
    unsigned long iter;
    uint32_t stat;
    uint32_t hccr;
    uint16_t mb[4];
    struct rsp_que *rsp;
    unsigned long flags;

    rsp = (struct rsp_que *) dev_id;
    if (!rsp) {
        printk(KERN_INFO
            "%s(): NULL response queue pointer.\n", __func__);
        return IRQ_NONE;
    }

    ha = rsp->hw;
    reg = &ha->iobase->isp24;
    status = 0;

    if (unlikely(pci_channel_offline(ha->pdev)))
        return IRQ_HANDLED;

    spin_lock_irqsave(&ha->hardware_lock, flags);
    vha = pci_get_drvdata(ha->pdev);
    for (iter = 50; iter--; ) {
        stat = RD_REG_DWORD(&reg->host_status);
        if (stat & HSRX_RISC_PAUSED) {
            if (unlikely(pci_channel_offline(ha->pdev)))
                break;

            hccr = RD_REG_DWORD(&reg->hccr);

            ql_log(ql_log_warn, vha, 0x504b,
                "RISC paused -- HCCR=%x, Dumping firmware.\n",
                hccr);

            qla2xxx_check_risc_status(vha);

            ha->isp_ops->fw_dump(vha, 1);
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
            break;
        } else if ((stat & HSRX_RISC_INT) == 0)
            break;

        switch (stat & 0xff) {
        case 0x1:
        case 0x2:
        case 0x10:
        case 0x11:
            qla24xx_mbx_completion(vha, MSW(stat));
            status |= MBX_INTERRUPT;
            break;
        case 0x12:
            mb[0] = MSW(stat);
            mb[1] = RD_REG_WORD(&reg->mailbox1);
            mb[2] = RD_REG_WORD(&reg->mailbox2);
            mb[3] = RD_REG_WORD(&reg->mailbox3);
            qla2x00_async_event(vha, rsp, mb);
            break;
        case 0x13:
        case 0x14:
            qla24xx_process_response_queue(vha, rsp);
            break;
        default:
            ql_dbg(ql_dbg_async, vha, 0x504f,
                "Unrecognized interrupt type (%d).\n",
                stat & 0xff);
            break;
        }
        WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
        RD_REG_DWORD_RELAXED(&reg->hccr);
    }
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
        (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
        set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
        complete(&ha->mbx_intr_comp);
    }

    return IRQ_HANDLED;
}
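
/**
 * qla24xx_msix_rsp_q() - MSI-X vector handler for the base response queue.
 *
 * @dev_id: response queue (struct rsp_que) passed to request_irq()
 *
 * Drains the response queue, then acknowledges the interrupt by
 * clearing the RISC interrupt bit unless the MSI-X handshake has
 * been disabled.
 */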
static irqreturn_t
qla24xx_msix_rsp_q(int irq, void *dev_id)
{
    struct qla_hw_data *ha;
    struct rsp_que *rsp;
    struct device_reg_24xx __iomem *reg;
    struct scsi_qla_host *vha;
    unsigned long flags;

    rsp = (struct rsp_que *) dev_id;
    if (!rsp) {
        printk(KERN_INFO
            "%s(): NULL response queue pointer.\n", __func__);
        return IRQ_NONE;
    }
    ha = rsp->hw;
    reg = &ha->iobase->isp24;

    spin_lock_irqsave(&ha->hardware_lock, flags);

    vha = pci_get_drvdata(ha->pdev);
    qla24xx_process_response_queue(vha, rsp);
    if (!ha->flags.disable_msix_handshake) {
        WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
        RD_REG_DWORD_RELAXED(&reg->hccr);
    }
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    return IRQ_HANDLED;
}
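
/**
 * qla25xx_msix_rsp_q() - MSI-X vector handler for additional response queues.
 *
 * @dev_id: response queue (struct rsp_que) passed to request_irq()
 *
 * Acknowledges the interrupt when handshaking is enabled, then defers
 * the actual queue processing to the rsp->q_work item, placed with
 * queue_work_on() using the queue id to select the CPU.
 */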
static irqreturn_t
qla25xx_msix_rsp_q(int irq, void *dev_id)
{
    struct qla_hw_data *ha;
    struct rsp_que *rsp;
    struct device_reg_24xx __iomem *reg;
    unsigned long flags;

    rsp = (struct rsp_que *) dev_id;
    if (!rsp) {
        printk(KERN_INFO
            "%s(): NULL response queue pointer.\n", __func__);
        return IRQ_NONE;
    }
    ha = rsp->hw;

    /* Clear the interrupt, if enabled, for this response queue. */
    if (!ha->flags.disable_msix_handshake) {
        reg = &ha->iobase->isp24;
        spin_lock_irqsave(&ha->hardware_lock, flags);
        WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
        RD_REG_DWORD_RELAXED(&reg->hccr);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
    }
    queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);

    return IRQ_HANDLED;
}
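
/**
 * qla24xx_msix_default() - Default MSI-X vector handler.
 *
 * @dev_id: response queue (struct rsp_que) passed to request_irq()
 *
 * Services mailbox completions, asynchronous events, and response
 * queue updates for a single interrupt event, mirroring one iteration
 * of qla24xx_intr_handler().
 */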
static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
    scsi_qla_host_t *vha;
    struct qla_hw_data *ha;
    struct rsp_que *rsp;
    struct device_reg_24xx __iomem *reg;
    int status;
    uint32_t stat;
    uint32_t hccr;
    uint16_t mb[4];
    unsigned long flags;

    rsp = (struct rsp_que *) dev_id;
    if (!rsp) {
        printk(KERN_INFO
            "%s(): NULL response queue pointer.\n", __func__);
        return IRQ_NONE;
    }
    ha = rsp->hw;
    reg = &ha->iobase->isp24;
    status = 0;

    spin_lock_irqsave(&ha->hardware_lock, flags);
    vha = pci_get_drvdata(ha->pdev);
    do {
        stat = RD_REG_DWORD(&reg->host_status);
        if (stat & HSRX_RISC_PAUSED) {
            if (unlikely(pci_channel_offline(ha->pdev)))
                break;

            hccr = RD_REG_DWORD(&reg->hccr);

            ql_log(ql_log_info, vha, 0x5050,
                "RISC paused -- HCCR=%x, Dumping firmware.\n",
                hccr);

            qla2xxx_check_risc_status(vha);

            ha->isp_ops->fw_dump(vha, 1);
            set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
            break;
        } else if ((stat & HSRX_RISC_INT) == 0)
            break;

        switch (stat & 0xff) {
        case 0x1:
        case 0x2:
        case 0x10:
        case 0x11:
            qla24xx_mbx_completion(vha, MSW(stat));
            status |= MBX_INTERRUPT;
            break;
        case 0x12:
            mb[0] = MSW(stat);
            mb[1] = RD_REG_WORD(&reg->mailbox1);
            mb[2] = RD_REG_WORD(&reg->mailbox2);
            mb[3] = RD_REG_WORD(&reg->mailbox3);
            qla2x00_async_event(vha, rsp, mb);
            break;
        case 0x13:
        case 0x14:
            qla24xx_process_response_queue(vha, rsp);
            break;
        default:
            ql_dbg(ql_dbg_async, vha, 0x5051,
                "Unrecognized interrupt type (%d).\n",
                stat & 0xff);
            break;
        }
        WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
    } while (0);
    spin_unlock_irqrestore(&ha->hardware_lock, flags);

    if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
        (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
        set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
        complete(&ha->mbx_intr_comp);
    }

    return IRQ_HANDLED;
}
/* Interrupt handling helpers. */

struct qla_init_msix_entry {
    const char *name;
    irq_handler_t handler;
};
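
/*
 * Per-vector names and handlers: entry 0 services mailbox completions
 * and asynchronous events, entry 1 the base response queue, and entry
 * 2 ("multiq") any additional response queues; see qla25xx_request_irq().
 */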
static struct qla_init_msix_entry msix_entries[3] = {
    { "qla2xxx (default)", qla24xx_msix_default },
    { "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
    { "qla2xxx (multiq)", qla25xx_msix_rsp_q },
};

static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
    { "qla2xxx (default)", qla82xx_msix_default },
    { "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};
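
/*
 * Free every MSI-X vector that was registered, disable MSI-X on the
 * PCI function, and release the ha->msix_entries bookkeeping.
 */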
static void
qla24xx_disable_msix(struct qla_hw_data *ha)
{
    int i;
    struct qla_msix_entry *qentry;
    scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

    for (i = 0; i < ha->msix_count; i++) {
        qentry = &ha->msix_entries[i];
        if (qentry->have_irq)
            free_irq(qentry->vector, qentry->rsp);
    }
    pci_disable_msix(ha->pdev);
    kfree(ha->msix_entries);
    ha->msix_entries = NULL;
    ha->flags.msix_enabled = 0;
    ql_dbg(ql_dbg_init, vha, 0x0042,
        "Disabled MSI-X.\n");
}
static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
#define MIN_MSIX_COUNT	2
    int i, ret;
    struct msix_entry *entries;
    struct qla_msix_entry *qentry;
    scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

    entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
        GFP_KERNEL);
    if (!entries) {
        ql_log(ql_log_warn, vha, 0x00bc,
            "Failed to allocate memory for msix_entry.\n");
        return -ENOMEM;
    }

    for (i = 0; i < ha->msix_count; i++)
        entries[i].entry = i;

    ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
    if (ret) {
        if (ret < MIN_MSIX_COUNT)
            goto msix_failed;

        ql_log(ql_log_warn, vha, 0x00c6,
            "MSI-X: Failed to enable support -- %d/%d. "
            "Retrying with %d vectors.\n",
            ha->msix_count, ret, ret);
        ha->msix_count = ret;
        ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
        if (ret) {
msix_failed:
            ql_log(ql_log_fatal, vha, 0x00c7,
                "MSI-X: Failed to enable support, "
                "giving up -- %d/%d.\n",
                ha->msix_count, ret);
            goto msix_out;
        }
        ha->max_rsp_queues = ha->msix_count - 1;
    }
    ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
        ha->msix_count, GFP_KERNEL);
    if (!ha->msix_entries) {
        ql_log(ql_log_fatal, vha, 0x00c8,
            "Failed to allocate memory for ha->msix_entries.\n");
        ret = -ENOMEM;
        goto msix_out;
    }
    ha->flags.msix_enabled = 1;

    for (i = 0; i < ha->msix_count; i++) {
        qentry = &ha->msix_entries[i];
        qentry->vector = entries[i].vector;
        qentry->entry = entries[i].entry;
        qentry->have_irq = 0;
        qentry->rsp = NULL;
    }

    /* Enable MSI-X vectors for the base queue. */
    for (i = 0; i < 2; i++) {
        qentry = &ha->msix_entries[i];
        if (IS_QLA82XX(ha)) {
            ret = request_irq(qentry->vector,
                qla82xx_msix_entries[i].handler,
                0, qla82xx_msix_entries[i].name, rsp);
        } else {
            ret = request_irq(qentry->vector,
                msix_entries[i].handler,
                0, msix_entries[i].name, rsp);
        }
        if (ret) {
            ql_log(ql_log_fatal, vha, 0x00cb,
                "MSI-X: unable to register handler -- %x/%d.\n",
                qentry->vector, ret);
            qla24xx_disable_msix(ha);
            ha->mqenable = 0;
            goto msix_out;
        }
        qentry->have_irq = 1;
        qentry->rsp = rsp;
        rsp->msix = qentry;
    }

    /* Enable MSI-X vector for response queue update for queue 0. */
    if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
        ha->mqenable = 1;
    ql_dbg(ql_dbg_multiq, vha, 0xc005,
        "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
        ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
    ql_dbg(ql_dbg_init, vha, 0x0055,
        "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
        ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);

msix_out:
    kfree(entries);
    return ret;
}
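
/*
 * Attach the best available interrupt scheme for this ISP: MSI-X
 * first (skipping ISP2432 subsystem IDs and early revisions known not
 * to support it), then MSI, then a shared INTx line.  On success the
 * host/RISC interrupt bits are cleared before returning.
 */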
int
qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
    int ret;
    device_reg_t __iomem *reg = ha->iobase;
    scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

    /* If possible, enable MSI-X. */
    if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
        !IS_QLA8432(ha) && !IS_QLA8XXX_TYPE(ha))
        goto skip_msi;

    if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
        (ha->pdev->subsystem_device == 0x7040 ||
         ha->pdev->subsystem_device == 0x7041 ||
         ha->pdev->subsystem_device == 0x1705)) {
        ql_log(ql_log_warn, vha, 0x0034,
            "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X,0x%X).\n",
            ha->pdev->subsystem_vendor,
            ha->pdev->subsystem_device);
        goto skip_msi;
    }

    if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
        ql_log(ql_log_warn, vha, 0x0035,
            "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
            ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
        goto skip_msix;
    }

    ret = qla24xx_enable_msix(ha, rsp);
    if (!ret) {
        ql_dbg(ql_dbg_init, vha, 0x0036,
            "MSI-X: Enabled (0x%X, 0x%X).\n",
            ha->chip_revision, ha->fw_attributes);
        goto clear_risc_ints;
    }
    ql_log(ql_log_info, vha, 0x0037,
        "MSI-X: Falling back to MSI mode -- %d.\n", ret);

skip_msix:

    if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
        !IS_QLA8001(ha))
        goto skip_msi;

    ret = pci_enable_msi(ha->pdev);
    if (!ret) {
        ql_dbg(ql_dbg_init, vha, 0x0038,
            "MSI: Enabled.\n");
        ha->flags.msi_enabled = 1;
    } else
        ql_log(ql_log_warn, vha, 0x0039,
            "MSI: Falling back to INTa mode -- %d.\n", ret);

skip_msi:

    ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
        ha->flags.msi_enabled ? 0 : IRQF_SHARED,
        QLA2XXX_DRIVER_NAME, rsp);
    if (ret) {
        ql_log(ql_log_warn, vha, 0x003a,
            "Failed to reserve interrupt %d -- already in use.\n",
            ha->pdev->irq);
        goto fail;
    }

clear_risc_ints:

    /*
     * FIXME: Noted that 8014s were being dropped during NK testing.
     * Timing deltas during MSI-X/INTa transitions?
     */
    if (IS_QLA81XX(ha) || IS_QLA82XX(ha))
        goto fail;
    spin_lock_irq(&ha->hardware_lock);
    if (IS_FWI2_CAPABLE(ha)) {
        WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
        WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
    } else {
        WRT_REG_WORD(&reg->isp.semaphore, 0);
        WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
        WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
    }
    spin_unlock_irq(&ha->hardware_lock);

fail:
    return ret;
}
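
/*
 * Release whichever interrupt resources qla2x00_request_irqs()
 * attached: the MSI-X vectors, an MSI vector, or the INTx line.
 */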
void
qla2x00_free_irqs(scsi_qla_host_t *vha)
{
    struct qla_hw_data *ha = vha->hw;
    struct rsp_que *rsp = ha->rsp_q_map[0];

    if (ha->flags.msix_enabled)
        qla24xx_disable_msix(ha);
    else if (ha->flags.msi_enabled) {
        free_irq(ha->pdev->irq, rsp);
        pci_disable_msi(ha->pdev);
    } else
        free_irq(ha->pdev->irq, rsp);
}
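
/*
 * Register the "multiq" MSI-X handler (msix_entries[2]) for a
 * response queue created after initial attach.
 */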
int qla25xx_request_irq(struct rsp_que *rsp)
{
    struct qla_hw_data *ha = rsp->hw;
    struct qla_init_msix_entry *intr = &msix_entries[2];
    struct qla_msix_entry *msix = rsp->msix;
    scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
    int ret;

    ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
    if (ret) {
        ql_log(ql_log_fatal, vha, 0x00e6,
            "MSI-X: Unable to register handler -- %x/%d.\n",