/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>
static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_process_completed_request(struct scsi_qla_host *,
	struct req_que *, uint32_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
	sts_entry_t *);
/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq:
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint16_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505d,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		hccr = RD_REG_WORD(&reg->hccr);
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

			/*
			 * Issue a "HARD" reset in order for the RISC interrupt
			 * bit to be cleared.  Schedule a big hammer to get
			 * out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);
			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(vha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(vha, rsp, mb);
			} else {
				ql_dbg(ql_dbg_async, vha, 0x5025,
				    "Unrecognized interrupt type (%d).\n",
				    mb[0]);
			}
			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			RD_REG_WORD(&reg->semaphore);
		} else
			qla2x00_process_response_queue(rsp);

		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return (IRQ_HANDLED);
}
/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq:
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint16_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	struct qla_hw_data *ha;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5058,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (stat & HSR_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_WORD(&reg->hccr);

			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				ql_log(ql_log_warn, vha, 0x5026,
				    "Parity error -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);
			else
				ql_log(ql_log_warn, vha, 0x5027,
				    "RISC paused -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;
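		/*
		 * The low byte of host_status selects the service action in
		 * the switch below, while Mailbox0 rides in the upper word of
		 * the same register -- which is why the completion paths pass
		 * MSW(stat) rather than re-reading a mailbox register.
		 */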
		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(rsp);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(vha, rsp, mb);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5028,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return (IRQ_HANDLED);
}
/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint32_t	mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Read all mbox registers? */
	mboxes = (1 << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		wptr++;
		mboxes >>= 1;
	}
}
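/*
 * Note on the loop above: the "mboxes" bitmap (taken from ha->mcp->in_mb
 * when a mailbox command is pending) gates which registers are captured,
 * and mailboxes 4 and 5 go through qla2x00_debounce_register(), which
 * re-reads until consecutive reads agree -- presumably because those
 * registers can still be in flux when the completion interrupt fires.
 */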
static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
	static char *event[] =
		{ "Complete", "Request Notification", "Time Extension" };
	int rval;
	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
	uint16_t __iomem *wptr;
	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

	/* Seed data -- mailbox1 -> mailbox7. */
	wptr = (uint16_t __iomem *)&reg24->mailbox1;
	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
		mb[cnt] = RD_REG_WORD(wptr);

	ql_dbg(ql_dbg_async, vha, 0x5021,
	    "Inter-Driver Communication %s -- "
	    "%04x %04x %04x %04x %04x %04x %04x.\n",
	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
	    mb[4], mb[5], mb[6]);

	/* Acknowledgement needed? [Notify && non-zero timeout]. */
	timeout = (descr >> 8) & 0xf;
	if (aen != MBA_IDC_NOTIFY || !timeout)
		return;

	ql_dbg(ql_dbg_async, vha, 0x5022,
	    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
	    vha->host_no, event[aen & 0xff], timeout);

	rval = qla2x00_post_idc_ack_work(vha, mb);
	if (rval != QLA_SUCCESS)
		ql_log(ql_log_warn, vha, 0x5023,
		    "IDC failed to post ACK.\n");
}
char *
qla2x00_get_link_speed_str(struct qla_hw_data *ha)
{
	static char *link_speeds[] = {"1", "2", "?", "4", "8", "16", "10"};
	char *link_speed;
	int fw_speed = ha->link_data_rate;

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		link_speed = link_speeds[0];
	else if (fw_speed == 0x13)
		link_speed = link_speeds[6];
	else {
		link_speed = link_speeds[LS_UNKNOWN];
		if (fw_speed < 6)
			link_speed =
			    link_speeds[fw_speed];
	}

	return link_speed;
}
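/*
 * Sketch of the mapping above: ha->link_data_rate indexes link_speeds[]
 * directly (0 -> "1", 1 -> "2", 3 -> "4", 4 -> "8", 5 -> "16" Gbps), with
 * two special cases -- ISP2100/ISP2200 always report 1 Gbps, and firmware
 * rate 0x13 selects the "10" entry (presumably 10-gigabit CNA links);
 * anything else out of range prints the "?" placeholder via LS_UNKNOWN.
 */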
/**
 * qla2x00_async_event() - Process asynchronous events.
 * @ha: SCSI driver HA context
 * @mb: Mailbox registers (0 - 3)
 */
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
	uint16_t	handle_cnt;
	uint16_t	cnt, mbx;
	uint32_t	handles[5];
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
	uint32_t	rscn_entry, host_pid;
	unsigned long	flags;

	/* Setup to process RIO completion. */
	handle_cnt = 0;
	if (IS_CNA_CAPABLE(ha))
		goto skip_rio;
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
		handles[0] = mb[1];
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handles[1] = le32_to_cpu(
		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
		    RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}
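	/*
	 * RIO (reduced interrupt operation) packs several completion handles
	 * into a single event: the 16-bit forms above carry up to five
	 * handles in mb[1..3] plus mailbox registers 6 and 7, while
	 * MBA_CMPLT_2_32BIT rebuilds two 32-bit handles from register pairs.
	 * Each case normalizes mb[0] to MBA_SCSI_COMPLETION so all handles
	 * retire through the common fast-post path below.
	 */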
skip_rio:
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:	/* Fast Post */
		if (!vha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
				handles[cnt]);
		break;

	case MBA_RESET:			/* Reset */
		ql_dbg(ql_dbg_async, vha, 0x5002,
		    "Asynchronous RESET.\n");

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		break;

	case MBA_SYSTEM_ERR:		/* System Error */
		mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ?
			RD_REG_WORD(&reg24->mailbox7) : 0;
		ql_log(ql_log_warn, vha, 0x5003,
		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
		    "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);

		ha->isp_ops->fw_dump(vha, 1);

		if (IS_FWI2_CAPABLE(ha)) {
			if (mb[1] == 0 && mb[2] == 0) {
				ql_log(ql_log_fatal, vha, 0x5004,
				    "Unrecoverable Hardware Error: adapter "
				    "marked OFFLINE!\n");
				vha->flags.online = 0;
				vha->device_flags |= DFLG_DEV_FAILED;
			} else {
				/* Check to see if MPI timeout occurred */
				if ((mbx & MBX_3) && (ha->flags.port0))
					set_bit(MPI_RESET_NEEDED,
					    &vha->dpc_flags);

				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			}
		} else if (mb[1] == 0) {
			ql_log(ql_log_fatal, vha, 0x5005,
			    "Unrecoverable Hardware Error: adapter marked "
			    "OFFLINE!\n");
			vha->flags.online = 0;
			vha->device_flags |= DFLG_DEV_FAILED;
		} else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
		ql_log(ql_log_warn, vha, 0x5006,
		    "ISP Request Transfer Error (%x).\n",  mb[1]);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_log(ql_log_warn, vha, 0x5007,
		    "ISP Response Transfer Error.\n");

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
		ql_dbg(ql_dbg_async, vha, 0x5008,
		    "Asynchronous WAKEUP_THRES.\n");
		break;

	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
		ql_dbg(ql_dbg_async, vha, 0x5009,
		    "LIP occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
		break;

	case MBA_LOOP_UP:		/* Loop Up Event */
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			ha->link_data_rate = PORT_SPEED_1GB;
		else
			ha->link_data_rate = mb[1];

		ql_dbg(ql_dbg_async, vha, 0x500a,
		    "LOOP UP detected (%s Gbps).\n",
		    qla2x00_get_link_speed_str(ha));

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
		break;

	case MBA_LOOP_DOWN:		/* Loop Down Event */
		mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
			? RD_REG_WORD(&reg24->mailbox4) : 0;
		mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
		ql_dbg(ql_dbg_async, vha, 0x500b,
		    "LOOP DOWN detected (%x %x %x %x).\n",
		    mb[1], mb[2], mb[3], mbx);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			vha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		vha->flags.management_server_logged_in = 0;
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
		break;

	case MBA_LIP_RESET:		/* LIP reset occurred */
		ql_dbg(ql_dbg_async, vha, 0x500c,
		    "LIP reset occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		ha->operating_mode = LOOP;
		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
		break;

	/* case MBA_DCBX_COMPLETE: */
	case MBA_POINT_TO_POINT:	/* Point-to-Point */
		if (IS_QLA2100(ha))
			break;

		if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA8031(ha)) {
			ql_dbg(ql_dbg_async, vha, 0x500d,
			    "DCBX Completed -- %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
			if (ha->notify_dcbx_comp)
				complete(&ha->dcbx_comp);
		} else
			ql_dbg(ql_dbg_async, vha, 0x500e,
			    "Asynchronous P2P MODE received.\n");

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
			set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		ha->flags.gpsc_supported = 1;
		vha->flags.management_server_logged_in = 0;
		break;

	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
		if (IS_QLA2100(ha))
			break;

		ql_dbg(ql_dbg_async, vha, 0x500f,
		    "Configuration change detected: value=%x.\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		break;

	case MBA_PORT_UPDATE:		/* Port database update */
		/*
		 * Handle only global and vn-port update events
		 *
		 * Relevant inputs:
		 * mb[1] = N_Port handle of changed port
		 * OR 0xffff for global event
		 * mb[2] = New login state
		 * 7 = Port logged out
		 * mb[3] = LSB is vp_idx, 0xff = all vps
		 *
		 * Skip processing if:
		 *       Event is global, vp_idx is NOT all vps,
		 *           vp_idx does not match
		 *       Event is not global, vp_idx does not match
		 */
		if (IS_QLA2XXX_MIDTYPE(ha) &&
		    ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
			(mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
			break;
		/* Global event -- port logout or port unavailable. */
		if (mb[1] == 0xffff && mb[2] == 0x7) {
			ql_dbg(ql_dbg_async, vha, 0x5010,
			    "Port unavailable %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
			ql_log(ql_log_warn, vha, 0x505e,
			    "Link is offline.\n");

			if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
				atomic_set(&vha->loop_state, LOOP_DOWN);
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
				vha->device_flags |= DFLG_NO_CABLE;
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			if (vha->vp_idx) {
				atomic_set(&vha->vp_state, VP_FAILED);
				fc_vport_set_state(vha->fc_vport,
				    FC_VPORT_FAILED);
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			vha->flags.management_server_logged_in = 0;
			ha->link_data_rate = PORT_SPEED_UNKNOWN;
			break;
		}

		/*
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
		 * event etc. earlier indicating loop is down) then process
		 * it.  Otherwise ignore it and Wait for RSCN to come in.
		 */
		atomic_set(&vha->loop_down_timer, 0);
		if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
		    atomic_read(&vha->loop_state) != LOOP_DEAD) {
			ql_dbg(ql_dbg_async, vha, 0x5011,
			    "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
			    mb[1], mb[2], mb[3]);

			qlt_async_event(mb[0], vha, mb);
			break;
		}

		ql_dbg(ql_dbg_async, vha, 0x5012,
		    "Port database changed %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		ql_log(ql_log_warn, vha, 0x505f,
		    "Link is operational (%s Gbps).\n",
		    qla2x00_get_link_speed_str(ha));

		/*
		 * Mark all devices as missing so we will login again.
		 */
		atomic_set(&vha->loop_state, LOOP_UP);

		qla2x00_mark_all_devices_lost(vha, 1);

		if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
			set_bit(SCR_PENDING, &vha->dpc_flags);

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);

		qlt_async_event(mb[0], vha, mb);
		break;

	case MBA_RSCN_UPDATE:		/* State Change Registration */
		/* Check if the Vport has issued a SCR */
		if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
			break;
		/* Only handle SCNs for our Vport index. */
		if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
			break;

		ql_dbg(ql_dbg_async, vha, 0x5013,
		    "RSCN database changed -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
		host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
				| vha->d_id.b.al_pa;
		if (rscn_entry == host_pid) {
			ql_dbg(ql_dbg_async, vha, 0x5014,
			    "Ignoring RSCN update to local host "
			    "port ID (%06x).\n", host_pid);
			break;
		}

		/* Ignore reserved bits from RSCN-payload. */
		rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];

		atomic_set(&vha->loop_down_timer, 0);
		vha->flags.management_server_logged_in = 0;

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(RSCN_UPDATE, &vha->dpc_flags);
		qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
		break;

	/* case MBA_RIO_RESPONSE: */
	case MBA_ZIO_RESPONSE:
		ql_dbg(ql_dbg_async, vha, 0x5015,
		    "[R|Z]IO update completion.\n");

		if (IS_FWI2_CAPABLE(ha))
			qla24xx_process_response_queue(vha, rsp);
		else
			qla2x00_process_response_queue(rsp);
		break;

	case MBA_DISCARD_RND_FRAME:
		ql_dbg(ql_dbg_async, vha, 0x5016,
		    "Discard RND Frame -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;

	case MBA_TRACE_NOTIFICATION:
		ql_dbg(ql_dbg_async, vha, 0x5017,
		    "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
		break;

	case MBA_ISP84XX_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x5018,
		    "ISP84XX Alert Notification -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
		switch (mb[1]) {
		case A84_PANIC_RECOVERY:
			ql_log(ql_log_info, vha, 0x5019,
			    "Alert 84XX: panic recovery %04x %04x.\n",
			    mb[2], mb[3]);
			break;
		case A84_OP_LOGIN_COMPLETE:
			ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501a,
			    "Alert 84XX: firmware version %x.\n",
			    ha->cs84xx->op_fw_version);
			break;
		case A84_DIAG_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501b,
			    "Alert 84XX: diagnostic firmware version %x.\n",
			    ha->cs84xx->diag_fw_version);
			break;
		case A84_GOLD_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ha->cs84xx->fw_update = 1;
			ql_log(ql_log_info, vha, 0x501c,
			    "Alert 84XX: gold firmware version %x.\n",
			    ha->cs84xx->gold_fw_version);
			break;
		default:
			ql_log(ql_log_warn, vha, 0x501d,
			    "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
		}
		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
		break;
	case MBA_DCBX_START:
		ql_dbg(ql_dbg_async, vha, 0x501e,
		    "DCBX Started -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_DCBX_PARAM_UPDATE:
		ql_dbg(ql_dbg_async, vha, 0x501f,
		    "DCBX Parameters Updated -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_FCF_CONF_ERR:
		ql_dbg(ql_dbg_async, vha, 0x5020,
		    "FCF Configuration Error -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_IDC_COMPLETE:
	case MBA_IDC_NOTIFY:
	case MBA_IDC_TIME_EXT:
		qla81xx_idc_event(vha, mb[0], mb[1]);
		break;
	default:
		ql_dbg(ql_dbg_async, vha, 0x5057,
		    "Unknown AEN:%04x %04x %04x %04x\n",
		    mb[0], mb[1], mb[2], mb[3]);
	}

	qlt_async_event(mb[0], vha, mb);

	if (!vha->vp_idx && ha->num_vhosts)
		qla2x00_alert_all_vps(rsp, mb);
}
/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @ha: SCSI driver HA context
 * @index: SRB index
 */
static void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
				struct req_que *req, uint32_t index)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;

	/* Validate handle. */
	if (index >= MAX_OUTSTANDING_COMMANDS) {
		ql_log(ql_log_warn, vha, 0x3014,
		    "Invalid SCSI command index (%x).\n", index);

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot. */
		req->outstanding_cmds[index] = NULL;

		/* Save ISP completion status */
		sp->done(ha, sp, DID_OK << 16);
	} else {
		ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}
}
srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
    struct req_que *req, void *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	sts_entry_t *pkt = iocb;
	srb_t *sp = NULL;
	uint16_t index;

	index = LSW(pkt->handle);
	if (index >= MAX_OUTSTANDING_COMMANDS) {
		ql_log(ql_log_warn, vha, 0x5031,
		    "Invalid command index (%x).\n", index);
		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		goto done;
	}
	sp = req->outstanding_cmds[index];
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x5032,
		    "Invalid completion handle (%x) -- timed-out.\n", index);
		return sp;
	}
	if (sp->handle != index) {
		ql_log(ql_log_warn, vha, 0x5033,
		    "SRB handle (%x) mismatch %x.\n", sp->handle, index);
		return NULL;
	}

	req->outstanding_cmds[index] = NULL;

done:
	return sp;
}
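/*
 * Note on IOCB handles, as used above and in qla2x00_status_entry(): the
 * driver packs the request-queue number into the most-significant word and
 * the outstanding-command index into the least-significant word, so LSW()
 * recovers the outstanding_cmds[] slot and the slot is cross-checked
 * against sp->handle before the completion is trusted.
 */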
static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
	const char func[] = "MBX-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint16_t status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (mbx->entry_status) {
		ql_dbg(ql_dbg_async, vha, 0x5043,
		    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
		    "entry-status=%x status=%x state-flag=%x "
		    "status-flags=%x.\n", type, sp->handle,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mbx->entry_status,
		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
		    le16_to_cpu(mbx->status_flags));

		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
		    (uint8_t *)mbx, sizeof(*mbx));

		goto logio_done;
	}

	status = le16_to_cpu(mbx->status);
	if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
		status = 0;
	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5045,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le16_to_cpu(mbx->mb1));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type == SRB_LOGIN_CMD) {
			fcport->port_type = FCT_TARGET;
			if (le16_to_cpu(mbx->mb1) & BIT_0)
				fcport->port_type = FCT_INITIATOR;
			else if (le16_to_cpu(mbx->mb1) & BIT_1)
				fcport->flags |= FCF_FCP2_DEVICE;
		}
		goto logio_done;
	}

	data[0] = le16_to_cpu(mbx->mb0);
	switch (data[0]) {
	case MBS_PORT_ID_USED:
		data[1] = le16_to_cpu(mbx->mb1);
		break;
	case MBS_LOOP_ID_USED:
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_log(ql_log_warn, vha, 0x5046,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
	    le16_to_cpu(mbx->mb7));

logio_done:
	sp->done(vha, sp, 0);
}
static void
qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    sts_entry_t *pkt, int iocb_type)
{
	const char func[] = "CT_IOCB";
	const char *type;
	srb_t *sp;
	struct fc_bsg_job *bsg_job;
	uint16_t comp_status;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	bsg_job = sp->u.bsg_job;

	type = "ct pass-through";

	comp_status = le16_to_cpu(pkt->comp_status);

	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload  to the caller
	 */
	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			res = DID_OK << 16;
			bsg_job->reply->reply_payload_rcv_len =
			    le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);

			ql_log(ql_log_warn, vha, 0x5048,
			    "CT pass-through-%s error "
			    "comp_status-status=0x%x total_byte = 0x%x.\n",
			    type, comp_status,
			    bsg_job->reply->reply_payload_rcv_len);
		} else {
			ql_log(ql_log_warn, vha, 0x5049,
			    "CT pass-through-%s error "
			    "comp_status-status=0x%x.\n", type, comp_status);
			res = DID_ERROR << 16;
			bsg_job->reply->reply_payload_rcv_len = 0;
		}
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		res = DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

	sp->done(vha, sp, res);
}
static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct sts_entry_24xx *pkt, int iocb_type)
{
	const char func[] = "ELS_CT_IOCB";
	const char *type;
	srb_t *sp;
	struct fc_bsg_job *bsg_job;
	uint16_t comp_status;
	uint32_t fw_status[3];
	uint8_t* fw_sts_ptr;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;
	bsg_job = sp->u.bsg_job;

	type = NULL;
	switch (sp->type) {
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		type = "els";
		break;
	case SRB_CT_CMD:
		type = "ct pass-through";
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x503e,
		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
		return;
	}

	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
	fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
	fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);

	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload  to the caller
	 */
	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			res = DID_OK << 16;
			bsg_job->reply->reply_payload_rcv_len =
			    le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);

			ql_dbg(ql_dbg_user, vha, 0x503f,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
			    type, sp->handle, comp_status, fw_status[1], fw_status[2],
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->total_byte_count));
			fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
			memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
		} else {
			ql_dbg(ql_dbg_user, vha, 0x5040,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
			    type, sp->handle, comp_status,
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_1),
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_2));
			res = DID_ERROR << 16;
			bsg_job->reply->reply_payload_rcv_len = 0;
			fw_sts_ptr = ((uint8_t*)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
			memcpy( fw_sts_ptr, fw_status, sizeof(fw_status));
		}
		ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		res = DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

	sp->done(vha, sp, res);
}
static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct logio_entry_24xx *logio)
{
	const char func[] = "LOGIO-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint32_t iop[2];

	sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
		QLA_LOGIO_LOGIN_RETRIED : 0;
	if (logio->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5034,
		    "Async-%s error entry - hdl=%x"
		    "portid=%02x%02x%02x entry-status=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    logio->entry_status);
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
		    (uint8_t *)logio, sizeof(*logio));

		goto logio_done;
	}

	if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
		ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x "
		    "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le32_to_cpu(logio->io_parameter[0]));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type != SRB_LOGIN_CMD)
			goto logio_done;

		iop[0] = le32_to_cpu(logio->io_parameter[0]);
		if (iop[0] & BIT_4) {
			fcport->port_type = FCT_TARGET;
			if (iop[0] & BIT_8)
				fcport->flags |= FCF_FCP2_DEVICE;
		} else if (iop[0] & BIT_5)
			fcport->port_type = FCT_INITIATOR;

		if (iop[0] & BIT_7)
			fcport->flags |= FCF_CONF_COMP_SUPPORTED;

		if (logio->io_parameter[7] || logio->io_parameter[8])
			fcport->supported_classes |= FC_COS_CLASS2;
		if (logio->io_parameter[9] || logio->io_parameter[10])
			fcport->supported_classes |= FC_COS_CLASS3;

		goto logio_done;
	}

	iop[0] = le32_to_cpu(logio->io_parameter[0]);
	iop[1] = le32_to_cpu(logio->io_parameter[1]);
	switch (iop[0]) {
	case LSC_SCODE_PORTID_USED:
		data[0] = MBS_PORT_ID_USED;
		data[1] = LSW(iop[1]);
		break;
	case LSC_SCODE_NPORT_USED:
		data[0] = MBS_LOOP_ID_USED;
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
	    "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    le16_to_cpu(logio->comp_status),
	    le32_to_cpu(logio->io_parameter[0]),
	    le32_to_cpu(logio->io_parameter[1]));

logio_done:
	sp->done(vha, sp, 0);
}
static void
qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct tsk_mgmt_entry *tsk)
{
	const char func[] = "TMF-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *iocb;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
	int error = 1;

	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
	if (!sp)
		return;

	iocb = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;

	if (sts->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5038,
		    "Async-%s error - hdl=%x entry-status(%x).\n",
		    type, sp->handle, sts->entry_status);
	} else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
		ql_log(ql_log_warn, fcport->vha, 0x5039,
		    "Async-%s error - hdl=%x completion status(%x).\n",
		    type, sp->handle, sts->comp_status);
	} else if (!(le16_to_cpu(sts->scsi_status) &
	    SS_RESPONSE_INFO_LEN_VALID)) {
		ql_log(ql_log_warn, fcport->vha, 0x503a,
		    "Async-%s error - hdl=%x no response info(%x).\n",
		    type, sp->handle, sts->scsi_status);
	} else if (le32_to_cpu(sts->rsp_data_len) < 4) {
		ql_log(ql_log_warn, fcport->vha, 0x503b,
		    "Async-%s error - hdl=%x not enough response(%d).\n",
		    type, sp->handle, sts->rsp_data_len);
	} else if (sts->data[3]) {
		ql_log(ql_log_warn, fcport->vha, 0x503c,
		    "Async-%s error - hdl=%x response(%x).\n",
		    type, sp->handle, sts->data[3]);
	} else {
		error = 0;
	}

	if (error) {
		iocb->u.tmf.data = error;
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
		    (uint8_t *)sts, sizeof(*sts));
	}

	sp->done(vha, sp, 0);
}
/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @ha: SCSI driver HA context
 */
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t	*pkt;
	uint16_t        handle_cnt;
	uint16_t        cnt;

	vha = pci_get_drvdata(ha->pdev);

	if (!vha->flags.online)
		return;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_TYPE_21:
			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts21_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_TYPE_22:
			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts22_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case MBX_IOCB_TYPE:
			qla2x00_mbx_iocb_entry(vha, rsp->req,
			    (struct mbx_entry *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		default:
			/* Type Not Supported. */
			ql_log(ql_log_warn, vha, 0x504a,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}
static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
	uint32_t sense_len, struct rsp_que *rsp, int res)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cp = GET_CMD_SP(sp);
	uint32_t track_sense_len;

	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	SET_CMD_SENSE_LEN(sp, sense_len);
	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
	track_sense_len = sense_len;

	if (sense_len > par_sense_len)
		sense_len = par_sense_len;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
	track_sense_len -= sense_len;
	SET_CMD_SENSE_LEN(sp, track_sense_len);

	if (track_sense_len != 0) {
		rsp->status_srb = sp;
		cp->result = res;
	}

	if (sense_len) {
		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
		    "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
		    sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
		    cp);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
		    cp->sense_buffer, sense_len);
	}
}
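/*
 * Sense data larger than the status IOCB's embedded buffer arrives in
 * follow-on status-continuation entries: the remaining length and pointer
 * are parked on the srb via SET_CMD_SENSE_LEN()/SET_CMD_SENSE_PTR() and
 * rsp->status_srb above, and qla2x00_status_cont_entry() later drains them
 * before the command is completed.
 */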
struct scsi_dif_tuple {
	__be16 guard;	/* Checksum */
	__be16 app_tag;	/* APPL identifier */
	__be32 ref_tag;	/* Target LBA or indirect LBA */
};
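/*
 * Each protection-information tuple above is 8 bytes (2-byte guard CRC,
 * 2-byte application tag, 4-byte reference tag) appended per logical block,
 * which is why qla2x00_handle_dif_error() below sizes a protection
 * scatterlist segment as sg_dma_len(sg) / 8 entries.
 */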
/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA. In case of errors, we set the
 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
 * to indicate to the kernel that the HBA detected error.
 */
static inline int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t		*ap = &sts24->data[12];
	uint8_t		*ep = &sts24->data[20];
	uint32_t	e_ref_tag, a_ref_tag;
	uint16_t	e_app_tag, a_app_tag;
	uint16_t	e_guard, a_guard;

	/*
	 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
	 * would make guard field appear at offset 2
	 */
	a_guard   = le16_to_cpu(*(uint16_t *)(ap + 2));
	a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
	a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
	e_guard   = le16_to_cpu(*(uint16_t *)(ep + 2));
	e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
	e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));

	ql_dbg(ql_dbg_io, vha, 0x3023,
	    "iocb(s) %p Returned STATUS.\n", sts24);

	ql_dbg(ql_dbg_io, vha, 0x3024,
	    "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
	    " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
	    " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
	    cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
	    a_app_tag, e_app_tag, a_guard, e_guard);

	/*
	 * Ignore sector if:
	 * For type     3: ref & app tag is all 'f's
	 * For type 0,1,2: app tag is all 'f's
	 */
	if ((a_app_tag == 0xffff) &&
	    ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
	     (a_ref_tag == 0xffffffff))) {
		uint32_t blocks_done, resid;
		sector_t lba_s = scsi_get_lba(cmd);

		/* 2TB boundary case covered automatically with this */
		blocks_done = e_ref_tag - (uint32_t)lba_s + 1;

		resid = scsi_bufflen(cmd) - (blocks_done *
		    cmd->device->sector_size);

		scsi_set_resid(cmd, resid);
		cmd->result = DID_OK << 16;

		/* Update protection tag */
		if (scsi_prot_sg_count(cmd)) {
			uint32_t i, j = 0, k = 0, num_ent;
			struct scatterlist *sg;
			struct sd_dif_tuple *spt;

			/* Patch the corresponding protection tags */
			scsi_for_each_prot_sg(cmd, sg,
			    scsi_prot_sg_count(cmd), i) {
				num_ent = sg_dma_len(sg) / 8;
				if (k + num_ent < blocks_done) {
					k += num_ent;
					continue;
				}
				j = blocks_done - k - 1;
				k = blocks_done;
				break;
			}

			if (k != blocks_done) {
				ql_log(ql_log_warn, vha, 0x302f,
				    "unexpected tag values tag:lba=%x:%llx)\n",
				    e_ref_tag, (unsigned long long)lba_s);
				return 1;
			}

			spt = page_address(sg_page(sg)) + sg->offset;
			spt += j;

			spt->app_tag = 0xffff;
			if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
				spt->ref_tag = 0xffffffff;
		}

		return 0;
	}

	/* check guard */
	if (e_guard != a_guard) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x1);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	/* check ref tag */
	if (e_ref_tag != a_ref_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x3);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	/* check appl tag */
	if (e_app_tag != a_app_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x2);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	return 1;
}
/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
{
	srb_t		*sp;
	fc_port_t	*fcport;
	struct scsi_cmnd *cp;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;
	uint16_t	comp_status;
	uint16_t	scsi_status;
	uint16_t	ox_id;
	uint8_t		lscsi_status;
	int32_t		resid;
	uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
	    fw_resid_len;
	uint8_t		*rsp_info, *sense_data;
	struct qla_hw_data *ha = vha->hw;
	uint32_t handle;
	uint16_t que;
	struct req_que *req;
	int logit = 1;
	int res = 0;

	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;
	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}
	handle = (uint32_t) LSW(sts->handle);
	que = MSW(sts->handle);
	req = ha->req_q_map[que];

	/* Fast path completion. */
	if (comp_status == CS_COMPLETE && scsi_status == 0) {
		qla2x00_process_completed_request(vha, req, handle);

		return;
	}

	/* Validate handle. */
	if (handle < MAX_OUTSTANDING_COMMANDS) {
		sp = req->outstanding_cmds[handle];
		req->outstanding_cmds[handle] = NULL;
	} else
		sp = NULL;

	if (sp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3017,
		    "Invalid status handle (0x%x).\n", sts->handle);

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		return;
	}

	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3018,
		    "Command already returned (0x%x/%p).\n",
		    sts->handle, sp);

		return;
	}

	lscsi_status = scsi_status & STATUS_MASK;

	fcport = sp->fcport;

	ox_id = 0;
	sense_len = par_sense_len = rsp_info_len = resid_len =
	    fw_resid_len = 0;
	if (IS_FWI2_CAPABLE(ha)) {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le32_to_cpu(sts24->sense_len);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
			resid_len = le32_to_cpu(sts24->rsp_residual_count);
		if (comp_status == CS_DATA_UNDERRUN)
			fw_resid_len = le32_to_cpu(sts24->residual_len);
		rsp_info = sts24->data;
		sense_data = sts24->data;
		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
		ox_id = le16_to_cpu(sts24->ox_id);
		par_sense_len = sizeof(sts24->data);
	} else {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le16_to_cpu(sts->req_sense_length);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le16_to_cpu(sts->rsp_info_len);
		resid_len = le32_to_cpu(sts->residual_length);
		rsp_info = sts->rsp_info;
		sense_data = sts->req_sense_data;
		par_sense_len = sizeof(sts->req_sense_data);
	}

	/* Check for any FCP transport errors. */
	if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
		/* Sense data lies beyond any FCP RESPONSE data. */
		if (IS_FWI2_CAPABLE(ha)) {
			sense_data += rsp_info_len;
			par_sense_len -= rsp_info_len;
		}
		if (rsp_info_len > 3 && rsp_info[3]) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3019,
			    "FCP I/O protocol failure (0x%x/0x%x).\n",
			    rsp_info_len, rsp_info[3]);

			res = DID_BUS_BUSY << 16;
			goto out;
		}
	}

	/* Check for overrun. */
	if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
	    scsi_status & SS_RESIDUAL_OVER)
		comp_status = CS_DATA_OVERRUN;

	/*
	 * Based on Host and scsi status generate status code for Linux
	 */
	switch (comp_status) {
	case CS_COMPLETE:
	case CS_QUEUE_FULL:
		if (scsi_status == 0) {
			res = DID_OK << 16;
			break;
		}
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
			resid = resid_len;
			scsi_set_resid(cp, resid);

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301a,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		}
		res = DID_OK << 16 | lscsi_status;

		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
			ql_dbg(ql_dbg_io, fcport->vha, 0x301b,
			    "QUEUE FULL detected.\n");
			break;
		}
		logit = 0;
		if (lscsi_status != SS_CHECK_CONDITION)
			break;

		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (!(scsi_status & SS_SENSE_LEN_VALID))
			break;

		qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
		    rsp, res);
		break;

	case CS_DATA_UNDERRUN:
		/* Use F/W calculated residual length. */
		resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
		scsi_set_resid(cp, resid);
		if (scsi_status & SS_RESIDUAL_UNDER) {
			if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
				    "Dropped frame(s) detected "
				    "(0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16 | lscsi_status;
				goto check_scsi_status;
			}

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			    cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		} else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
			    lscsi_status != SAM_STAT_BUSY) {
			/*
			 * scsi status of task set and busy are considered to be
			 * task not completed.
			 */

			ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
			    "Dropped frame(s) detected (0x%x "
			    "of 0x%x bytes).\n", resid,
			    scsi_bufflen(cp));

			res = DID_ERROR << 16 | lscsi_status;
			goto check_scsi_status;
		} else {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
			    "scsi_status: 0x%x, lscsi_status: 0x%x\n",
			    scsi_status, lscsi_status);
		}

		res = DID_OK << 16 | lscsi_status;
		logit = 0;

check_scsi_status:
		/*
		 * Check to see if SCSI Status is non zero. If so report SCSI
		 * Status.
		 */
		if (lscsi_status != 0) {
			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
				    "QUEUE FULL detected.\n");
				logit = 1;
				break;
			}
			if (lscsi_status != SS_CHECK_CONDITION)
				break;

			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
			if (!(scsi_status & SS_SENSE_LEN_VALID))
				break;

			qla2x00_handle_sense(sp, sense_data, par_sense_len,
			    sense_len, rsp, res);
		}
		break;

	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
	case CS_TIMEOUT:
	case CS_RESET:

		/*
		 * We are going to have the fc class block the rport
		 * while we try to recover so instruct the mid layer
		 * to requeue until the class decides how to handle this.
		 */
		res = DID_TRANSPORT_DISRUPTED << 16;

		if (comp_status == CS_TIMEOUT) {
			if (IS_FWI2_CAPABLE(ha))
				break;
			else if ((le16_to_cpu(sts->status_flags) &
			    SF_LOGOUT_SENT) == 0)
				break;
		}

		ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
		    "Port down status: port-state=0x%x.\n",
		    atomic_read(&fcport->state));

		if (atomic_read(&fcport->state) == FCS_ONLINE)
			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
		break;

	case CS_ABORTED:
		res = DID_RESET << 16;
		break;

	case CS_DIF_ERROR:
		logit = qla2x00_handle_dif_error(sp, sts24);
		break;
	default:
		res = DID_ERROR << 16;
		break;
	}

out:
	if (logit)
		ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
		    "FCP command status: 0x%x-0x%x (0x%x) "
		    "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
		    "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
		    comp_status, scsi_status, res, vha->host_no,
		    cp->device->id, cp->device->lun, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
		    cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
		    cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7],
		    cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
		    resid_len, fw_resid_len);

	if (rsp->status_srb == NULL)
		sp->done(ha, sp, res);
}
/**
 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 *
 * Extended sense data.
 */
static void
qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
{
	uint8_t	sense_sz = 0;
	struct qla_hw_data *ha = rsp->hw;
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	srb_t *sp = rsp->status_srb;
	struct scsi_cmnd *cp;
	uint32_t sense_len;
	uint8_t *sense_ptr;

	if (!sp || !GET_CMD_SENSE_LEN(sp))
		return;

	sense_len = GET_CMD_SENSE_LEN(sp);
	sense_ptr = GET_CMD_SENSE_PTR(sp);

	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_log(ql_log_warn, vha, 0x3025,
		    "cmd is NULL: already returned to OS (sp=%p).\n", sp);

		rsp->status_srb = NULL;
		return;
	}

	if (sense_len > sizeof(pkt->data))
		sense_sz = sizeof(pkt->data);
	else
		sense_sz = sense_len;

	/* Move sense data. */
	if (IS_FWI2_CAPABLE(ha))
		host_to_fcp_swap(pkt->data, sizeof(pkt->data));
	memcpy(sense_ptr, pkt->data, sense_sz);
	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
	    sense_ptr, sense_sz);

	sense_len -= sense_sz;
	sense_ptr += sense_sz;

	SET_CMD_SENSE_PTR(sp, sense_ptr);
	SET_CMD_SENSE_LEN(sp, sense_len);

	/* Place command on done queue. */
	if (sense_len == 0) {
		rsp->status_srb = NULL;
		sp->done(ha, sp, cp->result);
	}
}
/**
 * qla2x00_error_entry() - Process an error entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;
	const char func[] = "ERROR-IOCB";
	uint16_t que = MSW(pkt->handle);
	struct req_que *req = NULL;
	int res = DID_ERROR << 16;

	ql_dbg(ql_dbg_async, vha, 0x502a,
	    "type of error status in response: 0x%x\n", pkt->entry_status);

	if (que >= ha->max_req_queues || !ha->req_q_map[que])
		goto fatal;

	req = ha->req_q_map[que];

	if (pkt->entry_status & RF_BUSY)
		res = DID_BUS_BUSY << 16;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (sp) {
		sp->done(ha, sp, res);
		return;
	}
fatal:
	ql_log(ql_log_warn, vha, 0x5030,
	    "Error entry - invalid handle/queue.\n");

	if (IS_QLA82XX(ha))
		set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
	else
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
}
/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint32_t	mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Read all mbox registers? */
	mboxes = (1 << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)&reg->mailbox1;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		mboxes >>= 1;
		wptr++;
	}
}
/**
 * qla24xx_process_response_queue() - Process response queue entries.
 * @ha: SCSI driver HA context
 */
void qla24xx_process_response_queue(struct scsi_qla_host *vha,
	struct rsp_que *rsp)
{
	struct sts_entry_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;

	if (!vha->flags.online)
		return;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (struct sts_entry_24xx *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);

			(void)qlt_24xx_process_response_error(vha, pkt);

			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case VP_RPT_ID_IOCB_TYPE:
			qla24xx_report_id_acquisition(vha,
			    (struct vp_rpt_id_entry_24xx *)pkt);
			break;
		case LOGINOUT_PORT_IOCB_TYPE:
			qla24xx_logio_entry(vha, rsp->req,
			    (struct logio_entry_24xx *)pkt);
			break;
		case TSK_MGMT_IOCB_TYPE:
			qla24xx_tm_iocb_entry(vha, rsp->req,
			    (struct tsk_mgmt_entry *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		case ELS_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
			break;
		case ABTS_RECV_24XX:
			/* ensure that the ATIO queue is empty */
			qlt_24xx_process_atio_queue(vha);
		case ABTS_RESP_24XX:
		case CTIO_TYPE7:
		case NOTIFY_ACK_TYPE:
			qlt_response_pkt_all_vps(vha, (response_t *)pkt);
			break;
		case MARKER_TYPE:
			/* Do nothing in this case, this check is to prevent it
			 * from falling into default case
			 */
			break;
		default:
			/* Type Not Supported. */
			ql_dbg(ql_dbg_async, vha, 0x5042,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	if (IS_QLA82XX(ha)) {
		struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
		WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
	} else
		WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
}
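/*
 * Both response-queue walkers share the same ring convention: every
 * consumed entry is stamped with the RESPONSE_PROCESSED signature so a
 * re-entered handler stops at the first unconsumed entry, and ring_index
 * wraps to 0 (with ring_ptr rewound to the ring base) once it reaches
 * rsp->length.
 */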
static void
qla2xxx_check_risc_status(scsi_qla_host_t *vha)
{
	uint32_t cnt;
	uint32_t rval = 0;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
		return;

	rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_window, 0x0001);
	for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0001);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		goto next_test;

	WRT_REG_DWORD(&reg->iobase_window, 0x0003);
	for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0003);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval != QLA_SUCCESS)
		goto done;

next_test:
	if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
		ql_log(ql_log_info, vha, 0x504c,
		    "Additional code -- 0x55AA.\n");

done:
	WRT_REG_DWORD(&reg->iobase_window, 0x0000);
	RD_REG_DWORD(&reg->iobase_window);
}
/**
 * qla24xx_intr_handler() - Process interrupts for the ISP23xx and ISP24xx.
 * @irq:
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla24xx_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct device_reg_24xx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint32_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5059,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return IRQ_HANDLED;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);

			ql_log(ql_log_warn, vha, 0x504b,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
		case 0x14:
			qla24xx_process_response_queue(vha, rsp);
			break;
		case 0x1C: /* ATIO queue updated */
			qlt_24xx_process_atio_queue(vha);
			break;
		case 0x1D: /* ATIO and response queues updated */
			qlt_24xx_process_atio_queue(vha);
			qla24xx_process_response_queue(vha, rsp);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x504f,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return IRQ_HANDLED;
}
static irqreturn_t
qla24xx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	struct scsi_qla_host *vha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505a,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, rsp);
	if (!ha->flags.disable_msix_handshake) {
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}
static irqreturn_t
qla25xx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505b,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;

	/* Clear the interrupt, if enabled, for this response queue */
	if (!ha->flags.disable_msix_handshake) {
		reg = &ha->iobase->isp24;
		spin_lock_irqsave(&ha->hardware_lock, flags);
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}
	queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);

	return IRQ_HANDLED;
}
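
/*
 * queue_work_on() above deliberately runs each response queue's bottom
 * half on the CPU derived from its queue id (rsp->id - 1), keeping queue
 * processing cache-warm on one CPU.  A hedged sketch of the work handler
 * such a call implies -- it mirrors whatever function was bound to
 * rsp->q_work at queue-creation time; the name here is illustrative:
 */
static void
example_rsp_work(struct work_struct *work)
{
	struct rsp_que *rsp = container_of(work, struct rsp_que, q_work);
	struct scsi_qla_host *vha = pci_get_drvdata(rsp->hw->pdev);
	unsigned long flags;

	/* Same lock discipline as the interrupt-context path. */
	spin_lock_irqsave(&rsp->hw->hardware_lock, flags);
	qla24xx_process_response_queue(vha, rsp);
	spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags);
}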
static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	int		status;
	uint32_t	stat;
	uint32_t	hccr;
	uint16_t	mb[4];
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505c,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	do {
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);

			ql_log(ql_log_info, vha, 0x5050,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
		case 0x14:
			qla24xx_process_response_queue(vha, rsp);
			break;
		case 0x1C: /* ATIO queue updated */
			qlt_24xx_process_atio_queue(vha);
			break;
		case 0x1D: /* ATIO and response queues updated */
			qlt_24xx_process_atio_queue(vha);
			qla24xx_process_response_queue(vha, rsp);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5051,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
	} while (0);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return IRQ_HANDLED;
}

/* Interrupt handling helpers. */

struct qla_init_msix_entry {
	const char *name;
	irq_handler_t handler;
};

static struct qla_init_msix_entry msix_entries[3] = {
	{ "qla2xxx (default)", qla24xx_msix_default },
	{ "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
	{ "qla2xxx (multiq)", qla25xx_msix_rsp_q },
};

static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
	{ "qla2xxx (default)", qla82xx_msix_default },
	{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};
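
/*
 * These tables map an MSI-X vector index to its handler and to the name
 * that shows up in /proc/interrupts.  A minimal sketch of the table-driven
 * registration pattern they enable (example_register_vectors() is
 * hypothetical; the driver's real loop is in qla24xx_enable_msix() below):
 */
static int
example_register_vectors(struct qla_hw_data *ha, struct rsp_que *rsp,
    struct qla_init_msix_entry *tbl, int n)
{
	int i, ret;

	for (i = 0; i < n; i++) {
		ret = request_irq(ha->msix_entries[i].vector, tbl[i].handler,
		    0, tbl[i].name, rsp);
		if (ret)
			return ret;	/* caller unwinds vectors already claimed */
	}
	return 0;
}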
static void
qla24xx_disable_msix(struct qla_hw_data *ha)
{
	int i;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		if (qentry->have_irq)
			free_irq(qentry->vector, qentry->rsp);
	}
	pci_disable_msix(ha->pdev);
	kfree(ha->msix_entries);
	ha->msix_entries = NULL;
	ha->flags.msix_enabled = 0;
	ql_dbg(ql_dbg_init, vha, 0x0042,
	    "Disabled MSI-X.\n");
}
static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
#define MIN_MSIX_COUNT	2
	int i, ret;
	struct msix_entry *entries;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
	    GFP_KERNEL);
	if (!entries) {
		ql_log(ql_log_warn, vha, 0x00bc,
		    "Failed to allocate memory for msix_entry.\n");
		return -ENOMEM;
	}

	for (i = 0; i < ha->msix_count; i++)
		entries[i].entry = i;

	ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
	if (ret) {
		if (ret < MIN_MSIX_COUNT)
			goto msix_failed;

		ql_log(ql_log_warn, vha, 0x00c6,
		    "MSI-X: Failed to enable support -- %d/%d. "
		    "Retrying with %d vectors.\n", ha->msix_count, ret, ret);
		ha->msix_count = ret;
		ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
		if (ret) {
msix_failed:
			ql_log(ql_log_fatal, vha, 0x00c7,
			    "MSI-X: Failed to enable support, "
			    "giving up -- %d/%d.\n",
			    ha->msix_count, ret);
			goto msix_out;
		}
		ha->max_rsp_queues = ha->msix_count - 1;
	}
	ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
	    ha->msix_count, GFP_KERNEL);
	if (!ha->msix_entries) {
		ql_log(ql_log_fatal, vha, 0x00c8,
		    "Failed to allocate memory for ha->msix_entries.\n");
		ret = -ENOMEM;
		goto msix_out;
	}
	ha->flags.msix_enabled = 1;

	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		qentry->vector = entries[i].vector;
		qentry->entry = entries[i].entry;
		qentry->have_irq = 0;
		qentry->rsp = NULL;
	}

	/* Enable MSI-X vectors for the base queue */
	for (i = 0; i < 2; i++) {
		qentry = &ha->msix_entries[i];
		if (IS_QLA82XX(ha)) {
			ret = request_irq(qentry->vector,
			    qla82xx_msix_entries[i].handler,
			    0, qla82xx_msix_entries[i].name, rsp);
		} else {
			ret = request_irq(qentry->vector,
			    msix_entries[i].handler,
			    0, msix_entries[i].name, rsp);
		}
		if (ret) {
			ql_log(ql_log_fatal, vha, 0x00cb,
			    "MSI-X: unable to register handler -- %x/%d.\n",
			    qentry->vector, ret);
			qla24xx_disable_msix(ha);
			ha->mqenable = 0;
			goto msix_out;
		}
		qentry->have_irq = 1;
		qentry->rsp = rsp;
		rsp->msix = qentry;
	}

	/* Enable MSI-X vector for response queue update for queue 0 */
	if (IS_QLA83XX(ha)) {
		if (ha->msixbase && ha->mqiobase &&
		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
			ha->mqenable = 1;
	} else if (ha->mqiobase &&
	    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
		ha->mqenable = 1;
	ql_dbg(ql_dbg_multiq, vha, 0xc005,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
	ql_dbg(ql_dbg_init, vha, 0x0055,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);

msix_out:
	kfree(entries);
	return ret;
}
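
/*
 * The allocate/retry dance above -- ask pci_enable_msix() for msix_count
 * vectors and, when its positive return value reports fewer are available,
 * shrink the request and try once more -- is what later kernels fold into
 * pci_enable_msix_range()/pci_alloc_irq_vectors().  A hedged sketch of the
 * equivalent on that newer API (not available to the kernels this driver
 * version targets):
 */
static int
example_enable_vectors(struct qla_hw_data *ha)
{
	int ret;

	/* Accept anything from MIN_MSIX_COUNT up to msix_count vectors. */
	ret = pci_alloc_irq_vectors(ha->pdev, MIN_MSIX_COUNT, ha->msix_count,
	    PCI_IRQ_MSIX);
	if (ret < 0)
		return ret;	/* could not even get the minimum */

	ha->msix_count = ret;	/* number of vectors actually granted */
	return 0;
}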
int
qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	int ret;
	device_reg_t __iomem *reg = ha->iobase;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	/* If possible, enable MSI-X. */
	if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha))
		goto skip_msi;

	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
	    (ha->pdev->subsystem_device == 0x7040 ||
	     ha->pdev->subsystem_device == 0x7041 ||
	     ha->pdev->subsystem_device == 0x1705)) {
		ql_log(ql_log_warn, vha, 0x0034,
		    "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X,0x%X).\n",
		    ha->pdev->subsystem_vendor,
		    ha->pdev->subsystem_device);
		goto skip_msi;
	}

	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
		ql_log(ql_log_warn, vha, 0x0035,
		    "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
		    ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
		goto skip_msix;
	}

	ret = qla24xx_enable_msix(ha, rsp);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0036,
		    "MSI-X: Enabled (0x%X, 0x%X).\n",
		    ha->chip_revision, ha->fw_attributes);
		goto clear_risc_ints;
	}
	ql_log(ql_log_info, vha, 0x0037,
	    "MSI-X: Falling back to MSI mode -- %d.\n", ret);

skip_msix:

	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_QLA8001(ha) && !IS_QLA82XX(ha))
		goto skip_msi;

	ret = pci_enable_msi(ha->pdev);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0038,
		    "MSI: Enabled.\n");
		ha->flags.msi_enabled = 1;
	} else
		ql_log(ql_log_warn, vha, 0x0039,
		    "MSI: Falling back to INTa mode -- %d.\n", ret);

skip_msi:

	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    ha->flags.msi_enabled ? 0 : IRQF_SHARED,
	    QLA2XXX_DRIVER_NAME, rsp);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x003a,
		    "Failed to reserve interrupt %d; already in use.\n",
		    ha->pdev->irq);
		goto fail;
	}

clear_risc_ints:

	/*
	 * FIXME: Noted that 8014s were being dropped during NK testing.
	 * Timing deltas during MSI-X/INTa transitions?
	 */
	if (IS_QLA81XX(ha) || IS_QLA82XX(ha) || IS_QLA83XX(ha))
		goto fail;

	spin_lock_irq(&ha->hardware_lock);
	if (IS_FWI2_CAPABLE(ha)) {
		WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
		WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
	} else {
		WRT_REG_WORD(&reg->isp.semaphore, 0);
		WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
		WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
	}
	spin_unlock_irq(&ha->hardware_lock);

fail:
	return ret;
}
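
/*
 * qla2x00_request_irqs() above is a three-step fallback ladder: MSI-X,
 * then single-vector MSI, then legacy INTx.  Only the INTx leg passes
 * IRQF_SHARED, since a legacy PCI interrupt line may be shared with other
 * devices while MSI/MSI-X vectors are exclusive to this function.  A
 * condensed sketch of the ladder (example_request_irq() is hypothetical):
 */
static int
example_request_irq(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	if (!qla24xx_enable_msix(ha, rsp))	/* best case: per-queue vectors */
		return 0;
	if (!pci_enable_msi(ha->pdev))		/* next: one message-signaled vector */
		ha->flags.msi_enabled = 1;
	/* Last resort: legacy INTx, which may be a shared line. */
	return request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    ha->flags.msi_enabled ? 0 : IRQF_SHARED, QLA2XXX_DRIVER_NAME, rsp);
}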
void
qla2x00_free_irqs(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct rsp_que *rsp;

	/*
	 * We need to check that ha->rsp_q_map is valid in case we are called
	 * from a probe failure context.
	 */
	if (!ha->rsp_q_map || !ha->rsp_q_map[0])
		return;
	rsp = ha->rsp_q_map[0];

	if (ha->flags.msix_enabled)
		qla24xx_disable_msix(ha);
	else if (ha->flags.msi_enabled) {
		free_irq(ha->pdev->irq, rsp);
		pci_disable_msi(ha->pdev);
	} else
		free_irq(ha->pdev->irq, rsp);
}
int qla25xx_request_irq(struct rsp_que *rsp)
{
	struct qla_hw_data *ha = rsp->hw;
	struct qla_init_msix_entry *intr = &msix_entries[2];
	struct qla_msix_entry *msix = rsp->msix;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	int ret;

	ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00e6,
		    "MSI-X: Unable to register handler -- %x/%d.\n",
		    msix->vector, ret);