/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2013 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_target.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>
static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
	sts_entry_t *);
/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq:
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint16_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505d,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		hccr = RD_REG_WORD(&reg->hccr);
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

			/*
			 * Issue a "HARD" reset in order for the RISC interrupt
			 * bit to be cleared.  Schedule a big hammer to get
			 * out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(vha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(vha, rsp, mb);
			} else {
				/*EMPTY*/
				ql_dbg(ql_dbg_async, vha, 0x5025,
				    "Unrecognized interrupt type (%d).\n",
				    mb[0]);
			}
			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			RD_REG_WORD(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(rsp);

			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);
		}
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}
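/*
 * Illustrative sketch only (not part of the driver): the mb[0] range checks
 * above classify a RISC interrupt -- values 0x4000-0x7fff indicate a mailbox
 * command completion, 0x8000-0xbfff an asynchronous event, and anything else
 * is logged as unrecognized.  The helper name below is hypothetical.
 */
#if 0
static const char *qla2x00_example_classify_mb0(uint16_t mb0)
{
	if (mb0 > 0x3fff && mb0 < 0x8000)
		return "mailbox command completion";
	if (mb0 > 0x7fff && mb0 < 0xc000)
		return "asynchronous event";
	return "unrecognized interrupt type";
}
#endif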
/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq:
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint16_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	struct qla_hw_data *ha;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5058,
		    "%s: NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
		if (stat & HSR_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_WORD(&reg->hccr);
			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				ql_log(ql_log_warn, vha, 0x5026,
				    "Parity error -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);
			else
				ql_log(ql_log_warn, vha, 0x5027,
				    "RISC paused -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(rsp);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(vha, rsp, mb);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5028,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD_RELAXED(&reg->hccr);
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return (IRQ_HANDLED);
}
/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint32_t	mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Read all mbox registers? */
	mboxes = (1 << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		wptr++;
		mboxes >>= 1;
	}
}
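/*
 * Worked example (illustrative): if the pending mailbox command requested
 * in_mb = BIT_0|BIT_1|BIT_3, the bitmap walk above right-shifts `mboxes`
 * once per register, so only mailbox_out[0], [1] and [3] are captured.
 * Registers 4 and 5 additionally go through qla2x00_debounce_register()
 * because their contents can change while being read.
 */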
static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
	static char *event[] =
		{ "Complete", "Request Notification", "Time Extension" };
	int rval;
	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
	uint16_t __iomem *wptr;
	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

	/* Seed data -- mailbox1 -> mailbox7. */
	wptr = (uint16_t __iomem *)&reg24->mailbox1;
	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
		mb[cnt] = RD_REG_WORD(wptr);

	ql_dbg(ql_dbg_async, vha, 0x5021,
	    "Inter-Driver Communication %s -- "
	    "%04x %04x %04x %04x %04x %04x %04x.\n",
	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
	    mb[4], mb[5], mb[6]);

	switch (aen) {
	/* Handle IDC Error completion case. */
	case MBA_IDC_COMPLETE:
		if (mb[1] >> 15) {
			vha->hw->flags.idc_compl_status = 1;
			if (vha->hw->notify_dcbx_comp)
				complete(&vha->hw->dcbx_comp);
		}
		break;

	case MBA_IDC_NOTIFY:
		/* Acknowledgement needed? [Notify && non-zero timeout]. */
		timeout = (descr >> 8) & 0xf;
		ql_dbg(ql_dbg_async, vha, 0x5022,
		    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
		    vha->host_no, event[aen & 0xff], timeout);

		if (!timeout)
			return;
		rval = qla2x00_post_idc_ack_work(vha, mb);
		if (rval != QLA_SUCCESS)
			ql_log(ql_log_warn, vha, 0x5023,
			    "IDC failed to post ACK.\n");
		break;
	case MBA_IDC_TIME_EXT:
		vha->hw->idc_extend_tmo = descr;
		ql_dbg(ql_dbg_async, vha, 0x5087,
		    "%lu Inter-Driver Communication %s -- "
		    "Extend timeout by=%d.\n",
		    vha->host_no, event[aen & 0xff], vha->hw->idc_extend_tmo);
		break;
	}
}
const char *
qla2x00_get_link_speed_str(struct qla_hw_data *ha, uint16_t speed)
{
	static const char *const link_speeds[] = {
		"1", "2", "?", "4", "8", "16", "10"
	};

	if (IS_QLA2100(ha) || IS_QLA2200(ha))
		return link_speeds[0];
	else if (speed == 0x13)
		return link_speeds[6];
	else if (speed < 6)
		return link_speeds[speed];
	else
		return link_speeds[LS_UNKNOWN];
}
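/*
 * Illustrative usage note (hypothetical caller): firmware reports 10 Gbps
 * CNA links with the special speed code 0x13, so
 * qla2x00_get_link_speed_str(ha, 0x13) yields "10"; codes 0-5 index the
 * table directly ("1", "2", "?", "4", "8", "16"), and anything else falls
 * back to the LS_UNKNOWN slot, which presumably maps to the "?" entry.
 */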
static void
qla83xx_handle_8200_aen(scsi_qla_host_t *vha, uint16_t *mb)
{
	struct qla_hw_data *ha = vha->hw;

	/*
	 * 8200 AEN Interpretation:
	 * mb[0] = AEN code
	 * mb[1] = AEN Reason code
	 * mb[2] = LSW of Peg-Halt Status-1 Register
	 * mb[6] = MSW of Peg-Halt Status-1 Register
	 * mb[3] = LSW of Peg-Halt Status-2 register
	 * mb[7] = MSW of Peg-Halt Status-2 register
	 * mb[4] = IDC Device-State Register value
	 * mb[5] = IDC Driver-Presence Register value
	 */
	ql_dbg(ql_dbg_async, vha, 0x506b, "AEN Code: mb[0] = 0x%x AEN reason: "
	    "mb[1] = 0x%x PH-status1: mb[2] = 0x%x PH-status1: mb[6] = 0x%x.\n",
	    mb[0], mb[1], mb[2], mb[6]);
	ql_dbg(ql_dbg_async, vha, 0x506c, "PH-status2: mb[3] = 0x%x "
	    "PH-status2: mb[7] = 0x%x Device-State: mb[4] = 0x%x "
	    "Drv-Presence: mb[5] = 0x%x.\n", mb[3], mb[7], mb[4], mb[5]);

	if (mb[1] & (IDC_PEG_HALT_STATUS_CHANGE | IDC_NIC_FW_REPORTED_FAILURE |
	    IDC_HEARTBEAT_FAILURE)) {
		ha->flags.nic_core_hung = 1;
		ql_log(ql_log_warn, vha, 0x5060,
		    "83XX: F/W Error Reported: Check if reset required.\n");

		if (mb[1] & IDC_PEG_HALT_STATUS_CHANGE) {
			uint32_t protocol_engine_id, fw_err_code, err_level;

			/*
			 * IDC_PEG_HALT_STATUS_CHANGE interpretation:
			 * - PEG-Halt Status-1 Register:
			 *	(LSW = mb[2], MSW = mb[6])
			 *	Bits 0-7   = protocol-engine ID
			 *	Bits 8-28  = f/w error code
			 *	Bits 29-31 = Error-level
			 *	    Error-level 0x1 = Non-Fatal error
			 *	    Error-level 0x2 = Recoverable Fatal error
			 *	    Error-level 0x4 = UnRecoverable Fatal error
			 * - PEG-Halt Status-2 Register:
			 *	(LSW = mb[3], MSW = mb[7])
			 */
			protocol_engine_id = (mb[2] & 0xff);
			fw_err_code = (((mb[2] & 0xff00) >> 8) |
			    ((mb[6] & 0x1fff) << 8));
			err_level = ((mb[6] & 0xe000) >> 13);
			ql_log(ql_log_warn, vha, 0x5061, "PegHalt Status-1 "
			    "Register: protocol_engine_id=0x%x "
			    "fw_err_code=0x%x err_level=0x%x.\n",
			    protocol_engine_id, fw_err_code, err_level);
			ql_log(ql_log_warn, vha, 0x5062, "PegHalt Status-2 "
			    "Register: 0x%x%x.\n", mb[7], mb[3]);
			if (err_level == ERR_LEVEL_NON_FATAL) {
				ql_log(ql_log_warn, vha, 0x5063,
				    "Not a fatal error, f/w has recovered "
				    "itself.\n");
			} else if (err_level == ERR_LEVEL_RECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5064,
				    "Recoverable Fatal error: Chip reset "
				    "required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_RESET);
			} else if (err_level == ERR_LEVEL_UNRECOVERABLE_FATAL) {
				ql_log(ql_log_fatal, vha, 0x5065,
				    "Unrecoverable Fatal error: Set FAILED "
				    "state, reboot required.\n");
				qla83xx_schedule_work(vha,
				    QLA83XX_NIC_CORE_UNRECOVERABLE);
			}
		}

		if (mb[1] & IDC_NIC_FW_REPORTED_FAILURE) {
			uint16_t peg_fw_state, nw_interface_link_up;
			uint16_t nw_interface_signal_detect, sfp_status;
			uint16_t htbt_counter, htbt_monitor_enable;
			uint16_t sfp_additonal_info, sfp_multirate;
			uint16_t sfp_tx_fault, link_speed, dcbx_status;

			/*
			 * IDC_NIC_FW_REPORTED_FAILURE interpretation:
			 * - PEG-to-FC Status Register:
			 *	(LSW = mb[2], MSW = mb[6])
			 *	Bits 0-7   = Peg-Firmware state
			 *	Bit 8      = N/W Interface Link-up
			 *	Bit 9      = N/W Interface signal detected
			 *	Bits 10-11 = SFP Status
			 *	    SFP Status 0x0 = SFP+ transceiver not expected
			 *	    SFP Status 0x1 = SFP+ transceiver not present
			 *	    SFP Status 0x2 = SFP+ transceiver invalid
			 *	    SFP Status 0x3 = SFP+ transceiver present and
			 *	    valid
			 *	Bits 12-14 = Heartbeat Counter
			 *	Bit 15     = Heartbeat Monitor Enable
			 *	Bits 16-17 = SFP Additional Info
			 *	    SFP info 0x0 = Unrecognized transceiver for
			 *	    Ethernet
			 *	    SFP info 0x1 = SFP+ brand validation failed
			 *	    SFP info 0x2 = SFP+ speed validation failed
			 *	    SFP info 0x3 = SFP+ access error
			 *	Bit 18     = SFP Multirate
			 *	Bit 19     = SFP Tx Fault
			 *	Bits 20-22 = Link Speed
			 *	Bits 23-27 = Reserved
			 *	Bits 28-30 = DCBX Status
			 *	    DCBX Status 0x0 = DCBX Disabled
			 *	    DCBX Status 0x1 = DCBX Enabled
			 *	    DCBX Status 0x2 = DCBX Exchange error
			 */
			peg_fw_state = (mb[2] & 0x00ff);
			nw_interface_link_up = ((mb[2] & 0x0100) >> 8);
			nw_interface_signal_detect = ((mb[2] & 0x0200) >> 9);
			sfp_status = ((mb[2] & 0x0c00) >> 10);
			htbt_counter = ((mb[2] & 0x7000) >> 12);
			htbt_monitor_enable = ((mb[2] & 0x8000) >> 15);
			sfp_additonal_info = (mb[6] & 0x0003);
			sfp_multirate = ((mb[6] & 0x0004) >> 2);
			sfp_tx_fault = ((mb[6] & 0x0008) >> 3);
			link_speed = ((mb[6] & 0x0070) >> 4);
			dcbx_status = ((mb[6] & 0x7000) >> 12);

			ql_log(ql_log_warn, vha, 0x5066,
			    "Peg-to-Fc Status Register:\n"
			    "peg_fw_state=0x%x, nw_interface_link_up=0x%x, "
			    "nw_interface_signal_detect=0x%x"
			    "\nsfp_status=0x%x.\n", peg_fw_state,
			    nw_interface_link_up, nw_interface_signal_detect,
			    sfp_status);
			ql_log(ql_log_warn, vha, 0x5067,
			    "htbt_counter=0x%x, htbt_monitor_enable=0x%x, "
			    "sfp_additonal_info=0x%x, sfp_multirate=0x%x.\n",
			    htbt_counter, htbt_monitor_enable,
			    sfp_additonal_info, sfp_multirate);
			ql_log(ql_log_warn, vha, 0x5068,
			    "sfp_tx_fault=0x%x, link_speed=0x%x, "
			    "dcbx_status=0x%x.\n", sfp_tx_fault, link_speed,
			    dcbx_status);

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}

		if (mb[1] & IDC_HEARTBEAT_FAILURE) {
			ql_log(ql_log_warn, vha, 0x5069,
			    "Heartbeat Failure encountered, chip reset "
			    "required.\n");

			qla83xx_schedule_work(vha, QLA83XX_NIC_CORE_RESET);
		}
	}

	if (mb[1] & IDC_DEVICE_STATE_CHANGE) {
		ql_log(ql_log_info, vha, 0x506a,
		    "IDC Device-State changed = 0x%x.\n", mb[4]);
		if (ha->flags.nic_core_reset_owner)
			return;
		qla83xx_schedule_work(vha, MBA_IDC_AEN);
	}
}
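/*
 * Worked example (illustrative): with mb[2] = 0x3401 and mb[6] = 0x2005,
 * the Peg-Halt Status-1 decode above yields protocol_engine_id = 0x01,
 * fw_err_code = 0x34 | (0x0005 << 8) = 0x534, and err_level =
 * (0x2000 >> 13) = 0x1, i.e. a non-fatal error the firmware recovers
 * from on its own.
 */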
static bool
qla2x00_is_a_vp_did(scsi_qla_host_t *vha, uint32_t rscn_entry)
{
	struct qla_hw_data *ha = vha->hw;
	scsi_qla_host_t *vp;
	uint32_t vp_did;
	unsigned long flags;
	int ret = 0;

	if (!ha->num_vhosts)
		return ret;

	spin_lock_irqsave(&ha->vport_slock, flags);
	list_for_each_entry(vp, &ha->vp_list, list) {
		vp_did = vp->d_id.b24;
		if (vp_did == rscn_entry) {
			ret = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&ha->vport_slock, flags);

	return ret;
}
/**
 * qla2x00_async_event() - Process asynchronous events.
 * @ha: SCSI driver HA context
 * @mb: Mailbox registers (0 - 3)
 */
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
	uint16_t	handle_cnt;
	uint16_t	cnt, mbx;
	uint32_t	handles[5];
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
	uint32_t	rscn_entry, host_pid, tmp_pid;
	unsigned long	flags;
	fc_port_t	*fcport = NULL;

	/* Setup to process RIO completion. */
	handle_cnt = 0;
	if (IS_CNA_CAPABLE(ha))
		goto skip_rio;
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
		handles[0] = mb[1];
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handles[1] = le32_to_cpu(
		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
		    RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}
skip_rio:
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:	/* Fast Post */
		if (!vha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
				handles[cnt]);
		break;

	case MBA_RESET:			/* Reset */
		ql_dbg(ql_dbg_async, vha, 0x5002,
		    "Asynchronous RESET.\n");

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		break;

	case MBA_SYSTEM_ERR:		/* System Error */
		mbx = (IS_QLA81XX(ha) || IS_QLA83XX(ha)) ?
			RD_REG_WORD(&reg24->mailbox7) : 0;
		ql_log(ql_log_warn, vha, 0x5003,
		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
		    "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);

		ha->isp_ops->fw_dump(vha, 1);

		if (IS_FWI2_CAPABLE(ha)) {
			if (mb[1] == 0 && mb[2] == 0) {
				ql_log(ql_log_fatal, vha, 0x5004,
				    "Unrecoverable Hardware Error: adapter "
				    "marked OFFLINE!\n");
				vha->flags.online = 0;
				vha->device_flags |= DFLG_DEV_FAILED;
			} else {
				/* Check to see if MPI timeout occurred */
				if ((mbx & MBX_3) && (ha->flags.port0))
					set_bit(MPI_RESET_NEEDED,
					    &vha->dpc_flags);

				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			}
		} else if (mb[1] == 0) {
			ql_log(ql_log_fatal, vha, 0x5005,
			    "Unrecoverable Hardware Error: adapter marked "
			    "OFFLINE!\n");
			vha->flags.online = 0;
			vha->device_flags |= DFLG_DEV_FAILED;
		} else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
		ql_log(ql_log_warn, vha, 0x5006,
		    "ISP Request Transfer Error (%x).\n", mb[1]);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_log(ql_log_warn, vha, 0x5007,
		    "ISP Response Transfer Error.\n");

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
		ql_dbg(ql_dbg_async, vha, 0x5008,
		    "Asynchronous WAKEUP_THRES.\n");
		break;

	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
		ql_dbg(ql_dbg_async, vha, 0x5009,
		    "LIP occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
		break;

	case MBA_LOOP_UP:		/* Loop Up Event */
		if (IS_QLA2100(ha) || IS_QLA2200(ha))
			ha->link_data_rate = PORT_SPEED_1GB;
		else
			ha->link_data_rate = mb[1];

		ql_dbg(ql_dbg_async, vha, 0x500a,
		    "LOOP UP detected (%s Gbps).\n",
		    qla2x00_get_link_speed_str(ha, ha->link_data_rate));

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
		break;

	case MBA_LOOP_DOWN:		/* Loop Down Event */
		mbx = (IS_QLA81XX(ha) || IS_QLA8031(ha))
			? RD_REG_WORD(&reg24->mailbox4) : 0;
		mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
			: mbx;
		ql_dbg(ql_dbg_async, vha, 0x500b,
		    "LOOP DOWN detected (%x %x %x %x).\n",
		    mb[1], mb[2], mb[3], mbx);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			vha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		vha->flags.management_server_logged_in = 0;
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
		break;

	case MBA_LIP_RESET:		/* LIP reset occurred */
		ql_dbg(ql_dbg_async, vha, 0x500c,
		    "LIP reset occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		ha->operating_mode = LOOP;
		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
		break;

	/* case MBA_DCBX_COMPLETE: */
	case MBA_POINT_TO_POINT:	/* Point-to-Point */
		if (IS_QLA2100(ha))
			break;

		if (IS_CNA_CAPABLE(ha)) {
			ql_dbg(ql_dbg_async, vha, 0x500d,
			    "DCBX Completed -- %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
			if (ha->notify_dcbx_comp)
				complete(&ha->dcbx_comp);
		} else
			ql_dbg(ql_dbg_async, vha, 0x500e,
			    "Asynchronous P2P MODE received.\n");

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
			set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		ha->flags.gpsc_supported = 1;
		vha->flags.management_server_logged_in = 0;
		break;

	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
		if (IS_QLA2100(ha))
			break;

		ql_dbg(ql_dbg_async, vha, 0x500f,
		    "Configuration change detected: value=%x.\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		break;

	case MBA_PORT_UPDATE:		/* Port database update */
		/*
		 * Handle only global and vn-port update events
		 *
		 * Relevant inputs:
		 * mb[1] = N_Port handle of changed port
		 * OR 0xffff for global event
		 * mb[2] = New login state
		 * 7 = Port logged out
		 * mb[3] = LSB is vp_idx, 0xff = all vps
		 *
		 * Skip processing if:
		 *       Event is global, vp_idx is NOT all vps,
		 *           vp_idx does not match
		 *       Event is not global, vp_idx does not match
		 */
		if (IS_QLA2XXX_MIDTYPE(ha) &&
		    ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
			(mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
			break;

		/* Global event -- port logout or port unavailable. */
		if (mb[1] == 0xffff && mb[2] == 0x7) {
			ql_dbg(ql_dbg_async, vha, 0x5010,
			    "Port unavailable %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
			ql_log(ql_log_warn, vha, 0x505e,
			    "Link is offline.\n");

			if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
				atomic_set(&vha->loop_state, LOOP_DOWN);
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
				vha->device_flags |= DFLG_NO_CABLE;
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			if (vha->vp_idx) {
				atomic_set(&vha->vp_state, VP_FAILED);
				fc_vport_set_state(vha->fc_vport,
				    FC_VPORT_FAILED);
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			vha->flags.management_server_logged_in = 0;
			ha->link_data_rate = PORT_SPEED_UNKNOWN;
			break;
		}

		/*
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
		 * event etc. earlier indicating loop is down) then process
		 * it.  Otherwise ignore it and Wait for RSCN to come in.
		 */
		atomic_set(&vha->loop_down_timer, 0);
		if (mb[1] != 0xffff || (mb[2] != 0x6 && mb[2] != 0x4)) {
			ql_dbg(ql_dbg_async, vha, 0x5011,
			    "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
			    mb[1], mb[2], mb[3]);

			qlt_async_event(mb[0], vha, mb);
			break;
		}

		ql_dbg(ql_dbg_async, vha, 0x5012,
		    "Port database changed %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		ql_log(ql_log_warn, vha, 0x505f,
		    "Link is operational (%s Gbps).\n",
		    qla2x00_get_link_speed_str(ha, ha->link_data_rate));

		/*
		 * Mark all devices as missing so we will login again.
		 */
		atomic_set(&vha->loop_state, LOOP_UP);

		qla2x00_mark_all_devices_lost(vha, 1);

		if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
			set_bit(SCR_PENDING, &vha->dpc_flags);

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);

		qlt_async_event(mb[0], vha, mb);
		break;

	case MBA_RSCN_UPDATE:		/* State Change Registration */
		/* Check if the Vport has issued a SCR */
		if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
			break;
		/* Only handle SCNs for our Vport index. */
		if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
			break;

		ql_dbg(ql_dbg_async, vha, 0x5013,
		    "RSCN database changed -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
		host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
				| vha->d_id.b.al_pa;
		if (rscn_entry == host_pid) {
			ql_dbg(ql_dbg_async, vha, 0x5014,
			    "Ignoring RSCN update to local host "
			    "port ID (%06x).\n", host_pid);
			break;
		}

		/* Ignore reserved bits from RSCN-payload. */
		rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];

		/* Skip RSCNs for virtual ports on the same physical port */
		if (qla2x00_is_a_vp_did(vha, rscn_entry))
			break;

		/*
		 * Search for the rport related to this RSCN entry and mark it
		 * as lost.
		 */
		list_for_each_entry(fcport, &vha->vp_fcports, list) {
			if (atomic_read(&fcport->state) != FCS_ONLINE)
				continue;
			tmp_pid = fcport->d_id.b24;
			if (fcport->d_id.b24 == rscn_entry) {
				qla2x00_mark_device_lost(vha, fcport, 0, 0);
				break;
			}
		}

		atomic_set(&vha->loop_down_timer, 0);
		vha->flags.management_server_logged_in = 0;

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(RSCN_UPDATE, &vha->dpc_flags);
		qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
		break;

	/* case MBA_RIO_RESPONSE: */
	case MBA_ZIO_RESPONSE:
		ql_dbg(ql_dbg_async, vha, 0x5015,
		    "[R|Z]IO update completion.\n");

		if (IS_FWI2_CAPABLE(ha))
			qla24xx_process_response_queue(vha, rsp);
		else
			qla2x00_process_response_queue(rsp);
		break;

	case MBA_DISCARD_RND_FRAME:
		ql_dbg(ql_dbg_async, vha, 0x5016,
		    "Discard RND Frame -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;

	case MBA_TRACE_NOTIFICATION:
		ql_dbg(ql_dbg_async, vha, 0x5017,
		    "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
		break;

	case MBA_ISP84XX_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x5018,
		    "ISP84XX Alert Notification -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
		switch (mb[1]) {
		case A84_PANIC_RECOVERY:
			ql_log(ql_log_info, vha, 0x5019,
			    "Alert 84XX: panic recovery %04x %04x.\n",
			    mb[2], mb[3]);
			break;
		case A84_OP_LOGIN_COMPLETE:
			ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501a,
			    "Alert 84XX: firmware version %x.\n",
			    ha->cs84xx->op_fw_version);
			break;
		case A84_DIAG_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501b,
			    "Alert 84XX: diagnostic firmware version %x.\n",
			    ha->cs84xx->diag_fw_version);
			break;
		case A84_GOLD_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ha->cs84xx->fw_update = 1;
			ql_log(ql_log_info, vha, 0x501c,
			    "Alert 84XX: gold firmware version %x.\n",
			    ha->cs84xx->gold_fw_version);
			break;
		default:
			ql_log(ql_log_warn, vha, 0x501d,
			    "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
		}
		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
		break;
	case MBA_DCBX_START:
		ql_dbg(ql_dbg_async, vha, 0x501e,
		    "DCBX Started -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_DCBX_PARAM_UPDATE:
		ql_dbg(ql_dbg_async, vha, 0x501f,
		    "DCBX Parameters Updated -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_FCF_CONF_ERR:
		ql_dbg(ql_dbg_async, vha, 0x5020,
		    "FCF Configuration Error -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_IDC_NOTIFY:
		if (IS_QLA8031(vha->hw) || IS_QLA8044(ha)) {
			mb[4] = RD_REG_WORD(&reg24->mailbox4);
			if (((mb[2] & 0x7fff) == MBC_PORT_RESET ||
			    (mb[2] & 0x7fff) == MBC_SET_PORT_CONFIG) &&
			    (mb[4] & INTERNAL_LOOPBACK_MASK) != 0) {
				set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
				/*
				 * Extend loop down timer since port is active.
				 */
				if (atomic_read(&vha->loop_state) == LOOP_DOWN)
					atomic_set(&vha->loop_down_timer,
					    LOOP_DOWN_TIME);
				qla2xxx_wake_dpc(vha);
			}
		}
	case MBA_IDC_COMPLETE:
		if (ha->notify_lb_portup_comp)
			complete(&ha->lb_portup_comp);
		/* Fallthru */
	case MBA_IDC_TIME_EXT:
		if (IS_QLA81XX(vha->hw) || IS_QLA8031(vha->hw) ||
		    IS_QLA8044(ha))
			qla81xx_idc_event(vha, mb[0], mb[1]);
		break;

	case MBA_IDC_AEN:
		mb[4] = RD_REG_WORD(&reg24->mailbox4);
		mb[5] = RD_REG_WORD(&reg24->mailbox5);
		mb[6] = RD_REG_WORD(&reg24->mailbox6);
		mb[7] = RD_REG_WORD(&reg24->mailbox7);
		qla83xx_handle_8200_aen(vha, mb);
		break;

	default:
		ql_dbg(ql_dbg_async, vha, 0x5057,
		    "Unknown AEN:%04x %04x %04x %04x\n",
		    mb[0], mb[1], mb[2], mb[3]);
	}

	qlt_async_event(mb[0], vha, mb);

	if (!vha->vp_idx && ha->num_vhosts)
		qla2x00_alert_all_vps(rsp, mb);
}
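/*
 * Illustrative sketch only: the RSCN handling above composes 24-bit FC port
 * IDs as domain:area:al_pa, e.g. 01:02:ef -> 0x0102ef, and compares the
 * affected ID against the local host_pid before marking rports lost.  The
 * helper below is hypothetical, not a driver API.
 */
#if 0
static uint32_t qla2x00_example_port_id(uint8_t domain, uint8_t area,
    uint8_t al_pa)
{
	return ((uint32_t)domain << 16) | ((uint32_t)area << 8) | al_pa;
}
#endif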
/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @ha: SCSI driver HA context
 * @index: SRB index
 */
void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
				  struct req_que *req, uint32_t index)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;

	/* Validate handle. */
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x3014,
		    "Invalid SCSI command index (%x).\n", index);

		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot. */
		req->outstanding_cmds[index] = NULL;

		/* Save ISP completion status */
		sp->done(ha, sp, DID_OK << 16);
	} else {
		ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");

		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}
}
srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
    struct req_que *req, void *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	sts_entry_t *pkt = iocb;
	srb_t *sp = NULL;
	uint16_t index;

	index = LSW(pkt->handle);
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x5031,
		    "Invalid command index (%x).\n", index);
		if (IS_P3P_TYPE(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		goto done;
	}
	sp = req->outstanding_cmds[index];
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x5032,
		    "Invalid completion handle (%x) -- timed-out.\n", index);
		return sp;
	}
	if (sp->handle != index) {
		ql_log(ql_log_warn, vha, 0x5033,
		    "SRB handle (%x) mismatch %x.\n", sp->handle, index);
		return NULL;
	}

	req->outstanding_cmds[index] = NULL;

done:
	return sp;
}
static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
	const char func[] = "MBX-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint16_t status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (mbx->entry_status) {
		ql_dbg(ql_dbg_async, vha, 0x5043,
		    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
		    "entry-status=%x status=%x state-flag=%x "
		    "status-flags=%x.\n", type, sp->handle,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mbx->entry_status,
		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
		    le16_to_cpu(mbx->status_flags));

		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
		    (uint8_t *)mbx, sizeof(*mbx));

		goto logio_done;
	}

	status = le16_to_cpu(mbx->status);
	if (status == 0x30 && sp->type == SRB_LOGIN_CMD &&
	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
		status = 0;
	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5045,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le16_to_cpu(mbx->mb1));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type == SRB_LOGIN_CMD) {
			fcport->port_type = FCT_TARGET;
			if (le16_to_cpu(mbx->mb1) & BIT_0)
				fcport->port_type = FCT_INITIATOR;
			else if (le16_to_cpu(mbx->mb1) & BIT_1)
				fcport->flags |= FCF_FCP2_DEVICE;
		}
		goto logio_done;
	}

	data[0] = le16_to_cpu(mbx->mb0);
	switch (data[0]) {
	case MBS_PORT_ID_USED:
		data[1] = le16_to_cpu(mbx->mb1);
		break;
	case MBS_LOOP_ID_USED:
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_log(ql_log_warn, vha, 0x5046,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
	    le16_to_cpu(mbx->mb7));

logio_done:
	sp->done(vha, sp, 0);
}
static void
qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    sts_entry_t *pkt, int iocb_type)
{
	const char func[] = "CT_IOCB";
	const char *type;
	srb_t *sp;
	struct fc_bsg_job *bsg_job;
	uint16_t comp_status;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	bsg_job = sp->u.bsg_job;

	type = "ct pass-through";

	comp_status = le16_to_cpu(pkt->comp_status);

	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload to the caller
	 */
	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			res = DID_OK << 16;
			bsg_job->reply->reply_payload_rcv_len =
			    le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);

			ql_log(ql_log_warn, vha, 0x5048,
			    "CT pass-through-%s error "
			    "comp_status-status=0x%x total_byte = 0x%x.\n",
			    type, comp_status,
			    bsg_job->reply->reply_payload_rcv_len);
		} else {
			ql_log(ql_log_warn, vha, 0x5049,
			    "CT pass-through-%s error "
			    "comp_status-status=0x%x.\n", type, comp_status);
			res = DID_ERROR << 16;
			bsg_job->reply->reply_payload_rcv_len = 0;
		}
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		res = DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

	sp->done(vha, sp, res);
}
static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct sts_entry_24xx *pkt, int iocb_type)
{
	const char func[] = "ELS_CT_IOCB";
	const char *type;
	srb_t *sp;
	struct fc_bsg_job *bsg_job;
	uint16_t comp_status;
	uint32_t fw_status[3];
	uint8_t *fw_sts_ptr;
	int res;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;
	bsg_job = sp->u.bsg_job;

	type = NULL;
	switch (sp->type) {
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		type = "els";
		break;
	case SRB_CT_CMD:
		type = "ct pass-through";
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x503e,
		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp->type);
		return;
	}

	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
	fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
	fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);

	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload to the caller
	 */
	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			res = DID_OK << 16;
			bsg_job->reply->reply_payload_rcv_len =
			    le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);

			ql_dbg(ql_dbg_user, vha, 0x503f,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
			    type, sp->handle, comp_status, fw_status[1], fw_status[2],
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->total_byte_count));
			fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
			memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
		} else {
			ql_dbg(ql_dbg_user, vha, 0x5040,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
			    type, sp->handle, comp_status,
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_1),
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_2));
			res = DID_ERROR << 16;
			bsg_job->reply->reply_payload_rcv_len = 0;
			fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
			memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
		}
		ql_dump_buffer(ql_dbg_user + ql_dbg_buffer, vha, 0x5056,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		res = DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

	sp->done(vha, sp, res);
}
static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct logio_entry_24xx *logio)
{
	const char func[] = "LOGIO-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	uint16_t *data;
	uint32_t iop[2];

	sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
	if (!sp)
		return;

	lio = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
		QLA_LOGIO_LOGIN_RETRIED : 0;
	if (logio->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5034,
		    "Async-%s error entry - hdl=%x "
		    "portid=%02x%02x%02x entry-status=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    logio->entry_status);
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
		    (uint8_t *)logio, sizeof(*logio));

		goto logio_done;
	}

	if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
		ql_dbg(ql_dbg_async, fcport->vha, 0x5036,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x "
		    "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le32_to_cpu(logio->io_parameter[0]));

		data[0] = MBS_COMMAND_COMPLETE;
		if (sp->type != SRB_LOGIN_CMD)
			goto logio_done;

		iop[0] = le32_to_cpu(logio->io_parameter[0]);
		if (iop[0] & BIT_4) {
			fcport->port_type = FCT_TARGET;
			if (iop[0] & BIT_8)
				fcport->flags |= FCF_FCP2_DEVICE;
		} else if (iop[0] & BIT_5)
			fcport->port_type = FCT_INITIATOR;

		if (iop[0] & BIT_7)
			fcport->flags |= FCF_CONF_COMP_SUPPORTED;

		if (logio->io_parameter[7] || logio->io_parameter[8])
			fcport->supported_classes |= FC_COS_CLASS2;
		if (logio->io_parameter[9] || logio->io_parameter[10])
			fcport->supported_classes |= FC_COS_CLASS3;

		goto logio_done;
	}

	iop[0] = le32_to_cpu(logio->io_parameter[0]);
	iop[1] = le32_to_cpu(logio->io_parameter[1]);
	switch (iop[0]) {
	case LSC_SCODE_PORTID_USED:
		data[0] = MBS_PORT_ID_USED;
		data[1] = LSW(iop[1]);
		break;
	case LSC_SCODE_NPORT_USED:
		data[0] = MBS_LOOP_ID_USED;
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_dbg(ql_dbg_async, fcport->vha, 0x5037,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
	    "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    le16_to_cpu(logio->comp_status),
	    le32_to_cpu(logio->io_parameter[0]),
	    le32_to_cpu(logio->io_parameter[1]));

logio_done:
	sp->done(vha, sp, 0);
}
static void
qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct tsk_mgmt_entry *tsk)
{
	const char func[] = "TMF-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *iocb;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
	int error = 1;

	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
	if (!sp)
		return;

	iocb = &sp->u.iocb_cmd;
	type = sp->name;
	fcport = sp->fcport;

	if (sts->entry_status) {
		ql_log(ql_log_warn, fcport->vha, 0x5038,
		    "Async-%s error - hdl=%x entry-status(%x).\n",
		    type, sp->handle, sts->entry_status);
	} else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
		ql_log(ql_log_warn, fcport->vha, 0x5039,
		    "Async-%s error - hdl=%x completion status(%x).\n",
		    type, sp->handle, sts->comp_status);
	} else if (!(le16_to_cpu(sts->scsi_status) &
	    SS_RESPONSE_INFO_LEN_VALID)) {
		ql_log(ql_log_warn, fcport->vha, 0x503a,
		    "Async-%s error - hdl=%x no response info(%x).\n",
		    type, sp->handle, sts->scsi_status);
	} else if (le32_to_cpu(sts->rsp_data_len) < 4) {
		ql_log(ql_log_warn, fcport->vha, 0x503b,
		    "Async-%s error - hdl=%x not enough response(%d).\n",
		    type, sp->handle, sts->rsp_data_len);
	} else if (sts->data[3]) {
		ql_log(ql_log_warn, fcport->vha, 0x503c,
		    "Async-%s error - hdl=%x response(%x).\n",
		    type, sp->handle, sts->data[3]);
	} else {
		error = 0;
	}

	if (error) {
		iocb->u.tmf.data = error;
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
		    (uint8_t *)sts, sizeof(*sts));
	}

	sp->done(vha, sp, 0);
}
/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @ha: SCSI driver HA context
 */
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t	*pkt;
	uint16_t        handle_cnt;
	uint16_t        cnt;

	vha = pci_get_drvdata(ha->pdev);

	if (!vha->flags.online)
		return;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_TYPE_21:
			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts21_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_TYPE_22:
			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts22_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case MBX_IOCB_TYPE:
			qla2x00_mbx_iocb_entry(vha, rsp->req,
			    (struct mbx_entry *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		default:
			/* Type Not Supported. */
			ql_log(ql_log_warn, vha, 0x504a,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}
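/*
 * Note on the ring walk above: the response queue is a circular buffer the
 * firmware produces into and the driver consumes from.  Each consumed entry
 * is stamped RESPONSE_PROCESSED so a stale entry is never handled twice, and
 * the out-pointer register is only written back once per batch, after the
 * loop drains every available entry.
 */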
static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
	uint32_t sense_len, struct rsp_que *rsp, int res)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cp = GET_CMD_SP(sp);
	uint32_t track_sense_len;

	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	SET_CMD_SENSE_LEN(sp, sense_len);
	SET_CMD_SENSE_PTR(sp, cp->sense_buffer);
	track_sense_len = sense_len;

	if (sense_len > par_sense_len)
		sense_len = par_sense_len;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	SET_CMD_SENSE_PTR(sp, cp->sense_buffer + sense_len);
	track_sense_len -= sense_len;
	SET_CMD_SENSE_LEN(sp, track_sense_len);

	if (track_sense_len != 0) {
		rsp->status_srb = sp;
		cp->result = res;
	}

	if (sense_len) {
		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
		    "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
		    sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
		    cp);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
		    cp->sense_buffer, sense_len);
	}
}
struct scsi_dif_tuple {
	__be16 guard;	/* Checksum */
	__be16 app_tag;	/* APPL identifier */
	__be32 ref_tag;	/* Target LBA or indirect LBA */
};

/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA. In case of errors, we set the
 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
 * to indicate to the kernel that the HBA detected error.
 */
static inline int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cmd = GET_CMD_SP(sp);
	uint8_t		*ap = &sts24->data[12];
	uint8_t		*ep = &sts24->data[20];
	uint32_t	e_ref_tag, a_ref_tag;
	uint16_t	e_app_tag, a_app_tag;
	uint16_t	e_guard, a_guard;

	/*
	 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
	 * would make guard field appear at offset 2
	 */
	a_guard   = le16_to_cpu(*(uint16_t *)(ap + 2));
	a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
	a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
	e_guard   = le16_to_cpu(*(uint16_t *)(ep + 2));
	e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
	e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));

	ql_dbg(ql_dbg_io, vha, 0x3023,
	    "iocb(s) %p Returned STATUS.\n", sts24);

	ql_dbg(ql_dbg_io, vha, 0x3024,
	    "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
	    " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
	    " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
	    cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
	    a_app_tag, e_app_tag, a_guard, e_guard);

	/*
	 * Ignore sector if:
	 * For type     3: ref & app tag is all 'f's
	 * For type 0,1,2: app tag is all 'f's
	 */
	if ((a_app_tag == 0xffff) &&
	    ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
	     (a_ref_tag == 0xffffffff))) {
		uint32_t blocks_done, resid;
		sector_t lba_s = scsi_get_lba(cmd);

		/* 2TB boundary case covered automatically with this */
		blocks_done = e_ref_tag - (uint32_t)lba_s + 1;

		resid = scsi_bufflen(cmd) - (blocks_done *
		    cmd->device->sector_size);

		scsi_set_resid(cmd, resid);
		cmd->result = DID_OK << 16;

		/* Update protection tag */
		if (scsi_prot_sg_count(cmd)) {
			uint32_t i, j = 0, k = 0, num_ent;
			struct scatterlist *sg;
			struct sd_dif_tuple *spt;

			/* Patch the corresponding protection tags */
			scsi_for_each_prot_sg(cmd, sg,
			    scsi_prot_sg_count(cmd), i) {
				num_ent = sg_dma_len(sg) / 8;
				if (k + num_ent < blocks_done) {
					k += num_ent;
					continue;
				}
				j = blocks_done - k - 1;
				k = blocks_done;
				break;
			}

			if (k != blocks_done) {
				ql_log(ql_log_warn, vha, 0x302f,
				    "unexpected tag values tag:lba=%x:%llx)\n",
				    e_ref_tag, (unsigned long long)lba_s);
				return 1;
			}

			spt = page_address(sg_page(sg)) + sg->offset;
			spt += j;

			spt->app_tag = 0xffff;
			if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
				spt->ref_tag = 0xffffffff;
		}

		return 0;
	}

	/* check guard */
	if (e_guard != a_guard) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x1);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	/* check ref tag */
	if (e_ref_tag != a_ref_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x3);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	/* check appl tag */
	if (e_app_tag != a_app_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x2);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	return 1;
}
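/*
 * Worked example (illustrative): for a 512-byte-sector transfer starting at
 * LBA 100 where the tuple that stopped processing carries expected ref tag
 * 103, the arithmetic above gives blocks_done = 103 - 100 + 1 = 4, so the
 * residual reported to the midlayer is scsi_bufflen(cmd) - 4 * 512.  Because
 * only the low 32 bits of the LBA participate, the subtraction also wraps
 * correctly across the 2TB boundary.
 */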
static void
qla25xx_process_bidir_status_iocb(scsi_qla_host_t *vha, void *pkt,
    struct req_que *req, uint32_t index)
{
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	uint16_t	comp_status;
	uint16_t	scsi_status;
	uint16_t thread_id;
	uint32_t rval = EXT_STATUS_OK;
	struct fc_bsg_job *bsg_job = NULL;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;
	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;

	/* Validate handle. */
	if (index >= req->num_outstanding_cmds) {
		ql_log(ql_log_warn, vha, 0x70af,
		    "Invalid SCSI completion handle 0x%x.\n", index);
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot. */
		req->outstanding_cmds[index] = NULL;
		bsg_job = sp->u.bsg_job;
	} else {
		ql_log(ql_log_warn, vha, 0x70b0,
		    "Req:%d: Invalid ISP SCSI completion handle(0x%x)\n",
		    req->id, index);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}

	thread_id = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
	switch (comp_status) {
	case CS_COMPLETE:
		if (scsi_status == 0) {
			bsg_job->reply->reply_payload_rcv_len =
					bsg_job->reply_payload.payload_len;
			vha->qla_stats.input_bytes +=
				bsg_job->reply->reply_payload_rcv_len;
			vha->qla_stats.input_requests++;
			rval = EXT_STATUS_OK;
		}
		goto done;

	case CS_DATA_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b1,
		    "Command completed with data overrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_DATA_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b2,
		    "Command completed with data underrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;
	case CS_BIDIR_RD_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b3,
		    "Command completed with read data overrun thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_WR_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b4,
		    "Command completed with read and write data overrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_OVERRUN_WR_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b5,
		    "Command completed with read data over and write data "
		    "underrun thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_OVERRUN;
		break;

	case CS_BIDIR_RD_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b6,
		    "Command completed with read data underrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_RD_UNDERRUN_WR_OVERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b7,
		    "Command completed with read data under and write data "
		    "overrun thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_RD_WR_UNDERRUN:
		ql_dbg(ql_dbg_user, vha, 0x70b8,
		    "Command completed with read and write data underrun "
		    "thread_id=%d\n", thread_id);
		rval = EXT_STATUS_DATA_UNDERRUN;
		break;

	case CS_BIDIR_DMA_ERR:
		ql_dbg(ql_dbg_user, vha, 0x70b9,
		    "Command completed with data DMA error thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_DMA_ERR;
		break;

	case CS_TIMEOUT:
		ql_dbg(ql_dbg_user, vha, 0x70ba,
		    "Command completed with timeout thread_id=%d\n",
		    thread_id);
		rval = EXT_STATUS_TIMEOUT;
		break;
	default:
		ql_dbg(ql_dbg_user, vha, 0x70bb,
		    "Command completed with completion status=0x%x "
		    "thread_id=%d\n", comp_status, thread_id);
		rval = EXT_STATUS_ERR;
		break;
	}
	bsg_job->reply->reply_payload_rcv_len = 0;

done:
	/* Return the vendor specific reply to API */
	bsg_job->reply->reply_data.vendor_reply.vendor_rsp[0] = rval;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);
	/* Always return DID_OK, bsg will send the vendor specific response
	 * in this case only */
	sp->done(vha, sp, (DID_OK << 6));
}
1933 * qla2x00_status_entry() - Process a Status IOCB entry.
1934 * @ha: SCSI driver HA context
1935 * @pkt: Entry pointer
1938 qla2x00_status_entry(scsi_qla_host_t
*vha
, struct rsp_que
*rsp
, void *pkt
)
1942 struct scsi_cmnd
*cp
;
1944 struct sts_entry_24xx
*sts24
;
1945 uint16_t comp_status
;
1946 uint16_t scsi_status
;
1948 uint8_t lscsi_status
;
1950 uint32_t sense_len
, par_sense_len
, rsp_info_len
, resid_len
,
1952 uint8_t *rsp_info
, *sense_data
;
1953 struct qla_hw_data
*ha
= vha
->hw
;
1956 struct req_que
*req
;
1959 uint16_t state_flags
= 0;
1961 sts
= (sts_entry_t
*) pkt
;
1962 sts24
= (struct sts_entry_24xx
*) pkt
;
1963 if (IS_FWI2_CAPABLE(ha
)) {
1964 comp_status
= le16_to_cpu(sts24
->comp_status
);
1965 scsi_status
= le16_to_cpu(sts24
->scsi_status
) & SS_MASK
;
1966 state_flags
= le16_to_cpu(sts24
->state_flags
);
1968 comp_status
= le16_to_cpu(sts
->comp_status
);
1969 scsi_status
= le16_to_cpu(sts
->scsi_status
) & SS_MASK
;
1971 handle
= (uint32_t) LSW(sts
->handle
);
1972 que
= MSW(sts
->handle
);
1973 req
= ha
->req_q_map
[que
];
1975 /* Check for invalid queue pointer */
1977 que
>= find_first_zero_bit(ha
->req_qid_map
, ha
->max_req_queues
)) {
1978 ql_dbg(ql_dbg_io
, vha
, 0x3059,
1979 "Invalid status handle (0x%x): Bad req pointer. req=%p, "
1980 "que=%u.\n", sts
->handle
, req
, que
);
1984 /* Validate handle. */
1985 if (handle
< req
->num_outstanding_cmds
)
1986 sp
= req
->outstanding_cmds
[handle
];
1991 ql_dbg(ql_dbg_io
, vha
, 0x3017,
1992 "Invalid status handle (0x%x).\n", sts
->handle
);
1994 if (IS_P3P_TYPE(ha
))
1995 set_bit(FCOE_CTX_RESET_NEEDED
, &vha
->dpc_flags
);
1997 set_bit(ISP_ABORT_NEEDED
, &vha
->dpc_flags
);
1998 qla2xxx_wake_dpc(vha
);
2002 if (unlikely((state_flags
& BIT_1
) && (sp
->type
== SRB_BIDI_CMD
))) {
2003 qla25xx_process_bidir_status_iocb(vha
, pkt
, req
, handle
);
2007 /* Fast path completion. */
2008 if (comp_status
== CS_COMPLETE
&& scsi_status
== 0) {
2009 qla2x00_do_host_ramp_up(vha
);
2010 qla2x00_process_completed_request(vha
, req
, handle
);
2015 req
->outstanding_cmds
[handle
] = NULL
;
2016 cp
= GET_CMD_SP(sp
);
2018 ql_dbg(ql_dbg_io
, vha
, 0x3018,
2019 "Command already returned (0x%x/%p).\n",
2025 lscsi_status
= scsi_status
& STATUS_MASK
;
2027 fcport
= sp
->fcport
;
2030 sense_len
= par_sense_len
= rsp_info_len
= resid_len
=
2032 if (IS_FWI2_CAPABLE(ha
)) {
2033 if (scsi_status
& SS_SENSE_LEN_VALID
)
2034 sense_len
= le32_to_cpu(sts24
->sense_len
);
2035 if (scsi_status
& SS_RESPONSE_INFO_LEN_VALID
)
2036 rsp_info_len
= le32_to_cpu(sts24
->rsp_data_len
);
2037 if (scsi_status
& (SS_RESIDUAL_UNDER
| SS_RESIDUAL_OVER
))
2038 resid_len
= le32_to_cpu(sts24
->rsp_residual_count
);
2039 if (comp_status
== CS_DATA_UNDERRUN
)
2040 fw_resid_len
= le32_to_cpu(sts24
->residual_len
);
2041 rsp_info
= sts24
->data
;
2042 sense_data
= sts24
->data
;
2043 host_to_fcp_swap(sts24
->data
, sizeof(sts24
->data
));
2044 ox_id
= le16_to_cpu(sts24
->ox_id
);
2045 par_sense_len
= sizeof(sts24
->data
);
2047 if (scsi_status
& SS_SENSE_LEN_VALID
)
2048 sense_len
= le16_to_cpu(sts
->req_sense_length
);
2049 if (scsi_status
& SS_RESPONSE_INFO_LEN_VALID
)
2050 rsp_info_len
= le16_to_cpu(sts
->rsp_info_len
);
2051 resid_len
= le32_to_cpu(sts
->residual_length
);
2052 rsp_info
= sts
->rsp_info
;
2053 sense_data
= sts
->req_sense_data
;
2054 par_sense_len
= sizeof(sts
->req_sense_data
);
2057 /* Check for any FCP transport errors. */
2058 if (scsi_status
& SS_RESPONSE_INFO_LEN_VALID
) {
2059 /* Sense data lies beyond any FCP RESPONSE data. */
2060 if (IS_FWI2_CAPABLE(ha
)) {
2061 sense_data
+= rsp_info_len
;
2062 par_sense_len
-= rsp_info_len
;
2064 if (rsp_info_len
> 3 && rsp_info
[3]) {
2065 ql_dbg(ql_dbg_io
, fcport
->vha
, 0x3019,
2066 "FCP I/O protocol failure (0x%x/0x%x).\n",
2067 rsp_info_len
, rsp_info
[3]);
2069 res
= DID_BUS_BUSY
<< 16;
2074 /* Check for overrun. */
2075 if (IS_FWI2_CAPABLE(ha
) && comp_status
== CS_COMPLETE
&&
2076 scsi_status
& SS_RESIDUAL_OVER
)
2077 comp_status
= CS_DATA_OVERRUN
;
2080 * Based on Host and scsi status generate status code for Linux
2082 switch (comp_status
) {
2085 if (scsi_status
== 0) {
2089 if (scsi_status
& (SS_RESIDUAL_UNDER
| SS_RESIDUAL_OVER
)) {
2091 scsi_set_resid(cp
, resid
);
2093 if (!lscsi_status
&&
2094 ((unsigned)(scsi_bufflen(cp
) - resid
) <
2096 ql_dbg(ql_dbg_io
, fcport
->vha
, 0x301a,
2097 "Mid-layer underflow "
2098 "detected (0x%x of 0x%x bytes).\n",
2099 resid
, scsi_bufflen(cp
));
2101 res
= DID_ERROR
<< 16;
2105 res
= DID_OK
<< 16 | lscsi_status
;
2107 if (lscsi_status
== SAM_STAT_TASK_SET_FULL
) {
2108 ql_dbg(ql_dbg_io
, fcport
->vha
, 0x301b,
2109 "QUEUE FULL detected.\n");
2113 if (lscsi_status
!= SS_CHECK_CONDITION
)
2116 memset(cp
->sense_buffer
, 0, SCSI_SENSE_BUFFERSIZE
);
2117 if (!(scsi_status
& SS_SENSE_LEN_VALID
))
2120 qla2x00_handle_sense(sp
, sense_data
, par_sense_len
, sense_len
,

	case CS_DATA_UNDERRUN:
		/* Use F/W calculated residual length. */
		resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
		scsi_set_resid(cp, resid);
		if (scsi_status & SS_RESIDUAL_UNDER) {
			if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301d,
				    "Dropped frame(s) detected "
				    "(0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16 | lscsi_status;
				goto check_scsi_status;
			}

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			    cp->underflow)) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x301e,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				res = DID_ERROR << 16;
				break;
			}
		} else if (lscsi_status != SAM_STAT_TASK_SET_FULL &&
			    lscsi_status != SAM_STAT_BUSY) {
			/*
			 * scsi status of task set and busy are considered to be
			 * task not completed.
			 */
			ql_dbg(ql_dbg_io, fcport->vha, 0x301f,
			    "Dropped frame(s) detected (0x%x "
			    "of 0x%x bytes).\n", resid,
			    scsi_bufflen(cp));

			res = DID_ERROR << 16 | lscsi_status;
			goto check_scsi_status;
		} else {
			ql_dbg(ql_dbg_io, fcport->vha, 0x3030,
			    "scsi_status: 0x%x, lscsi_status: 0x%x\n",
			    scsi_status, lscsi_status);
		}

		res = DID_OK << 16 | lscsi_status;
		logit = 0;

check_scsi_status:
		/*
		 * Check to see if SCSI Status is non zero. If so report SCSI
		 * Status.
		 */
		if (lscsi_status != 0) {
			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
				ql_dbg(ql_dbg_io, fcport->vha, 0x3020,
				    "QUEUE FULL detected.\n");
				logit = 1;
				break;
			}
			if (lscsi_status != SS_CHECK_CONDITION)
				break;

			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
			if (!(scsi_status & SS_SENSE_LEN_VALID))
				break;

			qla2x00_handle_sense(sp, sense_data, par_sense_len,
			    sense_len, rsp, res);
		}
		break;

	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
	case CS_TIMEOUT:
	case CS_RESET:
		/*
		 * We are going to have the fc class block the rport
		 * while we try to recover so instruct the mid layer
		 * to requeue until the class decides how to handle this.
		 */
		res = DID_TRANSPORT_DISRUPTED << 16;

		if (comp_status == CS_TIMEOUT) {
			if (IS_FWI2_CAPABLE(ha))
				break;
			else if ((le16_to_cpu(sts->status_flags) &
			    SF_LOGOUT_SENT) == 0)
				break;
		}

		ql_dbg(ql_dbg_io, fcport->vha, 0x3021,
		    "Port to be marked lost on fcport=%02x%02x%02x, current "
		    "port state= %s.\n", fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    port_state_str[atomic_read(&fcport->state)]);

		if (atomic_read(&fcport->state) == FCS_ONLINE)
			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
		break;

	case CS_ABORTED:
		res = DID_RESET << 16;
		break;
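
	/* T10 protection-information (DIF) errors are decoded by a helper. */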
	case CS_DIF_ERROR:
		logit = qla2x00_handle_dif_error(sp, sts24);
		res = cp->result;
		break;

	case CS_TRANSPORT:
		res = DID_ERROR << 16;

		if (!IS_PI_SPLIT_DET_CAPABLE(ha))
			break;

		if (state_flags & BIT_4)
			scmd_printk(KERN_WARNING, cp,
			    "Unsupported device '%s' found.\n",
			    cp->device->vendor);
		break;

	default:
		res = DID_ERROR << 16;
		break;
	}

out:
	if (logit)
		ql_dbg(ql_dbg_io, fcport->vha, 0x3022,
		    "FCP command status: 0x%x-0x%x (0x%x) nexus=%ld:%d:%d "
		    "portid=%02x%02x%02x oxid=0x%x cdb=%10phN len=0x%x "
		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
		    comp_status, scsi_status, res, vha->host_no,
		    cp->device->id, cp->device->lun, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
		    cp->cmnd, scsi_bufflen(cp), rsp_info_len,
		    resid_len, fw_resid_len);

	if (!res)
		qla2x00_do_host_ramp_up(vha);

	if (rsp->status_srb == NULL)
		sp->done(ha, sp, res);
}

/**
 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 *
 * Extended sense data.
 */
static void
qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
{
	uint8_t	sense_sz = 0;
	struct qla_hw_data *ha = rsp->hw;
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	srb_t *sp = rsp->status_srb;
	struct scsi_cmnd *cp;
	uint32_t sense_len;
	uint8_t *sense_ptr;

	if (!sp || !GET_CMD_SENSE_LEN(sp))
		return;

	sense_len = GET_CMD_SENSE_LEN(sp);
	sense_ptr = GET_CMD_SENSE_PTR(sp);

	cp = GET_CMD_SP(sp);
	if (cp == NULL) {
		ql_log(ql_log_warn, vha, 0x3025,
		    "cmd is NULL: already returned to OS (sp=%p).\n", sp);

		rsp->status_srb = NULL;
		return;
	}

	if (sense_len > sizeof(pkt->data))
		sense_sz = sizeof(pkt->data);
	else
		sense_sz = sense_len;

	/* Move sense data. */
	if (IS_FWI2_CAPABLE(ha))
		host_to_fcp_swap(pkt->data, sizeof(pkt->data));
	memcpy(sense_ptr, pkt->data, sense_sz);
	ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
	    sense_ptr, sense_sz);

	sense_len -= sense_sz;
	sense_ptr += sense_sz;

	SET_CMD_SENSE_PTR(sp, sense_ptr);
	SET_CMD_SENSE_LEN(sp, sense_len);

	/* Place command on done queue. */
	if (sense_len == 0) {
		rsp->status_srb = NULL;
		sp->done(ha, sp, cp->result);
	}
}

/**
 * qla2x00_error_entry() - Process an error entry.
 * @ha: SCSI driver HA context
 * @pkt: Entry pointer
 */
static void
qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;
	const char func[] = "ERROR-IOCB";
	uint16_t que = MSW(pkt->handle);
	struct req_que *req = NULL;
	int res = DID_ERROR << 16;

	ql_dbg(ql_dbg_async, vha, 0x502a,
	    "type of error status in response: 0x%x\n", pkt->entry_status);

	if (que >= ha->max_req_queues || !ha->req_q_map[que])
		goto fatal;

	req = ha->req_q_map[que];

	if (pkt->entry_status & RF_BUSY)
		res = DID_BUS_BUSY << 16;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (sp) {
		sp->done(ha, sp, res);
		return;
	}
fatal:
	ql_log(ql_log_warn, vha, 0x5030,
	    "Error entry - invalid handle/queue.\n");

	if (IS_P3P_TYPE(ha))
		set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
	else
		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	qla2xxx_wake_dpc(vha);
}

/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @ha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint32_t	mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Read all mbox registers? */
	mboxes = (1 << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)&reg->mailbox1;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		mboxes >>= 1;
		wptr++;
	}
}

/**
 * qla24xx_process_response_queue() - Process response queue entries.
 * @ha: SCSI driver HA context
 */
void qla24xx_process_response_queue(struct scsi_qla_host *vha,
	struct rsp_que *rsp)
{
	struct sts_entry_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;

	if (!vha->flags.online)
		return;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (struct sts_entry_24xx *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);

			(void)qlt_24xx_process_response_error(vha, pkt);

			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case VP_RPT_ID_IOCB_TYPE:
			qla24xx_report_id_acquisition(vha,
			    (struct vp_rpt_id_entry_24xx *)pkt);
			break;
		case LOGINOUT_PORT_IOCB_TYPE:
			qla24xx_logio_entry(vha, rsp->req,
			    (struct logio_entry_24xx *)pkt);
			break;
		case TSK_MGMT_IOCB_TYPE:
			qla24xx_tm_iocb_entry(vha, rsp->req,
			    (struct tsk_mgmt_entry *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		case ELS_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
			break;
		case ABTS_RECV_24XX:
			/* ensure that the ATIO queue is empty */
			qlt_24xx_process_atio_queue(vha);
		case ABTS_RESP_24XX:
		case CTIO_TYPE7:
		case NOTIFY_ACK_TYPE:
			qlt_response_pkt_all_vps(vha, (response_t *)pkt);
			break;
		case MARKER_TYPE:
			/* Do nothing in this case, this check is to prevent it
			 * from falling into default case
			 */
			break;
		default:
			/* Type Not Supported. */
			ql_dbg(ql_dbg_async, vha, 0x5042,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	if (IS_P3P_TYPE(ha)) {
		struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
		WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
	} else
		WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
}
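
/*
 * qla2xxx_check_risc_status() - Probe the RISC I/O window registers while
 * the RISC is paused, logging an additional diagnostic code if one is
 * present. Only meaningful on ISP25xx, ISP81xx and ISP83xx parts.
 */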
static void
qla2xxx_check_risc_status(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
		return;

	rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_window, 0x0001);
	for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0001);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		goto next_test;

	rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_window, 0x0003);
	for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0003);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval != QLA_SUCCESS)
		goto done;

next_test:
	if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
		ql_log(ql_log_info, vha, 0x504c,
		    "Additional code -- 0x55AA.\n");

done:
	WRT_REG_DWORD(&reg->iobase_window, 0x0000);
	RD_REG_DWORD(&reg->iobase_window);
}

/**
 * qla24xx_intr_handler() - Process interrupts for the ISP24xx.
 *
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla24xx_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct device_reg_24xx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint32_t	hccr;
	uint16_t	mb[8];
	struct rsp_que *rsp;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x5059,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return IRQ_HANDLED;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);

			ql_log(ql_log_warn, vha, 0x504b,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case INTR_ROM_MB_SUCCESS:
		case INTR_ROM_MB_FAILED:
		case INTR_MB_SUCCESS:
		case INTR_MB_FAILED:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;
			break;
		case INTR_ASYNC_EVENT:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case INTR_RSP_QUE_UPDATE:
		case INTR_RSP_QUE_UPDATE_83XX:
			qla24xx_process_response_queue(vha, rsp);
			break;
		case INTR_ATIO_QUE_UPDATE:
			qlt_24xx_process_atio_queue(vha);
			break;
		case INTR_ATIO_RSP_QUE_UPDATE:
			qlt_24xx_process_atio_queue(vha);
			qla24xx_process_response_queue(vha, rsp);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x504f,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
		if (unlikely(IS_QLA83XX(ha) && (ha->pdev->revision == 1)))
			ndelay(3500);
	}
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}
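
/*
 * qla24xx_msix_rsp_q() - MSI-X vector servicing the base response queue.
 * Processes completions in interrupt context and, unless the MSI-X
 * handshake is disabled, acknowledges the interrupt by clearing the RISC
 * interrupt bit in HCCR.
 */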
static irqreturn_t
qla24xx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	struct scsi_qla_host *vha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505a,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, rsp);
	if (!ha->flags.disable_msix_handshake) {
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}
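
/*
 * qla25xx_msix_rsp_q() - MSI-X vector for an additional (multiqueue)
 * response queue. Clears the interrupt, then defers the actual queue
 * processing to a workqueue item on the CPU derived from the queue id.
 */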
static irqreturn_t
qla25xx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505b,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;

	/* Clear the interrupt, if enabled, for this response queue */
	if (!ha->flags.disable_msix_handshake) {
		reg = &ha->iobase->isp24;
		spin_lock_irqsave(&ha->hardware_lock, flags);
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}
	queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);

	return IRQ_HANDLED;
}
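
/*
 * qla24xx_msix_default() - Default MSI-X vector: handles mailbox
 * completions, async events, and response/ATIO queue updates that are
 * not routed to a dedicated vector.
 */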
static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	int		status;
	uint32_t	stat;
	uint32_t	hccr;
	uint16_t	mb[8];
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		ql_log(ql_log_info, NULL, 0x505c,
		    "%s: NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	do {
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);

			ql_log(ql_log_info, vha, 0x5050,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case INTR_ROM_MB_SUCCESS:
		case INTR_ROM_MB_FAILED:
		case INTR_MB_SUCCESS:
		case INTR_MB_FAILED:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;
			break;
		case INTR_ASYNC_EVENT:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case INTR_RSP_QUE_UPDATE:
		case INTR_RSP_QUE_UPDATE_83XX:
			qla24xx_process_response_queue(vha, rsp);
			break;
		case INTR_ATIO_QUE_UPDATE:
			qlt_24xx_process_atio_queue(vha);
			break;
		case INTR_ATIO_RSP_QUE_UPDATE:
			qlt_24xx_process_atio_queue(vha);
			qla24xx_process_response_queue(vha, rsp);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5051,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
	} while (0);
	qla2x00_handle_mbx_completion(ha, status);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}

/* Interrupt handling helpers. */

struct qla_init_msix_entry {
	const char *name;
	irq_handler_t handler;
};

static struct qla_init_msix_entry msix_entries[3] = {
	{ "qla2xxx (default)", qla24xx_msix_default },
	{ "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
	{ "qla2xxx (multiq)", qla25xx_msix_rsp_q },
};

static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
	{ "qla2xxx (default)", qla82xx_msix_default },
	{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};

static struct qla_init_msix_entry qla83xx_msix_entries[3] = {
	{ "qla2xxx (default)", qla24xx_msix_default },
	{ "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
	{ "qla2xxx (atio_q)", qla83xx_msix_atio_q },
};
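
/*
 * qla24xx_disable_msix() - Free every registered MSI-X vector and tear
 * down MSI-X for the adapter.
 */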
static void
qla24xx_disable_msix(struct qla_hw_data *ha)
{
	int i;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		if (qentry->have_irq)
			free_irq(qentry->vector, qentry->rsp);
	}
	pci_disable_msix(ha->pdev);
	kfree(ha->msix_entries);
	ha->msix_entries = NULL;
	ha->flags.msix_enabled = 0;
	ql_dbg(ql_dbg_init, vha, 0x0042,
	    "Disabled MSI-X.\n");
}
static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
#define MIN_MSIX_COUNT	2
#define ATIO_VECTOR	2
	int i, ret;
	struct msix_entry *entries;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
			GFP_KERNEL);
	if (!entries) {
		ql_log(ql_log_warn, vha, 0x00bc,
		    "Failed to allocate memory for msix_entry.\n");
		return -ENOMEM;
	}

	for (i = 0; i < ha->msix_count; i++)
		entries[i].entry = i;

	ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
	if (ret) {
		if (ret < MIN_MSIX_COUNT)
			goto msix_failed;

		ql_log(ql_log_warn, vha, 0x00c6,
		    "MSI-X: Failed to enable support "
		    "-- %d/%d\n Retry with %d vectors.\n",
		    ha->msix_count, ret, ret);
		ha->msix_count = ret;
		ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
		if (ret) {
msix_failed:
			ql_log(ql_log_fatal, vha, 0x00c7,
			    "MSI-X: Failed to enable support, "
			    "giving up -- %d/%d.\n",
			    ha->msix_count, ret);
			goto msix_out;
		}
		ha->max_rsp_queues = ha->msix_count - 1;
	}
	ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
				ha->msix_count, GFP_KERNEL);
	if (!ha->msix_entries) {
		ql_log(ql_log_fatal, vha, 0x00c8,
		    "Failed to allocate memory for ha->msix_entries.\n");
		ret = -ENOMEM;
		goto msix_out;
	}
	ha->flags.msix_enabled = 1;

	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		qentry->vector = entries[i].vector;
		qentry->entry = entries[i].entry;
		qentry->have_irq = 0;
		qentry->rsp = NULL;
	}

	/* Enable MSI-X vectors for the base queue */
	for (i = 0; i < 2; i++) {
		qentry = &ha->msix_entries[i];
		if (IS_P3P_TYPE(ha))
			ret = request_irq(qentry->vector,
				qla82xx_msix_entries[i].handler,
				0, qla82xx_msix_entries[i].name, rsp);
		else
			ret = request_irq(qentry->vector,
				msix_entries[i].handler,
				0, msix_entries[i].name, rsp);
		if (ret)
			goto msix_register_fail;
		qentry->have_irq = 1;
		qentry->rsp = rsp;
		rsp->msix = qentry;
	}

	/*
	 * If target mode is enabled, also request the vector for the ATIO
	 * queue.
	 */
	if (QLA_TGT_MODE_ENABLED() && IS_ATIO_MSIX_CAPABLE(ha)) {
		qentry = &ha->msix_entries[ATIO_VECTOR];
		ret = request_irq(qentry->vector,
			qla83xx_msix_entries[ATIO_VECTOR].handler,
			0, qla83xx_msix_entries[ATIO_VECTOR].name, rsp);
		qentry->have_irq = 1;
		qentry->rsp = rsp;
		rsp->msix = qentry;
	}

msix_register_fail:
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00cb,
		    "MSI-X: unable to register handler -- %x/%d.\n",
		    qentry->vector, ret);
		qla24xx_disable_msix(ha);
		ha->mqenable = 0;
		goto msix_out;
	}

	/* Enable MSI-X vector for response queue update for queue 0 */
	if (IS_QLA83XX(ha)) {
		if (ha->msixbase && ha->mqiobase &&
		    (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
			ha->mqenable = 1;
	} else
		if (ha->mqiobase
		    && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
			ha->mqenable = 1;
	ql_dbg(ql_dbg_multiq, vha, 0xc005,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
	ql_dbg(ql_dbg_init, vha, 0x0055,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);

msix_out:
	kfree(entries);
	return ret;
}
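
/*
 * qla2x00_request_irqs() - Set up interrupt delivery for the adapter,
 * preferring MSI-X, then MSI, then legacy INTx, subject to per-chip
 * quirks and blacklisted subsystem IDs.
 */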
int
qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	int ret = QLA_FUNCTION_FAILED;
	device_reg_t __iomem *reg = ha->iobase;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	/* If possible, enable MSI-X. */
	if (!IS_QLA2432(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_CNA_CAPABLE(ha) && !IS_QLA2031(ha) && !IS_QLAFX00(ha))
		goto skip_msi;

	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
	    (ha->pdev->subsystem_device == 0x7040 ||
	     ha->pdev->subsystem_device == 0x7041 ||
	     ha->pdev->subsystem_device == 0x1705)) {
		ql_log(ql_log_warn, vha, 0x0034,
		    "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X,0x%X).\n",
		    ha->pdev->subsystem_vendor,
		    ha->pdev->subsystem_device);
		goto skip_msi;
	}

	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
		ql_log(ql_log_warn, vha, 0x0035,
		    "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
		    ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
		goto skip_msix;
	}

	ret = qla24xx_enable_msix(ha, rsp);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0036,
		    "MSI-X: Enabled (0x%X, 0x%X).\n",
		    ha->chip_revision, ha->fw_attributes);
		goto clear_risc_ints;
	}
	ql_log(ql_log_info, vha, 0x0037,
	    "MSI-X: Falling back to MSI mode -- %d.\n", ret);

skip_msix:

	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_QLA8001(ha) && !IS_P3P_TYPE(ha) && !IS_QLAFX00(ha))
		goto skip_msi;

	ret = pci_enable_msi(ha->pdev);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0038,
		    "MSI: Enabled.\n");
		ha->flags.msi_enabled = 1;
	} else
		ql_log(ql_log_warn, vha, 0x0039,
		    "MSI: Falling back to INTa mode -- %d.\n", ret);

	/* Skip INTx on ISP82xx. */
	if (!ha->flags.msi_enabled && IS_QLA82XX(ha))
		return QLA_FUNCTION_FAILED;

skip_msi:

	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    ha->flags.msi_enabled ? 0 : IRQF_SHARED,
	    QLA2XXX_DRIVER_NAME, rsp);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x003a,
		    "Failed to reserve interrupt %d -- already in use.\n",
		    ha->pdev->irq);
		goto fail;
	} else if (!ha->flags.msi_enabled) {
		ql_dbg(ql_dbg_init, vha, 0x0125,
		    "INTa mode: Enabled.\n");
		ha->flags.mr_intr_valid = 1;
	}

clear_risc_ints:

	spin_lock_irq(&ha->hardware_lock);
	if (!IS_FWI2_CAPABLE(ha))
		WRT_REG_WORD(&reg->isp.semaphore, 0);
	spin_unlock_irq(&ha->hardware_lock);

fail:
	return ret;
}
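
/*
 * qla2x00_free_irqs() - Release whichever interrupt resources
 * qla2x00_request_irqs() acquired (MSI-X, MSI, or INTx).
 */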
void
qla2x00_free_irqs(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct rsp_que *rsp;

	/*
	 * We need to check that ha->rsp_q_map is valid in case we are called
	 * from a probe failure context.
	 */
	if (!ha->rsp_q_map || !ha->rsp_q_map[0])
		return;
	rsp = ha->rsp_q_map[0];

	if (ha->flags.msix_enabled)
		qla24xx_disable_msix(ha);
	else if (ha->flags.msi_enabled) {
		free_irq(ha->pdev->irq, rsp);
		pci_disable_msi(ha->pdev);
	} else
		free_irq(ha->pdev->irq, rsp);
}
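
/*
 * qla25xx_request_irq() - Register the "multiq" MSI-X handler for an
 * additional response queue created after initial IRQ setup.
 */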
int qla25xx_request_irq(struct rsp_que *rsp)
{
	struct qla_hw_data *ha = rsp->hw;
	struct qla_init_msix_entry *intr = &msix_entries[2];
	struct qla_msix_entry *msix = rsp->msix;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	int ret;

	ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00e6,
		    "MSI-X: Unable to register handler -- %x/%d.\n",