/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2011 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/delay.h>
#include <linux/slab.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_bsg_fc.h>
#include <scsi/scsi_eh.h>
static void qla2x00_mbx_completion(scsi_qla_host_t *, uint16_t);
static void qla2x00_process_completed_request(struct scsi_qla_host *,
	struct req_que *, uint32_t);
static void qla2x00_status_entry(scsi_qla_host_t *, struct rsp_que *, void *);
static void qla2x00_status_cont_entry(struct rsp_que *, sts_cont_entry_t *);
static void qla2x00_error_entry(scsi_qla_host_t *, struct rsp_que *,
	sts_entry_t *);
/**
 * qla2100_intr_handler() - Process interrupts for the ISP2100 and ISP2200.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2100_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct qla_hw_data *ha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint16_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		printk(KERN_INFO
		    "%s(): NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
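	/*
	 * Bounded polling loop: service at most 50 interrupt conditions per
	 * invocation so a stuck RISC cannot hold the CPU (and the
	 * hardware_lock) indefinitely.
	 */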
	for (iter = 50; iter--; ) {
		hccr = RD_REG_WORD(&reg->hccr);
		if (hccr & HCCR_RISC_PAUSE) {
			if (pci_channel_offline(ha->pdev))
				break;

			/*
			 * Issue a "HARD" reset in order for the RISC interrupt
			 * bit to be cleared.  Schedule a big hammer to get
			 * out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) == 0)
			break;

		if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);

			/* Get mailbox data. */
			mb[0] = RD_MAILBOX_REG(ha, reg, 0);
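			/*
			 * Mailbox register 0 identifies the event: values in
			 * 0x4000-0x7fff are mailbox-command completion status
			 * codes (MBS_*), values in 0x8000-0xbfff are
			 * asynchronous event codes (MBA_*).
			 */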
			if (mb[0] > 0x3fff && mb[0] < 0x8000) {
				qla2x00_mbx_completion(vha, mb[0]);
				status |= MBX_INTERRUPT;
			} else if (mb[0] > 0x7fff && mb[0] < 0xc000) {
				mb[1] = RD_MAILBOX_REG(ha, reg, 1);
				mb[2] = RD_MAILBOX_REG(ha, reg, 2);
				mb[3] = RD_MAILBOX_REG(ha, reg, 3);
				qla2x00_async_event(vha, rsp, mb);
			} else {
				/*EMPTY*/
				ql_dbg(ql_dbg_async, vha, 0x5025,
				    "Unrecognized interrupt type (%d).\n",
				    mb[0]);
			}
			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			RD_REG_WORD(&reg->semaphore);
		} else {
			qla2x00_process_response_queue(rsp);

			WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
			RD_REG_WORD(&reg->hccr);
		}
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return (IRQ_HANDLED);
}
/**
 * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla2300_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t	*vha;
	struct device_reg_2xxx __iomem *reg;
	int		status;
	unsigned long	iter;
	uint32_t	stat;
	uint16_t	hccr;
	uint16_t	mb[4];
	struct rsp_que *rsp;
	struct qla_hw_data *ha;
	unsigned long	flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		printk(KERN_INFO
		    "%s(): NULL response queue pointer.\n", __func__);
		return (IRQ_NONE);
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
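		/*
		 * On ISP23xx the host-status dword encodes everything needed
		 * for most events: the low byte gives the interrupt reason
		 * and the upper word carries mailbox0/status, avoiding extra
		 * register reads in the common cases.
		 */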
		if (stat & HSR_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_WORD(&reg->hccr);
			if (hccr & (BIT_15 | BIT_13 | BIT_11 | BIT_8))
				ql_log(ql_log_warn, vha, 0x5026,
				    "Parity error -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);
			else
				ql_log(ql_log_warn, vha, 0x5027,
				    "RISC paused -- HCCR=%x, Dumping "
				    "firmware.\n", hccr);

			/*
			 * Issue a "HARD" reset in order for the RISC
			 * interrupt bit to be cleared.  Schedule a big
			 * hammer to get out of the RISC PAUSED state.
			 */
			WRT_REG_WORD(&reg->hccr, HCCR_RESET_RISC);
			RD_REG_WORD(&reg->hccr);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSR_RISC_INT) == 0)
			break;
		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla2x00_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			/* Release mailbox registers. */
			WRT_REG_WORD(&reg->semaphore, 0);
			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_MAILBOX_REG(ha, reg, 1);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			mb[3] = RD_MAILBOX_REG(ha, reg, 3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
			qla2x00_process_response_queue(rsp);
			break;
		case 0x15:
			mb[0] = MBA_CMPLT_1_16BIT;
			mb[1] = MSW(stat);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x16:
			mb[0] = MBA_SCSI_COMPLETION;
			mb[1] = MSW(stat);
			mb[2] = RD_MAILBOX_REG(ha, reg, 2);
			qla2x00_async_event(vha, rsp, mb);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5028,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
		RD_REG_WORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return (IRQ_HANDLED);
}
/**
 * qla2x00_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla2x00_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t	cnt;
	uint32_t	mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;

	/* Read all mbox registers? */
	mboxes = (1 << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x5001, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;

	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 1);
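
	/*
	 * Walk the remaining mailbox registers, copying only those whose
	 * bit is set in the expected-input mask (shifted right once per
	 * register); registers 4 and 5 are read through
	 * qla2x00_debounce_register().
	 */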
	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (IS_QLA2200(ha) && cnt == 8)
			wptr = (uint16_t __iomem *)MAILBOX_REG(ha, reg, 8);
		if ((cnt == 4 || cnt == 5) && (mboxes & BIT_0))
			ha->mailbox_out[cnt] = qla2x00_debounce_register(wptr);
		else if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		wptr++;
		mboxes >>= 1;
	}
}
static void
qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
{
	static char *event[] =
		{ "Complete", "Request Notification", "Time Extension" };
	int rval;
	struct device_reg_24xx __iomem *reg24 = &vha->hw->iobase->isp24;
	uint16_t __iomem *wptr;
	uint16_t cnt, timeout, mb[QLA_IDC_ACK_REGS];

	/* Seed data -- mailbox1 -> mailbox7. */
	wptr = (uint16_t __iomem *)&reg24->mailbox1;
	for (cnt = 0; cnt < QLA_IDC_ACK_REGS; cnt++, wptr++)
		mb[cnt] = RD_REG_WORD(wptr);

	ql_dbg(ql_dbg_async, vha, 0x5021,
	    "Inter-Driver Communication %s -- "
	    "%04x %04x %04x %04x %04x %04x %04x.\n",
	    event[aen & 0xff], mb[0], mb[1], mb[2], mb[3],
	    mb[4], mb[5], mb[6]);

	/* Acknowledgement needed? [Notify && non-zero timeout]. */
	timeout = (descr >> 8) & 0xf;
	if (aen != MBA_IDC_NOTIFY || !timeout)
		return;

	ql_dbg(ql_dbg_async, vha, 0x5022,
	    "%lu Inter-Driver Communication %s -- ACK timeout=%d.\n",
	    vha->host_no, event[aen & 0xff], timeout);

	rval = qla2x00_post_idc_ack_work(vha, mb);
	if (rval != QLA_SUCCESS)
		ql_log(ql_log_warn, vha, 0x5023,
		    "IDC failed to post ACK.\n");
}
/**
 * qla2x00_async_event() - Process asynchronous events.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @mb: Mailbox registers (0 - 3)
 */
void
qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
{
#define LS_UNKNOWN	2
	static char	*link_speeds[] = { "1", "2", "?", "4", "8", "10" };
	char		*link_speed;
	uint16_t	handle_cnt;
	uint16_t	cnt, mbx;
	uint32_t	handles[5];
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
	uint32_t	rscn_entry, host_pid;
	uint8_t		rscn_queue_index;
	unsigned long	flags;

	/* Setup to process RIO completion. */
	handle_cnt = 0;
	if (IS_QLA8XXX_TYPE(ha))
		goto skip_rio;
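	/*
	 * Reduced Interrupt Operation (RIO): a single async event can carry
	 * up to five 16-bit (or two 32-bit) completion handles packed into
	 * the mailbox registers; unpack them and normalize the event code
	 * to MBA_SCSI_COMPLETION.
	 */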
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handle_cnt = 1;
		break;
	case MBA_CMPLT_1_16BIT:
		handles[0] = mb[1];
		handle_cnt = 1;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_3_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handle_cnt = 3;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_4_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handle_cnt = 4;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_5_16BIT:
		handles[0] = mb[1];
		handles[1] = mb[2];
		handles[2] = mb[3];
		handles[3] = (uint32_t)RD_MAILBOX_REG(ha, reg, 6);
		handles[4] = (uint32_t)RD_MAILBOX_REG(ha, reg, 7);
		handle_cnt = 5;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	case MBA_CMPLT_2_32BIT:
		handles[0] = le32_to_cpu((uint32_t)((mb[2] << 16) | mb[1]));
		handles[1] = le32_to_cpu(
		    ((uint32_t)(RD_MAILBOX_REG(ha, reg, 7) << 16)) |
		    RD_MAILBOX_REG(ha, reg, 6));
		handle_cnt = 2;
		mb[0] = MBA_SCSI_COMPLETION;
		break;
	default:
		break;
	}
skip_rio:
	switch (mb[0]) {
	case MBA_SCSI_COMPLETION:	/* Fast Post */
		if (!vha->flags.online)
			break;

		for (cnt = 0; cnt < handle_cnt; cnt++)
			qla2x00_process_completed_request(vha, rsp->req,
			    handles[cnt]);
		break;

	case MBA_RESET:			/* Reset */
		ql_dbg(ql_dbg_async, vha, 0x5002,
		    "Asynchronous RESET.\n");

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);
		break;

	case MBA_SYSTEM_ERR:		/* System Error */
		mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox7) : 0;
		ql_log(ql_log_warn, vha, 0x5003,
		    "ISP System Error - mbx1=%xh mbx2=%xh mbx3=%xh "
		    "mbx7=%xh.\n", mb[1], mb[2], mb[3], mbx);

		ha->isp_ops->fw_dump(vha, 1);

		if (IS_FWI2_CAPABLE(ha)) {
			if (mb[1] == 0 && mb[2] == 0) {
				ql_log(ql_log_fatal, vha, 0x5004,
				    "Unrecoverable Hardware Error: adapter "
				    "marked OFFLINE!\n");
				vha->flags.online = 0;
			} else {
				/* Check to see if MPI timeout occurred */
				if ((mbx & MBX_3) && (ha->flags.port0))
					set_bit(MPI_RESET_NEEDED,
					    &vha->dpc_flags);

				set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			}
		} else if (mb[1] == 0) {
			ql_log(ql_log_fatal, vha, 0x5005,
			    "Unrecoverable Hardware Error: adapter marked "
			    "OFFLINE!\n");
			vha->flags.online = 0;
		} else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;
	case MBA_REQ_TRANSFER_ERR:	/* Request Transfer Error */
		ql_log(ql_log_warn, vha, 0x5006,
		    "ISP Request Transfer Error (%x).\n", mb[1]);

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_RSP_TRANSFER_ERR:	/* Response Transfer Error */
		ql_log(ql_log_warn, vha, 0x5007,
		    "ISP Response Transfer Error.\n");

		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		break;

	case MBA_WAKEUP_THRES:		/* Request Queue Wake-up */
		ql_dbg(ql_dbg_async, vha, 0x5008,
		    "Asynchronous WAKEUP_THRES.\n");
		break;

	case MBA_LIP_OCCURRED:		/* Loop Initialization Procedure */
		ql_dbg(ql_dbg_async, vha, 0x5009,
		    "LIP occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIP, mb[1]);
		break;

	case MBA_LOOP_UP:		/* Loop Up Event */
		if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
			link_speed = link_speeds[0];
			ha->link_data_rate = PORT_SPEED_1GB;
		} else {
			link_speed = link_speeds[LS_UNKNOWN];
			if (mb[1] < 5)
				link_speed = link_speeds[mb[1]];
			else if (mb[1] == 0x13)
				link_speed = link_speeds[5];
			ha->link_data_rate = mb[1];
		}

		ql_dbg(ql_dbg_async, vha, 0x500a,
		    "LOOP UP detected (%s Gbps).\n", link_speed);

		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
		break;
	case MBA_LOOP_DOWN:		/* Loop Down Event */
		mbx = IS_QLA81XX(ha) ? RD_REG_WORD(&reg24->mailbox4) : 0;
		mbx = IS_QLA82XX(ha) ? RD_REG_WORD(&reg82->mailbox_out[4]) : mbx;
		ql_dbg(ql_dbg_async, vha, 0x500b,
		    "LOOP DOWN detected (%x %x %x %x).\n",
		    mb[1], mb[2], mb[3], mbx);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			vha->device_flags |= DFLG_NO_CABLE;
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		vha->flags.management_server_logged_in = 0;
		ha->link_data_rate = PORT_SPEED_UNKNOWN;
		qla2x00_post_aen_work(vha, FCH_EVT_LINKDOWN, 0);
		break;

	case MBA_LIP_RESET:		/* LIP reset occurred */
		ql_dbg(ql_dbg_async, vha, 0x500c,
		    "LIP reset occurred (%x).\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		ha->operating_mode = LOOP;
		vha->flags.management_server_logged_in = 0;
		qla2x00_post_aen_work(vha, FCH_EVT_LIPRESET, mb[1]);
		break;
	/* case MBA_DCBX_COMPLETE: */
	case MBA_POINT_TO_POINT:	/* Point-to-Point */
		if (IS_QLA2100(ha))
			break;

		if (IS_QLA8XXX_TYPE(ha)) {
			ql_dbg(ql_dbg_async, vha, 0x500d,
			    "DCBX Completed -- %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
			if (ha->notify_dcbx_comp)
				complete(&ha->dcbx_comp);

		} else
			ql_dbg(ql_dbg_async, vha, 0x500e,
			    "Asynchronous P2P MODE received.\n");

		/*
		 * Until there's a transition from loop down to loop up, treat
		 * this as loop down only.
		 */
		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		if (!(test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)))
			set_bit(RESET_MARKER_NEEDED, &vha->dpc_flags);

		set_bit(REGISTER_FC4_NEEDED, &vha->dpc_flags);
		set_bit(REGISTER_FDMI_NEEDED, &vha->dpc_flags);

		ha->flags.gpsc_supported = 1;
		vha->flags.management_server_logged_in = 0;
		break;
	case MBA_CHG_IN_CONNECTION:	/* Change in connection mode */
		if (IS_QLA2100(ha))
			break;

		ql_dbg(ql_dbg_async, vha, 0x500f,
		    "Configuration change detected: value=%x.\n", mb[1]);

		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
			atomic_set(&vha->loop_state, LOOP_DOWN);
			if (!atomic_read(&vha->loop_down_timer))
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
			qla2x00_mark_all_devices_lost(vha, 1);
		}

		if (vha->vp_idx) {
			atomic_set(&vha->vp_state, VP_FAILED);
			fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED);
		}

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		break;
	case MBA_PORT_UPDATE:		/* Port database update */
		/*
		 * Handle only global and vn-port update events
		 *
		 * Relevant inputs:
		 * mb[1] = N_Port handle of changed port
		 *		OR 0xffff for global event
		 * mb[2] = New login state
		 *		7 = Port logged out
		 * mb[3] = LSB is vp_idx, 0xff = all vps
		 *
		 * Skip processing if:
		 *       Event is global, vp_idx is NOT all vps,
		 *       vp_idx does not match
		 *       Event is not global, vp_idx does not match
		 */
		if (IS_QLA2XXX_MIDTYPE(ha) &&
		    ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff) ||
			(mb[1] != 0xffff)) && vha->vp_idx != (mb[3] & 0xff))
			break;

		/* Global event -- port logout or port unavailable. */
		if (mb[1] == 0xffff && mb[2] == 0x7) {
			ql_dbg(ql_dbg_async, vha, 0x5010,
			    "Port unavailable %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);

			if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
				atomic_set(&vha->loop_state, LOOP_DOWN);
				atomic_set(&vha->loop_down_timer,
				    LOOP_DOWN_TIME);
				vha->device_flags |= DFLG_NO_CABLE;
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			if (vha->vp_idx) {
				atomic_set(&vha->vp_state, VP_FAILED);
				fc_vport_set_state(vha->fc_vport,
				    FC_VPORT_FAILED);
				qla2x00_mark_all_devices_lost(vha, 1);
			}

			vha->flags.management_server_logged_in = 0;
			ha->link_data_rate = PORT_SPEED_UNKNOWN;
			break;
		}

		/*
		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
		 * event etc. earlier indicating loop is down) then process
		 * it.  Otherwise ignore it and Wait for RSCN to come in.
		 */
		atomic_set(&vha->loop_down_timer, 0);
		if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
		    atomic_read(&vha->loop_state) != LOOP_DEAD) {
			ql_dbg(ql_dbg_async, vha, 0x5011,
			    "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
			    mb[1], mb[2], mb[3]);
			break;
		}

		ql_dbg(ql_dbg_async, vha, 0x5012,
		    "Port database changed %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		/*
		 * Mark all devices as missing so we will login again.
		 */
		atomic_set(&vha->loop_state, LOOP_UP);

		qla2x00_mark_all_devices_lost(vha, 1);

		vha->flags.rscn_queue_overflow = 1;

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
		break;
	case MBA_RSCN_UPDATE:		/* State Change Registration */
		/* Check if the Vport has issued a SCR */
		if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
			break;
		/* Only handle SCNs for our Vport index. */
		if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
			break;

		ql_dbg(ql_dbg_async, vha, 0x5013,
		    "RSCN database changed -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		rscn_entry = ((mb[1] & 0xff) << 16) | mb[2];
		host_pid = (vha->d_id.b.domain << 16) | (vha->d_id.b.area << 8)
		    | vha->d_id.b.al_pa;
		if (rscn_entry == host_pid) {
			ql_dbg(ql_dbg_async, vha, 0x5014,
			    "Ignoring RSCN update to local host "
			    "port ID (%06x).\n", host_pid);
			break;
		}

		/* Ignore reserved bits from RSCN-payload. */
		rscn_entry = ((mb[1] & 0x3ff) << 16) | mb[2];
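		/*
		 * Queue the RSCN for the DPC thread in a fixed-size ring:
		 * rscn_in_ptr is the producer index and rscn_out_ptr the
		 * consumer; if advancing the producer would collide with the
		 * consumer, set the overflow flag instead of dropping state
		 * silently.
		 */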
		rscn_queue_index = vha->rscn_in_ptr + 1;
		if (rscn_queue_index == MAX_RSCN_COUNT)
			rscn_queue_index = 0;
		if (rscn_queue_index != vha->rscn_out_ptr) {
			vha->rscn_queue[vha->rscn_in_ptr] = rscn_entry;
			vha->rscn_in_ptr = rscn_queue_index;
		} else {
			vha->flags.rscn_queue_overflow = 1;
		}

		atomic_set(&vha->loop_down_timer, 0);
		vha->flags.management_server_logged_in = 0;

		set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
		set_bit(RSCN_UPDATE, &vha->dpc_flags);
		qla2x00_post_aen_work(vha, FCH_EVT_RSCN, rscn_entry);
		break;
	/* case MBA_RIO_RESPONSE: */
	case MBA_ZIO_RESPONSE:
		ql_dbg(ql_dbg_async, vha, 0x5015,
		    "[R|Z]IO update completion.\n");

		if (IS_FWI2_CAPABLE(ha))
			qla24xx_process_response_queue(vha, rsp);
		else
			qla2x00_process_response_queue(rsp);
		break;

	case MBA_DISCARD_RND_FRAME:
		ql_dbg(ql_dbg_async, vha, 0x5016,
		    "Discard RND Frame -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;

	case MBA_TRACE_NOTIFICATION:
		ql_dbg(ql_dbg_async, vha, 0x5017,
		    "Trace Notification -- %04x %04x.\n", mb[1], mb[2]);
		break;
	case MBA_ISP84XX_ALERT:
		ql_dbg(ql_dbg_async, vha, 0x5018,
		    "ISP84XX Alert Notification -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);

		spin_lock_irqsave(&ha->cs84xx->access_lock, flags);
		switch (mb[1]) {
		case A84_PANIC_RECOVERY:
			ql_log(ql_log_info, vha, 0x5019,
			    "Alert 84XX: panic recovery %04x %04x.\n",
			    mb[2], mb[3]);
			break;
		case A84_OP_LOGIN_COMPLETE:
			ha->cs84xx->op_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501a,
			    "Alert 84XX: firmware version %x.\n",
			    ha->cs84xx->op_fw_version);
			break;
		case A84_DIAG_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ql_log(ql_log_info, vha, 0x501b,
			    "Alert 84XX: diagnostic firmware version %x.\n",
			    ha->cs84xx->diag_fw_version);
			break;
		case A84_GOLD_LOGIN_COMPLETE:
			ha->cs84xx->diag_fw_version = mb[3] << 16 | mb[2];
			ha->cs84xx->fw_update = 1;
			ql_log(ql_log_info, vha, 0x501c,
			    "Alert 84XX: gold firmware version %x.\n",
			    ha->cs84xx->gold_fw_version);
			break;
		default:
			ql_log(ql_log_warn, vha, 0x501d,
			    "Alert 84xx: Invalid Alert %04x %04x %04x.\n",
			    mb[1], mb[2], mb[3]);
		}
		spin_unlock_irqrestore(&ha->cs84xx->access_lock, flags);
		break;
	case MBA_DCBX_START:
		ql_dbg(ql_dbg_async, vha, 0x501e,
		    "DCBX Started -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_DCBX_PARAM_UPDATE:
		ql_dbg(ql_dbg_async, vha, 0x501f,
		    "DCBX Parameters Updated -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_FCF_CONF_ERR:
		ql_dbg(ql_dbg_async, vha, 0x5020,
		    "FCF Configuration Error -- %04x %04x %04x.\n",
		    mb[1], mb[2], mb[3]);
		break;
	case MBA_IDC_COMPLETE:
	case MBA_IDC_NOTIFY:
	case MBA_IDC_TIME_EXT:
		qla81xx_idc_event(vha, mb[0], mb[1]);
		break;
	}

	if (!vha->vp_idx && ha->num_vhosts)
		qla2x00_alert_all_vps(rsp, mb);
}
/**
 * qla2x00_process_completed_request() - Process a Fast Post response.
 * @vha: SCSI driver HA context
 * @req: request queue
 * @index: SRB index
 */
static void
qla2x00_process_completed_request(struct scsi_qla_host *vha,
				struct req_que *req, uint32_t index)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;

	/* Validate handle. */
	if (index >= MAX_OUTSTANDING_COMMANDS) {
		ql_log(ql_log_warn, vha, 0x3014,
		    "Invalid SCSI command index (%x).\n", index);

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		return;
	}

	sp = req->outstanding_cmds[index];
	if (sp) {
		/* Free outstanding command slot. */
		req->outstanding_cmds[index] = NULL;

		/* Save ISP completion status */
		sp->cmd->result = DID_OK << 16;
		qla2x00_sp_compl(ha, sp);
	} else {
		ql_log(ql_log_warn, vha, 0x3016, "Invalid SCSI SRB.\n");

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
	}
}
static srb_t *
qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
    struct req_que *req, void *iocb)
{
	struct qla_hw_data *ha = vha->hw;
	sts_entry_t *pkt = iocb;
	srb_t *sp = NULL;
	uint16_t index;

	index = LSW(pkt->handle);
	if (index >= MAX_OUTSTANDING_COMMANDS) {
		ql_log(ql_log_warn, vha, 0x5031,
		    "Invalid command index (%x).\n", index);
		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		goto done;
	}
	sp = req->outstanding_cmds[index];
	if (!sp) {
		ql_log(ql_log_warn, vha, 0x5032,
		    "Invalid completion handle (%x) -- timed-out.\n", index);
		return sp;
	}
	if (sp->handle != index) {
		ql_log(ql_log_warn, vha, 0x5033,
		    "SRB handle (%x) mismatch %x.\n", sp->handle, index);
		return NULL;
	}

	req->outstanding_cmds[index] = NULL;

done:
	return sp;
}
static void
qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct mbx_entry *mbx)
{
	const char func[] = "MBX-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	struct srb_ctx *ctx;
	uint16_t *data;
	uint16_t status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
	if (!sp)
		return;

	ctx = sp->ctx;
	lio = ctx->u.iocb_cmd;
	type = ctx->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
	    QLA_LOGIO_LOGIN_RETRIED : 0;
	if (mbx->entry_status) {
		ql_dbg(ql_dbg_async, vha, 0x5043,
		    "Async-%s error entry - hdl=%x portid=%02x%02x%02x "
		    "entry-status=%x status=%x state-flag=%x "
		    "status-flags=%x.\n", type, sp->handle,
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa, mbx->entry_status,
		    le16_to_cpu(mbx->status), le16_to_cpu(mbx->state_flags),
		    le16_to_cpu(mbx->status_flags));

		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5029,
		    (uint8_t *)mbx, sizeof(*mbx));

		goto logio_done;
	}

	status = le16_to_cpu(mbx->status);
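	/*
	 * A login IOCB may return a non-zero completion status (0x30) even
	 * though mailbox0 reports MBS_COMMAND_COMPLETE; treat that
	 * combination as success.
	 */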
	if (status == 0x30 && ctx->type == SRB_LOGIN_CMD &&
	    le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE)
		status = 0;
	if (!status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5045,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x mbx1=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le16_to_cpu(mbx->mb1));

		data[0] = MBS_COMMAND_COMPLETE;
		if (ctx->type == SRB_LOGIN_CMD) {
			fcport->port_type = FCT_TARGET;
			if (le16_to_cpu(mbx->mb1) & BIT_0)
				fcport->port_type = FCT_INITIATOR;
			else if (le16_to_cpu(mbx->mb1) & BIT_1)
				fcport->flags |= FCF_FCP2_DEVICE;
		}
		goto logio_done;
	}

	data[0] = le16_to_cpu(mbx->mb0);
	switch (data[0]) {
	case MBS_PORT_ID_USED:
		data[1] = le16_to_cpu(mbx->mb1);
		break;
	case MBS_LOOP_ID_USED:
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_log(ql_log_warn, vha, 0x5046,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x status=%x "
	    "mb0=%x mb1=%x mb2=%x mb6=%x mb7=%x.\n", type, sp->handle,
	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    status, le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
	    le16_to_cpu(mbx->mb7));

logio_done:
	lio->done(sp);
}
static void
qla2x00_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    sts_entry_t *pkt, int iocb_type)
{
	const char func[] = "CT_IOCB";
	const char *type;
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	struct srb_ctx *sp_bsg;
	struct fc_bsg_job *bsg_job;
	uint16_t comp_status;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;

	sp_bsg = sp->ctx;
	bsg_job = sp_bsg->u.bsg_job;

	type = NULL;
	switch (sp_bsg->type) {
	case SRB_CT_CMD:
		type = "ct pass-through";
		break;
	default:
		ql_log(ql_log_warn, vha, 0x5047,
		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
		return;
	}

	comp_status = le16_to_cpu(pkt->comp_status);

	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload to the caller
	 */
	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply);

	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			bsg_job->reply->result = DID_OK << 16;
			bsg_job->reply->reply_payload_rcv_len =
			    le16_to_cpu(((sts_entry_t *)pkt)->rsp_info_len);

			ql_log(ql_log_warn, vha, 0x5048,
			    "CT pass-through-%s error "
			    "comp_status-status=0x%x total_byte = 0x%x.\n",
			    type, comp_status,
			    bsg_job->reply->reply_payload_rcv_len);
		} else {
			ql_log(ql_log_warn, vha, 0x5049,
			    "CT pass-through-%s error "
			    "comp_status-status=0x%x.\n", type, comp_status);
			bsg_job->reply->result = DID_ERROR << 16;
			bsg_job->reply->reply_payload_rcv_len = 0;
		}
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5035,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		bsg_job->reply->result = DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len =
		    bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

	dma_unmap_sg(&ha->pdev->dev, bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);

	dma_unmap_sg(&ha->pdev->dev, bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);

	if (sp_bsg->type == SRB_ELS_CMD_HST || sp_bsg->type == SRB_CT_CMD)
		kfree(sp->fcport);

	kfree(sp->ctx);
	mempool_free(sp, ha->srb_mempool);
	bsg_job->job_done(bsg_job);
}
static void
qla24xx_els_ct_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct sts_entry_24xx *pkt, int iocb_type)
{
	const char func[] = "ELS_CT_IOCB";
	const char *type;
	struct qla_hw_data *ha = vha->hw;
	srb_t *sp;
	struct srb_ctx *sp_bsg;
	struct fc_bsg_job *bsg_job;
	uint16_t comp_status;
	uint32_t fw_status[3];
	uint8_t *fw_sts_ptr;

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (!sp)
		return;
	sp_bsg = sp->ctx;
	bsg_job = sp_bsg->u.bsg_job;

	type = NULL;
	switch (sp_bsg->type) {
	case SRB_ELS_CMD_RPT:
	case SRB_ELS_CMD_HST:
		type = "els";
		break;
	case SRB_CT_CMD:
		type = "ct pass-through";
		break;
	default:
		ql_log(ql_log_warn, vha, 0x503e,
		    "Unrecognized SRB: (%p) type=%d.\n", sp, sp_bsg->type);
		return;
	}

	comp_status = fw_status[0] = le16_to_cpu(pkt->comp_status);
	fw_status[1] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_1);
	fw_status[2] = le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->error_subcode_2);

	/* return FC_CTELS_STATUS_OK and leave the decoding of the ELS/CT
	 * fc payload to the caller
	 */
	bsg_job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
	bsg_job->reply_len = sizeof(struct fc_bsg_reply) + sizeof(fw_status);
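
	/*
	 * On error paths below, the three firmware status words (completion
	 * status plus the two ELS/CT error subcodes) are copied just past
	 * the fc_bsg_reply in the request's sense buffer so user space can
	 * retrieve them.
	 */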
	if (comp_status != CS_COMPLETE) {
		if (comp_status == CS_DATA_UNDERRUN) {
			bsg_job->reply->result = DID_OK << 16;
			bsg_job->reply->reply_payload_rcv_len =
			    le16_to_cpu(((struct els_sts_entry_24xx *)pkt)->total_byte_count);

			ql_log(ql_log_info, vha, 0x503f,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x total_byte = 0x%x.\n",
			    type, sp->handle, comp_status, fw_status[1], fw_status[2],
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->total_byte_count));
			fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
			memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
		} else {
			ql_log(ql_log_info, vha, 0x5040,
			    "ELS-CT pass-through-%s error hdl=%x comp_status-status=0x%x "
			    "error subcode 1=0x%x error subcode 2=0x%x.\n",
			    type, sp->handle, comp_status,
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_1),
			    le16_to_cpu(((struct els_sts_entry_24xx *)
				pkt)->error_subcode_2));
			bsg_job->reply->result = DID_ERROR << 16;
			bsg_job->reply->reply_payload_rcv_len = 0;
			fw_sts_ptr = ((uint8_t *)bsg_job->req->sense) + sizeof(struct fc_bsg_reply);
			memcpy(fw_sts_ptr, fw_status, sizeof(fw_status));
		}
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5056,
		    (uint8_t *)pkt, sizeof(*pkt));
	} else {
		bsg_job->reply->result = DID_OK << 16;
		bsg_job->reply->reply_payload_rcv_len = bsg_job->reply_payload.payload_len;
		bsg_job->reply_len = 0;
	}

	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->request_payload.sg_list,
	    bsg_job->request_payload.sg_cnt, DMA_TO_DEVICE);
	dma_unmap_sg(&ha->pdev->dev,
	    bsg_job->reply_payload.sg_list,
	    bsg_job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
	if ((sp_bsg->type == SRB_ELS_CMD_HST) ||
	    (sp_bsg->type == SRB_CT_CMD))
		kfree(sp->fcport);
	kfree(sp->ctx);
	mempool_free(sp, ha->srb_mempool);
	bsg_job->job_done(bsg_job);
}
static void
qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct logio_entry_24xx *logio)
{
	const char func[] = "LOGIO-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *lio;
	struct srb_ctx *ctx;
	uint16_t *data;
	uint32_t iop[2];

	sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
	if (!sp)
		return;

	ctx = sp->ctx;
	lio = ctx->u.iocb_cmd;
	type = ctx->name;
	fcport = sp->fcport;
	data = lio->u.logio.data;

	data[0] = MBS_COMMAND_ERROR;
	data[1] = lio->u.logio.flags & SRB_LOGIN_RETRIED ?
		QLA_LOGIO_LOGIN_RETRIED : 0;
	if (logio->entry_status) {
		ql_log(ql_log_warn, vha, 0x5034,
		    "Async-%s error entry - hdl=%x "
		    "portid=%02x%02x%02x entry-status=%x.\n",
		    type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    logio->entry_status);
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x504d,
		    (uint8_t *)logio, sizeof(*logio));

		goto logio_done;
	}

	if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
		ql_dbg(ql_dbg_async, vha, 0x5036,
		    "Async-%s complete - hdl=%x portid=%02x%02x%02x "
		    "iop0=%x.\n", type, sp->handle, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa,
		    le32_to_cpu(logio->io_parameter[0]));

		data[0] = MBS_COMMAND_COMPLETE;
		if (ctx->type != SRB_LOGIN_CMD)
			goto logio_done;
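
		/*
		 * On a successful PLOGI the firmware reports the remote
		 * port's role in I/O parameter 0: BIT_4 = target,
		 * BIT_5 = initiator, BIT_8 = FCP-2 (retry-capable) device.
		 */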
		iop[0] = le32_to_cpu(logio->io_parameter[0]);
		if (iop[0] & BIT_4) {
			fcport->port_type = FCT_TARGET;
			if (iop[0] & BIT_8)
				fcport->flags |= FCF_FCP2_DEVICE;
		} else if (iop[0] & BIT_5)
			fcport->port_type = FCT_INITIATOR;

		if (logio->io_parameter[7] || logio->io_parameter[8])
			fcport->supported_classes |= FC_COS_CLASS2;
		if (logio->io_parameter[9] || logio->io_parameter[10])
			fcport->supported_classes |= FC_COS_CLASS3;

		goto logio_done;
	}

	iop[0] = le32_to_cpu(logio->io_parameter[0]);
	iop[1] = le32_to_cpu(logio->io_parameter[1]);
	switch (iop[0]) {
	case LSC_SCODE_PORTID_USED:
		data[0] = MBS_PORT_ID_USED;
		data[1] = LSW(iop[1]);
		break;
	case LSC_SCODE_NPORT_USED:
		data[0] = MBS_LOOP_ID_USED;
		break;
	default:
		data[0] = MBS_COMMAND_ERROR;
		break;
	}

	ql_dbg(ql_dbg_async, vha, 0x5037,
	    "Async-%s failed - hdl=%x portid=%02x%02x%02x comp=%x "
	    "iop0=%x iop1=%x.\n", type, sp->handle, fcport->d_id.b.domain,
	    fcport->d_id.b.area, fcport->d_id.b.al_pa,
	    le16_to_cpu(logio->comp_status),
	    le32_to_cpu(logio->io_parameter[0]),
	    le32_to_cpu(logio->io_parameter[1]));

logio_done:
	lio->done(sp);
}
static void
qla24xx_tm_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
    struct tsk_mgmt_entry *tsk)
{
	const char func[] = "TMF-IOCB";
	const char *type;
	fc_port_t *fcport;
	srb_t *sp;
	struct srb_iocb *iocb;
	struct srb_ctx *ctx;
	struct sts_entry_24xx *sts = (struct sts_entry_24xx *)tsk;
	int error = 1;

	sp = qla2x00_get_sp_from_handle(vha, func, req, tsk);
	if (!sp)
		return;

	ctx = sp->ctx;
	iocb = ctx->u.iocb_cmd;
	type = ctx->name;
	fcport = sp->fcport;
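
	/*
	 * Validate the task-management status IOCB in order: entry status,
	 * completion status, presence and length of the FCP response info,
	 * and finally the response code itself; any failure is reported
	 * back through iocb->u.tmf.data.
	 */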
	if (sts->entry_status) {
		ql_log(ql_log_warn, vha, 0x5038,
		    "Async-%s error - hdl=%x entry-status(%x).\n",
		    type, sp->handle, sts->entry_status);
	} else if (sts->comp_status != __constant_cpu_to_le16(CS_COMPLETE)) {
		ql_log(ql_log_warn, vha, 0x5039,
		    "Async-%s error - hdl=%x completion status(%x).\n",
		    type, sp->handle, sts->comp_status);
	} else if (!(le16_to_cpu(sts->scsi_status) &
	    SS_RESPONSE_INFO_LEN_VALID)) {
		ql_log(ql_log_warn, vha, 0x503a,
		    "Async-%s error - hdl=%x no response info(%x).\n",
		    type, sp->handle, sts->scsi_status);
	} else if (le32_to_cpu(sts->rsp_data_len) < 4) {
		ql_log(ql_log_warn, vha, 0x503b,
		    "Async-%s error - hdl=%x not enough response(%d).\n",
		    type, sp->handle, sts->rsp_data_len);
	} else if (sts->data[3]) {
		ql_log(ql_log_warn, vha, 0x503c,
		    "Async-%s error - hdl=%x response(%x).\n",
		    type, sp->handle, sts->data[3]);
	} else {
		error = 0;
	}

	if (error) {
		iocb->u.tmf.data = error;
		ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x5055,
		    (uint8_t *)sts, sizeof(*sts));
	}

	iocb->done(sp);
}
/**
 * qla2x00_process_response_queue() - Process response queue entries.
 * @rsp: response queue
 */
void
qla2x00_process_response_queue(struct rsp_que *rsp)
{
	struct scsi_qla_host *vha;
	struct qla_hw_data *ha = rsp->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	sts_entry_t	*pkt;
	uint16_t        handle_cnt;
	uint16_t        cnt;

	vha = pci_get_drvdata(ha->pdev);

	if (!vha->flags.online)
		return;
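
	/*
	 * Each handled entry is stamped RESPONSE_PROCESSED, so the loop
	 * stops as soon as it reaches an entry the firmware has not yet
	 * overwritten with fresh status.
	 */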
	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (sts_entry_t *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}

		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_TYPE_21:
			handle_cnt = ((sts21_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts21_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_TYPE_22:
			handle_cnt = ((sts22_entry_t *)pkt)->handle_count;
			for (cnt = 0; cnt < handle_cnt; cnt++) {
				qla2x00_process_completed_request(vha, rsp->req,
				    ((sts22_entry_t *)pkt)->handle[cnt]);
			}
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case MBX_IOCB_TYPE:
			qla2x00_mbx_iocb_entry(vha, rsp->req,
			    (struct mbx_entry *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla2x00_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		default:
			/* Type Not Supported. */
			ql_log(ql_log_warn, vha, 0x504a,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	WRT_REG_WORD(ISP_RSP_Q_OUT(ha, reg), rsp->ring_index);
}
static inline void
qla2x00_handle_sense(srb_t *sp, uint8_t *sense_data, uint32_t par_sense_len,
	uint32_t sense_len, struct rsp_que *rsp)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cp = sp->cmd;

	if (sense_len >= SCSI_SENSE_BUFFERSIZE)
		sense_len = SCSI_SENSE_BUFFERSIZE;

	sp->request_sense_length = sense_len;
	sp->request_sense_ptr = cp->sense_buffer;
	if (sp->request_sense_length > par_sense_len)
		sense_len = par_sense_len;

	memcpy(cp->sense_buffer, sense_data, sense_len);

	sp->request_sense_ptr += sense_len;
	sp->request_sense_length -= sense_len;
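	/*
	 * Sense bytes beyond what fits in this IOCB arrive in Status
	 * Continuation entries; park the SRB on the response queue until
	 * qla2x00_status_cont_entry() has drained the remainder.
	 */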
	if (sp->request_sense_length != 0)
		rsp->status_srb = sp;

	if (sense_len) {
		ql_dbg(ql_dbg_io + ql_dbg_buffer, vha, 0x301c,
		    "Check condition Sense data, nexus%ld:%d:%d cmd=%p.\n",
		    sp->fcport->vha->host_no, cp->device->id, cp->device->lun,
		    cp);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302b,
		    cp->sense_buffer, sense_len);
	}
}
struct scsi_dif_tuple {
	__be16 guard;	/* Checksum */
	__be16 app_tag;	/* Application identifier */
	__be32 ref_tag;	/* Target LBA or indirect LBA */
};
/*
 * Checks the guard or meta-data for the type of error
 * detected by the HBA. In case of errors, we set the
 * ASC/ASCQ fields in the sense buffer with ILLEGAL_REQUEST
 * to indicate to the kernel that the HBA detected error.
 */
static inline int
qla2x00_handle_dif_error(srb_t *sp, struct sts_entry_24xx *sts24)
{
	struct scsi_qla_host *vha = sp->fcport->vha;
	struct scsi_cmnd *cmd = sp->cmd;
	uint8_t		*ap = &sts24->data[12];
	uint8_t		*ep = &sts24->data[20];
	uint32_t	e_ref_tag, a_ref_tag;
	uint16_t	e_app_tag, a_app_tag;
	uint16_t	e_guard, a_guard;

	/*
	 * swab32 of the "data" field in the beginning of qla2x00_status_entry()
	 * would make guard field appear at offset 2
	 */
	a_guard   = le16_to_cpu(*(uint16_t *)(ap + 2));
	a_app_tag = le16_to_cpu(*(uint16_t *)(ap + 0));
	a_ref_tag = le32_to_cpu(*(uint32_t *)(ap + 4));
	e_guard   = le16_to_cpu(*(uint16_t *)(ep + 2));
	e_app_tag = le16_to_cpu(*(uint16_t *)(ep + 0));
	e_ref_tag = le32_to_cpu(*(uint32_t *)(ep + 4));

	ql_dbg(ql_dbg_io, vha, 0x3023,
	    "iocb(s) %p Returned STATUS.\n", sts24);

	ql_dbg(ql_dbg_io, vha, 0x3024,
	    "DIF ERROR in cmd 0x%x lba 0x%llx act ref"
	    " tag=0x%x, exp ref_tag=0x%x, act app tag=0x%x, exp app"
	    " tag=0x%x, act guard=0x%x, exp guard=0x%x.\n",
	    cmd->cmnd[0], (u64)scsi_get_lba(cmd), a_ref_tag, e_ref_tag,
	    a_app_tag, e_app_tag, a_guard, e_guard);

	/*
	 * Ignore sector if:
	 * For type     3: ref & app tag is all 'f's
	 * For type 0,1,2: app tag is all 'f's
	 */
	if ((a_app_tag == 0xffff) &&
	    ((scsi_get_prot_type(cmd) != SCSI_PROT_DIF_TYPE3) ||
	     (a_ref_tag == 0xffffffff))) {
		uint32_t blocks_done, resid;
		sector_t lba_s = scsi_get_lba(cmd);

		/* 2TB boundary case covered automatically with this */
		blocks_done = e_ref_tag - (uint32_t)lba_s + 1;

		resid = scsi_bufflen(cmd) - (blocks_done *
		    cmd->device->sector_size);

		scsi_set_resid(cmd, resid);
		cmd->result = DID_OK << 16;

		/* Update protection tag */
		if (scsi_prot_sg_count(cmd)) {
			uint32_t i, j = 0, k = 0, num_ent;
			struct scatterlist *sg;
			struct sd_dif_tuple *spt;

			/* Patch the corresponding protection tags */
			scsi_for_each_prot_sg(cmd, sg,
			    scsi_prot_sg_count(cmd), i) {
				num_ent = sg_dma_len(sg) / 8;
				if (k + num_ent < blocks_done) {
					k += num_ent;
					continue;
				}
				j = blocks_done - k - 1;
				k = blocks_done;
				break;
			}

			if (k != blocks_done) {
				ql_log(ql_log_warn, vha, 0x302f,
				    "unexpected tag values tag:lba=%x:%llx)\n",
				    e_ref_tag, (unsigned long long)lba_s);
				return 1;
			}

			spt = page_address(sg_page(sg)) + sg->offset;
			spt += j;

			spt->app_tag = 0xffff;
			if (scsi_get_prot_type(cmd) == SCSI_PROT_DIF_TYPE3)
				spt->ref_tag = 0xffffffff;
		}

		return 0;
	}

	/* check guard */
	if (e_guard != a_guard) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x1);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	/* check ref tag */
	if (e_ref_tag != a_ref_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x3);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	/* check appl tag */
	if (e_app_tag != a_app_tag) {
		scsi_build_sense_buffer(1, cmd->sense_buffer, ILLEGAL_REQUEST,
		    0x10, 0x2);
		set_driver_byte(cmd, DRIVER_SENSE);
		set_host_byte(cmd, DID_ABORT);
		cmd->result |= SAM_STAT_CHECK_CONDITION << 1;
		return 1;
	}

	return 1;
}
/**
 * qla2x00_status_entry() - Process a Status IOCB entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 */
static void
qla2x00_status_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, void *pkt)
{
	srb_t		*sp;
	fc_port_t	*fcport;
	struct scsi_cmnd *cp;
	sts_entry_t *sts;
	struct sts_entry_24xx *sts24;
	uint16_t	comp_status;
	uint16_t	scsi_status;
	uint16_t	ox_id;
	uint8_t		lscsi_status;
	int32_t		resid;
	uint32_t sense_len, par_sense_len, rsp_info_len, resid_len,
	    fw_resid_len;
	uint8_t		*rsp_info, *sense_data;
	struct qla_hw_data *ha = vha->hw;
	uint32_t handle;
	uint16_t que;
	struct req_que *req;
	int logit = 1;

	sts = (sts_entry_t *) pkt;
	sts24 = (struct sts_entry_24xx *) pkt;
	if (IS_FWI2_CAPABLE(ha)) {
		comp_status = le16_to_cpu(sts24->comp_status);
		scsi_status = le16_to_cpu(sts24->scsi_status) & SS_MASK;
	} else {
		comp_status = le16_to_cpu(sts->comp_status);
		scsi_status = le16_to_cpu(sts->scsi_status) & SS_MASK;
	}
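	/*
	 * The 32-bit completion handle packs the originating request-queue
	 * number in its most-significant word and the outstanding-command
	 * index in its least-significant word.
	 */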
	handle = (uint32_t) LSW(sts->handle);
	que = MSW(sts->handle);
	req = ha->req_q_map[que];

	/* Fast path completion. */
	if (comp_status == CS_COMPLETE && scsi_status == 0) {
		qla2x00_process_completed_request(vha, req, handle);

		return;
	}

	/* Validate handle. */
	if (handle < MAX_OUTSTANDING_COMMANDS) {
		sp = req->outstanding_cmds[handle];
		req->outstanding_cmds[handle] = NULL;
	} else
		sp = NULL;

	if (sp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3017,
		    "Invalid status handle (0x%x).\n", sts->handle);

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
		return;
	}
	cp = sp->cmd;
	if (cp == NULL) {
		ql_dbg(ql_dbg_io, vha, 0x3018,
		    "Command already returned (0x%x/%p).\n",
		    sts->handle, sp);

		return;
	}
	lscsi_status = scsi_status & STATUS_MASK;

	fcport = sp->fcport;

	ox_id = 0;
	sense_len = par_sense_len = rsp_info_len = resid_len =
	    fw_resid_len = 0;
	if (IS_FWI2_CAPABLE(ha)) {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le32_to_cpu(sts24->sense_len);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le32_to_cpu(sts24->rsp_data_len);
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER))
			resid_len = le32_to_cpu(sts24->rsp_residual_count);
		if (comp_status == CS_DATA_UNDERRUN)
			fw_resid_len = le32_to_cpu(sts24->residual_len);
		rsp_info = sts24->data;
		sense_data = sts24->data;
		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
		ox_id = le16_to_cpu(sts24->ox_id);
		par_sense_len = sizeof(sts24->data);
	} else {
		if (scsi_status & SS_SENSE_LEN_VALID)
			sense_len = le16_to_cpu(sts->req_sense_length);
		if (scsi_status & SS_RESPONSE_INFO_LEN_VALID)
			rsp_info_len = le16_to_cpu(sts->rsp_info_len);
		resid_len = le32_to_cpu(sts->residual_length);
		rsp_info = sts->rsp_info;
		sense_data = sts->req_sense_data;
		par_sense_len = sizeof(sts->req_sense_data);
	}
	/* Check for any FCP transport errors. */
	if (scsi_status & SS_RESPONSE_INFO_LEN_VALID) {
		/* Sense data lies beyond any FCP RESPONSE data. */
		if (IS_FWI2_CAPABLE(ha)) {
			sense_data += rsp_info_len;
			par_sense_len -= rsp_info_len;
		}
		if (rsp_info_len > 3 && rsp_info[3]) {
			ql_dbg(ql_dbg_io, vha, 0x3019,
			    "FCP I/O protocol failure (0x%x/0x%x).\n",
			    rsp_info_len, rsp_info[3]);

			cp->result = DID_BUS_BUSY << 16;
			goto out;
		}
	}

	/* Check for overrun. */
	if (IS_FWI2_CAPABLE(ha) && comp_status == CS_COMPLETE &&
	    scsi_status & SS_RESIDUAL_OVER)
		comp_status = CS_DATA_OVERRUN;
	/*
	 * Based on Host and scsi status generate status code for Linux
	 */
	switch (comp_status) {
	case CS_COMPLETE:
	case CS_QUEUE_FULL:
		if (scsi_status == 0) {
			cp->result = DID_OK << 16;
			break;
		}
		if (scsi_status & (SS_RESIDUAL_UNDER | SS_RESIDUAL_OVER)) {
			resid = resid_len;
			scsi_set_resid(cp, resid);

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			     cp->underflow)) {
				ql_dbg(ql_dbg_io, vha, 0x301a,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				cp->result = DID_ERROR << 16;
				break;
			}
		}
		cp->result = DID_OK << 16 | lscsi_status;

		if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
			ql_dbg(ql_dbg_io, vha, 0x301b,
			    "QUEUE FULL detected.\n");
			break;
		}
		logit = 0;
		if (lscsi_status != SS_CHECK_CONDITION)
			break;

		memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
		if (!(scsi_status & SS_SENSE_LEN_VALID))
			break;

		qla2x00_handle_sense(sp, sense_data, par_sense_len, sense_len,
		    rsp);
		break;
	case CS_DATA_UNDERRUN:
		/* Use F/W calculated residual length. */
		resid = IS_FWI2_CAPABLE(ha) ? fw_resid_len : resid_len;
		scsi_set_resid(cp, resid);
		if (scsi_status & SS_RESIDUAL_UNDER) {
			if (IS_FWI2_CAPABLE(ha) && fw_resid_len != resid_len) {
				ql_dbg(ql_dbg_io, vha, 0x301d,
				    "Dropped frame(s) detected "
				    "(0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				cp->result = DID_ERROR << 16 | lscsi_status;
				goto check_scsi_status;
			}

			if (!lscsi_status &&
			    ((unsigned)(scsi_bufflen(cp) - resid) <
			    cp->underflow)) {
				ql_dbg(ql_dbg_io, vha, 0x301e,
				    "Mid-layer underflow "
				    "detected (0x%x of 0x%x bytes).\n",
				    resid, scsi_bufflen(cp));

				cp->result = DID_ERROR << 16;
				break;
			}
		} else {
			ql_dbg(ql_dbg_io, vha, 0x301f,
			    "Dropped frame(s) detected (0x%x "
			    "of 0x%x bytes).\n", resid, scsi_bufflen(cp));

			cp->result = DID_ERROR << 16 | lscsi_status;
			goto check_scsi_status;
		}

		cp->result = DID_OK << 16 | lscsi_status;
		logit = 0;

check_scsi_status:
		/*
		 * Check to see if SCSI Status is non zero. If so report SCSI
		 * Status.
		 */
		if (lscsi_status != 0) {
			if (lscsi_status == SAM_STAT_TASK_SET_FULL) {
				ql_dbg(ql_dbg_io, vha, 0x3020,
				    "QUEUE FULL detected.\n");
				logit = 1;
				break;
			}
			if (lscsi_status != SS_CHECK_CONDITION)
				break;

			memset(cp->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
			if (!(scsi_status & SS_SENSE_LEN_VALID))
				break;

			qla2x00_handle_sense(sp, sense_data, par_sense_len,
			    sense_len, rsp);
		}
		break;
	case CS_PORT_LOGGED_OUT:
	case CS_PORT_CONFIG_CHG:
	case CS_PORT_BUSY:
	case CS_INCOMPLETE:
	case CS_PORT_UNAVAILABLE:
	case CS_TIMEOUT:
	case CS_RESET:

		/*
		 * We are going to have the fc class block the rport
		 * while we try to recover so instruct the mid layer
		 * to requeue until the class decides how to handle this.
		 */
		cp->result = DID_TRANSPORT_DISRUPTED << 16;

		if (comp_status == CS_TIMEOUT) {
			if (IS_FWI2_CAPABLE(ha))
				break;
			else if ((le16_to_cpu(sts->status_flags) &
			    SF_LOGOUT_SENT) == 0)
				break;
		}

		ql_dbg(ql_dbg_io, vha, 0x3021,
		    "Port down status: port-state=0x%x.\n",
		    atomic_read(&fcport->state));

		if (atomic_read(&fcport->state) == FCS_ONLINE)
			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
		break;

	case CS_ABORTED:
		cp->result = DID_RESET << 16;
		break;

	case CS_DIF_ERROR:
		logit = qla2x00_handle_dif_error(sp, sts24);
		break;
	default:
		cp->result = DID_ERROR << 16;
		break;
	}

out:
	if (logit)
		ql_dbg(ql_dbg_io, vha, 0x3022,
		    "FCP command status: 0x%x-0x%x (0x%x) "
		    "nexus=%ld:%d:%d portid=%02x%02x%02x oxid=0x%x "
		    "cdb=%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x len=0x%x "
		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x.\n",
		    comp_status, scsi_status, cp->result, vha->host_no,
		    cp->device->id, cp->device->lun, fcport->d_id.b.domain,
		    fcport->d_id.b.area, fcport->d_id.b.al_pa, ox_id,
		    cp->cmnd[0], cp->cmnd[1], cp->cmnd[2], cp->cmnd[3],
		    cp->cmnd[4], cp->cmnd[5], cp->cmnd[6], cp->cmnd[7],
		    cp->cmnd[8], cp->cmnd[9], scsi_bufflen(cp), rsp_info_len,
		    resid_len, fw_resid_len);

	if (rsp->status_srb == NULL)
		qla2x00_sp_compl(ha, sp);
}
/**
 * qla2x00_status_cont_entry() - Process a Status Continuations entry.
 * @rsp: response queue
 * @pkt: Entry pointer
 *
 * Extended sense data.
 */
static void
qla2x00_status_cont_entry(struct rsp_que *rsp, sts_cont_entry_t *pkt)
{
	uint8_t	sense_sz = 0;
	struct qla_hw_data *ha = rsp->hw;
	struct scsi_qla_host *vha = pci_get_drvdata(ha->pdev);
	srb_t *sp = rsp->status_srb;
	struct scsi_cmnd *cp;

	if (sp != NULL && sp->request_sense_length != 0) {
		cp = sp->cmd;
		if (cp == NULL) {
			ql_log(ql_log_warn, vha, 0x3025,
			    "cmd is NULL: already returned to OS (sp=%p).\n",
			    sp);

			rsp->status_srb = NULL;
			return;
		}

		if (sp->request_sense_length > sizeof(pkt->data)) {
			sense_sz = sizeof(pkt->data);
		} else {
			sense_sz = sp->request_sense_length;
		}

		/* Move sense data. */
		if (IS_FWI2_CAPABLE(ha))
			host_to_fcp_swap(pkt->data, sizeof(pkt->data));
		memcpy(sp->request_sense_ptr, pkt->data, sense_sz);
		ql_dump_buffer(ql_dbg_io + ql_dbg_buffer, vha, 0x302c,
		    sp->request_sense_ptr, sense_sz);

		sp->request_sense_ptr += sense_sz;
		sp->request_sense_length -= sense_sz;

		/* Place command on done queue. */
		if (sp->request_sense_length == 0) {
			rsp->status_srb = NULL;
			qla2x00_sp_compl(ha, sp);
		}
	}
}
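
/*
 * Release any context-specific resources attached to an SRB that failed.
 * Returns 1 when the SRB has no extended context and the caller should
 * complete it as a plain SCSI command, 0 when it was consumed here.
 */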
static int
qla2x00_free_sp_ctx(scsi_qla_host_t *vha, srb_t *sp)
{
	struct qla_hw_data *ha = vha->hw;
	struct srb_ctx *ctx;

	if (!sp->ctx)
		return 1;

	ctx = sp->ctx;

	if (ctx->type == SRB_LOGIN_CMD ||
	    ctx->type == SRB_LOGOUT_CMD ||
	    ctx->type == SRB_TM_CMD) {
		ctx->u.iocb_cmd->done(sp);
		return 0;
	} else if (ctx->type == SRB_ADISC_CMD) {
		ctx->u.iocb_cmd->free(sp);
		return 0;
	} else {
		struct fc_bsg_job *bsg_job;

		bsg_job = ctx->u.bsg_job;
		if (ctx->type == SRB_ELS_CMD_HST ||
		    ctx->type == SRB_CT_CMD)
			kfree(sp->fcport);

		bsg_job->reply->reply_data.ctels_reply.status =
		    FC_CTELS_STATUS_OK;
		bsg_job->reply->result = DID_ERROR << 16;
		bsg_job->reply->reply_payload_rcv_len = 0;
		kfree(sp->ctx);
		mempool_free(sp, ha->srb_mempool);
		bsg_job->job_done(bsg_job);
		return 0;
	}
	return 1;
}
/**
 * qla2x00_error_entry() - Process an error entry.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 * @pkt: Entry pointer
 */
static void
qla2x00_error_entry(scsi_qla_host_t *vha, struct rsp_que *rsp, sts_entry_t *pkt)
{
	srb_t *sp;
	struct qla_hw_data *ha = vha->hw;
	const char func[] = "ERROR-IOCB";
	uint16_t que = MSW(pkt->handle);
	struct req_que *req = ha->req_q_map[que];

	if (pkt->entry_status & RF_INV_E_ORDER)
		ql_dbg(ql_dbg_async, vha, 0x502a,
		    "Invalid Entry Order.\n");
	else if (pkt->entry_status & RF_INV_E_COUNT)
		ql_dbg(ql_dbg_async, vha, 0x502b,
		    "Invalid Entry Count.\n");
	else if (pkt->entry_status & RF_INV_E_PARAM)
		ql_dbg(ql_dbg_async, vha, 0x502c,
		    "Invalid Entry Parameter.\n");
	else if (pkt->entry_status & RF_INV_E_TYPE)
		ql_dbg(ql_dbg_async, vha, 0x502d,
		    "Invalid Entry Type.\n");
	else if (pkt->entry_status & RF_BUSY)
		ql_dbg(ql_dbg_async, vha, 0x502e,
		    "Busy.\n");
	else
		ql_dbg(ql_dbg_async, vha, 0x502f,
		    "UNKNOWN flag error.\n");

	sp = qla2x00_get_sp_from_handle(vha, func, req, pkt);
	if (sp) {
		if (qla2x00_free_sp_ctx(vha, sp)) {
			if (pkt->entry_status &
			    (RF_INV_E_ORDER | RF_INV_E_COUNT |
			     RF_INV_E_PARAM | RF_INV_E_TYPE)) {
				sp->cmd->result = DID_ERROR << 16;
			} else if (pkt->entry_status & RF_BUSY) {
				sp->cmd->result = DID_BUS_BUSY << 16;
			} else {
				sp->cmd->result = DID_ERROR << 16;
			}
			qla2x00_sp_compl(ha, sp);
		}
	} else if (pkt->entry_type == COMMAND_A64_TYPE || pkt->entry_type ==
	    COMMAND_TYPE || pkt->entry_type == COMMAND_TYPE_7
	    || pkt->entry_type == COMMAND_TYPE_6) {
		ql_log(ql_log_warn, vha, 0x5030,
		    "Error entry - invalid handle.\n");

		if (IS_QLA82XX(ha))
			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
		else
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
		qla2xxx_wake_dpc(vha);
	}
}
/**
 * qla24xx_mbx_completion() - Process mailbox command completions.
 * @vha: SCSI driver HA context
 * @mb0: Mailbox0 register
 */
static void
qla24xx_mbx_completion(scsi_qla_host_t *vha, uint16_t mb0)
{
	uint16_t cnt;
	uint32_t mboxes;
	uint16_t __iomem *wptr;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Read all mbox registers? */
	mboxes = (1 << ha->mbx_count) - 1;
	if (!ha->mcp)
		ql_dbg(ql_dbg_async, vha, 0x504e, "MBX pointer ERROR.\n");
	else
		mboxes = ha->mcp->in_mb;
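	/*
	 * mboxes is a bitmask of the mailbox registers the current command
	 * expects back (bit n == mailbox n).  Mailbox 0 has already arrived
	 * as @mb0, so shift once before walking mailbox1..N and only read
	 * the registers the firmware actually filled in.
	 */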
	/* Load return mailbox registers. */
	ha->flags.mbox_int = 1;
	ha->mailbox_out[0] = mb0;
	mboxes >>= 1;
	wptr = (uint16_t __iomem *)&reg->mailbox1;

	for (cnt = 1; cnt < ha->mbx_count; cnt++) {
		if (mboxes & BIT_0)
			ha->mailbox_out[cnt] = RD_REG_WORD(wptr);

		mboxes >>= 1;
		wptr++;
	}
}
/**
 * qla24xx_process_response_queue() - Process response queue entries.
 * @vha: SCSI driver HA context
 * @rsp: response queue
 */
void qla24xx_process_response_queue(struct scsi_qla_host *vha,
	struct rsp_que *rsp)
{
	struct sts_entry_24xx *pkt;
	struct qla_hw_data *ha = vha->hw;

	if (!vha->flags.online)
		return;

	while (rsp->ring_ptr->signature != RESPONSE_PROCESSED) {
		pkt = (struct sts_entry_24xx *)rsp->ring_ptr;

		rsp->ring_index++;
		if (rsp->ring_index == rsp->length) {
			rsp->ring_index = 0;
			rsp->ring_ptr = rsp->ring;
		} else {
			rsp->ring_ptr++;
		}
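		/*
		 * The shadow consumer index is advanced before the entry is
		 * parsed; hardware only sees it when it is written back to
		 * rsp_q_out after the loop drains the ring.
		 */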
		if (pkt->entry_status != 0) {
			qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
			((response_t *)pkt)->signature = RESPONSE_PROCESSED;
			wmb();
			continue;
		}

		switch (pkt->entry_type) {
		case STATUS_TYPE:
			qla2x00_status_entry(vha, rsp, pkt);
			break;
		case STATUS_CONT_TYPE:
			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
			break;
		case VP_RPT_ID_IOCB_TYPE:
			qla24xx_report_id_acquisition(vha,
			    (struct vp_rpt_id_entry_24xx *)pkt);
			break;
		case LOGINOUT_PORT_IOCB_TYPE:
			qla24xx_logio_entry(vha, rsp->req,
			    (struct logio_entry_24xx *)pkt);
			break;
		case TSK_MGMT_IOCB_TYPE:
			qla24xx_tm_iocb_entry(vha, rsp->req,
			    (struct tsk_mgmt_entry *)pkt);
			break;
		case CT_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, CT_IOCB_TYPE);
			break;
		case ELS_IOCB_TYPE:
			qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
			break;
		case MARKER_TYPE:
			/* Do nothing in this case, this check is to prevent it
			 * from falling into default case.
			 */
			break;
		default:
			/* Type Not Supported. */
			ql_dbg(ql_dbg_async, vha, 0x5042,
			    "Received unknown response pkt type %x "
			    "entry status=%x.\n",
			    pkt->entry_type, pkt->entry_status);
			break;
		}
		((response_t *)pkt)->signature = RESPONSE_PROCESSED;
		wmb();
	}

	/* Adjust ring index */
	if (IS_QLA82XX(ha)) {
		struct device_reg_82xx __iomem *reg = &ha->iobase->isp82;
		WRT_REG_DWORD(&reg->rsp_q_out[0], rsp->ring_index);
	} else
		WRT_REG_DWORD(rsp->rsp_q_out, rsp->ring_index);
}
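/*
 * Poll the RISC register window (bank 0x7C00) after a reported RISC pause.
 * The meaning of the sampled bits is firmware-specific; the interrupt
 * handlers below only use this to log the additional "0x55AA" code before
 * dumping firmware state.
 */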
static void
qla2xxx_check_risc_status(scsi_qla_host_t *vha)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha))
		return;

	rval = QLA_SUCCESS;
	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_window, 0x0001);
	for (cnt = 10000; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0001);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS)
		goto next_test;

	WRT_REG_DWORD(&reg->iobase_window, 0x0003);
	for (cnt = 100; (RD_REG_DWORD(&reg->iobase_window) & BIT_0) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt) {
			WRT_REG_DWORD(&reg->iobase_window, 0x0003);
			udelay(10);
		} else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval != QLA_SUCCESS)
		goto done;

next_test:
	if (RD_REG_DWORD(&reg->iobase_c8) & BIT_3)
		ql_log(ql_log_info, vha, 0x504c,
		    "Additional code -- 0x55AA.\n");

done:
	WRT_REG_DWORD(&reg->iobase_window, 0x0000);
	RD_REG_DWORD(&reg->iobase_window);
}
/**
 * qla24xx_intr_handler() - Process interrupts for the ISP24xx.
 * @irq: interrupt number
 * @dev_id: SCSI driver HA context
 *
 * Called by system whenever the host adapter generates an interrupt.
 *
 * Returns handled flag.
 */
irqreturn_t
qla24xx_intr_handler(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct device_reg_24xx __iomem *reg;
	int status;
	unsigned long iter;
	uint32_t stat;
	uint32_t hccr;
	uint16_t mb[4];
	struct rsp_que *rsp;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		printk(KERN_INFO
		    "%s(): NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}

	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	if (unlikely(pci_channel_offline(ha->pdev)))
		return IRQ_HANDLED;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	for (iter = 50; iter--; ) {
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);

			ql_log(ql_log_warn, vha, 0x504b,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;
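		/*
		 * The low byte of host_status identifies the interrupt
		 * source: 0x1/0x2/0x10/0x11 are mailbox completions (with
		 * mailbox0 in the upper word), 0x12 is an asynchronous
		 * event, and 0x13/0x14 indicate response-queue updates.
		 */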
		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
		case 0x14:
			qla24xx_process_response_queue(vha, rsp);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x504f,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}

	return IRQ_HANDLED;
}
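/*
 * MSI-X service routine for the base response queue.  With a dedicated
 * vector there is no host-status word to demultiplex; unless the MSI-X
 * handshake is disabled, the RISC interrupt is still acknowledged via
 * HCCR so the firmware can raise the next one.
 */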
static irqreturn_t
qla24xx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	struct scsi_qla_host *vha;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		printk(KERN_INFO
		    "%s(): NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;

	spin_lock_irqsave(&ha->hardware_lock, flags);

	vha = pci_get_drvdata(ha->pdev);
	qla24xx_process_response_queue(vha, rsp);
	if (!ha->flags.disable_msix_handshake) {
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
	}
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return IRQ_HANDLED;
}
static irqreturn_t
qla25xx_msix_rsp_q(int irq, void *dev_id)
{
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		printk(KERN_INFO
		    "%s(): NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;

	/* Clear the interrupt, if enabled, for this response queue */
	if (!ha->flags.disable_msix_handshake) {
		reg = &ha->iobase->isp24;
		spin_lock_irqsave(&ha->hardware_lock, flags);
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
		RD_REG_DWORD_RELAXED(&reg->hccr);
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
	}
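	/*
	 * Unlike the base-queue handler above, entry processing for the
	 * additional queues is deferred to a workqueue item pinned to CPU
	 * (rsp->id - 1), giving one servicing CPU per response queue.
	 */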
	queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);

	return IRQ_HANDLED;
}
static irqreturn_t
qla24xx_msix_default(int irq, void *dev_id)
{
	scsi_qla_host_t *vha;
	struct qla_hw_data *ha;
	struct rsp_que *rsp;
	struct device_reg_24xx __iomem *reg;
	int status;
	uint32_t stat;
	uint32_t hccr;
	uint16_t mb[4];
	unsigned long flags;

	rsp = (struct rsp_que *) dev_id;
	if (!rsp) {
		printk(KERN_INFO
		    "%s(): NULL response queue pointer.\n", __func__);
		return IRQ_NONE;
	}
	ha = rsp->hw;
	reg = &ha->iobase->isp24;
	status = 0;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	vha = pci_get_drvdata(ha->pdev);
	do {
		stat = RD_REG_DWORD(&reg->host_status);
		if (stat & HSRX_RISC_PAUSED) {
			if (unlikely(pci_channel_offline(ha->pdev)))
				break;

			hccr = RD_REG_DWORD(&reg->hccr);

			ql_log(ql_log_info, vha, 0x5050,
			    "RISC paused -- HCCR=%x, Dumping firmware.\n",
			    hccr);

			qla2xxx_check_risc_status(vha);

			ha->isp_ops->fw_dump(vha, 1);
			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
			break;
		} else if ((stat & HSRX_RISC_INT) == 0)
			break;

		switch (stat & 0xff) {
		case 0x1:
		case 0x2:
		case 0x10:
		case 0x11:
			qla24xx_mbx_completion(vha, MSW(stat));
			status |= MBX_INTERRUPT;

			break;
		case 0x12:
			mb[0] = MSW(stat);
			mb[1] = RD_REG_WORD(&reg->mailbox1);
			mb[2] = RD_REG_WORD(&reg->mailbox2);
			mb[3] = RD_REG_WORD(&reg->mailbox3);
			qla2x00_async_event(vha, rsp, mb);
			break;
		case 0x13:
		case 0x14:
			qla24xx_process_response_queue(vha, rsp);
			break;
		default:
			ql_dbg(ql_dbg_async, vha, 0x5051,
			    "Unrecognized interrupt type (%d).\n", stat & 0xff);
			break;
		}
		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
	} while (0);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	if (test_bit(MBX_INTR_WAIT, &ha->mbx_cmd_flags) &&
	    (status & MBX_INTERRUPT) && ha->flags.mbox_int) {
		set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
		complete(&ha->mbx_intr_comp);
	}
	return IRQ_HANDLED;
}

/* Interrupt handling helpers. */
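/*
 * Vector layout: entry 0 ("default") services mailbox completions, async
 * events and the base response queue; entry 1 ("rsp_q") services the base
 * response queue directly; entry 2 ("multiq") is requested per additional
 * response queue via qla25xx_request_irq() below.
 */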
struct qla_init_msix_entry {
	const char *name;
	irq_handler_t handler;
};

static struct qla_init_msix_entry msix_entries[3] = {
	{ "qla2xxx (default)", qla24xx_msix_default },
	{ "qla2xxx (rsp_q)", qla24xx_msix_rsp_q },
	{ "qla2xxx (multiq)", qla25xx_msix_rsp_q },
};

static struct qla_init_msix_entry qla82xx_msix_entries[2] = {
	{ "qla2xxx (default)", qla82xx_msix_default },
	{ "qla2xxx (rsp_q)", qla82xx_msix_rsp_q },
};

static void
qla24xx_disable_msix(struct qla_hw_data *ha)
{
	int i;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		if (qentry->have_irq)
			free_irq(qentry->vector, qentry->rsp);
	}
	pci_disable_msix(ha->pdev);
	kfree(ha->msix_entries);
	ha->msix_entries = NULL;
	ha->flags.msix_enabled = 0;
	ql_dbg(ql_dbg_init, vha, 0x0042,
	    "Disabled MSI-X.\n");
}
static int
qla24xx_enable_msix(struct qla_hw_data *ha, struct rsp_que *rsp)
{
#define MIN_MSIX_COUNT	2
	int i, ret;
	struct msix_entry *entries;
	struct qla_msix_entry *qentry;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	entries = kzalloc(sizeof(struct msix_entry) * ha->msix_count,
	    GFP_KERNEL);
	if (!entries) {
		ql_log(ql_log_warn, vha, 0x00bc,
		    "Failed to allocate memory for msix_entry.\n");
		return -ENOMEM;
	}

	for (i = 0; i < ha->msix_count; i++)
		entries[i].entry = i;

	ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
	if (ret) {
		if (ret < MIN_MSIX_COUNT)
			goto msix_failed;

		ql_log(ql_log_warn, vha, 0x00c6,
		    "MSI-X: Failed to enable support -- %d/%d. "
		    "Retrying with %d vectors.\n",
		    ha->msix_count, ret, ret);
		ha->msix_count = ret;
		ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
		if (ret) {
msix_failed:
			ql_log(ql_log_fatal, vha, 0x00c7,
			    "MSI-X: Failed to enable support, "
			    "giving up -- %d/%d.\n",
			    ha->msix_count, ret);
			goto msix_out;
		}
		ha->max_rsp_queues = ha->msix_count - 1;
	}
	ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
	    ha->msix_count, GFP_KERNEL);
	if (!ha->msix_entries) {
		ql_log(ql_log_fatal, vha, 0x00c8,
		    "Failed to allocate memory for ha->msix_entries.\n");
		ret = -ENOMEM;
		goto msix_out;
	}
	ha->flags.msix_enabled = 1;

	for (i = 0; i < ha->msix_count; i++) {
		qentry = &ha->msix_entries[i];
		qentry->vector = entries[i].vector;
		qentry->entry = entries[i].entry;
		qentry->have_irq = 0;
		qentry->rsp = NULL;
	}

	/* Enable MSI-X vectors for the base queue */
	for (i = 0; i < 2; i++) {
		qentry = &ha->msix_entries[i];
		if (IS_QLA82XX(ha)) {
			ret = request_irq(qentry->vector,
			    qla82xx_msix_entries[i].handler,
			    0, qla82xx_msix_entries[i].name, rsp);
		} else {
			ret = request_irq(qentry->vector,
			    msix_entries[i].handler,
			    0, msix_entries[i].name, rsp);
		}
		if (ret) {
			ql_log(ql_log_fatal, vha, 0x00cb,
			    "MSI-X: unable to register handler -- %x/%d.\n",
			    qentry->vector, ret);
			qla24xx_disable_msix(ha);
			ha->mqenable = 0;
			goto msix_out;
		}
		qentry->have_irq = 1;
		qentry->rsp = rsp;
		rsp->msix = qentry;
	}

	/* Enable MSI-X vector for response queue update for queue 0 */
	if (ha->mqiobase && (ha->max_rsp_queues > 1 || ha->max_req_queues > 1))
		ha->mqenable = 1;
	ql_dbg(ql_dbg_multiq, vha, 0xc005,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);
	ql_dbg(ql_dbg_init, vha, 0x0055,
	    "mqiobase=%p, max_rsp_queues=%d, max_req_queues=%d.\n",
	    ha->mqiobase, ha->max_rsp_queues, ha->max_req_queues);

msix_out:
	kfree(entries);
	return ret;
}
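/*
 * Interrupt setup ladder: prefer MSI-X, fall back to MSI on chips that
 * support it, and finally to a (possibly shared) INTx line, which is why
 * IRQF_SHARED is only passed when MSI could not be enabled.
 */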
int
qla2x00_request_irqs(struct qla_hw_data *ha, struct rsp_que *rsp)
{
	int ret;
	device_reg_t __iomem *reg = ha->iobase;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);

	/* If possible, enable MSI-X. */
	if (!IS_QLA2432(ha) && !IS_QLA2532(ha) &&
	    !IS_QLA8432(ha) && !IS_QLA8XXX_TYPE(ha))
		goto skip_msi;

	if (ha->pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
	    (ha->pdev->subsystem_device == 0x7040 ||
	     ha->pdev->subsystem_device == 0x7041 ||
	     ha->pdev->subsystem_device == 0x1705)) {
		ql_log(ql_log_warn, vha, 0x0034,
		    "MSI-X: Unsupported ISP2432 SSVID/SSDID (0x%X,0x%X).\n",
		    ha->pdev->subsystem_vendor,
		    ha->pdev->subsystem_device);
		goto skip_msi;
	}

	if (IS_QLA2432(ha) && (ha->pdev->revision < QLA_MSIX_CHIP_REV_24XX)) {
		ql_log(ql_log_warn, vha, 0x0035,
		    "MSI-X: Unsupported ISP2432 (0x%X, 0x%X).\n",
		    ha->pdev->revision, QLA_MSIX_CHIP_REV_24XX);
		goto skip_msix;
	}

	ret = qla24xx_enable_msix(ha, rsp);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0036,
		    "MSI-X: Enabled (0x%X, 0x%X).\n",
		    ha->chip_revision, ha->fw_attributes);
		goto clear_risc_ints;
	}
	ql_log(ql_log_info, vha, 0x0037,
	    "MSI-X: Falling back to MSI mode -- %d.\n", ret);
skip_msix:

	if (!IS_QLA24XX(ha) && !IS_QLA2532(ha) && !IS_QLA8432(ha) &&
	    !IS_QLA8001(ha))
		goto skip_msi;

	ret = pci_enable_msi(ha->pdev);
	if (!ret) {
		ql_dbg(ql_dbg_init, vha, 0x0038,
		    "MSI: Enabled.\n");
		ha->flags.msi_enabled = 1;
	} else
		ql_log(ql_log_warn, vha, 0x0039,
		    "MSI: Falling back to INTa mode -- %d.\n", ret);
skip_msi:

	ret = request_irq(ha->pdev->irq, ha->isp_ops->intr_handler,
	    ha->flags.msi_enabled ? 0 : IRQF_SHARED,
	    QLA2XXX_DRIVER_NAME, rsp);
	if (ret) {
		ql_log(ql_log_warn, vha, 0x003a,
		    "Failed to reserve interrupt %d; already in use.\n",
		    ha->pdev->irq);
		goto fail;
	}

clear_risc_ints:

	/*
	 * FIXME: Noted that 8014s were being dropped during NK testing.
	 * Timing deltas during MSI-X/INTa transitions?
	 */
	if (IS_QLA81XX(ha) || IS_QLA82XX(ha))
		goto fail;
	spin_lock_irq(&ha->hardware_lock);
	if (IS_FWI2_CAPABLE(ha)) {
		WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_HOST_INT);
		WRT_REG_DWORD(&reg->isp24.hccr, HCCRX_CLR_RISC_INT);
	} else {
		WRT_REG_WORD(&reg->isp.semaphore, 0);
		WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_RISC_INT);
		WRT_REG_WORD(&reg->isp.hccr, HCCR_CLR_HOST_INT);
	}
	spin_unlock_irq(&ha->hardware_lock);

fail:
	return ret;
}
void
qla2x00_free_irqs(scsi_qla_host_t *vha)
{
	struct qla_hw_data *ha = vha->hw;
	struct rsp_que *rsp = ha->rsp_q_map[0];

	if (ha->flags.msix_enabled)
		qla24xx_disable_msix(ha);
	else if (ha->flags.msi_enabled) {
		free_irq(ha->pdev->irq, rsp);
		pci_disable_msi(ha->pdev);
	} else
		free_irq(ha->pdev->irq, rsp);
}
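/*
 * Wire the vector reserved for an additional response queue (entry 2,
 * "multiq", in msix_entries[]) to that queue.  Called when the driver
 * brings up response queues beyond the base pair.
 */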
int qla25xx_request_irq(struct rsp_que *rsp)
{
	struct qla_hw_data *ha = rsp->hw;
	struct qla_init_msix_entry *intr = &msix_entries[2];
	struct qla_msix_entry *msix = rsp->msix;
	scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
	int ret;

	ret = request_irq(msix->vector, intr->handler, 0, intr->name, rsp);
	if (ret) {
		ql_log(ql_log_fatal, vha, 0x00e6,
		    "MSI-X: Unable to register handler -- %x/%d.\n",
		    msix->vector, ret);
		return ret;
	}
	msix->have_irq = 1;
	msix->rsp = rsp;
	return ret;
}