drivers/scsi/qla2xxx/qla_dbg.c
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2010 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"

#include <linux/delay.h>
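
/*
 * Small helpers shared by the per-ISP dump routines: qla2xxx_prep_dump()
 * stamps firmware and PCI identification into the dump header and
 * qla2xxx_copy_queues() appends the request/response rings.  All
 * multi-byte dump fields are stored big-endian (htonl/htons) so the
 * image can be decoded off-box regardless of host endianness.
 */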
static inline void
qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
{
	fw_dump->fw_major_version = htonl(ha->fw_major_version);
	fw_dump->fw_minor_version = htonl(ha->fw_minor_version);
	fw_dump->fw_subminor_version = htonl(ha->fw_subminor_version);
	fw_dump->fw_attributes = htonl(ha->fw_attributes);

	fw_dump->vendor = htonl(ha->pdev->vendor);
	fw_dump->device = htonl(ha->pdev->device);
	fw_dump->subsystem_vendor = htonl(ha->pdev->subsystem_vendor);
	fw_dump->subsystem_device = htonl(ha->pdev->subsystem_device);
}
static inline void *
qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)
{
	struct req_que *req = ha->req_q_map[0];
	struct rsp_que *rsp = ha->rsp_q_map[0];

	/* Request queue. */
	memcpy(ptr, req->ring, req->length *
	    sizeof(request_t));

	/* Response queue. */
	ptr += req->length * sizeof(request_t);
	memcpy(ptr, rsp->ring, rsp->length *
	    sizeof(response_t));

	return ptr + (rsp->length * sizeof(response_t));
}
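
/*
 * Read RISC RAM in GID-list-sized chunks via MBC_DUMP_RISC_RAM_EXTENDED:
 * each pass loads the RISC address, the DMA address of ha->gid_list and
 * the dword count into the mailboxes, sets the host-interrupt doorbell,
 * then polls host_status for the mailbox completion before byte-swapping
 * the chunk into the caller's buffer.
 */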
static int
qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
    uint32_t ram_dwords, void **nxt)
{
	int rval;
	uint32_t cnt, stat, timer, dwords, idx;
	uint16_t mb0;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	dma_addr_t dump_dma = ha->gid_list_dma;
	uint32_t *dump = (uint32_t *)ha->gid_list;

	rval = QLA_SUCCESS;
	mb0 = 0;

	WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	dwords = GID_LIST_SIZE / 4;
	for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
	    cnt += dwords, addr += dwords) {
		if (cnt + dwords > ram_dwords)
			dwords = ram_dwords - cnt;

		WRT_REG_WORD(&reg->mailbox1, LSW(addr));
		WRT_REG_WORD(&reg->mailbox8, MSW(addr));

		WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
		WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
		WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
		WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));

		WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
		WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
		WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);

		for (timer = 6000000; timer; timer--) {
			/* Check for pending interrupts. */
			stat = RD_REG_DWORD(&reg->host_status);
			if (stat & HSRX_RISC_INT) {
				stat &= 0xff;

				if (stat == 0x1 || stat == 0x2 ||
				    stat == 0x10 || stat == 0x11) {
					set_bit(MBX_INTERRUPT,
					    &ha->mbx_cmd_flags);

					mb0 = RD_REG_WORD(&reg->mailbox0);

					WRT_REG_DWORD(&reg->hccr,
					    HCCRX_CLR_RISC_INT);
					RD_REG_DWORD(&reg->hccr);
					break;
				}

				/* Clear this intr; it wasn't a mailbox intr */
				WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
				RD_REG_DWORD(&reg->hccr);
			}
			udelay(5);
		}

		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
			rval = mb0 & MBS_MASK;
			for (idx = 0; idx < dwords; idx++)
				ram[cnt + idx] = swab32(dump[idx]);
		} else {
			rval = QLA_FUNCTION_FAILED;
		}
	}

	*nxt = rval == QLA_SUCCESS ? &ram[cnt]: NULL;
	return rval;
}
static int
qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
    uint32_t cram_size, void **nxt)
{
	int rval;

	/* Code RAM. */
	rval = qla24xx_dump_ram(ha, 0x20000, code_ram, cram_size / 4, nxt);
	if (rval != QLA_SUCCESS)
		return rval;

	/* External Memory. */
	return qla24xx_dump_ram(ha, 0x100000, *nxt,
	    ha->fw_memory_size - 0x100000 + 1, nxt);
}
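
/*
 * Select a register window through iobase_addr and copy 'count' dwords
 * from iobase_window into the dump buffer in big-endian order; returns
 * the advanced buffer pointer so window reads can be chained.
 */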
static uint32_t *
qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
    uint32_t count, uint32_t *buf)
{
	uint32_t __iomem *dmp_reg;

	WRT_REG_DWORD(&reg->iobase_addr, iobase);
	dmp_reg = &reg->iobase_window;
	while (count--)
		*buf++ = htonl(RD_REG_DWORD(dmp_reg++));

	return buf;
}
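
/*
 * Request a RISC pause and wait (at most 30000 x 100us) for
 * HSRX_RISC_PAUSED to show up in host_status.
 */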
static inline int
qla24xx_pause_risc(struct device_reg_24xx __iomem *reg)
{
	int rval = QLA_SUCCESS;
	uint32_t cnt;

	WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
	for (cnt = 30000;
	    ((RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED) == 0) &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt)
			udelay(100);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}

	return rval;
}
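
/*
 * Soft-reset the ISP24xx-class RISC: quiesce DMA, assert the soft reset,
 * let the firmware finish any NVRAM access, wait for the reset bit to
 * clear, release the RISC and wait for mailbox0 to return to zero.
 */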
static int
qla24xx_soft_reset(struct qla_hw_data *ha)
{
	int rval = QLA_SUCCESS;
	uint32_t cnt;
	uint16_t mb0, wd;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;

	/* Reset RISC. */
	WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
			break;

		udelay(10);
	}

	WRT_REG_DWORD(&reg->ctrl_status,
	    CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
	pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);

	udelay(100);
	/* Wait for firmware to complete NVRAM accesses. */
	mb0 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
	for (cnt = 10000 ; cnt && mb0; cnt--) {
		udelay(5);
		mb0 = (uint32_t) RD_REG_WORD(&reg->mailbox0);
		barrier();
	}

	/* Wait for soft-reset to complete. */
	for (cnt = 0; cnt < 30000; cnt++) {
		if ((RD_REG_DWORD(&reg->ctrl_status) &
		    CSRX_ISP_SOFT_RESET) == 0)
			break;

		udelay(10);
	}
	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
	RD_REG_DWORD(&reg->hccr);             /* PCI Posting. */

	for (cnt = 30000; RD_REG_WORD(&reg->mailbox0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt)
			udelay(100);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}

	return rval;
}
static int
qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
    uint32_t ram_words, void **nxt)
{
	int rval;
	uint32_t cnt, stat, timer, words, idx;
	uint16_t mb0;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	dma_addr_t dump_dma = ha->gid_list_dma;
	uint16_t *dump = (uint16_t *)ha->gid_list;

	rval = QLA_SUCCESS;
	mb0 = 0;

	WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED);
	clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);

	words = GID_LIST_SIZE / 2;
	for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS;
	    cnt += words, addr += words) {
		if (cnt + words > ram_words)
			words = ram_words - cnt;

		WRT_MAILBOX_REG(ha, reg, 1, LSW(addr));
		WRT_MAILBOX_REG(ha, reg, 8, MSW(addr));

		WRT_MAILBOX_REG(ha, reg, 2, MSW(dump_dma));
		WRT_MAILBOX_REG(ha, reg, 3, LSW(dump_dma));
		WRT_MAILBOX_REG(ha, reg, 6, MSW(MSD(dump_dma)));
		WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma)));

		WRT_MAILBOX_REG(ha, reg, 4, words);
		WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);

		for (timer = 6000000; timer; timer--) {
			/* Check for pending interrupts. */
			stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
			if (stat & HSR_RISC_INT) {
				stat &= 0xff;

				if (stat == 0x1 || stat == 0x2) {
					set_bit(MBX_INTERRUPT,
					    &ha->mbx_cmd_flags);

					mb0 = RD_MAILBOX_REG(ha, reg, 0);

					/* Release mailbox registers. */
					WRT_REG_WORD(&reg->semaphore, 0);
					WRT_REG_WORD(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					RD_REG_WORD(&reg->hccr);
					break;
				} else if (stat == 0x10 || stat == 0x11) {
					set_bit(MBX_INTERRUPT,
					    &ha->mbx_cmd_flags);

					mb0 = RD_MAILBOX_REG(ha, reg, 0);

					WRT_REG_WORD(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					RD_REG_WORD(&reg->hccr);
					break;
				}

				/* clear this intr; it wasn't a mailbox intr */
				WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
				RD_REG_WORD(&reg->hccr);
			}
			udelay(5);
		}

		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
			rval = mb0 & MBS_MASK;
			for (idx = 0; idx < words; idx++)
				ram[cnt + idx] = swab16(dump[idx]);
		} else {
			rval = QLA_FUNCTION_FAILED;
		}
	}

	*nxt = rval == QLA_SUCCESS ? &ram[cnt]: NULL;
	return rval;
}
static inline void
qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
    uint16_t *buf)
{
	uint16_t __iomem *dmp_reg = &reg->u.isp2300.fb_cmd;

	while (count--)
		*buf++ = htons(RD_REG_WORD(dmp_reg++));
}
static inline void *
qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr)
{
	if (!ha->eft)
		return ptr;

	memcpy(ptr, ha->eft, ntohl(ha->fw_dump->eft_size));
	return ptr + ntohl(ha->fw_dump->eft_size);
}
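
/*
 * Append an FCE (Fibre Channel Event) chain record: record header, the
 * eight FCE enable mailboxes, then the trace buffer itself.  *last_chain
 * is pointed at the record type so the caller can flag the final chain
 * entry with DUMP_CHAIN_LAST.
 */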
static inline void *
qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
{
	uint32_t cnt;
	uint32_t *iter_reg;
	struct qla2xxx_fce_chain *fcec = ptr;

	if (!ha->fce)
		return ptr;

	*last_chain = &fcec->type;
	fcec->type = __constant_htonl(DUMP_CHAIN_FCE);
	fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
	    fce_calc_size(ha->fce_bufs));
	fcec->size = htonl(fce_calc_size(ha->fce_bufs));
	fcec->addr_l = htonl(LSD(ha->fce_dma));
	fcec->addr_h = htonl(MSD(ha->fce_dma));

	iter_reg = fcec->eregs;
	for (cnt = 0; cnt < 8; cnt++)
		*iter_reg++ = htonl(ha->fce_mb[cnt]);

	memcpy(iter_reg, ha->fce, ntohl(fcec->size));

	return iter_reg;
}
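
/*
 * When multiqueue is enabled, snapshot the request/response in/out
 * pointers of every queue pair (four dwords per queue) into a
 * DUMP_CHAIN_MQ record.
 */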
static inline void *
qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
{
	uint32_t cnt, que_idx;
	uint8_t que_cnt;
	struct qla2xxx_mq_chain *mq = ptr;
	struct device_reg_25xxmq __iomem *reg;

	if (!ha->mqenable)
		return ptr;

	mq = ptr;
	*last_chain = &mq->type;
	mq->type = __constant_htonl(DUMP_CHAIN_MQ);
	mq->chain_size = __constant_htonl(sizeof(struct qla2xxx_mq_chain));

	que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
		ha->max_req_queues : ha->max_rsp_queues;
	mq->count = htonl(que_cnt);
	for (cnt = 0; cnt < que_cnt; cnt++) {
		reg = (struct device_reg_25xxmq *) ((void *)
			ha->mqiobase + cnt * QLA_QUE_PAGE);
		que_idx = cnt * 4;
		mq->qregs[que_idx] = htonl(RD_REG_DWORD(&reg->req_q_in));
		mq->qregs[que_idx+1] = htonl(RD_REG_DWORD(&reg->req_q_out));
		mq->qregs[que_idx+2] = htonl(RD_REG_DWORD(&reg->rsp_q_in));
		mq->qregs[que_idx+3] = htonl(RD_REG_DWORD(&reg->rsp_q_out));
	}

	return ptr + sizeof(struct qla2xxx_mq_chain);
}
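
/*
 * Common epilogue for all dump routines: log the outcome and, on
 * success, mark ha->fw_dumped and post a firmware-dump uevent.
 */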
static void
qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
{
	struct qla_hw_data *ha = vha->hw;

	if (rval != QLA_SUCCESS) {
		qla_printk(KERN_WARNING, ha,
		    "Failed to dump firmware (%x)!!!\n", rval);
		ha->fw_dumped = 0;
	} else {
		qla_printk(KERN_INFO, ha,
		    "Firmware dump saved to temp buffer (%ld/%p).\n",
		    vha->host_no, ha->fw_dump);
		ha->fw_dumped = 1;
		qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
	}
}
/**
 * qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
 * @ha: HA context
 * @hardware_locked: Called with the hardware_lock
 */
void
qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
{
	int rval;
	uint32_t cnt;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint16_t __iomem *dmp_reg;
	unsigned long flags;
	struct qla2300_fw_dump *fw;
	void *nxt;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	flags = 0;

	if (!hardware_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);

	if (!ha->fw_dump) {
		qla_printk(KERN_WARNING, ha,
		    "No buffer available for dump!!!\n");
		goto qla2300_fw_dump_failed;
	}

	if (ha->fw_dumped) {
		qla_printk(KERN_WARNING, ha,
		    "Firmware has been previously dumped (%p) -- ignoring "
		    "request...\n", ha->fw_dump);
		goto qla2300_fw_dump_failed;
	}
	fw = &ha->fw_dump->isp.isp23;
	qla2xxx_prep_dump(ha, ha->fw_dump);

	rval = QLA_SUCCESS;
	fw->hccr = htons(RD_REG_WORD(&reg->hccr));

	/* Pause RISC. */
	WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
	if (IS_QLA2300(ha)) {
		for (cnt = 30000;
		    (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
		    rval == QLA_SUCCESS; cnt--) {
			if (cnt)
				udelay(100);
			else
				rval = QLA_FUNCTION_TIMEOUT;
		}
	} else {
		RD_REG_WORD(&reg->hccr);		/* PCI Posting. */
		udelay(10);
	}

	if (rval == QLA_SUCCESS) {
		dmp_reg = &reg->flash_address;
		for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++)
			fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));

		dmp_reg = &reg->u.isp2300.req_q_in;
		for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2; cnt++)
			fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));

		dmp_reg = &reg->u.isp2300.mailbox0;
		for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
			fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));

		WRT_REG_WORD(&reg->ctrl_status, 0x40);
		qla2xxx_read_window(reg, 32, fw->resp_dma_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x50);
		qla2xxx_read_window(reg, 48, fw->dma_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x00);
		dmp_reg = &reg->risc_hw;
		for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++)
			fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));

		WRT_REG_WORD(&reg->pcr, 0x2000);
		qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);

		WRT_REG_WORD(&reg->pcr, 0x2200);
		qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);

		WRT_REG_WORD(&reg->pcr, 0x2400);
		qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);

		WRT_REG_WORD(&reg->pcr, 0x2600);
		qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);

		WRT_REG_WORD(&reg->pcr, 0x2800);
		qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);

		WRT_REG_WORD(&reg->pcr, 0x2A00);
		qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);

		WRT_REG_WORD(&reg->pcr, 0x2C00);
		qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);

		WRT_REG_WORD(&reg->pcr, 0x2E00);
		qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x10);
		qla2xxx_read_window(reg, 64, fw->frame_buf_hdw_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x30);
		qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);

		/* Reset RISC. */
		WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
		for (cnt = 0; cnt < 30000; cnt++) {
			if ((RD_REG_WORD(&reg->ctrl_status) &
			    CSR_ISP_SOFT_RESET) == 0)
				break;

			udelay(10);
		}
	}

	if (!IS_QLA2300(ha)) {
		for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
		    rval == QLA_SUCCESS; cnt--) {
			if (cnt)
				udelay(100);
			else
				rval = QLA_FUNCTION_TIMEOUT;
		}
	}

	/* Get RISC SRAM. */
	if (rval == QLA_SUCCESS)
		rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram,
		    sizeof(fw->risc_ram) / 2, &nxt);

	/* Get stack SRAM. */
	if (rval == QLA_SUCCESS)
		rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram,
		    sizeof(fw->stack_ram) / 2, &nxt);

	/* Get data SRAM. */
	if (rval == QLA_SUCCESS)
		rval = qla2xxx_dump_ram(ha, 0x11000, fw->data_ram,
		    ha->fw_memory_size - 0x11000 + 1, &nxt);

	if (rval == QLA_SUCCESS)
		qla2xxx_copy_queues(ha, nxt);

	qla2xxx_dump_post_process(base_vha, rval);

qla2300_fw_dump_failed:
	if (!hardware_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
/**
 * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
 * @ha: HA context
 * @hardware_locked: Called with the hardware_lock
 */
void
qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
{
	int rval;
	uint32_t cnt, timer;
	uint16_t risc_address;
	uint16_t mb0, mb2;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	uint16_t __iomem *dmp_reg;
	unsigned long flags;
	struct qla2100_fw_dump *fw;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	risc_address = 0;
	mb0 = mb2 = 0;
	flags = 0;

	if (!hardware_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);

	if (!ha->fw_dump) {
		qla_printk(KERN_WARNING, ha,
		    "No buffer available for dump!!!\n");
		goto qla2100_fw_dump_failed;
	}

	if (ha->fw_dumped) {
		qla_printk(KERN_WARNING, ha,
		    "Firmware has been previously dumped (%p) -- ignoring "
		    "request...\n", ha->fw_dump);
		goto qla2100_fw_dump_failed;
	}
	fw = &ha->fw_dump->isp.isp21;
	qla2xxx_prep_dump(ha, ha->fw_dump);

	rval = QLA_SUCCESS;
	fw->hccr = htons(RD_REG_WORD(&reg->hccr));

	/* Pause RISC. */
	WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
	for (cnt = 30000; (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt)
			udelay(100);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}
	if (rval == QLA_SUCCESS) {
		dmp_reg = &reg->flash_address;
		for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++)
			fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));

		dmp_reg = &reg->u.isp2100.mailbox0;
		for (cnt = 0; cnt < ha->mbx_count; cnt++) {
			if (cnt == 8)
				dmp_reg = &reg->u_end.isp2200.mailbox8;

			fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));
		}

		dmp_reg = &reg->u.isp2100.unused_2[0];
		for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++)
			fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));

		WRT_REG_WORD(&reg->ctrl_status, 0x00);
		dmp_reg = &reg->risc_hw;
		for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++)
			fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg++));

		WRT_REG_WORD(&reg->pcr, 0x2000);
		qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);

		WRT_REG_WORD(&reg->pcr, 0x2100);
		qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);

		WRT_REG_WORD(&reg->pcr, 0x2200);
		qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);

		WRT_REG_WORD(&reg->pcr, 0x2300);
		qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);

		WRT_REG_WORD(&reg->pcr, 0x2400);
		qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);

		WRT_REG_WORD(&reg->pcr, 0x2500);
		qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);

		WRT_REG_WORD(&reg->pcr, 0x2600);
		qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);

		WRT_REG_WORD(&reg->pcr, 0x2700);
		qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x10);
		qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x20);
		qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);

		WRT_REG_WORD(&reg->ctrl_status, 0x30);
		qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);

		/* Reset the ISP. */
		WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
	}

	for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
	    rval == QLA_SUCCESS; cnt--) {
		if (cnt)
			udelay(100);
		else
			rval = QLA_FUNCTION_TIMEOUT;
	}

	/* Pause RISC. */
	if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) &&
	    (RD_REG_WORD(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) {

		WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
		for (cnt = 30000;
		    (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
		    rval == QLA_SUCCESS; cnt--) {
			if (cnt)
				udelay(100);
			else
				rval = QLA_FUNCTION_TIMEOUT;
		}
		if (rval == QLA_SUCCESS) {
			/* Set memory configuration and timing. */
			if (IS_QLA2100(ha))
				WRT_REG_WORD(&reg->mctr, 0xf1);
			else
				WRT_REG_WORD(&reg->mctr, 0xf2);
			RD_REG_WORD(&reg->mctr);	/* PCI Posting. */

			/* Release RISC. */
			WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
		}
	}

	if (rval == QLA_SUCCESS) {
		/* Get RISC SRAM. */
		risc_address = 0x1000;
		WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
		clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
	}
	for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS;
	    cnt++, risc_address++) {
		WRT_MAILBOX_REG(ha, reg, 1, risc_address);
		WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);

		for (timer = 6000000; timer != 0; timer--) {
			/* Check for pending interrupts. */
			if (RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) {
				if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
					set_bit(MBX_INTERRUPT,
					    &ha->mbx_cmd_flags);

					mb0 = RD_MAILBOX_REG(ha, reg, 0);
					mb2 = RD_MAILBOX_REG(ha, reg, 2);

					WRT_REG_WORD(&reg->semaphore, 0);
					WRT_REG_WORD(&reg->hccr,
					    HCCR_CLR_RISC_INT);
					RD_REG_WORD(&reg->hccr);
					break;
				}
				WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
				RD_REG_WORD(&reg->hccr);
			}
			udelay(5);
		}

		if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
			rval = mb0 & MBS_MASK;
			fw->risc_ram[cnt] = htons(mb2);
		} else {
			rval = QLA_FUNCTION_FAILED;
		}
	}

	if (rval == QLA_SUCCESS)
		qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]);

	qla2xxx_dump_post_process(base_vha, rval);

qla2100_fw_dump_failed:
	if (!hardware_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
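
/*
 * ISP24xx firmware dump: capture the host, shadow, mailbox and windowed
 * register blocks, soft-reset the RISC, then dump code RAM and external
 * memory followed by the I/O queues and the EFT buffer.
 */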
void
qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
{
	int rval;
	uint32_t cnt;
	uint32_t risc_address;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	uint32_t __iomem *dmp_reg;
	uint32_t *iter_reg;
	uint16_t __iomem *mbx_reg;
	unsigned long flags;
	struct qla24xx_fw_dump *fw;
	uint32_t ext_mem_cnt;
	void *nxt;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	if (IS_QLA82XX(ha))
		return;

	risc_address = ext_mem_cnt = 0;
	flags = 0;

	if (!hardware_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);

	if (!ha->fw_dump) {
		qla_printk(KERN_WARNING, ha,
		    "No buffer available for dump!!!\n");
		goto qla24xx_fw_dump_failed;
	}

	if (ha->fw_dumped) {
		qla_printk(KERN_WARNING, ha,
		    "Firmware has been previously dumped (%p) -- ignoring "
		    "request...\n", ha->fw_dump);
		goto qla24xx_fw_dump_failed;
	}
	fw = &ha->fw_dump->isp.isp24;
	qla2xxx_prep_dump(ha, ha->fw_dump);

	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));

	/* Pause RISC. */
	rval = qla24xx_pause_risc(reg);
	if (rval != QLA_SUCCESS)
		goto qla24xx_fw_dump_failed_0;

	/* Host interface registers. */
	dmp_reg = &reg->flash_addr;
	for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
		fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));

	/* Disable interrupts. */
	WRT_REG_DWORD(&reg->ictrl, 0);
	RD_REG_DWORD(&reg->ictrl);

	/* Shadow registers. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
	fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
	fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
	fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
	fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
	fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
	fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
	fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	/* Mailbox registers. */
	mbx_reg = &reg->mailbox0;
	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
		fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));

	/* Transfer sequence registers. */
	iter_reg = fw->xseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);

	qla24xx_read_window(reg, 0xBFE0, 16, fw->xseq_0_reg);
	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);

	/* Receive sequence registers. */
	iter_reg = fw->rseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);

	qla24xx_read_window(reg, 0xFFD0, 16, fw->rseq_0_reg);
	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);

	/* Command DMA registers. */
	qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);

	/* Queues. */
	iter_reg = fw->req0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));

	iter_reg = fw->resp0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));

	iter_reg = fw->req1_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));

	/* Transmit DMA registers. */
	iter_reg = fw->xmt0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
	qla24xx_read_window(reg, 0x7610, 16, iter_reg);

	iter_reg = fw->xmt1_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
	qla24xx_read_window(reg, 0x7630, 16, iter_reg);

	iter_reg = fw->xmt2_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
	qla24xx_read_window(reg, 0x7650, 16, iter_reg);

	iter_reg = fw->xmt3_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
	qla24xx_read_window(reg, 0x7670, 16, iter_reg);

	iter_reg = fw->xmt4_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
	qla24xx_read_window(reg, 0x7690, 16, iter_reg);

	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);

	/* Receive DMA registers. */
	iter_reg = fw->rcvt0_data_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
	qla24xx_read_window(reg, 0x7710, 16, iter_reg);

	iter_reg = fw->rcvt1_data_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
	qla24xx_read_window(reg, 0x7730, 16, iter_reg);

	/* RISC registers. */
	iter_reg = fw->risc_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);

	/* Local memory controller registers. */
	iter_reg = fw->lmc_reg;
	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
	qla24xx_read_window(reg, 0x3060, 16, iter_reg);

	/* Fibre Protocol Module registers. */
	iter_reg = fw->fpm_hdw_reg;
	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
	qla24xx_read_window(reg, 0x40B0, 16, iter_reg);

	/* Frame Buffer registers. */
	iter_reg = fw->fb_hdw_reg;
	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
	qla24xx_read_window(reg, 0x61B0, 16, iter_reg);

	rval = qla24xx_soft_reset(ha);
	if (rval != QLA_SUCCESS)
		goto qla24xx_fw_dump_failed_0;

	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
	    &nxt);
	if (rval != QLA_SUCCESS)
		goto qla24xx_fw_dump_failed_0;

	nxt = qla2xxx_copy_queues(ha, nxt);

	qla24xx_copy_eft(ha, nxt);

qla24xx_fw_dump_failed_0:
	qla2xxx_dump_post_process(base_vha, rval);

qla24xx_fw_dump_failed:
	if (!hardware_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
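
/*
 * ISP25xx firmware dump: same flow as qla24xx_fw_dump() plus PCIe,
 * RISC I/O and auxiliary-sequence register blocks, with multiqueue and
 * FCE data appended as chained records after the fixed-size dump.
 */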
void
qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
{
	int rval;
	uint32_t cnt;
	uint32_t risc_address;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	uint32_t __iomem *dmp_reg;
	uint32_t *iter_reg;
	uint16_t __iomem *mbx_reg;
	unsigned long flags;
	struct qla25xx_fw_dump *fw;
	uint32_t ext_mem_cnt;
	void *nxt, *nxt_chain;
	uint32_t *last_chain = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	risc_address = ext_mem_cnt = 0;
	flags = 0;

	if (!hardware_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);

	if (!ha->fw_dump) {
		qla_printk(KERN_WARNING, ha,
		    "No buffer available for dump!!!\n");
		goto qla25xx_fw_dump_failed;
	}

	if (ha->fw_dumped) {
		qla_printk(KERN_WARNING, ha,
		    "Firmware has been previously dumped (%p) -- ignoring "
		    "request...\n", ha->fw_dump);
		goto qla25xx_fw_dump_failed;
	}
	fw = &ha->fw_dump->isp.isp25;
	qla2xxx_prep_dump(ha, ha->fw_dump);
	ha->fw_dump->version = __constant_htonl(2);

	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));

	/* Pause RISC. */
	rval = qla24xx_pause_risc(reg);
	if (rval != QLA_SUCCESS)
		goto qla25xx_fw_dump_failed_0;

	/* Host/Risc registers. */
	iter_reg = fw->host_risc_reg;
	iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
	qla24xx_read_window(reg, 0x7010, 16, iter_reg);

	/* PCIe registers. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_window, 0x01);
	dmp_reg = &reg->iobase_c4;
	fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
	fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
	fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
	fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));

	WRT_REG_DWORD(&reg->iobase_window, 0x00);
	RD_REG_DWORD(&reg->iobase_window);

	/* Host interface registers. */
	dmp_reg = &reg->flash_addr;
	for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
		fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));

	/* Disable interrupts. */
	WRT_REG_DWORD(&reg->ictrl, 0);
	RD_REG_DWORD(&reg->ictrl);

	/* Shadow registers. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
	fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
	fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
	fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
	fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
	fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
	fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
	fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
	fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
	fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
	fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
	fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	/* RISC I/O register. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
	fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));

	/* Mailbox registers. */
	mbx_reg = &reg->mailbox0;
	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
		fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));

	/* Transfer sequence registers. */
	iter_reg = fw->xseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);

	iter_reg = fw->xseq_0_reg;
	iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
	qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);

	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);

	/* Receive sequence registers. */
	iter_reg = fw->rseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);

	iter_reg = fw->rseq_0_reg;
	iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
	qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);

	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);

	/* Auxiliary sequence registers. */
	iter_reg = fw->aseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
	qla24xx_read_window(reg, 0xB070, 16, iter_reg);

	iter_reg = fw->aseq_0_reg;
	iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
	qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);

	qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
	qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);

	/* Command DMA registers. */
	qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);

	/* Queues. */
	iter_reg = fw->req0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));

	iter_reg = fw->resp0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));

	iter_reg = fw->req1_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));

	/* Transmit DMA registers. */
	iter_reg = fw->xmt0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
	qla24xx_read_window(reg, 0x7610, 16, iter_reg);

	iter_reg = fw->xmt1_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
	qla24xx_read_window(reg, 0x7630, 16, iter_reg);

	iter_reg = fw->xmt2_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
	qla24xx_read_window(reg, 0x7650, 16, iter_reg);

	iter_reg = fw->xmt3_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
	qla24xx_read_window(reg, 0x7670, 16, iter_reg);

	iter_reg = fw->xmt4_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
	qla24xx_read_window(reg, 0x7690, 16, iter_reg);

	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);

	/* Receive DMA registers. */
	iter_reg = fw->rcvt0_data_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
	qla24xx_read_window(reg, 0x7710, 16, iter_reg);

	iter_reg = fw->rcvt1_data_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
	qla24xx_read_window(reg, 0x7730, 16, iter_reg);

	/* RISC registers. */
	iter_reg = fw->risc_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);

	/* Local memory controller registers. */
	iter_reg = fw->lmc_reg;
	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
	qla24xx_read_window(reg, 0x3070, 16, iter_reg);

	/* Fibre Protocol Module registers. */
	iter_reg = fw->fpm_hdw_reg;
	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
	qla24xx_read_window(reg, 0x40B0, 16, iter_reg);

	/* Frame Buffer registers. */
	iter_reg = fw->fb_hdw_reg;
	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
	qla24xx_read_window(reg, 0x6F00, 16, iter_reg);

	/* Multi queue registers */
	nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
	    &last_chain);

	rval = qla24xx_soft_reset(ha);
	if (rval != QLA_SUCCESS)
		goto qla25xx_fw_dump_failed_0;

	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
	    &nxt);
	if (rval != QLA_SUCCESS)
		goto qla25xx_fw_dump_failed_0;

	nxt = qla2xxx_copy_queues(ha, nxt);

	nxt = qla24xx_copy_eft(ha, nxt);

	/* Chain entries -- started with MQ. */
	qla25xx_copy_fce(ha, nxt_chain, &last_chain);
	if (last_chain) {
		ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
		*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
	}

qla25xx_fw_dump_failed_0:
	qla2xxx_dump_post_process(base_vha, rval);

qla25xx_fw_dump_failed:
	if (!hardware_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
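
/*
 * ISP81xx firmware dump: identical structure to qla25xx_fw_dump(), with
 * the wider FPM and frame-buffer register ranges of the 81xx parts.
 */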
void
qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
{
	int rval;
	uint32_t cnt;
	uint32_t risc_address;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
	uint32_t __iomem *dmp_reg;
	uint32_t *iter_reg;
	uint16_t __iomem *mbx_reg;
	unsigned long flags;
	struct qla81xx_fw_dump *fw;
	uint32_t ext_mem_cnt;
	void *nxt, *nxt_chain;
	uint32_t *last_chain = NULL;
	struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);

	risc_address = ext_mem_cnt = 0;
	flags = 0;

	if (!hardware_locked)
		spin_lock_irqsave(&ha->hardware_lock, flags);

	if (!ha->fw_dump) {
		qla_printk(KERN_WARNING, ha,
		    "No buffer available for dump!!!\n");
		goto qla81xx_fw_dump_failed;
	}

	if (ha->fw_dumped) {
		qla_printk(KERN_WARNING, ha,
		    "Firmware has been previously dumped (%p) -- ignoring "
		    "request...\n", ha->fw_dump);
		goto qla81xx_fw_dump_failed;
	}
	fw = &ha->fw_dump->isp.isp81;
	qla2xxx_prep_dump(ha, ha->fw_dump);

	fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));

	/* Pause RISC. */
	rval = qla24xx_pause_risc(reg);
	if (rval != QLA_SUCCESS)
		goto qla81xx_fw_dump_failed_0;

	/* Host/Risc registers. */
	iter_reg = fw->host_risc_reg;
	iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
	qla24xx_read_window(reg, 0x7010, 16, iter_reg);

	/* PCIe registers. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_window, 0x01);
	dmp_reg = &reg->iobase_c4;
	fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg++));
	fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg++));
	fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
	fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));

	WRT_REG_DWORD(&reg->iobase_window, 0x00);
	RD_REG_DWORD(&reg->iobase_window);

	/* Host interface registers. */
	dmp_reg = &reg->flash_addr;
	for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++)
		fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg++));

	/* Disable interrupts. */
	WRT_REG_DWORD(&reg->ictrl, 0);
	RD_REG_DWORD(&reg->ictrl);

	/* Shadow registers. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
	RD_REG_DWORD(&reg->iobase_addr);
	WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
	fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
	fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
	fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
	fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
	fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
	fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
	fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
	fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
	fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
	fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
	fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));

	/* RISC I/O register. */
	WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
	fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));

	/* Mailbox registers. */
	mbx_reg = &reg->mailbox0;
	for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++)
		fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg++));

	/* Transfer sequence registers. */
	iter_reg = fw->xseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xBF70, 16, iter_reg);

	iter_reg = fw->xseq_0_reg;
	iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
	qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);

	qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);

	/* Receive sequence registers. */
	iter_reg = fw->rseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
	qla24xx_read_window(reg, 0xFF70, 16, iter_reg);

	iter_reg = fw->rseq_0_reg;
	iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
	qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);

	qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
	qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);

	/* Auxiliary sequence registers. */
	iter_reg = fw->aseq_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
	qla24xx_read_window(reg, 0xB070, 16, iter_reg);

	iter_reg = fw->aseq_0_reg;
	iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
	qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);

	qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
	qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);

	/* Command DMA registers. */
	qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);

	/* Queues. */
	iter_reg = fw->req0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));

	iter_reg = fw->resp0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));

	iter_reg = fw->req1_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
	dmp_reg = &reg->iobase_q;
	for (cnt = 0; cnt < 7; cnt++)
		*iter_reg++ = htonl(RD_REG_DWORD(dmp_reg++));

	/* Transmit DMA registers. */
	iter_reg = fw->xmt0_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
	qla24xx_read_window(reg, 0x7610, 16, iter_reg);

	iter_reg = fw->xmt1_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
	qla24xx_read_window(reg, 0x7630, 16, iter_reg);

	iter_reg = fw->xmt2_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
	qla24xx_read_window(reg, 0x7650, 16, iter_reg);

	iter_reg = fw->xmt3_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
	qla24xx_read_window(reg, 0x7670, 16, iter_reg);

	iter_reg = fw->xmt4_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
	qla24xx_read_window(reg, 0x7690, 16, iter_reg);

	qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);

	/* Receive DMA registers. */
	iter_reg = fw->rcvt0_data_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
	qla24xx_read_window(reg, 0x7710, 16, iter_reg);

	iter_reg = fw->rcvt1_data_dma_reg;
	iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
	qla24xx_read_window(reg, 0x7730, 16, iter_reg);

	/* RISC registers. */
	iter_reg = fw->risc_gp_reg;
	iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
	qla24xx_read_window(reg, 0x0F70, 16, iter_reg);

	/* Local memory controller registers. */
	iter_reg = fw->lmc_reg;
	iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
	qla24xx_read_window(reg, 0x3070, 16, iter_reg);

	/* Fibre Protocol Module registers. */
	iter_reg = fw->fpm_hdw_reg;
	iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
	qla24xx_read_window(reg, 0x40D0, 16, iter_reg);

	/* Frame Buffer registers. */
	iter_reg = fw->fb_hdw_reg;
	iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
	iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
	qla24xx_read_window(reg, 0x6F00, 16, iter_reg);

	/* Multi queue registers */
	nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
	    &last_chain);

	rval = qla24xx_soft_reset(ha);
	if (rval != QLA_SUCCESS)
		goto qla81xx_fw_dump_failed_0;

	rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
	    &nxt);
	if (rval != QLA_SUCCESS)
		goto qla81xx_fw_dump_failed_0;

	nxt = qla2xxx_copy_queues(ha, nxt);

	nxt = qla24xx_copy_eft(ha, nxt);

	/* Chain entries -- started with MQ. */
	qla25xx_copy_fce(ha, nxt_chain, &last_chain);
	if (last_chain) {
		ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
		*last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
	}

qla81xx_fw_dump_failed_0:
	qla2xxx_dump_post_process(base_vha, rval);

qla81xx_fw_dump_failed:
	if (!hardware_locked)
		spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
/****************************************************************************/
/*                         Driver Debug Functions.                          */
/****************************************************************************/

void
qla2x00_dump_regs(scsi_qla_host_t *vha)
{
	int i;
	struct qla_hw_data *ha = vha->hw;
	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
	uint16_t __iomem *mbx_reg;

	mbx_reg = IS_FWI2_CAPABLE(ha) ? &reg24->mailbox0:
	    MAILBOX_REG(ha, reg, 0);

	printk("Mailbox registers:\n");
	for (i = 0; i < 6; i++)
		printk("scsi(%ld): mbox %d 0x%04x \n", vha->host_no, i,
		    RD_REG_WORD(mbx_reg++));
}
void
qla2x00_dump_buffer(uint8_t * b, uint32_t size)
{
	uint32_t cnt;
	uint8_t c;

	printk(" 0 1 2 3 4 5 6 7 8 9 "
	    "Ah Bh Ch Dh Eh Fh\n");
	printk("----------------------------------------"
	    "----------------------\n");

	for (cnt = 0; cnt < size;) {
		c = *b++;
		printk("%02x",(uint32_t) c);
		cnt++;
		if (!(cnt % 16))
			printk("\n");
		else
			printk(" ");
	}
	if (cnt % 16)
		printk("\n");
}
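
/*
 * "Zipped" hex dump: consecutive identical 16-byte lines are collapsed
 * into one line plus a repeat count, keeping large uniform buffers
 * readable in the log.
 */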
void
qla2x00_dump_buffer_zipped(uint8_t *b, uint32_t size)
{
	uint32_t cnt;
	uint8_t c;
	uint8_t last16[16], cur16[16];
	uint32_t lc = 0, num_same16 = 0, j;

	printk(KERN_DEBUG " 0 1 2 3 4 5 6 7 8 9 "
	    "Ah Bh Ch Dh Eh Fh\n");
	printk(KERN_DEBUG "----------------------------------------"
	    "----------------------\n");

	for (cnt = 0; cnt < size;) {
		c = *b++;

		cur16[lc++] = c;

		cnt++;
		if (cnt % 16)
			continue;

		/* We have 16 now */
		lc = 0;
		if (num_same16 == 0) {
			memcpy(last16, cur16, 16);
			num_same16++;
			continue;
		}
		if (memcmp(cur16, last16, 16) == 0) {
			num_same16++;
			continue;
		}
		for (j = 0; j < 16; j++)
			printk(KERN_DEBUG "%02x ", (uint32_t)last16[j]);
		printk(KERN_DEBUG "\n");

		if (num_same16 > 1)
			printk(KERN_DEBUG "> prev pattern repeats (%u)"
			    "more times\n", num_same16-1);
		memcpy(last16, cur16, 16);
		num_same16 = 1;
	}

	if (num_same16) {
		for (j = 0; j < 16; j++)
			printk(KERN_DEBUG "%02x ", (uint32_t)last16[j]);
		printk(KERN_DEBUG "\n");

		if (num_same16 > 1)
			printk(KERN_DEBUG "> prev pattern repeats (%u)"
			    "more times\n", num_same16-1);
	}
	if (lc) {
		for (j = 0; j < lc; j++)
			printk(KERN_DEBUG "%02x ", (uint32_t)cur16[j]);
		printk(KERN_DEBUG "\n");
	}
}