Linux 5.1.15
[linux/fpc-iii.git] / drivers / scsi / qla2xxx / qla_dbg.c
blobc7533fa7f46ebb93a8795238a6790b961a48113d
1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
8 /*
9 * Table for showing the current message id in use for particular level
10 * Change this table for addition of log/debug messages.
11 * ----------------------------------------------------------------------
12 * | Level | Last Value Used | Holes |
13 * ----------------------------------------------------------------------
14 * | Module Init and Probe | 0x0193 | 0x0146 |
15 * | | | 0x015b-0x0160 |
16 * | | | 0x016e |
17 * | Mailbox commands | 0x1206 | 0x11a2-0x11ff |
18 * | Device Discovery | 0x2134 | 0x210e-0x2116 |
19 * | | | 0x211a |
20 * | | | 0x211c-0x2128 |
21 * | | | 0x212a-0x2130 |
22 * | Queue Command and IO tracing | 0x3074 | 0x300b |
23 * | | | 0x3027-0x3028 |
24 * | | | 0x303d-0x3041 |
25 * | | | 0x302d,0x3033 |
26 * | | | 0x3036,0x3038 |
27 * | | | 0x303a |
28 * | DPC Thread | 0x4023 | 0x4002,0x4013 |
29 * | Async Events | 0x5090 | 0x502b-0x502f |
30 * | | | 0x5047 |
31 * | | | 0x5084,0x5075 |
32 * | | | 0x503d,0x5044 |
33 * | | | 0x505f |
34 * | Timer Routines | 0x6012 | |
35 * | User Space Interactions | 0x70e3 | 0x7018,0x702e |
36 * | | | 0x7020,0x7024 |
37 * | | | 0x7039,0x7045 |
38 * | | | 0x7073-0x7075 |
39 * | | | 0x70a5-0x70a6 |
40 * | | | 0x70a8,0x70ab |
41 * | | | 0x70ad-0x70ae |
42 * | | | 0x70d0-0x70d6 |
43 * | | | 0x70d7-0x70db |
44 * | Task Management | 0x8042 | 0x8000 |
45 * | | | 0x8019 |
46 * | | | 0x8025,0x8026 |
47 * | | | 0x8031,0x8032 |
48 * | | | 0x8039,0x803c |
49 * | AER/EEH | 0x9011 | |
50 * | Virtual Port | 0xa007 | |
51 * | ISP82XX Specific | 0xb157 | 0xb002,0xb024 |
52 * | | | 0xb09e,0xb0ae |
53 * | | | 0xb0c3,0xb0c6 |
54 * | | | 0xb0e0-0xb0ef |
55 * | | | 0xb085,0xb0dc |
56 * | | | 0xb107,0xb108 |
57 * | | | 0xb111,0xb11e |
58 * | | | 0xb12c,0xb12d |
59 * | | | 0xb13a,0xb142 |
60 * | | | 0xb13c-0xb140 |
61 * | | | 0xb149 |
62 * | MultiQ | 0xc010 | |
63 * | Misc | 0xd303 | 0xd031-0xd0ff |
64 * | | | 0xd101-0xd1fe |
65 * | | | 0xd214-0xd2fe |
66 * | Target Mode | 0xe081 | |
67 * | Target Mode Management | 0xf09b | 0xf002 |
68 * | | | 0xf046-0xf049 |
69 * | Target Mode Task Management | 0x1000d | |
70 * ----------------------------------------------------------------------
73 #include "qla_def.h"
75 #include <linux/delay.h>
77 static uint32_t ql_dbg_offset = 0x800;
79 static inline void
80 qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
82 fw_dump->fw_major_version = htonl(ha->fw_major_version);
83 fw_dump->fw_minor_version = htonl(ha->fw_minor_version);
84 fw_dump->fw_subminor_version = htonl(ha->fw_subminor_version);
85 fw_dump->fw_attributes = htonl(ha->fw_attributes);
87 fw_dump->vendor = htonl(ha->pdev->vendor);
88 fw_dump->device = htonl(ha->pdev->device);
89 fw_dump->subsystem_vendor = htonl(ha->pdev->subsystem_vendor);
90 fw_dump->subsystem_device = htonl(ha->pdev->subsystem_device);
93 static inline void *
94 qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)
96 struct req_que *req = ha->req_q_map[0];
97 struct rsp_que *rsp = ha->rsp_q_map[0];
98 /* Request queue. */
99 memcpy(ptr, req->ring, req->length *
100 sizeof(request_t));
102 /* Response queue. */
103 ptr += req->length * sizeof(request_t);
104 memcpy(ptr, rsp->ring, rsp->length *
105 sizeof(response_t));
107 return ptr + (rsp->length * sizeof(response_t));
111 qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
112 uint32_t ram_dwords, void **nxt)
114 int rval;
115 uint32_t cnt, stat, timer, dwords, idx;
116 uint16_t mb0;
117 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
118 dma_addr_t dump_dma = ha->gid_list_dma;
119 uint32_t *dump = (uint32_t *)ha->gid_list;
121 rval = QLA_SUCCESS;
122 mb0 = 0;
124 WRT_REG_WORD(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
125 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
127 dwords = qla2x00_gid_list_size(ha) / 4;
128 for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
129 cnt += dwords, addr += dwords) {
130 if (cnt + dwords > ram_dwords)
131 dwords = ram_dwords - cnt;
133 WRT_REG_WORD(&reg->mailbox1, LSW(addr));
134 WRT_REG_WORD(&reg->mailbox8, MSW(addr));
136 WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
137 WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
138 WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
139 WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
141 WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
142 WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
144 WRT_REG_WORD(&reg->mailbox9, 0);
145 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
147 ha->flags.mbox_int = 0;
148 for (timer = 6000000; timer; timer--) {
149 /* Check for pending interrupts. */
150 stat = RD_REG_DWORD(&reg->host_status);
151 if (stat & HSRX_RISC_INT) {
152 stat &= 0xff;
154 if (stat == 0x1 || stat == 0x2 ||
155 stat == 0x10 || stat == 0x11) {
156 set_bit(MBX_INTERRUPT,
157 &ha->mbx_cmd_flags);
159 mb0 = RD_REG_WORD(&reg->mailbox0);
160 RD_REG_WORD(&reg->mailbox1);
162 WRT_REG_DWORD(&reg->hccr,
163 HCCRX_CLR_RISC_INT);
164 RD_REG_DWORD(&reg->hccr);
165 break;
168 /* Clear this intr; it wasn't a mailbox intr */
169 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
170 RD_REG_DWORD(&reg->hccr);
172 udelay(5);
174 ha->flags.mbox_int = 1;
176 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
177 rval = mb0 & MBS_MASK;
178 for (idx = 0; idx < dwords; idx++)
179 ram[cnt + idx] = IS_QLA27XX(ha) ?
180 le32_to_cpu(dump[idx]) : swab32(dump[idx]);
181 } else {
182 rval = QLA_FUNCTION_FAILED;
186 *nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
187 return rval;
191 qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
192 uint32_t ram_dwords, void **nxt)
194 int rval;
195 uint32_t cnt, stat, timer, dwords, idx;
196 uint16_t mb0;
197 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
198 dma_addr_t dump_dma = ha->gid_list_dma;
199 uint32_t *dump = (uint32_t *)ha->gid_list;
201 rval = QLA_SUCCESS;
202 mb0 = 0;
204 WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
205 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
207 dwords = qla2x00_gid_list_size(ha) / 4;
208 for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
209 cnt += dwords, addr += dwords) {
210 if (cnt + dwords > ram_dwords)
211 dwords = ram_dwords - cnt;
213 WRT_REG_WORD(&reg->mailbox1, LSW(addr));
214 WRT_REG_WORD(&reg->mailbox8, MSW(addr));
216 WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
217 WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
218 WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
219 WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
221 WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
222 WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
223 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
225 ha->flags.mbox_int = 0;
226 for (timer = 6000000; timer; timer--) {
227 /* Check for pending interrupts. */
228 stat = RD_REG_DWORD(&reg->host_status);
229 if (stat & HSRX_RISC_INT) {
230 stat &= 0xff;
232 if (stat == 0x1 || stat == 0x2 ||
233 stat == 0x10 || stat == 0x11) {
234 set_bit(MBX_INTERRUPT,
235 &ha->mbx_cmd_flags);
237 mb0 = RD_REG_WORD(&reg->mailbox0);
239 WRT_REG_DWORD(&reg->hccr,
240 HCCRX_CLR_RISC_INT);
241 RD_REG_DWORD(&reg->hccr);
242 break;
245 /* Clear this intr; it wasn't a mailbox intr */
246 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
247 RD_REG_DWORD(&reg->hccr);
249 udelay(5);
251 ha->flags.mbox_int = 1;
253 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
254 rval = mb0 & MBS_MASK;
255 for (idx = 0; idx < dwords; idx++)
256 ram[cnt + idx] = IS_QLA27XX(ha) ?
257 le32_to_cpu(dump[idx]) : swab32(dump[idx]);
258 } else {
259 rval = QLA_FUNCTION_FAILED;
263 *nxt = rval == QLA_SUCCESS ? &ram[cnt]: NULL;
264 return rval;
267 static int
268 qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
269 uint32_t cram_size, void **nxt)
271 int rval;
273 /* Code RAM. */
274 rval = qla24xx_dump_ram(ha, 0x20000, code_ram, cram_size / 4, nxt);
275 if (rval != QLA_SUCCESS)
276 return rval;
278 set_bit(RISC_SRAM_DUMP_CMPL, &ha->fw_dump_cap_flags);
280 /* External Memory. */
281 rval = qla24xx_dump_ram(ha, 0x100000, *nxt,
282 ha->fw_memory_size - 0x100000 + 1, nxt);
283 if (rval == QLA_SUCCESS)
284 set_bit(RISC_EXT_MEM_DUMP_CMPL, &ha->fw_dump_cap_flags);
286 return rval;
289 static uint32_t *
290 qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
291 uint32_t count, uint32_t *buf)
293 uint32_t __iomem *dmp_reg;
295 WRT_REG_DWORD(&reg->iobase_addr, iobase);
296 dmp_reg = &reg->iobase_window;
297 for ( ; count--; dmp_reg++)
298 *buf++ = htonl(RD_REG_DWORD(dmp_reg));
300 return buf;
303 void
304 qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha)
306 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
308 /* 100 usec delay is sufficient enough for hardware to pause RISC */
309 udelay(100);
310 if (RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED)
311 set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags);
315 qla24xx_soft_reset(struct qla_hw_data *ha)
317 int rval = QLA_SUCCESS;
318 uint32_t cnt;
319 uint16_t wd;
320 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
323 * Reset RISC. The delay is dependent on system architecture.
324 * Driver can proceed with the reset sequence after waiting
325 * for a timeout period.
327 WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
328 for (cnt = 0; cnt < 30000; cnt++) {
329 if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
330 break;
332 udelay(10);
334 if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
335 set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
337 WRT_REG_DWORD(&reg->ctrl_status,
338 CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
339 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
341 udelay(100);
343 /* Wait for soft-reset to complete. */
344 for (cnt = 0; cnt < 30000; cnt++) {
345 if ((RD_REG_DWORD(&reg->ctrl_status) &
346 CSRX_ISP_SOFT_RESET) == 0)
347 break;
349 udelay(10);
351 if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
352 set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags);
354 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
355 RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
357 for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
358 rval == QLA_SUCCESS; cnt--) {
359 if (cnt)
360 udelay(10);
361 else
362 rval = QLA_FUNCTION_TIMEOUT;
364 if (rval == QLA_SUCCESS)
365 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
367 return rval;
370 static int
371 qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
372 uint32_t ram_words, void **nxt)
374 int rval;
375 uint32_t cnt, stat, timer, words, idx;
376 uint16_t mb0;
377 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
378 dma_addr_t dump_dma = ha->gid_list_dma;
379 uint16_t *dump = (uint16_t *)ha->gid_list;
381 rval = QLA_SUCCESS;
382 mb0 = 0;
384 WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED);
385 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
387 words = qla2x00_gid_list_size(ha) / 2;
388 for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS;
389 cnt += words, addr += words) {
390 if (cnt + words > ram_words)
391 words = ram_words - cnt;
393 WRT_MAILBOX_REG(ha, reg, 1, LSW(addr));
394 WRT_MAILBOX_REG(ha, reg, 8, MSW(addr));
396 WRT_MAILBOX_REG(ha, reg, 2, MSW(dump_dma));
397 WRT_MAILBOX_REG(ha, reg, 3, LSW(dump_dma));
398 WRT_MAILBOX_REG(ha, reg, 6, MSW(MSD(dump_dma)));
399 WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma)));
401 WRT_MAILBOX_REG(ha, reg, 4, words);
402 WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
404 for (timer = 6000000; timer; timer--) {
405 /* Check for pending interrupts. */
406 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
407 if (stat & HSR_RISC_INT) {
408 stat &= 0xff;
410 if (stat == 0x1 || stat == 0x2) {
411 set_bit(MBX_INTERRUPT,
412 &ha->mbx_cmd_flags);
414 mb0 = RD_MAILBOX_REG(ha, reg, 0);
416 /* Release mailbox registers. */
417 WRT_REG_WORD(&reg->semaphore, 0);
418 WRT_REG_WORD(&reg->hccr,
419 HCCR_CLR_RISC_INT);
420 RD_REG_WORD(&reg->hccr);
421 break;
422 } else if (stat == 0x10 || stat == 0x11) {
423 set_bit(MBX_INTERRUPT,
424 &ha->mbx_cmd_flags);
426 mb0 = RD_MAILBOX_REG(ha, reg, 0);
428 WRT_REG_WORD(&reg->hccr,
429 HCCR_CLR_RISC_INT);
430 RD_REG_WORD(&reg->hccr);
431 break;
434 /* clear this intr; it wasn't a mailbox intr */
435 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
436 RD_REG_WORD(&reg->hccr);
438 udelay(5);
441 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
442 rval = mb0 & MBS_MASK;
443 for (idx = 0; idx < words; idx++)
444 ram[cnt + idx] = swab16(dump[idx]);
445 } else {
446 rval = QLA_FUNCTION_FAILED;
450 *nxt = rval == QLA_SUCCESS ? &ram[cnt]: NULL;
451 return rval;
454 static inline void
455 qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
456 uint16_t *buf)
458 uint16_t __iomem *dmp_reg = &reg->u.isp2300.fb_cmd;
460 for ( ; count--; dmp_reg++)
461 *buf++ = htons(RD_REG_WORD(dmp_reg));
464 static inline void *
465 qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr)
467 if (!ha->eft)
468 return ptr;
470 memcpy(ptr, ha->eft, ntohl(ha->fw_dump->eft_size));
471 return ptr + ntohl(ha->fw_dump->eft_size);
474 static inline void *
475 qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
477 uint32_t cnt;
478 uint32_t *iter_reg;
479 struct qla2xxx_fce_chain *fcec = ptr;
481 if (!ha->fce)
482 return ptr;
484 *last_chain = &fcec->type;
485 fcec->type = htonl(DUMP_CHAIN_FCE);
486 fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
487 fce_calc_size(ha->fce_bufs));
488 fcec->size = htonl(fce_calc_size(ha->fce_bufs));
489 fcec->addr_l = htonl(LSD(ha->fce_dma));
490 fcec->addr_h = htonl(MSD(ha->fce_dma));
492 iter_reg = fcec->eregs;
493 for (cnt = 0; cnt < 8; cnt++)
494 *iter_reg++ = htonl(ha->fce_mb[cnt]);
496 memcpy(iter_reg, ha->fce, ntohl(fcec->size));
498 return (char *)iter_reg + ntohl(fcec->size);
501 static inline void *
502 qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
504 struct qla2xxx_offld_chain *c = ptr;
506 if (!ha->exlogin_buf)
507 return ptr;
509 *last_chain = &c->type;
511 c->type = cpu_to_be32(DUMP_CHAIN_EXLOGIN);
512 c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) +
513 ha->exlogin_size);
514 c->size = cpu_to_be32(ha->exlogin_size);
515 c->addr = cpu_to_be64(ha->exlogin_buf_dma);
517 ptr += sizeof(struct qla2xxx_offld_chain);
518 memcpy(ptr, ha->exlogin_buf, ha->exlogin_size);
520 return (char *)ptr + cpu_to_be32(c->size);
523 static inline void *
524 qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
526 struct qla2xxx_offld_chain *c = ptr;
528 if (!ha->exchoffld_buf)
529 return ptr;
531 *last_chain = &c->type;
533 c->type = cpu_to_be32(DUMP_CHAIN_EXCHG);
534 c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) +
535 ha->exchoffld_size);
536 c->size = cpu_to_be32(ha->exchoffld_size);
537 c->addr = cpu_to_be64(ha->exchoffld_buf_dma);
539 ptr += sizeof(struct qla2xxx_offld_chain);
540 memcpy(ptr, ha->exchoffld_buf, ha->exchoffld_size);
542 return (char *)ptr + cpu_to_be32(c->size);
545 static inline void *
546 qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
547 uint32_t **last_chain)
549 struct qla2xxx_mqueue_chain *q;
550 struct qla2xxx_mqueue_header *qh;
551 uint32_t num_queues;
552 int que;
553 struct {
554 int length;
555 void *ring;
556 } aq, *aqp;
558 if (!ha->tgt.atio_ring)
559 return ptr;
561 num_queues = 1;
562 aqp = &aq;
563 aqp->length = ha->tgt.atio_q_length;
564 aqp->ring = ha->tgt.atio_ring;
566 for (que = 0; que < num_queues; que++) {
567 /* aqp = ha->atio_q_map[que]; */
568 q = ptr;
569 *last_chain = &q->type;
570 q->type = htonl(DUMP_CHAIN_QUEUE);
571 q->chain_size = htonl(
572 sizeof(struct qla2xxx_mqueue_chain) +
573 sizeof(struct qla2xxx_mqueue_header) +
574 (aqp->length * sizeof(request_t)));
575 ptr += sizeof(struct qla2xxx_mqueue_chain);
577 /* Add header. */
578 qh = ptr;
579 qh->queue = htonl(TYPE_ATIO_QUEUE);
580 qh->number = htonl(que);
581 qh->size = htonl(aqp->length * sizeof(request_t));
582 ptr += sizeof(struct qla2xxx_mqueue_header);
584 /* Add data. */
585 memcpy(ptr, aqp->ring, aqp->length * sizeof(request_t));
587 ptr += aqp->length * sizeof(request_t);
590 return ptr;
593 static inline void *
594 qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
596 struct qla2xxx_mqueue_chain *q;
597 struct qla2xxx_mqueue_header *qh;
598 struct req_que *req;
599 struct rsp_que *rsp;
600 int que;
602 if (!ha->mqenable)
603 return ptr;
605 /* Request queues */
606 for (que = 1; que < ha->max_req_queues; que++) {
607 req = ha->req_q_map[que];
608 if (!req)
609 break;
611 /* Add chain. */
612 q = ptr;
613 *last_chain = &q->type;
614 q->type = htonl(DUMP_CHAIN_QUEUE);
615 q->chain_size = htonl(
616 sizeof(struct qla2xxx_mqueue_chain) +
617 sizeof(struct qla2xxx_mqueue_header) +
618 (req->length * sizeof(request_t)));
619 ptr += sizeof(struct qla2xxx_mqueue_chain);
621 /* Add header. */
622 qh = ptr;
623 qh->queue = htonl(TYPE_REQUEST_QUEUE);
624 qh->number = htonl(que);
625 qh->size = htonl(req->length * sizeof(request_t));
626 ptr += sizeof(struct qla2xxx_mqueue_header);
628 /* Add data. */
629 memcpy(ptr, req->ring, req->length * sizeof(request_t));
630 ptr += req->length * sizeof(request_t);
633 /* Response queues */
634 for (que = 1; que < ha->max_rsp_queues; que++) {
635 rsp = ha->rsp_q_map[que];
636 if (!rsp)
637 break;
639 /* Add chain. */
640 q = ptr;
641 *last_chain = &q->type;
642 q->type = htonl(DUMP_CHAIN_QUEUE);
643 q->chain_size = htonl(
644 sizeof(struct qla2xxx_mqueue_chain) +
645 sizeof(struct qla2xxx_mqueue_header) +
646 (rsp->length * sizeof(response_t)));
647 ptr += sizeof(struct qla2xxx_mqueue_chain);
649 /* Add header. */
650 qh = ptr;
651 qh->queue = htonl(TYPE_RESPONSE_QUEUE);
652 qh->number = htonl(que);
653 qh->size = htonl(rsp->length * sizeof(response_t));
654 ptr += sizeof(struct qla2xxx_mqueue_header);
656 /* Add data. */
657 memcpy(ptr, rsp->ring, rsp->length * sizeof(response_t));
658 ptr += rsp->length * sizeof(response_t);
661 return ptr;
664 static inline void *
665 qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
667 uint32_t cnt, que_idx;
668 uint8_t que_cnt;
669 struct qla2xxx_mq_chain *mq = ptr;
670 device_reg_t *reg;
672 if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
673 return ptr;
675 mq = ptr;
676 *last_chain = &mq->type;
677 mq->type = htonl(DUMP_CHAIN_MQ);
678 mq->chain_size = htonl(sizeof(struct qla2xxx_mq_chain));
680 que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
681 ha->max_req_queues : ha->max_rsp_queues;
682 mq->count = htonl(que_cnt);
683 for (cnt = 0; cnt < que_cnt; cnt++) {
684 reg = ISP_QUE_REG(ha, cnt);
685 que_idx = cnt * 4;
686 mq->qregs[que_idx] =
687 htonl(RD_REG_DWORD(&reg->isp25mq.req_q_in));
688 mq->qregs[que_idx+1] =
689 htonl(RD_REG_DWORD(&reg->isp25mq.req_q_out));
690 mq->qregs[que_idx+2] =
691 htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_in));
692 mq->qregs[que_idx+3] =
693 htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_out));
696 return ptr + sizeof(struct qla2xxx_mq_chain);
699 void
700 qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
702 struct qla_hw_data *ha = vha->hw;
704 if (rval != QLA_SUCCESS) {
705 ql_log(ql_log_warn, vha, 0xd000,
706 "Failed to dump firmware (%x), dump status flags (0x%lx).\n",
707 rval, ha->fw_dump_cap_flags);
708 ha->fw_dumped = 0;
709 } else {
710 ql_log(ql_log_info, vha, 0xd001,
711 "Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n",
712 vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags);
713 ha->fw_dumped = 1;
714 qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
719 * qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
720 * @vha: HA context
721 * @hardware_locked: Called with the hardware_lock
723 void
724 qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
726 int rval;
727 uint32_t cnt;
728 struct qla_hw_data *ha = vha->hw;
729 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
730 uint16_t __iomem *dmp_reg;
731 unsigned long flags;
732 struct qla2300_fw_dump *fw;
733 void *nxt;
734 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
736 flags = 0;
738 #ifndef __CHECKER__
739 if (!hardware_locked)
740 spin_lock_irqsave(&ha->hardware_lock, flags);
741 #endif
743 if (!ha->fw_dump) {
744 ql_log(ql_log_warn, vha, 0xd002,
745 "No buffer available for dump.\n");
746 goto qla2300_fw_dump_failed;
749 if (ha->fw_dumped) {
750 ql_log(ql_log_warn, vha, 0xd003,
751 "Firmware has been previously dumped (%p) "
752 "-- ignoring request.\n",
753 ha->fw_dump);
754 goto qla2300_fw_dump_failed;
756 fw = &ha->fw_dump->isp.isp23;
757 qla2xxx_prep_dump(ha, ha->fw_dump);
759 rval = QLA_SUCCESS;
760 fw->hccr = htons(RD_REG_WORD(&reg->hccr));
762 /* Pause RISC. */
763 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
764 if (IS_QLA2300(ha)) {
765 for (cnt = 30000;
766 (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
767 rval == QLA_SUCCESS; cnt--) {
768 if (cnt)
769 udelay(100);
770 else
771 rval = QLA_FUNCTION_TIMEOUT;
773 } else {
774 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
775 udelay(10);
778 if (rval == QLA_SUCCESS) {
779 dmp_reg = &reg->flash_address;
780 for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++)
781 fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
783 dmp_reg = &reg->u.isp2300.req_q_in;
784 for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2;
785 cnt++, dmp_reg++)
786 fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
788 dmp_reg = &reg->u.isp2300.mailbox0;
789 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2;
790 cnt++, dmp_reg++)
791 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
793 WRT_REG_WORD(&reg->ctrl_status, 0x40);
794 qla2xxx_read_window(reg, 32, fw->resp_dma_reg);
796 WRT_REG_WORD(&reg->ctrl_status, 0x50);
797 qla2xxx_read_window(reg, 48, fw->dma_reg);
799 WRT_REG_WORD(&reg->ctrl_status, 0x00);
800 dmp_reg = &reg->risc_hw;
801 for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2;
802 cnt++, dmp_reg++)
803 fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
805 WRT_REG_WORD(&reg->pcr, 0x2000);
806 qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
808 WRT_REG_WORD(&reg->pcr, 0x2200);
809 qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
811 WRT_REG_WORD(&reg->pcr, 0x2400);
812 qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
814 WRT_REG_WORD(&reg->pcr, 0x2600);
815 qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
817 WRT_REG_WORD(&reg->pcr, 0x2800);
818 qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
820 WRT_REG_WORD(&reg->pcr, 0x2A00);
821 qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
823 WRT_REG_WORD(&reg->pcr, 0x2C00);
824 qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
826 WRT_REG_WORD(&reg->pcr, 0x2E00);
827 qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
829 WRT_REG_WORD(&reg->ctrl_status, 0x10);
830 qla2xxx_read_window(reg, 64, fw->frame_buf_hdw_reg);
832 WRT_REG_WORD(&reg->ctrl_status, 0x20);
833 qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
835 WRT_REG_WORD(&reg->ctrl_status, 0x30);
836 qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
838 /* Reset RISC. */
839 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
840 for (cnt = 0; cnt < 30000; cnt++) {
841 if ((RD_REG_WORD(&reg->ctrl_status) &
842 CSR_ISP_SOFT_RESET) == 0)
843 break;
845 udelay(10);
849 if (!IS_QLA2300(ha)) {
850 for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
851 rval == QLA_SUCCESS; cnt--) {
852 if (cnt)
853 udelay(100);
854 else
855 rval = QLA_FUNCTION_TIMEOUT;
859 /* Get RISC SRAM. */
860 if (rval == QLA_SUCCESS)
861 rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram,
862 sizeof(fw->risc_ram) / 2, &nxt);
864 /* Get stack SRAM. */
865 if (rval == QLA_SUCCESS)
866 rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram,
867 sizeof(fw->stack_ram) / 2, &nxt);
869 /* Get data SRAM. */
870 if (rval == QLA_SUCCESS)
871 rval = qla2xxx_dump_ram(ha, 0x11000, fw->data_ram,
872 ha->fw_memory_size - 0x11000 + 1, &nxt);
874 if (rval == QLA_SUCCESS)
875 qla2xxx_copy_queues(ha, nxt);
877 qla2xxx_dump_post_process(base_vha, rval);
879 qla2300_fw_dump_failed:
880 #ifndef __CHECKER__
881 if (!hardware_locked)
882 spin_unlock_irqrestore(&ha->hardware_lock, flags);
883 #else
885 #endif
889 * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
890 * @vha: HA context
891 * @hardware_locked: Called with the hardware_lock
893 void
894 qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
896 int rval;
897 uint32_t cnt, timer;
898 uint16_t risc_address;
899 uint16_t mb0, mb2;
900 struct qla_hw_data *ha = vha->hw;
901 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
902 uint16_t __iomem *dmp_reg;
903 unsigned long flags;
904 struct qla2100_fw_dump *fw;
905 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
907 risc_address = 0;
908 mb0 = mb2 = 0;
909 flags = 0;
911 #ifndef __CHECKER__
912 if (!hardware_locked)
913 spin_lock_irqsave(&ha->hardware_lock, flags);
914 #endif
916 if (!ha->fw_dump) {
917 ql_log(ql_log_warn, vha, 0xd004,
918 "No buffer available for dump.\n");
919 goto qla2100_fw_dump_failed;
922 if (ha->fw_dumped) {
923 ql_log(ql_log_warn, vha, 0xd005,
924 "Firmware has been previously dumped (%p) "
925 "-- ignoring request.\n",
926 ha->fw_dump);
927 goto qla2100_fw_dump_failed;
929 fw = &ha->fw_dump->isp.isp21;
930 qla2xxx_prep_dump(ha, ha->fw_dump);
932 rval = QLA_SUCCESS;
933 fw->hccr = htons(RD_REG_WORD(&reg->hccr));
935 /* Pause RISC. */
936 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
937 for (cnt = 30000; (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
938 rval == QLA_SUCCESS; cnt--) {
939 if (cnt)
940 udelay(100);
941 else
942 rval = QLA_FUNCTION_TIMEOUT;
944 if (rval == QLA_SUCCESS) {
945 dmp_reg = &reg->flash_address;
946 for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++)
947 fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
949 dmp_reg = &reg->u.isp2100.mailbox0;
950 for (cnt = 0; cnt < ha->mbx_count; cnt++, dmp_reg++) {
951 if (cnt == 8)
952 dmp_reg = &reg->u_end.isp2200.mailbox8;
954 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
957 dmp_reg = &reg->u.isp2100.unused_2[0];
958 for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++, dmp_reg++)
959 fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
961 WRT_REG_WORD(&reg->ctrl_status, 0x00);
962 dmp_reg = &reg->risc_hw;
963 for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++, dmp_reg++)
964 fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
966 WRT_REG_WORD(&reg->pcr, 0x2000);
967 qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
969 WRT_REG_WORD(&reg->pcr, 0x2100);
970 qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
972 WRT_REG_WORD(&reg->pcr, 0x2200);
973 qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
975 WRT_REG_WORD(&reg->pcr, 0x2300);
976 qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
978 WRT_REG_WORD(&reg->pcr, 0x2400);
979 qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
981 WRT_REG_WORD(&reg->pcr, 0x2500);
982 qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
984 WRT_REG_WORD(&reg->pcr, 0x2600);
985 qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
987 WRT_REG_WORD(&reg->pcr, 0x2700);
988 qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
990 WRT_REG_WORD(&reg->ctrl_status, 0x10);
991 qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg);
993 WRT_REG_WORD(&reg->ctrl_status, 0x20);
994 qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
996 WRT_REG_WORD(&reg->ctrl_status, 0x30);
997 qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
999 /* Reset the ISP. */
1000 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
1003 for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
1004 rval == QLA_SUCCESS; cnt--) {
1005 if (cnt)
1006 udelay(100);
1007 else
1008 rval = QLA_FUNCTION_TIMEOUT;
1011 /* Pause RISC. */
1012 if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) &&
1013 (RD_REG_WORD(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) {
1015 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
1016 for (cnt = 30000;
1017 (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
1018 rval == QLA_SUCCESS; cnt--) {
1019 if (cnt)
1020 udelay(100);
1021 else
1022 rval = QLA_FUNCTION_TIMEOUT;
1024 if (rval == QLA_SUCCESS) {
1025 /* Set memory configuration and timing. */
1026 if (IS_QLA2100(ha))
1027 WRT_REG_WORD(&reg->mctr, 0xf1);
1028 else
1029 WRT_REG_WORD(&reg->mctr, 0xf2);
1030 RD_REG_WORD(&reg->mctr); /* PCI Posting. */
1032 /* Release RISC. */
1033 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
1037 if (rval == QLA_SUCCESS) {
1038 /* Get RISC SRAM. */
1039 risc_address = 0x1000;
1040 WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
1041 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1043 for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS;
1044 cnt++, risc_address++) {
1045 WRT_MAILBOX_REG(ha, reg, 1, risc_address);
1046 WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
1048 for (timer = 6000000; timer != 0; timer--) {
1049 /* Check for pending interrupts. */
1050 if (RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) {
1051 if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
1052 set_bit(MBX_INTERRUPT,
1053 &ha->mbx_cmd_flags);
1055 mb0 = RD_MAILBOX_REG(ha, reg, 0);
1056 mb2 = RD_MAILBOX_REG(ha, reg, 2);
1058 WRT_REG_WORD(&reg->semaphore, 0);
1059 WRT_REG_WORD(&reg->hccr,
1060 HCCR_CLR_RISC_INT);
1061 RD_REG_WORD(&reg->hccr);
1062 break;
1064 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
1065 RD_REG_WORD(&reg->hccr);
1067 udelay(5);
1070 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
1071 rval = mb0 & MBS_MASK;
1072 fw->risc_ram[cnt] = htons(mb2);
1073 } else {
1074 rval = QLA_FUNCTION_FAILED;
1078 if (rval == QLA_SUCCESS)
1079 qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]);
1081 qla2xxx_dump_post_process(base_vha, rval);
1083 qla2100_fw_dump_failed:
1084 #ifndef __CHECKER__
1085 if (!hardware_locked)
1086 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1087 #else
1089 #endif
1092 void
1093 qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1095 int rval;
1096 uint32_t cnt;
1097 struct qla_hw_data *ha = vha->hw;
1098 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1099 uint32_t __iomem *dmp_reg;
1100 uint32_t *iter_reg;
1101 uint16_t __iomem *mbx_reg;
1102 unsigned long flags;
1103 struct qla24xx_fw_dump *fw;
1104 void *nxt;
1105 void *nxt_chain;
1106 uint32_t *last_chain = NULL;
1107 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1109 if (IS_P3P_TYPE(ha))
1110 return;
1112 flags = 0;
1113 ha->fw_dump_cap_flags = 0;
1115 #ifndef __CHECKER__
1116 if (!hardware_locked)
1117 spin_lock_irqsave(&ha->hardware_lock, flags);
1118 #endif
1120 if (!ha->fw_dump) {
1121 ql_log(ql_log_warn, vha, 0xd006,
1122 "No buffer available for dump.\n");
1123 goto qla24xx_fw_dump_failed;
1126 if (ha->fw_dumped) {
1127 ql_log(ql_log_warn, vha, 0xd007,
1128 "Firmware has been previously dumped (%p) "
1129 "-- ignoring request.\n",
1130 ha->fw_dump);
1131 goto qla24xx_fw_dump_failed;
1133 QLA_FW_STOPPED(ha);
1134 fw = &ha->fw_dump->isp.isp24;
1135 qla2xxx_prep_dump(ha, ha->fw_dump);
1137 fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1140 * Pause RISC. No need to track timeout, as resetting the chip
1141 * is the right approach incase of pause timeout
1143 qla24xx_pause_risc(reg, ha);
1145 /* Host interface registers. */
1146 dmp_reg = &reg->flash_addr;
1147 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
1148 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
1150 /* Disable interrupts. */
1151 WRT_REG_DWORD(&reg->ictrl, 0);
1152 RD_REG_DWORD(&reg->ictrl);
1154 /* Shadow registers. */
1155 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1156 RD_REG_DWORD(&reg->iobase_addr);
1157 WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
1158 fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1160 WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
1161 fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1163 WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
1164 fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1166 WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
1167 fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1169 WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
1170 fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1172 WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
1173 fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1175 WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
1176 fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1178 /* Mailbox registers. */
1179 mbx_reg = &reg->mailbox0;
1180 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
1181 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
1183 /* Transfer sequence registers. */
1184 iter_reg = fw->xseq_gp_reg;
1185 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1186 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1187 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1188 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1189 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1190 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1191 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1192 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1194 qla24xx_read_window(reg, 0xBFE0, 16, fw->xseq_0_reg);
1195 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1197 /* Receive sequence registers. */
1198 iter_reg = fw->rseq_gp_reg;
1199 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1200 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1201 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1202 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1203 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1204 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1205 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1206 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1208 qla24xx_read_window(reg, 0xFFD0, 16, fw->rseq_0_reg);
1209 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1210 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1212 /* Command DMA registers. */
1213 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1215 /* Queues. */
1216 iter_reg = fw->req0_dma_reg;
1217 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1218 dmp_reg = &reg->iobase_q;
1219 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1220 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1222 iter_reg = fw->resp0_dma_reg;
1223 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1224 dmp_reg = &reg->iobase_q;
1225 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1226 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1228 iter_reg = fw->req1_dma_reg;
1229 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1230 dmp_reg = &reg->iobase_q;
1231 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1232 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1234 /* Transmit DMA registers. */
1235 iter_reg = fw->xmt0_dma_reg;
1236 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1237 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1239 iter_reg = fw->xmt1_dma_reg;
1240 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1241 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1243 iter_reg = fw->xmt2_dma_reg;
1244 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1245 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1247 iter_reg = fw->xmt3_dma_reg;
1248 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1249 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1251 iter_reg = fw->xmt4_dma_reg;
1252 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1253 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1255 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1257 /* Receive DMA registers. */
1258 iter_reg = fw->rcvt0_data_dma_reg;
1259 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1260 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1262 iter_reg = fw->rcvt1_data_dma_reg;
1263 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1264 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1266 /* RISC registers. */
1267 iter_reg = fw->risc_gp_reg;
1268 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1269 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1270 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1271 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1272 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1273 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1274 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1275 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1277 /* Local memory controller registers. */
1278 iter_reg = fw->lmc_reg;
1279 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1280 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1281 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1282 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1283 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1284 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1285 qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1287 /* Fibre Protocol Module registers. */
1288 iter_reg = fw->fpm_hdw_reg;
1289 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1290 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1291 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1292 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1293 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1294 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1295 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1296 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1297 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1298 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1299 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1300 qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1302 /* Frame Buffer registers. */
1303 iter_reg = fw->fb_hdw_reg;
1304 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1305 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1306 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1307 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1308 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1309 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1310 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1311 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1312 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1313 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1314 qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1316 rval = qla24xx_soft_reset(ha);
1317 if (rval != QLA_SUCCESS)
1318 goto qla24xx_fw_dump_failed_0;
1320 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1321 &nxt);
1322 if (rval != QLA_SUCCESS)
1323 goto qla24xx_fw_dump_failed_0;
1325 nxt = qla2xxx_copy_queues(ha, nxt);
1327 qla24xx_copy_eft(ha, nxt);
1329 nxt_chain = (void *)ha->fw_dump + ha->chain_offset;
1330 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1331 if (last_chain) {
1332 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1333 *last_chain |= htonl(DUMP_CHAIN_LAST);
1336 /* Adjust valid length. */
1337 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1339 qla24xx_fw_dump_failed_0:
1340 qla2xxx_dump_post_process(base_vha, rval);
1342 qla24xx_fw_dump_failed:
1343 #ifndef __CHECKER__
1344 if (!hardware_locked)
1345 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1346 #else
1348 #endif
1351 void
1352 qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1354 int rval;
1355 uint32_t cnt;
1356 struct qla_hw_data *ha = vha->hw;
1357 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1358 uint32_t __iomem *dmp_reg;
1359 uint32_t *iter_reg;
1360 uint16_t __iomem *mbx_reg;
1361 unsigned long flags;
1362 struct qla25xx_fw_dump *fw;
1363 void *nxt, *nxt_chain;
1364 uint32_t *last_chain = NULL;
1365 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1367 flags = 0;
1368 ha->fw_dump_cap_flags = 0;
1370 #ifndef __CHECKER__
1371 if (!hardware_locked)
1372 spin_lock_irqsave(&ha->hardware_lock, flags);
1373 #endif
1375 if (!ha->fw_dump) {
1376 ql_log(ql_log_warn, vha, 0xd008,
1377 "No buffer available for dump.\n");
1378 goto qla25xx_fw_dump_failed;
1381 if (ha->fw_dumped) {
1382 ql_log(ql_log_warn, vha, 0xd009,
1383 "Firmware has been previously dumped (%p) "
1384 "-- ignoring request.\n",
1385 ha->fw_dump);
1386 goto qla25xx_fw_dump_failed;
1388 QLA_FW_STOPPED(ha);
1389 fw = &ha->fw_dump->isp.isp25;
1390 qla2xxx_prep_dump(ha, ha->fw_dump);
1391 ha->fw_dump->version = htonl(2);
1393 fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1396 * Pause RISC. No need to track timeout, as resetting the chip
1397 * is the right approach incase of pause timeout
1399 qla24xx_pause_risc(reg, ha);
1401 /* Host/Risc registers. */
1402 iter_reg = fw->host_risc_reg;
1403 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
1404 qla24xx_read_window(reg, 0x7010, 16, iter_reg);
1406 /* PCIe registers. */
1407 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1408 RD_REG_DWORD(&reg->iobase_addr);
1409 WRT_REG_DWORD(&reg->iobase_window, 0x01);
1410 dmp_reg = &reg->iobase_c4;
1411 fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
1412 dmp_reg++;
1413 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
1414 dmp_reg++;
1415 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
1416 fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
1418 WRT_REG_DWORD(&reg->iobase_window, 0x00);
1419 RD_REG_DWORD(&reg->iobase_window);
1421 /* Host interface registers. */
1422 dmp_reg = &reg->flash_addr;
1423 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
1424 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
1426 /* Disable interrupts. */
1427 WRT_REG_DWORD(&reg->ictrl, 0);
1428 RD_REG_DWORD(&reg->ictrl);
1430 /* Shadow registers. */
1431 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1432 RD_REG_DWORD(&reg->iobase_addr);
1433 WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
1434 fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1436 WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
1437 fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1439 WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
1440 fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1442 WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
1443 fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1445 WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
1446 fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1448 WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
1449 fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1451 WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
1452 fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1454 WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
1455 fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1457 WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
1458 fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1460 WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
1461 fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1463 WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
1464 fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1466 /* RISC I/O register. */
1467 WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
1468 fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
1470 /* Mailbox registers. */
1471 mbx_reg = &reg->mailbox0;
1472 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
1473 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
1475 /* Transfer sequence registers. */
1476 iter_reg = fw->xseq_gp_reg;
1477 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1478 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1479 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1480 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1481 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1482 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1483 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1484 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1486 iter_reg = fw->xseq_0_reg;
1487 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
1488 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
1489 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
1491 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1493 /* Receive sequence registers. */
1494 iter_reg = fw->rseq_gp_reg;
1495 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1496 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1497 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1498 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1499 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1500 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1501 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1502 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1504 iter_reg = fw->rseq_0_reg;
1505 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
1506 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
1508 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1509 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1511 /* Auxiliary sequence registers. */
1512 iter_reg = fw->aseq_gp_reg;
1513 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
1514 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
1515 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
1516 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
1517 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
1518 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
1519 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
1520 qla24xx_read_window(reg, 0xB070, 16, iter_reg);
1522 iter_reg = fw->aseq_0_reg;
1523 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
1524 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
1526 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
1527 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
1529 /* Command DMA registers. */
1530 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1532 /* Queues. */
1533 iter_reg = fw->req0_dma_reg;
1534 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1535 dmp_reg = &reg->iobase_q;
1536 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1537 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1539 iter_reg = fw->resp0_dma_reg;
1540 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1541 dmp_reg = &reg->iobase_q;
1542 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1543 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1545 iter_reg = fw->req1_dma_reg;
1546 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1547 dmp_reg = &reg->iobase_q;
1548 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1549 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1551 /* Transmit DMA registers. */
1552 iter_reg = fw->xmt0_dma_reg;
1553 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1554 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1556 iter_reg = fw->xmt1_dma_reg;
1557 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1558 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1560 iter_reg = fw->xmt2_dma_reg;
1561 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1562 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1564 iter_reg = fw->xmt3_dma_reg;
1565 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1566 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1568 iter_reg = fw->xmt4_dma_reg;
1569 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1570 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1572 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1574 /* Receive DMA registers. */
1575 iter_reg = fw->rcvt0_data_dma_reg;
1576 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1577 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1579 iter_reg = fw->rcvt1_data_dma_reg;
1580 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1581 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1583 /* RISC registers. */
1584 iter_reg = fw->risc_gp_reg;
1585 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1586 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1587 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1588 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1589 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1590 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1591 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1592 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1594 /* Local memory controller registers. */
1595 iter_reg = fw->lmc_reg;
1596 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1597 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1598 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1599 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1600 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1601 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1602 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1603 qla24xx_read_window(reg, 0x3070, 16, iter_reg);
1605 /* Fibre Protocol Module registers. */
1606 iter_reg = fw->fpm_hdw_reg;
1607 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1608 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1609 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1610 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1611 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1612 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1613 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1614 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1615 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1616 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1617 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1618 qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1620 /* Frame Buffer registers. */
1621 iter_reg = fw->fb_hdw_reg;
1622 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1623 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1624 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1625 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1626 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1627 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1628 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1629 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1630 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1631 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1632 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1633 qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
1635 /* Multi queue registers */
1636 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
1637 &last_chain);
1639 rval = qla24xx_soft_reset(ha);
1640 if (rval != QLA_SUCCESS)
1641 goto qla25xx_fw_dump_failed_0;
1643 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1644 &nxt);
1645 if (rval != QLA_SUCCESS)
1646 goto qla25xx_fw_dump_failed_0;
1648 nxt = qla2xxx_copy_queues(ha, nxt);
1650 qla24xx_copy_eft(ha, nxt);
1652 /* Chain entries -- started with MQ. */
1653 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1654 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
1655 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1656 nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
1657 if (last_chain) {
1658 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1659 *last_chain |= htonl(DUMP_CHAIN_LAST);
1662 /* Adjust valid length. */
1663 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1665 qla25xx_fw_dump_failed_0:
1666 qla2xxx_dump_post_process(base_vha, rval);
1668 qla25xx_fw_dump_failed:
1669 #ifndef __CHECKER__
1670 if (!hardware_locked)
1671 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1672 #else
1674 #endif
1677 void
1678 qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1680 int rval;
1681 uint32_t cnt;
1682 struct qla_hw_data *ha = vha->hw;
1683 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1684 uint32_t __iomem *dmp_reg;
1685 uint32_t *iter_reg;
1686 uint16_t __iomem *mbx_reg;
1687 unsigned long flags;
1688 struct qla81xx_fw_dump *fw;
1689 void *nxt, *nxt_chain;
1690 uint32_t *last_chain = NULL;
1691 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1693 flags = 0;
1694 ha->fw_dump_cap_flags = 0;
1696 #ifndef __CHECKER__
1697 if (!hardware_locked)
1698 spin_lock_irqsave(&ha->hardware_lock, flags);
1699 #endif
1701 if (!ha->fw_dump) {
1702 ql_log(ql_log_warn, vha, 0xd00a,
1703 "No buffer available for dump.\n");
1704 goto qla81xx_fw_dump_failed;
1707 if (ha->fw_dumped) {
1708 ql_log(ql_log_warn, vha, 0xd00b,
1709 "Firmware has been previously dumped (%p) "
1710 "-- ignoring request.\n",
1711 ha->fw_dump);
1712 goto qla81xx_fw_dump_failed;
1714 fw = &ha->fw_dump->isp.isp81;
1715 qla2xxx_prep_dump(ha, ha->fw_dump);
1717 fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1720 * Pause RISC. No need to track timeout, as resetting the chip
1721 * is the right approach incase of pause timeout
1723 qla24xx_pause_risc(reg, ha);
1725 /* Host/Risc registers. */
1726 iter_reg = fw->host_risc_reg;
1727 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
1728 qla24xx_read_window(reg, 0x7010, 16, iter_reg);
1730 /* PCIe registers. */
1731 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1732 RD_REG_DWORD(&reg->iobase_addr);
1733 WRT_REG_DWORD(&reg->iobase_window, 0x01);
1734 dmp_reg = &reg->iobase_c4;
1735 fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
1736 dmp_reg++;
1737 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
1738 dmp_reg++;
1739 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
1740 fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
1742 WRT_REG_DWORD(&reg->iobase_window, 0x00);
1743 RD_REG_DWORD(&reg->iobase_window);
1745 /* Host interface registers. */
1746 dmp_reg = &reg->flash_addr;
1747 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
1748 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
1750 /* Disable interrupts. */
1751 WRT_REG_DWORD(&reg->ictrl, 0);
1752 RD_REG_DWORD(&reg->ictrl);
1754 /* Shadow registers. */
1755 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1756 RD_REG_DWORD(&reg->iobase_addr);
1757 WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
1758 fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1760 WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
1761 fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1763 WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
1764 fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1766 WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
1767 fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1769 WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
1770 fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1772 WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
1773 fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1775 WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
1776 fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1778 WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
1779 fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1781 WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
1782 fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1784 WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
1785 fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1787 WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
1788 fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1790 /* RISC I/O register. */
1791 WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
1792 fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
1794 /* Mailbox registers. */
1795 mbx_reg = &reg->mailbox0;
1796 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
1797 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
1799 /* Transfer sequence registers. */
1800 iter_reg = fw->xseq_gp_reg;
1801 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1802 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1803 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1804 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1805 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1806 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1807 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1808 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1810 iter_reg = fw->xseq_0_reg;
1811 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
1812 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
1813 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
1815 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1817 /* Receive sequence registers. */
1818 iter_reg = fw->rseq_gp_reg;
1819 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1820 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1821 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1822 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1823 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1824 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1825 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1826 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1828 iter_reg = fw->rseq_0_reg;
1829 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
1830 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
1832 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1833 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1835 /* Auxiliary sequence registers. */
1836 iter_reg = fw->aseq_gp_reg;
1837 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
1838 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
1839 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
1840 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
1841 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
1842 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
1843 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
1844 qla24xx_read_window(reg, 0xB070, 16, iter_reg);
1846 iter_reg = fw->aseq_0_reg;
1847 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
1848 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
1850 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
1851 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
1853 /* Command DMA registers. */
1854 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1856 /* Queues. */
1857 iter_reg = fw->req0_dma_reg;
1858 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1859 dmp_reg = &reg->iobase_q;
1860 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1861 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1863 iter_reg = fw->resp0_dma_reg;
1864 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1865 dmp_reg = &reg->iobase_q;
1866 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1867 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1869 iter_reg = fw->req1_dma_reg;
1870 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1871 dmp_reg = &reg->iobase_q;
1872 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1873 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1875 /* Transmit DMA registers. */
1876 iter_reg = fw->xmt0_dma_reg;
1877 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1878 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1880 iter_reg = fw->xmt1_dma_reg;
1881 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1882 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1884 iter_reg = fw->xmt2_dma_reg;
1885 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1886 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1888 iter_reg = fw->xmt3_dma_reg;
1889 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1890 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1892 iter_reg = fw->xmt4_dma_reg;
1893 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1894 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1896 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1898 /* Receive DMA registers. */
1899 iter_reg = fw->rcvt0_data_dma_reg;
1900 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1901 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1903 iter_reg = fw->rcvt1_data_dma_reg;
1904 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1905 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1907 /* RISC registers. */
1908 iter_reg = fw->risc_gp_reg;
1909 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1910 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1911 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1912 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1913 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1914 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1915 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1916 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1918 /* Local memory controller registers. */
1919 iter_reg = fw->lmc_reg;
1920 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1921 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1922 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1923 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1924 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1925 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1926 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1927 qla24xx_read_window(reg, 0x3070, 16, iter_reg);
1929 /* Fibre Protocol Module registers. */
1930 iter_reg = fw->fpm_hdw_reg;
1931 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1932 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1933 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1934 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1935 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1936 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1937 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1938 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1939 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1940 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1941 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1942 iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1943 iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
1944 qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
1946 /* Frame Buffer registers. */
1947 iter_reg = fw->fb_hdw_reg;
1948 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1949 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1950 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1951 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1952 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1953 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1954 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1955 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1956 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1957 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1958 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1959 iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
1960 qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
1962 /* Multi queue registers */
1963 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
1964 &last_chain);
1966 rval = qla24xx_soft_reset(ha);
1967 if (rval != QLA_SUCCESS)
1968 goto qla81xx_fw_dump_failed_0;
1970 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1971 &nxt);
1972 if (rval != QLA_SUCCESS)
1973 goto qla81xx_fw_dump_failed_0;
1975 nxt = qla2xxx_copy_queues(ha, nxt);
1977 qla24xx_copy_eft(ha, nxt);
1979 /* Chain entries -- started with MQ. */
1980 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1981 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
1982 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1983 nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
1984 nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain);
1985 if (last_chain) {
1986 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1987 *last_chain |= htonl(DUMP_CHAIN_LAST);
1988 }
1990 /* Adjust valid length. */
1991 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1993 qla81xx_fw_dump_failed_0:
1994 qla2xxx_dump_post_process(base_vha, rval);
1996 qla81xx_fw_dump_failed:
1997 #ifndef __CHECKER__
1998 if (!hardware_locked)
1999 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2000 #else
2001 ;
2002 #endif
2003 }
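/*
 * Illustrative sketch of the banked-register access pattern used by the
 * firmware-dump routines in this file: qla24xx_read_window() (defined
 * earlier in qla_dbg.c) is assumed, based on how it is called here, to
 * select a bank by writing its base address to iobase_addr and then copy
 * 'count' dwords out of the I/O window in big-endian dump order.  The
 * helper name below is hypothetical and exists only to show the pattern.
 */
static inline uint32_t *
qla_dbg_read_window_sketch(struct device_reg_24xx __iomem *reg,
    uint32_t iobase, uint32_t count, uint32_t *buf)
{
	uint32_t __iomem *dmp_reg;

	/* Expose the requested register bank through the I/O window. */
	WRT_REG_DWORD(&reg->iobase_addr, iobase);

	/* Copy 'count' dwords from the window, byte-swapped for the dump. */
	for (dmp_reg = &reg->iobase_window; count--; dmp_reg++)
		*buf++ = htonl(RD_REG_DWORD(dmp_reg));

	return buf;
}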
2005 void
2006 qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
2007 {
2008 int rval;
2009 uint32_t cnt;
2010 struct qla_hw_data *ha = vha->hw;
2011 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2012 uint32_t __iomem *dmp_reg;
2013 uint32_t *iter_reg;
2014 uint16_t __iomem *mbx_reg;
2015 unsigned long flags;
2016 struct qla83xx_fw_dump *fw;
2017 void *nxt, *nxt_chain;
2018 uint32_t *last_chain = NULL;
2019 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2021 flags = 0;
2022 ha->fw_dump_cap_flags = 0;
2024 #ifndef __CHECKER__
2025 if (!hardware_locked)
2026 spin_lock_irqsave(&ha->hardware_lock, flags);
2027 #endif
2029 if (!ha->fw_dump) {
2030 ql_log(ql_log_warn, vha, 0xd00c,
2031 "No buffer available for dump!!!\n");
2032 goto qla83xx_fw_dump_failed;
2033 }
2035 if (ha->fw_dumped) {
2036 ql_log(ql_log_warn, vha, 0xd00d,
2037 "Firmware has been previously dumped (%p) -- ignoring "
2038 "request...\n", ha->fw_dump);
2039 goto qla83xx_fw_dump_failed;
2040 }
2041 QLA_FW_STOPPED(ha);
2042 fw = &ha->fw_dump->isp.isp83;
2043 qla2xxx_prep_dump(ha, ha->fw_dump);
2045 fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
2047 /*
2048 * Pause RISC. No need to track timeout, as resetting the chip
2049 * is the right approach in case of a pause timeout.
2050 */
2051 qla24xx_pause_risc(reg, ha);
2053 WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
2054 dmp_reg = &reg->iobase_window;
2055 RD_REG_DWORD(dmp_reg);
2056 WRT_REG_DWORD(dmp_reg, 0);
2058 dmp_reg = &reg->unused_4_1[0];
2059 RD_REG_DWORD(dmp_reg);
2060 WRT_REG_DWORD(dmp_reg, 0);
2062 WRT_REG_DWORD(&reg->iobase_addr, 0x6010);
2063 dmp_reg = &reg->unused_4_1[2];
2064 RD_REG_DWORD(dmp_reg);
2065 WRT_REG_DWORD(dmp_reg, 0);
2067 /* select PCR and disable ecc checking and correction */
2068 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
2069 RD_REG_DWORD(&reg->iobase_addr);
2070 WRT_REG_DWORD(&reg->iobase_select, 0x60000000); /* write to F0h = PCR */
2072 /* Host/Risc registers. */
2073 iter_reg = fw->host_risc_reg;
2074 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
2075 iter_reg = qla24xx_read_window(reg, 0x7010, 16, iter_reg);
2076 qla24xx_read_window(reg, 0x7040, 16, iter_reg);
2078 /* PCIe registers. */
2079 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
2080 RD_REG_DWORD(&reg->iobase_addr);
2081 WRT_REG_DWORD(&reg->iobase_window, 0x01);
2082 dmp_reg = &reg->iobase_c4;
2083 fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
2084 dmp_reg++;
2085 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
2086 dmp_reg++;
2087 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
2088 fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
2090 WRT_REG_DWORD(&reg->iobase_window, 0x00);
2091 RD_REG_DWORD(&reg->iobase_window);
2093 /* Host interface registers. */
2094 dmp_reg = &reg->flash_addr;
2095 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
2096 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
2098 /* Disable interrupts. */
2099 WRT_REG_DWORD(&reg->ictrl, 0);
2100 RD_REG_DWORD(&reg->ictrl);
2102 /* Shadow registers. */
2103 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
2104 RD_REG_DWORD(&reg->iobase_addr);
2105 WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
2106 fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2108 WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
2109 fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2111 WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
2112 fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2114 WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
2115 fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2117 WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
2118 fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2120 WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
2121 fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2123 WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
2124 fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2126 WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
2127 fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2129 WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
2130 fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2132 WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
2133 fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2135 WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
2136 fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2138 /* RISC I/O register. */
2139 WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
2140 fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
2142 /* Mailbox registers. */
2143 mbx_reg = &reg->mailbox0;
2144 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
2145 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
2147 /* Transfer sequence registers. */
2148 iter_reg = fw->xseq_gp_reg;
2149 iter_reg = qla24xx_read_window(reg, 0xBE00, 16, iter_reg);
2150 iter_reg = qla24xx_read_window(reg, 0xBE10, 16, iter_reg);
2151 iter_reg = qla24xx_read_window(reg, 0xBE20, 16, iter_reg);
2152 iter_reg = qla24xx_read_window(reg, 0xBE30, 16, iter_reg);
2153 iter_reg = qla24xx_read_window(reg, 0xBE40, 16, iter_reg);
2154 iter_reg = qla24xx_read_window(reg, 0xBE50, 16, iter_reg);
2155 iter_reg = qla24xx_read_window(reg, 0xBE60, 16, iter_reg);
2156 iter_reg = qla24xx_read_window(reg, 0xBE70, 16, iter_reg);
2157 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
2158 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
2159 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
2160 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
2161 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
2162 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
2163 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
2164 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
2166 iter_reg = fw->xseq_0_reg;
2167 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
2168 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
2169 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
2171 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
2173 qla24xx_read_window(reg, 0xBEF0, 16, fw->xseq_2_reg);
2175 /* Receive sequence registers. */
2176 iter_reg = fw->rseq_gp_reg;
2177 iter_reg = qla24xx_read_window(reg, 0xFE00, 16, iter_reg);
2178 iter_reg = qla24xx_read_window(reg, 0xFE10, 16, iter_reg);
2179 iter_reg = qla24xx_read_window(reg, 0xFE20, 16, iter_reg);
2180 iter_reg = qla24xx_read_window(reg, 0xFE30, 16, iter_reg);
2181 iter_reg = qla24xx_read_window(reg, 0xFE40, 16, iter_reg);
2182 iter_reg = qla24xx_read_window(reg, 0xFE50, 16, iter_reg);
2183 iter_reg = qla24xx_read_window(reg, 0xFE60, 16, iter_reg);
2184 iter_reg = qla24xx_read_window(reg, 0xFE70, 16, iter_reg);
2185 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
2186 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
2187 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
2188 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
2189 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
2190 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
2191 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
2192 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
2194 iter_reg = fw->rseq_0_reg;
2195 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
2196 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
2198 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
2199 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
2200 qla24xx_read_window(reg, 0xFEF0, 16, fw->rseq_3_reg);
2202 /* Auxiliary sequence registers. */
2203 iter_reg = fw->aseq_gp_reg;
2204 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
2205 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
2206 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
2207 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
2208 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
2209 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
2210 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
2211 iter_reg = qla24xx_read_window(reg, 0xB070, 16, iter_reg);
2212 iter_reg = qla24xx_read_window(reg, 0xB100, 16, iter_reg);
2213 iter_reg = qla24xx_read_window(reg, 0xB110, 16, iter_reg);
2214 iter_reg = qla24xx_read_window(reg, 0xB120, 16, iter_reg);
2215 iter_reg = qla24xx_read_window(reg, 0xB130, 16, iter_reg);
2216 iter_reg = qla24xx_read_window(reg, 0xB140, 16, iter_reg);
2217 iter_reg = qla24xx_read_window(reg, 0xB150, 16, iter_reg);
2218 iter_reg = qla24xx_read_window(reg, 0xB160, 16, iter_reg);
2219 qla24xx_read_window(reg, 0xB170, 16, iter_reg);
2221 iter_reg = fw->aseq_0_reg;
2222 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
2223 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
2225 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
2226 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
2227 qla24xx_read_window(reg, 0xB1F0, 16, fw->aseq_3_reg);
2229 /* Command DMA registers. */
2230 iter_reg = fw->cmd_dma_reg;
2231 iter_reg = qla24xx_read_window(reg, 0x7100, 16, iter_reg);
2232 iter_reg = qla24xx_read_window(reg, 0x7120, 16, iter_reg);
2233 iter_reg = qla24xx_read_window(reg, 0x7130, 16, iter_reg);
2234 qla24xx_read_window(reg, 0x71F0, 16, iter_reg);
2236 /* Queues. */
2237 iter_reg = fw->req0_dma_reg;
2238 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
2239 dmp_reg = &reg->iobase_q;
2240 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
2241 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
2243 iter_reg = fw->resp0_dma_reg;
2244 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
2245 dmp_reg = &reg->iobase_q;
2246 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
2247 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
2249 iter_reg = fw->req1_dma_reg;
2250 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
2251 dmp_reg = &reg->iobase_q;
2252 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
2253 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
2255 /* Transmit DMA registers. */
2256 iter_reg = fw->xmt0_dma_reg;
2257 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
2258 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
2260 iter_reg = fw->xmt1_dma_reg;
2261 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
2262 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
2264 iter_reg = fw->xmt2_dma_reg;
2265 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
2266 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
2268 iter_reg = fw->xmt3_dma_reg;
2269 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
2270 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
2272 iter_reg = fw->xmt4_dma_reg;
2273 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
2274 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
2276 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
2278 /* Receive DMA registers. */
2279 iter_reg = fw->rcvt0_data_dma_reg;
2280 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
2281 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
2283 iter_reg = fw->rcvt1_data_dma_reg;
2284 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
2285 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
2287 /* RISC registers. */
2288 iter_reg = fw->risc_gp_reg;
2289 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
2290 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
2291 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
2292 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
2293 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
2294 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
2295 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
2296 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
2298 /* Local memory controller registers. */
2299 iter_reg = fw->lmc_reg;
2300 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
2301 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
2302 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
2303 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
2304 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
2305 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
2306 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
2307 qla24xx_read_window(reg, 0x3070, 16, iter_reg);
2309 /* Fibre Protocol Module registers. */
2310 iter_reg = fw->fpm_hdw_reg;
2311 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
2312 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
2313 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
2314 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
2315 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
2316 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
2317 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
2318 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
2319 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
2320 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
2321 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
2322 iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
2323 iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
2324 iter_reg = qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
2325 iter_reg = qla24xx_read_window(reg, 0x40E0, 16, iter_reg);
2326 qla24xx_read_window(reg, 0x40F0, 16, iter_reg);
2328 /* RQ0 Array registers. */
2329 iter_reg = fw->rq0_array_reg;
2330 iter_reg = qla24xx_read_window(reg, 0x5C00, 16, iter_reg);
2331 iter_reg = qla24xx_read_window(reg, 0x5C10, 16, iter_reg);
2332 iter_reg = qla24xx_read_window(reg, 0x5C20, 16, iter_reg);
2333 iter_reg = qla24xx_read_window(reg, 0x5C30, 16, iter_reg);
2334 iter_reg = qla24xx_read_window(reg, 0x5C40, 16, iter_reg);
2335 iter_reg = qla24xx_read_window(reg, 0x5C50, 16, iter_reg);
2336 iter_reg = qla24xx_read_window(reg, 0x5C60, 16, iter_reg);
2337 iter_reg = qla24xx_read_window(reg, 0x5C70, 16, iter_reg);
2338 iter_reg = qla24xx_read_window(reg, 0x5C80, 16, iter_reg);
2339 iter_reg = qla24xx_read_window(reg, 0x5C90, 16, iter_reg);
2340 iter_reg = qla24xx_read_window(reg, 0x5CA0, 16, iter_reg);
2341 iter_reg = qla24xx_read_window(reg, 0x5CB0, 16, iter_reg);
2342 iter_reg = qla24xx_read_window(reg, 0x5CC0, 16, iter_reg);
2343 iter_reg = qla24xx_read_window(reg, 0x5CD0, 16, iter_reg);
2344 iter_reg = qla24xx_read_window(reg, 0x5CE0, 16, iter_reg);
2345 qla24xx_read_window(reg, 0x5CF0, 16, iter_reg);
2347 /* RQ1 Array registers. */
2348 iter_reg = fw->rq1_array_reg;
2349 iter_reg = qla24xx_read_window(reg, 0x5D00, 16, iter_reg);
2350 iter_reg = qla24xx_read_window(reg, 0x5D10, 16, iter_reg);
2351 iter_reg = qla24xx_read_window(reg, 0x5D20, 16, iter_reg);
2352 iter_reg = qla24xx_read_window(reg, 0x5D30, 16, iter_reg);
2353 iter_reg = qla24xx_read_window(reg, 0x5D40, 16, iter_reg);
2354 iter_reg = qla24xx_read_window(reg, 0x5D50, 16, iter_reg);
2355 iter_reg = qla24xx_read_window(reg, 0x5D60, 16, iter_reg);
2356 iter_reg = qla24xx_read_window(reg, 0x5D70, 16, iter_reg);
2357 iter_reg = qla24xx_read_window(reg, 0x5D80, 16, iter_reg);
2358 iter_reg = qla24xx_read_window(reg, 0x5D90, 16, iter_reg);
2359 iter_reg = qla24xx_read_window(reg, 0x5DA0, 16, iter_reg);
2360 iter_reg = qla24xx_read_window(reg, 0x5DB0, 16, iter_reg);
2361 iter_reg = qla24xx_read_window(reg, 0x5DC0, 16, iter_reg);
2362 iter_reg = qla24xx_read_window(reg, 0x5DD0, 16, iter_reg);
2363 iter_reg = qla24xx_read_window(reg, 0x5DE0, 16, iter_reg);
2364 qla24xx_read_window(reg, 0x5DF0, 16, iter_reg);
2366 /* RP0 Array registers. */
2367 iter_reg = fw->rp0_array_reg;
2368 iter_reg = qla24xx_read_window(reg, 0x5E00, 16, iter_reg);
2369 iter_reg = qla24xx_read_window(reg, 0x5E10, 16, iter_reg);
2370 iter_reg = qla24xx_read_window(reg, 0x5E20, 16, iter_reg);
2371 iter_reg = qla24xx_read_window(reg, 0x5E30, 16, iter_reg);
2372 iter_reg = qla24xx_read_window(reg, 0x5E40, 16, iter_reg);
2373 iter_reg = qla24xx_read_window(reg, 0x5E50, 16, iter_reg);
2374 iter_reg = qla24xx_read_window(reg, 0x5E60, 16, iter_reg);
2375 iter_reg = qla24xx_read_window(reg, 0x5E70, 16, iter_reg);
2376 iter_reg = qla24xx_read_window(reg, 0x5E80, 16, iter_reg);
2377 iter_reg = qla24xx_read_window(reg, 0x5E90, 16, iter_reg);
2378 iter_reg = qla24xx_read_window(reg, 0x5EA0, 16, iter_reg);
2379 iter_reg = qla24xx_read_window(reg, 0x5EB0, 16, iter_reg);
2380 iter_reg = qla24xx_read_window(reg, 0x5EC0, 16, iter_reg);
2381 iter_reg = qla24xx_read_window(reg, 0x5ED0, 16, iter_reg);
2382 iter_reg = qla24xx_read_window(reg, 0x5EE0, 16, iter_reg);
2383 qla24xx_read_window(reg, 0x5EF0, 16, iter_reg);
2385 /* RP1 Array registers. */
2386 iter_reg = fw->rp1_array_reg;
2387 iter_reg = qla24xx_read_window(reg, 0x5F00, 16, iter_reg);
2388 iter_reg = qla24xx_read_window(reg, 0x5F10, 16, iter_reg);
2389 iter_reg = qla24xx_read_window(reg, 0x5F20, 16, iter_reg);
2390 iter_reg = qla24xx_read_window(reg, 0x5F30, 16, iter_reg);
2391 iter_reg = qla24xx_read_window(reg, 0x5F40, 16, iter_reg);
2392 iter_reg = qla24xx_read_window(reg, 0x5F50, 16, iter_reg);
2393 iter_reg = qla24xx_read_window(reg, 0x5F60, 16, iter_reg);
2394 iter_reg = qla24xx_read_window(reg, 0x5F70, 16, iter_reg);
2395 iter_reg = qla24xx_read_window(reg, 0x5F80, 16, iter_reg);
2396 iter_reg = qla24xx_read_window(reg, 0x5F90, 16, iter_reg);
2397 iter_reg = qla24xx_read_window(reg, 0x5FA0, 16, iter_reg);
2398 iter_reg = qla24xx_read_window(reg, 0x5FB0, 16, iter_reg);
2399 iter_reg = qla24xx_read_window(reg, 0x5FC0, 16, iter_reg);
2400 iter_reg = qla24xx_read_window(reg, 0x5FD0, 16, iter_reg);
2401 iter_reg = qla24xx_read_window(reg, 0x5FE0, 16, iter_reg);
2402 qla24xx_read_window(reg, 0x5FF0, 16, iter_reg);
2404 iter_reg = fw->at0_array_reg;
2405 iter_reg = qla24xx_read_window(reg, 0x7080, 16, iter_reg);
2406 iter_reg = qla24xx_read_window(reg, 0x7090, 16, iter_reg);
2407 iter_reg = qla24xx_read_window(reg, 0x70A0, 16, iter_reg);
2408 iter_reg = qla24xx_read_window(reg, 0x70B0, 16, iter_reg);
2409 iter_reg = qla24xx_read_window(reg, 0x70C0, 16, iter_reg);
2410 iter_reg = qla24xx_read_window(reg, 0x70D0, 16, iter_reg);
2411 iter_reg = qla24xx_read_window(reg, 0x70E0, 16, iter_reg);
2412 qla24xx_read_window(reg, 0x70F0, 16, iter_reg);
2414 /* I/O Queue Control registers. */
2415 qla24xx_read_window(reg, 0x7800, 16, fw->queue_control_reg);
2417 /* Frame Buffer registers. */
2418 iter_reg = fw->fb_hdw_reg;
2419 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
2420 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
2421 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
2422 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
2423 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
2424 iter_reg = qla24xx_read_window(reg, 0x6060, 16, iter_reg);
2425 iter_reg = qla24xx_read_window(reg, 0x6070, 16, iter_reg);
2426 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
2427 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
2428 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
2429 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
2430 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
2431 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
2432 iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
2433 iter_reg = qla24xx_read_window(reg, 0x6530, 16, iter_reg);
2434 iter_reg = qla24xx_read_window(reg, 0x6540, 16, iter_reg);
2435 iter_reg = qla24xx_read_window(reg, 0x6550, 16, iter_reg);
2436 iter_reg = qla24xx_read_window(reg, 0x6560, 16, iter_reg);
2437 iter_reg = qla24xx_read_window(reg, 0x6570, 16, iter_reg);
2438 iter_reg = qla24xx_read_window(reg, 0x6580, 16, iter_reg);
2439 iter_reg = qla24xx_read_window(reg, 0x6590, 16, iter_reg);
2440 iter_reg = qla24xx_read_window(reg, 0x65A0, 16, iter_reg);
2441 iter_reg = qla24xx_read_window(reg, 0x65B0, 16, iter_reg);
2442 iter_reg = qla24xx_read_window(reg, 0x65C0, 16, iter_reg);
2443 iter_reg = qla24xx_read_window(reg, 0x65D0, 16, iter_reg);
2444 iter_reg = qla24xx_read_window(reg, 0x65E0, 16, iter_reg);
2445 qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
2447 /* Multi queue registers */
2448 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
2449 &last_chain);
2451 rval = qla24xx_soft_reset(ha);
2452 if (rval != QLA_SUCCESS) {
2453 ql_log(ql_log_warn, vha, 0xd00e,
2454 "SOFT RESET FAILED, forcing continuation of dump!!!\n");
2455 rval = QLA_SUCCESS;
2457 ql_log(ql_log_warn, vha, 0xd00f, "try a bigger hammer!!!\n");
2459 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
2460 RD_REG_DWORD(&reg->hccr);
2462 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
2463 RD_REG_DWORD(&reg->hccr);
2465 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
2466 RD_REG_DWORD(&reg->hccr);
2468 for (cnt = 30000; cnt && (RD_REG_WORD(&reg->mailbox0)); cnt--)
2469 udelay(5);
2471 if (!cnt) {
2472 nxt = fw->code_ram;
2473 nxt += sizeof(fw->code_ram);
2474 nxt += (ha->fw_memory_size - 0x100000 + 1);
2475 goto copy_queue;
2476 } else {
2477 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
2478 ql_log(ql_log_warn, vha, 0xd010,
2479 "bigger hammer success?\n");
2483 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
2484 &nxt);
2485 if (rval != QLA_SUCCESS)
2486 goto qla83xx_fw_dump_failed_0;
2488 copy_queue:
2489 nxt = qla2xxx_copy_queues(ha, nxt);
2491 qla24xx_copy_eft(ha, nxt);
2493 /* Chain entries -- started with MQ. */
2494 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
2495 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
2496 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
2497 nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
2498 nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain);
2499 if (last_chain) {
2500 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
2501 *last_chain |= htonl(DUMP_CHAIN_LAST);
2502 }
2504 /* Adjust valid length. */
2505 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
2507 qla83xx_fw_dump_failed_0:
2508 qla2xxx_dump_post_process(base_vha, rval);
2510 qla83xx_fw_dump_failed:
2511 #ifndef __CHECKER__
2512 if (!hardware_locked)
2513 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2514 #else
2515 ;
2516 #endif
2517 }
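/*
 * Summary of the dump layout assembled above: the fixed register snapshot
 * is followed by code RAM and external memory (qla24xx_dump_memory), the
 * request/response queues and EFT, and then the chained entries (MQ shadow
 * registers, FCE, multi-queue data, ATIO queues, exchange-login and
 * exchange-offload buffers).  When any chain is present, the dump header
 * version is tagged with DUMP_CHAIN_VARIANT and the final entry with
 * DUMP_CHAIN_LAST.
 */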
2519 /****************************************************************************/
2520 /* Driver Debug Functions. */
2521 /****************************************************************************/
2523 static inline int
2524 ql_mask_match(uint32_t level)
2525 {
2526 return (level & ql2xextended_error_logging) == level;
2527 }
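/*
 * Illustrative example of the mask check (assumes the ql_dbg_* bits from
 * qla_dbg.h): a debug message is emitted only when every bit of its
 * 'level' is also set in the ql2xextended_error_logging module parameter:
 *
 *	ql2xextended_error_logging = ql_dbg_init | ql_dbg_mbx;
 *	ql_mask_match(ql_dbg_init);             -> true, bit enabled
 *	ql_mask_match(ql_dbg_io);               -> false, bit not enabled
 *	ql_mask_match(ql_dbg_init | ql_dbg_io); -> false, needs both bits
 */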
2529 /*
2530 * This function is for formatting and logging debug information.
2531 * It is to be used when vha is available. It formats the message
2532 * and logs it to the messages file.
2533 * parameters:
2534 * level: The level of the debug message to be printed.
2535 * If the corresponding bits are set in ql2xextended_error_logging,
2536 * this message will appear in the messages file.
2537 * vha: Pointer to the scsi_qla_host_t.
2538 * id: A unique identifier for the level. It identifies the
2539 * part of the code from which the message originated.
2540 * msg: The message to be displayed.
2541 */
2542 void
2543 ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
2544 {
2545 va_list va;
2546 struct va_format vaf;
2548 if (!ql_mask_match(level))
2549 return;
2551 va_start(va, fmt);
2553 vaf.fmt = fmt;
2554 vaf.va = &va;
2556 if (vha != NULL) {
2557 const struct pci_dev *pdev = vha->hw->pdev;
2558 /* <module-name> <pci-name> <msg-id>:<host> Message */
2559 pr_warn("%s [%s]-%04x:%ld: %pV",
2560 QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset,
2561 vha->host_no, &vaf);
2562 } else {
2563 pr_warn("%s [%s]-%04x: : %pV",
2564 QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf);
2565 }
2567 va_end(va);
2568 }
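/*
 * Illustrative usage (hypothetical message id): callers pass one of the
 * ql_dbg_* level bits plus an id drawn from the table at the top of this
 * file, e.g.:
 *
 *	ql_dbg(ql_dbg_init, vha, 0x00ff,
 *	    "Init complete, rval=%d.\n", rval);
 *
 * The id is printed with ql_dbg_offset added, which presumably keeps debug
 * ids distinct from ql_log() ids in the message stream.
 */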
2571 /*
2572 * This function is for formatting and logging debug information.
2573 * It is to be used when vha is not available but the PCI device is,
2574 * i.e., before host allocation. It formats the message and logs it
2575 * to the messages file.
2576 * parameters:
2577 * level: The level of the debug message to be printed.
2578 * If the corresponding bits are set in ql2xextended_error_logging,
2579 * this message will appear in the messages file.
2580 * pdev: Pointer to the struct pci_dev.
2581 * id: A unique id for the level. It identifies the part
2582 * of the code from which the message originated.
2583 * msg: The message to be displayed.
2584 */
2585 void
2586 ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
2587 const char *fmt, ...)
2588 {
2589 va_list va;
2590 struct va_format vaf;
2592 if (pdev == NULL)
2593 return;
2594 if (!ql_mask_match(level))
2595 return;
2597 va_start(va, fmt);
2599 vaf.fmt = fmt;
2600 vaf.va = &va;
2602 /* <module-name> <dev-name>:<msg-id> Message */
2603 pr_warn("%s [%s]-%04x: : %pV",
2604 QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset, &vaf);
2606 va_end(va);
2607 }
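/*
 * Illustrative usage (hypothetical message id): the *_pci variants serve
 * early probe paths where no scsi_qla_host_t exists yet, e.g.:
 *
 *	ql_dbg_pci(ql_dbg_init, pdev, 0x0011,
 *	    "Allocating hardware data structure.\n");
 */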
2609 /*
2610 * This function is for formatting and logging log messages.
2611 * It is to be used when vha is available. It formats the message
2612 * and logs it to the messages file. All messages are logged
2613 * irrespective of the value of ql2xextended_error_logging.
2614 * parameters:
2615 * level: The level of the log message to be printed in the
2616 * messages file.
2617 * vha: Pointer to the scsi_qla_host_t.
2618 * id: A unique id for the level. It identifies the
2619 * part of the code from which the message originated.
2620 * msg: The message to be displayed.
2621 */
2622 void
2623 ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
2624 {
2625 va_list va;
2626 struct va_format vaf;
2627 char pbuf[128];
2629 if (level > ql_errlev)
2630 return;
2632 if (vha != NULL) {
2633 const struct pci_dev *pdev = vha->hw->pdev;
2634 /* <module-name> <msg-id>:<host> Message */
2635 snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x:%ld: ",
2636 QL_MSGHDR, dev_name(&(pdev->dev)), id, vha->host_no);
2637 } else {
2638 snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
2639 QL_MSGHDR, "0000:00:00.0", id);
2640 }
2641 pbuf[sizeof(pbuf) - 1] = 0;
2643 va_start(va, fmt);
2645 vaf.fmt = fmt;
2646 vaf.va = &va;
2648 switch (level) {
2649 case ql_log_fatal: /* FATAL LOG */
2650 pr_crit("%s%pV", pbuf, &vaf);
2651 break;
2652 case ql_log_warn:
2653 pr_err("%s%pV", pbuf, &vaf);
2654 break;
2655 case ql_log_info:
2656 pr_warn("%s%pV", pbuf, &vaf);
2657 break;
2658 default:
2659 pr_info("%s%pV", pbuf, &vaf);
2660 break;
2661 }
2663 va_end(va);
2664 }
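/*
 * Illustrative usage: ql_log() is gated by ql_errlev rather than by the
 * debug mask, and the level selects the printk severity (ql_log_fatal ->
 * pr_crit, ql_log_warn -> pr_err, ql_log_info -> pr_warn, anything else ->
 * pr_info).  For example, the call used in qla83xx_fw_dump() above:
 *
 *	ql_log(ql_log_warn, vha, 0xd00c,
 *	    "No buffer available for dump!!!\n");
 */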
2666 /*
2667 * This function is for formatting and logging log messages.
2668 * It is to be used when vha is not available but the PCI device is,
2669 * i.e., before host allocation. It formats the message and logs
2670 * it to the messages file. All messages are logged irrespective
2671 * of the value of ql2xextended_error_logging.
2672 * parameters:
2673 * level: The level of the log message to be printed in the
2674 * messages file.
2675 * pdev: Pointer to the struct pci_dev.
2676 * id: A unique id for the level. It identifies the
2677 * part of the code from which the message originated.
2678 * msg: The message to be displayed.
2679 */
2680 void
2681 ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
2682 const char *fmt, ...)
2683 {
2684 va_list va;
2685 struct va_format vaf;
2686 char pbuf[128];
2688 if (pdev == NULL)
2689 return;
2690 if (level > ql_errlev)
2691 return;
2693 /* <module-name> <dev-name>:<msg-id> Message */
2694 snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
2695 QL_MSGHDR, dev_name(&(pdev->dev)), id);
2696 pbuf[sizeof(pbuf) - 1] = 0;
2698 va_start(va, fmt);
2700 vaf.fmt = fmt;
2701 vaf.va = &va;
2703 switch (level) {
2704 case ql_log_fatal: /* FATAL LOG */
2705 pr_crit("%s%pV", pbuf, &vaf);
2706 break;
2707 case ql_log_warn:
2708 pr_err("%s%pV", pbuf, &vaf);
2709 break;
2710 case ql_log_info:
2711 pr_warn("%s%pV", pbuf, &vaf);
2712 break;
2713 default:
2714 pr_info("%s%pV", pbuf, &vaf);
2715 break;
2716 }
2718 va_end(va);
2719 }
2721 void
2722 ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
2723 {
2724 int i;
2725 struct qla_hw_data *ha = vha->hw;
2726 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2727 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
2728 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
2729 uint16_t __iomem *mbx_reg;
2731 if (!ql_mask_match(level))
2732 return;
2734 if (IS_P3P_TYPE(ha))
2735 mbx_reg = &reg82->mailbox_in[0];
2736 else if (IS_FWI2_CAPABLE(ha))
2737 mbx_reg = &reg24->mailbox0;
2738 else
2739 mbx_reg = MAILBOX_REG(ha, reg, 0);
2741 ql_dbg(level, vha, id, "Mailbox registers:\n");
2742 for (i = 0; i < 6; i++, mbx_reg++)
2743 ql_dbg(level, vha, id,
2744 "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg));
2748 void
2749 ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
2750 uint8_t *buf, uint size)
2751 {
2752 uint cnt;
2754 if (!ql_mask_match(level))
2755 return;
2757 ql_dbg(level, vha, id,
2758 "%-+5d 0 1 2 3 4 5 6 7 8 9 A B C D E F\n", size);
2759 ql_dbg(level, vha, id,
2760 "----- -----------------------------------------------\n");
2761 for (cnt = 0; cnt < size; cnt += 16) {
2762 ql_dbg(level, vha, id, "%04x: ", cnt);
2763 print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1,
2764 buf + cnt, min(16U, size - cnt), false);
2765 }
2766 }
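/*
 * Illustrative usage (hypothetical message id): ql_dump_buffer() prints a
 * header line followed by one "%04x: <16 hex bytes>" row per 16-byte chunk
 * of 'buf', all at the given debug level, e.g.:
 *
 *	ql_dump_buffer(ql_dbg_disc, vha, 0x2075, buf, len);
 */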
2768 /*
2769 * This function is for formatting and logging log messages.
2770 * It is to be used when a qpair is available. It formats the message
2771 * and logs it to the messages file. All messages are logged
2772 * irrespective of the value of ql2xextended_error_logging.
2773 * parameters:
2774 * level: The level of the log message to be printed in the
2775 * messages file.
2776 * qpair: Pointer to the struct qla_qpair.
2777 * id: A unique id for the level. It identifies the
2778 * part of the code from which the message originated.
2779 * msg: The message to be displayed.
2780 */
2781 void
2782 ql_log_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
2783 const char *fmt, ...)
2784 {
2785 va_list va;
2786 struct va_format vaf;
2787 char pbuf[128];
2789 if (level > ql_errlev)
2790 return;
2792 if (qpair != NULL) {
2793 const struct pci_dev *pdev = qpair->pdev;
2794 /* <module-name> <msg-id>:<host> Message */
2795 snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: ",
2796 QL_MSGHDR, dev_name(&(pdev->dev)), id);
2797 } else {
2798 snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
2799 QL_MSGHDR, "0000:00:00.0", id);
2800 }
2801 pbuf[sizeof(pbuf) - 1] = 0;
2803 va_start(va, fmt);
2805 vaf.fmt = fmt;
2806 vaf.va = &va;
2808 switch (level) {
2809 case ql_log_fatal: /* FATAL LOG */
2810 pr_crit("%s%pV", pbuf, &vaf);
2811 break;
2812 case ql_log_warn:
2813 pr_err("%s%pV", pbuf, &vaf);
2814 break;
2815 case ql_log_info:
2816 pr_warn("%s%pV", pbuf, &vaf);
2817 break;
2818 default:
2819 pr_info("%s%pV", pbuf, &vaf);
2820 break;
2821 }
2823 va_end(va);
2824 }
2826 /*
2827 * This function is for formatting and logging debug information.
2828 * It is to be used when a qpair is available. It formats the message
2829 * and logs it to the messages file.
2830 * parameters:
2831 * level: The level of the debug message to be printed.
2832 * If the corresponding bits are set in ql2xextended_error_logging,
2833 * this message will appear in the messages file.
2834 * qpair: Pointer to the struct qla_qpair.
2835 * id: A unique identifier for the level. It identifies the
2836 * part of the code from which the message originated.
2837 * msg: The message to be displayed.
2838 */
2839 void
2840 ql_dbg_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
2841 const char *fmt, ...)
2842 {
2843 va_list va;
2844 struct va_format vaf;
2846 if (!ql_mask_match(level))
2847 return;
2849 va_start(va, fmt);
2851 vaf.fmt = fmt;
2852 vaf.va = &va;
2854 if (qpair != NULL) {
2855 const struct pci_dev *pdev = qpair->pdev;
2856 /* <module-name> <pci-name> <msg-id>:<host> Message */
2857 pr_warn("%s [%s]-%04x: %pV",
2858 QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset,
2859 &vaf);
2860 } else {
2861 pr_warn("%s [%s]-%04x: : %pV",
2862 QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf);
2863 }
2865 va_end(va);
2866 }