drivers/scsi/qla2xxx/qla_dbg.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * QLogic Fibre Channel HBA Driver
4 * Copyright (c) 2003-2014 QLogic Corporation
5 */
7 /*
8 * Table showing the current message id in use for each level.
9 * Update this table when adding log/debug messages.
10 * ----------------------------------------------------------------------
11 * | Level | Last Value Used | Holes |
12 * ----------------------------------------------------------------------
13 * | Module Init and Probe | 0x0199 | |
14 * | Mailbox commands | 0x1206 | 0x11a5-0x11ff |
15 * | Device Discovery | 0x2134 | 0x2112-0x2115 |
16 * | | | 0x2127-0x2128 |
17 * | Queue Command and IO tracing | 0x3074 | 0x300b |
18 * | | | 0x3027-0x3028 |
19 * | | | 0x303d-0x3041 |
20 * | | | 0x302e,0x3033 |
21 * | | | 0x3036,0x3038 |
22 * | | | 0x303a |
23 * | DPC Thread | 0x4023 | 0x4002,0x4013 |
24 * | Async Events | 0x509c | |
25 * | Timer Routines | 0x6012 | |
26 * | User Space Interactions | 0x70e3 | 0x7018,0x702e |
27 * | | | 0x7020,0x7024 |
28 * | | | 0x7039,0x7045 |
29 * | | | 0x7073-0x7075 |
30 * | | | 0x70a5-0x70a6 |
31 * | | | 0x70a8,0x70ab |
32 * | | | 0x70ad-0x70ae |
33 * | | | 0x70d0-0x70d6 |
34 * | | | 0x70d7-0x70db |
35 * | Task Management | 0x8042 | 0x8000 |
36 * | | | 0x8019 |
37 * | | | 0x8025,0x8026 |
38 * | | | 0x8031,0x8032 |
39 * | | | 0x8039,0x803c |
40 * | AER/EEH | 0x9011 | |
41 * | Virtual Port | 0xa007 | |
42 * | ISP82XX Specific | 0xb157 | 0xb002,0xb024 |
43 * | | | 0xb09e,0xb0ae |
44 * | | | 0xb0c3,0xb0c6 |
45 * | | | 0xb0e0-0xb0ef |
46 * | | | 0xb085,0xb0dc |
47 * | | | 0xb107,0xb108 |
48 * | | | 0xb111,0xb11e |
49 * | | | 0xb12c,0xb12d |
50 * | | | 0xb13a,0xb142 |
51 * | | | 0xb13c-0xb140 |
52 * | | | 0xb149 |
53 * | MultiQ | 0xc010 | |
54 * | Misc | 0xd303 | 0xd031-0xd0ff |
55 * | | | 0xd101-0xd1fe |
56 * | | | 0xd214-0xd2fe |
57 * | Target Mode | 0xe081 | |
58 * | Target Mode Management | 0xf09b | 0xf002 |
59 * | | | 0xf046-0xf049 |
60 * | Target Mode Task Management | 0x1000d | |
61 * ----------------------------------------------------------------------
62 */
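/*
 * Illustrative sketch only (not part of the driver): log and debug
 * statements take a message id from the table above, so new ids can be
 * allocated against the "Last Value Used" column for the matching level.
 * Within a function that has a scsi_qla_host_t *vha in scope, a call looks
 * roughly like the following; the ids 0x0100/0x0101 are hypothetical
 * placeholders, not ids reserved by this file.
 *
 *	ql_dbg(ql_dbg_init, vha, 0x0100,
 *	    "Example init/probe-level trace for host %ld.\n", vha->host_no);
 *	ql_log(ql_log_warn, vha, 0x0101,
 *	    "Example warning message with a hypothetical id.\n");
 */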
64 #include "qla_def.h"
66 #include <linux/delay.h>
67 #define CREATE_TRACE_POINTS
68 #include <trace/events/qla.h>
70 static uint32_t ql_dbg_offset = 0x800;
72 static inline void
73 qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
75 fw_dump->fw_major_version = htonl(ha->fw_major_version);
76 fw_dump->fw_minor_version = htonl(ha->fw_minor_version);
77 fw_dump->fw_subminor_version = htonl(ha->fw_subminor_version);
78 fw_dump->fw_attributes = htonl(ha->fw_attributes);
80 fw_dump->vendor = htonl(ha->pdev->vendor);
81 fw_dump->device = htonl(ha->pdev->device);
82 fw_dump->subsystem_vendor = htonl(ha->pdev->subsystem_vendor);
83 fw_dump->subsystem_device = htonl(ha->pdev->subsystem_device);
86 static inline void *
87 qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)
89 struct req_que *req = ha->req_q_map[0];
90 struct rsp_que *rsp = ha->rsp_q_map[0];
91 /* Request queue. */
92 memcpy(ptr, req->ring, req->length *
93 sizeof(request_t));
95 /* Response queue. */
96 ptr += req->length * sizeof(request_t);
97 memcpy(ptr, rsp->ring, rsp->length *
98 sizeof(response_t));
100 return ptr + (rsp->length * sizeof(response_t));
104 qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
105 uint32_t ram_dwords, void **nxt)
107 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
108 dma_addr_t dump_dma = ha->gid_list_dma;
109 uint32_t *chunk = (uint32_t *)ha->gid_list;
110 uint32_t dwords = qla2x00_gid_list_size(ha) / 4;
111 uint32_t stat;
112 ulong i, j, timer = 6000000;
113 int rval = QLA_FUNCTION_FAILED;
114 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
116 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
118 if (qla_pci_disconnected(vha, reg))
119 return rval;
121 for (i = 0; i < ram_dwords; i += dwords, addr += dwords) {
122 if (i + dwords > ram_dwords)
123 dwords = ram_dwords - i;
125 wrt_reg_word(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
126 wrt_reg_word(&reg->mailbox1, LSW(addr));
127 wrt_reg_word(&reg->mailbox8, MSW(addr));
129 wrt_reg_word(&reg->mailbox2, MSW(LSD(dump_dma)));
130 wrt_reg_word(&reg->mailbox3, LSW(LSD(dump_dma)));
131 wrt_reg_word(&reg->mailbox6, MSW(MSD(dump_dma)));
132 wrt_reg_word(&reg->mailbox7, LSW(MSD(dump_dma)));
134 wrt_reg_word(&reg->mailbox4, MSW(dwords));
135 wrt_reg_word(&reg->mailbox5, LSW(dwords));
137 wrt_reg_word(&reg->mailbox9, 0);
138 wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT);
140 ha->flags.mbox_int = 0;
141 while (timer--) {
142 udelay(5);
144 if (qla_pci_disconnected(vha, reg))
145 return rval;
147 stat = rd_reg_dword(&reg->host_status);
148 /* Check for pending interrupts. */
149 if (!(stat & HSRX_RISC_INT))
150 continue;
152 stat &= 0xff;
153 if (stat != 0x1 && stat != 0x2 &&
154 stat != 0x10 && stat != 0x11) {
156 /* Clear this intr; it wasn't a mailbox intr */
157 wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
158 rd_reg_dword(&reg->hccr);
159 continue;
162 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
163 rval = rd_reg_word(&reg->mailbox0) & MBS_MASK;
164 wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
165 rd_reg_dword(&reg->hccr);
166 break;
168 ha->flags.mbox_int = 1;
169 *nxt = ram + i;
171 if (!test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
172 /* no interrupt, timed out */
173 return rval;
174 }
175 if (rval) {
176 /* error completion status */
177 return rval;
178 }
179 for (j = 0; j < dwords; j++) {
180 ram[i + j] =
181 (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
182 chunk[j] : swab32(chunk[j]);
186 *nxt = ram + i;
187 return QLA_SUCCESS;
191 qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be32 *ram,
192 uint32_t ram_dwords, void **nxt)
194 int rval = QLA_FUNCTION_FAILED;
195 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
196 dma_addr_t dump_dma = ha->gid_list_dma;
197 uint32_t *chunk = (uint32_t *)ha->gid_list;
198 uint32_t dwords = qla2x00_gid_list_size(ha) / 4;
199 uint32_t stat;
200 ulong i, j, timer = 6000000;
201 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
203 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
205 if (qla_pci_disconnected(vha, reg))
206 return rval;
208 for (i = 0; i < ram_dwords; i += dwords, addr += dwords) {
209 if (i + dwords > ram_dwords)
210 dwords = ram_dwords - i;
212 wrt_reg_word(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
213 wrt_reg_word(&reg->mailbox1, LSW(addr));
214 wrt_reg_word(&reg->mailbox8, MSW(addr));
215 wrt_reg_word(&reg->mailbox10, 0);
217 wrt_reg_word(&reg->mailbox2, MSW(LSD(dump_dma)));
218 wrt_reg_word(&reg->mailbox3, LSW(LSD(dump_dma)));
219 wrt_reg_word(&reg->mailbox6, MSW(MSD(dump_dma)));
220 wrt_reg_word(&reg->mailbox7, LSW(MSD(dump_dma)));
222 wrt_reg_word(&reg->mailbox4, MSW(dwords));
223 wrt_reg_word(&reg->mailbox5, LSW(dwords));
224 wrt_reg_dword(&reg->hccr, HCCRX_SET_HOST_INT);
226 ha->flags.mbox_int = 0;
227 while (timer--) {
228 udelay(5);
229 if (qla_pci_disconnected(vha, reg))
230 return rval;
232 stat = rd_reg_dword(&reg->host_status);
233 /* Check for pending interrupts. */
234 if (!(stat & HSRX_RISC_INT))
235 continue;
237 stat &= 0xff;
238 if (stat != 0x1 && stat != 0x2 &&
239 stat != 0x10 && stat != 0x11) {
240 wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
241 rd_reg_dword(&reg->hccr);
242 continue;
245 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
246 rval = rd_reg_word(&reg->mailbox0) & MBS_MASK;
247 wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_INT);
248 rd_reg_dword(&reg->hccr);
249 break;
251 ha->flags.mbox_int = 1;
252 *nxt = ram + i;
254 if (!test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
255 /* no interrupt, timed out */
256 return rval;
257 }
258 if (rval) {
259 /* error completion status */
260 return rval;
261 }
262 for (j = 0; j < dwords; j++) {
263 ram[i + j] = (__force __be32)
264 ((IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
265 chunk[j] : swab32(chunk[j]));
269 *nxt = ram + i;
270 return QLA_SUCCESS;
273 static int
274 qla24xx_dump_memory(struct qla_hw_data *ha, __be32 *code_ram,
275 uint32_t cram_size, void **nxt)
277 int rval;
279 /* Code RAM. */
280 rval = qla24xx_dump_ram(ha, 0x20000, code_ram, cram_size / 4, nxt);
281 if (rval != QLA_SUCCESS)
282 return rval;
284 set_bit(RISC_SRAM_DUMP_CMPL, &ha->fw_dump_cap_flags);
286 /* External Memory. */
287 rval = qla24xx_dump_ram(ha, 0x100000, *nxt,
288 ha->fw_memory_size - 0x100000 + 1, nxt);
289 if (rval == QLA_SUCCESS)
290 set_bit(RISC_EXT_MEM_DUMP_CMPL, &ha->fw_dump_cap_flags);
292 return rval;
295 static __be32 *
296 qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
297 uint32_t count, __be32 *buf)
299 __le32 __iomem *dmp_reg;
301 wrt_reg_dword(&reg->iobase_addr, iobase);
302 dmp_reg = &reg->iobase_window;
303 for ( ; count--; dmp_reg++)
304 *buf++ = htonl(rd_reg_dword(dmp_reg));
306 return buf;
309 void
310 qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha)
312 wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_PAUSE);
314 /* 100 usec delay is sufficient for hardware to pause RISC */
315 udelay(100);
316 if (rd_reg_dword(&reg->host_status) & HSRX_RISC_PAUSED)
317 set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags);
321 qla24xx_soft_reset(struct qla_hw_data *ha)
323 int rval = QLA_SUCCESS;
324 uint32_t cnt;
325 uint16_t wd;
326 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
328 /*
329 * Reset RISC. The delay is dependent on system architecture.
330 * Driver can proceed with the reset sequence after waiting
331 * for a timeout period.
332 */
333 wrt_reg_dword(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
334 for (cnt = 0; cnt < 30000; cnt++) {
335 if ((rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
336 break;
338 udelay(10);
340 if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
341 set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
343 wrt_reg_dword(&reg->ctrl_status,
344 CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
345 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
347 udelay(100);
349 /* Wait for soft-reset to complete. */
350 for (cnt = 0; cnt < 30000; cnt++) {
351 if ((rd_reg_dword(&reg->ctrl_status) &
352 CSRX_ISP_SOFT_RESET) == 0)
353 break;
355 udelay(10);
357 if (!(rd_reg_dword(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
358 set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags);
360 wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET);
361 rd_reg_dword(&reg->hccr); /* PCI Posting. */
363 for (cnt = 10000; rd_reg_word(&reg->mailbox0) != 0 &&
364 rval == QLA_SUCCESS; cnt--) {
365 if (cnt)
366 udelay(10);
367 else
368 rval = QLA_FUNCTION_TIMEOUT;
370 if (rval == QLA_SUCCESS)
371 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
373 return rval;
376 static int
377 qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, __be16 *ram,
378 uint32_t ram_words, void **nxt)
380 int rval;
381 uint32_t cnt, stat, timer, words, idx;
382 uint16_t mb0;
383 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
384 dma_addr_t dump_dma = ha->gid_list_dma;
385 __le16 *dump = (__force __le16 *)ha->gid_list;
387 rval = QLA_SUCCESS;
388 mb0 = 0;
390 WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED);
391 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
393 words = qla2x00_gid_list_size(ha) / 2;
394 for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS;
395 cnt += words, addr += words) {
396 if (cnt + words > ram_words)
397 words = ram_words - cnt;
399 WRT_MAILBOX_REG(ha, reg, 1, LSW(addr));
400 WRT_MAILBOX_REG(ha, reg, 8, MSW(addr));
402 WRT_MAILBOX_REG(ha, reg, 2, MSW(dump_dma));
403 WRT_MAILBOX_REG(ha, reg, 3, LSW(dump_dma));
404 WRT_MAILBOX_REG(ha, reg, 6, MSW(MSD(dump_dma)));
405 WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma)));
407 WRT_MAILBOX_REG(ha, reg, 4, words);
408 wrt_reg_word(&reg->hccr, HCCR_SET_HOST_INT);
410 for (timer = 6000000; timer; timer--) {
411 /* Check for pending interrupts. */
412 stat = rd_reg_dword(&reg->u.isp2300.host_status);
413 if (stat & HSR_RISC_INT) {
414 stat &= 0xff;
416 if (stat == 0x1 || stat == 0x2) {
417 set_bit(MBX_INTERRUPT,
418 &ha->mbx_cmd_flags);
420 mb0 = RD_MAILBOX_REG(ha, reg, 0);
422 /* Release mailbox registers. */
423 wrt_reg_word(&reg->semaphore, 0);
424 wrt_reg_word(&reg->hccr,
425 HCCR_CLR_RISC_INT);
426 rd_reg_word(&reg->hccr);
427 break;
428 } else if (stat == 0x10 || stat == 0x11) {
429 set_bit(MBX_INTERRUPT,
430 &ha->mbx_cmd_flags);
432 mb0 = RD_MAILBOX_REG(ha, reg, 0);
434 wrt_reg_word(&reg->hccr,
435 HCCR_CLR_RISC_INT);
436 rd_reg_word(&reg->hccr);
437 break;
440 /* clear this intr; it wasn't a mailbox intr */
441 wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
442 rd_reg_word(&reg->hccr);
444 udelay(5);
447 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
448 rval = mb0 & MBS_MASK;
449 for (idx = 0; idx < words; idx++)
450 ram[cnt + idx] =
451 cpu_to_be16(le16_to_cpu(dump[idx]));
452 } else {
453 rval = QLA_FUNCTION_FAILED;
457 *nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
458 return rval;
461 static inline void
462 qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
463 __be16 *buf)
465 __le16 __iomem *dmp_reg = &reg->u.isp2300.fb_cmd;
467 for ( ; count--; dmp_reg++)
468 *buf++ = htons(rd_reg_word(dmp_reg));
471 static inline void *
472 qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr)
474 if (!ha->eft)
475 return ptr;
477 memcpy(ptr, ha->eft, ntohl(ha->fw_dump->eft_size));
478 return ptr + ntohl(ha->fw_dump->eft_size);
481 static inline void *
482 qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
484 uint32_t cnt;
485 __be32 *iter_reg;
486 struct qla2xxx_fce_chain *fcec = ptr;
488 if (!ha->fce)
489 return ptr;
491 *last_chain = &fcec->type;
492 fcec->type = htonl(DUMP_CHAIN_FCE);
493 fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
494 fce_calc_size(ha->fce_bufs));
495 fcec->size = htonl(fce_calc_size(ha->fce_bufs));
496 fcec->addr_l = htonl(LSD(ha->fce_dma));
497 fcec->addr_h = htonl(MSD(ha->fce_dma));
499 iter_reg = fcec->eregs;
500 for (cnt = 0; cnt < 8; cnt++)
501 *iter_reg++ = htonl(ha->fce_mb[cnt]);
503 memcpy(iter_reg, ha->fce, ntohl(fcec->size));
505 return (char *)iter_reg + ntohl(fcec->size);
508 static inline void *
509 qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
511 struct qla2xxx_offld_chain *c = ptr;
513 if (!ha->exlogin_buf)
514 return ptr;
516 *last_chain = &c->type;
518 c->type = cpu_to_be32(DUMP_CHAIN_EXLOGIN);
519 c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) +
520 ha->exlogin_size);
521 c->size = cpu_to_be32(ha->exlogin_size);
522 c->addr = cpu_to_be64(ha->exlogin_buf_dma);
524 ptr += sizeof(struct qla2xxx_offld_chain);
525 memcpy(ptr, ha->exlogin_buf, ha->exlogin_size);
527 return (char *)ptr + be32_to_cpu(c->size);
530 static inline void *
531 qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
533 struct qla2xxx_offld_chain *c = ptr;
535 if (!ha->exchoffld_buf)
536 return ptr;
538 *last_chain = &c->type;
540 c->type = cpu_to_be32(DUMP_CHAIN_EXCHG);
541 c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) +
542 ha->exchoffld_size);
543 c->size = cpu_to_be32(ha->exchoffld_size);
544 c->addr = cpu_to_be64(ha->exchoffld_buf_dma);
546 ptr += sizeof(struct qla2xxx_offld_chain);
547 memcpy(ptr, ha->exchoffld_buf, ha->exchoffld_size);
549 return (char *)ptr + be32_to_cpu(c->size);
552 static inline void *
553 qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
554 __be32 **last_chain)
556 struct qla2xxx_mqueue_chain *q;
557 struct qla2xxx_mqueue_header *qh;
558 uint32_t num_queues;
559 int que;
560 struct {
561 int length;
562 void *ring;
563 } aq, *aqp;
565 if (!ha->tgt.atio_ring)
566 return ptr;
568 num_queues = 1;
569 aqp = &aq;
570 aqp->length = ha->tgt.atio_q_length;
571 aqp->ring = ha->tgt.atio_ring;
573 for (que = 0; que < num_queues; que++) {
574 /* aqp = ha->atio_q_map[que]; */
575 q = ptr;
576 *last_chain = &q->type;
577 q->type = htonl(DUMP_CHAIN_QUEUE);
578 q->chain_size = htonl(
579 sizeof(struct qla2xxx_mqueue_chain) +
580 sizeof(struct qla2xxx_mqueue_header) +
581 (aqp->length * sizeof(request_t)));
582 ptr += sizeof(struct qla2xxx_mqueue_chain);
584 /* Add header. */
585 qh = ptr;
586 qh->queue = htonl(TYPE_ATIO_QUEUE);
587 qh->number = htonl(que);
588 qh->size = htonl(aqp->length * sizeof(request_t));
589 ptr += sizeof(struct qla2xxx_mqueue_header);
591 /* Add data. */
592 memcpy(ptr, aqp->ring, aqp->length * sizeof(request_t));
594 ptr += aqp->length * sizeof(request_t);
597 return ptr;
600 static inline void *
601 qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
603 struct qla2xxx_mqueue_chain *q;
604 struct qla2xxx_mqueue_header *qh;
605 struct req_que *req;
606 struct rsp_que *rsp;
607 int que;
609 if (!ha->mqenable)
610 return ptr;
612 /* Request queues */
613 for (que = 1; que < ha->max_req_queues; que++) {
614 req = ha->req_q_map[que];
615 if (!req)
616 break;
618 /* Add chain. */
619 q = ptr;
620 *last_chain = &q->type;
621 q->type = htonl(DUMP_CHAIN_QUEUE);
622 q->chain_size = htonl(
623 sizeof(struct qla2xxx_mqueue_chain) +
624 sizeof(struct qla2xxx_mqueue_header) +
625 (req->length * sizeof(request_t)));
626 ptr += sizeof(struct qla2xxx_mqueue_chain);
628 /* Add header. */
629 qh = ptr;
630 qh->queue = htonl(TYPE_REQUEST_QUEUE);
631 qh->number = htonl(que);
632 qh->size = htonl(req->length * sizeof(request_t));
633 ptr += sizeof(struct qla2xxx_mqueue_header);
635 /* Add data. */
636 memcpy(ptr, req->ring, req->length * sizeof(request_t));
637 ptr += req->length * sizeof(request_t);
640 /* Response queues */
641 for (que = 1; que < ha->max_rsp_queues; que++) {
642 rsp = ha->rsp_q_map[que];
643 if (!rsp)
644 break;
646 /* Add chain. */
647 q = ptr;
648 *last_chain = &q->type;
649 q->type = htonl(DUMP_CHAIN_QUEUE);
650 q->chain_size = htonl(
651 sizeof(struct qla2xxx_mqueue_chain) +
652 sizeof(struct qla2xxx_mqueue_header) +
653 (rsp->length * sizeof(response_t)));
654 ptr += sizeof(struct qla2xxx_mqueue_chain);
656 /* Add header. */
657 qh = ptr;
658 qh->queue = htonl(TYPE_RESPONSE_QUEUE);
659 qh->number = htonl(que);
660 qh->size = htonl(rsp->length * sizeof(response_t));
661 ptr += sizeof(struct qla2xxx_mqueue_header);
663 /* Add data. */
664 memcpy(ptr, rsp->ring, rsp->length * sizeof(response_t));
665 ptr += rsp->length * sizeof(response_t);
668 return ptr;
671 static inline void *
672 qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, __be32 **last_chain)
674 uint32_t cnt, que_idx;
675 uint8_t que_cnt;
676 struct qla2xxx_mq_chain *mq = ptr;
677 device_reg_t *reg;
679 if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
680 IS_QLA28XX(ha))
681 return ptr;
683 mq = ptr;
684 *last_chain = &mq->type;
685 mq->type = htonl(DUMP_CHAIN_MQ);
686 mq->chain_size = htonl(sizeof(struct qla2xxx_mq_chain));
688 que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
689 ha->max_req_queues : ha->max_rsp_queues;
690 mq->count = htonl(que_cnt);
691 for (cnt = 0; cnt < que_cnt; cnt++) {
692 reg = ISP_QUE_REG(ha, cnt);
693 que_idx = cnt * 4;
694 mq->qregs[que_idx] =
695 htonl(rd_reg_dword(&reg->isp25mq.req_q_in));
696 mq->qregs[que_idx+1] =
697 htonl(rd_reg_dword(&reg->isp25mq.req_q_out));
698 mq->qregs[que_idx+2] =
699 htonl(rd_reg_dword(&reg->isp25mq.rsp_q_in));
700 mq->qregs[que_idx+3] =
701 htonl(rd_reg_dword(&reg->isp25mq.rsp_q_out));
704 return ptr + sizeof(struct qla2xxx_mq_chain);
707 void
708 qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
710 struct qla_hw_data *ha = vha->hw;
712 if (rval != QLA_SUCCESS) {
713 ql_log(ql_log_warn, vha, 0xd000,
714 "Failed to dump firmware (%x), dump status flags (0x%lx).\n",
715 rval, ha->fw_dump_cap_flags);
716 ha->fw_dumped = false;
717 } else {
718 ql_log(ql_log_info, vha, 0xd001,
719 "Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n",
720 vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags);
721 ha->fw_dumped = true;
722 qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
726 void qla2xxx_dump_fw(scsi_qla_host_t *vha)
728 unsigned long flags;
730 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
731 vha->hw->isp_ops->fw_dump(vha);
732 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
735 /**
736 * qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
737 * @vha: HA context
738 */
739 void
740 qla2300_fw_dump(scsi_qla_host_t *vha)
742 int rval;
743 uint32_t cnt;
744 struct qla_hw_data *ha = vha->hw;
745 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
746 __le16 __iomem *dmp_reg;
747 struct qla2300_fw_dump *fw;
748 void *nxt;
749 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
751 lockdep_assert_held(&ha->hardware_lock);
753 if (!ha->fw_dump) {
754 ql_log(ql_log_warn, vha, 0xd002,
755 "No buffer available for dump.\n");
756 return;
759 if (ha->fw_dumped) {
760 ql_log(ql_log_warn, vha, 0xd003,
761 "Firmware has been previously dumped (%p) "
762 "-- ignoring request.\n",
763 ha->fw_dump);
764 return;
766 fw = &ha->fw_dump->isp.isp23;
767 qla2xxx_prep_dump(ha, ha->fw_dump);
769 rval = QLA_SUCCESS;
770 fw->hccr = htons(rd_reg_word(&reg->hccr));
772 /* Pause RISC. */
773 wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
774 if (IS_QLA2300(ha)) {
775 for (cnt = 30000;
776 (rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
777 rval == QLA_SUCCESS; cnt--) {
778 if (cnt)
779 udelay(100);
780 else
781 rval = QLA_FUNCTION_TIMEOUT;
783 } else {
784 rd_reg_word(&reg->hccr); /* PCI Posting. */
785 udelay(10);
788 if (rval == QLA_SUCCESS) {
789 dmp_reg = &reg->flash_address;
790 for (cnt = 0; cnt < ARRAY_SIZE(fw->pbiu_reg); cnt++, dmp_reg++)
791 fw->pbiu_reg[cnt] = htons(rd_reg_word(dmp_reg));
793 dmp_reg = &reg->u.isp2300.req_q_in;
794 for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_host_reg);
795 cnt++, dmp_reg++)
796 fw->risc_host_reg[cnt] = htons(rd_reg_word(dmp_reg));
798 dmp_reg = &reg->u.isp2300.mailbox0;
799 for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg);
800 cnt++, dmp_reg++)
801 fw->mailbox_reg[cnt] = htons(rd_reg_word(dmp_reg));
803 wrt_reg_word(&reg->ctrl_status, 0x40);
804 qla2xxx_read_window(reg, 32, fw->resp_dma_reg);
806 wrt_reg_word(&reg->ctrl_status, 0x50);
807 qla2xxx_read_window(reg, 48, fw->dma_reg);
809 wrt_reg_word(&reg->ctrl_status, 0x00);
810 dmp_reg = &reg->risc_hw;
811 for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_hdw_reg);
812 cnt++, dmp_reg++)
813 fw->risc_hdw_reg[cnt] = htons(rd_reg_word(dmp_reg));
815 wrt_reg_word(&reg->pcr, 0x2000);
816 qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
818 wrt_reg_word(&reg->pcr, 0x2200);
819 qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
821 wrt_reg_word(&reg->pcr, 0x2400);
822 qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
824 wrt_reg_word(&reg->pcr, 0x2600);
825 qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
827 wrt_reg_word(&reg->pcr, 0x2800);
828 qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
830 wrt_reg_word(&reg->pcr, 0x2A00);
831 qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
833 wrt_reg_word(&reg->pcr, 0x2C00);
834 qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
836 wrt_reg_word(&reg->pcr, 0x2E00);
837 qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
839 wrt_reg_word(&reg->ctrl_status, 0x10);
840 qla2xxx_read_window(reg, 64, fw->frame_buf_hdw_reg);
842 wrt_reg_word(&reg->ctrl_status, 0x20);
843 qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
845 wrt_reg_word(&reg->ctrl_status, 0x30);
846 qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
848 /* Reset RISC. */
849 wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
850 for (cnt = 0; cnt < 30000; cnt++) {
851 if ((rd_reg_word(&reg->ctrl_status) &
852 CSR_ISP_SOFT_RESET) == 0)
853 break;
855 udelay(10);
859 if (!IS_QLA2300(ha)) {
860 for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
861 rval == QLA_SUCCESS; cnt--) {
862 if (cnt)
863 udelay(100);
864 else
865 rval = QLA_FUNCTION_TIMEOUT;
869 /* Get RISC SRAM. */
870 if (rval == QLA_SUCCESS)
871 rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram,
872 ARRAY_SIZE(fw->risc_ram), &nxt);
874 /* Get stack SRAM. */
875 if (rval == QLA_SUCCESS)
876 rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram,
877 ARRAY_SIZE(fw->stack_ram), &nxt);
879 /* Get data SRAM. */
880 if (rval == QLA_SUCCESS)
881 rval = qla2xxx_dump_ram(ha, 0x11000, fw->data_ram,
882 ha->fw_memory_size - 0x11000 + 1, &nxt);
884 if (rval == QLA_SUCCESS)
885 qla2xxx_copy_queues(ha, nxt);
887 qla2xxx_dump_post_process(base_vha, rval);
890 /**
891 * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
892 * @vha: HA context
893 */
894 void
895 qla2100_fw_dump(scsi_qla_host_t *vha)
897 int rval;
898 uint32_t cnt, timer;
899 uint16_t risc_address = 0;
900 uint16_t mb0 = 0, mb2 = 0;
901 struct qla_hw_data *ha = vha->hw;
902 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
903 __le16 __iomem *dmp_reg;
904 struct qla2100_fw_dump *fw;
905 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
907 lockdep_assert_held(&ha->hardware_lock);
909 if (!ha->fw_dump) {
910 ql_log(ql_log_warn, vha, 0xd004,
911 "No buffer available for dump.\n");
912 return;
915 if (ha->fw_dumped) {
916 ql_log(ql_log_warn, vha, 0xd005,
917 "Firmware has been previously dumped (%p) "
918 "-- ignoring request.\n",
919 ha->fw_dump);
920 return;
922 fw = &ha->fw_dump->isp.isp21;
923 qla2xxx_prep_dump(ha, ha->fw_dump);
925 rval = QLA_SUCCESS;
926 fw->hccr = htons(rd_reg_word(&reg->hccr));
928 /* Pause RISC. */
929 wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
930 for (cnt = 30000; (rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
931 rval == QLA_SUCCESS; cnt--) {
932 if (cnt)
933 udelay(100);
934 else
935 rval = QLA_FUNCTION_TIMEOUT;
937 if (rval == QLA_SUCCESS) {
938 dmp_reg = &reg->flash_address;
939 for (cnt = 0; cnt < ARRAY_SIZE(fw->pbiu_reg); cnt++, dmp_reg++)
940 fw->pbiu_reg[cnt] = htons(rd_reg_word(dmp_reg));
942 dmp_reg = &reg->u.isp2100.mailbox0;
943 for (cnt = 0; cnt < ha->mbx_count; cnt++, dmp_reg++) {
944 if (cnt == 8)
945 dmp_reg = &reg->u_end.isp2200.mailbox8;
947 fw->mailbox_reg[cnt] = htons(rd_reg_word(dmp_reg));
950 dmp_reg = &reg->u.isp2100.unused_2[0];
951 for (cnt = 0; cnt < ARRAY_SIZE(fw->dma_reg); cnt++, dmp_reg++)
952 fw->dma_reg[cnt] = htons(rd_reg_word(dmp_reg));
954 wrt_reg_word(&reg->ctrl_status, 0x00);
955 dmp_reg = &reg->risc_hw;
956 for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_hdw_reg); cnt++, dmp_reg++)
957 fw->risc_hdw_reg[cnt] = htons(rd_reg_word(dmp_reg));
959 wrt_reg_word(&reg->pcr, 0x2000);
960 qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
962 wrt_reg_word(&reg->pcr, 0x2100);
963 qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
965 wrt_reg_word(&reg->pcr, 0x2200);
966 qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
968 wrt_reg_word(&reg->pcr, 0x2300);
969 qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
971 wrt_reg_word(&reg->pcr, 0x2400);
972 qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
974 wrt_reg_word(&reg->pcr, 0x2500);
975 qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
977 wrt_reg_word(&reg->pcr, 0x2600);
978 qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
980 wrt_reg_word(&reg->pcr, 0x2700);
981 qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
983 wrt_reg_word(&reg->ctrl_status, 0x10);
984 qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg);
986 wrt_reg_word(&reg->ctrl_status, 0x20);
987 qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
989 wrt_reg_word(&reg->ctrl_status, 0x30);
990 qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
992 /* Reset the ISP. */
993 wrt_reg_word(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
996 for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
997 rval == QLA_SUCCESS; cnt--) {
998 if (cnt)
999 udelay(100);
1000 else
1001 rval = QLA_FUNCTION_TIMEOUT;
1004 /* Pause RISC. */
1005 if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) &&
1006 (rd_reg_word(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) {
1008 wrt_reg_word(&reg->hccr, HCCR_PAUSE_RISC);
1009 for (cnt = 30000;
1010 (rd_reg_word(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
1011 rval == QLA_SUCCESS; cnt--) {
1012 if (cnt)
1013 udelay(100);
1014 else
1015 rval = QLA_FUNCTION_TIMEOUT;
1017 if (rval == QLA_SUCCESS) {
1018 /* Set memory configuration and timing. */
1019 if (IS_QLA2100(ha))
1020 wrt_reg_word(&reg->mctr, 0xf1);
1021 else
1022 wrt_reg_word(&reg->mctr, 0xf2);
1023 rd_reg_word(&reg->mctr); /* PCI Posting. */
1025 /* Release RISC. */
1026 wrt_reg_word(&reg->hccr, HCCR_RELEASE_RISC);
1030 if (rval == QLA_SUCCESS) {
1031 /* Get RISC SRAM. */
1032 risc_address = 0x1000;
1033 WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
1034 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1036 for (cnt = 0; cnt < ARRAY_SIZE(fw->risc_ram) && rval == QLA_SUCCESS;
1037 cnt++, risc_address++) {
1038 WRT_MAILBOX_REG(ha, reg, 1, risc_address);
1039 wrt_reg_word(&reg->hccr, HCCR_SET_HOST_INT);
1041 for (timer = 6000000; timer != 0; timer--) {
1042 /* Check for pending interrupts. */
1043 if (rd_reg_word(&reg->istatus) & ISR_RISC_INT) {
1044 if (rd_reg_word(&reg->semaphore) & BIT_0) {
1045 set_bit(MBX_INTERRUPT,
1046 &ha->mbx_cmd_flags);
1048 mb0 = RD_MAILBOX_REG(ha, reg, 0);
1049 mb2 = RD_MAILBOX_REG(ha, reg, 2);
1051 wrt_reg_word(&reg->semaphore, 0);
1052 wrt_reg_word(&reg->hccr,
1053 HCCR_CLR_RISC_INT);
1054 rd_reg_word(&reg->hccr);
1055 break;
1057 wrt_reg_word(&reg->hccr, HCCR_CLR_RISC_INT);
1058 rd_reg_word(&reg->hccr);
1060 udelay(5);
1063 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
1064 rval = mb0 & MBS_MASK;
1065 fw->risc_ram[cnt] = htons(mb2);
1066 } else {
1067 rval = QLA_FUNCTION_FAILED;
1071 if (rval == QLA_SUCCESS)
1072 qla2xxx_copy_queues(ha, &fw->queue_dump[0]);
1074 qla2xxx_dump_post_process(base_vha, rval);
1077 void
1078 qla24xx_fw_dump(scsi_qla_host_t *vha)
1080 int rval;
1081 uint32_t cnt;
1082 struct qla_hw_data *ha = vha->hw;
1083 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1084 __le32 __iomem *dmp_reg;
1085 __be32 *iter_reg;
1086 __le16 __iomem *mbx_reg;
1087 struct qla24xx_fw_dump *fw;
1088 void *nxt;
1089 void *nxt_chain;
1090 __be32 *last_chain = NULL;
1091 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1093 lockdep_assert_held(&ha->hardware_lock);
1095 if (IS_P3P_TYPE(ha))
1096 return;
1098 ha->fw_dump_cap_flags = 0;
1100 if (!ha->fw_dump) {
1101 ql_log(ql_log_warn, vha, 0xd006,
1102 "No buffer available for dump.\n");
1103 return;
1106 if (ha->fw_dumped) {
1107 ql_log(ql_log_warn, vha, 0xd007,
1108 "Firmware has been previously dumped (%p) "
1109 "-- ignoring request.\n",
1110 ha->fw_dump);
1111 return;
1113 QLA_FW_STOPPED(ha);
1114 fw = &ha->fw_dump->isp.isp24;
1115 qla2xxx_prep_dump(ha, ha->fw_dump);
1117 fw->host_status = htonl(rd_reg_dword(&reg->host_status));
1119 /*
1120 * Pause RISC. No need to track timeout, as resetting the chip
1121 * is the right approach in case of a pause timeout.
1122 */
1123 qla24xx_pause_risc(reg, ha);
1125 /* Host interface registers. */
1126 dmp_reg = &reg->flash_addr;
1127 for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
1128 fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));
1130 /* Disable interrupts. */
1131 wrt_reg_dword(&reg->ictrl, 0);
1132 rd_reg_dword(&reg->ictrl);
1134 /* Shadow registers. */
1135 wrt_reg_dword(&reg->iobase_addr, 0x0F70);
1136 rd_reg_dword(&reg->iobase_addr);
1137 wrt_reg_dword(&reg->iobase_select, 0xB0000000);
1138 fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));
1140 wrt_reg_dword(&reg->iobase_select, 0xB0100000);
1141 fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));
1143 wrt_reg_dword(&reg->iobase_select, 0xB0200000);
1144 fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));
1146 wrt_reg_dword(&reg->iobase_select, 0xB0300000);
1147 fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));
1149 wrt_reg_dword(&reg->iobase_select, 0xB0400000);
1150 fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));
1152 wrt_reg_dword(&reg->iobase_select, 0xB0500000);
1153 fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));
1155 wrt_reg_dword(&reg->iobase_select, 0xB0600000);
1156 fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));
1158 /* Mailbox registers. */
1159 mbx_reg = &reg->mailbox0;
1160 for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
1161 fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));
1163 /* Transfer sequence registers. */
1164 iter_reg = fw->xseq_gp_reg;
1165 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1166 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1167 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1168 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1169 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1170 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1171 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1172 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1174 qla24xx_read_window(reg, 0xBFE0, 16, fw->xseq_0_reg);
1175 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1177 /* Receive sequence registers. */
1178 iter_reg = fw->rseq_gp_reg;
1179 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1180 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1181 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1182 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1183 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1184 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1185 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1186 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1188 qla24xx_read_window(reg, 0xFFD0, 16, fw->rseq_0_reg);
1189 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1190 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1192 /* Command DMA registers. */
1193 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1195 /* Queues. */
1196 iter_reg = fw->req0_dma_reg;
1197 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1198 dmp_reg = &reg->iobase_q;
1199 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1200 *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1202 iter_reg = fw->resp0_dma_reg;
1203 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1204 dmp_reg = &reg->iobase_q;
1205 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1206 *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1208 iter_reg = fw->req1_dma_reg;
1209 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1210 dmp_reg = &reg->iobase_q;
1211 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1212 *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1214 /* Transmit DMA registers. */
1215 iter_reg = fw->xmt0_dma_reg;
1216 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1217 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1219 iter_reg = fw->xmt1_dma_reg;
1220 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1221 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1223 iter_reg = fw->xmt2_dma_reg;
1224 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1225 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1227 iter_reg = fw->xmt3_dma_reg;
1228 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1229 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1231 iter_reg = fw->xmt4_dma_reg;
1232 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1233 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1235 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1237 /* Receive DMA registers. */
1238 iter_reg = fw->rcvt0_data_dma_reg;
1239 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1240 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1242 iter_reg = fw->rcvt1_data_dma_reg;
1243 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1244 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1246 /* RISC registers. */
1247 iter_reg = fw->risc_gp_reg;
1248 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1249 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1250 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1251 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1252 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1253 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1254 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1255 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1257 /* Local memory controller registers. */
1258 iter_reg = fw->lmc_reg;
1259 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1260 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1261 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1262 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1263 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1264 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1265 qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1267 /* Fibre Protocol Module registers. */
1268 iter_reg = fw->fpm_hdw_reg;
1269 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1270 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1271 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1272 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1273 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1274 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1275 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1276 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1277 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1278 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1279 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1280 qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1282 /* Frame Buffer registers. */
1283 iter_reg = fw->fb_hdw_reg;
1284 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1285 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1286 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1287 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1288 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1289 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1290 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1291 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1292 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1293 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1294 qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1296 rval = qla24xx_soft_reset(ha);
1297 if (rval != QLA_SUCCESS)
1298 goto qla24xx_fw_dump_failed_0;
1300 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1301 &nxt);
1302 if (rval != QLA_SUCCESS)
1303 goto qla24xx_fw_dump_failed_0;
1305 nxt = qla2xxx_copy_queues(ha, nxt);
1307 qla24xx_copy_eft(ha, nxt);
1309 nxt_chain = (void *)ha->fw_dump + ha->chain_offset;
1310 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1311 if (last_chain) {
1312 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1313 *last_chain |= htonl(DUMP_CHAIN_LAST);
1316 /* Adjust valid length. */
1317 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1319 qla24xx_fw_dump_failed_0:
1320 qla2xxx_dump_post_process(base_vha, rval);
1323 void
1324 qla25xx_fw_dump(scsi_qla_host_t *vha)
1326 int rval;
1327 uint32_t cnt;
1328 struct qla_hw_data *ha = vha->hw;
1329 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1330 __le32 __iomem *dmp_reg;
1331 __be32 *iter_reg;
1332 __le16 __iomem *mbx_reg;
1333 struct qla25xx_fw_dump *fw;
1334 void *nxt, *nxt_chain;
1335 __be32 *last_chain = NULL;
1336 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1338 lockdep_assert_held(&ha->hardware_lock);
1340 ha->fw_dump_cap_flags = 0;
1342 if (!ha->fw_dump) {
1343 ql_log(ql_log_warn, vha, 0xd008,
1344 "No buffer available for dump.\n");
1345 return;
1348 if (ha->fw_dumped) {
1349 ql_log(ql_log_warn, vha, 0xd009,
1350 "Firmware has been previously dumped (%p) "
1351 "-- ignoring request.\n",
1352 ha->fw_dump);
1353 return;
1355 QLA_FW_STOPPED(ha);
1356 fw = &ha->fw_dump->isp.isp25;
1357 qla2xxx_prep_dump(ha, ha->fw_dump);
1358 ha->fw_dump->version = htonl(2);
1360 fw->host_status = htonl(rd_reg_dword(&reg->host_status));
1362 /*
1363 * Pause RISC. No need to track timeout, as resetting the chip
1364 * is the right approach in case of a pause timeout.
1365 */
1366 qla24xx_pause_risc(reg, ha);
1368 /* Host/Risc registers. */
1369 iter_reg = fw->host_risc_reg;
1370 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
1371 qla24xx_read_window(reg, 0x7010, 16, iter_reg);
1373 /* PCIe registers. */
1374 wrt_reg_dword(&reg->iobase_addr, 0x7C00);
1375 rd_reg_dword(&reg->iobase_addr);
1376 wrt_reg_dword(&reg->iobase_window, 0x01);
1377 dmp_reg = &reg->iobase_c4;
1378 fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg));
1379 dmp_reg++;
1380 fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg));
1381 dmp_reg++;
1382 fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg));
1383 fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window));
1385 wrt_reg_dword(&reg->iobase_window, 0x00);
1386 rd_reg_dword(&reg->iobase_window);
1388 /* Host interface registers. */
1389 dmp_reg = &reg->flash_addr;
1390 for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
1391 fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));
1393 /* Disable interrupts. */
1394 wrt_reg_dword(&reg->ictrl, 0);
1395 rd_reg_dword(&reg->ictrl);
1397 /* Shadow registers. */
1398 wrt_reg_dword(&reg->iobase_addr, 0x0F70);
1399 rd_reg_dword(&reg->iobase_addr);
1400 wrt_reg_dword(&reg->iobase_select, 0xB0000000);
1401 fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));
1403 wrt_reg_dword(&reg->iobase_select, 0xB0100000);
1404 fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));
1406 wrt_reg_dword(&reg->iobase_select, 0xB0200000);
1407 fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));
1409 wrt_reg_dword(&reg->iobase_select, 0xB0300000);
1410 fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));
1412 wrt_reg_dword(&reg->iobase_select, 0xB0400000);
1413 fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));
1415 wrt_reg_dword(&reg->iobase_select, 0xB0500000);
1416 fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));
1418 wrt_reg_dword(&reg->iobase_select, 0xB0600000);
1419 fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));
1421 wrt_reg_dword(&reg->iobase_select, 0xB0700000);
1422 fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata));
1424 wrt_reg_dword(&reg->iobase_select, 0xB0800000);
1425 fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata));
1427 wrt_reg_dword(&reg->iobase_select, 0xB0900000);
1428 fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata));
1430 wrt_reg_dword(&reg->iobase_select, 0xB0A00000);
1431 fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata));
1433 /* RISC I/O register. */
1434 wrt_reg_dword(&reg->iobase_addr, 0x0010);
1435 fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window));
1437 /* Mailbox registers. */
1438 mbx_reg = &reg->mailbox0;
1439 for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
1440 fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));
1442 /* Transfer sequence registers. */
1443 iter_reg = fw->xseq_gp_reg;
1444 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1445 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1446 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1447 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1448 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1449 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1450 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1451 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1453 iter_reg = fw->xseq_0_reg;
1454 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
1455 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
1456 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
1458 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1460 /* Receive sequence registers. */
1461 iter_reg = fw->rseq_gp_reg;
1462 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1463 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1464 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1465 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1466 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1467 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1468 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1469 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1471 iter_reg = fw->rseq_0_reg;
1472 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
1473 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
1475 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1476 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1478 /* Auxiliary sequence registers. */
1479 iter_reg = fw->aseq_gp_reg;
1480 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
1481 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
1482 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
1483 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
1484 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
1485 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
1486 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
1487 qla24xx_read_window(reg, 0xB070, 16, iter_reg);
1489 iter_reg = fw->aseq_0_reg;
1490 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
1491 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
1493 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
1494 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
1496 /* Command DMA registers. */
1497 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1499 /* Queues. */
1500 iter_reg = fw->req0_dma_reg;
1501 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1502 dmp_reg = &reg->iobase_q;
1503 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1504 *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1506 iter_reg = fw->resp0_dma_reg;
1507 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1508 dmp_reg = &reg->iobase_q;
1509 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1510 *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1512 iter_reg = fw->req1_dma_reg;
1513 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1514 dmp_reg = &reg->iobase_q;
1515 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1516 *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1518 /* Transmit DMA registers. */
1519 iter_reg = fw->xmt0_dma_reg;
1520 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1521 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1523 iter_reg = fw->xmt1_dma_reg;
1524 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1525 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1527 iter_reg = fw->xmt2_dma_reg;
1528 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1529 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1531 iter_reg = fw->xmt3_dma_reg;
1532 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1533 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1535 iter_reg = fw->xmt4_dma_reg;
1536 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1537 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1539 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1541 /* Receive DMA registers. */
1542 iter_reg = fw->rcvt0_data_dma_reg;
1543 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1544 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1546 iter_reg = fw->rcvt1_data_dma_reg;
1547 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1548 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1550 /* RISC registers. */
1551 iter_reg = fw->risc_gp_reg;
1552 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1553 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1554 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1555 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1556 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1557 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1558 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1559 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1561 /* Local memory controller registers. */
1562 iter_reg = fw->lmc_reg;
1563 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1564 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1565 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1566 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1567 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1568 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1569 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1570 qla24xx_read_window(reg, 0x3070, 16, iter_reg);
1572 /* Fibre Protocol Module registers. */
1573 iter_reg = fw->fpm_hdw_reg;
1574 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1575 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1576 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1577 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1578 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1579 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1580 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1581 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1582 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1583 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1584 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1585 qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1587 /* Frame Buffer registers. */
1588 iter_reg = fw->fb_hdw_reg;
1589 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1590 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1591 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1592 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1593 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1594 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1595 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1596 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1597 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1598 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1599 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1600 qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
1602 /* Multi queue registers */
1603 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
1604 &last_chain);
1606 rval = qla24xx_soft_reset(ha);
1607 if (rval != QLA_SUCCESS)
1608 goto qla25xx_fw_dump_failed_0;
1610 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1611 &nxt);
1612 if (rval != QLA_SUCCESS)
1613 goto qla25xx_fw_dump_failed_0;
1615 nxt = qla2xxx_copy_queues(ha, nxt);
1617 qla24xx_copy_eft(ha, nxt);
1619 /* Chain entries -- started with MQ. */
1620 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1621 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
1622 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1623 nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
1624 if (last_chain) {
1625 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1626 *last_chain |= htonl(DUMP_CHAIN_LAST);
1629 /* Adjust valid length. */
1630 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1632 qla25xx_fw_dump_failed_0:
1633 qla2xxx_dump_post_process(base_vha, rval);
1636 void
1637 qla81xx_fw_dump(scsi_qla_host_t *vha)
1639 int rval;
1640 uint32_t cnt;
1641 struct qla_hw_data *ha = vha->hw;
1642 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1643 __le32 __iomem *dmp_reg;
1644 __be32 *iter_reg;
1645 __le16 __iomem *mbx_reg;
1646 struct qla81xx_fw_dump *fw;
1647 void *nxt, *nxt_chain;
1648 __be32 *last_chain = NULL;
1649 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1651 lockdep_assert_held(&ha->hardware_lock);
1653 ha->fw_dump_cap_flags = 0;
1655 if (!ha->fw_dump) {
1656 ql_log(ql_log_warn, vha, 0xd00a,
1657 "No buffer available for dump.\n");
1658 return;
1661 if (ha->fw_dumped) {
1662 ql_log(ql_log_warn, vha, 0xd00b,
1663 "Firmware has been previously dumped (%p) "
1664 "-- ignoring request.\n",
1665 ha->fw_dump);
1666 return;
1668 fw = &ha->fw_dump->isp.isp81;
1669 qla2xxx_prep_dump(ha, ha->fw_dump);
1671 fw->host_status = htonl(rd_reg_dword(&reg->host_status));
1673 /*
1674 * Pause RISC. No need to track timeout, as resetting the chip
1675 * is the right approach in case of a pause timeout.
1676 */
1677 qla24xx_pause_risc(reg, ha);
1679 /* Host/Risc registers. */
1680 iter_reg = fw->host_risc_reg;
1681 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
1682 qla24xx_read_window(reg, 0x7010, 16, iter_reg);
1684 /* PCIe registers. */
1685 wrt_reg_dword(&reg->iobase_addr, 0x7C00);
1686 rd_reg_dword(&reg->iobase_addr);
1687 wrt_reg_dword(&reg->iobase_window, 0x01);
1688 dmp_reg = &reg->iobase_c4;
1689 fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg));
1690 dmp_reg++;
1691 fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg));
1692 dmp_reg++;
1693 fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg));
1694 fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window));
1696 wrt_reg_dword(&reg->iobase_window, 0x00);
1697 rd_reg_dword(&reg->iobase_window);
1699 /* Host interface registers. */
1700 dmp_reg = &reg->flash_addr;
1701 for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
1702 fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));
1704 /* Disable interrupts. */
1705 wrt_reg_dword(&reg->ictrl, 0);
1706 rd_reg_dword(&reg->ictrl);
1708 /* Shadow registers. */
1709 wrt_reg_dword(&reg->iobase_addr, 0x0F70);
1710 rd_reg_dword(&reg->iobase_addr);
1711 wrt_reg_dword(&reg->iobase_select, 0xB0000000);
1712 fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));
1714 wrt_reg_dword(&reg->iobase_select, 0xB0100000);
1715 fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));
1717 wrt_reg_dword(&reg->iobase_select, 0xB0200000);
1718 fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));
1720 wrt_reg_dword(&reg->iobase_select, 0xB0300000);
1721 fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));
1723 wrt_reg_dword(&reg->iobase_select, 0xB0400000);
1724 fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));
1726 wrt_reg_dword(&reg->iobase_select, 0xB0500000);
1727 fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));
1729 wrt_reg_dword(&reg->iobase_select, 0xB0600000);
1730 fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));
1732 wrt_reg_dword(&reg->iobase_select, 0xB0700000);
1733 fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata));
1735 wrt_reg_dword(&reg->iobase_select, 0xB0800000);
1736 fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata));
1738 wrt_reg_dword(&reg->iobase_select, 0xB0900000);
1739 fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata));
1741 wrt_reg_dword(&reg->iobase_select, 0xB0A00000);
1742 fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata));
1744 /* RISC I/O register. */
1745 wrt_reg_dword(&reg->iobase_addr, 0x0010);
1746 fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window));
1748 /* Mailbox registers. */
1749 mbx_reg = &reg->mailbox0;
1750 for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
1751 fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));
1753 /* Transfer sequence registers. */
1754 iter_reg = fw->xseq_gp_reg;
1755 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1756 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1757 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1758 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1759 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1760 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1761 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1762 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1764 iter_reg = fw->xseq_0_reg;
1765 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
1766 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
1767 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
1769 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1771 /* Receive sequence registers. */
1772 iter_reg = fw->rseq_gp_reg;
1773 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1774 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1775 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1776 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1777 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1778 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1779 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1780 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1782 iter_reg = fw->rseq_0_reg;
1783 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
1784 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
1786 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1787 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1789 /* Auxiliary sequence registers. */
1790 iter_reg = fw->aseq_gp_reg;
1791 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
1792 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
1793 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
1794 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
1795 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
1796 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
1797 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
1798 qla24xx_read_window(reg, 0xB070, 16, iter_reg);
1800 iter_reg = fw->aseq_0_reg;
1801 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
1802 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
1804 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
1805 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
1807 /* Command DMA registers. */
1808 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1810 /* Queues. */
1811 iter_reg = fw->req0_dma_reg;
1812 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1813 dmp_reg = &reg->iobase_q;
1814 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1815 *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1817 iter_reg = fw->resp0_dma_reg;
1818 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1819 dmp_reg = &reg->iobase_q;
1820 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1821 *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1823 iter_reg = fw->req1_dma_reg;
1824 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1825 dmp_reg = &reg->iobase_q;
1826 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1827 *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
1829 /* Transmit DMA registers. */
1830 iter_reg = fw->xmt0_dma_reg;
1831 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1832 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1834 iter_reg = fw->xmt1_dma_reg;
1835 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1836 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1838 iter_reg = fw->xmt2_dma_reg;
1839 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1840 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1842 iter_reg = fw->xmt3_dma_reg;
1843 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1844 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1846 iter_reg = fw->xmt4_dma_reg;
1847 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1848 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1850 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1852 /* Receive DMA registers. */
1853 iter_reg = fw->rcvt0_data_dma_reg;
1854 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1855 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1857 iter_reg = fw->rcvt1_data_dma_reg;
1858 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1859 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1861 /* RISC registers. */
1862 iter_reg = fw->risc_gp_reg;
1863 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1864 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1865 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1866 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1867 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1868 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1869 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1870 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1872 /* Local memory controller registers. */
1873 iter_reg = fw->lmc_reg;
1874 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1875 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1876 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1877 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1878 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1879 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1880 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1881 qla24xx_read_window(reg, 0x3070, 16, iter_reg);
1883 /* Fibre Protocol Module registers. */
1884 iter_reg = fw->fpm_hdw_reg;
1885 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1886 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1887 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1888 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1889 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1890 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1891 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1892 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1893 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1894 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1895 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1896 iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1897 iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
1898 qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
1900 /* Frame Buffer registers. */
1901 iter_reg = fw->fb_hdw_reg;
1902 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1903 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1904 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1905 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1906 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1907 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1908 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1909 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1910 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1911 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1912 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1913 iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
1914 qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
1916 /* Multi queue registers */
1917 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
1918 &last_chain);
1920 rval = qla24xx_soft_reset(ha);
1921 if (rval != QLA_SUCCESS)
1922 goto qla81xx_fw_dump_failed_0;
1924 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1925 &nxt);
1926 if (rval != QLA_SUCCESS)
1927 goto qla81xx_fw_dump_failed_0;
1929 nxt = qla2xxx_copy_queues(ha, nxt);
1931 qla24xx_copy_eft(ha, nxt);
1933 /* Chain entries -- started with MQ. */
1934 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1935 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
1936 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1937 nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
1938 nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain);
1939 if (last_chain) {
1940 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1941 *last_chain |= htonl(DUMP_CHAIN_LAST);
1942 }
1944 /* Adjust valid length. */
1945 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1947 qla81xx_fw_dump_failed_0:
1948 qla2xxx_dump_post_process(base_vha, rval);
1949 }
1951 void
1952 qla83xx_fw_dump(scsi_qla_host_t *vha)
1953 {
1954 int rval;
1955 uint32_t cnt;
1956 struct qla_hw_data *ha = vha->hw;
1957 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1958 __le32 __iomem *dmp_reg;
1959 __be32 *iter_reg;
1960 __le16 __iomem *mbx_reg;
1961 struct qla83xx_fw_dump *fw;
1962 void *nxt, *nxt_chain;
1963 __be32 *last_chain = NULL;
1964 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1966 lockdep_assert_held(&ha->hardware_lock);
1968 ha->fw_dump_cap_flags = 0;
1970 if (!ha->fw_dump) {
1971 ql_log(ql_log_warn, vha, 0xd00c,
1972 "No buffer available for dump!!!\n");
1973 return;
1974 }
1976 if (ha->fw_dumped) {
1977 ql_log(ql_log_warn, vha, 0xd00d,
1978 "Firmware has been previously dumped (%p) -- ignoring "
1979 "request...\n", ha->fw_dump);
1980 return;
1981 }
1982 QLA_FW_STOPPED(ha);
1983 fw = &ha->fw_dump->isp.isp83;
1984 qla2xxx_prep_dump(ha, ha->fw_dump);
1986 fw->host_status = htonl(rd_reg_dword(&reg->host_status));
1988 /*
1989 * Pause RISC. No need to track timeout, as resetting the chip
1990 * is the right approach in case of pause timeout.
1991 */
1992 qla24xx_pause_risc(reg, ha);
1994 wrt_reg_dword(&reg->iobase_addr, 0x6000);
1995 dmp_reg = &reg->iobase_window;
1996 rd_reg_dword(dmp_reg);
1997 wrt_reg_dword(dmp_reg, 0);
1999 dmp_reg = &reg->unused_4_1[0];
2000 rd_reg_dword(dmp_reg);
2001 wrt_reg_dword(dmp_reg, 0);
2003 wrt_reg_dword(&reg->iobase_addr, 0x6010);
2004 dmp_reg = &reg->unused_4_1[2];
2005 rd_reg_dword(dmp_reg);
2006 wrt_reg_dword(dmp_reg, 0);
2008 /* Select PCR and disable ECC checking and correction. */
2009 wrt_reg_dword(&reg->iobase_addr, 0x0F70);
2010 rd_reg_dword(&reg->iobase_addr);
2011 wrt_reg_dword(&reg->iobase_select, 0x60000000); /* write to F0h = PCR */
2013 /* Host/RISC registers. */
2014 iter_reg = fw->host_risc_reg;
2015 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
2016 iter_reg = qla24xx_read_window(reg, 0x7010, 16, iter_reg);
2017 qla24xx_read_window(reg, 0x7040, 16, iter_reg);
2019 /* PCIe registers. */
2020 wrt_reg_dword(&reg->iobase_addr, 0x7C00);
2021 rd_reg_dword(&reg->iobase_addr);
2022 wrt_reg_dword(&reg->iobase_window, 0x01);
2023 dmp_reg = &reg->iobase_c4;
2024 fw->pcie_regs[0] = htonl(rd_reg_dword(dmp_reg));
2025 dmp_reg++;
2026 fw->pcie_regs[1] = htonl(rd_reg_dword(dmp_reg));
2027 dmp_reg++;
2028 fw->pcie_regs[2] = htonl(rd_reg_dword(dmp_reg));
2029 fw->pcie_regs[3] = htonl(rd_reg_dword(&reg->iobase_window));
2031 wrt_reg_dword(&reg->iobase_window, 0x00);
2032 rd_reg_dword(&reg->iobase_window);
2034 /* Host interface registers. */
2035 dmp_reg = &reg->flash_addr;
2036 for (cnt = 0; cnt < ARRAY_SIZE(fw->host_reg); cnt++, dmp_reg++)
2037 fw->host_reg[cnt] = htonl(rd_reg_dword(dmp_reg));
2039 /* Disable interrupts. */
2040 wrt_reg_dword(&reg->ictrl, 0);
2041 rd_reg_dword(&reg->ictrl);
2043 /* Shadow registers. */
2044 wrt_reg_dword(&reg->iobase_addr, 0x0F70);
2045 rd_reg_dword(&reg->iobase_addr);
2046 wrt_reg_dword(&reg->iobase_select, 0xB0000000);
2047 fw->shadow_reg[0] = htonl(rd_reg_dword(&reg->iobase_sdata));
2049 wrt_reg_dword(&reg->iobase_select, 0xB0100000);
2050 fw->shadow_reg[1] = htonl(rd_reg_dword(&reg->iobase_sdata));
2052 wrt_reg_dword(&reg->iobase_select, 0xB0200000);
2053 fw->shadow_reg[2] = htonl(rd_reg_dword(&reg->iobase_sdata));
2055 wrt_reg_dword(&reg->iobase_select, 0xB0300000);
2056 fw->shadow_reg[3] = htonl(rd_reg_dword(&reg->iobase_sdata));
2058 wrt_reg_dword(&reg->iobase_select, 0xB0400000);
2059 fw->shadow_reg[4] = htonl(rd_reg_dword(&reg->iobase_sdata));
2061 wrt_reg_dword(&reg->iobase_select, 0xB0500000);
2062 fw->shadow_reg[5] = htonl(rd_reg_dword(&reg->iobase_sdata));
2064 wrt_reg_dword(&reg->iobase_select, 0xB0600000);
2065 fw->shadow_reg[6] = htonl(rd_reg_dword(&reg->iobase_sdata));
2067 wrt_reg_dword(&reg->iobase_select, 0xB0700000);
2068 fw->shadow_reg[7] = htonl(rd_reg_dword(&reg->iobase_sdata));
2070 wrt_reg_dword(&reg->iobase_select, 0xB0800000);
2071 fw->shadow_reg[8] = htonl(rd_reg_dword(&reg->iobase_sdata));
2073 wrt_reg_dword(&reg->iobase_select, 0xB0900000);
2074 fw->shadow_reg[9] = htonl(rd_reg_dword(&reg->iobase_sdata));
2076 wrt_reg_dword(&reg->iobase_select, 0xB0A00000);
2077 fw->shadow_reg[10] = htonl(rd_reg_dword(&reg->iobase_sdata));
2079 /* RISC I/O register. */
2080 wrt_reg_dword(&reg->iobase_addr, 0x0010);
2081 fw->risc_io_reg = htonl(rd_reg_dword(&reg->iobase_window));
2083 /* Mailbox registers. */
2084 mbx_reg = &reg->mailbox0;
2085 for (cnt = 0; cnt < ARRAY_SIZE(fw->mailbox_reg); cnt++, mbx_reg++)
2086 fw->mailbox_reg[cnt] = htons(rd_reg_word(mbx_reg));
2088 /* Transfer sequence registers. */
2089 iter_reg = fw->xseq_gp_reg;
2090 iter_reg = qla24xx_read_window(reg, 0xBE00, 16, iter_reg);
2091 iter_reg = qla24xx_read_window(reg, 0xBE10, 16, iter_reg);
2092 iter_reg = qla24xx_read_window(reg, 0xBE20, 16, iter_reg);
2093 iter_reg = qla24xx_read_window(reg, 0xBE30, 16, iter_reg);
2094 iter_reg = qla24xx_read_window(reg, 0xBE40, 16, iter_reg);
2095 iter_reg = qla24xx_read_window(reg, 0xBE50, 16, iter_reg);
2096 iter_reg = qla24xx_read_window(reg, 0xBE60, 16, iter_reg);
2097 iter_reg = qla24xx_read_window(reg, 0xBE70, 16, iter_reg);
2098 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
2099 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
2100 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
2101 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
2102 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
2103 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
2104 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
2105 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
2107 iter_reg = fw->xseq_0_reg;
2108 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
2109 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
2110 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
2112 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
2114 qla24xx_read_window(reg, 0xBEF0, 16, fw->xseq_2_reg);
2116 /* Receive sequence registers. */
2117 iter_reg = fw->rseq_gp_reg;
2118 iter_reg = qla24xx_read_window(reg, 0xFE00, 16, iter_reg);
2119 iter_reg = qla24xx_read_window(reg, 0xFE10, 16, iter_reg);
2120 iter_reg = qla24xx_read_window(reg, 0xFE20, 16, iter_reg);
2121 iter_reg = qla24xx_read_window(reg, 0xFE30, 16, iter_reg);
2122 iter_reg = qla24xx_read_window(reg, 0xFE40, 16, iter_reg);
2123 iter_reg = qla24xx_read_window(reg, 0xFE50, 16, iter_reg);
2124 iter_reg = qla24xx_read_window(reg, 0xFE60, 16, iter_reg);
2125 iter_reg = qla24xx_read_window(reg, 0xFE70, 16, iter_reg);
2126 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
2127 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
2128 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
2129 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
2130 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
2131 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
2132 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
2133 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
2135 iter_reg = fw->rseq_0_reg;
2136 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
2137 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
2139 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
2140 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
2141 qla24xx_read_window(reg, 0xFEF0, 16, fw->rseq_3_reg);
2143 /* Auxiliary sequence registers. */
2144 iter_reg = fw->aseq_gp_reg;
2145 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
2146 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
2147 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
2148 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
2149 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
2150 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
2151 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
2152 iter_reg = qla24xx_read_window(reg, 0xB070, 16, iter_reg);
2153 iter_reg = qla24xx_read_window(reg, 0xB100, 16, iter_reg);
2154 iter_reg = qla24xx_read_window(reg, 0xB110, 16, iter_reg);
2155 iter_reg = qla24xx_read_window(reg, 0xB120, 16, iter_reg);
2156 iter_reg = qla24xx_read_window(reg, 0xB130, 16, iter_reg);
2157 iter_reg = qla24xx_read_window(reg, 0xB140, 16, iter_reg);
2158 iter_reg = qla24xx_read_window(reg, 0xB150, 16, iter_reg);
2159 iter_reg = qla24xx_read_window(reg, 0xB160, 16, iter_reg);
2160 qla24xx_read_window(reg, 0xB170, 16, iter_reg);
2162 iter_reg = fw->aseq_0_reg;
2163 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
2164 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
2166 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
2167 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
2168 qla24xx_read_window(reg, 0xB1F0, 16, fw->aseq_3_reg);
2170 /* Command DMA registers. */
2171 iter_reg = fw->cmd_dma_reg;
2172 iter_reg = qla24xx_read_window(reg, 0x7100, 16, iter_reg);
2173 iter_reg = qla24xx_read_window(reg, 0x7120, 16, iter_reg);
2174 iter_reg = qla24xx_read_window(reg, 0x7130, 16, iter_reg);
2175 qla24xx_read_window(reg, 0x71F0, 16, iter_reg);
2177 /* Queues. */
2178 iter_reg = fw->req0_dma_reg;
2179 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
2180 dmp_reg = &reg->iobase_q;
2181 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
2182 *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
2184 iter_reg = fw->resp0_dma_reg;
2185 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
2186 dmp_reg = &reg->iobase_q;
2187 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
2188 *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
2190 iter_reg = fw->req1_dma_reg;
2191 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
2192 dmp_reg = &reg->iobase_q;
2193 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
2194 *iter_reg++ = htonl(rd_reg_dword(dmp_reg));
2196 /* Transmit DMA registers. */
2197 iter_reg = fw->xmt0_dma_reg;
2198 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
2199 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
2201 iter_reg = fw->xmt1_dma_reg;
2202 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
2203 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
2205 iter_reg = fw->xmt2_dma_reg;
2206 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
2207 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
2209 iter_reg = fw->xmt3_dma_reg;
2210 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
2211 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
2213 iter_reg = fw->xmt4_dma_reg;
2214 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
2215 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
2217 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
2219 /* Receive DMA registers. */
2220 iter_reg = fw->rcvt0_data_dma_reg;
2221 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
2222 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
2224 iter_reg = fw->rcvt1_data_dma_reg;
2225 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
2226 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
2228 /* RISC registers. */
2229 iter_reg = fw->risc_gp_reg;
2230 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
2231 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
2232 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
2233 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
2234 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
2235 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
2236 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
2237 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
2239 /* Local memory controller registers. */
2240 iter_reg = fw->lmc_reg;
2241 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
2242 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
2243 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
2244 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
2245 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
2246 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
2247 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
2248 qla24xx_read_window(reg, 0x3070, 16, iter_reg);
2250 /* Fibre Protocol Module registers. */
2251 iter_reg = fw->fpm_hdw_reg;
2252 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
2253 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
2254 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
2255 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
2256 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
2257 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
2258 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
2259 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
2260 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
2261 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
2262 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
2263 iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
2264 iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
2265 iter_reg = qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
2266 iter_reg = qla24xx_read_window(reg, 0x40E0, 16, iter_reg);
2267 qla24xx_read_window(reg, 0x40F0, 16, iter_reg);
2269 /* RQ0 Array registers. */
2270 iter_reg = fw->rq0_array_reg;
2271 iter_reg = qla24xx_read_window(reg, 0x5C00, 16, iter_reg);
2272 iter_reg = qla24xx_read_window(reg, 0x5C10, 16, iter_reg);
2273 iter_reg = qla24xx_read_window(reg, 0x5C20, 16, iter_reg);
2274 iter_reg = qla24xx_read_window(reg, 0x5C30, 16, iter_reg);
2275 iter_reg = qla24xx_read_window(reg, 0x5C40, 16, iter_reg);
2276 iter_reg = qla24xx_read_window(reg, 0x5C50, 16, iter_reg);
2277 iter_reg = qla24xx_read_window(reg, 0x5C60, 16, iter_reg);
2278 iter_reg = qla24xx_read_window(reg, 0x5C70, 16, iter_reg);
2279 iter_reg = qla24xx_read_window(reg, 0x5C80, 16, iter_reg);
2280 iter_reg = qla24xx_read_window(reg, 0x5C90, 16, iter_reg);
2281 iter_reg = qla24xx_read_window(reg, 0x5CA0, 16, iter_reg);
2282 iter_reg = qla24xx_read_window(reg, 0x5CB0, 16, iter_reg);
2283 iter_reg = qla24xx_read_window(reg, 0x5CC0, 16, iter_reg);
2284 iter_reg = qla24xx_read_window(reg, 0x5CD0, 16, iter_reg);
2285 iter_reg = qla24xx_read_window(reg, 0x5CE0, 16, iter_reg);
2286 qla24xx_read_window(reg, 0x5CF0, 16, iter_reg);
2288 /* RQ1 Array registers. */
2289 iter_reg = fw->rq1_array_reg;
2290 iter_reg = qla24xx_read_window(reg, 0x5D00, 16, iter_reg);
2291 iter_reg = qla24xx_read_window(reg, 0x5D10, 16, iter_reg);
2292 iter_reg = qla24xx_read_window(reg, 0x5D20, 16, iter_reg);
2293 iter_reg = qla24xx_read_window(reg, 0x5D30, 16, iter_reg);
2294 iter_reg = qla24xx_read_window(reg, 0x5D40, 16, iter_reg);
2295 iter_reg = qla24xx_read_window(reg, 0x5D50, 16, iter_reg);
2296 iter_reg = qla24xx_read_window(reg, 0x5D60, 16, iter_reg);
2297 iter_reg = qla24xx_read_window(reg, 0x5D70, 16, iter_reg);
2298 iter_reg = qla24xx_read_window(reg, 0x5D80, 16, iter_reg);
2299 iter_reg = qla24xx_read_window(reg, 0x5D90, 16, iter_reg);
2300 iter_reg = qla24xx_read_window(reg, 0x5DA0, 16, iter_reg);
2301 iter_reg = qla24xx_read_window(reg, 0x5DB0, 16, iter_reg);
2302 iter_reg = qla24xx_read_window(reg, 0x5DC0, 16, iter_reg);
2303 iter_reg = qla24xx_read_window(reg, 0x5DD0, 16, iter_reg);
2304 iter_reg = qla24xx_read_window(reg, 0x5DE0, 16, iter_reg);
2305 qla24xx_read_window(reg, 0x5DF0, 16, iter_reg);
2307 /* RP0 Array registers. */
2308 iter_reg = fw->rp0_array_reg;
2309 iter_reg = qla24xx_read_window(reg, 0x5E00, 16, iter_reg);
2310 iter_reg = qla24xx_read_window(reg, 0x5E10, 16, iter_reg);
2311 iter_reg = qla24xx_read_window(reg, 0x5E20, 16, iter_reg);
2312 iter_reg = qla24xx_read_window(reg, 0x5E30, 16, iter_reg);
2313 iter_reg = qla24xx_read_window(reg, 0x5E40, 16, iter_reg);
2314 iter_reg = qla24xx_read_window(reg, 0x5E50, 16, iter_reg);
2315 iter_reg = qla24xx_read_window(reg, 0x5E60, 16, iter_reg);
2316 iter_reg = qla24xx_read_window(reg, 0x5E70, 16, iter_reg);
2317 iter_reg = qla24xx_read_window(reg, 0x5E80, 16, iter_reg);
2318 iter_reg = qla24xx_read_window(reg, 0x5E90, 16, iter_reg);
2319 iter_reg = qla24xx_read_window(reg, 0x5EA0, 16, iter_reg);
2320 iter_reg = qla24xx_read_window(reg, 0x5EB0, 16, iter_reg);
2321 iter_reg = qla24xx_read_window(reg, 0x5EC0, 16, iter_reg);
2322 iter_reg = qla24xx_read_window(reg, 0x5ED0, 16, iter_reg);
2323 iter_reg = qla24xx_read_window(reg, 0x5EE0, 16, iter_reg);
2324 qla24xx_read_window(reg, 0x5EF0, 16, iter_reg);
2326 /* RP1 Array registers. */
2327 iter_reg = fw->rp1_array_reg;
2328 iter_reg = qla24xx_read_window(reg, 0x5F00, 16, iter_reg);
2329 iter_reg = qla24xx_read_window(reg, 0x5F10, 16, iter_reg);
2330 iter_reg = qla24xx_read_window(reg, 0x5F20, 16, iter_reg);
2331 iter_reg = qla24xx_read_window(reg, 0x5F30, 16, iter_reg);
2332 iter_reg = qla24xx_read_window(reg, 0x5F40, 16, iter_reg);
2333 iter_reg = qla24xx_read_window(reg, 0x5F50, 16, iter_reg);
2334 iter_reg = qla24xx_read_window(reg, 0x5F60, 16, iter_reg);
2335 iter_reg = qla24xx_read_window(reg, 0x5F70, 16, iter_reg);
2336 iter_reg = qla24xx_read_window(reg, 0x5F80, 16, iter_reg);
2337 iter_reg = qla24xx_read_window(reg, 0x5F90, 16, iter_reg);
2338 iter_reg = qla24xx_read_window(reg, 0x5FA0, 16, iter_reg);
2339 iter_reg = qla24xx_read_window(reg, 0x5FB0, 16, iter_reg);
2340 iter_reg = qla24xx_read_window(reg, 0x5FC0, 16, iter_reg);
2341 iter_reg = qla24xx_read_window(reg, 0x5FD0, 16, iter_reg);
2342 iter_reg = qla24xx_read_window(reg, 0x5FE0, 16, iter_reg);
2343 qla24xx_read_window(reg, 0x5FF0, 16, iter_reg);
2345 iter_reg = fw->at0_array_reg;
2346 iter_reg = qla24xx_read_window(reg, 0x7080, 16, iter_reg);
2347 iter_reg = qla24xx_read_window(reg, 0x7090, 16, iter_reg);
2348 iter_reg = qla24xx_read_window(reg, 0x70A0, 16, iter_reg);
2349 iter_reg = qla24xx_read_window(reg, 0x70B0, 16, iter_reg);
2350 iter_reg = qla24xx_read_window(reg, 0x70C0, 16, iter_reg);
2351 iter_reg = qla24xx_read_window(reg, 0x70D0, 16, iter_reg);
2352 iter_reg = qla24xx_read_window(reg, 0x70E0, 16, iter_reg);
2353 qla24xx_read_window(reg, 0x70F0, 16, iter_reg);
2355 /* I/O Queue Control registers. */
2356 qla24xx_read_window(reg, 0x7800, 16, fw->queue_control_reg);
2358 /* Frame Buffer registers. */
2359 iter_reg = fw->fb_hdw_reg;
2360 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
2361 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
2362 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
2363 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
2364 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
2365 iter_reg = qla24xx_read_window(reg, 0x6060, 16, iter_reg);
2366 iter_reg = qla24xx_read_window(reg, 0x6070, 16, iter_reg);
2367 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
2368 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
2369 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
2370 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
2371 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
2372 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
2373 iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
2374 iter_reg = qla24xx_read_window(reg, 0x6530, 16, iter_reg);
2375 iter_reg = qla24xx_read_window(reg, 0x6540, 16, iter_reg);
2376 iter_reg = qla24xx_read_window(reg, 0x6550, 16, iter_reg);
2377 iter_reg = qla24xx_read_window(reg, 0x6560, 16, iter_reg);
2378 iter_reg = qla24xx_read_window(reg, 0x6570, 16, iter_reg);
2379 iter_reg = qla24xx_read_window(reg, 0x6580, 16, iter_reg);
2380 iter_reg = qla24xx_read_window(reg, 0x6590, 16, iter_reg);
2381 iter_reg = qla24xx_read_window(reg, 0x65A0, 16, iter_reg);
2382 iter_reg = qla24xx_read_window(reg, 0x65B0, 16, iter_reg);
2383 iter_reg = qla24xx_read_window(reg, 0x65C0, 16, iter_reg);
2384 iter_reg = qla24xx_read_window(reg, 0x65D0, 16, iter_reg);
2385 iter_reg = qla24xx_read_window(reg, 0x65E0, 16, iter_reg);
2386 qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
2388 /* Multi queue registers */
2389 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
2390 &last_chain);
2392 rval = qla24xx_soft_reset(ha);
2393 if (rval != QLA_SUCCESS) {
2394 ql_log(ql_log_warn, vha, 0xd00e,
2395 "SOFT RESET FAILED, forcing continuation of dump!!!\n");
2396 rval = QLA_SUCCESS;
2398 ql_log(ql_log_warn, vha, 0xd00f, "try a bigger hammer!!!\n");
2400 wrt_reg_dword(&reg->hccr, HCCRX_SET_RISC_RESET);
2401 rd_reg_dword(&reg->hccr);
2403 wrt_reg_dword(&reg->hccr, HCCRX_REL_RISC_PAUSE);
2404 rd_reg_dword(&reg->hccr);
2406 wrt_reg_dword(&reg->hccr, HCCRX_CLR_RISC_RESET);
2407 rd_reg_dword(&reg->hccr);
2409 for (cnt = 30000; cnt && (rd_reg_word(&reg->mailbox0)); cnt--)
2410 udelay(5);
2412 if (!cnt) {
2413 nxt = fw->code_ram;
2414 nxt += sizeof(fw->code_ram);
2415 nxt += (ha->fw_memory_size - 0x100000 + 1);
2416 goto copy_queue;
2417 } else {
2418 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
2419 ql_log(ql_log_warn, vha, 0xd010,
2420 "bigger hammer success?\n");
2421 }
2422 }
2424 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
2425 &nxt);
2426 if (rval != QLA_SUCCESS)
2427 goto qla83xx_fw_dump_failed_0;
2429 copy_queue:
2430 nxt = qla2xxx_copy_queues(ha, nxt);
2432 qla24xx_copy_eft(ha, nxt);
2434 /* Chain entries -- started with MQ. */
2435 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
2436 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
2437 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
2438 nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
2439 nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain);
2440 if (last_chain) {
2441 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
2442 *last_chain |= htonl(DUMP_CHAIN_LAST);
2443 }
2445 /* Adjust valid length. */
2446 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
2448 qla83xx_fw_dump_failed_0:
2449 qla2xxx_dump_post_process(base_vha, rval);
2450 }
2452 /****************************************************************************/
2453 /* Driver Debug Functions. */
2454 /****************************************************************************/
2456 /* Write the debug message prefix into @pbuf. */
2457 static void ql_dbg_prefix(char *pbuf, int pbuf_size, struct pci_dev *pdev,
2458 const scsi_qla_host_t *vha, uint msg_id)
2459 {
2460 if (vha) {
2461 const struct pci_dev *pdev = vha->hw->pdev;
2463 /* <module-name> [<dev-name>]-<msg-id>:<host>: */
2464 snprintf(pbuf, pbuf_size, "%s [%s]-%04x:%lu: ", QL_MSGHDR,
2465 dev_name(&(pdev->dev)), msg_id, vha->host_no);
2466 } else if (pdev) {
2467 snprintf(pbuf, pbuf_size, "%s [%s]-%04x: : ", QL_MSGHDR,
2468 dev_name(&pdev->dev), msg_id);
2469 } else {
2470 /* <module-name> [<dev-name>]-<msg-id>: : */
2471 snprintf(pbuf, pbuf_size, "%s [%s]-%04x: : ", QL_MSGHDR,
2472 "0000:00:00.0", msg_id);
2476 /*
2477 * This function is for formatting and logging debug information.
2478 * It is to be used when vha is available. It formats the message
2479 * and logs it to the messages file.
2480 * parameters:
2481 * level: The level of the debug messages to be printed.
2482 * If ql2xextended_error_logging value is correctly set,
2483 * this message will appear in the messages file.
2484 * vha: Pointer to the scsi_qla_host_t.
2485 * id: This is a unique identifier for the level. It identifies the
2486 * part of the code from where the message originated.
2487 * fmt: The printk-style format string for the message to be displayed.
2488 */
2489 void
2490 ql_dbg(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
2491 {
2492 va_list va;
2493 struct va_format vaf;
2494 char pbuf[64];
2496 ql_ktrace(1, level, pbuf, NULL, vha, id, fmt);
2498 if (!ql_mask_match(level))
2499 return;
2501 if (!pbuf[0]) /* set by ql_ktrace */
2502 ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, vha, id);
2504 va_start(va, fmt);
2506 vaf.fmt = fmt;
2507 vaf.va = &va;
2509 pr_warn("%s%pV", pbuf, &vaf);
2511 va_end(va);
2513 }
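/*
 * Usage sketch (illustrative only, not part of the driver): a caller holding
 * a valid vha emits a conditional debug message like the one below. It is
 * printed only when the matching level bit (here ql_dbg_disc from qla_dbg.h)
 * is set in ql2xextended_error_logging; note that ql_dbg() adds
 * ql_dbg_offset to the id when building the prefix. The id 0x2135 and the
 * fcport variable are made-up examples.
 *
 *     ql_dbg(ql_dbg_disc, vha, 0x2135,
 *         "%s: port %8phC loop_id %#x.\n",
 *         __func__, fcport->port_name, fcport->loop_id);
 */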
2515 /*
2516 * This function is for formatting and logging debug information.
2517 * It is to be used when vha is not available and pci is available,
2518 * i.e., before host allocation. It formats the message and logs it
2519 * to the messages file.
2520 * parameters:
2521 * level: The level of the debug messages to be printed.
2522 * If ql2xextended_error_logging value is correctly set,
2523 * this message will appear in the messages file.
2524 * pdev: Pointer to the struct pci_dev.
2525 * id: This is a unique id for the level. It identifies the part
2526 * of the code from where the message originated.
2527 * fmt: The printk-style format string for the message to be displayed.
2528 */
2529 void
2530 ql_dbg_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...)
2531 {
2532 va_list va;
2533 struct va_format vaf;
2534 char pbuf[128];
2536 if (pdev == NULL)
2537 return;
2539 ql_ktrace(1, level, pbuf, pdev, NULL, id, fmt);
2541 if (!ql_mask_match(level))
2542 return;
2544 va_start(va, fmt);
2546 vaf.fmt = fmt;
2547 vaf.va = &va;
2549 if (!pbuf[0]) /* set by ql_ktrace */
2550 ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), pdev, NULL,
2551 id + ql_dbg_offset);
2552 pr_warn("%s%pV", pbuf, &vaf);
2554 va_end(va);
2555 }
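/*
 * Usage sketch (illustrative only, not part of the driver): before a
 * scsi_qla_host_t exists, e.g. early in PCI probe, the pci_dev form is used
 * instead. The id 0x0199 is a made-up example value.
 *
 *     ql_dbg_pci(ql_dbg_init, pdev, 0x0199,
 *         "%s: probing PCI function %#x.\n",
 *         __func__, PCI_FUNC(pdev->devfn));
 */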
2557 /*
2558 * This function is for formatting and logging log messages.
2559 * It is to be used when vha is available. It formats the message
2560 * and logs it to the messages file. All the messages will be logged
2561 * irrespective of value of ql2xextended_error_logging.
2562 * parameters:
2563 * level: The level of the log messages to be printed in the
2564 * messages file.
2565 * vha: Pointer to the scsi_qla_host_t
2566 * id: This is a unique id for the level. It identifies the
2567 * part of the code from where the message originated.
2568 * fmt: The printk-style format string for the message to be displayed.
2569 */
2570 void
2571 ql_log(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
2572 {
2573 va_list va;
2574 struct va_format vaf;
2575 char pbuf[128];
2577 if (level > ql_errlev)
2578 return;
2580 ql_ktrace(0, level, pbuf, NULL, vha, id, fmt);
2582 if (!pbuf[0]) /* set by ql_ktrace */
2583 ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL, vha, id);
2585 va_start(va, fmt);
2587 vaf.fmt = fmt;
2588 vaf.va = &va;
2590 switch (level) {
2591 case ql_log_fatal: /* FATAL LOG */
2592 pr_crit("%s%pV", pbuf, &vaf);
2593 break;
2594 case ql_log_warn:
2595 pr_err("%s%pV", pbuf, &vaf);
2596 break;
2597 case ql_log_info:
2598 pr_warn("%s%pV", pbuf, &vaf);
2599 break;
2600 default:
2601 pr_info("%s%pV", pbuf, &vaf);
2602 break;
2603 }
2605 va_end(va);
2606 }
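/*
 * Usage sketch (illustrative only, not part of the driver): unlike ql_dbg(),
 * this path is gated by ql_errlev rather than by the debug mask, so a warning
 * such as the one below is logged regardless of ql2xextended_error_logging.
 * The id 0x709f and the timeout variable are made-up examples.
 *
 *     ql_log(ql_log_warn, vha, 0x709f,
 *         "Mailbox command timed out after %u seconds.\n", timeout);
 */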
2608 /*
2609 * This function is for formatting and logging log messages.
2610 * It is to be used when vha is not available and pci is available,
2611 * i.e., before host allocation. It formats the message and logs
2612 * it to the messages file. All the messages are logged irrespective
2613 * of the value of ql2xextended_error_logging.
2614 * parameters:
2615 * level: The level of the log messages to be printed in the
2616 * messages file.
2617 * pdev: Pointer to the struct pci_dev.
2618 * id: This is a unique id for the level. It identifies the
2619 * part of the code from where the message originated.
2620 * fmt: The printk-style format string for the message to be displayed.
2621 */
2622 void
2623 ql_log_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...)
2624 {
2625 va_list va;
2626 struct va_format vaf;
2627 char pbuf[128];
2629 if (pdev == NULL)
2630 return;
2631 if (level > ql_errlev)
2632 return;
2634 ql_ktrace(0, level, pbuf, pdev, NULL, id, fmt);
2636 if (!pbuf[0]) /* set by ql_ktrace */
2637 ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), pdev, NULL, id);
2639 va_start(va, fmt);
2641 vaf.fmt = fmt;
2642 vaf.va = &va;
2644 switch (level) {
2645 case ql_log_fatal: /* FATAL LOG */
2646 pr_crit("%s%pV", pbuf, &vaf);
2647 break;
2648 case ql_log_warn:
2649 pr_err("%s%pV", pbuf, &vaf);
2650 break;
2651 case ql_log_info:
2652 pr_warn("%s%pV", pbuf, &vaf);
2653 break;
2654 default:
2655 pr_info("%s%pV", pbuf, &vaf);
2656 break;
2657 }
2659 va_end(va);
2660 }
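/*
 * Usage sketch (illustrative only, not part of the driver): the pci_dev form
 * of ql_log() for the window before host allocation. The id 0x0198 and the
 * bar variable are made-up examples.
 *
 *     ql_log_pci(ql_log_fatal, pdev, 0x0198,
 *         "Unable to map PCI BAR %d.\n", bar);
 */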
2662 void
2663 ql_dump_regs(uint level, scsi_qla_host_t *vha, uint id)
2664 {
2665 int i;
2666 struct qla_hw_data *ha = vha->hw;
2667 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2668 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
2669 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
2670 __le16 __iomem *mbx_reg;
2672 if (!ql_mask_match(level))
2673 return;
2675 if (IS_P3P_TYPE(ha))
2676 mbx_reg = &reg82->mailbox_in[0];
2677 else if (IS_FWI2_CAPABLE(ha))
2678 mbx_reg = &reg24->mailbox0;
2679 else
2680 mbx_reg = MAILBOX_REG(ha, reg, 0);
2682 ql_dbg(level, vha, id, "Mailbox registers:\n");
2683 for (i = 0; i < 6; i++, mbx_reg++)
2684 ql_dbg(level, vha, id,
2685 "mbox[%d] %#04x\n", i, rd_reg_word(mbx_reg));
2688 void
2689 ql_dump_buffer(uint level, scsi_qla_host_t *vha, uint id, const void *buf,
2690 uint size)
2691 {
2692 uint cnt;
2694 if (!ql_mask_match(level))
2695 return;
2697 ql_dbg(level, vha, id,
2698 "%-+5d 0 1 2 3 4 5 6 7 8 9 A B C D E F\n", size);
2699 ql_dbg(level, vha, id,
2700 "----- -----------------------------------------------\n");
2701 for (cnt = 0; cnt < size; cnt += 16) {
2702 ql_dbg(level, vha, id, "%04x: ", cnt);
2703 print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1,
2704 buf + cnt, min(16U, size - cnt), false);
2705 }
2706 }
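/*
 * Usage sketch (illustrative only, not part of the driver): hex-dumps a
 * buffer in 16-byte rows under the given debug level. The level combination,
 * the id 0x3075 and the pkt variable are made-up examples.
 *
 *     ql_dump_buffer(ql_dbg_async + ql_dbg_buffer, vha, 0x3075,
 *         pkt, sizeof(*pkt));
 */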
2708 /*
2709 * This function is for formatting and logging log messages.
2710 * It is to be used when a qpair is available. It formats the message
2711 * and logs it to the messages file. All the messages will be logged
2712 * irrespective of value of ql2xextended_error_logging.
2713 * parameters:
2714 * level: The level of the log messages to be printed in the
2715 * messages file.
2716 * qpair: Pointer to the qla_qpair; its vha, when present, supplies the prefix.
2717 * id: This is a unique id for the level. It identifies the
2718 * part of the code from where the message originated.
2719 * fmt: The printk-style format string for the message to be displayed.
2720 */
2721 void
2722 ql_log_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
2723 const char *fmt, ...)
2724 {
2725 va_list va;
2726 struct va_format vaf;
2727 char pbuf[128];
2729 if (level > ql_errlev)
2730 return;
2732 ql_ktrace(0, level, pbuf, NULL, qpair ? qpair->vha : NULL, id, fmt);
2734 if (!pbuf[0]) /* set by ql_ktrace */
2735 ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL,
2736 qpair ? qpair->vha : NULL, id);
2738 va_start(va, fmt);
2740 vaf.fmt = fmt;
2741 vaf.va = &va;
2743 switch (level) {
2744 case ql_log_fatal: /* FATAL LOG */
2745 pr_crit("%s%pV", pbuf, &vaf);
2746 break;
2747 case ql_log_warn:
2748 pr_err("%s%pV", pbuf, &vaf);
2749 break;
2750 case ql_log_info:
2751 pr_warn("%s%pV", pbuf, &vaf);
2752 break;
2753 default:
2754 pr_info("%s%pV", pbuf, &vaf);
2755 break;
2756 }
2758 va_end(va);
2759 }
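/*
 * Usage sketch (illustrative only, not part of the driver): queue-pair aware
 * code passes its qpair so that the owning vha, when present, is used for the
 * message prefix. The id 0x0181 is a made-up example value.
 *
 *     ql_log_qp(ql_log_warn, qpair, 0x0181,
 *         "Unable to allocate memory for queue pair.\n");
 */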
2761 /*
2762 * This function is for formatting and logging debug information.
2763 * It is to be used when a qpair is available. It formats the message
2764 * and logs it to the messages file.
2765 * parameters:
2766 * level: The level of the debug messages to be printed.
2767 * If ql2xextended_error_logging value is correctly set,
2768 * this message will appear in the messages file.
2769 * qpair: Pointer to the qla_qpair; its vha, when present, supplies the prefix.
2770 * id: This is a unique identifier for the level. It identifies the
2771 * part of the code from where the message originated.
2772 * fmt: The printk-style format string for the message to be displayed.
2773 */
2774 void
2775 ql_dbg_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
2776 const char *fmt, ...)
2777 {
2778 va_list va;
2779 struct va_format vaf;
2780 char pbuf[128];
2782 ql_ktrace(1, level, pbuf, NULL, qpair ? qpair->vha : NULL, id, fmt);
2784 if (!ql_mask_match(level))
2785 return;
2787 va_start(va, fmt);
2789 vaf.fmt = fmt;
2790 vaf.va = &va;
2792 if (!pbuf[0]) /* set by ql_ktrace */
2793 ql_dbg_prefix(pbuf, ARRAY_SIZE(pbuf), NULL,
2794 qpair ? qpair->vha : NULL, id + ql_dbg_offset);
2796 pr_warn("%s%pV", pbuf, &vaf);
2798 va_end(va);
2799 }
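/*
 * Usage sketch (illustrative only, not part of the driver): as with ql_dbg(),
 * output depends on the level bits set in ql2xextended_error_logging. The id
 * 0x0291 and the sp variable are made-up examples.
 *
 *     ql_dbg_qp(ql_dbg_io, qpair, 0x0291,
 *         "%s: aborting sp %p from qpair %p.\n", __func__, sp, qpair);
 */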