drivers/scsi/qla2xxx/qla_dbg.c
1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
8 /*
9 * Table for showing the current message id in use for particular level
10 * Change this table for addition of log/debug messages.
11 * ----------------------------------------------------------------------
12 * | Level | Last Value Used | Holes |
13 * ----------------------------------------------------------------------
14 * | Module Init and Probe | 0x0193 | 0x0146 |
15 * | | | 0x015b-0x0160 |
16 * | | | 0x016e |
17 * | Mailbox commands | 0x1206 | 0x11a2-0x11ff |
18 * | Device Discovery | 0x2134 | 0x210e-0x2116 |
19 * | | | 0x211a |
20 * | | | 0x211c-0x2128 |
21 * | | | 0x212a-0x2134 |
22 * | Queue Command and IO tracing | 0x3074 | 0x300b |
23 * | | | 0x3027-0x3028 |
24 * | | | 0x303d-0x3041 |
25 * | | | 0x302d,0x3033 |
26 * | | | 0x3036,0x3038 |
27 * | | | 0x303a |
28 * | DPC Thread | 0x4023 | 0x4002,0x4013 |
29 * | Async Events | 0x5090 | 0x502b-0x502f |
30 * | | | 0x5047 |
31 * | | | 0x5084,0x5075 |
32 * | | | 0x503d,0x5044 |
33 * | | | 0x505f |
34 * | Timer Routines | 0x6012 | |
35 * | User Space Interactions | 0x70e3 | 0x7018,0x702e |
36 * | | | 0x7020,0x7024 |
37 * | | | 0x7039,0x7045 |
38 * | | | 0x7073-0x7075 |
39 * | | | 0x70a5-0x70a6 |
40 * | | | 0x70a8,0x70ab |
41 * | | | 0x70ad-0x70ae |
42 * | | | 0x70d0-0x70d6 |
43 * | | | 0x70d7-0x70db |
44 * | Task Management | 0x8042 | 0x8000 |
45 * | | | 0x8019 |
46 * | | | 0x8025,0x8026 |
47 * | | | 0x8031,0x8032 |
48 * | | | 0x8039,0x803c |
49 * | AER/EEH | 0x9011 | |
50 * | Virtual Port | 0xa007 | |
51 * | ISP82XX Specific | 0xb157 | 0xb002,0xb024 |
52 * | | | 0xb09e,0xb0ae |
53 * | | | 0xb0c3,0xb0c6 |
54 * | | | 0xb0e0-0xb0ef |
55 * | | | 0xb085,0xb0dc |
56 * | | | 0xb107,0xb108 |
57 * | | | 0xb111,0xb11e |
58 * | | | 0xb12c,0xb12d |
59 * | | | 0xb13a,0xb142 |
60 * | | | 0xb13c-0xb140 |
61 * | | | 0xb149 |
62 * | MultiQ | 0xc010 | |
63 * | Misc | 0xd303 | 0xd031-0xd0ff |
64 * | | | 0xd101-0xd1fe |
65 * | | | 0xd214-0xd2fe |
66 * | Target Mode | 0xe081 | |
67 * | Target Mode Management | 0xf09b | 0xf002 |
68 * | | | 0xf046-0xf049 |
69 * | Target Mode Task Management | 0x1000d | |
70 * ----------------------------------------------------------------------
71 */
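/*
 * Illustrative sketch (not part of the original source): the table above
 * is consulted when adding a new log/debug message. A new "Device
 * Discovery" trace, for instance, would take the next unused id after the
 * "Last Value Used" column (0x2135 below is assumed purely for
 * illustration) and pass it to ql_dbg() with the matching level:
 *
 *	ql_dbg(ql_dbg_disc, vha, 0x2135,
 *	    "Port login complete, loop id %#x.\n", fcport->loop_id);
 *
 * Ids listed under "Holes" may also be reused; either way, the table is
 * expected to be updated along with the new message.
 */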
73 #include "qla_def.h"
75 #include <linux/delay.h>
77 static uint32_t ql_dbg_offset = 0x800;
79 static inline void
80 qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
82 fw_dump->fw_major_version = htonl(ha->fw_major_version);
83 fw_dump->fw_minor_version = htonl(ha->fw_minor_version);
84 fw_dump->fw_subminor_version = htonl(ha->fw_subminor_version);
85 fw_dump->fw_attributes = htonl(ha->fw_attributes);
87 fw_dump->vendor = htonl(ha->pdev->vendor);
88 fw_dump->device = htonl(ha->pdev->device);
89 fw_dump->subsystem_vendor = htonl(ha->pdev->subsystem_vendor);
90 fw_dump->subsystem_device = htonl(ha->pdev->subsystem_device);
93 static inline void *
94 qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)
96 struct req_que *req = ha->req_q_map[0];
97 struct rsp_que *rsp = ha->rsp_q_map[0];
98 /* Request queue. */
99 memcpy(ptr, req->ring, req->length *
100 sizeof(request_t));
102 /* Response queue. */
103 ptr += req->length * sizeof(request_t);
104 memcpy(ptr, rsp->ring, rsp->length *
105 sizeof(response_t));
107 return ptr + (rsp->length * sizeof(response_t));
111 qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
112 uint32_t ram_dwords, void **nxt)
114 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
115 dma_addr_t dump_dma = ha->gid_list_dma;
116 uint32_t *chunk = (void *)ha->gid_list;
117 uint32_t dwords = qla2x00_gid_list_size(ha) / 4;
118 uint32_t stat;
119 ulong i, j, timer = 6000000;
120 int rval = QLA_FUNCTION_FAILED;
122 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
123 for (i = 0; i < ram_dwords; i += dwords, addr += dwords) {
124 if (i + dwords > ram_dwords)
125 dwords = ram_dwords - i;
127 WRT_REG_WORD(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
128 WRT_REG_WORD(&reg->mailbox1, LSW(addr));
129 WRT_REG_WORD(&reg->mailbox8, MSW(addr));
131 WRT_REG_WORD(&reg->mailbox2, MSW(LSD(dump_dma)));
132 WRT_REG_WORD(&reg->mailbox3, LSW(LSD(dump_dma)));
133 WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
134 WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
136 WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
137 WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
139 WRT_REG_WORD(&reg->mailbox9, 0);
140 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
142 ha->flags.mbox_int = 0;
143 while (timer--) {
144 udelay(5);
146 stat = RD_REG_DWORD(&reg->host_status);
147 /* Check for pending interrupts. */
148 if (!(stat & HSRX_RISC_INT))
149 continue;
151 stat &= 0xff;
152 if (stat != 0x1 && stat != 0x2 &&
153 stat != 0x10 && stat != 0x11) {
155 /* Clear this intr; it wasn't a mailbox intr */
156 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
157 RD_REG_DWORD(&reg->hccr);
158 continue;
161 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
162 rval = RD_REG_WORD(&reg->mailbox0) & MBS_MASK;
163 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
164 RD_REG_DWORD(&reg->hccr);
165 break;
167 ha->flags.mbox_int = 1;
168 *nxt = ram + i;
170 if (!test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
171 /* no interrupt, timed out */
172 return rval;
174 if (rval) {
175 /* error completion status */
176 return rval;
178 for (j = 0; j < dwords; j++) {
179 ram[i + j] =
180 (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
181 chunk[j] : swab32(chunk[j]);
185 *nxt = ram + i;
186 return QLA_SUCCESS;
190 qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
191 uint32_t ram_dwords, void **nxt)
193 int rval = QLA_FUNCTION_FAILED;
194 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
195 dma_addr_t dump_dma = ha->gid_list_dma;
196 uint32_t *chunk = (void *)ha->gid_list;
197 uint32_t dwords = qla2x00_gid_list_size(ha) / 4;
198 uint32_t stat;
199 ulong i, j, timer = 6000000;
201 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
203 for (i = 0; i < ram_dwords; i += dwords, addr += dwords) {
204 if (i + dwords > ram_dwords)
205 dwords = ram_dwords - i;
207 WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
208 WRT_REG_WORD(&reg->mailbox1, LSW(addr));
209 WRT_REG_WORD(&reg->mailbox8, MSW(addr));
211 WRT_REG_WORD(&reg->mailbox2, MSW(LSD(dump_dma)));
212 WRT_REG_WORD(&reg->mailbox3, LSW(LSD(dump_dma)));
213 WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
214 WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
216 WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
217 WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
218 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
220 ha->flags.mbox_int = 0;
221 while (timer--) {
222 udelay(5);
223 stat = RD_REG_DWORD(&reg->host_status);
225 /* Check for pending interrupts. */
226 if (!(stat & HSRX_RISC_INT))
227 continue;
229 stat &= 0xff;
230 if (stat != 0x1 && stat != 0x2 &&
231 stat != 0x10 && stat != 0x11) {
232 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
233 RD_REG_DWORD(&reg->hccr);
234 continue;
237 set_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
238 rval = RD_REG_WORD(&reg->mailbox0) & MBS_MASK;
239 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
240 RD_REG_DWORD(&reg->hccr);
241 break;
243 ha->flags.mbox_int = 1;
244 *nxt = ram + i;
246 if (!test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
247 /* no interrupt, timed out */
248 return rval;
250 if (rval) {
251 /* error completion status */
252 return rval;
254 for (j = 0; j < dwords; j++) {
255 ram[i + j] =
256 (IS_QLA27XX(ha) || IS_QLA28XX(ha)) ?
257 chunk[j] : swab32(chunk[j]);
261 *nxt = ram + i;
262 return QLA_SUCCESS;
265 static int
266 qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
267 uint32_t cram_size, void **nxt)
269 int rval;
271 /* Code RAM. */
272 rval = qla24xx_dump_ram(ha, 0x20000, code_ram, cram_size / 4, nxt);
273 if (rval != QLA_SUCCESS)
274 return rval;
276 set_bit(RISC_SRAM_DUMP_CMPL, &ha->fw_dump_cap_flags);
278 /* External Memory. */
279 rval = qla24xx_dump_ram(ha, 0x100000, *nxt,
280 ha->fw_memory_size - 0x100000 + 1, nxt);
281 if (rval == QLA_SUCCESS)
282 set_bit(RISC_EXT_MEM_DUMP_CMPL, &ha->fw_dump_cap_flags);
284 return rval;
287 static uint32_t *
288 qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
289 uint32_t count, uint32_t *buf)
291 uint32_t __iomem *dmp_reg;
293 WRT_REG_DWORD(&reg->iobase_addr, iobase);
294 dmp_reg = &reg->iobase_window;
295 for ( ; count--; dmp_reg++)
296 *buf++ = htonl(RD_REG_DWORD(dmp_reg));
298 return buf;
301 void
302 qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha)
304 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
306 /* A 100 usec delay is sufficient for the hardware to pause the RISC */
307 udelay(100);
308 if (RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED)
309 set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags);
313 qla24xx_soft_reset(struct qla_hw_data *ha)
315 int rval = QLA_SUCCESS;
316 uint32_t cnt;
317 uint16_t wd;
318 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
320 /*
321 * Reset RISC. The delay is dependent on system architecture.
322 * Driver can proceed with the reset sequence after waiting
323 * for a timeout period.
324 */
325 WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
326 for (cnt = 0; cnt < 30000; cnt++) {
327 if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
328 break;
330 udelay(10);
332 if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
333 set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
335 WRT_REG_DWORD(&reg->ctrl_status,
336 CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
337 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
339 udelay(100);
341 /* Wait for soft-reset to complete. */
342 for (cnt = 0; cnt < 30000; cnt++) {
343 if ((RD_REG_DWORD(&reg->ctrl_status) &
344 CSRX_ISP_SOFT_RESET) == 0)
345 break;
347 udelay(10);
349 if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
350 set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags);
352 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
353 RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
355 for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
356 rval == QLA_SUCCESS; cnt--) {
357 if (cnt)
358 udelay(10);
359 else
360 rval = QLA_FUNCTION_TIMEOUT;
362 if (rval == QLA_SUCCESS)
363 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
365 return rval;
368 static int
369 qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
370 uint32_t ram_words, void **nxt)
372 int rval;
373 uint32_t cnt, stat, timer, words, idx;
374 uint16_t mb0;
375 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
376 dma_addr_t dump_dma = ha->gid_list_dma;
377 uint16_t *dump = (uint16_t *)ha->gid_list;
379 rval = QLA_SUCCESS;
380 mb0 = 0;
382 WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED);
383 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
385 words = qla2x00_gid_list_size(ha) / 2;
386 for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS;
387 cnt += words, addr += words) {
388 if (cnt + words > ram_words)
389 words = ram_words - cnt;
391 WRT_MAILBOX_REG(ha, reg, 1, LSW(addr));
392 WRT_MAILBOX_REG(ha, reg, 8, MSW(addr));
394 WRT_MAILBOX_REG(ha, reg, 2, MSW(dump_dma));
395 WRT_MAILBOX_REG(ha, reg, 3, LSW(dump_dma));
396 WRT_MAILBOX_REG(ha, reg, 6, MSW(MSD(dump_dma)));
397 WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma)));
399 WRT_MAILBOX_REG(ha, reg, 4, words);
400 WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
402 for (timer = 6000000; timer; timer--) {
403 /* Check for pending interrupts. */
404 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
405 if (stat & HSR_RISC_INT) {
406 stat &= 0xff;
408 if (stat == 0x1 || stat == 0x2) {
409 set_bit(MBX_INTERRUPT,
410 &ha->mbx_cmd_flags);
412 mb0 = RD_MAILBOX_REG(ha, reg, 0);
414 /* Release mailbox registers. */
415 WRT_REG_WORD(&reg->semaphore, 0);
416 WRT_REG_WORD(&reg->hccr,
417 HCCR_CLR_RISC_INT);
418 RD_REG_WORD(&reg->hccr);
419 break;
420 } else if (stat == 0x10 || stat == 0x11) {
421 set_bit(MBX_INTERRUPT,
422 &ha->mbx_cmd_flags);
424 mb0 = RD_MAILBOX_REG(ha, reg, 0);
426 WRT_REG_WORD(&reg->hccr,
427 HCCR_CLR_RISC_INT);
428 RD_REG_WORD(&reg->hccr);
429 break;
432 /* clear this intr; it wasn't a mailbox intr */
433 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
434 RD_REG_WORD(&reg->hccr);
436 udelay(5);
439 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
440 rval = mb0 & MBS_MASK;
441 for (idx = 0; idx < words; idx++)
442 ram[cnt + idx] = swab16(dump[idx]);
443 } else {
444 rval = QLA_FUNCTION_FAILED;
448 *nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
449 return rval;
452 static inline void
453 qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
454 uint16_t *buf)
456 uint16_t __iomem *dmp_reg = &reg->u.isp2300.fb_cmd;
458 for ( ; count--; dmp_reg++)
459 *buf++ = htons(RD_REG_WORD(dmp_reg));
462 static inline void *
463 qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr)
465 if (!ha->eft)
466 return ptr;
468 memcpy(ptr, ha->eft, ntohl(ha->fw_dump->eft_size));
469 return ptr + ntohl(ha->fw_dump->eft_size);
472 static inline void *
473 qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
475 uint32_t cnt;
476 uint32_t *iter_reg;
477 struct qla2xxx_fce_chain *fcec = ptr;
479 if (!ha->fce)
480 return ptr;
482 *last_chain = &fcec->type;
483 fcec->type = htonl(DUMP_CHAIN_FCE);
484 fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
485 fce_calc_size(ha->fce_bufs));
486 fcec->size = htonl(fce_calc_size(ha->fce_bufs));
487 fcec->addr_l = htonl(LSD(ha->fce_dma));
488 fcec->addr_h = htonl(MSD(ha->fce_dma));
490 iter_reg = fcec->eregs;
491 for (cnt = 0; cnt < 8; cnt++)
492 *iter_reg++ = htonl(ha->fce_mb[cnt]);
494 memcpy(iter_reg, ha->fce, ntohl(fcec->size));
496 return (char *)iter_reg + ntohl(fcec->size);
499 static inline void *
500 qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
502 struct qla2xxx_offld_chain *c = ptr;
504 if (!ha->exlogin_buf)
505 return ptr;
507 *last_chain = &c->type;
509 c->type = cpu_to_be32(DUMP_CHAIN_EXLOGIN);
510 c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) +
511 ha->exlogin_size);
512 c->size = cpu_to_be32(ha->exlogin_size);
513 c->addr = cpu_to_be64(ha->exlogin_buf_dma);
515 ptr += sizeof(struct qla2xxx_offld_chain);
516 memcpy(ptr, ha->exlogin_buf, ha->exlogin_size);
518 return (char *)ptr + cpu_to_be32(c->size);
521 static inline void *
522 qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
524 struct qla2xxx_offld_chain *c = ptr;
526 if (!ha->exchoffld_buf)
527 return ptr;
529 *last_chain = &c->type;
531 c->type = cpu_to_be32(DUMP_CHAIN_EXCHG);
532 c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) +
533 ha->exchoffld_size);
534 c->size = cpu_to_be32(ha->exchoffld_size);
535 c->addr = cpu_to_be64(ha->exchoffld_buf_dma);
537 ptr += sizeof(struct qla2xxx_offld_chain);
538 memcpy(ptr, ha->exchoffld_buf, ha->exchoffld_size);
540 return (char *)ptr + cpu_to_be32(c->size);
543 static inline void *
544 qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
545 uint32_t **last_chain)
547 struct qla2xxx_mqueue_chain *q;
548 struct qla2xxx_mqueue_header *qh;
549 uint32_t num_queues;
550 int que;
551 struct {
552 int length;
553 void *ring;
554 } aq, *aqp;
556 if (!ha->tgt.atio_ring)
557 return ptr;
559 num_queues = 1;
560 aqp = &aq;
561 aqp->length = ha->tgt.atio_q_length;
562 aqp->ring = ha->tgt.atio_ring;
564 for (que = 0; que < num_queues; que++) {
565 /* aqp = ha->atio_q_map[que]; */
566 q = ptr;
567 *last_chain = &q->type;
568 q->type = htonl(DUMP_CHAIN_QUEUE);
569 q->chain_size = htonl(
570 sizeof(struct qla2xxx_mqueue_chain) +
571 sizeof(struct qla2xxx_mqueue_header) +
572 (aqp->length * sizeof(request_t)));
573 ptr += sizeof(struct qla2xxx_mqueue_chain);
575 /* Add header. */
576 qh = ptr;
577 qh->queue = htonl(TYPE_ATIO_QUEUE);
578 qh->number = htonl(que);
579 qh->size = htonl(aqp->length * sizeof(request_t));
580 ptr += sizeof(struct qla2xxx_mqueue_header);
582 /* Add data. */
583 memcpy(ptr, aqp->ring, aqp->length * sizeof(request_t));
585 ptr += aqp->length * sizeof(request_t);
588 return ptr;
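/*
 * Layout sketch, summarized from the code above and from
 * qla25xx_copy_mqueues() below (a reading aid, not an authoritative format
 * description). Each captured queue is appended to the dump as:
 *
 *	struct qla2xxx_mqueue_chain  { type, chain_size }     - DUMP_CHAIN_QUEUE
 *	struct qla2xxx_mqueue_header { queue, number, size }  - e.g. TYPE_ATIO_QUEUE
 *	<size bytes of raw ring data>
 *
 * *last_chain is left pointing at the most recent entry's "type" field so
 * the top-level dump routine can OR in DUMP_CHAIN_LAST once every chain
 * has been written.
 */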
591 static inline void *
592 qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
594 struct qla2xxx_mqueue_chain *q;
595 struct qla2xxx_mqueue_header *qh;
596 struct req_que *req;
597 struct rsp_que *rsp;
598 int que;
600 if (!ha->mqenable)
601 return ptr;
603 /* Request queues */
604 for (que = 1; que < ha->max_req_queues; que++) {
605 req = ha->req_q_map[que];
606 if (!req)
607 break;
609 /* Add chain. */
610 q = ptr;
611 *last_chain = &q->type;
612 q->type = htonl(DUMP_CHAIN_QUEUE);
613 q->chain_size = htonl(
614 sizeof(struct qla2xxx_mqueue_chain) +
615 sizeof(struct qla2xxx_mqueue_header) +
616 (req->length * sizeof(request_t)));
617 ptr += sizeof(struct qla2xxx_mqueue_chain);
619 /* Add header. */
620 qh = ptr;
621 qh->queue = htonl(TYPE_REQUEST_QUEUE);
622 qh->number = htonl(que);
623 qh->size = htonl(req->length * sizeof(request_t));
624 ptr += sizeof(struct qla2xxx_mqueue_header);
626 /* Add data. */
627 memcpy(ptr, req->ring, req->length * sizeof(request_t));
628 ptr += req->length * sizeof(request_t);
631 /* Response queues */
632 for (que = 1; que < ha->max_rsp_queues; que++) {
633 rsp = ha->rsp_q_map[que];
634 if (!rsp)
635 break;
637 /* Add chain. */
638 q = ptr;
639 *last_chain = &q->type;
640 q->type = htonl(DUMP_CHAIN_QUEUE);
641 q->chain_size = htonl(
642 sizeof(struct qla2xxx_mqueue_chain) +
643 sizeof(struct qla2xxx_mqueue_header) +
644 (rsp->length * sizeof(response_t)));
645 ptr += sizeof(struct qla2xxx_mqueue_chain);
647 /* Add header. */
648 qh = ptr;
649 qh->queue = htonl(TYPE_RESPONSE_QUEUE);
650 qh->number = htonl(que);
651 qh->size = htonl(rsp->length * sizeof(response_t));
652 ptr += sizeof(struct qla2xxx_mqueue_header);
654 /* Add data. */
655 memcpy(ptr, rsp->ring, rsp->length * sizeof(response_t));
656 ptr += rsp->length * sizeof(response_t);
659 return ptr;
662 static inline void *
663 qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
665 uint32_t cnt, que_idx;
666 uint8_t que_cnt;
667 struct qla2xxx_mq_chain *mq = ptr;
668 device_reg_t *reg;
670 if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha) ||
671 IS_QLA28XX(ha))
672 return ptr;
674 mq = ptr;
675 *last_chain = &mq->type;
676 mq->type = htonl(DUMP_CHAIN_MQ);
677 mq->chain_size = htonl(sizeof(struct qla2xxx_mq_chain));
679 que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
680 ha->max_req_queues : ha->max_rsp_queues;
681 mq->count = htonl(que_cnt);
682 for (cnt = 0; cnt < que_cnt; cnt++) {
683 reg = ISP_QUE_REG(ha, cnt);
684 que_idx = cnt * 4;
685 mq->qregs[que_idx] =
686 htonl(RD_REG_DWORD(&reg->isp25mq.req_q_in));
687 mq->qregs[que_idx+1] =
688 htonl(RD_REG_DWORD(&reg->isp25mq.req_q_out));
689 mq->qregs[que_idx+2] =
690 htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_in));
691 mq->qregs[que_idx+3] =
692 htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_out));
695 return ptr + sizeof(struct qla2xxx_mq_chain);
698 void
699 qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
701 struct qla_hw_data *ha = vha->hw;
703 if (rval != QLA_SUCCESS) {
704 ql_log(ql_log_warn, vha, 0xd000,
705 "Failed to dump firmware (%x), dump status flags (0x%lx).\n",
706 rval, ha->fw_dump_cap_flags);
707 ha->fw_dumped = 0;
708 } else {
709 ql_log(ql_log_info, vha, 0xd001,
710 "Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n",
711 vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags);
712 ha->fw_dumped = 1;
713 qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
717 /**
718 * qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
719 * @vha: HA context
720 * @hardware_locked: Called with the hardware_lock
721 */
722 void
723 qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
725 int rval;
726 uint32_t cnt;
727 struct qla_hw_data *ha = vha->hw;
728 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
729 uint16_t __iomem *dmp_reg;
730 unsigned long flags;
731 struct qla2300_fw_dump *fw;
732 void *nxt;
733 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
735 flags = 0;
737 #ifndef __CHECKER__
738 if (!hardware_locked)
739 spin_lock_irqsave(&ha->hardware_lock, flags);
740 #endif
742 if (!ha->fw_dump) {
743 ql_log(ql_log_warn, vha, 0xd002,
744 "No buffer available for dump.\n");
745 goto qla2300_fw_dump_failed;
748 if (ha->fw_dumped) {
749 ql_log(ql_log_warn, vha, 0xd003,
750 "Firmware has been previously dumped (%p) "
751 "-- ignoring request.\n",
752 ha->fw_dump);
753 goto qla2300_fw_dump_failed;
755 fw = &ha->fw_dump->isp.isp23;
756 qla2xxx_prep_dump(ha, ha->fw_dump);
758 rval = QLA_SUCCESS;
759 fw->hccr = htons(RD_REG_WORD(&reg->hccr));
761 /* Pause RISC. */
762 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
763 if (IS_QLA2300(ha)) {
764 for (cnt = 30000;
765 (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
766 rval == QLA_SUCCESS; cnt--) {
767 if (cnt)
768 udelay(100);
769 else
770 rval = QLA_FUNCTION_TIMEOUT;
772 } else {
773 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
774 udelay(10);
777 if (rval == QLA_SUCCESS) {
778 dmp_reg = &reg->flash_address;
779 for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++)
780 fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
782 dmp_reg = &reg->u.isp2300.req_q_in;
783 for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2;
784 cnt++, dmp_reg++)
785 fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
787 dmp_reg = &reg->u.isp2300.mailbox0;
788 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2;
789 cnt++, dmp_reg++)
790 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
792 WRT_REG_WORD(&reg->ctrl_status, 0x40);
793 qla2xxx_read_window(reg, 32, fw->resp_dma_reg);
795 WRT_REG_WORD(&reg->ctrl_status, 0x50);
796 qla2xxx_read_window(reg, 48, fw->dma_reg);
798 WRT_REG_WORD(&reg->ctrl_status, 0x00);
799 dmp_reg = &reg->risc_hw;
800 for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2;
801 cnt++, dmp_reg++)
802 fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
804 WRT_REG_WORD(&reg->pcr, 0x2000);
805 qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
807 WRT_REG_WORD(&reg->pcr, 0x2200);
808 qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
810 WRT_REG_WORD(&reg->pcr, 0x2400);
811 qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
813 WRT_REG_WORD(&reg->pcr, 0x2600);
814 qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
816 WRT_REG_WORD(&reg->pcr, 0x2800);
817 qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
819 WRT_REG_WORD(&reg->pcr, 0x2A00);
820 qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
822 WRT_REG_WORD(&reg->pcr, 0x2C00);
823 qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
825 WRT_REG_WORD(&reg->pcr, 0x2E00);
826 qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
828 WRT_REG_WORD(&reg->ctrl_status, 0x10);
829 qla2xxx_read_window(reg, 64, fw->frame_buf_hdw_reg);
831 WRT_REG_WORD(&reg->ctrl_status, 0x20);
832 qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
834 WRT_REG_WORD(&reg->ctrl_status, 0x30);
835 qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
837 /* Reset RISC. */
838 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
839 for (cnt = 0; cnt < 30000; cnt++) {
840 if ((RD_REG_WORD(&reg->ctrl_status) &
841 CSR_ISP_SOFT_RESET) == 0)
842 break;
844 udelay(10);
848 if (!IS_QLA2300(ha)) {
849 for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
850 rval == QLA_SUCCESS; cnt--) {
851 if (cnt)
852 udelay(100);
853 else
854 rval = QLA_FUNCTION_TIMEOUT;
858 /* Get RISC SRAM. */
859 if (rval == QLA_SUCCESS)
860 rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram,
861 sizeof(fw->risc_ram) / 2, &nxt);
863 /* Get stack SRAM. */
864 if (rval == QLA_SUCCESS)
865 rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram,
866 sizeof(fw->stack_ram) / 2, &nxt);
868 /* Get data SRAM. */
869 if (rval == QLA_SUCCESS)
870 rval = qla2xxx_dump_ram(ha, 0x11000, fw->data_ram,
871 ha->fw_memory_size - 0x11000 + 1, &nxt);
873 if (rval == QLA_SUCCESS)
874 qla2xxx_copy_queues(ha, nxt);
876 qla2xxx_dump_post_process(base_vha, rval);
878 qla2300_fw_dump_failed:
879 #ifndef __CHECKER__
880 if (!hardware_locked)
881 spin_unlock_irqrestore(&ha->hardware_lock, flags);
882 #else
884 #endif
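/*
 * Illustrative usage sketch (not part of the original driver): the per-ISP
 * dump routines in this file are normally reached through the isp_ops
 * dispatch table, and "hardware_locked" tells them whether the caller
 * already holds ha->hardware_lock:
 *
 *	ha->isp_ops->fw_dump(vha, 1);	// e.g. interrupt path, lock already held
 *	ha->isp_ops->fw_dump(vha, 0);	// process context, routine takes the lock
 *
 * The call sites shown here are assumptions for illustration; the real
 * ones live in the interrupt and error-handling paths elsewhere in the
 * driver.
 */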
887 /**
888 * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
889 * @vha: HA context
890 * @hardware_locked: Called with the hardware_lock
891 */
892 void
893 qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
895 int rval;
896 uint32_t cnt, timer;
897 uint16_t risc_address;
898 uint16_t mb0, mb2;
899 struct qla_hw_data *ha = vha->hw;
900 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
901 uint16_t __iomem *dmp_reg;
902 unsigned long flags;
903 struct qla2100_fw_dump *fw;
904 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
906 risc_address = 0;
907 mb0 = mb2 = 0;
908 flags = 0;
910 #ifndef __CHECKER__
911 if (!hardware_locked)
912 spin_lock_irqsave(&ha->hardware_lock, flags);
913 #endif
915 if (!ha->fw_dump) {
916 ql_log(ql_log_warn, vha, 0xd004,
917 "No buffer available for dump.\n");
918 goto qla2100_fw_dump_failed;
921 if (ha->fw_dumped) {
922 ql_log(ql_log_warn, vha, 0xd005,
923 "Firmware has been previously dumped (%p) "
924 "-- ignoring request.\n",
925 ha->fw_dump);
926 goto qla2100_fw_dump_failed;
928 fw = &ha->fw_dump->isp.isp21;
929 qla2xxx_prep_dump(ha, ha->fw_dump);
931 rval = QLA_SUCCESS;
932 fw->hccr = htons(RD_REG_WORD(&reg->hccr));
934 /* Pause RISC. */
935 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
936 for (cnt = 30000; (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
937 rval == QLA_SUCCESS; cnt--) {
938 if (cnt)
939 udelay(100);
940 else
941 rval = QLA_FUNCTION_TIMEOUT;
943 if (rval == QLA_SUCCESS) {
944 dmp_reg = &reg->flash_address;
945 for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++)
946 fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
948 dmp_reg = &reg->u.isp2100.mailbox0;
949 for (cnt = 0; cnt < ha->mbx_count; cnt++, dmp_reg++) {
950 if (cnt == 8)
951 dmp_reg = &reg->u_end.isp2200.mailbox8;
953 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
956 dmp_reg = &reg->u.isp2100.unused_2[0];
957 for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++, dmp_reg++)
958 fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
960 WRT_REG_WORD(&reg->ctrl_status, 0x00);
961 dmp_reg = &reg->risc_hw;
962 for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++, dmp_reg++)
963 fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
965 WRT_REG_WORD(&reg->pcr, 0x2000);
966 qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
968 WRT_REG_WORD(&reg->pcr, 0x2100);
969 qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
971 WRT_REG_WORD(&reg->pcr, 0x2200);
972 qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
974 WRT_REG_WORD(&reg->pcr, 0x2300);
975 qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
977 WRT_REG_WORD(&reg->pcr, 0x2400);
978 qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
980 WRT_REG_WORD(&reg->pcr, 0x2500);
981 qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
983 WRT_REG_WORD(&reg->pcr, 0x2600);
984 qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
986 WRT_REG_WORD(&reg->pcr, 0x2700);
987 qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
989 WRT_REG_WORD(&reg->ctrl_status, 0x10);
990 qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg);
992 WRT_REG_WORD(&reg->ctrl_status, 0x20);
993 qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
995 WRT_REG_WORD(&reg->ctrl_status, 0x30);
996 qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
998 /* Reset the ISP. */
999 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
1002 for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
1003 rval == QLA_SUCCESS; cnt--) {
1004 if (cnt)
1005 udelay(100);
1006 else
1007 rval = QLA_FUNCTION_TIMEOUT;
1010 /* Pause RISC. */
1011 if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) &&
1012 (RD_REG_WORD(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) {
1014 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
1015 for (cnt = 30000;
1016 (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
1017 rval == QLA_SUCCESS; cnt--) {
1018 if (cnt)
1019 udelay(100);
1020 else
1021 rval = QLA_FUNCTION_TIMEOUT;
1023 if (rval == QLA_SUCCESS) {
1024 /* Set memory configuration and timing. */
1025 if (IS_QLA2100(ha))
1026 WRT_REG_WORD(&reg->mctr, 0xf1);
1027 else
1028 WRT_REG_WORD(&reg->mctr, 0xf2);
1029 RD_REG_WORD(&reg->mctr); /* PCI Posting. */
1031 /* Release RISC. */
1032 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
1036 if (rval == QLA_SUCCESS) {
1037 /* Get RISC SRAM. */
1038 risc_address = 0x1000;
1039 WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
1040 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1042 for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS;
1043 cnt++, risc_address++) {
1044 WRT_MAILBOX_REG(ha, reg, 1, risc_address);
1045 WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
1047 for (timer = 6000000; timer != 0; timer--) {
1048 /* Check for pending interrupts. */
1049 if (RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) {
1050 if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
1051 set_bit(MBX_INTERRUPT,
1052 &ha->mbx_cmd_flags);
1054 mb0 = RD_MAILBOX_REG(ha, reg, 0);
1055 mb2 = RD_MAILBOX_REG(ha, reg, 2);
1057 WRT_REG_WORD(&reg->semaphore, 0);
1058 WRT_REG_WORD(&reg->hccr,
1059 HCCR_CLR_RISC_INT);
1060 RD_REG_WORD(&reg->hccr);
1061 break;
1063 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
1064 RD_REG_WORD(&reg->hccr);
1066 udelay(5);
1069 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
1070 rval = mb0 & MBS_MASK;
1071 fw->risc_ram[cnt] = htons(mb2);
1072 } else {
1073 rval = QLA_FUNCTION_FAILED;
1077 if (rval == QLA_SUCCESS)
1078 qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]);
1080 qla2xxx_dump_post_process(base_vha, rval);
1082 qla2100_fw_dump_failed:
1083 #ifndef __CHECKER__
1084 if (!hardware_locked)
1085 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1086 #else
1088 #endif
1091 void
1092 qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1094 int rval;
1095 uint32_t cnt;
1096 struct qla_hw_data *ha = vha->hw;
1097 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1098 uint32_t __iomem *dmp_reg;
1099 uint32_t *iter_reg;
1100 uint16_t __iomem *mbx_reg;
1101 unsigned long flags;
1102 struct qla24xx_fw_dump *fw;
1103 void *nxt;
1104 void *nxt_chain;
1105 uint32_t *last_chain = NULL;
1106 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1108 if (IS_P3P_TYPE(ha))
1109 return;
1111 flags = 0;
1112 ha->fw_dump_cap_flags = 0;
1114 #ifndef __CHECKER__
1115 if (!hardware_locked)
1116 spin_lock_irqsave(&ha->hardware_lock, flags);
1117 #endif
1119 if (!ha->fw_dump) {
1120 ql_log(ql_log_warn, vha, 0xd006,
1121 "No buffer available for dump.\n");
1122 goto qla24xx_fw_dump_failed;
1125 if (ha->fw_dumped) {
1126 ql_log(ql_log_warn, vha, 0xd007,
1127 "Firmware has been previously dumped (%p) "
1128 "-- ignoring request.\n",
1129 ha->fw_dump);
1130 goto qla24xx_fw_dump_failed;
1132 QLA_FW_STOPPED(ha);
1133 fw = &ha->fw_dump->isp.isp24;
1134 qla2xxx_prep_dump(ha, ha->fw_dump);
1136 fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1138 /*
1139 * Pause RISC. No need to track timeout, as resetting the chip
1140 * is the right approach in case of a pause timeout.
1141 */
1142 qla24xx_pause_risc(reg, ha);
1144 /* Host interface registers. */
1145 dmp_reg = &reg->flash_addr;
1146 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
1147 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
1149 /* Disable interrupts. */
1150 WRT_REG_DWORD(&reg->ictrl, 0);
1151 RD_REG_DWORD(&reg->ictrl);
1153 /* Shadow registers. */
1154 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1155 RD_REG_DWORD(&reg->iobase_addr);
1156 WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
1157 fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1159 WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
1160 fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1162 WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
1163 fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1165 WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
1166 fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1168 WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
1169 fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1171 WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
1172 fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1174 WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
1175 fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1177 /* Mailbox registers. */
1178 mbx_reg = &reg->mailbox0;
1179 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
1180 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
1182 /* Transfer sequence registers. */
1183 iter_reg = fw->xseq_gp_reg;
1184 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1185 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1186 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1187 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1188 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1189 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1190 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1191 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1193 qla24xx_read_window(reg, 0xBFE0, 16, fw->xseq_0_reg);
1194 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1196 /* Receive sequence registers. */
1197 iter_reg = fw->rseq_gp_reg;
1198 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1199 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1200 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1201 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1202 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1203 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1204 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1205 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1207 qla24xx_read_window(reg, 0xFFD0, 16, fw->rseq_0_reg);
1208 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1209 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1211 /* Command DMA registers. */
1212 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1214 /* Queues. */
1215 iter_reg = fw->req0_dma_reg;
1216 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1217 dmp_reg = &reg->iobase_q;
1218 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1219 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1221 iter_reg = fw->resp0_dma_reg;
1222 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1223 dmp_reg = &reg->iobase_q;
1224 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1225 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1227 iter_reg = fw->req1_dma_reg;
1228 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1229 dmp_reg = &reg->iobase_q;
1230 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1231 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1233 /* Transmit DMA registers. */
1234 iter_reg = fw->xmt0_dma_reg;
1235 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1236 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1238 iter_reg = fw->xmt1_dma_reg;
1239 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1240 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1242 iter_reg = fw->xmt2_dma_reg;
1243 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1244 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1246 iter_reg = fw->xmt3_dma_reg;
1247 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1248 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1250 iter_reg = fw->xmt4_dma_reg;
1251 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1252 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1254 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1256 /* Receive DMA registers. */
1257 iter_reg = fw->rcvt0_data_dma_reg;
1258 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1259 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1261 iter_reg = fw->rcvt1_data_dma_reg;
1262 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1263 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1265 /* RISC registers. */
1266 iter_reg = fw->risc_gp_reg;
1267 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1268 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1269 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1270 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1271 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1272 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1273 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1274 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1276 /* Local memory controller registers. */
1277 iter_reg = fw->lmc_reg;
1278 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1279 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1280 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1281 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1282 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1283 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1284 qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1286 /* Fibre Protocol Module registers. */
1287 iter_reg = fw->fpm_hdw_reg;
1288 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1289 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1290 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1291 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1292 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1293 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1294 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1295 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1296 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1297 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1298 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1299 qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1301 /* Frame Buffer registers. */
1302 iter_reg = fw->fb_hdw_reg;
1303 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1304 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1305 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1306 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1307 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1308 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1309 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1310 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1311 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1312 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1313 qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1315 rval = qla24xx_soft_reset(ha);
1316 if (rval != QLA_SUCCESS)
1317 goto qla24xx_fw_dump_failed_0;
1319 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1320 &nxt);
1321 if (rval != QLA_SUCCESS)
1322 goto qla24xx_fw_dump_failed_0;
1324 nxt = qla2xxx_copy_queues(ha, nxt);
1326 qla24xx_copy_eft(ha, nxt);
1328 nxt_chain = (void *)ha->fw_dump + ha->chain_offset;
1329 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1330 if (last_chain) {
1331 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1332 *last_chain |= htonl(DUMP_CHAIN_LAST);
1335 /* Adjust valid length. */
1336 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1338 qla24xx_fw_dump_failed_0:
1339 qla2xxx_dump_post_process(base_vha, rval);
1341 qla24xx_fw_dump_failed:
1342 #ifndef __CHECKER__
1343 if (!hardware_locked)
1344 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1345 #else
1347 #endif
1350 void
1351 qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1353 int rval;
1354 uint32_t cnt;
1355 struct qla_hw_data *ha = vha->hw;
1356 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1357 uint32_t __iomem *dmp_reg;
1358 uint32_t *iter_reg;
1359 uint16_t __iomem *mbx_reg;
1360 unsigned long flags;
1361 struct qla25xx_fw_dump *fw;
1362 void *nxt, *nxt_chain;
1363 uint32_t *last_chain = NULL;
1364 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1366 flags = 0;
1367 ha->fw_dump_cap_flags = 0;
1369 #ifndef __CHECKER__
1370 if (!hardware_locked)
1371 spin_lock_irqsave(&ha->hardware_lock, flags);
1372 #endif
1374 if (!ha->fw_dump) {
1375 ql_log(ql_log_warn, vha, 0xd008,
1376 "No buffer available for dump.\n");
1377 goto qla25xx_fw_dump_failed;
1380 if (ha->fw_dumped) {
1381 ql_log(ql_log_warn, vha, 0xd009,
1382 "Firmware has been previously dumped (%p) "
1383 "-- ignoring request.\n",
1384 ha->fw_dump);
1385 goto qla25xx_fw_dump_failed;
1387 QLA_FW_STOPPED(ha);
1388 fw = &ha->fw_dump->isp.isp25;
1389 qla2xxx_prep_dump(ha, ha->fw_dump);
1390 ha->fw_dump->version = htonl(2);
1392 fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1394 /*
1395 * Pause RISC. No need to track timeout, as resetting the chip
1396 * is the right approach in case of a pause timeout.
1397 */
1398 qla24xx_pause_risc(reg, ha);
1400 /* Host/Risc registers. */
1401 iter_reg = fw->host_risc_reg;
1402 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
1403 qla24xx_read_window(reg, 0x7010, 16, iter_reg);
1405 /* PCIe registers. */
1406 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1407 RD_REG_DWORD(&reg->iobase_addr);
1408 WRT_REG_DWORD(&reg->iobase_window, 0x01);
1409 dmp_reg = &reg->iobase_c4;
1410 fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
1411 dmp_reg++;
1412 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
1413 dmp_reg++;
1414 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
1415 fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
1417 WRT_REG_DWORD(&reg->iobase_window, 0x00);
1418 RD_REG_DWORD(&reg->iobase_window);
1420 /* Host interface registers. */
1421 dmp_reg = &reg->flash_addr;
1422 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
1423 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
1425 /* Disable interrupts. */
1426 WRT_REG_DWORD(&reg->ictrl, 0);
1427 RD_REG_DWORD(&reg->ictrl);
1429 /* Shadow registers. */
1430 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1431 RD_REG_DWORD(&reg->iobase_addr);
1432 WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
1433 fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1435 WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
1436 fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1438 WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
1439 fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1441 WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
1442 fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1444 WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
1445 fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1447 WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
1448 fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1450 WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
1451 fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1453 WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
1454 fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1456 WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
1457 fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1459 WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
1460 fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1462 WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
1463 fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1465 /* RISC I/O register. */
1466 WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
1467 fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
1469 /* Mailbox registers. */
1470 mbx_reg = &reg->mailbox0;
1471 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
1472 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
1474 /* Transfer sequence registers. */
1475 iter_reg = fw->xseq_gp_reg;
1476 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1477 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1478 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1479 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1480 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1481 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1482 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1483 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1485 iter_reg = fw->xseq_0_reg;
1486 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
1487 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
1488 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
1490 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1492 /* Receive sequence registers. */
1493 iter_reg = fw->rseq_gp_reg;
1494 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1495 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1496 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1497 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1498 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1499 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1500 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1501 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1503 iter_reg = fw->rseq_0_reg;
1504 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
1505 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
1507 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1508 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1510 /* Auxiliary sequence registers. */
1511 iter_reg = fw->aseq_gp_reg;
1512 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
1513 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
1514 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
1515 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
1516 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
1517 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
1518 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
1519 qla24xx_read_window(reg, 0xB070, 16, iter_reg);
1521 iter_reg = fw->aseq_0_reg;
1522 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
1523 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
1525 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
1526 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
1528 /* Command DMA registers. */
1529 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1531 /* Queues. */
1532 iter_reg = fw->req0_dma_reg;
1533 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1534 dmp_reg = &reg->iobase_q;
1535 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1536 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1538 iter_reg = fw->resp0_dma_reg;
1539 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1540 dmp_reg = &reg->iobase_q;
1541 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1542 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1544 iter_reg = fw->req1_dma_reg;
1545 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1546 dmp_reg = &reg->iobase_q;
1547 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1548 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1550 /* Transmit DMA registers. */
1551 iter_reg = fw->xmt0_dma_reg;
1552 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1553 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1555 iter_reg = fw->xmt1_dma_reg;
1556 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1557 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1559 iter_reg = fw->xmt2_dma_reg;
1560 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1561 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1563 iter_reg = fw->xmt3_dma_reg;
1564 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1565 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1567 iter_reg = fw->xmt4_dma_reg;
1568 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1569 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1571 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1573 /* Receive DMA registers. */
1574 iter_reg = fw->rcvt0_data_dma_reg;
1575 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1576 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1578 iter_reg = fw->rcvt1_data_dma_reg;
1579 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1580 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1582 /* RISC registers. */
1583 iter_reg = fw->risc_gp_reg;
1584 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1585 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1586 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1587 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1588 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1589 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1590 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1591 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1593 /* Local memory controller registers. */
1594 iter_reg = fw->lmc_reg;
1595 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1596 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1597 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1598 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1599 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1600 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1601 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1602 qla24xx_read_window(reg, 0x3070, 16, iter_reg);
1604 /* Fibre Protocol Module registers. */
1605 iter_reg = fw->fpm_hdw_reg;
1606 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1607 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1608 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1609 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1610 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1611 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1612 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1613 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1614 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1615 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1616 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1617 qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1619 /* Frame Buffer registers. */
1620 iter_reg = fw->fb_hdw_reg;
1621 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1622 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1623 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1624 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1625 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1626 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1627 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1628 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1629 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1630 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1631 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1632 qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
1634 /* Multi queue registers */
1635 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
1636 &last_chain);
1638 rval = qla24xx_soft_reset(ha);
1639 if (rval != QLA_SUCCESS)
1640 goto qla25xx_fw_dump_failed_0;
1642 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1643 &nxt);
1644 if (rval != QLA_SUCCESS)
1645 goto qla25xx_fw_dump_failed_0;
1647 nxt = qla2xxx_copy_queues(ha, nxt);
1649 qla24xx_copy_eft(ha, nxt);
1651 /* Chain entries -- started with MQ. */
1652 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1653 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
1654 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1655 nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
1656 if (last_chain) {
1657 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1658 *last_chain |= htonl(DUMP_CHAIN_LAST);
1661 /* Adjust valid length. */
1662 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1664 qla25xx_fw_dump_failed_0:
1665 qla2xxx_dump_post_process(base_vha, rval);
1667 qla25xx_fw_dump_failed:
1668 #ifndef __CHECKER__
1669 if (!hardware_locked)
1670 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1671 #else
1673 #endif
1676 void
1677 qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1679 int rval;
1680 uint32_t cnt;
1681 struct qla_hw_data *ha = vha->hw;
1682 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1683 uint32_t __iomem *dmp_reg;
1684 uint32_t *iter_reg;
1685 uint16_t __iomem *mbx_reg;
1686 unsigned long flags;
1687 struct qla81xx_fw_dump *fw;
1688 void *nxt, *nxt_chain;
1689 uint32_t *last_chain = NULL;
1690 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1692 flags = 0;
1693 ha->fw_dump_cap_flags = 0;
1695 #ifndef __CHECKER__
1696 if (!hardware_locked)
1697 spin_lock_irqsave(&ha->hardware_lock, flags);
1698 #endif
1700 if (!ha->fw_dump) {
1701 ql_log(ql_log_warn, vha, 0xd00a,
1702 "No buffer available for dump.\n");
1703 goto qla81xx_fw_dump_failed;
1706 if (ha->fw_dumped) {
1707 ql_log(ql_log_warn, vha, 0xd00b,
1708 "Firmware has been previously dumped (%p) "
1709 "-- ignoring request.\n",
1710 ha->fw_dump);
1711 goto qla81xx_fw_dump_failed;
1713 fw = &ha->fw_dump->isp.isp81;
1714 qla2xxx_prep_dump(ha, ha->fw_dump);
1716 fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1718 /*
1719 * Pause RISC. No need to track timeout, as resetting the chip
1720 * is the right approach in case of a pause timeout.
1721 */
1722 qla24xx_pause_risc(reg, ha);
1724 /* Host/Risc registers. */
1725 iter_reg = fw->host_risc_reg;
1726 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
1727 qla24xx_read_window(reg, 0x7010, 16, iter_reg);
1729 /* PCIe registers. */
1730 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1731 RD_REG_DWORD(&reg->iobase_addr);
1732 WRT_REG_DWORD(&reg->iobase_window, 0x01);
1733 dmp_reg = &reg->iobase_c4;
1734 fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
1735 dmp_reg++;
1736 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
1737 dmp_reg++;
1738 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
1739 fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
1741 WRT_REG_DWORD(&reg->iobase_window, 0x00);
1742 RD_REG_DWORD(&reg->iobase_window);
1744 /* Host interface registers. */
1745 dmp_reg = &reg->flash_addr;
1746 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
1747 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
1749 /* Disable interrupts. */
1750 WRT_REG_DWORD(&reg->ictrl, 0);
1751 RD_REG_DWORD(&reg->ictrl);
1753 /* Shadow registers. */
1754 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1755 RD_REG_DWORD(&reg->iobase_addr);
1756 WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
1757 fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1759 WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
1760 fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1762 WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
1763 fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1765 WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
1766 fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1768 WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
1769 fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1771 WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
1772 fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1774 WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
1775 fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1777 WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
1778 fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1780 WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
1781 fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1783 WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
1784 fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1786 WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
1787 fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1789 /* RISC I/O register. */
1790 WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
1791 fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
1793 /* Mailbox registers. */
1794 mbx_reg = &reg->mailbox0;
1795 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
1796 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
1798 /* Transfer sequence registers. */
1799 iter_reg = fw->xseq_gp_reg;
1800 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1801 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1802 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1803 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1804 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1805 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1806 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1807 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1809 iter_reg = fw->xseq_0_reg;
1810 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
1811 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
1812 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
1814 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1816 /* Receive sequence registers. */
1817 iter_reg = fw->rseq_gp_reg;
1818 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1819 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1820 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1821 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1822 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1823 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1824 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1825 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1827 iter_reg = fw->rseq_0_reg;
1828 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
1829 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
1831 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1832 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1834 /* Auxiliary sequence registers. */
1835 iter_reg = fw->aseq_gp_reg;
1836 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
1837 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
1838 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
1839 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
1840 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
1841 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
1842 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
1843 qla24xx_read_window(reg, 0xB070, 16, iter_reg);
1845 iter_reg = fw->aseq_0_reg;
1846 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
1847 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
1849 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
1850 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
1852 /* Command DMA registers. */
1853 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1855 /* Queues. */
1856 iter_reg = fw->req0_dma_reg;
1857 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1858 dmp_reg = &reg->iobase_q;
1859 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1860 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1862 iter_reg = fw->resp0_dma_reg;
1863 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1864 dmp_reg = &reg->iobase_q;
1865 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1866 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1868 iter_reg = fw->req1_dma_reg;
1869 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1870 dmp_reg = &reg->iobase_q;
1871 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1872 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1874 /* Transmit DMA registers. */
1875 iter_reg = fw->xmt0_dma_reg;
1876 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1877 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1879 iter_reg = fw->xmt1_dma_reg;
1880 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1881 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1883 iter_reg = fw->xmt2_dma_reg;
1884 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1885 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1887 iter_reg = fw->xmt3_dma_reg;
1888 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1889 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1891 iter_reg = fw->xmt4_dma_reg;
1892 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1893 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1895 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1897 /* Receive DMA registers. */
1898 iter_reg = fw->rcvt0_data_dma_reg;
1899 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1900 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1902 iter_reg = fw->rcvt1_data_dma_reg;
1903 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1904 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1906 /* RISC registers. */
1907 iter_reg = fw->risc_gp_reg;
1908 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1909 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1910 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1911 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1912 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1913 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1914 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1915 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1917 /* Local memory controller registers. */
1918 iter_reg = fw->lmc_reg;
1919 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1920 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1921 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1922 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1923 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1924 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1925 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1926 qla24xx_read_window(reg, 0x3070, 16, iter_reg);
1928 /* Fibre Protocol Module registers. */
1929 iter_reg = fw->fpm_hdw_reg;
1930 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1931 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1932 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1933 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1934 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1935 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1936 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1937 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1938 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1939 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1940 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1941 iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1942 iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
1943 qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
1945 /* Frame Buffer registers. */
1946 iter_reg = fw->fb_hdw_reg;
1947 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1948 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1949 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1950 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1951 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1952 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1953 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1954 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1955 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1956 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1957 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1958 iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
1959 qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
1961 /* Multi queue registers */
1962 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
1963 &last_chain);
1965 rval = qla24xx_soft_reset(ha);
1966 if (rval != QLA_SUCCESS)
1967 goto qla81xx_fw_dump_failed_0;
1969 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1970 &nxt);
1971 if (rval != QLA_SUCCESS)
1972 goto qla81xx_fw_dump_failed_0;
1974 nxt = qla2xxx_copy_queues(ha, nxt);
1976 qla24xx_copy_eft(ha, nxt);
1978 /* Chain entries -- started with MQ. */
1979 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1980 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
1981 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1982 nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
1983 nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain);
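/*
 * Descriptive note: each qla25xx_copy_*()/qla2xxx_copy_atioqueues()/
 * qla81xx_copy_exchoffld() call above appends an optional "chain" entry to
 * the dump buffer and, when it adds one, leaves last_chain pointing at that
 * entry's header.  The check below then flags the dump as containing chained
 * entries (DUMP_CHAIN_VARIANT) and marks the final entry with
 * DUMP_CHAIN_LAST so a consumer of the dump can tell where the chain ends.
 */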
1984 if (last_chain) {
1985 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1986 *last_chain |= htonl(DUMP_CHAIN_LAST);
1989 /* Adjust valid length. */
1990 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1992 qla81xx_fw_dump_failed_0:
1993 qla2xxx_dump_post_process(base_vha, rval);
1995 qla81xx_fw_dump_failed:
1996 #ifndef __CHECKER__
1997 if (!hardware_locked)
1998 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1999 #else
2001 #endif
2004 void
2005 qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
2007 int rval;
2008 uint32_t cnt;
2009 struct qla_hw_data *ha = vha->hw;
2010 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2011 uint32_t __iomem *dmp_reg;
2012 uint32_t *iter_reg;
2013 uint16_t __iomem *mbx_reg;
2014 unsigned long flags;
2015 struct qla83xx_fw_dump *fw;
2016 void *nxt, *nxt_chain;
2017 uint32_t *last_chain = NULL;
2018 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2020 flags = 0;
2021 ha->fw_dump_cap_flags = 0;
2023 #ifndef __CHECKER__
2024 if (!hardware_locked)
2025 spin_lock_irqsave(&ha->hardware_lock, flags);
2026 #endif
2028 if (!ha->fw_dump) {
2029 ql_log(ql_log_warn, vha, 0xd00c,
2030 "No buffer available for dump!!!\n");
2031 goto qla83xx_fw_dump_failed;
2034 if (ha->fw_dumped) {
2035 ql_log(ql_log_warn, vha, 0xd00d,
2036 "Firmware has been previously dumped (%p) -- ignoring "
2037 "request...\n", ha->fw_dump);
2038 goto qla83xx_fw_dump_failed;
2040 QLA_FW_STOPPED(ha);
2041 fw = &ha->fw_dump->isp.isp83;
2042 qla2xxx_prep_dump(ha, ha->fw_dump);
2044 fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
2046 /*
2047 * Pause RISC. No need to track timeout, as resetting the chip
2048 * is the right approach in case of a pause timeout.
2049 */
2050 qla24xx_pause_risc(reg, ha);
2052 WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
2053 dmp_reg = &reg->iobase_window;
2054 RD_REG_DWORD(dmp_reg);
2055 WRT_REG_DWORD(dmp_reg, 0);
2057 dmp_reg = &reg->unused_4_1[0];
2058 RD_REG_DWORD(dmp_reg);
2059 WRT_REG_DWORD(dmp_reg, 0);
2061 WRT_REG_DWORD(&reg->iobase_addr, 0x6010);
2062 dmp_reg = &reg->unused_4_1[2];
2063 RD_REG_DWORD(dmp_reg);
2064 WRT_REG_DWORD(dmp_reg, 0);
2066 /* select PCR and disable ecc checking and correction */
2067 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
2068 RD_REG_DWORD(&reg->iobase_addr);
2069 WRT_REG_DWORD(&reg->iobase_select, 0x60000000); /* write to F0h = PCR */
2071 /* Host/Risc registers. */
2072 iter_reg = fw->host_risc_reg;
2073 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
2074 iter_reg = qla24xx_read_window(reg, 0x7010, 16, iter_reg);
2075 qla24xx_read_window(reg, 0x7040, 16, iter_reg);
2077 /* PCIe registers. */
2078 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
2079 RD_REG_DWORD(&reg->iobase_addr);
2080 WRT_REG_DWORD(&reg->iobase_window, 0x01);
2081 dmp_reg = &reg->iobase_c4;
2082 fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
2083 dmp_reg++;
2084 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
2085 dmp_reg++;
2086 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
2087 fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
2089 WRT_REG_DWORD(&reg->iobase_window, 0x00);
2090 RD_REG_DWORD(&reg->iobase_window);
2092 /* Host interface registers. */
2093 dmp_reg = &reg->flash_addr;
2094 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
2095 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
2097 /* Disable interrupts. */
2098 WRT_REG_DWORD(&reg->ictrl, 0);
2099 RD_REG_DWORD(&reg->ictrl);
2101 /* Shadow registers. */
2102 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
2103 RD_REG_DWORD(&reg->iobase_addr);
2104 WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
2105 fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2107 WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
2108 fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2110 WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
2111 fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2113 WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
2114 fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2116 WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
2117 fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2119 WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
2120 fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2122 WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
2123 fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2125 WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
2126 fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2128 WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
2129 fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2131 WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
2132 fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2134 WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
2135 fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2137 /* RISC I/O register. */
2138 WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
2139 fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
2141 /* Mailbox registers. */
2142 mbx_reg = &reg->mailbox0;
2143 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
2144 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
2146 /* Transfer sequence registers. */
2147 iter_reg = fw->xseq_gp_reg;
2148 iter_reg = qla24xx_read_window(reg, 0xBE00, 16, iter_reg);
2149 iter_reg = qla24xx_read_window(reg, 0xBE10, 16, iter_reg);
2150 iter_reg = qla24xx_read_window(reg, 0xBE20, 16, iter_reg);
2151 iter_reg = qla24xx_read_window(reg, 0xBE30, 16, iter_reg);
2152 iter_reg = qla24xx_read_window(reg, 0xBE40, 16, iter_reg);
2153 iter_reg = qla24xx_read_window(reg, 0xBE50, 16, iter_reg);
2154 iter_reg = qla24xx_read_window(reg, 0xBE60, 16, iter_reg);
2155 iter_reg = qla24xx_read_window(reg, 0xBE70, 16, iter_reg);
2156 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
2157 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
2158 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
2159 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
2160 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
2161 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
2162 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
2163 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
2165 iter_reg = fw->xseq_0_reg;
2166 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
2167 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
2168 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
2170 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
2172 qla24xx_read_window(reg, 0xBEF0, 16, fw->xseq_2_reg);
2174 /* Receive sequence registers. */
2175 iter_reg = fw->rseq_gp_reg;
2176 iter_reg = qla24xx_read_window(reg, 0xFE00, 16, iter_reg);
2177 iter_reg = qla24xx_read_window(reg, 0xFE10, 16, iter_reg);
2178 iter_reg = qla24xx_read_window(reg, 0xFE20, 16, iter_reg);
2179 iter_reg = qla24xx_read_window(reg, 0xFE30, 16, iter_reg);
2180 iter_reg = qla24xx_read_window(reg, 0xFE40, 16, iter_reg);
2181 iter_reg = qla24xx_read_window(reg, 0xFE50, 16, iter_reg);
2182 iter_reg = qla24xx_read_window(reg, 0xFE60, 16, iter_reg);
2183 iter_reg = qla24xx_read_window(reg, 0xFE70, 16, iter_reg);
2184 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
2185 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
2186 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
2187 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
2188 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
2189 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
2190 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
2191 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
2193 iter_reg = fw->rseq_0_reg;
2194 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
2195 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
2197 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
2198 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
2199 qla24xx_read_window(reg, 0xFEF0, 16, fw->rseq_3_reg);
2201 /* Auxiliary sequence registers. */
2202 iter_reg = fw->aseq_gp_reg;
2203 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
2204 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
2205 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
2206 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
2207 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
2208 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
2209 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
2210 iter_reg = qla24xx_read_window(reg, 0xB070, 16, iter_reg);
2211 iter_reg = qla24xx_read_window(reg, 0xB100, 16, iter_reg);
2212 iter_reg = qla24xx_read_window(reg, 0xB110, 16, iter_reg);
2213 iter_reg = qla24xx_read_window(reg, 0xB120, 16, iter_reg);
2214 iter_reg = qla24xx_read_window(reg, 0xB130, 16, iter_reg);
2215 iter_reg = qla24xx_read_window(reg, 0xB140, 16, iter_reg);
2216 iter_reg = qla24xx_read_window(reg, 0xB150, 16, iter_reg);
2217 iter_reg = qla24xx_read_window(reg, 0xB160, 16, iter_reg);
2218 qla24xx_read_window(reg, 0xB170, 16, iter_reg);
2220 iter_reg = fw->aseq_0_reg;
2221 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
2222 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
2224 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
2225 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
2226 qla24xx_read_window(reg, 0xB1F0, 16, fw->aseq_3_reg);
2228 /* Command DMA registers. */
2229 iter_reg = fw->cmd_dma_reg;
2230 iter_reg = qla24xx_read_window(reg, 0x7100, 16, iter_reg);
2231 iter_reg = qla24xx_read_window(reg, 0x7120, 16, iter_reg);
2232 iter_reg = qla24xx_read_window(reg, 0x7130, 16, iter_reg);
2233 qla24xx_read_window(reg, 0x71F0, 16, iter_reg);
2235 /* Queues. */
2236 iter_reg = fw->req0_dma_reg;
2237 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
2238 dmp_reg = &reg->iobase_q;
2239 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
2240 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
2242 iter_reg = fw->resp0_dma_reg;
2243 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
2244 dmp_reg = &reg->iobase_q;
2245 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
2246 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
2248 iter_reg = fw->req1_dma_reg;
2249 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
2250 dmp_reg = &reg->iobase_q;
2251 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
2252 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
2254 /* Transmit DMA registers. */
2255 iter_reg = fw->xmt0_dma_reg;
2256 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
2257 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
2259 iter_reg = fw->xmt1_dma_reg;
2260 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
2261 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
2263 iter_reg = fw->xmt2_dma_reg;
2264 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
2265 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
2267 iter_reg = fw->xmt3_dma_reg;
2268 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
2269 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
2271 iter_reg = fw->xmt4_dma_reg;
2272 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
2273 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
2275 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
2277 /* Receive DMA registers. */
2278 iter_reg = fw->rcvt0_data_dma_reg;
2279 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
2280 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
2282 iter_reg = fw->rcvt1_data_dma_reg;
2283 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
2284 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
2286 /* RISC registers. */
2287 iter_reg = fw->risc_gp_reg;
2288 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
2289 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
2290 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
2291 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
2292 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
2293 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
2294 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
2295 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
2297 /* Local memory controller registers. */
2298 iter_reg = fw->lmc_reg;
2299 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
2300 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
2301 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
2302 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
2303 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
2304 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
2305 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
2306 qla24xx_read_window(reg, 0x3070, 16, iter_reg);
2308 /* Fibre Protocol Module registers. */
2309 iter_reg = fw->fpm_hdw_reg;
2310 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
2311 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
2312 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
2313 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
2314 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
2315 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
2316 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
2317 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
2318 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
2319 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
2320 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
2321 iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
2322 iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
2323 iter_reg = qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
2324 iter_reg = qla24xx_read_window(reg, 0x40E0, 16, iter_reg);
2325 qla24xx_read_window(reg, 0x40F0, 16, iter_reg);
2327 /* RQ0 Array registers. */
2328 iter_reg = fw->rq0_array_reg;
2329 iter_reg = qla24xx_read_window(reg, 0x5C00, 16, iter_reg);
2330 iter_reg = qla24xx_read_window(reg, 0x5C10, 16, iter_reg);
2331 iter_reg = qla24xx_read_window(reg, 0x5C20, 16, iter_reg);
2332 iter_reg = qla24xx_read_window(reg, 0x5C30, 16, iter_reg);
2333 iter_reg = qla24xx_read_window(reg, 0x5C40, 16, iter_reg);
2334 iter_reg = qla24xx_read_window(reg, 0x5C50, 16, iter_reg);
2335 iter_reg = qla24xx_read_window(reg, 0x5C60, 16, iter_reg);
2336 iter_reg = qla24xx_read_window(reg, 0x5C70, 16, iter_reg);
2337 iter_reg = qla24xx_read_window(reg, 0x5C80, 16, iter_reg);
2338 iter_reg = qla24xx_read_window(reg, 0x5C90, 16, iter_reg);
2339 iter_reg = qla24xx_read_window(reg, 0x5CA0, 16, iter_reg);
2340 iter_reg = qla24xx_read_window(reg, 0x5CB0, 16, iter_reg);
2341 iter_reg = qla24xx_read_window(reg, 0x5CC0, 16, iter_reg);
2342 iter_reg = qla24xx_read_window(reg, 0x5CD0, 16, iter_reg);
2343 iter_reg = qla24xx_read_window(reg, 0x5CE0, 16, iter_reg);
2344 qla24xx_read_window(reg, 0x5CF0, 16, iter_reg);
2346 /* RQ1 Array registers. */
2347 iter_reg = fw->rq1_array_reg;
2348 iter_reg = qla24xx_read_window(reg, 0x5D00, 16, iter_reg);
2349 iter_reg = qla24xx_read_window(reg, 0x5D10, 16, iter_reg);
2350 iter_reg = qla24xx_read_window(reg, 0x5D20, 16, iter_reg);
2351 iter_reg = qla24xx_read_window(reg, 0x5D30, 16, iter_reg);
2352 iter_reg = qla24xx_read_window(reg, 0x5D40, 16, iter_reg);
2353 iter_reg = qla24xx_read_window(reg, 0x5D50, 16, iter_reg);
2354 iter_reg = qla24xx_read_window(reg, 0x5D60, 16, iter_reg);
2355 iter_reg = qla24xx_read_window(reg, 0x5D70, 16, iter_reg);
2356 iter_reg = qla24xx_read_window(reg, 0x5D80, 16, iter_reg);
2357 iter_reg = qla24xx_read_window(reg, 0x5D90, 16, iter_reg);
2358 iter_reg = qla24xx_read_window(reg, 0x5DA0, 16, iter_reg);
2359 iter_reg = qla24xx_read_window(reg, 0x5DB0, 16, iter_reg);
2360 iter_reg = qla24xx_read_window(reg, 0x5DC0, 16, iter_reg);
2361 iter_reg = qla24xx_read_window(reg, 0x5DD0, 16, iter_reg);
2362 iter_reg = qla24xx_read_window(reg, 0x5DE0, 16, iter_reg);
2363 qla24xx_read_window(reg, 0x5DF0, 16, iter_reg);
2365 /* RP0 Array registers. */
2366 iter_reg = fw->rp0_array_reg;
2367 iter_reg = qla24xx_read_window(reg, 0x5E00, 16, iter_reg);
2368 iter_reg = qla24xx_read_window(reg, 0x5E10, 16, iter_reg);
2369 iter_reg = qla24xx_read_window(reg, 0x5E20, 16, iter_reg);
2370 iter_reg = qla24xx_read_window(reg, 0x5E30, 16, iter_reg);
2371 iter_reg = qla24xx_read_window(reg, 0x5E40, 16, iter_reg);
2372 iter_reg = qla24xx_read_window(reg, 0x5E50, 16, iter_reg);
2373 iter_reg = qla24xx_read_window(reg, 0x5E60, 16, iter_reg);
2374 iter_reg = qla24xx_read_window(reg, 0x5E70, 16, iter_reg);
2375 iter_reg = qla24xx_read_window(reg, 0x5E80, 16, iter_reg);
2376 iter_reg = qla24xx_read_window(reg, 0x5E90, 16, iter_reg);
2377 iter_reg = qla24xx_read_window(reg, 0x5EA0, 16, iter_reg);
2378 iter_reg = qla24xx_read_window(reg, 0x5EB0, 16, iter_reg);
2379 iter_reg = qla24xx_read_window(reg, 0x5EC0, 16, iter_reg);
2380 iter_reg = qla24xx_read_window(reg, 0x5ED0, 16, iter_reg);
2381 iter_reg = qla24xx_read_window(reg, 0x5EE0, 16, iter_reg);
2382 qla24xx_read_window(reg, 0x5EF0, 16, iter_reg);
2384 /* RP1 Array registers. */
2385 iter_reg = fw->rp1_array_reg;
2386 iter_reg = qla24xx_read_window(reg, 0x5F00, 16, iter_reg);
2387 iter_reg = qla24xx_read_window(reg, 0x5F10, 16, iter_reg);
2388 iter_reg = qla24xx_read_window(reg, 0x5F20, 16, iter_reg);
2389 iter_reg = qla24xx_read_window(reg, 0x5F30, 16, iter_reg);
2390 iter_reg = qla24xx_read_window(reg, 0x5F40, 16, iter_reg);
2391 iter_reg = qla24xx_read_window(reg, 0x5F50, 16, iter_reg);
2392 iter_reg = qla24xx_read_window(reg, 0x5F60, 16, iter_reg);
2393 iter_reg = qla24xx_read_window(reg, 0x5F70, 16, iter_reg);
2394 iter_reg = qla24xx_read_window(reg, 0x5F80, 16, iter_reg);
2395 iter_reg = qla24xx_read_window(reg, 0x5F90, 16, iter_reg);
2396 iter_reg = qla24xx_read_window(reg, 0x5FA0, 16, iter_reg);
2397 iter_reg = qla24xx_read_window(reg, 0x5FB0, 16, iter_reg);
2398 iter_reg = qla24xx_read_window(reg, 0x5FC0, 16, iter_reg);
2399 iter_reg = qla24xx_read_window(reg, 0x5FD0, 16, iter_reg);
2400 iter_reg = qla24xx_read_window(reg, 0x5FE0, 16, iter_reg);
2401 qla24xx_read_window(reg, 0x5FF0, 16, iter_reg);
2403 iter_reg = fw->at0_array_reg;
2404 iter_reg = qla24xx_read_window(reg, 0x7080, 16, iter_reg);
2405 iter_reg = qla24xx_read_window(reg, 0x7090, 16, iter_reg);
2406 iter_reg = qla24xx_read_window(reg, 0x70A0, 16, iter_reg);
2407 iter_reg = qla24xx_read_window(reg, 0x70B0, 16, iter_reg);
2408 iter_reg = qla24xx_read_window(reg, 0x70C0, 16, iter_reg);
2409 iter_reg = qla24xx_read_window(reg, 0x70D0, 16, iter_reg);
2410 iter_reg = qla24xx_read_window(reg, 0x70E0, 16, iter_reg);
2411 qla24xx_read_window(reg, 0x70F0, 16, iter_reg);
2413 /* I/O Queue Control registers. */
2414 qla24xx_read_window(reg, 0x7800, 16, fw->queue_control_reg);
2416 /* Frame Buffer registers. */
2417 iter_reg = fw->fb_hdw_reg;
2418 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
2419 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
2420 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
2421 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
2422 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
2423 iter_reg = qla24xx_read_window(reg, 0x6060, 16, iter_reg);
2424 iter_reg = qla24xx_read_window(reg, 0x6070, 16, iter_reg);
2425 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
2426 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
2427 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
2428 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
2429 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
2430 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
2431 iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
2432 iter_reg = qla24xx_read_window(reg, 0x6530, 16, iter_reg);
2433 iter_reg = qla24xx_read_window(reg, 0x6540, 16, iter_reg);
2434 iter_reg = qla24xx_read_window(reg, 0x6550, 16, iter_reg);
2435 iter_reg = qla24xx_read_window(reg, 0x6560, 16, iter_reg);
2436 iter_reg = qla24xx_read_window(reg, 0x6570, 16, iter_reg);
2437 iter_reg = qla24xx_read_window(reg, 0x6580, 16, iter_reg);
2438 iter_reg = qla24xx_read_window(reg, 0x6590, 16, iter_reg);
2439 iter_reg = qla24xx_read_window(reg, 0x65A0, 16, iter_reg);
2440 iter_reg = qla24xx_read_window(reg, 0x65B0, 16, iter_reg);
2441 iter_reg = qla24xx_read_window(reg, 0x65C0, 16, iter_reg);
2442 iter_reg = qla24xx_read_window(reg, 0x65D0, 16, iter_reg);
2443 iter_reg = qla24xx_read_window(reg, 0x65E0, 16, iter_reg);
2444 qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
2446 /* Multi queue registers */
2447 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
2448 &last_chain);
2450 rval = qla24xx_soft_reset(ha);
2451 if (rval != QLA_SUCCESS) {
2452 ql_log(ql_log_warn, vha, 0xd00e,
2453 "SOFT RESET FAILED, forcing continuation of dump!!!\n");
2454 rval = QLA_SUCCESS;
2456 ql_log(ql_log_warn, vha, 0xd00f, "try a bigger hammer!!!\n");
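/*
 * Descriptive note: fall back to a manual RISC reset through the HCCR --
 * assert RISC reset, release the RISC pause, clear the reset, then poll
 * mailbox0 until it reads zero, for up to roughly 150 ms
 * (30000 iterations x 5 us).  If the wait times out, the code RAM dump is
 * skipped and execution jumps straight to copying the queues below.
 */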
2458 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
2459 RD_REG_DWORD(&reg->hccr);
2461 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
2462 RD_REG_DWORD(&reg->hccr);
2464 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
2465 RD_REG_DWORD(&reg->hccr);
2467 for (cnt = 30000; cnt && (RD_REG_WORD(&reg->mailbox0)); cnt--)
2468 udelay(5);
2470 if (!cnt) {
2471 nxt = fw->code_ram;
2472 nxt += sizeof(fw->code_ram);
2473 nxt += (ha->fw_memory_size - 0x100000 + 1);
2474 goto copy_queue;
2475 } else {
2476 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
2477 ql_log(ql_log_warn, vha, 0xd010,
2478 "bigger hammer success?\n");
2482 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
2483 &nxt);
2484 if (rval != QLA_SUCCESS)
2485 goto qla83xx_fw_dump_failed_0;
2487 copy_queue:
2488 nxt = qla2xxx_copy_queues(ha, nxt);
2490 qla24xx_copy_eft(ha, nxt);
2492 /* Chain entries -- started with MQ. */
2493 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
2494 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
2495 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
2496 nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
2497 nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain);
2498 if (last_chain) {
2499 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
2500 *last_chain |= htonl(DUMP_CHAIN_LAST);
2503 /* Adjust valid length. */
2504 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
2506 qla83xx_fw_dump_failed_0:
2507 qla2xxx_dump_post_process(base_vha, rval);
2509 qla83xx_fw_dump_failed:
2510 #ifndef __CHECKER__
2511 if (!hardware_locked)
2512 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2513 #else
2515 #endif
2518 /****************************************************************************/
2519 /* Driver Debug Functions. */
2520 /****************************************************************************/
2522 static inline int
2523 ql_mask_match(uint level)
2525 return (level & ql2xextended_error_logging) == level;
2526 }
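/*
 * Illustration (not from the upstream source): ql2xextended_error_logging
 * is a bitmask of enabled debug categories, and a message is emitted only
 * when every bit of its level is set in that mask.  For a single-bit
 * category such as ql_dbg_disc, the message passes when that bit is
 * enabled and is filtered out otherwise; loading the module with
 * ql2xextended_error_logging=0x7fffffff enables all debug categories.
 */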
2528 /*
2529 * This function is for formatting and logging debug information.
2530 * It is to be used when vha is available. It formats the message
2531 * and logs it to the messages file.
2532 * parameters:
2533 * level: The level of the debug message to be printed.
2534 * If ql2xextended_error_logging is set appropriately,
2535 * this message will appear in the messages file.
2536 * vha: Pointer to the scsi_qla_host_t.
2537 * id: A unique identifier for the message. It identifies the
2538 * part of the code from where the message originated.
2539 * fmt: The printf-style format string for the message to be displayed.
2540 */
2541 void
2542 ql_dbg(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
2544 va_list va;
2545 struct va_format vaf;
2547 if (!ql_mask_match(level))
2548 return;
2550 va_start(va, fmt);
2552 vaf.fmt = fmt;
2553 vaf.va = &va;
2555 if (vha != NULL) {
2556 const struct pci_dev *pdev = vha->hw->pdev;
2557 /* <module-name> <pci-name> <msg-id>:<host> Message */
2558 pr_warn("%s [%s]-%04x:%ld: %pV",
2559 QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset,
2560 vha->host_no, &vaf);
2561 } else {
2562 pr_warn("%s [%s]-%04x: : %pV",
2563 QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf);
2566 va_end(va);
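/*
 * Usage sketch (illustrative, not part of the driver): a caller with a
 * valid vha would typically do something like
 *
 *	ql_dbg(ql_dbg_disc, vha, 0x2001,
 *	    "Port login complete, loop_id=%x.\n", fcport->loop_id);
 *
 * where fcport is a hypothetical fc_port_t pointer and 0x2001 an
 * illustrative message id (ql_dbg_offset is added to the id before
 * printing).  The resulting line is prefixed with the module name, the
 * PCI device name, the message id and the SCSI host number.
 */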
2570 /*
2571 * This function is for formatting and logging debug information.
2572 * It is to be used when vha is not available and the pci device is
2573 * available, i.e., before host allocation. It formats the message and
2574 * logs it to the messages file.
2575 * parameters:
2576 * level: The level of the debug message to be printed.
2577 * If ql2xextended_error_logging is set appropriately,
2578 * this message will appear in the messages file.
2579 * pdev: Pointer to the struct pci_dev.
2580 * id: A unique id for the message. It identifies the part
2581 * of the code from where the message originated.
2582 * fmt: The printf-style format string for the message to be displayed.
2583 */
2584 void
2585 ql_dbg_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...)
2587 va_list va;
2588 struct va_format vaf;
2590 if (pdev == NULL)
2591 return;
2592 if (!ql_mask_match(level))
2593 return;
2595 va_start(va, fmt);
2597 vaf.fmt = fmt;
2598 vaf.va = &va;
2600 /* <module-name> <dev-name>:<msg-id> Message */
2601 pr_warn("%s [%s]-%04x: : %pV",
2602 QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset, &vaf);
2604 va_end(va);
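/*
 * Usage sketch (illustrative): during PCI probe, before the SCSI host has
 * been allocated, the same kind of message is emitted against the pci_dev
 * instead of a vha, e.g.
 *
 *	ql_dbg_pci(ql_dbg_init, ha->pdev, 0x0011,
 *	    "Memory allocated for request queue.\n");
 *
 * with 0x0011 being an illustrative id.  The level check (ql_mask_match)
 * and the id offset behave exactly as in ql_dbg(); only the "<host>" part
 * of the prefix is absent.
 */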
2607 /*
2608 * This function is for formatting and logging log messages.
2609 * It is to be used when vha is available. It formats the message
2610 * and logs it to the messages file. All the messages will be logged
2611 * irrespective of the value of ql2xextended_error_logging.
2612 * parameters:
2613 * level: The level of the log message to be printed in the
2614 * messages file.
2615 * vha: Pointer to the scsi_qla_host_t.
2616 * id: A unique id for the message. It identifies the
2617 * part of the code from where the message originated.
2618 * fmt: The printf-style format string for the message to be displayed.
2619 */
2620 void
2621 ql_log(uint level, scsi_qla_host_t *vha, uint id, const char *fmt, ...)
2623 va_list va;
2624 struct va_format vaf;
2625 char pbuf[128];
2627 if (level > ql_errlev)
2628 return;
2630 if (vha != NULL) {
2631 const struct pci_dev *pdev = vha->hw->pdev;
2632 /* <module-name> <msg-id>:<host> Message */
2633 snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x:%ld: ",
2634 QL_MSGHDR, dev_name(&(pdev->dev)), id, vha->host_no);
2635 } else {
2636 snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
2637 QL_MSGHDR, "0000:00:00.0", id);
2639 pbuf[sizeof(pbuf) - 1] = 0;
2641 va_start(va, fmt);
2643 vaf.fmt = fmt;
2644 vaf.va = &va;
2646 switch (level) {
2647 case ql_log_fatal: /* FATAL LOG */
2648 pr_crit("%s%pV", pbuf, &vaf);
2649 break;
2650 case ql_log_warn:
2651 pr_err("%s%pV", pbuf, &vaf);
2652 break;
2653 case ql_log_info:
2654 pr_warn("%s%pV", pbuf, &vaf);
2655 break;
2656 default:
2657 pr_info("%s%pV", pbuf, &vaf);
2658 break;
2661 va_end(va);
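/*
 * Usage sketch (illustrative): unlike ql_dbg(), ql_log() is gated only by
 * ql_errlev, and the level selects the printk severity via the switch
 * above (ql_log_fatal -> pr_crit, ql_log_warn -> pr_err,
 * ql_log_info -> pr_warn, anything else -> pr_info).  A typical call:
 *
 *	ql_log(ql_log_warn, vha, 0x00d1,
 *	    "Unable to allocate memory for response queue.\n");
 *
 * with 0x00d1 being an illustrative message id.
 */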
2664 /*
2665 * This function is for formatting and logging log messages.
2666 * It is to be used when vha is not available and the pci device is
2667 * available, i.e., before host allocation. It formats the message and
2668 * logs it to the messages file. All the messages are logged irrespective
2669 * of the value of ql2xextended_error_logging.
2670 * parameters:
2671 * level: The level of the log message to be printed in the
2672 * messages file.
2673 * pdev: Pointer to the struct pci_dev.
2674 * id: A unique id for the message. It identifies the
2675 * part of the code from where the message originated.
2676 * fmt: The printf-style format string for the message to be displayed.
2677 */
2678 void
2679 ql_log_pci(uint level, struct pci_dev *pdev, uint id, const char *fmt, ...)
2681 va_list va;
2682 struct va_format vaf;
2683 char pbuf[128];
2685 if (pdev == NULL)
2686 return;
2687 if (level > ql_errlev)
2688 return;
2690 /* <module-name> <dev-name>:<msg-id> Message */
2691 snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
2692 QL_MSGHDR, dev_name(&(pdev->dev)), id);
2693 pbuf[sizeof(pbuf) - 1] = 0;
2695 va_start(va, fmt);
2697 vaf.fmt = fmt;
2698 vaf.va = &va;
2700 switch (level) {
2701 case ql_log_fatal: /* FATAL LOG */
2702 pr_crit("%s%pV", pbuf, &vaf);
2703 break;
2704 case ql_log_warn:
2705 pr_err("%s%pV", pbuf, &vaf);
2706 break;
2707 case ql_log_info:
2708 pr_warn("%s%pV", pbuf, &vaf);
2709 break;
2710 default:
2711 pr_info("%s%pV", pbuf, &vaf);
2712 break;
2715 va_end(va);
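/*
 * Usage sketch (illustrative): the PCI variant mirrors ql_log() for code
 * that runs before host allocation, e.g.
 *
 *	ql_log_pci(ql_log_fatal, pdev, 0x0005,
 *	    "Failed to enable PCI device.\n");
 *
 * with 0x0005 being an illustrative id; it is likewise gated only by
 * ql_errlev.
 */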
2718 void
2719 ql_dump_regs(uint level, scsi_qla_host_t *vha, uint id)
2721 int i;
2722 struct qla_hw_data *ha = vha->hw;
2723 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2724 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
2725 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
2726 uint16_t __iomem *mbx_reg;
2728 if (!ql_mask_match(level))
2729 return;
2731 if (IS_P3P_TYPE(ha))
2732 mbx_reg = &reg82->mailbox_in[0];
2733 else if (IS_FWI2_CAPABLE(ha))
2734 mbx_reg = &reg24->mailbox0;
2735 else
2736 mbx_reg = MAILBOX_REG(ha, reg, 0);
2738 ql_dbg(level, vha, id, "Mailbox registers:\n");
2739 for (i = 0; i < 6; i++, mbx_reg++)
2740 ql_dbg(level, vha, id,
2741 "mbox[%d] %#04x\n", i, RD_REG_WORD(mbx_reg));
2745 void
2746 ql_dump_buffer(uint level, scsi_qla_host_t *vha, uint id, const void *buf,
2747 uint size)
2749 uint cnt;
2751 if (!ql_mask_match(level))
2752 return;
2754 ql_dbg(level, vha, id,
2755 "%-+5d 0 1 2 3 4 5 6 7 8 9 A B C D E F\n", size);
2756 ql_dbg(level, vha, id,
2757 "----- -----------------------------------------------\n");
2758 for (cnt = 0; cnt < size; cnt += 16) {
2759 ql_dbg(level, vha, id, "%04x: ", cnt);
2760 print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1,
2761 buf + cnt, min(16U, size - cnt), false);
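/*
 * Illustrative output: a header line carrying the total size and the
 * column indices 0..F, a separator row, then one row per 16 bytes in the
 * form
 *
 *	0010: 20 00 00 00 00 a0 04 21 ...
 *
 * printed through print_hex_dump() with KERN_CONT so each row continues
 * the "%04x: " offset emitted by ql_dbg() (byte values are examples only).
 */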
2765 /*
2766 * This function is for formatting and logging log messages.
2767 * It is to be used when a queue pair (qpair) is available. It formats
2768 * the message and logs it to the messages file. All the messages will
2769 * be logged irrespective of the value of ql2xextended_error_logging.
2770 * parameters:
2771 * level: The level of the log message to be printed in the
2772 * messages file.
2773 * qpair: Pointer to the qla_qpair.
2774 * id: A unique id for the message. It identifies the
2775 * part of the code from where the message originated.
2776 * fmt: The printf-style format string for the message to be displayed.
2777 */
2778 void
2779 ql_log_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
2780 const char *fmt, ...)
2782 va_list va;
2783 struct va_format vaf;
2784 char pbuf[128];
2786 if (level > ql_errlev)
2787 return;
2789 if (qpair != NULL) {
2790 const struct pci_dev *pdev = qpair->pdev;
2791 /* <module-name> <msg-id>:<host> Message */
2792 snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: ",
2793 QL_MSGHDR, dev_name(&(pdev->dev)), id);
2794 } else {
2795 snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
2796 QL_MSGHDR, "0000:00:00.0", id);
2798 pbuf[sizeof(pbuf) - 1] = 0;
2800 va_start(va, fmt);
2802 vaf.fmt = fmt;
2803 vaf.va = &va;
2805 switch (level) {
2806 case ql_log_fatal: /* FATAL LOG */
2807 pr_crit("%s%pV", pbuf, &vaf);
2808 break;
2809 case ql_log_warn:
2810 pr_err("%s%pV", pbuf, &vaf);
2811 break;
2812 case ql_log_info:
2813 pr_warn("%s%pV", pbuf, &vaf);
2814 break;
2815 default:
2816 pr_info("%s%pV", pbuf, &vaf);
2817 break;
2820 va_end(va);
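/*
 * Usage sketch (illustrative): the _qp variants tag the message with the
 * queue pair's PCI device instead of a vha, which suits the
 * per-queue-pair I/O paths, e.g.
 *
 *	ql_log_qp(ql_log_warn, qpair, 0x3070,
 *	    "Unable to queue command on this qpair.\n");
 *
 * with 0x3070 being an illustrative message id.
 */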
2823 /*
2824 * This function is for formatting and logging debug information.
2825 * It is to be used when a queue pair (qpair) is available. It formats
2826 * the message and logs it to the messages file.
2827 * parameters:
2828 * level: The level of the debug message to be printed.
2829 * If ql2xextended_error_logging is set appropriately,
2830 * this message will appear in the messages file.
2831 * qpair: Pointer to the qla_qpair.
2832 * id: A unique identifier for the message. It identifies the
2833 * part of the code from where the message originated.
2834 * fmt: The printf-style format string for the message to be displayed.
2835 */
2836 void
2837 ql_dbg_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
2838 const char *fmt, ...)
2840 va_list va;
2841 struct va_format vaf;
2843 if (!ql_mask_match(level))
2844 return;
2846 va_start(va, fmt);
2848 vaf.fmt = fmt;
2849 vaf.va = &va;
2851 if (qpair != NULL) {
2852 const struct pci_dev *pdev = qpair->pdev;
2853 /* <module-name> <pci-name> <msg-id>:<host> Message */
2854 pr_warn("%s [%s]-%04x: %pV",
2855 QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset,
2856 &vaf);
2857 } else {
2858 pr_warn("%s [%s]-%04x: : %pV",
2859 QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf);
2862 va_end(va);