drivers/scsi/qla2xxx/qla_dbg.c (linux/fpc-iii.git, blob 5fd44c50bbac240a86182ae8a3a928dace3f67f4)
1 /*
2 * QLogic Fibre Channel HBA Driver
3 * Copyright (c) 2003-2014 QLogic Corporation
5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */
8 /*
9 * Table showing the current message id in use for each logging level.
10 * Update this table when adding new log/debug messages.
11 * ----------------------------------------------------------------------
12 * | Level | Last Value Used | Holes |
13 * ----------------------------------------------------------------------
14 * | Module Init and Probe | 0x0193 | 0x0146 |
15 * | | | 0x015b-0x0160 |
16 * | | | 0x016e |
17 * | Mailbox commands | 0x1206 | 0x11a2-0x11ff |
18 * | Device Discovery | 0x2134 | 0x210e-0x2116 |
19 * | | | 0x211a |
20 * | | | 0x211c-0x2128 |
21 * | | | 0x212a-0x2130 |
22 * | Queue Command and IO tracing | 0x3074 | 0x300b |
23 * | | | 0x3027-0x3028 |
24 * | | | 0x303d-0x3041 |
25 * | | | 0x302d,0x3033 |
26 * | | | 0x3036,0x3038 |
27 * | | | 0x303a |
28 * | DPC Thread | 0x4023 | 0x4002,0x4013 |
29 * | Async Events | 0x5090 | 0x502b-0x502f |
30 * | | | 0x5047 |
31 * | | | 0x5084,0x5075 |
32 * | | | 0x503d,0x5044 |
33 * | | | 0x505f |
34 * | Timer Routines | 0x6012 | |
35 * | User Space Interactions | 0x70e3 | 0x7018,0x702e |
36 * | | | 0x7020,0x7024 |
37 * | | | 0x7039,0x7045 |
38 * | | | 0x7073-0x7075 |
39 * | | | 0x70a5-0x70a6 |
40 * | | | 0x70a8,0x70ab |
41 * | | | 0x70ad-0x70ae |
42 * | | | 0x70d0-0x70d6 |
43 * | | | 0x70d7-0x70db |
44 * | Task Management | 0x8042 | 0x8000 |
45 * | | | 0x8019 |
46 * | | | 0x8025,0x8026 |
47 * | | | 0x8031,0x8032 |
48 * | | | 0x8039,0x803c |
49 * | AER/EEH | 0x9011 | |
50 * | Virtual Port | 0xa007 | |
51 * | ISP82XX Specific | 0xb157 | 0xb002,0xb024 |
52 * | | | 0xb09e,0xb0ae |
53 * | | | 0xb0c3,0xb0c6 |
54 * | | | 0xb0e0-0xb0ef |
55 * | | | 0xb085,0xb0dc |
56 * | | | 0xb107,0xb108 |
57 * | | | 0xb111,0xb11e |
58 * | | | 0xb12c,0xb12d |
59 * | | | 0xb13a,0xb142 |
60 * | | | 0xb13c-0xb140 |
61 * | | | 0xb149 |
62 * | MultiQ | 0xc010 | |
63 * | Misc | 0xd303 | 0xd031-0xd0ff |
64 * | | | 0xd101-0xd1fe |
65 * | | | 0xd214-0xd2fe |
66 * | Target Mode | 0xe081 | |
67 * | Target Mode Management | 0xf09b | 0xf002 |
68 * | | | 0xf046-0xf049 |
69 * | Target Mode Task Management | 0x1000d | |
70 * ----------------------------------------------------------------------
73 #include "qla_def.h"
75 #include <linux/delay.h>
77 static uint32_t ql_dbg_offset = 0x800;
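/*
 * Illustrative example (not part of the original file): the message ids
 * tracked in the table above are passed as the third argument of
 * ql_dbg()/ql_log().  A hypothetical Device Discovery trace might read:
 *
 *	ql_dbg(ql_dbg_disc, vha, 0x2134, "example discovery message.\n");
 *
 * where 0x2134 is the "Last Value Used" for that level; a new message
 * would take the next unused id or fill one of the listed holes.
 */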
79 static inline void
80 qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
82 fw_dump->fw_major_version = htonl(ha->fw_major_version);
83 fw_dump->fw_minor_version = htonl(ha->fw_minor_version);
84 fw_dump->fw_subminor_version = htonl(ha->fw_subminor_version);
85 fw_dump->fw_attributes = htonl(ha->fw_attributes);
87 fw_dump->vendor = htonl(ha->pdev->vendor);
88 fw_dump->device = htonl(ha->pdev->device);
89 fw_dump->subsystem_vendor = htonl(ha->pdev->subsystem_vendor);
90 fw_dump->subsystem_device = htonl(ha->pdev->subsystem_device);
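/*
 * Copy the base (queue 0) request and response rings into the dump
 * buffer at @ptr and return the address just past the copied data, so
 * the caller can continue appending to the dump.
 */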
93 static inline void *
94 qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)
96 struct req_que *req = ha->req_q_map[0];
97 struct rsp_que *rsp = ha->rsp_q_map[0];
98 /* Request queue. */
99 memcpy(ptr, req->ring, req->length *
100 sizeof(request_t));
102 /* Response queue. */
103 ptr += req->length * sizeof(request_t);
104 memcpy(ptr, rsp->ring, rsp->length *
105 sizeof(response_t));
107 return ptr + (rsp->length * sizeof(response_t));
111 qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
112 uint32_t ram_dwords, void **nxt)
114 int rval;
115 uint32_t cnt, stat, timer, dwords, idx;
116 uint16_t mb0;
117 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
118 dma_addr_t dump_dma = ha->gid_list_dma;
119 uint32_t *dump = (uint32_t *)ha->gid_list;
121 rval = QLA_SUCCESS;
122 mb0 = 0;
124 WRT_REG_WORD(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
125 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
127 dwords = qla2x00_gid_list_size(ha) / 4;
128 for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
129 cnt += dwords, addr += dwords) {
130 if (cnt + dwords > ram_dwords)
131 dwords = ram_dwords - cnt;
133 WRT_REG_WORD(&reg->mailbox1, LSW(addr));
134 WRT_REG_WORD(&reg->mailbox8, MSW(addr));
136 WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
137 WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
138 WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
139 WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
141 WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
142 WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
144 WRT_REG_WORD(&reg->mailbox9, 0);
145 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
147 ha->flags.mbox_int = 0;
148 for (timer = 6000000; timer; timer--) {
149 /* Check for pending interrupts. */
150 stat = RD_REG_DWORD(&reg->host_status);
151 if (stat & HSRX_RISC_INT) {
152 stat &= 0xff;
154 if (stat == 0x1 || stat == 0x2 ||
155 stat == 0x10 || stat == 0x11) {
156 set_bit(MBX_INTERRUPT,
157 &ha->mbx_cmd_flags);
159 mb0 = RD_REG_WORD(&reg->mailbox0);
160 RD_REG_WORD(&reg->mailbox1);
162 WRT_REG_DWORD(&reg->hccr,
163 HCCRX_CLR_RISC_INT);
164 RD_REG_DWORD(&reg->hccr);
165 break;
168 /* Clear this intr; it wasn't a mailbox intr */
169 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
170 RD_REG_DWORD(&reg->hccr);
172 udelay(5);
174 ha->flags.mbox_int = 1;
176 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
177 rval = mb0 & MBS_MASK;
178 for (idx = 0; idx < dwords; idx++)
179 ram[cnt + idx] = IS_QLA27XX(ha) ?
180 le32_to_cpu(dump[idx]) : swab32(dump[idx]);
181 } else {
182 rval = QLA_FUNCTION_FAILED;
186 *nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
187 return rval;
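/*
 * Read @ram_dwords of RISC RAM starting at @addr using the DUMP RISC
 * RAM EXTENDED mailbox command, one gid_list-sized DMA chunk at a
 * time; on success *@nxt points just past the data copied into @ram.
 */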
191 qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
192 uint32_t ram_dwords, void **nxt)
194 int rval;
195 uint32_t cnt, stat, timer, dwords, idx;
196 uint16_t mb0;
197 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
198 dma_addr_t dump_dma = ha->gid_list_dma;
199 uint32_t *dump = (uint32_t *)ha->gid_list;
201 rval = QLA_SUCCESS;
202 mb0 = 0;
204 WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
205 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
207 dwords = qla2x00_gid_list_size(ha) / 4;
208 for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
209 cnt += dwords, addr += dwords) {
210 if (cnt + dwords > ram_dwords)
211 dwords = ram_dwords - cnt;
213 WRT_REG_WORD(&reg->mailbox1, LSW(addr));
214 WRT_REG_WORD(&reg->mailbox8, MSW(addr));
216 WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
217 WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
218 WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
219 WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
221 WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
222 WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
223 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
225 ha->flags.mbox_int = 0;
226 for (timer = 6000000; timer; timer--) {
227 /* Check for pending interrupts. */
228 stat = RD_REG_DWORD(&reg->host_status);
229 if (stat & HSRX_RISC_INT) {
230 stat &= 0xff;
232 if (stat == 0x1 || stat == 0x2 ||
233 stat == 0x10 || stat == 0x11) {
234 set_bit(MBX_INTERRUPT,
235 &ha->mbx_cmd_flags);
237 mb0 = RD_REG_WORD(&reg->mailbox0);
239 WRT_REG_DWORD(&reg->hccr,
240 HCCRX_CLR_RISC_INT);
241 RD_REG_DWORD(&reg->hccr);
242 break;
245 /* Clear this intr; it wasn't a mailbox intr */
246 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
247 RD_REG_DWORD(&reg->hccr);
249 udelay(5);
251 ha->flags.mbox_int = 1;
253 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
254 rval = mb0 & MBS_MASK;
255 for (idx = 0; idx < dwords; idx++)
256 ram[cnt + idx] = IS_QLA27XX(ha) ?
257 le32_to_cpu(dump[idx]) : swab32(dump[idx]);
258 } else {
259 rval = QLA_FUNCTION_FAILED;
263 *nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
264 return rval;
267 static int
268 qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
269 uint32_t cram_size, void **nxt)
271 int rval;
273 /* Code RAM. */
274 rval = qla24xx_dump_ram(ha, 0x20000, code_ram, cram_size / 4, nxt);
275 if (rval != QLA_SUCCESS)
276 return rval;
278 set_bit(RISC_SRAM_DUMP_CMPL, &ha->fw_dump_cap_flags);
280 /* External Memory. */
281 rval = qla24xx_dump_ram(ha, 0x100000, *nxt,
282 ha->fw_memory_size - 0x100000 + 1, nxt);
283 if (rval == QLA_SUCCESS)
284 set_bit(RISC_EXT_MEM_DUMP_CMPL, &ha->fw_dump_cap_flags);
286 return rval;
289 static uint32_t *
290 qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
291 uint32_t count, uint32_t *buf)
293 uint32_t __iomem *dmp_reg;
295 WRT_REG_DWORD(&reg->iobase_addr, iobase);
296 dmp_reg = &reg->iobase_window;
297 for ( ; count--; dmp_reg++)
298 *buf++ = htonl(RD_REG_DWORD(dmp_reg));
300 return buf;
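/*
 * Request a RISC pause and record RISC_PAUSE_CMPL in the dump
 * capability flags if the host status confirms the RISC is paused.
 */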
303 void
304 qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha)
306 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
308 /* A 100 usec delay is sufficient for the hardware to pause the RISC */
309 udelay(100);
310 if (RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED)
311 set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags);
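/*
 * Shut down DMA, issue an ISP soft reset, release the RISC and wait for
 * mailbox0 to clear; each step that completes is recorded in
 * ha->fw_dump_cap_flags.
 */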
315 qla24xx_soft_reset(struct qla_hw_data *ha)
317 int rval = QLA_SUCCESS;
318 uint32_t cnt;
319 uint16_t wd;
320 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
323 * Reset RISC. The delay is dependent on system architecture.
324 * The driver can proceed with the reset sequence after waiting
325 * for a timeout period.
327 WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
328 for (cnt = 0; cnt < 30000; cnt++) {
329 if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
330 break;
332 udelay(10);
334 if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
335 set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
337 WRT_REG_DWORD(&reg->ctrl_status,
338 CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
339 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
341 udelay(100);
343 /* Wait for soft-reset to complete. */
344 for (cnt = 0; cnt < 30000; cnt++) {
345 if ((RD_REG_DWORD(&reg->ctrl_status) &
346 CSRX_ISP_SOFT_RESET) == 0)
347 break;
349 udelay(10);
351 if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
352 set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags);
354 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
355 RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
357 for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
358 rval == QLA_SUCCESS; cnt--) {
359 if (cnt)
360 udelay(10);
361 else
362 rval = QLA_FUNCTION_TIMEOUT;
364 if (rval == QLA_SUCCESS)
365 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
367 return rval;
370 static int
371 qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
372 uint32_t ram_words, void **nxt)
374 int rval;
375 uint32_t cnt, stat, timer, words, idx;
376 uint16_t mb0;
377 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
378 dma_addr_t dump_dma = ha->gid_list_dma;
379 uint16_t *dump = (uint16_t *)ha->gid_list;
381 rval = QLA_SUCCESS;
382 mb0 = 0;
384 WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED);
385 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
387 words = qla2x00_gid_list_size(ha) / 2;
388 for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS;
389 cnt += words, addr += words) {
390 if (cnt + words > ram_words)
391 words = ram_words - cnt;
393 WRT_MAILBOX_REG(ha, reg, 1, LSW(addr));
394 WRT_MAILBOX_REG(ha, reg, 8, MSW(addr));
396 WRT_MAILBOX_REG(ha, reg, 2, MSW(dump_dma));
397 WRT_MAILBOX_REG(ha, reg, 3, LSW(dump_dma));
398 WRT_MAILBOX_REG(ha, reg, 6, MSW(MSD(dump_dma)));
399 WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma)));
401 WRT_MAILBOX_REG(ha, reg, 4, words);
402 WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
404 for (timer = 6000000; timer; timer--) {
405 /* Check for pending interrupts. */
406 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
407 if (stat & HSR_RISC_INT) {
408 stat &= 0xff;
410 if (stat == 0x1 || stat == 0x2) {
411 set_bit(MBX_INTERRUPT,
412 &ha->mbx_cmd_flags);
414 mb0 = RD_MAILBOX_REG(ha, reg, 0);
416 /* Release mailbox registers. */
417 WRT_REG_WORD(&reg->semaphore, 0);
418 WRT_REG_WORD(&reg->hccr,
419 HCCR_CLR_RISC_INT);
420 RD_REG_WORD(&reg->hccr);
421 break;
422 } else if (stat == 0x10 || stat == 0x11) {
423 set_bit(MBX_INTERRUPT,
424 &ha->mbx_cmd_flags);
426 mb0 = RD_MAILBOX_REG(ha, reg, 0);
428 WRT_REG_WORD(&reg->hccr,
429 HCCR_CLR_RISC_INT);
430 RD_REG_WORD(&reg->hccr);
431 break;
434 /* clear this intr; it wasn't a mailbox intr */
435 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
436 RD_REG_WORD(&reg->hccr);
438 udelay(5);
441 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
442 rval = mb0 & MBS_MASK;
443 for (idx = 0; idx < words; idx++)
444 ram[cnt + idx] = swab16(dump[idx]);
445 } else {
446 rval = QLA_FUNCTION_FAILED;
450 *nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
451 return rval;
454 static inline void
455 qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
456 uint16_t *buf)
458 uint16_t __iomem *dmp_reg = &reg->u.isp2300.fb_cmd;
460 for ( ; count--; dmp_reg++)
461 *buf++ = htons(RD_REG_WORD(dmp_reg));
464 static inline void *
465 qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr)
467 if (!ha->eft)
468 return ptr;
470 memcpy(ptr, ha->eft, ntohl(ha->fw_dump->eft_size));
471 return ptr + ntohl(ha->fw_dump->eft_size);
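/*
 * Append an FCE chain entry to the dump: the chain header, the eight
 * FCE enable mailbox values and then the FCE trace buffer itself.
 */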
474 static inline void *
475 qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
477 uint32_t cnt;
478 uint32_t *iter_reg;
479 struct qla2xxx_fce_chain *fcec = ptr;
481 if (!ha->fce)
482 return ptr;
484 *last_chain = &fcec->type;
485 fcec->type = htonl(DUMP_CHAIN_FCE);
486 fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
487 fce_calc_size(ha->fce_bufs));
488 fcec->size = htonl(fce_calc_size(ha->fce_bufs));
489 fcec->addr_l = htonl(LSD(ha->fce_dma));
490 fcec->addr_h = htonl(MSD(ha->fce_dma));
492 iter_reg = fcec->eregs;
493 for (cnt = 0; cnt < 8; cnt++)
494 *iter_reg++ = htonl(ha->fce_mb[cnt]);
496 memcpy(iter_reg, ha->fce, ntohl(fcec->size));
498 return (char *)iter_reg + ntohl(fcec->size);
501 static inline void *
502 qla25xx_copy_exlogin(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
504 struct qla2xxx_offld_chain *c = ptr;
506 if (!ha->exlogin_buf)
507 return ptr;
509 *last_chain = &c->type;
511 c->type = cpu_to_be32(DUMP_CHAIN_EXLOGIN);
512 c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) +
513 ha->exlogin_size);
514 c->size = cpu_to_be32(ha->exlogin_size);
515 c->addr = cpu_to_be64(ha->exlogin_buf_dma);
517 ptr += sizeof(struct qla2xxx_offld_chain);
518 memcpy(ptr, ha->exlogin_buf, ha->exlogin_size);
520 return (char *)ptr + cpu_to_be32(c->size);
523 static inline void *
524 qla81xx_copy_exchoffld(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
526 struct qla2xxx_offld_chain *c = ptr;
528 if (!ha->exchoffld_buf)
529 return ptr;
531 *last_chain = &c->type;
533 c->type = cpu_to_be32(DUMP_CHAIN_EXCHG);
534 c->chain_size = cpu_to_be32(sizeof(struct qla2xxx_offld_chain) +
535 ha->exchoffld_size);
536 c->size = cpu_to_be32(ha->exchoffld_size);
537 c->addr = cpu_to_be64(ha->exchoffld_buf_dma);
539 ptr += sizeof(struct qla2xxx_offld_chain);
540 memcpy(ptr, ha->exchoffld_buf, ha->exchoffld_size);
542 return (char *)ptr + cpu_to_be32(c->size);
545 static inline void *
546 qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
547 uint32_t **last_chain)
549 struct qla2xxx_mqueue_chain *q;
550 struct qla2xxx_mqueue_header *qh;
551 uint32_t num_queues;
552 int que;
553 struct {
554 int length;
555 void *ring;
556 } aq, *aqp;
558 if (!ha->tgt.atio_ring)
559 return ptr;
561 num_queues = 1;
562 aqp = &aq;
563 aqp->length = ha->tgt.atio_q_length;
564 aqp->ring = ha->tgt.atio_ring;
566 for (que = 0; que < num_queues; que++) {
567 /* aqp = ha->atio_q_map[que]; */
568 q = ptr;
569 *last_chain = &q->type;
570 q->type = htonl(DUMP_CHAIN_QUEUE);
571 q->chain_size = htonl(
572 sizeof(struct qla2xxx_mqueue_chain) +
573 sizeof(struct qla2xxx_mqueue_header) +
574 (aqp->length * sizeof(request_t)));
575 ptr += sizeof(struct qla2xxx_mqueue_chain);
577 /* Add header. */
578 qh = ptr;
579 qh->queue = htonl(TYPE_ATIO_QUEUE);
580 qh->number = htonl(que);
581 qh->size = htonl(aqp->length * sizeof(request_t));
582 ptr += sizeof(struct qla2xxx_mqueue_header);
584 /* Add data. */
585 memcpy(ptr, aqp->ring, aqp->length * sizeof(request_t));
587 ptr += aqp->length * sizeof(request_t);
590 return ptr;
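/*
 * When multiqueue is enabled, append one chain entry (header plus ring
 * contents) for every additional request and response queue;
 * *last_chain is left pointing at the newest entry so the caller can
 * mark it DUMP_CHAIN_LAST.
 */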
593 static inline void *
594 qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
596 struct qla2xxx_mqueue_chain *q;
597 struct qla2xxx_mqueue_header *qh;
598 struct req_que *req;
599 struct rsp_que *rsp;
600 int que;
602 if (!ha->mqenable)
603 return ptr;
605 /* Request queues */
606 for (que = 1; que < ha->max_req_queues; que++) {
607 req = ha->req_q_map[que];
608 if (!req)
609 break;
611 /* Add chain. */
612 q = ptr;
613 *last_chain = &q->type;
614 q->type = htonl(DUMP_CHAIN_QUEUE);
615 q->chain_size = htonl(
616 sizeof(struct qla2xxx_mqueue_chain) +
617 sizeof(struct qla2xxx_mqueue_header) +
618 (req->length * sizeof(request_t)));
619 ptr += sizeof(struct qla2xxx_mqueue_chain);
621 /* Add header. */
622 qh = ptr;
623 qh->queue = htonl(TYPE_REQUEST_QUEUE);
624 qh->number = htonl(que);
625 qh->size = htonl(req->length * sizeof(request_t));
626 ptr += sizeof(struct qla2xxx_mqueue_header);
628 /* Add data. */
629 memcpy(ptr, req->ring, req->length * sizeof(request_t));
630 ptr += req->length * sizeof(request_t);
633 /* Response queues */
634 for (que = 1; que < ha->max_rsp_queues; que++) {
635 rsp = ha->rsp_q_map[que];
636 if (!rsp)
637 break;
639 /* Add chain. */
640 q = ptr;
641 *last_chain = &q->type;
642 q->type = htonl(DUMP_CHAIN_QUEUE);
643 q->chain_size = htonl(
644 sizeof(struct qla2xxx_mqueue_chain) +
645 sizeof(struct qla2xxx_mqueue_header) +
646 (rsp->length * sizeof(response_t)));
647 ptr += sizeof(struct qla2xxx_mqueue_chain);
649 /* Add header. */
650 qh = ptr;
651 qh->queue = htonl(TYPE_RESPONSE_QUEUE);
652 qh->number = htonl(que);
653 qh->size = htonl(rsp->length * sizeof(response_t));
654 ptr += sizeof(struct qla2xxx_mqueue_header);
656 /* Add data. */
657 memcpy(ptr, rsp->ring, rsp->length * sizeof(response_t));
658 ptr += rsp->length * sizeof(response_t);
661 return ptr;
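/*
 * Snapshot the request/response queue in/out pointer registers for
 * each queue pair; skipped when multiqueue is disabled or on
 * ISP83xx/ISP27xx, where the buffer is returned untouched.
 */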
664 static inline void *
665 qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
667 uint32_t cnt, que_idx;
668 uint8_t que_cnt;
669 struct qla2xxx_mq_chain *mq = ptr;
670 device_reg_t *reg;
672 if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
673 return ptr;
675 mq = ptr;
676 *last_chain = &mq->type;
677 mq->type = htonl(DUMP_CHAIN_MQ);
678 mq->chain_size = htonl(sizeof(struct qla2xxx_mq_chain));
680 que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
681 ha->max_req_queues : ha->max_rsp_queues;
682 mq->count = htonl(que_cnt);
683 for (cnt = 0; cnt < que_cnt; cnt++) {
684 reg = ISP_QUE_REG(ha, cnt);
685 que_idx = cnt * 4;
686 mq->qregs[que_idx] =
687 htonl(RD_REG_DWORD(&reg->isp25mq.req_q_in));
688 mq->qregs[que_idx+1] =
689 htonl(RD_REG_DWORD(&reg->isp25mq.req_q_out));
690 mq->qregs[que_idx+2] =
691 htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_in));
692 mq->qregs[que_idx+3] =
693 htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_out));
696 return ptr + sizeof(struct qla2xxx_mq_chain);
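/*
 * Log the outcome of a firmware dump attempt; on success mark the dump
 * as captured and post a QLA_UEVENT_CODE_FW_DUMP uevent to user space.
 */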
699 void
700 qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
702 struct qla_hw_data *ha = vha->hw;
704 if (rval != QLA_SUCCESS) {
705 ql_log(ql_log_warn, vha, 0xd000,
706 "Failed to dump firmware (%x), dump status flags (0x%lx).\n",
707 rval, ha->fw_dump_cap_flags);
708 ha->fw_dumped = 0;
709 } else {
710 ql_log(ql_log_info, vha, 0xd001,
711 "Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n",
712 vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags);
713 ha->fw_dumped = 1;
714 qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
719 * qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
720 * @vha: HA context
721 * @hardware_locked: Called with the hardware_lock
723 void
724 qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
726 int rval;
727 uint32_t cnt;
728 struct qla_hw_data *ha = vha->hw;
729 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
730 uint16_t __iomem *dmp_reg;
731 unsigned long flags;
732 struct qla2300_fw_dump *fw;
733 void *nxt;
734 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
736 flags = 0;
738 #ifndef __CHECKER__
739 if (!hardware_locked)
740 spin_lock_irqsave(&ha->hardware_lock, flags);
741 #endif
743 if (!ha->fw_dump) {
744 ql_log(ql_log_warn, vha, 0xd002,
745 "No buffer available for dump.\n");
746 goto qla2300_fw_dump_failed;
749 if (ha->fw_dumped) {
750 ql_log(ql_log_warn, vha, 0xd003,
751 "Firmware has been previously dumped (%p) "
752 "-- ignoring request.\n",
753 ha->fw_dump);
754 goto qla2300_fw_dump_failed;
756 fw = &ha->fw_dump->isp.isp23;
757 qla2xxx_prep_dump(ha, ha->fw_dump);
759 rval = QLA_SUCCESS;
760 fw->hccr = htons(RD_REG_WORD(&reg->hccr));
762 /* Pause RISC. */
763 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
764 if (IS_QLA2300(ha)) {
765 for (cnt = 30000;
766 (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
767 rval == QLA_SUCCESS; cnt--) {
768 if (cnt)
769 udelay(100);
770 else
771 rval = QLA_FUNCTION_TIMEOUT;
773 } else {
774 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
775 udelay(10);
778 if (rval == QLA_SUCCESS) {
779 dmp_reg = &reg->flash_address;
780 for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++)
781 fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
783 dmp_reg = &reg->u.isp2300.req_q_in;
784 for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2;
785 cnt++, dmp_reg++)
786 fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
788 dmp_reg = &reg->u.isp2300.mailbox0;
789 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2;
790 cnt++, dmp_reg++)
791 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
793 WRT_REG_WORD(&reg->ctrl_status, 0x40);
794 qla2xxx_read_window(reg, 32, fw->resp_dma_reg);
796 WRT_REG_WORD(&reg->ctrl_status, 0x50);
797 qla2xxx_read_window(reg, 48, fw->dma_reg);
799 WRT_REG_WORD(&reg->ctrl_status, 0x00);
800 dmp_reg = &reg->risc_hw;
801 for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2;
802 cnt++, dmp_reg++)
803 fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
805 WRT_REG_WORD(&reg->pcr, 0x2000);
806 qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
808 WRT_REG_WORD(&reg->pcr, 0x2200);
809 qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
811 WRT_REG_WORD(&reg->pcr, 0x2400);
812 qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
814 WRT_REG_WORD(&reg->pcr, 0x2600);
815 qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
817 WRT_REG_WORD(&reg->pcr, 0x2800);
818 qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
820 WRT_REG_WORD(&reg->pcr, 0x2A00);
821 qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
823 WRT_REG_WORD(&reg->pcr, 0x2C00);
824 qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
826 WRT_REG_WORD(&reg->pcr, 0x2E00);
827 qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
829 WRT_REG_WORD(&reg->ctrl_status, 0x10);
830 qla2xxx_read_window(reg, 64, fw->frame_buf_hdw_reg);
832 WRT_REG_WORD(&reg->ctrl_status, 0x20);
833 qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
835 WRT_REG_WORD(&reg->ctrl_status, 0x30);
836 qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
838 /* Reset RISC. */
839 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
840 for (cnt = 0; cnt < 30000; cnt++) {
841 if ((RD_REG_WORD(&reg->ctrl_status) &
842 CSR_ISP_SOFT_RESET) == 0)
843 break;
845 udelay(10);
849 if (!IS_QLA2300(ha)) {
850 for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
851 rval == QLA_SUCCESS; cnt--) {
852 if (cnt)
853 udelay(100);
854 else
855 rval = QLA_FUNCTION_TIMEOUT;
859 /* Get RISC SRAM. */
860 if (rval == QLA_SUCCESS)
861 rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram,
862 sizeof(fw->risc_ram) / 2, &nxt);
864 /* Get stack SRAM. */
865 if (rval == QLA_SUCCESS)
866 rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram,
867 sizeof(fw->stack_ram) / 2, &nxt);
869 /* Get data SRAM. */
870 if (rval == QLA_SUCCESS)
871 rval = qla2xxx_dump_ram(ha, 0x11000, fw->data_ram,
872 ha->fw_memory_size - 0x11000 + 1, &nxt);
874 if (rval == QLA_SUCCESS)
875 qla2xxx_copy_queues(ha, nxt);
877 qla2xxx_dump_post_process(base_vha, rval);
879 qla2300_fw_dump_failed:
880 #ifndef __CHECKER__
881 if (!hardware_locked)
882 spin_unlock_irqrestore(&ha->hardware_lock, flags);
883 #else
885 #endif
889 * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
890 * @vha: HA context
891 * @hardware_locked: Called with the hardware_lock
893 void
894 qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
896 int rval;
897 uint32_t cnt, timer;
898 uint16_t risc_address;
899 uint16_t mb0, mb2;
900 struct qla_hw_data *ha = vha->hw;
901 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
902 uint16_t __iomem *dmp_reg;
903 unsigned long flags;
904 struct qla2100_fw_dump *fw;
905 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
907 risc_address = 0;
908 mb0 = mb2 = 0;
909 flags = 0;
911 #ifndef __CHECKER__
912 if (!hardware_locked)
913 spin_lock_irqsave(&ha->hardware_lock, flags);
914 #endif
916 if (!ha->fw_dump) {
917 ql_log(ql_log_warn, vha, 0xd004,
918 "No buffer available for dump.\n");
919 goto qla2100_fw_dump_failed;
922 if (ha->fw_dumped) {
923 ql_log(ql_log_warn, vha, 0xd005,
924 "Firmware has been previously dumped (%p) "
925 "-- ignoring request.\n",
926 ha->fw_dump);
927 goto qla2100_fw_dump_failed;
929 fw = &ha->fw_dump->isp.isp21;
930 qla2xxx_prep_dump(ha, ha->fw_dump);
932 rval = QLA_SUCCESS;
933 fw->hccr = htons(RD_REG_WORD(&reg->hccr));
935 /* Pause RISC. */
936 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
937 for (cnt = 30000; (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
938 rval == QLA_SUCCESS; cnt--) {
939 if (cnt)
940 udelay(100);
941 else
942 rval = QLA_FUNCTION_TIMEOUT;
944 if (rval == QLA_SUCCESS) {
945 dmp_reg = &reg->flash_address;
946 for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++)
947 fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
949 dmp_reg = &reg->u.isp2100.mailbox0;
950 for (cnt = 0; cnt < ha->mbx_count; cnt++, dmp_reg++) {
951 if (cnt == 8)
952 dmp_reg = &reg->u_end.isp2200.mailbox8;
954 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
957 dmp_reg = &reg->u.isp2100.unused_2[0];
958 for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++, dmp_reg++)
959 fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
961 WRT_REG_WORD(&reg->ctrl_status, 0x00);
962 dmp_reg = &reg->risc_hw;
963 for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++, dmp_reg++)
964 fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
966 WRT_REG_WORD(&reg->pcr, 0x2000);
967 qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
969 WRT_REG_WORD(&reg->pcr, 0x2100);
970 qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
972 WRT_REG_WORD(&reg->pcr, 0x2200);
973 qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
975 WRT_REG_WORD(&reg->pcr, 0x2300);
976 qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
978 WRT_REG_WORD(&reg->pcr, 0x2400);
979 qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
981 WRT_REG_WORD(&reg->pcr, 0x2500);
982 qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
984 WRT_REG_WORD(&reg->pcr, 0x2600);
985 qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
987 WRT_REG_WORD(&reg->pcr, 0x2700);
988 qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
990 WRT_REG_WORD(&reg->ctrl_status, 0x10);
991 qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg);
993 WRT_REG_WORD(&reg->ctrl_status, 0x20);
994 qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
996 WRT_REG_WORD(&reg->ctrl_status, 0x30);
997 qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
999 /* Reset the ISP. */
1000 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
1003 for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
1004 rval == QLA_SUCCESS; cnt--) {
1005 if (cnt)
1006 udelay(100);
1007 else
1008 rval = QLA_FUNCTION_TIMEOUT;
1011 /* Pause RISC. */
1012 if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) &&
1013 (RD_REG_WORD(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) {
1015 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
1016 for (cnt = 30000;
1017 (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
1018 rval == QLA_SUCCESS; cnt--) {
1019 if (cnt)
1020 udelay(100);
1021 else
1022 rval = QLA_FUNCTION_TIMEOUT;
1024 if (rval == QLA_SUCCESS) {
1025 /* Set memory configuration and timing. */
1026 if (IS_QLA2100(ha))
1027 WRT_REG_WORD(&reg->mctr, 0xf1);
1028 else
1029 WRT_REG_WORD(&reg->mctr, 0xf2);
1030 RD_REG_WORD(&reg->mctr); /* PCI Posting. */
1032 /* Release RISC. */
1033 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
1037 if (rval == QLA_SUCCESS) {
1038 /* Get RISC SRAM. */
1039 risc_address = 0x1000;
1040 WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
1041 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
1043 for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS;
1044 cnt++, risc_address++) {
1045 WRT_MAILBOX_REG(ha, reg, 1, risc_address);
1046 WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
1048 for (timer = 6000000; timer != 0; timer--) {
1049 /* Check for pending interrupts. */
1050 if (RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) {
1051 if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
1052 set_bit(MBX_INTERRUPT,
1053 &ha->mbx_cmd_flags);
1055 mb0 = RD_MAILBOX_REG(ha, reg, 0);
1056 mb2 = RD_MAILBOX_REG(ha, reg, 2);
1058 WRT_REG_WORD(&reg->semaphore, 0);
1059 WRT_REG_WORD(&reg->hccr,
1060 HCCR_CLR_RISC_INT);
1061 RD_REG_WORD(&reg->hccr);
1062 break;
1064 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
1065 RD_REG_WORD(&reg->hccr);
1067 udelay(5);
1070 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
1071 rval = mb0 & MBS_MASK;
1072 fw->risc_ram[cnt] = htons(mb2);
1073 } else {
1074 rval = QLA_FUNCTION_FAILED;
1078 if (rval == QLA_SUCCESS)
1079 qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]);
1081 qla2xxx_dump_post_process(base_vha, rval);
1083 qla2100_fw_dump_failed:
1084 #ifndef __CHECKER__
1085 if (!hardware_locked)
1086 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1087 #else
1089 #endif
1092 void
1093 qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1095 int rval;
1096 uint32_t cnt;
1097 struct qla_hw_data *ha = vha->hw;
1098 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1099 uint32_t __iomem *dmp_reg;
1100 uint32_t *iter_reg;
1101 uint16_t __iomem *mbx_reg;
1102 unsigned long flags;
1103 struct qla24xx_fw_dump *fw;
1104 void *nxt;
1105 void *nxt_chain;
1106 uint32_t *last_chain = NULL;
1107 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1109 if (IS_P3P_TYPE(ha))
1110 return;
1112 flags = 0;
1113 ha->fw_dump_cap_flags = 0;
1115 #ifndef __CHECKER__
1116 if (!hardware_locked)
1117 spin_lock_irqsave(&ha->hardware_lock, flags);
1118 #endif
1120 if (!ha->fw_dump) {
1121 ql_log(ql_log_warn, vha, 0xd006,
1122 "No buffer available for dump.\n");
1123 goto qla24xx_fw_dump_failed;
1126 if (ha->fw_dumped) {
1127 ql_log(ql_log_warn, vha, 0xd007,
1128 "Firmware has been previously dumped (%p) "
1129 "-- ignoring request.\n",
1130 ha->fw_dump);
1131 goto qla24xx_fw_dump_failed;
1133 fw = &ha->fw_dump->isp.isp24;
1134 qla2xxx_prep_dump(ha, ha->fw_dump);
1136 fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1139 * Pause RISC. No need to track timeout, as resetting the chip
1140 * is the right approach in case of a pause timeout
1142 qla24xx_pause_risc(reg, ha);
1144 /* Host interface registers. */
1145 dmp_reg = &reg->flash_addr;
1146 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
1147 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
1149 /* Disable interrupts. */
1150 WRT_REG_DWORD(&reg->ictrl, 0);
1151 RD_REG_DWORD(&reg->ictrl);
1153 /* Shadow registers. */
1154 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1155 RD_REG_DWORD(&reg->iobase_addr);
1156 WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
1157 fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1159 WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
1160 fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1162 WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
1163 fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1165 WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
1166 fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1168 WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
1169 fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1171 WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
1172 fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1174 WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
1175 fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1177 /* Mailbox registers. */
1178 mbx_reg = &reg->mailbox0;
1179 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
1180 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
1182 /* Transfer sequence registers. */
1183 iter_reg = fw->xseq_gp_reg;
1184 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1185 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1186 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1187 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1188 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1189 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1190 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1191 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1193 qla24xx_read_window(reg, 0xBFE0, 16, fw->xseq_0_reg);
1194 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1196 /* Receive sequence registers. */
1197 iter_reg = fw->rseq_gp_reg;
1198 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1199 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1200 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1201 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1202 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1203 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1204 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1205 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1207 qla24xx_read_window(reg, 0xFFD0, 16, fw->rseq_0_reg);
1208 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1209 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1211 /* Command DMA registers. */
1212 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1214 /* Queues. */
1215 iter_reg = fw->req0_dma_reg;
1216 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1217 dmp_reg = &reg->iobase_q;
1218 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1219 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1221 iter_reg = fw->resp0_dma_reg;
1222 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1223 dmp_reg = &reg->iobase_q;
1224 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1225 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1227 iter_reg = fw->req1_dma_reg;
1228 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1229 dmp_reg = &reg->iobase_q;
1230 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1231 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1233 /* Transmit DMA registers. */
1234 iter_reg = fw->xmt0_dma_reg;
1235 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1236 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1238 iter_reg = fw->xmt1_dma_reg;
1239 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1240 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1242 iter_reg = fw->xmt2_dma_reg;
1243 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1244 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1246 iter_reg = fw->xmt3_dma_reg;
1247 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1248 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1250 iter_reg = fw->xmt4_dma_reg;
1251 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1252 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1254 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1256 /* Receive DMA registers. */
1257 iter_reg = fw->rcvt0_data_dma_reg;
1258 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1259 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1261 iter_reg = fw->rcvt1_data_dma_reg;
1262 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1263 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1265 /* RISC registers. */
1266 iter_reg = fw->risc_gp_reg;
1267 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1268 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1269 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1270 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1271 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1272 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1273 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1274 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1276 /* Local memory controller registers. */
1277 iter_reg = fw->lmc_reg;
1278 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1279 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1280 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1281 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1282 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1283 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1284 qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1286 /* Fibre Protocol Module registers. */
1287 iter_reg = fw->fpm_hdw_reg;
1288 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1289 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1290 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1291 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1292 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1293 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1294 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1295 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1296 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1297 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1298 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1299 qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1301 /* Frame Buffer registers. */
1302 iter_reg = fw->fb_hdw_reg;
1303 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1304 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1305 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1306 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1307 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1308 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1309 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1310 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1311 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1312 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1313 qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1315 rval = qla24xx_soft_reset(ha);
1316 if (rval != QLA_SUCCESS)
1317 goto qla24xx_fw_dump_failed_0;
1319 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1320 &nxt);
1321 if (rval != QLA_SUCCESS)
1322 goto qla24xx_fw_dump_failed_0;
1324 nxt = qla2xxx_copy_queues(ha, nxt);
1326 qla24xx_copy_eft(ha, nxt);
1328 nxt_chain = (void *)ha->fw_dump + ha->chain_offset;
1329 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1330 if (last_chain) {
1331 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1332 *last_chain |= htonl(DUMP_CHAIN_LAST);
1335 /* Adjust valid length. */
1336 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1338 qla24xx_fw_dump_failed_0:
1339 qla2xxx_dump_post_process(base_vha, rval);
1341 qla24xx_fw_dump_failed:
1342 #ifndef __CHECKER__
1343 if (!hardware_locked)
1344 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1345 #else
1347 #endif
1350 void
1351 qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1353 int rval;
1354 uint32_t cnt;
1355 struct qla_hw_data *ha = vha->hw;
1356 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1357 uint32_t __iomem *dmp_reg;
1358 uint32_t *iter_reg;
1359 uint16_t __iomem *mbx_reg;
1360 unsigned long flags;
1361 struct qla25xx_fw_dump *fw;
1362 void *nxt, *nxt_chain;
1363 uint32_t *last_chain = NULL;
1364 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1366 flags = 0;
1367 ha->fw_dump_cap_flags = 0;
1369 #ifndef __CHECKER__
1370 if (!hardware_locked)
1371 spin_lock_irqsave(&ha->hardware_lock, flags);
1372 #endif
1374 if (!ha->fw_dump) {
1375 ql_log(ql_log_warn, vha, 0xd008,
1376 "No buffer available for dump.\n");
1377 goto qla25xx_fw_dump_failed;
1380 if (ha->fw_dumped) {
1381 ql_log(ql_log_warn, vha, 0xd009,
1382 "Firmware has been previously dumped (%p) "
1383 "-- ignoring request.\n",
1384 ha->fw_dump);
1385 goto qla25xx_fw_dump_failed;
1387 fw = &ha->fw_dump->isp.isp25;
1388 qla2xxx_prep_dump(ha, ha->fw_dump);
1389 ha->fw_dump->version = htonl(2);
1391 fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1394 * Pause RISC. No need to track timeout, as resetting the chip
1395 * is the right approach in case of a pause timeout
1397 qla24xx_pause_risc(reg, ha);
1399 /* Host/Risc registers. */
1400 iter_reg = fw->host_risc_reg;
1401 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
1402 qla24xx_read_window(reg, 0x7010, 16, iter_reg);
1404 /* PCIe registers. */
1405 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1406 RD_REG_DWORD(&reg->iobase_addr);
1407 WRT_REG_DWORD(&reg->iobase_window, 0x01);
1408 dmp_reg = &reg->iobase_c4;
1409 fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
1410 dmp_reg++;
1411 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
1412 dmp_reg++;
1413 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
1414 fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
1416 WRT_REG_DWORD(&reg->iobase_window, 0x00);
1417 RD_REG_DWORD(&reg->iobase_window);
1419 /* Host interface registers. */
1420 dmp_reg = &reg->flash_addr;
1421 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
1422 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
1424 /* Disable interrupts. */
1425 WRT_REG_DWORD(&reg->ictrl, 0);
1426 RD_REG_DWORD(&reg->ictrl);
1428 /* Shadow registers. */
1429 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1430 RD_REG_DWORD(&reg->iobase_addr);
1431 WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
1432 fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1434 WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
1435 fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1437 WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
1438 fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1440 WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
1441 fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1443 WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
1444 fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1446 WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
1447 fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1449 WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
1450 fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1452 WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
1453 fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1455 WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
1456 fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1458 WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
1459 fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1461 WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
1462 fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1464 /* RISC I/O register. */
1465 WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
1466 fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
1468 /* Mailbox registers. */
1469 mbx_reg = &reg->mailbox0;
1470 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
1471 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
1473 /* Transfer sequence registers. */
1474 iter_reg = fw->xseq_gp_reg;
1475 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1476 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1477 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1478 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1479 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1480 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1481 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1482 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1484 iter_reg = fw->xseq_0_reg;
1485 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
1486 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
1487 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
1489 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1491 /* Receive sequence registers. */
1492 iter_reg = fw->rseq_gp_reg;
1493 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1494 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1495 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1496 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1497 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1498 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1499 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1500 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1502 iter_reg = fw->rseq_0_reg;
1503 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
1504 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
1506 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1507 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1509 /* Auxiliary sequence registers. */
1510 iter_reg = fw->aseq_gp_reg;
1511 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
1512 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
1513 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
1514 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
1515 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
1516 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
1517 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
1518 qla24xx_read_window(reg, 0xB070, 16, iter_reg);
1520 iter_reg = fw->aseq_0_reg;
1521 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
1522 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
1524 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
1525 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
1527 /* Command DMA registers. */
1528 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1530 /* Queues. */
1531 iter_reg = fw->req0_dma_reg;
1532 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1533 dmp_reg = &reg->iobase_q;
1534 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1535 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1537 iter_reg = fw->resp0_dma_reg;
1538 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1539 dmp_reg = &reg->iobase_q;
1540 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1541 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1543 iter_reg = fw->req1_dma_reg;
1544 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1545 dmp_reg = &reg->iobase_q;
1546 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1547 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1549 /* Transmit DMA registers. */
1550 iter_reg = fw->xmt0_dma_reg;
1551 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1552 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1554 iter_reg = fw->xmt1_dma_reg;
1555 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1556 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1558 iter_reg = fw->xmt2_dma_reg;
1559 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1560 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1562 iter_reg = fw->xmt3_dma_reg;
1563 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1564 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1566 iter_reg = fw->xmt4_dma_reg;
1567 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1568 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1570 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1572 /* Receive DMA registers. */
1573 iter_reg = fw->rcvt0_data_dma_reg;
1574 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1575 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1577 iter_reg = fw->rcvt1_data_dma_reg;
1578 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1579 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1581 /* RISC registers. */
1582 iter_reg = fw->risc_gp_reg;
1583 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1584 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1585 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1586 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1587 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1588 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1589 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1590 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1592 /* Local memory controller registers. */
1593 iter_reg = fw->lmc_reg;
1594 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1595 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1596 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1597 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1598 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1599 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1600 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1601 qla24xx_read_window(reg, 0x3070, 16, iter_reg);
1603 /* Fibre Protocol Module registers. */
1604 iter_reg = fw->fpm_hdw_reg;
1605 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1606 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1607 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1608 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1609 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1610 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1611 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1612 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1613 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1614 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1615 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1616 qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1618 /* Frame Buffer registers. */
1619 iter_reg = fw->fb_hdw_reg;
1620 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1621 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1622 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1623 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1624 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1625 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1626 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1627 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1628 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1629 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1630 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1631 qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
1633 /* Multi queue registers */
1634 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
1635 &last_chain);
1637 rval = qla24xx_soft_reset(ha);
1638 if (rval != QLA_SUCCESS)
1639 goto qla25xx_fw_dump_failed_0;
1641 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1642 &nxt);
1643 if (rval != QLA_SUCCESS)
1644 goto qla25xx_fw_dump_failed_0;
1646 nxt = qla2xxx_copy_queues(ha, nxt);
1648 qla24xx_copy_eft(ha, nxt);
1650 /* Chain entries -- started with MQ. */
1651 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1652 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
1653 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1654 nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
1655 if (last_chain) {
1656 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1657 *last_chain |= htonl(DUMP_CHAIN_LAST);
1660 /* Adjust valid length. */
1661 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1663 qla25xx_fw_dump_failed_0:
1664 qla2xxx_dump_post_process(base_vha, rval);
1666 qla25xx_fw_dump_failed:
1667 #ifndef __CHECKER__
1668 if (!hardware_locked)
1669 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1670 #else
1672 #endif
1675 void
1676 qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1678 int rval;
1679 uint32_t cnt;
1680 struct qla_hw_data *ha = vha->hw;
1681 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1682 uint32_t __iomem *dmp_reg;
1683 uint32_t *iter_reg;
1684 uint16_t __iomem *mbx_reg;
1685 unsigned long flags;
1686 struct qla81xx_fw_dump *fw;
1687 void *nxt, *nxt_chain;
1688 uint32_t *last_chain = NULL;
1689 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1691 flags = 0;
1692 ha->fw_dump_cap_flags = 0;
1694 #ifndef __CHECKER__
1695 if (!hardware_locked)
1696 spin_lock_irqsave(&ha->hardware_lock, flags);
1697 #endif
1699 if (!ha->fw_dump) {
1700 ql_log(ql_log_warn, vha, 0xd00a,
1701 "No buffer available for dump.\n");
1702 goto qla81xx_fw_dump_failed;
1705 if (ha->fw_dumped) {
1706 ql_log(ql_log_warn, vha, 0xd00b,
1707 "Firmware has been previously dumped (%p) "
1708 "-- ignoring request.\n",
1709 ha->fw_dump);
1710 goto qla81xx_fw_dump_failed;
1712 fw = &ha->fw_dump->isp.isp81;
1713 qla2xxx_prep_dump(ha, ha->fw_dump);
1715 fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1718 * Pause RISC. No need to track timeout, as resetting the chip
1719 * is the right approach in case of a pause timeout
1721 qla24xx_pause_risc(reg, ha);
1723 /* Host/Risc registers. */
1724 iter_reg = fw->host_risc_reg;
1725 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
1726 qla24xx_read_window(reg, 0x7010, 16, iter_reg);
1728 /* PCIe registers. */
1729 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1730 RD_REG_DWORD(&reg->iobase_addr);
1731 WRT_REG_DWORD(&reg->iobase_window, 0x01);
1732 dmp_reg = &reg->iobase_c4;
1733 fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
1734 dmp_reg++;
1735 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
1736 dmp_reg++;
1737 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
1738 fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
1740 WRT_REG_DWORD(&reg->iobase_window, 0x00);
1741 RD_REG_DWORD(&reg->iobase_window);
1743 /* Host interface registers. */
1744 dmp_reg = &reg->flash_addr;
1745 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
1746 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
1748 /* Disable interrupts. */
1749 WRT_REG_DWORD(&reg->ictrl, 0);
1750 RD_REG_DWORD(&reg->ictrl);
1752 /* Shadow registers. */
1753 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1754 RD_REG_DWORD(&reg->iobase_addr);
1755 WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
1756 fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1758 WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
1759 fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1761 WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
1762 fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1764 WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
1765 fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1767 WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
1768 fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1770 WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
1771 fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1773 WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
1774 fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1776 WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
1777 fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1779 WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
1780 fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1782 WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
1783 fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1785 WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
1786 fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1788 /* RISC I/O register. */
1789 WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
1790 fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
1792 /* Mailbox registers. */
1793 mbx_reg = &reg->mailbox0;
1794 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
1795 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
1797 /* Transfer sequence registers. */
1798 iter_reg = fw->xseq_gp_reg;
1799 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1800 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1801 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1802 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1803 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1804 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1805 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1806 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1808 iter_reg = fw->xseq_0_reg;
1809 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
1810 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
1811 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
1813 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1815 /* Receive sequence registers. */
1816 iter_reg = fw->rseq_gp_reg;
1817 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1818 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1819 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1820 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1821 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1822 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1823 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1824 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1826 iter_reg = fw->rseq_0_reg;
1827 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
1828 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
1830 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1831 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1833 /* Auxiliary sequence registers. */
1834 iter_reg = fw->aseq_gp_reg;
1835 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
1836 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
1837 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
1838 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
1839 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
1840 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
1841 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
1842 qla24xx_read_window(reg, 0xB070, 16, iter_reg);
1844 iter_reg = fw->aseq_0_reg;
1845 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
1846 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
1848 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
1849 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
1851 /* Command DMA registers. */
1852 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1854 /* Queues. */
1855 iter_reg = fw->req0_dma_reg;
1856 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1857 dmp_reg = &reg->iobase_q;
1858 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1859 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1861 iter_reg = fw->resp0_dma_reg;
1862 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1863 dmp_reg = &reg->iobase_q;
1864 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1865 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1867 iter_reg = fw->req1_dma_reg;
1868 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1869 dmp_reg = &reg->iobase_q;
1870 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1871 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1873 /* Transmit DMA registers. */
1874 iter_reg = fw->xmt0_dma_reg;
1875 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1876 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1878 iter_reg = fw->xmt1_dma_reg;
1879 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1880 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1882 iter_reg = fw->xmt2_dma_reg;
1883 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1884 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1886 iter_reg = fw->xmt3_dma_reg;
1887 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1888 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1890 iter_reg = fw->xmt4_dma_reg;
1891 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1892 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1894 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1896 /* Receive DMA registers. */
1897 iter_reg = fw->rcvt0_data_dma_reg;
1898 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1899 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1901 iter_reg = fw->rcvt1_data_dma_reg;
1902 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1903 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1905 /* RISC registers. */
1906 iter_reg = fw->risc_gp_reg;
1907 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1908 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1909 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1910 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1911 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1912 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1913 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1914 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1916 /* Local memory controller registers. */
1917 iter_reg = fw->lmc_reg;
1918 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1919 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1920 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1921 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1922 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1923 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1924 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1925 qla24xx_read_window(reg, 0x3070, 16, iter_reg);
1927 /* Fibre Protocol Module registers. */
1928 iter_reg = fw->fpm_hdw_reg;
1929 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1930 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1931 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1932 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1933 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1934 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1935 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1936 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1937 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1938 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1939 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1940 iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1941 iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
1942 qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
1944 /* Frame Buffer registers. */
1945 iter_reg = fw->fb_hdw_reg;
1946 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1947 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1948 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1949 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1950 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1951 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1952 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1953 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1954 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1955 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1956 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1957 iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
1958 qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
1960 /* Multi queue registers */
1961 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
1962 &last_chain);
1964 rval = qla24xx_soft_reset(ha);
1965 if (rval != QLA_SUCCESS)
1966 goto qla81xx_fw_dump_failed_0;
1968 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1969 &nxt);
1970 if (rval != QLA_SUCCESS)
1971 goto qla81xx_fw_dump_failed_0;
1973 nxt = qla2xxx_copy_queues(ha, nxt);
1975 qla24xx_copy_eft(ha, nxt);
1977 /* Chain entries -- started with MQ. */
1978 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1979 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
1980 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1981 nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
1982 nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain);
1983 if (last_chain) {
1984 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1985 *last_chain |= htonl(DUMP_CHAIN_LAST);
1988 /* Adjust valid length. */
1989 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1991 qla81xx_fw_dump_failed_0:
1992 qla2xxx_dump_post_process(base_vha, rval);
1994 qla81xx_fw_dump_failed:
1995 #ifndef __CHECKER__
1996 if (!hardware_locked)
1997 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1998 #else
2000 #endif
2003 void
2004 qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
2006 int rval;
2007 uint32_t cnt;
2008 struct qla_hw_data *ha = vha->hw;
2009 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
2010 uint32_t __iomem *dmp_reg;
2011 uint32_t *iter_reg;
2012 uint16_t __iomem *mbx_reg;
2013 unsigned long flags;
2014 struct qla83xx_fw_dump *fw;
2015 void *nxt, *nxt_chain;
2016 uint32_t *last_chain = NULL;
2017 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
2019 flags = 0;
2020 ha->fw_dump_cap_flags = 0;
2022 #ifndef __CHECKER__
2023 if (!hardware_locked)
2024 spin_lock_irqsave(&ha->hardware_lock, flags);
2025 #endif
2027 if (!ha->fw_dump) {
2028 ql_log(ql_log_warn, vha, 0xd00c,
2029 "No buffer available for dump!!!\n");
2030 goto qla83xx_fw_dump_failed;
2033 if (ha->fw_dumped) {
2034 ql_log(ql_log_warn, vha, 0xd00d,
2035 "Firmware has been previously dumped (%p) -- ignoring "
2036 "request...\n", ha->fw_dump);
2037 goto qla83xx_fw_dump_failed;
2039 fw = &ha->fw_dump->isp.isp83;
2040 qla2xxx_prep_dump(ha, ha->fw_dump);
2042 fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
2045	 * Pause RISC. No need to track timeout, as resetting the chip
2046	 * is the right approach in case of a pause timeout.
2048 qla24xx_pause_risc(reg, ha);
2050 WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
2051 dmp_reg = &reg->iobase_window;
2052 RD_REG_DWORD(dmp_reg);
2053 WRT_REG_DWORD(dmp_reg, 0);
2055 dmp_reg = &reg->unused_4_1[0];
2056 RD_REG_DWORD(dmp_reg);
2057 WRT_REG_DWORD(dmp_reg, 0);
2059 WRT_REG_DWORD(&reg->iobase_addr, 0x6010);
2060 dmp_reg = &reg->unused_4_1[2];
2061 RD_REG_DWORD(dmp_reg);
2062 WRT_REG_DWORD(dmp_reg, 0);
2064 /* select PCR and disable ecc checking and correction */
2065 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
2066 RD_REG_DWORD(&reg->iobase_addr);
2067 WRT_REG_DWORD(&reg->iobase_select, 0x60000000); /* write to F0h = PCR */
2069 /* Host/Risc registers. */
2070 iter_reg = fw->host_risc_reg;
2071 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
2072 iter_reg = qla24xx_read_window(reg, 0x7010, 16, iter_reg);
2073 qla24xx_read_window(reg, 0x7040, 16, iter_reg);
2075 /* PCIe registers. */
2076 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
2077 RD_REG_DWORD(&reg->iobase_addr);
2078 WRT_REG_DWORD(&reg->iobase_window, 0x01);
2079 dmp_reg = &reg->iobase_c4;
2080 fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
2081 dmp_reg++;
2082 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
2083 dmp_reg++;
2084 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
2085 fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
2087 WRT_REG_DWORD(&reg->iobase_window, 0x00);
2088 RD_REG_DWORD(&reg->iobase_window);
2090 /* Host interface registers. */
2091 dmp_reg = &reg->flash_addr;
2092 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
2093 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
2095 /* Disable interrupts. */
2096 WRT_REG_DWORD(&reg->ictrl, 0);
2097 RD_REG_DWORD(&reg->ictrl);
2099 /* Shadow registers. */
2100 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
2101 RD_REG_DWORD(&reg->iobase_addr);
2102 WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
2103 fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2105 WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
2106 fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2108 WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
2109 fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2111 WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
2112 fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2114 WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
2115 fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2117 WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
2118 fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2120 WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
2121 fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2123 WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
2124 fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2126 WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
2127 fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2129 WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
2130 fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2132 WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
2133 fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2135 /* RISC I/O register. */
2136 WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
2137 fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
2139 /* Mailbox registers. */
2140 mbx_reg = &reg->mailbox0;
2141 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
2142 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
2144 /* Transfer sequence registers. */
2145 iter_reg = fw->xseq_gp_reg;
2146 iter_reg = qla24xx_read_window(reg, 0xBE00, 16, iter_reg);
2147 iter_reg = qla24xx_read_window(reg, 0xBE10, 16, iter_reg);
2148 iter_reg = qla24xx_read_window(reg, 0xBE20, 16, iter_reg);
2149 iter_reg = qla24xx_read_window(reg, 0xBE30, 16, iter_reg);
2150 iter_reg = qla24xx_read_window(reg, 0xBE40, 16, iter_reg);
2151 iter_reg = qla24xx_read_window(reg, 0xBE50, 16, iter_reg);
2152 iter_reg = qla24xx_read_window(reg, 0xBE60, 16, iter_reg);
2153 iter_reg = qla24xx_read_window(reg, 0xBE70, 16, iter_reg);
2154 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
2155 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
2156 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
2157 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
2158 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
2159 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
2160 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
2161 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
2163 iter_reg = fw->xseq_0_reg;
2164 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
2165 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
2166 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
2168 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
2170 qla24xx_read_window(reg, 0xBEF0, 16, fw->xseq_2_reg);
2172 /* Receive sequence registers. */
2173 iter_reg = fw->rseq_gp_reg;
2174 iter_reg = qla24xx_read_window(reg, 0xFE00, 16, iter_reg);
2175 iter_reg = qla24xx_read_window(reg, 0xFE10, 16, iter_reg);
2176 iter_reg = qla24xx_read_window(reg, 0xFE20, 16, iter_reg);
2177 iter_reg = qla24xx_read_window(reg, 0xFE30, 16, iter_reg);
2178 iter_reg = qla24xx_read_window(reg, 0xFE40, 16, iter_reg);
2179 iter_reg = qla24xx_read_window(reg, 0xFE50, 16, iter_reg);
2180 iter_reg = qla24xx_read_window(reg, 0xFE60, 16, iter_reg);
2181 iter_reg = qla24xx_read_window(reg, 0xFE70, 16, iter_reg);
2182 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
2183 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
2184 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
2185 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
2186 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
2187 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
2188 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
2189 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
2191 iter_reg = fw->rseq_0_reg;
2192 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
2193 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
2195 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
2196 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
2197 qla24xx_read_window(reg, 0xFEF0, 16, fw->rseq_3_reg);
2199 /* Auxiliary sequence registers. */
2200 iter_reg = fw->aseq_gp_reg;
2201 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
2202 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
2203 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
2204 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
2205 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
2206 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
2207 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
2208 iter_reg = qla24xx_read_window(reg, 0xB070, 16, iter_reg);
2209 iter_reg = qla24xx_read_window(reg, 0xB100, 16, iter_reg);
2210 iter_reg = qla24xx_read_window(reg, 0xB110, 16, iter_reg);
2211 iter_reg = qla24xx_read_window(reg, 0xB120, 16, iter_reg);
2212 iter_reg = qla24xx_read_window(reg, 0xB130, 16, iter_reg);
2213 iter_reg = qla24xx_read_window(reg, 0xB140, 16, iter_reg);
2214 iter_reg = qla24xx_read_window(reg, 0xB150, 16, iter_reg);
2215 iter_reg = qla24xx_read_window(reg, 0xB160, 16, iter_reg);
2216 qla24xx_read_window(reg, 0xB170, 16, iter_reg);
2218 iter_reg = fw->aseq_0_reg;
2219 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
2220 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
2222 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
2223 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
2224 qla24xx_read_window(reg, 0xB1F0, 16, fw->aseq_3_reg);
2226 /* Command DMA registers. */
2227 iter_reg = fw->cmd_dma_reg;
2228 iter_reg = qla24xx_read_window(reg, 0x7100, 16, iter_reg);
2229 iter_reg = qla24xx_read_window(reg, 0x7120, 16, iter_reg);
2230 iter_reg = qla24xx_read_window(reg, 0x7130, 16, iter_reg);
2231 qla24xx_read_window(reg, 0x71F0, 16, iter_reg);
2233 /* Queues. */
2234 iter_reg = fw->req0_dma_reg;
2235 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
2236 dmp_reg = &reg->iobase_q;
2237 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
2238 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
2240 iter_reg = fw->resp0_dma_reg;
2241 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
2242 dmp_reg = &reg->iobase_q;
2243 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
2244 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
2246 iter_reg = fw->req1_dma_reg;
2247 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
2248 dmp_reg = &reg->iobase_q;
2249 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
2250 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
2252 /* Transmit DMA registers. */
2253 iter_reg = fw->xmt0_dma_reg;
2254 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
2255 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
2257 iter_reg = fw->xmt1_dma_reg;
2258 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
2259 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
2261 iter_reg = fw->xmt2_dma_reg;
2262 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
2263 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
2265 iter_reg = fw->xmt3_dma_reg;
2266 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
2267 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
2269 iter_reg = fw->xmt4_dma_reg;
2270 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
2271 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
2273 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
2275 /* Receive DMA registers. */
2276 iter_reg = fw->rcvt0_data_dma_reg;
2277 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
2278 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
2280 iter_reg = fw->rcvt1_data_dma_reg;
2281 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
2282 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
2284 /* RISC registers. */
2285 iter_reg = fw->risc_gp_reg;
2286 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
2287 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
2288 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
2289 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
2290 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
2291 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
2292 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
2293 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
2295 /* Local memory controller registers. */
2296 iter_reg = fw->lmc_reg;
2297 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
2298 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
2299 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
2300 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
2301 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
2302 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
2303 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
2304 qla24xx_read_window(reg, 0x3070, 16, iter_reg);
2306 /* Fibre Protocol Module registers. */
2307 iter_reg = fw->fpm_hdw_reg;
2308 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
2309 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
2310 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
2311 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
2312 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
2313 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
2314 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
2315 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
2316 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
2317 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
2318 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
2319 iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
2320 iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
2321 iter_reg = qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
2322 iter_reg = qla24xx_read_window(reg, 0x40E0, 16, iter_reg);
2323 qla24xx_read_window(reg, 0x40F0, 16, iter_reg);
2325 /* RQ0 Array registers. */
2326 iter_reg = fw->rq0_array_reg;
2327 iter_reg = qla24xx_read_window(reg, 0x5C00, 16, iter_reg);
2328 iter_reg = qla24xx_read_window(reg, 0x5C10, 16, iter_reg);
2329 iter_reg = qla24xx_read_window(reg, 0x5C20, 16, iter_reg);
2330 iter_reg = qla24xx_read_window(reg, 0x5C30, 16, iter_reg);
2331 iter_reg = qla24xx_read_window(reg, 0x5C40, 16, iter_reg);
2332 iter_reg = qla24xx_read_window(reg, 0x5C50, 16, iter_reg);
2333 iter_reg = qla24xx_read_window(reg, 0x5C60, 16, iter_reg);
2334 iter_reg = qla24xx_read_window(reg, 0x5C70, 16, iter_reg);
2335 iter_reg = qla24xx_read_window(reg, 0x5C80, 16, iter_reg);
2336 iter_reg = qla24xx_read_window(reg, 0x5C90, 16, iter_reg);
2337 iter_reg = qla24xx_read_window(reg, 0x5CA0, 16, iter_reg);
2338 iter_reg = qla24xx_read_window(reg, 0x5CB0, 16, iter_reg);
2339 iter_reg = qla24xx_read_window(reg, 0x5CC0, 16, iter_reg);
2340 iter_reg = qla24xx_read_window(reg, 0x5CD0, 16, iter_reg);
2341 iter_reg = qla24xx_read_window(reg, 0x5CE0, 16, iter_reg);
2342 qla24xx_read_window(reg, 0x5CF0, 16, iter_reg);
2344 /* RQ1 Array registers. */
2345 iter_reg = fw->rq1_array_reg;
2346 iter_reg = qla24xx_read_window(reg, 0x5D00, 16, iter_reg);
2347 iter_reg = qla24xx_read_window(reg, 0x5D10, 16, iter_reg);
2348 iter_reg = qla24xx_read_window(reg, 0x5D20, 16, iter_reg);
2349 iter_reg = qla24xx_read_window(reg, 0x5D30, 16, iter_reg);
2350 iter_reg = qla24xx_read_window(reg, 0x5D40, 16, iter_reg);
2351 iter_reg = qla24xx_read_window(reg, 0x5D50, 16, iter_reg);
2352 iter_reg = qla24xx_read_window(reg, 0x5D60, 16, iter_reg);
2353 iter_reg = qla24xx_read_window(reg, 0x5D70, 16, iter_reg);
2354 iter_reg = qla24xx_read_window(reg, 0x5D80, 16, iter_reg);
2355 iter_reg = qla24xx_read_window(reg, 0x5D90, 16, iter_reg);
2356 iter_reg = qla24xx_read_window(reg, 0x5DA0, 16, iter_reg);
2357 iter_reg = qla24xx_read_window(reg, 0x5DB0, 16, iter_reg);
2358 iter_reg = qla24xx_read_window(reg, 0x5DC0, 16, iter_reg);
2359 iter_reg = qla24xx_read_window(reg, 0x5DD0, 16, iter_reg);
2360 iter_reg = qla24xx_read_window(reg, 0x5DE0, 16, iter_reg);
2361 qla24xx_read_window(reg, 0x5DF0, 16, iter_reg);
2363 /* RP0 Array registers. */
2364 iter_reg = fw->rp0_array_reg;
2365 iter_reg = qla24xx_read_window(reg, 0x5E00, 16, iter_reg);
2366 iter_reg = qla24xx_read_window(reg, 0x5E10, 16, iter_reg);
2367 iter_reg = qla24xx_read_window(reg, 0x5E20, 16, iter_reg);
2368 iter_reg = qla24xx_read_window(reg, 0x5E30, 16, iter_reg);
2369 iter_reg = qla24xx_read_window(reg, 0x5E40, 16, iter_reg);
2370 iter_reg = qla24xx_read_window(reg, 0x5E50, 16, iter_reg);
2371 iter_reg = qla24xx_read_window(reg, 0x5E60, 16, iter_reg);
2372 iter_reg = qla24xx_read_window(reg, 0x5E70, 16, iter_reg);
2373 iter_reg = qla24xx_read_window(reg, 0x5E80, 16, iter_reg);
2374 iter_reg = qla24xx_read_window(reg, 0x5E90, 16, iter_reg);
2375 iter_reg = qla24xx_read_window(reg, 0x5EA0, 16, iter_reg);
2376 iter_reg = qla24xx_read_window(reg, 0x5EB0, 16, iter_reg);
2377 iter_reg = qla24xx_read_window(reg, 0x5EC0, 16, iter_reg);
2378 iter_reg = qla24xx_read_window(reg, 0x5ED0, 16, iter_reg);
2379 iter_reg = qla24xx_read_window(reg, 0x5EE0, 16, iter_reg);
2380 qla24xx_read_window(reg, 0x5EF0, 16, iter_reg);
2382 /* RP1 Array registers. */
2383 iter_reg = fw->rp1_array_reg;
2384 iter_reg = qla24xx_read_window(reg, 0x5F00, 16, iter_reg);
2385 iter_reg = qla24xx_read_window(reg, 0x5F10, 16, iter_reg);
2386 iter_reg = qla24xx_read_window(reg, 0x5F20, 16, iter_reg);
2387 iter_reg = qla24xx_read_window(reg, 0x5F30, 16, iter_reg);
2388 iter_reg = qla24xx_read_window(reg, 0x5F40, 16, iter_reg);
2389 iter_reg = qla24xx_read_window(reg, 0x5F50, 16, iter_reg);
2390 iter_reg = qla24xx_read_window(reg, 0x5F60, 16, iter_reg);
2391 iter_reg = qla24xx_read_window(reg, 0x5F70, 16, iter_reg);
2392 iter_reg = qla24xx_read_window(reg, 0x5F80, 16, iter_reg);
2393 iter_reg = qla24xx_read_window(reg, 0x5F90, 16, iter_reg);
2394 iter_reg = qla24xx_read_window(reg, 0x5FA0, 16, iter_reg);
2395 iter_reg = qla24xx_read_window(reg, 0x5FB0, 16, iter_reg);
2396 iter_reg = qla24xx_read_window(reg, 0x5FC0, 16, iter_reg);
2397 iter_reg = qla24xx_read_window(reg, 0x5FD0, 16, iter_reg);
2398 iter_reg = qla24xx_read_window(reg, 0x5FE0, 16, iter_reg);
2399 qla24xx_read_window(reg, 0x5FF0, 16, iter_reg);
2401 iter_reg = fw->at0_array_reg;
2402 iter_reg = qla24xx_read_window(reg, 0x7080, 16, iter_reg);
2403 iter_reg = qla24xx_read_window(reg, 0x7090, 16, iter_reg);
2404 iter_reg = qla24xx_read_window(reg, 0x70A0, 16, iter_reg);
2405 iter_reg = qla24xx_read_window(reg, 0x70B0, 16, iter_reg);
2406 iter_reg = qla24xx_read_window(reg, 0x70C0, 16, iter_reg);
2407 iter_reg = qla24xx_read_window(reg, 0x70D0, 16, iter_reg);
2408 iter_reg = qla24xx_read_window(reg, 0x70E0, 16, iter_reg);
2409 qla24xx_read_window(reg, 0x70F0, 16, iter_reg);
2411 /* I/O Queue Control registers. */
2412 qla24xx_read_window(reg, 0x7800, 16, fw->queue_control_reg);
2414 /* Frame Buffer registers. */
2415 iter_reg = fw->fb_hdw_reg;
2416 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
2417 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
2418 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
2419 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
2420 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
2421 iter_reg = qla24xx_read_window(reg, 0x6060, 16, iter_reg);
2422 iter_reg = qla24xx_read_window(reg, 0x6070, 16, iter_reg);
2423 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
2424 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
2425 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
2426 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
2427 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
2428 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
2429 iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
2430 iter_reg = qla24xx_read_window(reg, 0x6530, 16, iter_reg);
2431 iter_reg = qla24xx_read_window(reg, 0x6540, 16, iter_reg);
2432 iter_reg = qla24xx_read_window(reg, 0x6550, 16, iter_reg);
2433 iter_reg = qla24xx_read_window(reg, 0x6560, 16, iter_reg);
2434 iter_reg = qla24xx_read_window(reg, 0x6570, 16, iter_reg);
2435 iter_reg = qla24xx_read_window(reg, 0x6580, 16, iter_reg);
2436 iter_reg = qla24xx_read_window(reg, 0x6590, 16, iter_reg);
2437 iter_reg = qla24xx_read_window(reg, 0x65A0, 16, iter_reg);
2438 iter_reg = qla24xx_read_window(reg, 0x65B0, 16, iter_reg);
2439 iter_reg = qla24xx_read_window(reg, 0x65C0, 16, iter_reg);
2440 iter_reg = qla24xx_read_window(reg, 0x65D0, 16, iter_reg);
2441 iter_reg = qla24xx_read_window(reg, 0x65E0, 16, iter_reg);
2442 qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
2444 /* Multi queue registers */
2445 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
2446 &last_chain);
2448 rval = qla24xx_soft_reset(ha);
2449 if (rval != QLA_SUCCESS) {
2450 ql_log(ql_log_warn, vha, 0xd00e,
2451 "SOFT RESET FAILED, forcing continuation of dump!!!\n");
2452 rval = QLA_SUCCESS;
2454 ql_log(ql_log_warn, vha, 0xd00f, "try a bigger hammer!!!\n");
2456 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
2457 RD_REG_DWORD(&reg->hccr);
2459 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
2460 RD_REG_DWORD(&reg->hccr);
2462 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
2463 RD_REG_DWORD(&reg->hccr);
2465 for (cnt = 30000; cnt && (RD_REG_WORD(&reg->mailbox0)); cnt--)
2466 udelay(5);
2468 if (!cnt) {
2469 nxt = fw->code_ram;
2470 nxt += sizeof(fw->code_ram);
2471 nxt += (ha->fw_memory_size - 0x100000 + 1);
2472 goto copy_queue;
2473 } else {
2474 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
2475 ql_log(ql_log_warn, vha, 0xd010,
2476 "bigger hammer success?\n");
2480 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
2481 &nxt);
2482 if (rval != QLA_SUCCESS)
2483 goto qla83xx_fw_dump_failed_0;
2485 copy_queue:
2486 nxt = qla2xxx_copy_queues(ha, nxt);
2488 qla24xx_copy_eft(ha, nxt);
2490 /* Chain entries -- started with MQ. */
2491 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
2492 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
2493 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
2494 nxt_chain = qla25xx_copy_exlogin(ha, nxt_chain, &last_chain);
2495 nxt_chain = qla81xx_copy_exchoffld(ha, nxt_chain, &last_chain);
2496 if (last_chain) {
2497 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
2498 *last_chain |= htonl(DUMP_CHAIN_LAST);
2501 /* Adjust valid length. */
2502 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
2504 qla83xx_fw_dump_failed_0:
2505 qla2xxx_dump_post_process(base_vha, rval);
2507 qla83xx_fw_dump_failed:
2508 #ifndef __CHECKER__
2509 if (!hardware_locked)
2510 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2511 #else
2513 #endif
2516 /****************************************************************************/
2517 /* Driver Debug Functions. */
2518 /****************************************************************************/
2520 static inline int
2521 ql_mask_match(uint32_t level)
2523 return (level & ql2xextended_error_logging) == level;
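/*
 * Worked example (illustrative only, not from the original source): if
 * ql2xextended_error_logging were set to (ql_dbg_init | ql_dbg_io), then
 * ql_mask_match(ql_dbg_io) is true because every bit of ql_dbg_io is
 * present in the mask, while ql_mask_match(ql_dbg_mbx) is false since
 * (ql_dbg_mbx & ql2xextended_error_logging) != ql_dbg_mbx.
 */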
2527 * This function is for formatting and logging debug information.
2528 * It is to be used when vha is available. It formats the message
2529 * and logs it to the messages file.
2530 * parameters:
2531 * level: The level of the debug messages to be printed.
2532 * If ql2xextended_error_logging value is correctly set,
2533 * this message will appear in the messages file.
2534 * vha: Pointer to the scsi_qla_host_t.
2535 * id: This is a unique identifier for the level. It identifies the
2536 * part of the code from where the message originated.
2537 * msg: The message to be displayed.
2539 void
2540 ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
2542 va_list va;
2543 struct va_format vaf;
2545 if (!ql_mask_match(level))
2546 return;
2548 va_start(va, fmt);
2550 vaf.fmt = fmt;
2551 vaf.va = &va;
2553 if (vha != NULL) {
2554 const struct pci_dev *pdev = vha->hw->pdev;
2555 /* <module-name> <pci-name> <msg-id>:<host> Message */
2556 pr_warn("%s [%s]-%04x:%ld: %pV",
2557 QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset,
2558 vha->host_no, &vaf);
2559 } else {
2560 pr_warn("%s [%s]-%04x: : %pV",
2561 QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf);
2564 va_end(va);
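/*
 * Illustrative sketch of ql_dbg() usage (not part of the driver source).
 * The message id 0x3075 and the variables vha and handle are placeholders
 * chosen for this example; a real caller would pick an unused id from the
 * table at the top of this file.
 *
 *	ql_dbg(ql_dbg_io, vha, 0x3075,
 *	    "Command completed, handle=0x%x.\n", handle);
 *
 * The message is emitted only when the ql_dbg_io bit is set in the
 * ql2xextended_error_logging module parameter.
 */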
2569 * This function is for formatting and logging debug information.
2570 * It is to be used when vha is not available and pci is available,
2571 * i.e., before host allocation. It formats the message and logs it
2572 * to the messages file.
2573 * parameters:
2574 * level: The level of the debug messages to be printed.
2575 * If ql2xextended_error_logging value is correctly set,
2576 * this message will appear in the messages file.
2577 * pdev: Pointer to the struct pci_dev.
2578 * id: This is a unique id for the level. It identifies the part
2579 * of the code from where the message originated.
2580 * msg: The message to be displayed.
2582 void
2583 ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
2584 const char *fmt, ...)
2586 va_list va;
2587 struct va_format vaf;
2589 if (pdev == NULL)
2590 return;
2591 if (!ql_mask_match(level))
2592 return;
2594 va_start(va, fmt);
2596 vaf.fmt = fmt;
2597 vaf.va = &va;
2599 /* <module-name> <dev-name>:<msg-id> Message */
2600 pr_warn("%s [%s]-%04x: : %pV",
2601 QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset, &vaf);
2603 va_end(va);
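/*
 * Illustrative sketch of ql_dbg_pci() usage (not part of the driver
 * source), e.g. from probe code before the scsi host is allocated. The
 * message id 0x0194 and the variable pdev are placeholders.
 *
 *	ql_dbg_pci(ql_dbg_init, pdev, 0x0194,
 *	    "Allocating memory for firmware dump buffers.\n");
 */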
2607 * This function is for formatting and logging log messages.
2608 * It is to be used when vha is available. It formats the message
2609 * and logs it to the messages file. All the messages will be logged
2610 * irrespective of value of ql2xextended_error_logging.
2611 * parameters:
2612 * level: The level of the log messages to be printed in the
2613 * messages file.
2614 * vha: Pointer to the scsi_qla_host_t
2615 * id: This is a unique id for the level. It identifies the
2616 * part of the code from where the message originated.
2617 * msg: The message to be displayed.
2619 void
2620 ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
2622 va_list va;
2623 struct va_format vaf;
2624 char pbuf[128];
2626 if (level > ql_errlev)
2627 return;
2629 if (vha != NULL) {
2630 const struct pci_dev *pdev = vha->hw->pdev;
2631 /* <module-name> <msg-id>:<host> Message */
2632 snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x:%ld: ",
2633 QL_MSGHDR, dev_name(&(pdev->dev)), id, vha->host_no);
2634 } else {
2635 snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
2636 QL_MSGHDR, "0000:00:00.0", id);
2638 pbuf[sizeof(pbuf) - 1] = 0;
2640 va_start(va, fmt);
2642 vaf.fmt = fmt;
2643 vaf.va = &va;
2645 switch (level) {
2646 case ql_log_fatal: /* FATAL LOG */
2647 pr_crit("%s%pV", pbuf, &vaf);
2648 break;
2649 case ql_log_warn:
2650 pr_err("%s%pV", pbuf, &vaf);
2651 break;
2652 case ql_log_info:
2653 pr_warn("%s%pV", pbuf, &vaf);
2654 break;
2655 default:
2656 pr_info("%s%pV", pbuf, &vaf);
2657 break;
2660 va_end(va);
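/*
 * Illustrative sketch of ql_log() usage (not part of the driver source).
 * Unlike ql_dbg(), output is gated only by ql_errlev, not by
 * ql2xextended_error_logging. The id 0x5091 and the variable vha are
 * placeholders.
 *
 *	ql_log(ql_log_warn, vha, 0x5091,
 *	    "Unexpected asynchronous event, dropping.\n");
 */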
2664 * This function is for formatting and logging log messages.
2665 * It is to be used when vha is not available and pci is available,
2666 * i.e., before host allocation. It formats the message and logs
2667 * it to the messages file. All the messages are logged irrespective
2668 * of the value of ql2xextended_error_logging.
2669 * parameters:
2670 * level: The level of the log messages to be printed in the
2671 * messages file.
2672 * pdev: Pointer to the struct pci_dev.
2673 * id: This is a unique id for the level. It identifies the
2674 * part of the code from where the message originated.
2675 * msg: The message to be displayed.
2677 void
2678 ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
2679 const char *fmt, ...)
2681 va_list va;
2682 struct va_format vaf;
2683 char pbuf[128];
2685 if (pdev == NULL)
2686 return;
2687 if (level > ql_errlev)
2688 return;
2690 /* <module-name> <dev-name>:<msg-id> Message */
2691 snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
2692 QL_MSGHDR, dev_name(&(pdev->dev)), id);
2693 pbuf[sizeof(pbuf) - 1] = 0;
2695 va_start(va, fmt);
2697 vaf.fmt = fmt;
2698 vaf.va = &va;
2700 switch (level) {
2701 case ql_log_fatal: /* FATAL LOG */
2702 pr_crit("%s%pV", pbuf, &vaf);
2703 break;
2704 case ql_log_warn:
2705 pr_err("%s%pV", pbuf, &vaf);
2706 break;
2707 case ql_log_info:
2708 pr_warn("%s%pV", pbuf, &vaf);
2709 break;
2710 default:
2711 pr_info("%s%pV", pbuf, &vaf);
2712 break;
2715 va_end(va);
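/*
 * Illustrative sketch of ql_log_pci() usage (not part of the driver
 * source), intended for the window before a scsi_qla_host_t exists. The
 * id 0x0195 and the variable pdev are placeholders.
 *
 *	ql_log_pci(ql_log_fatal, pdev, 0x0195,
 *	    "Failed to map device registers, aborting probe.\n");
 */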
2718 void
2719 ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
2721 int i;
2722 struct qla_hw_data *ha = vha->hw;
2723 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2724 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
2725 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
2726 uint16_t __iomem *mbx_reg;
2728 if (!ql_mask_match(level))
2729 return;
2731 if (IS_P3P_TYPE(ha))
2732 mbx_reg = &reg82->mailbox_in[0];
2733 else if (IS_FWI2_CAPABLE(ha))
2734 mbx_reg = &reg24->mailbox0;
2735 else
2736 mbx_reg = MAILBOX_REG(ha, reg, 0);
2738 ql_dbg(level, vha, id, "Mailbox registers:\n");
2739 for (i = 0; i < 6; i++, mbx_reg++)
2740 ql_dbg(level, vha, id,
2741 "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg));
2745 void
2746 ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
2747 uint8_t *buf, uint size)
2749 uint cnt;
2751 if (!ql_mask_match(level))
2752 return;
2754 ql_dbg(level, vha, id,
2755 "%-+5d 0 1 2 3 4 5 6 7 8 9 A B C D E F\n", size);
2756 ql_dbg(level, vha, id,
2757 "----- -----------------------------------------------\n");
2758 for (cnt = 0; cnt < size; cnt += 16) {
2759 ql_dbg(level, vha, id, "%04x: ", cnt);
2760 print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1,
2761 buf + cnt, min(16U, size - cnt), false);
2766 * This function is for formatting and logging log messages.
2767	 * It is to be used when a qpair is available. It formats the message
2768 * and logs it to the messages file. All the messages will be logged
2769 * irrespective of value of ql2xextended_error_logging.
2770 * parameters:
2771 * level: The level of the log messages to be printed in the
2772 * messages file.
2773	 * qpair:	Pointer to the struct qla_qpair
2774 * id: This is a unique id for the level. It identifies the
2775 * part of the code from where the message originated.
2776 * msg: The message to be displayed.
2778 void
2779 ql_log_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
2780 const char *fmt, ...)
2782 va_list va;
2783 struct va_format vaf;
2784 char pbuf[128];
2786 if (level > ql_errlev)
2787 return;
2789 if (qpair != NULL) {
2790 const struct pci_dev *pdev = qpair->pdev;
2791 /* <module-name> <msg-id>:<host> Message */
2792 snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: ",
2793 QL_MSGHDR, dev_name(&(pdev->dev)), id);
2794 } else {
2795 snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
2796 QL_MSGHDR, "0000:00:00.0", id);
2798 pbuf[sizeof(pbuf) - 1] = 0;
2800 va_start(va, fmt);
2802 vaf.fmt = fmt;
2803 vaf.va = &va;
2805 switch (level) {
2806 case ql_log_fatal: /* FATAL LOG */
2807 pr_crit("%s%pV", pbuf, &vaf);
2808 break;
2809 case ql_log_warn:
2810 pr_err("%s%pV", pbuf, &vaf);
2811 break;
2812 case ql_log_info:
2813 pr_warn("%s%pV", pbuf, &vaf);
2814 break;
2815 default:
2816 pr_info("%s%pV", pbuf, &vaf);
2817 break;
2820 va_end(va);
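/*
 * Illustrative sketch of ql_log_qp() usage (not part of the driver
 * source). The id 0x3076 and the variable qpair are placeholders; the
 * qpair's pdev supplies the "[pci-name]" prefix instead of a vha.
 *
 *	ql_log_qp(ql_log_warn, qpair, 0x3076,
 *	    "Queue pair is not online.\n");
 */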
2824 * This function is for formatting and logging debug information.
2825	 * It is to be used when a qpair is available. It formats the message
2826 * and logs it to the messages file.
2827 * parameters:
2828 * level: The level of the debug messages to be printed.
2829 * If ql2xextended_error_logging value is correctly set,
2830 * this message will appear in the messages file.
2831	 * qpair:	Pointer to the struct qla_qpair.
2832 * id: This is a unique identifier for the level. It identifies the
2833 * part of the code from where the message originated.
2834 * msg: The message to be displayed.
2836 void
2837 ql_dbg_qp(uint32_t level, struct qla_qpair *qpair, int32_t id,
2838 const char *fmt, ...)
2840 va_list va;
2841 struct va_format vaf;
2843 if (!ql_mask_match(level))
2844 return;
2846 va_start(va, fmt);
2848 vaf.fmt = fmt;
2849 vaf.va = &va;
2851 if (qpair != NULL) {
2852 const struct pci_dev *pdev = qpair->pdev;
2853 /* <module-name> <pci-name> <msg-id>:<host> Message */
2854 pr_warn("%s [%s]-%04x: %pV",
2855 QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset,
2856 &vaf);
2857 } else {
2858 pr_warn("%s [%s]-%04x: : %pV",
2859 QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf);
2862 va_end(va);
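/*
 * Illustrative sketch of ql_dbg_qp() usage (not part of the driver
 * source). The id 0x3077 and the variables qpair and handle are
 * placeholders; output is gated by ql2xextended_error_logging just like
 * ql_dbg().
 *
 *	ql_dbg_qp(ql_dbg_io, qpair, 0x3077,
 *	    "I/O completed on qpair, handle=0x%x.\n", handle);
 */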