[linux/fpc-iii.git] drivers/scsi/qla2xxx/qla_dbg.c
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */

/*
 * Table for showing the current message id in use for a particular level.
 * Update this table when adding log/debug messages.
 * ----------------------------------------------------------------------
 * |             Level             |   Last Value Used  |     Holes      |
 * ----------------------------------------------------------------------
 * | Module Init and Probe         |       0x0193       | 0x0146         |
 * |                               |                    | 0x015b-0x0160  |
 * |                               |                    | 0x016e         |
 * | Mailbox commands              |       0x1199       | 0x1193         |
 * | Device Discovery              |       0x2004       | 0x2016         |
 * |                               |                    | 0x2011-0x2012, |
 * |                               |                    | 0x2099-0x20a4  |
 * | Queue Command and IO tracing  |       0x3074       | 0x300b         |
 * |                               |                    | 0x3027-0x3028  |
 * |                               |                    | 0x303d-0x3041  |
 * |                               |                    | 0x302d,0x3033  |
 * |                               |                    | 0x3036,0x3038  |
 * |                               |                    | 0x303a         |
 * | DPC Thread                    |       0x4023       | 0x4002,0x4013  |
 * | Async Events                  |       0x5090       | 0x502b-0x502f  |
 * |                               |                    | 0x5047         |
 * |                               |                    | 0x5084,0x5075  |
 * |                               |                    | 0x503d,0x5044  |
 * |                               |                    | 0x505f         |
 * | Timer Routines                |       0x6012       |                |
 * | User Space Interactions       |       0x70e3       | 0x7018,0x702e  |
 * |                               |                    | 0x7020,0x7024  |
 * |                               |                    | 0x7039,0x7045  |
 * |                               |                    | 0x7073-0x7075  |
 * |                               |                    | 0x70a5-0x70a6  |
 * |                               |                    | 0x70a8,0x70ab  |
 * |                               |                    | 0x70ad-0x70ae  |
 * |                               |                    | 0x70d0-0x70d6  |
 * |                               |                    | 0x70d7-0x70db  |
 * | Task Management               |       0x8042       | 0x8000,0x800b  |
 * |                               |                    | 0x8019         |
 * |                               |                    | 0x8025,0x8026  |
 * |                               |                    | 0x8031,0x8032  |
 * |                               |                    | 0x8039,0x803c  |
 * | AER/EEH                       |       0x9011       |                |
 * | Virtual Port                  |       0xa007       |                |
 * | ISP82XX Specific              |       0xb157       | 0xb002,0xb024  |
 * |                               |                    | 0xb09e,0xb0ae  |
 * |                               |                    | 0xb0c3,0xb0c6  |
 * |                               |                    | 0xb0e0-0xb0ef  |
 * |                               |                    | 0xb085,0xb0dc  |
 * |                               |                    | 0xb107,0xb108  |
 * |                               |                    | 0xb111,0xb11e  |
 * |                               |                    | 0xb12c,0xb12d  |
 * |                               |                    | 0xb13a,0xb142  |
 * |                               |                    | 0xb13c-0xb140  |
 * |                               |                    | 0xb149         |
 * | MultiQ                        |       0xc010       |                |
 * | Misc                          |       0xd301       | 0xd031-0xd0ff  |
 * |                               |                    | 0xd101-0xd1fe  |
 * |                               |                    | 0xd214-0xd2fe  |
 * | Target Mode                   |       0xe080       |                |
 * | Target Mode Management        |       0xf09b       | 0xf002         |
 * |                               |                    | 0xf046-0xf049  |
 * | Target Mode Task Management   |       0x1000d      |                |
 * ----------------------------------------------------------------------
 */
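/*
 * Illustrative example (not part of the table): a message takes its id from
 * the range owned by its area above, so a Misc-area (0xd...) warning in this
 * file looks like:
 *
 *	ql_log(ql_log_warn, vha, 0xd002, "No buffer available for dump.\n");
 *
 * Ids listed under "Holes" are currently unused within their range.
 */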
#include "qla_def.h"

#include <linux/delay.h>

static uint32_t ql_dbg_offset = 0x800;
78 static inline void
79 qla2xxx_prep_dump(struct qla_hw_data *ha, struct qla2xxx_fw_dump *fw_dump)
81 fw_dump->fw_major_version = htonl(ha->fw_major_version);
82 fw_dump->fw_minor_version = htonl(ha->fw_minor_version);
83 fw_dump->fw_subminor_version = htonl(ha->fw_subminor_version);
84 fw_dump->fw_attributes = htonl(ha->fw_attributes);
86 fw_dump->vendor = htonl(ha->pdev->vendor);
87 fw_dump->device = htonl(ha->pdev->device);
88 fw_dump->subsystem_vendor = htonl(ha->pdev->subsystem_vendor);
89 fw_dump->subsystem_device = htonl(ha->pdev->subsystem_device);
92 static inline void *
93 qla2xxx_copy_queues(struct qla_hw_data *ha, void *ptr)
95 struct req_que *req = ha->req_q_map[0];
96 struct rsp_que *rsp = ha->rsp_q_map[0];
97 /* Request queue. */
98 memcpy(ptr, req->ring, req->length *
99 sizeof(request_t));
101 /* Response queue. */
102 ptr += req->length * sizeof(request_t);
103 memcpy(ptr, rsp->ring, rsp->length *
104 sizeof(response_t));
106 return ptr + (rsp->length * sizeof(response_t));
110 qla27xx_dump_mpi_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
111 uint32_t ram_dwords, void **nxt)
113 int rval;
114 uint32_t cnt, stat, timer, dwords, idx;
115 uint16_t mb0;
116 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
117 dma_addr_t dump_dma = ha->gid_list_dma;
118 uint32_t *dump = (uint32_t *)ha->gid_list;
120 rval = QLA_SUCCESS;
121 mb0 = 0;
123 WRT_REG_WORD(&reg->mailbox0, MBC_LOAD_DUMP_MPI_RAM);
124 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
126 dwords = qla2x00_gid_list_size(ha) / 4;
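	/*
	 * The GID list buffer is reused here as a DMA bounce buffer: the
	 * firmware deposits each chunk of MPI RAM into it, and the chunk
	 * is then converted to host order into ram[] below.
	 */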
127 for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
128 cnt += dwords, addr += dwords) {
129 if (cnt + dwords > ram_dwords)
130 dwords = ram_dwords - cnt;
132 WRT_REG_WORD(&reg->mailbox1, LSW(addr));
133 WRT_REG_WORD(&reg->mailbox8, MSW(addr));
135 WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
136 WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
137 WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
138 WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
140 WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
141 WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
143 WRT_REG_WORD(&reg->mailbox9, 0);
144 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
146 ha->flags.mbox_int = 0;
147 for (timer = 6000000; timer; timer--) {
148 /* Check for pending interrupts. */
149 stat = RD_REG_DWORD(&reg->host_status);
150 if (stat & HSRX_RISC_INT) {
151 stat &= 0xff;
153 if (stat == 0x1 || stat == 0x2 ||
154 stat == 0x10 || stat == 0x11) {
155 set_bit(MBX_INTERRUPT,
156 &ha->mbx_cmd_flags);
158 mb0 = RD_REG_WORD(&reg->mailbox0);
159 RD_REG_WORD(&reg->mailbox1);
161 WRT_REG_DWORD(&reg->hccr,
162 HCCRX_CLR_RISC_INT);
163 RD_REG_DWORD(&reg->hccr);
164 break;
167 /* Clear this intr; it wasn't a mailbox intr */
168 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
169 RD_REG_DWORD(&reg->hccr);
171 udelay(5);
173 ha->flags.mbox_int = 1;
175 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
176 rval = mb0 & MBS_MASK;
177 for (idx = 0; idx < dwords; idx++)
178 ram[cnt + idx] = IS_QLA27XX(ha) ?
179 le32_to_cpu(dump[idx]) : swab32(dump[idx]);
180 } else {
181 rval = QLA_FUNCTION_FAILED;
185 *nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
186 return rval;
190 qla24xx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint32_t *ram,
191 uint32_t ram_dwords, void **nxt)
193 int rval;
194 uint32_t cnt, stat, timer, dwords, idx;
195 uint16_t mb0;
196 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
197 dma_addr_t dump_dma = ha->gid_list_dma;
198 uint32_t *dump = (uint32_t *)ha->gid_list;
200 rval = QLA_SUCCESS;
201 mb0 = 0;
203 WRT_REG_WORD(&reg->mailbox0, MBC_DUMP_RISC_RAM_EXTENDED);
204 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
206 dwords = qla2x00_gid_list_size(ha) / 4;
207 for (cnt = 0; cnt < ram_dwords && rval == QLA_SUCCESS;
208 cnt += dwords, addr += dwords) {
209 if (cnt + dwords > ram_dwords)
210 dwords = ram_dwords - cnt;
212 WRT_REG_WORD(&reg->mailbox1, LSW(addr));
213 WRT_REG_WORD(&reg->mailbox8, MSW(addr));
215 WRT_REG_WORD(&reg->mailbox2, MSW(dump_dma));
216 WRT_REG_WORD(&reg->mailbox3, LSW(dump_dma));
217 WRT_REG_WORD(&reg->mailbox6, MSW(MSD(dump_dma)));
218 WRT_REG_WORD(&reg->mailbox7, LSW(MSD(dump_dma)));
220 WRT_REG_WORD(&reg->mailbox4, MSW(dwords));
221 WRT_REG_WORD(&reg->mailbox5, LSW(dwords));
222 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_HOST_INT);
224 ha->flags.mbox_int = 0;
225 for (timer = 6000000; timer; timer--) {
226 /* Check for pending interrupts. */
227 stat = RD_REG_DWORD(&reg->host_status);
228 if (stat & HSRX_RISC_INT) {
229 stat &= 0xff;
231 if (stat == 0x1 || stat == 0x2 ||
232 stat == 0x10 || stat == 0x11) {
233 set_bit(MBX_INTERRUPT,
234 &ha->mbx_cmd_flags);
236 mb0 = RD_REG_WORD(&reg->mailbox0);
238 WRT_REG_DWORD(&reg->hccr,
239 HCCRX_CLR_RISC_INT);
240 RD_REG_DWORD(&reg->hccr);
241 break;
244 /* Clear this intr; it wasn't a mailbox intr */
245 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
246 RD_REG_DWORD(&reg->hccr);
248 udelay(5);
250 ha->flags.mbox_int = 1;
252 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
253 rval = mb0 & MBS_MASK;
254 for (idx = 0; idx < dwords; idx++)
255 ram[cnt + idx] = IS_QLA27XX(ha) ?
256 le32_to_cpu(dump[idx]) : swab32(dump[idx]);
257 } else {
258 rval = QLA_FUNCTION_FAILED;
262         *nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
263 return rval;
266 static int
267 qla24xx_dump_memory(struct qla_hw_data *ha, uint32_t *code_ram,
268 uint32_t cram_size, void **nxt)
270 int rval;
272 /* Code RAM. */
273 rval = qla24xx_dump_ram(ha, 0x20000, code_ram, cram_size / 4, nxt);
274 if (rval != QLA_SUCCESS)
275 return rval;
277 set_bit(RISC_SRAM_DUMP_CMPL, &ha->fw_dump_cap_flags);
279 /* External Memory. */
280 rval = qla24xx_dump_ram(ha, 0x100000, *nxt,
281 ha->fw_memory_size - 0x100000 + 1, nxt);
282 if (rval == QLA_SUCCESS)
283 set_bit(RISC_EXT_MEM_DUMP_CMPL, &ha->fw_dump_cap_flags);
285 return rval;
288 static uint32_t *
289 qla24xx_read_window(struct device_reg_24xx __iomem *reg, uint32_t iobase,
290 uint32_t count, uint32_t *buf)
292 uint32_t __iomem *dmp_reg;
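	/*
	 * Select the register window via iobase_addr, then copy 'count'
	 * dwords out through the window registers, storing them big-endian
	 * (htonl) in the dump buffer.
	 */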
294 WRT_REG_DWORD(&reg->iobase_addr, iobase);
295 dmp_reg = &reg->iobase_window;
296 for ( ; count--; dmp_reg++)
297 *buf++ = htonl(RD_REG_DWORD(dmp_reg));
299 return buf;
302 void
303 qla24xx_pause_risc(struct device_reg_24xx __iomem *reg, struct qla_hw_data *ha)
305 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_PAUSE);
307         /* 100 usec delay is sufficient for hardware to pause RISC */
308 udelay(100);
309 if (RD_REG_DWORD(&reg->host_status) & HSRX_RISC_PAUSED)
310 set_bit(RISC_PAUSE_CMPL, &ha->fw_dump_cap_flags);
314 qla24xx_soft_reset(struct qla_hw_data *ha)
316 int rval = QLA_SUCCESS;
317 uint32_t cnt;
318 uint16_t wd;
319 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
322 * Reset RISC. The delay is dependent on system architecture.
323 * Driver can proceed with the reset sequence after waiting
324 * for a timeout period.
326 WRT_REG_DWORD(&reg->ctrl_status, CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
327 for (cnt = 0; cnt < 30000; cnt++) {
328 if ((RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE) == 0)
329 break;
331 udelay(10);
333 if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_DMA_ACTIVE))
334 set_bit(DMA_SHUTDOWN_CMPL, &ha->fw_dump_cap_flags);
336 WRT_REG_DWORD(&reg->ctrl_status,
337 CSRX_ISP_SOFT_RESET|CSRX_DMA_SHUTDOWN|MWB_4096_BYTES);
338 pci_read_config_word(ha->pdev, PCI_COMMAND, &wd);
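	/* Dummy config-space read to flush the posted soft-reset write. */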
340 udelay(100);
342 /* Wait for soft-reset to complete. */
343 for (cnt = 0; cnt < 30000; cnt++) {
344 if ((RD_REG_DWORD(&reg->ctrl_status) &
345 CSRX_ISP_SOFT_RESET) == 0)
346 break;
348 udelay(10);
350 if (!(RD_REG_DWORD(&reg->ctrl_status) & CSRX_ISP_SOFT_RESET))
351 set_bit(ISP_RESET_CMPL, &ha->fw_dump_cap_flags);
353 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
354 RD_REG_DWORD(&reg->hccr); /* PCI Posting. */
356 for (cnt = 10000; RD_REG_WORD(&reg->mailbox0) != 0 &&
357 rval == QLA_SUCCESS; cnt--) {
358 if (cnt)
359 udelay(10);
360 else
361 rval = QLA_FUNCTION_TIMEOUT;
363 if (rval == QLA_SUCCESS)
364 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
366 return rval;
369 static int
370 qla2xxx_dump_ram(struct qla_hw_data *ha, uint32_t addr, uint16_t *ram,
371 uint32_t ram_words, void **nxt)
373 int rval;
374 uint32_t cnt, stat, timer, words, idx;
375 uint16_t mb0;
376 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
377 dma_addr_t dump_dma = ha->gid_list_dma;
378 uint16_t *dump = (uint16_t *)ha->gid_list;
380 rval = QLA_SUCCESS;
381 mb0 = 0;
383 WRT_MAILBOX_REG(ha, reg, 0, MBC_DUMP_RISC_RAM_EXTENDED);
384 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
386 words = qla2x00_gid_list_size(ha) / 2;
387 for (cnt = 0; cnt < ram_words && rval == QLA_SUCCESS;
388 cnt += words, addr += words) {
389 if (cnt + words > ram_words)
390 words = ram_words - cnt;
392 WRT_MAILBOX_REG(ha, reg, 1, LSW(addr));
393 WRT_MAILBOX_REG(ha, reg, 8, MSW(addr));
395 WRT_MAILBOX_REG(ha, reg, 2, MSW(dump_dma));
396 WRT_MAILBOX_REG(ha, reg, 3, LSW(dump_dma));
397 WRT_MAILBOX_REG(ha, reg, 6, MSW(MSD(dump_dma)));
398 WRT_MAILBOX_REG(ha, reg, 7, LSW(MSD(dump_dma)));
400 WRT_MAILBOX_REG(ha, reg, 4, words);
401 WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
403 for (timer = 6000000; timer; timer--) {
404 /* Check for pending interrupts. */
405 stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
406 if (stat & HSR_RISC_INT) {
407 stat &= 0xff;
409 if (stat == 0x1 || stat == 0x2) {
410 set_bit(MBX_INTERRUPT,
411 &ha->mbx_cmd_flags);
413 mb0 = RD_MAILBOX_REG(ha, reg, 0);
415 /* Release mailbox registers. */
416 WRT_REG_WORD(&reg->semaphore, 0);
417 WRT_REG_WORD(&reg->hccr,
418 HCCR_CLR_RISC_INT);
419 RD_REG_WORD(&reg->hccr);
420 break;
421 } else if (stat == 0x10 || stat == 0x11) {
422 set_bit(MBX_INTERRUPT,
423 &ha->mbx_cmd_flags);
425 mb0 = RD_MAILBOX_REG(ha, reg, 0);
427 WRT_REG_WORD(&reg->hccr,
428 HCCR_CLR_RISC_INT);
429 RD_REG_WORD(&reg->hccr);
430 break;
433                         /* Clear this intr; it wasn't a mailbox intr */
434 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
435 RD_REG_WORD(&reg->hccr);
437 udelay(5);
440 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
441 rval = mb0 & MBS_MASK;
442 for (idx = 0; idx < words; idx++)
443 ram[cnt + idx] = swab16(dump[idx]);
444 } else {
445 rval = QLA_FUNCTION_FAILED;
449         *nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
450 return rval;
453 static inline void
454 qla2xxx_read_window(struct device_reg_2xxx __iomem *reg, uint32_t count,
455 uint16_t *buf)
457 uint16_t __iomem *dmp_reg = &reg->u.isp2300.fb_cmd;
459 for ( ; count--; dmp_reg++)
460 *buf++ = htons(RD_REG_WORD(dmp_reg));
463 static inline void *
464 qla24xx_copy_eft(struct qla_hw_data *ha, void *ptr)
466 if (!ha->eft)
467 return ptr;
469 memcpy(ptr, ha->eft, ntohl(ha->fw_dump->eft_size));
470 return ptr + ntohl(ha->fw_dump->eft_size);
473 static inline void *
474 qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
476 uint32_t cnt;
477 uint32_t *iter_reg;
478 struct qla2xxx_fce_chain *fcec = ptr;
480 if (!ha->fce)
481 return ptr;
483 *last_chain = &fcec->type;
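	/*
	 * The caller keeps *last_chain so it can OR DUMP_CHAIN_LAST into the
	 * type of whichever chain entry ends up last in the dump.
	 */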
484 fcec->type = htonl(DUMP_CHAIN_FCE);
485 fcec->chain_size = htonl(sizeof(struct qla2xxx_fce_chain) +
486 fce_calc_size(ha->fce_bufs));
487 fcec->size = htonl(fce_calc_size(ha->fce_bufs));
488 fcec->addr_l = htonl(LSD(ha->fce_dma));
489 fcec->addr_h = htonl(MSD(ha->fce_dma));
491 iter_reg = fcec->eregs;
492 for (cnt = 0; cnt < 8; cnt++)
493 *iter_reg++ = htonl(ha->fce_mb[cnt]);
495 memcpy(iter_reg, ha->fce, ntohl(fcec->size));
497 return (char *)iter_reg + ntohl(fcec->size);
500 static inline void *
501 qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
502 uint32_t **last_chain)
504 struct qla2xxx_mqueue_chain *q;
505 struct qla2xxx_mqueue_header *qh;
506 uint32_t num_queues;
507 int que;
508 struct {
509 int length;
510 void *ring;
511 } aq, *aqp;
513 if (!ha->tgt.atio_ring)
514 return ptr;
516 num_queues = 1;
517 aqp = &aq;
518 aqp->length = ha->tgt.atio_q_length;
519 aqp->ring = ha->tgt.atio_ring;
521 for (que = 0; que < num_queues; que++) {
522 /* aqp = ha->atio_q_map[que]; */
523 q = ptr;
524 *last_chain = &q->type;
525 q->type = htonl(DUMP_CHAIN_QUEUE);
526 q->chain_size = htonl(
527 sizeof(struct qla2xxx_mqueue_chain) +
528 sizeof(struct qla2xxx_mqueue_header) +
529 (aqp->length * sizeof(request_t)));
530 ptr += sizeof(struct qla2xxx_mqueue_chain);
532 /* Add header. */
533 qh = ptr;
534 qh->queue = htonl(TYPE_ATIO_QUEUE);
535 qh->number = htonl(que);
536 qh->size = htonl(aqp->length * sizeof(request_t));
537 ptr += sizeof(struct qla2xxx_mqueue_header);
539 /* Add data. */
540 memcpy(ptr, aqp->ring, aqp->length * sizeof(request_t));
542 ptr += aqp->length * sizeof(request_t);
545 return ptr;
548 static inline void *
549 qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
551 struct qla2xxx_mqueue_chain *q;
552 struct qla2xxx_mqueue_header *qh;
553 struct req_que *req;
554 struct rsp_que *rsp;
555 int que;
557 if (!ha->mqenable)
558 return ptr;
560 /* Request queues */
561 for (que = 1; que < ha->max_req_queues; que++) {
562 req = ha->req_q_map[que];
563 if (!req)
564 break;
566 /* Add chain. */
567 q = ptr;
568 *last_chain = &q->type;
569 q->type = htonl(DUMP_CHAIN_QUEUE);
570 q->chain_size = htonl(
571 sizeof(struct qla2xxx_mqueue_chain) +
572 sizeof(struct qla2xxx_mqueue_header) +
573 (req->length * sizeof(request_t)));
574 ptr += sizeof(struct qla2xxx_mqueue_chain);
576 /* Add header. */
577 qh = ptr;
578 qh->queue = htonl(TYPE_REQUEST_QUEUE);
579 qh->number = htonl(que);
580 qh->size = htonl(req->length * sizeof(request_t));
581 ptr += sizeof(struct qla2xxx_mqueue_header);
583 /* Add data. */
584 memcpy(ptr, req->ring, req->length * sizeof(request_t));
585 ptr += req->length * sizeof(request_t);
588 /* Response queues */
589 for (que = 1; que < ha->max_rsp_queues; que++) {
590 rsp = ha->rsp_q_map[que];
591 if (!rsp)
592 break;
594 /* Add chain. */
595 q = ptr;
596 *last_chain = &q->type;
597 q->type = htonl(DUMP_CHAIN_QUEUE);
598 q->chain_size = htonl(
599 sizeof(struct qla2xxx_mqueue_chain) +
600 sizeof(struct qla2xxx_mqueue_header) +
601 (rsp->length * sizeof(response_t)));
602 ptr += sizeof(struct qla2xxx_mqueue_chain);
604 /* Add header. */
605 qh = ptr;
606 qh->queue = htonl(TYPE_RESPONSE_QUEUE);
607 qh->number = htonl(que);
608 qh->size = htonl(rsp->length * sizeof(response_t));
609 ptr += sizeof(struct qla2xxx_mqueue_header);
611 /* Add data. */
612 memcpy(ptr, rsp->ring, rsp->length * sizeof(response_t));
613 ptr += rsp->length * sizeof(response_t);
616 return ptr;
619 static inline void *
620 qla25xx_copy_mq(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
622 uint32_t cnt, que_idx;
623 uint8_t que_cnt;
624 struct qla2xxx_mq_chain *mq = ptr;
625 device_reg_t *reg;
627 if (!ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
628 return ptr;
630 mq = ptr;
631 *last_chain = &mq->type;
632 mq->type = htonl(DUMP_CHAIN_MQ);
633 mq->chain_size = htonl(sizeof(struct qla2xxx_mq_chain));
635 que_cnt = ha->max_req_queues > ha->max_rsp_queues ?
636 ha->max_req_queues : ha->max_rsp_queues;
637 mq->count = htonl(que_cnt);
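	/*
	 * Four register snapshots per queue: request-queue in/out and
	 * response-queue in/out pointers.
	 */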
638 for (cnt = 0; cnt < que_cnt; cnt++) {
639 reg = ISP_QUE_REG(ha, cnt);
640 que_idx = cnt * 4;
641 mq->qregs[que_idx] =
642 htonl(RD_REG_DWORD(&reg->isp25mq.req_q_in));
643 mq->qregs[que_idx+1] =
644 htonl(RD_REG_DWORD(&reg->isp25mq.req_q_out));
645 mq->qregs[que_idx+2] =
646 htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_in));
647 mq->qregs[que_idx+3] =
648 htonl(RD_REG_DWORD(&reg->isp25mq.rsp_q_out));
651 return ptr + sizeof(struct qla2xxx_mq_chain);
654 void
655 qla2xxx_dump_post_process(scsi_qla_host_t *vha, int rval)
657 struct qla_hw_data *ha = vha->hw;
659 if (rval != QLA_SUCCESS) {
660 ql_log(ql_log_warn, vha, 0xd000,
661 "Failed to dump firmware (%x), dump status flags (0x%lx).\n",
662 rval, ha->fw_dump_cap_flags);
663 ha->fw_dumped = 0;
664 } else {
665 ql_log(ql_log_info, vha, 0xd001,
666 "Firmware dump saved to temp buffer (%ld/%p), dump status flags (0x%lx).\n",
667 vha->host_no, ha->fw_dump, ha->fw_dump_cap_flags);
668 ha->fw_dumped = 1;
669 qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
674 * qla2300_fw_dump() - Dumps binary data from the 2300 firmware.
675  * @vha: HA context
676 * @hardware_locked: Called with the hardware_lock
678 void
679 qla2300_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
681 int rval;
682 uint32_t cnt;
683 struct qla_hw_data *ha = vha->hw;
684 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
685 uint16_t __iomem *dmp_reg;
686 unsigned long flags;
687 struct qla2300_fw_dump *fw;
688 void *nxt;
689 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
691 flags = 0;
693 #ifndef __CHECKER__
694 if (!hardware_locked)
695 spin_lock_irqsave(&ha->hardware_lock, flags);
696 #endif
698 if (!ha->fw_dump) {
699 ql_log(ql_log_warn, vha, 0xd002,
700 "No buffer available for dump.\n");
701 goto qla2300_fw_dump_failed;
704 if (ha->fw_dumped) {
705 ql_log(ql_log_warn, vha, 0xd003,
706 "Firmware has been previously dumped (%p) "
707 "-- ignoring request.\n",
708 ha->fw_dump);
709 goto qla2300_fw_dump_failed;
711 fw = &ha->fw_dump->isp.isp23;
712 qla2xxx_prep_dump(ha, ha->fw_dump);
714 rval = QLA_SUCCESS;
715 fw->hccr = htons(RD_REG_WORD(&reg->hccr));
717 /* Pause RISC. */
718 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
719 if (IS_QLA2300(ha)) {
720 for (cnt = 30000;
721 (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
722 rval == QLA_SUCCESS; cnt--) {
723 if (cnt)
724 udelay(100);
725 else
726 rval = QLA_FUNCTION_TIMEOUT;
728 } else {
729 RD_REG_WORD(&reg->hccr); /* PCI Posting. */
730 udelay(10);
733 if (rval == QLA_SUCCESS) {
734 dmp_reg = &reg->flash_address;
735 for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++)
736 fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
738 dmp_reg = &reg->u.isp2300.req_q_in;
739 for (cnt = 0; cnt < sizeof(fw->risc_host_reg) / 2;
740 cnt++, dmp_reg++)
741 fw->risc_host_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
743 dmp_reg = &reg->u.isp2300.mailbox0;
744 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2;
745 cnt++, dmp_reg++)
746 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
748 WRT_REG_WORD(&reg->ctrl_status, 0x40);
749 qla2xxx_read_window(reg, 32, fw->resp_dma_reg);
751 WRT_REG_WORD(&reg->ctrl_status, 0x50);
752 qla2xxx_read_window(reg, 48, fw->dma_reg);
754 WRT_REG_WORD(&reg->ctrl_status, 0x00);
755 dmp_reg = &reg->risc_hw;
756 for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2;
757 cnt++, dmp_reg++)
758 fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
760 WRT_REG_WORD(&reg->pcr, 0x2000);
761 qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
763 WRT_REG_WORD(&reg->pcr, 0x2200);
764 qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
766 WRT_REG_WORD(&reg->pcr, 0x2400);
767 qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
769 WRT_REG_WORD(&reg->pcr, 0x2600);
770 qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
772 WRT_REG_WORD(&reg->pcr, 0x2800);
773 qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
775 WRT_REG_WORD(&reg->pcr, 0x2A00);
776 qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
778 WRT_REG_WORD(&reg->pcr, 0x2C00);
779 qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
781 WRT_REG_WORD(&reg->pcr, 0x2E00);
782 qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
784 WRT_REG_WORD(&reg->ctrl_status, 0x10);
785 qla2xxx_read_window(reg, 64, fw->frame_buf_hdw_reg);
787 WRT_REG_WORD(&reg->ctrl_status, 0x20);
788 qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
790 WRT_REG_WORD(&reg->ctrl_status, 0x30);
791 qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
793 /* Reset RISC. */
794 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
795 for (cnt = 0; cnt < 30000; cnt++) {
796 if ((RD_REG_WORD(&reg->ctrl_status) &
797 CSR_ISP_SOFT_RESET) == 0)
798 break;
800 udelay(10);
804 if (!IS_QLA2300(ha)) {
805 for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
806 rval == QLA_SUCCESS; cnt--) {
807 if (cnt)
808 udelay(100);
809 else
810 rval = QLA_FUNCTION_TIMEOUT;
814 /* Get RISC SRAM. */
815 if (rval == QLA_SUCCESS)
816 rval = qla2xxx_dump_ram(ha, 0x800, fw->risc_ram,
817 sizeof(fw->risc_ram) / 2, &nxt);
819 /* Get stack SRAM. */
820 if (rval == QLA_SUCCESS)
821 rval = qla2xxx_dump_ram(ha, 0x10000, fw->stack_ram,
822 sizeof(fw->stack_ram) / 2, &nxt);
824 /* Get data SRAM. */
825 if (rval == QLA_SUCCESS)
826 rval = qla2xxx_dump_ram(ha, 0x11000, fw->data_ram,
827 ha->fw_memory_size - 0x11000 + 1, &nxt);
829 if (rval == QLA_SUCCESS)
830 qla2xxx_copy_queues(ha, nxt);
832 qla2xxx_dump_post_process(base_vha, rval);
834 qla2300_fw_dump_failed:
835 #ifndef __CHECKER__
836 if (!hardware_locked)
837 spin_unlock_irqrestore(&ha->hardware_lock, flags);
838 #else
840 #endif
844 * qla2100_fw_dump() - Dumps binary data from the 2100/2200 firmware.
845  * @vha: HA context
846 * @hardware_locked: Called with the hardware_lock
848 void
849 qla2100_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
851 int rval;
852 uint32_t cnt, timer;
853 uint16_t risc_address;
854 uint16_t mb0, mb2;
855 struct qla_hw_data *ha = vha->hw;
856 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
857 uint16_t __iomem *dmp_reg;
858 unsigned long flags;
859 struct qla2100_fw_dump *fw;
860 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
862 risc_address = 0;
863 mb0 = mb2 = 0;
864 flags = 0;
866 #ifndef __CHECKER__
867 if (!hardware_locked)
868 spin_lock_irqsave(&ha->hardware_lock, flags);
869 #endif
871 if (!ha->fw_dump) {
872 ql_log(ql_log_warn, vha, 0xd004,
873 "No buffer available for dump.\n");
874 goto qla2100_fw_dump_failed;
877 if (ha->fw_dumped) {
878 ql_log(ql_log_warn, vha, 0xd005,
879 "Firmware has been previously dumped (%p) "
880 "-- ignoring request.\n",
881 ha->fw_dump);
882 goto qla2100_fw_dump_failed;
884 fw = &ha->fw_dump->isp.isp21;
885 qla2xxx_prep_dump(ha, ha->fw_dump);
887 rval = QLA_SUCCESS;
888 fw->hccr = htons(RD_REG_WORD(&reg->hccr));
890 /* Pause RISC. */
891 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
892 for (cnt = 30000; (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
893 rval == QLA_SUCCESS; cnt--) {
894 if (cnt)
895 udelay(100);
896 else
897 rval = QLA_FUNCTION_TIMEOUT;
899 if (rval == QLA_SUCCESS) {
900 dmp_reg = &reg->flash_address;
901 for (cnt = 0; cnt < sizeof(fw->pbiu_reg) / 2; cnt++, dmp_reg++)
902 fw->pbiu_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
904 dmp_reg = &reg->u.isp2100.mailbox0;
905 for (cnt = 0; cnt < ha->mbx_count; cnt++, dmp_reg++) {
906 if (cnt == 8)
907 dmp_reg = &reg->u_end.isp2200.mailbox8;
909 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
912 dmp_reg = &reg->u.isp2100.unused_2[0];
913 for (cnt = 0; cnt < sizeof(fw->dma_reg) / 2; cnt++, dmp_reg++)
914 fw->dma_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
916 WRT_REG_WORD(&reg->ctrl_status, 0x00);
917 dmp_reg = &reg->risc_hw;
918 for (cnt = 0; cnt < sizeof(fw->risc_hdw_reg) / 2; cnt++, dmp_reg++)
919 fw->risc_hdw_reg[cnt] = htons(RD_REG_WORD(dmp_reg));
921 WRT_REG_WORD(&reg->pcr, 0x2000);
922 qla2xxx_read_window(reg, 16, fw->risc_gp0_reg);
924 WRT_REG_WORD(&reg->pcr, 0x2100);
925 qla2xxx_read_window(reg, 16, fw->risc_gp1_reg);
927 WRT_REG_WORD(&reg->pcr, 0x2200);
928 qla2xxx_read_window(reg, 16, fw->risc_gp2_reg);
930 WRT_REG_WORD(&reg->pcr, 0x2300);
931 qla2xxx_read_window(reg, 16, fw->risc_gp3_reg);
933 WRT_REG_WORD(&reg->pcr, 0x2400);
934 qla2xxx_read_window(reg, 16, fw->risc_gp4_reg);
936 WRT_REG_WORD(&reg->pcr, 0x2500);
937 qla2xxx_read_window(reg, 16, fw->risc_gp5_reg);
939 WRT_REG_WORD(&reg->pcr, 0x2600);
940 qla2xxx_read_window(reg, 16, fw->risc_gp6_reg);
942 WRT_REG_WORD(&reg->pcr, 0x2700);
943 qla2xxx_read_window(reg, 16, fw->risc_gp7_reg);
945 WRT_REG_WORD(&reg->ctrl_status, 0x10);
946 qla2xxx_read_window(reg, 16, fw->frame_buf_hdw_reg);
948 WRT_REG_WORD(&reg->ctrl_status, 0x20);
949 qla2xxx_read_window(reg, 64, fw->fpm_b0_reg);
951 WRT_REG_WORD(&reg->ctrl_status, 0x30);
952 qla2xxx_read_window(reg, 64, fw->fpm_b1_reg);
954 /* Reset the ISP. */
955 WRT_REG_WORD(&reg->ctrl_status, CSR_ISP_SOFT_RESET);
958 for (cnt = 30000; RD_MAILBOX_REG(ha, reg, 0) != 0 &&
959 rval == QLA_SUCCESS; cnt--) {
960 if (cnt)
961 udelay(100);
962 else
963 rval = QLA_FUNCTION_TIMEOUT;
966 /* Pause RISC. */
967 if (rval == QLA_SUCCESS && (IS_QLA2200(ha) || (IS_QLA2100(ha) &&
968 (RD_REG_WORD(&reg->mctr) & (BIT_1 | BIT_0)) != 0))) {
970 WRT_REG_WORD(&reg->hccr, HCCR_PAUSE_RISC);
971 for (cnt = 30000;
972 (RD_REG_WORD(&reg->hccr) & HCCR_RISC_PAUSE) == 0 &&
973 rval == QLA_SUCCESS; cnt--) {
974 if (cnt)
975 udelay(100);
976 else
977 rval = QLA_FUNCTION_TIMEOUT;
979 if (rval == QLA_SUCCESS) {
980 /* Set memory configuration and timing. */
981 if (IS_QLA2100(ha))
982 WRT_REG_WORD(&reg->mctr, 0xf1);
983 else
984 WRT_REG_WORD(&reg->mctr, 0xf2);
985 RD_REG_WORD(&reg->mctr); /* PCI Posting. */
987 /* Release RISC. */
988 WRT_REG_WORD(&reg->hccr, HCCR_RELEASE_RISC);
992 if (rval == QLA_SUCCESS) {
993 /* Get RISC SRAM. */
994 risc_address = 0x1000;
995 WRT_MAILBOX_REG(ha, reg, 0, MBC_READ_RAM_WORD);
996 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
998 for (cnt = 0; cnt < sizeof(fw->risc_ram) / 2 && rval == QLA_SUCCESS;
999 cnt++, risc_address++) {
1000 WRT_MAILBOX_REG(ha, reg, 1, risc_address);
1001 WRT_REG_WORD(&reg->hccr, HCCR_SET_HOST_INT);
1003 for (timer = 6000000; timer != 0; timer--) {
1004 /* Check for pending interrupts. */
1005 if (RD_REG_WORD(&reg->istatus) & ISR_RISC_INT) {
1006 if (RD_REG_WORD(&reg->semaphore) & BIT_0) {
1007 set_bit(MBX_INTERRUPT,
1008 &ha->mbx_cmd_flags);
1010 mb0 = RD_MAILBOX_REG(ha, reg, 0);
1011 mb2 = RD_MAILBOX_REG(ha, reg, 2);
1013 WRT_REG_WORD(&reg->semaphore, 0);
1014 WRT_REG_WORD(&reg->hccr,
1015 HCCR_CLR_RISC_INT);
1016 RD_REG_WORD(&reg->hccr);
1017 break;
1019 WRT_REG_WORD(&reg->hccr, HCCR_CLR_RISC_INT);
1020 RD_REG_WORD(&reg->hccr);
1022 udelay(5);
1025 if (test_and_clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags)) {
1026 rval = mb0 & MBS_MASK;
1027 fw->risc_ram[cnt] = htons(mb2);
1028 } else {
1029 rval = QLA_FUNCTION_FAILED;
1033 if (rval == QLA_SUCCESS)
1034 qla2xxx_copy_queues(ha, &fw->risc_ram[cnt]);
1036 qla2xxx_dump_post_process(base_vha, rval);
1038 qla2100_fw_dump_failed:
1039 #ifndef __CHECKER__
1040 if (!hardware_locked)
1041 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1042 #else
1044 #endif
1047 void
1048 qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1050 int rval;
1051 uint32_t cnt;
1052 struct qla_hw_data *ha = vha->hw;
1053 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1054 uint32_t __iomem *dmp_reg;
1055 uint32_t *iter_reg;
1056 uint16_t __iomem *mbx_reg;
1057 unsigned long flags;
1058 struct qla24xx_fw_dump *fw;
1059 void *nxt;
1060 void *nxt_chain;
1061 uint32_t *last_chain = NULL;
1062 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1064 if (IS_P3P_TYPE(ha))
1065 return;
1067 flags = 0;
1068 ha->fw_dump_cap_flags = 0;
1070 #ifndef __CHECKER__
1071 if (!hardware_locked)
1072 spin_lock_irqsave(&ha->hardware_lock, flags);
1073 #endif
1075 if (!ha->fw_dump) {
1076 ql_log(ql_log_warn, vha, 0xd006,
1077 "No buffer available for dump.\n");
1078 goto qla24xx_fw_dump_failed;
1081 if (ha->fw_dumped) {
1082 ql_log(ql_log_warn, vha, 0xd007,
1083 "Firmware has been previously dumped (%p) "
1084 "-- ignoring request.\n",
1085 ha->fw_dump);
1086 goto qla24xx_fw_dump_failed;
1088 fw = &ha->fw_dump->isp.isp24;
1089 qla2xxx_prep_dump(ha, ha->fw_dump);
1091 fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1094 * Pause RISC. No need to track timeout, as resetting the chip
1095      * is the right approach in case of a pause timeout
1097 qla24xx_pause_risc(reg, ha);
1099 /* Host interface registers. */
1100 dmp_reg = &reg->flash_addr;
1101 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
1102 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
1104 /* Disable interrupts. */
1105 WRT_REG_DWORD(&reg->ictrl, 0);
1106 RD_REG_DWORD(&reg->ictrl);
1108 /* Shadow registers. */
1109 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1110 RD_REG_DWORD(&reg->iobase_addr);
1111 WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
1112 fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1114 WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
1115 fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1117 WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
1118 fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1120 WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
1121 fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1123 WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
1124 fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1126 WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
1127 fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1129 WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
1130 fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1132 /* Mailbox registers. */
1133 mbx_reg = &reg->mailbox0;
1134 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
1135 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
1137 /* Transfer sequence registers. */
1138 iter_reg = fw->xseq_gp_reg;
1139 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1140 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1141 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1142 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1143 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1144 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1145 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1146 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1148 qla24xx_read_window(reg, 0xBFE0, 16, fw->xseq_0_reg);
1149 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1151 /* Receive sequence registers. */
1152 iter_reg = fw->rseq_gp_reg;
1153 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1154 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1155 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1156 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1157 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1158 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1159 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1160 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1162 qla24xx_read_window(reg, 0xFFD0, 16, fw->rseq_0_reg);
1163 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1164 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1166 /* Command DMA registers. */
1167 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1169 /* Queues. */
1170 iter_reg = fw->req0_dma_reg;
1171 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1172 dmp_reg = &reg->iobase_q;
1173 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1174 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1176 iter_reg = fw->resp0_dma_reg;
1177 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1178 dmp_reg = &reg->iobase_q;
1179 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1180 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1182 iter_reg = fw->req1_dma_reg;
1183 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1184 dmp_reg = &reg->iobase_q;
1185 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1186 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1188 /* Transmit DMA registers. */
1189 iter_reg = fw->xmt0_dma_reg;
1190 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1191 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1193 iter_reg = fw->xmt1_dma_reg;
1194 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1195 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1197 iter_reg = fw->xmt2_dma_reg;
1198 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1199 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1201 iter_reg = fw->xmt3_dma_reg;
1202 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1203 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1205 iter_reg = fw->xmt4_dma_reg;
1206 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1207 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1209 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1211 /* Receive DMA registers. */
1212 iter_reg = fw->rcvt0_data_dma_reg;
1213 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1214 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1216 iter_reg = fw->rcvt1_data_dma_reg;
1217 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1218 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1220 /* RISC registers. */
1221 iter_reg = fw->risc_gp_reg;
1222 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1223 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1224 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1225 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1226 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1227 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1228 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1229 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1231 /* Local memory controller registers. */
1232 iter_reg = fw->lmc_reg;
1233 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1234 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1235 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1236 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1237 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1238 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1239 qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1241 /* Fibre Protocol Module registers. */
1242 iter_reg = fw->fpm_hdw_reg;
1243 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1244 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1245 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1246 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1247 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1248 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1249 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1250 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1251 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1252 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1253 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1254 qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1256 /* Frame Buffer registers. */
1257 iter_reg = fw->fb_hdw_reg;
1258 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1259 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1260 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1261 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1262 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1263 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1264 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1265 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1266 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1267 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1268 qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1270 rval = qla24xx_soft_reset(ha);
1271 if (rval != QLA_SUCCESS)
1272 goto qla24xx_fw_dump_failed_0;
1274 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1275 &nxt);
1276 if (rval != QLA_SUCCESS)
1277 goto qla24xx_fw_dump_failed_0;
1279 nxt = qla2xxx_copy_queues(ha, nxt);
1281 qla24xx_copy_eft(ha, nxt);
1283 nxt_chain = (void *)ha->fw_dump + ha->chain_offset;
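	/*
	 * Chained entries (ATIO queues here) are appended at chain_offset,
	 * past the fixed portion of the dump template.
	 */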
1284 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1285 if (last_chain) {
1286 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1287 *last_chain |= htonl(DUMP_CHAIN_LAST);
1290 /* Adjust valid length. */
1291 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1293 qla24xx_fw_dump_failed_0:
1294 qla2xxx_dump_post_process(base_vha, rval);
1296 qla24xx_fw_dump_failed:
1297 #ifndef __CHECKER__
1298 if (!hardware_locked)
1299 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1300 #else
1302 #endif
1305 void
1306 qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1308 int rval;
1309 uint32_t cnt;
1310 struct qla_hw_data *ha = vha->hw;
1311 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1312 uint32_t __iomem *dmp_reg;
1313 uint32_t *iter_reg;
1314 uint16_t __iomem *mbx_reg;
1315 unsigned long flags;
1316 struct qla25xx_fw_dump *fw;
1317 void *nxt, *nxt_chain;
1318 uint32_t *last_chain = NULL;
1319 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1321 flags = 0;
1322 ha->fw_dump_cap_flags = 0;
1324 #ifndef __CHECKER__
1325 if (!hardware_locked)
1326 spin_lock_irqsave(&ha->hardware_lock, flags);
1327 #endif
1329 if (!ha->fw_dump) {
1330 ql_log(ql_log_warn, vha, 0xd008,
1331 "No buffer available for dump.\n");
1332 goto qla25xx_fw_dump_failed;
1335 if (ha->fw_dumped) {
1336 ql_log(ql_log_warn, vha, 0xd009,
1337 "Firmware has been previously dumped (%p) "
1338 "-- ignoring request.\n",
1339 ha->fw_dump);
1340 goto qla25xx_fw_dump_failed;
1342 fw = &ha->fw_dump->isp.isp25;
1343 qla2xxx_prep_dump(ha, ha->fw_dump);
1344 ha->fw_dump->version = htonl(2);
1346 fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1349 * Pause RISC. No need to track timeout, as resetting the chip
1350      * is the right approach in case of a pause timeout
1352 qla24xx_pause_risc(reg, ha);
1354     /* Host/RISC registers. */
1355 iter_reg = fw->host_risc_reg;
1356 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
1357 qla24xx_read_window(reg, 0x7010, 16, iter_reg);
1359 /* PCIe registers. */
1360 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1361 RD_REG_DWORD(&reg->iobase_addr);
1362 WRT_REG_DWORD(&reg->iobase_window, 0x01);
1363 dmp_reg = &reg->iobase_c4;
1364 fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
1365 dmp_reg++;
1366 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
1367 dmp_reg++;
1368 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
1369 fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
1371 WRT_REG_DWORD(&reg->iobase_window, 0x00);
1372 RD_REG_DWORD(&reg->iobase_window);
1374 /* Host interface registers. */
1375 dmp_reg = &reg->flash_addr;
1376 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
1377 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
1379 /* Disable interrupts. */
1380 WRT_REG_DWORD(&reg->ictrl, 0);
1381 RD_REG_DWORD(&reg->ictrl);
1383 /* Shadow registers. */
1384 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1385 RD_REG_DWORD(&reg->iobase_addr);
1386 WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
1387 fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1389 WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
1390 fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1392 WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
1393 fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1395 WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
1396 fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1398 WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
1399 fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1401 WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
1402 fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1404 WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
1405 fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1407 WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
1408 fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1410 WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
1411 fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1413 WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
1414 fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1416 WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
1417 fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1419 /* RISC I/O register. */
1420 WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
1421 fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
1423 /* Mailbox registers. */
1424 mbx_reg = &reg->mailbox0;
1425 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
1426 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
1428 /* Transfer sequence registers. */
1429 iter_reg = fw->xseq_gp_reg;
1430 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1431 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1432 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1433 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1434 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1435 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1436 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1437 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1439 iter_reg = fw->xseq_0_reg;
1440 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
1441 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
1442 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
1444 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1446 /* Receive sequence registers. */
1447 iter_reg = fw->rseq_gp_reg;
1448 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1449 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1450 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1451 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1452 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1453 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1454 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1455 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1457 iter_reg = fw->rseq_0_reg;
1458 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
1459 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
1461 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1462 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1464 /* Auxiliary sequence registers. */
1465 iter_reg = fw->aseq_gp_reg;
1466 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
1467 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
1468 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
1469 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
1470 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
1471 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
1472 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
1473 qla24xx_read_window(reg, 0xB070, 16, iter_reg);
1475 iter_reg = fw->aseq_0_reg;
1476 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
1477 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
1479 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
1480 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
1482 /* Command DMA registers. */
1483 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1485 /* Queues. */
1486 iter_reg = fw->req0_dma_reg;
1487 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1488 dmp_reg = &reg->iobase_q;
1489 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1490 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1492 iter_reg = fw->resp0_dma_reg;
1493 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1494 dmp_reg = &reg->iobase_q;
1495 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1496 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1498 iter_reg = fw->req1_dma_reg;
1499 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1500 dmp_reg = &reg->iobase_q;
1501 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1502 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1504 /* Transmit DMA registers. */
1505 iter_reg = fw->xmt0_dma_reg;
1506 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1507 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1509 iter_reg = fw->xmt1_dma_reg;
1510 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1511 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1513 iter_reg = fw->xmt2_dma_reg;
1514 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1515 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1517 iter_reg = fw->xmt3_dma_reg;
1518 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1519 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1521 iter_reg = fw->xmt4_dma_reg;
1522 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1523 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1525 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1527 /* Receive DMA registers. */
1528 iter_reg = fw->rcvt0_data_dma_reg;
1529 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1530 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1532 iter_reg = fw->rcvt1_data_dma_reg;
1533 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1534 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1536 /* RISC registers. */
1537 iter_reg = fw->risc_gp_reg;
1538 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1539 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1540 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1541 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1542 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1543 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1544 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1545 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1547 /* Local memory controller registers. */
1548 iter_reg = fw->lmc_reg;
1549 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1550 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1551 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1552 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1553 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1554 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1555 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1556 qla24xx_read_window(reg, 0x3070, 16, iter_reg);
1558 /* Fibre Protocol Module registers. */
1559 iter_reg = fw->fpm_hdw_reg;
1560 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1561 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1562 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1563 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1564 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1565 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1566 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1567 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1568 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1569 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1570 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1571 qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1573 /* Frame Buffer registers. */
1574 iter_reg = fw->fb_hdw_reg;
1575 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1576 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1577 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1578 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1579 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1580 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1581 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1582 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1583 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1584 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1585 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1586 qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
1588 /* Multi queue registers */
1589 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
1590 &last_chain);
1592 rval = qla24xx_soft_reset(ha);
1593 if (rval != QLA_SUCCESS)
1594 goto qla25xx_fw_dump_failed_0;
1596 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1597 &nxt);
1598 if (rval != QLA_SUCCESS)
1599 goto qla25xx_fw_dump_failed_0;
1601 nxt = qla2xxx_copy_queues(ha, nxt);
1603 qla24xx_copy_eft(ha, nxt);
1605 /* Chain entries -- started with MQ. */
1606 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1607 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
1608 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1609 if (last_chain) {
1610 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1611 *last_chain |= htonl(DUMP_CHAIN_LAST);
1614 /* Adjust valid length. */
1615 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1617 qla25xx_fw_dump_failed_0:
1618 qla2xxx_dump_post_process(base_vha, rval);
1620 qla25xx_fw_dump_failed:
1621 #ifndef __CHECKER__
1622 if (!hardware_locked)
1623 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1624 #else
1626 #endif
1629 void
1630 qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1632 int rval;
1633 uint32_t cnt;
1634 struct qla_hw_data *ha = vha->hw;
1635 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1636 uint32_t __iomem *dmp_reg;
1637 uint32_t *iter_reg;
1638 uint16_t __iomem *mbx_reg;
1639 unsigned long flags;
1640 struct qla81xx_fw_dump *fw;
1641 void *nxt, *nxt_chain;
1642 uint32_t *last_chain = NULL;
1643 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1645 flags = 0;
1646 ha->fw_dump_cap_flags = 0;
1648 #ifndef __CHECKER__
1649 if (!hardware_locked)
1650 spin_lock_irqsave(&ha->hardware_lock, flags);
1651 #endif
1653 if (!ha->fw_dump) {
1654 ql_log(ql_log_warn, vha, 0xd00a,
1655 "No buffer available for dump.\n");
1656 goto qla81xx_fw_dump_failed;
1659 if (ha->fw_dumped) {
1660 ql_log(ql_log_warn, vha, 0xd00b,
1661 "Firmware has been previously dumped (%p) "
1662 "-- ignoring request.\n",
1663 ha->fw_dump);
1664 goto qla81xx_fw_dump_failed;
1666 fw = &ha->fw_dump->isp.isp81;
1667 qla2xxx_prep_dump(ha, ha->fw_dump);
1669 fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1672 * Pause RISC. No need to track timeout, as resetting the chip
1673      * is the right approach in case of a pause timeout
1675 qla24xx_pause_risc(reg, ha);
1677     /* Host/RISC registers. */
1678 iter_reg = fw->host_risc_reg;
1679 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
1680 qla24xx_read_window(reg, 0x7010, 16, iter_reg);
1682 /* PCIe registers. */
1683 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
1684 RD_REG_DWORD(&reg->iobase_addr);
1685 WRT_REG_DWORD(&reg->iobase_window, 0x01);
1686 dmp_reg = &reg->iobase_c4;
1687 fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
1688 dmp_reg++;
1689 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
1690 dmp_reg++;
1691 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
1692 fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
1694 WRT_REG_DWORD(&reg->iobase_window, 0x00);
1695 RD_REG_DWORD(&reg->iobase_window);
1697 /* Host interface registers. */
1698 dmp_reg = &reg->flash_addr;
1699 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
1700 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
1702 /* Disable interrupts. */
1703 WRT_REG_DWORD(&reg->ictrl, 0);
1704 RD_REG_DWORD(&reg->ictrl);
1706 /* Shadow registers. */
1707 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
1708 RD_REG_DWORD(&reg->iobase_addr);
1709 WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
1710 fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1712 WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
1713 fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1715 WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
1716 fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1718 WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
1719 fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1721 WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
1722 fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1724 WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
1725 fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1727 WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
1728 fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1730 WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
1731 fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1733 WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
1734 fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1736 WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
1737 fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1739 WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
1740 fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
1742 /* RISC I/O register. */
1743 WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
1744 fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
1746 /* Mailbox registers. */
1747 mbx_reg = &reg->mailbox0;
1748 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
1749 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
1751 /* Transfer sequence registers. */
1752 iter_reg = fw->xseq_gp_reg;
1753 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
1754 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
1755 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
1756 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
1757 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
1758 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
1759 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
1760 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
1762 iter_reg = fw->xseq_0_reg;
1763 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
1764 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
1765 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
1767 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
1769 /* Receive sequence registers. */
1770 iter_reg = fw->rseq_gp_reg;
1771 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
1772 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
1773 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
1774 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
1775 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
1776 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
1777 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
1778 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
1780 iter_reg = fw->rseq_0_reg;
1781 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
1782 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
1784 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
1785 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
1787 /* Auxiliary sequence registers. */
1788 iter_reg = fw->aseq_gp_reg;
1789 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
1790 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
1791 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
1792 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
1793 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
1794 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
1795 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
1796 qla24xx_read_window(reg, 0xB070, 16, iter_reg);
1798 iter_reg = fw->aseq_0_reg;
1799 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
1800 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
1802 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
1803 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
1805 /* Command DMA registers. */
1806 qla24xx_read_window(reg, 0x7100, 16, fw->cmd_dma_reg);
1808 /* Queues. */
1809 iter_reg = fw->req0_dma_reg;
1810 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
1811 dmp_reg = &reg->iobase_q;
1812 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1813 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1815 iter_reg = fw->resp0_dma_reg;
1816 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
1817 dmp_reg = &reg->iobase_q;
1818 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1819 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1821 iter_reg = fw->req1_dma_reg;
1822 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
1823 dmp_reg = &reg->iobase_q;
1824 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
1825 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
1827 /* Transmit DMA registers. */
1828 iter_reg = fw->xmt0_dma_reg;
1829 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
1830 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
1832 iter_reg = fw->xmt1_dma_reg;
1833 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
1834 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
1836 iter_reg = fw->xmt2_dma_reg;
1837 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
1838 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
1840 iter_reg = fw->xmt3_dma_reg;
1841 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
1842 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
1844 iter_reg = fw->xmt4_dma_reg;
1845 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
1846 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
1848 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
1850 /* Receive DMA registers. */
1851 iter_reg = fw->rcvt0_data_dma_reg;
1852 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
1853 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
1855 iter_reg = fw->rcvt1_data_dma_reg;
1856 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
1857 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
1859 /* RISC registers. */
1860 iter_reg = fw->risc_gp_reg;
1861 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
1862 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
1863 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
1864 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
1865 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
1866 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
1867 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
1868 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
1870 /* Local memory controller registers. */
1871 iter_reg = fw->lmc_reg;
1872 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
1873 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
1874 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
1875 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
1876 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
1877 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
1878 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
1879 qla24xx_read_window(reg, 0x3070, 16, iter_reg);
1881 /* Fibre Protocol Module registers. */
1882 iter_reg = fw->fpm_hdw_reg;
1883 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
1884 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
1885 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
1886 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
1887 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
1888 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
1889 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
1890 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
1891 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
1892 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
1893 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
1894 iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
1895 iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
1896 qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
1898 /* Frame Buffer registers. */
1899 iter_reg = fw->fb_hdw_reg;
1900 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
1901 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
1902 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
1903 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
1904 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
1905 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
1906 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
1907 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
1908 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
1909 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
1910 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
1911 iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
1912 qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
1914 /* Multi queue registers */
1915 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
1916 &last_chain);
1918 rval = qla24xx_soft_reset(ha);
1919 if (rval != QLA_SUCCESS)
1920 goto qla81xx_fw_dump_failed_0;
1922 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
1923 &nxt);
1924 if (rval != QLA_SUCCESS)
1925 goto qla81xx_fw_dump_failed_0;
1927 nxt = qla2xxx_copy_queues(ha, nxt);
1929 qla24xx_copy_eft(ha, nxt);
1931 /* Chain entries -- started with MQ. */
1932 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1933 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
1934 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1935 if (last_chain) {
1936 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
1937 *last_chain |= htonl(DUMP_CHAIN_LAST);
1938 }
1940 /* Adjust valid length. */
1941 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1943 qla81xx_fw_dump_failed_0:
1944 qla2xxx_dump_post_process(base_vha, rval);
1946 qla81xx_fw_dump_failed:
1947 #ifndef __CHECKER__
1948 if (!hardware_locked)
1949 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1950 #else
1951 ;
1952 #endif
1953 }
1955 void
1956 qla83xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1957 {
1958 int rval;
1959 uint32_t cnt;
1960 struct qla_hw_data *ha = vha->hw;
1961 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
1962 uint32_t __iomem *dmp_reg;
1963 uint32_t *iter_reg;
1964 uint16_t __iomem *mbx_reg;
1965 unsigned long flags;
1966 struct qla83xx_fw_dump *fw;
1967 void *nxt, *nxt_chain;
1968 uint32_t *last_chain = NULL;
1969 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
1971 flags = 0;
1972 ha->fw_dump_cap_flags = 0;
1974 #ifndef __CHECKER__
1975 if (!hardware_locked)
1976 spin_lock_irqsave(&ha->hardware_lock, flags);
1977 #endif
1979 if (!ha->fw_dump) {
1980 ql_log(ql_log_warn, vha, 0xd00c,
1981 "No buffer available for dump!!!\n");
1982 goto qla83xx_fw_dump_failed;
1983 }
1985 if (ha->fw_dumped) {
1986 ql_log(ql_log_warn, vha, 0xd00d,
1987 "Firmware has been previously dumped (%p) -- ignoring "
1988 "request...\n", ha->fw_dump);
1989 goto qla83xx_fw_dump_failed;
1990 }
1991 fw = &ha->fw_dump->isp.isp83;
1992 qla2xxx_prep_dump(ha, ha->fw_dump);
1994 fw->host_status = htonl(RD_REG_DWORD(&reg->host_status));
1996 /*
1997 * Pause RISC. No need to track timeout, as resetting the chip
1998 * is the right approach in case of a pause timeout.
1999 */
2000 qla24xx_pause_risc(reg, ha);
2002 WRT_REG_DWORD(&reg->iobase_addr, 0x6000);
2003 dmp_reg = &reg->iobase_window;
2004 RD_REG_DWORD(dmp_reg);
2005 WRT_REG_DWORD(dmp_reg, 0);
2007 dmp_reg = &reg->unused_4_1[0];
2008 RD_REG_DWORD(dmp_reg);
2009 WRT_REG_DWORD(dmp_reg, 0);
2011 WRT_REG_DWORD(&reg->iobase_addr, 0x6010);
2012 dmp_reg = &reg->unused_4_1[2];
2013 RD_REG_DWORD(dmp_reg);
2014 WRT_REG_DWORD(dmp_reg, 0);
2016 /* select PCR and disable ecc checking and correction */
2017 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
2018 RD_REG_DWORD(&reg->iobase_addr);
2019 WRT_REG_DWORD(&reg->iobase_select, 0x60000000); /* write to F0h = PCR */
2021 /* Host/Risc registers. */
2022 iter_reg = fw->host_risc_reg;
2023 iter_reg = qla24xx_read_window(reg, 0x7000, 16, iter_reg);
2024 iter_reg = qla24xx_read_window(reg, 0x7010, 16, iter_reg);
2025 qla24xx_read_window(reg, 0x7040, 16, iter_reg);
2027 /* PCIe registers. */
2028 WRT_REG_DWORD(&reg->iobase_addr, 0x7C00);
2029 RD_REG_DWORD(&reg->iobase_addr);
2030 WRT_REG_DWORD(&reg->iobase_window, 0x01);
2031 dmp_reg = &reg->iobase_c4;
2032 fw->pcie_regs[0] = htonl(RD_REG_DWORD(dmp_reg));
2033 dmp_reg++;
2034 fw->pcie_regs[1] = htonl(RD_REG_DWORD(dmp_reg));
2035 dmp_reg++;
2036 fw->pcie_regs[2] = htonl(RD_REG_DWORD(dmp_reg));
2037 fw->pcie_regs[3] = htonl(RD_REG_DWORD(&reg->iobase_window));
2039 WRT_REG_DWORD(&reg->iobase_window, 0x00);
2040 RD_REG_DWORD(&reg->iobase_window);
2042 /* Host interface registers. */
2043 dmp_reg = &reg->flash_addr;
2044 for (cnt = 0; cnt < sizeof(fw->host_reg) / 4; cnt++, dmp_reg++)
2045 fw->host_reg[cnt] = htonl(RD_REG_DWORD(dmp_reg));
2047 /* Disable interrupts. */
2048 WRT_REG_DWORD(&reg->ictrl, 0);
2049 RD_REG_DWORD(&reg->ictrl);
2051 /* Shadow registers. */
2052 WRT_REG_DWORD(&reg->iobase_addr, 0x0F70);
2053 RD_REG_DWORD(&reg->iobase_addr);
2054 WRT_REG_DWORD(&reg->iobase_select, 0xB0000000);
2055 fw->shadow_reg[0] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2057 WRT_REG_DWORD(&reg->iobase_select, 0xB0100000);
2058 fw->shadow_reg[1] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2060 WRT_REG_DWORD(&reg->iobase_select, 0xB0200000);
2061 fw->shadow_reg[2] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2063 WRT_REG_DWORD(&reg->iobase_select, 0xB0300000);
2064 fw->shadow_reg[3] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2066 WRT_REG_DWORD(&reg->iobase_select, 0xB0400000);
2067 fw->shadow_reg[4] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2069 WRT_REG_DWORD(&reg->iobase_select, 0xB0500000);
2070 fw->shadow_reg[5] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2072 WRT_REG_DWORD(&reg->iobase_select, 0xB0600000);
2073 fw->shadow_reg[6] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2075 WRT_REG_DWORD(&reg->iobase_select, 0xB0700000);
2076 fw->shadow_reg[7] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2078 WRT_REG_DWORD(&reg->iobase_select, 0xB0800000);
2079 fw->shadow_reg[8] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2081 WRT_REG_DWORD(&reg->iobase_select, 0xB0900000);
2082 fw->shadow_reg[9] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2084 WRT_REG_DWORD(&reg->iobase_select, 0xB0A00000);
2085 fw->shadow_reg[10] = htonl(RD_REG_DWORD(&reg->iobase_sdata));
2087 /* RISC I/O register. */
2088 WRT_REG_DWORD(&reg->iobase_addr, 0x0010);
2089 fw->risc_io_reg = htonl(RD_REG_DWORD(&reg->iobase_window));
2091 /* Mailbox registers. */
2092 mbx_reg = &reg->mailbox0;
2093 for (cnt = 0; cnt < sizeof(fw->mailbox_reg) / 2; cnt++, mbx_reg++)
2094 fw->mailbox_reg[cnt] = htons(RD_REG_WORD(mbx_reg));
2096 /* Transfer sequence registers. */
2097 iter_reg = fw->xseq_gp_reg;
2098 iter_reg = qla24xx_read_window(reg, 0xBE00, 16, iter_reg);
2099 iter_reg = qla24xx_read_window(reg, 0xBE10, 16, iter_reg);
2100 iter_reg = qla24xx_read_window(reg, 0xBE20, 16, iter_reg);
2101 iter_reg = qla24xx_read_window(reg, 0xBE30, 16, iter_reg);
2102 iter_reg = qla24xx_read_window(reg, 0xBE40, 16, iter_reg);
2103 iter_reg = qla24xx_read_window(reg, 0xBE50, 16, iter_reg);
2104 iter_reg = qla24xx_read_window(reg, 0xBE60, 16, iter_reg);
2105 iter_reg = qla24xx_read_window(reg, 0xBE70, 16, iter_reg);
2106 iter_reg = qla24xx_read_window(reg, 0xBF00, 16, iter_reg);
2107 iter_reg = qla24xx_read_window(reg, 0xBF10, 16, iter_reg);
2108 iter_reg = qla24xx_read_window(reg, 0xBF20, 16, iter_reg);
2109 iter_reg = qla24xx_read_window(reg, 0xBF30, 16, iter_reg);
2110 iter_reg = qla24xx_read_window(reg, 0xBF40, 16, iter_reg);
2111 iter_reg = qla24xx_read_window(reg, 0xBF50, 16, iter_reg);
2112 iter_reg = qla24xx_read_window(reg, 0xBF60, 16, iter_reg);
2113 qla24xx_read_window(reg, 0xBF70, 16, iter_reg);
2115 iter_reg = fw->xseq_0_reg;
2116 iter_reg = qla24xx_read_window(reg, 0xBFC0, 16, iter_reg);
2117 iter_reg = qla24xx_read_window(reg, 0xBFD0, 16, iter_reg);
2118 qla24xx_read_window(reg, 0xBFE0, 16, iter_reg);
2120 qla24xx_read_window(reg, 0xBFF0, 16, fw->xseq_1_reg);
2122 qla24xx_read_window(reg, 0xBEF0, 16, fw->xseq_2_reg);
2124 /* Receive sequence registers. */
2125 iter_reg = fw->rseq_gp_reg;
2126 iter_reg = qla24xx_read_window(reg, 0xFE00, 16, iter_reg);
2127 iter_reg = qla24xx_read_window(reg, 0xFE10, 16, iter_reg);
2128 iter_reg = qla24xx_read_window(reg, 0xFE20, 16, iter_reg);
2129 iter_reg = qla24xx_read_window(reg, 0xFE30, 16, iter_reg);
2130 iter_reg = qla24xx_read_window(reg, 0xFE40, 16, iter_reg);
2131 iter_reg = qla24xx_read_window(reg, 0xFE50, 16, iter_reg);
2132 iter_reg = qla24xx_read_window(reg, 0xFE60, 16, iter_reg);
2133 iter_reg = qla24xx_read_window(reg, 0xFE70, 16, iter_reg);
2134 iter_reg = qla24xx_read_window(reg, 0xFF00, 16, iter_reg);
2135 iter_reg = qla24xx_read_window(reg, 0xFF10, 16, iter_reg);
2136 iter_reg = qla24xx_read_window(reg, 0xFF20, 16, iter_reg);
2137 iter_reg = qla24xx_read_window(reg, 0xFF30, 16, iter_reg);
2138 iter_reg = qla24xx_read_window(reg, 0xFF40, 16, iter_reg);
2139 iter_reg = qla24xx_read_window(reg, 0xFF50, 16, iter_reg);
2140 iter_reg = qla24xx_read_window(reg, 0xFF60, 16, iter_reg);
2141 qla24xx_read_window(reg, 0xFF70, 16, iter_reg);
2143 iter_reg = fw->rseq_0_reg;
2144 iter_reg = qla24xx_read_window(reg, 0xFFC0, 16, iter_reg);
2145 qla24xx_read_window(reg, 0xFFD0, 16, iter_reg);
2147 qla24xx_read_window(reg, 0xFFE0, 16, fw->rseq_1_reg);
2148 qla24xx_read_window(reg, 0xFFF0, 16, fw->rseq_2_reg);
2149 qla24xx_read_window(reg, 0xFEF0, 16, fw->rseq_3_reg);
2151 /* Auxiliary sequence registers. */
2152 iter_reg = fw->aseq_gp_reg;
2153 iter_reg = qla24xx_read_window(reg, 0xB000, 16, iter_reg);
2154 iter_reg = qla24xx_read_window(reg, 0xB010, 16, iter_reg);
2155 iter_reg = qla24xx_read_window(reg, 0xB020, 16, iter_reg);
2156 iter_reg = qla24xx_read_window(reg, 0xB030, 16, iter_reg);
2157 iter_reg = qla24xx_read_window(reg, 0xB040, 16, iter_reg);
2158 iter_reg = qla24xx_read_window(reg, 0xB050, 16, iter_reg);
2159 iter_reg = qla24xx_read_window(reg, 0xB060, 16, iter_reg);
2160 iter_reg = qla24xx_read_window(reg, 0xB070, 16, iter_reg);
2161 iter_reg = qla24xx_read_window(reg, 0xB100, 16, iter_reg);
2162 iter_reg = qla24xx_read_window(reg, 0xB110, 16, iter_reg);
2163 iter_reg = qla24xx_read_window(reg, 0xB120, 16, iter_reg);
2164 iter_reg = qla24xx_read_window(reg, 0xB130, 16, iter_reg);
2165 iter_reg = qla24xx_read_window(reg, 0xB140, 16, iter_reg);
2166 iter_reg = qla24xx_read_window(reg, 0xB150, 16, iter_reg);
2167 iter_reg = qla24xx_read_window(reg, 0xB160, 16, iter_reg);
2168 qla24xx_read_window(reg, 0xB170, 16, iter_reg);
2170 iter_reg = fw->aseq_0_reg;
2171 iter_reg = qla24xx_read_window(reg, 0xB0C0, 16, iter_reg);
2172 qla24xx_read_window(reg, 0xB0D0, 16, iter_reg);
2174 qla24xx_read_window(reg, 0xB0E0, 16, fw->aseq_1_reg);
2175 qla24xx_read_window(reg, 0xB0F0, 16, fw->aseq_2_reg);
2176 qla24xx_read_window(reg, 0xB1F0, 16, fw->aseq_3_reg);
2178 /* Command DMA registers. */
2179 iter_reg = fw->cmd_dma_reg;
2180 iter_reg = qla24xx_read_window(reg, 0x7100, 16, iter_reg);
2181 iter_reg = qla24xx_read_window(reg, 0x7120, 16, iter_reg);
2182 iter_reg = qla24xx_read_window(reg, 0x7130, 16, iter_reg);
2183 qla24xx_read_window(reg, 0x71F0, 16, iter_reg);
2185 /* Queues. */
2186 iter_reg = fw->req0_dma_reg;
2187 iter_reg = qla24xx_read_window(reg, 0x7200, 8, iter_reg);
2188 dmp_reg = &reg->iobase_q;
2189 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
2190 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
2192 iter_reg = fw->resp0_dma_reg;
2193 iter_reg = qla24xx_read_window(reg, 0x7300, 8, iter_reg);
2194 dmp_reg = &reg->iobase_q;
2195 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
2196 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
2198 iter_reg = fw->req1_dma_reg;
2199 iter_reg = qla24xx_read_window(reg, 0x7400, 8, iter_reg);
2200 dmp_reg = &reg->iobase_q;
2201 for (cnt = 0; cnt < 7; cnt++, dmp_reg++)
2202 *iter_reg++ = htonl(RD_REG_DWORD(dmp_reg));
2204 /* Transmit DMA registers. */
2205 iter_reg = fw->xmt0_dma_reg;
2206 iter_reg = qla24xx_read_window(reg, 0x7600, 16, iter_reg);
2207 qla24xx_read_window(reg, 0x7610, 16, iter_reg);
2209 iter_reg = fw->xmt1_dma_reg;
2210 iter_reg = qla24xx_read_window(reg, 0x7620, 16, iter_reg);
2211 qla24xx_read_window(reg, 0x7630, 16, iter_reg);
2213 iter_reg = fw->xmt2_dma_reg;
2214 iter_reg = qla24xx_read_window(reg, 0x7640, 16, iter_reg);
2215 qla24xx_read_window(reg, 0x7650, 16, iter_reg);
2217 iter_reg = fw->xmt3_dma_reg;
2218 iter_reg = qla24xx_read_window(reg, 0x7660, 16, iter_reg);
2219 qla24xx_read_window(reg, 0x7670, 16, iter_reg);
2221 iter_reg = fw->xmt4_dma_reg;
2222 iter_reg = qla24xx_read_window(reg, 0x7680, 16, iter_reg);
2223 qla24xx_read_window(reg, 0x7690, 16, iter_reg);
2225 qla24xx_read_window(reg, 0x76A0, 16, fw->xmt_data_dma_reg);
2227 /* Receive DMA registers. */
2228 iter_reg = fw->rcvt0_data_dma_reg;
2229 iter_reg = qla24xx_read_window(reg, 0x7700, 16, iter_reg);
2230 qla24xx_read_window(reg, 0x7710, 16, iter_reg);
2232 iter_reg = fw->rcvt1_data_dma_reg;
2233 iter_reg = qla24xx_read_window(reg, 0x7720, 16, iter_reg);
2234 qla24xx_read_window(reg, 0x7730, 16, iter_reg);
2236 /* RISC registers. */
2237 iter_reg = fw->risc_gp_reg;
2238 iter_reg = qla24xx_read_window(reg, 0x0F00, 16, iter_reg);
2239 iter_reg = qla24xx_read_window(reg, 0x0F10, 16, iter_reg);
2240 iter_reg = qla24xx_read_window(reg, 0x0F20, 16, iter_reg);
2241 iter_reg = qla24xx_read_window(reg, 0x0F30, 16, iter_reg);
2242 iter_reg = qla24xx_read_window(reg, 0x0F40, 16, iter_reg);
2243 iter_reg = qla24xx_read_window(reg, 0x0F50, 16, iter_reg);
2244 iter_reg = qla24xx_read_window(reg, 0x0F60, 16, iter_reg);
2245 qla24xx_read_window(reg, 0x0F70, 16, iter_reg);
2247 /* Local memory controller registers. */
2248 iter_reg = fw->lmc_reg;
2249 iter_reg = qla24xx_read_window(reg, 0x3000, 16, iter_reg);
2250 iter_reg = qla24xx_read_window(reg, 0x3010, 16, iter_reg);
2251 iter_reg = qla24xx_read_window(reg, 0x3020, 16, iter_reg);
2252 iter_reg = qla24xx_read_window(reg, 0x3030, 16, iter_reg);
2253 iter_reg = qla24xx_read_window(reg, 0x3040, 16, iter_reg);
2254 iter_reg = qla24xx_read_window(reg, 0x3050, 16, iter_reg);
2255 iter_reg = qla24xx_read_window(reg, 0x3060, 16, iter_reg);
2256 qla24xx_read_window(reg, 0x3070, 16, iter_reg);
2258 /* Fibre Protocol Module registers. */
2259 iter_reg = fw->fpm_hdw_reg;
2260 iter_reg = qla24xx_read_window(reg, 0x4000, 16, iter_reg);
2261 iter_reg = qla24xx_read_window(reg, 0x4010, 16, iter_reg);
2262 iter_reg = qla24xx_read_window(reg, 0x4020, 16, iter_reg);
2263 iter_reg = qla24xx_read_window(reg, 0x4030, 16, iter_reg);
2264 iter_reg = qla24xx_read_window(reg, 0x4040, 16, iter_reg);
2265 iter_reg = qla24xx_read_window(reg, 0x4050, 16, iter_reg);
2266 iter_reg = qla24xx_read_window(reg, 0x4060, 16, iter_reg);
2267 iter_reg = qla24xx_read_window(reg, 0x4070, 16, iter_reg);
2268 iter_reg = qla24xx_read_window(reg, 0x4080, 16, iter_reg);
2269 iter_reg = qla24xx_read_window(reg, 0x4090, 16, iter_reg);
2270 iter_reg = qla24xx_read_window(reg, 0x40A0, 16, iter_reg);
2271 iter_reg = qla24xx_read_window(reg, 0x40B0, 16, iter_reg);
2272 iter_reg = qla24xx_read_window(reg, 0x40C0, 16, iter_reg);
2273 iter_reg = qla24xx_read_window(reg, 0x40D0, 16, iter_reg);
2274 iter_reg = qla24xx_read_window(reg, 0x40E0, 16, iter_reg);
2275 qla24xx_read_window(reg, 0x40F0, 16, iter_reg);
2277 /* RQ0 Array registers. */
2278 iter_reg = fw->rq0_array_reg;
2279 iter_reg = qla24xx_read_window(reg, 0x5C00, 16, iter_reg);
2280 iter_reg = qla24xx_read_window(reg, 0x5C10, 16, iter_reg);
2281 iter_reg = qla24xx_read_window(reg, 0x5C20, 16, iter_reg);
2282 iter_reg = qla24xx_read_window(reg, 0x5C30, 16, iter_reg);
2283 iter_reg = qla24xx_read_window(reg, 0x5C40, 16, iter_reg);
2284 iter_reg = qla24xx_read_window(reg, 0x5C50, 16, iter_reg);
2285 iter_reg = qla24xx_read_window(reg, 0x5C60, 16, iter_reg);
2286 iter_reg = qla24xx_read_window(reg, 0x5C70, 16, iter_reg);
2287 iter_reg = qla24xx_read_window(reg, 0x5C80, 16, iter_reg);
2288 iter_reg = qla24xx_read_window(reg, 0x5C90, 16, iter_reg);
2289 iter_reg = qla24xx_read_window(reg, 0x5CA0, 16, iter_reg);
2290 iter_reg = qla24xx_read_window(reg, 0x5CB0, 16, iter_reg);
2291 iter_reg = qla24xx_read_window(reg, 0x5CC0, 16, iter_reg);
2292 iter_reg = qla24xx_read_window(reg, 0x5CD0, 16, iter_reg);
2293 iter_reg = qla24xx_read_window(reg, 0x5CE0, 16, iter_reg);
2294 qla24xx_read_window(reg, 0x5CF0, 16, iter_reg);
2296 /* RQ1 Array registers. */
2297 iter_reg = fw->rq1_array_reg;
2298 iter_reg = qla24xx_read_window(reg, 0x5D00, 16, iter_reg);
2299 iter_reg = qla24xx_read_window(reg, 0x5D10, 16, iter_reg);
2300 iter_reg = qla24xx_read_window(reg, 0x5D20, 16, iter_reg);
2301 iter_reg = qla24xx_read_window(reg, 0x5D30, 16, iter_reg);
2302 iter_reg = qla24xx_read_window(reg, 0x5D40, 16, iter_reg);
2303 iter_reg = qla24xx_read_window(reg, 0x5D50, 16, iter_reg);
2304 iter_reg = qla24xx_read_window(reg, 0x5D60, 16, iter_reg);
2305 iter_reg = qla24xx_read_window(reg, 0x5D70, 16, iter_reg);
2306 iter_reg = qla24xx_read_window(reg, 0x5D80, 16, iter_reg);
2307 iter_reg = qla24xx_read_window(reg, 0x5D90, 16, iter_reg);
2308 iter_reg = qla24xx_read_window(reg, 0x5DA0, 16, iter_reg);
2309 iter_reg = qla24xx_read_window(reg, 0x5DB0, 16, iter_reg);
2310 iter_reg = qla24xx_read_window(reg, 0x5DC0, 16, iter_reg);
2311 iter_reg = qla24xx_read_window(reg, 0x5DD0, 16, iter_reg);
2312 iter_reg = qla24xx_read_window(reg, 0x5DE0, 16, iter_reg);
2313 qla24xx_read_window(reg, 0x5DF0, 16, iter_reg);
2315 /* RP0 Array registers. */
2316 iter_reg = fw->rp0_array_reg;
2317 iter_reg = qla24xx_read_window(reg, 0x5E00, 16, iter_reg);
2318 iter_reg = qla24xx_read_window(reg, 0x5E10, 16, iter_reg);
2319 iter_reg = qla24xx_read_window(reg, 0x5E20, 16, iter_reg);
2320 iter_reg = qla24xx_read_window(reg, 0x5E30, 16, iter_reg);
2321 iter_reg = qla24xx_read_window(reg, 0x5E40, 16, iter_reg);
2322 iter_reg = qla24xx_read_window(reg, 0x5E50, 16, iter_reg);
2323 iter_reg = qla24xx_read_window(reg, 0x5E60, 16, iter_reg);
2324 iter_reg = qla24xx_read_window(reg, 0x5E70, 16, iter_reg);
2325 iter_reg = qla24xx_read_window(reg, 0x5E80, 16, iter_reg);
2326 iter_reg = qla24xx_read_window(reg, 0x5E90, 16, iter_reg);
2327 iter_reg = qla24xx_read_window(reg, 0x5EA0, 16, iter_reg);
2328 iter_reg = qla24xx_read_window(reg, 0x5EB0, 16, iter_reg);
2329 iter_reg = qla24xx_read_window(reg, 0x5EC0, 16, iter_reg);
2330 iter_reg = qla24xx_read_window(reg, 0x5ED0, 16, iter_reg);
2331 iter_reg = qla24xx_read_window(reg, 0x5EE0, 16, iter_reg);
2332 qla24xx_read_window(reg, 0x5EF0, 16, iter_reg);
2334 /* RP1 Array registers. */
2335 iter_reg = fw->rp1_array_reg;
2336 iter_reg = qla24xx_read_window(reg, 0x5F00, 16, iter_reg);
2337 iter_reg = qla24xx_read_window(reg, 0x5F10, 16, iter_reg);
2338 iter_reg = qla24xx_read_window(reg, 0x5F20, 16, iter_reg);
2339 iter_reg = qla24xx_read_window(reg, 0x5F30, 16, iter_reg);
2340 iter_reg = qla24xx_read_window(reg, 0x5F40, 16, iter_reg);
2341 iter_reg = qla24xx_read_window(reg, 0x5F50, 16, iter_reg);
2342 iter_reg = qla24xx_read_window(reg, 0x5F60, 16, iter_reg);
2343 iter_reg = qla24xx_read_window(reg, 0x5F70, 16, iter_reg);
2344 iter_reg = qla24xx_read_window(reg, 0x5F80, 16, iter_reg);
2345 iter_reg = qla24xx_read_window(reg, 0x5F90, 16, iter_reg);
2346 iter_reg = qla24xx_read_window(reg, 0x5FA0, 16, iter_reg);
2347 iter_reg = qla24xx_read_window(reg, 0x5FB0, 16, iter_reg);
2348 iter_reg = qla24xx_read_window(reg, 0x5FC0, 16, iter_reg);
2349 iter_reg = qla24xx_read_window(reg, 0x5FD0, 16, iter_reg);
2350 iter_reg = qla24xx_read_window(reg, 0x5FE0, 16, iter_reg);
2351 qla24xx_read_window(reg, 0x5FF0, 16, iter_reg);
2353 iter_reg = fw->at0_array_reg;
2354 iter_reg = qla24xx_read_window(reg, 0x7080, 16, iter_reg);
2355 iter_reg = qla24xx_read_window(reg, 0x7090, 16, iter_reg);
2356 iter_reg = qla24xx_read_window(reg, 0x70A0, 16, iter_reg);
2357 iter_reg = qla24xx_read_window(reg, 0x70B0, 16, iter_reg);
2358 iter_reg = qla24xx_read_window(reg, 0x70C0, 16, iter_reg);
2359 iter_reg = qla24xx_read_window(reg, 0x70D0, 16, iter_reg);
2360 iter_reg = qla24xx_read_window(reg, 0x70E0, 16, iter_reg);
2361 qla24xx_read_window(reg, 0x70F0, 16, iter_reg);
2363 /* I/O Queue Control registers. */
2364 qla24xx_read_window(reg, 0x7800, 16, fw->queue_control_reg);
2366 /* Frame Buffer registers. */
2367 iter_reg = fw->fb_hdw_reg;
2368 iter_reg = qla24xx_read_window(reg, 0x6000, 16, iter_reg);
2369 iter_reg = qla24xx_read_window(reg, 0x6010, 16, iter_reg);
2370 iter_reg = qla24xx_read_window(reg, 0x6020, 16, iter_reg);
2371 iter_reg = qla24xx_read_window(reg, 0x6030, 16, iter_reg);
2372 iter_reg = qla24xx_read_window(reg, 0x6040, 16, iter_reg);
2373 iter_reg = qla24xx_read_window(reg, 0x6060, 16, iter_reg);
2374 iter_reg = qla24xx_read_window(reg, 0x6070, 16, iter_reg);
2375 iter_reg = qla24xx_read_window(reg, 0x6100, 16, iter_reg);
2376 iter_reg = qla24xx_read_window(reg, 0x6130, 16, iter_reg);
2377 iter_reg = qla24xx_read_window(reg, 0x6150, 16, iter_reg);
2378 iter_reg = qla24xx_read_window(reg, 0x6170, 16, iter_reg);
2379 iter_reg = qla24xx_read_window(reg, 0x6190, 16, iter_reg);
2380 iter_reg = qla24xx_read_window(reg, 0x61B0, 16, iter_reg);
2381 iter_reg = qla24xx_read_window(reg, 0x61C0, 16, iter_reg);
2382 iter_reg = qla24xx_read_window(reg, 0x6530, 16, iter_reg);
2383 iter_reg = qla24xx_read_window(reg, 0x6540, 16, iter_reg);
2384 iter_reg = qla24xx_read_window(reg, 0x6550, 16, iter_reg);
2385 iter_reg = qla24xx_read_window(reg, 0x6560, 16, iter_reg);
2386 iter_reg = qla24xx_read_window(reg, 0x6570, 16, iter_reg);
2387 iter_reg = qla24xx_read_window(reg, 0x6580, 16, iter_reg);
2388 iter_reg = qla24xx_read_window(reg, 0x6590, 16, iter_reg);
2389 iter_reg = qla24xx_read_window(reg, 0x65A0, 16, iter_reg);
2390 iter_reg = qla24xx_read_window(reg, 0x65B0, 16, iter_reg);
2391 iter_reg = qla24xx_read_window(reg, 0x65C0, 16, iter_reg);
2392 iter_reg = qla24xx_read_window(reg, 0x65D0, 16, iter_reg);
2393 iter_reg = qla24xx_read_window(reg, 0x65E0, 16, iter_reg);
2394 qla24xx_read_window(reg, 0x6F00, 16, iter_reg);
2396 /* Multi queue registers */
2397 nxt_chain = qla25xx_copy_mq(ha, (void *)ha->fw_dump + ha->chain_offset,
2398 &last_chain);
2400 rval = qla24xx_soft_reset(ha);
2401 if (rval != QLA_SUCCESS) {
2402 ql_log(ql_log_warn, vha, 0xd00e,
2403 "SOFT RESET FAILED, forcing continuation of dump!!!\n");
2404 rval = QLA_SUCCESS;
2406 ql_log(ql_log_warn, vha, 0xd00f, "try a bigger hammer!!!\n");
2408 WRT_REG_DWORD(&reg->hccr, HCCRX_SET_RISC_RESET);
2409 RD_REG_DWORD(&reg->hccr);
2411 WRT_REG_DWORD(&reg->hccr, HCCRX_REL_RISC_PAUSE);
2412 RD_REG_DWORD(&reg->hccr);
2414 WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_RESET);
2415 RD_REG_DWORD(&reg->hccr);
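/*
 * The loop below polls mailbox0 until it clears, i.e. until the RISC
 * reports ready after the forced reset, waiting at most roughly 150 ms
 * (30000 iterations x 5 us).
 */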
2417 for (cnt = 30000; cnt && (RD_REG_WORD(&reg->mailbox0)); cnt--)
2418 udelay(5);
2420 if (!cnt) {
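/*
 * RISC never came ready: the code RAM and external memory cannot be
 * read, so skip 'nxt' past the space set aside for them
 * (sizeof(fw->code_ram) plus ha->fw_memory_size - 0x100000 + 1),
 * apparently so the queue data copied at copy_queue still lands at its
 * expected offset in the dump.
 */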
2421 nxt = fw->code_ram;
2422 nxt += sizeof(fw->code_ram);
2423 nxt += (ha->fw_memory_size - 0x100000 + 1);
2424 goto copy_queue;
2425 } else {
2426 set_bit(RISC_RDY_AFT_RESET, &ha->fw_dump_cap_flags);
2427 ql_log(ql_log_warn, vha, 0xd010,
2428 "bigger hammer success?\n");
2432 rval = qla24xx_dump_memory(ha, fw->code_ram, sizeof(fw->code_ram),
2433 &nxt);
2434 if (rval != QLA_SUCCESS)
2435 goto qla83xx_fw_dump_failed_0;
2437 copy_queue:
2438 nxt = qla2xxx_copy_queues(ha, nxt);
2440 qla24xx_copy_eft(ha, nxt);
2442 /* Chain entries -- started with MQ. */
2443 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
2444 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
2445 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
2446 if (last_chain) {
2447 ha->fw_dump->version |= htonl(DUMP_CHAIN_VARIANT);
2448 *last_chain |= htonl(DUMP_CHAIN_LAST);
2449 }
2451 /* Adjust valid length. */
2452 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
2454 qla83xx_fw_dump_failed_0:
2455 qla2xxx_dump_post_process(base_vha, rval);
2457 qla83xx_fw_dump_failed:
2458 #ifndef __CHECKER__
2459 if (!hardware_locked)
2460 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2461 #else
2462 ;
2463 #endif
2464 }
2466 /****************************************************************************/
2467 /* Driver Debug Functions. */
2468 /****************************************************************************/
2470 static inline int
2471 ql_mask_match(uint32_t level)
2472 {
2473 if (ql2xextended_error_logging == 1)
2474 ql2xextended_error_logging = QL_DBG_DEFAULT1_MASK;
2475 return (level & ql2xextended_error_logging) == level;
2476 }
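/*
 * Worked example with illustrative values: with ql2xextended_error_logging
 * set to 0x30000000, a message at level 0x10000000 is emitted because
 * (0x10000000 & 0x30000000) == 0x10000000, while a message at level
 * 0x40000000 is suppressed. A module-parameter value of exactly 1 is first
 * expanded to QL_DBG_DEFAULT1_MASK, as above.
 */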
2478 /*
2479 * This function is for formatting and logging debug information.
2480 * It is to be used when vha is available. It formats the message
2481 * and logs it to the messages file.
2482 * parameters:
2483 * level: The level of the debug messages to be printed.
2484 * If ql2xextended_error_logging value is correctly set,
2485 * this message will appear in the messages file.
2486 * vha: Pointer to the scsi_qla_host_t.
2487 * id: This is a unique identifier for the level. It identifies the
2488 * part of the code from where the message originated.
2489 * msg: The message to be displayed.
2490 */
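/*
 * Usage sketch (the level bit, message id and format below are illustrative,
 * not taken from the driver):
 *
 *	ql_dbg(ql_dbg_disc, vha, 0x2010,
 *	    "Port login complete, loop id 0x%x.\n", loop_id);
 *
 * The message is emitted only when the corresponding level bit is set in
 * ql2xextended_error_logging; see ql_mask_match() above.
 */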
2491 void
2492 ql_dbg(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
2493 {
2494 va_list va;
2495 struct va_format vaf;
2497 if (!ql_mask_match(level))
2498 return;
2500 va_start(va, fmt);
2502 vaf.fmt = fmt;
2503 vaf.va = &va;
2505 if (vha != NULL) {
2506 const struct pci_dev *pdev = vha->hw->pdev;
2507 /* <module-name> <pci-name> <msg-id>:<host> Message */
2508 pr_warn("%s [%s]-%04x:%ld: %pV",
2509 QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset,
2510 vha->host_no, &vaf);
2511 } else {
2512 pr_warn("%s [%s]-%04x: : %pV",
2513 QL_MSGHDR, "0000:00:00.0", id + ql_dbg_offset, &vaf);
2514 }
2516 va_end(va);
2517 }
2520 /*
2521 * This function is for formatting and logging debug information.
2522 * It is to be used when vha is not available and pci is available,
2523 * i.e., before host allocation. It formats the message and logs it
2524 * to the messages file.
2525 * parameters:
2526 * level: The level of the debug messages to be printed.
2527 * If ql2xextended_error_logging value is correctly set,
2528 * this message will appear in the messages file.
2529 * pdev: Pointer to the struct pci_dev.
2530 * id: This is a unique id for the level. It identifies the part
2531 * of the code from where the message originated.
2532 * msg: The message to be displayed.
2533 */
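/*
 * Usage sketch (the message id is illustrative): during early probe, before
 * a host is allocated, a caller would log against the pci_dev instead of a
 * vha:
 *
 *	ql_dbg_pci(ql_dbg_init, pdev, 0x0100,
 *	    "Unable to map PCI register space.\n");
 */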
2534 void
2535 ql_dbg_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
2536 const char *fmt, ...)
2537 {
2538 va_list va;
2539 struct va_format vaf;
2541 if (pdev == NULL)
2542 return;
2543 if (!ql_mask_match(level))
2544 return;
2546 va_start(va, fmt);
2548 vaf.fmt = fmt;
2549 vaf.va = &va;
2551 /* <module-name> <dev-name>:<msg-id> Message */
2552 pr_warn("%s [%s]-%04x: : %pV",
2553 QL_MSGHDR, dev_name(&(pdev->dev)), id + ql_dbg_offset, &vaf);
2555 va_end(va);
2556 }
2558 /*
2559 * This function is for formatting and logging log messages.
2560 * It is to be used when vha is available. It formats the message
2561 * and logs it to the messages file. All the messages will be logged
2562 * irrespective of value of ql2xextended_error_logging.
2563 * parameters:
2564 * level: The level of the log messages to be printed in the
2565 * messages file.
2566 * vha: Pointer to the scsi_qla_host_t
2567 * id: This is a unique id for the level. It identifies the
2568 * part of the code from where the message originated.
2569 * msg: The message to be displayed.
2570 */
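/*
 * Usage sketch (the message id is illustrative): unlike ql_dbg(), these
 * messages are emitted whenever 'level' is at or below ql_errlev,
 * regardless of ql2xextended_error_logging:
 *
 *	ql_log(ql_log_warn, vha, 0x00ff,
 *	    "Command timed out, scheduling adapter reset.\n");
 */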
2571 void
2572 ql_log(uint32_t level, scsi_qla_host_t *vha, int32_t id, const char *fmt, ...)
2573 {
2574 va_list va;
2575 struct va_format vaf;
2576 char pbuf[128];
2578 if (level > ql_errlev)
2579 return;
2581 if (vha != NULL) {
2582 const struct pci_dev *pdev = vha->hw->pdev;
2583 /* <module-name> <msg-id>:<host> Message */
2584 snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x:%ld: ",
2585 QL_MSGHDR, dev_name(&(pdev->dev)), id, vha->host_no);
2586 } else {
2587 snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
2588 QL_MSGHDR, "0000:00:00.0", id);
2589 }
2590 pbuf[sizeof(pbuf) - 1] = 0;
2592 va_start(va, fmt);
2594 vaf.fmt = fmt;
2595 vaf.va = &va;
2597 switch (level) {
2598 case ql_log_fatal: /* FATAL LOG */
2599 pr_crit("%s%pV", pbuf, &vaf);
2600 break;
2601 case ql_log_warn:
2602 pr_err("%s%pV", pbuf, &vaf);
2603 break;
2604 case ql_log_info:
2605 pr_warn("%s%pV", pbuf, &vaf);
2606 break;
2607 default:
2608 pr_info("%s%pV", pbuf, &vaf);
2609 break;
2610 }
2612 va_end(va);
2613 }
2615 /*
2616 * This function is for formatting and logging log messages.
2617 * It is to be used when vha is not available and pci is available,
2618 * i.e., before host allocation. It formats the message and logs
2619 * it to the messages file. All the messages are logged irrespective
2620 * of the value of ql2xextended_error_logging.
2621 * parameters:
2622 * level: The level of the log messages to be printed in the
2623 * messages file.
2624 * pdev: Pointer to the struct pci_dev.
2625 * id: This is a unique id for the level. It identifies the
2626 * part of the code from where the message originated.
2627 * msg: The message to be displayed.
2628 */
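/*
 * Usage sketch (the message id is illustrative): intended for failures seen
 * before a host exists, e.g. during PCI probe:
 *
 *	ql_log_pci(ql_log_fatal, pdev, 0x00f0,
 *	    "Failed to allocate memory for adapter structure.\n");
 */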
2629 void
2630 ql_log_pci(uint32_t level, struct pci_dev *pdev, int32_t id,
2631 const char *fmt, ...)
2632 {
2633 va_list va;
2634 struct va_format vaf;
2635 char pbuf[128];
2637 if (pdev == NULL)
2638 return;
2639 if (level > ql_errlev)
2640 return;
2642 /* <module-name> <dev-name>:<msg-id> Message */
2643 snprintf(pbuf, sizeof(pbuf), "%s [%s]-%04x: : ",
2644 QL_MSGHDR, dev_name(&(pdev->dev)), id);
2645 pbuf[sizeof(pbuf) - 1] = 0;
2647 va_start(va, fmt);
2649 vaf.fmt = fmt;
2650 vaf.va = &va;
2652 switch (level) {
2653 case ql_log_fatal: /* FATAL LOG */
2654 pr_crit("%s%pV", pbuf, &vaf);
2655 break;
2656 case ql_log_warn:
2657 pr_err("%s%pV", pbuf, &vaf);
2658 break;
2659 case ql_log_info:
2660 pr_warn("%s%pV", pbuf, &vaf);
2661 break;
2662 default:
2663 pr_info("%s%pV", pbuf, &vaf);
2664 break;
2665 }
2667 va_end(va);
2668 }
2670 void
2671 ql_dump_regs(uint32_t level, scsi_qla_host_t *vha, int32_t id)
2672 {
2673 int i;
2674 struct qla_hw_data *ha = vha->hw;
2675 struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
2676 struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
2677 struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
2678 uint16_t __iomem *mbx_reg;
2680 if (!ql_mask_match(level))
2681 return;
2683 if (IS_P3P_TYPE(ha))
2684 mbx_reg = &reg82->mailbox_in[0];
2685 else if (IS_FWI2_CAPABLE(ha))
2686 mbx_reg = &reg24->mailbox0;
2687 else
2688 mbx_reg = MAILBOX_REG(ha, reg, 0);
2690 ql_dbg(level, vha, id, "Mailbox registers:\n");
2691 for (i = 0; i < 6; i++)
2692 ql_dbg(level, vha, id,
2693 "mbox[%d] 0x%04x\n", i, RD_REG_WORD(mbx_reg++));
2697 void
2698 ql_dump_buffer(uint32_t level, scsi_qla_host_t *vha, int32_t id,
2699 uint8_t *buf, uint size)
2700 {
2701 uint cnt;
2703 if (!ql_mask_match(level))
2704 return;
2706 ql_dbg(level, vha, id,
2707 "%-+5d 0 1 2 3 4 5 6 7 8 9 A B C D E F\n", size);
2708 ql_dbg(level, vha, id,
2709 "----- -----------------------------------------------\n");
2710 for (cnt = 0; cnt < size; cnt += 16) {
2711 ql_dbg(level, vha, id, "%04x: ", cnt);
2712 print_hex_dump(KERN_CONT, "", DUMP_PREFIX_NONE, 16, 1,
2713 buf + cnt, min(16U, size - cnt), false);