// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c)  2003-2014 QLogic Corporation
 */
/* Shorthand for the ISP24xx MMIO register block of this host. */
#define ISPREG(vha)	(&(vha)->hw->iobase->isp24)
/* Byte offset of the iobase_addr (window-select) register within *reg. */
#define IOBAR(reg)	offsetof(typeof(*(reg)), iobase_addr)
#define IOBASE(vha)	IOBAR(ISPREG(vha))
/* Sentinel returned by entry handlers on a fatal capture failure. */
#define INVALID_ENTRY ((struct qla27xx_fwdt_entry *)0xffffffffffffffffUL)
15 qla27xx_insert16(uint16_t value
, void *buf
, ulong
*len
)
19 *(__le16
*)buf
= cpu_to_le16(value
);
21 *len
+= sizeof(value
);
25 qla27xx_insert32(uint32_t value
, void *buf
, ulong
*len
)
29 *(__le32
*)buf
= cpu_to_le32(value
);
31 *len
+= sizeof(value
);
35 qla27xx_insertbuf(void *mem
, ulong size
, void *buf
, ulong
*len
)
37 if (buf
&& mem
&& size
) {
39 memcpy(buf
, mem
, size
);
45 qla27xx_read8(void __iomem
*window
, void *buf
, ulong
*len
)
50 value
= rd_reg_byte(window
);
52 qla27xx_insert32(value
, buf
, len
);
56 qla27xx_read16(void __iomem
*window
, void *buf
, ulong
*len
)
61 value
= rd_reg_word(window
);
63 qla27xx_insert32(value
, buf
, len
);
67 qla27xx_read32(void __iomem
*window
, void *buf
, ulong
*len
)
72 value
= rd_reg_dword(window
);
74 qla27xx_insert32(value
, buf
, len
);
77 static inline void (*qla27xx_read_vector(uint width
))(void __iomem
*, void *, ulong
*)
80 (width
== 1) ? qla27xx_read8
:
81 (width
== 2) ? qla27xx_read16
:
86 qla27xx_read_reg(__iomem
struct device_reg_24xx
*reg
,
87 uint offset
, void *buf
, ulong
*len
)
89 void __iomem
*window
= (void __iomem
*)reg
+ offset
;
91 qla27xx_read32(window
, buf
, len
);
95 qla27xx_write_reg(__iomem
struct device_reg_24xx
*reg
,
96 uint offset
, uint32_t data
, void *buf
)
99 void __iomem
*window
= (void __iomem
*)reg
+ offset
;
101 wrt_reg_dword(window
, data
);
106 qla27xx_read_window(__iomem
struct device_reg_24xx
*reg
,
107 uint32_t addr
, uint offset
, uint count
, uint width
, void *buf
,
110 void __iomem
*window
= (void __iomem
*)reg
+ offset
;
111 void (*readn
)(void __iomem
*, void *, ulong
*) = qla27xx_read_vector(width
);
113 qla27xx_write_reg(reg
, IOBAR(reg
), addr
, buf
);
115 qla27xx_insert32(addr
, buf
, len
);
116 readn(window
, buf
, len
);
123 qla27xx_skip_entry(struct qla27xx_fwdt_entry
*ent
, void *buf
)
126 ent
->hdr
.driver_flags
|= DRIVER_FLAG_SKIP_ENTRY
;
129 static inline struct qla27xx_fwdt_entry
*
130 qla27xx_next_entry(struct qla27xx_fwdt_entry
*ent
)
132 return (void *)ent
+ le32_to_cpu(ent
->hdr
.size
);
135 static struct qla27xx_fwdt_entry
*
136 qla27xx_fwdt_entry_t0(struct scsi_qla_host
*vha
,
137 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
139 ql_dbg(ql_dbg_misc
, vha
, 0xd100,
140 "%s: nop [%lx]\n", __func__
, *len
);
141 qla27xx_skip_entry(ent
, buf
);
143 return qla27xx_next_entry(ent
);
146 static struct qla27xx_fwdt_entry
*
147 qla27xx_fwdt_entry_t255(struct scsi_qla_host
*vha
,
148 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
150 ql_dbg(ql_dbg_misc
, vha
, 0xd1ff,
151 "%s: end [%lx]\n", __func__
, *len
);
152 qla27xx_skip_entry(ent
, buf
);
158 static struct qla27xx_fwdt_entry
*
159 qla27xx_fwdt_entry_t256(struct scsi_qla_host
*vha
,
160 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
162 ulong addr
= le32_to_cpu(ent
->t256
.base_addr
);
163 uint offset
= ent
->t256
.pci_offset
;
164 ulong count
= le16_to_cpu(ent
->t256
.reg_count
);
165 uint width
= ent
->t256
.reg_width
;
167 ql_dbg(ql_dbg_misc
, vha
, 0xd200,
168 "%s: rdio t1 [%lx]\n", __func__
, *len
);
169 qla27xx_read_window(ISPREG(vha
), addr
, offset
, count
, width
, buf
, len
);
171 return qla27xx_next_entry(ent
);
174 static struct qla27xx_fwdt_entry
*
175 qla27xx_fwdt_entry_t257(struct scsi_qla_host
*vha
,
176 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
178 ulong addr
= le32_to_cpu(ent
->t257
.base_addr
);
179 uint offset
= ent
->t257
.pci_offset
;
180 ulong data
= le32_to_cpu(ent
->t257
.write_data
);
182 ql_dbg(ql_dbg_misc
, vha
, 0xd201,
183 "%s: wrio t1 [%lx]\n", __func__
, *len
);
184 qla27xx_write_reg(ISPREG(vha
), IOBASE(vha
), addr
, buf
);
185 qla27xx_write_reg(ISPREG(vha
), offset
, data
, buf
);
187 return qla27xx_next_entry(ent
);
190 static struct qla27xx_fwdt_entry
*
191 qla27xx_fwdt_entry_t258(struct scsi_qla_host
*vha
,
192 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
194 uint banksel
= ent
->t258
.banksel_offset
;
195 ulong bank
= le32_to_cpu(ent
->t258
.bank
);
196 ulong addr
= le32_to_cpu(ent
->t258
.base_addr
);
197 uint offset
= ent
->t258
.pci_offset
;
198 uint count
= le16_to_cpu(ent
->t258
.reg_count
);
199 uint width
= ent
->t258
.reg_width
;
201 ql_dbg(ql_dbg_misc
, vha
, 0xd202,
202 "%s: rdio t2 [%lx]\n", __func__
, *len
);
203 qla27xx_write_reg(ISPREG(vha
), banksel
, bank
, buf
);
204 qla27xx_read_window(ISPREG(vha
), addr
, offset
, count
, width
, buf
, len
);
206 return qla27xx_next_entry(ent
);
209 static struct qla27xx_fwdt_entry
*
210 qla27xx_fwdt_entry_t259(struct scsi_qla_host
*vha
,
211 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
213 ulong addr
= le32_to_cpu(ent
->t259
.base_addr
);
214 uint banksel
= ent
->t259
.banksel_offset
;
215 ulong bank
= le32_to_cpu(ent
->t259
.bank
);
216 uint offset
= ent
->t259
.pci_offset
;
217 ulong data
= le32_to_cpu(ent
->t259
.write_data
);
219 ql_dbg(ql_dbg_misc
, vha
, 0xd203,
220 "%s: wrio t2 [%lx]\n", __func__
, *len
);
221 qla27xx_write_reg(ISPREG(vha
), IOBASE(vha
), addr
, buf
);
222 qla27xx_write_reg(ISPREG(vha
), banksel
, bank
, buf
);
223 qla27xx_write_reg(ISPREG(vha
), offset
, data
, buf
);
225 return qla27xx_next_entry(ent
);
228 static struct qla27xx_fwdt_entry
*
229 qla27xx_fwdt_entry_t260(struct scsi_qla_host
*vha
,
230 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
232 uint offset
= ent
->t260
.pci_offset
;
234 ql_dbg(ql_dbg_misc
, vha
, 0xd204,
235 "%s: rdpci [%lx]\n", __func__
, *len
);
236 qla27xx_insert32(offset
, buf
, len
);
237 qla27xx_read_reg(ISPREG(vha
), offset
, buf
, len
);
239 return qla27xx_next_entry(ent
);
242 static struct qla27xx_fwdt_entry
*
243 qla27xx_fwdt_entry_t261(struct scsi_qla_host
*vha
,
244 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
246 uint offset
= ent
->t261
.pci_offset
;
247 ulong data
= le32_to_cpu(ent
->t261
.write_data
);
249 ql_dbg(ql_dbg_misc
, vha
, 0xd205,
250 "%s: wrpci [%lx]\n", __func__
, *len
);
251 qla27xx_write_reg(ISPREG(vha
), offset
, data
, buf
);
253 return qla27xx_next_entry(ent
);
256 static struct qla27xx_fwdt_entry
*
257 qla27xx_fwdt_entry_t262(struct scsi_qla_host
*vha
,
258 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
260 uint area
= ent
->t262
.ram_area
;
261 ulong start
= le32_to_cpu(ent
->t262
.start_addr
);
262 ulong end
= le32_to_cpu(ent
->t262
.end_addr
);
266 ql_dbg(ql_dbg_misc
, vha
, 0xd206,
267 "%s: rdram(%x) [%lx]\n", __func__
, ent
->t262
.ram_area
, *len
);
269 if (area
== T262_RAM_AREA_CRITICAL_RAM
) {
271 } else if (area
== T262_RAM_AREA_EXTERNAL_RAM
) {
272 end
= vha
->hw
->fw_memory_size
;
274 ent
->t262
.end_addr
= cpu_to_le32(end
);
275 } else if (area
== T262_RAM_AREA_SHARED_RAM
) {
276 start
= vha
->hw
->fw_shared_ram_start
;
277 end
= vha
->hw
->fw_shared_ram_end
;
279 ent
->t262
.start_addr
= cpu_to_le32(start
);
280 ent
->t262
.end_addr
= cpu_to_le32(end
);
282 } else if (area
== T262_RAM_AREA_DDR_RAM
) {
283 start
= vha
->hw
->fw_ddr_ram_start
;
284 end
= vha
->hw
->fw_ddr_ram_end
;
286 ent
->t262
.start_addr
= cpu_to_le32(start
);
287 ent
->t262
.end_addr
= cpu_to_le32(end
);
289 } else if (area
== T262_RAM_AREA_MISC
) {
291 ent
->t262
.start_addr
= cpu_to_le32(start
);
292 ent
->t262
.end_addr
= cpu_to_le32(end
);
295 ql_dbg(ql_dbg_misc
, vha
, 0xd022,
296 "%s: unknown area %x\n", __func__
, area
);
297 qla27xx_skip_entry(ent
, buf
);
301 if (end
< start
|| start
== 0 || end
== 0) {
302 ql_dbg(ql_dbg_misc
, vha
, 0xd023,
303 "%s: unusable range (start=%lx end=%lx)\n",
304 __func__
, start
, end
);
305 qla27xx_skip_entry(ent
, buf
);
309 dwords
= end
- start
+ 1;
312 rc
= qla24xx_dump_ram(vha
->hw
, start
, buf
, dwords
, &buf
);
313 if (rc
!= QLA_SUCCESS
) {
314 ql_dbg(ql_dbg_async
, vha
, 0xffff,
315 "%s: dump ram MB failed. Area %xh start %lxh end %lxh\n",
316 __func__
, area
, start
, end
);
317 return INVALID_ENTRY
;
320 *len
+= dwords
* sizeof(uint32_t);
322 return qla27xx_next_entry(ent
);
325 static struct qla27xx_fwdt_entry
*
326 qla27xx_fwdt_entry_t263(struct scsi_qla_host
*vha
,
327 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
329 uint type
= ent
->t263
.queue_type
;
334 ql_dbg(ql_dbg_misc
+ ql_dbg_verbose
, vha
, 0xd207,
335 "%s: getq(%x) [%lx]\n", __func__
, type
, *len
);
336 if (type
== T263_QUEUE_TYPE_REQ
) {
337 for (i
= 0; i
< vha
->hw
->max_req_queues
; i
++) {
338 struct req_que
*req
= vha
->hw
->req_q_map
[i
];
342 req
->length
: REQUEST_ENTRY_CNT_24XX
;
343 qla27xx_insert16(i
, buf
, len
);
344 qla27xx_insert16(length
, buf
, len
);
345 qla27xx_insertbuf(req
? req
->ring
: NULL
,
346 length
* sizeof(*req
->ring
), buf
, len
);
350 } else if (type
== T263_QUEUE_TYPE_RSP
) {
351 for (i
= 0; i
< vha
->hw
->max_rsp_queues
; i
++) {
352 struct rsp_que
*rsp
= vha
->hw
->rsp_q_map
[i
];
356 rsp
->length
: RESPONSE_ENTRY_CNT_MQ
;
357 qla27xx_insert16(i
, buf
, len
);
358 qla27xx_insert16(length
, buf
, len
);
359 qla27xx_insertbuf(rsp
? rsp
->ring
: NULL
,
360 length
* sizeof(*rsp
->ring
), buf
, len
);
364 } else if (QLA_TGT_MODE_ENABLED() &&
365 ent
->t263
.queue_type
== T263_QUEUE_TYPE_ATIO
) {
366 struct qla_hw_data
*ha
= vha
->hw
;
367 struct atio
*atr
= ha
->tgt
.atio_ring
;
370 length
= ha
->tgt
.atio_q_length
;
371 qla27xx_insert16(0, buf
, len
);
372 qla27xx_insert16(length
, buf
, len
);
373 qla27xx_insertbuf(atr
, length
* sizeof(*atr
), buf
, len
);
377 ql_dbg(ql_dbg_misc
, vha
, 0xd026,
378 "%s: unknown queue %x\n", __func__
, type
);
379 qla27xx_skip_entry(ent
, buf
);
384 ent
->t263
.num_queues
= count
;
386 qla27xx_skip_entry(ent
, buf
);
389 return qla27xx_next_entry(ent
);
392 static struct qla27xx_fwdt_entry
*
393 qla27xx_fwdt_entry_t264(struct scsi_qla_host
*vha
,
394 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
396 ql_dbg(ql_dbg_misc
, vha
, 0xd208,
397 "%s: getfce [%lx]\n", __func__
, *len
);
400 ent
->t264
.fce_trace_size
= FCE_SIZE
;
401 ent
->t264
.write_pointer
= vha
->hw
->fce_wr
;
402 ent
->t264
.base_pointer
= vha
->hw
->fce_dma
;
403 ent
->t264
.fce_enable_mb0
= vha
->hw
->fce_mb
[0];
404 ent
->t264
.fce_enable_mb2
= vha
->hw
->fce_mb
[2];
405 ent
->t264
.fce_enable_mb3
= vha
->hw
->fce_mb
[3];
406 ent
->t264
.fce_enable_mb4
= vha
->hw
->fce_mb
[4];
407 ent
->t264
.fce_enable_mb5
= vha
->hw
->fce_mb
[5];
408 ent
->t264
.fce_enable_mb6
= vha
->hw
->fce_mb
[6];
410 qla27xx_insertbuf(vha
->hw
->fce
, FCE_SIZE
, buf
, len
);
412 ql_dbg(ql_dbg_misc
, vha
, 0xd027,
413 "%s: missing fce\n", __func__
);
414 qla27xx_skip_entry(ent
, buf
);
417 return qla27xx_next_entry(ent
);
420 static struct qla27xx_fwdt_entry
*
421 qla27xx_fwdt_entry_t265(struct scsi_qla_host
*vha
,
422 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
424 ql_dbg(ql_dbg_misc
+ ql_dbg_verbose
, vha
, 0xd209,
425 "%s: pause risc [%lx]\n", __func__
, *len
);
427 qla24xx_pause_risc(ISPREG(vha
), vha
->hw
);
429 return qla27xx_next_entry(ent
);
432 static struct qla27xx_fwdt_entry
*
433 qla27xx_fwdt_entry_t266(struct scsi_qla_host
*vha
,
434 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
436 ql_dbg(ql_dbg_misc
, vha
, 0xd20a,
437 "%s: reset risc [%lx]\n", __func__
, *len
);
439 if (qla24xx_soft_reset(vha
->hw
) != QLA_SUCCESS
) {
440 ql_dbg(ql_dbg_async
, vha
, 0x5001,
441 "%s: unable to soft reset\n", __func__
);
442 return INVALID_ENTRY
;
446 return qla27xx_next_entry(ent
);
449 static struct qla27xx_fwdt_entry
*
450 qla27xx_fwdt_entry_t267(struct scsi_qla_host
*vha
,
451 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
453 uint offset
= ent
->t267
.pci_offset
;
454 ulong data
= le32_to_cpu(ent
->t267
.data
);
456 ql_dbg(ql_dbg_misc
, vha
, 0xd20b,
457 "%s: dis intr [%lx]\n", __func__
, *len
);
458 qla27xx_write_reg(ISPREG(vha
), offset
, data
, buf
);
460 return qla27xx_next_entry(ent
);
463 static struct qla27xx_fwdt_entry
*
464 qla27xx_fwdt_entry_t268(struct scsi_qla_host
*vha
,
465 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
467 ql_dbg(ql_dbg_misc
, vha
, 0xd20c,
468 "%s: gethb(%x) [%lx]\n", __func__
, ent
->t268
.buf_type
, *len
);
469 switch (ent
->t268
.buf_type
) {
470 case T268_BUF_TYPE_EXTD_TRACE
:
473 ent
->t268
.buf_size
= EFT_SIZE
;
474 ent
->t268
.start_addr
= vha
->hw
->eft_dma
;
476 qla27xx_insertbuf(vha
->hw
->eft
, EFT_SIZE
, buf
, len
);
478 ql_dbg(ql_dbg_misc
, vha
, 0xd028,
479 "%s: missing eft\n", __func__
);
480 qla27xx_skip_entry(ent
, buf
);
483 case T268_BUF_TYPE_EXCH_BUFOFF
:
484 if (vha
->hw
->exchoffld_buf
) {
486 ent
->t268
.buf_size
= vha
->hw
->exchoffld_size
;
487 ent
->t268
.start_addr
=
488 vha
->hw
->exchoffld_buf_dma
;
490 qla27xx_insertbuf(vha
->hw
->exchoffld_buf
,
491 vha
->hw
->exchoffld_size
, buf
, len
);
493 ql_dbg(ql_dbg_misc
, vha
, 0xd028,
494 "%s: missing exch offld\n", __func__
);
495 qla27xx_skip_entry(ent
, buf
);
498 case T268_BUF_TYPE_EXTD_LOGIN
:
499 if (vha
->hw
->exlogin_buf
) {
501 ent
->t268
.buf_size
= vha
->hw
->exlogin_size
;
502 ent
->t268
.start_addr
=
503 vha
->hw
->exlogin_buf_dma
;
505 qla27xx_insertbuf(vha
->hw
->exlogin_buf
,
506 vha
->hw
->exlogin_size
, buf
, len
);
508 ql_dbg(ql_dbg_misc
, vha
, 0xd028,
509 "%s: missing ext login\n", __func__
);
510 qla27xx_skip_entry(ent
, buf
);
514 case T268_BUF_TYPE_REQ_MIRROR
:
515 case T268_BUF_TYPE_RSP_MIRROR
:
517 * Mirror pointers are not implemented in the
518 * driver, instead shadow pointers are used by
519 * the drier. Skip these entries.
521 qla27xx_skip_entry(ent
, buf
);
524 ql_dbg(ql_dbg_async
, vha
, 0xd02b,
525 "%s: unknown buffer %x\n", __func__
, ent
->t268
.buf_type
);
526 qla27xx_skip_entry(ent
, buf
);
530 return qla27xx_next_entry(ent
);
533 static struct qla27xx_fwdt_entry
*
534 qla27xx_fwdt_entry_t269(struct scsi_qla_host
*vha
,
535 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
537 ql_dbg(ql_dbg_misc
, vha
, 0xd20d,
538 "%s: scratch [%lx]\n", __func__
, *len
);
539 qla27xx_insert32(0xaaaaaaaa, buf
, len
);
540 qla27xx_insert32(0xbbbbbbbb, buf
, len
);
541 qla27xx_insert32(0xcccccccc, buf
, len
);
542 qla27xx_insert32(0xdddddddd, buf
, len
);
543 qla27xx_insert32(*len
+ sizeof(uint32_t), buf
, len
);
545 ent
->t269
.scratch_size
= 5 * sizeof(uint32_t);
547 return qla27xx_next_entry(ent
);
550 static struct qla27xx_fwdt_entry
*
551 qla27xx_fwdt_entry_t270(struct scsi_qla_host
*vha
,
552 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
554 ulong addr
= le32_to_cpu(ent
->t270
.addr
);
555 ulong dwords
= le32_to_cpu(ent
->t270
.count
);
557 ql_dbg(ql_dbg_misc
, vha
, 0xd20e,
558 "%s: rdremreg [%lx]\n", __func__
, *len
);
559 qla27xx_write_reg(ISPREG(vha
), IOBASE_ADDR
, 0x40, buf
);
561 qla27xx_write_reg(ISPREG(vha
), 0xc0, addr
|0x80000000, buf
);
562 qla27xx_insert32(addr
, buf
, len
);
563 qla27xx_read_reg(ISPREG(vha
), 0xc4, buf
, len
);
564 addr
+= sizeof(uint32_t);
567 return qla27xx_next_entry(ent
);
570 static struct qla27xx_fwdt_entry
*
571 qla27xx_fwdt_entry_t271(struct scsi_qla_host
*vha
,
572 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
574 ulong addr
= le32_to_cpu(ent
->t271
.addr
);
575 ulong data
= le32_to_cpu(ent
->t271
.data
);
577 ql_dbg(ql_dbg_misc
, vha
, 0xd20f,
578 "%s: wrremreg [%lx]\n", __func__
, *len
);
579 qla27xx_write_reg(ISPREG(vha
), IOBASE(vha
), 0x40, buf
);
580 qla27xx_write_reg(ISPREG(vha
), 0xc4, data
, buf
);
581 qla27xx_write_reg(ISPREG(vha
), 0xc0, addr
, buf
);
583 return qla27xx_next_entry(ent
);
586 static struct qla27xx_fwdt_entry
*
587 qla27xx_fwdt_entry_t272(struct scsi_qla_host
*vha
,
588 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
590 ulong dwords
= le32_to_cpu(ent
->t272
.count
);
591 ulong start
= le32_to_cpu(ent
->t272
.addr
);
593 ql_dbg(ql_dbg_misc
, vha
, 0xd210,
594 "%s: rdremram [%lx]\n", __func__
, *len
);
596 ql_dbg(ql_dbg_misc
, vha
, 0xd02c,
597 "%s: @%lx -> (%lx dwords)\n", __func__
, start
, dwords
);
599 qla27xx_dump_mpi_ram(vha
->hw
, start
, buf
, dwords
, &buf
);
601 *len
+= dwords
* sizeof(uint32_t);
603 return qla27xx_next_entry(ent
);
606 static struct qla27xx_fwdt_entry
*
607 qla27xx_fwdt_entry_t273(struct scsi_qla_host
*vha
,
608 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
610 ulong dwords
= le32_to_cpu(ent
->t273
.count
);
611 ulong addr
= le32_to_cpu(ent
->t273
.addr
);
614 ql_dbg(ql_dbg_misc
, vha
, 0xd211,
615 "%s: pcicfg [%lx]\n", __func__
, *len
);
618 if (pci_read_config_dword(vha
->hw
->pdev
, addr
, &value
))
619 ql_dbg(ql_dbg_misc
, vha
, 0xd02d,
620 "%s: failed pcicfg read at %lx\n", __func__
, addr
);
621 qla27xx_insert32(addr
, buf
, len
);
622 qla27xx_insert32(value
, buf
, len
);
623 addr
+= sizeof(uint32_t);
626 return qla27xx_next_entry(ent
);
629 static struct qla27xx_fwdt_entry
*
630 qla27xx_fwdt_entry_t274(struct scsi_qla_host
*vha
,
631 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
633 ulong type
= ent
->t274
.queue_type
;
637 ql_dbg(ql_dbg_misc
+ ql_dbg_verbose
, vha
, 0xd212,
638 "%s: getqsh(%lx) [%lx]\n", __func__
, type
, *len
);
639 if (type
== T274_QUEUE_TYPE_REQ_SHAD
) {
640 for (i
= 0; i
< vha
->hw
->max_req_queues
; i
++) {
641 struct req_que
*req
= vha
->hw
->req_q_map
[i
];
644 qla27xx_insert16(i
, buf
, len
);
645 qla27xx_insert16(1, buf
, len
);
646 qla27xx_insert32(req
&& req
->out_ptr
?
647 *req
->out_ptr
: 0, buf
, len
);
651 } else if (type
== T274_QUEUE_TYPE_RSP_SHAD
) {
652 for (i
= 0; i
< vha
->hw
->max_rsp_queues
; i
++) {
653 struct rsp_que
*rsp
= vha
->hw
->rsp_q_map
[i
];
656 qla27xx_insert16(i
, buf
, len
);
657 qla27xx_insert16(1, buf
, len
);
658 qla27xx_insert32(rsp
&& rsp
->in_ptr
?
659 *rsp
->in_ptr
: 0, buf
, len
);
663 } else if (QLA_TGT_MODE_ENABLED() &&
664 ent
->t274
.queue_type
== T274_QUEUE_TYPE_ATIO_SHAD
) {
665 struct qla_hw_data
*ha
= vha
->hw
;
666 struct atio
*atr
= ha
->tgt
.atio_ring_ptr
;
669 qla27xx_insert16(0, buf
, len
);
670 qla27xx_insert16(1, buf
, len
);
671 qla27xx_insert32(ha
->tgt
.atio_q_in
?
672 readl(ha
->tgt
.atio_q_in
) : 0, buf
, len
);
676 ql_dbg(ql_dbg_misc
, vha
, 0xd02f,
677 "%s: unknown queue %lx\n", __func__
, type
);
678 qla27xx_skip_entry(ent
, buf
);
683 ent
->t274
.num_queues
= count
;
685 qla27xx_skip_entry(ent
, buf
);
688 return qla27xx_next_entry(ent
);
691 static struct qla27xx_fwdt_entry
*
692 qla27xx_fwdt_entry_t275(struct scsi_qla_host
*vha
,
693 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
695 ulong offset
= offsetof(typeof(*ent
), t275
.buffer
);
696 ulong length
= le32_to_cpu(ent
->t275
.length
);
697 ulong size
= le32_to_cpu(ent
->hdr
.size
);
698 void *buffer
= ent
->t275
.buffer
;
700 ql_dbg(ql_dbg_misc
+ ql_dbg_verbose
, vha
, 0xd213,
701 "%s: buffer(%lx) [%lx]\n", __func__
, length
, *len
);
703 ql_dbg(ql_dbg_misc
, vha
, 0xd020,
704 "%s: buffer zero length\n", __func__
);
705 qla27xx_skip_entry(ent
, buf
);
708 if (offset
+ length
> size
) {
709 length
= size
- offset
;
710 ql_dbg(ql_dbg_misc
, vha
, 0xd030,
711 "%s: buffer overflow, truncate [%lx]\n", __func__
, length
);
712 ent
->t275
.length
= cpu_to_le32(length
);
715 qla27xx_insertbuf(buffer
, length
, buf
, len
);
717 return qla27xx_next_entry(ent
);
720 static struct qla27xx_fwdt_entry
*
721 qla27xx_fwdt_entry_t276(struct scsi_qla_host
*vha
,
722 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
724 ql_dbg(ql_dbg_misc
+ ql_dbg_verbose
, vha
, 0xd214,
725 "%s: cond [%lx]\n", __func__
, *len
);
728 ulong cond1
= le32_to_cpu(ent
->t276
.cond1
);
729 ulong cond2
= le32_to_cpu(ent
->t276
.cond2
);
730 uint type
= vha
->hw
->pdev
->device
>> 4 & 0xf;
731 uint func
= vha
->hw
->port_no
& 0x3;
733 if (type
!= cond1
|| func
!= cond2
) {
734 struct qla27xx_fwdt_template
*tmp
= buf
;
737 ent
= qla27xx_next_entry(ent
);
738 qla27xx_skip_entry(ent
, buf
);
742 return qla27xx_next_entry(ent
);
745 static struct qla27xx_fwdt_entry
*
746 qla27xx_fwdt_entry_t277(struct scsi_qla_host
*vha
,
747 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
749 ulong cmd_addr
= le32_to_cpu(ent
->t277
.cmd_addr
);
750 ulong wr_cmd_data
= le32_to_cpu(ent
->t277
.wr_cmd_data
);
751 ulong data_addr
= le32_to_cpu(ent
->t277
.data_addr
);
753 ql_dbg(ql_dbg_misc
+ ql_dbg_verbose
, vha
, 0xd215,
754 "%s: rdpep [%lx]\n", __func__
, *len
);
755 qla27xx_insert32(wr_cmd_data
, buf
, len
);
756 qla27xx_write_reg(ISPREG(vha
), cmd_addr
, wr_cmd_data
, buf
);
757 qla27xx_read_reg(ISPREG(vha
), data_addr
, buf
, len
);
759 return qla27xx_next_entry(ent
);
762 static struct qla27xx_fwdt_entry
*
763 qla27xx_fwdt_entry_t278(struct scsi_qla_host
*vha
,
764 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
766 ulong cmd_addr
= le32_to_cpu(ent
->t278
.cmd_addr
);
767 ulong wr_cmd_data
= le32_to_cpu(ent
->t278
.wr_cmd_data
);
768 ulong data_addr
= le32_to_cpu(ent
->t278
.data_addr
);
769 ulong wr_data
= le32_to_cpu(ent
->t278
.wr_data
);
771 ql_dbg(ql_dbg_misc
+ ql_dbg_verbose
, vha
, 0xd216,
772 "%s: wrpep [%lx]\n", __func__
, *len
);
773 qla27xx_write_reg(ISPREG(vha
), data_addr
, wr_data
, buf
);
774 qla27xx_write_reg(ISPREG(vha
), cmd_addr
, wr_cmd_data
, buf
);
776 return qla27xx_next_entry(ent
);
779 static struct qla27xx_fwdt_entry
*
780 qla27xx_fwdt_entry_other(struct scsi_qla_host
*vha
,
781 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
783 ulong type
= le32_to_cpu(ent
->hdr
.type
);
785 ql_dbg(ql_dbg_misc
, vha
, 0xd2ff,
786 "%s: other %lx [%lx]\n", __func__
, type
, *len
);
787 qla27xx_skip_entry(ent
, buf
);
789 return qla27xx_next_entry(ent
);
794 typeof(qla27xx_fwdt_entry_other
)(*call
);
795 } qla27xx_fwdt_entry_call
[] = {
796 { ENTRY_TYPE_NOP
, qla27xx_fwdt_entry_t0
},
797 { ENTRY_TYPE_TMP_END
, qla27xx_fwdt_entry_t255
},
798 { ENTRY_TYPE_RD_IOB_T1
, qla27xx_fwdt_entry_t256
},
799 { ENTRY_TYPE_WR_IOB_T1
, qla27xx_fwdt_entry_t257
},
800 { ENTRY_TYPE_RD_IOB_T2
, qla27xx_fwdt_entry_t258
},
801 { ENTRY_TYPE_WR_IOB_T2
, qla27xx_fwdt_entry_t259
},
802 { ENTRY_TYPE_RD_PCI
, qla27xx_fwdt_entry_t260
},
803 { ENTRY_TYPE_WR_PCI
, qla27xx_fwdt_entry_t261
},
804 { ENTRY_TYPE_RD_RAM
, qla27xx_fwdt_entry_t262
},
805 { ENTRY_TYPE_GET_QUEUE
, qla27xx_fwdt_entry_t263
},
806 { ENTRY_TYPE_GET_FCE
, qla27xx_fwdt_entry_t264
},
807 { ENTRY_TYPE_PSE_RISC
, qla27xx_fwdt_entry_t265
},
808 { ENTRY_TYPE_RST_RISC
, qla27xx_fwdt_entry_t266
},
809 { ENTRY_TYPE_DIS_INTR
, qla27xx_fwdt_entry_t267
},
810 { ENTRY_TYPE_GET_HBUF
, qla27xx_fwdt_entry_t268
},
811 { ENTRY_TYPE_SCRATCH
, qla27xx_fwdt_entry_t269
},
812 { ENTRY_TYPE_RDREMREG
, qla27xx_fwdt_entry_t270
},
813 { ENTRY_TYPE_WRREMREG
, qla27xx_fwdt_entry_t271
},
814 { ENTRY_TYPE_RDREMRAM
, qla27xx_fwdt_entry_t272
},
815 { ENTRY_TYPE_PCICFG
, qla27xx_fwdt_entry_t273
},
816 { ENTRY_TYPE_GET_SHADOW
, qla27xx_fwdt_entry_t274
},
817 { ENTRY_TYPE_WRITE_BUF
, qla27xx_fwdt_entry_t275
},
818 { ENTRY_TYPE_CONDITIONAL
, qla27xx_fwdt_entry_t276
},
819 { ENTRY_TYPE_RDPEPREG
, qla27xx_fwdt_entry_t277
},
820 { ENTRY_TYPE_WRPEPREG
, qla27xx_fwdt_entry_t278
},
821 { -1, qla27xx_fwdt_entry_other
}
825 typeof(qla27xx_fwdt_entry_call
->call
)(qla27xx_find_entry(uint type
))
827 typeof(*qla27xx_fwdt_entry_call
) *list
= qla27xx_fwdt_entry_call
;
829 while (list
->type
< type
)
832 if (list
->type
== type
)
834 return qla27xx_fwdt_entry_other
;
838 qla27xx_walk_template(struct scsi_qla_host
*vha
,
839 struct qla27xx_fwdt_template
*tmp
, void *buf
, ulong
*len
)
841 struct qla27xx_fwdt_entry
*ent
= (void *)tmp
+
842 le32_to_cpu(tmp
->entry_offset
);
845 tmp
->count
= le32_to_cpu(tmp
->entry_count
);
846 ql_dbg(ql_dbg_misc
, vha
, 0xd01a,
847 "%s: entry count %u\n", __func__
, tmp
->count
);
848 while (ent
&& tmp
->count
--) {
849 type
= le32_to_cpu(ent
->hdr
.type
);
850 ent
= qla27xx_find_entry(type
)(vha
, ent
, buf
, len
);
854 if (ent
== INVALID_ENTRY
) {
856 ql_dbg(ql_dbg_async
, vha
, 0xffff,
857 "Unable to capture FW dump");
863 ql_dbg(ql_dbg_misc
, vha
, 0xd018,
864 "%s: entry count residual=+%u\n", __func__
, tmp
->count
);
867 ql_dbg(ql_dbg_misc
, vha
, 0xd019,
868 "%s: missing end entry\n", __func__
);
871 cpu_to_le32s(&tmp
->count
); /* endianize residual count */
875 qla27xx_time_stamp(struct qla27xx_fwdt_template
*tmp
)
877 tmp
->capture_timestamp
= cpu_to_le32(jiffies
);
881 qla27xx_driver_info(struct qla27xx_fwdt_template
*tmp
)
883 uint8_t v
[] = { 0, 0, 0, 0, 0, 0 };
885 WARN_ON_ONCE(sscanf(qla2x00_version_str
,
886 "%hhu.%hhu.%hhu.%hhu",
887 v
+ 0, v
+ 1, v
+ 2, v
+ 3) != 4);
889 tmp
->driver_info
[0] = cpu_to_le32(
890 v
[3] << 24 | v
[2] << 16 | v
[1] << 8 | v
[0]);
891 tmp
->driver_info
[1] = cpu_to_le32(v
[5] << 8 | v
[4]);
892 tmp
->driver_info
[2] = __constant_cpu_to_le32(0x12345678);
896 qla27xx_firmware_info(struct scsi_qla_host
*vha
,
897 struct qla27xx_fwdt_template
*tmp
)
899 tmp
->firmware_version
[0] = cpu_to_le32(vha
->hw
->fw_major_version
);
900 tmp
->firmware_version
[1] = cpu_to_le32(vha
->hw
->fw_minor_version
);
901 tmp
->firmware_version
[2] = cpu_to_le32(vha
->hw
->fw_subminor_version
);
902 tmp
->firmware_version
[3] = cpu_to_le32(
903 vha
->hw
->fw_attributes_h
<< 16 | vha
->hw
->fw_attributes
);
904 tmp
->firmware_version
[4] = cpu_to_le32(
905 vha
->hw
->fw_attributes_ext
[1] << 16 | vha
->hw
->fw_attributes_ext
[0]);
/* Stamp time, driver and firmware info into a template before walking it. */
static void
ql27xx_edit_template(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp)
{
	qla27xx_time_stamp(tmp);
	qla27xx_driver_info(tmp);
	qla27xx_firmware_info(vha, tmp);
}
917 static inline uint32_t
918 qla27xx_template_checksum(void *p
, ulong size
)
923 size
/= sizeof(*buf
);
925 for ( ; size
--; buf
++)
926 sum
+= le32_to_cpu(*buf
);
928 sum
= (sum
& 0xffffffff) + (sum
>> 32);
934 qla27xx_verify_template_checksum(struct qla27xx_fwdt_template
*tmp
)
936 return qla27xx_template_checksum(tmp
,
937 le32_to_cpu(tmp
->template_size
)) == 0;
941 qla27xx_verify_template_header(struct qla27xx_fwdt_template
*tmp
)
943 return le32_to_cpu(tmp
->template_type
) == TEMPLATE_TYPE_FWDUMP
;
947 qla27xx_execute_fwdt_template(struct scsi_qla_host
*vha
,
948 struct qla27xx_fwdt_template
*tmp
, void *buf
)
952 if (qla27xx_fwdt_template_valid(tmp
)) {
953 len
= le32_to_cpu(tmp
->template_size
);
954 tmp
= memcpy(buf
, tmp
, len
);
955 ql27xx_edit_template(vha
, tmp
);
956 qla27xx_walk_template(vha
, tmp
, buf
, &len
);
963 qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host
*vha
, void *p
)
965 struct qla27xx_fwdt_template
*tmp
= p
;
968 if (qla27xx_fwdt_template_valid(tmp
)) {
969 len
= le32_to_cpu(tmp
->template_size
);
970 qla27xx_walk_template(vha
, tmp
, NULL
, &len
);
977 qla27xx_fwdt_template_size(void *p
)
979 struct qla27xx_fwdt_template
*tmp
= p
;
981 return le32_to_cpu(tmp
->template_size
);
985 qla27xx_fwdt_template_valid(void *p
)
987 struct qla27xx_fwdt_template
*tmp
= p
;
989 if (!qla27xx_verify_template_header(tmp
)) {
990 ql_log(ql_log_warn
, NULL
, 0xd01c,
991 "%s: template type %x\n", __func__
,
992 le32_to_cpu(tmp
->template_type
));
996 if (!qla27xx_verify_template_checksum(tmp
)) {
997 ql_log(ql_log_warn
, NULL
, 0xd01d,
998 "%s: failed template checksum\n", __func__
);
1006 qla27xx_mpi_fwdump(scsi_qla_host_t
*vha
, int hardware_locked
)
1010 if (!hardware_locked
)
1011 spin_lock_irqsave(&vha
->hw
->hardware_lock
, flags
);
1012 if (!vha
->hw
->mpi_fw_dump
) {
1013 ql_log(ql_log_warn
, vha
, 0x02f3, "-> mpi_fwdump no buffer\n");
1015 struct fwdt
*fwdt
= &vha
->hw
->fwdt
[1];
1017 void *buf
= vha
->hw
->mpi_fw_dump
;
1018 bool walk_template_only
= false;
1020 if (vha
->hw
->mpi_fw_dumped
) {
1021 /* Use the spare area for any further dumps. */
1022 buf
+= fwdt
->dump_size
;
1023 walk_template_only
= true;
1024 ql_log(ql_log_warn
, vha
, 0x02f4,
1025 "-> MPI firmware already dumped -- dump saving to temporary buffer %p.\n",
1029 ql_log(ql_log_warn
, vha
, 0x02f5, "-> fwdt1 running...\n");
1030 if (!fwdt
->template) {
1031 ql_log(ql_log_warn
, vha
, 0x02f6,
1032 "-> fwdt1 no template\n");
1035 len
= qla27xx_execute_fwdt_template(vha
, fwdt
->template, buf
);
1038 } else if (len
!= fwdt
->dump_size
) {
1039 ql_log(ql_log_warn
, vha
, 0x02f7,
1040 "-> fwdt1 fwdump residual=%+ld\n",
1041 fwdt
->dump_size
- len
);
1043 vha
->hw
->stat
.num_mpi_reset
++;
1044 if (walk_template_only
)
1047 vha
->hw
->mpi_fw_dump_len
= len
;
1048 vha
->hw
->mpi_fw_dumped
= 1;
1050 ql_log(ql_log_warn
, vha
, 0x02f8,
1051 "-> MPI firmware dump saved to buffer (%lu/%p)\n",
1052 vha
->host_no
, vha
->hw
->mpi_fw_dump
);
1053 qla2x00_post_uevent_work(vha
, QLA_UEVENT_CODE_FW_DUMP
);
1057 if (!hardware_locked
)
1058 spin_unlock_irqrestore(&vha
->hw
->hardware_lock
, flags
);
1062 qla27xx_fwdump(scsi_qla_host_t
*vha
)
1064 lockdep_assert_held(&vha
->hw
->hardware_lock
);
1066 if (!vha
->hw
->fw_dump
) {
1067 ql_log(ql_log_warn
, vha
, 0xd01e, "-> fwdump no buffer\n");
1068 } else if (vha
->hw
->fw_dumped
) {
1069 ql_log(ql_log_warn
, vha
, 0xd01f,
1070 "-> Firmware already dumped (%p) -- ignoring request\n",
1073 struct fwdt
*fwdt
= vha
->hw
->fwdt
;
1075 void *buf
= vha
->hw
->fw_dump
;
1077 ql_log(ql_log_warn
, vha
, 0xd011, "-> fwdt0 running...\n");
1078 if (!fwdt
->template) {
1079 ql_log(ql_log_warn
, vha
, 0xd012,
1080 "-> fwdt0 no template\n");
1083 len
= qla27xx_execute_fwdt_template(vha
, fwdt
->template, buf
);
1086 } else if (len
!= fwdt
->dump_size
) {
1087 ql_log(ql_log_warn
, vha
, 0xd013,
1088 "-> fwdt0 fwdump residual=%+ld\n",
1089 fwdt
->dump_size
- len
);
1092 vha
->hw
->fw_dump_len
= len
;
1093 vha
->hw
->fw_dumped
= true;
1095 ql_log(ql_log_warn
, vha
, 0xd015,
1096 "-> Firmware dump saved to buffer (%lu/%p) <%lx>\n",
1097 vha
->host_no
, vha
->hw
->fw_dump
, vha
->hw
->fw_dump_cap_flags
);
1098 qla2x00_post_uevent_work(vha
, QLA_UEVENT_CODE_FW_DUMP
);