// SPDX-License-Identifier: GPL-2.0-only
/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 */
/* Convenience accessors for the ISP24xx register window of a vha. */
#define ISPREG(vha)	(&(vha)->hw->iobase->isp24)
/* Byte offset of the iobase_addr register within the register block. */
#define IOBAR(reg)	offsetof(typeof(*(reg)), iobase_addr)
#define IOBASE(vha)	IOBAR(ISPREG(vha))
/* Sentinel returned by entry handlers on an unrecoverable dump failure. */
#define INVALID_ENTRY ((struct qla27xx_fwdt_entry *)0xffffffffffffffffUL)
15 qla27xx_insert16(uint16_t value
, void *buf
, ulong
*len
)
19 *(__le16
*)buf
= cpu_to_le16(value
);
21 *len
+= sizeof(value
);
25 qla27xx_insert32(uint32_t value
, void *buf
, ulong
*len
)
29 *(__le32
*)buf
= cpu_to_le32(value
);
31 *len
+= sizeof(value
);
35 qla27xx_insertbuf(void *mem
, ulong size
, void *buf
, ulong
*len
)
37 if (buf
&& mem
&& size
) {
39 memcpy(buf
, mem
, size
);
45 qla27xx_read8(void __iomem
*window
, void *buf
, ulong
*len
)
50 value
= rd_reg_byte(window
);
52 qla27xx_insert32(value
, buf
, len
);
56 qla27xx_read16(void __iomem
*window
, void *buf
, ulong
*len
)
61 value
= rd_reg_word(window
);
63 qla27xx_insert32(value
, buf
, len
);
67 qla27xx_read32(void __iomem
*window
, void *buf
, ulong
*len
)
72 value
= rd_reg_dword(window
);
74 qla27xx_insert32(value
, buf
, len
);
77 static inline void (*qla27xx_read_vector(uint width
))(void __iomem
*, void *, ulong
*)
80 (width
== 1) ? qla27xx_read8
:
81 (width
== 2) ? qla27xx_read16
:
86 qla27xx_read_reg(__iomem
struct device_reg_24xx
*reg
,
87 uint offset
, void *buf
, ulong
*len
)
89 void __iomem
*window
= (void __iomem
*)reg
+ offset
;
91 qla27xx_read32(window
, buf
, len
);
95 qla27xx_write_reg(__iomem
struct device_reg_24xx
*reg
,
96 uint offset
, uint32_t data
, void *buf
)
99 void __iomem
*window
= (void __iomem
*)reg
+ offset
;
101 wrt_reg_dword(window
, data
);
106 qla27xx_read_window(__iomem
struct device_reg_24xx
*reg
,
107 uint32_t addr
, uint offset
, uint count
, uint width
, void *buf
,
110 void __iomem
*window
= (void __iomem
*)reg
+ offset
;
111 void (*readn
)(void __iomem
*, void *, ulong
*) = qla27xx_read_vector(width
);
113 qla27xx_write_reg(reg
, IOBAR(reg
), addr
, buf
);
115 qla27xx_insert32(addr
, buf
, len
);
116 readn(window
, buf
, len
);
123 qla27xx_skip_entry(struct qla27xx_fwdt_entry
*ent
, void *buf
)
126 ent
->hdr
.driver_flags
|= DRIVER_FLAG_SKIP_ENTRY
;
129 static inline struct qla27xx_fwdt_entry
*
130 qla27xx_next_entry(struct qla27xx_fwdt_entry
*ent
)
132 return (void *)ent
+ le32_to_cpu(ent
->hdr
.size
);
135 static struct qla27xx_fwdt_entry
*
136 qla27xx_fwdt_entry_t0(struct scsi_qla_host
*vha
,
137 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
139 ql_dbg(ql_dbg_misc
, vha
, 0xd100,
140 "%s: nop [%lx]\n", __func__
, *len
);
141 qla27xx_skip_entry(ent
, buf
);
143 return qla27xx_next_entry(ent
);
146 static struct qla27xx_fwdt_entry
*
147 qla27xx_fwdt_entry_t255(struct scsi_qla_host
*vha
,
148 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
150 ql_dbg(ql_dbg_misc
, vha
, 0xd1ff,
151 "%s: end [%lx]\n", __func__
, *len
);
152 qla27xx_skip_entry(ent
, buf
);
158 static struct qla27xx_fwdt_entry
*
159 qla27xx_fwdt_entry_t256(struct scsi_qla_host
*vha
,
160 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
162 ulong addr
= le32_to_cpu(ent
->t256
.base_addr
);
163 uint offset
= ent
->t256
.pci_offset
;
164 ulong count
= le16_to_cpu(ent
->t256
.reg_count
);
165 uint width
= ent
->t256
.reg_width
;
167 ql_dbg(ql_dbg_misc
, vha
, 0xd200,
168 "%s: rdio t1 [%lx]\n", __func__
, *len
);
169 qla27xx_read_window(ISPREG(vha
), addr
, offset
, count
, width
, buf
, len
);
171 return qla27xx_next_entry(ent
);
174 static struct qla27xx_fwdt_entry
*
175 qla27xx_fwdt_entry_t257(struct scsi_qla_host
*vha
,
176 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
178 ulong addr
= le32_to_cpu(ent
->t257
.base_addr
);
179 uint offset
= ent
->t257
.pci_offset
;
180 ulong data
= le32_to_cpu(ent
->t257
.write_data
);
182 ql_dbg(ql_dbg_misc
, vha
, 0xd201,
183 "%s: wrio t1 [%lx]\n", __func__
, *len
);
184 qla27xx_write_reg(ISPREG(vha
), IOBASE(vha
), addr
, buf
);
185 qla27xx_write_reg(ISPREG(vha
), offset
, data
, buf
);
187 return qla27xx_next_entry(ent
);
190 static struct qla27xx_fwdt_entry
*
191 qla27xx_fwdt_entry_t258(struct scsi_qla_host
*vha
,
192 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
194 uint banksel
= ent
->t258
.banksel_offset
;
195 ulong bank
= le32_to_cpu(ent
->t258
.bank
);
196 ulong addr
= le32_to_cpu(ent
->t258
.base_addr
);
197 uint offset
= ent
->t258
.pci_offset
;
198 uint count
= le16_to_cpu(ent
->t258
.reg_count
);
199 uint width
= ent
->t258
.reg_width
;
201 ql_dbg(ql_dbg_misc
, vha
, 0xd202,
202 "%s: rdio t2 [%lx]\n", __func__
, *len
);
203 qla27xx_write_reg(ISPREG(vha
), banksel
, bank
, buf
);
204 qla27xx_read_window(ISPREG(vha
), addr
, offset
, count
, width
, buf
, len
);
206 return qla27xx_next_entry(ent
);
209 static struct qla27xx_fwdt_entry
*
210 qla27xx_fwdt_entry_t259(struct scsi_qla_host
*vha
,
211 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
213 ulong addr
= le32_to_cpu(ent
->t259
.base_addr
);
214 uint banksel
= ent
->t259
.banksel_offset
;
215 ulong bank
= le32_to_cpu(ent
->t259
.bank
);
216 uint offset
= ent
->t259
.pci_offset
;
217 ulong data
= le32_to_cpu(ent
->t259
.write_data
);
219 ql_dbg(ql_dbg_misc
, vha
, 0xd203,
220 "%s: wrio t2 [%lx]\n", __func__
, *len
);
221 qla27xx_write_reg(ISPREG(vha
), IOBASE(vha
), addr
, buf
);
222 qla27xx_write_reg(ISPREG(vha
), banksel
, bank
, buf
);
223 qla27xx_write_reg(ISPREG(vha
), offset
, data
, buf
);
225 return qla27xx_next_entry(ent
);
228 static struct qla27xx_fwdt_entry
*
229 qla27xx_fwdt_entry_t260(struct scsi_qla_host
*vha
,
230 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
232 uint offset
= ent
->t260
.pci_offset
;
234 ql_dbg(ql_dbg_misc
, vha
, 0xd204,
235 "%s: rdpci [%lx]\n", __func__
, *len
);
236 qla27xx_insert32(offset
, buf
, len
);
237 qla27xx_read_reg(ISPREG(vha
), offset
, buf
, len
);
239 return qla27xx_next_entry(ent
);
242 static struct qla27xx_fwdt_entry
*
243 qla27xx_fwdt_entry_t261(struct scsi_qla_host
*vha
,
244 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
246 uint offset
= ent
->t261
.pci_offset
;
247 ulong data
= le32_to_cpu(ent
->t261
.write_data
);
249 ql_dbg(ql_dbg_misc
, vha
, 0xd205,
250 "%s: wrpci [%lx]\n", __func__
, *len
);
251 qla27xx_write_reg(ISPREG(vha
), offset
, data
, buf
);
253 return qla27xx_next_entry(ent
);
256 static struct qla27xx_fwdt_entry
*
257 qla27xx_fwdt_entry_t262(struct scsi_qla_host
*vha
,
258 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
260 uint area
= ent
->t262
.ram_area
;
261 ulong start
= le32_to_cpu(ent
->t262
.start_addr
);
262 ulong end
= le32_to_cpu(ent
->t262
.end_addr
);
266 ql_dbg(ql_dbg_misc
, vha
, 0xd206,
267 "%s: rdram(%x) [%lx]\n", __func__
, ent
->t262
.ram_area
, *len
);
269 if (area
== T262_RAM_AREA_CRITICAL_RAM
) {
271 } else if (area
== T262_RAM_AREA_EXTERNAL_RAM
) {
272 end
= vha
->hw
->fw_memory_size
;
274 ent
->t262
.end_addr
= cpu_to_le32(end
);
275 } else if (area
== T262_RAM_AREA_SHARED_RAM
) {
276 start
= vha
->hw
->fw_shared_ram_start
;
277 end
= vha
->hw
->fw_shared_ram_end
;
279 ent
->t262
.start_addr
= cpu_to_le32(start
);
280 ent
->t262
.end_addr
= cpu_to_le32(end
);
282 } else if (area
== T262_RAM_AREA_DDR_RAM
) {
283 start
= vha
->hw
->fw_ddr_ram_start
;
284 end
= vha
->hw
->fw_ddr_ram_end
;
286 ent
->t262
.start_addr
= cpu_to_le32(start
);
287 ent
->t262
.end_addr
= cpu_to_le32(end
);
289 } else if (area
== T262_RAM_AREA_MISC
) {
291 ent
->t262
.start_addr
= cpu_to_le32(start
);
292 ent
->t262
.end_addr
= cpu_to_le32(end
);
295 ql_dbg(ql_dbg_misc
, vha
, 0xd022,
296 "%s: unknown area %x\n", __func__
, area
);
297 qla27xx_skip_entry(ent
, buf
);
301 if (end
< start
|| start
== 0 || end
== 0) {
302 ql_dbg(ql_dbg_misc
, vha
, 0xd023,
303 "%s: unusable range (start=%lx end=%lx)\n",
304 __func__
, start
, end
);
305 qla27xx_skip_entry(ent
, buf
);
309 dwords
= end
- start
+ 1;
312 rc
= qla24xx_dump_ram(vha
->hw
, start
, buf
, dwords
, &buf
);
313 if (rc
!= QLA_SUCCESS
) {
314 ql_dbg(ql_dbg_async
, vha
, 0xffff,
315 "%s: dump ram MB failed. Area %xh start %lxh end %lxh\n",
316 __func__
, area
, start
, end
);
317 return INVALID_ENTRY
;
320 *len
+= dwords
* sizeof(uint32_t);
322 return qla27xx_next_entry(ent
);
325 static struct qla27xx_fwdt_entry
*
326 qla27xx_fwdt_entry_t263(struct scsi_qla_host
*vha
,
327 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
329 uint type
= ent
->t263
.queue_type
;
334 ql_dbg(ql_dbg_misc
+ ql_dbg_verbose
, vha
, 0xd207,
335 "%s: getq(%x) [%lx]\n", __func__
, type
, *len
);
336 if (type
== T263_QUEUE_TYPE_REQ
) {
337 for (i
= 0; i
< vha
->hw
->max_req_queues
; i
++) {
338 struct req_que
*req
= vha
->hw
->req_q_map
[i
];
342 req
->length
: REQUEST_ENTRY_CNT_24XX
;
343 qla27xx_insert16(i
, buf
, len
);
344 qla27xx_insert16(length
, buf
, len
);
345 qla27xx_insertbuf(req
? req
->ring
: NULL
,
346 length
* sizeof(*req
->ring
), buf
, len
);
350 } else if (type
== T263_QUEUE_TYPE_RSP
) {
351 for (i
= 0; i
< vha
->hw
->max_rsp_queues
; i
++) {
352 struct rsp_que
*rsp
= vha
->hw
->rsp_q_map
[i
];
356 rsp
->length
: RESPONSE_ENTRY_CNT_MQ
;
357 qla27xx_insert16(i
, buf
, len
);
358 qla27xx_insert16(length
, buf
, len
);
359 qla27xx_insertbuf(rsp
? rsp
->ring
: NULL
,
360 length
* sizeof(*rsp
->ring
), buf
, len
);
364 } else if (QLA_TGT_MODE_ENABLED() &&
365 ent
->t263
.queue_type
== T263_QUEUE_TYPE_ATIO
) {
366 struct qla_hw_data
*ha
= vha
->hw
;
367 struct atio
*atr
= ha
->tgt
.atio_ring
;
370 length
= ha
->tgt
.atio_q_length
;
371 qla27xx_insert16(0, buf
, len
);
372 qla27xx_insert16(length
, buf
, len
);
373 qla27xx_insertbuf(atr
, length
* sizeof(*atr
), buf
, len
);
377 ql_dbg(ql_dbg_misc
, vha
, 0xd026,
378 "%s: unknown queue %x\n", __func__
, type
);
379 qla27xx_skip_entry(ent
, buf
);
384 ent
->t263
.num_queues
= count
;
386 qla27xx_skip_entry(ent
, buf
);
389 return qla27xx_next_entry(ent
);
392 static struct qla27xx_fwdt_entry
*
393 qla27xx_fwdt_entry_t264(struct scsi_qla_host
*vha
,
394 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
396 ql_dbg(ql_dbg_misc
, vha
, 0xd208,
397 "%s: getfce [%lx]\n", __func__
, *len
);
400 ent
->t264
.fce_trace_size
= FCE_SIZE
;
401 ent
->t264
.write_pointer
= vha
->hw
->fce_wr
;
402 ent
->t264
.base_pointer
= vha
->hw
->fce_dma
;
403 ent
->t264
.fce_enable_mb0
= vha
->hw
->fce_mb
[0];
404 ent
->t264
.fce_enable_mb2
= vha
->hw
->fce_mb
[2];
405 ent
->t264
.fce_enable_mb3
= vha
->hw
->fce_mb
[3];
406 ent
->t264
.fce_enable_mb4
= vha
->hw
->fce_mb
[4];
407 ent
->t264
.fce_enable_mb5
= vha
->hw
->fce_mb
[5];
408 ent
->t264
.fce_enable_mb6
= vha
->hw
->fce_mb
[6];
410 qla27xx_insertbuf(vha
->hw
->fce
, FCE_SIZE
, buf
, len
);
412 ql_dbg(ql_dbg_misc
, vha
, 0xd027,
413 "%s: missing fce\n", __func__
);
414 qla27xx_skip_entry(ent
, buf
);
417 return qla27xx_next_entry(ent
);
420 static struct qla27xx_fwdt_entry
*
421 qla27xx_fwdt_entry_t265(struct scsi_qla_host
*vha
,
422 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
424 ql_dbg(ql_dbg_misc
+ ql_dbg_verbose
, vha
, 0xd209,
425 "%s: pause risc [%lx]\n", __func__
, *len
);
427 qla24xx_pause_risc(ISPREG(vha
), vha
->hw
);
429 return qla27xx_next_entry(ent
);
432 static struct qla27xx_fwdt_entry
*
433 qla27xx_fwdt_entry_t266(struct scsi_qla_host
*vha
,
434 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
436 ql_dbg(ql_dbg_misc
, vha
, 0xd20a,
437 "%s: reset risc [%lx]\n", __func__
, *len
);
439 WARN_ON_ONCE(qla24xx_soft_reset(vha
->hw
) != QLA_SUCCESS
);
441 return qla27xx_next_entry(ent
);
444 static struct qla27xx_fwdt_entry
*
445 qla27xx_fwdt_entry_t267(struct scsi_qla_host
*vha
,
446 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
448 uint offset
= ent
->t267
.pci_offset
;
449 ulong data
= le32_to_cpu(ent
->t267
.data
);
451 ql_dbg(ql_dbg_misc
, vha
, 0xd20b,
452 "%s: dis intr [%lx]\n", __func__
, *len
);
453 qla27xx_write_reg(ISPREG(vha
), offset
, data
, buf
);
455 return qla27xx_next_entry(ent
);
458 static struct qla27xx_fwdt_entry
*
459 qla27xx_fwdt_entry_t268(struct scsi_qla_host
*vha
,
460 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
462 ql_dbg(ql_dbg_misc
, vha
, 0xd20c,
463 "%s: gethb(%x) [%lx]\n", __func__
, ent
->t268
.buf_type
, *len
);
464 switch (ent
->t268
.buf_type
) {
465 case T268_BUF_TYPE_EXTD_TRACE
:
468 ent
->t268
.buf_size
= EFT_SIZE
;
469 ent
->t268
.start_addr
= vha
->hw
->eft_dma
;
471 qla27xx_insertbuf(vha
->hw
->eft
, EFT_SIZE
, buf
, len
);
473 ql_dbg(ql_dbg_misc
, vha
, 0xd028,
474 "%s: missing eft\n", __func__
);
475 qla27xx_skip_entry(ent
, buf
);
478 case T268_BUF_TYPE_EXCH_BUFOFF
:
479 if (vha
->hw
->exchoffld_buf
) {
481 ent
->t268
.buf_size
= vha
->hw
->exchoffld_size
;
482 ent
->t268
.start_addr
=
483 vha
->hw
->exchoffld_buf_dma
;
485 qla27xx_insertbuf(vha
->hw
->exchoffld_buf
,
486 vha
->hw
->exchoffld_size
, buf
, len
);
488 ql_dbg(ql_dbg_misc
, vha
, 0xd028,
489 "%s: missing exch offld\n", __func__
);
490 qla27xx_skip_entry(ent
, buf
);
493 case T268_BUF_TYPE_EXTD_LOGIN
:
494 if (vha
->hw
->exlogin_buf
) {
496 ent
->t268
.buf_size
= vha
->hw
->exlogin_size
;
497 ent
->t268
.start_addr
=
498 vha
->hw
->exlogin_buf_dma
;
500 qla27xx_insertbuf(vha
->hw
->exlogin_buf
,
501 vha
->hw
->exlogin_size
, buf
, len
);
503 ql_dbg(ql_dbg_misc
, vha
, 0xd028,
504 "%s: missing ext login\n", __func__
);
505 qla27xx_skip_entry(ent
, buf
);
509 case T268_BUF_TYPE_REQ_MIRROR
:
510 case T268_BUF_TYPE_RSP_MIRROR
:
512 * Mirror pointers are not implemented in the
513 * driver, instead shadow pointers are used by
514 * the drier. Skip these entries.
516 qla27xx_skip_entry(ent
, buf
);
519 ql_dbg(ql_dbg_async
, vha
, 0xd02b,
520 "%s: unknown buffer %x\n", __func__
, ent
->t268
.buf_type
);
521 qla27xx_skip_entry(ent
, buf
);
525 return qla27xx_next_entry(ent
);
528 static struct qla27xx_fwdt_entry
*
529 qla27xx_fwdt_entry_t269(struct scsi_qla_host
*vha
,
530 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
532 ql_dbg(ql_dbg_misc
, vha
, 0xd20d,
533 "%s: scratch [%lx]\n", __func__
, *len
);
534 qla27xx_insert32(0xaaaaaaaa, buf
, len
);
535 qla27xx_insert32(0xbbbbbbbb, buf
, len
);
536 qla27xx_insert32(0xcccccccc, buf
, len
);
537 qla27xx_insert32(0xdddddddd, buf
, len
);
538 qla27xx_insert32(*len
+ sizeof(uint32_t), buf
, len
);
540 ent
->t269
.scratch_size
= 5 * sizeof(uint32_t);
542 return qla27xx_next_entry(ent
);
545 static struct qla27xx_fwdt_entry
*
546 qla27xx_fwdt_entry_t270(struct scsi_qla_host
*vha
,
547 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
549 ulong addr
= le32_to_cpu(ent
->t270
.addr
);
550 ulong dwords
= le32_to_cpu(ent
->t270
.count
);
552 ql_dbg(ql_dbg_misc
, vha
, 0xd20e,
553 "%s: rdremreg [%lx]\n", __func__
, *len
);
554 qla27xx_write_reg(ISPREG(vha
), IOBASE_ADDR
, 0x40, buf
);
556 qla27xx_write_reg(ISPREG(vha
), 0xc0, addr
|0x80000000, buf
);
557 qla27xx_insert32(addr
, buf
, len
);
558 qla27xx_read_reg(ISPREG(vha
), 0xc4, buf
, len
);
559 addr
+= sizeof(uint32_t);
562 return qla27xx_next_entry(ent
);
565 static struct qla27xx_fwdt_entry
*
566 qla27xx_fwdt_entry_t271(struct scsi_qla_host
*vha
,
567 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
569 ulong addr
= le32_to_cpu(ent
->t271
.addr
);
570 ulong data
= le32_to_cpu(ent
->t271
.data
);
572 ql_dbg(ql_dbg_misc
, vha
, 0xd20f,
573 "%s: wrremreg [%lx]\n", __func__
, *len
);
574 qla27xx_write_reg(ISPREG(vha
), IOBASE(vha
), 0x40, buf
);
575 qla27xx_write_reg(ISPREG(vha
), 0xc4, data
, buf
);
576 qla27xx_write_reg(ISPREG(vha
), 0xc0, addr
, buf
);
578 return qla27xx_next_entry(ent
);
581 static struct qla27xx_fwdt_entry
*
582 qla27xx_fwdt_entry_t272(struct scsi_qla_host
*vha
,
583 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
585 ulong dwords
= le32_to_cpu(ent
->t272
.count
);
586 ulong start
= le32_to_cpu(ent
->t272
.addr
);
588 ql_dbg(ql_dbg_misc
, vha
, 0xd210,
589 "%s: rdremram [%lx]\n", __func__
, *len
);
591 ql_dbg(ql_dbg_misc
, vha
, 0xd02c,
592 "%s: @%lx -> (%lx dwords)\n", __func__
, start
, dwords
);
594 qla27xx_dump_mpi_ram(vha
->hw
, start
, buf
, dwords
, &buf
);
596 *len
+= dwords
* sizeof(uint32_t);
598 return qla27xx_next_entry(ent
);
601 static struct qla27xx_fwdt_entry
*
602 qla27xx_fwdt_entry_t273(struct scsi_qla_host
*vha
,
603 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
605 ulong dwords
= le32_to_cpu(ent
->t273
.count
);
606 ulong addr
= le32_to_cpu(ent
->t273
.addr
);
609 ql_dbg(ql_dbg_misc
, vha
, 0xd211,
610 "%s: pcicfg [%lx]\n", __func__
, *len
);
613 if (pci_read_config_dword(vha
->hw
->pdev
, addr
, &value
))
614 ql_dbg(ql_dbg_misc
, vha
, 0xd02d,
615 "%s: failed pcicfg read at %lx\n", __func__
, addr
);
616 qla27xx_insert32(addr
, buf
, len
);
617 qla27xx_insert32(value
, buf
, len
);
618 addr
+= sizeof(uint32_t);
621 return qla27xx_next_entry(ent
);
624 static struct qla27xx_fwdt_entry
*
625 qla27xx_fwdt_entry_t274(struct scsi_qla_host
*vha
,
626 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
628 ulong type
= ent
->t274
.queue_type
;
632 ql_dbg(ql_dbg_misc
+ ql_dbg_verbose
, vha
, 0xd212,
633 "%s: getqsh(%lx) [%lx]\n", __func__
, type
, *len
);
634 if (type
== T274_QUEUE_TYPE_REQ_SHAD
) {
635 for (i
= 0; i
< vha
->hw
->max_req_queues
; i
++) {
636 struct req_que
*req
= vha
->hw
->req_q_map
[i
];
639 qla27xx_insert16(i
, buf
, len
);
640 qla27xx_insert16(1, buf
, len
);
641 qla27xx_insert32(req
&& req
->out_ptr
?
642 *req
->out_ptr
: 0, buf
, len
);
646 } else if (type
== T274_QUEUE_TYPE_RSP_SHAD
) {
647 for (i
= 0; i
< vha
->hw
->max_rsp_queues
; i
++) {
648 struct rsp_que
*rsp
= vha
->hw
->rsp_q_map
[i
];
651 qla27xx_insert16(i
, buf
, len
);
652 qla27xx_insert16(1, buf
, len
);
653 qla27xx_insert32(rsp
&& rsp
->in_ptr
?
654 *rsp
->in_ptr
: 0, buf
, len
);
658 } else if (QLA_TGT_MODE_ENABLED() &&
659 ent
->t274
.queue_type
== T274_QUEUE_TYPE_ATIO_SHAD
) {
660 struct qla_hw_data
*ha
= vha
->hw
;
661 struct atio
*atr
= ha
->tgt
.atio_ring_ptr
;
664 qla27xx_insert16(0, buf
, len
);
665 qla27xx_insert16(1, buf
, len
);
666 qla27xx_insert32(ha
->tgt
.atio_q_in
?
667 readl(ha
->tgt
.atio_q_in
) : 0, buf
, len
);
671 ql_dbg(ql_dbg_misc
, vha
, 0xd02f,
672 "%s: unknown queue %lx\n", __func__
, type
);
673 qla27xx_skip_entry(ent
, buf
);
678 ent
->t274
.num_queues
= count
;
680 qla27xx_skip_entry(ent
, buf
);
683 return qla27xx_next_entry(ent
);
686 static struct qla27xx_fwdt_entry
*
687 qla27xx_fwdt_entry_t275(struct scsi_qla_host
*vha
,
688 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
690 ulong offset
= offsetof(typeof(*ent
), t275
.buffer
);
691 ulong length
= le32_to_cpu(ent
->t275
.length
);
692 ulong size
= le32_to_cpu(ent
->hdr
.size
);
693 void *buffer
= ent
->t275
.buffer
;
695 ql_dbg(ql_dbg_misc
+ ql_dbg_verbose
, vha
, 0xd213,
696 "%s: buffer(%lx) [%lx]\n", __func__
, length
, *len
);
698 ql_dbg(ql_dbg_misc
, vha
, 0xd020,
699 "%s: buffer zero length\n", __func__
);
700 qla27xx_skip_entry(ent
, buf
);
703 if (offset
+ length
> size
) {
704 length
= size
- offset
;
705 ql_dbg(ql_dbg_misc
, vha
, 0xd030,
706 "%s: buffer overflow, truncate [%lx]\n", __func__
, length
);
707 ent
->t275
.length
= cpu_to_le32(length
);
710 qla27xx_insertbuf(buffer
, length
, buf
, len
);
712 return qla27xx_next_entry(ent
);
715 static struct qla27xx_fwdt_entry
*
716 qla27xx_fwdt_entry_t276(struct scsi_qla_host
*vha
,
717 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
719 ql_dbg(ql_dbg_misc
+ ql_dbg_verbose
, vha
, 0xd214,
720 "%s: cond [%lx]\n", __func__
, *len
);
723 ulong cond1
= le32_to_cpu(ent
->t276
.cond1
);
724 ulong cond2
= le32_to_cpu(ent
->t276
.cond2
);
725 uint type
= vha
->hw
->pdev
->device
>> 4 & 0xf;
726 uint func
= vha
->hw
->port_no
& 0x3;
728 if (type
!= cond1
|| func
!= cond2
) {
729 struct qla27xx_fwdt_template
*tmp
= buf
;
732 ent
= qla27xx_next_entry(ent
);
733 qla27xx_skip_entry(ent
, buf
);
737 return qla27xx_next_entry(ent
);
740 static struct qla27xx_fwdt_entry
*
741 qla27xx_fwdt_entry_t277(struct scsi_qla_host
*vha
,
742 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
744 ulong cmd_addr
= le32_to_cpu(ent
->t277
.cmd_addr
);
745 ulong wr_cmd_data
= le32_to_cpu(ent
->t277
.wr_cmd_data
);
746 ulong data_addr
= le32_to_cpu(ent
->t277
.data_addr
);
748 ql_dbg(ql_dbg_misc
+ ql_dbg_verbose
, vha
, 0xd215,
749 "%s: rdpep [%lx]\n", __func__
, *len
);
750 qla27xx_insert32(wr_cmd_data
, buf
, len
);
751 qla27xx_write_reg(ISPREG(vha
), cmd_addr
, wr_cmd_data
, buf
);
752 qla27xx_read_reg(ISPREG(vha
), data_addr
, buf
, len
);
754 return qla27xx_next_entry(ent
);
757 static struct qla27xx_fwdt_entry
*
758 qla27xx_fwdt_entry_t278(struct scsi_qla_host
*vha
,
759 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
761 ulong cmd_addr
= le32_to_cpu(ent
->t278
.cmd_addr
);
762 ulong wr_cmd_data
= le32_to_cpu(ent
->t278
.wr_cmd_data
);
763 ulong data_addr
= le32_to_cpu(ent
->t278
.data_addr
);
764 ulong wr_data
= le32_to_cpu(ent
->t278
.wr_data
);
766 ql_dbg(ql_dbg_misc
+ ql_dbg_verbose
, vha
, 0xd216,
767 "%s: wrpep [%lx]\n", __func__
, *len
);
768 qla27xx_write_reg(ISPREG(vha
), data_addr
, wr_data
, buf
);
769 qla27xx_write_reg(ISPREG(vha
), cmd_addr
, wr_cmd_data
, buf
);
771 return qla27xx_next_entry(ent
);
774 static struct qla27xx_fwdt_entry
*
775 qla27xx_fwdt_entry_other(struct scsi_qla_host
*vha
,
776 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
778 ulong type
= le32_to_cpu(ent
->hdr
.type
);
780 ql_dbg(ql_dbg_misc
, vha
, 0xd2ff,
781 "%s: other %lx [%lx]\n", __func__
, type
, *len
);
782 qla27xx_skip_entry(ent
, buf
);
784 return qla27xx_next_entry(ent
);
789 typeof(qla27xx_fwdt_entry_other
)(*call
);
790 } qla27xx_fwdt_entry_call
[] = {
791 { ENTRY_TYPE_NOP
, qla27xx_fwdt_entry_t0
},
792 { ENTRY_TYPE_TMP_END
, qla27xx_fwdt_entry_t255
},
793 { ENTRY_TYPE_RD_IOB_T1
, qla27xx_fwdt_entry_t256
},
794 { ENTRY_TYPE_WR_IOB_T1
, qla27xx_fwdt_entry_t257
},
795 { ENTRY_TYPE_RD_IOB_T2
, qla27xx_fwdt_entry_t258
},
796 { ENTRY_TYPE_WR_IOB_T2
, qla27xx_fwdt_entry_t259
},
797 { ENTRY_TYPE_RD_PCI
, qla27xx_fwdt_entry_t260
},
798 { ENTRY_TYPE_WR_PCI
, qla27xx_fwdt_entry_t261
},
799 { ENTRY_TYPE_RD_RAM
, qla27xx_fwdt_entry_t262
},
800 { ENTRY_TYPE_GET_QUEUE
, qla27xx_fwdt_entry_t263
},
801 { ENTRY_TYPE_GET_FCE
, qla27xx_fwdt_entry_t264
},
802 { ENTRY_TYPE_PSE_RISC
, qla27xx_fwdt_entry_t265
},
803 { ENTRY_TYPE_RST_RISC
, qla27xx_fwdt_entry_t266
},
804 { ENTRY_TYPE_DIS_INTR
, qla27xx_fwdt_entry_t267
},
805 { ENTRY_TYPE_GET_HBUF
, qla27xx_fwdt_entry_t268
},
806 { ENTRY_TYPE_SCRATCH
, qla27xx_fwdt_entry_t269
},
807 { ENTRY_TYPE_RDREMREG
, qla27xx_fwdt_entry_t270
},
808 { ENTRY_TYPE_WRREMREG
, qla27xx_fwdt_entry_t271
},
809 { ENTRY_TYPE_RDREMRAM
, qla27xx_fwdt_entry_t272
},
810 { ENTRY_TYPE_PCICFG
, qla27xx_fwdt_entry_t273
},
811 { ENTRY_TYPE_GET_SHADOW
, qla27xx_fwdt_entry_t274
},
812 { ENTRY_TYPE_WRITE_BUF
, qla27xx_fwdt_entry_t275
},
813 { ENTRY_TYPE_CONDITIONAL
, qla27xx_fwdt_entry_t276
},
814 { ENTRY_TYPE_RDPEPREG
, qla27xx_fwdt_entry_t277
},
815 { ENTRY_TYPE_WRPEPREG
, qla27xx_fwdt_entry_t278
},
816 { -1, qla27xx_fwdt_entry_other
}
820 typeof(qla27xx_fwdt_entry_call
->call
)(qla27xx_find_entry(uint type
))
822 typeof(*qla27xx_fwdt_entry_call
) *list
= qla27xx_fwdt_entry_call
;
824 while (list
->type
< type
)
827 if (list
->type
== type
)
829 return qla27xx_fwdt_entry_other
;
833 qla27xx_walk_template(struct scsi_qla_host
*vha
,
834 struct qla27xx_fwdt_template
*tmp
, void *buf
, ulong
*len
)
836 struct qla27xx_fwdt_entry
*ent
= (void *)tmp
+
837 le32_to_cpu(tmp
->entry_offset
);
840 tmp
->count
= le32_to_cpu(tmp
->entry_count
);
841 ql_dbg(ql_dbg_misc
, vha
, 0xd01a,
842 "%s: entry count %u\n", __func__
, tmp
->count
);
843 while (ent
&& tmp
->count
--) {
844 type
= le32_to_cpu(ent
->hdr
.type
);
845 ent
= qla27xx_find_entry(type
)(vha
, ent
, buf
, len
);
849 if (ent
== INVALID_ENTRY
) {
851 ql_dbg(ql_dbg_async
, vha
, 0xffff,
852 "Unable to capture FW dump");
858 ql_dbg(ql_dbg_misc
, vha
, 0xd018,
859 "%s: entry count residual=+%u\n", __func__
, tmp
->count
);
862 ql_dbg(ql_dbg_misc
, vha
, 0xd019,
863 "%s: missing end entry\n", __func__
);
866 cpu_to_le32s(&tmp
->count
); /* endianize residual count */
870 qla27xx_time_stamp(struct qla27xx_fwdt_template
*tmp
)
872 tmp
->capture_timestamp
= cpu_to_le32(jiffies
);
876 qla27xx_driver_info(struct qla27xx_fwdt_template
*tmp
)
878 uint8_t v
[] = { 0, 0, 0, 0, 0, 0 };
880 WARN_ON_ONCE(sscanf(qla2x00_version_str
,
881 "%hhu.%hhu.%hhu.%hhu",
882 v
+ 0, v
+ 1, v
+ 2, v
+ 3) != 4);
884 tmp
->driver_info
[0] = cpu_to_le32(
885 v
[3] << 24 | v
[2] << 16 | v
[1] << 8 | v
[0]);
886 tmp
->driver_info
[1] = cpu_to_le32(v
[5] << 8 | v
[4]);
887 tmp
->driver_info
[2] = __constant_cpu_to_le32(0x12345678);
891 qla27xx_firmware_info(struct scsi_qla_host
*vha
,
892 struct qla27xx_fwdt_template
*tmp
)
894 tmp
->firmware_version
[0] = cpu_to_le32(vha
->hw
->fw_major_version
);
895 tmp
->firmware_version
[1] = cpu_to_le32(vha
->hw
->fw_minor_version
);
896 tmp
->firmware_version
[2] = cpu_to_le32(vha
->hw
->fw_subminor_version
);
897 tmp
->firmware_version
[3] = cpu_to_le32(
898 vha
->hw
->fw_attributes_h
<< 16 | vha
->hw
->fw_attributes
);
899 tmp
->firmware_version
[4] = cpu_to_le32(
900 vha
->hw
->fw_attributes_ext
[1] << 16 | vha
->hw
->fw_attributes_ext
[0]);
/* Stamp the copied template with capture time, driver and fw versions. */
static void
ql27xx_edit_template(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp)
{
	qla27xx_time_stamp(tmp);
	qla27xx_driver_info(tmp);
	qla27xx_firmware_info(vha, tmp);
}
912 static inline uint32_t
913 qla27xx_template_checksum(void *p
, ulong size
)
918 size
/= sizeof(*buf
);
920 for ( ; size
--; buf
++)
921 sum
+= le32_to_cpu(*buf
);
923 sum
= (sum
& 0xffffffff) + (sum
>> 32);
929 qla27xx_verify_template_checksum(struct qla27xx_fwdt_template
*tmp
)
931 return qla27xx_template_checksum(tmp
,
932 le32_to_cpu(tmp
->template_size
)) == 0;
936 qla27xx_verify_template_header(struct qla27xx_fwdt_template
*tmp
)
938 return le32_to_cpu(tmp
->template_type
) == TEMPLATE_TYPE_FWDUMP
;
942 qla27xx_execute_fwdt_template(struct scsi_qla_host
*vha
,
943 struct qla27xx_fwdt_template
*tmp
, void *buf
)
947 if (qla27xx_fwdt_template_valid(tmp
)) {
948 len
= le32_to_cpu(tmp
->template_size
);
949 tmp
= memcpy(buf
, tmp
, len
);
950 ql27xx_edit_template(vha
, tmp
);
951 qla27xx_walk_template(vha
, tmp
, buf
, &len
);
958 qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host
*vha
, void *p
)
960 struct qla27xx_fwdt_template
*tmp
= p
;
963 if (qla27xx_fwdt_template_valid(tmp
)) {
964 len
= le32_to_cpu(tmp
->template_size
);
965 qla27xx_walk_template(vha
, tmp
, NULL
, &len
);
972 qla27xx_fwdt_template_size(void *p
)
974 struct qla27xx_fwdt_template
*tmp
= p
;
976 return le32_to_cpu(tmp
->template_size
);
980 qla27xx_fwdt_template_valid(void *p
)
982 struct qla27xx_fwdt_template
*tmp
= p
;
984 if (!qla27xx_verify_template_header(tmp
)) {
985 ql_log(ql_log_warn
, NULL
, 0xd01c,
986 "%s: template type %x\n", __func__
,
987 le32_to_cpu(tmp
->template_type
));
991 if (!qla27xx_verify_template_checksum(tmp
)) {
992 ql_log(ql_log_warn
, NULL
, 0xd01d,
993 "%s: failed template checksum\n", __func__
);
1001 qla27xx_mpi_fwdump(scsi_qla_host_t
*vha
, int hardware_locked
)
1005 if (!hardware_locked
)
1006 spin_lock_irqsave(&vha
->hw
->hardware_lock
, flags
);
1007 if (!vha
->hw
->mpi_fw_dump
) {
1008 ql_log(ql_log_warn
, vha
, 0x02f3, "-> mpi_fwdump no buffer\n");
1010 struct fwdt
*fwdt
= &vha
->hw
->fwdt
[1];
1012 void *buf
= vha
->hw
->mpi_fw_dump
;
1013 bool walk_template_only
= false;
1015 if (vha
->hw
->mpi_fw_dumped
) {
1016 /* Use the spare area for any further dumps. */
1017 buf
+= fwdt
->dump_size
;
1018 walk_template_only
= true;
1019 ql_log(ql_log_warn
, vha
, 0x02f4,
1020 "-> MPI firmware already dumped -- dump saving to temporary buffer %p.\n",
1024 ql_log(ql_log_warn
, vha
, 0x02f5, "-> fwdt1 running...\n");
1025 if (!fwdt
->template) {
1026 ql_log(ql_log_warn
, vha
, 0x02f6,
1027 "-> fwdt1 no template\n");
1030 len
= qla27xx_execute_fwdt_template(vha
, fwdt
->template, buf
);
1033 } else if (len
!= fwdt
->dump_size
) {
1034 ql_log(ql_log_warn
, vha
, 0x02f7,
1035 "-> fwdt1 fwdump residual=%+ld\n",
1036 fwdt
->dump_size
- len
);
1038 vha
->hw
->stat
.num_mpi_reset
++;
1039 if (walk_template_only
)
1042 vha
->hw
->mpi_fw_dump_len
= len
;
1043 vha
->hw
->mpi_fw_dumped
= 1;
1045 ql_log(ql_log_warn
, vha
, 0x02f8,
1046 "-> MPI firmware dump saved to buffer (%lu/%p)\n",
1047 vha
->host_no
, vha
->hw
->mpi_fw_dump
);
1048 qla2x00_post_uevent_work(vha
, QLA_UEVENT_CODE_FW_DUMP
);
1052 if (!hardware_locked
)
1053 spin_unlock_irqrestore(&vha
->hw
->hardware_lock
, flags
);
1057 qla27xx_fwdump(scsi_qla_host_t
*vha
)
1059 lockdep_assert_held(&vha
->hw
->hardware_lock
);
1061 if (!vha
->hw
->fw_dump
) {
1062 ql_log(ql_log_warn
, vha
, 0xd01e, "-> fwdump no buffer\n");
1063 } else if (vha
->hw
->fw_dumped
) {
1064 ql_log(ql_log_warn
, vha
, 0xd01f,
1065 "-> Firmware already dumped (%p) -- ignoring request\n",
1068 struct fwdt
*fwdt
= vha
->hw
->fwdt
;
1070 void *buf
= vha
->hw
->fw_dump
;
1072 ql_log(ql_log_warn
, vha
, 0xd011, "-> fwdt0 running...\n");
1073 if (!fwdt
->template) {
1074 ql_log(ql_log_warn
, vha
, 0xd012,
1075 "-> fwdt0 no template\n");
1078 len
= qla27xx_execute_fwdt_template(vha
, fwdt
->template, buf
);
1081 } else if (len
!= fwdt
->dump_size
) {
1082 ql_log(ql_log_warn
, vha
, 0xd013,
1083 "-> fwdt0 fwdump residual=%+ld\n",
1084 fwdt
->dump_size
- len
);
1087 vha
->hw
->fw_dump_len
= len
;
1088 vha
->hw
->fw_dumped
= true;
1090 ql_log(ql_log_warn
, vha
, 0xd015,
1091 "-> Firmware dump saved to buffer (%lu/%p) <%lx>\n",
1092 vha
->host_no
, vha
->hw
->fw_dump
, vha
->hw
->fw_dump_cap_flags
);
1093 qla2x00_post_uevent_work(vha
, QLA_UEVENT_CODE_FW_DUMP
);