/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
/* Base of the ISP24xx register window for this host. */
#define ISPREG(vha)	(&(vha)->hw->iobase->isp24)
/* Byte offset of the iobase_addr (window select) register within *reg. */
#define IOBAR(reg)	offsetof(typeof(*(reg)), iobase_addr)
#define IOBASE(vha)	IOBAR(ISPREG(vha))
/* Sentinel returned by entry handlers on unrecoverable dump failure. */
#define INVALID_ENTRY ((struct qla27xx_fwdt_entry *)0xffffffffffffffffUL)
16 qla27xx_insert16(uint16_t value
, void *buf
, ulong
*len
)
20 *(__le16
*)buf
= cpu_to_le16(value
);
22 *len
+= sizeof(value
);
26 qla27xx_insert32(uint32_t value
, void *buf
, ulong
*len
)
30 *(__le32
*)buf
= cpu_to_le32(value
);
32 *len
+= sizeof(value
);
36 qla27xx_insertbuf(void *mem
, ulong size
, void *buf
, ulong
*len
)
38 if (buf
&& mem
&& size
) {
40 memcpy(buf
, mem
, size
);
46 qla27xx_read8(void __iomem
*window
, void *buf
, ulong
*len
)
51 value
= RD_REG_BYTE(window
);
53 qla27xx_insert32(value
, buf
, len
);
57 qla27xx_read16(void __iomem
*window
, void *buf
, ulong
*len
)
62 value
= RD_REG_WORD(window
);
64 qla27xx_insert32(value
, buf
, len
);
68 qla27xx_read32(void __iomem
*window
, void *buf
, ulong
*len
)
73 value
= RD_REG_DWORD(window
);
75 qla27xx_insert32(value
, buf
, len
);
78 static inline void (*qla27xx_read_vector(uint width
))(void __iomem
*, void *, ulong
*)
81 (width
== 1) ? qla27xx_read8
:
82 (width
== 2) ? qla27xx_read16
:
87 qla27xx_read_reg(__iomem
struct device_reg_24xx
*reg
,
88 uint offset
, void *buf
, ulong
*len
)
90 void __iomem
*window
= (void __iomem
*)reg
+ offset
;
92 qla27xx_read32(window
, buf
, len
);
96 qla27xx_write_reg(__iomem
struct device_reg_24xx
*reg
,
97 uint offset
, uint32_t data
, void *buf
)
100 void __iomem
*window
= (void __iomem
*)reg
+ offset
;
102 WRT_REG_DWORD(window
, data
);
107 qla27xx_read_window(__iomem
struct device_reg_24xx
*reg
,
108 uint32_t addr
, uint offset
, uint count
, uint width
, void *buf
,
111 void __iomem
*window
= (void __iomem
*)reg
+ offset
;
112 void (*readn
)(void __iomem
*, void *, ulong
*) = qla27xx_read_vector(width
);
114 qla27xx_write_reg(reg
, IOBAR(reg
), addr
, buf
);
116 qla27xx_insert32(addr
, buf
, len
);
117 readn(window
, buf
, len
);
124 qla27xx_skip_entry(struct qla27xx_fwdt_entry
*ent
, void *buf
)
127 ent
->hdr
.driver_flags
|= DRIVER_FLAG_SKIP_ENTRY
;
130 static inline struct qla27xx_fwdt_entry
*
131 qla27xx_next_entry(struct qla27xx_fwdt_entry
*ent
)
133 return (void *)ent
+ le32_to_cpu(ent
->hdr
.size
);
136 static struct qla27xx_fwdt_entry
*
137 qla27xx_fwdt_entry_t0(struct scsi_qla_host
*vha
,
138 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
140 ql_dbg(ql_dbg_misc
, vha
, 0xd100,
141 "%s: nop [%lx]\n", __func__
, *len
);
142 qla27xx_skip_entry(ent
, buf
);
144 return qla27xx_next_entry(ent
);
147 static struct qla27xx_fwdt_entry
*
148 qla27xx_fwdt_entry_t255(struct scsi_qla_host
*vha
,
149 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
151 ql_dbg(ql_dbg_misc
, vha
, 0xd1ff,
152 "%s: end [%lx]\n", __func__
, *len
);
153 qla27xx_skip_entry(ent
, buf
);
159 static struct qla27xx_fwdt_entry
*
160 qla27xx_fwdt_entry_t256(struct scsi_qla_host
*vha
,
161 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
163 ulong addr
= le32_to_cpu(ent
->t256
.base_addr
);
164 uint offset
= ent
->t256
.pci_offset
;
165 ulong count
= le16_to_cpu(ent
->t256
.reg_count
);
166 uint width
= ent
->t256
.reg_width
;
168 ql_dbg(ql_dbg_misc
, vha
, 0xd200,
169 "%s: rdio t1 [%lx]\n", __func__
, *len
);
170 qla27xx_read_window(ISPREG(vha
), addr
, offset
, count
, width
, buf
, len
);
172 return qla27xx_next_entry(ent
);
175 static struct qla27xx_fwdt_entry
*
176 qla27xx_fwdt_entry_t257(struct scsi_qla_host
*vha
,
177 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
179 ulong addr
= le32_to_cpu(ent
->t257
.base_addr
);
180 uint offset
= ent
->t257
.pci_offset
;
181 ulong data
= le32_to_cpu(ent
->t257
.write_data
);
183 ql_dbg(ql_dbg_misc
, vha
, 0xd201,
184 "%s: wrio t1 [%lx]\n", __func__
, *len
);
185 qla27xx_write_reg(ISPREG(vha
), IOBASE(vha
), addr
, buf
);
186 qla27xx_write_reg(ISPREG(vha
), offset
, data
, buf
);
188 return qla27xx_next_entry(ent
);
191 static struct qla27xx_fwdt_entry
*
192 qla27xx_fwdt_entry_t258(struct scsi_qla_host
*vha
,
193 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
195 uint banksel
= ent
->t258
.banksel_offset
;
196 ulong bank
= le32_to_cpu(ent
->t258
.bank
);
197 ulong addr
= le32_to_cpu(ent
->t258
.base_addr
);
198 uint offset
= ent
->t258
.pci_offset
;
199 uint count
= le16_to_cpu(ent
->t258
.reg_count
);
200 uint width
= ent
->t258
.reg_width
;
202 ql_dbg(ql_dbg_misc
, vha
, 0xd202,
203 "%s: rdio t2 [%lx]\n", __func__
, *len
);
204 qla27xx_write_reg(ISPREG(vha
), banksel
, bank
, buf
);
205 qla27xx_read_window(ISPREG(vha
), addr
, offset
, count
, width
, buf
, len
);
207 return qla27xx_next_entry(ent
);
210 static struct qla27xx_fwdt_entry
*
211 qla27xx_fwdt_entry_t259(struct scsi_qla_host
*vha
,
212 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
214 ulong addr
= le32_to_cpu(ent
->t259
.base_addr
);
215 uint banksel
= ent
->t259
.banksel_offset
;
216 ulong bank
= le32_to_cpu(ent
->t259
.bank
);
217 uint offset
= ent
->t259
.pci_offset
;
218 ulong data
= le32_to_cpu(ent
->t259
.write_data
);
220 ql_dbg(ql_dbg_misc
, vha
, 0xd203,
221 "%s: wrio t2 [%lx]\n", __func__
, *len
);
222 qla27xx_write_reg(ISPREG(vha
), IOBASE(vha
), addr
, buf
);
223 qla27xx_write_reg(ISPREG(vha
), banksel
, bank
, buf
);
224 qla27xx_write_reg(ISPREG(vha
), offset
, data
, buf
);
226 return qla27xx_next_entry(ent
);
229 static struct qla27xx_fwdt_entry
*
230 qla27xx_fwdt_entry_t260(struct scsi_qla_host
*vha
,
231 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
233 uint offset
= ent
->t260
.pci_offset
;
235 ql_dbg(ql_dbg_misc
, vha
, 0xd204,
236 "%s: rdpci [%lx]\n", __func__
, *len
);
237 qla27xx_insert32(offset
, buf
, len
);
238 qla27xx_read_reg(ISPREG(vha
), offset
, buf
, len
);
240 return qla27xx_next_entry(ent
);
243 static struct qla27xx_fwdt_entry
*
244 qla27xx_fwdt_entry_t261(struct scsi_qla_host
*vha
,
245 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
247 uint offset
= ent
->t261
.pci_offset
;
248 ulong data
= le32_to_cpu(ent
->t261
.write_data
);
250 ql_dbg(ql_dbg_misc
, vha
, 0xd205,
251 "%s: wrpci [%lx]\n", __func__
, *len
);
252 qla27xx_write_reg(ISPREG(vha
), offset
, data
, buf
);
254 return qla27xx_next_entry(ent
);
257 static struct qla27xx_fwdt_entry
*
258 qla27xx_fwdt_entry_t262(struct scsi_qla_host
*vha
,
259 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
261 uint area
= ent
->t262
.ram_area
;
262 ulong start
= le32_to_cpu(ent
->t262
.start_addr
);
263 ulong end
= le32_to_cpu(ent
->t262
.end_addr
);
267 ql_dbg(ql_dbg_misc
, vha
, 0xd206,
268 "%s: rdram(%x) [%lx]\n", __func__
, ent
->t262
.ram_area
, *len
);
270 if (area
== T262_RAM_AREA_CRITICAL_RAM
) {
272 } else if (area
== T262_RAM_AREA_EXTERNAL_RAM
) {
273 end
= vha
->hw
->fw_memory_size
;
275 ent
->t262
.end_addr
= cpu_to_le32(end
);
276 } else if (area
== T262_RAM_AREA_SHARED_RAM
) {
277 start
= vha
->hw
->fw_shared_ram_start
;
278 end
= vha
->hw
->fw_shared_ram_end
;
280 ent
->t262
.start_addr
= cpu_to_le32(start
);
281 ent
->t262
.end_addr
= cpu_to_le32(end
);
283 } else if (area
== T262_RAM_AREA_DDR_RAM
) {
284 start
= vha
->hw
->fw_ddr_ram_start
;
285 end
= vha
->hw
->fw_ddr_ram_end
;
287 ent
->t262
.start_addr
= cpu_to_le32(start
);
288 ent
->t262
.end_addr
= cpu_to_le32(end
);
290 } else if (area
== T262_RAM_AREA_MISC
) {
292 ent
->t262
.start_addr
= cpu_to_le32(start
);
293 ent
->t262
.end_addr
= cpu_to_le32(end
);
296 ql_dbg(ql_dbg_misc
, vha
, 0xd022,
297 "%s: unknown area %x\n", __func__
, area
);
298 qla27xx_skip_entry(ent
, buf
);
302 if (end
< start
|| start
== 0 || end
== 0) {
303 ql_dbg(ql_dbg_misc
, vha
, 0xd023,
304 "%s: unusable range (start=%lx end=%lx)\n",
305 __func__
, start
, end
);
306 qla27xx_skip_entry(ent
, buf
);
310 dwords
= end
- start
+ 1;
313 rc
= qla24xx_dump_ram(vha
->hw
, start
, buf
, dwords
, &buf
);
314 if (rc
!= QLA_SUCCESS
) {
315 ql_dbg(ql_dbg_async
, vha
, 0xffff,
316 "%s: dump ram MB failed. Area %xh start %lxh end %lxh\n",
317 __func__
, area
, start
, end
);
318 return INVALID_ENTRY
;
321 *len
+= dwords
* sizeof(uint32_t);
323 return qla27xx_next_entry(ent
);
326 static struct qla27xx_fwdt_entry
*
327 qla27xx_fwdt_entry_t263(struct scsi_qla_host
*vha
,
328 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
330 uint type
= ent
->t263
.queue_type
;
335 ql_dbg(ql_dbg_misc
+ ql_dbg_verbose
, vha
, 0xd207,
336 "%s: getq(%x) [%lx]\n", __func__
, type
, *len
);
337 if (type
== T263_QUEUE_TYPE_REQ
) {
338 for (i
= 0; i
< vha
->hw
->max_req_queues
; i
++) {
339 struct req_que
*req
= vha
->hw
->req_q_map
[i
];
343 req
->length
: REQUEST_ENTRY_CNT_24XX
;
344 qla27xx_insert16(i
, buf
, len
);
345 qla27xx_insert16(length
, buf
, len
);
346 qla27xx_insertbuf(req
? req
->ring
: NULL
,
347 length
* sizeof(*req
->ring
), buf
, len
);
351 } else if (type
== T263_QUEUE_TYPE_RSP
) {
352 for (i
= 0; i
< vha
->hw
->max_rsp_queues
; i
++) {
353 struct rsp_que
*rsp
= vha
->hw
->rsp_q_map
[i
];
357 rsp
->length
: RESPONSE_ENTRY_CNT_MQ
;
358 qla27xx_insert16(i
, buf
, len
);
359 qla27xx_insert16(length
, buf
, len
);
360 qla27xx_insertbuf(rsp
? rsp
->ring
: NULL
,
361 length
* sizeof(*rsp
->ring
), buf
, len
);
365 } else if (QLA_TGT_MODE_ENABLED() &&
366 ent
->t263
.queue_type
== T263_QUEUE_TYPE_ATIO
) {
367 struct qla_hw_data
*ha
= vha
->hw
;
368 struct atio
*atr
= ha
->tgt
.atio_ring
;
371 length
= ha
->tgt
.atio_q_length
;
372 qla27xx_insert16(0, buf
, len
);
373 qla27xx_insert16(length
, buf
, len
);
374 qla27xx_insertbuf(atr
, length
* sizeof(*atr
), buf
, len
);
378 ql_dbg(ql_dbg_misc
, vha
, 0xd026,
379 "%s: unknown queue %x\n", __func__
, type
);
380 qla27xx_skip_entry(ent
, buf
);
385 ent
->t263
.num_queues
= count
;
387 qla27xx_skip_entry(ent
, buf
);
390 return qla27xx_next_entry(ent
);
393 static struct qla27xx_fwdt_entry
*
394 qla27xx_fwdt_entry_t264(struct scsi_qla_host
*vha
,
395 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
397 ql_dbg(ql_dbg_misc
, vha
, 0xd208,
398 "%s: getfce [%lx]\n", __func__
, *len
);
401 ent
->t264
.fce_trace_size
= FCE_SIZE
;
402 ent
->t264
.write_pointer
= vha
->hw
->fce_wr
;
403 ent
->t264
.base_pointer
= vha
->hw
->fce_dma
;
404 ent
->t264
.fce_enable_mb0
= vha
->hw
->fce_mb
[0];
405 ent
->t264
.fce_enable_mb2
= vha
->hw
->fce_mb
[2];
406 ent
->t264
.fce_enable_mb3
= vha
->hw
->fce_mb
[3];
407 ent
->t264
.fce_enable_mb4
= vha
->hw
->fce_mb
[4];
408 ent
->t264
.fce_enable_mb5
= vha
->hw
->fce_mb
[5];
409 ent
->t264
.fce_enable_mb6
= vha
->hw
->fce_mb
[6];
411 qla27xx_insertbuf(vha
->hw
->fce
, FCE_SIZE
, buf
, len
);
413 ql_dbg(ql_dbg_misc
, vha
, 0xd027,
414 "%s: missing fce\n", __func__
);
415 qla27xx_skip_entry(ent
, buf
);
418 return qla27xx_next_entry(ent
);
421 static struct qla27xx_fwdt_entry
*
422 qla27xx_fwdt_entry_t265(struct scsi_qla_host
*vha
,
423 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
425 ql_dbg(ql_dbg_misc
+ ql_dbg_verbose
, vha
, 0xd209,
426 "%s: pause risc [%lx]\n", __func__
, *len
);
428 qla24xx_pause_risc(ISPREG(vha
), vha
->hw
);
430 return qla27xx_next_entry(ent
);
433 static struct qla27xx_fwdt_entry
*
434 qla27xx_fwdt_entry_t266(struct scsi_qla_host
*vha
,
435 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
437 ql_dbg(ql_dbg_misc
, vha
, 0xd20a,
438 "%s: reset risc [%lx]\n", __func__
, *len
);
440 WARN_ON_ONCE(qla24xx_soft_reset(vha
->hw
) != QLA_SUCCESS
);
442 return qla27xx_next_entry(ent
);
445 static struct qla27xx_fwdt_entry
*
446 qla27xx_fwdt_entry_t267(struct scsi_qla_host
*vha
,
447 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
449 uint offset
= ent
->t267
.pci_offset
;
450 ulong data
= le32_to_cpu(ent
->t267
.data
);
452 ql_dbg(ql_dbg_misc
, vha
, 0xd20b,
453 "%s: dis intr [%lx]\n", __func__
, *len
);
454 qla27xx_write_reg(ISPREG(vha
), offset
, data
, buf
);
456 return qla27xx_next_entry(ent
);
459 static struct qla27xx_fwdt_entry
*
460 qla27xx_fwdt_entry_t268(struct scsi_qla_host
*vha
,
461 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
463 ql_dbg(ql_dbg_misc
, vha
, 0xd20c,
464 "%s: gethb(%x) [%lx]\n", __func__
, ent
->t268
.buf_type
, *len
);
465 switch (ent
->t268
.buf_type
) {
466 case T268_BUF_TYPE_EXTD_TRACE
:
469 ent
->t268
.buf_size
= EFT_SIZE
;
470 ent
->t268
.start_addr
= vha
->hw
->eft_dma
;
472 qla27xx_insertbuf(vha
->hw
->eft
, EFT_SIZE
, buf
, len
);
474 ql_dbg(ql_dbg_misc
, vha
, 0xd028,
475 "%s: missing eft\n", __func__
);
476 qla27xx_skip_entry(ent
, buf
);
479 case T268_BUF_TYPE_EXCH_BUFOFF
:
480 if (vha
->hw
->exchoffld_buf
) {
482 ent
->t268
.buf_size
= vha
->hw
->exchoffld_size
;
483 ent
->t268
.start_addr
=
484 vha
->hw
->exchoffld_buf_dma
;
486 qla27xx_insertbuf(vha
->hw
->exchoffld_buf
,
487 vha
->hw
->exchoffld_size
, buf
, len
);
489 ql_dbg(ql_dbg_misc
, vha
, 0xd028,
490 "%s: missing exch offld\n", __func__
);
491 qla27xx_skip_entry(ent
, buf
);
494 case T268_BUF_TYPE_EXTD_LOGIN
:
495 if (vha
->hw
->exlogin_buf
) {
497 ent
->t268
.buf_size
= vha
->hw
->exlogin_size
;
498 ent
->t268
.start_addr
=
499 vha
->hw
->exlogin_buf_dma
;
501 qla27xx_insertbuf(vha
->hw
->exlogin_buf
,
502 vha
->hw
->exlogin_size
, buf
, len
);
504 ql_dbg(ql_dbg_misc
, vha
, 0xd028,
505 "%s: missing ext login\n", __func__
);
506 qla27xx_skip_entry(ent
, buf
);
510 case T268_BUF_TYPE_REQ_MIRROR
:
511 case T268_BUF_TYPE_RSP_MIRROR
:
513 * Mirror pointers are not implemented in the
514 * driver, instead shadow pointers are used by
515 * the drier. Skip these entries.
517 qla27xx_skip_entry(ent
, buf
);
520 ql_dbg(ql_dbg_async
, vha
, 0xd02b,
521 "%s: unknown buffer %x\n", __func__
, ent
->t268
.buf_type
);
522 qla27xx_skip_entry(ent
, buf
);
526 return qla27xx_next_entry(ent
);
529 static struct qla27xx_fwdt_entry
*
530 qla27xx_fwdt_entry_t269(struct scsi_qla_host
*vha
,
531 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
533 ql_dbg(ql_dbg_misc
, vha
, 0xd20d,
534 "%s: scratch [%lx]\n", __func__
, *len
);
535 qla27xx_insert32(0xaaaaaaaa, buf
, len
);
536 qla27xx_insert32(0xbbbbbbbb, buf
, len
);
537 qla27xx_insert32(0xcccccccc, buf
, len
);
538 qla27xx_insert32(0xdddddddd, buf
, len
);
539 qla27xx_insert32(*len
+ sizeof(uint32_t), buf
, len
);
541 ent
->t269
.scratch_size
= 5 * sizeof(uint32_t);
543 return qla27xx_next_entry(ent
);
546 static struct qla27xx_fwdt_entry
*
547 qla27xx_fwdt_entry_t270(struct scsi_qla_host
*vha
,
548 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
550 ulong addr
= le32_to_cpu(ent
->t270
.addr
);
551 ulong dwords
= le32_to_cpu(ent
->t270
.count
);
553 ql_dbg(ql_dbg_misc
, vha
, 0xd20e,
554 "%s: rdremreg [%lx]\n", __func__
, *len
);
555 qla27xx_write_reg(ISPREG(vha
), IOBASE_ADDR
, 0x40, buf
);
557 qla27xx_write_reg(ISPREG(vha
), 0xc0, addr
|0x80000000, buf
);
558 qla27xx_insert32(addr
, buf
, len
);
559 qla27xx_read_reg(ISPREG(vha
), 0xc4, buf
, len
);
560 addr
+= sizeof(uint32_t);
563 return qla27xx_next_entry(ent
);
566 static struct qla27xx_fwdt_entry
*
567 qla27xx_fwdt_entry_t271(struct scsi_qla_host
*vha
,
568 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
570 ulong addr
= le32_to_cpu(ent
->t271
.addr
);
571 ulong data
= le32_to_cpu(ent
->t271
.data
);
573 ql_dbg(ql_dbg_misc
, vha
, 0xd20f,
574 "%s: wrremreg [%lx]\n", __func__
, *len
);
575 qla27xx_write_reg(ISPREG(vha
), IOBASE(vha
), 0x40, buf
);
576 qla27xx_write_reg(ISPREG(vha
), 0xc4, data
, buf
);
577 qla27xx_write_reg(ISPREG(vha
), 0xc0, addr
, buf
);
579 return qla27xx_next_entry(ent
);
582 static struct qla27xx_fwdt_entry
*
583 qla27xx_fwdt_entry_t272(struct scsi_qla_host
*vha
,
584 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
586 ulong dwords
= le32_to_cpu(ent
->t272
.count
);
587 ulong start
= le32_to_cpu(ent
->t272
.addr
);
589 ql_dbg(ql_dbg_misc
, vha
, 0xd210,
590 "%s: rdremram [%lx]\n", __func__
, *len
);
592 ql_dbg(ql_dbg_misc
, vha
, 0xd02c,
593 "%s: @%lx -> (%lx dwords)\n", __func__
, start
, dwords
);
595 qla27xx_dump_mpi_ram(vha
->hw
, start
, buf
, dwords
, &buf
);
597 *len
+= dwords
* sizeof(uint32_t);
599 return qla27xx_next_entry(ent
);
602 static struct qla27xx_fwdt_entry
*
603 qla27xx_fwdt_entry_t273(struct scsi_qla_host
*vha
,
604 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
606 ulong dwords
= le32_to_cpu(ent
->t273
.count
);
607 ulong addr
= le32_to_cpu(ent
->t273
.addr
);
610 ql_dbg(ql_dbg_misc
, vha
, 0xd211,
611 "%s: pcicfg [%lx]\n", __func__
, *len
);
614 if (pci_read_config_dword(vha
->hw
->pdev
, addr
, &value
))
615 ql_dbg(ql_dbg_misc
, vha
, 0xd02d,
616 "%s: failed pcicfg read at %lx\n", __func__
, addr
);
617 qla27xx_insert32(addr
, buf
, len
);
618 qla27xx_insert32(value
, buf
, len
);
619 addr
+= sizeof(uint32_t);
622 return qla27xx_next_entry(ent
);
625 static struct qla27xx_fwdt_entry
*
626 qla27xx_fwdt_entry_t274(struct scsi_qla_host
*vha
,
627 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
629 ulong type
= ent
->t274
.queue_type
;
633 ql_dbg(ql_dbg_misc
+ ql_dbg_verbose
, vha
, 0xd212,
634 "%s: getqsh(%lx) [%lx]\n", __func__
, type
, *len
);
635 if (type
== T274_QUEUE_TYPE_REQ_SHAD
) {
636 for (i
= 0; i
< vha
->hw
->max_req_queues
; i
++) {
637 struct req_que
*req
= vha
->hw
->req_q_map
[i
];
640 qla27xx_insert16(i
, buf
, len
);
641 qla27xx_insert16(1, buf
, len
);
642 qla27xx_insert32(req
&& req
->out_ptr
?
643 *req
->out_ptr
: 0, buf
, len
);
647 } else if (type
== T274_QUEUE_TYPE_RSP_SHAD
) {
648 for (i
= 0; i
< vha
->hw
->max_rsp_queues
; i
++) {
649 struct rsp_que
*rsp
= vha
->hw
->rsp_q_map
[i
];
652 qla27xx_insert16(i
, buf
, len
);
653 qla27xx_insert16(1, buf
, len
);
654 qla27xx_insert32(rsp
&& rsp
->in_ptr
?
655 *rsp
->in_ptr
: 0, buf
, len
);
659 } else if (QLA_TGT_MODE_ENABLED() &&
660 ent
->t274
.queue_type
== T274_QUEUE_TYPE_ATIO_SHAD
) {
661 struct qla_hw_data
*ha
= vha
->hw
;
662 struct atio
*atr
= ha
->tgt
.atio_ring_ptr
;
665 qla27xx_insert16(0, buf
, len
);
666 qla27xx_insert16(1, buf
, len
);
667 qla27xx_insert32(ha
->tgt
.atio_q_in
?
668 readl(ha
->tgt
.atio_q_in
) : 0, buf
, len
);
672 ql_dbg(ql_dbg_misc
, vha
, 0xd02f,
673 "%s: unknown queue %lx\n", __func__
, type
);
674 qla27xx_skip_entry(ent
, buf
);
679 ent
->t274
.num_queues
= count
;
681 qla27xx_skip_entry(ent
, buf
);
684 return qla27xx_next_entry(ent
);
687 static struct qla27xx_fwdt_entry
*
688 qla27xx_fwdt_entry_t275(struct scsi_qla_host
*vha
,
689 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
691 ulong offset
= offsetof(typeof(*ent
), t275
.buffer
);
692 ulong length
= le32_to_cpu(ent
->t275
.length
);
693 ulong size
= le32_to_cpu(ent
->hdr
.size
);
694 void *buffer
= ent
->t275
.buffer
;
696 ql_dbg(ql_dbg_misc
+ ql_dbg_verbose
, vha
, 0xd213,
697 "%s: buffer(%lx) [%lx]\n", __func__
, length
, *len
);
699 ql_dbg(ql_dbg_misc
, vha
, 0xd020,
700 "%s: buffer zero length\n", __func__
);
701 qla27xx_skip_entry(ent
, buf
);
704 if (offset
+ length
> size
) {
705 length
= size
- offset
;
706 ql_dbg(ql_dbg_misc
, vha
, 0xd030,
707 "%s: buffer overflow, truncate [%lx]\n", __func__
, length
);
708 ent
->t275
.length
= cpu_to_le32(length
);
711 qla27xx_insertbuf(buffer
, length
, buf
, len
);
713 return qla27xx_next_entry(ent
);
716 static struct qla27xx_fwdt_entry
*
717 qla27xx_fwdt_entry_t276(struct scsi_qla_host
*vha
,
718 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
720 ql_dbg(ql_dbg_misc
+ ql_dbg_verbose
, vha
, 0xd214,
721 "%s: cond [%lx]\n", __func__
, *len
);
724 ulong cond1
= le32_to_cpu(ent
->t276
.cond1
);
725 ulong cond2
= le32_to_cpu(ent
->t276
.cond2
);
726 uint type
= vha
->hw
->pdev
->device
>> 4 & 0xf;
727 uint func
= vha
->hw
->port_no
& 0x3;
729 if (type
!= cond1
|| func
!= cond2
) {
730 struct qla27xx_fwdt_template
*tmp
= buf
;
733 ent
= qla27xx_next_entry(ent
);
734 qla27xx_skip_entry(ent
, buf
);
738 return qla27xx_next_entry(ent
);
741 static struct qla27xx_fwdt_entry
*
742 qla27xx_fwdt_entry_t277(struct scsi_qla_host
*vha
,
743 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
745 ulong cmd_addr
= le32_to_cpu(ent
->t277
.cmd_addr
);
746 ulong wr_cmd_data
= le32_to_cpu(ent
->t277
.wr_cmd_data
);
747 ulong data_addr
= le32_to_cpu(ent
->t277
.data_addr
);
749 ql_dbg(ql_dbg_misc
+ ql_dbg_verbose
, vha
, 0xd215,
750 "%s: rdpep [%lx]\n", __func__
, *len
);
751 qla27xx_insert32(wr_cmd_data
, buf
, len
);
752 qla27xx_write_reg(ISPREG(vha
), cmd_addr
, wr_cmd_data
, buf
);
753 qla27xx_read_reg(ISPREG(vha
), data_addr
, buf
, len
);
755 return qla27xx_next_entry(ent
);
758 static struct qla27xx_fwdt_entry
*
759 qla27xx_fwdt_entry_t278(struct scsi_qla_host
*vha
,
760 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
762 ulong cmd_addr
= le32_to_cpu(ent
->t278
.cmd_addr
);
763 ulong wr_cmd_data
= le32_to_cpu(ent
->t278
.wr_cmd_data
);
764 ulong data_addr
= le32_to_cpu(ent
->t278
.data_addr
);
765 ulong wr_data
= le32_to_cpu(ent
->t278
.wr_data
);
767 ql_dbg(ql_dbg_misc
+ ql_dbg_verbose
, vha
, 0xd216,
768 "%s: wrpep [%lx]\n", __func__
, *len
);
769 qla27xx_write_reg(ISPREG(vha
), data_addr
, wr_data
, buf
);
770 qla27xx_write_reg(ISPREG(vha
), cmd_addr
, wr_cmd_data
, buf
);
772 return qla27xx_next_entry(ent
);
775 static struct qla27xx_fwdt_entry
*
776 qla27xx_fwdt_entry_other(struct scsi_qla_host
*vha
,
777 struct qla27xx_fwdt_entry
*ent
, void *buf
, ulong
*len
)
779 ulong type
= le32_to_cpu(ent
->hdr
.type
);
781 ql_dbg(ql_dbg_misc
, vha
, 0xd2ff,
782 "%s: other %lx [%lx]\n", __func__
, type
, *len
);
783 qla27xx_skip_entry(ent
, buf
);
785 return qla27xx_next_entry(ent
);
790 typeof(qla27xx_fwdt_entry_other
)(*call
);
791 } qla27xx_fwdt_entry_call
[] = {
792 { ENTRY_TYPE_NOP
, qla27xx_fwdt_entry_t0
},
793 { ENTRY_TYPE_TMP_END
, qla27xx_fwdt_entry_t255
},
794 { ENTRY_TYPE_RD_IOB_T1
, qla27xx_fwdt_entry_t256
},
795 { ENTRY_TYPE_WR_IOB_T1
, qla27xx_fwdt_entry_t257
},
796 { ENTRY_TYPE_RD_IOB_T2
, qla27xx_fwdt_entry_t258
},
797 { ENTRY_TYPE_WR_IOB_T2
, qla27xx_fwdt_entry_t259
},
798 { ENTRY_TYPE_RD_PCI
, qla27xx_fwdt_entry_t260
},
799 { ENTRY_TYPE_WR_PCI
, qla27xx_fwdt_entry_t261
},
800 { ENTRY_TYPE_RD_RAM
, qla27xx_fwdt_entry_t262
},
801 { ENTRY_TYPE_GET_QUEUE
, qla27xx_fwdt_entry_t263
},
802 { ENTRY_TYPE_GET_FCE
, qla27xx_fwdt_entry_t264
},
803 { ENTRY_TYPE_PSE_RISC
, qla27xx_fwdt_entry_t265
},
804 { ENTRY_TYPE_RST_RISC
, qla27xx_fwdt_entry_t266
},
805 { ENTRY_TYPE_DIS_INTR
, qla27xx_fwdt_entry_t267
},
806 { ENTRY_TYPE_GET_HBUF
, qla27xx_fwdt_entry_t268
},
807 { ENTRY_TYPE_SCRATCH
, qla27xx_fwdt_entry_t269
},
808 { ENTRY_TYPE_RDREMREG
, qla27xx_fwdt_entry_t270
},
809 { ENTRY_TYPE_WRREMREG
, qla27xx_fwdt_entry_t271
},
810 { ENTRY_TYPE_RDREMRAM
, qla27xx_fwdt_entry_t272
},
811 { ENTRY_TYPE_PCICFG
, qla27xx_fwdt_entry_t273
},
812 { ENTRY_TYPE_GET_SHADOW
, qla27xx_fwdt_entry_t274
},
813 { ENTRY_TYPE_WRITE_BUF
, qla27xx_fwdt_entry_t275
},
814 { ENTRY_TYPE_CONDITIONAL
, qla27xx_fwdt_entry_t276
},
815 { ENTRY_TYPE_RDPEPREG
, qla27xx_fwdt_entry_t277
},
816 { ENTRY_TYPE_WRPEPREG
, qla27xx_fwdt_entry_t278
},
817 { -1, qla27xx_fwdt_entry_other
}
821 typeof(qla27xx_fwdt_entry_call
->call
)(qla27xx_find_entry(uint type
))
823 typeof(*qla27xx_fwdt_entry_call
) *list
= qla27xx_fwdt_entry_call
;
825 while (list
->type
< type
)
828 if (list
->type
== type
)
830 return qla27xx_fwdt_entry_other
;
834 qla27xx_walk_template(struct scsi_qla_host
*vha
,
835 struct qla27xx_fwdt_template
*tmp
, void *buf
, ulong
*len
)
837 struct qla27xx_fwdt_entry
*ent
= (void *)tmp
+
838 le32_to_cpu(tmp
->entry_offset
);
841 tmp
->count
= le32_to_cpu(tmp
->entry_count
);
842 ql_dbg(ql_dbg_misc
, vha
, 0xd01a,
843 "%s: entry count %u\n", __func__
, tmp
->count
);
844 while (ent
&& tmp
->count
--) {
845 type
= le32_to_cpu(ent
->hdr
.type
);
846 ent
= qla27xx_find_entry(type
)(vha
, ent
, buf
, len
);
850 if (ent
== INVALID_ENTRY
) {
852 ql_dbg(ql_dbg_async
, vha
, 0xffff,
853 "Unable to capture FW dump");
859 ql_dbg(ql_dbg_misc
, vha
, 0xd018,
860 "%s: entry count residual=+%u\n", __func__
, tmp
->count
);
863 ql_dbg(ql_dbg_misc
, vha
, 0xd019,
864 "%s: missing end entry\n", __func__
);
867 cpu_to_le32s(&tmp
->count
); /* endianize residual count */
871 qla27xx_time_stamp(struct qla27xx_fwdt_template
*tmp
)
873 tmp
->capture_timestamp
= jiffies
;
877 qla27xx_driver_info(struct qla27xx_fwdt_template
*tmp
)
879 uint8_t v
[] = { 0, 0, 0, 0, 0, 0 };
881 WARN_ON_ONCE(sscanf(qla2x00_version_str
,
882 "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
883 v
+0, v
+1, v
+2, v
+3, v
+4, v
+5) != 6);
885 tmp
->driver_info
[0] = v
[3] << 24 | v
[2] << 16 | v
[1] << 8 | v
[0];
886 tmp
->driver_info
[1] = v
[5] << 8 | v
[4];
887 tmp
->driver_info
[2] = 0x12345678;
891 qla27xx_firmware_info(struct scsi_qla_host
*vha
,
892 struct qla27xx_fwdt_template
*tmp
)
894 tmp
->firmware_version
[0] = vha
->hw
->fw_major_version
;
895 tmp
->firmware_version
[1] = vha
->hw
->fw_minor_version
;
896 tmp
->firmware_version
[2] = vha
->hw
->fw_subminor_version
;
897 tmp
->firmware_version
[3] =
898 vha
->hw
->fw_attributes_h
<< 16 | vha
->hw
->fw_attributes
;
899 tmp
->firmware_version
[4] =
900 vha
->hw
->fw_attributes_ext
[1] << 16 | vha
->hw
->fw_attributes_ext
[0];
/* Stamp time, driver and firmware metadata into a captured template. */
static void
ql27xx_edit_template(struct scsi_qla_host *vha,
    struct qla27xx_fwdt_template *tmp)
{
    qla27xx_time_stamp(tmp);
    qla27xx_driver_info(tmp);
    qla27xx_firmware_info(vha, tmp);
}
912 static inline uint32_t
913 qla27xx_template_checksum(void *p
, ulong size
)
918 size
/= sizeof(*buf
);
920 for ( ; size
--; buf
++)
921 sum
+= le32_to_cpu(*buf
);
923 sum
= (sum
& 0xffffffff) + (sum
>> 32);
929 qla27xx_verify_template_checksum(struct qla27xx_fwdt_template
*tmp
)
931 return qla27xx_template_checksum(tmp
, tmp
->template_size
) == 0;
935 qla27xx_verify_template_header(struct qla27xx_fwdt_template
*tmp
)
937 return le32_to_cpu(tmp
->template_type
) == TEMPLATE_TYPE_FWDUMP
;
941 qla27xx_execute_fwdt_template(struct scsi_qla_host
*vha
,
942 struct qla27xx_fwdt_template
*tmp
, void *buf
)
946 if (qla27xx_fwdt_template_valid(tmp
)) {
947 len
= tmp
->template_size
;
948 tmp
= memcpy(buf
, tmp
, len
);
949 ql27xx_edit_template(vha
, tmp
);
950 qla27xx_walk_template(vha
, tmp
, buf
, &len
);
957 qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host
*vha
, void *p
)
959 struct qla27xx_fwdt_template
*tmp
= p
;
962 if (qla27xx_fwdt_template_valid(tmp
)) {
963 len
= tmp
->template_size
;
964 qla27xx_walk_template(vha
, tmp
, NULL
, &len
);
971 qla27xx_fwdt_template_size(void *p
)
973 struct qla27xx_fwdt_template
*tmp
= p
;
975 return tmp
->template_size
;
979 qla27xx_fwdt_template_valid(void *p
)
981 struct qla27xx_fwdt_template
*tmp
= p
;
983 if (!qla27xx_verify_template_header(tmp
)) {
984 ql_log(ql_log_warn
, NULL
, 0xd01c,
985 "%s: template type %x\n", __func__
,
986 le32_to_cpu(tmp
->template_type
));
990 if (!qla27xx_verify_template_checksum(tmp
)) {
991 ql_log(ql_log_warn
, NULL
, 0xd01d,
992 "%s: failed template checksum\n", __func__
);
1000 qla27xx_fwdump(scsi_qla_host_t
*vha
, int hardware_locked
)
1005 if (!hardware_locked
)
1006 spin_lock_irqsave(&vha
->hw
->hardware_lock
, flags
);
1009 if (!vha
->hw
->fw_dump
) {
1010 ql_log(ql_log_warn
, vha
, 0xd01e, "-> fwdump no buffer\n");
1011 } else if (vha
->hw
->fw_dumped
) {
1012 ql_log(ql_log_warn
, vha
, 0xd01f,
1013 "-> Firmware already dumped (%p) -- ignoring request\n",
1016 struct fwdt
*fwdt
= vha
->hw
->fwdt
;
1019 void *buf
= vha
->hw
->fw_dump
;
1020 uint count
= vha
->hw
->fw_dump_mpi
? 2 : 1;
1022 for (j
= 0; j
< count
; j
++, fwdt
++, buf
+= len
) {
1023 ql_log(ql_log_warn
, vha
, 0xd011,
1024 "-> fwdt%u running...\n", j
);
1025 if (!fwdt
->template) {
1026 ql_log(ql_log_warn
, vha
, 0xd012,
1027 "-> fwdt%u no template\n", j
);
1030 len
= qla27xx_execute_fwdt_template(vha
,
1031 fwdt
->template, buf
);
1034 } else if (len
!= fwdt
->dump_size
) {
1035 ql_log(ql_log_warn
, vha
, 0xd013,
1036 "-> fwdt%u fwdump residual=%+ld\n",
1037 j
, fwdt
->dump_size
- len
);
1040 vha
->hw
->fw_dump_len
= buf
- (void *)vha
->hw
->fw_dump
;
1041 vha
->hw
->fw_dumped
= 1;
1043 ql_log(ql_log_warn
, vha
, 0xd015,
1044 "-> Firmware dump saved to buffer (%lu/%p) <%lx>\n",
1045 vha
->host_no
, vha
->hw
->fw_dump
, vha
->hw
->fw_dump_cap_flags
);
1046 qla2x00_post_uevent_work(vha
, QLA_UEVENT_CODE_FW_DUMP
);
1050 vha
->hw
->fw_dump_mpi
= 0;
1052 if (!hardware_locked
)
1053 spin_unlock_irqrestore(&vha
->hw
->hardware_lock
, flags
);