/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2014 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */
#include "qla_def.h"
#include "qla_tmpl.h"

/* note default template is in big endian */
static const uint32_t ql27xx_fwdt_default_template[] = {
	0x63000000, 0xa4000000, 0x7c050000, 0x00000000,
	0x30000000, 0x01000000, 0x00000000, 0xc0406eb4,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x00000000, 0x00000000, 0x00000000,
	0x00000000, 0x04010000, 0x14000000, 0x00000000,
	0x02000000, 0x44000000, 0x09010000, 0x10000000,
	0x00000000, 0x02000000, 0x01010000, 0x1c000000,
	0x00000000, 0x02000000, 0x00600000, 0x00000000,
	0xc0000000, 0x01010000, 0x1c000000, 0x00000000,
	0x02000000, 0x00600000, 0x00000000, 0xcc000000,
	0x01010000, 0x1c000000, 0x00000000, 0x02000000,
	0x10600000, 0x00000000, 0xd4000000, 0x01010000,
	0x1c000000, 0x00000000, 0x02000000, 0x700f0000,
	0x00000060, 0xf0000000, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x00700000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x10700000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x40700000, 0x041000c0,
	0x01010000, 0x1c000000, 0x00000000, 0x02000000,
	0x007c0000, 0x01000000, 0xc0000000, 0x00010000,
	0x18000000, 0x00000000, 0x02000000, 0x007c0000,
	0x040300c4, 0x00010000, 0x18000000, 0x00000000,
	0x02000000, 0x007c0000, 0x040100c0, 0x01010000,
	0x1c000000, 0x00000000, 0x02000000, 0x007c0000,
	0x00000000, 0xc0000000, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x007c0000, 0x04200000,
	0x0b010000, 0x18000000, 0x00000000, 0x02000000,
	0x0c000000, 0x00000000, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000000b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000010b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000020b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000030b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000040b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000050b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000060b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000070b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000080b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x000090b0, 0x02010000, 0x20000000,
	0x00000000, 0x02000000, 0x700f0000, 0x040100fc,
	0xf0000000, 0x0000a0b0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x0a000000, 0x040100c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x0a000000, 0x04200080, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x00be0000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x10be0000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x20be0000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x30be0000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x00b00000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x10b00000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x20b00000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x30b00000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x00300000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x10300000, 0x041000c0, 0x00010000, 0x18000000,
	0x00000000, 0x02000000, 0x20300000, 0x041000c0,
	0x00010000, 0x18000000, 0x00000000, 0x02000000,
	0x30300000, 0x041000c0, 0x0a010000, 0x10000000,
	0x00000000, 0x02000000, 0x06010000, 0x1c000000,
	0x00000000, 0x02000000, 0x01000000, 0x00000200,
	0xff230200, 0x06010000, 0x1c000000, 0x00000000,
	0x02000000, 0x02000000, 0x00001000, 0x00000000,
	0x07010000, 0x18000000, 0x00000000, 0x02000000,
	0x00000000, 0x01000000, 0x07010000, 0x18000000,
	0x00000000, 0x02000000, 0x00000000, 0x02000000,
	0x07010000, 0x18000000, 0x00000000, 0x02000000,
	0x00000000, 0x03000000, 0x0d010000, 0x14000000,
	0x00000000, 0x02000000, 0x00000000, 0xff000000,
	0x10000000, 0x00000000, 0x00000080,
};

static inline void __iomem *
qla27xx_isp_reg(struct scsi_qla_host *vha)
{
	return &vha->hw->iobase->isp24;
}

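/*
 * Buffer insertion helpers. These follow the two-pass convention used
 * throughout this file: when buf is NULL only *len is advanced (sizing
 * pass); when buf is non-NULL the value is stored at buf + *len as a
 * little-endian dump word before *len is advanced (capture pass).
 */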
static inline void
qla27xx_insert16(uint16_t value, void *buf, ulong *len)
{
	if (buf) {
		buf += *len;
		*(__le16 *)buf = cpu_to_le16(value);
	}
	*len += sizeof(value);
}

static inline void
qla27xx_insert32(uint32_t value, void *buf, ulong *len)
{
	if (buf) {
		buf += *len;
		*(__le32 *)buf = cpu_to_le32(value);
	}
	*len += sizeof(value);
}

static inline void
qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len)
{
	if (buf && mem && size) {
		buf += *len;
		memcpy(buf, mem, size);
	}
	*len += size;
}

static inline void
qla27xx_read8(void __iomem *window, void *buf, ulong *len)
{
	uint8_t value = ~0;

	if (buf) {
		value = RD_REG_BYTE(window);
	}
	qla27xx_insert32(value, buf, len);
}

static inline void
qla27xx_read16(void __iomem *window, void *buf, ulong *len)
{
	uint16_t value = ~0;

	if (buf) {
		value = RD_REG_WORD(window);
	}
	qla27xx_insert32(value, buf, len);
}

static inline void
qla27xx_read32(void __iomem *window, void *buf, ulong *len)
{
	uint32_t value = ~0;

	if (buf) {
		value = RD_REG_DWORD(window);
	}
	qla27xx_insert32(value, buf, len);
}

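/*
 * Pick the register accessor for a 1-, 2- or 4-byte window stride; each
 * value read is still widened to a 32-bit word in the dump.
 */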
static inline void (*qla27xx_read_vector(uint width))(void __iomem*, void *, ulong *)
{
	return
	    (width == 1) ? qla27xx_read8 :
	    (width == 2) ? qla27xx_read16 :
			   qla27xx_read32;
}

static inline void
qla27xx_read_reg(__iomem struct device_reg_24xx *reg,
	uint offset, void *buf, ulong *len)
{
	void __iomem *window = (void __iomem *)reg + offset;

	qla27xx_read32(window, buf, len);
}

static inline void
qla27xx_write_reg(__iomem struct device_reg_24xx *reg,
	uint offset, uint32_t data, void *buf)
{
	__iomem void *window = (void __iomem *)reg + offset;

	if (buf) {
		WRT_REG_DWORD(window, data);
	}
}

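/*
 * Windowed register dump: select the register window by writing addr to
 * IOBASE_ADDR, then emit count (address, value) pairs of the requested
 * width starting at the given PCI offset.
 */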
static inline void
qla27xx_read_window(__iomem struct device_reg_24xx *reg,
	uint32_t addr, uint offset, uint count, uint width, void *buf,
	ulong *len)
{
	void __iomem *window = (void __iomem *)reg + offset;
	void (*readn)(void __iomem*, void *, ulong *) = qla27xx_read_vector(width);

	qla27xx_write_reg(reg, IOBASE_ADDR, addr, buf);
	while (count--) {
		qla27xx_insert32(addr, buf, len);
		readn(window, buf, len);
		window += width;
		addr++;
	}
}

static inline void
qla27xx_skip_entry(struct qla27xx_fwdt_entry *ent, void *buf)
{
	if (buf)
		ent->hdr.driver_flags |= DRIVER_FLAG_SKIP_ENTRY;
}

static inline struct qla27xx_fwdt_entry *
qla27xx_next_entry(struct qla27xx_fwdt_entry *ent)
{
	return (void *)ent + ent->hdr.size;
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t0(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd100,
	    "%s: nop [%lx]\n", __func__, *len);
	qla27xx_skip_entry(ent, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t255(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd1ff,
	    "%s: end [%lx]\n", __func__, *len);
	qla27xx_skip_entry(ent, buf);

	/* terminate */
	return NULL;
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t256(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd200,
	    "%s: rdio t1 [%lx]\n", __func__, *len);
	qla27xx_read_window(reg, ent->t256.base_addr, ent->t256.pci_offset,
	    ent->t256.reg_count, ent->t256.reg_width, buf, len);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t257(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd201,
	    "%s: wrio t1 [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, IOBASE_ADDR, ent->t257.base_addr, buf);
	qla27xx_write_reg(reg, ent->t257.pci_offset, ent->t257.write_data, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t258(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd202,
	    "%s: rdio t2 [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, ent->t258.banksel_offset, ent->t258.bank, buf);
	qla27xx_read_window(reg, ent->t258.base_addr, ent->t258.pci_offset,
	    ent->t258.reg_count, ent->t258.reg_width, buf, len);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t259(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd203,
	    "%s: wrio t2 [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, IOBASE_ADDR, ent->t259.base_addr, buf);
	qla27xx_write_reg(reg, ent->t259.banksel_offset, ent->t259.bank, buf);
	qla27xx_write_reg(reg, ent->t259.pci_offset, ent->t259.write_data, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t260(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd204,
	    "%s: rdpci [%lx]\n", __func__, *len);
	qla27xx_insert32(ent->t260.pci_offset, buf, len);
	qla27xx_read_reg(reg, ent->t260.pci_offset, buf, len);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t261(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd205,
	    "%s: wrpci [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, ent->t261.pci_offset, ent->t261.write_data, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t262(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong dwords;
	ulong start;
	ulong end;

	ql_dbg(ql_dbg_misc, vha, 0xd206,
	    "%s: rdram(%x) [%lx]\n", __func__, ent->t262.ram_area, *len);
	start = ent->t262.start_addr;
	end = ent->t262.end_addr;

	if (ent->t262.ram_area == T262_RAM_AREA_CRITICAL_RAM) {
		/* keep the range supplied by the template */
	} else if (ent->t262.ram_area == T262_RAM_AREA_EXTERNAL_RAM) {
		end = vha->hw->fw_memory_size;
		if (buf)
			ent->t262.end_addr = end;
	} else if (ent->t262.ram_area == T262_RAM_AREA_SHARED_RAM) {
		start = vha->hw->fw_shared_ram_start;
		end = vha->hw->fw_shared_ram_end;
		if (buf) {
			ent->t262.start_addr = start;
			ent->t262.end_addr = end;
		}
	} else if (ent->t262.ram_area == T262_RAM_AREA_DDR_RAM) {
		start = vha->hw->fw_ddr_ram_start;
		end = vha->hw->fw_ddr_ram_end;
		if (buf) {
			ent->t262.start_addr = start;
			ent->t262.end_addr = end;
		}
	} else if (ent->t262.ram_area == T262_RAM_AREA_MISC) {
		if (buf) {
			ent->t262.start_addr = start;
			ent->t262.end_addr = end;
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd022,
		    "%s: unknown area %x\n", __func__, ent->t262.ram_area);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}

	if (end < start || start == 0 || end == 0) {
		ql_dbg(ql_dbg_misc, vha, 0xd023,
		    "%s: unusable range (start=%x end=%x)\n", __func__,
		    ent->t262.start_addr, ent->t262.end_addr);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}

	dwords = end - start + 1;
	if (buf) {
		buf += *len;
		qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf);
	}
	*len += dwords * sizeof(uint32_t);
done:
	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t263(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint count = 0;
	uint i;
	uint length;

	ql_dbg(ql_dbg_misc, vha, 0xd207,
	    "%s: getq(%x) [%lx]\n", __func__, ent->t263.queue_type, *len);
	if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) {
		for (i = 0; i < vha->hw->max_req_queues; i++) {
			struct req_que *req = vha->hw->req_q_map[i];

			if (req || !buf) {
				length = req ?
				    req->length : REQUEST_ENTRY_CNT_24XX;
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(length, buf, len);
				qla27xx_insertbuf(req ? req->ring : NULL,
				    length * sizeof(*req->ring), buf, len);
				count++;
			}
		}
	} else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
			struct rsp_que *rsp = vha->hw->rsp_q_map[i];

			if (rsp || !buf) {
				length = rsp ?
				    rsp->length : RESPONSE_ENTRY_CNT_MQ;
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(length, buf, len);
				qla27xx_insertbuf(rsp ? rsp->ring : NULL,
				    length * sizeof(*rsp->ring), buf, len);
				count++;
			}
		}
	} else if (QLA_TGT_MODE_ENABLED() &&
	    ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) {
		struct qla_hw_data *ha = vha->hw;
		struct atio *atr = ha->tgt.atio_ring;

		if (atr || !buf) {
			length = ha->tgt.atio_q_length;
			qla27xx_insert16(0, buf, len);
			qla27xx_insert16(length, buf, len);
			qla27xx_insertbuf(atr, length * sizeof(*atr), buf, len);
			count++;
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd026,
		    "%s: unknown queue %x\n", __func__, ent->t263.queue_type);
		qla27xx_skip_entry(ent, buf);
	}

	if (buf) {
		if (count)
			ent->t263.num_queues = count;
		else
			qla27xx_skip_entry(ent, buf);
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t264(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd208,
	    "%s: getfce [%lx]\n", __func__, *len);
	if (vha->hw->fce) {
		if (buf) {
			ent->t264.fce_trace_size = FCE_SIZE;
			ent->t264.write_pointer = vha->hw->fce_wr;
			ent->t264.base_pointer = vha->hw->fce_dma;
			ent->t264.fce_enable_mb0 = vha->hw->fce_mb[0];
			ent->t264.fce_enable_mb2 = vha->hw->fce_mb[2];
			ent->t264.fce_enable_mb3 = vha->hw->fce_mb[3];
			ent->t264.fce_enable_mb4 = vha->hw->fce_mb[4];
			ent->t264.fce_enable_mb5 = vha->hw->fce_mb[5];
			ent->t264.fce_enable_mb6 = vha->hw->fce_mb[6];
		}
		qla27xx_insertbuf(vha->hw->fce, FCE_SIZE, buf, len);
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd027,
		    "%s: missing fce\n", __func__);
		qla27xx_skip_entry(ent, buf);
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t265(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd209,
	    "%s: pause risc [%lx]\n", __func__, *len);
	if (buf)
		qla24xx_pause_risc(reg, vha->hw);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t266(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20a,
	    "%s: reset risc [%lx]\n", __func__, *len);
	if (buf)
		qla24xx_soft_reset(vha->hw);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t267(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc, vha, 0xd20b,
	    "%s: dis intr [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, ent->t267.pci_offset, ent->t267.data, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t268(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20c,
	    "%s: gethb(%x) [%lx]\n", __func__, ent->t268.buf_type, *len);
	switch (ent->t268.buf_type) {
	case T268_BUF_TYPE_EXTD_TRACE:
		if (vha->hw->eft) {
			if (buf) {
				ent->t268.buf_size = EFT_SIZE;
				ent->t268.start_addr = vha->hw->eft_dma;
			}
			qla27xx_insertbuf(vha->hw->eft, EFT_SIZE, buf, len);
		} else {
			ql_dbg(ql_dbg_misc, vha, 0xd028,
			    "%s: missing eft\n", __func__);
			qla27xx_skip_entry(ent, buf);
		}
		break;
	case T268_BUF_TYPE_EXCH_BUFOFF:
		if (vha->hw->exchoffld_buf) {
			if (buf) {
				ent->t268.buf_size = vha->hw->exchoffld_size;
				ent->t268.start_addr =
					vha->hw->exchoffld_buf_dma;
			}
			qla27xx_insertbuf(vha->hw->exchoffld_buf,
			    vha->hw->exchoffld_size, buf, len);
		} else {
			ql_dbg(ql_dbg_misc, vha, 0xd028,
			    "%s: missing exch offld\n", __func__);
			qla27xx_skip_entry(ent, buf);
		}
		break;
	case T268_BUF_TYPE_EXTD_LOGIN:
		if (vha->hw->exlogin_buf) {
			if (buf) {
				ent->t268.buf_size = vha->hw->exlogin_size;
				ent->t268.start_addr =
					vha->hw->exlogin_buf_dma;
			}
			qla27xx_insertbuf(vha->hw->exlogin_buf,
			    vha->hw->exlogin_size, buf, len);
		} else {
			ql_dbg(ql_dbg_misc, vha, 0xd028,
			    "%s: missing ext login\n", __func__);
			qla27xx_skip_entry(ent, buf);
		}
		break;

	case T268_BUF_TYPE_REQ_MIRROR:
	case T268_BUF_TYPE_RSP_MIRROR:
		/*
		 * Mirror pointers are not implemented in the
		 * driver, instead shadow pointers are used by
		 * the driver. Skip these entries.
		 */
		qla27xx_skip_entry(ent, buf);
		break;
	default:
		ql_dbg(ql_dbg_async, vha, 0xd02b,
		    "%s: unknown buffer %x\n", __func__, ent->t268.buf_type);
		qla27xx_skip_entry(ent, buf);
		break;
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t269(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd20d,
	    "%s: scratch [%lx]\n", __func__, *len);
	qla27xx_insert32(0xaaaaaaaa, buf, len);
	qla27xx_insert32(0xbbbbbbbb, buf, len);
	qla27xx_insert32(0xcccccccc, buf, len);
	qla27xx_insert32(0xdddddddd, buf, len);
	qla27xx_insert32(*len + sizeof(uint32_t), buf, len);
	if (buf)
		ent->t269.scratch_size = 5 * sizeof(uint32_t);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t270(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
	ulong dwords = ent->t270.count;
	ulong addr = ent->t270.addr;

	ql_dbg(ql_dbg_misc, vha, 0xd20e,
	    "%s: rdremreg [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
	while (dwords--) {
		qla27xx_write_reg(reg, 0xc0, addr|0x80000000, buf);
		qla27xx_insert32(addr, buf, len);
		qla27xx_read_reg(reg, 0xc4, buf, len);
		addr += sizeof(uint32_t);
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t271(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);
	ulong addr = ent->t271.addr;
	ulong data = ent->t271.data;

	ql_dbg(ql_dbg_misc, vha, 0xd20f,
	    "%s: wrremreg [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, IOBASE_ADDR, 0x40, buf);
	qla27xx_write_reg(reg, 0xc4, data, buf);
	qla27xx_write_reg(reg, 0xc0, addr, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t272(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong dwords = ent->t272.count;
	ulong start = ent->t272.addr;

	ql_dbg(ql_dbg_misc, vha, 0xd210,
	    "%s: rdremram [%lx]\n", __func__, *len);
	if (buf) {
		ql_dbg(ql_dbg_misc, vha, 0xd02c,
		    "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords);
		buf += *len;
		qla27xx_dump_mpi_ram(vha->hw, start, buf, dwords, &buf);
	}
	*len += dwords * sizeof(uint32_t);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t273(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong dwords = ent->t273.count;
	ulong addr = ent->t273.addr;
	uint32_t value;

	ql_dbg(ql_dbg_misc, vha, 0xd211,
	    "%s: pcicfg [%lx]\n", __func__, *len);
	while (dwords--) {
		value = ~0;
		if (pci_read_config_dword(vha->hw->pdev, addr, &value))
			ql_dbg(ql_dbg_misc, vha, 0xd02d,
			    "%s: failed pcicfg read at %lx\n", __func__, addr);
		qla27xx_insert32(addr, buf, len);
		qla27xx_insert32(value, buf, len);
		addr += sizeof(uint32_t);
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t274(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint count = 0;
	uint i;

	ql_dbg(ql_dbg_misc, vha, 0xd212,
	    "%s: getqsh(%x) [%lx]\n", __func__, ent->t274.queue_type, *len);
	if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) {
		for (i = 0; i < vha->hw->max_req_queues; i++) {
			struct req_que *req = vha->hw->req_q_map[i];

			if (req || !buf) {
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(1, buf, len);
				qla27xx_insert32(req && req->out_ptr ?
				    *req->out_ptr : 0, buf, len);
				count++;
			}
		}
	} else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
		for (i = 0; i < vha->hw->max_rsp_queues; i++) {
			struct rsp_que *rsp = vha->hw->rsp_q_map[i];

			if (rsp || !buf) {
				qla27xx_insert16(i, buf, len);
				qla27xx_insert16(1, buf, len);
				qla27xx_insert32(rsp && rsp->in_ptr ?
				    *rsp->in_ptr : 0, buf, len);
				count++;
			}
		}
	} else if (QLA_TGT_MODE_ENABLED() &&
	    ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) {
		struct qla_hw_data *ha = vha->hw;
		struct atio *atr = ha->tgt.atio_ring_ptr;

		if (atr || !buf) {
			qla27xx_insert16(0, buf, len);
			qla27xx_insert16(1, buf, len);
			qla27xx_insert32(ha->tgt.atio_q_in ?
			    readl(ha->tgt.atio_q_in) : 0, buf, len);
			count++;
		}
	} else {
		ql_dbg(ql_dbg_misc, vha, 0xd02f,
		    "%s: unknown queue %x\n", __func__, ent->t274.queue_type);
		qla27xx_skip_entry(ent, buf);
	}

	if (buf) {
		if (count)
			ent->t274.num_queues = count;
		else
			qla27xx_skip_entry(ent, buf);
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t275(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ulong offset = offsetof(typeof(*ent), t275.buffer);

	ql_dbg(ql_dbg_misc, vha, 0xd213,
	    "%s: buffer(%x) [%lx]\n", __func__, ent->t275.length, *len);
	if (!ent->t275.length) {
		ql_dbg(ql_dbg_misc, vha, 0xd020,
		    "%s: buffer zero length\n", __func__);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}
	if (offset + ent->t275.length > ent->hdr.size) {
		ql_dbg(ql_dbg_misc, vha, 0xd030,
		    "%s: buffer overflow\n", __func__);
		qla27xx_skip_entry(ent, buf);
		goto done;
	}

	qla27xx_insertbuf(ent->t275.buffer, ent->t275.length, buf, len);
done:
	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t276(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	uint type = vha->hw->pdev->device >> 4 & 0xf;
	uint func = vha->hw->port_no & 0x3;

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd214,
	    "%s: cond [%lx]\n", __func__, *len);

	if (type != ent->t276.cond1 || func != ent->t276.cond2) {
		ent = qla27xx_next_entry(ent);
		qla27xx_skip_entry(ent, buf);
	}

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t277(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd215,
	    "%s: rdpep [%lx]\n", __func__, *len);
	qla27xx_insert32(ent->t277.wr_cmd_data, buf, len);
	qla27xx_write_reg(reg, ent->t277.cmd_addr, ent->t277.wr_cmd_data, buf);
	qla27xx_read_reg(reg, ent->t277.data_addr, buf, len);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_t278(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	struct device_reg_24xx __iomem *reg = qla27xx_isp_reg(vha);

	ql_dbg(ql_dbg_misc + ql_dbg_verbose, vha, 0xd216,
	    "%s: wrpep [%lx]\n", __func__, *len);
	qla27xx_write_reg(reg, ent->t278.data_addr, ent->t278.wr_data, buf);
	qla27xx_write_reg(reg, ent->t278.cmd_addr, ent->t278.wr_cmd_data, buf);

	return qla27xx_next_entry(ent);
}

static struct qla27xx_fwdt_entry *
qla27xx_fwdt_entry_other(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
{
	ql_dbg(ql_dbg_misc, vha, 0xd2ff,
	    "%s: type %x [%lx]\n", __func__, ent->hdr.type, *len);
	qla27xx_skip_entry(ent, buf);

	return qla27xx_next_entry(ent);
}

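/*
 * Entry-type dispatch table, kept in ascending type order:
 * qla27xx_find_entry() below stops at the first type that is not less
 * than the one requested, and the -1 sentinel maps anything unknown to
 * qla27xx_fwdt_entry_other().
 */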
static struct {
	uint type;
	typeof(qla27xx_fwdt_entry_other)(*call);
} qla27xx_fwdt_entry_call[] = {
	{ ENTRY_TYPE_NOP, qla27xx_fwdt_entry_t0 },
	{ ENTRY_TYPE_TMP_END, qla27xx_fwdt_entry_t255 },
	{ ENTRY_TYPE_RD_IOB_T1, qla27xx_fwdt_entry_t256 },
	{ ENTRY_TYPE_WR_IOB_T1, qla27xx_fwdt_entry_t257 },
	{ ENTRY_TYPE_RD_IOB_T2, qla27xx_fwdt_entry_t258 },
	{ ENTRY_TYPE_WR_IOB_T2, qla27xx_fwdt_entry_t259 },
	{ ENTRY_TYPE_RD_PCI, qla27xx_fwdt_entry_t260 },
	{ ENTRY_TYPE_WR_PCI, qla27xx_fwdt_entry_t261 },
	{ ENTRY_TYPE_RD_RAM, qla27xx_fwdt_entry_t262 },
	{ ENTRY_TYPE_GET_QUEUE, qla27xx_fwdt_entry_t263 },
	{ ENTRY_TYPE_GET_FCE, qla27xx_fwdt_entry_t264 },
	{ ENTRY_TYPE_PSE_RISC, qla27xx_fwdt_entry_t265 },
	{ ENTRY_TYPE_RST_RISC, qla27xx_fwdt_entry_t266 },
	{ ENTRY_TYPE_DIS_INTR, qla27xx_fwdt_entry_t267 },
	{ ENTRY_TYPE_GET_HBUF, qla27xx_fwdt_entry_t268 },
	{ ENTRY_TYPE_SCRATCH, qla27xx_fwdt_entry_t269 },
	{ ENTRY_TYPE_RDREMREG, qla27xx_fwdt_entry_t270 },
	{ ENTRY_TYPE_WRREMREG, qla27xx_fwdt_entry_t271 },
	{ ENTRY_TYPE_RDREMRAM, qla27xx_fwdt_entry_t272 },
	{ ENTRY_TYPE_PCICFG, qla27xx_fwdt_entry_t273 },
	{ ENTRY_TYPE_GET_SHADOW, qla27xx_fwdt_entry_t274 },
	{ ENTRY_TYPE_WRITE_BUF, qla27xx_fwdt_entry_t275 },
	{ ENTRY_TYPE_CONDITIONAL, qla27xx_fwdt_entry_t276 },
	{ ENTRY_TYPE_RDPEPREG, qla27xx_fwdt_entry_t277 },
	{ ENTRY_TYPE_WRPEPREG, qla27xx_fwdt_entry_t278 },
	{ -1, qla27xx_fwdt_entry_other }
};

static inline
typeof(qla27xx_fwdt_entry_call->call)(qla27xx_find_entry(uint type))
{
	typeof(*qla27xx_fwdt_entry_call) *list = qla27xx_fwdt_entry_call;

	while (list->type < type)
		list++;

	if (list->type == type)
		return list->call;
	return qla27xx_fwdt_entry_other;
}

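/*
 * Walk every entry in a dump template and dispatch it to its handler.
 * This is run twice per dump: once with buf == NULL so each handler only
 * accumulates *len (to size the dump buffer), and once with buf pointing
 * at the allocated buffer to actually capture the data.
 */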
static void
qla27xx_walk_template(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp, void *buf, ulong *len)
{
	struct qla27xx_fwdt_entry *ent = (void *)tmp + tmp->entry_offset;
	ulong count = tmp->entry_count;

	ql_dbg(ql_dbg_misc, vha, 0xd01a,
	    "%s: entry count %lx\n", __func__, count);
	while (count--) {
		ent = qla27xx_find_entry(ent->hdr.type)(vha, ent, buf, len);
		if (!ent)
			break;
	}

	if (count)
		ql_dbg(ql_dbg_misc, vha, 0xd018,
		    "%s: entry residual count (%lx)\n", __func__, count);

	if (ent)
		ql_dbg(ql_dbg_misc, vha, 0xd019,
		    "%s: missing end entry (%lx)\n", __func__, count);

	if (buf && *len != vha->hw->fw_dump_len)
		ql_dbg(ql_dbg_misc, vha, 0xd01b,
		    "%s: length=%#lx residual=%+ld\n",
		    __func__, *len, vha->hw->fw_dump_len - *len);

	if (buf) {
		ql_log(ql_log_warn, vha, 0xd015,
		    "Firmware dump saved to temp buffer (%lu/%p)\n",
		    vha->host_no, vha->hw->fw_dump);
		qla2x00_post_uevent_work(vha, QLA_UEVENT_CODE_FW_DUMP);
	}
}

static void
qla27xx_time_stamp(struct qla27xx_fwdt_template *tmp)
{
	tmp->capture_timestamp = jiffies;
}

static void
qla27xx_driver_info(struct qla27xx_fwdt_template *tmp)
{
	uint8_t v[] = { 0, 0, 0, 0, 0, 0 };

	sscanf(qla2x00_version_str, "%hhu.%hhu.%hhu.%hhu.%hhu.%hhu",
	    v+0, v+1, v+2, v+3, v+4, v+5);

	tmp->driver_info[0] = v[3] << 24 | v[2] << 16 | v[1] << 8 | v[0];
	tmp->driver_info[1] = v[5] << 8 | v[4];
	tmp->driver_info[2] = 0x12345678;
}

static void
qla27xx_firmware_info(struct qla27xx_fwdt_template *tmp,
	struct scsi_qla_host *vha)
{
	tmp->firmware_version[0] = vha->hw->fw_major_version;
	tmp->firmware_version[1] = vha->hw->fw_minor_version;
	tmp->firmware_version[2] = vha->hw->fw_subminor_version;
	tmp->firmware_version[3] =
		vha->hw->fw_attributes_h << 16 | vha->hw->fw_attributes;
	tmp->firmware_version[4] =
		vha->hw->fw_attributes_ext[1] << 16 | vha->hw->fw_attributes_ext[0];
}

static void
ql27xx_edit_template(struct scsi_qla_host *vha,
	struct qla27xx_fwdt_template *tmp)
{
	qla27xx_time_stamp(tmp);
	qla27xx_driver_info(tmp);
	qla27xx_firmware_info(tmp, vha);
}

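/*
 * Fold the template into a 32-bit checksum: sum it as 32-bit words in a
 * 64-bit accumulator, fold the carry back in, and return the complement.
 * A template with a correct stored checksum therefore yields zero here,
 * which is what qla27xx_verify_template_checksum() tests for.
 */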
static inline uint32_t
qla27xx_template_checksum(void *p, ulong size)
{
	uint32_t *buf = p;
	uint64_t sum = 0;

	size /= sizeof(*buf);

	while (size--)
		sum += *buf++;

	sum = (sum & 0xffffffff) + (sum >> 32);

	return ~sum;
}

static inline int
qla27xx_verify_template_checksum(struct qla27xx_fwdt_template *tmp)
{
	return qla27xx_template_checksum(tmp, tmp->template_size) == 0;
}

static inline int
qla27xx_verify_template_header(struct qla27xx_fwdt_template *tmp)
{
	return tmp->template_type == TEMPLATE_TYPE_FWDUMP;
}

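/*
 * Copy the validated template to the start of the dump buffer, stamp it
 * with capture time, driver and firmware info, then walk it in place so
 * the captured data lands directly behind the template itself.
 */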
static void
qla27xx_execute_fwdt_template(struct scsi_qla_host *vha)
{
	struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template;
	ulong len;

	if (qla27xx_fwdt_template_valid(tmp)) {
		len = tmp->template_size;
		tmp = memcpy(vha->hw->fw_dump, tmp, len);
		ql27xx_edit_template(vha, tmp);
		qla27xx_walk_template(vha, tmp, tmp, &len);
		vha->hw->fw_dump_len = len;
		vha->hw->fw_dumped = 1;
	}
}

ulong
qla27xx_fwdt_calculate_dump_size(struct scsi_qla_host *vha)
{
	struct qla27xx_fwdt_template *tmp = vha->hw->fw_dump_template;
	ulong len = 0;

	if (qla27xx_fwdt_template_valid(tmp)) {
		len = tmp->template_size;
		qla27xx_walk_template(vha, tmp, NULL, &len);
	}

	return len;
}

ulong
qla27xx_fwdt_template_size(void *p)
{
	struct qla27xx_fwdt_template *tmp = p;

	return tmp->template_size;
}

ulong
qla27xx_fwdt_template_default_size(void)
{
	return sizeof(ql27xx_fwdt_default_template);
}

const void *
qla27xx_fwdt_template_default(void)
{
	return ql27xx_fwdt_default_template;
}

int
qla27xx_fwdt_template_valid(void *p)
{
	struct qla27xx_fwdt_template *tmp = p;

	if (!qla27xx_verify_template_header(tmp)) {
		ql_log(ql_log_warn, NULL, 0xd01c,
		    "%s: template type %x\n", __func__, tmp->template_type);
		return false;
	}

	if (!qla27xx_verify_template_checksum(tmp)) {
		ql_log(ql_log_warn, NULL, 0xd01d,
		    "%s: failed template checksum\n", __func__);
		return false;
	}

	return true;
}

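/*
 * Capture a firmware dump. The caller signals via hardware_locked whether
 * it already holds hw->hardware_lock; otherwise the lock is taken here.
 * The #ifndef __CHECKER__ guards hide the conditional lock/unlock pairing
 * from sparse, which would otherwise warn about context imbalance.
 */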
void
qla27xx_fwdump(scsi_qla_host_t *vha, int hardware_locked)
{
	ulong flags = 0;

#ifndef __CHECKER__
	if (!hardware_locked)
		spin_lock_irqsave(&vha->hw->hardware_lock, flags);
#endif

	if (!vha->hw->fw_dump)
		ql_log(ql_log_warn, vha, 0xd01e, "fwdump buffer missing.\n");
	else if (!vha->hw->fw_dump_template)
		ql_log(ql_log_warn, vha, 0xd01f, "fwdump template missing.\n");
	else if (vha->hw->fw_dumped)
		ql_log(ql_log_warn, vha, 0xd300,
		    "Firmware has been previously dumped (%p),"
		    " -- ignoring request\n", vha->hw->fw_dump);
	else {
		QLA_FW_STOPPED(vha->hw);
		qla27xx_execute_fwdt_template(vha);
	}

#ifndef __CHECKER__
	if (!hardware_locked)
		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
#endif
}