// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2005-2014, 2018-2020 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2015-2017 Intel Deutschland GmbH
 */
7 #include <linux/devcoredump.h>
17 * struct iwl_fw_dump_ptrs - set of pointers needed for the fw-error-dump
19 * @fwrt_ptr: pointer to the buffer coming from fwrt
20 * @trans_ptr: pointer to struct %iwl_trans_dump_data which contains the
22 * @trans_len: length of the valid data in trans_ptr
23 * @fwrt_len: length of the valid data in fwrt_ptr
25 struct iwl_fw_dump_ptrs
{
26 struct iwl_trans_dump_data
*trans_ptr
;
31 #define RADIO_REG_MAX_READ 0x2ad
32 static void iwl_read_radio_regs(struct iwl_fw_runtime
*fwrt
,
33 struct iwl_fw_error_dump_data
**dump_data
)
35 u8
*pos
= (void *)(*dump_data
)->data
;
39 IWL_DEBUG_INFO(fwrt
, "WRT radio registers dump\n");
41 if (!iwl_trans_grab_nic_access(fwrt
->trans
, &flags
))
44 (*dump_data
)->type
= cpu_to_le32(IWL_FW_ERROR_DUMP_RADIO_REG
);
45 (*dump_data
)->len
= cpu_to_le32(RADIO_REG_MAX_READ
);
47 for (i
= 0; i
< RADIO_REG_MAX_READ
; i
++) {
48 u32 rd_cmd
= RADIO_RSP_RD_CMD
;
50 rd_cmd
|= i
<< RADIO_RSP_ADDR_POS
;
51 iwl_write_prph_no_grab(fwrt
->trans
, RSP_RADIO_CMD
, rd_cmd
);
52 *pos
= (u8
)iwl_read_prph_no_grab(fwrt
->trans
, RSP_RADIO_RDDAT
);
57 *dump_data
= iwl_fw_error_next_data(*dump_data
);
59 iwl_trans_release_nic_access(fwrt
->trans
, &flags
);
62 static void iwl_fwrt_dump_rxf(struct iwl_fw_runtime
*fwrt
,
63 struct iwl_fw_error_dump_data
**dump_data
,
64 int size
, u32 offset
, int fifo_num
)
66 struct iwl_fw_error_dump_fifo
*fifo_hdr
;
71 fifo_hdr
= (void *)(*dump_data
)->data
;
72 fifo_data
= (void *)fifo_hdr
->data
;
75 /* No need to try to read the data if the length is 0 */
79 /* Add a TLV for the RXF */
80 (*dump_data
)->type
= cpu_to_le32(IWL_FW_ERROR_DUMP_RXF
);
81 (*dump_data
)->len
= cpu_to_le32(fifo_len
+ sizeof(*fifo_hdr
));
83 fifo_hdr
->fifo_num
= cpu_to_le32(fifo_num
);
84 fifo_hdr
->available_bytes
=
85 cpu_to_le32(iwl_trans_read_prph(fwrt
->trans
,
86 RXF_RD_D_SPACE
+ offset
));
88 cpu_to_le32(iwl_trans_read_prph(fwrt
->trans
,
89 RXF_RD_WR_PTR
+ offset
));
91 cpu_to_le32(iwl_trans_read_prph(fwrt
->trans
,
92 RXF_RD_RD_PTR
+ offset
));
94 cpu_to_le32(iwl_trans_read_prph(fwrt
->trans
,
95 RXF_RD_FENCE_PTR
+ offset
));
96 fifo_hdr
->fence_mode
=
97 cpu_to_le32(iwl_trans_read_prph(fwrt
->trans
,
98 RXF_SET_FENCE_MODE
+ offset
));
101 iwl_trans_write_prph(fwrt
->trans
, RXF_SET_FENCE_MODE
+ offset
, 0x1);
102 /* Set fence pointer to the same place like WR pointer */
103 iwl_trans_write_prph(fwrt
->trans
, RXF_LD_WR2FENCE
+ offset
, 0x1);
104 /* Set fence offset */
105 iwl_trans_write_prph(fwrt
->trans
,
106 RXF_LD_FENCE_OFFSET_ADDR
+ offset
, 0x0);
109 fifo_len
/= sizeof(u32
); /* Size in DWORDS */
110 for (i
= 0; i
< fifo_len
; i
++)
111 fifo_data
[i
] = iwl_trans_read_prph(fwrt
->trans
,
112 RXF_FIFO_RD_FENCE_INC
+
114 *dump_data
= iwl_fw_error_next_data(*dump_data
);
117 static void iwl_fwrt_dump_txf(struct iwl_fw_runtime
*fwrt
,
118 struct iwl_fw_error_dump_data
**dump_data
,
119 int size
, u32 offset
, int fifo_num
)
121 struct iwl_fw_error_dump_fifo
*fifo_hdr
;
126 fifo_hdr
= (void *)(*dump_data
)->data
;
127 fifo_data
= (void *)fifo_hdr
->data
;
130 /* No need to try to read the data if the length is 0 */
134 /* Add a TLV for the FIFO */
135 (*dump_data
)->type
= cpu_to_le32(IWL_FW_ERROR_DUMP_TXF
);
136 (*dump_data
)->len
= cpu_to_le32(fifo_len
+ sizeof(*fifo_hdr
));
138 fifo_hdr
->fifo_num
= cpu_to_le32(fifo_num
);
139 fifo_hdr
->available_bytes
=
140 cpu_to_le32(iwl_trans_read_prph(fwrt
->trans
,
141 TXF_FIFO_ITEM_CNT
+ offset
));
143 cpu_to_le32(iwl_trans_read_prph(fwrt
->trans
,
144 TXF_WR_PTR
+ offset
));
146 cpu_to_le32(iwl_trans_read_prph(fwrt
->trans
,
147 TXF_RD_PTR
+ offset
));
148 fifo_hdr
->fence_ptr
=
149 cpu_to_le32(iwl_trans_read_prph(fwrt
->trans
,
150 TXF_FENCE_PTR
+ offset
));
151 fifo_hdr
->fence_mode
=
152 cpu_to_le32(iwl_trans_read_prph(fwrt
->trans
,
153 TXF_LOCK_FENCE
+ offset
));
155 /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
156 iwl_trans_write_prph(fwrt
->trans
, TXF_READ_MODIFY_ADDR
+ offset
,
157 TXF_WR_PTR
+ offset
);
159 /* Dummy-read to advance the read pointer to the head */
160 iwl_trans_read_prph(fwrt
->trans
, TXF_READ_MODIFY_DATA
+ offset
);
163 fifo_len
/= sizeof(u32
); /* Size in DWORDS */
164 for (i
= 0; i
< fifo_len
; i
++)
165 fifo_data
[i
] = iwl_trans_read_prph(fwrt
->trans
,
166 TXF_READ_MODIFY_DATA
+
168 *dump_data
= iwl_fw_error_next_data(*dump_data
);
171 static void iwl_fw_dump_rxf(struct iwl_fw_runtime
*fwrt
,
172 struct iwl_fw_error_dump_data
**dump_data
)
174 struct iwl_fwrt_shared_mem_cfg
*cfg
= &fwrt
->smem_cfg
;
177 IWL_DEBUG_INFO(fwrt
, "WRT RX FIFO dump\n");
179 if (!iwl_trans_grab_nic_access(fwrt
->trans
, &flags
))
182 if (iwl_fw_dbg_type_on(fwrt
, IWL_FW_ERROR_DUMP_RXF
)) {
184 iwl_fwrt_dump_rxf(fwrt
, dump_data
,
185 cfg
->lmac
[0].rxfifo1_size
, 0, 0);
187 iwl_fwrt_dump_rxf(fwrt
, dump_data
, cfg
->rxfifo2_size
,
189 fwrt
->trans
->trans_cfg
->umac_prph_offset
, 1);
190 /* Pull LMAC2 RXF1 */
191 if (fwrt
->smem_cfg
.num_lmacs
> 1)
192 iwl_fwrt_dump_rxf(fwrt
, dump_data
,
193 cfg
->lmac
[1].rxfifo1_size
,
194 LMAC2_PRPH_OFFSET
, 2);
197 iwl_trans_release_nic_access(fwrt
->trans
, &flags
);
200 static void iwl_fw_dump_txf(struct iwl_fw_runtime
*fwrt
,
201 struct iwl_fw_error_dump_data
**dump_data
)
203 struct iwl_fw_error_dump_fifo
*fifo_hdr
;
204 struct iwl_fwrt_shared_mem_cfg
*cfg
= &fwrt
->smem_cfg
;
210 IWL_DEBUG_INFO(fwrt
, "WRT TX FIFO dump\n");
212 if (!iwl_trans_grab_nic_access(fwrt
->trans
, &flags
))
215 if (iwl_fw_dbg_type_on(fwrt
, IWL_FW_ERROR_DUMP_TXF
)) {
216 /* Pull TXF data from LMAC1 */
217 for (i
= 0; i
< fwrt
->smem_cfg
.num_txfifo_entries
; i
++) {
218 /* Mark the number of TXF we're pulling now */
219 iwl_trans_write_prph(fwrt
->trans
, TXF_LARC_NUM
, i
);
220 iwl_fwrt_dump_txf(fwrt
, dump_data
,
221 cfg
->lmac
[0].txfifo_size
[i
], 0, i
);
224 /* Pull TXF data from LMAC2 */
225 if (fwrt
->smem_cfg
.num_lmacs
> 1) {
226 for (i
= 0; i
< fwrt
->smem_cfg
.num_txfifo_entries
;
228 /* Mark the number of TXF we're pulling now */
229 iwl_trans_write_prph(fwrt
->trans
,
231 LMAC2_PRPH_OFFSET
, i
);
232 iwl_fwrt_dump_txf(fwrt
, dump_data
,
233 cfg
->lmac
[1].txfifo_size
[i
],
235 i
+ cfg
->num_txfifo_entries
);
240 if (iwl_fw_dbg_type_on(fwrt
, IWL_FW_ERROR_DUMP_INTERNAL_TXF
) &&
241 fw_has_capa(&fwrt
->fw
->ucode_capa
,
242 IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG
)) {
243 /* Pull UMAC internal TXF data from all TXFs */
245 i
< ARRAY_SIZE(fwrt
->smem_cfg
.internal_txfifo_size
);
247 fifo_hdr
= (void *)(*dump_data
)->data
;
248 fifo_data
= (void *)fifo_hdr
->data
;
249 fifo_len
= fwrt
->smem_cfg
.internal_txfifo_size
[i
];
251 /* No need to try to read the data if the length is 0 */
255 /* Add a TLV for the internal FIFOs */
257 cpu_to_le32(IWL_FW_ERROR_DUMP_INTERNAL_TXF
);
259 cpu_to_le32(fifo_len
+ sizeof(*fifo_hdr
));
261 fifo_hdr
->fifo_num
= cpu_to_le32(i
);
263 /* Mark the number of TXF we're pulling now */
264 iwl_trans_write_prph(fwrt
->trans
, TXF_CPU2_NUM
, i
+
265 fwrt
->smem_cfg
.num_txfifo_entries
);
267 fifo_hdr
->available_bytes
=
268 cpu_to_le32(iwl_trans_read_prph(fwrt
->trans
,
269 TXF_CPU2_FIFO_ITEM_CNT
));
271 cpu_to_le32(iwl_trans_read_prph(fwrt
->trans
,
274 cpu_to_le32(iwl_trans_read_prph(fwrt
->trans
,
276 fifo_hdr
->fence_ptr
=
277 cpu_to_le32(iwl_trans_read_prph(fwrt
->trans
,
278 TXF_CPU2_FENCE_PTR
));
279 fifo_hdr
->fence_mode
=
280 cpu_to_le32(iwl_trans_read_prph(fwrt
->trans
,
281 TXF_CPU2_LOCK_FENCE
));
283 /* Set TXF_CPU2_READ_MODIFY_ADDR to TXF_CPU2_WR_PTR */
284 iwl_trans_write_prph(fwrt
->trans
,
285 TXF_CPU2_READ_MODIFY_ADDR
,
288 /* Dummy-read to advance the read pointer to head */
289 iwl_trans_read_prph(fwrt
->trans
,
290 TXF_CPU2_READ_MODIFY_DATA
);
293 fifo_len
/= sizeof(u32
); /* Size in DWORDS */
294 for (j
= 0; j
< fifo_len
; j
++)
296 iwl_trans_read_prph(fwrt
->trans
,
297 TXF_CPU2_READ_MODIFY_DATA
);
298 *dump_data
= iwl_fw_error_next_data(*dump_data
);
302 iwl_trans_release_nic_access(fwrt
->trans
, &flags
);
305 #define IWL8260_ICCM_OFFSET 0x44000 /* Only for B-step */
306 #define IWL8260_ICCM_LEN 0xC000 /* Only for B-step */
308 struct iwl_prph_range
{
312 static const struct iwl_prph_range iwl_prph_dump_addr_comm
[] = {
313 { .start
= 0x00a00000, .end
= 0x00a00000 },
314 { .start
= 0x00a0000c, .end
= 0x00a00024 },
315 { .start
= 0x00a0002c, .end
= 0x00a0003c },
316 { .start
= 0x00a00410, .end
= 0x00a00418 },
317 { .start
= 0x00a00420, .end
= 0x00a00420 },
318 { .start
= 0x00a00428, .end
= 0x00a00428 },
319 { .start
= 0x00a00430, .end
= 0x00a0043c },
320 { .start
= 0x00a00444, .end
= 0x00a00444 },
321 { .start
= 0x00a004c0, .end
= 0x00a004cc },
322 { .start
= 0x00a004d8, .end
= 0x00a004d8 },
323 { .start
= 0x00a004e0, .end
= 0x00a004f0 },
324 { .start
= 0x00a00840, .end
= 0x00a00840 },
325 { .start
= 0x00a00850, .end
= 0x00a00858 },
326 { .start
= 0x00a01004, .end
= 0x00a01008 },
327 { .start
= 0x00a01010, .end
= 0x00a01010 },
328 { .start
= 0x00a01018, .end
= 0x00a01018 },
329 { .start
= 0x00a01024, .end
= 0x00a01024 },
330 { .start
= 0x00a0102c, .end
= 0x00a01034 },
331 { .start
= 0x00a0103c, .end
= 0x00a01040 },
332 { .start
= 0x00a01048, .end
= 0x00a01094 },
333 { .start
= 0x00a01c00, .end
= 0x00a01c20 },
334 { .start
= 0x00a01c58, .end
= 0x00a01c58 },
335 { .start
= 0x00a01c7c, .end
= 0x00a01c7c },
336 { .start
= 0x00a01c28, .end
= 0x00a01c54 },
337 { .start
= 0x00a01c5c, .end
= 0x00a01c5c },
338 { .start
= 0x00a01c60, .end
= 0x00a01cdc },
339 { .start
= 0x00a01ce0, .end
= 0x00a01d0c },
340 { .start
= 0x00a01d18, .end
= 0x00a01d20 },
341 { .start
= 0x00a01d2c, .end
= 0x00a01d30 },
342 { .start
= 0x00a01d40, .end
= 0x00a01d5c },
343 { .start
= 0x00a01d80, .end
= 0x00a01d80 },
344 { .start
= 0x00a01d98, .end
= 0x00a01d9c },
345 { .start
= 0x00a01da8, .end
= 0x00a01da8 },
346 { .start
= 0x00a01db8, .end
= 0x00a01df4 },
347 { .start
= 0x00a01dc0, .end
= 0x00a01dfc },
348 { .start
= 0x00a01e00, .end
= 0x00a01e2c },
349 { .start
= 0x00a01e40, .end
= 0x00a01e60 },
350 { .start
= 0x00a01e68, .end
= 0x00a01e6c },
351 { .start
= 0x00a01e74, .end
= 0x00a01e74 },
352 { .start
= 0x00a01e84, .end
= 0x00a01e90 },
353 { .start
= 0x00a01e9c, .end
= 0x00a01ec4 },
354 { .start
= 0x00a01ed0, .end
= 0x00a01ee0 },
355 { .start
= 0x00a01f00, .end
= 0x00a01f1c },
356 { .start
= 0x00a01f44, .end
= 0x00a01ffc },
357 { .start
= 0x00a02000, .end
= 0x00a02048 },
358 { .start
= 0x00a02068, .end
= 0x00a020f0 },
359 { .start
= 0x00a02100, .end
= 0x00a02118 },
360 { .start
= 0x00a02140, .end
= 0x00a0214c },
361 { .start
= 0x00a02168, .end
= 0x00a0218c },
362 { .start
= 0x00a021c0, .end
= 0x00a021c0 },
363 { .start
= 0x00a02400, .end
= 0x00a02410 },
364 { .start
= 0x00a02418, .end
= 0x00a02420 },
365 { .start
= 0x00a02428, .end
= 0x00a0242c },
366 { .start
= 0x00a02434, .end
= 0x00a02434 },
367 { .start
= 0x00a02440, .end
= 0x00a02460 },
368 { .start
= 0x00a02468, .end
= 0x00a024b0 },
369 { .start
= 0x00a024c8, .end
= 0x00a024cc },
370 { .start
= 0x00a02500, .end
= 0x00a02504 },
371 { .start
= 0x00a0250c, .end
= 0x00a02510 },
372 { .start
= 0x00a02540, .end
= 0x00a02554 },
373 { .start
= 0x00a02580, .end
= 0x00a025f4 },
374 { .start
= 0x00a02600, .end
= 0x00a0260c },
375 { .start
= 0x00a02648, .end
= 0x00a02650 },
376 { .start
= 0x00a02680, .end
= 0x00a02680 },
377 { .start
= 0x00a026c0, .end
= 0x00a026d0 },
378 { .start
= 0x00a02700, .end
= 0x00a0270c },
379 { .start
= 0x00a02804, .end
= 0x00a02804 },
380 { .start
= 0x00a02818, .end
= 0x00a0281c },
381 { .start
= 0x00a02c00, .end
= 0x00a02db4 },
382 { .start
= 0x00a02df4, .end
= 0x00a02fb0 },
383 { .start
= 0x00a03000, .end
= 0x00a03014 },
384 { .start
= 0x00a0301c, .end
= 0x00a0302c },
385 { .start
= 0x00a03034, .end
= 0x00a03038 },
386 { .start
= 0x00a03040, .end
= 0x00a03048 },
387 { .start
= 0x00a03060, .end
= 0x00a03068 },
388 { .start
= 0x00a03070, .end
= 0x00a03074 },
389 { .start
= 0x00a0307c, .end
= 0x00a0307c },
390 { .start
= 0x00a03080, .end
= 0x00a03084 },
391 { .start
= 0x00a0308c, .end
= 0x00a03090 },
392 { .start
= 0x00a03098, .end
= 0x00a03098 },
393 { .start
= 0x00a030a0, .end
= 0x00a030a0 },
394 { .start
= 0x00a030a8, .end
= 0x00a030b4 },
395 { .start
= 0x00a030bc, .end
= 0x00a030bc },
396 { .start
= 0x00a030c0, .end
= 0x00a0312c },
397 { .start
= 0x00a03c00, .end
= 0x00a03c5c },
398 { .start
= 0x00a04400, .end
= 0x00a04454 },
399 { .start
= 0x00a04460, .end
= 0x00a04474 },
400 { .start
= 0x00a044c0, .end
= 0x00a044ec },
401 { .start
= 0x00a04500, .end
= 0x00a04504 },
402 { .start
= 0x00a04510, .end
= 0x00a04538 },
403 { .start
= 0x00a04540, .end
= 0x00a04548 },
404 { .start
= 0x00a04560, .end
= 0x00a0457c },
405 { .start
= 0x00a04590, .end
= 0x00a04598 },
406 { .start
= 0x00a045c0, .end
= 0x00a045f4 },
409 static const struct iwl_prph_range iwl_prph_dump_addr_9000
[] = {
410 { .start
= 0x00a05c00, .end
= 0x00a05c18 },
411 { .start
= 0x00a05400, .end
= 0x00a056e8 },
412 { .start
= 0x00a08000, .end
= 0x00a098bc },
413 { .start
= 0x00a02400, .end
= 0x00a02758 },
414 { .start
= 0x00a04764, .end
= 0x00a0476c },
415 { .start
= 0x00a04770, .end
= 0x00a04774 },
416 { .start
= 0x00a04620, .end
= 0x00a04624 },
419 static const struct iwl_prph_range iwl_prph_dump_addr_22000
[] = {
420 { .start
= 0x00a00000, .end
= 0x00a00000 },
421 { .start
= 0x00a0000c, .end
= 0x00a00024 },
422 { .start
= 0x00a0002c, .end
= 0x00a00034 },
423 { .start
= 0x00a0003c, .end
= 0x00a0003c },
424 { .start
= 0x00a00410, .end
= 0x00a00418 },
425 { .start
= 0x00a00420, .end
= 0x00a00420 },
426 { .start
= 0x00a00428, .end
= 0x00a00428 },
427 { .start
= 0x00a00430, .end
= 0x00a0043c },
428 { .start
= 0x00a00444, .end
= 0x00a00444 },
429 { .start
= 0x00a00840, .end
= 0x00a00840 },
430 { .start
= 0x00a00850, .end
= 0x00a00858 },
431 { .start
= 0x00a01004, .end
= 0x00a01008 },
432 { .start
= 0x00a01010, .end
= 0x00a01010 },
433 { .start
= 0x00a01018, .end
= 0x00a01018 },
434 { .start
= 0x00a01024, .end
= 0x00a01024 },
435 { .start
= 0x00a0102c, .end
= 0x00a01034 },
436 { .start
= 0x00a0103c, .end
= 0x00a01040 },
437 { .start
= 0x00a01048, .end
= 0x00a01050 },
438 { .start
= 0x00a01058, .end
= 0x00a01058 },
439 { .start
= 0x00a01060, .end
= 0x00a01070 },
440 { .start
= 0x00a0108c, .end
= 0x00a0108c },
441 { .start
= 0x00a01c20, .end
= 0x00a01c28 },
442 { .start
= 0x00a01d10, .end
= 0x00a01d10 },
443 { .start
= 0x00a01e28, .end
= 0x00a01e2c },
444 { .start
= 0x00a01e60, .end
= 0x00a01e60 },
445 { .start
= 0x00a01e80, .end
= 0x00a01e80 },
446 { .start
= 0x00a01ea0, .end
= 0x00a01ea0 },
447 { .start
= 0x00a02000, .end
= 0x00a0201c },
448 { .start
= 0x00a02024, .end
= 0x00a02024 },
449 { .start
= 0x00a02040, .end
= 0x00a02048 },
450 { .start
= 0x00a020c0, .end
= 0x00a020e0 },
451 { .start
= 0x00a02400, .end
= 0x00a02404 },
452 { .start
= 0x00a0240c, .end
= 0x00a02414 },
453 { .start
= 0x00a0241c, .end
= 0x00a0243c },
454 { .start
= 0x00a02448, .end
= 0x00a024bc },
455 { .start
= 0x00a024c4, .end
= 0x00a024cc },
456 { .start
= 0x00a02508, .end
= 0x00a02508 },
457 { .start
= 0x00a02510, .end
= 0x00a02514 },
458 { .start
= 0x00a0251c, .end
= 0x00a0251c },
459 { .start
= 0x00a0252c, .end
= 0x00a0255c },
460 { .start
= 0x00a02564, .end
= 0x00a025a0 },
461 { .start
= 0x00a025a8, .end
= 0x00a025b4 },
462 { .start
= 0x00a025c0, .end
= 0x00a025c0 },
463 { .start
= 0x00a025e8, .end
= 0x00a025f4 },
464 { .start
= 0x00a02c08, .end
= 0x00a02c18 },
465 { .start
= 0x00a02c2c, .end
= 0x00a02c38 },
466 { .start
= 0x00a02c68, .end
= 0x00a02c78 },
467 { .start
= 0x00a03000, .end
= 0x00a03000 },
468 { .start
= 0x00a03010, .end
= 0x00a03014 },
469 { .start
= 0x00a0301c, .end
= 0x00a0302c },
470 { .start
= 0x00a03034, .end
= 0x00a03038 },
471 { .start
= 0x00a03040, .end
= 0x00a03044 },
472 { .start
= 0x00a03060, .end
= 0x00a03068 },
473 { .start
= 0x00a03070, .end
= 0x00a03070 },
474 { .start
= 0x00a0307c, .end
= 0x00a03084 },
475 { .start
= 0x00a0308c, .end
= 0x00a03090 },
476 { .start
= 0x00a03098, .end
= 0x00a03098 },
477 { .start
= 0x00a030a0, .end
= 0x00a030a0 },
478 { .start
= 0x00a030a8, .end
= 0x00a030b4 },
479 { .start
= 0x00a030bc, .end
= 0x00a030c0 },
480 { .start
= 0x00a030c8, .end
= 0x00a030f4 },
481 { .start
= 0x00a03100, .end
= 0x00a0312c },
482 { .start
= 0x00a03c00, .end
= 0x00a03c5c },
483 { .start
= 0x00a04400, .end
= 0x00a04454 },
484 { .start
= 0x00a04460, .end
= 0x00a04474 },
485 { .start
= 0x00a044c0, .end
= 0x00a044ec },
486 { .start
= 0x00a04500, .end
= 0x00a04504 },
487 { .start
= 0x00a04510, .end
= 0x00a04538 },
488 { .start
= 0x00a04540, .end
= 0x00a04548 },
489 { .start
= 0x00a04560, .end
= 0x00a04560 },
490 { .start
= 0x00a04570, .end
= 0x00a0457c },
491 { .start
= 0x00a04590, .end
= 0x00a04590 },
492 { .start
= 0x00a04598, .end
= 0x00a04598 },
493 { .start
= 0x00a045c0, .end
= 0x00a045f4 },
494 { .start
= 0x00a05c18, .end
= 0x00a05c1c },
495 { .start
= 0x00a0c000, .end
= 0x00a0c018 },
496 { .start
= 0x00a0c020, .end
= 0x00a0c028 },
497 { .start
= 0x00a0c038, .end
= 0x00a0c094 },
498 { .start
= 0x00a0c0c0, .end
= 0x00a0c104 },
499 { .start
= 0x00a0c10c, .end
= 0x00a0c118 },
500 { .start
= 0x00a0c150, .end
= 0x00a0c174 },
501 { .start
= 0x00a0c17c, .end
= 0x00a0c188 },
502 { .start
= 0x00a0c190, .end
= 0x00a0c198 },
503 { .start
= 0x00a0c1a0, .end
= 0x00a0c1a8 },
504 { .start
= 0x00a0c1b0, .end
= 0x00a0c1b8 },
507 static const struct iwl_prph_range iwl_prph_dump_addr_ax210
[] = {
508 { .start
= 0x00d03c00, .end
= 0x00d03c64 },
509 { .start
= 0x00d05c18, .end
= 0x00d05c1c },
510 { .start
= 0x00d0c000, .end
= 0x00d0c174 },
513 static void iwl_read_prph_block(struct iwl_trans
*trans
, u32 start
,
514 u32 len_bytes
, __le32
*data
)
518 for (i
= 0; i
< len_bytes
; i
+= 4)
519 *data
++ = cpu_to_le32(iwl_read_prph_no_grab(trans
, start
+ i
));
522 static void iwl_dump_prph(struct iwl_fw_runtime
*fwrt
,
523 const struct iwl_prph_range
*iwl_prph_dump_addr
,
524 u32 range_len
, void *ptr
)
526 struct iwl_fw_error_dump_prph
*prph
;
527 struct iwl_trans
*trans
= fwrt
->trans
;
528 struct iwl_fw_error_dump_data
**data
=
529 (struct iwl_fw_error_dump_data
**)ptr
;
536 IWL_DEBUG_INFO(trans
, "WRT PRPH dump\n");
538 if (!iwl_trans_grab_nic_access(trans
, &flags
))
541 for (i
= 0; i
< range_len
; i
++) {
542 /* The range includes both boundaries */
543 int num_bytes_in_chunk
= iwl_prph_dump_addr
[i
].end
-
544 iwl_prph_dump_addr
[i
].start
+ 4;
546 (*data
)->type
= cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH
);
547 (*data
)->len
= cpu_to_le32(sizeof(*prph
) +
549 prph
= (void *)(*data
)->data
;
550 prph
->prph_start
= cpu_to_le32(iwl_prph_dump_addr
[i
].start
);
552 iwl_read_prph_block(trans
, iwl_prph_dump_addr
[i
].start
,
553 /* our range is inclusive, hence + 4 */
554 iwl_prph_dump_addr
[i
].end
-
555 iwl_prph_dump_addr
[i
].start
+ 4,
558 *data
= iwl_fw_error_next_data(*data
);
561 iwl_trans_release_nic_access(trans
, &flags
);
565 * alloc_sgtable - allocates scallerlist table in the given size,
566 * fills it with pages and returns it
567 * @size: the size (in bytes) of the table
569 static struct scatterlist
*alloc_sgtable(int size
)
571 int alloc_size
, nents
, i
;
572 struct page
*new_page
;
573 struct scatterlist
*iter
;
574 struct scatterlist
*table
;
576 nents
= DIV_ROUND_UP(size
, PAGE_SIZE
);
577 table
= kcalloc(nents
, sizeof(*table
), GFP_KERNEL
);
580 sg_init_table(table
, nents
);
582 for_each_sg(table
, iter
, sg_nents(table
), i
) {
583 new_page
= alloc_page(GFP_KERNEL
);
585 /* release all previous allocated pages in the table */
587 for_each_sg(table
, iter
, sg_nents(table
), i
) {
588 new_page
= sg_page(iter
);
590 __free_page(new_page
);
595 alloc_size
= min_t(int, size
, PAGE_SIZE
);
597 sg_set_page(iter
, new_page
, alloc_size
, 0);
602 static void iwl_fw_get_prph_len(struct iwl_fw_runtime
*fwrt
,
603 const struct iwl_prph_range
*iwl_prph_dump_addr
,
604 u32 range_len
, void *ptr
)
606 u32
*prph_len
= (u32
*)ptr
;
607 int i
, num_bytes_in_chunk
;
612 for (i
= 0; i
< range_len
; i
++) {
613 /* The range includes both boundaries */
615 iwl_prph_dump_addr
[i
].end
-
616 iwl_prph_dump_addr
[i
].start
+ 4;
618 *prph_len
+= sizeof(struct iwl_fw_error_dump_data
) +
619 sizeof(struct iwl_fw_error_dump_prph
) +
624 static void iwl_fw_prph_handler(struct iwl_fw_runtime
*fwrt
, void *ptr
,
625 void (*handler
)(struct iwl_fw_runtime
*,
626 const struct iwl_prph_range
*,
631 if (fwrt
->trans
->trans_cfg
->device_family
>= IWL_DEVICE_FAMILY_AX210
) {
632 range_len
= ARRAY_SIZE(iwl_prph_dump_addr_ax210
);
633 handler(fwrt
, iwl_prph_dump_addr_ax210
, range_len
, ptr
);
634 } else if (fwrt
->trans
->trans_cfg
->device_family
>=
635 IWL_DEVICE_FAMILY_22000
) {
636 range_len
= ARRAY_SIZE(iwl_prph_dump_addr_22000
);
637 handler(fwrt
, iwl_prph_dump_addr_22000
, range_len
, ptr
);
639 range_len
= ARRAY_SIZE(iwl_prph_dump_addr_comm
);
640 handler(fwrt
, iwl_prph_dump_addr_comm
, range_len
, ptr
);
642 if (fwrt
->trans
->trans_cfg
->mq_rx_supported
) {
643 range_len
= ARRAY_SIZE(iwl_prph_dump_addr_9000
);
644 handler(fwrt
, iwl_prph_dump_addr_9000
, range_len
, ptr
);
649 static void iwl_fw_dump_mem(struct iwl_fw_runtime
*fwrt
,
650 struct iwl_fw_error_dump_data
**dump_data
,
651 u32 len
, u32 ofs
, u32 type
)
653 struct iwl_fw_error_dump_mem
*dump_mem
;
658 (*dump_data
)->type
= cpu_to_le32(IWL_FW_ERROR_DUMP_MEM
);
659 (*dump_data
)->len
= cpu_to_le32(len
+ sizeof(*dump_mem
));
660 dump_mem
= (void *)(*dump_data
)->data
;
661 dump_mem
->type
= cpu_to_le32(type
);
662 dump_mem
->offset
= cpu_to_le32(ofs
);
663 iwl_trans_read_mem_bytes(fwrt
->trans
, ofs
, dump_mem
->data
, len
);
664 *dump_data
= iwl_fw_error_next_data(*dump_data
);
666 IWL_DEBUG_INFO(fwrt
, "WRT memory dump. Type=%u\n", dump_mem
->type
);
/*
 * ADD_LEN - grow @len by @item_len plus, when @item_len is non-zero,
 * @const_len (a fixed per-item header). Zero-length items add nothing.
 */
#define ADD_LEN(len, item_len, const_len) \
	do {size_t item = item_len; len += (!!item) * const_len + item; } \
	while (0)
673 static int iwl_fw_rxf_len(struct iwl_fw_runtime
*fwrt
,
674 struct iwl_fwrt_shared_mem_cfg
*mem_cfg
)
676 size_t hdr_len
= sizeof(struct iwl_fw_error_dump_data
) +
677 sizeof(struct iwl_fw_error_dump_fifo
);
681 if (!iwl_fw_dbg_type_on(fwrt
, IWL_FW_ERROR_DUMP_RXF
))
684 /* Count RXF2 size */
685 ADD_LEN(fifo_len
, mem_cfg
->rxfifo2_size
, hdr_len
);
687 /* Count RXF1 sizes */
688 if (WARN_ON(mem_cfg
->num_lmacs
> MAX_NUM_LMAC
))
689 mem_cfg
->num_lmacs
= MAX_NUM_LMAC
;
691 for (i
= 0; i
< mem_cfg
->num_lmacs
; i
++)
692 ADD_LEN(fifo_len
, mem_cfg
->lmac
[i
].rxfifo1_size
, hdr_len
);
697 static int iwl_fw_txf_len(struct iwl_fw_runtime
*fwrt
,
698 struct iwl_fwrt_shared_mem_cfg
*mem_cfg
)
700 size_t hdr_len
= sizeof(struct iwl_fw_error_dump_data
) +
701 sizeof(struct iwl_fw_error_dump_fifo
);
705 if (!iwl_fw_dbg_type_on(fwrt
, IWL_FW_ERROR_DUMP_TXF
))
706 goto dump_internal_txf
;
708 /* Count TXF sizes */
709 if (WARN_ON(mem_cfg
->num_lmacs
> MAX_NUM_LMAC
))
710 mem_cfg
->num_lmacs
= MAX_NUM_LMAC
;
712 for (i
= 0; i
< mem_cfg
->num_lmacs
; i
++) {
715 for (j
= 0; j
< mem_cfg
->num_txfifo_entries
; j
++)
716 ADD_LEN(fifo_len
, mem_cfg
->lmac
[i
].txfifo_size
[j
],
721 if (!(iwl_fw_dbg_type_on(fwrt
, IWL_FW_ERROR_DUMP_INTERNAL_TXF
) &&
722 fw_has_capa(&fwrt
->fw
->ucode_capa
,
723 IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG
)))
726 for (i
= 0; i
< ARRAY_SIZE(mem_cfg
->internal_txfifo_size
); i
++)
727 ADD_LEN(fifo_len
, mem_cfg
->internal_txfifo_size
[i
], hdr_len
);
733 static void iwl_dump_paging(struct iwl_fw_runtime
*fwrt
,
734 struct iwl_fw_error_dump_data
**data
)
738 IWL_DEBUG_INFO(fwrt
, "WRT paging dump\n");
739 for (i
= 1; i
< fwrt
->num_of_paging_blk
+ 1; i
++) {
740 struct iwl_fw_error_dump_paging
*paging
;
742 fwrt
->fw_paging_db
[i
].fw_paging_block
;
743 dma_addr_t addr
= fwrt
->fw_paging_db
[i
].fw_paging_phys
;
745 (*data
)->type
= cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING
);
746 (*data
)->len
= cpu_to_le32(sizeof(*paging
) +
748 paging
= (void *)(*data
)->data
;
749 paging
->index
= cpu_to_le32(i
);
750 dma_sync_single_for_cpu(fwrt
->trans
->dev
, addr
,
753 memcpy(paging
->data
, page_address(pages
),
755 dma_sync_single_for_device(fwrt
->trans
->dev
, addr
,
758 (*data
) = iwl_fw_error_next_data(*data
);
762 static struct iwl_fw_error_dump_file
*
763 iwl_fw_error_dump_file(struct iwl_fw_runtime
*fwrt
,
764 struct iwl_fw_dump_ptrs
*fw_error_dump
,
765 struct iwl_fwrt_dump_data
*data
)
767 struct iwl_fw_error_dump_file
*dump_file
;
768 struct iwl_fw_error_dump_data
*dump_data
;
769 struct iwl_fw_error_dump_info
*dump_info
;
770 struct iwl_fw_error_dump_smem_cfg
*dump_smem_cfg
;
771 struct iwl_fw_error_dump_trigger_desc
*dump_trig
;
772 u32 sram_len
, sram_ofs
;
773 const struct iwl_fw_dbg_mem_seg_tlv
*fw_mem
= fwrt
->fw
->dbg
.mem_tlv
;
774 struct iwl_fwrt_shared_mem_cfg
*mem_cfg
= &fwrt
->smem_cfg
;
775 u32 file_len
, fifo_len
= 0, prph_len
= 0, radio_len
= 0;
776 u32 smem_len
= fwrt
->fw
->dbg
.n_mem_tlv
? 0 : fwrt
->trans
->cfg
->smem_len
;
777 u32 sram2_len
= fwrt
->fw
->dbg
.n_mem_tlv
?
778 0 : fwrt
->trans
->cfg
->dccm2_len
;
781 /* SRAM - include stack CCM if driver knows the values for it */
782 if (!fwrt
->trans
->cfg
->dccm_offset
|| !fwrt
->trans
->cfg
->dccm_len
) {
783 const struct fw_img
*img
;
785 if (fwrt
->cur_fw_img
>= IWL_UCODE_TYPE_MAX
)
787 img
= &fwrt
->fw
->img
[fwrt
->cur_fw_img
];
788 sram_ofs
= img
->sec
[IWL_UCODE_SECTION_DATA
].offset
;
789 sram_len
= img
->sec
[IWL_UCODE_SECTION_DATA
].len
;
791 sram_ofs
= fwrt
->trans
->cfg
->dccm_offset
;
792 sram_len
= fwrt
->trans
->cfg
->dccm_len
;
795 /* reading RXF/TXF sizes */
796 if (test_bit(STATUS_FW_ERROR
, &fwrt
->trans
->status
)) {
797 fifo_len
= iwl_fw_rxf_len(fwrt
, mem_cfg
);
798 fifo_len
+= iwl_fw_txf_len(fwrt
, mem_cfg
);
800 /* Make room for PRPH registers */
801 if (iwl_fw_dbg_type_on(fwrt
, IWL_FW_ERROR_DUMP_PRPH
))
802 iwl_fw_prph_handler(fwrt
, &prph_len
,
803 iwl_fw_get_prph_len
);
805 if (fwrt
->trans
->trans_cfg
->device_family
==
806 IWL_DEVICE_FAMILY_7000
&&
807 iwl_fw_dbg_type_on(fwrt
, IWL_FW_ERROR_DUMP_RADIO_REG
))
808 radio_len
= sizeof(*dump_data
) + RADIO_REG_MAX_READ
;
811 file_len
= sizeof(*dump_file
) + fifo_len
+ prph_len
+ radio_len
;
813 if (iwl_fw_dbg_type_on(fwrt
, IWL_FW_ERROR_DUMP_DEV_FW_INFO
))
814 file_len
+= sizeof(*dump_data
) + sizeof(*dump_info
);
815 if (iwl_fw_dbg_type_on(fwrt
, IWL_FW_ERROR_DUMP_MEM_CFG
))
816 file_len
+= sizeof(*dump_data
) + sizeof(*dump_smem_cfg
);
818 if (iwl_fw_dbg_type_on(fwrt
, IWL_FW_ERROR_DUMP_MEM
)) {
819 size_t hdr_len
= sizeof(*dump_data
) +
820 sizeof(struct iwl_fw_error_dump_mem
);
822 /* Dump SRAM only if no mem_tlvs */
823 if (!fwrt
->fw
->dbg
.n_mem_tlv
)
824 ADD_LEN(file_len
, sram_len
, hdr_len
);
826 /* Make room for all mem types that exist */
827 ADD_LEN(file_len
, smem_len
, hdr_len
);
828 ADD_LEN(file_len
, sram2_len
, hdr_len
);
830 for (i
= 0; i
< fwrt
->fw
->dbg
.n_mem_tlv
; i
++)
831 ADD_LEN(file_len
, le32_to_cpu(fw_mem
[i
].len
), hdr_len
);
834 /* Make room for fw's virtual image pages, if it exists */
835 if (iwl_fw_dbg_is_paging_enabled(fwrt
))
836 file_len
+= fwrt
->num_of_paging_blk
*
837 (sizeof(*dump_data
) +
838 sizeof(struct iwl_fw_error_dump_paging
) +
841 if (iwl_fw_dbg_is_d3_debug_enabled(fwrt
) && fwrt
->dump
.d3_debug_data
) {
842 file_len
+= sizeof(*dump_data
) +
843 fwrt
->trans
->cfg
->d3_debug_data_length
* 2;
846 /* If we only want a monitor dump, reset the file length */
847 if (data
->monitor_only
) {
848 file_len
= sizeof(*dump_file
) + sizeof(*dump_data
) * 2 +
849 sizeof(*dump_info
) + sizeof(*dump_smem_cfg
);
852 if (iwl_fw_dbg_type_on(fwrt
, IWL_FW_ERROR_DUMP_ERROR_INFO
) &&
854 file_len
+= sizeof(*dump_data
) + sizeof(*dump_trig
) +
857 dump_file
= vzalloc(file_len
);
861 fw_error_dump
->fwrt_ptr
= dump_file
;
863 dump_file
->barker
= cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER
);
864 dump_data
= (void *)dump_file
->data
;
866 if (iwl_fw_dbg_type_on(fwrt
, IWL_FW_ERROR_DUMP_DEV_FW_INFO
)) {
867 dump_data
->type
= cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO
);
868 dump_data
->len
= cpu_to_le32(sizeof(*dump_info
));
869 dump_info
= (void *)dump_data
->data
;
871 cpu_to_le32(CSR_HW_REV_TYPE(fwrt
->trans
->hw_rev
));
873 cpu_to_le32(CSR_HW_REV_STEP(fwrt
->trans
->hw_rev
));
874 memcpy(dump_info
->fw_human_readable
, fwrt
->fw
->human_readable
,
875 sizeof(dump_info
->fw_human_readable
));
876 strncpy(dump_info
->dev_human_readable
, fwrt
->trans
->name
,
877 sizeof(dump_info
->dev_human_readable
) - 1);
878 strncpy(dump_info
->bus_human_readable
, fwrt
->dev
->bus
->name
,
879 sizeof(dump_info
->bus_human_readable
) - 1);
880 dump_info
->num_of_lmacs
= fwrt
->smem_cfg
.num_lmacs
;
881 dump_info
->lmac_err_id
[0] =
882 cpu_to_le32(fwrt
->dump
.lmac_err_id
[0]);
883 if (fwrt
->smem_cfg
.num_lmacs
> 1)
884 dump_info
->lmac_err_id
[1] =
885 cpu_to_le32(fwrt
->dump
.lmac_err_id
[1]);
886 dump_info
->umac_err_id
= cpu_to_le32(fwrt
->dump
.umac_err_id
);
888 dump_data
= iwl_fw_error_next_data(dump_data
);
891 if (iwl_fw_dbg_type_on(fwrt
, IWL_FW_ERROR_DUMP_MEM_CFG
)) {
892 /* Dump shared memory configuration */
893 dump_data
->type
= cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG
);
894 dump_data
->len
= cpu_to_le32(sizeof(*dump_smem_cfg
));
895 dump_smem_cfg
= (void *)dump_data
->data
;
896 dump_smem_cfg
->num_lmacs
= cpu_to_le32(mem_cfg
->num_lmacs
);
897 dump_smem_cfg
->num_txfifo_entries
=
898 cpu_to_le32(mem_cfg
->num_txfifo_entries
);
899 for (i
= 0; i
< MAX_NUM_LMAC
; i
++) {
901 u32
*txf_size
= mem_cfg
->lmac
[i
].txfifo_size
;
903 for (j
= 0; j
< TX_FIFO_MAX_NUM
; j
++)
904 dump_smem_cfg
->lmac
[i
].txfifo_size
[j
] =
905 cpu_to_le32(txf_size
[j
]);
906 dump_smem_cfg
->lmac
[i
].rxfifo1_size
=
907 cpu_to_le32(mem_cfg
->lmac
[i
].rxfifo1_size
);
909 dump_smem_cfg
->rxfifo2_size
=
910 cpu_to_le32(mem_cfg
->rxfifo2_size
);
911 dump_smem_cfg
->internal_txfifo_addr
=
912 cpu_to_le32(mem_cfg
->internal_txfifo_addr
);
913 for (i
= 0; i
< TX_FIFO_INTERNAL_MAX_NUM
; i
++) {
914 dump_smem_cfg
->internal_txfifo_size
[i
] =
915 cpu_to_le32(mem_cfg
->internal_txfifo_size
[i
]);
918 dump_data
= iwl_fw_error_next_data(dump_data
);
921 /* We only dump the FIFOs if the FW is in error state */
923 iwl_fw_dump_rxf(fwrt
, &dump_data
);
924 iwl_fw_dump_txf(fwrt
, &dump_data
);
928 iwl_read_radio_regs(fwrt
, &dump_data
);
930 if (iwl_fw_dbg_type_on(fwrt
, IWL_FW_ERROR_DUMP_ERROR_INFO
) &&
932 dump_data
->type
= cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO
);
933 dump_data
->len
= cpu_to_le32(sizeof(*dump_trig
) +
935 dump_trig
= (void *)dump_data
->data
;
936 memcpy(dump_trig
, &data
->desc
->trig_desc
,
937 sizeof(*dump_trig
) + data
->desc
->len
);
939 dump_data
= iwl_fw_error_next_data(dump_data
);
942 /* In case we only want monitor dump, skip to dump trasport data */
943 if (data
->monitor_only
)
946 if (iwl_fw_dbg_type_on(fwrt
, IWL_FW_ERROR_DUMP_MEM
)) {
947 const struct iwl_fw_dbg_mem_seg_tlv
*fw_dbg_mem
=
948 fwrt
->fw
->dbg
.mem_tlv
;
950 if (!fwrt
->fw
->dbg
.n_mem_tlv
)
951 iwl_fw_dump_mem(fwrt
, &dump_data
, sram_len
, sram_ofs
,
952 IWL_FW_ERROR_DUMP_MEM_SRAM
);
954 for (i
= 0; i
< fwrt
->fw
->dbg
.n_mem_tlv
; i
++) {
955 u32 len
= le32_to_cpu(fw_dbg_mem
[i
].len
);
956 u32 ofs
= le32_to_cpu(fw_dbg_mem
[i
].ofs
);
958 iwl_fw_dump_mem(fwrt
, &dump_data
, len
, ofs
,
959 le32_to_cpu(fw_dbg_mem
[i
].data_type
));
962 iwl_fw_dump_mem(fwrt
, &dump_data
, smem_len
,
963 fwrt
->trans
->cfg
->smem_offset
,
964 IWL_FW_ERROR_DUMP_MEM_SMEM
);
966 iwl_fw_dump_mem(fwrt
, &dump_data
, sram2_len
,
967 fwrt
->trans
->cfg
->dccm2_offset
,
968 IWL_FW_ERROR_DUMP_MEM_SRAM
);
971 if (iwl_fw_dbg_is_d3_debug_enabled(fwrt
) && fwrt
->dump
.d3_debug_data
) {
972 u32 addr
= fwrt
->trans
->cfg
->d3_debug_data_base_addr
;
973 size_t data_size
= fwrt
->trans
->cfg
->d3_debug_data_length
;
975 dump_data
->type
= cpu_to_le32(IWL_FW_ERROR_DUMP_D3_DEBUG_DATA
);
976 dump_data
->len
= cpu_to_le32(data_size
* 2);
978 memcpy(dump_data
->data
, fwrt
->dump
.d3_debug_data
, data_size
);
980 kfree(fwrt
->dump
.d3_debug_data
);
981 fwrt
->dump
.d3_debug_data
= NULL
;
983 iwl_trans_read_mem_bytes(fwrt
->trans
, addr
,
984 dump_data
->data
+ data_size
,
987 dump_data
= iwl_fw_error_next_data(dump_data
);
990 /* Dump fw's virtual image */
991 if (iwl_fw_dbg_is_paging_enabled(fwrt
))
992 iwl_dump_paging(fwrt
, &dump_data
);
995 iwl_fw_prph_handler(fwrt
, &dump_data
, iwl_dump_prph
);
998 dump_file
->file_len
= cpu_to_le32(file_len
);
/**
 * struct iwl_dump_ini_region_data - region data
 * @reg_tlv: region TLV
 * @dump_data: dump data
 */
struct iwl_dump_ini_region_data {
	struct iwl_ucode_tlv *reg_tlv;
	struct iwl_fwrt_dump_data *dump_data;
};
1013 iwl_dump_ini_prph_mac_iter(struct iwl_fw_runtime
*fwrt
,
1014 struct iwl_dump_ini_region_data
*reg_data
,
1015 void *range_ptr
, int idx
)
1017 struct iwl_fw_ini_region_tlv
*reg
= (void *)reg_data
->reg_tlv
->data
;
1018 struct iwl_fw_ini_error_dump_range
*range
= range_ptr
;
1019 __le32
*val
= range
->data
;
1021 u32 addr
= le32_to_cpu(reg
->addrs
[idx
]) +
1022 le32_to_cpu(reg
->dev_addr
.offset
);
1025 range
->internal_base_addr
= cpu_to_le32(addr
);
1026 range
->range_data_size
= reg
->dev_addr
.size
;
1027 for (i
= 0; i
< le32_to_cpu(reg
->dev_addr
.size
); i
+= 4) {
1028 prph_val
= iwl_read_prph(fwrt
->trans
, addr
+ i
);
1029 if (prph_val
== 0x5a5a5a5a)
1031 *val
++ = cpu_to_le32(prph_val
);
1034 return sizeof(*range
) + le32_to_cpu(range
->range_data_size
);
1038 iwl_dump_ini_prph_phy_iter(struct iwl_fw_runtime
*fwrt
,
1039 struct iwl_dump_ini_region_data
*reg_data
,
1040 void *range_ptr
, int idx
)
1042 struct iwl_fw_ini_region_tlv
*reg
= (void *)reg_data
->reg_tlv
->data
;
1043 struct iwl_fw_ini_error_dump_range
*range
= range_ptr
;
1044 __le32
*val
= range
->data
;
1045 u32 indirect_wr_addr
= WMAL_INDRCT_RD_CMD1
;
1046 u32 indirect_rd_addr
= WMAL_MRSPF_1
;
1048 u32 addr
= le32_to_cpu(reg
->addrs
[idx
]);
1051 unsigned long flags
;
1054 range
->internal_base_addr
= cpu_to_le32(addr
);
1055 range
->range_data_size
= reg
->dev_addr
.size
;
1057 if (fwrt
->trans
->trans_cfg
->device_family
< IWL_DEVICE_FAMILY_AX210
)
1058 indirect_wr_addr
= WMAL_INDRCT_CMD1
;
1060 indirect_wr_addr
+= le32_to_cpu(reg
->dev_addr
.offset
);
1061 indirect_rd_addr
+= le32_to_cpu(reg
->dev_addr
.offset
);
1063 if (!iwl_trans_grab_nic_access(fwrt
->trans
, &flags
))
1066 dphy_addr
= (reg
->dev_addr
.offset
) ? WFPM_LMAC2_PS_CTL_RW
:
1067 WFPM_LMAC1_PS_CTL_RW
;
1068 dphy_state
= iwl_read_umac_prph_no_grab(fwrt
->trans
, dphy_addr
);
1070 for (i
= 0; i
< le32_to_cpu(reg
->dev_addr
.size
); i
+= 4) {
1071 if (dphy_state
== HBUS_TIMEOUT
||
1072 (dphy_state
& WFPM_PS_CTL_RW_PHYRF_PD_FSM_CURSTATE_MSK
) !=
1073 WFPM_PHYRF_STATE_ON
) {
1074 *val
++ = cpu_to_le32(WFPM_DPHY_OFF
);
1078 iwl_write_prph_no_grab(fwrt
->trans
, indirect_wr_addr
,
1079 WMAL_INDRCT_CMD(addr
+ i
));
1080 prph_val
= iwl_read_prph_no_grab(fwrt
->trans
,
1082 *val
++ = cpu_to_le32(prph_val
);
1085 iwl_trans_release_nic_access(fwrt
->trans
, &flags
);
1086 return sizeof(*range
) + le32_to_cpu(range
->range_data_size
);
1089 static int iwl_dump_ini_csr_iter(struct iwl_fw_runtime
*fwrt
,
1090 struct iwl_dump_ini_region_data
*reg_data
,
1091 void *range_ptr
, int idx
)
1093 struct iwl_fw_ini_region_tlv
*reg
= (void *)reg_data
->reg_tlv
->data
;
1094 struct iwl_fw_ini_error_dump_range
*range
= range_ptr
;
1095 __le32
*val
= range
->data
;
1096 u32 addr
= le32_to_cpu(reg
->addrs
[idx
]) +
1097 le32_to_cpu(reg
->dev_addr
.offset
);
1100 range
->internal_base_addr
= cpu_to_le32(addr
);
1101 range
->range_data_size
= reg
->dev_addr
.size
;
1102 for (i
= 0; i
< le32_to_cpu(reg
->dev_addr
.size
); i
+= 4)
1103 *val
++ = cpu_to_le32(iwl_trans_read32(fwrt
->trans
, addr
+ i
));
1105 return sizeof(*range
) + le32_to_cpu(range
->range_data_size
);
1108 static int iwl_dump_ini_config_iter(struct iwl_fw_runtime
*fwrt
,
1109 struct iwl_dump_ini_region_data
*reg_data
,
1110 void *range_ptr
, int idx
)
1112 struct iwl_trans
*trans
= fwrt
->trans
;
1113 struct iwl_fw_ini_region_tlv
*reg
= (void *)reg_data
->reg_tlv
->data
;
1114 struct iwl_fw_ini_error_dump_range
*range
= range_ptr
;
1115 __le32
*val
= range
->data
;
1116 u32 addr
= le32_to_cpu(reg
->addrs
[idx
]) +
1117 le32_to_cpu(reg
->dev_addr
.offset
);
1120 /* we shouldn't get here if the trans doesn't have read_config32 */
1121 if (WARN_ON_ONCE(!trans
->ops
->read_config32
))
1124 range
->internal_base_addr
= cpu_to_le32(addr
);
1125 range
->range_data_size
= reg
->dev_addr
.size
;
1126 for (i
= 0; i
< le32_to_cpu(reg
->dev_addr
.size
); i
+= 4) {
1130 ret
= trans
->ops
->read_config32(trans
, addr
+ i
, &tmp
);
1134 *val
++ = cpu_to_le32(tmp
);
1137 return sizeof(*range
) + le32_to_cpu(range
->range_data_size
);
1140 static int iwl_dump_ini_dev_mem_iter(struct iwl_fw_runtime
*fwrt
,
1141 struct iwl_dump_ini_region_data
*reg_data
,
1142 void *range_ptr
, int idx
)
1144 struct iwl_fw_ini_region_tlv
*reg
= (void *)reg_data
->reg_tlv
->data
;
1145 struct iwl_fw_ini_error_dump_range
*range
= range_ptr
;
1146 u32 addr
= le32_to_cpu(reg
->addrs
[idx
]) +
1147 le32_to_cpu(reg
->dev_addr
.offset
);
1149 range
->internal_base_addr
= cpu_to_le32(addr
);
1150 range
->range_data_size
= reg
->dev_addr
.size
;
1151 iwl_trans_read_mem_bytes(fwrt
->trans
, addr
, range
->data
,
1152 le32_to_cpu(reg
->dev_addr
.size
));
1154 return sizeof(*range
) + le32_to_cpu(range
->range_data_size
);
1157 static int _iwl_dump_ini_paging_iter(struct iwl_fw_runtime
*fwrt
,
1158 void *range_ptr
, int idx
)
1160 /* increase idx by 1 since the pages are from 1 to
1161 * fwrt->num_of_paging_blk + 1
1163 struct page
*page
= fwrt
->fw_paging_db
[++idx
].fw_paging_block
;
1164 struct iwl_fw_ini_error_dump_range
*range
= range_ptr
;
1165 dma_addr_t addr
= fwrt
->fw_paging_db
[idx
].fw_paging_phys
;
1166 u32 page_size
= fwrt
->fw_paging_db
[idx
].fw_paging_size
;
1168 range
->page_num
= cpu_to_le32(idx
);
1169 range
->range_data_size
= cpu_to_le32(page_size
);
1170 dma_sync_single_for_cpu(fwrt
->trans
->dev
, addr
, page_size
,
1172 memcpy(range
->data
, page_address(page
), page_size
);
1173 dma_sync_single_for_device(fwrt
->trans
->dev
, addr
, page_size
,
1176 return sizeof(*range
) + le32_to_cpu(range
->range_data_size
);
1179 static int iwl_dump_ini_paging_iter(struct iwl_fw_runtime
*fwrt
,
1180 struct iwl_dump_ini_region_data
*reg_data
,
1181 void *range_ptr
, int idx
)
1183 struct iwl_fw_ini_error_dump_range
*range
;
1186 if (!fwrt
->trans
->trans_cfg
->gen2
)
1187 return _iwl_dump_ini_paging_iter(fwrt
, range_ptr
, idx
);
1190 page_size
= fwrt
->trans
->init_dram
.paging
[idx
].size
;
1192 range
->page_num
= cpu_to_le32(idx
);
1193 range
->range_data_size
= cpu_to_le32(page_size
);
1194 memcpy(range
->data
, fwrt
->trans
->init_dram
.paging
[idx
].block
,
1197 return sizeof(*range
) + le32_to_cpu(range
->range_data_size
);
1201 iwl_dump_ini_mon_dram_iter(struct iwl_fw_runtime
*fwrt
,
1202 struct iwl_dump_ini_region_data
*reg_data
,
1203 void *range_ptr
, int idx
)
1205 struct iwl_fw_ini_region_tlv
*reg
= (void *)reg_data
->reg_tlv
->data
;
1206 struct iwl_fw_ini_error_dump_range
*range
= range_ptr
;
1207 struct iwl_dram_data
*frag
;
1208 u32 alloc_id
= le32_to_cpu(reg
->dram_alloc_id
);
1210 frag
= &fwrt
->trans
->dbg
.fw_mon_ini
[alloc_id
].frags
[idx
];
1212 range
->dram_base_addr
= cpu_to_le64(frag
->physical
);
1213 range
->range_data_size
= cpu_to_le32(frag
->size
);
1215 memcpy(range
->data
, frag
->block
, frag
->size
);
1217 return sizeof(*range
) + le32_to_cpu(range
->range_data_size
);
1220 static int iwl_dump_ini_mon_smem_iter(struct iwl_fw_runtime
*fwrt
,
1221 struct iwl_dump_ini_region_data
*reg_data
,
1222 void *range_ptr
, int idx
)
1224 struct iwl_fw_ini_region_tlv
*reg
= (void *)reg_data
->reg_tlv
->data
;
1225 struct iwl_fw_ini_error_dump_range
*range
= range_ptr
;
1226 u32 addr
= le32_to_cpu(reg
->internal_buffer
.base_addr
);
1228 range
->internal_base_addr
= cpu_to_le32(addr
);
1229 range
->range_data_size
= reg
->internal_buffer
.size
;
1230 iwl_trans_read_mem_bytes(fwrt
->trans
, addr
, range
->data
,
1231 le32_to_cpu(reg
->internal_buffer
.size
));
1233 return sizeof(*range
) + le32_to_cpu(range
->range_data_size
);
1236 static bool iwl_ini_txf_iter(struct iwl_fw_runtime
*fwrt
,
1237 struct iwl_dump_ini_region_data
*reg_data
, int idx
)
1239 struct iwl_fw_ini_region_tlv
*reg
= (void *)reg_data
->reg_tlv
->data
;
1240 struct iwl_txf_iter_data
*iter
= &fwrt
->dump
.txf_iter_data
;
1241 struct iwl_fwrt_shared_mem_cfg
*cfg
= &fwrt
->smem_cfg
;
1242 int txf_num
= cfg
->num_txfifo_entries
;
1243 int int_txf_num
= ARRAY_SIZE(cfg
->internal_txfifo_size
);
1244 u32 lmac_bitmap
= le32_to_cpu(reg
->fifos
.fid
[0]);
1247 if (le32_to_cpu(reg
->fifos
.offset
) && cfg
->num_lmacs
== 1) {
1248 IWL_ERR(fwrt
, "WRT: Invalid lmac offset 0x%x\n",
1249 le32_to_cpu(reg
->fifos
.offset
));
1253 iter
->internal_txf
= 0;
1254 iter
->fifo_size
= 0;
1256 if (le32_to_cpu(reg
->fifos
.offset
))
1262 if (!iter
->internal_txf
) {
1263 for (iter
->fifo
++; iter
->fifo
< txf_num
; iter
->fifo
++) {
1265 cfg
->lmac
[iter
->lmac
].txfifo_size
[iter
->fifo
];
1266 if (iter
->fifo_size
&& (lmac_bitmap
& BIT(iter
->fifo
)))
1272 iter
->internal_txf
= 1;
1274 if (!fw_has_capa(&fwrt
->fw
->ucode_capa
,
1275 IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG
))
1278 for (iter
->fifo
++; iter
->fifo
< int_txf_num
+ txf_num
; iter
->fifo
++) {
1280 cfg
->internal_txfifo_size
[iter
->fifo
- txf_num
];
1281 if (iter
->fifo_size
&& (lmac_bitmap
& BIT(iter
->fifo
)))
1288 static int iwl_dump_ini_txf_iter(struct iwl_fw_runtime
*fwrt
,
1289 struct iwl_dump_ini_region_data
*reg_data
,
1290 void *range_ptr
, int idx
)
1292 struct iwl_fw_ini_region_tlv
*reg
= (void *)reg_data
->reg_tlv
->data
;
1293 struct iwl_fw_ini_error_dump_range
*range
= range_ptr
;
1294 struct iwl_txf_iter_data
*iter
= &fwrt
->dump
.txf_iter_data
;
1295 struct iwl_fw_ini_error_dump_register
*reg_dump
= (void *)range
->data
;
1296 u32 offs
= le32_to_cpu(reg
->fifos
.offset
), addr
;
1297 u32 registers_num
= iwl_tlv_array_len(reg_data
->reg_tlv
, reg
, addrs
);
1298 u32 registers_size
= registers_num
* sizeof(*reg_dump
);
1300 unsigned long flags
;
1303 if (!iwl_ini_txf_iter(fwrt
, reg_data
, idx
))
1306 if (!iwl_trans_grab_nic_access(fwrt
->trans
, &flags
))
1309 range
->fifo_hdr
.fifo_num
= cpu_to_le32(iter
->fifo
);
1310 range
->fifo_hdr
.num_of_registers
= cpu_to_le32(registers_num
);
1311 range
->range_data_size
= cpu_to_le32(iter
->fifo_size
+ registers_size
);
1313 iwl_write_prph_no_grab(fwrt
->trans
, TXF_LARC_NUM
+ offs
, iter
->fifo
);
1316 * read txf registers. for each register, write to the dump the
1317 * register address and its value
1319 for (i
= 0; i
< registers_num
; i
++) {
1320 addr
= le32_to_cpu(reg
->addrs
[i
]) + offs
;
1322 reg_dump
->addr
= cpu_to_le32(addr
);
1323 reg_dump
->data
= cpu_to_le32(iwl_read_prph_no_grab(fwrt
->trans
,
1329 if (reg
->fifos
.hdr_only
) {
1330 range
->range_data_size
= cpu_to_le32(registers_size
);
1334 /* Set the TXF_READ_MODIFY_ADDR to TXF_WR_PTR */
1335 iwl_write_prph_no_grab(fwrt
->trans
, TXF_READ_MODIFY_ADDR
+ offs
,
1338 /* Dummy-read to advance the read pointer to the head */
1339 iwl_read_prph_no_grab(fwrt
->trans
, TXF_READ_MODIFY_DATA
+ offs
);
1342 addr
= TXF_READ_MODIFY_DATA
+ offs
;
1343 data
= (void *)reg_dump
;
1344 for (i
= 0; i
< iter
->fifo_size
; i
+= sizeof(*data
))
1345 *data
++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt
->trans
, addr
));
1348 iwl_trans_release_nic_access(fwrt
->trans
, &flags
);
1350 return sizeof(*range
) + le32_to_cpu(range
->range_data_size
);
1353 struct iwl_ini_rxf_data
{
1359 static void iwl_ini_get_rxf_data(struct iwl_fw_runtime
*fwrt
,
1360 struct iwl_dump_ini_region_data
*reg_data
,
1361 struct iwl_ini_rxf_data
*data
)
1363 struct iwl_fw_ini_region_tlv
*reg
= (void *)reg_data
->reg_tlv
->data
;
1364 u32 fid1
= le32_to_cpu(reg
->fifos
.fid
[0]);
1365 u32 fid2
= le32_to_cpu(reg
->fifos
.fid
[1]);
1371 /* make sure only one bit is set in only one fid */
1372 if (WARN_ONCE(hweight_long(fid1
) + hweight_long(fid2
) != 1,
1373 "fid1=%x, fid2=%x\n", fid1
, fid2
))
1376 memset(data
, 0, sizeof(*data
));
1379 fifo_idx
= ffs(fid1
) - 1;
1380 if (WARN_ONCE(fifo_idx
>= MAX_NUM_LMAC
, "fifo_idx=%d\n",
1384 data
->size
= fwrt
->smem_cfg
.lmac
[fifo_idx
].rxfifo1_size
;
1385 data
->fifo_num
= fifo_idx
;
1389 fifo_idx
= ffs(fid2
) - 1;
1390 if (iwl_fw_lookup_notif_ver(fwrt
->fw
, SYSTEM_GROUP
,
1391 SHARED_MEM_CFG_CMD
, 0) <= 3)
1396 if (WARN_ONCE(fifo_idx
> max_idx
,
1397 "invalid umac fifo idx %d", fifo_idx
))
1400 /* use bit 31 to distinguish between umac and lmac rxf while
1403 data
->fifo_num
= fifo_idx
| IWL_RXF_UMAC_BIT
;
1407 data
->size
= fwrt
->smem_cfg
.rxfifo2_size
;
1408 data
->offset
= iwl_umac_prph(fwrt
->trans
,
1409 RXF_DIFF_FROM_PREV
);
1412 data
->size
= fwrt
->smem_cfg
.rxfifo2_control_size
;
1413 data
->offset
= iwl_umac_prph(fwrt
->trans
,
1414 RXF2C_DIFF_FROM_PREV
);
1420 static int iwl_dump_ini_rxf_iter(struct iwl_fw_runtime
*fwrt
,
1421 struct iwl_dump_ini_region_data
*reg_data
,
1422 void *range_ptr
, int idx
)
1424 struct iwl_fw_ini_region_tlv
*reg
= (void *)reg_data
->reg_tlv
->data
;
1425 struct iwl_fw_ini_error_dump_range
*range
= range_ptr
;
1426 struct iwl_ini_rxf_data rxf_data
;
1427 struct iwl_fw_ini_error_dump_register
*reg_dump
= (void *)range
->data
;
1428 u32 offs
= le32_to_cpu(reg
->fifos
.offset
), addr
;
1429 u32 registers_num
= iwl_tlv_array_len(reg_data
->reg_tlv
, reg
, addrs
);
1430 u32 registers_size
= registers_num
* sizeof(*reg_dump
);
1432 unsigned long flags
;
1435 iwl_ini_get_rxf_data(fwrt
, reg_data
, &rxf_data
);
1439 if (!iwl_trans_grab_nic_access(fwrt
->trans
, &flags
))
1442 range
->fifo_hdr
.fifo_num
= cpu_to_le32(rxf_data
.fifo_num
);
1443 range
->fifo_hdr
.num_of_registers
= cpu_to_le32(registers_num
);
1444 range
->range_data_size
= cpu_to_le32(rxf_data
.size
+ registers_size
);
1447 * read rxf registers. for each register, write to the dump the
1448 * register address and its value
1450 for (i
= 0; i
< registers_num
; i
++) {
1451 addr
= le32_to_cpu(reg
->addrs
[i
]) + offs
;
1453 reg_dump
->addr
= cpu_to_le32(addr
);
1454 reg_dump
->data
= cpu_to_le32(iwl_read_prph_no_grab(fwrt
->trans
,
1460 if (reg
->fifos
.hdr_only
) {
1461 range
->range_data_size
= cpu_to_le32(registers_size
);
1465 offs
= rxf_data
.offset
;
1468 iwl_write_prph_no_grab(fwrt
->trans
, RXF_SET_FENCE_MODE
+ offs
, 0x1);
1469 /* Set fence pointer to the same place like WR pointer */
1470 iwl_write_prph_no_grab(fwrt
->trans
, RXF_LD_WR2FENCE
+ offs
, 0x1);
1471 /* Set fence offset */
1472 iwl_write_prph_no_grab(fwrt
->trans
, RXF_LD_FENCE_OFFSET_ADDR
+ offs
,
1476 addr
= RXF_FIFO_RD_FENCE_INC
+ offs
;
1477 data
= (void *)reg_dump
;
1478 for (i
= 0; i
< rxf_data
.size
; i
+= sizeof(*data
))
1479 *data
++ = cpu_to_le32(iwl_read_prph_no_grab(fwrt
->trans
, addr
));
1482 iwl_trans_release_nic_access(fwrt
->trans
, &flags
);
1484 return sizeof(*range
) + le32_to_cpu(range
->range_data_size
);
1488 iwl_dump_ini_err_table_iter(struct iwl_fw_runtime
*fwrt
,
1489 struct iwl_dump_ini_region_data
*reg_data
,
1490 void *range_ptr
, int idx
)
1492 struct iwl_fw_ini_region_tlv
*reg
= (void *)reg_data
->reg_tlv
->data
;
1493 struct iwl_fw_ini_region_err_table
*err_table
= ®
->err_table
;
1494 struct iwl_fw_ini_error_dump_range
*range
= range_ptr
;
1495 u32 addr
= le32_to_cpu(err_table
->base_addr
) +
1496 le32_to_cpu(err_table
->offset
);
1498 range
->internal_base_addr
= cpu_to_le32(addr
);
1499 range
->range_data_size
= err_table
->size
;
1500 iwl_trans_read_mem_bytes(fwrt
->trans
, addr
, range
->data
,
1501 le32_to_cpu(err_table
->size
));
1503 return sizeof(*range
) + le32_to_cpu(range
->range_data_size
);
1507 iwl_dump_ini_special_mem_iter(struct iwl_fw_runtime
*fwrt
,
1508 struct iwl_dump_ini_region_data
*reg_data
,
1509 void *range_ptr
, int idx
)
1511 struct iwl_fw_ini_region_tlv
*reg
= (void *)reg_data
->reg_tlv
->data
;
1512 struct iwl_fw_ini_region_special_device_memory
*special_mem
=
1515 struct iwl_fw_ini_error_dump_range
*range
= range_ptr
;
1516 u32 addr
= le32_to_cpu(special_mem
->base_addr
) +
1517 le32_to_cpu(special_mem
->offset
);
1519 range
->internal_base_addr
= cpu_to_le32(addr
);
1520 range
->range_data_size
= special_mem
->size
;
1521 iwl_trans_read_mem_bytes(fwrt
->trans
, addr
, range
->data
,
1522 le32_to_cpu(special_mem
->size
));
1524 return sizeof(*range
) + le32_to_cpu(range
->range_data_size
);
1527 static int iwl_dump_ini_fw_pkt_iter(struct iwl_fw_runtime
*fwrt
,
1528 struct iwl_dump_ini_region_data
*reg_data
,
1529 void *range_ptr
, int idx
)
1531 struct iwl_fw_ini_error_dump_range
*range
= range_ptr
;
1532 struct iwl_rx_packet
*pkt
= reg_data
->dump_data
->fw_pkt
;
1538 pkt_len
= iwl_rx_packet_payload_len(pkt
);
1540 memcpy(&range
->fw_pkt_hdr
, &pkt
->hdr
, sizeof(range
->fw_pkt_hdr
));
1541 range
->range_data_size
= cpu_to_le32(pkt_len
);
1543 memcpy(range
->data
, pkt
->data
, pkt_len
);
1545 return sizeof(*range
) + le32_to_cpu(range
->range_data_size
);
1549 iwl_dump_ini_mem_fill_header(struct iwl_fw_runtime
*fwrt
,
1550 struct iwl_dump_ini_region_data
*reg_data
,
1553 struct iwl_fw_ini_error_dump
*dump
= data
;
1555 dump
->header
.version
= cpu_to_le32(IWL_INI_DUMP_VER
);
1557 return dump
->ranges
;
1561 * mask_apply_and_normalize - applies mask on val and normalize the result
1563 * The normalization is based on the first set bit in the mask
1566 * @mask: mask to apply and to normalize with
1568 static u32
mask_apply_and_normalize(u32 val
, u32 mask
)
1570 return (val
& mask
) >> (ffs(mask
) - 1);
1573 static __le32
iwl_get_mon_reg(struct iwl_fw_runtime
*fwrt
, u32 alloc_id
,
1574 const struct iwl_fw_mon_reg
*reg_info
)
1578 /* The header addresses of DBGCi is calculate as follows:
1579 * DBGC1 address + (0x100 * i)
1581 offs
= (alloc_id
- IWL_FW_INI_ALLOCATION_ID_DBGC1
) * 0x100;
1583 if (!reg_info
|| !reg_info
->addr
|| !reg_info
->mask
)
1586 val
= iwl_read_prph_no_grab(fwrt
->trans
, reg_info
->addr
+ offs
);
1588 return cpu_to_le32(mask_apply_and_normalize(val
, reg_info
->mask
));
1592 iwl_dump_ini_mon_fill_header(struct iwl_fw_runtime
*fwrt
,
1593 struct iwl_dump_ini_region_data
*reg_data
,
1594 struct iwl_fw_ini_monitor_dump
*data
,
1595 const struct iwl_fw_mon_regs
*addrs
)
1597 struct iwl_fw_ini_region_tlv
*reg
= (void *)reg_data
->reg_tlv
->data
;
1598 u32 alloc_id
= le32_to_cpu(reg
->dram_alloc_id
);
1599 unsigned long flags
;
1601 if (!iwl_trans_grab_nic_access(fwrt
->trans
, &flags
)) {
1602 IWL_ERR(fwrt
, "Failed to get monitor header\n");
1606 data
->write_ptr
= iwl_get_mon_reg(fwrt
, alloc_id
,
1608 if (fwrt
->trans
->trans_cfg
->device_family
>= IWL_DEVICE_FAMILY_AX210
) {
1609 u32 wrt_ptr
= le32_to_cpu(data
->write_ptr
);
1611 data
->write_ptr
= cpu_to_le32(wrt_ptr
>> 2);
1613 data
->cycle_cnt
= iwl_get_mon_reg(fwrt
, alloc_id
,
1615 data
->cur_frag
= iwl_get_mon_reg(fwrt
, alloc_id
,
1618 iwl_trans_release_nic_access(fwrt
->trans
, &flags
);
1620 data
->header
.version
= cpu_to_le32(IWL_INI_DUMP_VER
);
1622 return data
->ranges
;
1626 iwl_dump_ini_mon_dram_fill_header(struct iwl_fw_runtime
*fwrt
,
1627 struct iwl_dump_ini_region_data
*reg_data
,
1630 struct iwl_fw_ini_monitor_dump
*mon_dump
= (void *)data
;
1632 return iwl_dump_ini_mon_fill_header(fwrt
, reg_data
, mon_dump
,
1633 &fwrt
->trans
->cfg
->mon_dram_regs
);
1637 iwl_dump_ini_mon_smem_fill_header(struct iwl_fw_runtime
*fwrt
,
1638 struct iwl_dump_ini_region_data
*reg_data
,
1641 struct iwl_fw_ini_monitor_dump
*mon_dump
= (void *)data
;
1643 return iwl_dump_ini_mon_fill_header(fwrt
, reg_data
, mon_dump
,
1644 &fwrt
->trans
->cfg
->mon_smem_regs
);
1648 iwl_dump_ini_err_table_fill_header(struct iwl_fw_runtime
*fwrt
,
1649 struct iwl_dump_ini_region_data
*reg_data
,
1652 struct iwl_fw_ini_region_tlv
*reg
= (void *)reg_data
->reg_tlv
->data
;
1653 struct iwl_fw_ini_err_table_dump
*dump
= data
;
1655 dump
->header
.version
= cpu_to_le32(IWL_INI_DUMP_VER
);
1656 dump
->version
= reg
->err_table
.version
;
1658 return dump
->ranges
;
1662 iwl_dump_ini_special_mem_fill_header(struct iwl_fw_runtime
*fwrt
,
1663 struct iwl_dump_ini_region_data
*reg_data
,
1666 struct iwl_fw_ini_region_tlv
*reg
= (void *)reg_data
->reg_tlv
->data
;
1667 struct iwl_fw_ini_special_device_memory
*dump
= data
;
1669 dump
->header
.version
= cpu_to_le32(IWL_INI_DUMP_VER
);
1670 dump
->type
= reg
->special_mem
.type
;
1671 dump
->version
= reg
->special_mem
.version
;
1673 return dump
->ranges
;
1676 static u32
iwl_dump_ini_mem_ranges(struct iwl_fw_runtime
*fwrt
,
1677 struct iwl_dump_ini_region_data
*reg_data
)
1679 struct iwl_fw_ini_region_tlv
*reg
= (void *)reg_data
->reg_tlv
->data
;
1681 return iwl_tlv_array_len(reg_data
->reg_tlv
, reg
, addrs
);
1684 static u32
iwl_dump_ini_paging_ranges(struct iwl_fw_runtime
*fwrt
,
1685 struct iwl_dump_ini_region_data
*reg_data
)
1687 if (fwrt
->trans
->trans_cfg
->gen2
)
1688 return fwrt
->trans
->init_dram
.paging_cnt
;
1690 return fwrt
->num_of_paging_blk
;
1694 iwl_dump_ini_mon_dram_ranges(struct iwl_fw_runtime
*fwrt
,
1695 struct iwl_dump_ini_region_data
*reg_data
)
1697 struct iwl_fw_ini_region_tlv
*reg
= (void *)reg_data
->reg_tlv
->data
;
1698 struct iwl_fw_mon
*fw_mon
;
1699 u32 ranges
= 0, alloc_id
= le32_to_cpu(reg
->dram_alloc_id
);
1702 fw_mon
= &fwrt
->trans
->dbg
.fw_mon_ini
[alloc_id
];
1704 for (i
= 0; i
< fw_mon
->num_frags
; i
++) {
1705 if (!fw_mon
->frags
[i
].size
)
1714 static u32
iwl_dump_ini_txf_ranges(struct iwl_fw_runtime
*fwrt
,
1715 struct iwl_dump_ini_region_data
*reg_data
)
1717 u32 num_of_fifos
= 0;
1719 while (iwl_ini_txf_iter(fwrt
, reg_data
, num_of_fifos
))
1722 return num_of_fifos
;
1725 static u32
iwl_dump_ini_single_range(struct iwl_fw_runtime
*fwrt
,
1726 struct iwl_dump_ini_region_data
*reg_data
)
1731 static u32
iwl_dump_ini_mem_get_size(struct iwl_fw_runtime
*fwrt
,
1732 struct iwl_dump_ini_region_data
*reg_data
)
1734 struct iwl_fw_ini_region_tlv
*reg
= (void *)reg_data
->reg_tlv
->data
;
1735 u32 size
= le32_to_cpu(reg
->dev_addr
.size
);
1736 u32 ranges
= iwl_dump_ini_mem_ranges(fwrt
, reg_data
);
1738 if (!size
|| !ranges
)
1741 return sizeof(struct iwl_fw_ini_error_dump
) + ranges
*
1742 (size
+ sizeof(struct iwl_fw_ini_error_dump_range
));
1746 iwl_dump_ini_paging_get_size(struct iwl_fw_runtime
*fwrt
,
1747 struct iwl_dump_ini_region_data
*reg_data
)
1750 u32 range_header_len
= sizeof(struct iwl_fw_ini_error_dump_range
);
1751 u32 size
= sizeof(struct iwl_fw_ini_error_dump
);
1753 if (fwrt
->trans
->trans_cfg
->gen2
) {
1754 for (i
= 0; i
< iwl_dump_ini_paging_ranges(fwrt
, reg_data
); i
++)
1755 size
+= range_header_len
+
1756 fwrt
->trans
->init_dram
.paging
[i
].size
;
1758 for (i
= 1; i
<= iwl_dump_ini_paging_ranges(fwrt
, reg_data
);
1760 size
+= range_header_len
+
1761 fwrt
->fw_paging_db
[i
].fw_paging_size
;
1768 iwl_dump_ini_mon_dram_get_size(struct iwl_fw_runtime
*fwrt
,
1769 struct iwl_dump_ini_region_data
*reg_data
)
1771 struct iwl_fw_ini_region_tlv
*reg
= (void *)reg_data
->reg_tlv
->data
;
1772 struct iwl_fw_mon
*fw_mon
;
1773 u32 size
= 0, alloc_id
= le32_to_cpu(reg
->dram_alloc_id
);
1776 fw_mon
= &fwrt
->trans
->dbg
.fw_mon_ini
[alloc_id
];
1778 for (i
= 0; i
< fw_mon
->num_frags
; i
++) {
1779 struct iwl_dram_data
*frag
= &fw_mon
->frags
[i
];
1784 size
+= sizeof(struct iwl_fw_ini_error_dump_range
) + frag
->size
;
1788 size
+= sizeof(struct iwl_fw_ini_monitor_dump
);
1794 iwl_dump_ini_mon_smem_get_size(struct iwl_fw_runtime
*fwrt
,
1795 struct iwl_dump_ini_region_data
*reg_data
)
1797 struct iwl_fw_ini_region_tlv
*reg
= (void *)reg_data
->reg_tlv
->data
;
1800 size
= le32_to_cpu(reg
->internal_buffer
.size
);
1804 size
+= sizeof(struct iwl_fw_ini_monitor_dump
) +
1805 sizeof(struct iwl_fw_ini_error_dump_range
);
1810 static u32
iwl_dump_ini_txf_get_size(struct iwl_fw_runtime
*fwrt
,
1811 struct iwl_dump_ini_region_data
*reg_data
)
1813 struct iwl_fw_ini_region_tlv
*reg
= (void *)reg_data
->reg_tlv
->data
;
1814 struct iwl_txf_iter_data
*iter
= &fwrt
->dump
.txf_iter_data
;
1815 u32 registers_num
= iwl_tlv_array_len(reg_data
->reg_tlv
, reg
, addrs
);
1817 u32 fifo_hdr
= sizeof(struct iwl_fw_ini_error_dump_range
) +
1819 sizeof(struct iwl_fw_ini_error_dump_register
);
1821 while (iwl_ini_txf_iter(fwrt
, reg_data
, size
)) {
1823 if (!reg
->fifos
.hdr_only
)
1824 size
+= iter
->fifo_size
;
1830 return size
+ sizeof(struct iwl_fw_ini_error_dump
);
1833 static u32
iwl_dump_ini_rxf_get_size(struct iwl_fw_runtime
*fwrt
,
1834 struct iwl_dump_ini_region_data
*reg_data
)
1836 struct iwl_fw_ini_region_tlv
*reg
= (void *)reg_data
->reg_tlv
->data
;
1837 struct iwl_ini_rxf_data rx_data
;
1838 u32 registers_num
= iwl_tlv_array_len(reg_data
->reg_tlv
, reg
, addrs
);
1839 u32 size
= sizeof(struct iwl_fw_ini_error_dump
) +
1840 sizeof(struct iwl_fw_ini_error_dump_range
) +
1841 registers_num
* sizeof(struct iwl_fw_ini_error_dump_register
);
1843 if (reg
->fifos
.hdr_only
)
1846 iwl_ini_get_rxf_data(fwrt
, reg_data
, &rx_data
);
1847 size
+= rx_data
.size
;
1853 iwl_dump_ini_err_table_get_size(struct iwl_fw_runtime
*fwrt
,
1854 struct iwl_dump_ini_region_data
*reg_data
)
1856 struct iwl_fw_ini_region_tlv
*reg
= (void *)reg_data
->reg_tlv
->data
;
1857 u32 size
= le32_to_cpu(reg
->err_table
.size
);
1860 size
+= sizeof(struct iwl_fw_ini_err_table_dump
) +
1861 sizeof(struct iwl_fw_ini_error_dump_range
);
1867 iwl_dump_ini_special_mem_get_size(struct iwl_fw_runtime
*fwrt
,
1868 struct iwl_dump_ini_region_data
*reg_data
)
1870 struct iwl_fw_ini_region_tlv
*reg
= (void *)reg_data
->reg_tlv
->data
;
1871 u32 size
= le32_to_cpu(reg
->special_mem
.size
);
1874 size
+= sizeof(struct iwl_fw_ini_special_device_memory
) +
1875 sizeof(struct iwl_fw_ini_error_dump_range
);
1881 iwl_dump_ini_fw_pkt_get_size(struct iwl_fw_runtime
*fwrt
,
1882 struct iwl_dump_ini_region_data
*reg_data
)
1886 if (!reg_data
->dump_data
->fw_pkt
)
1889 size
+= iwl_rx_packet_payload_len(reg_data
->dump_data
->fw_pkt
);
1891 size
+= sizeof(struct iwl_fw_ini_error_dump
) +
1892 sizeof(struct iwl_fw_ini_error_dump_range
);
1898 * struct iwl_dump_ini_mem_ops - ini memory dump operations
1899 * @get_num_of_ranges: returns the number of memory ranges in the region.
1900 * @get_size: returns the total size of the region.
1901 * @fill_mem_hdr: fills region type specific headers and returns pointer to
1902 * the first range or NULL if failed to fill headers.
1903 * @fill_range: copies a given memory range into the dump.
1904 * Returns the size of the range or negative error value otherwise.
1906 struct iwl_dump_ini_mem_ops
{
1907 u32 (*get_num_of_ranges
)(struct iwl_fw_runtime
*fwrt
,
1908 struct iwl_dump_ini_region_data
*reg_data
);
1909 u32 (*get_size
)(struct iwl_fw_runtime
*fwrt
,
1910 struct iwl_dump_ini_region_data
*reg_data
);
1911 void *(*fill_mem_hdr
)(struct iwl_fw_runtime
*fwrt
,
1912 struct iwl_dump_ini_region_data
*reg_data
,
1914 int (*fill_range
)(struct iwl_fw_runtime
*fwrt
,
1915 struct iwl_dump_ini_region_data
*reg_data
,
1916 void *range
, int idx
);
1922 * Creates a dump tlv and copy a memory region into it.
1923 * Returns the size of the current dump tlv or 0 if failed
1925 * @fwrt: fw runtime struct
1926 * @list: list to add the dump tlv to
1927 * @reg_data: memory region
1928 * @ops: memory dump operations
1930 static u32
iwl_dump_ini_mem(struct iwl_fw_runtime
*fwrt
, struct list_head
*list
,
1931 struct iwl_dump_ini_region_data
*reg_data
,
1932 const struct iwl_dump_ini_mem_ops
*ops
)
1934 struct iwl_fw_ini_region_tlv
*reg
= (void *)reg_data
->reg_tlv
->data
;
1935 struct iwl_fw_ini_dump_entry
*entry
;
1936 struct iwl_fw_error_dump_data
*tlv
;
1937 struct iwl_fw_ini_error_dump_header
*header
;
1938 u32 type
= le32_to_cpu(reg
->type
), id
= le32_to_cpu(reg
->id
);
1939 u32 num_of_ranges
, i
, size
;
1942 if (!ops
->get_num_of_ranges
|| !ops
->get_size
|| !ops
->fill_mem_hdr
||
1946 size
= ops
->get_size(fwrt
, reg_data
);
1950 entry
= vzalloc(sizeof(*entry
) + sizeof(*tlv
) + size
);
1954 entry
->size
= sizeof(*tlv
) + size
;
1956 tlv
= (void *)entry
->data
;
1957 tlv
->type
= reg
->type
;
1958 tlv
->len
= cpu_to_le32(size
);
1960 IWL_DEBUG_FW(fwrt
, "WRT: Collecting region: id=%d, type=%d\n", id
,
1963 num_of_ranges
= ops
->get_num_of_ranges(fwrt
, reg_data
);
1965 header
= (void *)tlv
->data
;
1966 header
->region_id
= reg
->id
;
1967 header
->num_of_ranges
= cpu_to_le32(num_of_ranges
);
1968 header
->name_len
= cpu_to_le32(IWL_FW_INI_MAX_NAME
);
1969 memcpy(header
->name
, reg
->name
, IWL_FW_INI_MAX_NAME
);
1971 range
= ops
->fill_mem_hdr(fwrt
, reg_data
, header
);
1974 "WRT: Failed to fill region header: id=%d, type=%d\n",
1979 for (i
= 0; i
< num_of_ranges
; i
++) {
1980 int range_size
= ops
->fill_range(fwrt
, reg_data
, range
, i
);
1982 if (range_size
< 0) {
1984 "WRT: Failed to dump region: id=%d, type=%d\n",
1988 range
= range
+ range_size
;
1991 list_add_tail(&entry
->list
, list
);
2001 static u32
iwl_dump_ini_info(struct iwl_fw_runtime
*fwrt
,
2002 struct iwl_fw_ini_trigger_tlv
*trigger
,
2003 struct list_head
*list
)
2005 struct iwl_fw_ini_dump_entry
*entry
;
2006 struct iwl_fw_error_dump_data
*tlv
;
2007 struct iwl_fw_ini_dump_info
*dump
;
2008 struct iwl_dbg_tlv_node
*node
;
2009 struct iwl_fw_ini_dump_cfg_name
*cfg_name
;
2010 u32 size
= sizeof(*tlv
) + sizeof(*dump
);
2011 u32 num_of_cfg_names
= 0;
2014 list_for_each_entry(node
, &fwrt
->trans
->dbg
.debug_info_tlv_list
, list
) {
2015 size
+= sizeof(*cfg_name
);
2019 entry
= vzalloc(sizeof(*entry
) + size
);
2025 tlv
= (void *)entry
->data
;
2026 tlv
->type
= cpu_to_le32(IWL_INI_DUMP_INFO_TYPE
);
2027 tlv
->len
= cpu_to_le32(size
- sizeof(*tlv
));
2029 dump
= (void *)tlv
->data
;
2031 dump
->version
= cpu_to_le32(IWL_INI_DUMP_VER
);
2032 dump
->time_point
= trigger
->time_point
;
2033 dump
->trigger_reason
= trigger
->trigger_reason
;
2034 dump
->external_cfg_state
=
2035 cpu_to_le32(fwrt
->trans
->dbg
.external_ini_cfg
);
2037 dump
->ver_type
= cpu_to_le32(fwrt
->dump
.fw_ver
.type
);
2038 dump
->ver_subtype
= cpu_to_le32(fwrt
->dump
.fw_ver
.subtype
);
2040 dump
->hw_step
= cpu_to_le32(CSR_HW_REV_STEP(fwrt
->trans
->hw_rev
));
2043 * Several HWs all have type == 0x42, so we'll override this value
2044 * according to the detected HW
2046 hw_type
= CSR_HW_REV_TYPE(fwrt
->trans
->hw_rev
);
2047 if (hw_type
== IWL_AX210_HW_TYPE
) {
2048 u32 prph_val
= iwl_read_prph(fwrt
->trans
, WFPM_OTP_CFG1_ADDR
);
2049 u32 is_jacket
= !!(prph_val
& WFPM_OTP_CFG1_IS_JACKET_BIT
);
2050 u32 is_cdb
= !!(prph_val
& WFPM_OTP_CFG1_IS_CDB_BIT
);
2051 u32 masked_bits
= is_jacket
| (is_cdb
<< 1);
2054 * The HW type depends on certain bits in this case, so add
2055 * these bits to the HW type. We won't have collisions since we
2056 * add these bits after the highest possible bit in the mask.
2058 hw_type
|= masked_bits
<< IWL_AX210_HW_TYPE_ADDITION_SHIFT
;
2060 dump
->hw_type
= cpu_to_le32(hw_type
);
2062 dump
->rf_id_flavor
=
2063 cpu_to_le32(CSR_HW_RFID_FLAVOR(fwrt
->trans
->hw_rf_id
));
2064 dump
->rf_id_dash
= cpu_to_le32(CSR_HW_RFID_DASH(fwrt
->trans
->hw_rf_id
));
2065 dump
->rf_id_step
= cpu_to_le32(CSR_HW_RFID_STEP(fwrt
->trans
->hw_rf_id
));
2066 dump
->rf_id_type
= cpu_to_le32(CSR_HW_RFID_TYPE(fwrt
->trans
->hw_rf_id
));
2068 dump
->lmac_major
= cpu_to_le32(fwrt
->dump
.fw_ver
.lmac_major
);
2069 dump
->lmac_minor
= cpu_to_le32(fwrt
->dump
.fw_ver
.lmac_minor
);
2070 dump
->umac_major
= cpu_to_le32(fwrt
->dump
.fw_ver
.umac_major
);
2071 dump
->umac_minor
= cpu_to_le32(fwrt
->dump
.fw_ver
.umac_minor
);
2073 dump
->fw_mon_mode
= cpu_to_le32(fwrt
->trans
->dbg
.ini_dest
);
2074 dump
->regions_mask
= trigger
->regions_mask
;
2076 dump
->build_tag_len
= cpu_to_le32(sizeof(dump
->build_tag
));
2077 memcpy(dump
->build_tag
, fwrt
->fw
->human_readable
,
2078 sizeof(dump
->build_tag
));
2080 cfg_name
= dump
->cfg_names
;
2081 dump
->num_of_cfg_names
= cpu_to_le32(num_of_cfg_names
);
2082 list_for_each_entry(node
, &fwrt
->trans
->dbg
.debug_info_tlv_list
, list
) {
2083 struct iwl_fw_ini_debug_info_tlv
*debug_info
=
2084 (void *)node
->tlv
.data
;
2086 cfg_name
->image_type
= debug_info
->image_type
;
2087 cfg_name
->cfg_name_len
=
2088 cpu_to_le32(IWL_FW_INI_MAX_CFG_NAME
);
2089 memcpy(cfg_name
->cfg_name
, debug_info
->debug_cfg_name
,
2090 sizeof(cfg_name
->cfg_name
));
2094 /* add dump info TLV to the beginning of the list since it needs to be
2095 * the first TLV in the dump
2097 list_add(&entry
->list
, list
);
/*
 * Per-region-type dump handlers for ini (TLV-configured) dumps.
 * Indexed by the region type read from the region TLV; empty entries
 * ({}) mark region types that are not collected by this driver build.
 * Each entry supplies the four callbacks iwl_dump_ini_mem() uses:
 * range count, total size, header fill and per-range data fill.
 */
static const struct iwl_dump_ini_mem_ops iwl_dump_ini_region_ops[] = {
	[IWL_FW_INI_REGION_INVALID] = {},
	[IWL_FW_INI_REGION_INTERNAL_BUFFER] = {
		.get_num_of_ranges = iwl_dump_ini_single_range,
		.get_size = iwl_dump_ini_mon_smem_get_size,
		.fill_mem_hdr = iwl_dump_ini_mon_smem_fill_header,
		.fill_range = iwl_dump_ini_mon_smem_iter,
	},
	[IWL_FW_INI_REGION_DRAM_BUFFER] = {
		.get_num_of_ranges = iwl_dump_ini_mon_dram_ranges,
		.get_size = iwl_dump_ini_mon_dram_get_size,
		.fill_mem_hdr = iwl_dump_ini_mon_dram_fill_header,
		.fill_range = iwl_dump_ini_mon_dram_iter,
	},
	[IWL_FW_INI_REGION_TXF] = {
		.get_num_of_ranges = iwl_dump_ini_txf_ranges,
		.get_size = iwl_dump_ini_txf_get_size,
		.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
		.fill_range = iwl_dump_ini_txf_iter,
	},
	[IWL_FW_INI_REGION_RXF] = {
		.get_num_of_ranges = iwl_dump_ini_single_range,
		.get_size = iwl_dump_ini_rxf_get_size,
		.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
		.fill_range = iwl_dump_ini_rxf_iter,
	},
	[IWL_FW_INI_REGION_LMAC_ERROR_TABLE] = {
		.get_num_of_ranges = iwl_dump_ini_single_range,
		.get_size = iwl_dump_ini_err_table_get_size,
		.fill_mem_hdr = iwl_dump_ini_err_table_fill_header,
		.fill_range = iwl_dump_ini_err_table_iter,
	},
	[IWL_FW_INI_REGION_UMAC_ERROR_TABLE] = {
		.get_num_of_ranges = iwl_dump_ini_single_range,
		.get_size = iwl_dump_ini_err_table_get_size,
		.fill_mem_hdr = iwl_dump_ini_err_table_fill_header,
		.fill_range = iwl_dump_ini_err_table_iter,
	},
	[IWL_FW_INI_REGION_RSP_OR_NOTIF] = {
		.get_num_of_ranges = iwl_dump_ini_single_range,
		.get_size = iwl_dump_ini_fw_pkt_get_size,
		.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
		.fill_range = iwl_dump_ini_fw_pkt_iter,
	},
	[IWL_FW_INI_REGION_DEVICE_MEMORY] = {
		.get_num_of_ranges = iwl_dump_ini_mem_ranges,
		.get_size = iwl_dump_ini_mem_get_size,
		.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
		.fill_range = iwl_dump_ini_dev_mem_iter,
	},
	[IWL_FW_INI_REGION_PERIPHERY_MAC] = {
		.get_num_of_ranges = iwl_dump_ini_mem_ranges,
		.get_size = iwl_dump_ini_mem_get_size,
		.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
		.fill_range = iwl_dump_ini_prph_mac_iter,
	},
	[IWL_FW_INI_REGION_PERIPHERY_PHY] = {
		.get_num_of_ranges = iwl_dump_ini_mem_ranges,
		.get_size = iwl_dump_ini_mem_get_size,
		.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
		.fill_range = iwl_dump_ini_prph_phy_iter,
	},
	[IWL_FW_INI_REGION_PERIPHERY_AUX] = {},
	/* note: paging lists .fill_mem_hdr first; order of designated
	 * initializers within an entry has no semantic effect
	 */
	[IWL_FW_INI_REGION_PAGING] = {
		.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
		.get_num_of_ranges = iwl_dump_ini_paging_ranges,
		.get_size = iwl_dump_ini_paging_get_size,
		.fill_range = iwl_dump_ini_paging_iter,
	},
	[IWL_FW_INI_REGION_CSR] = {
		.get_num_of_ranges = iwl_dump_ini_mem_ranges,
		.get_size = iwl_dump_ini_mem_get_size,
		.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
		.fill_range = iwl_dump_ini_csr_iter,
	},
	[IWL_FW_INI_REGION_DRAM_IMR] = {},
	[IWL_FW_INI_REGION_PCI_IOSF_CONFIG] = {
		.get_num_of_ranges = iwl_dump_ini_mem_ranges,
		.get_size = iwl_dump_ini_mem_get_size,
		.fill_mem_hdr = iwl_dump_ini_mem_fill_header,
		.fill_range = iwl_dump_ini_config_iter,
	},
	[IWL_FW_INI_REGION_SPECIAL_DEVICE_MEMORY] = {
		.get_num_of_ranges = iwl_dump_ini_single_range,
		.get_size = iwl_dump_ini_special_mem_get_size,
		.fill_mem_hdr = iwl_dump_ini_special_mem_fill_header,
		.fill_range = iwl_dump_ini_special_mem_iter,
	},
};
/*
 * iwl_dump_ini_trigger - collect all regions selected by an ini trigger
 * @fwrt: firmware runtime data
 * @dump_data: holds the trigger TLV that fired
 * @list: dump entries are appended here
 *
 * Walks the trigger's 64-bit regions_mask, looks up each active region
 * TLV and dispatches to the matching iwl_dump_ini_region_ops entry.
 * Returns the accumulated dump size in bytes (0 if nothing collected).
 */
static u32 iwl_dump_ini_trigger(struct iwl_fw_runtime *fwrt,
				struct iwl_fwrt_dump_data *dump_data,
				struct list_head *list)
{
	struct iwl_fw_ini_trigger_tlv *trigger = dump_data->trig;
	enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trigger->time_point);
	struct iwl_dump_ini_region_data reg_data = {
		.dump_data = dump_data,
	};
	int i;
	u32 size = 0;
	u64 regions_mask = le64_to_cpu(trigger->regions_mask);

	/* compile-time: mask width must match and must cover every
	 * possible active region index
	 */
	BUILD_BUG_ON(sizeof(trigger->regions_mask) != sizeof(regions_mask));
	BUILD_BUG_ON((sizeof(trigger->regions_mask) * BITS_PER_BYTE) <
		     ARRAY_SIZE(fwrt->trans->dbg.active_regions));

	for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.active_regions); i++) {
		u32 reg_type;
		struct iwl_fw_ini_region_tlv *reg;

		if (!(BIT_ULL(i) & regions_mask))
			continue;

		reg_data.reg_tlv = fwrt->trans->dbg.active_regions[i];
		if (!reg_data.reg_tlv) {
			/* requested by the trigger but never configured */
			IWL_WARN(fwrt,
				 "WRT: Unassigned region id %d, skipping\n",
				 i);
			continue;
		}

		reg = (void *)reg_data.reg_tlv->data;
		reg_type = le32_to_cpu(reg->type);
		/* unknown/out-of-table region types are silently skipped */
		if (reg_type >= ARRAY_SIZE(iwl_dump_ini_region_ops))
			continue;

		/* PHY periphery reads are only safe once the FW asserted */
		if (reg_type == IWL_FW_INI_REGION_PERIPHERY_PHY &&
		    tp_id != IWL_FW_INI_TIME_POINT_FW_ASSERT) {
			IWL_WARN(fwrt,
				 "WRT: trying to collect phy prph at time point: %d, skipping\n",
				 tp_id);
			continue;
		}

		size += iwl_dump_ini_mem(fwrt, list, &reg_data,
					 &iwl_dump_ini_region_ops[reg_type]);
	}

	/* only add the info TLV if at least one region was dumped */
	if (size)
		size += iwl_dump_ini_info(fwrt, trigger, list);

	return size;
}
/*
 * iwl_fw_ini_trigger_on - decide whether an ini trigger should fire now
 *
 * Rejects the trigger when ini debug mode is not active, the time point
 * is invalid/out of range, or the trigger fired again within its
 * ignore_consec window (rate limiting). Returns true if collection
 * should proceed.
 */
static bool iwl_fw_ini_trigger_on(struct iwl_fw_runtime *fwrt,
				  struct iwl_fw_ini_trigger_tlv *trig)
{
	enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trig->time_point);
	u32 usec = le32_to_cpu(trig->ignore_consec);

	if (!iwl_trans_dbg_ini_valid(fwrt->trans) ||
	    tp_id == IWL_FW_INI_TIME_POINT_INVALID ||
	    tp_id >= IWL_FW_INI_TIME_POINT_NUM ||
	    iwl_fw_dbg_no_trig_window(fwrt, tp_id, usec))
		return false;

	return true;
}
/*
 * iwl_dump_ini_file_gen - build a complete ini dump file on @list
 *
 * Collects all regions for the trigger via iwl_dump_ini_trigger(), then
 * prepends a file header entry (barker + total length). Returns the
 * total file length in bytes, or 0 if the trigger is inactive, has an
 * empty regions mask, allocation fails, or nothing was collected.
 * On success the header entry is on @list; caller owns the list and
 * frees it with iwl_dump_ini_list_free().
 */
static u32 iwl_dump_ini_file_gen(struct iwl_fw_runtime *fwrt,
				 struct iwl_fwrt_dump_data *dump_data,
				 struct list_head *list)
{
	struct iwl_fw_ini_trigger_tlv *trigger = dump_data->trig;
	struct iwl_fw_ini_dump_entry *entry;
	struct iwl_fw_ini_dump_file_hdr *hdr;
	u32 size;

	if (!trigger || !iwl_fw_ini_trigger_on(fwrt, trigger) ||
	    !le64_to_cpu(trigger->regions_mask))
		return 0;

	entry = vzalloc(sizeof(*entry) + sizeof(*hdr));
	if (!entry)
		return 0;

	entry->size = sizeof(*hdr);

	size = iwl_dump_ini_trigger(fwrt, dump_data, list);
	if (!size) {
		/* nothing collected - drop the unused header entry */
		vfree(entry);
		return 0;
	}

	hdr = (void *)entry->data;
	hdr->barker = cpu_to_le32(IWL_FW_INI_ERROR_DUMP_BARKER);
	hdr->file_len = cpu_to_le32(size + entry->size);

	/* header must be the first entry in the dump file */
	list_add(&entry->list, list);

	return le32_to_cpu(hdr->file_len);
}
/*
 * iwl_fw_free_dump_desc - free a dump descriptor and reset error-id state
 *
 * The static iwl_dump_desc_assert descriptor is never freed; any other
 * (heap-allocated) descriptor is. Also clears the cached LMAC/UMAC
 * error ids so a stale id is not attached to the next dump.
 */
static inline void iwl_fw_free_dump_desc(struct iwl_fw_runtime *fwrt,
					 const struct iwl_fw_dump_desc *desc)
{
	if (desc && desc != &iwl_dump_desc_assert)
		kfree(desc);

	fwrt->dump.lmac_err_id[0] = 0;
	if (fwrt->smem_cfg.num_lmacs > 1)
		fwrt->dump.lmac_err_id[1] = 0;
	fwrt->dump.umac_err_id = 0;
}
/*
 * iwl_fw_error_dump - build a legacy (non-ini) error dump and hand it
 * to devcoredump
 *
 * Concatenates the fwrt-built dump file with the transport-layer dump
 * into a scatterlist and submits it via dev_coredumpsg() (which takes
 * ownership of the sgtable). The intermediate fwrt/trans buffers are
 * always vfree'd here. In monitor_only mode the transport dump is
 * restricted to the FW monitor data.
 */
static void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt,
			      struct iwl_fwrt_dump_data *dump_data)
{
	struct iwl_fw_dump_ptrs fw_error_dump = {};
	struct iwl_fw_error_dump_file *dump_file;
	struct scatterlist *sg_dump_data;
	u32 file_len;
	u32 dump_mask = fwrt->fw->dbg.dump_mask;

	dump_file = iwl_fw_error_dump_file(fwrt, &fw_error_dump, dump_data);
	if (!dump_file)
		return;

	if (dump_data->monitor_only)
		dump_mask &= IWL_FW_ERROR_DUMP_FW_MONITOR;

	fw_error_dump.trans_ptr = iwl_trans_dump_data(fwrt->trans, dump_mask);
	file_len = le32_to_cpu(dump_file->file_len);
	fw_error_dump.fwrt_len = file_len;

	/* transport dump is optional; extend the file length if present */
	if (fw_error_dump.trans_ptr) {
		file_len += fw_error_dump.trans_ptr->len;
		dump_file->file_len = cpu_to_le32(file_len);
	}

	sg_dump_data = alloc_sgtable(file_len);
	if (sg_dump_data) {
		/* fwrt part first, then the transport part right after it */
		sg_pcopy_from_buffer(sg_dump_data,
				     sg_nents(sg_dump_data),
				     fw_error_dump.fwrt_ptr,
				     fw_error_dump.fwrt_len, 0);
		if (fw_error_dump.trans_ptr)
			sg_pcopy_from_buffer(sg_dump_data,
					     sg_nents(sg_dump_data),
					     fw_error_dump.trans_ptr->data,
					     fw_error_dump.trans_ptr->len,
					     fw_error_dump.fwrt_len);
		dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len,
			       GFP_KERNEL);
	}
	vfree(fw_error_dump.fwrt_ptr);
	vfree(fw_error_dump.trans_ptr);
}
/*
 * iwl_dump_ini_list_free - free every dump entry on @list
 *
 * Entries are vzalloc'd (see iwl_dump_ini_file_gen), hence vfree.
 */
static void iwl_dump_ini_list_free(struct list_head *list)
{
	while (!list_empty(list)) {
		struct iwl_fw_ini_dump_entry *entry =
			list_entry(list->next, typeof(*entry), list);

		list_del(&entry->list);
		vfree(entry);
	}
}
/*
 * iwl_fw_error_dump_data_free - release per-dump ini data
 *
 * The trigger TLV is owned elsewhere, so only the pointer is dropped;
 * the captured fw packet is owned here and is freed.
 */
static void iwl_fw_error_dump_data_free(struct iwl_fwrt_dump_data *dump_data)
{
	dump_data->trig = NULL;
	kfree(dump_data->fw_pkt);
	dump_data->fw_pkt = NULL;
}
/*
 * iwl_fw_error_ini_dump - build an ini-style dump and hand it to
 * devcoredump
 *
 * Generates the dump entry list, copies each entry back-to-back into a
 * scatterlist and submits it via dev_coredumpsg() (which takes
 * ownership of the sgtable). The entry list itself is always freed.
 */
static void iwl_fw_error_ini_dump(struct iwl_fw_runtime *fwrt,
				  struct iwl_fwrt_dump_data *dump_data)
{
	struct list_head dump_list = LIST_HEAD_INIT(dump_list);
	struct scatterlist *sg_dump_data;
	u32 file_len = iwl_dump_ini_file_gen(fwrt, dump_data, &dump_list);

	if (!file_len)
		return;

	sg_dump_data = alloc_sgtable(file_len);
	if (sg_dump_data) {
		struct iwl_fw_ini_dump_entry *entry;
		int sg_entries = sg_nents(sg_dump_data);
		u32 offs = 0;

		/* serialize the entries contiguously into the sgtable */
		list_for_each_entry(entry, &dump_list, list) {
			sg_pcopy_from_buffer(sg_dump_data, sg_entries,
					     entry->data, entry->size, offs);
			offs += entry->size;
		}
		dev_coredumpsg(fwrt->trans->dev, sg_dump_data, file_len,
			       GFP_KERNEL);
	}
	iwl_dump_ini_list_free(&dump_list);
}
/*
 * Shared, statically-allocated descriptor used for FW-assert dumps.
 * iwl_fw_free_dump_desc() explicitly never kfree()s this object.
 */
const struct iwl_fw_dump_desc iwl_dump_desc_assert = {
	.trig_desc = {
		.type = cpu_to_le32(FW_DBG_TRIGGER_FW_ASSERT),
	},
};
IWL_EXPORT_SYMBOL(iwl_dump_desc_assert);
/*
 * iwl_fw_dbg_collect_desc - queue a legacy dump-collection work item
 * @fwrt: firmware runtime data
 * @desc: dump descriptor; ownership passes to the worker on success
 * @monitor_only: restrict the dump to FW monitor data
 * @delay: delay before collection, in usec
 *
 * No-op (descriptor freed, returns 0) when ini debug mode is active,
 * since ini dumps go through a different path. Otherwise claims a free
 * worker slot and schedules delayed collection.
 * Returns 0 on success or a negative errno when no worker is free.
 */
int iwl_fw_dbg_collect_desc(struct iwl_fw_runtime *fwrt,
			    const struct iwl_fw_dump_desc *desc,
			    bool monitor_only,
			    unsigned int delay)
{
	struct iwl_fwrt_wk_data *wk_data;
	unsigned long idx;

	if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
		iwl_fw_free_dump_desc(fwrt, desc);
		return 0;
	}

	/*
	 * Check there is an available worker.
	 * ffz return value is undefined if no zero exists,
	 * so check against ~0UL first.
	 */
	if (fwrt->dump.active_wks == ~0UL)
		return -EBUSY;

	idx = ffz(fwrt->dump.active_wks);

	if (idx >= IWL_FW_RUNTIME_DUMP_WK_NUM ||
	    test_and_set_bit(fwrt->dump.wks[idx].idx, &fwrt->dump.active_wks))
		return -EBUSY;

	wk_data = &fwrt->dump.wks[idx];

	/* slot was marked free, so a lingering desc indicates a leak */
	if (WARN_ON(wk_data->dump_data.desc))
		iwl_fw_free_dump_desc(fwrt, wk_data->dump_data.desc);

	wk_data->dump_data.desc = desc;
	wk_data->dump_data.monitor_only = monitor_only;

	IWL_WARN(fwrt, "Collecting data: trigger %d fired.\n",
		 le32_to_cpu(desc->trig_desc.type));

	schedule_delayed_work(&wk_data->wk, usecs_to_jiffies(delay));

	return 0;
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_desc);
/*
 * iwl_fw_dbg_error_collect - trigger a synchronous error-dump collection
 * @fwrt: firmware runtime data
 * @trig_type: legacy trigger type that fired
 *
 * In ini mode only the alive-timeout trigger is honored, routed through
 * the ini time-point machinery. In legacy mode a fresh descriptor is
 * allocated and queued. In both cases the NMI sync at the end waits for
 * the device before returning. Returns 0 or a negative errno.
 */
int iwl_fw_dbg_error_collect(struct iwl_fw_runtime *fwrt,
			     enum iwl_fw_dbg_trigger trig_type)
{
	if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status))
		return -EIO;

	if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
		if (trig_type != FW_DBG_TRIGGER_ALIVE_TIMEOUT)
			return -EIO;

		iwl_dbg_tlv_time_point(fwrt,
				       IWL_FW_INI_TIME_POINT_HOST_ALIVE_TIMEOUT,
				       NULL);
	} else {
		struct iwl_fw_dump_desc *iwl_dump_error_desc;
		int ret;

		iwl_dump_error_desc =
			kmalloc(sizeof(*iwl_dump_error_desc), GFP_KERNEL);

		if (!iwl_dump_error_desc)
			return -ENOMEM;

		iwl_dump_error_desc->trig_desc.type = cpu_to_le32(trig_type);
		iwl_dump_error_desc->len = 0;

		ret = iwl_fw_dbg_collect_desc(fwrt, iwl_dump_error_desc,
					      false, 0);
		if (ret) {
			/* queueing failed - ownership stays with us */
			kfree(iwl_dump_error_desc);
			return ret;
		}
	}

	iwl_trans_sync_nmi(fwrt->trans);

	return 0;
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_error_collect);
/*
 * iwl_fw_dbg_collect - collect a dump for a legacy trigger
 * @fwrt: firmware runtime data
 * @trig: trigger type to record in the descriptor
 * @str: free-form description copied into the descriptor
 * @len: length of @str in bytes
 * @trigger: optional trigger TLV with occurrence/mode/delay settings
 *
 * When a trigger TLV is given: respects its remaining occurrence count
 * (decrementing it), may force an NMI restart instead of dumping, and
 * derives the monitor-only flag and stop delay from it. Builds a
 * descriptor and hands it to iwl_fw_dbg_collect_desc().
 * Returns 0 or a negative errno.
 */
int iwl_fw_dbg_collect(struct iwl_fw_runtime *fwrt,
		       enum iwl_fw_dbg_trigger trig,
		       const char *str, size_t len,
		       struct iwl_fw_dbg_trigger_tlv *trigger)
{
	struct iwl_fw_dump_desc *desc;
	unsigned int delay = 0;
	bool monitor_only = false;

	if (trigger) {
		u16 occurrences = le16_to_cpu(trigger->occurrences) - 1;

		/* exhausted triggers fire no more dumps */
		if (!le16_to_cpu(trigger->occurrences))
			return 0;

		if (trigger->flags & IWL_FW_DBG_FORCE_RESTART) {
			IWL_WARN(fwrt, "Force restart: trigger %d fired.\n",
				 trig);
			iwl_force_nmi(fwrt->trans);
			return 0;
		}

		trigger->occurrences = cpu_to_le16(occurrences);
		monitor_only = trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY;

		/* convert msec to usec */
		delay = le32_to_cpu(trigger->stop_delay) * USEC_PER_MSEC;
	}

	/* GFP_ATOMIC: may be called from non-sleepable context */
	desc = kzalloc(sizeof(*desc) + len, GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	desc->len = len;
	desc->trig_desc.type = cpu_to_le32(trig);
	memcpy(desc->trig_desc.data, str, len);

	return iwl_fw_dbg_collect_desc(fwrt, desc, monitor_only, delay);
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect);
/*
 * iwl_fw_dbg_ini_collect - queue an ini-trigger dump collection
 * @fwrt: firmware runtime data
 * @dump_data: holds the ini trigger TLV that fired (copied into the
 *	worker slot, so the caller's struct need not stay alive)
 *
 * Verifies the trigger is active, consumes one occurrence, claims a
 * free worker slot and schedules delayed collection.
 * Returns 0 or a negative errno.
 */
int iwl_fw_dbg_ini_collect(struct iwl_fw_runtime *fwrt,
			   struct iwl_fwrt_dump_data *dump_data)
{
	struct iwl_fw_ini_trigger_tlv *trig = dump_data->trig;
	enum iwl_fw_ini_time_point tp_id = le32_to_cpu(trig->time_point);
	u32 occur, delay;
	unsigned long idx;

	if (!iwl_fw_ini_trigger_on(fwrt, trig)) {
		IWL_WARN(fwrt, "WRT: Trigger %d is not active, aborting dump\n",
			 tp_id);
		return -EINVAL;
	}

	delay = le32_to_cpu(trig->dump_delay);
	occur = le32_to_cpu(trig->occurrences);
	if (!occur)
		return 0;

	trig->occurrences = cpu_to_le32(--occur);

	/* Check there is an available worker.
	 * ffz return value is undefined if no zero exists,
	 * so check against ~0UL first.
	 */
	if (fwrt->dump.active_wks == ~0UL)
		return -EBUSY;

	idx = ffz(fwrt->dump.active_wks);

	if (idx >= IWL_FW_RUNTIME_DUMP_WK_NUM ||
	    test_and_set_bit(fwrt->dump.wks[idx].idx, &fwrt->dump.active_wks))
		return -EBUSY;

	fwrt->dump.wks[idx].dump_data = *dump_data;

	IWL_WARN(fwrt, "WRT: Collecting data: ini trigger %d fired.\n", tp_id);

	schedule_delayed_work(&fwrt->dump.wks[idx].wk, usecs_to_jiffies(delay));

	return 0;
}
/*
 * iwl_fw_dbg_collect_trig - collect a dump for a trigger TLV with a
 * printf-style description
 *
 * Formats the optional description into a small stack buffer (warning,
 * but proceeding, on truncation) and forwards to iwl_fw_dbg_collect().
 * No-op (returns 0) in ini debug mode. Returns 0 or a negative errno.
 */
int iwl_fw_dbg_collect_trig(struct iwl_fw_runtime *fwrt,
			    struct iwl_fw_dbg_trigger_tlv *trigger,
			    const char *fmt, ...)
{
	int ret, len = 0;
	char buf[64];

	if (iwl_trans_dbg_ini_valid(fwrt->trans))
		return 0;

	if (fmt) {
		va_list ap;

		/* sentinel byte used below to detect truncation */
		buf[sizeof(buf) - 1] = '\0';

		va_start(ap, fmt);
		vsnprintf(buf, sizeof(buf), fmt, ap);
		va_end(ap);

		/* check for truncation */
		if (WARN_ON_ONCE(buf[sizeof(buf) - 1]))
			buf[sizeof(buf) - 1] = '\0';

		len = strlen(buf) + 1;
	}

	ret = iwl_fw_dbg_collect(fwrt, le32_to_cpu(trigger->id), buf, len,
				 trigger);

	if (ret)
		return ret;

	return 0;
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_collect_trig);
/*
 * iwl_fw_start_dbg_conf - apply a firmware debug configuration
 * @fwrt: firmware runtime data
 * @conf_id: index into the fw's dbg.conf_tlv array
 *
 * Sends every host command contained in the selected configuration TLV
 * to the firmware, then records @conf_id as the active configuration.
 * FW_DBG_START_FROM_ALIVE with no commands is a silent success (the
 * firmware's early-start configuration is hard coded).
 * Returns 0 or a negative errno (invalid id or command send failure).
 */
int iwl_fw_start_dbg_conf(struct iwl_fw_runtime *fwrt, u8 conf_id)
{
	u8 *ptr;
	int ret;
	int i;

	if (WARN_ONCE(conf_id >= ARRAY_SIZE(fwrt->fw->dbg.conf_tlv),
		      "Invalid configuration %d\n", conf_id))
		return -EINVAL;

	/* EARLY START - firmware's configuration is hard coded */
	if ((!fwrt->fw->dbg.conf_tlv[conf_id] ||
	     !fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds) &&
	    conf_id == FW_DBG_START_FROM_ALIVE)
		return 0;

	if (!fwrt->fw->dbg.conf_tlv[conf_id])
		return -EINVAL;

	if (fwrt->dump.conf != FW_DBG_INVALID)
		IWL_INFO(fwrt, "FW already configured (%d) - re-configuring\n",
			 fwrt->dump.conf);

	/* Send all HCMDs for configuring the FW debug */
	ptr = (void *)&fwrt->fw->dbg.conf_tlv[conf_id]->hcmd;
	for (i = 0; i < fwrt->fw->dbg.conf_tlv[conf_id]->num_of_hcmds; i++) {
		struct iwl_fw_dbg_conf_hcmd *cmd = (void *)ptr;
		struct iwl_host_cmd hcmd = {
			.id = cmd->id,
			.len = { le16_to_cpu(cmd->len), },
			.data = { cmd->data, },
		};

		ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
		if (ret)
			return ret;

		/* commands are variable-length: header + payload */
		ptr += sizeof(*cmd);
		ptr += le16_to_cpu(cmd->len);
	}

	fwrt->dump.conf = conf_id;

	return 0;
}
IWL_EXPORT_SYMBOL(iwl_fw_start_dbg_conf);
/* this function assumes dump_start was called beforehand and dump_end will be
 * called afterwards
 */
static void iwl_fw_dbg_collect_sync(struct iwl_fw_runtime *fwrt, u8 wk_idx)
{
	struct iwl_fw_dbg_params params = {0};
	struct iwl_fwrt_dump_data *dump_data =
		&fwrt->dump.wks[wk_idx].dump_data;

	/* slot not claimed - nothing to collect */
	if (!test_bit(wk_idx, &fwrt->dump.active_wks))
		return;

	if (!test_bit(STATUS_DEVICE_ENABLED, &fwrt->trans->status)) {
		IWL_ERR(fwrt, "Device is not enabled - cannot dump error\n");
		goto out;
	}

	/* there's no point in fw dump if the bus is dead */
	if (test_bit(STATUS_TRANS_DEAD, &fwrt->trans->status)) {
		IWL_ERR(fwrt, "Skip fw error dump since bus is dead\n");
		goto out;
	}

	/* pause DBGC recording while we read the buffers out */
	iwl_fw_dbg_stop_restart_recording(fwrt, &params, true);

	IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection start\n");
	if (iwl_trans_dbg_ini_valid(fwrt->trans))
		iwl_fw_error_ini_dump(fwrt, &fwrt->dump.wks[wk_idx].dump_data);
	else
		iwl_fw_error_dump(fwrt, &fwrt->dump.wks[wk_idx].dump_data);
	IWL_DEBUG_FW_INFO(fwrt, "WRT: Data collection done\n");

	iwl_fw_dbg_stop_restart_recording(fwrt, &params, false);

out:
	/* release the per-dump data on every exit path, then free the
	 * worker slot for the next trigger
	 */
	if (iwl_trans_dbg_ini_valid(fwrt->trans)) {
		iwl_fw_error_dump_data_free(dump_data);
	} else {
		iwl_fw_free_dump_desc(fwrt, dump_data->desc);
		dump_data->desc = NULL;
	}

	clear_bit(wk_idx, &fwrt->dump.active_wks);
}
/*
 * iwl_fw_error_dump_wk - delayed-work entry point for dump collection
 *
 * Recovers the worker slot and the owning fwrt from the work struct,
 * brackets the synchronous collection with the op mode's
 * dump_start/dump_end callbacks, and bails out (without collecting)
 * when dump_start refuses.
 */
void iwl_fw_error_dump_wk(struct work_struct *work)
{
	struct iwl_fwrt_wk_data *wks =
		container_of(work, typeof(*wks), wk.work);
	struct iwl_fw_runtime *fwrt =
		container_of(wks, typeof(*fwrt), dump.wks[wks->idx]);

	/* assumes the op mode mutex is locked in dump_start since
	 * iwl_fw_dbg_collect_sync can't run in parallel
	 */
	if (fwrt->ops && fwrt->ops->dump_start &&
	    fwrt->ops->dump_start(fwrt->ops_ctx))
		return;

	iwl_fw_dbg_collect_sync(fwrt, wks->idx);

	if (fwrt->ops && fwrt->ops->dump_end)
		fwrt->ops->dump_end(fwrt->ops_ctx);
}
/*
 * iwl_fw_dbg_read_d3_debug_data - snapshot the D3 debug region from
 * device memory
 *
 * No-op unless D3 debug is enabled. Lazily allocates the host buffer on
 * first use (kept across calls); each call overwrites it with a fresh
 * read of cfg->d3_debug_data_length bytes from the device.
 */
void iwl_fw_dbg_read_d3_debug_data(struct iwl_fw_runtime *fwrt)
{
	const struct iwl_cfg *cfg = fwrt->trans->cfg;

	if (!iwl_fw_dbg_is_d3_debug_enabled(fwrt))
		return;

	if (!fwrt->dump.d3_debug_data) {
		fwrt->dump.d3_debug_data = kmalloc(cfg->d3_debug_data_length,
						   GFP_KERNEL);
		if (!fwrt->dump.d3_debug_data) {
			IWL_ERR(fwrt,
				"failed to allocate memory for D3 debug data\n");
			return;
		}
	}

	/* if the buffer holds previous debug data it is overwritten */
	iwl_trans_read_mem_bytes(fwrt->trans, cfg->d3_debug_data_base_addr,
				 fwrt->dump.d3_debug_data,
				 cfg->d3_debug_data_length);
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_read_d3_debug_data);
/*
 * iwl_fw_dbg_stop_sync - flush all pending dump work and stop recording
 *
 * Cancels the ini TLV timers, synchronously drains every dump worker
 * slot, then stops DBGC recording (stop=true, no saved params).
 */
void iwl_fw_dbg_stop_sync(struct iwl_fw_runtime *fwrt)
{
	int i;

	iwl_dbg_tlv_del_timers(fwrt->trans);
	for (i = 0; i < IWL_FW_RUNTIME_DUMP_WK_NUM; i++)
		iwl_fw_dbg_collect_sync(fwrt, i);

	iwl_fw_dbg_stop_restart_recording(fwrt, NULL, true);
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_stop_sync);
/* pair a register address with its own name for printing */
#define FSEQ_REG(x) { .addr = (x), .str = #x, }

/*
 * iwl_fw_error_print_fseq_regs - log the FSEQ diagnostic registers
 *
 * Grabs NIC access once, reads each register in the local table via the
 * no-grab PRPH accessor and logs "value | name" at error level, then
 * releases NIC access. Silently returns if NIC access can't be taken.
 */
void iwl_fw_error_print_fseq_regs(struct iwl_fw_runtime *fwrt)
{
	struct iwl_trans *trans = fwrt->trans;
	unsigned long flags;
	int i;
	struct {
		u32 addr;
		const char *str;
	} fseq_regs[] = {
		FSEQ_REG(FSEQ_ERROR_CODE),
		FSEQ_REG(FSEQ_TOP_INIT_VERSION),
		FSEQ_REG(FSEQ_CNVIO_INIT_VERSION),
		FSEQ_REG(FSEQ_OTP_VERSION),
		FSEQ_REG(FSEQ_TOP_CONTENT_VERSION),
		FSEQ_REG(FSEQ_ALIVE_TOKEN),
		FSEQ_REG(FSEQ_CNVI_ID),
		FSEQ_REG(FSEQ_CNVR_ID),
		FSEQ_REG(CNVI_AUX_MISC_CHIP),
		FSEQ_REG(CNVR_AUX_MISC_CHIP),
		FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_DIG_DCDC_VTRIM),
		FSEQ_REG(CNVR_SCU_SD_REGS_SD_REG_ACTIVE_VDIG_MIRROR),
	};

	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;

	IWL_ERR(fwrt, "Fseq Registers:\n");

	for (i = 0; i < ARRAY_SIZE(fseq_regs); i++)
		IWL_ERR(fwrt, "0x%08X | %s\n",
			iwl_read_prph_no_grab(trans, fseq_regs[i].addr),
			fseq_regs[i].str);

	iwl_trans_release_nic_access(trans, &flags);
}
IWL_EXPORT_SYMBOL(iwl_fw_error_print_fseq_regs);
/*
 * iwl_fw_dbg_suspend_resume_hcmd - ask the FW to suspend or resume DBGC
 * @trans: the transport to send the command on
 * @suspend: true to suspend recording, false to resume
 *
 * Returns the result of iwl_trans_send_cmd().
 */
static int iwl_fw_dbg_suspend_resume_hcmd(struct iwl_trans *trans, bool suspend)
{
	struct iwl_dbg_suspend_resume_cmd cmd = {
		.operation = suspend ?
			cpu_to_le32(DBGC_SUSPEND_CMD) :
			cpu_to_le32(DBGC_RESUME_CMD),
	};
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(DEBUG_GROUP, DBGC_SUSPEND_RESUME),
		.data[0] = &cmd,
		.len[0] = sizeof(cmd),
	};

	return iwl_trans_send_cmd(trans, &hcmd);
}
/*
 * iwl_fw_dbg_stop_recording - halt DBGC recording at the register level
 * @trans: the transport
 * @params: if non-NULL, the current IN_SAMPLE/OUT_CTRL values are saved
 *	here so iwl_fw_dbg_restart_recording() can restore them
 *
 * Family-7000 devices use the legacy MON_BUFF_SAMPLE_CTL bit instead of
 * the DBGC registers.
 */
static void iwl_fw_dbg_stop_recording(struct iwl_trans *trans,
				      struct iwl_fw_dbg_params *params)
{
	if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) {
		iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100);
		return;
	}

	if (params) {
		params->in_sample = iwl_read_umac_prph(trans, DBGC_IN_SAMPLE);
		params->out_ctrl = iwl_read_umac_prph(trans, DBGC_OUT_CTRL);
	}

	iwl_write_umac_prph(trans, DBGC_IN_SAMPLE, 0);
	/* wait for the DBGC to finish writing the internal buffer to DRAM to
	 * avoid halting the HW while writing
	 */
	usleep_range(700, 1000);
	iwl_write_umac_prph(trans, DBGC_OUT_CTRL, 0);
}
/*
 * iwl_fw_dbg_restart_recording - resume DBGC recording
 * @trans: the transport
 * @params: the register values saved by iwl_fw_dbg_stop_recording();
 *	required for non-7000 families
 *
 * On family 7000 the legacy sample-control bits are toggled; otherwise
 * the saved IN_SAMPLE/OUT_CTRL values are written back.
 * Returns 0 on success, -EIO if @params is NULL.
 */
static int iwl_fw_dbg_restart_recording(struct iwl_trans *trans,
					struct iwl_fw_dbg_params *params)
{
	/* NOTE(review): the NULL check precedes the family dispatch in the
	 * upstream source; reconstructed here from the dropped lines.
	 */
	if (!params)
		return -EIO;

	if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000) {
		iwl_clear_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x100);
		iwl_clear_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1);
		iwl_set_bits_prph(trans, MON_BUFF_SAMPLE_CTL, 0x1);
	} else {
		iwl_write_umac_prph(trans, DBGC_IN_SAMPLE, params->in_sample);
		iwl_write_umac_prph(trans, DBGC_OUT_CTRL, params->out_ctrl);
	}

	return 0;
}
/*
 * iwl_fw_dbg_stop_restart_recording - stop or restart DBGC recording
 * @fwrt: firmware runtime data
 * @params: register save/restore area passed to the register-level
 *	stop/restart helpers
 * @stop: true to stop recording, false to restart it
 *
 * Prefers the firmware suspend/resume host command when the ucode
 * advertises the capability; otherwise pokes the registers directly.
 * No-op when the firmware is already in error state. With DEBUGFS, the
 * rec_on bookkeeping flag is updated on success.
 */
void iwl_fw_dbg_stop_restart_recording(struct iwl_fw_runtime *fwrt,
				       struct iwl_fw_dbg_params *params,
				       bool stop)
{
	int ret __maybe_unused = 0;

	if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status))
		return;

	if (fw_has_capa(&fwrt->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_DBG_SUSPEND_RESUME_CMD_SUPP))
		ret = iwl_fw_dbg_suspend_resume_hcmd(fwrt->trans, stop);
	else if (stop)
		iwl_fw_dbg_stop_recording(fwrt->trans, params);
	else
		ret = iwl_fw_dbg_restart_recording(fwrt->trans, params);
#ifdef CONFIG_IWLWIFI_DEBUGFS
	if (!ret) {
		if (stop)
			fwrt->trans->dbg.rec_on = false;
		else
			iwl_fw_set_dbg_rec_on(fwrt);
	}
#endif
}
IWL_EXPORT_SYMBOL(iwl_fw_dbg_stop_restart_recording);