// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2018-2020 Intel Corporation
 */
#include <linux/firmware.h>
#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-dbg-tlv.h"
#include "fw/dbg.h"
#include "fw/runtime.h"
13 * enum iwl_dbg_tlv_type - debug TLV types
14 * @IWL_DBG_TLV_TYPE_DEBUG_INFO: debug info TLV
15 * @IWL_DBG_TLV_TYPE_BUF_ALLOC: buffer allocation TLV
16 * @IWL_DBG_TLV_TYPE_HCMD: host command TLV
17 * @IWL_DBG_TLV_TYPE_REGION: region TLV
18 * @IWL_DBG_TLV_TYPE_TRIGGER: trigger TLV
19 * @IWL_DBG_TLV_TYPE_NUM: number of debug TLVs
21 enum iwl_dbg_tlv_type
{
22 IWL_DBG_TLV_TYPE_DEBUG_INFO
=
23 IWL_UCODE_TLV_TYPE_DEBUG_INFO
- IWL_UCODE_TLV_DEBUG_BASE
,
24 IWL_DBG_TLV_TYPE_BUF_ALLOC
,
25 IWL_DBG_TLV_TYPE_HCMD
,
26 IWL_DBG_TLV_TYPE_REGION
,
27 IWL_DBG_TLV_TYPE_TRIGGER
,
/**
 * struct iwl_dbg_tlv_ver_data - debug TLV version struct
 * @min_ver: min version supported
 * @max_ver: max version supported
 */
struct iwl_dbg_tlv_ver_data {
	int min_ver;
	int max_ver;
};
42 * struct iwl_dbg_tlv_timer_node - timer node struct
43 * @list: list of &struct iwl_dbg_tlv_timer_node
45 * @fwrt: &struct iwl_fw_runtime
46 * @tlv: TLV attach to the timer node
48 struct iwl_dbg_tlv_timer_node
{
49 struct list_head list
;
50 struct timer_list timer
;
51 struct iwl_fw_runtime
*fwrt
;
52 struct iwl_ucode_tlv
*tlv
;
55 static const struct iwl_dbg_tlv_ver_data
56 dbg_ver_table
[IWL_DBG_TLV_TYPE_NUM
] = {
57 [IWL_DBG_TLV_TYPE_DEBUG_INFO
] = {.min_ver
= 1, .max_ver
= 1,},
58 [IWL_DBG_TLV_TYPE_BUF_ALLOC
] = {.min_ver
= 1, .max_ver
= 1,},
59 [IWL_DBG_TLV_TYPE_HCMD
] = {.min_ver
= 1, .max_ver
= 1,},
60 [IWL_DBG_TLV_TYPE_REGION
] = {.min_ver
= 1, .max_ver
= 1,},
61 [IWL_DBG_TLV_TYPE_TRIGGER
] = {.min_ver
= 1, .max_ver
= 1,},
64 static int iwl_dbg_tlv_add(struct iwl_ucode_tlv
*tlv
, struct list_head
*list
)
66 u32 len
= le32_to_cpu(tlv
->length
);
67 struct iwl_dbg_tlv_node
*node
;
69 node
= kzalloc(sizeof(*node
) + len
, GFP_KERNEL
);
73 memcpy(&node
->tlv
, tlv
, sizeof(node
->tlv
) + len
);
74 list_add_tail(&node
->list
, list
);
79 static bool iwl_dbg_tlv_ver_support(struct iwl_ucode_tlv
*tlv
)
81 struct iwl_fw_ini_header
*hdr
= (void *)&tlv
->data
[0];
82 u32 type
= le32_to_cpu(tlv
->type
);
83 u32 tlv_idx
= type
- IWL_UCODE_TLV_DEBUG_BASE
;
84 u32 ver
= le32_to_cpu(hdr
->version
);
86 if (ver
< dbg_ver_table
[tlv_idx
].min_ver
||
87 ver
> dbg_ver_table
[tlv_idx
].max_ver
)
93 static int iwl_dbg_tlv_alloc_debug_info(struct iwl_trans
*trans
,
94 struct iwl_ucode_tlv
*tlv
)
96 struct iwl_fw_ini_debug_info_tlv
*debug_info
= (void *)tlv
->data
;
98 if (le32_to_cpu(tlv
->length
) != sizeof(*debug_info
))
101 IWL_DEBUG_FW(trans
, "WRT: Loading debug cfg: %s\n",
102 debug_info
->debug_cfg_name
);
104 return iwl_dbg_tlv_add(tlv
, &trans
->dbg
.debug_info_tlv_list
);
107 static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans
*trans
,
108 struct iwl_ucode_tlv
*tlv
)
110 struct iwl_fw_ini_allocation_tlv
*alloc
= (void *)tlv
->data
;
114 if (le32_to_cpu(tlv
->length
) != sizeof(*alloc
))
117 buf_location
= le32_to_cpu(alloc
->buf_location
);
118 alloc_id
= le32_to_cpu(alloc
->alloc_id
);
120 if (buf_location
== IWL_FW_INI_LOCATION_INVALID
||
121 buf_location
>= IWL_FW_INI_LOCATION_NUM
)
124 if (alloc_id
== IWL_FW_INI_ALLOCATION_INVALID
||
125 alloc_id
>= IWL_FW_INI_ALLOCATION_NUM
)
128 if (buf_location
== IWL_FW_INI_LOCATION_NPK_PATH
&&
129 alloc_id
!= IWL_FW_INI_ALLOCATION_ID_DBGC1
)
132 if (buf_location
== IWL_FW_INI_LOCATION_SRAM_PATH
&&
133 alloc_id
!= IWL_FW_INI_ALLOCATION_ID_DBGC1
&&
134 alloc_id
!= IWL_FW_INI_ALLOCATION_ID_INTERNAL
)
137 trans
->dbg
.fw_mon_cfg
[alloc_id
] = *alloc
;
142 "WRT: Invalid allocation id %u and/or location id %u for allocation TLV\n",
143 alloc_id
, buf_location
);
147 static int iwl_dbg_tlv_alloc_hcmd(struct iwl_trans
*trans
,
148 struct iwl_ucode_tlv
*tlv
)
150 struct iwl_fw_ini_hcmd_tlv
*hcmd
= (void *)tlv
->data
;
151 u32 tp
= le32_to_cpu(hcmd
->time_point
);
153 if (le32_to_cpu(tlv
->length
) <= sizeof(*hcmd
))
156 /* Host commands can not be sent in early time point since the FW
159 if (tp
== IWL_FW_INI_TIME_POINT_INVALID
||
160 tp
>= IWL_FW_INI_TIME_POINT_NUM
||
161 tp
== IWL_FW_INI_TIME_POINT_EARLY
) {
163 "WRT: Invalid time point %u for host command TLV\n",
168 return iwl_dbg_tlv_add(tlv
, &trans
->dbg
.time_point
[tp
].hcmd_list
);
171 static int iwl_dbg_tlv_alloc_region(struct iwl_trans
*trans
,
172 struct iwl_ucode_tlv
*tlv
)
174 struct iwl_fw_ini_region_tlv
*reg
= (void *)tlv
->data
;
175 struct iwl_ucode_tlv
**active_reg
;
176 u32 id
= le32_to_cpu(reg
->id
);
177 u32 type
= le32_to_cpu(reg
->type
);
178 u32 tlv_len
= sizeof(*tlv
) + le32_to_cpu(tlv
->length
);
180 if (le32_to_cpu(tlv
->length
) < sizeof(*reg
))
183 /* For safe using a string from FW make sure we have a
186 reg
->name
[IWL_FW_INI_MAX_NAME
- 1] = 0;
188 IWL_DEBUG_FW(trans
, "WRT: parsing region: %s\n", reg
->name
);
190 if (id
>= IWL_FW_INI_MAX_REGION_ID
) {
191 IWL_ERR(trans
, "WRT: Invalid region id %u\n", id
);
195 if (type
<= IWL_FW_INI_REGION_INVALID
||
196 type
>= IWL_FW_INI_REGION_NUM
) {
197 IWL_ERR(trans
, "WRT: Invalid region type %u\n", type
);
201 if (type
== IWL_FW_INI_REGION_PCI_IOSF_CONFIG
&&
202 !trans
->ops
->read_config32
) {
203 IWL_ERR(trans
, "WRT: Unsupported region type %u\n", type
);
207 active_reg
= &trans
->dbg
.active_regions
[id
];
209 IWL_WARN(trans
, "WRT: Overriding region id %u\n", id
);
214 *active_reg
= kmemdup(tlv
, tlv_len
, GFP_KERNEL
);
218 IWL_DEBUG_FW(trans
, "WRT: Enabling region id %u type %u\n", id
, type
);
223 static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans
*trans
,
224 struct iwl_ucode_tlv
*tlv
)
226 struct iwl_fw_ini_trigger_tlv
*trig
= (void *)tlv
->data
;
227 u32 tp
= le32_to_cpu(trig
->time_point
);
228 struct iwl_ucode_tlv
*dup
= NULL
;
231 if (le32_to_cpu(tlv
->length
) < sizeof(*trig
))
234 if (tp
<= IWL_FW_INI_TIME_POINT_INVALID
||
235 tp
>= IWL_FW_INI_TIME_POINT_NUM
) {
237 "WRT: Invalid time point %u for trigger TLV\n",
242 if (!le32_to_cpu(trig
->occurrences
)) {
243 dup
= kmemdup(tlv
, sizeof(*tlv
) + le32_to_cpu(tlv
->length
),
247 trig
= (void *)dup
->data
;
248 trig
->occurrences
= cpu_to_le32(-1);
252 ret
= iwl_dbg_tlv_add(tlv
, &trans
->dbg
.time_point
[tp
].trig_list
);
258 static int (*dbg_tlv_alloc
[])(struct iwl_trans
*trans
,
259 struct iwl_ucode_tlv
*tlv
) = {
260 [IWL_DBG_TLV_TYPE_DEBUG_INFO
] = iwl_dbg_tlv_alloc_debug_info
,
261 [IWL_DBG_TLV_TYPE_BUF_ALLOC
] = iwl_dbg_tlv_alloc_buf_alloc
,
262 [IWL_DBG_TLV_TYPE_HCMD
] = iwl_dbg_tlv_alloc_hcmd
,
263 [IWL_DBG_TLV_TYPE_REGION
] = iwl_dbg_tlv_alloc_region
,
264 [IWL_DBG_TLV_TYPE_TRIGGER
] = iwl_dbg_tlv_alloc_trigger
,
267 void iwl_dbg_tlv_alloc(struct iwl_trans
*trans
, struct iwl_ucode_tlv
*tlv
,
270 struct iwl_fw_ini_header
*hdr
= (void *)&tlv
->data
[0];
271 u32 type
= le32_to_cpu(tlv
->type
);
272 u32 tlv_idx
= type
- IWL_UCODE_TLV_DEBUG_BASE
;
273 u32 domain
= le32_to_cpu(hdr
->domain
);
274 enum iwl_ini_cfg_state
*cfg_state
= ext
?
275 &trans
->dbg
.external_ini_cfg
: &trans
->dbg
.internal_ini_cfg
;
278 if (domain
!= IWL_FW_INI_DOMAIN_ALWAYS_ON
&&
279 !(domain
& trans
->dbg
.domains_bitmap
)) {
281 "WRT: Skipping TLV with disabled domain 0x%0x (0x%0x)\n",
282 domain
, trans
->dbg
.domains_bitmap
);
286 if (tlv_idx
>= ARRAY_SIZE(dbg_tlv_alloc
) || !dbg_tlv_alloc
[tlv_idx
]) {
287 IWL_ERR(trans
, "WRT: Unsupported TLV type 0x%x\n", type
);
291 if (!iwl_dbg_tlv_ver_support(tlv
)) {
292 IWL_ERR(trans
, "WRT: Unsupported TLV 0x%x version %u\n", type
,
293 le32_to_cpu(hdr
->version
));
297 ret
= dbg_tlv_alloc
[tlv_idx
](trans
, tlv
);
300 "WRT: Failed to allocate TLV 0x%x, ret %d, (ext=%d)\n",
305 if (*cfg_state
== IWL_INI_CFG_STATE_NOT_LOADED
)
306 *cfg_state
= IWL_INI_CFG_STATE_LOADED
;
311 *cfg_state
= IWL_INI_CFG_STATE_CORRUPTED
;
314 void iwl_dbg_tlv_del_timers(struct iwl_trans
*trans
)
316 struct list_head
*timer_list
= &trans
->dbg
.periodic_trig_list
;
317 struct iwl_dbg_tlv_timer_node
*node
, *tmp
;
319 list_for_each_entry_safe(node
, tmp
, timer_list
, list
) {
320 del_timer(&node
->timer
);
321 list_del(&node
->list
);
325 IWL_EXPORT_SYMBOL(iwl_dbg_tlv_del_timers
);
327 static void iwl_dbg_tlv_fragments_free(struct iwl_trans
*trans
,
328 enum iwl_fw_ini_allocation_id alloc_id
)
330 struct iwl_fw_mon
*fw_mon
;
333 if (alloc_id
<= IWL_FW_INI_ALLOCATION_INVALID
||
334 alloc_id
>= IWL_FW_INI_ALLOCATION_NUM
)
337 fw_mon
= &trans
->dbg
.fw_mon_ini
[alloc_id
];
339 for (i
= 0; i
< fw_mon
->num_frags
; i
++) {
340 struct iwl_dram_data
*frag
= &fw_mon
->frags
[i
];
342 dma_free_coherent(trans
->dev
, frag
->size
, frag
->block
,
350 kfree(fw_mon
->frags
);
351 fw_mon
->frags
= NULL
;
352 fw_mon
->num_frags
= 0;
355 void iwl_dbg_tlv_free(struct iwl_trans
*trans
)
357 struct iwl_dbg_tlv_node
*tlv_node
, *tlv_node_tmp
;
360 iwl_dbg_tlv_del_timers(trans
);
362 for (i
= 0; i
< ARRAY_SIZE(trans
->dbg
.active_regions
); i
++) {
363 struct iwl_ucode_tlv
**active_reg
=
364 &trans
->dbg
.active_regions
[i
];
370 list_for_each_entry_safe(tlv_node
, tlv_node_tmp
,
371 &trans
->dbg
.debug_info_tlv_list
, list
) {
372 list_del(&tlv_node
->list
);
376 for (i
= 0; i
< ARRAY_SIZE(trans
->dbg
.time_point
); i
++) {
377 struct iwl_dbg_tlv_time_point_data
*tp
=
378 &trans
->dbg
.time_point
[i
];
380 list_for_each_entry_safe(tlv_node
, tlv_node_tmp
, &tp
->trig_list
,
382 list_del(&tlv_node
->list
);
386 list_for_each_entry_safe(tlv_node
, tlv_node_tmp
, &tp
->hcmd_list
,
388 list_del(&tlv_node
->list
);
392 list_for_each_entry_safe(tlv_node
, tlv_node_tmp
,
393 &tp
->active_trig_list
, list
) {
394 list_del(&tlv_node
->list
);
399 for (i
= 0; i
< ARRAY_SIZE(trans
->dbg
.fw_mon_ini
); i
++)
400 iwl_dbg_tlv_fragments_free(trans
, i
);
403 static int iwl_dbg_tlv_parse_bin(struct iwl_trans
*trans
, const u8
*data
,
406 struct iwl_ucode_tlv
*tlv
;
409 while (len
>= sizeof(*tlv
)) {
413 tlv_len
= le32_to_cpu(tlv
->length
);
416 IWL_ERR(trans
, "invalid TLV len: %zd/%u\n",
420 len
-= ALIGN(tlv_len
, 4);
421 data
+= sizeof(*tlv
) + ALIGN(tlv_len
, 4);
423 iwl_dbg_tlv_alloc(trans
, tlv
, true);
429 void iwl_dbg_tlv_load_bin(struct device
*dev
, struct iwl_trans
*trans
)
431 const struct firmware
*fw
;
434 if (!iwlwifi_mod_params
.enable_ini
)
437 res
= firmware_request_nowarn(&fw
, "iwl-debug-yoyo.bin", dev
);
441 iwl_dbg_tlv_parse_bin(trans
, fw
->data
, fw
->size
);
443 release_firmware(fw
);
446 void iwl_dbg_tlv_init(struct iwl_trans
*trans
)
450 INIT_LIST_HEAD(&trans
->dbg
.debug_info_tlv_list
);
451 INIT_LIST_HEAD(&trans
->dbg
.periodic_trig_list
);
453 for (i
= 0; i
< ARRAY_SIZE(trans
->dbg
.time_point
); i
++) {
454 struct iwl_dbg_tlv_time_point_data
*tp
=
455 &trans
->dbg
.time_point
[i
];
457 INIT_LIST_HEAD(&tp
->trig_list
);
458 INIT_LIST_HEAD(&tp
->hcmd_list
);
459 INIT_LIST_HEAD(&tp
->active_trig_list
);
463 static int iwl_dbg_tlv_alloc_fragment(struct iwl_fw_runtime
*fwrt
,
464 struct iwl_dram_data
*frag
, u32 pages
)
469 if (!frag
|| frag
->size
|| !pages
)
473 * We try to allocate as many pages as we can, starting with
474 * the requested amount and going down until we can allocate
475 * something. Because of DIV_ROUND_UP(), pages will never go
476 * down to 0 and stop the loop, so stop when pages reaches 1,
477 * which is too small anyway.
480 block
= dma_alloc_coherent(fwrt
->dev
, pages
* PAGE_SIZE
,
482 GFP_KERNEL
| __GFP_NOWARN
);
486 IWL_WARN(fwrt
, "WRT: Failed to allocate fragment size %lu\n",
489 pages
= DIV_ROUND_UP(pages
, 2);
495 frag
->physical
= physical
;
497 frag
->size
= pages
* PAGE_SIZE
;
502 static int iwl_dbg_tlv_alloc_fragments(struct iwl_fw_runtime
*fwrt
,
503 enum iwl_fw_ini_allocation_id alloc_id
)
505 struct iwl_fw_mon
*fw_mon
;
506 struct iwl_fw_ini_allocation_tlv
*fw_mon_cfg
;
507 u32 num_frags
, remain_pages
, frag_pages
;
510 if (alloc_id
< IWL_FW_INI_ALLOCATION_INVALID
||
511 alloc_id
>= IWL_FW_INI_ALLOCATION_NUM
)
514 fw_mon_cfg
= &fwrt
->trans
->dbg
.fw_mon_cfg
[alloc_id
];
515 fw_mon
= &fwrt
->trans
->dbg
.fw_mon_ini
[alloc_id
];
517 if (fw_mon
->num_frags
||
518 fw_mon_cfg
->buf_location
!=
519 cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH
))
522 num_frags
= le32_to_cpu(fw_mon_cfg
->max_frags_num
);
523 if (!fw_has_capa(&fwrt
->fw
->ucode_capa
,
524 IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP
)) {
525 if (alloc_id
!= IWL_FW_INI_ALLOCATION_ID_DBGC1
)
530 remain_pages
= DIV_ROUND_UP(le32_to_cpu(fw_mon_cfg
->req_size
),
532 num_frags
= min_t(u32
, num_frags
, BUF_ALLOC_MAX_NUM_FRAGS
);
533 num_frags
= min_t(u32
, num_frags
, remain_pages
);
534 frag_pages
= DIV_ROUND_UP(remain_pages
, num_frags
);
536 fw_mon
->frags
= kcalloc(num_frags
, sizeof(*fw_mon
->frags
), GFP_KERNEL
);
540 for (i
= 0; i
< num_frags
; i
++) {
541 int pages
= min_t(u32
, frag_pages
, remain_pages
);
544 "WRT: Allocating DRAM buffer (alloc_id=%u, fragment=%u, size=0x%lx)\n",
545 alloc_id
, i
, pages
* PAGE_SIZE
);
547 pages
= iwl_dbg_tlv_alloc_fragment(fwrt
, &fw_mon
->frags
[i
],
550 u32 alloc_size
= le32_to_cpu(fw_mon_cfg
->req_size
) -
551 (remain_pages
* PAGE_SIZE
);
553 if (alloc_size
< le32_to_cpu(fw_mon_cfg
->min_size
)) {
554 iwl_dbg_tlv_fragments_free(fwrt
->trans
,
561 remain_pages
-= pages
;
568 static int iwl_dbg_tlv_apply_buffer(struct iwl_fw_runtime
*fwrt
,
569 enum iwl_fw_ini_allocation_id alloc_id
)
571 struct iwl_fw_mon
*fw_mon
;
572 u32 remain_frags
, num_commands
;
573 int i
, fw_mon_idx
= 0;
575 if (!fw_has_capa(&fwrt
->fw
->ucode_capa
,
576 IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP
))
579 if (alloc_id
< IWL_FW_INI_ALLOCATION_INVALID
||
580 alloc_id
>= IWL_FW_INI_ALLOCATION_NUM
)
583 if (le32_to_cpu(fwrt
->trans
->dbg
.fw_mon_cfg
[alloc_id
].buf_location
) !=
584 IWL_FW_INI_LOCATION_DRAM_PATH
)
587 fw_mon
= &fwrt
->trans
->dbg
.fw_mon_ini
[alloc_id
];
589 /* the first fragment of DBGC1 is given to the FW via register
592 if (alloc_id
== IWL_FW_INI_ALLOCATION_ID_DBGC1
)
595 remain_frags
= fw_mon
->num_frags
- fw_mon_idx
;
599 num_commands
= DIV_ROUND_UP(remain_frags
, BUF_ALLOC_MAX_NUM_FRAGS
);
601 IWL_DEBUG_FW(fwrt
, "WRT: Applying DRAM destination (alloc_id=%u)\n",
604 for (i
= 0; i
< num_commands
; i
++) {
605 u32 num_frags
= min_t(u32
, remain_frags
,
606 BUF_ALLOC_MAX_NUM_FRAGS
);
607 struct iwl_buf_alloc_cmd data
= {
608 .alloc_id
= cpu_to_le32(alloc_id
),
609 .num_frags
= cpu_to_le32(num_frags
),
611 cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH
),
613 struct iwl_host_cmd hcmd
= {
614 .id
= WIDE_ID(DEBUG_GROUP
, BUFFER_ALLOCATION
),
616 .len
[0] = sizeof(data
),
620 for (j
= 0; j
< num_frags
; j
++) {
621 struct iwl_buf_alloc_frag
*frag
= &data
.frags
[j
];
622 struct iwl_dram_data
*fw_mon_frag
=
623 &fw_mon
->frags
[fw_mon_idx
++];
625 frag
->addr
= cpu_to_le64(fw_mon_frag
->physical
);
626 frag
->size
= cpu_to_le32(fw_mon_frag
->size
);
628 ret
= iwl_trans_send_cmd(fwrt
->trans
, &hcmd
);
632 remain_frags
-= num_frags
;
638 static void iwl_dbg_tlv_apply_buffers(struct iwl_fw_runtime
*fwrt
)
642 for (i
= 0; i
< IWL_FW_INI_ALLOCATION_NUM
; i
++) {
643 ret
= iwl_dbg_tlv_apply_buffer(fwrt
, i
);
646 "WRT: Failed to apply DRAM buffer for allocation id %d, ret=%d\n",
651 static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime
*fwrt
,
652 struct list_head
*hcmd_list
)
654 struct iwl_dbg_tlv_node
*node
;
656 list_for_each_entry(node
, hcmd_list
, list
) {
657 struct iwl_fw_ini_hcmd_tlv
*hcmd
= (void *)node
->tlv
.data
;
658 struct iwl_fw_ini_hcmd
*hcmd_data
= &hcmd
->hcmd
;
659 u16 hcmd_len
= le32_to_cpu(node
->tlv
.length
) - sizeof(*hcmd
);
660 struct iwl_host_cmd cmd
= {
661 .id
= WIDE_ID(hcmd_data
->group
, hcmd_data
->id
),
662 .len
= { hcmd_len
, },
663 .data
= { hcmd_data
->data
, },
666 iwl_trans_send_cmd(fwrt
->trans
, &cmd
);
670 static void iwl_dbg_tlv_periodic_trig_handler(struct timer_list
*t
)
672 struct iwl_dbg_tlv_timer_node
*timer_node
=
673 from_timer(timer_node
, t
, timer
);
674 struct iwl_fwrt_dump_data dump_data
= {
675 .trig
= (void *)timer_node
->tlv
->data
,
679 ret
= iwl_fw_dbg_ini_collect(timer_node
->fwrt
, &dump_data
);
680 if (!ret
|| ret
== -EBUSY
) {
681 u32 occur
= le32_to_cpu(dump_data
.trig
->occurrences
);
682 u32 collect_interval
= le32_to_cpu(dump_data
.trig
->data
[0]);
687 mod_timer(t
, jiffies
+ msecs_to_jiffies(collect_interval
));
691 static void iwl_dbg_tlv_set_periodic_trigs(struct iwl_fw_runtime
*fwrt
)
693 struct iwl_dbg_tlv_node
*node
;
694 struct list_head
*trig_list
=
695 &fwrt
->trans
->dbg
.time_point
[IWL_FW_INI_TIME_POINT_PERIODIC
].active_trig_list
;
697 list_for_each_entry(node
, trig_list
, list
) {
698 struct iwl_fw_ini_trigger_tlv
*trig
= (void *)node
->tlv
.data
;
699 struct iwl_dbg_tlv_timer_node
*timer_node
;
700 u32 occur
= le32_to_cpu(trig
->occurrences
), collect_interval
;
701 u32 min_interval
= 100;
706 /* make sure there is at least one dword of data for the
709 if (le32_to_cpu(node
->tlv
.length
) <
710 sizeof(*trig
) + sizeof(__le32
)) {
712 "WRT: Invalid periodic trigger data was not given\n");
716 if (le32_to_cpu(trig
->data
[0]) < min_interval
) {
718 "WRT: Override min interval from %u to %u msec\n",
719 le32_to_cpu(trig
->data
[0]), min_interval
);
720 trig
->data
[0] = cpu_to_le32(min_interval
);
723 collect_interval
= le32_to_cpu(trig
->data
[0]);
725 timer_node
= kzalloc(sizeof(*timer_node
), GFP_KERNEL
);
728 "WRT: Failed to allocate periodic trigger\n");
732 timer_node
->fwrt
= fwrt
;
733 timer_node
->tlv
= &node
->tlv
;
734 timer_setup(&timer_node
->timer
,
735 iwl_dbg_tlv_periodic_trig_handler
, 0);
737 list_add_tail(&timer_node
->list
,
738 &fwrt
->trans
->dbg
.periodic_trig_list
);
740 IWL_DEBUG_FW(fwrt
, "WRT: Enabling periodic trigger\n");
742 mod_timer(&timer_node
->timer
,
743 jiffies
+ msecs_to_jiffies(collect_interval
));
747 static bool is_trig_data_contained(struct iwl_ucode_tlv
*new,
748 struct iwl_ucode_tlv
*old
)
750 struct iwl_fw_ini_trigger_tlv
*new_trig
= (void *)new->data
;
751 struct iwl_fw_ini_trigger_tlv
*old_trig
= (void *)old
->data
;
752 __le32
*new_data
= new_trig
->data
, *old_data
= old_trig
->data
;
753 u32 new_dwords_num
= iwl_tlv_array_len(new, new_trig
, data
);
754 u32 old_dwords_num
= iwl_tlv_array_len(old
, old_trig
, data
);
757 for (i
= 0; i
< new_dwords_num
; i
++) {
760 for (j
= 0; j
< old_dwords_num
; j
++) {
761 if (new_data
[i
] == old_data
[j
]) {
773 static int iwl_dbg_tlv_override_trig_node(struct iwl_fw_runtime
*fwrt
,
774 struct iwl_ucode_tlv
*trig_tlv
,
775 struct iwl_dbg_tlv_node
*node
)
777 struct iwl_ucode_tlv
*node_tlv
= &node
->tlv
;
778 struct iwl_fw_ini_trigger_tlv
*node_trig
= (void *)node_tlv
->data
;
779 struct iwl_fw_ini_trigger_tlv
*trig
= (void *)trig_tlv
->data
;
780 u32 policy
= le32_to_cpu(trig
->apply_policy
);
781 u32 size
= le32_to_cpu(trig_tlv
->length
);
782 u32 trig_data_len
= size
- sizeof(*trig
);
785 if (!(policy
& IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA
)) {
786 u32 data_len
= le32_to_cpu(node_tlv
->length
) -
790 "WRT: Appending trigger data (time point %u)\n",
791 le32_to_cpu(trig
->time_point
));
797 "WRT: Overriding trigger data (time point %u)\n",
798 le32_to_cpu(trig
->time_point
));
801 if (size
!= le32_to_cpu(node_tlv
->length
)) {
802 struct list_head
*prev
= node
->list
.prev
;
803 struct iwl_dbg_tlv_node
*tmp
;
805 list_del(&node
->list
);
807 tmp
= krealloc(node
, sizeof(*node
) + size
, GFP_KERNEL
);
810 "WRT: No memory to override trigger (time point %u)\n",
811 le32_to_cpu(trig
->time_point
));
813 list_add(&node
->list
, prev
);
818 list_add(&tmp
->list
, prev
);
819 node_tlv
= &tmp
->tlv
;
820 node_trig
= (void *)node_tlv
->data
;
823 memcpy(node_trig
->data
+ offset
, trig
->data
, trig_data_len
);
824 node_tlv
->length
= cpu_to_le32(size
);
826 if (policy
& IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG
) {
828 "WRT: Overriding trigger configuration (time point %u)\n",
829 le32_to_cpu(trig
->time_point
));
831 /* the first 11 dwords are configuration related */
832 memcpy(node_trig
, trig
, sizeof(__le32
) * 11);
835 if (policy
& IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS
) {
837 "WRT: Overriding trigger regions (time point %u)\n",
838 le32_to_cpu(trig
->time_point
));
840 node_trig
->regions_mask
= trig
->regions_mask
;
843 "WRT: Appending trigger regions (time point %u)\n",
844 le32_to_cpu(trig
->time_point
));
846 node_trig
->regions_mask
|= trig
->regions_mask
;
853 iwl_dbg_tlv_add_active_trigger(struct iwl_fw_runtime
*fwrt
,
854 struct list_head
*trig_list
,
855 struct iwl_ucode_tlv
*trig_tlv
)
857 struct iwl_fw_ini_trigger_tlv
*trig
= (void *)trig_tlv
->data
;
858 struct iwl_dbg_tlv_node
*node
, *match
= NULL
;
859 u32 policy
= le32_to_cpu(trig
->apply_policy
);
861 list_for_each_entry(node
, trig_list
, list
) {
862 if (!(policy
& IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT
))
865 if (!(policy
& IWL_FW_INI_APPLY_POLICY_MATCH_DATA
) ||
866 is_trig_data_contained(trig_tlv
, &node
->tlv
)) {
873 IWL_DEBUG_FW(fwrt
, "WRT: Enabling trigger (time point %u)\n",
874 le32_to_cpu(trig
->time_point
));
875 return iwl_dbg_tlv_add(trig_tlv
, trig_list
);
878 return iwl_dbg_tlv_override_trig_node(fwrt
, trig_tlv
, match
);
882 iwl_dbg_tlv_gen_active_trig_list(struct iwl_fw_runtime
*fwrt
,
883 struct iwl_dbg_tlv_time_point_data
*tp
)
885 struct iwl_dbg_tlv_node
*node
;
886 struct list_head
*trig_list
= &tp
->trig_list
;
887 struct list_head
*active_trig_list
= &tp
->active_trig_list
;
889 list_for_each_entry(node
, trig_list
, list
) {
890 struct iwl_ucode_tlv
*tlv
= &node
->tlv
;
892 iwl_dbg_tlv_add_active_trigger(fwrt
, active_trig_list
, tlv
);
896 static bool iwl_dbg_tlv_check_fw_pkt(struct iwl_fw_runtime
*fwrt
,
897 struct iwl_fwrt_dump_data
*dump_data
,
898 union iwl_dbg_tlv_tp_data
*tp_data
,
901 struct iwl_rx_packet
*pkt
= tp_data
->fw_pkt
;
902 struct iwl_cmd_header
*wanted_hdr
= (void *)&trig_data
;
904 if (pkt
&& (pkt
->hdr
.cmd
== wanted_hdr
->cmd
&&
905 pkt
->hdr
.group_id
== wanted_hdr
->group_id
)) {
906 struct iwl_rx_packet
*fw_pkt
=
908 sizeof(*pkt
) + iwl_rx_packet_payload_len(pkt
),
914 dump_data
->fw_pkt
= fw_pkt
;
923 iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime
*fwrt
,
924 struct list_head
*active_trig_list
,
925 union iwl_dbg_tlv_tp_data
*tp_data
,
926 bool (*data_check
)(struct iwl_fw_runtime
*fwrt
,
927 struct iwl_fwrt_dump_data
*dump_data
,
928 union iwl_dbg_tlv_tp_data
*tp_data
,
931 struct iwl_dbg_tlv_node
*node
;
933 list_for_each_entry(node
, active_trig_list
, list
) {
934 struct iwl_fwrt_dump_data dump_data
= {
935 .trig
= (void *)node
->tlv
.data
,
937 u32 num_data
= iwl_tlv_array_len(&node
->tlv
, dump_data
.trig
,
942 ret
= iwl_fw_dbg_ini_collect(fwrt
, &dump_data
);
947 for (i
= 0; i
< num_data
; i
++) {
949 data_check(fwrt
, &dump_data
, tp_data
,
950 le32_to_cpu(dump_data
.trig
->data
[i
]))) {
951 ret
= iwl_fw_dbg_ini_collect(fwrt
, &dump_data
);
963 static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime
*fwrt
)
965 enum iwl_fw_ini_buffer_location
*ini_dest
= &fwrt
->trans
->dbg
.ini_dest
;
968 if (*ini_dest
!= IWL_FW_INI_LOCATION_INVALID
)
972 "WRT: Generating active triggers list, domain 0x%x\n",
973 fwrt
->trans
->dbg
.domains_bitmap
);
975 for (i
= 0; i
< ARRAY_SIZE(fwrt
->trans
->dbg
.time_point
); i
++) {
976 struct iwl_dbg_tlv_time_point_data
*tp
=
977 &fwrt
->trans
->dbg
.time_point
[i
];
979 iwl_dbg_tlv_gen_active_trig_list(fwrt
, tp
);
982 *ini_dest
= IWL_FW_INI_LOCATION_INVALID
;
983 for (i
= 0; i
< IWL_FW_INI_ALLOCATION_NUM
; i
++) {
984 struct iwl_fw_ini_allocation_tlv
*fw_mon_cfg
=
985 &fwrt
->trans
->dbg
.fw_mon_cfg
[i
];
986 u32 dest
= le32_to_cpu(fw_mon_cfg
->buf_location
);
988 if (dest
== IWL_FW_INI_LOCATION_INVALID
)
991 if (*ini_dest
== IWL_FW_INI_LOCATION_INVALID
)
994 if (dest
!= *ini_dest
)
997 ret
= iwl_dbg_tlv_alloc_fragments(fwrt
, i
);
1000 "WRT: Failed to allocate DRAM buffer for allocation id %d, ret=%d\n",
1005 void iwl_dbg_tlv_time_point(struct iwl_fw_runtime
*fwrt
,
1006 enum iwl_fw_ini_time_point tp_id
,
1007 union iwl_dbg_tlv_tp_data
*tp_data
)
1009 struct list_head
*hcmd_list
, *trig_list
;
1011 if (!iwl_trans_dbg_ini_valid(fwrt
->trans
) ||
1012 tp_id
== IWL_FW_INI_TIME_POINT_INVALID
||
1013 tp_id
>= IWL_FW_INI_TIME_POINT_NUM
)
1016 hcmd_list
= &fwrt
->trans
->dbg
.time_point
[tp_id
].hcmd_list
;
1017 trig_list
= &fwrt
->trans
->dbg
.time_point
[tp_id
].active_trig_list
;
1020 case IWL_FW_INI_TIME_POINT_EARLY
:
1021 iwl_dbg_tlv_init_cfg(fwrt
);
1022 iwl_dbg_tlv_tp_trigger(fwrt
, trig_list
, tp_data
, NULL
);
1024 case IWL_FW_INI_TIME_POINT_AFTER_ALIVE
:
1025 iwl_dbg_tlv_apply_buffers(fwrt
);
1026 iwl_dbg_tlv_send_hcmds(fwrt
, hcmd_list
);
1027 iwl_dbg_tlv_tp_trigger(fwrt
, trig_list
, tp_data
, NULL
);
1029 case IWL_FW_INI_TIME_POINT_PERIODIC
:
1030 iwl_dbg_tlv_set_periodic_trigs(fwrt
);
1031 iwl_dbg_tlv_send_hcmds(fwrt
, hcmd_list
);
1033 case IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF
:
1034 case IWL_FW_INI_TIME_POINT_MISSED_BEACONS
:
1035 case IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION
:
1036 iwl_dbg_tlv_send_hcmds(fwrt
, hcmd_list
);
1037 iwl_dbg_tlv_tp_trigger(fwrt
, trig_list
, tp_data
,
1038 iwl_dbg_tlv_check_fw_pkt
);
1041 iwl_dbg_tlv_send_hcmds(fwrt
, hcmd_list
);
1042 iwl_dbg_tlv_tp_trigger(fwrt
, trig_list
, tp_data
, NULL
);
1046 IWL_EXPORT_SYMBOL(iwl_dbg_tlv_time_point
);