/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright (C) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright (C) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/

#include <linux/firmware.h>
#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-dbg-tlv.h"
#include "fw/dbg.h"
#include "fw/runtime.h"

/**
 * enum iwl_dbg_tlv_type - debug TLV types
 * @IWL_DBG_TLV_TYPE_DEBUG_INFO: debug info TLV
 * @IWL_DBG_TLV_TYPE_BUF_ALLOC: buffer allocation TLV
 * @IWL_DBG_TLV_TYPE_HCMD: host command TLV
 * @IWL_DBG_TLV_TYPE_REGION: region TLV
 * @IWL_DBG_TLV_TYPE_TRIGGER: trigger TLV
 * @IWL_DBG_TLV_TYPE_NUM: number of debug TLVs
 */
enum iwl_dbg_tlv_type {
	IWL_DBG_TLV_TYPE_DEBUG_INFO =
		IWL_UCODE_TLV_TYPE_DEBUG_INFO - IWL_UCODE_TLV_DEBUG_BASE,
	IWL_DBG_TLV_TYPE_BUF_ALLOC,
	IWL_DBG_TLV_TYPE_HCMD,
	IWL_DBG_TLV_TYPE_REGION,
	IWL_DBG_TLV_TYPE_TRIGGER,
	IWL_DBG_TLV_TYPE_NUM,
};

/**
 * struct iwl_dbg_tlv_ver_data - debug TLV version struct
 * @min_ver: min version supported
 * @max_ver: max version supported
 */
struct iwl_dbg_tlv_ver_data {
	int min_ver;
	int max_ver;
};

/**
 * struct iwl_dbg_tlv_timer_node - timer node struct
 * @list: list of &struct iwl_dbg_tlv_timer_node
 * @timer: timer
 * @fwrt: &struct iwl_fw_runtime
 * @tlv: TLV attached to the timer node
 */
struct iwl_dbg_tlv_timer_node {
	struct list_head list;
	struct timer_list timer;
	struct iwl_fw_runtime *fwrt;
	struct iwl_ucode_tlv *tlv;
};

static const struct iwl_dbg_tlv_ver_data
dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
	[IWL_DBG_TLV_TYPE_DEBUG_INFO]	= {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_BUF_ALLOC]	= {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_HCMD]		= {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_REGION]	= {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_TRIGGER]	= {.min_ver = 1, .max_ver = 1,},
};

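/* Copy @tlv (header and payload) into a freshly allocated list node and
 * append it to @list.
 */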
static int iwl_dbg_tlv_add(struct iwl_ucode_tlv *tlv, struct list_head *list)
{
	u32 len = le32_to_cpu(tlv->length);
	struct iwl_dbg_tlv_node *node;

	node = kzalloc(sizeof(*node) + len, GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	memcpy(&node->tlv, tlv, sizeof(node->tlv) + len);
	list_add_tail(&node->list, list);

	return 0;
}

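/* Check that the version of @tlv falls within the supported range listed
 * in dbg_ver_table for its TLV type.
 */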
static bool iwl_dbg_tlv_ver_support(struct iwl_ucode_tlv *tlv)
{
	struct iwl_fw_ini_header *hdr = (void *)&tlv->data[0];
	u32 type = le32_to_cpu(tlv->type);
	u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
	u32 ver = le32_to_cpu(hdr->version);

	if (ver < dbg_ver_table[tlv_idx].min_ver ||
	    ver > dbg_ver_table[tlv_idx].max_ver)
		return false;

	return true;
}

static int iwl_dbg_tlv_alloc_debug_info(struct iwl_trans *trans,
					struct iwl_ucode_tlv *tlv)
{
	struct iwl_fw_ini_debug_info_tlv *debug_info = (void *)tlv->data;

	if (le32_to_cpu(tlv->length) != sizeof(*debug_info))
		return -EINVAL;

	IWL_DEBUG_FW(trans, "WRT: Loading debug cfg: %s\n",
		     debug_info->debug_cfg_name);

	return iwl_dbg_tlv_add(tlv, &trans->dbg.debug_info_tlv_list);
}

static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans,
				       struct iwl_ucode_tlv *tlv)
{
	struct iwl_fw_ini_allocation_tlv *alloc = (void *)tlv->data;
	u32 buf_location = le32_to_cpu(alloc->buf_location);
	u32 alloc_id = le32_to_cpu(alloc->alloc_id);

	if (le32_to_cpu(tlv->length) != sizeof(*alloc) ||
	    (buf_location != IWL_FW_INI_LOCATION_SRAM_PATH &&
	     buf_location != IWL_FW_INI_LOCATION_DRAM_PATH))
		return -EINVAL;

	/* SRAM supports only the DBGC1 allocation; DRAM accepts any valid
	 * allocation id
	 */
	if ((buf_location == IWL_FW_INI_LOCATION_SRAM_PATH &&
	     alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1) ||
	    (buf_location == IWL_FW_INI_LOCATION_DRAM_PATH &&
	     (alloc_id == IWL_FW_INI_ALLOCATION_INVALID ||
	      alloc_id >= IWL_FW_INI_ALLOCATION_NUM))) {
		IWL_ERR(trans,
			"WRT: Invalid allocation id %u for allocation TLV\n",
			alloc_id);
		return -EINVAL;
	}

	trans->dbg.fw_mon_cfg[alloc_id] = *alloc;

	return 0;
}

static int iwl_dbg_tlv_alloc_hcmd(struct iwl_trans *trans,
				  struct iwl_ucode_tlv *tlv)
{
	struct iwl_fw_ini_hcmd_tlv *hcmd = (void *)tlv->data;
	u32 tp = le32_to_cpu(hcmd->time_point);

	if (le32_to_cpu(tlv->length) <= sizeof(*hcmd))
		return -EINVAL;

	/* Host commands cannot be sent in the early time point since the FW
	 * is not ready yet
	 */
	if (tp == IWL_FW_INI_TIME_POINT_INVALID ||
	    tp >= IWL_FW_INI_TIME_POINT_NUM ||
	    tp == IWL_FW_INI_TIME_POINT_EARLY) {
		IWL_ERR(trans,
			"WRT: Invalid time point %u for host command TLV\n",
			tp);
		return -EINVAL;
	}

	return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].hcmd_list);
}

static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
				    struct iwl_ucode_tlv *tlv)
{
	struct iwl_fw_ini_region_tlv *reg = (void *)tlv->data;
	struct iwl_ucode_tlv **active_reg;
	u32 id = le32_to_cpu(reg->id);
	u32 type = le32_to_cpu(reg->type);
	u32 tlv_len = sizeof(*tlv) + le32_to_cpu(tlv->length);

	if (le32_to_cpu(tlv->length) < sizeof(*reg))
		return -EINVAL;

	if (id >= IWL_FW_INI_MAX_REGION_ID) {
		IWL_ERR(trans, "WRT: Invalid region id %u\n", id);
		return -EINVAL;
	}

	if (type <= IWL_FW_INI_REGION_INVALID ||
	    type >= IWL_FW_INI_REGION_NUM) {
		IWL_ERR(trans, "WRT: Invalid region type %u\n", type);
		return -EINVAL;
	}

	active_reg = &trans->dbg.active_regions[id];
	if (*active_reg) {
		IWL_WARN(trans, "WRT: Overriding region id %u\n", id);

		kfree(*active_reg);
	}

	*active_reg = kmemdup(tlv, tlv_len, GFP_KERNEL);
	if (!*active_reg)
		return -ENOMEM;

	IWL_DEBUG_FW(trans, "WRT: Enabling region id %u type %u\n", id, type);

	return 0;
}

static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
				     struct iwl_ucode_tlv *tlv)
{
	struct iwl_fw_ini_trigger_tlv *trig = (void *)tlv->data;
	u32 tp = le32_to_cpu(trig->time_point);

	if (le32_to_cpu(tlv->length) < sizeof(*trig))
		return -EINVAL;

	if (tp <= IWL_FW_INI_TIME_POINT_INVALID ||
	    tp >= IWL_FW_INI_TIME_POINT_NUM) {
		IWL_ERR(trans,
			"WRT: Invalid time point %u for trigger TLV\n",
			tp);
		return -EINVAL;
	}

	/* an occurrences count of zero means "no limit", stored as -1 */
	if (!le32_to_cpu(trig->occurrences))
		trig->occurrences = cpu_to_le32(-1);

	return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
}

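/* Per-type allocation handlers, indexed by enum iwl_dbg_tlv_type */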
static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
			      struct iwl_ucode_tlv *tlv) = {
	[IWL_DBG_TLV_TYPE_DEBUG_INFO]	= iwl_dbg_tlv_alloc_debug_info,
	[IWL_DBG_TLV_TYPE_BUF_ALLOC]	= iwl_dbg_tlv_alloc_buf_alloc,
	[IWL_DBG_TLV_TYPE_HCMD]		= iwl_dbg_tlv_alloc_hcmd,
	[IWL_DBG_TLV_TYPE_REGION]	= iwl_dbg_tlv_alloc_region,
	[IWL_DBG_TLV_TYPE_TRIGGER]	= iwl_dbg_tlv_alloc_trigger,
};

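/* Parse a single debug TLV and dispatch it to the matching handler above.
 * @ext tells whether the TLV comes from the external debug file or from
 * the firmware file itself; each source tracks its own config state.
 */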
void iwl_dbg_tlv_alloc(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv,
		       bool ext)
{
	struct iwl_fw_ini_header *hdr = (void *)&tlv->data[0];
	u32 type = le32_to_cpu(tlv->type);
	u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
	u32 domain = le32_to_cpu(hdr->domain);
	enum iwl_ini_cfg_state *cfg_state = ext ?
		&trans->dbg.external_ini_cfg : &trans->dbg.internal_ini_cfg;
	int ret;

	if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
	    !(domain & trans->dbg.domains_bitmap)) {
		IWL_DEBUG_FW(trans,
			     "WRT: Skipping TLV with disabled domain 0x%0x (0x%0x)\n",
			     domain, trans->dbg.domains_bitmap);
		return;
	}

	if (tlv_idx >= ARRAY_SIZE(dbg_tlv_alloc) || !dbg_tlv_alloc[tlv_idx]) {
		IWL_ERR(trans, "WRT: Unsupported TLV type 0x%x\n", type);
		goto out_err;
	}

	if (!iwl_dbg_tlv_ver_support(tlv)) {
		IWL_ERR(trans, "WRT: Unsupported TLV 0x%x version %u\n", type,
			le32_to_cpu(hdr->version));
		goto out_err;
	}

	ret = dbg_tlv_alloc[tlv_idx](trans, tlv);
	if (ret) {
		IWL_ERR(trans,
			"WRT: Failed to allocate TLV 0x%x, ret %d, (ext=%d)\n",
			type, ret, ext);
		goto out_err;
	}

	if (*cfg_state == IWL_INI_CFG_STATE_NOT_LOADED)
		*cfg_state = IWL_INI_CFG_STATE_LOADED;

	return;

out_err:
	*cfg_state = IWL_INI_CFG_STATE_CORRUPTED;
}

void iwl_dbg_tlv_del_timers(struct iwl_trans *trans)
{
	struct list_head *timer_list = &trans->dbg.periodic_trig_list;
	struct iwl_dbg_tlv_timer_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, timer_list, list) {
		del_timer(&node->timer);
		list_del(&node->list);
		kfree(node);
	}
}
IWL_EXPORT_SYMBOL(iwl_dbg_tlv_del_timers);

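/* Free all DMA fragments of the firmware monitor buffer that belongs to
 * @alloc_id and reset its bookkeeping.
 */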
static void iwl_dbg_tlv_fragments_free(struct iwl_trans *trans,
				       enum iwl_fw_ini_allocation_id alloc_id)
{
	struct iwl_fw_mon *fw_mon;
	int i;

	if (alloc_id <= IWL_FW_INI_ALLOCATION_INVALID ||
	    alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
		return;

	fw_mon = &trans->dbg.fw_mon_ini[alloc_id];

	for (i = 0; i < fw_mon->num_frags; i++) {
		struct iwl_dram_data *frag = &fw_mon->frags[i];

		dma_free_coherent(trans->dev, frag->size, frag->block,
				  frag->physical);

		frag->physical = 0;
		frag->block = NULL;
		frag->size = 0;
	}

	kfree(fw_mon->frags);
	fw_mon->frags = NULL;
	fw_mon->num_frags = 0;
}

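/* Release every debug TLV resource: timers, active regions, the TLV lists
 * of each time point and all monitor buffer fragments.
 */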
void iwl_dbg_tlv_free(struct iwl_trans *trans)
{
	struct iwl_dbg_tlv_node *tlv_node, *tlv_node_tmp;
	int i;

	iwl_dbg_tlv_del_timers(trans);

	for (i = 0; i < ARRAY_SIZE(trans->dbg.active_regions); i++) {
		struct iwl_ucode_tlv **active_reg =
			&trans->dbg.active_regions[i];

		kfree(*active_reg);
		*active_reg = NULL;
	}

	list_for_each_entry_safe(tlv_node, tlv_node_tmp,
				 &trans->dbg.debug_info_tlv_list, list) {
		list_del(&tlv_node->list);
		kfree(tlv_node);
	}

	for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) {
		struct iwl_dbg_tlv_time_point_data *tp =
			&trans->dbg.time_point[i];

		list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->trig_list,
					 list) {
			list_del(&tlv_node->list);
			kfree(tlv_node);
		}

		list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->hcmd_list,
					 list) {
			list_del(&tlv_node->list);
			kfree(tlv_node);
		}

		list_for_each_entry_safe(tlv_node, tlv_node_tmp,
					 &tp->active_trig_list, list) {
			list_del(&tlv_node->list);
			kfree(tlv_node);
		}
	}

	for (i = 0; i < ARRAY_SIZE(trans->dbg.fw_mon_ini); i++)
		iwl_dbg_tlv_fragments_free(trans, i);
}

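/* Walk the raw TLV stream of the external debug file, validating each TLV
 * length before handing the TLV to iwl_dbg_tlv_alloc() as external.
 */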
static int iwl_dbg_tlv_parse_bin(struct iwl_trans *trans, const u8 *data,
				 size_t len)
{
	struct iwl_ucode_tlv *tlv;
	u32 tlv_len;

	while (len >= sizeof(*tlv)) {
		len -= sizeof(*tlv);
		tlv = (void *)data;

		tlv_len = le32_to_cpu(tlv->length);

		if (len < tlv_len) {
			IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
				len, tlv_len);
			return -EINVAL;
		}
		len -= ALIGN(tlv_len, 4);
		data += sizeof(*tlv) + ALIGN(tlv_len, 4);

		iwl_dbg_tlv_alloc(trans, tlv, true);
	}

	return 0;
}

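/* Load and parse the external debug TLV file, if debug ini mode is
 * enabled via the enable_ini module parameter.
 */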
void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
{
	const struct firmware *fw;
	int res;

	if (!iwlwifi_mod_params.enable_ini)
		return;

	res = request_firmware(&fw, "iwl-debug-yoyo.bin", dev);
	if (res)
		return;

	iwl_dbg_tlv_parse_bin(trans, fw->data, fw->size);

	release_firmware(fw);
}

void iwl_dbg_tlv_init(struct iwl_trans *trans)
{
	int i;

	INIT_LIST_HEAD(&trans->dbg.debug_info_tlv_list);
	INIT_LIST_HEAD(&trans->dbg.periodic_trig_list);

	for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) {
		struct iwl_dbg_tlv_time_point_data *tp =
			&trans->dbg.time_point[i];

		INIT_LIST_HEAD(&tp->trig_list);
		INIT_LIST_HEAD(&tp->hcmd_list);
		INIT_LIST_HEAD(&tp->active_trig_list);
	}
}

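/* Allocate one DMA-coherent fragment of up to @pages pages for @frag.
 * Returns the number of pages actually allocated or a negative errno.
 */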
static int iwl_dbg_tlv_alloc_fragment(struct iwl_fw_runtime *fwrt,
				      struct iwl_dram_data *frag, u32 pages)
{
	void *block = NULL;
	dma_addr_t physical;

	if (!frag || frag->size || !pages)
		return -EIO;

	/*
	 * We try to allocate as many pages as we can, starting with
	 * the requested amount and going down until we can allocate
	 * something. Because of DIV_ROUND_UP(), pages will never go
	 * down to 0 and stop the loop, so stop when pages reaches 1,
	 * which is too small anyway.
	 */
	while (pages > 1) {
		block = dma_alloc_coherent(fwrt->dev, pages * PAGE_SIZE,
					   &physical,
					   GFP_KERNEL | __GFP_NOWARN);
		if (block)
			break;

		IWL_WARN(fwrt, "WRT: Failed to allocate fragment size %lu\n",
			 pages * PAGE_SIZE);

		pages = DIV_ROUND_UP(pages, 2);
	}

	if (!block)
		return -ENOMEM;

	frag->physical = physical;
	frag->block = block;
	frag->size = pages * PAGE_SIZE;

	return pages;
}

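/* Allocate the DRAM fragments backing the monitor buffer of @alloc_id,
 * splitting the requested size over as many fragments as the firmware
 * supports.
 */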
static int iwl_dbg_tlv_alloc_fragments(struct iwl_fw_runtime *fwrt,
				       enum iwl_fw_ini_allocation_id alloc_id)
{
	struct iwl_fw_mon *fw_mon;
	struct iwl_fw_ini_allocation_tlv *fw_mon_cfg;
	u32 num_frags, remain_pages, frag_pages;
	int i;

	if (alloc_id < IWL_FW_INI_ALLOCATION_INVALID ||
	    alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
		return -EIO;

	fw_mon_cfg = &fwrt->trans->dbg.fw_mon_cfg[alloc_id];
	fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];

	if (fw_mon->num_frags ||
	    fw_mon_cfg->buf_location !=
	    cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH))
		return 0;

	num_frags = le32_to_cpu(fw_mon_cfg->max_frags_num);
	if (!fw_has_capa(&fwrt->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP)) {
		if (alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
			return -EIO;
		num_frags = 1;
	}

	remain_pages = DIV_ROUND_UP(le32_to_cpu(fw_mon_cfg->req_size),
				    PAGE_SIZE);
	num_frags = min_t(u32, num_frags, BUF_ALLOC_MAX_NUM_FRAGS);
	num_frags = min_t(u32, num_frags, remain_pages);
	frag_pages = DIV_ROUND_UP(remain_pages, num_frags);

	fw_mon->frags = kcalloc(num_frags, sizeof(*fw_mon->frags), GFP_KERNEL);
	if (!fw_mon->frags)
		return -ENOMEM;

	for (i = 0; i < num_frags; i++) {
		int pages = min_t(u32, frag_pages, remain_pages);

		IWL_DEBUG_FW(fwrt,
			     "WRT: Allocating DRAM buffer (alloc_id=%u, fragment=%u, size=0x%lx)\n",
			     alloc_id, i, pages * PAGE_SIZE);

		pages = iwl_dbg_tlv_alloc_fragment(fwrt, &fw_mon->frags[i],
						   pages);
		if (pages < 0) {
			u32 alloc_size = le32_to_cpu(fw_mon_cfg->req_size) -
				(remain_pages * PAGE_SIZE);

			if (alloc_size < le32_to_cpu(fw_mon_cfg->min_size)) {
				iwl_dbg_tlv_fragments_free(fwrt->trans,
							   alloc_id);
				return pages;
			}
			break;
		}

		remain_pages -= pages;
		fw_mon->num_frags++;
	}

	return 0;
}

static int iwl_dbg_tlv_apply_buffer(struct iwl_fw_runtime *fwrt,
				    enum iwl_fw_ini_allocation_id alloc_id)
{
	struct iwl_fw_mon *fw_mon;
	u32 remain_frags, num_commands;
	int i, fw_mon_idx = 0;

	if (!fw_has_capa(&fwrt->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP))
		return 0;

	if (alloc_id < IWL_FW_INI_ALLOCATION_INVALID ||
	    alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
		return -EIO;

	if (le32_to_cpu(fwrt->trans->dbg.fw_mon_cfg[alloc_id].buf_location) !=
	    IWL_FW_INI_LOCATION_DRAM_PATH)
		return 0;

	fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];

	/* the first fragment of DBGC1 is given to the FW via register
	 * or context info
	 */
	if (alloc_id == IWL_FW_INI_ALLOCATION_ID_DBGC1)
		fw_mon_idx++;

	remain_frags = fw_mon->num_frags - fw_mon_idx;
	if (!remain_frags)
		return 0;

	num_commands = DIV_ROUND_UP(remain_frags, BUF_ALLOC_MAX_NUM_FRAGS);

	IWL_DEBUG_FW(fwrt, "WRT: Applying DRAM destination (alloc_id=%u)\n",
		     alloc_id);

	for (i = 0; i < num_commands; i++) {
		u32 num_frags = min_t(u32, remain_frags,
				      BUF_ALLOC_MAX_NUM_FRAGS);
		struct iwl_buf_alloc_cmd data = {
			.alloc_id = cpu_to_le32(alloc_id),
			.num_frags = cpu_to_le32(num_frags),
			.buf_location =
				cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH),
		};
		struct iwl_host_cmd hcmd = {
			.id = WIDE_ID(DEBUG_GROUP, BUFFER_ALLOCATION),
			.data[0] = &data,
			.len[0] = sizeof(data),
		};
		int ret, j;

		for (j = 0; j < num_frags; j++) {
			struct iwl_buf_alloc_frag *frag = &data.frags[j];
			struct iwl_dram_data *fw_mon_frag =
				&fw_mon->frags[fw_mon_idx++];

			frag->addr = cpu_to_le64(fw_mon_frag->physical);
			frag->size = cpu_to_le32(fw_mon_frag->size);
		}

		ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
		if (ret)
			return ret;

		remain_frags -= num_frags;
	}

	return 0;
}

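/* Apply the DRAM destination of every allocation id, warning on failure */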
static void iwl_dbg_tlv_apply_buffers(struct iwl_fw_runtime *fwrt)
{
	int ret, i;

	for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
		ret = iwl_dbg_tlv_apply_buffer(fwrt, i);
		if (ret)
			IWL_WARN(fwrt,
				 "WRT: Failed to apply DRAM buffer for allocation id %d, ret=%d\n",
				 i, ret);
	}
}

static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
				   struct list_head *hcmd_list)
{
	struct iwl_dbg_tlv_node *node;

	list_for_each_entry(node, hcmd_list, list) {
		struct iwl_fw_ini_hcmd_tlv *hcmd = (void *)node->tlv.data;
		struct iwl_fw_ini_hcmd *hcmd_data = &hcmd->hcmd;
		u16 hcmd_len = le32_to_cpu(node->tlv.length) - sizeof(*hcmd);
		struct iwl_host_cmd cmd = {
			.id = WIDE_ID(hcmd_data->group, hcmd_data->id),
			.len = { hcmd_len, },
			.data = { hcmd_data->data, },
		};

		iwl_trans_send_cmd(fwrt->trans, &cmd);
	}
}

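/* Timer callback of a periodic trigger: collect debug data and re-arm the
 * timer unless the trigger ran out of occurrences.
 */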
static void iwl_dbg_tlv_periodic_trig_handler(struct timer_list *t)
{
	struct iwl_dbg_tlv_timer_node *timer_node =
		from_timer(timer_node, t, timer);
	struct iwl_fwrt_dump_data dump_data = {
		.trig = (void *)timer_node->tlv->data,
	};
	int ret;

	ret = iwl_fw_dbg_ini_collect(timer_node->fwrt, &dump_data);
	if (!ret || ret == -EBUSY) {
		u32 occur = le32_to_cpu(dump_data.trig->occurrences);
		u32 collect_interval = le32_to_cpu(dump_data.trig->data[0]);

		if (!occur)
			return;

		mod_timer(t, jiffies + msecs_to_jiffies(collect_interval));
	}
}

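/* Arm a timer for each active trigger of the PERIODIC time point, using
 * the interval given in the first dword of the trigger data.
 */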
static void iwl_dbg_tlv_set_periodic_trigs(struct iwl_fw_runtime *fwrt)
{
	struct iwl_dbg_tlv_node *node;
	struct list_head *trig_list =
		&fwrt->trans->dbg.time_point[IWL_FW_INI_TIME_POINT_PERIODIC].active_trig_list;

	list_for_each_entry(node, trig_list, list) {
		struct iwl_fw_ini_trigger_tlv *trig = (void *)node->tlv.data;
		struct iwl_dbg_tlv_timer_node *timer_node;
		u32 occur = le32_to_cpu(trig->occurrences), collect_interval;
		u32 min_interval = 100;

		if (!occur)
			continue;

		/* make sure there is at least one dword of data for the
		 * interval value
		 */
		if (le32_to_cpu(node->tlv.length) <
		    sizeof(*trig) + sizeof(__le32)) {
			IWL_ERR(fwrt,
				"WRT: Invalid periodic trigger data was not given\n");
			continue;
		}

		if (le32_to_cpu(trig->data[0]) < min_interval) {
			IWL_WARN(fwrt,
				 "WRT: Override min interval from %u to %u msec\n",
				 le32_to_cpu(trig->data[0]), min_interval);
			trig->data[0] = cpu_to_le32(min_interval);
		}

		collect_interval = le32_to_cpu(trig->data[0]);

		timer_node = kzalloc(sizeof(*timer_node), GFP_KERNEL);
		if (!timer_node) {
			IWL_ERR(fwrt,
				"WRT: Failed to allocate periodic trigger\n");
			continue;
		}

		timer_node->fwrt = fwrt;
		timer_node->tlv = &node->tlv;
		timer_setup(&timer_node->timer,
			    iwl_dbg_tlv_periodic_trig_handler, 0);

		list_add_tail(&timer_node->list,
			      &fwrt->trans->dbg.periodic_trig_list);

		IWL_DEBUG_FW(fwrt, "WRT: Enabling periodic trigger\n");

		mod_timer(&timer_node->timer,
			  jiffies + msecs_to_jiffies(collect_interval));
	}
}

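/* Return true if every data dword of @new already appears in @old */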
static bool is_trig_data_contained(struct iwl_ucode_tlv *new,
				   struct iwl_ucode_tlv *old)
{
	struct iwl_fw_ini_trigger_tlv *new_trig = (void *)new->data;
	struct iwl_fw_ini_trigger_tlv *old_trig = (void *)old->data;
	__le32 *new_data = new_trig->data, *old_data = old_trig->data;
	u32 new_dwords_num = iwl_tlv_array_len(new, new_trig, data);
	u32 old_dwords_num = iwl_tlv_array_len(old, old_trig, data);
	int i, j;

	for (i = 0; i < new_dwords_num; i++) {
		bool match = false;

		for (j = 0; j < old_dwords_num; j++) {
			if (new_data[i] == old_data[j]) {
				match = true;
				break;
			}
		}
		if (!match)
			return false;
	}

	return true;
}

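/* Merge @trig_tlv into an existing active trigger node according to its
 * apply policy: trigger data is appended or overridden, and the
 * configuration and regions mask may be overridden as well.
 */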
static int iwl_dbg_tlv_override_trig_node(struct iwl_fw_runtime *fwrt,
					  struct iwl_ucode_tlv *trig_tlv,
					  struct iwl_dbg_tlv_node *node)
{
	struct iwl_ucode_tlv *node_tlv = &node->tlv;
	struct iwl_fw_ini_trigger_tlv *node_trig = (void *)node_tlv->data;
	struct iwl_fw_ini_trigger_tlv *trig = (void *)trig_tlv->data;
	u32 policy = le32_to_cpu(trig->apply_policy);
	u32 size = le32_to_cpu(trig_tlv->length);
	u32 trig_data_len = size - sizeof(*trig);
	u32 offset = 0;

	if (!(policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA)) {
		u32 data_len = le32_to_cpu(node_tlv->length) -
			sizeof(*node_trig);

		IWL_DEBUG_FW(fwrt,
			     "WRT: Appending trigger data (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		offset += data_len;
		size += data_len;
	} else {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Overriding trigger data (time point %u)\n",
			     le32_to_cpu(trig->time_point));
	}

	if (size != le32_to_cpu(node_tlv->length)) {
		struct list_head *prev = node->list.prev;
		struct iwl_dbg_tlv_node *tmp;

		list_del(&node->list);

		tmp = krealloc(node, sizeof(*node) + size, GFP_KERNEL);
		if (!tmp) {
			IWL_WARN(fwrt,
				 "WRT: No memory to override trigger (time point %u)\n",
				 le32_to_cpu(trig->time_point));

			list_add(&node->list, prev);

			return -ENOMEM;
		}

		list_add(&tmp->list, prev);
		node_tlv = &tmp->tlv;
		node_trig = (void *)node_tlv->data;
	}

	memcpy(node_trig->data + offset, trig->data, trig_data_len);
	node_tlv->length = cpu_to_le32(size);

	if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG) {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Overriding trigger configuration (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		/* the first 11 dwords are configuration related */
		memcpy(node_trig, trig, sizeof(__le32) * 11);
	}

	if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS) {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Overriding trigger regions (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		node_trig->regions_mask = trig->regions_mask;
	} else {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Appending trigger regions (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		node_trig->regions_mask |= trig->regions_mask;
	}

	return 0;
}

static int
iwl_dbg_tlv_add_active_trigger(struct iwl_fw_runtime *fwrt,
			       struct list_head *trig_list,
			       struct iwl_ucode_tlv *trig_tlv)
{
	struct iwl_fw_ini_trigger_tlv *trig = (void *)trig_tlv->data;
	struct iwl_dbg_tlv_node *node, *match = NULL;
	u32 policy = le32_to_cpu(trig->apply_policy);

	list_for_each_entry(node, trig_list, list) {
		if (!(policy & IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT))
			break;

		if (!(policy & IWL_FW_INI_APPLY_POLICY_MATCH_DATA) ||
		    is_trig_data_contained(trig_tlv, &node->tlv)) {
			match = node;
			break;
		}
	}

	if (!match) {
		IWL_DEBUG_FW(fwrt, "WRT: Enabling trigger (time point %u)\n",
			     le32_to_cpu(trig->time_point));
		return iwl_dbg_tlv_add(trig_tlv, trig_list);
	}

	return iwl_dbg_tlv_override_trig_node(fwrt, trig_tlv, match);
}

static void
iwl_dbg_tlv_gen_active_trig_list(struct iwl_fw_runtime *fwrt,
				 struct iwl_dbg_tlv_time_point_data *tp)
{
	struct iwl_dbg_tlv_node *node;
	struct list_head *trig_list = &tp->trig_list;
	struct list_head *active_trig_list = &tp->active_trig_list;

	list_for_each_entry(node, trig_list, list) {
		struct iwl_ucode_tlv *tlv = &node->tlv;

		iwl_dbg_tlv_add_active_trigger(fwrt, active_trig_list, tlv);
	}
}

static bool iwl_dbg_tlv_check_fw_pkt(struct iwl_fw_runtime *fwrt,
				     struct iwl_fwrt_dump_data *dump_data,
				     union iwl_dbg_tlv_tp_data *tp_data,
				     u32 trig_data)
{
	struct iwl_rx_packet *pkt = tp_data->fw_pkt;
	struct iwl_cmd_header *wanted_hdr = (void *)&trig_data;

	if (pkt && ((wanted_hdr->cmd == 0 && wanted_hdr->group_id == 0) ||
		    (pkt->hdr.cmd == wanted_hdr->cmd &&
		     pkt->hdr.group_id == wanted_hdr->group_id))) {
		struct iwl_rx_packet *fw_pkt =
			kmemdup(pkt,
				sizeof(*pkt) + iwl_rx_packet_payload_len(pkt),
				GFP_ATOMIC);

		if (!fw_pkt)
			return false;

		dump_data->fw_pkt = fw_pkt;

		return true;
	}

	return false;
}

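/* Collect debug data for every trigger in @active_trig_list; when a
 * trigger carries data dwords, collect only if @data_check accepts one
 * of them.
 */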
static int
iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt,
		       struct list_head *active_trig_list,
		       union iwl_dbg_tlv_tp_data *tp_data,
		       bool (*data_check)(struct iwl_fw_runtime *fwrt,
					  struct iwl_fwrt_dump_data *dump_data,
					  union iwl_dbg_tlv_tp_data *tp_data,
					  u32 trig_data))
{
	struct iwl_dbg_tlv_node *node;

	list_for_each_entry(node, active_trig_list, list) {
		struct iwl_fwrt_dump_data dump_data = {
			.trig = (void *)node->tlv.data,
		};
		u32 num_data = iwl_tlv_array_len(&node->tlv, dump_data.trig,
						 data);
		int ret, i;

		if (!num_data) {
			ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data);
			if (ret)
				return ret;
		}

		for (i = 0; i < num_data; i++) {
			if (!data_check ||
			    data_check(fwrt, &dump_data, tp_data,
				       le32_to_cpu(dump_data.trig->data[i]))) {
				ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data);
				if (ret)
					return ret;

				break;
			}
		}
	}

	return 0;
}

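/* Generate the active trigger lists and allocate the monitor buffers of
 * all allocation ids that use the (single) configured destination.
 */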
static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
{
	enum iwl_fw_ini_buffer_location *ini_dest = &fwrt->trans->dbg.ini_dest;
	int ret, i;

	IWL_DEBUG_FW(fwrt,
		     "WRT: Generating active triggers list, domain 0x%x\n",
		     fwrt->trans->dbg.domains_bitmap);

	for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.time_point); i++) {
		struct iwl_dbg_tlv_time_point_data *tp =
			&fwrt->trans->dbg.time_point[i];

		iwl_dbg_tlv_gen_active_trig_list(fwrt, tp);
	}

	*ini_dest = IWL_FW_INI_LOCATION_INVALID;
	for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
		struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
			&fwrt->trans->dbg.fw_mon_cfg[i];
		u32 dest = le32_to_cpu(fw_mon_cfg->buf_location);

		if (dest == IWL_FW_INI_LOCATION_INVALID)
			continue;

		if (*ini_dest == IWL_FW_INI_LOCATION_INVALID)
			*ini_dest = dest;

		if (dest != *ini_dest)
			continue;

		ret = iwl_dbg_tlv_alloc_fragments(fwrt, i);
		if (ret)
			IWL_WARN(fwrt,
				 "WRT: Failed to allocate DRAM buffer for allocation id %d, ret=%d\n",
				 i, ret);
	}
}

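/* Entry point for firmware debug time points: apply buffers, send host
 * commands and fire triggers as appropriate for @tp_id.
 */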
void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
			    enum iwl_fw_ini_time_point tp_id,
			    union iwl_dbg_tlv_tp_data *tp_data)
{
	struct list_head *hcmd_list, *trig_list;

	if (!iwl_trans_dbg_ini_valid(fwrt->trans) ||
	    tp_id == IWL_FW_INI_TIME_POINT_INVALID ||
	    tp_id >= IWL_FW_INI_TIME_POINT_NUM)
		return;

	hcmd_list = &fwrt->trans->dbg.time_point[tp_id].hcmd_list;
	trig_list = &fwrt->trans->dbg.time_point[tp_id].active_trig_list;

	switch (tp_id) {
	case IWL_FW_INI_TIME_POINT_EARLY:
		iwl_dbg_tlv_init_cfg(fwrt);
		iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL);
		break;
	case IWL_FW_INI_TIME_POINT_AFTER_ALIVE:
		iwl_dbg_tlv_apply_buffers(fwrt);
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL);
		break;
	case IWL_FW_INI_TIME_POINT_PERIODIC:
		iwl_dbg_tlv_set_periodic_trigs(fwrt);
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		break;
	case IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF:
	case IWL_FW_INI_TIME_POINT_MISSED_BEACONS:
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data,
				       iwl_dbg_tlv_check_fw_pkt);
		break;
	default:
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL);
		break;
	}
}
IWL_EXPORT_SYMBOL(iwl_dbg_tlv_time_point);