/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright (C) 2018 - 2019 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright (C) 2018 - 2019 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/firmware.h>
#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-dbg-tlv.h"
#include "fw/dbg.h"
#include "fw/runtime.h"

/**
 * enum iwl_dbg_tlv_type - debug TLV types
 * @IWL_DBG_TLV_TYPE_DEBUG_INFO: debug info TLV
 * @IWL_DBG_TLV_TYPE_BUF_ALLOC: buffer allocation TLV
 * @IWL_DBG_TLV_TYPE_HCMD: host command TLV
 * @IWL_DBG_TLV_TYPE_REGION: region TLV
 * @IWL_DBG_TLV_TYPE_TRIGGER: trigger TLV
 * @IWL_DBG_TLV_TYPE_NUM: number of debug TLVs
 */
enum iwl_dbg_tlv_type {
	IWL_DBG_TLV_TYPE_DEBUG_INFO =
		IWL_UCODE_TLV_TYPE_DEBUG_INFO - IWL_UCODE_TLV_DEBUG_BASE,
	IWL_DBG_TLV_TYPE_BUF_ALLOC,
	IWL_DBG_TLV_TYPE_HCMD,
	IWL_DBG_TLV_TYPE_REGION,
	IWL_DBG_TLV_TYPE_TRIGGER,
	IWL_DBG_TLV_TYPE_NUM,
};

/**
 * struct iwl_dbg_tlv_ver_data - debug TLV version struct
 * @min_ver: min version supported
 * @max_ver: max version supported
 */
struct iwl_dbg_tlv_ver_data {
	int min_ver;
	int max_ver;
};

/**
 * struct iwl_dbg_tlv_timer_node - timer node struct
 * @list: list of &struct iwl_dbg_tlv_timer_node
 * @timer: timer
 * @fwrt: &struct iwl_fw_runtime
 * @tlv: TLV attached to the timer node
 */
struct iwl_dbg_tlv_timer_node {
	struct list_head list;
	struct timer_list timer;
	struct iwl_fw_runtime *fwrt;
	struct iwl_ucode_tlv *tlv;
};

static const struct iwl_dbg_tlv_ver_data
dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
	[IWL_DBG_TLV_TYPE_DEBUG_INFO]	= {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_BUF_ALLOC]	= {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_HCMD]		= {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_REGION]	= {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_TRIGGER]	= {.min_ver = 1, .max_ver = 1,},
};

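/* Copy @tlv (header and payload) into a freshly allocated list node and
 * queue it on @list.
 */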
static int iwl_dbg_tlv_add(struct iwl_ucode_tlv *tlv, struct list_head *list)
{
	u32 len = le32_to_cpu(tlv->length);
	struct iwl_dbg_tlv_node *node;

	node = kzalloc(sizeof(*node) + len, GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	memcpy(&node->tlv, tlv, sizeof(node->tlv) + len);
	list_add_tail(&node->list, list);

	return 0;
}

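/* Check that the TLV version falls inside the range supported for its
 * type, as given by dbg_ver_table.
 */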
static bool iwl_dbg_tlv_ver_support(struct iwl_ucode_tlv *tlv)
{
	struct iwl_fw_ini_header *hdr = (void *)&tlv->data[0];
	u32 type = le32_to_cpu(tlv->type);
	u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
	u32 ver = le32_to_cpu(hdr->version);

	if (ver < dbg_ver_table[tlv_idx].min_ver ||
	    ver > dbg_ver_table[tlv_idx].max_ver)
		return false;

	return true;
}

static int iwl_dbg_tlv_alloc_debug_info(struct iwl_trans *trans,
					struct iwl_ucode_tlv *tlv)
{
	struct iwl_fw_ini_debug_info_tlv *debug_info = (void *)tlv->data;

	if (le32_to_cpu(tlv->length) != sizeof(*debug_info))
		return -EINVAL;

	IWL_DEBUG_FW(trans, "WRT: Loading debug cfg: %s\n",
		     debug_info->debug_cfg_name);

	return iwl_dbg_tlv_add(tlv, &trans->dbg.debug_info_tlv_list);
}

static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans,
				       struct iwl_ucode_tlv *tlv)
{
	struct iwl_fw_ini_allocation_tlv *alloc = (void *)tlv->data;
	u32 buf_location = le32_to_cpu(alloc->buf_location);
	u32 alloc_id = le32_to_cpu(alloc->alloc_id);

	if (le32_to_cpu(tlv->length) != sizeof(*alloc) ||
	    (buf_location != IWL_FW_INI_LOCATION_SRAM_PATH &&
	     buf_location != IWL_FW_INI_LOCATION_DRAM_PATH))
		return -EINVAL;

	if ((buf_location == IWL_FW_INI_LOCATION_SRAM_PATH &&
	     alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1) ||
	    (buf_location == IWL_FW_INI_LOCATION_DRAM_PATH &&
	     (alloc_id == IWL_FW_INI_ALLOCATION_INVALID ||
	      alloc_id >= IWL_FW_INI_ALLOCATION_NUM))) {
		IWL_ERR(trans,
			"WRT: Invalid allocation id %u for allocation TLV\n",
			alloc_id);
		return -EINVAL;
	}

	trans->dbg.fw_mon_cfg[alloc_id] = *alloc;

	return 0;
}

static int iwl_dbg_tlv_alloc_hcmd(struct iwl_trans *trans,
				  struct iwl_ucode_tlv *tlv)
{
	struct iwl_fw_ini_hcmd_tlv *hcmd = (void *)tlv->data;
	u32 tp = le32_to_cpu(hcmd->time_point);

	if (le32_to_cpu(tlv->length) <= sizeof(*hcmd))
		return -EINVAL;

	/* Host commands cannot be sent in the early time point since the FW
	 * is not ready yet
	 */
	if (tp == IWL_FW_INI_TIME_POINT_INVALID ||
	    tp >= IWL_FW_INI_TIME_POINT_NUM ||
	    tp == IWL_FW_INI_TIME_POINT_EARLY) {
		IWL_ERR(trans,
			"WRT: Invalid time point %u for host command TLV\n",
			tp);
		return -EINVAL;
	}

	return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].hcmd_list);
}

static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
				    struct iwl_ucode_tlv *tlv)
{
	struct iwl_fw_ini_region_tlv *reg = (void *)tlv->data;
	struct iwl_ucode_tlv **active_reg;
	u32 id = le32_to_cpu(reg->id);
	u32 type = le32_to_cpu(reg->type);
	u32 tlv_len = sizeof(*tlv) + le32_to_cpu(tlv->length);

	if (le32_to_cpu(tlv->length) < sizeof(*reg))
		return -EINVAL;

	if (id >= IWL_FW_INI_MAX_REGION_ID) {
		IWL_ERR(trans, "WRT: Invalid region id %u\n", id);
		return -EINVAL;
	}

	if (type <= IWL_FW_INI_REGION_INVALID ||
	    type >= IWL_FW_INI_REGION_NUM) {
		IWL_ERR(trans, "WRT: Invalid region type %u\n", type);
		return -EINVAL;
	}

	if (type == IWL_FW_INI_REGION_PCI_IOSF_CONFIG &&
	    !trans->ops->read_config32) {
		IWL_ERR(trans, "WRT: Unsupported region type %u\n", type);
		return -EOPNOTSUPP;
	}

	active_reg = &trans->dbg.active_regions[id];
	if (*active_reg) {
		IWL_WARN(trans, "WRT: Overriding region id %u\n", id);

		kfree(*active_reg);
	}

	*active_reg = kmemdup(tlv, tlv_len, GFP_KERNEL);
	if (!*active_reg)
		return -ENOMEM;

	IWL_DEBUG_FW(trans, "WRT: Enabling region id %u type %u\n", id, type);

	return 0;
}

static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
				     struct iwl_ucode_tlv *tlv)
{
	struct iwl_fw_ini_trigger_tlv *trig = (void *)tlv->data;
	u32 tp = le32_to_cpu(trig->time_point);

	if (le32_to_cpu(tlv->length) < sizeof(*trig))
		return -EINVAL;

	if (tp <= IWL_FW_INI_TIME_POINT_INVALID ||
	    tp >= IWL_FW_INI_TIME_POINT_NUM) {
		IWL_ERR(trans,
			"WRT: Invalid time point %u for trigger TLV\n",
			tp);
		return -EINVAL;
	}

	if (!le32_to_cpu(trig->occurrences))
		trig->occurrences = cpu_to_le32(-1);

	return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
}

static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
			      struct iwl_ucode_tlv *tlv) = {
	[IWL_DBG_TLV_TYPE_DEBUG_INFO]	= iwl_dbg_tlv_alloc_debug_info,
	[IWL_DBG_TLV_TYPE_BUF_ALLOC]	= iwl_dbg_tlv_alloc_buf_alloc,
	[IWL_DBG_TLV_TYPE_HCMD]		= iwl_dbg_tlv_alloc_hcmd,
	[IWL_DBG_TLV_TYPE_REGION]	= iwl_dbg_tlv_alloc_region,
	[IWL_DBG_TLV_TYPE_TRIGGER]	= iwl_dbg_tlv_alloc_trigger,
};

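/* Validate a single debug TLV (domain, type and version) and dispatch it
 * to the matching dbg_tlv_alloc[] handler.  @ext marks TLVs coming from
 * the external debug file rather than the firmware file; on failure the
 * corresponding ini configuration is marked as corrupted.
 */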
void iwl_dbg_tlv_alloc(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv,
		       bool ext)
{
	struct iwl_fw_ini_header *hdr = (void *)&tlv->data[0];
	u32 type = le32_to_cpu(tlv->type);
	u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
	u32 domain = le32_to_cpu(hdr->domain);
	enum iwl_ini_cfg_state *cfg_state = ext ?
		&trans->dbg.external_ini_cfg : &trans->dbg.internal_ini_cfg;
	int ret;

	if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
	    !(domain & trans->dbg.domains_bitmap)) {
		IWL_DEBUG_FW(trans,
			     "WRT: Skipping TLV with disabled domain 0x%0x (0x%0x)\n",
			     domain, trans->dbg.domains_bitmap);
		return;
	}

	if (tlv_idx >= ARRAY_SIZE(dbg_tlv_alloc) || !dbg_tlv_alloc[tlv_idx]) {
		IWL_ERR(trans, "WRT: Unsupported TLV type 0x%x\n", type);
		goto out_err;
	}

	if (!iwl_dbg_tlv_ver_support(tlv)) {
		IWL_ERR(trans, "WRT: Unsupported TLV 0x%x version %u\n", type,
			le32_to_cpu(hdr->version));
		goto out_err;
	}

	ret = dbg_tlv_alloc[tlv_idx](trans, tlv);
	if (ret) {
		IWL_ERR(trans,
			"WRT: Failed to allocate TLV 0x%x, ret %d, (ext=%d)\n",
			type, ret, ext);
		goto out_err;
	}

	if (*cfg_state == IWL_INI_CFG_STATE_NOT_LOADED)
		*cfg_state = IWL_INI_CFG_STATE_LOADED;

	return;

out_err:
	*cfg_state = IWL_INI_CFG_STATE_CORRUPTED;
}

void iwl_dbg_tlv_del_timers(struct iwl_trans *trans)
{
	struct list_head *timer_list = &trans->dbg.periodic_trig_list;
	struct iwl_dbg_tlv_timer_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, timer_list, list) {
		del_timer(&node->timer);
		list_del(&node->list);
		kfree(node);
	}
}
IWL_EXPORT_SYMBOL(iwl_dbg_tlv_del_timers);

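/* Free all DMA fragments of the firmware monitor that belongs to
 * @alloc_id and reset its fragment bookkeeping.
 */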
static void iwl_dbg_tlv_fragments_free(struct iwl_trans *trans,
				       enum iwl_fw_ini_allocation_id alloc_id)
{
	struct iwl_fw_mon *fw_mon;
	int i;

	if (alloc_id <= IWL_FW_INI_ALLOCATION_INVALID ||
	    alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
		return;

	fw_mon = &trans->dbg.fw_mon_ini[alloc_id];

	for (i = 0; i < fw_mon->num_frags; i++) {
		struct iwl_dram_data *frag = &fw_mon->frags[i];

		dma_free_coherent(trans->dev, frag->size, frag->block,
				  frag->physical);

		frag->physical = 0;
		frag->block = NULL;
		frag->size = 0;
	}

	kfree(fw_mon->frags);
	fw_mon->frags = NULL;
	fw_mon->num_frags = 0;
}

void iwl_dbg_tlv_free(struct iwl_trans *trans)
{
	struct iwl_dbg_tlv_node *tlv_node, *tlv_node_tmp;
	int i;

	iwl_dbg_tlv_del_timers(trans);

	for (i = 0; i < ARRAY_SIZE(trans->dbg.active_regions); i++) {
		struct iwl_ucode_tlv **active_reg =
			&trans->dbg.active_regions[i];

		kfree(*active_reg);
		*active_reg = NULL;
	}

	list_for_each_entry_safe(tlv_node, tlv_node_tmp,
				 &trans->dbg.debug_info_tlv_list, list) {
		list_del(&tlv_node->list);
		kfree(tlv_node);
	}

	for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) {
		struct iwl_dbg_tlv_time_point_data *tp =
			&trans->dbg.time_point[i];

		list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->trig_list,
					 list) {
			list_del(&tlv_node->list);
			kfree(tlv_node);
		}

		list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->hcmd_list,
					 list) {
			list_del(&tlv_node->list);
			kfree(tlv_node);
		}

		list_for_each_entry_safe(tlv_node, tlv_node_tmp,
					 &tp->active_trig_list, list) {
			list_del(&tlv_node->list);
			kfree(tlv_node);
		}
	}

	for (i = 0; i < ARRAY_SIZE(trans->dbg.fw_mon_ini); i++)
		iwl_dbg_tlv_fragments_free(trans, i);
}

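/* Parse a binary blob of consecutive debug TLVs, each padded to 4-byte
 * alignment, and feed every TLV to iwl_dbg_tlv_alloc() as external.
 */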
static int iwl_dbg_tlv_parse_bin(struct iwl_trans *trans, const u8 *data,
				 size_t len)
{
	struct iwl_ucode_tlv *tlv;
	u32 tlv_len;

	while (len >= sizeof(*tlv)) {
		len -= sizeof(*tlv);
		tlv = (void *)data;

		tlv_len = le32_to_cpu(tlv->length);

		if (len < tlv_len) {
			IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
				len, tlv_len);
			return -EINVAL;
		}
		len -= ALIGN(tlv_len, 4);
		data += sizeof(*tlv) + ALIGN(tlv_len, 4);

		iwl_dbg_tlv_alloc(trans, tlv, true);
	}

	return 0;
}

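/* Load and parse the external debug TLV file (iwl-debug-yoyo.bin).
 * Does nothing unless the enable_ini module parameter is set.
 */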
void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
{
	const struct firmware *fw;
	int res;

	if (!iwlwifi_mod_params.enable_ini)
		return;

	res = request_firmware(&fw, "iwl-debug-yoyo.bin", dev);
	if (res)
		return;

	iwl_dbg_tlv_parse_bin(trans, fw->data, fw->size);

	release_firmware(fw);
}

void iwl_dbg_tlv_init(struct iwl_trans *trans)
{
	int i;

	INIT_LIST_HEAD(&trans->dbg.debug_info_tlv_list);
	INIT_LIST_HEAD(&trans->dbg.periodic_trig_list);

	for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) {
		struct iwl_dbg_tlv_time_point_data *tp =
			&trans->dbg.time_point[i];

		INIT_LIST_HEAD(&tp->trig_list);
		INIT_LIST_HEAD(&tp->hcmd_list);
		INIT_LIST_HEAD(&tp->active_trig_list);
	}
}

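/* Allocate one DMA-coherent fragment of up to @pages pages, halving the
 * request until an allocation succeeds.  Returns the number of pages
 * actually allocated, or a negative error value.
 */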
static int iwl_dbg_tlv_alloc_fragment(struct iwl_fw_runtime *fwrt,
				      struct iwl_dram_data *frag, u32 pages)
{
	void *block = NULL;
	dma_addr_t physical;

	if (!frag || frag->size || !pages)
		return -EIO;

	/*
	 * We try to allocate as many pages as we can, starting with
	 * the requested amount and going down until we can allocate
	 * something.  Because of DIV_ROUND_UP(), pages will never go
	 * down to 0 and stop the loop, so stop when pages reaches 1,
	 * which is too small anyway.
	 */
	while (pages > 1) {
		block = dma_alloc_coherent(fwrt->dev, pages * PAGE_SIZE,
					   &physical,
					   GFP_KERNEL | __GFP_NOWARN);
		if (block)
			break;

		IWL_WARN(fwrt, "WRT: Failed to allocate fragment size %lu\n",
			 pages * PAGE_SIZE);

		pages = DIV_ROUND_UP(pages, 2);
	}

	if (!block)
		return -ENOMEM;

	frag->physical = physical;
	frag->block = block;
	frag->size = pages * PAGE_SIZE;

	return pages;
}

static int iwl_dbg_tlv_alloc_fragments(struct iwl_fw_runtime *fwrt,
				       enum iwl_fw_ini_allocation_id alloc_id)
{
	struct iwl_fw_mon *fw_mon;
	struct iwl_fw_ini_allocation_tlv *fw_mon_cfg;
	u32 num_frags, remain_pages, frag_pages;
	int i;

	if (alloc_id < IWL_FW_INI_ALLOCATION_INVALID ||
	    alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
		return -EIO;

	fw_mon_cfg = &fwrt->trans->dbg.fw_mon_cfg[alloc_id];
	fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];

	if (fw_mon->num_frags ||
	    fw_mon_cfg->buf_location !=
	    cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH))
		return 0;

	num_frags = le32_to_cpu(fw_mon_cfg->max_frags_num);
	if (!fw_has_capa(&fwrt->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP)) {
		if (alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
			return -EIO;
		num_frags = 1;
	}

	remain_pages = DIV_ROUND_UP(le32_to_cpu(fw_mon_cfg->req_size),
				    PAGE_SIZE);
	num_frags = min_t(u32, num_frags, BUF_ALLOC_MAX_NUM_FRAGS);
	num_frags = min_t(u32, num_frags, remain_pages);
	frag_pages = DIV_ROUND_UP(remain_pages, num_frags);

	fw_mon->frags = kcalloc(num_frags, sizeof(*fw_mon->frags), GFP_KERNEL);
	if (!fw_mon->frags)
		return -ENOMEM;

	for (i = 0; i < num_frags; i++) {
		int pages = min_t(u32, frag_pages, remain_pages);

		IWL_DEBUG_FW(fwrt,
			     "WRT: Allocating DRAM buffer (alloc_id=%u, fragment=%u, size=0x%lx)\n",
			     alloc_id, i, pages * PAGE_SIZE);

		pages = iwl_dbg_tlv_alloc_fragment(fwrt, &fw_mon->frags[i],
						   pages);
		if (pages < 0) {
			u32 alloc_size = le32_to_cpu(fw_mon_cfg->req_size) -
				(remain_pages * PAGE_SIZE);

			if (alloc_size < le32_to_cpu(fw_mon_cfg->min_size)) {
				iwl_dbg_tlv_fragments_free(fwrt->trans,
							   alloc_id);
				return pages;
			}
			break;
		}

		remain_pages -= pages;
		fw_mon->num_frags++;
	}

	return 0;
}

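/* Give the DRAM fragments of @alloc_id to the firmware using one or more
 * BUFFER_ALLOCATION host commands, with at most BUF_ALLOC_MAX_NUM_FRAGS
 * fragments per command.
 */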
static int iwl_dbg_tlv_apply_buffer(struct iwl_fw_runtime *fwrt,
				    enum iwl_fw_ini_allocation_id alloc_id)
{
	struct iwl_fw_mon *fw_mon;
	u32 remain_frags, num_commands;
	int i, fw_mon_idx = 0;

	if (!fw_has_capa(&fwrt->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP))
		return 0;

	if (alloc_id < IWL_FW_INI_ALLOCATION_INVALID ||
	    alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
		return -EIO;

	if (le32_to_cpu(fwrt->trans->dbg.fw_mon_cfg[alloc_id].buf_location) !=
	    IWL_FW_INI_LOCATION_DRAM_PATH)
		return 0;

	fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];

	/* the first fragment of DBGC1 is given to the FW via register
	 * or context info
	 */
	if (alloc_id == IWL_FW_INI_ALLOCATION_ID_DBGC1)
		fw_mon_idx++;

	remain_frags = fw_mon->num_frags - fw_mon_idx;
	if (!remain_frags)
		return 0;

	num_commands = DIV_ROUND_UP(remain_frags, BUF_ALLOC_MAX_NUM_FRAGS);

	IWL_DEBUG_FW(fwrt, "WRT: Applying DRAM destination (alloc_id=%u)\n",
		     alloc_id);

	for (i = 0; i < num_commands; i++) {
		u32 num_frags = min_t(u32, remain_frags,
				      BUF_ALLOC_MAX_NUM_FRAGS);
		struct iwl_buf_alloc_cmd data = {
			.alloc_id = cpu_to_le32(alloc_id),
			.num_frags = cpu_to_le32(num_frags),
			.buf_location =
				cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH),
		};
		struct iwl_host_cmd hcmd = {
			.id = WIDE_ID(DEBUG_GROUP, BUFFER_ALLOCATION),
			.data[0] = &data,
			.len[0] = sizeof(data),
		};
		int ret, j;

		for (j = 0; j < num_frags; j++) {
			struct iwl_buf_alloc_frag *frag = &data.frags[j];
			struct iwl_dram_data *fw_mon_frag =
				&fw_mon->frags[fw_mon_idx++];

			frag->addr = cpu_to_le64(fw_mon_frag->physical);
			frag->size = cpu_to_le32(fw_mon_frag->size);
		}

		ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
		if (ret)
			return ret;

		remain_frags -= num_frags;
	}

	return 0;
}

static void iwl_dbg_tlv_apply_buffers(struct iwl_fw_runtime *fwrt)
{
	int ret, i;

	for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
		ret = iwl_dbg_tlv_apply_buffer(fwrt, i);
		if (ret)
			IWL_WARN(fwrt,
				 "WRT: Failed to apply DRAM buffer for allocation id %d, ret=%d\n",
				 i, ret);
	}
}

static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
				   struct list_head *hcmd_list)
{
	struct iwl_dbg_tlv_node *node;

	list_for_each_entry(node, hcmd_list, list) {
		struct iwl_fw_ini_hcmd_tlv *hcmd = (void *)node->tlv.data;
		struct iwl_fw_ini_hcmd *hcmd_data = &hcmd->hcmd;
		u16 hcmd_len = le32_to_cpu(node->tlv.length) - sizeof(*hcmd);
		struct iwl_host_cmd cmd = {
			.id = WIDE_ID(hcmd_data->group, hcmd_data->id),
			.len = { hcmd_len, },
			.data = { hcmd_data->data, },
		};

		iwl_trans_send_cmd(fwrt->trans, &cmd);
	}
}

static void iwl_dbg_tlv_periodic_trig_handler(struct timer_list *t)
{
	struct iwl_dbg_tlv_timer_node *timer_node =
		from_timer(timer_node, t, timer);
	struct iwl_fwrt_dump_data dump_data = {
		.trig = (void *)timer_node->tlv->data,
	};
	int ret;

	ret = iwl_fw_dbg_ini_collect(timer_node->fwrt, &dump_data);
	if (!ret || ret == -EBUSY) {
		u32 occur = le32_to_cpu(dump_data.trig->occurrences);
		u32 collect_interval = le32_to_cpu(dump_data.trig->data[0]);

		if (!occur)
			return;

		mod_timer(t, jiffies + msecs_to_jiffies(collect_interval));
	}
}

static void iwl_dbg_tlv_set_periodic_trigs(struct iwl_fw_runtime *fwrt)
{
	struct iwl_dbg_tlv_node *node;
	struct list_head *trig_list =
		&fwrt->trans->dbg.time_point[IWL_FW_INI_TIME_POINT_PERIODIC].active_trig_list;

	list_for_each_entry(node, trig_list, list) {
		struct iwl_fw_ini_trigger_tlv *trig = (void *)node->tlv.data;
		struct iwl_dbg_tlv_timer_node *timer_node;
		u32 occur = le32_to_cpu(trig->occurrences), collect_interval;
		u32 min_interval = 100;

		if (!occur)
			continue;

		/* make sure there is at least one dword of data for the
		 * interval value
		 */
		if (le32_to_cpu(node->tlv.length) <
		    sizeof(*trig) + sizeof(__le32)) {
			IWL_ERR(fwrt,
				"WRT: Invalid periodic trigger data was not given\n");
			continue;
		}

		if (le32_to_cpu(trig->data[0]) < min_interval) {
			IWL_WARN(fwrt,
				 "WRT: Override min interval from %u to %u msec\n",
				 le32_to_cpu(trig->data[0]), min_interval);
			trig->data[0] = cpu_to_le32(min_interval);
		}

		collect_interval = le32_to_cpu(trig->data[0]);

		timer_node = kzalloc(sizeof(*timer_node), GFP_KERNEL);
		if (!timer_node) {
			IWL_ERR(fwrt,
				"WRT: Failed to allocate periodic trigger\n");
			continue;
		}

		timer_node->fwrt = fwrt;
		timer_node->tlv = &node->tlv;
		timer_setup(&timer_node->timer,
			    iwl_dbg_tlv_periodic_trig_handler, 0);

		list_add_tail(&timer_node->list,
			      &fwrt->trans->dbg.periodic_trig_list);

		IWL_DEBUG_FW(fwrt, "WRT: Enabling periodic trigger\n");

		mod_timer(&timer_node->timer,
			  jiffies + msecs_to_jiffies(collect_interval));
	}
}

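/* Return true if every data dword in @new also appears in @old, i.e. the
 * trigger data of @new is contained in the trigger data of @old.
 */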
static bool is_trig_data_contained(struct iwl_ucode_tlv *new,
				   struct iwl_ucode_tlv *old)
{
	struct iwl_fw_ini_trigger_tlv *new_trig = (void *)new->data;
	struct iwl_fw_ini_trigger_tlv *old_trig = (void *)old->data;
	__le32 *new_data = new_trig->data, *old_data = old_trig->data;
	u32 new_dwords_num = iwl_tlv_array_len(new, new_trig, data);
	u32 old_dwords_num = iwl_tlv_array_len(old, old_trig, data);
	int i, j;

	for (i = 0; i < new_dwords_num; i++) {
		bool match = false;

		for (j = 0; j < old_dwords_num; j++) {
			if (new_data[i] == old_data[j]) {
				match = true;
				break;
			}
		}
		if (!match)
			return false;
	}

	return true;
}

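/* Merge @trig_tlv into an existing active trigger @node according to the
 * trigger's apply policy: append or override the trigger data and,
 * optionally, override the configuration dwords and the regions mask.
 */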
static int iwl_dbg_tlv_override_trig_node(struct iwl_fw_runtime *fwrt,
					  struct iwl_ucode_tlv *trig_tlv,
					  struct iwl_dbg_tlv_node *node)
{
	struct iwl_ucode_tlv *node_tlv = &node->tlv;
	struct iwl_fw_ini_trigger_tlv *node_trig = (void *)node_tlv->data;
	struct iwl_fw_ini_trigger_tlv *trig = (void *)trig_tlv->data;
	u32 policy = le32_to_cpu(trig->apply_policy);
	u32 size = le32_to_cpu(trig_tlv->length);
	u32 trig_data_len = size - sizeof(*trig);
	u32 offset = 0;

	if (!(policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA)) {
		u32 data_len = le32_to_cpu(node_tlv->length) -
			sizeof(*node_trig);

		IWL_DEBUG_FW(fwrt,
			     "WRT: Appending trigger data (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		offset += data_len;
		size += data_len;
	} else {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Overriding trigger data (time point %u)\n",
			     le32_to_cpu(trig->time_point));
	}

	if (size != le32_to_cpu(node_tlv->length)) {
		struct list_head *prev = node->list.prev;
		struct iwl_dbg_tlv_node *tmp;

		list_del(&node->list);

		tmp = krealloc(node, sizeof(*node) + size, GFP_KERNEL);
		if (!tmp) {
			IWL_WARN(fwrt,
				 "WRT: No memory to override trigger (time point %u)\n",
				 le32_to_cpu(trig->time_point));

			list_add(&node->list, prev);

			return -ENOMEM;
		}

		list_add(&tmp->list, prev);
		node_tlv = &tmp->tlv;
		node_trig = (void *)node_tlv->data;
	}

	memcpy(node_trig->data + offset, trig->data, trig_data_len);
	node_tlv->length = cpu_to_le32(size);

	if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG) {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Overriding trigger configuration (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		/* the first 11 dwords are configuration related */
		memcpy(node_trig, trig, sizeof(__le32) * 11);
	}

	if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS) {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Overriding trigger regions (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		node_trig->regions_mask = trig->regions_mask;
	} else {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Appending trigger regions (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		node_trig->regions_mask |= trig->regions_mask;
	}

	return 0;
}

static int
iwl_dbg_tlv_add_active_trigger(struct iwl_fw_runtime *fwrt,
			       struct list_head *trig_list,
			       struct iwl_ucode_tlv *trig_tlv)
{
	struct iwl_fw_ini_trigger_tlv *trig = (void *)trig_tlv->data;
	struct iwl_dbg_tlv_node *node, *match = NULL;
	u32 policy = le32_to_cpu(trig->apply_policy);

	list_for_each_entry(node, trig_list, list) {
		if (!(policy & IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT))
			break;

		if (!(policy & IWL_FW_INI_APPLY_POLICY_MATCH_DATA) ||
		    is_trig_data_contained(trig_tlv, &node->tlv)) {
			match = node;
			break;
		}
	}

	if (!match) {
		IWL_DEBUG_FW(fwrt, "WRT: Enabling trigger (time point %u)\n",
			     le32_to_cpu(trig->time_point));
		return iwl_dbg_tlv_add(trig_tlv, trig_list);
	}

	return iwl_dbg_tlv_override_trig_node(fwrt, trig_tlv, match);
}

static void
iwl_dbg_tlv_gen_active_trig_list(struct iwl_fw_runtime *fwrt,
				 struct iwl_dbg_tlv_time_point_data *tp)
{
	struct iwl_dbg_tlv_node *node;
	struct list_head *trig_list = &tp->trig_list;
	struct list_head *active_trig_list = &tp->active_trig_list;

	list_for_each_entry(node, trig_list, list) {
		struct iwl_ucode_tlv *tlv = &node->tlv;

		iwl_dbg_tlv_add_active_trigger(fwrt, active_trig_list, tlv);
	}
}

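/* The trigger data dword encodes a wanted command/group id pair, where
 * 0/0 acts as a wildcard.  On a match, duplicate the RX packet so it can
 * be attached to the dump data.
 */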
static bool iwl_dbg_tlv_check_fw_pkt(struct iwl_fw_runtime *fwrt,
				     struct iwl_fwrt_dump_data *dump_data,
				     union iwl_dbg_tlv_tp_data *tp_data,
				     u32 trig_data)
{
	struct iwl_rx_packet *pkt = tp_data->fw_pkt;
	struct iwl_cmd_header *wanted_hdr = (void *)&trig_data;

	if (pkt && ((wanted_hdr->cmd == 0 && wanted_hdr->group_id == 0) ||
		    (pkt->hdr.cmd == wanted_hdr->cmd &&
		     pkt->hdr.group_id == wanted_hdr->group_id))) {
		struct iwl_rx_packet *fw_pkt =
			kmemdup(pkt,
				sizeof(*pkt) + iwl_rx_packet_payload_len(pkt),
				GFP_ATOMIC);

		if (!fw_pkt)
			return false;

		dump_data->fw_pkt = fw_pkt;

		return true;
	}

	return false;
}

static int
iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt,
		       struct list_head *active_trig_list,
		       union iwl_dbg_tlv_tp_data *tp_data,
		       bool (*data_check)(struct iwl_fw_runtime *fwrt,
					  struct iwl_fwrt_dump_data *dump_data,
					  union iwl_dbg_tlv_tp_data *tp_data,
					  u32 trig_data))
{
	struct iwl_dbg_tlv_node *node;

	list_for_each_entry(node, active_trig_list, list) {
		struct iwl_fwrt_dump_data dump_data = {
			.trig = (void *)node->tlv.data,
		};
		u32 num_data = iwl_tlv_array_len(&node->tlv, dump_data.trig,
						 data);
		int ret, i;

		if (!num_data) {
			ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data);
			if (ret)
				return ret;
		}

		for (i = 0; i < num_data; i++) {
			if (!data_check ||
			    data_check(fwrt, &dump_data, tp_data,
				       le32_to_cpu(dump_data.trig->data[i]))) {
				ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data);
				if (ret)
					return ret;

				break;
			}
		}
	}

	return 0;
}

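/* Generate the active trigger lists for all time points and allocate
 * DRAM fragments for every allocation id whose destination matches the
 * first valid buffer destination found.
 */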
static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
{
	enum iwl_fw_ini_buffer_location *ini_dest = &fwrt->trans->dbg.ini_dest;
	int ret, i;

	IWL_DEBUG_FW(fwrt,
		     "WRT: Generating active triggers list, domain 0x%x\n",
		     fwrt->trans->dbg.domains_bitmap);

	for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.time_point); i++) {
		struct iwl_dbg_tlv_time_point_data *tp =
			&fwrt->trans->dbg.time_point[i];

		iwl_dbg_tlv_gen_active_trig_list(fwrt, tp);
	}

	*ini_dest = IWL_FW_INI_LOCATION_INVALID;
	for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
		struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
			&fwrt->trans->dbg.fw_mon_cfg[i];
		u32 dest = le32_to_cpu(fw_mon_cfg->buf_location);

		if (dest == IWL_FW_INI_LOCATION_INVALID)
			continue;

		if (*ini_dest == IWL_FW_INI_LOCATION_INVALID)
			*ini_dest = dest;

		if (dest != *ini_dest)
			continue;

		ret = iwl_dbg_tlv_alloc_fragments(fwrt, i);
		if (ret)
			IWL_WARN(fwrt,
				 "WRT: Failed to allocate DRAM buffer for allocation id %d, ret=%d\n",
				 i, ret);
	}
}

void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
			    enum iwl_fw_ini_time_point tp_id,
			    union iwl_dbg_tlv_tp_data *tp_data)
{
	struct list_head *hcmd_list, *trig_list;

	if (!iwl_trans_dbg_ini_valid(fwrt->trans) ||
	    tp_id == IWL_FW_INI_TIME_POINT_INVALID ||
	    tp_id >= IWL_FW_INI_TIME_POINT_NUM)
		return;

	hcmd_list = &fwrt->trans->dbg.time_point[tp_id].hcmd_list;
	trig_list = &fwrt->trans->dbg.time_point[tp_id].active_trig_list;

	switch (tp_id) {
	case IWL_FW_INI_TIME_POINT_EARLY:
		iwl_dbg_tlv_init_cfg(fwrt);
		iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL);
		break;
	case IWL_FW_INI_TIME_POINT_AFTER_ALIVE:
		iwl_dbg_tlv_apply_buffers(fwrt);
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL);
		break;
	case IWL_FW_INI_TIME_POINT_PERIODIC:
		iwl_dbg_tlv_set_periodic_trigs(fwrt);
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		break;
	case IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF:
	case IWL_FW_INI_TIME_POINT_MISSED_BEACONS:
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data,
				       iwl_dbg_tlv_check_fw_pkt);
		break;
	default:
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL);
		break;
	}
}
IWL_EXPORT_SYMBOL(iwl_dbg_tlv_time_point);