drivers/net/wireless/intel/iwlwifi/iwl-dbg-tlv.c
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2018-2020 Intel Corporation
 */
#include <linux/firmware.h>
#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-dbg-tlv.h"
#include "fw/dbg.h"
#include "fw/runtime.h"

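/*
 * Overview: this file implements the "ini" (TLV-based) debug
 * configuration. Debug TLVs are read from the firmware file or from an
 * external iwl-debug-yoyo.bin, validated and stored on per-time-point
 * lists, and later applied as DRAM buffers, host commands and dump
 * triggers when the corresponding time point occurs.
 */
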
/**
 * enum iwl_dbg_tlv_type - debug TLV types
 * @IWL_DBG_TLV_TYPE_DEBUG_INFO: debug info TLV
 * @IWL_DBG_TLV_TYPE_BUF_ALLOC: buffer allocation TLV
 * @IWL_DBG_TLV_TYPE_HCMD: host command TLV
 * @IWL_DBG_TLV_TYPE_REGION: region TLV
 * @IWL_DBG_TLV_TYPE_TRIGGER: trigger TLV
 * @IWL_DBG_TLV_TYPE_NUM: number of debug TLVs
 */
enum iwl_dbg_tlv_type {
	IWL_DBG_TLV_TYPE_DEBUG_INFO =
		IWL_UCODE_TLV_TYPE_DEBUG_INFO - IWL_UCODE_TLV_DEBUG_BASE,
	IWL_DBG_TLV_TYPE_BUF_ALLOC,
	IWL_DBG_TLV_TYPE_HCMD,
	IWL_DBG_TLV_TYPE_REGION,
	IWL_DBG_TLV_TYPE_TRIGGER,
	IWL_DBG_TLV_TYPE_NUM,
};

/**
 * struct iwl_dbg_tlv_ver_data - debug TLV version struct
 * @min_ver: min version supported
 * @max_ver: max version supported
 */
struct iwl_dbg_tlv_ver_data {
	int min_ver;
	int max_ver;
};

/**
 * struct iwl_dbg_tlv_timer_node - timer node struct
 * @list: list of &struct iwl_dbg_tlv_timer_node
 * @timer: timer
 * @fwrt: &struct iwl_fw_runtime
 * @tlv: TLV attached to the timer node
 */
struct iwl_dbg_tlv_timer_node {
	struct list_head list;
	struct timer_list timer;
	struct iwl_fw_runtime *fwrt;
	struct iwl_ucode_tlv *tlv;
};

static const struct iwl_dbg_tlv_ver_data
dbg_ver_table[IWL_DBG_TLV_TYPE_NUM] = {
	[IWL_DBG_TLV_TYPE_DEBUG_INFO]	= {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_BUF_ALLOC]	= {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_HCMD]		= {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_REGION]	= {.min_ver = 1, .max_ver = 1,},
	[IWL_DBG_TLV_TYPE_TRIGGER]	= {.min_ver = 1, .max_ver = 1,},
};

static int iwl_dbg_tlv_add(struct iwl_ucode_tlv *tlv, struct list_head *list)
{
	u32 len = le32_to_cpu(tlv->length);
	struct iwl_dbg_tlv_node *node;

	node = kzalloc(sizeof(*node) + len, GFP_KERNEL);
	if (!node)
		return -ENOMEM;

	memcpy(&node->tlv, tlv, sizeof(node->tlv) + len);
	list_add_tail(&node->list, list);

	return 0;
}

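/*
 * Layout note (as the allocation math above implies, assuming the TLV
 * header is the last member of struct iwl_dbg_tlv_node): allocating
 * sizeof(*node) + len leaves room for the payload directly behind the
 * embedded header, roughly
 *
 *	[ list_head | tlv header | tlv payload (len bytes) ]
 *
 * and the memcpy() of sizeof(node->tlv) + len fills the last two parts.
 */
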
static bool iwl_dbg_tlv_ver_support(struct iwl_ucode_tlv *tlv)
{
	struct iwl_fw_ini_header *hdr = (void *)&tlv->data[0];
	u32 type = le32_to_cpu(tlv->type);
	u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
	u32 ver = le32_to_cpu(hdr->version);

	if (ver < dbg_ver_table[tlv_idx].min_ver ||
	    ver > dbg_ver_table[tlv_idx].max_ver)
		return false;

	return true;
}

static int iwl_dbg_tlv_alloc_debug_info(struct iwl_trans *trans,
					struct iwl_ucode_tlv *tlv)
{
	struct iwl_fw_ini_debug_info_tlv *debug_info = (void *)tlv->data;

	if (le32_to_cpu(tlv->length) != sizeof(*debug_info))
		return -EINVAL;

	IWL_DEBUG_FW(trans, "WRT: Loading debug cfg: %s\n",
		     debug_info->debug_cfg_name);

	return iwl_dbg_tlv_add(tlv, &trans->dbg.debug_info_tlv_list);
}

static int iwl_dbg_tlv_alloc_buf_alloc(struct iwl_trans *trans,
				       struct iwl_ucode_tlv *tlv)
{
	struct iwl_fw_ini_allocation_tlv *alloc = (void *)tlv->data;
	u32 buf_location;
	u32 alloc_id;

	if (le32_to_cpu(tlv->length) != sizeof(*alloc))
		return -EINVAL;

	buf_location = le32_to_cpu(alloc->buf_location);
	alloc_id = le32_to_cpu(alloc->alloc_id);

	if (buf_location == IWL_FW_INI_LOCATION_INVALID ||
	    buf_location >= IWL_FW_INI_LOCATION_NUM)
		goto err;

	if (alloc_id == IWL_FW_INI_ALLOCATION_INVALID ||
	    alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
		goto err;

	if (buf_location == IWL_FW_INI_LOCATION_NPK_PATH &&
	    alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
		goto err;

	if (buf_location == IWL_FW_INI_LOCATION_SRAM_PATH &&
	    alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1 &&
	    alloc_id != IWL_FW_INI_ALLOCATION_ID_INTERNAL)
		goto err;

	trans->dbg.fw_mon_cfg[alloc_id] = *alloc;

	return 0;
err:
	IWL_ERR(trans,
		"WRT: Invalid allocation id %u and/or location id %u for allocation TLV\n",
		alloc_id, buf_location);
	return -EINVAL;
}

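/*
 * The checks above encode the location/allocation-id pairs this
 * function accepts; summarized:
 *
 *	NPK_PATH	-> DBGC1 only
 *	SRAM_PATH	-> DBGC1 or INTERNAL only
 *	DRAM_PATH	-> any valid allocation id
 */
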
static int iwl_dbg_tlv_alloc_hcmd(struct iwl_trans *trans,
				  struct iwl_ucode_tlv *tlv)
{
	struct iwl_fw_ini_hcmd_tlv *hcmd = (void *)tlv->data;
	u32 tp = le32_to_cpu(hcmd->time_point);

	if (le32_to_cpu(tlv->length) <= sizeof(*hcmd))
		return -EINVAL;

	/* Host commands cannot be sent at the early time point since
	 * the FW is not ready
	 */
	if (tp == IWL_FW_INI_TIME_POINT_INVALID ||
	    tp >= IWL_FW_INI_TIME_POINT_NUM ||
	    tp == IWL_FW_INI_TIME_POINT_EARLY) {
		IWL_ERR(trans,
			"WRT: Invalid time point %u for host command TLV\n",
			tp);
		return -EINVAL;
	}

	return iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].hcmd_list);
}

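/*
 * Note that the length check above is strict: a host command TLV must
 * carry at least one byte of command payload beyond the fixed header,
 * so an empty command is rejected as well.
 */
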
static int iwl_dbg_tlv_alloc_region(struct iwl_trans *trans,
				    struct iwl_ucode_tlv *tlv)
{
	struct iwl_fw_ini_region_tlv *reg = (void *)tlv->data;
	struct iwl_ucode_tlv **active_reg;
	u32 id = le32_to_cpu(reg->id);
	u32 type = le32_to_cpu(reg->type);
	u32 tlv_len = sizeof(*tlv) + le32_to_cpu(tlv->length);

	if (le32_to_cpu(tlv->length) < sizeof(*reg))
		return -EINVAL;

	/* To safely use a string coming from the FW, make sure it has
	 * a NUL terminator
	 */
	reg->name[IWL_FW_INI_MAX_NAME - 1] = 0;

	IWL_DEBUG_FW(trans, "WRT: parsing region: %s\n", reg->name);

	if (id >= IWL_FW_INI_MAX_REGION_ID) {
		IWL_ERR(trans, "WRT: Invalid region id %u\n", id);
		return -EINVAL;
	}

	if (type <= IWL_FW_INI_REGION_INVALID ||
	    type >= IWL_FW_INI_REGION_NUM) {
		IWL_ERR(trans, "WRT: Invalid region type %u\n", type);
		return -EINVAL;
	}

	if (type == IWL_FW_INI_REGION_PCI_IOSF_CONFIG &&
	    !trans->ops->read_config32) {
		IWL_ERR(trans, "WRT: Unsupported region type %u\n", type);
		return -EOPNOTSUPP;
	}

	active_reg = &trans->dbg.active_regions[id];
	if (*active_reg) {
		IWL_WARN(trans, "WRT: Overriding region id %u\n", id);

		kfree(*active_reg);
	}

	*active_reg = kmemdup(tlv, tlv_len, GFP_KERNEL);
	if (!*active_reg)
		return -ENOMEM;

	IWL_DEBUG_FW(trans, "WRT: Enabling region id %u type %u\n", id, type);

	return 0;
}

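/*
 * Regions are keyed by id: a region TLV arriving for an id that is
 * already active replaces the previous one (with a warning), which is
 * presumably what lets an external debug configuration override regions
 * that the firmware file set up earlier.
 */
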
static int iwl_dbg_tlv_alloc_trigger(struct iwl_trans *trans,
				     struct iwl_ucode_tlv *tlv)
{
	struct iwl_fw_ini_trigger_tlv *trig = (void *)tlv->data;
	u32 tp = le32_to_cpu(trig->time_point);
	struct iwl_ucode_tlv *dup = NULL;
	int ret;

	if (le32_to_cpu(tlv->length) < sizeof(*trig))
		return -EINVAL;

	if (tp <= IWL_FW_INI_TIME_POINT_INVALID ||
	    tp >= IWL_FW_INI_TIME_POINT_NUM) {
		IWL_ERR(trans,
			"WRT: Invalid time point %u for trigger TLV\n",
			tp);
		return -EINVAL;
	}

	if (!le32_to_cpu(trig->occurrences)) {
		dup = kmemdup(tlv, sizeof(*tlv) + le32_to_cpu(tlv->length),
			      GFP_KERNEL);
		if (!dup)
			return -ENOMEM;
		trig = (void *)dup->data;
		trig->occurrences = cpu_to_le32(-1);
		tlv = dup;
	}

	ret = iwl_dbg_tlv_add(tlv, &trans->dbg.time_point[tp].trig_list);
	kfree(dup);

	return ret;
}

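/*
 * occurrences == 0 in a trigger TLV is treated as "unlimited": the TLV
 * is duplicated and the copy's occurrences is set to -1 (0xffffffff)
 * before being added, so the TLV data coming from the firmware image
 * itself is never modified in place.
 */
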
static int (*dbg_tlv_alloc[])(struct iwl_trans *trans,
			      struct iwl_ucode_tlv *tlv) = {
	[IWL_DBG_TLV_TYPE_DEBUG_INFO]	= iwl_dbg_tlv_alloc_debug_info,
	[IWL_DBG_TLV_TYPE_BUF_ALLOC]	= iwl_dbg_tlv_alloc_buf_alloc,
	[IWL_DBG_TLV_TYPE_HCMD]		= iwl_dbg_tlv_alloc_hcmd,
	[IWL_DBG_TLV_TYPE_REGION]	= iwl_dbg_tlv_alloc_region,
	[IWL_DBG_TLV_TYPE_TRIGGER]	= iwl_dbg_tlv_alloc_trigger,
};

void iwl_dbg_tlv_alloc(struct iwl_trans *trans, struct iwl_ucode_tlv *tlv,
		       bool ext)
{
	struct iwl_fw_ini_header *hdr = (void *)&tlv->data[0];
	u32 type = le32_to_cpu(tlv->type);
	u32 tlv_idx = type - IWL_UCODE_TLV_DEBUG_BASE;
	u32 domain = le32_to_cpu(hdr->domain);
	enum iwl_ini_cfg_state *cfg_state = ext ?
		&trans->dbg.external_ini_cfg : &trans->dbg.internal_ini_cfg;
	int ret;

	if (domain != IWL_FW_INI_DOMAIN_ALWAYS_ON &&
	    !(domain & trans->dbg.domains_bitmap)) {
		IWL_DEBUG_FW(trans,
			     "WRT: Skipping TLV with disabled domain 0x%0x (0x%0x)\n",
			     domain, trans->dbg.domains_bitmap);
		return;
	}

	if (tlv_idx >= ARRAY_SIZE(dbg_tlv_alloc) || !dbg_tlv_alloc[tlv_idx]) {
		IWL_ERR(trans, "WRT: Unsupported TLV type 0x%x\n", type);
		goto out_err;
	}

	if (!iwl_dbg_tlv_ver_support(tlv)) {
		IWL_ERR(trans, "WRT: Unsupported TLV 0x%x version %u\n", type,
			le32_to_cpu(hdr->version));
		goto out_err;
	}

	ret = dbg_tlv_alloc[tlv_idx](trans, tlv);
	if (ret) {
		IWL_ERR(trans,
			"WRT: Failed to allocate TLV 0x%x, ret %d, (ext=%d)\n",
			type, ret, ext);
		goto out_err;
	}

	if (*cfg_state == IWL_INI_CFG_STATE_NOT_LOADED)
		*cfg_state = IWL_INI_CFG_STATE_LOADED;

	return;

out_err:
	*cfg_state = IWL_INI_CFG_STATE_CORRUPTED;
}

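/*
 * Illustrative use only: a caller walking a TLV stream hands each debug
 * TLV to iwl_dbg_tlv_alloc(), with ext indicating its origin, e.g.
 *
 *	iwl_dbg_tlv_alloc(trans, tlv, false);	// TLV from the FW file
 *	iwl_dbg_tlv_alloc(trans, tlv, true);	// TLV from external bin
 *
 * iwl_dbg_tlv_parse_bin() below is the ext=true case; the ext=false
 * calls would come from the firmware file parser elsewhere in the
 * driver.
 */
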
void iwl_dbg_tlv_del_timers(struct iwl_trans *trans)
{
	struct list_head *timer_list = &trans->dbg.periodic_trig_list;
	struct iwl_dbg_tlv_timer_node *node, *tmp;

	list_for_each_entry_safe(node, tmp, timer_list, list) {
		del_timer(&node->timer);
		list_del(&node->list);
		kfree(node);
	}
}
IWL_EXPORT_SYMBOL(iwl_dbg_tlv_del_timers);

static void iwl_dbg_tlv_fragments_free(struct iwl_trans *trans,
				       enum iwl_fw_ini_allocation_id alloc_id)
{
	struct iwl_fw_mon *fw_mon;
	int i;

	if (alloc_id <= IWL_FW_INI_ALLOCATION_INVALID ||
	    alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
		return;

	fw_mon = &trans->dbg.fw_mon_ini[alloc_id];

	for (i = 0; i < fw_mon->num_frags; i++) {
		struct iwl_dram_data *frag = &fw_mon->frags[i];

		dma_free_coherent(trans->dev, frag->size, frag->block,
				  frag->physical);

		frag->physical = 0;
		frag->block = NULL;
		frag->size = 0;
	}

	kfree(fw_mon->frags);
	fw_mon->frags = NULL;
	fw_mon->num_frags = 0;
}

void iwl_dbg_tlv_free(struct iwl_trans *trans)
{
	struct iwl_dbg_tlv_node *tlv_node, *tlv_node_tmp;
	int i;

	iwl_dbg_tlv_del_timers(trans);

	for (i = 0; i < ARRAY_SIZE(trans->dbg.active_regions); i++) {
		struct iwl_ucode_tlv **active_reg =
			&trans->dbg.active_regions[i];

		kfree(*active_reg);
		*active_reg = NULL;
	}

	list_for_each_entry_safe(tlv_node, tlv_node_tmp,
				 &trans->dbg.debug_info_tlv_list, list) {
		list_del(&tlv_node->list);
		kfree(tlv_node);
	}

	for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) {
		struct iwl_dbg_tlv_time_point_data *tp =
			&trans->dbg.time_point[i];

		list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->trig_list,
					 list) {
			list_del(&tlv_node->list);
			kfree(tlv_node);
		}

		list_for_each_entry_safe(tlv_node, tlv_node_tmp, &tp->hcmd_list,
					 list) {
			list_del(&tlv_node->list);
			kfree(tlv_node);
		}

		list_for_each_entry_safe(tlv_node, tlv_node_tmp,
					 &tp->active_trig_list, list) {
			list_del(&tlv_node->list);
			kfree(tlv_node);
		}
	}

	for (i = 0; i < ARRAY_SIZE(trans->dbg.fw_mon_ini); i++)
		iwl_dbg_tlv_fragments_free(trans, i);
}

static int iwl_dbg_tlv_parse_bin(struct iwl_trans *trans, const u8 *data,
				 size_t len)
{
	struct iwl_ucode_tlv *tlv;
	u32 tlv_len;

	while (len >= sizeof(*tlv)) {
		len -= sizeof(*tlv);
		tlv = (void *)data;

		tlv_len = le32_to_cpu(tlv->length);

		if (len < tlv_len) {
			IWL_ERR(trans, "invalid TLV len: %zd/%u\n",
				len, tlv_len);
			return -EINVAL;
		}
		len -= ALIGN(tlv_len, 4);
		data += sizeof(*tlv) + ALIGN(tlv_len, 4);

		iwl_dbg_tlv_alloc(trans, tlv, true);
	}

	return 0;
}

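/*
 * The stream parsed above is a flat sequence of TLV records, each a
 * struct iwl_ucode_tlv header followed by its payload padded to a
 * 4-byte boundary; a sketch of one iteration:
 *
 *	| type | length | data[length] | pad to 4 |  -> next TLV
 *
 * A record whose declared length exceeds the remaining buffer aborts
 * parsing with -EINVAL.
 */
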
void iwl_dbg_tlv_load_bin(struct device *dev, struct iwl_trans *trans)
{
	const struct firmware *fw;
	int res;

	if (!iwlwifi_mod_params.enable_ini)
		return;

	res = firmware_request_nowarn(&fw, "iwl-debug-yoyo.bin", dev);
	if (res)
		return;

	iwl_dbg_tlv_parse_bin(trans, fw->data, fw->size);

	release_firmware(fw);
}

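/*
 * iwl-debug-yoyo.bin is an optional, external debug configuration:
 * firmware_request_nowarn() is used so its absence does not log a
 * warning, and nothing is parsed unless the enable_ini module
 * parameter is set.
 */
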
void iwl_dbg_tlv_init(struct iwl_trans *trans)
{
	int i;

	INIT_LIST_HEAD(&trans->dbg.debug_info_tlv_list);
	INIT_LIST_HEAD(&trans->dbg.periodic_trig_list);

	for (i = 0; i < ARRAY_SIZE(trans->dbg.time_point); i++) {
		struct iwl_dbg_tlv_time_point_data *tp =
			&trans->dbg.time_point[i];

		INIT_LIST_HEAD(&tp->trig_list);
		INIT_LIST_HEAD(&tp->hcmd_list);
		INIT_LIST_HEAD(&tp->active_trig_list);
	}
}

static int iwl_dbg_tlv_alloc_fragment(struct iwl_fw_runtime *fwrt,
				      struct iwl_dram_data *frag, u32 pages)
{
	void *block = NULL;
	dma_addr_t physical;

	if (!frag || frag->size || !pages)
		return -EIO;

	/*
	 * We try to allocate as many pages as we can, starting with
	 * the requested amount and going down until we can allocate
	 * something. Because of DIV_ROUND_UP(), pages will never go
	 * down to 0 and stop the loop, so stop when pages reaches 1,
	 * which is too small anyway.
	 */
	while (pages > 1) {
		block = dma_alloc_coherent(fwrt->dev, pages * PAGE_SIZE,
					   &physical,
					   GFP_KERNEL | __GFP_NOWARN);
		if (block)
			break;

		IWL_WARN(fwrt, "WRT: Failed to allocate fragment size %lu\n",
			 pages * PAGE_SIZE);

		pages = DIV_ROUND_UP(pages, 2);
	}

	if (!block)
		return -ENOMEM;

	frag->physical = physical;
	frag->block = block;
	frag->size = pages * PAGE_SIZE;

	return pages;
}

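/*
 * Worked example (illustrative numbers): asked for 64 pages, the loop
 * above tries 64, 32, 16, 8, 4 and 2 pages, stopping at the first size
 * dma_alloc_coherent() can satisfy; since DIV_ROUND_UP(2, 2) == 1, the
 * "pages > 1" condition is what finally ends the loop. On success the
 * number of pages actually allocated is returned, which the caller
 * subtracts from its remaining budget.
 */
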
static int iwl_dbg_tlv_alloc_fragments(struct iwl_fw_runtime *fwrt,
				       enum iwl_fw_ini_allocation_id alloc_id)
{
	struct iwl_fw_mon *fw_mon;
	struct iwl_fw_ini_allocation_tlv *fw_mon_cfg;
	u32 num_frags, remain_pages, frag_pages;
	int i;

	if (alloc_id < IWL_FW_INI_ALLOCATION_INVALID ||
	    alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
		return -EIO;

	fw_mon_cfg = &fwrt->trans->dbg.fw_mon_cfg[alloc_id];
	fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];

	if (fw_mon->num_frags ||
	    fw_mon_cfg->buf_location !=
	    cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH))
		return 0;

	num_frags = le32_to_cpu(fw_mon_cfg->max_frags_num);
	if (!fw_has_capa(&fwrt->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP)) {
		if (alloc_id != IWL_FW_INI_ALLOCATION_ID_DBGC1)
			return -EIO;
		num_frags = 1;
	}

	remain_pages = DIV_ROUND_UP(le32_to_cpu(fw_mon_cfg->req_size),
				    PAGE_SIZE);
	num_frags = min_t(u32, num_frags, BUF_ALLOC_MAX_NUM_FRAGS);
	num_frags = min_t(u32, num_frags, remain_pages);
	frag_pages = DIV_ROUND_UP(remain_pages, num_frags);

	fw_mon->frags = kcalloc(num_frags, sizeof(*fw_mon->frags), GFP_KERNEL);
	if (!fw_mon->frags)
		return -ENOMEM;

	for (i = 0; i < num_frags; i++) {
		int pages = min_t(u32, frag_pages, remain_pages);

		IWL_DEBUG_FW(fwrt,
			     "WRT: Allocating DRAM buffer (alloc_id=%u, fragment=%u, size=0x%lx)\n",
			     alloc_id, i, pages * PAGE_SIZE);

		pages = iwl_dbg_tlv_alloc_fragment(fwrt, &fw_mon->frags[i],
						   pages);
		if (pages < 0) {
			u32 alloc_size = le32_to_cpu(fw_mon_cfg->req_size) -
				(remain_pages * PAGE_SIZE);

			if (alloc_size < le32_to_cpu(fw_mon_cfg->min_size)) {
				iwl_dbg_tlv_fragments_free(fwrt->trans,
							   alloc_id);
				return pages;
			}
			break;
		}

		remain_pages -= pages;
		fw_mon->num_frags++;
	}

	return 0;
}

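/*
 * Sizing example (illustrative numbers, 4 KiB pages): a 1 MiB req_size
 * gives remain_pages = 256; with max_frags_num = 16 (and assuming
 * BUF_ALLOC_MAX_NUM_FRAGS allows it) that is split into 16 fragments of
 * DIV_ROUND_UP(256, 16) = 16 pages each. If some fragment cannot be
 * allocated at all, the buffer is kept anyway as long as what was
 * allocated so far reaches min_size; otherwise everything is freed and
 * the error is propagated.
 */
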
static int iwl_dbg_tlv_apply_buffer(struct iwl_fw_runtime *fwrt,
				    enum iwl_fw_ini_allocation_id alloc_id)
{
	struct iwl_fw_mon *fw_mon;
	u32 remain_frags, num_commands;
	int i, fw_mon_idx = 0;

	if (!fw_has_capa(&fwrt->fw->ucode_capa,
			 IWL_UCODE_TLV_CAPA_DBG_BUF_ALLOC_CMD_SUPP))
		return 0;

	if (alloc_id < IWL_FW_INI_ALLOCATION_INVALID ||
	    alloc_id >= IWL_FW_INI_ALLOCATION_NUM)
		return -EIO;

	if (le32_to_cpu(fwrt->trans->dbg.fw_mon_cfg[alloc_id].buf_location) !=
	    IWL_FW_INI_LOCATION_DRAM_PATH)
		return 0;

	fw_mon = &fwrt->trans->dbg.fw_mon_ini[alloc_id];

	/* the first fragment of DBGC1 is given to the FW via register
	 * or context info
	 */
	if (alloc_id == IWL_FW_INI_ALLOCATION_ID_DBGC1)
		fw_mon_idx++;

	remain_frags = fw_mon->num_frags - fw_mon_idx;
	if (!remain_frags)
		return 0;

	num_commands = DIV_ROUND_UP(remain_frags, BUF_ALLOC_MAX_NUM_FRAGS);

	IWL_DEBUG_FW(fwrt, "WRT: Applying DRAM destination (alloc_id=%u)\n",
		     alloc_id);

	for (i = 0; i < num_commands; i++) {
		u32 num_frags = min_t(u32, remain_frags,
				      BUF_ALLOC_MAX_NUM_FRAGS);
		struct iwl_buf_alloc_cmd data = {
			.alloc_id = cpu_to_le32(alloc_id),
			.num_frags = cpu_to_le32(num_frags),
			.buf_location =
				cpu_to_le32(IWL_FW_INI_LOCATION_DRAM_PATH),
		};
		struct iwl_host_cmd hcmd = {
			.id = WIDE_ID(DEBUG_GROUP, BUFFER_ALLOCATION),
			.data[0] = &data,
			.len[0] = sizeof(data),
		};
		int ret, j;

		for (j = 0; j < num_frags; j++) {
			struct iwl_buf_alloc_frag *frag = &data.frags[j];
			struct iwl_dram_data *fw_mon_frag =
				&fw_mon->frags[fw_mon_idx++];

			frag->addr = cpu_to_le64(fw_mon_frag->physical);
			frag->size = cpu_to_le32(fw_mon_frag->size);
		}

		ret = iwl_trans_send_cmd(fwrt->trans, &hcmd);
		if (ret)
			return ret;

		remain_frags -= num_frags;
	}

	return 0;
}

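/*
 * Fragments are reported to the firmware in batches: each
 * BUFFER_ALLOCATION command carries at most BUF_ALLOC_MAX_NUM_FRAGS
 * fragment descriptors, so num_commands rounds the remainder up. For
 * DBGC1 the first fragment is skipped here since the FW already learns
 * about it via register or context info (see the comment above).
 */
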
static void iwl_dbg_tlv_apply_buffers(struct iwl_fw_runtime *fwrt)
{
	int ret, i;

	for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
		ret = iwl_dbg_tlv_apply_buffer(fwrt, i);
		if (ret)
			IWL_WARN(fwrt,
				 "WRT: Failed to apply DRAM buffer for allocation id %d, ret=%d\n",
				 i, ret);
	}
}

static void iwl_dbg_tlv_send_hcmds(struct iwl_fw_runtime *fwrt,
				   struct list_head *hcmd_list)
{
	struct iwl_dbg_tlv_node *node;

	list_for_each_entry(node, hcmd_list, list) {
		struct iwl_fw_ini_hcmd_tlv *hcmd = (void *)node->tlv.data;
		struct iwl_fw_ini_hcmd *hcmd_data = &hcmd->hcmd;
		u16 hcmd_len = le32_to_cpu(node->tlv.length) - sizeof(*hcmd);
		struct iwl_host_cmd cmd = {
			.id = WIDE_ID(hcmd_data->group, hcmd_data->id),
			.len = { hcmd_len, },
			.data = { hcmd_data->data, },
		};

		iwl_trans_send_cmd(fwrt->trans, &cmd);
	}
}

static void iwl_dbg_tlv_periodic_trig_handler(struct timer_list *t)
{
	struct iwl_dbg_tlv_timer_node *timer_node =
		from_timer(timer_node, t, timer);
	struct iwl_fwrt_dump_data dump_data = {
		.trig = (void *)timer_node->tlv->data,
	};
	int ret;

	ret = iwl_fw_dbg_ini_collect(timer_node->fwrt, &dump_data);
	if (!ret || ret == -EBUSY) {
		u32 occur = le32_to_cpu(dump_data.trig->occurrences);
		u32 collect_interval = le32_to_cpu(dump_data.trig->data[0]);

		if (!occur)
			return;

		mod_timer(t, jiffies + msecs_to_jiffies(collect_interval));
	}
}

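/*
 * The periodic timer re-arms itself only when the collection succeeded
 * or returned -EBUSY (presumably a collection already in flight), and
 * stops for good once the trigger's remaining occurrences reach zero.
 */
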
static void iwl_dbg_tlv_set_periodic_trigs(struct iwl_fw_runtime *fwrt)
{
	struct iwl_dbg_tlv_node *node;
	struct list_head *trig_list =
		&fwrt->trans->dbg.time_point[IWL_FW_INI_TIME_POINT_PERIODIC].active_trig_list;

	list_for_each_entry(node, trig_list, list) {
		struct iwl_fw_ini_trigger_tlv *trig = (void *)node->tlv.data;
		struct iwl_dbg_tlv_timer_node *timer_node;
		u32 occur = le32_to_cpu(trig->occurrences), collect_interval;
		u32 min_interval = 100;

		if (!occur)
			continue;

		/* make sure there is at least one dword of data for the
		 * interval value
		 */
		if (le32_to_cpu(node->tlv.length) <
		    sizeof(*trig) + sizeof(__le32)) {
			IWL_ERR(fwrt,
				"WRT: Invalid periodic trigger data was not given\n");
			continue;
		}

		if (le32_to_cpu(trig->data[0]) < min_interval) {
			IWL_WARN(fwrt,
				 "WRT: Override min interval from %u to %u msec\n",
				 le32_to_cpu(trig->data[0]), min_interval);
			trig->data[0] = cpu_to_le32(min_interval);
		}

		collect_interval = le32_to_cpu(trig->data[0]);

		timer_node = kzalloc(sizeof(*timer_node), GFP_KERNEL);
		if (!timer_node) {
			IWL_ERR(fwrt,
				"WRT: Failed to allocate periodic trigger\n");
			continue;
		}

		timer_node->fwrt = fwrt;
		timer_node->tlv = &node->tlv;
		timer_setup(&timer_node->timer,
			    iwl_dbg_tlv_periodic_trig_handler, 0);

		list_add_tail(&timer_node->list,
			      &fwrt->trans->dbg.periodic_trig_list);

		IWL_DEBUG_FW(fwrt, "WRT: Enabling periodic trigger\n");

		mod_timer(&timer_node->timer,
			  jiffies + msecs_to_jiffies(collect_interval));
	}
}

static bool is_trig_data_contained(struct iwl_ucode_tlv *new,
				   struct iwl_ucode_tlv *old)
{
	struct iwl_fw_ini_trigger_tlv *new_trig = (void *)new->data;
	struct iwl_fw_ini_trigger_tlv *old_trig = (void *)old->data;
	__le32 *new_data = new_trig->data, *old_data = old_trig->data;
	u32 new_dwords_num = iwl_tlv_array_len(new, new_trig, data);
	u32 old_dwords_num = iwl_tlv_array_len(old, old_trig, data);
	int i, j;

	for (i = 0; i < new_dwords_num; i++) {
		bool match = false;

		for (j = 0; j < old_dwords_num; j++) {
			if (new_data[i] == old_data[j]) {
				match = true;
				break;
			}
		}
		if (!match)
			return false;
	}

	return true;
}

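/*
 * is_trig_data_contained() checks set containment, not prefix equality:
 * e.g. (illustrative values) new data {A, B} is contained in old data
 * {B, C, A}, but not in {A, C}. The scan is O(new * old) dwords, on the
 * expectation that trigger data arrays stay small.
 */
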
static int iwl_dbg_tlv_override_trig_node(struct iwl_fw_runtime *fwrt,
					  struct iwl_ucode_tlv *trig_tlv,
					  struct iwl_dbg_tlv_node *node)
{
	struct iwl_ucode_tlv *node_tlv = &node->tlv;
	struct iwl_fw_ini_trigger_tlv *node_trig = (void *)node_tlv->data;
	struct iwl_fw_ini_trigger_tlv *trig = (void *)trig_tlv->data;
	u32 policy = le32_to_cpu(trig->apply_policy);
	u32 size = le32_to_cpu(trig_tlv->length);
	u32 trig_data_len = size - sizeof(*trig);
	u32 offset = 0;

	if (!(policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_DATA)) {
		u32 data_len = le32_to_cpu(node_tlv->length) -
			sizeof(*node_trig);

		IWL_DEBUG_FW(fwrt,
			     "WRT: Appending trigger data (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		offset += data_len;
		size += data_len;
	} else {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Overriding trigger data (time point %u)\n",
			     le32_to_cpu(trig->time_point));
	}

	if (size != le32_to_cpu(node_tlv->length)) {
		struct list_head *prev = node->list.prev;
		struct iwl_dbg_tlv_node *tmp;

		list_del(&node->list);

		tmp = krealloc(node, sizeof(*node) + size, GFP_KERNEL);
		if (!tmp) {
			IWL_WARN(fwrt,
				 "WRT: No memory to override trigger (time point %u)\n",
				 le32_to_cpu(trig->time_point));

			list_add(&node->list, prev);

			return -ENOMEM;
		}

		list_add(&tmp->list, prev);
		node_tlv = &tmp->tlv;
		node_trig = (void *)node_tlv->data;
	}

	memcpy(node_trig->data + offset, trig->data, trig_data_len);
	node_tlv->length = cpu_to_le32(size);

	if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_CFG) {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Overriding trigger configuration (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		/* the first 11 dwords are configuration related */
		memcpy(node_trig, trig, sizeof(__le32) * 11);
	}

	if (policy & IWL_FW_INI_APPLY_POLICY_OVERRIDE_REGIONS) {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Overriding trigger regions (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		node_trig->regions_mask = trig->regions_mask;
	} else {
		IWL_DEBUG_FW(fwrt,
			     "WRT: Appending trigger regions (time point %u)\n",
			     le32_to_cpu(trig->time_point));

		node_trig->regions_mask |= trig->regions_mask;
	}

	return 0;
}

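/*
 * Override semantics, as implemented above: trigger data is appended
 * unless OVERRIDE_DATA is set (in which case it replaces the old data
 * at offset 0); OVERRIDE_CFG copies the first 11 configuration dwords;
 * OVERRIDE_REGIONS replaces the regions mask, otherwise the masks are
 * OR-ed together. The krealloc() dance keeps the node's position in its
 * list by re-linking at the remembered predecessor, even on failure.
 */
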
static int
iwl_dbg_tlv_add_active_trigger(struct iwl_fw_runtime *fwrt,
			       struct list_head *trig_list,
			       struct iwl_ucode_tlv *trig_tlv)
{
	struct iwl_fw_ini_trigger_tlv *trig = (void *)trig_tlv->data;
	struct iwl_dbg_tlv_node *node, *match = NULL;
	u32 policy = le32_to_cpu(trig->apply_policy);

	list_for_each_entry(node, trig_list, list) {
		if (!(policy & IWL_FW_INI_APPLY_POLICY_MATCH_TIME_POINT))
			break;

		if (!(policy & IWL_FW_INI_APPLY_POLICY_MATCH_DATA) ||
		    is_trig_data_contained(trig_tlv, &node->tlv)) {
			match = node;
			break;
		}
	}

	if (!match) {
		IWL_DEBUG_FW(fwrt, "WRT: Enabling trigger (time point %u)\n",
			     le32_to_cpu(trig->time_point));
		return iwl_dbg_tlv_add(trig_tlv, trig_list);
	}

	return iwl_dbg_tlv_override_trig_node(fwrt, trig_tlv, match);
}

static void
iwl_dbg_tlv_gen_active_trig_list(struct iwl_fw_runtime *fwrt,
				 struct iwl_dbg_tlv_time_point_data *tp)
{
	struct iwl_dbg_tlv_node *node;
	struct list_head *trig_list = &tp->trig_list;
	struct list_head *active_trig_list = &tp->active_trig_list;

	list_for_each_entry(node, trig_list, list) {
		struct iwl_ucode_tlv *tlv = &node->tlv;

		iwl_dbg_tlv_add_active_trigger(fwrt, active_trig_list, tlv);
	}
}

static bool iwl_dbg_tlv_check_fw_pkt(struct iwl_fw_runtime *fwrt,
				     struct iwl_fwrt_dump_data *dump_data,
				     union iwl_dbg_tlv_tp_data *tp_data,
				     u32 trig_data)
{
	struct iwl_rx_packet *pkt = tp_data->fw_pkt;
	struct iwl_cmd_header *wanted_hdr = (void *)&trig_data;

	if (pkt && (pkt->hdr.cmd == wanted_hdr->cmd &&
		    pkt->hdr.group_id == wanted_hdr->group_id)) {
		struct iwl_rx_packet *fw_pkt =
			kmemdup(pkt,
				sizeof(*pkt) + iwl_rx_packet_payload_len(pkt),
				GFP_ATOMIC);

		if (!fw_pkt)
			return false;

		dump_data->fw_pkt = fw_pkt;

		return true;
	}

	return false;
}

static int
iwl_dbg_tlv_tp_trigger(struct iwl_fw_runtime *fwrt,
		       struct list_head *active_trig_list,
		       union iwl_dbg_tlv_tp_data *tp_data,
		       bool (*data_check)(struct iwl_fw_runtime *fwrt,
					  struct iwl_fwrt_dump_data *dump_data,
					  union iwl_dbg_tlv_tp_data *tp_data,
					  u32 trig_data))
{
	struct iwl_dbg_tlv_node *node;

	list_for_each_entry(node, active_trig_list, list) {
		struct iwl_fwrt_dump_data dump_data = {
			.trig = (void *)node->tlv.data,
		};
		u32 num_data = iwl_tlv_array_len(&node->tlv, dump_data.trig,
						 data);
		int ret, i;

		if (!num_data) {
			ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data);
			if (ret)
				return ret;
		}

		for (i = 0; i < num_data; i++) {
			if (!data_check ||
			    data_check(fwrt, &dump_data, tp_data,
				       le32_to_cpu(dump_data.trig->data[i]))) {
				ret = iwl_fw_dbg_ini_collect(fwrt, &dump_data);
				if (ret)
					return ret;

				break;
			}
		}
	}

	return 0;
}

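/*
 * For a trigger with data, at most one collection happens per time
 * point: the first data dword accepted by data_check() (or simply the
 * first dword when no callback is given) fires the dump and breaks out
 * of the scan. Triggers without any data always collect.
 */
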
static void iwl_dbg_tlv_init_cfg(struct iwl_fw_runtime *fwrt)
{
	enum iwl_fw_ini_buffer_location *ini_dest = &fwrt->trans->dbg.ini_dest;
	int ret, i;

	if (*ini_dest != IWL_FW_INI_LOCATION_INVALID)
		return;

	IWL_DEBUG_FW(fwrt,
		     "WRT: Generating active triggers list, domain 0x%x\n",
		     fwrt->trans->dbg.domains_bitmap);

	for (i = 0; i < ARRAY_SIZE(fwrt->trans->dbg.time_point); i++) {
		struct iwl_dbg_tlv_time_point_data *tp =
			&fwrt->trans->dbg.time_point[i];

		iwl_dbg_tlv_gen_active_trig_list(fwrt, tp);
	}

	*ini_dest = IWL_FW_INI_LOCATION_INVALID;
	for (i = 0; i < IWL_FW_INI_ALLOCATION_NUM; i++) {
		struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
			&fwrt->trans->dbg.fw_mon_cfg[i];
		u32 dest = le32_to_cpu(fw_mon_cfg->buf_location);

		if (dest == IWL_FW_INI_LOCATION_INVALID)
			continue;

		if (*ini_dest == IWL_FW_INI_LOCATION_INVALID)
			*ini_dest = dest;

		if (dest != *ini_dest)
			continue;

		ret = iwl_dbg_tlv_alloc_fragments(fwrt, i);
		if (ret)
			IWL_WARN(fwrt,
				 "WRT: Failed to allocate DRAM buffer for allocation id %d, ret=%d\n",
				 i, ret);
	}
}

void iwl_dbg_tlv_time_point(struct iwl_fw_runtime *fwrt,
			    enum iwl_fw_ini_time_point tp_id,
			    union iwl_dbg_tlv_tp_data *tp_data)
{
	struct list_head *hcmd_list, *trig_list;

	if (!iwl_trans_dbg_ini_valid(fwrt->trans) ||
	    tp_id == IWL_FW_INI_TIME_POINT_INVALID ||
	    tp_id >= IWL_FW_INI_TIME_POINT_NUM)
		return;

	hcmd_list = &fwrt->trans->dbg.time_point[tp_id].hcmd_list;
	trig_list = &fwrt->trans->dbg.time_point[tp_id].active_trig_list;

	switch (tp_id) {
	case IWL_FW_INI_TIME_POINT_EARLY:
		iwl_dbg_tlv_init_cfg(fwrt);
		iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL);
		break;
	case IWL_FW_INI_TIME_POINT_AFTER_ALIVE:
		iwl_dbg_tlv_apply_buffers(fwrt);
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL);
		break;
	case IWL_FW_INI_TIME_POINT_PERIODIC:
		iwl_dbg_tlv_set_periodic_trigs(fwrt);
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		break;
	case IWL_FW_INI_TIME_POINT_FW_RSP_OR_NOTIF:
	case IWL_FW_INI_TIME_POINT_MISSED_BEACONS:
	case IWL_FW_INI_TIME_POINT_FW_DHC_NOTIFICATION:
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data,
				       iwl_dbg_tlv_check_fw_pkt);
		break;
	default:
		iwl_dbg_tlv_send_hcmds(fwrt, hcmd_list);
		iwl_dbg_tlv_tp_trigger(fwrt, trig_list, tp_data, NULL);
		break;
	}
}
IWL_EXPORT_SYMBOL(iwl_dbg_tlv_time_point);