// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <net/mac80211.h>
#include <linux/netdevice.h>
#include <linux/dmi.h>

#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "fw/img.h"
#include "iwl-debug.h"
#include "iwl-prph.h"
#include "fw/acpi.h"
#include "fw/pnvm.h"
#include "fw/uefi.h"
#include "fw/regulatory.h"

#include "mvm.h"
#include "fw/dbg.h"
#include "iwl-phy-db.h"
#include "iwl-modparams.h"
#include "iwl-nvm-parse.h"
#include "time-sync.h"

#define MVM_UCODE_ALIVE_TIMEOUT	(2 * HZ)
#define MVM_UCODE_CALIB_TIMEOUT	(2 * HZ)
struct iwl_mvm_alive_data {
	bool valid;
	u32 scd_base_addr;
};
static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
{
	struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
		.valid = cpu_to_le32(valid_tx_ant),
	};

	IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
	return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
				    sizeof(tx_ant_cmd), &tx_ant_cmd);
}
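
/*
 * Configure RSS: enable hashing on TCP/UDP/payload for both IPv4 and
 * IPv6, and spread traffic over all RX queues except queue 0, which is
 * kept as the fallback queue.
 */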
static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
{
	int i;
	struct iwl_rss_config_cmd cmd = {
		.flags = cpu_to_le32(IWL_RSS_ENABLE),
		.hash_mask = BIT(IWL_RSS_HASH_TYPE_IPV4_TCP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV4_UDP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV4_PAYLOAD) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_TCP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_UDP) |
			     BIT(IWL_RSS_HASH_TYPE_IPV6_PAYLOAD),
	};

	if (mvm->trans->num_rx_queues == 1)
		return 0;

	/* Do not direct RSS traffic to Q 0 which is our fallback queue */
	for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
		cmd.indirection_table[i] =
			1 + (i % (mvm->trans->num_rx_queues - 1));
	netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));

	return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
}
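
/* Tell the firmware to work in DQA mode, with the given command queue */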
static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
{
	struct iwl_dqa_enable_cmd dqa_cmd = {
		.cmd_queue = cpu_to_le32(IWL_MVM_DQA_CMD_QUEUE),
	};
	u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, DQA_ENABLE_CMD);
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, sizeof(dqa_cmd), &dqa_cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send DQA enabling command: %d\n", ret);
	else
		IWL_DEBUG_FW(mvm, "Working in DQA mode\n");

	return ret;
}
void iwl_mvm_mfu_assert_dump_notif(struct iwl_mvm *mvm,
				   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mfu_assert_dump_notif *mfu_dump_notif = (void *)pkt->data;

	if (mfu_dump_notif->index_num == 0)
		IWL_INFO(mvm, "MFUART assert id 0x%x occurred\n",
			 le32_to_cpu(mfu_dump_notif->assert_id));
}
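
/*
 * Notification-wait callback for UCODE_ALIVE_NTFY: parse the versioned
 * alive notification (v3/v4/v5/v6), record the LMAC/UMAC error table
 * pointers and, for v6, the IMR debug data, then report whether the
 * firmware came up with IWL_ALIVE_STATUS_OK.
 */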
static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
			 struct iwl_rx_packet *pkt, void *data)
{
	unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_alive_data *alive_data = data;
	struct iwl_umac_alive *umac;
	struct iwl_lmac_alive *lmac1;
	struct iwl_lmac_alive *lmac2 = NULL;
	u16 status;
	u32 lmac_error_event_table, umac_error_table;
	u32 version = iwl_fw_lookup_notif_ver(mvm->fw, LEGACY_GROUP,
					      UCODE_ALIVE_NTFY, 0);
	u32 i;

	if (version == 6) {
		struct iwl_alive_ntf_v6 *palive;

		if (pkt_len < sizeof(*palive))
			return false;

		palive = (void *)pkt->data;
		mvm->trans->dbg.imr_data.imr_enable =
			le32_to_cpu(palive->imr.enabled);
		mvm->trans->dbg.imr_data.imr_size =
			le32_to_cpu(palive->imr.size);
		mvm->trans->dbg.imr_data.imr2sram_remainbyte =
			mvm->trans->dbg.imr_data.imr_size;
		mvm->trans->dbg.imr_data.imr_base_addr =
			palive->imr.base_addr;
		mvm->trans->dbg.imr_data.imr_curr_addr =
			le64_to_cpu(mvm->trans->dbg.imr_data.imr_base_addr);
		IWL_DEBUG_FW(mvm, "IMR Enabled: 0x0%x size 0x0%x Address 0x%016llx\n",
			     mvm->trans->dbg.imr_data.imr_enable,
			     mvm->trans->dbg.imr_data.imr_size,
			     le64_to_cpu(mvm->trans->dbg.imr_data.imr_base_addr));

		if (!mvm->trans->dbg.imr_data.imr_enable) {
			for (i = 0; i < ARRAY_SIZE(mvm->trans->dbg.active_regions); i++) {
				struct iwl_ucode_tlv *reg_tlv;
				struct iwl_fw_ini_region_tlv *reg;

				reg_tlv = mvm->trans->dbg.active_regions[i];
				if (!reg_tlv)
					continue;

				reg = (void *)reg_tlv->data;
				/*
				 * We have only one DRAM IMR region, so we
				 * can break as soon as we find the first
				 * one.
				 */
				if (reg->type == IWL_FW_INI_REGION_DRAM_IMR) {
					mvm->trans->dbg.unsupported_region_msk |= BIT(i);
					break;
				}
			}
		}
	}

	if (version >= 5) {
		struct iwl_alive_ntf_v5 *palive;

		if (pkt_len < sizeof(*palive))
			return false;

		palive = (void *)pkt->data;
		umac = &palive->umac_data;
		lmac1 = &palive->lmac_data[0];
		lmac2 = &palive->lmac_data[1];
		status = le16_to_cpu(palive->status);

		mvm->trans->sku_id[0] = le32_to_cpu(palive->sku_id.data[0]);
		mvm->trans->sku_id[1] = le32_to_cpu(palive->sku_id.data[1]);
		mvm->trans->sku_id[2] = le32_to_cpu(palive->sku_id.data[2]);

		IWL_DEBUG_FW(mvm, "Got sku_id: 0x0%x 0x0%x 0x0%x\n",
			     mvm->trans->sku_id[0],
			     mvm->trans->sku_id[1],
			     mvm->trans->sku_id[2]);
	} else if (iwl_rx_packet_payload_len(pkt) == sizeof(struct iwl_alive_ntf_v4)) {
		struct iwl_alive_ntf_v4 *palive;

		if (pkt_len < sizeof(*palive))
			return false;

		palive = (void *)pkt->data;
		umac = &palive->umac_data;
		lmac1 = &palive->lmac_data[0];
		lmac2 = &palive->lmac_data[1];
		status = le16_to_cpu(palive->status);
	} else if (iwl_rx_packet_payload_len(pkt) ==
		   sizeof(struct iwl_alive_ntf_v3)) {
		struct iwl_alive_ntf_v3 *palive3;

		if (pkt_len < sizeof(*palive3))
			return false;

		palive3 = (void *)pkt->data;
		umac = &palive3->umac_data;
		lmac1 = &palive3->lmac_data;
		status = le16_to_cpu(palive3->status);
	} else {
		WARN(1, "unsupported alive notification (size %d)\n",
		     iwl_rx_packet_payload_len(pkt));
		/* get timeout later */
		return false;
	}

	lmac_error_event_table =
		le32_to_cpu(lmac1->dbg_ptrs.error_event_table_ptr);
	iwl_fw_lmac1_set_alive_err_table(mvm->trans, lmac_error_event_table);

	if (lmac2)
		mvm->trans->dbg.lmac_error_event_table[1] =
			le32_to_cpu(lmac2->dbg_ptrs.error_event_table_ptr);

	umac_error_table = le32_to_cpu(umac->dbg_ptrs.error_info_addr) &
		~FW_ADDR_CACHE_CONTROL;

	if (umac_error_table) {
		if (umac_error_table >=
		    mvm->trans->cfg->min_umac_error_event_table) {
			iwl_fw_umac_set_alive_err_table(mvm->trans,
							umac_error_table);
		} else {
			IWL_ERR(mvm,
				"Not valid error log pointer 0x%08X for %s uCode\n",
				umac_error_table,
				(mvm->fwrt.cur_fw_img == IWL_UCODE_INIT) ?
				"Init" : "RT");
		}
	}

	alive_data->scd_base_addr = le32_to_cpu(lmac1->dbg_ptrs.scd_base_ptr);
	alive_data->valid = status == IWL_ALIVE_STATUS_OK;

	IWL_DEBUG_FW(mvm,
		     "Alive ucode status 0x%04x revision 0x%01X 0x%01X\n",
		     status, lmac1->ver_type, lmac1->ver_subtype);

	if (lmac2)
		IWL_DEBUG_FW(mvm, "Alive ucode CDB\n");

	IWL_DEBUG_FW(mvm,
		     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
		     le32_to_cpu(umac->umac_major),
		     le32_to_cpu(umac->umac_minor));

	iwl_fwrt_update_fw_versions(&mvm->fwrt, lmac1, umac);

	return true;
}
static bool iwl_wait_init_complete(struct iwl_notif_wait_data *notif_wait,
				   struct iwl_rx_packet *pkt, void *data)
{
	WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);

	return true;
}
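
/*
 * Collect calibration results into the PHY DB; keep waiting (return
 * false) until INIT_COMPLETE_NOTIF arrives.
 */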
static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
				  struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_phy_db *phy_db = data;

	if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
		WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
		return true;
	}

	WARN_ON(iwl_phy_db_set_section(phy_db, pkt));

	return false;
}
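
/*
 * Dump the WFPM PD notification and OTP config registers, per device
 * family, to help debug a failed firmware boot.
 */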
static void iwl_mvm_print_pd_notification(struct iwl_mvm *mvm)
{
#define IWL_FW_PRINT_REG_INFO(reg_name) \
	IWL_ERR(mvm, #reg_name ": 0x%x\n", iwl_read_umac_prph(trans, reg_name))

	struct iwl_trans *trans = mvm->trans;
	enum iwl_device_family device_family = trans->trans_cfg->device_family;

	if (device_family < IWL_DEVICE_FAMILY_8000)
		return;

	if (device_family <= IWL_DEVICE_FAMILY_9000)
		IWL_FW_PRINT_REG_INFO(WFPM_ARC1_PD_NOTIFICATION);
	else
		IWL_FW_PRINT_REG_INFO(WFPM_LMAC1_PD_NOTIFICATION);

	IWL_FW_PRINT_REG_INFO(HPM_SECONDARY_DEVICE_STATE);

	IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_ADDR);
	IWL_FW_PRINT_REG_INFO(WFPM_MAC_OTP_CFG7_DATA);
}
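
/*
 * Start the given firmware image and block until the ALIVE notification
 * arrives (or times out). On failure, dump SecBoot status and program
 * counter debug registers; on success, mark the firmware as running and
 * set up the command queue bookkeeping.
 */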
static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
					 enum iwl_ucode_type ucode_type)
{
	struct iwl_notification_wait alive_wait;
	struct iwl_mvm_alive_data alive_data = {};
	const struct fw_img *fw;
	int ret;
	enum iwl_ucode_type old_type = mvm->fwrt.cur_fw_img;
	static const u16 alive_cmd[] = { UCODE_ALIVE_NTFY };
	bool run_in_rfkill =
		ucode_type == IWL_UCODE_INIT || iwl_mvm_has_unified_ucode(mvm);
	u8 count;
	struct iwl_pc_data *pc_data;

	if (ucode_type == IWL_UCODE_REGULAR &&
	    iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
	    !(fw_has_capa(&mvm->fw->ucode_capa,
			  IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
		fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER);
	else
		fw = iwl_get_ucode_image(mvm->fw, ucode_type);
	if (WARN_ON(!fw))
		return -EINVAL;
	iwl_fw_set_current_image(&mvm->fwrt, ucode_type);
	clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);

	iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
				   alive_cmd, ARRAY_SIZE(alive_cmd),
				   iwl_alive_fn, &alive_data);

	/*
	 * We want to load the INIT firmware even in RFKILL
	 * For the unified firmware case, the ucode_type is not
	 * INIT, but we still need to run it.
	 */
	ret = iwl_trans_start_fw(mvm->trans, fw, run_in_rfkill);
	if (ret) {
		iwl_fw_set_current_image(&mvm->fwrt, old_type);
		iwl_remove_notification(&mvm->notif_wait, &alive_wait);
		return ret;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
				    MVM_UCODE_ALIVE_TIMEOUT);

	if (mvm->trans->trans_cfg->device_family ==
	    IWL_DEVICE_FAMILY_AX210) {
		/* print these registers regardless of alive fail/success */
		IWL_INFO(mvm, "WFPM_UMAC_PD_NOTIFICATION: 0x%x\n",
			 iwl_read_umac_prph(mvm->trans, WFPM_ARC1_PD_NOTIFICATION));
		IWL_INFO(mvm, "WFPM_LMAC2_PD_NOTIFICATION: 0x%x\n",
			 iwl_read_umac_prph(mvm->trans, WFPM_LMAC2_PD_NOTIFICATION));
		IWL_INFO(mvm, "WFPM_AUTH_KEY_0: 0x%x\n",
			 iwl_read_umac_prph(mvm->trans, SB_MODIFY_CFG_FLAG));
		IWL_INFO(mvm, "CNVI_SCU_SEQ_DATA_DW9: 0x%x\n",
			 iwl_read_prph(mvm->trans, CNVI_SCU_SEQ_DATA_DW9));
	}

	if (ret) {
		struct iwl_trans *trans = mvm->trans;

		/* SecBoot info */
		if (trans->trans_cfg->device_family >=
		    IWL_DEVICE_FAMILY_22000) {
			IWL_ERR(mvm,
				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
				iwl_read_umac_prph(trans, UMAG_SB_CPU_1_STATUS),
				iwl_read_umac_prph(trans,
						   UMAG_SB_CPU_2_STATUS));
		} else if (trans->trans_cfg->device_family >=
			   IWL_DEVICE_FAMILY_8000) {
			IWL_ERR(mvm,
				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
				iwl_read_prph(trans, SB_CPU_1_STATUS),
				iwl_read_prph(trans, SB_CPU_2_STATUS));
		}

		iwl_mvm_print_pd_notification(mvm);

		/* LMAC/UMAC PC info */
		if (trans->trans_cfg->device_family >=
		    IWL_DEVICE_FAMILY_22000) {
			pc_data = trans->dbg.pc_data;
			for (count = 0; count < trans->dbg.num_pc;
			     count++, pc_data++)
				IWL_ERR(mvm, "%s: 0x%x\n",
					pc_data->pc_name,
					pc_data->pc_address);
		} else if (trans->trans_cfg->device_family >=
			   IWL_DEVICE_FAMILY_9000) {
			IWL_ERR(mvm, "UMAC PC: 0x%x\n",
				iwl_read_umac_prph(trans,
						   UREG_UMAC_CURRENT_PC));
			IWL_ERR(mvm, "LMAC PC: 0x%x\n",
				iwl_read_umac_prph(trans,
						   UREG_LMAC1_CURRENT_PC));
			if (iwl_mvm_is_cdb_supported(mvm))
				IWL_ERR(mvm, "LMAC2 PC: 0x%x\n",
					iwl_read_umac_prph(trans,
							   UREG_LMAC2_CURRENT_PC));
		}

		if (ret == -ETIMEDOUT && !mvm->fw_product_reset)
			iwl_fw_dbg_error_collect(&mvm->fwrt,
						 FW_DBG_TRIGGER_ALIVE_TIMEOUT);

		iwl_fw_set_current_image(&mvm->fwrt, old_type);
		return ret;
	}

	if (!alive_data.valid) {
		IWL_ERR(mvm, "Loaded ucode is not valid!\n");
		iwl_fw_set_current_image(&mvm->fwrt, old_type);
		return -EIO;
	}

	/* if reached this point, Alive notification was received */
	iwl_mei_alive_notif(true);

	ret = iwl_pnvm_load(mvm->trans, &mvm->notif_wait,
			    &mvm->fw->ucode_capa);
	if (ret) {
		IWL_ERR(mvm, "Timeout waiting for PNVM load!\n");
		iwl_fw_set_current_image(&mvm->fwrt, old_type);
		return ret;
	}

	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);

	/*
	 * Note: all the queues are enabled as part of the interface
	 * initialization, but in firmware restart scenarios they
	 * could be stopped, so wake them up. In firmware restart,
	 * mac80211 will have the queues stopped as well until the
	 * reconfiguration completes. During normal startup, they
	 * aren't stopped anyway.
	 */
	memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
	/*
	 * Set a 'fake' TID for the command queue, since we use the
	 * hweight() of the tid_bitmap as a refcount now. Not that
	 * we ever even consider the command queue as one we might
	 * want to reuse, but be safe nevertheless.
	 */
	mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].tid_bitmap =
		BIT(IWL_MAX_TID_COUNT + 2);

	set_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);
#ifdef CONFIG_IWLWIFI_DEBUGFS
	iwl_fw_set_dbg_rec_on(&mvm->fwrt);
#endif

	/*
	 * For pre-MLD API (MLD API doesn't use the timestamps):
	 * All the BSSes in the BSS table include the GP2 in the system
	 * at the beacon Rx time, this is of course no longer relevant
	 * since we are resetting the firmware.
	 * Purge all the BSS table.
	 */
	if (!mvm->mld_api_is_used)
		cfg80211_bss_flush(mvm->hw->wiphy);

	return 0;
}
static void iwl_mvm_phy_filter_init(struct iwl_mvm *mvm,
				    struct iwl_phy_specific_cfg *phy_filters)
{
#ifdef CONFIG_ACPI
	*phy_filters = mvm->phy_filters;
#endif /* CONFIG_ACPI */
}
static void iwl_mvm_uats_init(struct iwl_mvm *mvm)
{
	int ret;
	u8 cmd_ver;
	struct iwl_host_cmd cmd = {
		.id = WIDE_ID(REGULATORY_AND_NVM_GROUP,
			      MCC_ALLOWED_AP_TYPE_CMD),
		.flags = 0,
		.data[0] = &mvm->fwrt.uats_table,
		.len[0] = sizeof(mvm->fwrt.uats_table),
		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
	};

	if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210) {
		IWL_DEBUG_RADIO(mvm, "UATS feature is not supported\n");
		return;
	}

	cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
					IWL_FW_CMD_VER_UNKNOWN);
	if (cmd_ver != 1) {
		IWL_DEBUG_RADIO(mvm,
				"MCC_ALLOWED_AP_TYPE_CMD ver %d not supported\n",
				cmd_ver);
		return;
	}

	ret = iwl_uefi_get_uats_table(mvm->trans, &mvm->fwrt);
	if (ret < 0) {
		IWL_DEBUG_FW(mvm, "failed to read UATS table (%d)\n", ret);
		return;
	}

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret < 0)
		IWL_ERR(mvm, "failed to send MCC_ALLOWED_AP_TYPE_CMD (%d)\n",
			ret);
	else
		IWL_DEBUG_RADIO(mvm, "MCC_ALLOWED_AP_TYPE_CMD sent to FW\n");
}
static int iwl_mvm_sgom_init(struct iwl_mvm *mvm)
{
	int ret;
	u8 cmd_ver;
	struct iwl_host_cmd cmd = {
		.id = WIDE_ID(REGULATORY_AND_NVM_GROUP,
			      SAR_OFFSET_MAPPING_TABLE_CMD),
		.flags = 0,
		.data[0] = &mvm->fwrt.sgom_table,
		.len[0] = sizeof(mvm->fwrt.sgom_table),
		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
	};

	if (!mvm->fwrt.sgom_enabled) {
		IWL_DEBUG_RADIO(mvm, "SGOM table is disabled\n");
		return 0;
	}

	cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
					IWL_FW_CMD_VER_UNKNOWN);
	if (cmd_ver != 2) {
		IWL_DEBUG_RADIO(mvm, "command version is unsupported. version = %d\n",
				cmd_ver);
		return 0;
	}

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret < 0)
		IWL_ERR(mvm, "failed to send SAR_OFFSET_MAPPING_CMD (%d)\n", ret);

	return ret;
}
static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
	u32 cmd_id = PHY_CONFIGURATION_CMD;
	struct iwl_phy_cfg_cmd_v3 phy_cfg_cmd;
	enum iwl_ucode_type ucode_type = mvm->fwrt.cur_fw_img;
	u8 cmd_ver;
	size_t cmd_size;

	if (iwl_mvm_has_unified_ucode(mvm) &&
	    !mvm->trans->cfg->tx_with_siso_diversity)
		return 0;

	if (mvm->trans->cfg->tx_with_siso_diversity) {
		/*
		 * TODO: currently we don't set the antenna, letting the NIC
		 * decide which antenna to use. This should come from BIOS.
		 */
		phy_cfg_cmd.phy_cfg =
			cpu_to_le32(FW_PHY_CFG_CHAIN_SAD_ENABLED);
	}

	/* Set parameters */
	phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));

	/* set extra PHY configuration flags from the device's cfg */
	phy_cfg_cmd.phy_cfg |=
		cpu_to_le32(mvm->trans->trans_cfg->extra_phy_cfg_flags);

	phy_cfg_cmd.calib_control.event_trigger =
		mvm->fw->default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
		mvm->fw->default_calib[ucode_type].flow_trigger;

	cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
					IWL_FW_CMD_VER_UNKNOWN);
	if (cmd_ver >= 3)
		iwl_mvm_phy_filter_init(mvm, &phy_cfg_cmd.phy_specific_cfg);

	IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
		       phy_cfg_cmd.phy_cfg);
	cmd_size = (cmd_ver == 3) ? sizeof(struct iwl_phy_cfg_cmd_v3) :
				    sizeof(struct iwl_phy_cfg_cmd_v1);
	return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &phy_cfg_cmd);
}
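
/*
 * Boot flow for unified (single-image) firmware: load the regular image,
 * send the extended init config, load the NVM if needed and wait for
 * INIT_COMPLETE_NOTIF.
 */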
static int iwl_run_unified_mvm_ucode(struct iwl_mvm *mvm)
{
	struct iwl_notification_wait init_wait;
	struct iwl_nvm_access_complete_cmd nvm_complete = {};
	struct iwl_init_extended_cfg_cmd init_cfg = {
		.init_flags = cpu_to_le32(BIT(IWL_INIT_NVM)),
	};
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
	};
	u32 sb_cfg;
	int ret;

	if (mvm->trans->cfg->tx_with_siso_diversity)
		init_cfg.init_flags |= cpu_to_le32(BIT(IWL_INIT_PHY));

	lockdep_assert_held(&mvm->mutex);

	mvm->rfkill_safe_init_done = false;

	if (mvm->trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210) {
		sb_cfg = iwl_read_umac_prph(mvm->trans, SB_MODIFY_CFG_FLAG);
		/* if needed, we'll reset this on our way out later */
		mvm->fw_product_reset = sb_cfg == SB_CFG_RESIDES_IN_ROM;
		if (mvm->fw_product_reset && iwl_mei_pldr_req())
			return -EBUSY;
	}

	iwl_init_notification_wait(&mvm->notif_wait,
				   &init_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   iwl_wait_init_complete,
				   NULL);

	iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);

	/* Will also start the device */
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
	if (ret) {
		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);

		/* if we needed reset then fail here, but notify and remove */
		if (mvm->fw_product_reset) {
			iwl_mei_alive_notif(false);
			iwl_trans_pcie_remove(mvm->trans, true);
		}

		goto error;
	}
	iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
			       NULL);

	if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		mvm->trans->step_urm = !!(iwl_read_umac_prph(mvm->trans,
							     CNVI_PMU_STEP_FLOW) &
					  CNVI_PMU_STEP_FLOW_FORCE_URM);

	/* Send init config command to mark that we are sending NVM access
	 * commands
	 */
	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(SYSTEM_GROUP,
						INIT_EXTENDED_CFG_CMD),
				   CMD_SEND_IN_RFKILL,
				   sizeof(init_cfg), &init_cfg);
	if (ret) {
		IWL_ERR(mvm, "Failed to run init config command: %d\n",
			ret);
		goto error;
	}

	/* Load NVM to NIC if needed */
	if (mvm->nvm_file_name) {
		ret = iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name,
					    mvm->nvm_sections);
		if (ret)
			goto error;
		ret = iwl_mvm_load_nvm_to_nic(mvm);
		if (ret)
			goto error;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(REGULATORY_AND_NVM_GROUP,
						NVM_ACCESS_COMPLETE),
				   CMD_SEND_IN_RFKILL,
				   sizeof(nvm_complete), &nvm_complete);
	if (ret) {
		IWL_ERR(mvm, "Failed to run complete NVM access: %d\n",
			ret);
		goto error;
	}

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to run PHY configuration: %d\n",
			ret);
		goto error;
	}

	/* We wait for the INIT complete notification */
	ret = iwl_wait_notification(&mvm->notif_wait, &init_wait,
				    MVM_UCODE_ALIVE_TIMEOUT);
	if (ret)
		return ret;

	/* Read the NVM only at driver load time, no need to do this twice */
	if (!mvm->nvm_data) {
		mvm->nvm_data = iwl_get_nvm(mvm->trans, mvm->fw,
					    mvm->set_tx_ant, mvm->set_rx_ant);
		if (IS_ERR(mvm->nvm_data)) {
			ret = PTR_ERR(mvm->nvm_data);
			mvm->nvm_data = NULL;
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			return ret;
		}
	}

	mvm->rfkill_safe_init_done = true;

	return 0;

error:
	iwl_remove_notification(&mvm->notif_wait, &init_wait);
	return ret;
}
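
/*
 * Boot flow for split firmware: run the INIT image, read the NVM and
 * wait for the PHY DB calibration results. Bails out early (without
 * calibrating) when RF-kill is asserted.
 */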
int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm)
{
	struct iwl_notification_wait calib_wait;
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
		CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	if (iwl_mvm_has_unified_ucode(mvm))
		return iwl_run_unified_mvm_ucode(mvm);

	lockdep_assert_held(&mvm->mutex);

	mvm->rfkill_safe_init_done = false;

	iwl_init_notification_wait(&mvm->notif_wait,
				   &calib_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   iwl_wait_phy_db_entry,
				   mvm->phy_db);

	iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_EARLY, NULL);

	/* Will also start the device */
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
	if (ret) {
		IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
		goto remove_notif;
	}

	if (mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000) {
		ret = iwl_mvm_send_bt_init_conf(mvm);
		if (ret)
			goto remove_notif;
	}

	/* Read the NVM only at driver load time, no need to do this twice */
	if (!mvm->nvm_data) {
		ret = iwl_nvm_init(mvm);
		if (ret) {
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			goto remove_notif;
		}
	}

	/* In case we read the NVM from external file, load it to the NIC */
	if (mvm->nvm_file_name) {
		ret = iwl_mvm_load_nvm_to_nic(mvm);
		if (ret)
			goto remove_notif;
	}

	WARN_ONCE(mvm->nvm_data->nvm_version < mvm->trans->cfg->nvm_ver,
		  "Too old NVM version (0x%0x, required = 0x%0x)",
		  mvm->nvm_data->nvm_version, mvm->trans->cfg->nvm_ver);

	/*
	 * Abort after reading the NVM in case RF Kill is on; we will
	 * complete the init sequence later, when RF kill switches off.
	 */
	if (iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm,
				  "jump over all phy activities due to RF kill\n");
		goto remove_notif;
	}

	mvm->rfkill_safe_init_done = true;

	/* Send TX valid antennas before triggering calibrations */
	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto remove_notif;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
			ret);
		goto remove_notif;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the calibration complete notification.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
				    MVM_UCODE_CALIB_TIMEOUT);
	if (!ret)
		goto out;

	if (iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
		ret = 0;
	} else {
		IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
			ret);
	}

	goto out;

remove_notif:
	iwl_remove_notification(&mvm->notif_wait, &calib_wait);
out:
	mvm->rfkill_safe_init_done = false;
	if (!mvm->nvm_data) {
		/* we want to debug INIT and we have no NVM - fake */
		mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
					sizeof(struct ieee80211_channel) +
					sizeof(struct ieee80211_rate),
					GFP_KERNEL);
		if (!mvm->nvm_data)
			return -ENOMEM;
		mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
		mvm->nvm_data->bands[0].n_channels = 1;
		mvm->nvm_data->bands[0].n_bitrates = 1;
		mvm->nvm_data->bands[0].bitrates =
			(void *)(mvm->nvm_data->channels + 1);
		mvm->nvm_data->bands[0].bitrates->hw_value = 10;
	}

	return ret;
}
static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
{
	struct iwl_ltr_config_cmd cmd = {
		.flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
	};

	if (!mvm->trans->ltr_enabled)
		return 0;

	return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
				    sizeof(cmd), &cmd);
}
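
/*
 * Send REDUCE_TX_POWER_CMD with the per-chain SAR limits of the given
 * profiles; the command layout depends on the firmware's command version.
 */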
int iwl_mvm_sar_select_profile(struct iwl_mvm *mvm, int prof_a, int prof_b)
{
	u32 cmd_id = REDUCE_TX_POWER_CMD;
	struct iwl_dev_tx_power_cmd_v3_v8 cmd = {
		.common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
	};
	struct iwl_dev_tx_power_cmd cmd_v9_v10 = {
		.common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_CHAINS),
	};
	__le16 *per_chain;
	int ret;
	u16 len = 0;
	u32 n_subbands;
	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 3);
	void *cmd_data = &cmd;

	if (cmd_ver == 10) {
		len = sizeof(cmd_v9_v10.v10);
		n_subbands = IWL_NUM_SUB_BANDS_V2;
		per_chain = &cmd_v9_v10.v10.per_chain[0][0][0];
		cmd_v9_v10.v10.flags =
			cpu_to_le32(mvm->fwrt.reduced_power_flags);
	} else if (cmd_ver == 9) {
		len = sizeof(cmd_v9_v10.v9);
		n_subbands = IWL_NUM_SUB_BANDS_V1;
		per_chain = &cmd_v9_v10.v9.per_chain[0][0];
	} else if (cmd_ver >= 7) {
		len = sizeof(cmd.v7);
		n_subbands = IWL_NUM_SUB_BANDS_V2;
		per_chain = cmd.v7.per_chain[0][0];
		cmd.v7.flags = cpu_to_le32(mvm->fwrt.reduced_power_flags);

		if (cmd_ver == 8)
			len = sizeof(cmd.v8);
	} else if (cmd_ver == 6) {
		len = sizeof(cmd.v6);
		n_subbands = IWL_NUM_SUB_BANDS_V2;
		per_chain = cmd.v6.per_chain[0][0];
	} else if (fw_has_api(&mvm->fw->ucode_capa,
			      IWL_UCODE_TLV_API_REDUCE_TX_POWER)) {
		len = sizeof(cmd.v5);
		n_subbands = IWL_NUM_SUB_BANDS_V1;
		per_chain = cmd.v5.per_chain[0][0];
	} else if (fw_has_capa(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) {
		len = sizeof(cmd.v4);
		n_subbands = IWL_NUM_SUB_BANDS_V1;
		per_chain = cmd.v4.per_chain[0][0];
	} else {
		len = sizeof(cmd.v3);
		n_subbands = IWL_NUM_SUB_BANDS_V1;
		per_chain = cmd.v3.per_chain[0][0];
	}

	/* all structs have the same common part, add its length */
	len += sizeof(cmd.common);

	if (cmd_ver < 9)
		len += sizeof(cmd.per_band);
	else
		cmd_data = &cmd_v9_v10;

	ret = iwl_sar_fill_profile(&mvm->fwrt, per_chain,
				   IWL_NUM_CHAIN_TABLES,
				   n_subbands, prof_a, prof_b);

	/* return on error or if the profile is disabled (positive number) */
	if (ret)
		return ret;

	iwl_mei_set_power_limit(per_chain);

	IWL_DEBUG_RADIO(mvm, "Sending REDUCE_TX_POWER_CMD per chain\n");
	return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, cmd_data);
}
int iwl_mvm_get_sar_geo_profile(struct iwl_mvm *mvm)
{
	union iwl_geo_tx_power_profiles_cmd geo_tx_cmd;
	struct iwl_geo_tx_power_profiles_resp *resp;
	u16 len;
	int ret;
	struct iwl_host_cmd cmd = {
		.id = WIDE_ID(PHY_OPS_GROUP, PER_CHAIN_LIMIT_OFFSET_CMD),
		.flags = CMD_WANT_SKB,
		.data = { &geo_tx_cmd },
	};
	u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd.id,
					   IWL_FW_CMD_VER_UNKNOWN);

	/* the ops field is at the same spot for all versions, so set in v1 */
	geo_tx_cmd.v1.ops =
		cpu_to_le32(IWL_PER_CHAIN_OFFSET_GET_CURRENT_TABLE);

	if (cmd_ver == 5)
		len = sizeof(geo_tx_cmd.v5);
	else if (cmd_ver == 4)
		len = sizeof(geo_tx_cmd.v4);
	else if (cmd_ver == 3)
		len = sizeof(geo_tx_cmd.v3);
	else if (fw_has_api(&mvm->fwrt.fw->ucode_capa,
			    IWL_UCODE_TLV_API_SAR_TABLE_VER))
		len = sizeof(geo_tx_cmd.v2);
	else
		len = sizeof(geo_tx_cmd.v1);

	if (!iwl_sar_geo_support(&mvm->fwrt))
		return -EOPNOTSUPP;

	cmd.len[0] = len;

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to get geographic profile info %d\n", ret);
		return ret;
	}

	resp = (void *)cmd.resp_pkt->data;
	ret = le32_to_cpu(resp->profile_idx);

	if (WARN_ON(ret > BIOS_GEO_MAX_PROFILE_NUM))
		ret = -EIO;

	iwl_free_resp(&cmd);
	return ret;
}
*mvm
)
991 u32 cmd_id
= WIDE_ID(PHY_OPS_GROUP
, PER_CHAIN_LIMIT_OFFSET_CMD
);
992 union iwl_geo_tx_power_profiles_cmd cmd
;
996 __le32 sk
= cpu_to_le32(0);
998 u8 cmd_ver
= iwl_fw_lookup_cmd_ver(mvm
->fw
, cmd_id
,
999 IWL_FW_CMD_VER_UNKNOWN
);
1001 BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1
, ops
) !=
1002 offsetof(struct iwl_geo_tx_power_profiles_cmd_v2
, ops
) ||
1003 offsetof(struct iwl_geo_tx_power_profiles_cmd_v2
, ops
) !=
1004 offsetof(struct iwl_geo_tx_power_profiles_cmd_v3
, ops
) ||
1005 offsetof(struct iwl_geo_tx_power_profiles_cmd_v3
, ops
) !=
1006 offsetof(struct iwl_geo_tx_power_profiles_cmd_v4
, ops
) ||
1007 offsetof(struct iwl_geo_tx_power_profiles_cmd_v4
, ops
) !=
1008 offsetof(struct iwl_geo_tx_power_profiles_cmd_v5
, ops
));
1010 /* the ops field is at the same spot for all versions, so set in v1 */
1011 cmd
.v1
.ops
= cpu_to_le32(IWL_PER_CHAIN_OFFSET_SET_TABLES
);
1013 /* Only set to South Korea if the table revision is 1 */
1014 if (mvm
->fwrt
.geo_rev
== 1)
1015 sk
= cpu_to_le32(1);
1018 len
= sizeof(cmd
.v5
);
1019 n_bands
= ARRAY_SIZE(cmd
.v5
.table
[0]);
1020 n_profiles
= BIOS_GEO_MAX_PROFILE_NUM
;
1021 cmd
.v5
.table_revision
= sk
;
1022 } else if (cmd_ver
== 4) {
1023 len
= sizeof(cmd
.v4
);
1024 n_bands
= ARRAY_SIZE(cmd
.v4
.table
[0]);
1025 n_profiles
= BIOS_GEO_MAX_PROFILE_NUM
;
1026 cmd
.v4
.table_revision
= sk
;
1027 } else if (cmd_ver
== 3) {
1028 len
= sizeof(cmd
.v3
);
1029 n_bands
= ARRAY_SIZE(cmd
.v3
.table
[0]);
1030 n_profiles
= BIOS_GEO_MIN_PROFILE_NUM
;
1031 cmd
.v3
.table_revision
= sk
;
1032 } else if (fw_has_api(&mvm
->fwrt
.fw
->ucode_capa
,
1033 IWL_UCODE_TLV_API_SAR_TABLE_VER
)) {
1034 len
= sizeof(cmd
.v2
);
1035 n_bands
= ARRAY_SIZE(cmd
.v2
.table
[0]);
1036 n_profiles
= BIOS_GEO_MIN_PROFILE_NUM
;
1037 cmd
.v2
.table_revision
= sk
;
1039 len
= sizeof(cmd
.v1
);
1040 n_bands
= ARRAY_SIZE(cmd
.v1
.table
[0]);
1041 n_profiles
= BIOS_GEO_MIN_PROFILE_NUM
;
1044 BUILD_BUG_ON(offsetof(struct iwl_geo_tx_power_profiles_cmd_v1
, table
) !=
1045 offsetof(struct iwl_geo_tx_power_profiles_cmd_v2
, table
) ||
1046 offsetof(struct iwl_geo_tx_power_profiles_cmd_v2
, table
) !=
1047 offsetof(struct iwl_geo_tx_power_profiles_cmd_v3
, table
) ||
1048 offsetof(struct iwl_geo_tx_power_profiles_cmd_v3
, table
) !=
1049 offsetof(struct iwl_geo_tx_power_profiles_cmd_v4
, table
) ||
1050 offsetof(struct iwl_geo_tx_power_profiles_cmd_v4
, table
) !=
1051 offsetof(struct iwl_geo_tx_power_profiles_cmd_v5
, table
));
1052 /* the table is at the same position for all versions, so set use v1 */
1053 ret
= iwl_sar_geo_fill_table(&mvm
->fwrt
, &cmd
.v1
.table
[0][0],
1054 n_bands
, n_profiles
);
1057 * It is a valid scenario to not support SAR, or miss wgds table,
1058 * but in that case there is no need to send the command.
1063 return iwl_mvm_send_cmd_pdu(mvm
, cmd_id
, 0, len
, &cmd
);
int iwl_mvm_ppag_send_cmd(struct iwl_mvm *mvm)
{
	union iwl_ppag_table_cmd cmd;
	int ret, cmd_size;

	ret = iwl_fill_ppag_table(&mvm->fwrt, &cmd, &cmd_size);
	/* Not supporting PPAG table is a valid scenario */
	if (ret < 0)
		return 0;

	IWL_DEBUG_RADIO(mvm, "Sending PER_PLATFORM_ANT_GAIN_CMD\n");
	ret = iwl_mvm_send_cmd_pdu(mvm, WIDE_ID(PHY_OPS_GROUP,
						PER_PLATFORM_ANT_GAIN_CMD),
				   0, cmd_size, &cmd);
	if (ret < 0)
		IWL_ERR(mvm, "failed to send PER_PLATFORM_ANT_GAIN_CMD (%d)\n",
			ret);

	return ret;
}
*mvm
)
1089 /* no need to read the table, done in INIT stage */
1090 if (!(iwl_is_ppag_approved(&mvm
->fwrt
)))
1093 return iwl_mvm_ppag_send_cmd(mvm
);
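
/*
 * Append an MCC to the TAS block list unless it is already present;
 * returns false when the list is full.
 */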
static bool iwl_mvm_add_to_tas_block_list(__le32 *list, __le32 *le_size, unsigned int mcc)
{
	int i;
	u32 size = le32_to_cpu(*le_size);

	/* Verify that there is room for another country */
	if (size >= IWL_WTAS_BLACK_LIST_MAX)
		return false;

	for (i = 0; i < size; i++) {
		if (list[i] == cpu_to_le32(mcc))
			return true;
	}

	list[size++] = cpu_to_le32(mcc);
	*le_size = cpu_to_le32(size);
	return true;
}
static void iwl_mvm_tas_init(struct iwl_mvm *mvm)
{
	u32 cmd_id = WIDE_ID(REGULATORY_AND_NVM_GROUP, TAS_CONFIG);
	int ret;
	struct iwl_tas_data data = {};
	struct iwl_tas_config_cmd cmd = {};
	int cmd_size, fw_ver;

	BUILD_BUG_ON(ARRAY_SIZE(data.block_list_array) !=
		     IWL_WTAS_BLACK_LIST_MAX);
	BUILD_BUG_ON(ARRAY_SIZE(cmd.common.block_list_array) !=
		     IWL_WTAS_BLACK_LIST_MAX);

	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_TAS_CFG)) {
		IWL_DEBUG_RADIO(mvm, "TAS not enabled in FW\n");
		return;
	}

	ret = iwl_bios_get_tas_table(&mvm->fwrt, &data);
	if (ret < 0) {
		IWL_DEBUG_RADIO(mvm,
				"TAS table invalid or unavailable. (%d)\n",
				ret);
		return;
	}

	if (ret == 0)
		return;

	if (!iwl_is_tas_approved()) {
		IWL_DEBUG_RADIO(mvm,
				"System vendor '%s' is not in the approved list, disabling TAS in US and Canada.\n",
				dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
		if ((!iwl_mvm_add_to_tas_block_list(data.block_list_array,
						    &data.block_list_size,
						    IWL_MCC_US)) ||
		    (!iwl_mvm_add_to_tas_block_list(data.block_list_array,
						    &data.block_list_size,
						    IWL_MCC_CANADA))) {
			IWL_DEBUG_RADIO(mvm,
					"Unable to add US/Canada to TAS block list, disabling TAS\n");
			return;
		}
	} else {
		IWL_DEBUG_RADIO(mvm,
				"System vendor '%s' is in the approved list.\n",
				dmi_get_system_info(DMI_SYS_VENDOR) ?: "<unknown>");
	}

	fw_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id,
				       IWL_FW_CMD_VER_UNKNOWN);

	memcpy(&cmd.common, &data, sizeof(struct iwl_tas_config_cmd_common));

	/* Set v3 or v4 specific parts. will be truncated for fw_ver < 3 */
	if (fw_ver == 4) {
		cmd.v4.override_tas_iec = data.override_tas_iec;
		cmd.v4.enable_tas_iec = data.enable_tas_iec;
		cmd.v4.usa_tas_uhb_allowed = data.usa_tas_uhb_allowed;
	} else {
		cmd.v3.override_tas_iec = cpu_to_le16(data.override_tas_iec);
		cmd.v3.enable_tas_iec = cpu_to_le16(data.enable_tas_iec);
	}

	cmd_size = sizeof(struct iwl_tas_config_cmd_common);
	if (fw_ver >= 3)
		/* v4 is the same size as v3 */
		cmd_size += sizeof(struct iwl_tas_config_cmd_v3);

	ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, cmd_size, &cmd);
	if (ret < 0)
		IWL_DEBUG_RADIO(mvm, "failed to send TAS_CONFIG (%d)\n", ret);
}
static bool iwl_mvm_eval_dsm_rfi(struct iwl_mvm *mvm)
{
	u32 value = 0;
	/* default behaviour is disabled */
	bool bios_enable_rfi = false;
	int ret = iwl_bios_get_dsm(&mvm->fwrt, DSM_FUNC_RFI_CONFIG, &value);

	if (ret < 0) {
		IWL_DEBUG_RADIO(mvm, "Failed to get DSM RFI, ret=%d\n", ret);
		return bios_enable_rfi;
	}

	value &= DSM_VALUE_RFI_DISABLE;
	/* RFI BIOS CONFIG value can be 0 or 3 only.
	 * i.e. 0 means DDR and DLVR enabled, 3 means DDR and DLVR disabled.
	 * 1 and 2 are invalid BIOS configurations, so it's not possible to
	 * disable DDR/DLVR separately.
	 */
	if (!value) {
		IWL_DEBUG_RADIO(mvm, "DSM RFI is evaluated to enable\n");
		bios_enable_rfi = true;
	} else if (value == DSM_VALUE_RFI_DISABLE) {
		IWL_DEBUG_RADIO(mvm, "DSM RFI is evaluated to disable\n");
	} else {
		IWL_DEBUG_RADIO(mvm,
				"DSM RFI got invalid value, value=%d\n", value);
	}

	return bios_enable_rfi;
}
static void iwl_mvm_lari_cfg(struct iwl_mvm *mvm)
{
	struct iwl_lari_config_change_cmd cmd;
	size_t cmd_size;
	int ret;

	ret = iwl_fill_lari_config(&mvm->fwrt, &cmd, &cmd_size);
	if (!ret) {
		ret = iwl_mvm_send_cmd_pdu(mvm,
					   WIDE_ID(REGULATORY_AND_NVM_GROUP,
						   LARI_CONFIG_CHANGE),
					   0, cmd_size, &cmd);
		if (ret < 0)
			IWL_DEBUG_RADIO(mvm,
					"Failed to send LARI_CONFIG_CHANGE (%d)\n",
					ret);
	}
}
void iwl_mvm_get_bios_tables(struct iwl_mvm *mvm)
{
	int ret;

	iwl_acpi_get_guid_lock_status(&mvm->fwrt);

	/* read PPAG table */
	ret = iwl_bios_get_ppag_table(&mvm->fwrt);
	if (ret < 0) {
		IWL_DEBUG_RADIO(mvm,
				"PPAG BIOS table invalid or unavailable. (%d)\n",
				ret);
	}

	/* read SAR tables */
	ret = iwl_bios_get_wrds_table(&mvm->fwrt);
	if (ret < 0) {
		IWL_DEBUG_RADIO(mvm,
				"WRDS SAR BIOS table invalid or unavailable. (%d)\n",
				ret);
		/*
		 * If not available, don't fail and don't bother with EWRD and
		 * WGDS.
		 */
		if (!iwl_bios_get_wgds_table(&mvm->fwrt)) {
			/*
			 * If basic SAR is not available, we check for WGDS,
			 * which should *not* be available either. If it is
			 * available, issue an error, because we can't use SAR
			 * Geo without basic SAR.
			 */
			IWL_ERR(mvm, "BIOS contains WGDS but no WRDS\n");
		}
	} else {
		ret = iwl_bios_get_ewrd_table(&mvm->fwrt);
		/* if EWRD is not available, we can still use
		 * WRDS, so don't fail */
		if (ret < 0)
			IWL_DEBUG_RADIO(mvm,
					"EWRD SAR BIOS table invalid or unavailable. (%d)\n",
					ret);

		/* read geo SAR table */
		if (iwl_sar_geo_support(&mvm->fwrt)) {
			ret = iwl_bios_get_wgds_table(&mvm->fwrt);
			if (ret < 0)
				IWL_DEBUG_RADIO(mvm,
						"Geo SAR BIOS table invalid or unavailable. (%d)\n",
						ret);
				/* we don't fail if the table is not available */
		}
	}

	iwl_acpi_get_phy_filters(&mvm->fwrt, &mvm->phy_filters);

	if (iwl_bios_get_eckv(&mvm->fwrt, &mvm->ext_clock_valid))
		IWL_DEBUG_RADIO(mvm, "ECKV table doesn't exist in BIOS\n");
}
static void iwl_mvm_disconnect_iterator(void *data, u8 *mac,
					struct ieee80211_vif *vif)
{
	if (vif->type == NL80211_IFTYPE_STATION)
		ieee80211_hw_restart_disconnect(vif);
}
void iwl_mvm_send_recovery_cmd(struct iwl_mvm *mvm, u32 flags)
{
	u32 error_log_size = mvm->fw->ucode_capa.error_log_size;
	u32 status = 0;
	int ret;
	struct iwl_fw_error_recovery_cmd recovery_cmd = {
		.flags = cpu_to_le32(flags),
		.buf_size = 0,
	};
	struct iwl_host_cmd host_cmd = {
		.id = WIDE_ID(SYSTEM_GROUP, FW_ERROR_RECOVERY_CMD),
		.data = {&recovery_cmd, },
		.len = {sizeof(recovery_cmd), },
	};

	/* no error log was defined in TLV */
	if (!error_log_size)
		return;

	if (flags & ERROR_RECOVERY_UPDATE_DB) {
		/* no buf was allocated while HW reset */
		if (!mvm->error_recovery_buf)
			return;

		host_cmd.data[1] = mvm->error_recovery_buf;
		host_cmd.len[1] = error_log_size;
		host_cmd.dataflags[1] = IWL_HCMD_DFL_NOCOPY;
		recovery_cmd.buf_size = cpu_to_le32(error_log_size);
	}

	ret = iwl_mvm_send_cmd_status(mvm, &host_cmd, &status);
	kfree(mvm->error_recovery_buf);
	mvm->error_recovery_buf = NULL;

	if (ret) {
		IWL_ERR(mvm, "Failed to send recovery cmd %d\n", ret);
		return;
	}

	/* the response is only relevant for ERROR_RECOVERY_UPDATE_DB */
	if (flags & ERROR_RECOVERY_UPDATE_DB) {
		if (status) {
			IWL_ERR(mvm,
				"Failed to send recovery cmd blob was invalid %d\n",
				status);

			ieee80211_iterate_interfaces(mvm->hw, 0,
						     iwl_mvm_disconnect_iterator,
						     mvm);
		}
	}
}
*mvm
)
1363 return iwl_mvm_sar_select_profile(mvm
, 1, 1);
static int iwl_mvm_load_rt_fw(struct iwl_mvm *mvm)
{
	int ret;

	if (iwl_mvm_has_unified_ucode(mvm))
		return iwl_run_unified_mvm_ucode(mvm);

	ret = iwl_run_init_mvm_ucode(mvm);

	if (ret) {
		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
		return ret;
	}

	iwl_fw_dbg_stop_sync(&mvm->fwrt);
	iwl_trans_stop_device(mvm->trans);
	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	mvm->rfkill_safe_init_done = false;
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
	if (ret)
		return ret;

	mvm->rfkill_safe_init_done = true;

	iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_AFTER_ALIVE,
			       NULL);

	return iwl_init_paging(&mvm->fwrt, mvm->fwrt.cur_fw_img);
}
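
/*
 * Full runtime bring-up: load the RT firmware and send the initial
 * configuration (antennas, PHY, BT, RSS, power, regulatory, thermal,
 * SAR/TAS tables) before mac80211 starts using the device.
 */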
int iwl_mvm_up(struct iwl_mvm *mvm)
{
	int ret, i;
	struct ieee80211_supported_band *sband = NULL;

	lockdep_assert_wiphy(mvm->hw->wiphy);
	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	ret = iwl_mvm_load_rt_fw(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
		if (ret != -ERFKILL && !mvm->fw_product_reset)
			iwl_fw_dbg_error_collect(&mvm->fwrt,
						 FW_DBG_TRIGGER_DRIVER);
		goto error;
	}

	/* FW loaded successfully */
	mvm->fw_product_reset = false;

	iwl_fw_disable_dbg_asserts(&mvm->fwrt);
	iwl_get_shared_mem_conf(&mvm->fwrt);

	ret = iwl_mvm_sf_update(mvm, NULL, false);
	if (ret)
		IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");

	if (!iwl_trans_dbg_ini_valid(mvm->trans)) {
		mvm->fwrt.dump.conf = FW_DBG_INVALID;
		/* if we have a destination, assume EARLY START */
		if (mvm->fw->dbg.dest_tlv)
			mvm->fwrt.dump.conf = FW_DBG_START_FROM_ALIVE;
		iwl_fw_start_dbg_conf(&mvm->fwrt, FW_DBG_START_FROM_ALIVE);
	}

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	if (!iwl_mvm_has_unified_ucode(mvm)) {
		/* Send phy db control command and then phy db calibration */
		ret = iwl_send_phy_db_data(mvm->phy_db);
		if (ret)
			goto error;
		ret = iwl_send_phy_cfg_cmd(mvm);
		if (ret)
			goto error;
	}

	ret = iwl_mvm_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_SOC_LATENCY_SUPPORT)) {
		ret = iwl_set_soc_latency(&mvm->fwrt);
		if (ret)
			goto error;
	}

	iwl_mvm_lari_cfg(mvm);

	/* Init RSS configuration */
	ret = iwl_configure_rxq(&mvm->fwrt);
	if (ret)
		goto error;

	if (iwl_mvm_has_new_rx_api(mvm)) {
		ret = iwl_send_rss_cfg_cmd(mvm);
		if (ret) {
			IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
				ret);
			goto error;
		}
	}

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);
		RCU_INIT_POINTER(mvm->fw_id_to_link_sta[i], NULL);
	}

	for (i = 0; i < IWL_FW_MAX_LINK_ID + 1; i++)
		RCU_INIT_POINTER(mvm->link_id_to_link_conf[i], NULL);

	mvm->tdls_cs.peer.sta_id = IWL_INVALID_STA;

	/* reset quota debouncing buffer - 0xff will yield invalid data */
	memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_DQA_SUPPORT)) {
		ret = iwl_mvm_send_dqa_cmd(mvm);
		if (ret)
			goto error;
	}

	/*
	 * Add auxiliary station for scanning.
	 * Newer versions of this command imply that the fw uses an
	 * internal aux station for all aux activities that don't
	 * require a dedicated data queue.
	 */
	if (!iwl_mvm_has_new_station_api(mvm->fw)) {
		/*
		 * In older versions the aux station uses a mac id, like
		 * other stations, and not a lmac id
		 */
		ret = iwl_mvm_add_aux_sta(mvm, MAC_INDEX_AUX);
		if (ret)
			goto error;
	}

	/* Add all the PHY contexts */
	i = 0;
	while (!sband && i < NUM_NL80211_BANDS)
		sband = mvm->hw->wiphy->bands[i++];

	if (WARN_ON_ONCE(!sband)) {
		ret = -ENODEV;
		goto error;
	}

	if (iwl_mvm_is_tt_in_fw(mvm)) {
		/* in order to give the responsibility of ct-kill and
		 * TX backoff to FW we need to send empty temperature reporting
		 * cmd during init time
		 */
		iwl_mvm_send_temp_report_ths_cmd(mvm);
	} else {
		/* Initialize tx backoffs to the minimal possible */
		iwl_mvm_tt_tx_backoff(mvm, 0);
	}

#ifdef CONFIG_THERMAL
	/* TODO: read the budget from BIOS / Platform NVM */

	/*
	 * In case there is no budget from BIOS / Platform NVM the default
	 * budget should be 2000mW (cooling state 0).
	 */
	if (iwl_mvm_is_ctdp_supported(mvm)) {
		ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
					   mvm->cooling_dev.cur_state);
		if (ret)
			goto error;
	}
#endif

	if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_SET_LTR_GEN2))
		WARN_ON(iwl_mvm_config_ltr(mvm));

	ret = iwl_mvm_power_update_device(mvm);
	if (ret)
		goto error;

	/*
	 * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
	 * anyway, so don't init MCC.
	 */
	if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
		ret = iwl_mvm_init_mcc(mvm);
		if (ret)
			goto error;
	}

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
		mvm->scan_type = IWL_SCAN_TYPE_NOT_SET;
		mvm->hb_scan_type = IWL_SCAN_TYPE_NOT_SET;
		ret = iwl_mvm_config_scan(mvm);
		if (ret)
			goto error;
	}

	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_UPDATE_DB);

		if (mvm->time_sync.active)
			iwl_mvm_time_sync_config(mvm, mvm->time_sync.peer_addr,
						 IWL_TIME_SYNC_PROTOCOL_TM |
						 IWL_TIME_SYNC_PROTOCOL_FTM);
	}

	if (!mvm->ptp_data.ptp_clock)
		iwl_mvm_ptp_init(mvm);

	ret = iwl_mvm_ppag_init(mvm);
	if (ret)
		goto error;

	ret = iwl_mvm_sar_init(mvm);
	if (ret == 0)
		ret = iwl_mvm_sar_geo_init(mvm);
	if (ret < 0)
		goto error;

	ret = iwl_mvm_sgom_init(mvm);
	if (ret)
		goto error;

	iwl_mvm_tas_init(mvm);
	iwl_mvm_leds_sync(mvm);
	iwl_mvm_uats_init(mvm);

	if (iwl_rfi_supported(mvm)) {
		if (iwl_mvm_eval_dsm_rfi(mvm))
			iwl_rfi_send_config_cmd(mvm, NULL);
	}

	iwl_mvm_mei_device_state(mvm, true);

	IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
	return 0;
 error:
	iwl_mvm_stop_device(mvm);
	return ret;
}
*mvm
)
1624 lockdep_assert_wiphy(mvm
->hw
->wiphy
);
1625 lockdep_assert_held(&mvm
->mutex
);
1627 ret
= iwl_trans_start_hw(mvm
->trans
);
1631 ret
= iwl_mvm_load_ucode_wait_alive(mvm
, IWL_UCODE_WOWLAN
);
1633 IWL_ERR(mvm
, "Failed to start WoWLAN firmware: %d\n", ret
);
1637 ret
= iwl_send_tx_ant_cfg(mvm
, iwl_mvm_get_valid_tx_ant(mvm
));
1641 /* Send phy db control command and then phy db calibration*/
1642 ret
= iwl_send_phy_db_data(mvm
->phy_db
);
1646 ret
= iwl_send_phy_cfg_cmd(mvm
);
1650 /* init the fw <-> mac80211 STA mapping */
1651 for (i
= 0; i
< mvm
->fw
->ucode_capa
.num_stations
; i
++) {
1652 RCU_INIT_POINTER(mvm
->fw_id_to_mac_id
[i
], NULL
);
1653 RCU_INIT_POINTER(mvm
->fw_id_to_link_sta
[i
], NULL
);
1656 if (!iwl_mvm_has_new_station_api(mvm
->fw
)) {
1658 * Add auxiliary station for scanning.
1659 * Newer versions of this command implies that the fw uses
1660 * internal aux station for all aux activities that don't
1661 * requires a dedicated data queue.
1662 * In old version the aux station uses mac id like other
1663 * station and not lmac id
1665 ret
= iwl_mvm_add_aux_sta(mvm
, MAC_INDEX_AUX
);
1672 iwl_mvm_stop_device(mvm
);
1676 void iwl_mvm_rx_mfuart_notif(struct iwl_mvm
*mvm
,
1677 struct iwl_rx_cmd_buffer
*rxb
)
1679 struct iwl_rx_packet
*pkt
= rxb_addr(rxb
);
1680 struct iwl_mfuart_load_notif
*mfuart_notif
= (void *)pkt
->data
;
1683 "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
1684 le32_to_cpu(mfuart_notif
->installed_ver
),
1685 le32_to_cpu(mfuart_notif
->external_ver
),
1686 le32_to_cpu(mfuart_notif
->status
),
1687 le32_to_cpu(mfuart_notif
->duration
));
1689 if (iwl_rx_packet_payload_len(pkt
) == sizeof(*mfuart_notif
))
1691 "MFUART: image size: 0x%08x\n",
1692 le32_to_cpu(mfuart_notif
->image_size
));