// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2012-2014, 2018-2019, 2021-2024 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/firmware.h>
#include <linux/rtnetlink.h>
#include "mvm.h"
#include "iwl-nvm-utils.h"
#include "iwl-nvm-parse.h"
/* Default NVM chunk size to read/write in one NVM_ACCESS_CMD */
#define IWL_NVM_DEFAULT_CHUNK_SIZE (2 * 1024)

#define NVM_WRITE_OPCODE 1
#define NVM_READ_OPCODE 0

/* load NVM chunk response status codes */
enum {
        READ_NVM_CHUNK_SUCCEED = 0,
        READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
};
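
/*
 * NVM contents are transferred between the driver and the firmware in
 * chunks of at most IWL_NVM_DEFAULT_CHUNK_SIZE bytes, using NVM_ACCESS_CMD
 * host commands. A read that returns fewer bytes than requested marks the
 * end of a section; NOT_VALID_ADDRESS on a non-zero offset likewise means
 * the previous chunk was the last valid one of that section.
 */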
/*
 * Prepare the NVM host command with the pointers to the NVM buffer
 * and send it to the firmware.
 */
static int iwl_nvm_write_chunk(struct iwl_mvm *mvm, u16 section,
                               u16 offset, u16 length, const u8 *data)
{
        struct iwl_nvm_access_cmd nvm_access_cmd = {
                .offset = cpu_to_le16(offset),
                .length = cpu_to_le16(length),
                .type = cpu_to_le16(section),
                .op_code = NVM_WRITE_OPCODE,
        };
        struct iwl_host_cmd cmd = {
                .id = NVM_ACCESS_CMD,
                .len = { sizeof(struct iwl_nvm_access_cmd), length },
                .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
                .data = { &nvm_access_cmd, data },
                /* data may come from vmalloc, so use _DUP */
                .dataflags = { 0, IWL_HCMD_DFL_DUP },
        };
        struct iwl_rx_packet *pkt;
        struct iwl_nvm_access_resp *nvm_resp;
        int ret;

        ret = iwl_mvm_send_cmd(mvm, &cmd);
        if (ret)
                return ret;

        pkt = cmd.resp_pkt;

        /* Extract & check NVM write response */
        nvm_resp = (void *)pkt->data;
        if (le16_to_cpu(nvm_resp->status) != READ_NVM_CHUNK_SUCCEED) {
                IWL_ERR(mvm,
                        "NVM access write command failed for section %u (status = 0x%x)\n",
                        section, le16_to_cpu(nvm_resp->status));
                ret = -EIO;
        }

        iwl_free_resp(&cmd);
        return ret;
}
static int iwl_nvm_read_chunk(struct iwl_mvm *mvm, u16 section,
                              u16 offset, u16 length, u8 *data)
{
        struct iwl_nvm_access_cmd nvm_access_cmd = {
                .offset = cpu_to_le16(offset),
                .length = cpu_to_le16(length),
                .type = cpu_to_le16(section),
                .op_code = NVM_READ_OPCODE,
        };
        struct iwl_nvm_access_resp *nvm_resp;
        struct iwl_rx_packet *pkt;
        struct iwl_host_cmd cmd = {
                .id = NVM_ACCESS_CMD,
                .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
                .data = { &nvm_access_cmd, },
        };
        int ret, bytes_read, offset_read;
        u8 *resp_data;

        cmd.len[0] = sizeof(struct iwl_nvm_access_cmd);

        ret = iwl_mvm_send_cmd(mvm, &cmd);
        if (ret)
                return ret;

        pkt = cmd.resp_pkt;

        /* Extract NVM response */
        nvm_resp = (void *)pkt->data;
        ret = le16_to_cpu(nvm_resp->status);
        bytes_read = le16_to_cpu(nvm_resp->length);
        offset_read = le16_to_cpu(nvm_resp->offset);
        resp_data = nvm_resp->data;
        if (ret) {
                if ((offset != 0) &&
                    (ret == READ_NVM_CHUNK_NOT_VALID_ADDRESS)) {
                        /*
                         * Meaning of NOT_VALID_ADDRESS:
                         * the driver tried to read a chunk at an address
                         * that is a multiple of 2K and got an error because
                         * that address is empty.
                         * Meaning of (offset != 0): the driver already read
                         * valid data from another chunk, so this case is
                         * not an error.
                         */
                        IWL_DEBUG_EEPROM(mvm->trans->dev,
                                         "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
                                         offset);
                        ret = 0;
                } else {
                        IWL_DEBUG_EEPROM(mvm->trans->dev,
                                         "NVM access command failed with status %d (device: %s)\n",
                                         ret, mvm->trans->name);
                        ret = -ENODATA;
                }
                goto exit;
        }

        if (offset_read != offset) {
                IWL_ERR(mvm, "NVM ACCESS response with invalid offset %d\n",
                        offset_read);
                ret = -EINVAL;
                goto exit;
        }

        /* Copy the chunk into the caller's section buffer */
        memcpy(data + offset, resp_data, bytes_read);
        ret = bytes_read;

exit:
        iwl_free_resp(&cmd);
        return ret;
}
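
/*
 * Write a complete section to the NVM, splitting the payload into
 * IWL_NVM_DEFAULT_CHUNK_SIZE chunks plus a final remainder chunk.
 */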
static int iwl_nvm_write_section(struct iwl_mvm *mvm, u16 section,
                                 const u8 *data, u16 length)
{
        int offset = 0;

        /* copy data in chunks of 2k (and remainder if any) */
        while (offset < length) {
                int chunk_size, ret;

                chunk_size = min(IWL_NVM_DEFAULT_CHUNK_SIZE,
                                 length - offset);

                ret = iwl_nvm_write_chunk(mvm, section, offset,
                                          chunk_size, data + offset);
                if (ret < 0)
                        return ret;

                offset += chunk_size;
        }

        return 0;
}
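
/*
 * Example: a 5000-byte section is written as three chunks of 2048, 2048
 * and 904 bytes, at offsets 0, 2048 and 4096 respectively.
 */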
/*
 * Reads an NVM section completely.
 * NICs prior to the 7000 family don't have a real NVM, but just read
 * section 0, which is the EEPROM. Because the EEPROM reading is not
 * limited by the uCode, we need to manually check in this case that we
 * don't overflow and try to read more than the EEPROM size.
 * For 7000 family NICs, we supply the maximal size we can read, and
 * the uCode fills the response with as much data as fits, without
 * overflowing, so no check is needed.
 */
static int iwl_nvm_read_section(struct iwl_mvm *mvm, u16 section,
                                u8 *data, u32 size_read)
{
        u16 length, offset = 0;
        int ret;

        /* Set nvm section read length */
        length = IWL_NVM_DEFAULT_CHUNK_SIZE;

        ret = length;

        /* Read the NVM until exhausted (reading less than requested) */
        while (ret == length) {
                /* Check no memory assumptions fail and cause an overflow */
                if ((size_read + offset + length) >
                    mvm->trans->trans_cfg->base_params->eeprom_size) {
                        IWL_ERR(mvm, "EEPROM size is too small for NVM\n");
                        return -ENOBUFS;
                }

                ret = iwl_nvm_read_chunk(mvm, section, offset, length, data);
                if (ret < 0) {
                        IWL_DEBUG_EEPROM(mvm->trans->dev,
                                         "Cannot read NVM from section %d offset %d, length %d\n",
                                         section, offset, length);
                        return ret;
                }
                offset += ret;
        }

        iwl_nvm_fixups(mvm->trans->hw_id, section, data, offset);

        IWL_DEBUG_EEPROM(mvm->trans->dev,
                         "NVM section %d read completed\n", section);
        return offset;
}
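
/*
 * Parse the raw NVM sections collected in mvm->nvm_sections into an
 * iwl_nvm_data structure. Note that the HW section is stored big-endian
 * (__be16) while all other sections are little-endian (__le16), which is
 * why the section pointers below are cast differently.
 */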
static struct iwl_nvm_data *
iwl_parse_nvm_sections(struct iwl_mvm *mvm)
{
        struct iwl_nvm_section *sections = mvm->nvm_sections;
        const __be16 *hw;
        const __le16 *sw, *calib, *regulatory, *mac_override, *phy_sku;
        u8 tx_ant = mvm->fw->valid_tx_ant;
        u8 rx_ant = mvm->fw->valid_rx_ant;
        int regulatory_type;

        /* Checking for required sections */
        if (mvm->trans->cfg->nvm_type == IWL_NVM) {
                if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
                    !mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data) {
                        IWL_ERR(mvm, "Can't parse empty OTP/NVM sections\n");
                        return NULL;
                }
        } else {
                if (mvm->trans->cfg->nvm_type == IWL_NVM_SDP)
                        regulatory_type = NVM_SECTION_TYPE_REGULATORY_SDP;
                else
                        regulatory_type = NVM_SECTION_TYPE_REGULATORY;

                /* SW and REGULATORY sections are mandatory */
                if (!mvm->nvm_sections[NVM_SECTION_TYPE_SW].data ||
                    !mvm->nvm_sections[regulatory_type].data) {
                        IWL_ERR(mvm,
                                "Can't parse empty family 8000 OTP/NVM sections\n");
                        return NULL;
                }
                /* MAC_OVERRIDE or at least HW section must exist */
                if (!mvm->nvm_sections[mvm->cfg->nvm_hw_section_num].data &&
                    !mvm->nvm_sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
                        IWL_ERR(mvm,
                                "Can't parse mac_address, empty sections\n");
                        return NULL;
                }

                /* PHY_SKU section is mandatory in B0 */
                if (mvm->trans->cfg->nvm_type == IWL_NVM_EXT &&
                    !mvm->nvm_sections[NVM_SECTION_TYPE_PHY_SKU].data) {
                        IWL_ERR(mvm,
                                "Can't parse phy_sku in B0, empty sections\n");
                        return NULL;
                }
        }

        hw = (const __be16 *)sections[mvm->cfg->nvm_hw_section_num].data;
        sw = (const __le16 *)sections[NVM_SECTION_TYPE_SW].data;
        calib = (const __le16 *)sections[NVM_SECTION_TYPE_CALIBRATION].data;
        mac_override =
                (const __le16 *)sections[NVM_SECTION_TYPE_MAC_OVERRIDE].data;
        phy_sku = (const __le16 *)sections[NVM_SECTION_TYPE_PHY_SKU].data;

        regulatory = mvm->trans->cfg->nvm_type == IWL_NVM_SDP ?
                (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY_SDP].data :
                (const __le16 *)sections[NVM_SECTION_TYPE_REGULATORY].data;

        if (mvm->set_tx_ant)
                tx_ant &= mvm->set_tx_ant;

        if (mvm->set_rx_ant)
                rx_ant &= mvm->set_rx_ant;

        return iwl_parse_nvm_data(mvm->trans, mvm->cfg, mvm->fw, hw, sw, calib,
                                  regulatory, mac_override, phy_sku,
                                  tx_ant, rx_ant);
}
/* Loads the NVM data stored in mvm->nvm_sections into the NIC */
int iwl_mvm_load_nvm_to_nic(struct iwl_mvm *mvm)
{
        int i, ret = 0;
        struct iwl_nvm_section *sections = mvm->nvm_sections;

        IWL_DEBUG_EEPROM(mvm->trans->dev, "Write to NVM\n");

        for (i = 0; i < ARRAY_SIZE(mvm->nvm_sections); i++) {
                if (!mvm->nvm_sections[i].data || !mvm->nvm_sections[i].length)
                        continue;
                ret = iwl_nvm_write_section(mvm, i, sections[i].data,
                                            sections[i].length);
                if (ret < 0) {
                        IWL_ERR(mvm, "iwl_mvm_send_cmd failed: %d\n", ret);
                        break;
                }
        }
        return ret;
}
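
/*
 * Read every NVM section from the device into mvm->nvm_sections, keep
 * per-section debugfs blobs when CONFIG_IWLWIFI_DEBUGFS is set, apply an
 * optional external NVM file override, and finally parse the sections
 * into mvm->nvm_data.
 */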
int iwl_nvm_init(struct iwl_mvm *mvm)
{
        int ret, section;
        u32 size_read = 0;
        u8 *nvm_buffer, *temp;
        const char *nvm_file_C = mvm->cfg->default_nvm_file_C_step;

        if (WARN_ON_ONCE(mvm->cfg->nvm_hw_section_num >= NVM_MAX_NUM_SECTIONS))
                return -EINVAL;

        /* load NVM values from nic */
        /* Read From FW NVM */
        IWL_DEBUG_EEPROM(mvm->trans->dev, "Read from NVM\n");

        nvm_buffer = kmalloc(mvm->trans->trans_cfg->base_params->eeprom_size,
                             GFP_KERNEL);
        if (!nvm_buffer)
                return -ENOMEM;
        for (section = 0; section < NVM_MAX_NUM_SECTIONS; section++) {
                /* we override the constness for initial read */
                ret = iwl_nvm_read_section(mvm, section, nvm_buffer,
                                           size_read);
                if (ret == -ENODATA) {
                        ret = 0;
                        continue;
                }
                if (ret < 0)
                        break;
                size_read += ret;
                temp = kmemdup(nvm_buffer, ret, GFP_KERNEL);
                if (!temp) {
                        ret = -ENOMEM;
                        break;
                }

                iwl_nvm_fixups(mvm->trans->hw_id, section, temp, ret);

                mvm->nvm_sections[section].data = temp;
                mvm->nvm_sections[section].length = ret;

#ifdef CONFIG_IWLWIFI_DEBUGFS
                switch (section) {
                case NVM_SECTION_TYPE_SW:
                        mvm->nvm_sw_blob.data = temp;
                        mvm->nvm_sw_blob.size = ret;
                        break;
                case NVM_SECTION_TYPE_CALIBRATION:
                        mvm->nvm_calib_blob.data = temp;
                        mvm->nvm_calib_blob.size = ret;
                        break;
                case NVM_SECTION_TYPE_PRODUCTION:
                        mvm->nvm_prod_blob.data = temp;
                        mvm->nvm_prod_blob.size = ret;
                        break;
                case NVM_SECTION_TYPE_PHY_SKU:
                        mvm->nvm_phy_sku_blob.data = temp;
                        mvm->nvm_phy_sku_blob.size = ret;
                        break;
                case NVM_SECTION_TYPE_REGULATORY_SDP:
                case NVM_SECTION_TYPE_REGULATORY:
                        mvm->nvm_reg_blob.data = temp;
                        mvm->nvm_reg_blob.size = ret;
                        break;
                default:
                        if (section == mvm->cfg->nvm_hw_section_num) {
                                mvm->nvm_hw_blob.data = temp;
                                mvm->nvm_hw_blob.size = ret;
                                break;
                        }
                }
#endif
        }
        if (!size_read)
                IWL_ERR(mvm, "OTP is blank\n");
        kfree(nvm_buffer);

        /* Load an external NVM file only if one was selected in the mod param */
        if (mvm->nvm_file_name) {
                /* read the external NVM file given by the mod param */
                ret = iwl_read_external_nvm(mvm->trans, mvm->nvm_file_name,
                                            mvm->nvm_sections);
                if (ret) {
                        mvm->nvm_file_name = nvm_file_C;

                        if ((ret == -EFAULT || ret == -ENOENT) &&
                            mvm->nvm_file_name) {
                                /* if reading the NVM file failed, retry with
                                 * the default C-step file */
                                ret = iwl_read_external_nvm(mvm->trans,
                                                            mvm->nvm_file_name,
                                                            mvm->nvm_sections);
                                if (ret)
                                        return ret;
                        } else {
                                return ret;
                        }
                }
        }

        /* parse the relevant nvm sections */
        mvm->nvm_data = iwl_parse_nvm_sections(mvm);
        if (!mvm->nvm_data)
                return -ENODATA;
        IWL_DEBUG_EEPROM(mvm->trans->dev, "nvm version = %x\n",
                         mvm->nvm_data->nvm_version);

        return ret < 0 ? ret : 0;
}
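
/*
 * Send an MCC_UPDATE_CMD for the given alpha2 country code and normalize
 * the firmware response. Depending on the notification version and
 * firmware capabilities the response arrives as v8, v4 or v3; all three
 * are copied into a freshly allocated v8 layout so callers only have to
 * deal with one format.
 */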
struct iwl_mcc_update_resp_v8 *
iwl_mvm_update_mcc(struct iwl_mvm *mvm, const char *alpha2,
                   enum iwl_mcc_source src_id)
{
        struct iwl_mcc_update_cmd mcc_update_cmd = {
                .mcc = cpu_to_le16(alpha2[0] << 8 | alpha2[1]),
                .source_id = (u8)src_id,
        };
        struct iwl_mcc_update_resp_v8 *resp_cp;
        struct iwl_rx_packet *pkt;
        struct iwl_host_cmd cmd = {
                .id = MCC_UPDATE_CMD,
                .flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
                .data = { &mcc_update_cmd },
        };
        int ret, resp_ver;
        u32 status;
        int resp_len, n_channels;
        u16 mcc;

        if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
                return ERR_PTR(-EOPNOTSUPP);

        cmd.len[0] = sizeof(struct iwl_mcc_update_cmd);

        IWL_DEBUG_LAR(mvm, "send MCC update to FW with '%c%c' src = %d\n",
                      alpha2[0], alpha2[1], src_id);

        ret = iwl_mvm_send_cmd(mvm, &cmd);
        if (ret)
                return ERR_PTR(ret);

        pkt = cmd.resp_pkt;

        resp_ver = iwl_fw_lookup_notif_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP,
                                           MCC_UPDATE_CMD, 0);

        /* Extract MCC response */
        if (resp_ver >= 8) {
                struct iwl_mcc_update_resp_v8 *mcc_resp_v8 = (void *)pkt->data;

                n_channels = __le32_to_cpu(mcc_resp_v8->n_channels);
                if (iwl_rx_packet_payload_len(pkt) !=
                    struct_size(mcc_resp_v8, channels, n_channels)) {
                        resp_cp = ERR_PTR(-EINVAL);
                        goto exit;
                }
                resp_len = struct_size(resp_cp, channels, n_channels);
                resp_cp = kzalloc(resp_len, GFP_KERNEL);
                if (!resp_cp) {
                        resp_cp = ERR_PTR(-ENOMEM);
                        goto exit;
                }
                resp_cp->status = mcc_resp_v8->status;
                resp_cp->mcc = mcc_resp_v8->mcc;
                resp_cp->cap = mcc_resp_v8->cap;
                resp_cp->source_id = mcc_resp_v8->source_id;
                resp_cp->time = mcc_resp_v8->time;
                resp_cp->geo_info = mcc_resp_v8->geo_info;
                resp_cp->n_channels = mcc_resp_v8->n_channels;
                memcpy(resp_cp->channels, mcc_resp_v8->channels,
                       n_channels * sizeof(__le32));
        } else if (fw_has_capa(&mvm->fw->ucode_capa,
                               IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT)) {
                struct iwl_mcc_update_resp_v4 *mcc_resp_v4 = (void *)pkt->data;

                n_channels = __le32_to_cpu(mcc_resp_v4->n_channels);
                if (iwl_rx_packet_payload_len(pkt) !=
                    struct_size(mcc_resp_v4, channels, n_channels)) {
                        resp_cp = ERR_PTR(-EINVAL);
                        goto exit;
                }
                resp_len = struct_size(resp_cp, channels, n_channels);
                resp_cp = kzalloc(resp_len, GFP_KERNEL);
                if (!resp_cp) {
                        resp_cp = ERR_PTR(-ENOMEM);
                        goto exit;
                }

                resp_cp->status = mcc_resp_v4->status;
                resp_cp->mcc = mcc_resp_v4->mcc;
                resp_cp->cap = cpu_to_le32(le16_to_cpu(mcc_resp_v4->cap));
                resp_cp->source_id = mcc_resp_v4->source_id;
                resp_cp->time = mcc_resp_v4->time;
                resp_cp->geo_info = mcc_resp_v4->geo_info;
                resp_cp->n_channels = mcc_resp_v4->n_channels;
                memcpy(resp_cp->channels, mcc_resp_v4->channels,
                       n_channels * sizeof(__le32));
        } else {
                struct iwl_mcc_update_resp_v3 *mcc_resp_v3 = (void *)pkt->data;

                n_channels = __le32_to_cpu(mcc_resp_v3->n_channels);
                if (iwl_rx_packet_payload_len(pkt) !=
                    struct_size(mcc_resp_v3, channels, n_channels)) {
                        resp_cp = ERR_PTR(-EINVAL);
                        goto exit;
                }
                resp_len = struct_size(resp_cp, channels, n_channels);
                resp_cp = kzalloc(resp_len, GFP_KERNEL);
                if (!resp_cp) {
                        resp_cp = ERR_PTR(-ENOMEM);
                        goto exit;
                }

                resp_cp->status = mcc_resp_v3->status;
                resp_cp->mcc = mcc_resp_v3->mcc;
                resp_cp->cap = cpu_to_le32(mcc_resp_v3->cap);
                resp_cp->source_id = mcc_resp_v3->source_id;
                resp_cp->time = mcc_resp_v3->time;
                resp_cp->geo_info = mcc_resp_v3->geo_info;
                resp_cp->n_channels = mcc_resp_v3->n_channels;
                memcpy(resp_cp->channels, mcc_resp_v3->channels,
                       n_channels * sizeof(__le32));
        }

        status = le32_to_cpu(resp_cp->status);

        mcc = le16_to_cpu(resp_cp->mcc);

        /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
        if (mcc == 0) {
                mcc = 0x3030; /* "00" - world */
                resp_cp->mcc = cpu_to_le16(mcc);
        }

        IWL_DEBUG_LAR(mvm,
                      "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') n_chans: %d\n",
                      status, mcc, mcc >> 8, mcc & 0xff, n_channels);

exit:
        iwl_free_resp(&cmd);
        return resp_cp;
}
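
/*
 * Initialize regulatory (LAR) state: sanity-check that the firmware TLV
 * and the NVM agree on LAR support, replay the last MCC known to the
 * driver, or fall back to the current/BIOS regulatory domain.
 */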
int iwl_mvm_init_mcc(struct iwl_mvm *mvm)
{
        bool tlv_lar;
        bool nvm_lar;
        int retval;
        struct ieee80211_regdomain *regd;
        char mcc[3];

        if (mvm->cfg->nvm_type == IWL_NVM_EXT) {
                tlv_lar = fw_has_capa(&mvm->fw->ucode_capa,
                                      IWL_UCODE_TLV_CAPA_LAR_SUPPORT);
                nvm_lar = mvm->nvm_data->lar_enabled;
                if (tlv_lar != nvm_lar)
                        IWL_INFO(mvm,
                                 "Conflict between TLV & NVM regarding enabling LAR (TLV = %s NVM =%s)\n",
                                 tlv_lar ? "enabled" : "disabled",
                                 nvm_lar ? "enabled" : "disabled");
        }

        if (!iwl_mvm_is_lar_supported(mvm))
                return 0;

        /*
         * try to replay the last set MCC to FW. If it doesn't exist,
         * queue an update to cfg80211 to retrieve the default alpha2 from FW.
         */
        retval = iwl_mvm_init_fw_regd(mvm, true);
        if (retval != -ENOENT)
                return retval;

        /*
         * Driver regulatory hint for initial update, this also informs the
         * firmware we support wifi location updates.
         * Disallow scans that might crash the FW while the LAR regdomain
         * is not set.
         */
        mvm->lar_regdom_set = false;

        regd = iwl_mvm_get_current_regdomain(mvm, NULL);
        if (IS_ERR_OR_NULL(regd))
                return -EIO;

        if (iwl_mvm_is_wifi_mcc_supported(mvm) &&
            !iwl_bios_get_mcc(&mvm->fwrt, mcc)) {
                kfree(regd);
                regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc,
                                             MCC_SOURCE_BIOS, NULL);
                if (IS_ERR_OR_NULL(regd))
                        return -EIO;
        }

        retval = regulatory_set_wiphy_regd_sync(mvm->hw->wiphy, regd);
        kfree(regd);
        return retval;
}
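
/*
 * Handle an MCC update notification coming from the CHUB (firmware).
 * Wifi-sourced updates are ignored while a vif is associated; otherwise
 * the new country code is turned into a regdomain and, if it actually
 * changed, pushed to cfg80211 along with a refreshed SAR geo profile.
 */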
void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm *mvm,
                                struct iwl_rx_cmd_buffer *rxb)
{
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
        struct iwl_mcc_chub_notif *notif = (void *)pkt->data;
        enum iwl_mcc_source src;
        char mcc[3];
        struct ieee80211_regdomain *regd;
        int wgds_tbl_idx;
        bool changed = false;

        lockdep_assert_held(&mvm->mutex);

        if (iwl_mvm_is_vif_assoc(mvm) && notif->source_id == MCC_SOURCE_WIFI) {
                IWL_DEBUG_LAR(mvm, "Ignore mcc update while associated\n");
                return;
        }

        if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm)))
                return;

        mcc[0] = le16_to_cpu(notif->mcc) >> 8;
        mcc[1] = le16_to_cpu(notif->mcc) & 0xff;
        mcc[2] = '\0';
        src = notif->source_id;

        IWL_DEBUG_LAR(mvm,
                      "RX: received chub update mcc cmd (mcc '%s' src %d)\n",
                      mcc, src);
        regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, mcc, src, &changed);
        if (IS_ERR_OR_NULL(regd))
                return;

        if (!changed) {
                IWL_DEBUG_LAR(mvm, "RX: No change in the regulatory data\n");
                goto out;
        }

        wgds_tbl_idx = iwl_mvm_get_sar_geo_profile(mvm);
        if (wgds_tbl_idx < 1)
                IWL_DEBUG_INFO(mvm,
                               "SAR WGDS is disabled or error received (%d)\n",
                               wgds_tbl_idx);
        else
                IWL_DEBUG_INFO(mvm, "SAR WGDS: geo profile %d is configured\n",
                               wgds_tbl_idx);

        regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);

out:
        kfree(regd);
}