1 /******************************************************************************
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
8 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
10 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
11 * Copyright(c) 2018 Intel Corporation
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of version 2 of the GNU General Public License as
15 * published by the Free Software Foundation.
17 * This program is distributed in the hope that it will be useful, but
18 * WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 * General Public License for more details.
22 * The full GNU General Public License is included in this distribution
23 * in the file called COPYING.
25 * Contact Information:
26 * Intel Linux Wireless <linuxwifi@intel.com>
27 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
31 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
32 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
33 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
34 * Copyright(c) 2018 Intel Corporation
35 * All rights reserved.
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
41 * * Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * * Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in
45 * the documentation and/or other materials provided with the
47 * * Neither the name Intel Corporation nor the names of its
48 * contributors may be used to endorse or promote products derived
49 * from this software without specific prior written permission.
51 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
52 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
53 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
54 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
55 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
56 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
57 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
58 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
59 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
60 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
61 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
63 *****************************************************************************/
64 #include <linux/firmware.h>
65 #include <linux/rtnetlink.h>
66 #include "iwl-trans.h"
69 #include "iwl-eeprom-parse.h"
70 #include "iwl-eeprom-read.h"
71 #include "iwl-nvm-parse.h"
/* Default NVM size to read */
#define IWL_NVM_DEFAULT_CHUNK_SIZE (2 * 1024)

#define NVM_WRITE_OPCODE 1
#define NVM_READ_OPCODE 0

/* load nvm chunk response */
enum {
	READ_NVM_CHUNK_SUCCEED = 0,
	READ_NVM_CHUNK_NOT_VALID_ADDRESS = 1
};
88 * prepare the NVM host command w/ the pointers to the nvm buffer
91 static int iwl_nvm_write_chunk(struct iwl_mvm
*mvm
, u16 section
,
92 u16 offset
, u16 length
, const u8
*data
)
94 struct iwl_nvm_access_cmd nvm_access_cmd
= {
95 .offset
= cpu_to_le16(offset
),
96 .length
= cpu_to_le16(length
),
97 .type
= cpu_to_le16(section
),
98 .op_code
= NVM_WRITE_OPCODE
,
100 struct iwl_host_cmd cmd
= {
101 .id
= NVM_ACCESS_CMD
,
102 .len
= { sizeof(struct iwl_nvm_access_cmd
), length
},
103 .flags
= CMD_WANT_SKB
| CMD_SEND_IN_RFKILL
,
104 .data
= { &nvm_access_cmd
, data
},
105 /* data may come from vmalloc, so use _DUP */
106 .dataflags
= { 0, IWL_HCMD_DFL_DUP
},
108 struct iwl_rx_packet
*pkt
;
109 struct iwl_nvm_access_resp
*nvm_resp
;
112 ret
= iwl_mvm_send_cmd(mvm
, &cmd
);
117 /* Extract & check NVM write response */
118 nvm_resp
= (void *)pkt
->data
;
119 if (le16_to_cpu(nvm_resp
->status
) != READ_NVM_CHUNK_SUCCEED
) {
121 "NVM access write command failed for section %u (status = 0x%x)\n",
122 section
, le16_to_cpu(nvm_resp
->status
));
130 static int iwl_nvm_read_chunk(struct iwl_mvm
*mvm
, u16 section
,
131 u16 offset
, u16 length
, u8
*data
)
133 struct iwl_nvm_access_cmd nvm_access_cmd
= {
134 .offset
= cpu_to_le16(offset
),
135 .length
= cpu_to_le16(length
),
136 .type
= cpu_to_le16(section
),
137 .op_code
= NVM_READ_OPCODE
,
139 struct iwl_nvm_access_resp
*nvm_resp
;
140 struct iwl_rx_packet
*pkt
;
141 struct iwl_host_cmd cmd
= {
142 .id
= NVM_ACCESS_CMD
,
143 .flags
= CMD_WANT_SKB
| CMD_SEND_IN_RFKILL
,
144 .data
= { &nvm_access_cmd
, },
146 int ret
, bytes_read
, offset_read
;
149 cmd
.len
[0] = sizeof(struct iwl_nvm_access_cmd
);
151 ret
= iwl_mvm_send_cmd(mvm
, &cmd
);
157 /* Extract NVM response */
158 nvm_resp
= (void *)pkt
->data
;
159 ret
= le16_to_cpu(nvm_resp
->status
);
160 bytes_read
= le16_to_cpu(nvm_resp
->length
);
161 offset_read
= le16_to_cpu(nvm_resp
->offset
);
162 resp_data
= nvm_resp
->data
;
165 (ret
== READ_NVM_CHUNK_NOT_VALID_ADDRESS
)) {
167 * meaning of NOT_VALID_ADDRESS:
168 * driver try to read chunk from address that is
169 * multiple of 2K and got an error since addr is empty.
170 * meaning of (offset != 0): driver already
171 * read valid data from another chunk so this case
174 IWL_DEBUG_EEPROM(mvm
->trans
->dev
,
175 "NVM access command failed on offset 0x%x since that section size is multiple 2K\n",
179 IWL_DEBUG_EEPROM(mvm
->trans
->dev
,
180 "NVM access command failed with status %d (device: %s)\n",
181 ret
, mvm
->trans
->name
);
187 if (offset_read
!= offset
) {
188 IWL_ERR(mvm
, "NVM ACCESS response with invalid offset %d\n",
194 /* Write data to NVM */
195 memcpy(data
+ offset
, resp_data
, bytes_read
);
203 static int iwl_nvm_write_section(struct iwl_mvm
*mvm
, u16 section
,
204 const u8
*data
, u16 length
)
208 /* copy data in chunks of 2k (and remainder if any) */
210 while (offset
< length
) {
213 chunk_size
= min(IWL_NVM_DEFAULT_CHUNK_SIZE
,
216 ret
= iwl_nvm_write_chunk(mvm
, section
, offset
,
217 chunk_size
, data
+ offset
);
221 offset
+= chunk_size
;
228 * Reads an NVM section completely.
229 * NICs prior to 7000 family doesn't have a real NVM, but just read
230 * section 0 which is the EEPROM. Because the EEPROM reading is unlimited
231 * by uCode, we need to manually check in this case that we don't
232 * overflow and try to read more than the EEPROM size.
233 * For 7000 family NICs, we supply the maximal size we can read, and
234 * the uCode fills the response with as much data as we can,
235 * without overflowing, so no check is needed.
237 static int iwl_nvm_read_section(struct iwl_mvm
*mvm
, u16 section
,
238 u8
*data
, u32 size_read
)
240 u16 length
, offset
= 0;
243 /* Set nvm section read length */
244 length
= IWL_NVM_DEFAULT_CHUNK_SIZE
;
248 /* Read the NVM until exhausted (reading less than requested) */
249 while (ret
== length
) {
250 /* Check no memory assumptions fail and cause an overflow */
251 if ((size_read
+ offset
+ length
) >
252 mvm
->trans
->trans_cfg
->base_params
->eeprom_size
) {
253 IWL_ERR(mvm
, "EEPROM size is too small for NVM\n");
257 ret
= iwl_nvm_read_chunk(mvm
, section
, offset
, length
, data
);
259 IWL_DEBUG_EEPROM(mvm
->trans
->dev
,
260 "Cannot read NVM from section %d offset %d, length %d\n",
261 section
, offset
, length
);
267 iwl_nvm_fixups(mvm
->trans
->hw_id
, section
, data
, offset
);
269 IWL_DEBUG_EEPROM(mvm
->trans
->dev
,
270 "NVM section %d read completed\n", section
);
274 static struct iwl_nvm_data
*
275 iwl_parse_nvm_sections(struct iwl_mvm
*mvm
)
277 struct iwl_nvm_section
*sections
= mvm
->nvm_sections
;
279 const __le16
*sw
, *calib
, *regulatory
, *mac_override
, *phy_sku
;
282 /* Checking for required sections */
283 if (mvm
->trans
->cfg
->nvm_type
== IWL_NVM
) {
284 if (!mvm
->nvm_sections
[NVM_SECTION_TYPE_SW
].data
||
285 !mvm
->nvm_sections
[mvm
->cfg
->nvm_hw_section_num
].data
) {
286 IWL_ERR(mvm
, "Can't parse empty OTP/NVM sections\n");
290 if (mvm
->trans
->cfg
->nvm_type
== IWL_NVM_SDP
)
291 regulatory_type
= NVM_SECTION_TYPE_REGULATORY_SDP
;
293 regulatory_type
= NVM_SECTION_TYPE_REGULATORY
;
295 /* SW and REGULATORY sections are mandatory */
296 if (!mvm
->nvm_sections
[NVM_SECTION_TYPE_SW
].data
||
297 !mvm
->nvm_sections
[regulatory_type
].data
) {
299 "Can't parse empty family 8000 OTP/NVM sections\n");
302 /* MAC_OVERRIDE or at least HW section must exist */
303 if (!mvm
->nvm_sections
[mvm
->cfg
->nvm_hw_section_num
].data
&&
304 !mvm
->nvm_sections
[NVM_SECTION_TYPE_MAC_OVERRIDE
].data
) {
306 "Can't parse mac_address, empty sections\n");
310 /* PHY_SKU section is mandatory in B0 */
311 if (!mvm
->nvm_sections
[NVM_SECTION_TYPE_PHY_SKU
].data
) {
313 "Can't parse phy_sku in B0, empty sections\n");
318 hw
= (const __be16
*)sections
[mvm
->cfg
->nvm_hw_section_num
].data
;
319 sw
= (const __le16
*)sections
[NVM_SECTION_TYPE_SW
].data
;
320 calib
= (const __le16
*)sections
[NVM_SECTION_TYPE_CALIBRATION
].data
;
322 (const __le16
*)sections
[NVM_SECTION_TYPE_MAC_OVERRIDE
].data
;
323 phy_sku
= (const __le16
*)sections
[NVM_SECTION_TYPE_PHY_SKU
].data
;
325 regulatory
= mvm
->trans
->cfg
->nvm_type
== IWL_NVM_SDP
?
326 (const __le16
*)sections
[NVM_SECTION_TYPE_REGULATORY_SDP
].data
:
327 (const __le16
*)sections
[NVM_SECTION_TYPE_REGULATORY
].data
;
329 return iwl_parse_nvm_data(mvm
->trans
, mvm
->cfg
, mvm
->fw
, hw
, sw
, calib
,
330 regulatory
, mac_override
, phy_sku
,
331 mvm
->fw
->valid_tx_ant
, mvm
->fw
->valid_rx_ant
);
334 /* Loads the NVM data stored in mvm->nvm_sections into the NIC */
335 int iwl_mvm_load_nvm_to_nic(struct iwl_mvm
*mvm
)
338 struct iwl_nvm_section
*sections
= mvm
->nvm_sections
;
340 IWL_DEBUG_EEPROM(mvm
->trans
->dev
, "'Write to NVM\n");
342 for (i
= 0; i
< ARRAY_SIZE(mvm
->nvm_sections
); i
++) {
343 if (!mvm
->nvm_sections
[i
].data
|| !mvm
->nvm_sections
[i
].length
)
345 ret
= iwl_nvm_write_section(mvm
, i
, sections
[i
].data
,
348 IWL_ERR(mvm
, "iwl_mvm_send_cmd failed: %d\n", ret
);
355 int iwl_nvm_init(struct iwl_mvm
*mvm
)
359 u8
*nvm_buffer
, *temp
;
360 const char *nvm_file_C
= mvm
->cfg
->default_nvm_file_C_step
;
362 if (WARN_ON_ONCE(mvm
->cfg
->nvm_hw_section_num
>= NVM_MAX_NUM_SECTIONS
))
365 /* load NVM values from nic */
366 /* Read From FW NVM */
367 IWL_DEBUG_EEPROM(mvm
->trans
->dev
, "Read from NVM\n");
369 nvm_buffer
= kmalloc(mvm
->trans
->trans_cfg
->base_params
->eeprom_size
,
373 for (section
= 0; section
< NVM_MAX_NUM_SECTIONS
; section
++) {
374 /* we override the constness for initial read */
375 ret
= iwl_nvm_read_section(mvm
, section
, nvm_buffer
,
377 if (ret
== -ENODATA
) {
384 temp
= kmemdup(nvm_buffer
, ret
, GFP_KERNEL
);
390 iwl_nvm_fixups(mvm
->trans
->hw_id
, section
, temp
, ret
);
392 mvm
->nvm_sections
[section
].data
= temp
;
393 mvm
->nvm_sections
[section
].length
= ret
;
395 #ifdef CONFIG_IWLWIFI_DEBUGFS
397 case NVM_SECTION_TYPE_SW
:
398 mvm
->nvm_sw_blob
.data
= temp
;
399 mvm
->nvm_sw_blob
.size
= ret
;
401 case NVM_SECTION_TYPE_CALIBRATION
:
402 mvm
->nvm_calib_blob
.data
= temp
;
403 mvm
->nvm_calib_blob
.size
= ret
;
405 case NVM_SECTION_TYPE_PRODUCTION
:
406 mvm
->nvm_prod_blob
.data
= temp
;
407 mvm
->nvm_prod_blob
.size
= ret
;
409 case NVM_SECTION_TYPE_PHY_SKU
:
410 mvm
->nvm_phy_sku_blob
.data
= temp
;
411 mvm
->nvm_phy_sku_blob
.size
= ret
;
413 case NVM_SECTION_TYPE_REGULATORY_SDP
:
414 case NVM_SECTION_TYPE_REGULATORY
:
415 mvm
->nvm_reg_blob
.data
= temp
;
416 mvm
->nvm_reg_blob
.size
= ret
;
419 if (section
== mvm
->cfg
->nvm_hw_section_num
) {
420 mvm
->nvm_hw_blob
.data
= temp
;
421 mvm
->nvm_hw_blob
.size
= ret
;
428 IWL_ERR(mvm
, "OTP is blank\n");
431 /* Only if PNVM selected in the mod param - load external NVM */
432 if (mvm
->nvm_file_name
) {
433 /* read External NVM file from the mod param */
434 ret
= iwl_read_external_nvm(mvm
->trans
, mvm
->nvm_file_name
,
437 mvm
->nvm_file_name
= nvm_file_C
;
439 if ((ret
== -EFAULT
|| ret
== -ENOENT
) &&
440 mvm
->nvm_file_name
) {
441 /* in case nvm file was failed try again */
442 ret
= iwl_read_external_nvm(mvm
->trans
,
453 /* parse the relevant nvm sections */
454 mvm
->nvm_data
= iwl_parse_nvm_sections(mvm
);
457 IWL_DEBUG_EEPROM(mvm
->trans
->dev
, "nvm version = %x\n",
458 mvm
->nvm_data
->nvm_version
);
460 return ret
< 0 ? ret
: 0;
463 struct iwl_mcc_update_resp
*
464 iwl_mvm_update_mcc(struct iwl_mvm
*mvm
, const char *alpha2
,
465 enum iwl_mcc_source src_id
)
467 struct iwl_mcc_update_cmd mcc_update_cmd
= {
468 .mcc
= cpu_to_le16(alpha2
[0] << 8 | alpha2
[1]),
469 .source_id
= (u8
)src_id
,
471 struct iwl_mcc_update_resp
*resp_cp
;
472 struct iwl_rx_packet
*pkt
;
473 struct iwl_host_cmd cmd
= {
474 .id
= MCC_UPDATE_CMD
,
475 .flags
= CMD_WANT_SKB
,
476 .data
= { &mcc_update_cmd
},
481 int resp_len
, n_channels
;
484 if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm
)))
485 return ERR_PTR(-EOPNOTSUPP
);
487 cmd
.len
[0] = sizeof(struct iwl_mcc_update_cmd
);
489 IWL_DEBUG_LAR(mvm
, "send MCC update to FW with '%c%c' src = %d\n",
490 alpha2
[0], alpha2
[1], src_id
);
492 ret
= iwl_mvm_send_cmd(mvm
, &cmd
);
498 /* Extract MCC response */
499 if (fw_has_capa(&mvm
->fw
->ucode_capa
,
500 IWL_UCODE_TLV_CAPA_MCC_UPDATE_11AX_SUPPORT
)) {
501 struct iwl_mcc_update_resp
*mcc_resp
= (void *)pkt
->data
;
503 n_channels
= __le32_to_cpu(mcc_resp
->n_channels
);
504 resp_len
= sizeof(struct iwl_mcc_update_resp
) +
505 n_channels
* sizeof(__le32
);
506 resp_cp
= kmemdup(mcc_resp
, resp_len
, GFP_KERNEL
);
508 resp_cp
= ERR_PTR(-ENOMEM
);
512 struct iwl_mcc_update_resp_v3
*mcc_resp_v3
= (void *)pkt
->data
;
514 n_channels
= __le32_to_cpu(mcc_resp_v3
->n_channels
);
515 resp_len
= sizeof(struct iwl_mcc_update_resp
) +
516 n_channels
* sizeof(__le32
);
517 resp_cp
= kzalloc(resp_len
, GFP_KERNEL
);
519 resp_cp
= ERR_PTR(-ENOMEM
);
523 resp_cp
->status
= mcc_resp_v3
->status
;
524 resp_cp
->mcc
= mcc_resp_v3
->mcc
;
525 resp_cp
->cap
= cpu_to_le16(mcc_resp_v3
->cap
);
526 resp_cp
->source_id
= mcc_resp_v3
->source_id
;
527 resp_cp
->time
= mcc_resp_v3
->time
;
528 resp_cp
->geo_info
= mcc_resp_v3
->geo_info
;
529 resp_cp
->n_channels
= mcc_resp_v3
->n_channels
;
530 memcpy(resp_cp
->channels
, mcc_resp_v3
->channels
,
531 n_channels
* sizeof(__le32
));
534 status
= le32_to_cpu(resp_cp
->status
);
536 mcc
= le16_to_cpu(resp_cp
->mcc
);
538 /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
540 mcc
= 0x3030; /* "00" - world */
541 resp_cp
->mcc
= cpu_to_le16(mcc
);
545 "MCC response status: 0x%x. new MCC: 0x%x ('%c%c') n_chans: %d\n",
546 status
, mcc
, mcc
>> 8, mcc
& 0xff, n_channels
);
553 int iwl_mvm_init_mcc(struct iwl_mvm
*mvm
)
558 struct ieee80211_regdomain
*regd
;
561 if (mvm
->cfg
->nvm_type
== IWL_NVM_EXT
) {
562 tlv_lar
= fw_has_capa(&mvm
->fw
->ucode_capa
,
563 IWL_UCODE_TLV_CAPA_LAR_SUPPORT
);
564 nvm_lar
= mvm
->nvm_data
->lar_enabled
;
565 if (tlv_lar
!= nvm_lar
)
567 "Conflict between TLV & NVM regarding enabling LAR (TLV = %s NVM =%s)\n",
568 tlv_lar
? "enabled" : "disabled",
569 nvm_lar
? "enabled" : "disabled");
572 if (!iwl_mvm_is_lar_supported(mvm
))
576 * try to replay the last set MCC to FW. If it doesn't exist,
577 * queue an update to cfg80211 to retrieve the default alpha2 from FW.
579 retval
= iwl_mvm_init_fw_regd(mvm
);
580 if (retval
!= -ENOENT
)
584 * Driver regulatory hint for initial update, this also informs the
585 * firmware we support wifi location updates.
586 * Disallow scans that might crash the FW while the LAR regdomain
589 mvm
->lar_regdom_set
= false;
591 regd
= iwl_mvm_get_current_regdomain(mvm
, NULL
);
592 if (IS_ERR_OR_NULL(regd
))
595 if (iwl_mvm_is_wifi_mcc_supported(mvm
) &&
596 !iwl_acpi_get_mcc(mvm
->dev
, mcc
)) {
598 regd
= iwl_mvm_get_regdomain(mvm
->hw
->wiphy
, mcc
,
599 MCC_SOURCE_BIOS
, NULL
);
600 if (IS_ERR_OR_NULL(regd
))
604 retval
= regulatory_set_wiphy_regd_sync_rtnl(mvm
->hw
->wiphy
, regd
);
609 void iwl_mvm_rx_chub_update_mcc(struct iwl_mvm
*mvm
,
610 struct iwl_rx_cmd_buffer
*rxb
)
612 struct iwl_rx_packet
*pkt
= rxb_addr(rxb
);
613 struct iwl_mcc_chub_notif
*notif
= (void *)pkt
->data
;
614 enum iwl_mcc_source src
;
616 struct ieee80211_regdomain
*regd
;
619 lockdep_assert_held(&mvm
->mutex
);
621 if (iwl_mvm_is_vif_assoc(mvm
) && notif
->source_id
== MCC_SOURCE_WIFI
) {
622 IWL_DEBUG_LAR(mvm
, "Ignore mcc update while associated\n");
626 if (WARN_ON_ONCE(!iwl_mvm_is_lar_supported(mvm
)))
629 mcc
[0] = le16_to_cpu(notif
->mcc
) >> 8;
630 mcc
[1] = le16_to_cpu(notif
->mcc
) & 0xff;
632 src
= notif
->source_id
;
635 "RX: received chub update mcc cmd (mcc '%s' src %d)\n",
637 regd
= iwl_mvm_get_regdomain(mvm
->hw
->wiphy
, mcc
, src
, NULL
);
638 if (IS_ERR_OR_NULL(regd
))
641 wgds_tbl_idx
= iwl_mvm_get_sar_geo_profile(mvm
);
642 if (wgds_tbl_idx
< 0)
643 IWL_DEBUG_INFO(mvm
, "SAR WGDS is disabled (%d)\n",
646 IWL_DEBUG_INFO(mvm
, "SAR WGDS: geo profile %d is configured\n",
649 regulatory_set_wiphy_regd(mvm
->hw
->wiphy
, regd
);