/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>
#include <linux/netdevice.h>

#include "iwl-trans.h"
#include "iwl-op-mode.h"
#include "iwl-fw.h"
#include "iwl-debug.h"
#include "iwl-csr.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-io.h" /* for iwl_mvm_rx_card_state_notif */
#include "iwl-prph.h"
#include "iwl-eeprom-parse.h"

#include "mvm.h"
#include "fw-dbg.h"
#include "iwl-phy-db.h"

#define MVM_UCODE_ALIVE_TIMEOUT	HZ
#define MVM_UCODE_CALIB_TIMEOUT	(2*HZ)

#define UCODE_VALID_OK	cpu_to_le32(0x1)
struct iwl_mvm_alive_data {
	bool valid;
	u32 scd_base_addr;
};
static inline const struct fw_img *
iwl_get_ucode_image(struct iwl_mvm *mvm, enum iwl_ucode_type ucode_type)
{
	if (ucode_type >= IWL_UCODE_TYPE_MAX)
		return NULL;

	return &mvm->fw->img[ucode_type];
}
static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
{
	struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
		.valid = cpu_to_le32(valid_tx_ant),
	};

	IWL_DEBUG_FW(mvm, "select valid tx ant: %u\n", valid_tx_ant);
	return iwl_mvm_send_cmd_pdu(mvm, TX_ANT_CONFIGURATION_CMD, 0,
				    sizeof(tx_ant_cmd), &tx_ant_cmd);
}
static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
{
	int i;
	struct iwl_rss_config_cmd cmd = {
		.flags = cpu_to_le32(IWL_RSS_ENABLE),
		.hash_mask = IWL_RSS_HASH_TYPE_IPV4_TCP |
			     IWL_RSS_HASH_TYPE_IPV4_UDP |
			     IWL_RSS_HASH_TYPE_IPV4_PAYLOAD |
			     IWL_RSS_HASH_TYPE_IPV6_TCP |
			     IWL_RSS_HASH_TYPE_IPV6_UDP |
			     IWL_RSS_HASH_TYPE_IPV6_PAYLOAD,
	};

	/* Do not direct RSS traffic to Q 0 which is our fallback queue */
	for (i = 0; i < ARRAY_SIZE(cmd.indirection_table); i++)
		cmd.indirection_table[i] =
			1 + (i % (mvm->trans->num_rx_queues - 1));
	netdev_rss_key_fill(cmd.secret_key, sizeof(cmd.secret_key));

	return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
}
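/*
 * For example: with the mapping above, a device reporting
 * num_rx_queues == 4 fills the indirection table with the repeating
 * pattern 1, 2, 3, 1, 2, 3, ... so hashed RX traffic spreads over
 * queues 1..3 while queue 0 stays reserved as the fallback queue.
 */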
void iwl_free_fw_paging(struct iwl_mvm *mvm)
{
	int i;

	if (!mvm->fw_paging_db[0].fw_paging_block)
		return;

	for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
		if (!mvm->fw_paging_db[i].fw_paging_block) {
			IWL_DEBUG_FW(mvm,
				     "Paging: block %d already freed, continue to next page\n",
				     i);

			continue;
		}

		__free_pages(mvm->fw_paging_db[i].fw_paging_block,
			     get_order(mvm->fw_paging_db[i].fw_paging_size));
		mvm->fw_paging_db[i].fw_paging_block = NULL;
	}

	kfree(mvm->trans->paging_download_buf);
	mvm->trans->paging_download_buf = NULL;
	mvm->trans->paging_db = NULL;

	memset(mvm->fw_paging_db, 0, sizeof(mvm->fw_paging_db));
}
static int iwl_fill_paging_mem(struct iwl_mvm *mvm, const struct fw_img *image)
{
	int sec_idx, idx;
	u32 offset = 0;

	/*
	 * Find where the paging image starts. If CPU2 exists and is in
	 * paging format, the image looks like this:
	 * CPU1 sections (2 or more)
	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
	 * CPU2 sections (not paged)
	 * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged
	 *	CPU2 part from the CPU2 paging section
	 * CPU2 paging CSS
	 * CPU2 paging image (including instructions and data)
	 */
	for (sec_idx = 0; sec_idx < IWL_UCODE_SECTION_MAX; sec_idx++) {
		if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
			sec_idx++;
			break;
		}
	}

	/*
	 * If paging is enabled there should be at least 2 more sections left
	 * (one for the CSS and one for the paging data)
	 */
	if (sec_idx >= ARRAY_SIZE(image->sec) - 1) {
		IWL_ERR(mvm, "Paging: Missing CSS and/or paging sections\n");
		iwl_free_fw_paging(mvm);
		return -EINVAL;
	}

	/* copy the CSS block to the dram */
	IWL_DEBUG_FW(mvm, "Paging: load paging CSS to FW, sec = %d\n",
		     sec_idx);

	memcpy(page_address(mvm->fw_paging_db[0].fw_paging_block),
	       image->sec[sec_idx].data,
	       mvm->fw_paging_db[0].fw_paging_size);

	IWL_DEBUG_FW(mvm,
		     "Paging: copied %d CSS bytes to first block\n",
		     mvm->fw_paging_db[0].fw_paging_size);

	sec_idx++;

	/*
	 * Copy the paging blocks to the dram. The loop index starts from 1
	 * because the CSS block has already been copied to dram at index 0.
	 * The loop stops at num_of_paging_blk since the last block is not
	 * full.
	 */
	for (idx = 1; idx < mvm->num_of_paging_blk; idx++) {
		memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
		       image->sec[sec_idx].data + offset,
		       mvm->fw_paging_db[idx].fw_paging_size);

		IWL_DEBUG_FW(mvm,
			     "Paging: copied %d paging bytes to block %d\n",
			     mvm->fw_paging_db[idx].fw_paging_size,
			     idx);

		offset += mvm->fw_paging_db[idx].fw_paging_size;
	}

	/* copy the last paging block */
	if (mvm->num_of_pages_in_last_blk > 0) {
		memcpy(page_address(mvm->fw_paging_db[idx].fw_paging_block),
		       image->sec[sec_idx].data + offset,
		       FW_PAGING_SIZE * mvm->num_of_pages_in_last_blk);

		IWL_DEBUG_FW(mvm,
			     "Paging: copied %d pages in the last block %d\n",
			     mvm->num_of_pages_in_last_blk, idx);
	}

	return 0;
}
static int iwl_alloc_fw_paging_mem(struct iwl_mvm *mvm,
				   const struct fw_img *image)
{
	struct page *block;
	dma_addr_t phys = 0;
	int blk_idx = 0;
	int order, num_of_pages;
	int dma_enabled;

	if (mvm->fw_paging_db[0].fw_paging_block)
		return 0;

	dma_enabled = is_device_dma_capable(mvm->trans->dev);

	/* ensure BLOCK_2_EXP_SIZE is the log2 of PAGING_BLOCK_SIZE */
	BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);

	num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
	mvm->num_of_paging_blk = ((num_of_pages - 1) /
				    NUM_OF_PAGE_PER_GROUP) + 1;

	mvm->num_of_pages_in_last_blk =
		num_of_pages -
		NUM_OF_PAGE_PER_GROUP * (mvm->num_of_paging_blk - 1);

	IWL_DEBUG_FW(mvm,
		     "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
		     mvm->num_of_paging_blk,
		     mvm->num_of_pages_in_last_blk);

	/* allocate block of 4Kbytes for paging CSS */
	order = get_order(FW_PAGING_SIZE);
	block = alloc_pages(GFP_KERNEL, order);
	if (!block) {
		/* free all the previous pages since we failed */
		iwl_free_fw_paging(mvm);
		return -ENOMEM;
	}

	mvm->fw_paging_db[blk_idx].fw_paging_block = block;
	mvm->fw_paging_db[blk_idx].fw_paging_size = FW_PAGING_SIZE;

	if (dma_enabled) {
		phys = dma_map_page(mvm->trans->dev, block, 0,
				    PAGE_SIZE << order, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(mvm->trans->dev, phys)) {
			/*
			 * free the previous pages and the current one since
			 * we failed to map_page.
			 */
			iwl_free_fw_paging(mvm);
			return -ENOMEM;
		}
		mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
	} else {
		mvm->fw_paging_db[blk_idx].fw_paging_phys = PAGING_ADDR_SIG |
			blk_idx << BLOCK_2_EXP_SIZE;
	}

	IWL_DEBUG_FW(mvm,
		     "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
		     order);

	/*
	 * Allocate blocks in dram. Since the CSS lives in fw_paging_db[0],
	 * the loop starts from index 1.
	 */
	for (blk_idx = 1; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
		/* allocate block of PAGING_BLOCK_SIZE (32K) */
		order = get_order(PAGING_BLOCK_SIZE);
		block = alloc_pages(GFP_KERNEL, order);
		if (!block) {
			/* free all the previous pages since we failed */
			iwl_free_fw_paging(mvm);
			return -ENOMEM;
		}

		mvm->fw_paging_db[blk_idx].fw_paging_block = block;
		mvm->fw_paging_db[blk_idx].fw_paging_size = PAGING_BLOCK_SIZE;

		if (dma_enabled) {
			phys = dma_map_page(mvm->trans->dev, block, 0,
					    PAGE_SIZE << order,
					    DMA_BIDIRECTIONAL);
			if (dma_mapping_error(mvm->trans->dev, phys)) {
				/*
				 * free the previous pages and the current one
				 * since we failed to map_page.
				 */
				iwl_free_fw_paging(mvm);
				return -ENOMEM;
			}
			mvm->fw_paging_db[blk_idx].fw_paging_phys = phys;
		} else {
			mvm->fw_paging_db[blk_idx].fw_paging_phys =
				PAGING_ADDR_SIG |
				blk_idx << BLOCK_2_EXP_SIZE;
		}

		IWL_DEBUG_FW(mvm,
			     "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
			     order);
	}

	return 0;
}
static int iwl_save_fw_paging(struct iwl_mvm *mvm,
			      const struct fw_img *fw)
{
	int ret;

	ret = iwl_alloc_fw_paging_mem(mvm, fw);
	if (ret)
		return ret;

	return iwl_fill_paging_mem(mvm, fw);
}
/* send paging cmd to FW in case CPU2 has paging image */
static int iwl_send_paging_cmd(struct iwl_mvm *mvm, const struct fw_img *fw)
{
	int blk_idx;
	__le32 dev_phy_addr;
	struct iwl_fw_paging_cmd fw_paging_cmd = {
		.flags =
			cpu_to_le32(PAGING_CMD_IS_SECURED |
				    PAGING_CMD_IS_ENABLED |
				    (mvm->num_of_pages_in_last_blk <<
				    PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
		.block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
		.block_num = cpu_to_le32(mvm->num_of_paging_blk),
	};

	/* loop over all paging blocks + CSS block */
	for (blk_idx = 0; blk_idx < mvm->num_of_paging_blk + 1; blk_idx++) {
		dev_phy_addr =
			cpu_to_le32(mvm->fw_paging_db[blk_idx].fw_paging_phys >>
				    PAGE_2_EXP_SIZE);
		fw_paging_cmd.device_phy_addr[blk_idx] = dev_phy_addr;
	}

	return iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(FW_PAGING_BLOCK_CMD,
						    IWL_ALWAYS_LONG_GROUP, 0),
				    0, sizeof(fw_paging_cmd), &fw_paging_cmd);
}
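/*
 * Note: the physical addresses above are shifted right by
 * PAGE_2_EXP_SIZE before being sent, so the firmware receives block
 * addresses in firmware-page units (4 KB pages, assuming the usual
 * PAGE_2_EXP_SIZE of 12) rather than raw byte addresses.
 */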
/*
 * Send paging item cmd to FW in case CPU2 has paging image
 */
static int iwl_trans_get_paging_item(struct iwl_mvm *mvm)
{
	int ret;
	struct iwl_fw_get_item_cmd fw_get_item_cmd = {
		.item_id = cpu_to_le32(IWL_FW_ITEM_ID_PAGING),
	};

	struct iwl_fw_get_item_resp *item_resp;
	struct iwl_host_cmd cmd = {
		.id = iwl_cmd_id(FW_GET_ITEM_CMD, IWL_ALWAYS_LONG_GROUP, 0),
		.flags = CMD_WANT_SKB | CMD_SEND_IN_RFKILL,
		.data = { &fw_get_item_cmd, },
	};

	cmd.len[0] = sizeof(struct iwl_fw_get_item_cmd);

	ret = iwl_mvm_send_cmd(mvm, &cmd);
	if (ret) {
		IWL_ERR(mvm,
			"Paging: Failed to send FW_GET_ITEM_CMD cmd (err = %d)\n",
			ret);
		return ret;
	}

	item_resp = (void *)((struct iwl_rx_packet *)cmd.resp_pkt)->data;
	if (item_resp->item_id != cpu_to_le32(IWL_FW_ITEM_ID_PAGING)) {
		IWL_ERR(mvm,
			"Paging: got wrong item in FW_GET_ITEM_CMD resp (item_id = %u)\n",
			le32_to_cpu(item_resp->item_id));
		ret = -EIO;
		goto exit;
	}

	/* Add an extra page for headers */
	mvm->trans->paging_download_buf = kzalloc(PAGING_BLOCK_SIZE +
						  FW_PAGING_SIZE,
						  GFP_KERNEL);
	if (!mvm->trans->paging_download_buf) {
		ret = -ENOMEM;
		goto exit;
	}
	mvm->trans->paging_req_addr = le32_to_cpu(item_resp->item_val);
	mvm->trans->paging_db = mvm->fw_paging_db;
	IWL_DEBUG_FW(mvm,
		     "Paging: got paging request address (paging_req_addr 0x%08x)\n",
		     mvm->trans->paging_req_addr);

exit:
	iwl_free_resp(&cmd);

	return ret;
}
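/*
 * Note: this request is only needed on transports that are not DMA
 * capable (such as SDIO), where the driver itself must copy pages
 * to/from SMEM through the address obtained here instead of letting
 * the device fetch them directly.
 */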
static bool iwl_alive_fn(struct iwl_notif_wait_data *notif_wait,
			 struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_mvm *mvm =
		container_of(notif_wait, struct iwl_mvm, notif_wait);
	struct iwl_mvm_alive_data *alive_data = data;
	struct mvm_alive_resp_ver1 *palive1;
	struct mvm_alive_resp_ver2 *palive2;
	struct mvm_alive_resp *palive;

	if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive1)) {
		palive1 = (void *)pkt->data;

		mvm->support_umac_log = false;
		mvm->error_event_table =
			le32_to_cpu(palive1->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive1->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive1->scd_base_ptr);

		alive_data->valid = le16_to_cpu(palive1->status) ==
				    IWL_ALIVE_STATUS_OK;
		IWL_DEBUG_FW(mvm,
			     "Alive VER1 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive1->status), palive1->ver_type,
			     palive1->ver_subtype, palive1->flags);
	} else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive2)) {
		palive2 = (void *)pkt->data;

		mvm->error_event_table =
			le32_to_cpu(palive2->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive2->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive2->scd_base_ptr);
		mvm->umac_error_event_table =
			le32_to_cpu(palive2->error_info_addr);
		mvm->sf_space.addr = le32_to_cpu(palive2->st_fwrd_addr);
		mvm->sf_space.size = le32_to_cpu(palive2->st_fwrd_size);

		alive_data->valid = le16_to_cpu(palive2->status) ==
				    IWL_ALIVE_STATUS_OK;
		if (mvm->umac_error_event_table)
			mvm->support_umac_log = true;

		IWL_DEBUG_FW(mvm,
			     "Alive VER2 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive2->status), palive2->ver_type,
			     palive2->ver_subtype, palive2->flags);

		IWL_DEBUG_FW(mvm,
			     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			     palive2->umac_major, palive2->umac_minor);
	} else if (iwl_rx_packet_payload_len(pkt) == sizeof(*palive)) {
		palive = (void *)pkt->data;

		mvm->error_event_table =
			le32_to_cpu(palive->error_event_table_ptr);
		mvm->log_event_table =
			le32_to_cpu(palive->log_event_table_ptr);
		alive_data->scd_base_addr = le32_to_cpu(palive->scd_base_ptr);
		mvm->umac_error_event_table =
			le32_to_cpu(palive->error_info_addr);
		mvm->sf_space.addr = le32_to_cpu(palive->st_fwrd_addr);
		mvm->sf_space.size = le32_to_cpu(palive->st_fwrd_size);

		alive_data->valid = le16_to_cpu(palive->status) ==
				    IWL_ALIVE_STATUS_OK;
		if (mvm->umac_error_event_table)
			mvm->support_umac_log = true;

		IWL_DEBUG_FW(mvm,
			     "Alive VER3 ucode status 0x%04x revision 0x%01X 0x%01X flags 0x%01X\n",
			     le16_to_cpu(palive->status), palive->ver_type,
			     palive->ver_subtype, palive->flags);

		IWL_DEBUG_FW(mvm,
			     "UMAC version: Major - 0x%x, Minor - 0x%x\n",
			     le32_to_cpu(palive->umac_major),
			     le32_to_cpu(palive->umac_minor));
	}

	return true;
}
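/*
 * Note: the branches above distinguish the ALIVE API version purely by
 * payload length, which works only because the ver1, ver2 and current
 * response structs all have distinct sizes.
 */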
static bool iwl_wait_phy_db_entry(struct iwl_notif_wait_data *notif_wait,
				  struct iwl_rx_packet *pkt, void *data)
{
	struct iwl_phy_db *phy_db = data;

	if (pkt->hdr.cmd != CALIB_RES_NOTIF_PHY_DB) {
		WARN_ON(pkt->hdr.cmd != INIT_COMPLETE_NOTIF);
		return true;
	}

	WARN_ON(iwl_phy_db_set_section(phy_db, pkt));

	return false;
}
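/*
 * Note on the two notification-wait handlers above: returning true
 * completes the wait and wakes the waiter, while returning false keeps
 * the wait armed for further notifications (here, additional phy DB
 * entries until INIT_COMPLETE_NOTIF arrives).
 */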
static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
					 enum iwl_ucode_type ucode_type)
{
	struct iwl_notification_wait alive_wait;
	struct iwl_mvm_alive_data alive_data;
	const struct fw_img *fw;
	int ret, i;
	enum iwl_ucode_type old_type = mvm->cur_ucode;
	static const u16 alive_cmd[] = { MVM_ALIVE };
	struct iwl_sf_region st_fwrd_space;

	if (ucode_type == IWL_UCODE_REGULAR &&
	    iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
	    !(fw_has_capa(&mvm->fw->ucode_capa,
			  IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
		fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER);
	else
		fw = iwl_get_ucode_image(mvm, ucode_type);
	if (WARN_ON(!fw))
		return -EINVAL;
	mvm->cur_ucode = ucode_type;
	mvm->ucode_loaded = false;

	iwl_init_notification_wait(&mvm->notif_wait, &alive_wait,
				   alive_cmd, ARRAY_SIZE(alive_cmd),
				   iwl_alive_fn, &alive_data);

	ret = iwl_trans_start_fw(mvm->trans, fw, ucode_type == IWL_UCODE_INIT);
	if (ret) {
		mvm->cur_ucode = old_type;
		iwl_remove_notification(&mvm->notif_wait, &alive_wait);
		return ret;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the ALIVE notification here.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &alive_wait,
				    MVM_UCODE_ALIVE_TIMEOUT);
	if (ret) {
		if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
			IWL_ERR(mvm,
				"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
				iwl_read_prph(mvm->trans, SB_CPU_1_STATUS),
				iwl_read_prph(mvm->trans, SB_CPU_2_STATUS));
		mvm->cur_ucode = old_type;
		return ret;
	}

	if (!alive_data.valid) {
		IWL_ERR(mvm, "Loaded ucode is not valid!\n");
		mvm->cur_ucode = old_type;
		return -EIO;
	}

	/*
	 * update the sdio allocation according to the pointer we get in the
	 * alive notification.
	 */
	st_fwrd_space.addr = mvm->sf_space.addr;
	st_fwrd_space.size = mvm->sf_space.size;
	ret = iwl_trans_update_sf(mvm->trans, &st_fwrd_space);
	if (ret) {
		IWL_ERR(mvm, "Failed to update SF size. ret %d\n", ret);
		return ret;
	}

	iwl_trans_fw_alive(mvm->trans, alive_data.scd_base_addr);

	/*
	 * Configure and operate the fw paging mechanism. The driver
	 * configures the paging flow only once; the CPU2 paging image is
	 * included in the IWL_UCODE_INIT image.
	 */
	if (fw->paging_mem_size) {
		/*
		 * When dma is not enabled, the driver needs to copy / write
		 * the downloaded / uploaded page to / from the smem.
		 * This gets the location of the place where the pages are
		 * stored.
		 */
		if (!is_device_dma_capable(mvm->trans->dev)) {
			ret = iwl_trans_get_paging_item(mvm);
			if (ret) {
				IWL_ERR(mvm, "failed to get FW paging item\n");
				return ret;
			}
		}

		ret = iwl_save_fw_paging(mvm, fw);
		if (ret) {
			IWL_ERR(mvm, "failed to save the FW paging image\n");
			return ret;
		}

		ret = iwl_send_paging_cmd(mvm, fw);
		if (ret) {
			IWL_ERR(mvm, "failed to send the paging cmd\n");
			iwl_free_fw_paging(mvm);
			return ret;
		}
	}

	/*
	 * Note: all the queues are enabled as part of the interface
	 * initialization, but in firmware restart scenarios they
	 * could be stopped, so wake them up. In firmware restart,
	 * mac80211 will have the queues stopped as well until the
	 * reconfiguration completes. During normal startup, they
	 * will be empty.
	 */
	memset(&mvm->queue_info, 0, sizeof(mvm->queue_info));
	if (iwl_mvm_is_dqa_supported(mvm))
		mvm->queue_info[IWL_MVM_DQA_CMD_QUEUE].hw_queue_refcount = 1;
	else
		mvm->queue_info[IWL_MVM_CMD_QUEUE].hw_queue_refcount = 1;

	for (i = 0; i < IEEE80211_MAX_QUEUES; i++)
		atomic_set(&mvm->mac80211_queue_stop_count[i], 0);

	mvm->ucode_loaded = true;

	return 0;
}
static int iwl_send_phy_cfg_cmd(struct iwl_mvm *mvm)
{
	struct iwl_phy_cfg_cmd phy_cfg_cmd;
	enum iwl_ucode_type ucode_type = mvm->cur_ucode;

	phy_cfg_cmd.phy_cfg = cpu_to_le32(iwl_mvm_get_phy_config(mvm));
	phy_cfg_cmd.calib_control.event_trigger =
		mvm->fw->default_calib[ucode_type].event_trigger;
	phy_cfg_cmd.calib_control.flow_trigger =
		mvm->fw->default_calib[ucode_type].flow_trigger;

	IWL_DEBUG_INFO(mvm, "Sending Phy CFG command: 0x%x\n",
		       phy_cfg_cmd.phy_cfg);

	return iwl_mvm_send_cmd_pdu(mvm, PHY_CONFIGURATION_CMD, 0,
				    sizeof(phy_cfg_cmd), &phy_cfg_cmd);
}
int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
{
	struct iwl_notification_wait calib_wait;
	static const u16 init_complete[] = {
		INIT_COMPLETE_NOTIF,
		CALIB_RES_NOTIF_PHY_DB
	};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(mvm->calibrating))
		return 0;

	iwl_init_notification_wait(&mvm->notif_wait,
				   &calib_wait,
				   init_complete,
				   ARRAY_SIZE(init_complete),
				   iwl_wait_phy_db_entry,
				   mvm->phy_db);

	/* Will also start the device */
	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_INIT);
	if (ret) {
		IWL_ERR(mvm, "Failed to start INIT ucode: %d\n", ret);
		goto error;
	}

	ret = iwl_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	/* Read the NVM only at driver load time, no need to do this twice */
	if (read_nvm) {
		ret = iwl_nvm_init(mvm, true);
		if (ret) {
			IWL_ERR(mvm, "Failed to read NVM: %d\n", ret);
			goto error;
		}
	}

	/* In case we read the NVM from external file, load it to the NIC */
	if (mvm->nvm_file_name)
		iwl_mvm_load_nvm_to_nic(mvm);

	ret = iwl_nvm_check_version(mvm->nvm_data, mvm->trans);
	WARN_ON(ret);

	/*
	 * Abort after reading the NVM in case RF Kill is on; we will complete
	 * the init sequence later, when RF kill switches to off.
	 */
	if (iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm,
				  "jump over all phy activities due to RF kill\n");
		iwl_remove_notification(&mvm->notif_wait, &calib_wait);
		ret = 1;
		goto out;
	}

	mvm->calibrating = true;

	/* Send TX valid antennas before triggering calibrations */
	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/*
	 * Send phy configurations command to init uCode
	 * to start the 16.0 uCode init image internal calibrations.
	 */
	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret) {
		IWL_ERR(mvm, "Failed to run INIT calibrations: %d\n",
			ret);
		goto error;
	}

	/*
	 * Some things may run in the background now, but we
	 * just wait for the calibration complete notification.
	 */
	ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
				    MVM_UCODE_CALIB_TIMEOUT);

	if (ret && iwl_mvm_is_radio_hw_killed(mvm)) {
		IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
		ret = 1;
	}
	goto out;

error:
	iwl_remove_notification(&mvm->notif_wait, &calib_wait);
out:
	mvm->calibrating = false;
	if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
		/* we want to debug INIT and we have no NVM - fake */
		mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
					sizeof(struct ieee80211_channel) +
					sizeof(struct ieee80211_rate),
					GFP_KERNEL);
		if (!mvm->nvm_data)
			return -ENOMEM;
		mvm->nvm_data->bands[0].channels = mvm->nvm_data->channels;
		mvm->nvm_data->bands[0].n_channels = 1;
		mvm->nvm_data->bands[0].n_bitrates = 1;
		mvm->nvm_data->bands[0].bitrates =
			(void *)mvm->nvm_data->channels + 1;
		mvm->nvm_data->bands[0].bitrates->hw_value = 10;
	}

	return ret;
}
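/*
 * Summary of the INIT flow above: load the INIT ucode and wait for
 * ALIVE, send the BT config, read and check the NVM, then (unless
 * RFKILL intervenes) send the antenna and phy configuration and wait
 * for the calibration-complete notification. A return value of 1 means
 * RFKILL interrupted the flow rather than a hard error.
 */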
static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
{
	struct iwl_host_cmd cmd = {
		.flags = CMD_WANT_SKB,
		.data = { NULL, },
		.len = { 0, },
	};
	struct iwl_shared_mem_cfg *mem_cfg;
	struct iwl_rx_packet *pkt;
	int i;

	lockdep_assert_held(&mvm->mutex);

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
		cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
	else
		cmd.id = SHARED_MEM_CFG;

	if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
		return;

	pkt = cmd.resp_pkt;
	mem_cfg = (void *)pkt->data;

	mvm->shared_mem_cfg.shared_mem_addr =
		le32_to_cpu(mem_cfg->shared_mem_addr);
	mvm->shared_mem_cfg.shared_mem_size =
		le32_to_cpu(mem_cfg->shared_mem_size);
	mvm->shared_mem_cfg.sample_buff_addr =
		le32_to_cpu(mem_cfg->sample_buff_addr);
	mvm->shared_mem_cfg.sample_buff_size =
		le32_to_cpu(mem_cfg->sample_buff_size);
	mvm->shared_mem_cfg.txfifo_addr = le32_to_cpu(mem_cfg->txfifo_addr);
	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++)
		mvm->shared_mem_cfg.txfifo_size[i] =
			le32_to_cpu(mem_cfg->txfifo_size[i]);
	for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
		mvm->shared_mem_cfg.rxfifo_size[i] =
			le32_to_cpu(mem_cfg->rxfifo_size[i]);
	mvm->shared_mem_cfg.page_buff_addr =
		le32_to_cpu(mem_cfg->page_buff_addr);
	mvm->shared_mem_cfg.page_buff_size =
		le32_to_cpu(mem_cfg->page_buff_size);

	/* new API has more data */
	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
		mvm->shared_mem_cfg.rxfifo_addr =
			le32_to_cpu(mem_cfg->rxfifo_addr);
		mvm->shared_mem_cfg.internal_txfifo_addr =
			le32_to_cpu(mem_cfg->internal_txfifo_addr);

		BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
			     sizeof(mem_cfg->internal_txfifo_size));

		for (i = 0;
		     i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
		     i++)
			mvm->shared_mem_cfg.internal_txfifo_size[i] =
				le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
	}

	IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");

	iwl_free_resp(&cmd);
}
static int iwl_mvm_config_ltr(struct iwl_mvm *mvm)
{
	struct iwl_ltr_config_cmd cmd = {
		.flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
	};

	if (!mvm->trans->ltr_enabled)
		return 0;

	return iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
				    sizeof(cmd), &cmd);
}
int iwl_mvm_up(struct iwl_mvm *mvm)
{
	int ret, i;
	struct ieee80211_channel *chan;
	struct cfg80211_chan_def chandef;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	/*
	 * If we haven't completed the run of the init ucode during
	 * module loading, load init ucode now
	 * (for example, if we were in RFKILL)
	 */
	ret = iwl_run_init_mvm_ucode(mvm, false);
	if (ret && !iwlmvm_mod_params.init_dbg) {
		IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
		/* this can't happen */
		if (WARN_ON(ret > 0))
			ret = -ERFKILL;
		goto error;
	}
	if (!iwlmvm_mod_params.init_dbg) {
		/*
		 * Stop and start the transport without entering low power
		 * mode. This will save the state of other components on the
		 * device that are triggered by the INIT firmware (MFUART).
		 */
		_iwl_trans_stop_device(mvm->trans, false);
		ret = _iwl_trans_start_hw(mvm->trans, false);
		if (ret)
			goto error;
	}

	if (iwlmvm_mod_params.init_dbg)
		return 0;

	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_REGULAR);
	if (ret) {
		IWL_ERR(mvm, "Failed to start RT ucode: %d\n", ret);
		goto error;
	}

	iwl_mvm_get_shared_mem_conf(mvm);

	ret = iwl_mvm_sf_update(mvm, NULL, false);
	if (ret)
		IWL_ERR(mvm, "Failed to initialize Smart Fifo\n");

	mvm->fw_dbg_conf = FW_DBG_INVALID;
	/* if we have a destination, assume EARLY START */
	if (mvm->fw->dbg_dest_tlv)
		mvm->fw_dbg_conf = FW_DBG_START_FROM_ALIVE;
	iwl_mvm_start_fw_dbg_conf(mvm, FW_DBG_START_FROM_ALIVE);

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	ret = iwl_send_bt_init_conf(mvm);
	if (ret)
		goto error;

	/* Send phy db control command and then phy db calibration */
	ret = iwl_send_phy_db_data(mvm->phy_db);
	if (ret)
		goto error;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret)
		goto error;

	/* Init RSS configuration */
	if (iwl_mvm_has_new_rx_api(mvm)) {
		ret = iwl_send_rss_cfg_cmd(mvm);
		if (ret) {
			IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
				ret);
			goto error;
		}
	}

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT;

	/* reset quota debouncing buffer - 0xff will yield invalid data */
	memset(&mvm->last_quota_cmd, 0xff, sizeof(mvm->last_quota_cmd));

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	/* Add all the PHY contexts */
	chan = &mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels[0];
	cfg80211_chandef_create(&chandef, chan, NL80211_CHAN_NO_HT);
	for (i = 0; i < NUM_PHY_CTX; i++) {
		/*
		 * The channel used here isn't relevant as it's
		 * going to be overwritten in the other flows.
		 * For now use the first channel we have.
		 */
		ret = iwl_mvm_phy_ctxt_add(mvm, &mvm->phy_ctxts[i],
					   &chandef, 1, 1);
		if (ret)
			goto error;
	}

#ifdef CONFIG_THERMAL
	if (iwl_mvm_is_tt_in_fw(mvm)) {
		/* in order to give the responsibility of ct-kill and
		 * TX backoff to FW we need to send empty temperature reporting
		 * cmd during init time
		 */
		iwl_mvm_send_temp_report_ths_cmd(mvm);
	} else {
		/* Initialize tx backoffs to the minimal possible */
		iwl_mvm_tt_tx_backoff(mvm, 0);
	}

	/* TODO: read the budget from BIOS / Platform NVM */
	if (iwl_mvm_is_ctdp_supported(mvm) && mvm->cooling_dev.cur_state > 0)
		ret = iwl_mvm_ctdp_command(mvm, CTDP_CMD_OPERATION_START,
					   mvm->cooling_dev.cur_state);
#else
	/* Initialize tx backoffs to the minimal possible */
	iwl_mvm_tt_tx_backoff(mvm, 0);
#endif

	WARN_ON(iwl_mvm_config_ltr(mvm));

	ret = iwl_mvm_power_update_device(mvm);
	if (ret)
		goto error;

	/*
	 * RTNL is not taken during Ct-kill, but we don't need to scan/Tx
	 * anyway, so don't init MCC.
	 */
	if (!test_bit(IWL_MVM_STATUS_HW_CTKILL, &mvm->status)) {
		ret = iwl_mvm_init_mcc(mvm);
		if (ret)
			goto error;
	}

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
		mvm->scan_type = IWL_SCAN_TYPE_NOT_SET;
		ret = iwl_mvm_config_scan(mvm);
		if (ret)
			goto error;
	}

	if (iwl_mvm_is_csum_supported(mvm) &&
	    mvm->cfg->features & NETIF_F_RXCSUM)
		iwl_trans_write_prph(mvm->trans, RX_EN_CSUM, 0x3);

	/* allow FW/transport low power modes if not during restart */
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);

	IWL_DEBUG_INFO(mvm, "RT uCode started.\n");
	return 0;
 error:
	iwl_mvm_stop_device(mvm);
	return ret;
}
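/*
 * Note (assumption): iwl_mvm_up() is the runtime bring-up path invoked
 * from the mac80211 start flow; on any failure it falls through to the
 * error label, which stops the device so a later start attempt begins
 * from a clean state.
 */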
int iwl_mvm_load_d3_fw(struct iwl_mvm *mvm)
{
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_trans_start_hw(mvm->trans);
	if (ret)
		return ret;

	ret = iwl_mvm_load_ucode_wait_alive(mvm, IWL_UCODE_WOWLAN);
	if (ret) {
		IWL_ERR(mvm, "Failed to start WoWLAN firmware: %d\n", ret);
		goto error;
	}

	ret = iwl_send_tx_ant_cfg(mvm, iwl_mvm_get_valid_tx_ant(mvm));
	if (ret)
		goto error;

	/* Send phy db control command and then phy db calibration */
	ret = iwl_send_phy_db_data(mvm->phy_db);
	if (ret)
		goto error;

	ret = iwl_send_phy_cfg_cmd(mvm);
	if (ret)
		goto error;

	/* init the fw <-> mac80211 STA mapping */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++)
		RCU_INIT_POINTER(mvm->fw_id_to_mac_id[i], NULL);

	/* Add auxiliary station for scanning */
	ret = iwl_mvm_add_aux_sta(mvm);
	if (ret)
		goto error;

	return 0;
 error:
	iwl_mvm_stop_device(mvm);
	return ret;
}
void iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
				 struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_card_state_notif *card_state_notif = (void *)pkt->data;
	u32 flags = le32_to_cpu(card_state_notif->flags);

	IWL_DEBUG_RF_KILL(mvm, "Card state received: HW:%s SW:%s CT:%s\n",
			  (flags & HW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & SW_CARD_DISABLED) ? "Kill" : "On",
			  (flags & CT_KILL_CARD_DISABLED) ?
			  "Reached" : "Not reached");
}
void iwl_mvm_rx_mfuart_notif(struct iwl_mvm *mvm,
			     struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mfuart_load_notif *mfuart_notif = (void *)pkt->data;

	IWL_DEBUG_INFO(mvm,
		       "MFUART: installed ver: 0x%08x, external ver: 0x%08x, status: 0x%08x, duration: 0x%08x\n",
		       le32_to_cpu(mfuart_notif->installed_ver),
		       le32_to_cpu(mfuart_notif->external_ver),
		       le32_to_cpu(mfuart_notif->status),
		       le32_to_cpu(mfuart_notif->duration));
}