// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2012-2014, 2018-2019, 2021 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include "iwl-drv.h"
#include "runtime.h"
#include "fw/api/commands.h"

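/*
 * Unmap every allocated paging block from the device, free its pages and
 * clear the paging bookkeeping in the runtime.
 */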
void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt)
{
	int i;

	if (!fwrt->fw_paging_db[0].fw_paging_block)
		return;

	for (i = 0; i < NUM_OF_FW_PAGING_BLOCKS; i++) {
		struct iwl_fw_paging *paging = &fwrt->fw_paging_db[i];

		if (!paging->fw_paging_block) {
			IWL_DEBUG_FW(fwrt,
				     "Paging: block %d already freed, continue to next page\n",
				     i);
			continue;
		}

		dma_unmap_page(fwrt->trans->dev, paging->fw_paging_phys,
			       paging->fw_paging_size, DMA_BIDIRECTIONAL);

		__free_pages(paging->fw_paging_block,
			     get_order(paging->fw_paging_size));
		paging->fw_paging_block = NULL;
	}

	memset(fwrt->fw_paging_db, 0, sizeof(fwrt->fw_paging_db));
}
IWL_EXPORT_SYMBOL(iwl_free_fw_paging);

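/*
 * Allocate the DRAM paging blocks: one FW_PAGING_SIZE (4KB) block for the
 * CSS and num_of_paging_blk blocks of PAGING_BLOCK_SIZE (32KB) each, and
 * DMA-map every block.  Does nothing if the blocks are already allocated.
 */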
static int iwl_alloc_fw_paging_mem(struct iwl_fw_runtime *fwrt,
				   const struct fw_img *image)
{
	struct page *block;
	dma_addr_t phys = 0;
	int blk_idx, order, num_of_pages, size;

	if (fwrt->fw_paging_db[0].fw_paging_block)
		return 0;

	/* ensure PAGING_BLOCK_SIZE is exactly 2^BLOCK_2_EXP_SIZE */
	BUILD_BUG_ON(BIT(BLOCK_2_EXP_SIZE) != PAGING_BLOCK_SIZE);

	num_of_pages = image->paging_mem_size / FW_PAGING_SIZE;
	fwrt->num_of_paging_blk =
		DIV_ROUND_UP(num_of_pages, NUM_OF_PAGE_PER_GROUP);
	fwrt->num_of_pages_in_last_blk =
		num_of_pages -
		NUM_OF_PAGE_PER_GROUP * (fwrt->num_of_paging_blk - 1);

	IWL_DEBUG_FW(fwrt,
		     "Paging: allocating mem for %d paging blocks, each block holds 8 pages, last block holds %d pages\n",
		     fwrt->num_of_paging_blk,
		     fwrt->num_of_pages_in_last_blk);

	/*
	 * Allocate CSS and paging blocks in dram.
	 */
	for (blk_idx = 0; blk_idx < fwrt->num_of_paging_blk + 1; blk_idx++) {
		/* For CSS allocate 4KB, for others PAGING_BLOCK_SIZE (32K) */
		size = blk_idx ? PAGING_BLOCK_SIZE : FW_PAGING_SIZE;
		order = get_order(size);
		block = alloc_pages(GFP_KERNEL, order);
		if (!block) {
			/* free all the previous pages since we failed */
			iwl_free_fw_paging(fwrt);
			return -ENOMEM;
		}

		fwrt->fw_paging_db[blk_idx].fw_paging_block = block;
		fwrt->fw_paging_db[blk_idx].fw_paging_size = size;

		phys = dma_map_page(fwrt->trans->dev, block, 0,
				    PAGE_SIZE << order,
				    DMA_BIDIRECTIONAL);
		if (dma_mapping_error(fwrt->trans->dev, phys)) {
			/*
			 * free the previous pages and the current one
			 * since we failed to map_page.
			 */
			iwl_free_fw_paging(fwrt);
			return -ENOMEM;
		}
		fwrt->fw_paging_db[blk_idx].fw_paging_phys = phys;

		if (!blk_idx)
			IWL_DEBUG_FW(fwrt,
				     "Paging: allocated 4K(CSS) bytes (order %d) for firmware paging.\n",
				     order);
		else
			IWL_DEBUG_FW(fwrt,
				     "Paging: allocated 32K bytes (order %d) for firmware paging.\n",
				     order);
	}

	return 0;
}

static int iwl_fill_paging_mem(struct iwl_fw_runtime *fwrt,
			       const struct fw_img *image)
{
	int sec_idx, idx, ret;
	u32 offset = 0;

	/*
	 * Find where the paging image starts: if CPU2 exists and is in
	 * paging format, then the image looks like this:
	 * CPU1 sections (2 or more)
	 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates CPU1 from CPU2
	 * CPU2 sections (not paged)
	 * PAGING_SEPARATOR_SECTION delimiter - separates the non-paged
	 *	CPU2 data from the CPU2 paging sections
	 * CPU2 paging CSS
	 * CPU2 paging image (including instruction and data)
	 */
	for (sec_idx = 0; sec_idx < image->num_sec; sec_idx++) {
		if (image->sec[sec_idx].offset == PAGING_SEPARATOR_SECTION) {
			sec_idx++;
			break;
		}
	}

	/*
	 * If paging is enabled there should be at least 2 more sections left
	 * (one for CSS and one for Paging data)
	 */
	if (sec_idx >= image->num_sec - 1) {
		IWL_ERR(fwrt, "Paging: Missing CSS and/or paging sections\n");
		ret = -EINVAL;
		goto err;
	}

	/* copy the CSS block to the dram */
	IWL_DEBUG_FW(fwrt, "Paging: load paging CSS to FW, sec = %d\n",
		     sec_idx);

	if (image->sec[sec_idx].len > fwrt->fw_paging_db[0].fw_paging_size) {
		IWL_ERR(fwrt, "CSS block is larger than paging size\n");
		ret = -EINVAL;
		goto err;
	}

	memcpy(page_address(fwrt->fw_paging_db[0].fw_paging_block),
	       image->sec[sec_idx].data,
	       image->sec[sec_idx].len);
	fwrt->fw_paging_db[0].fw_offs = image->sec[sec_idx].offset;
	dma_sync_single_for_device(fwrt->trans->dev,
				   fwrt->fw_paging_db[0].fw_paging_phys,
				   fwrt->fw_paging_db[0].fw_paging_size,
				   DMA_BIDIRECTIONAL);

	IWL_DEBUG_FW(fwrt,
		     "Paging: copied %d CSS bytes to first block\n",
		     fwrt->fw_paging_db[0].fw_paging_size);

	sec_idx++;

	/*
	 * Copy the paging blocks to the dram.  The loop index starts
	 * from 1 since the CSS block (index 0) was already copied to
	 * dram.  We use num_of_paging_blk + 1 to account for that.
	 */
	for (idx = 1; idx < fwrt->num_of_paging_blk + 1; idx++) {
		struct iwl_fw_paging *block = &fwrt->fw_paging_db[idx];
		int remaining = image->sec[sec_idx].len - offset;
		int len = block->fw_paging_size;

		/*
		 * For the last block, we copy all that is remaining;
		 * for all other blocks, we copy fw_paging_size at a
		 * time.
		 */
		if (idx == fwrt->num_of_paging_blk) {
			len = remaining;
			if (remaining !=
			    fwrt->num_of_pages_in_last_blk * FW_PAGING_SIZE) {
				IWL_ERR(fwrt,
					"Paging: last block contains more data than expected %d\n",
					remaining);
				ret = -EINVAL;
				goto err;
			}
		} else if (block->fw_paging_size > remaining) {
			IWL_ERR(fwrt,
				"Paging: not enough data in block %d (%d)\n",
				idx, remaining);
			ret = -EINVAL;
			goto err;
		}

		memcpy(page_address(block->fw_paging_block),
		       (const u8 *)image->sec[sec_idx].data + offset, len);
		block->fw_offs = image->sec[sec_idx].offset + offset;
		dma_sync_single_for_device(fwrt->trans->dev,
					   block->fw_paging_phys,
					   block->fw_paging_size,
					   DMA_BIDIRECTIONAL);

		IWL_DEBUG_FW(fwrt,
			     "Paging: copied %d paging bytes to block %d\n",
			     len, idx);

		offset += block->fw_paging_size;
	}

	return 0;

err:
	iwl_free_fw_paging(fwrt);
	return ret;
}

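/*
 * Allocate the paging blocks (if not already done) and fill them from the
 * given firmware image.
 */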
static int iwl_save_fw_paging(struct iwl_fw_runtime *fwrt,
			      const struct fw_img *fw)
{
	int ret;

	ret = iwl_alloc_fw_paging_mem(fwrt, fw);
	if (ret)
		return ret;

	return iwl_fill_paging_mem(fwrt, fw);
}

/* send paging cmd to FW in case CPU2 has paging image */
static int iwl_send_paging_cmd(struct iwl_fw_runtime *fwrt,
			       const struct fw_img *fw)
{
	struct iwl_fw_paging_cmd paging_cmd = {
		.flags = cpu_to_le32(PAGING_CMD_IS_SECURED |
				     PAGING_CMD_IS_ENABLED |
				     (fwrt->num_of_pages_in_last_blk <<
				      PAGING_CMD_NUM_OF_PAGES_IN_LAST_GRP_POS)),
		.block_size = cpu_to_le32(BLOCK_2_EXP_SIZE),
		.block_num = cpu_to_le32(fwrt->num_of_paging_blk),
	};
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(IWL_ALWAYS_LONG_GROUP, FW_PAGING_BLOCK_CMD),
		.len = { sizeof(paging_cmd), },
		.data = { &paging_cmd, },
	};
	int blk_idx;

	/* loop for all paging blocks + CSS block */
	for (blk_idx = 0; blk_idx < fwrt->num_of_paging_blk + 1; blk_idx++) {
		dma_addr_t addr = fwrt->fw_paging_db[blk_idx].fw_paging_phys;
		__le32 phy_addr;

		/* the address is given to the FW shifted down by PAGE_2_EXP_SIZE */
		addr = addr >> PAGE_2_EXP_SIZE;
		phy_addr = cpu_to_le32(addr);
		paging_cmd.device_phy_addr[blk_idx] = phy_addr;
	}

	return iwl_trans_send_cmd(fwrt->trans, &hcmd);
}

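/*
 * Set up firmware paging for the given ucode type: allocate and fill the
 * DRAM blocks and tell the firmware where they are via FW_PAGING_BLOCK_CMD.
 * A no-op on gen2 devices and when the image has no paging data.
 */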
int iwl_init_paging(struct iwl_fw_runtime *fwrt, enum iwl_ucode_type type)
{
	const struct fw_img *fw = &fwrt->fw->img[type];
	int ret;

	if (fwrt->trans->trans_cfg->gen2)
		return 0;

	/*
	 * Configure and operate fw paging mechanism.
	 * The driver configures the paging flow only once.
	 * The CPU2 paging image is included in the IWL_UCODE_INIT image.
	 */
	if (!fw->paging_mem_size)
		return 0;

	ret = iwl_save_fw_paging(fwrt, fw);
	if (ret) {
		IWL_ERR(fwrt, "failed to save the FW paging image\n");
		return ret;
	}

	ret = iwl_send_paging_cmd(fwrt, fw);
	if (ret) {
		IWL_ERR(fwrt, "failed to send the paging cmd\n");
		iwl_free_fw_paging(fwrt);
		return ret;
	}

	return 0;
}
IWL_EXPORT_SYMBOL(iwl_init_paging);