/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <drm/drmP.h>

#include "amdgpu.h"
#include "amdgpu_ucode.h"
31 static void amdgpu_ucode_print_common_hdr(const struct common_firmware_header
*hdr
)
33 DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr
->size_bytes
));
34 DRM_DEBUG("header_size_bytes: %u\n", le32_to_cpu(hdr
->header_size_bytes
));
35 DRM_DEBUG("header_version_major: %u\n", le16_to_cpu(hdr
->header_version_major
));
36 DRM_DEBUG("header_version_minor: %u\n", le16_to_cpu(hdr
->header_version_minor
));
37 DRM_DEBUG("ip_version_major: %u\n", le16_to_cpu(hdr
->ip_version_major
));
38 DRM_DEBUG("ip_version_minor: %u\n", le16_to_cpu(hdr
->ip_version_minor
));
39 DRM_DEBUG("ucode_version: 0x%08x\n", le32_to_cpu(hdr
->ucode_version
));
40 DRM_DEBUG("ucode_size_bytes: %u\n", le32_to_cpu(hdr
->ucode_size_bytes
));
41 DRM_DEBUG("ucode_array_offset_bytes: %u\n",
42 le32_to_cpu(hdr
->ucode_array_offset_bytes
));
43 DRM_DEBUG("crc32: 0x%08x\n", le32_to_cpu(hdr
->crc32
));
46 void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header
*hdr
)
48 uint16_t version_major
= le16_to_cpu(hdr
->header_version_major
);
49 uint16_t version_minor
= le16_to_cpu(hdr
->header_version_minor
);
52 amdgpu_ucode_print_common_hdr(hdr
);
54 if (version_major
== 1) {
55 const struct mc_firmware_header_v1_0
*mc_hdr
=
56 container_of(hdr
, struct mc_firmware_header_v1_0
, header
);
58 DRM_DEBUG("io_debug_size_bytes: %u\n",
59 le32_to_cpu(mc_hdr
->io_debug_size_bytes
));
60 DRM_DEBUG("io_debug_array_offset_bytes: %u\n",
61 le32_to_cpu(mc_hdr
->io_debug_array_offset_bytes
));
63 DRM_ERROR("Unknown MC ucode version: %u.%u\n", version_major
, version_minor
);
67 void amdgpu_ucode_print_smc_hdr(const struct common_firmware_header
*hdr
)
69 uint16_t version_major
= le16_to_cpu(hdr
->header_version_major
);
70 uint16_t version_minor
= le16_to_cpu(hdr
->header_version_minor
);
73 amdgpu_ucode_print_common_hdr(hdr
);
75 if (version_major
== 1) {
76 const struct smc_firmware_header_v1_0
*smc_hdr
=
77 container_of(hdr
, struct smc_firmware_header_v1_0
, header
);
79 DRM_DEBUG("ucode_start_addr: %u\n", le32_to_cpu(smc_hdr
->ucode_start_addr
));
81 DRM_ERROR("Unknown SMC ucode version: %u.%u\n", version_major
, version_minor
);
85 void amdgpu_ucode_print_gfx_hdr(const struct common_firmware_header
*hdr
)
87 uint16_t version_major
= le16_to_cpu(hdr
->header_version_major
);
88 uint16_t version_minor
= le16_to_cpu(hdr
->header_version_minor
);
91 amdgpu_ucode_print_common_hdr(hdr
);
93 if (version_major
== 1) {
94 const struct gfx_firmware_header_v1_0
*gfx_hdr
=
95 container_of(hdr
, struct gfx_firmware_header_v1_0
, header
);
97 DRM_DEBUG("ucode_feature_version: %u\n",
98 le32_to_cpu(gfx_hdr
->ucode_feature_version
));
99 DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(gfx_hdr
->jt_offset
));
100 DRM_DEBUG("jt_size: %u\n", le32_to_cpu(gfx_hdr
->jt_size
));
102 DRM_ERROR("Unknown GFX ucode version: %u.%u\n", version_major
, version_minor
);
106 void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header
*hdr
)
108 uint16_t version_major
= le16_to_cpu(hdr
->header_version_major
);
109 uint16_t version_minor
= le16_to_cpu(hdr
->header_version_minor
);
112 amdgpu_ucode_print_common_hdr(hdr
);
114 if (version_major
== 1) {
115 const struct rlc_firmware_header_v1_0
*rlc_hdr
=
116 container_of(hdr
, struct rlc_firmware_header_v1_0
, header
);
118 DRM_DEBUG("ucode_feature_version: %u\n",
119 le32_to_cpu(rlc_hdr
->ucode_feature_version
));
120 DRM_DEBUG("save_and_restore_offset: %u\n",
121 le32_to_cpu(rlc_hdr
->save_and_restore_offset
));
122 DRM_DEBUG("clear_state_descriptor_offset: %u\n",
123 le32_to_cpu(rlc_hdr
->clear_state_descriptor_offset
));
124 DRM_DEBUG("avail_scratch_ram_locations: %u\n",
125 le32_to_cpu(rlc_hdr
->avail_scratch_ram_locations
));
126 DRM_DEBUG("master_pkt_description_offset: %u\n",
127 le32_to_cpu(rlc_hdr
->master_pkt_description_offset
));
128 } else if (version_major
== 2) {
129 const struct rlc_firmware_header_v2_0
*rlc_hdr
=
130 container_of(hdr
, struct rlc_firmware_header_v2_0
, header
);
132 DRM_DEBUG("ucode_feature_version: %u\n",
133 le32_to_cpu(rlc_hdr
->ucode_feature_version
));
134 DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(rlc_hdr
->jt_offset
));
135 DRM_DEBUG("jt_size: %u\n", le32_to_cpu(rlc_hdr
->jt_size
));
136 DRM_DEBUG("save_and_restore_offset: %u\n",
137 le32_to_cpu(rlc_hdr
->save_and_restore_offset
));
138 DRM_DEBUG("clear_state_descriptor_offset: %u\n",
139 le32_to_cpu(rlc_hdr
->clear_state_descriptor_offset
));
140 DRM_DEBUG("avail_scratch_ram_locations: %u\n",
141 le32_to_cpu(rlc_hdr
->avail_scratch_ram_locations
));
142 DRM_DEBUG("reg_restore_list_size: %u\n",
143 le32_to_cpu(rlc_hdr
->reg_restore_list_size
));
144 DRM_DEBUG("reg_list_format_start: %u\n",
145 le32_to_cpu(rlc_hdr
->reg_list_format_start
));
146 DRM_DEBUG("reg_list_format_separate_start: %u\n",
147 le32_to_cpu(rlc_hdr
->reg_list_format_separate_start
));
148 DRM_DEBUG("starting_offsets_start: %u\n",
149 le32_to_cpu(rlc_hdr
->starting_offsets_start
));
150 DRM_DEBUG("reg_list_format_size_bytes: %u\n",
151 le32_to_cpu(rlc_hdr
->reg_list_format_size_bytes
));
152 DRM_DEBUG("reg_list_format_array_offset_bytes: %u\n",
153 le32_to_cpu(rlc_hdr
->reg_list_format_array_offset_bytes
));
154 DRM_DEBUG("reg_list_size_bytes: %u\n",
155 le32_to_cpu(rlc_hdr
->reg_list_size_bytes
));
156 DRM_DEBUG("reg_list_array_offset_bytes: %u\n",
157 le32_to_cpu(rlc_hdr
->reg_list_array_offset_bytes
));
158 DRM_DEBUG("reg_list_format_separate_size_bytes: %u\n",
159 le32_to_cpu(rlc_hdr
->reg_list_format_separate_size_bytes
));
160 DRM_DEBUG("reg_list_format_separate_array_offset_bytes: %u\n",
161 le32_to_cpu(rlc_hdr
->reg_list_format_separate_array_offset_bytes
));
162 DRM_DEBUG("reg_list_separate_size_bytes: %u\n",
163 le32_to_cpu(rlc_hdr
->reg_list_separate_size_bytes
));
164 DRM_DEBUG("reg_list_separate_array_offset_bytes: %u\n",
165 le32_to_cpu(rlc_hdr
->reg_list_separate_array_offset_bytes
));
166 if (version_minor
== 1) {
167 const struct rlc_firmware_header_v2_1
*v2_1
=
168 container_of(rlc_hdr
, struct rlc_firmware_header_v2_1
, v2_0
);
169 DRM_DEBUG("reg_list_format_direct_reg_list_length: %u\n",
170 le32_to_cpu(v2_1
->reg_list_format_direct_reg_list_length
));
171 DRM_DEBUG("save_restore_list_cntl_ucode_ver: %u\n",
172 le32_to_cpu(v2_1
->save_restore_list_cntl_ucode_ver
));
173 DRM_DEBUG("save_restore_list_cntl_feature_ver: %u\n",
174 le32_to_cpu(v2_1
->save_restore_list_cntl_feature_ver
));
175 DRM_DEBUG("save_restore_list_cntl_size_bytes %u\n",
176 le32_to_cpu(v2_1
->save_restore_list_cntl_size_bytes
));
177 DRM_DEBUG("save_restore_list_cntl_offset_bytes: %u\n",
178 le32_to_cpu(v2_1
->save_restore_list_cntl_offset_bytes
));
179 DRM_DEBUG("save_restore_list_gpm_ucode_ver: %u\n",
180 le32_to_cpu(v2_1
->save_restore_list_gpm_ucode_ver
));
181 DRM_DEBUG("save_restore_list_gpm_feature_ver: %u\n",
182 le32_to_cpu(v2_1
->save_restore_list_gpm_feature_ver
));
183 DRM_DEBUG("save_restore_list_gpm_size_bytes %u\n",
184 le32_to_cpu(v2_1
->save_restore_list_gpm_size_bytes
));
185 DRM_DEBUG("save_restore_list_gpm_offset_bytes: %u\n",
186 le32_to_cpu(v2_1
->save_restore_list_gpm_offset_bytes
));
187 DRM_DEBUG("save_restore_list_srm_ucode_ver: %u\n",
188 le32_to_cpu(v2_1
->save_restore_list_srm_ucode_ver
));
189 DRM_DEBUG("save_restore_list_srm_feature_ver: %u\n",
190 le32_to_cpu(v2_1
->save_restore_list_srm_feature_ver
));
191 DRM_DEBUG("save_restore_list_srm_size_bytes %u\n",
192 le32_to_cpu(v2_1
->save_restore_list_srm_size_bytes
));
193 DRM_DEBUG("save_restore_list_srm_offset_bytes: %u\n",
194 le32_to_cpu(v2_1
->save_restore_list_srm_offset_bytes
));
197 DRM_ERROR("Unknown RLC ucode version: %u.%u\n", version_major
, version_minor
);
201 void amdgpu_ucode_print_sdma_hdr(const struct common_firmware_header
*hdr
)
203 uint16_t version_major
= le16_to_cpu(hdr
->header_version_major
);
204 uint16_t version_minor
= le16_to_cpu(hdr
->header_version_minor
);
207 amdgpu_ucode_print_common_hdr(hdr
);
209 if (version_major
== 1) {
210 const struct sdma_firmware_header_v1_0
*sdma_hdr
=
211 container_of(hdr
, struct sdma_firmware_header_v1_0
, header
);
213 DRM_DEBUG("ucode_feature_version: %u\n",
214 le32_to_cpu(sdma_hdr
->ucode_feature_version
));
215 DRM_DEBUG("ucode_change_version: %u\n",
216 le32_to_cpu(sdma_hdr
->ucode_change_version
));
217 DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(sdma_hdr
->jt_offset
));
218 DRM_DEBUG("jt_size: %u\n", le32_to_cpu(sdma_hdr
->jt_size
));
219 if (version_minor
>= 1) {
220 const struct sdma_firmware_header_v1_1
*sdma_v1_1_hdr
=
221 container_of(sdma_hdr
, struct sdma_firmware_header_v1_1
, v1_0
);
222 DRM_DEBUG("digest_size: %u\n", le32_to_cpu(sdma_v1_1_hdr
->digest_size
));
225 DRM_ERROR("Unknown SDMA ucode version: %u.%u\n",
226 version_major
, version_minor
);
230 void amdgpu_ucode_print_gpu_info_hdr(const struct common_firmware_header
*hdr
)
232 uint16_t version_major
= le16_to_cpu(hdr
->header_version_major
);
233 uint16_t version_minor
= le16_to_cpu(hdr
->header_version_minor
);
235 DRM_DEBUG("GPU_INFO\n");
236 amdgpu_ucode_print_common_hdr(hdr
);
238 if (version_major
== 1) {
239 const struct gpu_info_firmware_header_v1_0
*gpu_info_hdr
=
240 container_of(hdr
, struct gpu_info_firmware_header_v1_0
, header
);
242 DRM_DEBUG("version_major: %u\n",
243 le16_to_cpu(gpu_info_hdr
->version_major
));
244 DRM_DEBUG("version_minor: %u\n",
245 le16_to_cpu(gpu_info_hdr
->version_minor
));
247 DRM_ERROR("Unknown gpu_info ucode version: %u.%u\n", version_major
, version_minor
);
251 int amdgpu_ucode_validate(const struct firmware
*fw
)
253 const struct common_firmware_header
*hdr
=
254 (const struct common_firmware_header
*)fw
->data
;
256 if (fw
->size
== le32_to_cpu(hdr
->size_bytes
))
262 bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header
*hdr
,
263 uint16_t hdr_major
, uint16_t hdr_minor
)
265 if ((hdr
->common
.header_version_major
== hdr_major
) &&
266 (hdr
->common
.header_version_minor
== hdr_minor
))
271 enum amdgpu_firmware_load_type
272 amdgpu_ucode_get_load_type(struct amdgpu_device
*adev
, int load_type
)
274 switch (adev
->asic_type
) {
275 #ifdef CONFIG_DRM_AMDGPU_SI
281 return AMDGPU_FW_LOAD_DIRECT
;
283 #ifdef CONFIG_DRM_AMDGPU_CIK
289 return AMDGPU_FW_LOAD_DIRECT
;
300 return AMDGPU_FW_LOAD_SMU
;
306 return AMDGPU_FW_LOAD_DIRECT
;
308 return AMDGPU_FW_LOAD_PSP
;
310 DRM_ERROR("Unknown firmware load type\n");
313 return AMDGPU_FW_LOAD_DIRECT
;
316 static int amdgpu_ucode_init_single_fw(struct amdgpu_device
*adev
,
317 struct amdgpu_firmware_info
*ucode
,
318 uint64_t mc_addr
, void *kptr
)
320 const struct common_firmware_header
*header
= NULL
;
321 const struct gfx_firmware_header_v1_0
*cp_hdr
= NULL
;
322 const struct dmcu_firmware_header_v1_0
*dmcu_hdr
= NULL
;
324 if (NULL
== ucode
->fw
)
327 ucode
->mc_addr
= mc_addr
;
330 if (ucode
->ucode_id
== AMDGPU_UCODE_ID_STORAGE
)
333 header
= (const struct common_firmware_header
*)ucode
->fw
->data
;
334 cp_hdr
= (const struct gfx_firmware_header_v1_0
*)ucode
->fw
->data
;
335 dmcu_hdr
= (const struct dmcu_firmware_header_v1_0
*)ucode
->fw
->data
;
337 if (adev
->firmware
.load_type
!= AMDGPU_FW_LOAD_PSP
||
338 (ucode
->ucode_id
!= AMDGPU_UCODE_ID_CP_MEC1
&&
339 ucode
->ucode_id
!= AMDGPU_UCODE_ID_CP_MEC2
&&
340 ucode
->ucode_id
!= AMDGPU_UCODE_ID_CP_MEC1_JT
&&
341 ucode
->ucode_id
!= AMDGPU_UCODE_ID_CP_MEC2_JT
&&
342 ucode
->ucode_id
!= AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
&&
343 ucode
->ucode_id
!= AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
&&
344 ucode
->ucode_id
!= AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
&&
345 ucode
->ucode_id
!= AMDGPU_UCODE_ID_DMCU_ERAM
&&
346 ucode
->ucode_id
!= AMDGPU_UCODE_ID_DMCU_INTV
)) {
347 ucode
->ucode_size
= le32_to_cpu(header
->ucode_size_bytes
);
349 memcpy(ucode
->kaddr
, (void *)((uint8_t *)ucode
->fw
->data
+
350 le32_to_cpu(header
->ucode_array_offset_bytes
)),
352 } else if (ucode
->ucode_id
== AMDGPU_UCODE_ID_CP_MEC1
||
353 ucode
->ucode_id
== AMDGPU_UCODE_ID_CP_MEC2
) {
354 ucode
->ucode_size
= le32_to_cpu(header
->ucode_size_bytes
) -
355 le32_to_cpu(cp_hdr
->jt_size
) * 4;
357 memcpy(ucode
->kaddr
, (void *)((uint8_t *)ucode
->fw
->data
+
358 le32_to_cpu(header
->ucode_array_offset_bytes
)),
360 } else if (ucode
->ucode_id
== AMDGPU_UCODE_ID_CP_MEC1_JT
||
361 ucode
->ucode_id
== AMDGPU_UCODE_ID_CP_MEC2_JT
) {
362 ucode
->ucode_size
= le32_to_cpu(cp_hdr
->jt_size
) * 4;
364 memcpy(ucode
->kaddr
, (void *)((uint8_t *)ucode
->fw
->data
+
365 le32_to_cpu(header
->ucode_array_offset_bytes
) +
366 le32_to_cpu(cp_hdr
->jt_offset
) * 4),
368 } else if (ucode
->ucode_id
== AMDGPU_UCODE_ID_DMCU_ERAM
) {
369 ucode
->ucode_size
= le32_to_cpu(header
->ucode_size_bytes
) -
370 le32_to_cpu(dmcu_hdr
->intv_size_bytes
);
372 memcpy(ucode
->kaddr
, (void *)((uint8_t *)ucode
->fw
->data
+
373 le32_to_cpu(header
->ucode_array_offset_bytes
)),
375 } else if (ucode
->ucode_id
== AMDGPU_UCODE_ID_DMCU_INTV
) {
376 ucode
->ucode_size
= le32_to_cpu(dmcu_hdr
->intv_size_bytes
);
378 memcpy(ucode
->kaddr
, (void *)((uint8_t *)ucode
->fw
->data
+
379 le32_to_cpu(header
->ucode_array_offset_bytes
) +
380 le32_to_cpu(dmcu_hdr
->intv_offset_bytes
)),
382 } else if (ucode
->ucode_id
== AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL
) {
383 ucode
->ucode_size
= adev
->gfx
.rlc
.save_restore_list_cntl_size_bytes
;
384 memcpy(ucode
->kaddr
, adev
->gfx
.rlc
.save_restore_list_cntl
,
386 } else if (ucode
->ucode_id
== AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM
) {
387 ucode
->ucode_size
= adev
->gfx
.rlc
.save_restore_list_gpm_size_bytes
;
388 memcpy(ucode
->kaddr
, adev
->gfx
.rlc
.save_restore_list_gpm
,
390 } else if (ucode
->ucode_id
== AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM
) {
391 ucode
->ucode_size
= adev
->gfx
.rlc
.save_restore_list_srm_size_bytes
;
392 memcpy(ucode
->kaddr
, adev
->gfx
.rlc
.save_restore_list_srm
,
399 static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info
*ucode
,
400 uint64_t mc_addr
, void *kptr
)
402 const struct gfx_firmware_header_v1_0
*header
= NULL
;
403 const struct common_firmware_header
*comm_hdr
= NULL
;
404 uint8_t* src_addr
= NULL
;
405 uint8_t* dst_addr
= NULL
;
407 if (NULL
== ucode
->fw
)
410 comm_hdr
= (const struct common_firmware_header
*)ucode
->fw
->data
;
411 header
= (const struct gfx_firmware_header_v1_0
*)ucode
->fw
->data
;
412 dst_addr
= ucode
->kaddr
+
413 ALIGN(le32_to_cpu(comm_hdr
->ucode_size_bytes
),
415 src_addr
= (uint8_t *)ucode
->fw
->data
+
416 le32_to_cpu(comm_hdr
->ucode_array_offset_bytes
) +
417 (le32_to_cpu(header
->jt_offset
) * 4);
418 memcpy(dst_addr
, src_addr
, le32_to_cpu(header
->jt_size
) * 4);
423 int amdgpu_ucode_create_bo(struct amdgpu_device
*adev
)
425 if (adev
->firmware
.load_type
!= AMDGPU_FW_LOAD_DIRECT
) {
426 amdgpu_bo_create_kernel(adev
, adev
->firmware
.fw_size
, PAGE_SIZE
,
427 amdgpu_sriov_vf(adev
) ? AMDGPU_GEM_DOMAIN_VRAM
: AMDGPU_GEM_DOMAIN_GTT
,
428 &adev
->firmware
.fw_buf
,
429 &adev
->firmware
.fw_buf_mc
,
430 &adev
->firmware
.fw_buf_ptr
);
431 if (!adev
->firmware
.fw_buf
) {
432 dev_err(adev
->dev
, "failed to create kernel buffer for firmware.fw_buf\n");
434 } else if (amdgpu_sriov_vf(adev
)) {
435 memset(adev
->firmware
.fw_buf_ptr
, 0, adev
->firmware
.fw_size
);
441 void amdgpu_ucode_free_bo(struct amdgpu_device
*adev
)
443 if (adev
->firmware
.load_type
!= AMDGPU_FW_LOAD_DIRECT
)
444 amdgpu_bo_free_kernel(&adev
->firmware
.fw_buf
,
445 &adev
->firmware
.fw_buf_mc
,
446 &adev
->firmware
.fw_buf_ptr
);
449 int amdgpu_ucode_init_bo(struct amdgpu_device
*adev
)
451 uint64_t fw_offset
= 0;
453 struct amdgpu_firmware_info
*ucode
= NULL
;
455 /* for baremetal, the ucode is allocated in gtt, so don't need to fill the bo when reset/suspend */
456 if (!amdgpu_sriov_vf(adev
) && (adev
->in_gpu_reset
|| adev
->in_suspend
))
459 * if SMU loaded firmware, it needn't add SMC, UVD, and VCE
462 if (adev
->firmware
.load_type
!= AMDGPU_FW_LOAD_PSP
) {
463 if (amdgpu_sriov_vf(adev
))
464 adev
->firmware
.max_ucodes
= AMDGPU_UCODE_ID_MAXIMUM
- 3;
466 adev
->firmware
.max_ucodes
= AMDGPU_UCODE_ID_MAXIMUM
- 4;
468 adev
->firmware
.max_ucodes
= AMDGPU_UCODE_ID_MAXIMUM
;
471 for (i
= 0; i
< adev
->firmware
.max_ucodes
; i
++) {
472 ucode
= &adev
->firmware
.ucode
[i
];
474 amdgpu_ucode_init_single_fw(adev
, ucode
, adev
->firmware
.fw_buf_mc
+ fw_offset
,
475 adev
->firmware
.fw_buf_ptr
+ fw_offset
);
476 if (i
== AMDGPU_UCODE_ID_CP_MEC1
&&
477 adev
->firmware
.load_type
!= AMDGPU_FW_LOAD_PSP
) {
478 const struct gfx_firmware_header_v1_0
*cp_hdr
;
479 cp_hdr
= (const struct gfx_firmware_header_v1_0
*)ucode
->fw
->data
;
480 amdgpu_ucode_patch_jt(ucode
, adev
->firmware
.fw_buf_mc
+ fw_offset
,
481 adev
->firmware
.fw_buf_ptr
+ fw_offset
);
482 fw_offset
+= ALIGN(le32_to_cpu(cp_hdr
->jt_size
) << 2, PAGE_SIZE
);
484 fw_offset
+= ALIGN(ucode
->ucode_size
, PAGE_SIZE
);