/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/module.h>

#include "amdgpu.h"
#include "amdgpu_ucode.h"
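/*
 * Debug helpers: each amdgpu_ucode_print_*_hdr() below dumps one firmware
 * header layout via DRM_DEBUG, always starting with the common header that
 * every microcode image carries.
 */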
static void amdgpu_ucode_print_common_hdr(const struct common_firmware_header *hdr)
{
	DRM_DEBUG("size_bytes: %u\n", le32_to_cpu(hdr->size_bytes));
	DRM_DEBUG("header_size_bytes: %u\n", le32_to_cpu(hdr->header_size_bytes));
	DRM_DEBUG("header_version_major: %u\n", le16_to_cpu(hdr->header_version_major));
	DRM_DEBUG("header_version_minor: %u\n", le16_to_cpu(hdr->header_version_minor));
	DRM_DEBUG("ip_version_major: %u\n", le16_to_cpu(hdr->ip_version_major));
	DRM_DEBUG("ip_version_minor: %u\n", le16_to_cpu(hdr->ip_version_minor));
	DRM_DEBUG("ucode_version: 0x%08x\n", le32_to_cpu(hdr->ucode_version));
	DRM_DEBUG("ucode_size_bytes: %u\n", le32_to_cpu(hdr->ucode_size_bytes));
	DRM_DEBUG("ucode_array_offset_bytes: %u\n",
		  le32_to_cpu(hdr->ucode_array_offset_bytes));
	DRM_DEBUG("crc32: 0x%08x\n", le32_to_cpu(hdr->crc32));
}
void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr)
{
	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

	DRM_DEBUG("MC\n");
	amdgpu_ucode_print_common_hdr(hdr);

	if (version_major == 1) {
		const struct mc_firmware_header_v1_0 *mc_hdr =
			container_of(hdr, struct mc_firmware_header_v1_0, header);

		DRM_DEBUG("io_debug_size_bytes: %u\n",
			  le32_to_cpu(mc_hdr->io_debug_size_bytes));
		DRM_DEBUG("io_debug_array_offset_bytes: %u\n",
			  le32_to_cpu(mc_hdr->io_debug_array_offset_bytes));
	} else {
		DRM_ERROR("Unknown MC ucode version: %u.%u\n", version_major, version_minor);
	}
}
void amdgpu_ucode_print_smc_hdr(const struct common_firmware_header *hdr)
{
	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

	DRM_DEBUG("SMC\n");
	amdgpu_ucode_print_common_hdr(hdr);

	if (version_major == 1) {
		const struct smc_firmware_header_v1_0 *smc_hdr =
			container_of(hdr, struct smc_firmware_header_v1_0, header);

		DRM_DEBUG("ucode_start_addr: %u\n", le32_to_cpu(smc_hdr->ucode_start_addr));
	} else if (version_major == 2) {
		const struct smc_firmware_header_v1_0 *v1_hdr =
			container_of(hdr, struct smc_firmware_header_v1_0, header);
		const struct smc_firmware_header_v2_0 *v2_hdr =
			container_of(v1_hdr, struct smc_firmware_header_v2_0, v1_0);

		DRM_DEBUG("ppt_offset_bytes: %u\n", le32_to_cpu(v2_hdr->ppt_offset_bytes));
		DRM_DEBUG("ppt_size_bytes: %u\n", le32_to_cpu(v2_hdr->ppt_size_bytes));
	} else {
		DRM_ERROR("Unknown SMC ucode version: %u.%u\n", version_major, version_minor);
	}
}
void amdgpu_ucode_print_gfx_hdr(const struct common_firmware_header *hdr)
{
	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

	DRM_DEBUG("GFX\n");
	amdgpu_ucode_print_common_hdr(hdr);

	if (version_major == 1) {
		const struct gfx_firmware_header_v1_0 *gfx_hdr =
			container_of(hdr, struct gfx_firmware_header_v1_0, header);

		DRM_DEBUG("ucode_feature_version: %u\n",
			  le32_to_cpu(gfx_hdr->ucode_feature_version));
		DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(gfx_hdr->jt_offset));
		DRM_DEBUG("jt_size: %u\n", le32_to_cpu(gfx_hdr->jt_size));
	} else {
		DRM_ERROR("Unknown GFX ucode version: %u.%u\n", version_major, version_minor);
	}
}
void amdgpu_ucode_print_rlc_hdr(const struct common_firmware_header *hdr)
{
	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

	DRM_DEBUG("RLC\n");
	amdgpu_ucode_print_common_hdr(hdr);

	if (version_major == 1) {
		const struct rlc_firmware_header_v1_0 *rlc_hdr =
			container_of(hdr, struct rlc_firmware_header_v1_0, header);

		DRM_DEBUG("ucode_feature_version: %u\n",
			  le32_to_cpu(rlc_hdr->ucode_feature_version));
		DRM_DEBUG("save_and_restore_offset: %u\n",
			  le32_to_cpu(rlc_hdr->save_and_restore_offset));
		DRM_DEBUG("clear_state_descriptor_offset: %u\n",
			  le32_to_cpu(rlc_hdr->clear_state_descriptor_offset));
		DRM_DEBUG("avail_scratch_ram_locations: %u\n",
			  le32_to_cpu(rlc_hdr->avail_scratch_ram_locations));
		DRM_DEBUG("master_pkt_description_offset: %u\n",
			  le32_to_cpu(rlc_hdr->master_pkt_description_offset));
	} else if (version_major == 2) {
		const struct rlc_firmware_header_v2_0 *rlc_hdr =
			container_of(hdr, struct rlc_firmware_header_v2_0, header);

		DRM_DEBUG("ucode_feature_version: %u\n",
			  le32_to_cpu(rlc_hdr->ucode_feature_version));
		DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(rlc_hdr->jt_offset));
		DRM_DEBUG("jt_size: %u\n", le32_to_cpu(rlc_hdr->jt_size));
		DRM_DEBUG("save_and_restore_offset: %u\n",
			  le32_to_cpu(rlc_hdr->save_and_restore_offset));
		DRM_DEBUG("clear_state_descriptor_offset: %u\n",
			  le32_to_cpu(rlc_hdr->clear_state_descriptor_offset));
		DRM_DEBUG("avail_scratch_ram_locations: %u\n",
			  le32_to_cpu(rlc_hdr->avail_scratch_ram_locations));
		DRM_DEBUG("reg_restore_list_size: %u\n",
			  le32_to_cpu(rlc_hdr->reg_restore_list_size));
		DRM_DEBUG("reg_list_format_start: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_format_start));
		DRM_DEBUG("reg_list_format_separate_start: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_format_separate_start));
		DRM_DEBUG("starting_offsets_start: %u\n",
			  le32_to_cpu(rlc_hdr->starting_offsets_start));
		DRM_DEBUG("reg_list_format_size_bytes: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_format_size_bytes));
		DRM_DEBUG("reg_list_format_array_offset_bytes: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_format_array_offset_bytes));
		DRM_DEBUG("reg_list_size_bytes: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_size_bytes));
		DRM_DEBUG("reg_list_array_offset_bytes: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_array_offset_bytes));
		DRM_DEBUG("reg_list_format_separate_size_bytes: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_format_separate_size_bytes));
		DRM_DEBUG("reg_list_format_separate_array_offset_bytes: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_format_separate_array_offset_bytes));
		DRM_DEBUG("reg_list_separate_size_bytes: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_separate_size_bytes));
		DRM_DEBUG("reg_list_separate_array_offset_bytes: %u\n",
			  le32_to_cpu(rlc_hdr->reg_list_separate_array_offset_bytes));
		if (version_minor == 1) {
			const struct rlc_firmware_header_v2_1 *v2_1 =
				container_of(rlc_hdr, struct rlc_firmware_header_v2_1, v2_0);
			DRM_DEBUG("reg_list_format_direct_reg_list_length: %u\n",
				  le32_to_cpu(v2_1->reg_list_format_direct_reg_list_length));
			DRM_DEBUG("save_restore_list_cntl_ucode_ver: %u\n",
				  le32_to_cpu(v2_1->save_restore_list_cntl_ucode_ver));
			DRM_DEBUG("save_restore_list_cntl_feature_ver: %u\n",
				  le32_to_cpu(v2_1->save_restore_list_cntl_feature_ver));
			DRM_DEBUG("save_restore_list_cntl_size_bytes %u\n",
				  le32_to_cpu(v2_1->save_restore_list_cntl_size_bytes));
			DRM_DEBUG("save_restore_list_cntl_offset_bytes: %u\n",
				  le32_to_cpu(v2_1->save_restore_list_cntl_offset_bytes));
			DRM_DEBUG("save_restore_list_gpm_ucode_ver: %u\n",
				  le32_to_cpu(v2_1->save_restore_list_gpm_ucode_ver));
			DRM_DEBUG("save_restore_list_gpm_feature_ver: %u\n",
				  le32_to_cpu(v2_1->save_restore_list_gpm_feature_ver));
			DRM_DEBUG("save_restore_list_gpm_size_bytes %u\n",
				  le32_to_cpu(v2_1->save_restore_list_gpm_size_bytes));
			DRM_DEBUG("save_restore_list_gpm_offset_bytes: %u\n",
				  le32_to_cpu(v2_1->save_restore_list_gpm_offset_bytes));
			DRM_DEBUG("save_restore_list_srm_ucode_ver: %u\n",
				  le32_to_cpu(v2_1->save_restore_list_srm_ucode_ver));
			DRM_DEBUG("save_restore_list_srm_feature_ver: %u\n",
				  le32_to_cpu(v2_1->save_restore_list_srm_feature_ver));
			DRM_DEBUG("save_restore_list_srm_size_bytes %u\n",
				  le32_to_cpu(v2_1->save_restore_list_srm_size_bytes));
			DRM_DEBUG("save_restore_list_srm_offset_bytes: %u\n",
				  le32_to_cpu(v2_1->save_restore_list_srm_offset_bytes));
		}
	} else {
		DRM_ERROR("Unknown RLC ucode version: %u.%u\n", version_major, version_minor);
	}
}
void amdgpu_ucode_print_sdma_hdr(const struct common_firmware_header *hdr)
{
	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

	DRM_DEBUG("SDMA\n");
	amdgpu_ucode_print_common_hdr(hdr);

	if (version_major == 1) {
		const struct sdma_firmware_header_v1_0 *sdma_hdr =
			container_of(hdr, struct sdma_firmware_header_v1_0, header);

		DRM_DEBUG("ucode_feature_version: %u\n",
			  le32_to_cpu(sdma_hdr->ucode_feature_version));
		DRM_DEBUG("ucode_change_version: %u\n",
			  le32_to_cpu(sdma_hdr->ucode_change_version));
		DRM_DEBUG("jt_offset: %u\n", le32_to_cpu(sdma_hdr->jt_offset));
		DRM_DEBUG("jt_size: %u\n", le32_to_cpu(sdma_hdr->jt_size));
		if (version_minor >= 1) {
			const struct sdma_firmware_header_v1_1 *sdma_v1_1_hdr =
				container_of(sdma_hdr, struct sdma_firmware_header_v1_1, v1_0);
			DRM_DEBUG("digest_size: %u\n", le32_to_cpu(sdma_v1_1_hdr->digest_size));
		}
	} else {
		DRM_ERROR("Unknown SDMA ucode version: %u.%u\n",
			  version_major, version_minor);
	}
}
void amdgpu_ucode_print_psp_hdr(const struct common_firmware_header *hdr)
{
	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

	DRM_DEBUG("PSP\n");
	amdgpu_ucode_print_common_hdr(hdr);

	if (version_major == 1) {
		const struct psp_firmware_header_v1_0 *psp_hdr =
			container_of(hdr, struct psp_firmware_header_v1_0, header);

		DRM_DEBUG("ucode_feature_version: %u\n",
			  le32_to_cpu(psp_hdr->ucode_feature_version));
		DRM_DEBUG("sos_offset_bytes: %u\n",
			  le32_to_cpu(psp_hdr->sos_offset_bytes));
		DRM_DEBUG("sos_size_bytes: %u\n",
			  le32_to_cpu(psp_hdr->sos_size_bytes));
		if (version_minor == 1) {
			const struct psp_firmware_header_v1_1 *psp_hdr_v1_1 =
				container_of(psp_hdr, struct psp_firmware_header_v1_1, v1_0);
			DRM_DEBUG("toc_header_version: %u\n",
				  le32_to_cpu(psp_hdr_v1_1->toc_header_version));
			DRM_DEBUG("toc_offset_bytes: %u\n",
				  le32_to_cpu(psp_hdr_v1_1->toc_offset_bytes));
			DRM_DEBUG("toc_size_bytes: %u\n",
				  le32_to_cpu(psp_hdr_v1_1->toc_size_bytes));
			DRM_DEBUG("kdb_header_version: %u\n",
				  le32_to_cpu(psp_hdr_v1_1->kdb_header_version));
			DRM_DEBUG("kdb_offset_bytes: %u\n",
				  le32_to_cpu(psp_hdr_v1_1->kdb_offset_bytes));
			DRM_DEBUG("kdb_size_bytes: %u\n",
				  le32_to_cpu(psp_hdr_v1_1->kdb_size_bytes));
		}
		if (version_minor == 2) {
			const struct psp_firmware_header_v1_2 *psp_hdr_v1_2 =
				container_of(psp_hdr, struct psp_firmware_header_v1_2, v1_0);
			DRM_DEBUG("kdb_header_version: %u\n",
				  le32_to_cpu(psp_hdr_v1_2->kdb_header_version));
			DRM_DEBUG("kdb_offset_bytes: %u\n",
				  le32_to_cpu(psp_hdr_v1_2->kdb_offset_bytes));
			DRM_DEBUG("kdb_size_bytes: %u\n",
				  le32_to_cpu(psp_hdr_v1_2->kdb_size_bytes));
		}
	} else {
		DRM_ERROR("Unknown PSP ucode version: %u.%u\n",
			  version_major, version_minor);
	}
}
void amdgpu_ucode_print_gpu_info_hdr(const struct common_firmware_header *hdr)
{
	uint16_t version_major = le16_to_cpu(hdr->header_version_major);
	uint16_t version_minor = le16_to_cpu(hdr->header_version_minor);

	DRM_DEBUG("GPU_INFO\n");
	amdgpu_ucode_print_common_hdr(hdr);

	if (version_major == 1) {
		const struct gpu_info_firmware_header_v1_0 *gpu_info_hdr =
			container_of(hdr, struct gpu_info_firmware_header_v1_0, header);

		DRM_DEBUG("version_major: %u\n",
			  le16_to_cpu(gpu_info_hdr->version_major));
		DRM_DEBUG("version_minor: %u\n",
			  le16_to_cpu(gpu_info_hdr->version_minor));
	} else {
		DRM_ERROR("Unknown gpu_info ucode version: %u.%u\n", version_major, version_minor);
	}
}
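/*
 * Sanity-check a firmware blob: the overall size recorded in its common
 * header must match the size of the image handed in by the firmware loader.
 */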
int amdgpu_ucode_validate(const struct firmware *fw)
{
	const struct common_firmware_header *hdr =
		(const struct common_firmware_header *)fw->data;

	if (fw->size == le32_to_cpu(hdr->size_bytes))
		return 0;

	return -EINVAL;
}
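/* Check whether a firmware header matches the requested major.minor version. */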
bool amdgpu_ucode_hdr_version(union amdgpu_firmware_header *hdr,
			      uint16_t hdr_major, uint16_t hdr_minor)
{
	if ((hdr->common.header_version_major == hdr_major) &&
	    (hdr->common.header_version_minor == hdr_minor))
		return true;
	return false;
}
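/*
 * Select how microcode reaches the hardware for this ASIC: written directly
 * by the driver, staged through the SMU, or loaded by the PSP. On ASICs
 * that default to PSP loading, a zero load_type falls back to direct loading.
 */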
enum amdgpu_firmware_load_type
amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
{
	switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_SI
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
	case CHIP_VERDE:
	case CHIP_OLAND:
	case CHIP_HAINAN:
		return AMDGPU_FW_LOAD_DIRECT;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_BONAIRE:
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_HAWAII:
	case CHIP_MULLINS:
		return AMDGPU_FW_LOAD_DIRECT;
#endif
	case CHIP_TOPAZ:
	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_CARRIZO:
	case CHIP_STONEY:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		return AMDGPU_FW_LOAD_SMU;
	case CHIP_VEGA10:
	case CHIP_RAVEN:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_ARCTURUS:
	case CHIP_RENOIR:
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		if (!load_type)
			return AMDGPU_FW_LOAD_DIRECT;
		else
			return AMDGPU_FW_LOAD_PSP;
	default:
		DRM_ERROR("Unknown firmware load type\n");
	}

	return AMDGPU_FW_LOAD_DIRECT;
}
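/*
 * FW_VERSION_ATTR() generates a read-only sysfs attribute whose show()
 * callback prints the given firmware version field of struct amdgpu_device
 * as a hex value.
 */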
#define FW_VERSION_ATTR(name, mode, field)				\
static ssize_t show_##name(struct device *dev,				\
			   struct device_attribute *attr,		\
			   char *buf)					\
{									\
	struct drm_device *ddev = dev_get_drvdata(dev);			\
	struct amdgpu_device *adev = ddev->dev_private;			\
									\
	return snprintf(buf, PAGE_SIZE, "0x%08x\n", adev->field);	\
}									\
static DEVICE_ATTR(name, mode, show_##name, NULL)
FW_VERSION_ATTR(vce_fw_version, 0444, vce.fw_version);
FW_VERSION_ATTR(uvd_fw_version, 0444, uvd.fw_version);
FW_VERSION_ATTR(mc_fw_version, 0444, gmc.fw_version);
FW_VERSION_ATTR(me_fw_version, 0444, gfx.me_fw_version);
FW_VERSION_ATTR(pfp_fw_version, 0444, gfx.pfp_fw_version);
FW_VERSION_ATTR(ce_fw_version, 0444, gfx.ce_fw_version);
FW_VERSION_ATTR(rlc_fw_version, 0444, gfx.rlc_fw_version);
FW_VERSION_ATTR(rlc_srlc_fw_version, 0444, gfx.rlc_srlc_fw_version);
FW_VERSION_ATTR(rlc_srlg_fw_version, 0444, gfx.rlc_srlg_fw_version);
FW_VERSION_ATTR(rlc_srls_fw_version, 0444, gfx.rlc_srls_fw_version);
FW_VERSION_ATTR(mec_fw_version, 0444, gfx.mec_fw_version);
FW_VERSION_ATTR(mec2_fw_version, 0444, gfx.mec2_fw_version);
FW_VERSION_ATTR(sos_fw_version, 0444, psp.sos_fw_version);
FW_VERSION_ATTR(asd_fw_version, 0444, psp.asd_fw_version);
FW_VERSION_ATTR(ta_ras_fw_version, 0444, psp.ta_fw_version);
FW_VERSION_ATTR(ta_xgmi_fw_version, 0444, psp.ta_fw_version);
FW_VERSION_ATTR(smc_fw_version, 0444, pm.fw_version);
FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version);
FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version);
FW_VERSION_ATTR(vcn_fw_version, 0444, vcn.fw_version);
FW_VERSION_ATTR(dmcu_fw_version, 0444, dm.dmcu_fw_version);
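/* Collect the attributes above into a sysfs group exposed as "fw_version". */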
static struct attribute *fw_attrs[] = {
	&dev_attr_vce_fw_version.attr, &dev_attr_uvd_fw_version.attr,
	&dev_attr_mc_fw_version.attr, &dev_attr_me_fw_version.attr,
	&dev_attr_pfp_fw_version.attr, &dev_attr_ce_fw_version.attr,
	&dev_attr_rlc_fw_version.attr, &dev_attr_rlc_srlc_fw_version.attr,
	&dev_attr_rlc_srlg_fw_version.attr, &dev_attr_rlc_srls_fw_version.attr,
	&dev_attr_mec_fw_version.attr, &dev_attr_mec2_fw_version.attr,
	&dev_attr_sos_fw_version.attr, &dev_attr_asd_fw_version.attr,
	&dev_attr_ta_ras_fw_version.attr, &dev_attr_ta_xgmi_fw_version.attr,
	&dev_attr_smc_fw_version.attr, &dev_attr_sdma_fw_version.attr,
	&dev_attr_sdma2_fw_version.attr, &dev_attr_vcn_fw_version.attr,
	&dev_attr_dmcu_fw_version.attr, NULL
};

static const struct attribute_group fw_attr_group = {
	.name = "fw_version",
	.attrs = fw_attrs
};
int amdgpu_ucode_sysfs_init(struct amdgpu_device *adev)
{
	return sysfs_create_group(&adev->dev->kobj, &fw_attr_group);
}

void amdgpu_ucode_sysfs_fini(struct amdgpu_device *adev)
{
	sysfs_remove_group(&adev->dev->kobj, &fw_attr_group);
}
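/*
 * Copy a single firmware image into the shared firmware buffer at
 * mc_addr/kptr. Most images are copied verbatim from the ucode array in
 * the blob; with PSP loading, MEC, DMCU/DMCUB and the RLC save/restore
 * lists are instead copied as separate sub-sections (for example the MEC
 * instructions and jump table become two entries).
 */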
static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
				       struct amdgpu_firmware_info *ucode,
				       uint64_t mc_addr, void *kptr)
{
	const struct common_firmware_header *header = NULL;
	const struct gfx_firmware_header_v1_0 *cp_hdr = NULL;
	const struct dmcu_firmware_header_v1_0 *dmcu_hdr = NULL;
	const struct dmcub_firmware_header_v1_0 *dmcub_hdr = NULL;

	if (NULL == ucode->fw)
		return 0;

	ucode->mc_addr = mc_addr;
	ucode->kaddr = kptr;

	if (ucode->ucode_id == AMDGPU_UCODE_ID_STORAGE)
		return 0;

	header = (const struct common_firmware_header *)ucode->fw->data;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
	dmcu_hdr = (const struct dmcu_firmware_header_v1_0 *)ucode->fw->data;
	dmcub_hdr = (const struct dmcub_firmware_header_v1_0 *)ucode->fw->data;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP ||
	    (ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1 &&
	     ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2 &&
	     ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1_JT &&
	     ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2_JT &&
	     ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL &&
	     ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM &&
	     ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM &&
	     ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_ERAM &&
	     ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_INTV &&
	     ucode->ucode_id != AMDGPU_UCODE_ID_DMCUB)) {
		ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes);

		memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
					      le32_to_cpu(header->ucode_array_offset_bytes)),
		       ucode->ucode_size);
	} else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1 ||
		   ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2) {
		ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) -
			le32_to_cpu(cp_hdr->jt_size) * 4;

		memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
					      le32_to_cpu(header->ucode_array_offset_bytes)),
		       ucode->ucode_size);
	} else if (ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC1_JT ||
		   ucode->ucode_id == AMDGPU_UCODE_ID_CP_MEC2_JT) {
		ucode->ucode_size = le32_to_cpu(cp_hdr->jt_size) * 4;

		memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
					      le32_to_cpu(header->ucode_array_offset_bytes) +
					      le32_to_cpu(cp_hdr->jt_offset) * 4),
		       ucode->ucode_size);
	} else if (ucode->ucode_id == AMDGPU_UCODE_ID_DMCU_ERAM) {
		ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) -
			le32_to_cpu(dmcu_hdr->intv_size_bytes);

		memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
					      le32_to_cpu(header->ucode_array_offset_bytes)),
		       ucode->ucode_size);
	} else if (ucode->ucode_id == AMDGPU_UCODE_ID_DMCU_INTV) {
		ucode->ucode_size = le32_to_cpu(dmcu_hdr->intv_size_bytes);

		memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
					      le32_to_cpu(header->ucode_array_offset_bytes) +
					      le32_to_cpu(dmcu_hdr->intv_offset_bytes)),
		       ucode->ucode_size);
	} else if (ucode->ucode_id == AMDGPU_UCODE_ID_DMCUB) {
		ucode->ucode_size = le32_to_cpu(dmcub_hdr->inst_const_bytes);
		memcpy(ucode->kaddr,
		       (void *)((uint8_t *)ucode->fw->data +
				le32_to_cpu(header->ucode_array_offset_bytes)),
		       ucode->ucode_size);
	} else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL) {
		ucode->ucode_size = adev->gfx.rlc.save_restore_list_cntl_size_bytes;
		memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_cntl,
		       ucode->ucode_size);
	} else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM) {
		ucode->ucode_size = adev->gfx.rlc.save_restore_list_gpm_size_bytes;
		memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_gpm,
		       ucode->ucode_size);
	} else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM) {
		ucode->ucode_size = adev->gfx.rlc.save_restore_list_srm_size_bytes;
		memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_srm,
		       ucode->ucode_size);
	}

	return 0;
}
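/*
 * Copy the jump table of a CP firmware image right behind its page-aligned
 * instruction section in the firmware buffer.
 */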
static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
				 uint64_t mc_addr, void *kptr)
{
	const struct gfx_firmware_header_v1_0 *header = NULL;
	const struct common_firmware_header *comm_hdr = NULL;
	uint8_t *src_addr = NULL;
	uint8_t *dst_addr = NULL;

	if (NULL == ucode->fw)
		return 0;

	comm_hdr = (const struct common_firmware_header *)ucode->fw->data;
	header = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
	dst_addr = ucode->kaddr +
		   ALIGN(le32_to_cpu(comm_hdr->ucode_size_bytes),
			 PAGE_SIZE);
	src_addr = (uint8_t *)ucode->fw->data +
		   le32_to_cpu(comm_hdr->ucode_array_offset_bytes) +
		   (le32_to_cpu(header->jt_offset) * 4);
	memcpy(dst_addr, src_addr, le32_to_cpu(header->jt_size) * 4);

	return 0;
}
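/*
 * Allocate the kernel BO backing all firmware images: VRAM for SR-IOV
 * virtual functions, GTT otherwise. Direct loading needs no staging
 * buffer, so nothing is allocated in that case.
 */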
int amdgpu_ucode_create_bo(struct amdgpu_device *adev)
{
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT) {
		amdgpu_bo_create_kernel(adev, adev->firmware.fw_size, PAGE_SIZE,
			amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
			&adev->firmware.fw_buf,
			&adev->firmware.fw_buf_mc,
			&adev->firmware.fw_buf_ptr);
		if (!adev->firmware.fw_buf) {
			dev_err(adev->dev, "failed to create kernel buffer for firmware.fw_buf\n");
			return -ENOMEM;
		} else if (amdgpu_sriov_vf(adev)) {
			memset(adev->firmware.fw_buf_ptr, 0, adev->firmware.fw_size);
		}
	}

	return 0;
}
void amdgpu_ucode_free_bo(struct amdgpu_device *adev)
{
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT)
		amdgpu_bo_free_kernel(&adev->firmware.fw_buf,
				      &adev->firmware.fw_buf_mc,
				      &adev->firmware.fw_buf_ptr);
}
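/*
 * Lay out every requested firmware image in the shared buffer at
 * page-aligned offsets, patching the CP MEC1 jump table in place when the
 * non-PSP loading path is used.
 */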
int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
{
	uint64_t fw_offset = 0;
	int i;
	struct amdgpu_firmware_info *ucode = NULL;

	/* for baremetal, the ucode is allocated in gtt, so don't need to fill the bo when reset/suspend */
	if (!amdgpu_sriov_vf(adev) && (adev->in_gpu_reset || adev->in_suspend))
		return 0;
	/*
	 * if SMU loaded firmware, it needn't add SMC, UVD, and VCE
	 * ucode info here
	 */
	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		if (amdgpu_sriov_vf(adev))
			adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM - 3;
		else
			adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM - 4;
	} else {
		adev->firmware.max_ucodes = AMDGPU_UCODE_ID_MAXIMUM;
	}

	for (i = 0; i < adev->firmware.max_ucodes; i++) {
		ucode = &adev->firmware.ucode[i];
		if (ucode->fw) {
			amdgpu_ucode_init_single_fw(adev, ucode, adev->firmware.fw_buf_mc + fw_offset,
						    adev->firmware.fw_buf_ptr + fw_offset);
			if (i == AMDGPU_UCODE_ID_CP_MEC1 &&
			    adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
				const struct gfx_firmware_header_v1_0 *cp_hdr;
				cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
				amdgpu_ucode_patch_jt(ucode, adev->firmware.fw_buf_mc + fw_offset,
						      adev->firmware.fw_buf_ptr + fw_offset);
				fw_offset += ALIGN(le32_to_cpu(cp_hdr->jt_size) << 2, PAGE_SIZE);
			}
			fw_offset += ALIGN(ucode->ucode_size, PAGE_SIZE);