/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "atom.h"

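/*
 * The master data table is a list of 16-bit offsets, so the index of a given
 * table is its field offset within struct atom_master_list_of_data_tables_v2_1
 * divided by sizeof(uint16_t).
 */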
#define get_index_into_master_table(master_table, table_name) (offsetof(struct master_table, table_name) / sizeof(uint16_t))

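/*
 * Check the firmwareinfo table for the GPU virtualization (SR-IOV)
 * capability bit.
 */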
bool amdgpu_atomfirmware_gpu_supports_virtualization(struct amdgpu_device *adev)
{
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						firmwareinfo);
	uint16_t data_offset;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
					  NULL, NULL, &data_offset)) {
		struct atom_firmware_info_v3_1 *firmware_info =
			(struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
							   data_offset);

		if (le32_to_cpu(firmware_info->firmware_capability) &
		    ATOM_FIRMWARE_CAP_GPU_VIRTUALIZATION)
			return true;
	}
	return false;
}

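/*
 * Cache the start address of the BIOS scratch registers reported by the
 * firmwareinfo table.
 */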
void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev)
{
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						firmwareinfo);
	uint16_t data_offset;

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, NULL,
					  NULL, NULL, &data_offset)) {
		struct atom_firmware_info_v3_1 *firmware_info =
			(struct atom_firmware_info_v3_1 *)(adev->mode_info.atom_context->bios +
							   data_offset);

		adev->bios_scratch_reg_offset =
			le32_to_cpu(firmware_info->bios_scratch_reg_startaddr);
	}
}

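/*
 * Reserve the VRAM requested by the firmware (e.g. for SR-IOV messaging) and
 * allocate the atom interpreter scratch buffer, sized from the
 * vram_usagebyfirmware table or falling back to a 20kb default.
 */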
int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev)
{
	struct atom_context *ctx = adev->mode_info.atom_context;
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						vram_usagebyfirmware);
	struct vram_usagebyfirmware_v2_1 *firmware_usage;
	uint32_t start_addr, size;
	uint16_t data_offset;
	int usage_bytes = 0;

	if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
		firmware_usage = (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset);
		DRM_DEBUG("atom firmware requested %08x %dkb fw %dkb drv\n",
			  le32_to_cpu(firmware_usage->start_address_in_kb),
			  le16_to_cpu(firmware_usage->used_by_firmware_in_kb),
			  le16_to_cpu(firmware_usage->used_by_driver_in_kb));

		start_addr = le32_to_cpu(firmware_usage->start_address_in_kb);
		size = le16_to_cpu(firmware_usage->used_by_firmware_in_kb);

		if ((uint32_t)(start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) ==
		    (uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION <<
			       ATOM_VRAM_OPERATION_FLAGS_SHIFT)) {
			/* Firmware requests VRAM reservation for SR-IOV */
			adev->fw_vram_usage.start_offset = (start_addr &
				(~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10;
			adev->fw_vram_usage.size = size << 10;
			/* Use the default scratch size */
			usage_bytes = 0;
		} else {
			usage_bytes = le16_to_cpu(firmware_usage->used_by_driver_in_kb) << 10;
		}
	}
	ctx->scratch_size_bytes = 0;
	if (usage_bytes == 0)
		usage_bytes = 20 * 1024;
	/* allocate some scratch memory */
	ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
	if (!ctx->scratch)
		return -ENOMEM;
	ctx->scratch_size_bytes = usage_bytes;
	return 0;
}

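/*
 * Versioned table layouts are wrapped in unions so the parsed data can be
 * accessed through the revision-specific view.
 */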
union igp_info {
	struct atom_integrated_system_info_v1_11 v11;
};

union umc_info {
	struct atom_umc_info_v3_1 v31;
};

union vram_info {
	struct atom_vram_info_header_v2_3 v23;
};

/*
 * Return vram width from integrated system info table, if available,
 * or 0 if not.
 */
int amdgpu_atomfirmware_get_vram_width(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						integratedsysteminfo);
	u16 data_offset, size;
	union igp_info *igp_info;
	u8 frev, crev;

	/* get any igp specific overrides */
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, &size,
					  &frev, &crev, &data_offset)) {
		igp_info = (union igp_info *)
			(mode_info->atom_context->bios + data_offset);
		return igp_info->v11.umachannelnumber * 64;
	}

	return 0;
}

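/*
 * Translate the atomfirmware memory type into an AMDGPU_VRAM_TYPE_* value,
 * using the APU (DDR) or dGPU (GDDR5/HBM) encoding as appropriate.
 */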
static int convert_atom_mem_type_to_vram_type(struct amdgpu_device *adev,
					      int atom_mem_type)
{
	int vram_type;

	if (adev->flags & AMD_IS_APU) {
		switch (atom_mem_type) {
		case Ddr2MemType:
		case LpDdr2MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR2;
			break;
		case Ddr3MemType:
		case LpDdr3MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR3;
			break;
		case Ddr4MemType:
		case LpDdr4MemType:
			vram_type = AMDGPU_VRAM_TYPE_DDR4;
			break;
		default:
			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
			break;
		}
	} else {
		switch (atom_mem_type) {
		case ATOM_DGPU_VRAM_TYPE_GDDR5:
			vram_type = AMDGPU_VRAM_TYPE_GDDR5;
			break;
		case ATOM_DGPU_VRAM_TYPE_HBM2:
			vram_type = AMDGPU_VRAM_TYPE_HBM;
			break;
		default:
			vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
			break;
		}
	}

	return vram_type;
}

/*
 * Return vram type from either integrated system info table
 * or umc info table, if available, or 0 (TYPE_UNKNOWN) if not
 */
int amdgpu_atomfirmware_get_vram_type(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	u16 data_offset, size;
	union igp_info *igp_info;
	union vram_info *vram_info;
	u8 frev, crev;
	u8 mem_type;

	if (adev->flags & AMD_IS_APU)
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    integratedsysteminfo);
	else
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    vram_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context,
					  index, &size,
					  &frev, &crev, &data_offset)) {
		if (adev->flags & AMD_IS_APU) {
			igp_info = (union igp_info *)
				(mode_info->atom_context->bios + data_offset);
			mem_type = igp_info->v11.memorytype;
			return convert_atom_mem_type_to_vram_type(adev, mem_type);
		} else {
			vram_info = (union vram_info *)
				(mode_info->atom_context->bios + data_offset);
			mem_type = vram_info->v23.vram_module[0].memory_type;
			return convert_atom_mem_type_to_vram_type(adev, mem_type);
		}
	}

	return 0;
}

union firmware_info {
	struct atom_firmware_info_v3_1 v31;
};

union smu_info {
	struct atom_smu_info_v3_1 v31;
};

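/*
 * Pull the bootup engine/memory clocks and the PLL limits from the
 * firmwareinfo, smu_info and umc_info tables.
 */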
int amdgpu_atomfirmware_get_clock_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct amdgpu_pll *spll = &adev->clock.spll;
	struct amdgpu_pll *mpll = &adev->clock.mpll;
	uint8_t frev, crev;
	uint16_t data_offset;
	int ret = -EINVAL, index;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union firmware_info *firmware_info =
			(union firmware_info *)(mode_info->atom_context->bios +
						data_offset);

		adev->clock.default_sclk =
			le32_to_cpu(firmware_info->v31.bootup_sclk_in10khz);
		adev->clock.default_mclk =
			le32_to_cpu(firmware_info->v31.bootup_mclk_in10khz);

		adev->pm.current_sclk = adev->clock.default_sclk;
		adev->pm.current_mclk = adev->clock.default_mclk;

		/* not technically a clock, but... */
		adev->mode_info.firmware_flags =
			le32_to_cpu(firmware_info->v31.firmware_capability);

		ret = 0;
	}

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    smu_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union smu_info *smu_info =
			(union smu_info *)(mode_info->atom_context->bios +
					   data_offset);

		/* system clock */
		spll->reference_freq = le32_to_cpu(smu_info->v31.core_refclk_10khz);

		spll->reference_div = 0;
		spll->min_post_div = 1;
		spll->max_post_div = 1;
		spll->min_ref_div = 2;
		spll->max_ref_div = 0xff;
		spll->min_feedback_div = 4;
		spll->max_feedback_div = 0xff;

		ret = 0;
	}

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    umc_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union umc_info *umc_info =
			(union umc_info *)(mode_info->atom_context->bios +
					   data_offset);

		/* memory clock */
		mpll->reference_freq = le32_to_cpu(umc_info->v31.mem_refclk_10khz);

		mpll->reference_div = 0;
		mpll->min_post_div = 1;
		mpll->max_post_div = 1;
		mpll->min_ref_div = 2;
		mpll->max_ref_div = 0xff;
		mpll->min_feedback_div = 4;
		mpll->max_feedback_div = 0xff;

		ret = 0;
	}

	return ret;
}

union gfx_info {
	struct atom_gfx_info_v2_4 v24;
};

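/*
 * Fill in the gfx configuration (SE/SH/CU counts, GS parameters, wavefront
 * and LDS sizes) from the gfx_info table.
 */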
int amdgpu_atomfirmware_get_gfx_info(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index;
	uint8_t frev, crev;
	uint16_t data_offset;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    gfx_info);
	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		union gfx_info *gfx_info = (union gfx_info *)
			(mode_info->atom_context->bios + data_offset);

		adev->gfx.config.max_shader_engines = gfx_info->v24.gc_num_se;
		adev->gfx.config.max_cu_per_sh = gfx_info->v24.gc_num_cu_per_sh;
		adev->gfx.config.max_sh_per_se = gfx_info->v24.gc_num_sh_per_se;
		adev->gfx.config.max_backends_per_se = gfx_info->v24.gc_num_rb_per_se;
		adev->gfx.config.max_texture_channel_caches = gfx_info->v24.gc_num_tccs;
		adev->gfx.config.max_gprs = le16_to_cpu(gfx_info->v24.gc_num_gprs);
		adev->gfx.config.max_gs_threads = gfx_info->v24.gc_num_max_gs_thds;
		adev->gfx.config.gs_vgt_table_depth = gfx_info->v24.gc_gs_table_depth;
		adev->gfx.config.gs_prim_buffer_depth =
			le16_to_cpu(gfx_info->v24.gc_gsprim_buff_depth);
		adev->gfx.config.double_offchip_lds_buf =
			gfx_info->v24.gc_double_offchip_lds_buffer;
		adev->gfx.cu_info.wave_front_size = le16_to_cpu(gfx_info->v24.gc_wave_size);
		adev->gfx.cu_info.max_waves_per_simd = le16_to_cpu(gfx_info->v24.gc_max_waves_per_simd);
		adev->gfx.cu_info.max_scratch_slots_per_cu = gfx_info->v24.gc_max_scratch_slots_per_cu;
		adev->gfx.cu_info.lds_size = le16_to_cpu(gfx_info->v24.gc_lds_size);