/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#define SMU_11_0_PARTIAL_PPTABLE

#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
#include "smu_v11_0_pptable.h"
#include "soc15_common.h"
#include "atom.h"
#include "amdgpu_ras.h"

#include "asic_reg/thm/thm_11_0_2_offset.h"
#include "asic_reg/thm/thm_11_0_2_sh_mask.h"
#include "asic_reg/mp/mp_11_0_offset.h"
#include "asic_reg/mp/mp_11_0_sh_mask.h"
#include "asic_reg/nbio/nbio_7_4_offset.h"
#include "asic_reg/nbio/nbio_7_4_sh_mask.h"
#include "asic_reg/smuio/smuio_11_0_0_offset.h"
#include "asic_reg/smuio/smuio_11_0_0_sh_mask.h"

MODULE_FIRMWARE("amdgpu/vega20_smc.bin");
MODULE_FIRMWARE("amdgpu/arcturus_smc.bin");
MODULE_FIRMWARE("amdgpu/navi10_smc.bin");
MODULE_FIRMWARE("amdgpu/navi14_smc.bin");
MODULE_FIRMWARE("amdgpu/navi12_smc.bin");

#define SMU11_VOLTAGE_SCALE 4
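/*
 * Message-passing note (inferred from the register usage in the helpers
 * below, not from hardware documentation): the driver talks to the SMU
 * through the MP1 C2PMSG mailbox registers. C2PMSG_66 carries the message
 * index, C2PMSG_82 carries the single 32-bit argument/response payload,
 * and C2PMSG_90 carries the response status (0x1 on success).
 */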
static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
					      uint16_t msg)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
	return 0;
}

int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
	struct amdgpu_device *adev = smu->adev;

	*arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
	return 0;
}

static int smu_v11_0_wait_for_response(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t cur_value, i, timeout = adev->usec_timeout * 10;

	for (i = 0; i < timeout; i++) {
		cur_value = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
		if ((cur_value & MP1_C2PMSG_90__CONTENT_MASK) != 0)
			return cur_value == 0x1 ? 0 : -EIO;

		udelay(1);
	}

	/* timeout means wrong logic */
	return -ETIME;
}

int
smu_v11_0_send_msg_with_param(struct smu_context *smu,
			      enum smu_message_type msg,
			      uint32_t param)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0, index = 0;

	index = smu_msg_get_index(smu, msg);
	if (index < 0)
		return index;

	ret = smu_v11_0_wait_for_response(smu);
	if (ret) {
		pr_err("Msg issuing pre-check failed and "
		       "SMU may be not in the right state!\n");
		return ret;
	}

	/* clear the status register, then write argument and message index */
	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);

	WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);

	smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);

	ret = smu_v11_0_wait_for_response(smu);
	if (ret)
		pr_err("failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
		       smu_get_message_name(smu, msg), index, param, ret);

	return ret;
}
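/*
 * A typical request/response round trip built from the helpers above
 * (illustrative sketch only; real callers use the smu_send_smc_msg*()
 * wrappers from smu_internal.h):
 *
 *	ret = smu_v11_0_send_msg_with_param(smu, SMU_MSG_GetDriverIfVersion, 0);
 *	if (!ret)
 *		ret = smu_v11_0_read_arg(smu, &if_version);
 */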
int smu_v11_0_init_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const char *chip_name;
	char fw_name[30];
	int err = 0;
	const struct smc_firmware_header_v1_0 *hdr;
	const struct common_firmware_header *header;
	struct amdgpu_firmware_info *ucode = NULL;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
		chip_name = "vega20";
		break;
	case CHIP_ARCTURUS:
		chip_name = "arcturus";
		break;
	case CHIP_NAVI10:
		chip_name = "navi10";
		break;
	case CHIP_NAVI14:
		chip_name = "navi14";
		break;
	case CHIP_NAVI12:
		chip_name = "navi12";
		break;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);

	err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->pm.fw);
	if (err)
		goto out;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	amdgpu_ucode_print_smc_hdr(&hdr->header);
	adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version);

	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
		ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
		ucode->ucode_id = AMDGPU_UCODE_ID_SMC;
		ucode->fw = adev->pm.fw;
		header = (const struct common_firmware_header *)ucode->fw->data;
		adev->firmware.fw_size +=
			ALIGN(le32_to_cpu(header->ucode_size_bytes), PAGE_SIZE);
	}

out:
	if (err) {
		DRM_ERROR("smu_v11_0: Failed to load firmware \"%s\"\n",
			  fw_name);
		release_firmware(adev->pm.fw);
		adev->pm.fw = NULL;
	}
	return err;
}
int smu_v11_0_load_microcode(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const uint32_t *src;
	const struct smc_firmware_header_v1_0 *hdr;
	uint32_t addr_start = MP1_SRAM;
	uint32_t i;
	uint32_t mp1_fw_flags;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	src = (const uint32_t *)(adev->pm.fw->data +
		le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	/* copy the firmware image into MP1 SRAM, one dword at a time */
	for (i = 1; i < MP1_SMC_SIZE/4 - 1; i++) {
		WREG32_PCIE(addr_start, src[i]);
		addr_start += 4;
	}

	/* pulse MP1 reset so it starts executing the new firmware */
	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		1 & MP1_SMN_PUB_CTRL__RESET_MASK);
	WREG32_PCIE(MP1_Public | (smnMP1_PUB_CTRL & 0xffffffff),
		1 & ~MP1_SMN_PUB_CTRL__RESET_MASK);

	for (i = 0; i < adev->usec_timeout; i++) {
		mp1_fw_flags = RREG32_PCIE(MP1_Public |
			(smnMP1_FIRMWARE_FLAGS & 0xffffffff));
		if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
		    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
			break;
		udelay(1);
	}

	if (i == adev->usec_timeout)
		return -ETIME;

	return 0;
}
int smu_v11_0_check_fw_status(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t mp1_fw_flags;

	mp1_fw_flags = RREG32_PCIE(MP1_Public |
				   (smnMP1_FIRMWARE_FLAGS & 0xffffffff));

	if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
	    MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
		return 0;

	return -EIO;
}
int smu_v11_0_check_fw_version(struct smu_context *smu)
{
	uint32_t if_version = 0xff, smu_version = 0xff;
	uint16_t smu_major;
	uint8_t smu_minor, smu_debug;
	int ret = 0;

	ret = smu_get_smc_version(smu, &if_version, &smu_version);
	if (ret)
		return ret;

	smu_major = (smu_version >> 16) & 0xffff;
	smu_minor = (smu_version >> 8) & 0xff;
	smu_debug = (smu_version >> 0) & 0xff;

	switch (smu->adev->asic_type) {
	case CHIP_VEGA20:
		smu->smc_if_version = SMU11_DRIVER_IF_VERSION_VG20;
		break;
	case CHIP_ARCTURUS:
		smu->smc_if_version = SMU11_DRIVER_IF_VERSION_ARCT;
		break;
	case CHIP_NAVI10:
		smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV10;
		break;
	case CHIP_NAVI14:
		smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV14;
		break;
	default:
		pr_err("smu unsupported asic type:%d.\n", smu->adev->asic_type);
		smu->smc_if_version = SMU11_DRIVER_IF_VERSION_INV;
		break;
	}

	/*
	 * 1. if_version mismatch is not critical as our fw is designed
	 * to be backward compatible.
	 * 2. New fw usually brings some optimizations. But that's visible
	 * only on the paired driver.
	 * Considering above, we just leave user a warning message instead
	 * of halting driver loading.
	 */
	if (if_version != smu->smc_if_version) {
		pr_info("smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
			"smu fw version = 0x%08x (%d.%d.%d)\n",
			smu->smc_if_version, if_version,
			smu_version, smu_major, smu_minor, smu_debug);
		pr_warn("SMU driver if version not matched\n");
	}

	return ret;
}
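/*
 * Worked example of the version unpacking above (hypothetical value):
 * smu_version = 0x002A3400 decodes as major 0x002A (42), minor 0x34 (52),
 * debug 0x00, i.e. firmware 42.52.0.
 */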
static int smu_v11_0_set_pptable_v2_0(struct smu_context *smu, void **table, uint32_t *size)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t ppt_offset_bytes;
	const struct smc_firmware_header_v2_0 *v2;

	v2 = (const struct smc_firmware_header_v2_0 *) adev->pm.fw->data;

	ppt_offset_bytes = le32_to_cpu(v2->ppt_offset_bytes);
	*size = le32_to_cpu(v2->ppt_size_bytes);
	*table = (uint8_t *)v2 + ppt_offset_bytes;

	return 0;
}
static int smu_v11_0_set_pptable_v2_1(struct smu_context *smu, void **table,
				      uint32_t *size, uint32_t pptable_id)
{
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v2_1 *v2_1;
	struct smc_soft_pptable_entry *entries;
	uint32_t pptable_count = 0;
	int i = 0;

	v2_1 = (const struct smc_firmware_header_v2_1 *) adev->pm.fw->data;
	entries = (struct smc_soft_pptable_entry *)
		((uint8_t *)v2_1 + le32_to_cpu(v2_1->pptable_entry_offset));
	pptable_count = le32_to_cpu(v2_1->pptable_count);
	for (i = 0; i < pptable_count; i++) {
		if (le32_to_cpu(entries[i].id) == pptable_id) {
			*table = ((uint8_t *)v2_1 + le32_to_cpu(entries[i].ppt_offset_bytes));
			*size = le32_to_cpu(entries[i].ppt_size_bytes);
			break;
		}
	}

	if (i == pptable_count)
		return -EINVAL;

	return 0;
}
int smu_v11_0_setup_pptable(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	const struct smc_firmware_header_v1_0 *hdr;
	int ret, index;
	uint32_t size = 0;
	uint16_t atom_table_size;
	uint8_t frev, crev;
	void *table;
	uint16_t version_major, version_minor;

	hdr = (const struct smc_firmware_header_v1_0 *) adev->pm.fw->data;
	version_major = le16_to_cpu(hdr->header.header_version_major);
	version_minor = le16_to_cpu(hdr->header.header_version_minor);
	if (version_major == 2 && smu->smu_table.boot_values.pp_table_id > 0) {
		pr_info("use driver provided pptable %d\n", smu->smu_table.boot_values.pp_table_id);
		switch (version_minor) {
		case 0:
			ret = smu_v11_0_set_pptable_v2_0(smu, &table, &size);
			break;
		case 1:
			ret = smu_v11_0_set_pptable_v2_1(smu, &table, &size,
							 smu->smu_table.boot_values.pp_table_id);
			break;
		default:
			ret = -EINVAL;
			break;
		}
		if (ret)
			return ret;

	} else {
		pr_info("use vbios provided pptable\n");
		index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
						    powerplayinfo);

		ret = smu_get_atom_data_table(smu, index, &atom_table_size, &frev, &crev,
					      (uint8_t **)&table);
		if (ret)
			return ret;
		size = atom_table_size;
	}

	if (!smu->smu_table.power_play_table)
		smu->smu_table.power_play_table = table;
	if (!smu->smu_table.power_play_table_size)
		smu->smu_table.power_play_table_size = size;

	return 0;
}
static int smu_v11_0_init_dpm_context(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	if (smu_dpm->dpm_context || smu_dpm->dpm_context_size != 0)
		return -EINVAL;

	return smu_alloc_dpm_context(smu);
}

static int smu_v11_0_fini_dpm_context(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm = &smu->smu_dpm;

	if (!smu_dpm->dpm_context || smu_dpm->dpm_context_size == 0)
		return -EINVAL;

	kfree(smu_dpm->dpm_context);
	kfree(smu_dpm->golden_dpm_context);
	kfree(smu_dpm->dpm_current_power_state);
	kfree(smu_dpm->dpm_request_power_state);
	smu_dpm->dpm_context = NULL;
	smu_dpm->golden_dpm_context = NULL;
	smu_dpm->dpm_context_size = 0;
	smu_dpm->dpm_current_power_state = NULL;
	smu_dpm->dpm_request_power_state = NULL;

	return 0;
}
int smu_v11_0_init_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = NULL;
	int ret = 0;

	if (smu_table->tables)
		return -EINVAL;

	tables = kcalloc(SMU_TABLE_COUNT, sizeof(struct smu_table),
			 GFP_KERNEL);
	if (!tables)
		return -ENOMEM;

	smu_table->tables = tables;

	ret = smu_tables_init(smu, tables);
	if (ret)
		return ret;

	ret = smu_v11_0_init_dpm_context(smu);
	if (ret)
		return ret;

	return 0;
}

int smu_v11_0_fini_smc_tables(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	int ret = 0;

	if (!smu_table->tables)
		return -EINVAL;

	kfree(smu_table->tables);
	kfree(smu_table->metrics_table);
	kfree(smu_table->watermarks_table);
	smu_table->tables = NULL;
	smu_table->metrics_table = NULL;
	smu_table->watermarks_table = NULL;
	smu_table->metrics_time = 0;

	ret = smu_v11_0_fini_dpm_context(smu);
	if (ret)
		return ret;

	return 0;
}
int smu_v11_0_init_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	if (!smu->pm_enabled)
		return 0;
	if (smu_power->power_context || smu_power->power_context_size != 0)
		return -EINVAL;

	smu_power->power_context = kzalloc(sizeof(struct smu_11_0_dpm_context),
					   GFP_KERNEL);
	if (!smu_power->power_context)
		return -ENOMEM;
	smu_power->power_context_size = sizeof(struct smu_11_0_dpm_context);

	return 0;
}

int smu_v11_0_fini_power(struct smu_context *smu)
{
	struct smu_power_context *smu_power = &smu->smu_power;

	if (!smu->pm_enabled)
		return 0;
	if (!smu_power->power_context || smu_power->power_context_size == 0)
		return -EINVAL;

	kfree(smu_power->power_context);
	smu_power->power_context = NULL;
	smu_power->power_context_size = 0;

	return 0;
}
int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu)
{
	int ret, index;
	uint16_t size;
	uint8_t frev, crev;
	struct atom_common_table_header *header;
	struct atom_firmware_info_v3_3 *v_3_3;
	struct atom_firmware_info_v3_1 *v_3_1;

	index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1,
					    firmwareinfo);

	ret = smu_get_atom_data_table(smu, index, &size, &frev, &crev,
				      (uint8_t **)&header);
	if (ret)
		return ret;

	if (header->format_revision != 3) {
		pr_err("unknown atom_firmware_info version! for smu11\n");
		return -EINVAL;
	}

	switch (header->content_revision) {
	case 0:
	case 1:
	case 2:
		v_3_1 = (struct atom_firmware_info_v3_1 *)header;
		smu->smu_table.boot_values.revision = v_3_1->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_1->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_1->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_1->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_1->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_1->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_1->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_1->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = 0;
		break;
	case 3:
	default:
		v_3_3 = (struct atom_firmware_info_v3_3 *)header;
		smu->smu_table.boot_values.revision = v_3_3->firmware_revision;
		smu->smu_table.boot_values.gfxclk = v_3_3->bootup_sclk_in10khz;
		smu->smu_table.boot_values.uclk = v_3_3->bootup_mclk_in10khz;
		smu->smu_table.boot_values.socclk = 0;
		smu->smu_table.boot_values.dcefclk = 0;
		smu->smu_table.boot_values.vddc = v_3_3->bootup_vddc_mv;
		smu->smu_table.boot_values.vddci = v_3_3->bootup_vddci_mv;
		smu->smu_table.boot_values.mvddc = v_3_3->bootup_mvddc_mv;
		smu->smu_table.boot_values.vdd_gfx = v_3_3->bootup_vddgfx_mv;
		smu->smu_table.boot_values.cooling_id = v_3_3->coolingsolution_id;
		smu->smu_table.boot_values.pp_table_id = v_3_3->pplib_pptable_id;
	}

	smu->smu_table.boot_values.format_revision = header->format_revision;
	smu->smu_table.boot_values.content_revision = header->content_revision;

	return 0;
}
int smu_v11_0_get_clk_info_from_vbios(struct smu_context *smu)
{
	int ret, index;
	struct amdgpu_device *adev = smu->adev;
	struct atom_get_smu_clock_info_parameters_v3_1 input = {0};
	struct atom_get_smu_clock_info_output_parameters_v3_1 *output;

	input.clk_id = SMU11_SYSPLL0_SOCCLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.socclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	memset(&input, 0, sizeof(input));
	input.clk_id = SMU11_SYSPLL0_DCEFCLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.dcefclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	memset(&input, 0, sizeof(input));
	input.clk_id = SMU11_SYSPLL0_ECLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.eclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	memset(&input, 0, sizeof(input));
	input.clk_id = SMU11_SYSPLL0_VCLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.vclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	memset(&input, 0, sizeof(input));
	input.clk_id = SMU11_SYSPLL0_DCLK_ID;
	input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
	index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
					    getsmuclockinfo);

	ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
					(uint32_t *)&input);
	if (ret)
		return -EINVAL;

	output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
	smu->smu_table.boot_values.dclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;

	if ((smu->smu_table.boot_values.format_revision == 3) &&
	    (smu->smu_table.boot_values.content_revision >= 2)) {
		memset(&input, 0, sizeof(input));
		input.clk_id = SMU11_SYSPLL1_0_FCLK_ID;
		input.syspll_id = SMU11_SYSPLL1_2_ID;
		input.command = GET_SMU_CLOCK_INFO_V3_1_GET_CLOCK_FREQ;
		index = get_index_into_master_table(atom_master_list_of_command_functions_v2_1,
						    getsmuclockinfo);

		ret = amdgpu_atom_execute_table(adev->mode_info.atom_context, index,
						(uint32_t *)&input);
		if (ret)
			return -EINVAL;

		output = (struct atom_get_smu_clock_info_output_parameters_v3_1 *)&input;
		smu->smu_table.boot_values.fclk = le32_to_cpu(output->atom_smu_outputclkfreq.smu_clock_freq_hz) / 10000;
	}

	return 0;
}
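/*
 * The atom interface reports smu_clock_freq_hz in Hz; dividing by 10000
 * converts to the same 10 kHz units as the bootup values above, e.g.
 * 960000000 Hz / 10000 = 96000 (960 MHz).
 */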
int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	int ret = 0;
	uint64_t address;
	uint32_t address_low, address_high;

	if (memory_pool->size == 0 || memory_pool->cpu_addr == NULL)
		return ret;

	address = (uintptr_t)memory_pool->cpu_addr;
	address_high = (uint32_t)upper_32_bits(address);
	address_low = (uint32_t)lower_32_bits(address);

	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_SetSystemVirtualDramAddrHigh,
					  address_high);
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_SetSystemVirtualDramAddrLow,
					  address_low);
	if (ret)
		return ret;

	address = memory_pool->mc_address;
	address_high = (uint32_t)upper_32_bits(address);
	address_low = (uint32_t)lower_32_bits(address);

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
					  address_high);
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
					  address_low);
	if (ret)
		return ret;
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
					  (uint32_t)memory_pool->size);
	if (ret)
		return ret;

	return ret;
}
int smu_v11_0_check_pptable(struct smu_context *smu)
{
	int ret;

	ret = smu_check_powerplay_table(smu);
	return ret;
}

int smu_v11_0_parse_pptable(struct smu_context *smu)
{
	int ret;

	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_table *table = &table_context->tables[SMU_TABLE_PPTABLE];

	if (table_context->driver_pptable)
		return -EINVAL;

	table_context->driver_pptable = kzalloc(table->size, GFP_KERNEL);

	if (!table_context->driver_pptable)
		return -ENOMEM;

	ret = smu_store_powerplay_table(smu);
	if (ret)
		return -EINVAL;

	ret = smu_append_powerplay_table(smu);

	return ret;
}

int smu_v11_0_populate_smc_pptable(struct smu_context *smu)
{
	int ret;

	ret = smu_set_default_dpm_table(smu);

	return ret;
}
int smu_v11_0_write_pptable(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;
	int ret = 0;

	ret = smu_update_table(smu, SMU_TABLE_PPTABLE, 0,
			       table_context->driver_pptable, true);

	return ret;
}

int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
{
	int ret;

	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_SetMinDeepSleepDcefclk, clk);
	if (ret)
		pr_err("SMU11 attempt to set divider for DCEFCLK Failed!");

	return ret;
}
int smu_v11_0_set_min_dcef_deep_sleep(struct smu_context *smu)
{
	struct smu_table_context *table_context = &smu->smu_table;

	if (!smu->pm_enabled)
		return 0;
	if (!table_context)
		return -EINVAL;

	return smu_v11_0_set_deep_sleep_dcefclk(smu, table_context->boot_values.dcefclk / 100);
}
int smu_v11_0_set_driver_table_location(struct smu_context *smu)
{
	struct smu_table *driver_table = &smu->smu_table.driver_table;
	int ret = 0;

	if (driver_table->mc_address) {
		ret = smu_send_smc_msg_with_param(smu,
				SMU_MSG_SetDriverDramAddrHigh,
				upper_32_bits(driver_table->mc_address));
		if (!ret)
			ret = smu_send_smc_msg_with_param(smu,
					SMU_MSG_SetDriverDramAddrLow,
					lower_32_bits(driver_table->mc_address));
	}

	return ret;
}
int smu_v11_0_set_tool_table_location(struct smu_context *smu)
{
	int ret = 0;
	struct smu_table *tool_table = &smu->smu_table.tables[SMU_TABLE_PMSTATUSLOG];

	if (tool_table->mc_address) {
		ret = smu_send_smc_msg_with_param(smu,
				SMU_MSG_SetToolsDramAddrHigh,
				upper_32_bits(tool_table->mc_address));
		if (!ret)
			ret = smu_send_smc_msg_with_param(smu,
					SMU_MSG_SetToolsDramAddrLow,
					lower_32_bits(tool_table->mc_address));
	}

	return ret;
}
int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return ret;

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count);
	return ret;
}
int smu_v11_0_set_allowed_mask(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t feature_mask[2];

	mutex_lock(&feature->mutex);
	if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
		goto failed;

	bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
					  feature_mask[1]);
	if (ret)
		goto failed;

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
					  feature_mask[0]);
	if (ret)
		goto failed;

failed:
	mutex_unlock(&feature->mutex);
	return ret;
}
int smu_v11_0_get_enabled_mask(struct smu_context *smu,
			       uint32_t *feature_mask, uint32_t num)
{
	uint32_t feature_mask_high = 0, feature_mask_low = 0;
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;

	if (!feature_mask || num < 2)
		return -EINVAL;

	if (bitmap_empty(feature->enabled, feature->feature_num)) {
		ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
		if (ret)
			return ret;
		ret = smu_read_smc_arg(smu, &feature_mask_high);
		if (ret)
			return ret;

		ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
		if (ret)
			return ret;
		ret = smu_read_smc_arg(smu, &feature_mask_low);
		if (ret)
			return ret;

		feature_mask[0] = feature_mask_low;
		feature_mask[1] = feature_mask_high;
	} else {
		bitmap_copy((unsigned long *)feature_mask, feature->enabled,
			    feature->feature_num);
	}

	return ret;
}
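/*
 * The SMU tracks features in a single 64-bit mask; since each mailbox
 * transfer moves only 32 bits, the mask is read here (and written in
 * smu_v11_0_set_allowed_mask() above) as separate Low/High halves.
 */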
int smu_v11_0_system_features_control(struct smu_context *smu,
				      bool en)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_mask[2];
	int ret = 0;

	ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
				     SMU_MSG_DisableAllSmuFeatures));
	if (ret)
		return ret;

	if (en) {
		ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
		if (ret)
			return ret;

		bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
			    feature->feature_num);
		bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
			    feature->feature_num);
	} else {
		bitmap_zero(feature->enabled, feature->feature_num);
		bitmap_zero(feature->supported, feature->feature_num);
	}

	return ret;
}
int smu_v11_0_notify_display_change(struct smu_context *smu)
{
	int ret = 0;

	if (!smu->pm_enabled)
		return ret;

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
	    smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1);

	return ret;
}
static int
smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
				    enum smu_clk_type clock_select)
{
	int ret = 0;
	int clk_id;

	if (!smu->pm_enabled)
		return ret;

	if ((smu_msg_get_index(smu, SMU_MSG_GetDcModeMaxDpmFreq) < 0) ||
	    (smu_msg_get_index(smu, SMU_MSG_GetMaxDpmFreq) < 0))
		return 0;

	clk_id = smu_clk_get_index(smu, clock_select);
	if (clk_id < 0)
		return -EINVAL;

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
					  clk_id << 16);
	if (ret) {
		pr_err("[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
		return ret;
	}

	ret = smu_read_smc_arg(smu, clock);
	if (ret)
		return ret;

	if (*clock != 0)
		return 0;

	/* if DC limit is zero, return AC limit */
	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
					  clk_id << 16);
	if (ret) {
		pr_err("[GetMaxSustainableClock] failed to get max AC clock from SMC!");
		return ret;
	}

	ret = smu_read_smc_arg(smu, clock);

	return ret;
}
int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
{
	struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks;
	int ret = 0;

	max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks),
					 GFP_KERNEL);
	if (!max_sustainable_clocks)
		return -ENOMEM;

	smu->smu_table.max_sustainable_clocks = (void *)max_sustainable_clocks;

	max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
	max_sustainable_clocks->soc_clock = smu->smu_table.boot_values.socclk / 100;
	max_sustainable_clocks->dcef_clock = smu->smu_table.boot_values.dcefclk / 100;
	max_sustainable_clocks->display_clock = 0xFFFFFFFF;
	max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
	max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->uclock),
							  SMU_UCLK);
		if (ret) {
			pr_err("[%s] failed to get max UCLK from SMC!",
			       __func__);
			return ret;
		}
	}

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->soc_clock),
							  SMU_SOCCLK);
		if (ret) {
			pr_err("[%s] failed to get max SOCCLK from SMC!",
			       __func__);
			return ret;
		}
	}

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->dcef_clock),
							  SMU_DCEFCLK);
		if (ret) {
			pr_err("[%s] failed to get max DCEFCLK from SMC!",
			       __func__);
			return ret;
		}

		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->display_clock),
							  SMU_DISPCLK);
		if (ret) {
			pr_err("[%s] failed to get max DISPCLK from SMC!",
			       __func__);
			return ret;
		}
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->phy_clock),
							  SMU_PHYCLK);
		if (ret) {
			pr_err("[%s] failed to get max PHYCLK from SMC!",
			       __func__);
			return ret;
		}
		ret = smu_v11_0_get_max_sustainable_clock(smu,
							  &(max_sustainable_clocks->pixel_clock),
							  SMU_PIXCLK);
		if (ret) {
			pr_err("[%s] failed to get max PIXCLK from SMC!",
			       __func__);
			return ret;
		}
	}

	if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
		max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;

	return 0;
}
uint32_t smu_v11_0_get_max_power_limit(struct smu_context *smu) {
	uint32_t od_limit, max_power_limit;
	struct smu_11_0_powerplay_table *powerplay_table = NULL;
	struct smu_table_context *table_context = &smu->smu_table;
	powerplay_table = table_context->power_play_table;

	max_power_limit = smu_get_pptable_power_limit(smu);

	if (!max_power_limit) {
		// If we couldn't get the table limit, fall back on first-read value
		if (!smu->default_power_limit)
			smu->default_power_limit = smu->power_limit;
		max_power_limit = smu->default_power_limit;
	}

	if (smu->od_enabled) {
		od_limit = le32_to_cpu(powerplay_table->overdrive_table.max[SMU_11_0_ODSETTING_POWERPERCENTAGE]);

		pr_debug("ODSETTING_POWERPERCENTAGE: %d (default: %d)\n", od_limit, smu->default_power_limit);

		max_power_limit *= (100 + od_limit);
		max_power_limit /= 100;
	}

	return max_power_limit;
}
int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
{
	int ret = 0;
	uint32_t max_power_limit;

	max_power_limit = smu_v11_0_get_max_power_limit(smu);

	if (n > max_power_limit) {
		pr_err("New power limit (%d) is over the max allowed %d\n",
		       n, max_power_limit);
		return -EINVAL;
	}

	if (n == 0)
		n = smu->default_power_limit;

	if (!smu_feature_is_enabled(smu, SMU_FEATURE_PPT_BIT)) {
		pr_err("Setting new power limit is not supported!\n");
		return -EOPNOTSUPP;
	}

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n);
	if (ret) {
		pr_err("[%s] Set power limit Failed!\n", __func__);
		return ret;
	}
	smu->power_limit = n;

	return 0;
}
int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
				   enum smu_clk_type clk_id,
				   uint32_t *value)
{
	int ret = 0;
	uint32_t freq = 0;
	int asic_clk_id;

	if (clk_id >= SMU_CLK_COUNT || !value)
		return -EINVAL;

	asic_clk_id = smu_clk_get_index(smu, clk_id);
	if (asic_clk_id < 0)
		return -EINVAL;

	/* if the GetDpmClockFreq message is unavailable, read the current clock from SmuMetrics_t */
	if (smu_msg_get_index(smu, SMU_MSG_GetDpmClockFreq) < 0)
		ret = smu_get_current_clk_freq_by_table(smu, clk_id, &freq);
	else {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmClockFreq,
						  (asic_clk_id << 16));
		if (ret)
			return ret;

		ret = smu_read_smc_arg(smu, &freq);
		if (ret)
			return ret;
	}

	freq *= 100;
	*value = freq;

	return ret;
}
/* default min/max temperature policy, in millidegrees Celsius */
static struct smu_temperature_range smu11_thermal_policy[] =
{
	{-273150,  99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
	{ 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000},
};

static int smu_v11_0_set_thermal_range(struct smu_context *smu,
				       struct smu_temperature_range range)
{
	struct amdgpu_device *adev = smu->adev;
	int low = SMU_THERMAL_MINIMUM_ALERT_TEMP;
	int high = SMU_THERMAL_MAXIMUM_ALERT_TEMP;
	uint32_t val;
	struct smu_table_context *table_context = &smu->smu_table;
	struct smu_11_0_powerplay_table *powerplay_table = table_context->power_play_table;

	low = max(SMU_THERMAL_MINIMUM_ALERT_TEMP,
		  range.min / SMU_TEMPERATURE_UNITS_PER_CENTIGRADES);
	high = min((uint16_t)SMU_THERMAL_MAXIMUM_ALERT_TEMP, powerplay_table->software_shutdown_temp);

	if (low > high)
		return -EINVAL;

	val = RREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, MAX_IH_CREDIT, 5);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_IH_HW_ENA, 1);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTH_MASK, 0);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, THERM_INTL_MASK, 0);
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTH, (high & 0xff));
	val = REG_SET_FIELD(val, THM_THERMAL_INT_CTRL, DIG_THERM_INTL, (low & 0xff));
	val = val & (~THM_THERMAL_INT_CTRL__THERM_TRIGGER_MASK_MASK);

	WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_CTRL, val);

	return 0;
}
static int smu_v11_0_enable_thermal_alert(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t val = 0;

	val |= (1 << THM_THERMAL_INT_ENA__THERM_INTH_CLR__SHIFT);
	val |= (1 << THM_THERMAL_INT_ENA__THERM_INTL_CLR__SHIFT);
	val |= (1 << THM_THERMAL_INT_ENA__THERM_TRIGGER_CLR__SHIFT);

	WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, val);

	return 0;
}
int smu_v11_0_start_thermal_control(struct smu_context *smu)
{
	int ret = 0;
	struct smu_temperature_range range;
	struct amdgpu_device *adev = smu->adev;

	if (!smu->pm_enabled)
		return ret;

	memcpy(&range, &smu11_thermal_policy[0], sizeof(struct smu_temperature_range));

	ret = smu_get_thermal_temperature_range(smu, &range);
	if (ret)
		return ret;

	if (smu->smu_table.thermal_controller_type) {
		ret = smu_v11_0_set_thermal_range(smu, range);
		if (ret)
			return ret;

		ret = smu_v11_0_enable_thermal_alert(smu);
		if (ret)
			return ret;

		ret = smu_set_thermal_fan_table(smu);
		if (ret)
			return ret;
	}

	adev->pm.dpm.thermal.min_temp = range.min;
	adev->pm.dpm.thermal.max_temp = range.max;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max;
	adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max;
	adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
	adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;

	return ret;
}
int smu_v11_0_stop_thermal_control(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15(THM, 0, mmTHM_THERMAL_INT_ENA, 0);

	return 0;
}

static uint16_t convert_to_vddc(uint8_t vid)
{
	return (uint16_t) ((6200 - (vid * 25)) / SMU11_VOLTAGE_SCALE);
}
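/*
 * SVI2 VID decoding example: vid = 0 gives (6200 - 0) / 4 = 1550 mV and
 * vid = 0x40 (64) gives (6200 - 1600) / 4 = 1150 mV, i.e. each VID step
 * is 6.25 mV below a 1.55 V ceiling.
 */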
static int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t vdd = 0, val_vid = 0;

	if (!value)
		return -EINVAL;

	/* read the SVI0 plane0 VDDCOR telemetry VID */
	val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
		SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
		SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;

	vdd = (uint32_t)convert_to_vddc((uint8_t)val_vid);

	*value = vdd;

	return 0;
}
int smu_v11_0_read_sensor(struct smu_context *smu,
			  enum amd_pp_sensors sensor,
			  void *data, uint32_t *size)
{
	int ret = 0;

	if (!data || !size)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = smu_get_current_clk_freq(smu, SMU_UCLK, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = smu_get_current_clk_freq(smu, SMU_GFXCLK, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		ret = smu_v11_0_get_gfx_vdd(smu, (uint32_t *)data);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
		*(uint32_t *)data = 0;
		*size = 4;
		break;
	default:
		ret = smu_common_read_sensor(smu, sensor, data, size);
		break;
	}

	if (ret)
		*size = 0;

	return ret;
}
int
smu_v11_0_display_clock_voltage_request(struct smu_context *smu,
					struct pp_display_clock_request
					*clock_req)
{
	enum amd_pp_clock_type clk_type = clock_req->clock_type;
	int ret = 0;
	enum smu_clk_type clk_select = 0;
	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;

	if (!smu->pm_enabled)
		return -EINVAL;

	if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) ||
	    smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT)) {
		switch (clk_type) {
		case amd_pp_dcef_clock:
			clk_select = SMU_DCEFCLK;
			break;
		case amd_pp_disp_clock:
			clk_select = SMU_DISPCLK;
			break;
		case amd_pp_pixel_clock:
			clk_select = SMU_PIXCLK;
			break;
		case amd_pp_phy_clock:
			clk_select = SMU_PHYCLK;
			break;
		case amd_pp_mem_clock:
			clk_select = SMU_UCLK;
			break;
		default:
			pr_info("[%s] Invalid Clock Type!", __func__);
			ret = -EINVAL;
			break;
		}

		if (ret)
			goto failed;

		if (clk_select == SMU_UCLK && smu->disable_uclk_switch)
			return 0;

		ret = smu_set_hard_freq_range(smu, clk_select, clk_freq, 0);

		if (clk_select == SMU_UCLK)
			smu->hard_min_uclk_req_from_dal = clk_freq;
	}

failed:
	return ret;
}
int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
{
	int ret = 0;
	struct amdgpu_device *adev = smu->adev;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
			return 0;
		if (enable)
			ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
		else
			ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
		break;
	default:
		break;
	}

	return ret;
}
uint32_t
smu_v11_0_get_fan_control_mode(struct smu_context *smu)
{
	if (!smu_feature_is_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return AMD_FAN_CTRL_MANUAL;
	else
		return AMD_FAN_CTRL_AUTO;
}

static int
smu_v11_0_auto_fan_control(struct smu_context *smu, bool auto_fan_control)
{
	int ret = 0;

	if (!smu_feature_is_supported(smu, SMU_FEATURE_FAN_CONTROL_BIT))
		return 0;

	ret = smu_feature_set_enabled(smu, SMU_FEATURE_FAN_CONTROL_BIT, auto_fan_control);
	if (ret)
		pr_err("[%s]%s smc FAN CONTROL feature failed!",
		       __func__, (auto_fan_control ? "Start" : "Stop"));

	return ret;
}
static int
smu_v11_0_set_fan_static_mode(struct smu_context *smu, uint32_t mode)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
				   CG_FDO_CTRL2, TMIN, 0));
	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL2,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL2),
				   CG_FDO_CTRL2, FDO_PWM_MODE, mode));

	return 0;
}
int
smu_v11_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t duty100, duty;
	uint64_t tmp64;

	if (speed > 100)
		speed = 100;

	if (smu_v11_0_auto_fan_control(smu, 0))
		return -EINVAL;

	duty100 = REG_GET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL1),
				CG_FDO_CTRL1, FMAX_DUTY100);
	if (!duty100)
		return -EINVAL;

	tmp64 = (uint64_t)speed * duty100;
	do_div(tmp64, 100);
	duty = (uint32_t)tmp64;

	WREG32_SOC15(THM, 0, mmCG_FDO_CTRL0,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_FDO_CTRL0),
				   CG_FDO_CTRL0, FDO_STATIC_DUTY, duty));

	return smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC);
}
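/*
 * Duty-cycle example: with FMAX_DUTY100 = 255 and speed = 40 (percent),
 * duty = 40 * 255 / 100 = 102, which is then programmed as the static
 * fan duty above.
 */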
int
smu_v11_0_set_fan_control_mode(struct smu_context *smu,
			       uint32_t mode)
{
	int ret = 0;

	switch (mode) {
	case AMD_FAN_CTRL_NONE:
		ret = smu_v11_0_set_fan_speed_percent(smu, 100);
		break;
	case AMD_FAN_CTRL_MANUAL:
		ret = smu_v11_0_auto_fan_control(smu, 0);
		break;
	case AMD_FAN_CTRL_AUTO:
		ret = smu_v11_0_auto_fan_control(smu, 1);
		break;
	default:
		break;
	}

	if (ret) {
		pr_err("[%s]Set fan control mode failed!", __func__);
		return -EINVAL;
	}

	return ret;
}
int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
				uint32_t speed)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;
	uint32_t tach_period, crystal_clock_freq;

	if (!speed)
		return -EINVAL;

	ret = smu_v11_0_auto_fan_control(smu, 0);
	if (ret)
		return ret;

	crystal_clock_freq = amdgpu_asic_get_xclk(adev);
	tach_period = 60 * crystal_clock_freq * 10000 / (8 * speed);
	WREG32_SOC15(THM, 0, mmCG_TACH_CTRL,
		     REG_SET_FIELD(RREG32_SOC15(THM, 0, mmCG_TACH_CTRL),
				   CG_TACH_CTRL, TARGET_PERIOD,
				   tach_period));

	ret = smu_v11_0_set_fan_static_mode(smu, FDO_PWM_MODE_STATIC_RPM);

	return ret;
}
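/*
 * Tachometer note: amdgpu_asic_get_xclk() returns the reference clock in
 * 10 kHz units, so crystal_clock_freq * 10000 is in Hz; the formula above
 * converts the requested RPM into a target pulse period assuming 8 tach
 * pulses per revolution (an assumption encoded in the divisor, not
 * verified against hardware documentation here).
 */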
int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
			      uint32_t pstate)
{
	int ret = 0;
	ret = smu_send_smc_msg_with_param(smu,
					  SMU_MSG_SetXgmiMode,
					  pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3);
	return ret;
}

#define THM_11_0__SRCID__THM_DIG_THERM_L2H	0	/* ASIC_TEMP > CG_THERMAL_INT.DIG_THERM_INTH */
#define THM_11_0__SRCID__THM_DIG_THERM_H2L	1	/* ASIC_TEMP < CG_THERMAL_INT.DIG_THERM_INTL */
static int smu_v11_0_irq_process(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	uint32_t client_id = entry->client_id;
	uint32_t src_id = entry->src_id;

	if (client_id == SOC15_IH_CLIENTID_THM) {
		switch (src_id) {
		case THM_11_0__SRCID__THM_DIG_THERM_L2H:
			pr_warn("GPU over temperature range detected on PCIe %d:%d.%d!\n",
				PCI_BUS_NUM(adev->pdev->devfn),
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));
			break;
		case THM_11_0__SRCID__THM_DIG_THERM_H2L:
			pr_warn("GPU under temperature range detected on PCIe %d:%d.%d!\n",
				PCI_BUS_NUM(adev->pdev->devfn),
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));
			break;
		default:
			pr_warn("GPU under temperature range unknown src id (%d), detected on PCIe %d:%d.%d!\n",
				src_id,
				PCI_BUS_NUM(adev->pdev->devfn),
				PCI_SLOT(adev->pdev->devfn),
				PCI_FUNC(adev->pdev->devfn));
			break;
		}
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs smu_v11_0_irq_funcs =
{
	.process = smu_v11_0_irq_process,
};
int smu_v11_0_register_irq_handler(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_irq_src *irq_src = smu->irq_source;
	int ret = 0;

	/* already registered */
	if (irq_src)
		return 0;

	irq_src = kzalloc(sizeof(struct amdgpu_irq_src), GFP_KERNEL);
	if (!irq_src)
		return -ENOMEM;
	smu->irq_source = irq_src;

	irq_src->funcs = &smu_v11_0_irq_funcs;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_L2H,
				irq_src);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_THM,
				THM_11_0__SRCID__THM_DIG_THERM_H2L,
				irq_src);
	if (ret)
		return ret;

	return ret;
}
*smu
,
1594 struct pp_smu_nv_clock_table
*max_clocks
)
1596 struct smu_table_context
*table_context
= &smu
->smu_table
;
1597 struct smu_11_0_max_sustainable_clocks
*sustainable_clocks
= NULL
;
1599 if (!max_clocks
|| !table_context
->max_sustainable_clocks
)
1602 sustainable_clocks
= table_context
->max_sustainable_clocks
;
1604 max_clocks
->dcfClockInKhz
=
1605 (unsigned int) sustainable_clocks
->dcef_clock
* 1000;
1606 max_clocks
->displayClockInKhz
=
1607 (unsigned int) sustainable_clocks
->display_clock
* 1000;
1608 max_clocks
->phyClockInKhz
=
1609 (unsigned int) sustainable_clocks
->phy_clock
* 1000;
1610 max_clocks
->pixelClockInKhz
=
1611 (unsigned int) sustainable_clocks
->pixel_clock
* 1000;
1612 max_clocks
->uClockInKhz
=
1613 (unsigned int) sustainable_clocks
->uclock
* 1000;
1614 max_clocks
->socClockInKhz
=
1615 (unsigned int) sustainable_clocks
->soc_clock
* 1000;
1616 max_clocks
->dscClockInKhz
= 0;
1617 max_clocks
->dppClockInKhz
= 0;
1618 max_clocks
->fabricClockInKhz
= 0;
1623 int smu_v11_0_set_azalia_d3_pme(struct smu_context
*smu
)
1627 ret
= smu_send_smc_msg(smu
, SMU_MSG_BacoAudioD3PME
);
1632 static int smu_v11_0_baco_set_armd3_sequence(struct smu_context
*smu
, enum smu_v11_0_baco_seq baco_seq
)
1634 return smu_send_smc_msg_with_param(smu
, SMU_MSG_ArmD3
, baco_seq
);
bool smu_v11_0_baco_is_support(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	uint32_t val;
	bool baco_support;

	mutex_lock(&smu_baco->mutex);
	baco_support = smu_baco->platform_support;
	mutex_unlock(&smu_baco->mutex);

	if (!baco_support)
		return false;

	/* Arcturus does not support this bit mask */
	if (smu_feature_is_supported(smu, SMU_FEATURE_BACO_BIT) &&
	    !smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT))
		return false;

	val = RREG32_SOC15(NBIO, 0, mmRCC_BIF_STRAP0);
	if (val & RCC_BIF_STRAP0__STRAP_PX_CAPABLE_MASK)
		return true;

	return false;
}
smu_v11_0_baco_get_state(struct smu_context
*smu
)
1665 struct smu_baco_context
*smu_baco
= &smu
->smu_baco
;
1666 enum smu_baco_state baco_state
;
1668 mutex_lock(&smu_baco
->mutex
);
1669 baco_state
= smu_baco
->state
;
1670 mutex_unlock(&smu_baco
->mutex
);
int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
{
	struct smu_baco_context *smu_baco = &smu->smu_baco;
	struct amdgpu_device *adev = smu->adev;
	struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);
	uint32_t bif_doorbell_intr_cntl;
	uint32_t data;
	int ret = 0;

	if (smu_v11_0_baco_get_state(smu) == state)
		return 0;

	mutex_lock(&smu_baco->mutex);

	bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);

	if (state == SMU_BACO_STATE_ENTER) {
		bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
						       BIF_DOORBELL_INT_CNTL,
						       DOORBELL_INTERRUPT_DISABLE, 1);
		WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);

		if (!ras || !ras->supported) {
			data = RREG32_SOC15(THM, 0, mmTHM_BACO_CNTL);
			data |= 0x80000000;
			WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);

			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0);
		} else {
			ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1);
		}
	} else {
		ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco);
		if (ret)
			goto out;

		bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
						       BIF_DOORBELL_INT_CNTL,
						       DOORBELL_INTERRUPT_DISABLE, 0);
		WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);

		/* clear vbios scratch 6 and 7 for coming asic reinit */
		WREG32(adev->bios_scratch_reg_offset + 6, 0);
		WREG32(adev->bios_scratch_reg_offset + 7, 0);
	}
	if (ret)
		goto out;

	smu_baco->state = state;
out:
	mutex_unlock(&smu_baco->mutex);
	return ret;
}
*smu
)
1732 struct amdgpu_device
*adev
= smu
->adev
;
1735 /* Arcturus does not need this audio workaround */
1736 if (adev
->asic_type
!= CHIP_ARCTURUS
) {
1737 ret
= smu_v11_0_baco_set_armd3_sequence(smu
, BACO_SEQ_BACO
);
1742 ret
= smu_v11_0_baco_set_state(smu
, SMU_BACO_STATE_ENTER
);
1751 int smu_v11_0_baco_exit(struct smu_context
*smu
)
1755 ret
= smu_v11_0_baco_set_state(smu
, SMU_BACO_STATE_EXIT
);
int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type,
				    uint32_t *min, uint32_t *max)
{
	int ret = 0, clk_id = 0;
	uint32_t param = 0;

	clk_id = smu_clk_get_index(smu, clk_type);
	if (clk_id < 0) {
		ret = -EINVAL;
		goto failed;
	}
	param = (clk_id & 0xffff) << 16;

	if (max) {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param);
		if (ret)
			goto failed;
		ret = smu_read_smc_arg(smu, max);
		if (ret)
			goto failed;
	}

	if (min) {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param);
		if (ret)
			goto failed;
		ret = smu_read_smc_arg(smu, min);
		if (ret)
			goto failed;
	}

failed:
	return ret;
}
int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
					  uint32_t min, uint32_t max)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	clk_id = smu_clk_get_index(smu, clk_type);
	if (clk_id < 0)
		return clk_id;

	if (max > 0) {
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
						  param);
		if (ret)
			return ret;
	}

	if (min > 0) {
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
						  param);
		if (ret)
			return ret;
	}

	return ret;
}
int smu_v11_0_override_pcie_parameters(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	uint32_t pcie_gen = 0, pcie_width = 0;
	int ret;

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;

	ret = smu_update_pcie_parameters(smu, pcie_gen, pcie_width);

	if (ret)
		pr_err("[%s] Attempt to override pcie params failed!\n", __func__);

	return ret;
}
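/*
 * Encoding example for the comment above (hypothetical link): a Gen4 x16
 * capable board yields pcie_gen = 3 and pcie_width = 6, which the ASIC
 * specific smu_update_pcie_parameters() callback packs into the override
 * message argument.
 */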
int smu_v11_0_set_default_od_settings(struct smu_context *smu, bool initialize, size_t overdrive_table_size)
{
	struct smu_table_context *table_context = &smu->smu_table;
	int ret = 0;

	if (initialize) {
		if (table_context->overdrive_table) {
			return -EINVAL;
		}
		table_context->overdrive_table = kzalloc(overdrive_table_size, GFP_KERNEL);
		if (!table_context->overdrive_table) {
			return -ENOMEM;
		}
		ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, false);
		if (ret) {
			pr_err("Failed to export overdrive table!\n");
			return ret;
		}
	}
	ret = smu_update_table(smu, SMU_TABLE_OVERDRIVE, 0, table_context->overdrive_table, true);
	if (ret) {
		pr_err("Failed to import overdrive table!\n");
		return ret;
	}
	return ret;
}
int smu_v11_0_set_performance_level(struct smu_context *smu,
				    enum amd_dpm_forced_level level)
{
	int ret = 0;
	uint32_t sclk_mask, mclk_mask, soc_mask;

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		ret = smu_force_dpm_limit_value(smu, true);
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		ret = smu_force_dpm_limit_value(smu, false);
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		ret = smu_unforce_dpm_levels(smu);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		ret = smu_get_profiling_clk_mask(smu, level,
						 &sclk_mask,
						 &mclk_mask,
						 &soc_mask);
		if (ret)
			return ret;
		smu_force_clk_levels(smu, SMU_SCLK, 1 << sclk_mask, false);
		smu_force_clk_levels(smu, SMU_MCLK, 1 << mclk_mask, false);
		smu_force_clk_levels(smu, SMU_SOCCLK, 1 << soc_mask, false);
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT: