/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include <linux/pci.h>

#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
#include "soc15_common.h"
#include "smu_v11_0.h"
#include "smu_v12_0.h"
#include "atom.h"
#include "vega20_ppt.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
#include "renoir_ppt.h"
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type)	#type
static const char* __smu_message_names[] = {
	SMU_MESSAGE_TYPES
};

const char *smu_get_message_name(struct smu_context *smu, enum smu_message_type type)
{
	if (type < 0 || type >= SMU_MSG_MAX_COUNT)
		return "unknown smu message";

	return __smu_message_names[type];
}
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(fea)	#fea
static const char* __smu_feature_names[] = {
	SMU_FEATURE_MASKS
};

const char *smu_get_feature_name(struct smu_context *smu, enum smu_feature_mask feature)
{
	if (feature < 0 || feature >= SMU_FEATURE_COUNT)
		return "unknown smu feature";

	return __smu_feature_names[feature];
}
size_t smu_sys_get_pp_feature_mask(struct smu_context *smu, char *buf)
{
	size_t size = 0;
	int ret = 0, i = 0;
	uint32_t feature_mask[2] = { 0 };
	int32_t feature_index = 0;
	uint32_t count = 0;
	uint32_t sort_feature[SMU_FEATURE_COUNT];
	uint64_t hw_feature_count = 0;

	mutex_lock(&smu->mutex);

	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
	if (ret)
		goto failed;

	size = sprintf(buf + size, "features high: 0x%08x low: 0x%08x\n",
			feature_mask[1], feature_mask[0]);

	for (i = 0; i < SMU_FEATURE_COUNT; i++) {
		feature_index = smu_feature_get_index(smu, i);
		if (feature_index < 0)
			continue;
		sort_feature[feature_index] = i;
		hw_feature_count++;
	}

	for (i = 0; i < hw_feature_count; i++) {
		size += sprintf(buf + size, "%02d. %-20s (%2d) : %s\n",
			       count++,
			       smu_get_feature_name(smu, sort_feature[i]),
			       i,
			       !!smu_feature_is_enabled(smu, sort_feature[i]) ?
			       "enabled" : "disabled");
	}

failed:
	mutex_unlock(&smu->mutex);

	return size;
}
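/*
 * Illustrative output of the buffer filled above (values are examples only,
 * not taken from real hardware):
 *
 *   features high: 0x00000001 low: 0x0000f3cf
 *   00. DPM_PREFETCHER       ( 0) : enabled
 *   01. DPM_GFXCLK           ( 1) : enabled
 *   ...
 *
 * The printable names come from __smu_feature_names, while the per-ASIC bit
 * position is resolved through smu_feature_get_index().
 */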
static int smu_feature_update_enable_state(struct smu_context *smu,
					   uint64_t feature_mask,
					   bool enabled)
{
	struct smu_feature *feature = &smu->smu_feature;
	uint32_t feature_low = 0, feature_high = 0;
	int ret = 0;

	if (!smu->pm_enabled)
		return ret;

	feature_low = (feature_mask >> 0 ) & 0xffffffff;
	feature_high = (feature_mask >> 32) & 0xffffffff;

	if (enabled) {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
						  feature_low);
		if (ret)
			return ret;
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
						  feature_high);
		if (ret)
			return ret;
	} else {
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
						  feature_low);
		if (ret)
			return ret;
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
						  feature_high);
		if (ret)
			return ret;
	}

	mutex_lock(&feature->mutex);
	if (enabled)
		bitmap_or(feature->enabled, feature->enabled,
				(unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
	else
		bitmap_andnot(feature->enabled, feature->enabled,
				(unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);

	return ret;
}
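/*
 * Note: SMU messages carry a single 32-bit argument, so the 64-bit feature
 * mask above is split into its low and high words and sent with the paired
 * Enable/DisableSmuFeaturesLow/High messages. The cached bitmap in
 * smu->smu_feature.enabled is then updated under feature->mutex so it stays
 * in sync with what the firmware has been told.
 */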
int smu_sys_set_pp_feature_mask(struct smu_context *smu, uint64_t new_mask)
{
	int ret = 0;
	uint32_t feature_mask[2] = { 0 };
	uint64_t feature_2_enabled = 0;
	uint64_t feature_2_disabled = 0;
	uint64_t feature_enables = 0;

	mutex_lock(&smu->mutex);

	ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
	if (ret)
		goto out;

	feature_enables = ((uint64_t)feature_mask[1] << 32 | (uint64_t)feature_mask[0]);

	feature_2_enabled = ~feature_enables & new_mask;
	feature_2_disabled = feature_enables & ~new_mask;

	if (feature_2_enabled) {
		ret = smu_feature_update_enable_state(smu, feature_2_enabled, true);
		if (ret)
			goto out;
	}
	if (feature_2_disabled) {
		ret = smu_feature_update_enable_state(smu, feature_2_disabled, false);
		if (ret)
			goto out;
	}

out:
	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t *smu_version)
{
	int ret = 0;

	if (!if_version && !smu_version)
		return -EINVAL;

	if (if_version) {
		ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
		if (ret)
			return ret;

		ret = smu_read_smc_arg(smu, if_version);
		if (ret)
			return ret;
	}

	if (smu_version) {
		ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
		if (ret)
			return ret;

		ret = smu_read_smc_arg(smu, smu_version);
		if (ret)
			return ret;
	}

	return ret;
}
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t min, uint32_t max)
{
	int ret = 0;

	if (min <= 0 && max <= 0)
		return -EINVAL;

	if (!smu_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);

	return ret;
}
int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t min, uint32_t max)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (min <= 0 && max <= 0)
		return -EINVAL;

	if (!smu_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_clk_get_index(smu, clk_type);
	if (clk_id < 0)
		return clk_id;

	if (max > 0) {
		param = (uint32_t)((clk_id << 16) | (max & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
						  param);
		if (ret)
			return ret;
	}

	if (min > 0) {
		param = (uint32_t)((clk_id << 16) | (min & 0xffff));
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
						  param);
		if (ret)
			return ret;
	}

	return ret;
}
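/*
 * The hard min/max messages above pack their argument as
 * (clk_id << 16) | (freq & 0xffff): the upper half selects the clock domain,
 * the lower half carries the frequency (clocks are handled in MHz here, see
 * the "clock in Mhz unit" note in smu_get_dpm_freq_range() below).
 */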
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
			   uint32_t *min, uint32_t *max, bool lock_needed)
{
	uint32_t clock_limit;
	int ret = 0;

	if (!min && !max)
		return -EINVAL;

	if (lock_needed)
		mutex_lock(&smu->mutex);

	if (!smu_clk_dpm_is_enabled(smu, clk_type)) {
		switch (clk_type) {
		case SMU_MCLK:
		case SMU_UCLK:
			clock_limit = smu->smu_table.boot_values.uclk;
			break;
		case SMU_GFXCLK:
		case SMU_SCLK:
			clock_limit = smu->smu_table.boot_values.gfxclk;
			break;
		case SMU_SOCCLK:
			clock_limit = smu->smu_table.boot_values.socclk;
			break;
		default:
			clock_limit = 0;
			break;
		}

		/* clock in Mhz unit */
		if (min)
			*min = clock_limit / 100;
		if (max)
			*max = clock_limit / 100;
	} else {
		/*
		 * Todo: have each ASIC (ASIC ppt funcs) control the callbacks exposed
		 * to the core driver, and add helpers for the functionality common to
		 * SMU_v11_x and SMU_v12_x.
		 */
		ret = smu_get_dpm_ultimate_freq(smu, clk_type, min, max);
	}

	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_type,
			      uint16_t level, uint32_t *value)
{
	int ret = 0, clk_id = 0;
	uint32_t param;

	if (!value)
		return -EINVAL;

	if (!smu_clk_dpm_is_enabled(smu, clk_type))
		return 0;

	clk_id = smu_clk_get_index(smu, clk_type);
	if (clk_id < 0)
		return clk_id;

	param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));

	ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
					  param);
	if (ret)
		return ret;

	ret = smu_read_smc_arg(smu, &param);
	if (ret)
		return ret;

	/* BIT31: 0 - fine grained DPM, 1 - discrete DPM
	 * not supported for now, so mask it off
	 */
	*value = param & 0x7fffffff;

	return ret;
}
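/*
 * Passing level == 0xff to GetDpmFreqByIndex (as smu_get_dpm_level_count()
 * below does) asks the firmware for the number of DPM levels instead of a
 * specific level's frequency; bit 31 of the reply flags discrete vs.
 * fine-grained DPM and is masked off above.
 */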
int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t *value)
{
	return smu_get_dpm_freq_by_index(smu, clk_type, 0xff, value);
}
int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
			    uint32_t *min_value, uint32_t *max_value)
{
	int ret = 0;
	uint32_t level_count = 0;

	if (!min_value && !max_value)
		return -EINVAL;

	if (min_value) {
		/* by default, level 0 clock value as min value */
		ret = smu_get_dpm_freq_by_index(smu, clk_type, 0, min_value);
		if (ret)
			return ret;
	}

	if (max_value) {
		ret = smu_get_dpm_level_count(smu, clk_type, &level_count);
		if (ret)
			return ret;

		ret = smu_get_dpm_freq_by_index(smu, clk_type, level_count - 1, max_value);
		if (ret)
			return ret;
	}

	return ret;
}
bool smu_clk_dpm_is_enabled(struct smu_context *smu, enum smu_clk_type clk_type)
{
	enum smu_feature_mask feature_id = 0;

	switch (clk_type) {
	case SMU_MCLK:
	case SMU_UCLK:
		feature_id = SMU_FEATURE_DPM_UCLK_BIT;
		break;
	case SMU_GFXCLK:
	case SMU_SCLK:
		feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
		break;
	case SMU_SOCCLK:
		feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
		break;
	default:
		return true;
	}

	if (!smu_feature_is_enabled(smu, feature_id))
		return false;

	return true;
}
/**
 * smu_dpm_set_power_gate - power gate/ungate the specific IP block
 *
 * @smu:        smu_context pointer
 * @block_type: the IP block to power gate/ungate
 * @gate:       to power gate if true, ungate otherwise
 *
 * This API uses no smu->mutex lock protection due to:
 * 1. It is either called by other IP blocks (gfx/sdma/vcn/uvd/vce),
 *    where the caller guarantees the access is race free.
 * 2. Or it is called on a user request to change
 *    power_dpm_force_performance_level, in which case the smu->mutex lock
 *    is already held by the parent API smu_force_performance_level of the
 *    call path.
 */
int smu_dpm_set_power_gate(struct smu_context *smu, uint32_t block_type,
			   bool gate)
{
	int ret = 0;

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
		ret = smu_dpm_set_uvd_enable(smu, !gate);
		break;
	case AMD_IP_BLOCK_TYPE_VCE:
		ret = smu_dpm_set_vce_enable(smu, !gate);
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
		ret = smu_gfx_off_control(smu, gate);
		break;
	case AMD_IP_BLOCK_TYPE_SDMA:
		ret = smu_powergate_sdma(smu, gate);
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		ret = smu_dpm_set_jpeg_enable(smu, !gate);
		break;
	default:
		break;
	}

	return ret;
}
int smu_get_power_num_states(struct smu_context *smu,
			     struct pp_states_info *state_info)
{
	if (!state_info)
		return -EINVAL;

	/* power states are not supported; report a single default state */
	memset(state_info, 0, sizeof(struct pp_states_info));
	state_info->nums = 1;
	state_info->states[0] = POWER_STATE_TYPE_DEFAULT;

	return 0;
}
int smu_common_read_sensor(struct smu_context *smu, enum amd_pp_sensors sensor,
			   void *data, uint32_t *size)
{
	struct smu_power_context *smu_power = &smu->smu_power;
	struct smu_power_gate *power_gate = &smu_power->power_gate;
	int ret = 0;

	if (!data || !size)
		return -EINVAL;

	switch (sensor) {
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
		*((uint32_t *)data) = smu->pstate_sclk;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
		*((uint32_t *)data) = smu->pstate_mclk;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
		*size = 8;
		break;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UVD_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*(uint32_t *)data = smu_feature_is_enabled(smu, SMU_FEATURE_DPM_VCE_BIT) ? 1 : 0;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCN_POWER_STATE:
		*(uint32_t *)data = power_gate->vcn_gated ? 0 : 1;
		*size = 4;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	if (ret)
		*size = 0;

	return ret;
}
int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int argument,
		     void *table_data, bool drv2smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct amdgpu_device *adev = smu->adev;
	struct smu_table *table = &smu_table->driver_table;
	int table_id = smu_table_get_index(smu, table_index);
	uint32_t table_size;
	int ret = 0;

	if (!table_data || table_id >= SMU_TABLE_COUNT || table_id < 0)
		return -EINVAL;

	table_size = smu_table->tables[table_index].size;

	if (drv2smu) {
		memcpy(table->cpu_addr, table_data, table_size);
		/*
		 * Flush hdp cache: to guarantee the content seen by the GPU
		 * is consistent with that written by the CPU.
		 */
		amdgpu_asic_flush_hdp(adev, NULL);
	}

	ret = smu_send_smc_msg_with_param(smu, drv2smu ?
					  SMU_MSG_TransferTableDram2Smu :
					  SMU_MSG_TransferTableSmu2Dram,
					  table_id | ((argument & 0xFFFF) << 16));
	if (ret)
		return ret;

	if (!drv2smu) {
		amdgpu_asic_flush_hdp(adev, NULL);
		memcpy(table_data, table->cpu_addr, table_size);
	}

	return ret;
}
bool is_support_sw_smu(struct amdgpu_device *adev)
{
	if (adev->asic_type == CHIP_VEGA20)
		return (amdgpu_dpm == 2) ? true : false;
	else if (adev->asic_type >= CHIP_ARCTURUS) {
		if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
			return false;
		else
			return true;
	}

	return false;
}
bool is_support_sw_smu_xgmi(struct amdgpu_device *adev)
{
	if (!is_support_sw_smu(adev))
		return false;

	if (adev->asic_type == CHIP_VEGA20)
		return true;

	return false;
}
int smu_sys_get_pp_table(struct smu_context *smu, void **table)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	uint32_t powerplay_table_size;

	if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu_table->hardcode_pptable)
		*table = smu_table->hardcode_pptable;
	else
		*table = smu_table->power_play_table;

	powerplay_table_size = smu_table->power_play_table_size;

	mutex_unlock(&smu->mutex);

	return powerplay_table_size;
}
int smu_sys_set_pp_table(struct smu_context *smu, void *buf, size_t size)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	ATOM_COMMON_TABLE_HEADER *header = (ATOM_COMMON_TABLE_HEADER *)buf;
	int ret = 0;

	if (!smu->pm_enabled)
		return -EINVAL;

	if (header->usStructureSize != size) {
		pr_err("pp table size not matched !\n");
		return -EIO;
	}

	mutex_lock(&smu->mutex);
	if (!smu_table->hardcode_pptable)
		smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
	if (!smu_table->hardcode_pptable) {
		ret = -ENOMEM;
		goto failed;
	}

	memcpy(smu_table->hardcode_pptable, buf, size);
	smu_table->power_play_table = smu_table->hardcode_pptable;
	smu_table->power_play_table_size = size;

	/*
	 * Special hw_fini action (for Navi1x, the DPM disablement will be
	 * skipped) may be needed for custom pptable uploading.
	 */
	smu->uploading_custom_pp_table = true;

	ret = smu_reset(smu);
	if (ret)
		pr_info("smu reset failed, ret = %d\n", ret);

	smu->uploading_custom_pp_table = false;

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}
int smu_feature_init_dpm(struct smu_context *smu)
{
	struct smu_feature *feature = &smu->smu_feature;
	int ret = 0;
	uint32_t allowed_feature_mask[SMU_FEATURE_MAX/32];

	if (!smu->pm_enabled)
		return ret;

	mutex_lock(&feature->mutex);
	bitmap_zero(feature->allowed, SMU_FEATURE_MAX);
	mutex_unlock(&feature->mutex);

	ret = smu_get_allowed_feature_mask(smu, allowed_feature_mask,
					   SMU_FEATURE_MAX/32);
	if (ret)
		return ret;

	mutex_lock(&feature->mutex);
	bitmap_or(feature->allowed, feature->allowed,
		      (unsigned long *)allowed_feature_mask,
		      feature->feature_num);
	mutex_unlock(&feature->mutex);

	return ret;
}
int smu_feature_is_enabled(struct smu_context *smu, enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;
	int ret = 0;

	if (smu->is_apu)
		return 1;

	feature_id = smu_feature_get_index(smu, mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->enabled);
	mutex_unlock(&feature->mutex);

	return ret;
}
int smu_feature_set_enabled(struct smu_context *smu, enum smu_feature_mask mask,
			    bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;

	feature_id = smu_feature_get_index(smu, mask);
	if (feature_id < 0)
		return -EINVAL;

	WARN_ON(feature_id > feature->feature_num);

	return smu_feature_update_enable_state(smu,
					       1ULL << feature_id,
					       enable);
}
int smu_feature_is_supported(struct smu_context *smu, enum smu_feature_mask mask)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;
	int ret = 0;

	feature_id = smu_feature_get_index(smu, mask);
	if (feature_id < 0)
		return 0;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	ret = test_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return ret;
}
int smu_feature_set_supported(struct smu_context *smu,
			      enum smu_feature_mask mask,
			      bool enable)
{
	struct smu_feature *feature = &smu->smu_feature;
	int feature_id;
	int ret = 0;

	feature_id = smu_feature_get_index(smu, mask);
	if (feature_id < 0)
		return -EINVAL;

	WARN_ON(feature_id > feature->feature_num);

	mutex_lock(&feature->mutex);
	if (enable)
		test_and_set_bit(feature_id, feature->supported);
	else
		test_and_clear_bit(feature_id, feature->supported);
	mutex_unlock(&feature->mutex);

	return ret;
}
static int smu_set_funcs(struct amdgpu_device *adev)
{
	struct smu_context *smu = &adev->smu;

	if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
		smu->od_enabled = true;

	switch (adev->asic_type) {
	case CHIP_VEGA20:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		vega20_set_ppt_funcs(smu);
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI14:
	case CHIP_NAVI12:
		navi10_set_ppt_funcs(smu);
		break;
	case CHIP_ARCTURUS:
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
		arcturus_set_ppt_funcs(smu);
		/* OD is not supported on Arcturus */
		smu->od_enabled = false;
		break;
	case CHIP_RENOIR:
		renoir_set_ppt_funcs(smu);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
static int smu_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	smu->adev = adev;
	smu->pm_enabled = !!amdgpu_dpm;
	smu->is_apu = false;
	mutex_init(&smu->mutex);

	return smu_set_funcs(adev);
}
static int smu_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (!smu->pm_enabled)
		return 0;

	smu_handle_task(&adev->smu,
			smu->smu_dpm.dpm_level,
			AMD_PP_TASK_COMPLETE_INIT,
			false);

	return 0;
}
int smu_get_atom_data_table(struct smu_context *smu, uint32_t table,
			    uint16_t *size, uint8_t *frev, uint8_t *crev,
			    uint8_t **addr)
{
	struct amdgpu_device *adev = smu->adev;
	uint16_t data_start;

	if (!amdgpu_atom_parse_data_header(adev->mode_info.atom_context, table,
					   size, frev, crev, &data_start))
		return -EINVAL;

	*addr = (uint8_t *)adev->mode_info.atom_context->bios + data_start;

	return 0;
}
static int smu_initialize_pptable(struct smu_context *smu)
{
	/* TODO */
	return 0;
}
static int smu_smc_table_sw_init(struct smu_context *smu)
{
	int ret;

	ret = smu_initialize_pptable(smu);
	if (ret) {
		pr_err("Failed to init smu_initialize_pptable!\n");
		return ret;
	}

	/*
	 * Create smu_table structure, and init smc tables such as
	 * TABLE_PPTABLE, TABLE_WATERMARKS, TABLE_SMU_METRICS, etc.
	 */
	ret = smu_init_smc_tables(smu);
	if (ret) {
		pr_err("Failed to init smc tables!\n");
		return ret;
	}

	/*
	 * Create smu_power_context structure, and allocate smu_dpm_context and
	 * context size to fill the smu_power_context data.
	 */
	ret = smu_init_power(smu);
	if (ret) {
		pr_err("Failed to init smu_init_power!\n");
		return ret;
	}

	return 0;
}
static int smu_smc_table_sw_fini(struct smu_context *smu)
{
	int ret;

	ret = smu_fini_smc_tables(smu);
	if (ret) {
		pr_err("Failed to smu_fini_smc_tables!\n");
		return ret;
	}

	return 0;
}
static int smu_sw_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	smu->pool_size = adev->pm.smu_prv_buffer_size;
	smu->smu_feature.feature_num = SMU_FEATURE_MAX;
	mutex_init(&smu->smu_feature.mutex);
	bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
	bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);

	mutex_init(&smu->smu_baco.mutex);
	smu->smu_baco.state = SMU_BACO_STATE_EXIT;
	smu->smu_baco.platform_support = false;

	mutex_init(&smu->sensor_lock);
	mutex_init(&smu->metrics_lock);

	smu->watermarks_bitmap = 0;
	smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
	smu->workload_prority[PP_SMC_POWER_PROFILE_FULLSCREEN3D] = 1;
	smu->workload_prority[PP_SMC_POWER_PROFILE_POWERSAVING] = 2;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VIDEO] = 3;
	smu->workload_prority[PP_SMC_POWER_PROFILE_VR] = 4;
	smu->workload_prority[PP_SMC_POWER_PROFILE_COMPUTE] = 5;
	smu->workload_prority[PP_SMC_POWER_PROFILE_CUSTOM] = 6;

	smu->workload_setting[0] = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	smu->workload_setting[1] = PP_SMC_POWER_PROFILE_FULLSCREEN3D;
	smu->workload_setting[2] = PP_SMC_POWER_PROFILE_POWERSAVING;
	smu->workload_setting[3] = PP_SMC_POWER_PROFILE_VIDEO;
	smu->workload_setting[4] = PP_SMC_POWER_PROFILE_VR;
	smu->workload_setting[5] = PP_SMC_POWER_PROFILE_COMPUTE;
	smu->workload_setting[6] = PP_SMC_POWER_PROFILE_CUSTOM;
	smu->display_config = &adev->pm.pm_display_cfg;

	smu->smu_dpm.dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	smu->smu_dpm.requested_dpm_level = AMD_DPM_FORCED_LEVEL_AUTO;
	ret = smu_init_microcode(smu);
	if (ret) {
		pr_err("Failed to load smu firmware!\n");
		return ret;
	}

	ret = smu_smc_table_sw_init(smu);
	if (ret) {
		pr_err("Failed to sw init smc table!\n");
		return ret;
	}

	ret = smu_register_irq_handler(smu);
	if (ret) {
		pr_err("Failed to register smc irq handler!\n");
		return ret;
	}

	return 0;
}
static int smu_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	int ret;

	kfree(smu->irq_source);
	smu->irq_source = NULL;

	ret = smu_smc_table_sw_fini(smu);
	if (ret) {
		pr_err("Failed to sw fini smc table!\n");
		return ret;
	}

	ret = smu_fini_power(smu);
	if (ret) {
		pr_err("Failed to init smu_fini_power!\n");
		return ret;
	}

	return 0;
}
static int smu_init_fb_allocations(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);
	uint32_t max_table_size = 0;
	int ret, i;

	/* VRAM allocation for tool table */
	if (tables[SMU_TABLE_PMSTATUSLOG].size) {
		ret = amdgpu_bo_create_kernel(adev,
					      tables[SMU_TABLE_PMSTATUSLOG].size,
					      tables[SMU_TABLE_PMSTATUSLOG].align,
					      tables[SMU_TABLE_PMSTATUSLOG].domain,
					      &tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
		if (ret) {
			pr_err("VRAM allocation for tool table failed!\n");
			return ret;
		}
	}

	/* VRAM allocation for driver table */
	for (i = 0; i < SMU_TABLE_COUNT; i++) {
		if (tables[i].size == 0)
			continue;

		if (i == SMU_TABLE_PMSTATUSLOG)
			continue;

		if (max_table_size < tables[i].size)
			max_table_size = tables[i].size;
	}

	driver_table->size = max_table_size;
	driver_table->align = PAGE_SIZE;
	driver_table->domain = AMDGPU_GEM_DOMAIN_VRAM;

	ret = amdgpu_bo_create_kernel(adev,
				      driver_table->size,
				      driver_table->align,
				      driver_table->domain,
				      &driver_table->bo,
				      &driver_table->mc_address,
				      &driver_table->cpu_addr);
	if (ret) {
		pr_err("VRAM allocation for driver table failed!\n");
		if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
			amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
					      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
					      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);
	}

	return ret;
}
static int smu_fini_fb_allocations(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *tables = smu_table->tables;
	struct smu_table *driver_table = &(smu_table->driver_table);

	if (!tables)
		return 0;

	if (tables[SMU_TABLE_PMSTATUSLOG].mc_address)
		amdgpu_bo_free_kernel(&tables[SMU_TABLE_PMSTATUSLOG].bo,
				      &tables[SMU_TABLE_PMSTATUSLOG].mc_address,
				      &tables[SMU_TABLE_PMSTATUSLOG].cpu_addr);

	amdgpu_bo_free_kernel(&driver_table->bo,
			      &driver_table->mc_address,
			      &driver_table->cpu_addr);

	return 0;
}
static int smu_smc_table_hw_init(struct smu_context *smu,
				 bool initialize)
{
	struct amdgpu_device *adev = smu->adev;
	int ret;

	if (smu_is_dpm_running(smu) && adev->in_suspend) {
		pr_info("dpm has been enabled\n");
		return 0;
	}

	if (adev->asic_type != CHIP_ARCTURUS) {
		ret = smu_init_display_count(smu, 0);
		if (ret)
			return ret;
	}

	if (initialize) {
		/* get boot_values from vbios to set revision, gfxclk, and etc. */
		ret = smu_get_vbios_bootup_values(smu);
		if (ret)
			return ret;

		ret = smu_setup_pptable(smu);
		if (ret)
			return ret;

		ret = smu_get_clk_info_from_vbios(smu);
		if (ret)
			return ret;

		/*
		 * check if the format_revision in vbios is up to pptable header
		 * version, and the structure size is not 0.
		 */
		ret = smu_check_pptable(smu);
		if (ret)
			return ret;

		/*
		 * allocate vram bos to store smc table contents.
		 */
		ret = smu_init_fb_allocations(smu);
		if (ret)
			return ret;

		/*
		 * Parse pptable format and fill PPTable_t smc_pptable to
		 * smu_table_context structure. And read the smc_dpm_table from vbios,
		 * then fill it into smc_pptable.
		 */
		ret = smu_parse_pptable(smu);
		if (ret)
			return ret;

		/*
		 * Send msg GetDriverIfVersion to check if the return value is equal
		 * with DRIVER_IF_VERSION of smc header.
		 */
		ret = smu_check_fw_version(smu);
		if (ret)
			return ret;
	}

	/* smu_dump_pptable(smu); */
	if (!amdgpu_sriov_vf(adev)) {
		ret = smu_set_driver_table_location(smu);
		if (ret)
			return ret;

		/*
		 * Copy pptable bo in the vram to smc with SMU MSGs such as
		 * SetDriverDramAddr and TransferTableDram2Smu.
		 */
		ret = smu_write_pptable(smu);
		if (ret)
			return ret;

		/* issue Run*Btc msg */
		ret = smu_run_btc(smu);
		if (ret)
			return ret;
		ret = smu_feature_set_allowed_mask(smu);
		if (ret)
			return ret;

		ret = smu_system_features_control(smu, true);
		if (ret)
			return ret;

		if (adev->asic_type != CHIP_ARCTURUS) {
			ret = smu_notify_display_change(smu);
			if (ret)
				return ret;

			/*
			 * Set min deep sleep dce fclk with bootup value from vbios via
			 * SetMinDeepSleepDcefclk MSG.
			 */
			ret = smu_set_min_dcef_deep_sleep(smu);
			if (ret)
				return ret;
		}
	}

	/*
	 * Set initialized values (get from vbios) to dpm tables context such as
	 * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each
	 * dpm table.
	 */
	if (initialize) {
		ret = smu_populate_smc_tables(smu);
		if (ret)
			return ret;

		ret = smu_init_max_sustainable_clocks(smu);
		if (ret)
			return ret;
	}

	if (adev->asic_type != CHIP_ARCTURUS) {
		ret = smu_override_pcie_parameters(smu);
		if (ret)
			return ret;
	}

	ret = smu_set_default_od_settings(smu, initialize);
	if (ret)
		return ret;

	if (initialize) {
		ret = smu_populate_umd_state_clk(smu);
		if (ret)
			return ret;

		ret = smu_get_power_limit(smu, &smu->default_power_limit, false, false);
		if (ret)
			return ret;
	}

	/*
	 * Set PMSTATUSLOG table bo address with SetToolsDramAddr MSG for tools.
	 */
	if (!amdgpu_sriov_vf(adev)) {
		ret = smu_set_tool_table_location(smu);
		if (ret)
			return ret;
	}

	if (!smu_is_dpm_running(smu))
		pr_info("dpm has been disabled\n");

	return ret;
}
/**
 * smu_alloc_memory_pool - allocate memory pool in the system memory
 *
 * @smu: smu_context pointer
 *
 * This memory pool is used by the SMC firmware; its location is passed to
 * the firmware with the SetSystemVirtualDramAddr and DramLogSetDramAddr
 * messages.
 *
 * Returns 0 on success, error on failure.
 */
static int smu_alloc_memory_pool(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;
	uint64_t pool_size = smu->pool_size;
	int ret = 0;

	if (pool_size == SMU_MEMORY_POOL_SIZE_ZERO)
		return ret;

	memory_pool->size = pool_size;
	memory_pool->align = PAGE_SIZE;
	memory_pool->domain = AMDGPU_GEM_DOMAIN_GTT;

	switch (pool_size) {
	case SMU_MEMORY_POOL_SIZE_256_MB:
	case SMU_MEMORY_POOL_SIZE_512_MB:
	case SMU_MEMORY_POOL_SIZE_1_GB:
	case SMU_MEMORY_POOL_SIZE_2_GB:
		ret = amdgpu_bo_create_kernel(adev,
					      memory_pool->size,
					      memory_pool->align,
					      memory_pool->domain,
					      &memory_pool->bo,
					      &memory_pool->mc_address,
					      &memory_pool->cpu_addr);
		break;
	default:
		break;
	}

	return ret;
}
static int smu_free_memory_pool(struct smu_context *smu)
{
	struct smu_table_context *smu_table = &smu->smu_table;
	struct smu_table *memory_pool = &smu_table->memory_pool;

	if (memory_pool->size == SMU_MEMORY_POOL_SIZE_ZERO)
		return 0;

	amdgpu_bo_free_kernel(&memory_pool->bo,
			      &memory_pool->mc_address,
			      &memory_pool->cpu_addr);

	memset(memory_pool, 0, sizeof(struct smu_table));

	return 0;
}
static int smu_start_smc_engine(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
		if (adev->asic_type < CHIP_NAVI10) {
			if (smu->ppt_funcs->load_microcode) {
				ret = smu->ppt_funcs->load_microcode(smu);
				if (ret)
					return ret;
			}
		}
	}

	if (smu->ppt_funcs->check_fw_status) {
		ret = smu->ppt_funcs->check_fw_status(smu);
		if (ret)
			pr_err("SMC is not ready\n");
	}

	return ret;
}
static int smu_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	ret = smu_start_smc_engine(smu);
	if (ret) {
		pr_err("SMU is not ready yet!\n");
		return ret;
	}

	if (smu->is_apu) {
		smu_powergate_sdma(&adev->smu, false);
		smu_powergate_vcn(&adev->smu, false);
		smu_powergate_jpeg(&adev->smu, false);
		smu_set_gfx_cgpg(&adev->smu, true);
	}

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	ret = smu_feature_init_dpm(smu);
	if (ret)
		goto failed;

	ret = smu_smc_table_hw_init(smu, true);
	if (ret)
		goto failed;

	ret = smu_alloc_memory_pool(smu);
	if (ret)
		goto failed;

	/*
	 * Notify the firmware of the memory pool location via the
	 * SetSystemVirtualDramAddr and DramLogSetDramAddr messages.
	 */
	ret = smu_notify_memory_pool_location(smu);
	if (ret)
		goto failed;

	ret = smu_start_thermal_control(smu);
	if (ret)
		goto failed;

	if (!smu->pm_enabled)
		adev->pm.dpm_enabled = false;
	else
		adev->pm.dpm_enabled = true;	/* TODO: will set dpm_enabled flag while VCN and DAL DPM is workable */

	pr_info("SMU is initialized successfully!\n");

	return 0;

failed:
	return ret;
}
static int smu_stop_dpms(struct smu_context *smu)
{
	return smu_system_features_control(smu, false);
}
static int smu_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	struct smu_table_context *table_context = &smu->smu_table;
	int ret = 0;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (smu->is_apu) {
		smu_powergate_sdma(&adev->smu, true);
		smu_powergate_vcn(&adev->smu, true);
		smu_powergate_jpeg(&adev->smu, true);
	}

	if (!smu->pm_enabled)
		return 0;

	if (!amdgpu_sriov_vf(adev)) {
		ret = smu_stop_thermal_control(smu);
		if (ret) {
			pr_warn("Fail to stop thermal control!\n");
			return ret;
		}

		/*
		 * For custom pptable uploading, skip the DPM features
		 * disable process on Navi1x ASICs.
		 *   - As the gfx related features are under control of
		 *     RLC on those ASICs. RLC reinitialization will be
		 *     needed to reenable them. That will cost much more
		 *     effort.
		 *
		 *   - SMU firmware can handle the DPM reenablement
		 *     properly.
		 */
		if (!smu->uploading_custom_pp_table ||
		    !((adev->asic_type >= CHIP_NAVI10) &&
		      (adev->asic_type <= CHIP_NAVI12))) {
			ret = smu_stop_dpms(smu);
			if (ret) {
				pr_warn("Fail to stop Dpms!\n");
				return ret;
			}
		}
	}

	kfree(table_context->driver_pptable);
	table_context->driver_pptable = NULL;

	kfree(table_context->max_sustainable_clocks);
	table_context->max_sustainable_clocks = NULL;

	kfree(table_context->overdrive_table);
	table_context->overdrive_table = NULL;

	ret = smu_fini_fb_allocations(smu);
	if (ret)
		return ret;

	ret = smu_free_memory_pool(smu);
	if (ret)
		return ret;

	return 0;
}
int smu_reset(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;
	int ret = 0;

	ret = smu_hw_fini(adev);
	if (ret)
		return ret;

	ret = smu_hw_init(adev);
	if (ret)
		return ret;

	return ret;
}
static int smu_suspend(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;
	bool baco_feature_is_enabled = false;

	if (!smu->pm_enabled)
		return 0;

	if (!smu->is_apu)
		baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);

	ret = smu_system_features_control(smu, false);
	if (ret)
		return ret;

	if (baco_feature_is_enabled) {
		ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
		if (ret) {
			pr_warn("set BACO feature enabled failed, return %d\n", ret);
			return ret;
		}
	}

	smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);

	if (adev->asic_type >= CHIP_NAVI10 &&
	    adev->gfx.rlc.funcs->stop)
		adev->gfx.rlc.funcs->stop(adev);
	if (smu->is_apu)
		smu_set_gfx_cgpg(&adev->smu, false);

	return 0;
}
static int smu_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct smu_context *smu = &adev->smu;

	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
		return 0;

	if (!smu->pm_enabled)
		return 0;

	pr_info("SMU is resuming...\n");

	ret = smu_start_smc_engine(smu);
	if (ret) {
		pr_err("SMU is not ready yet!\n");
		goto failed;
	}

	ret = smu_smc_table_hw_init(smu, false);
	if (ret)
		goto failed;

	ret = smu_start_thermal_control(smu);
	if (ret)
		goto failed;

	if (smu->is_apu)
		smu_set_gfx_cgpg(&adev->smu, true);

	smu->disable_uclk_switch = 0;

	pr_info("SMU is resumed successfully!\n");

	return 0;

failed:
	return ret;
}
int smu_display_configuration_change(struct smu_context *smu,
				     const struct amd_pp_display_configuration *display_config)
{
	int index = 0;
	int num_of_active_display = 0;

	if (!smu->pm_enabled || !is_support_sw_smu(smu->adev))
		return -EINVAL;

	if (!display_config)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_deep_sleep_dcefclk)
		smu->ppt_funcs->set_deep_sleep_dcefclk(smu,
				display_config->min_dcef_deep_sleep_set_clk / 100);

	for (index = 0; index < display_config->num_path_including_non_display; index++) {
		if (display_config->displays[index].controller_id != 0)
			num_of_active_display++;
	}

	smu_set_active_display_count(smu, num_of_active_display);

	smu_store_cc6_data(smu, display_config->cpu_pstate_separation_time,
			   display_config->cpu_cc6_disable,
			   display_config->cpu_pstate_disable,
			   display_config->nb_pstate_switch_disable);

	mutex_unlock(&smu->mutex);

	return 0;
}
static int smu_get_clock_info(struct smu_context *smu,
			      struct smu_clock_info *clk_info,
			      enum smu_perf_level_designation designation)
{
	int ret;
	struct smu_performance_level level = {0};

	if (!clk_info)
		return -EINVAL;

	ret = smu_get_perf_level(smu, PERF_LEVEL_ACTIVITY, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	ret = smu_get_perf_level(smu, designation, &level);
	if (ret)
		return -EINVAL;

	clk_info->min_mem_clk = level.memory_clock;
	clk_info->min_eng_clk = level.core_clock;
	clk_info->min_bus_bandwidth = level.non_local_mem_freq * level.non_local_mem_width;

	return 0;
}
int smu_get_current_clocks(struct smu_context *smu,
			   struct amd_pp_clock_info *clocks)
{
	struct amd_pp_simple_clock_info simple_clocks = {0};
	struct smu_clock_info hw_clocks;
	int ret = 0;

	if (!is_support_sw_smu(smu->adev))
		return -EINVAL;

	mutex_lock(&smu->mutex);

	smu_get_dal_power_level(smu, &simple_clocks);

	if (smu->support_power_containment)
		ret = smu_get_clock_info(smu, &hw_clocks,
					 PERF_LEVEL_POWER_CONTAINMENT);
	else
		ret = smu_get_clock_info(smu, &hw_clocks, PERF_LEVEL_ACTIVITY);

	if (ret) {
		pr_err("Error in smu_get_clock_info\n");
		goto failed;
	}

	clocks->min_engine_clock = hw_clocks.min_eng_clk;
	clocks->max_engine_clock = hw_clocks.max_eng_clk;
	clocks->min_memory_clock = hw_clocks.min_mem_clk;
	clocks->max_memory_clock = hw_clocks.max_mem_clk;
	clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
	clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
	clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
	clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;

	if (simple_clocks.level == 0)
		clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
	else
		clocks->max_clocks_state = simple_clocks.level;

	if (!smu_get_current_shallow_sleep_clocks(smu, &hw_clocks)) {
		clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
		clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
	}

failed:
	mutex_unlock(&smu->mutex);
	return ret;
}
static int smu_set_clockgating_state(void *handle,
				     enum amd_clockgating_state state)
{
	return 0;
}

static int smu_set_powergating_state(void *handle,
				     enum amd_powergating_state state)
{
	return 0;
}
static int smu_enable_umd_pstate(void *handle,
				 enum amd_dpm_forced_level *level)
{
	uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
					AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

	struct smu_context *smu = (struct smu_context*)(handle);
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->is_apu && (!smu->pm_enabled || !smu_dpm_ctx->dpm_context))
		return -EINVAL;

	if (!(smu_dpm_ctx->dpm_level & profile_mode_mask)) {
		/* enter umd pstate, save current level, disable gfx cg*/
		if (*level & profile_mode_mask) {
			smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
			smu_dpm_ctx->enable_umd_pstate = true;
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_UNGATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_UNGATE);
		}
	} else {
		/* exit umd pstate, restore level, enable gfx cg*/
		if (!(*level & profile_mode_mask)) {
			if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
				*level = smu_dpm_ctx->saved_dpm_level;
			smu_dpm_ctx->enable_umd_pstate = false;
			amdgpu_device_ip_set_clockgating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_CG_STATE_GATE);
			amdgpu_device_ip_set_powergating_state(smu->adev,
							       AMD_IP_BLOCK_TYPE_GFX,
							       AMD_PG_STATE_GATE);
		}
	}

	return 0;
}
int smu_adjust_power_state_dynamic(struct smu_context *smu,
				   enum amd_dpm_forced_level level,
				   bool skip_display_settings)
{
	int ret = 0;
	int index = 0;
	long workload;
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);

	if (!smu->pm_enabled)
		return -EINVAL;

	if (!skip_display_settings) {
		ret = smu_display_config_changed(smu);
		if (ret) {
			pr_err("Failed to change display config!");
			return ret;
		}
	}

	ret = smu_apply_clocks_adjust_rules(smu);
	if (ret) {
		pr_err("Failed to apply clocks adjust rules!");
		return ret;
	}

	if (!skip_display_settings) {
		ret = smu_notify_smc_display_config(smu);
		if (ret) {
			pr_err("Failed to notify smc display config!");
			return ret;
		}
	}

	if (smu_dpm_ctx->dpm_level != level) {
		ret = smu_asic_set_performance_level(smu, level);
		if (ret) {
			pr_err("Failed to set performance level!");
			return ret;
		}

		/* update the saved copy */
		smu_dpm_ctx->dpm_level = level;
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];

		if (smu->power_profile_mode != workload)
			smu_set_power_profile_mode(smu, &workload, 0, false);
	}

	return ret;
}
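/*
 * Workload selection above: smu->workload_mask carries one bit per requested
 * power profile, positioned by smu->workload_prority[] (priority 0 = bootup
 * default, 6 = custom, as set up in smu_sw_init()). fls() returns the
 * highest set bit, so the highest-priority active profile wins; it is then
 * translated through smu->workload_setting[] before being sent to the
 * firmware.
 */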
int smu_handle_task(struct smu_context *smu,
		    enum amd_dpm_forced_level level,
		    enum amd_pp_task task_id,
		    bool lock_needed)
{
	int ret = 0;

	if (lock_needed)
		mutex_lock(&smu->mutex);

	switch (task_id) {
	case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
		ret = smu_pre_display_config_changed(smu);
		if (ret)
			goto out;
		ret = smu_set_cpu_power_state(smu);
		if (ret)
			goto out;
		ret = smu_adjust_power_state_dynamic(smu, level, false);
		break;
	case AMD_PP_TASK_COMPLETE_INIT:
	case AMD_PP_TASK_READJUST_POWER_STATE:
		ret = smu_adjust_power_state_dynamic(smu, level, true);
		break;
	default:
		break;
	}

out:
	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}
int smu_switch_power_profile(struct smu_context *smu,
			     enum PP_SMC_POWER_PROFILE type,
			     bool en)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	long workload;
	uint32_t index;

	if (!smu->pm_enabled)
		return -EINVAL;

	if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (!en) {
		smu->workload_mask &= ~(1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index > 0 && index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
	} else {
		smu->workload_mask |= (1 << smu->workload_prority[type]);
		index = fls(smu->workload_mask);
		index = index <= WORKLOAD_POLICY_MAX ? index - 1 : 0;
		workload = smu->workload_setting[index];
	}

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		smu_set_power_profile_mode(smu, &workload, 0, false);

	mutex_unlock(&smu->mutex);

	return 0;
}
enum amd_dpm_forced_level smu_get_performance_level(struct smu_context *smu)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	enum amd_dpm_forced_level level;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	mutex_lock(&(smu->mutex));
	level = smu_dpm_ctx->dpm_level;
	mutex_unlock(&(smu->mutex));

	return level;
}
int smu_force_performance_level(struct smu_context *smu, enum amd_dpm_forced_level level)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	ret = smu_enable_umd_pstate(smu, &level);
	if (ret) {
		mutex_unlock(&smu->mutex);
		return ret;
	}

	ret = smu_handle_task(smu, level,
			      AMD_PP_TASK_READJUST_POWER_STATE,
			      false);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_display_count(struct smu_context *smu, uint32_t count)
{
	int ret = 0;

	mutex_lock(&smu->mutex);
	ret = smu_init_display_count(smu, count);
	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_force_clk_levels(struct smu_context *smu,
			 enum smu_clk_type clk_type,
			 uint32_t mask,
			 bool lock_needed)
{
	struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
	int ret = 0;

	if (smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
		pr_debug("force clock level is for dpm manual mode only.\n");
		return -EINVAL;
	}

	if (lock_needed)
		mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels)
		ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);

	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_mp1_state(struct smu_context *smu,
		      enum pp_mp1_state mp1_state)
{
	uint16_t msg;
	int ret;

	/*
	 * The SMC may not be fully ready; that can be expected when the IP
	 * is masked, so just return without error.
	 */
	if (!smu->pm_enabled)
		return 0;

	mutex_lock(&smu->mutex);

	switch (mp1_state) {
	case PP_MP1_STATE_SHUTDOWN:
		msg = SMU_MSG_PrepareMp1ForShutdown;
		break;
	case PP_MP1_STATE_UNLOAD:
		msg = SMU_MSG_PrepareMp1ForUnload;
		break;
	case PP_MP1_STATE_RESET:
		msg = SMU_MSG_PrepareMp1ForReset;
		break;
	case PP_MP1_STATE_NONE:
	default:
		mutex_unlock(&smu->mutex);
		return 0;
	}

	/* some asics may not support those messages */
	if (smu_msg_get_index(smu, msg) < 0) {
		mutex_unlock(&smu->mutex);
		return 0;
	}

	ret = smu_send_smc_msg(smu, msg);
	if (ret)
		pr_err("[PrepareMp1] Failed!\n");

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_set_df_cstate(struct smu_context *smu,
		      enum pp_df_cstate state)
{
	int ret = 0;

	/*
	 * The SMC may not be fully ready; that can be expected when the IP
	 * is masked, so just return without error.
	 */
	if (!smu->pm_enabled)
		return 0;

	if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
		return 0;

	mutex_lock(&smu->mutex);

	ret = smu->ppt_funcs->set_df_cstate(smu, state);
	if (ret)
		pr_err("[SetDfCstate] failed!\n");

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_write_watermarks_table(struct smu_context *smu)
{
	void *watermarks_table = smu->smu_table.watermarks_table;

	if (!watermarks_table)
		return -EINVAL;

	return smu_update_table(smu,
				SMU_TABLE_WATERMARKS,
				0,
				watermarks_table,
				true);
}
int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
		struct dm_pp_wm_sets_with_clock_ranges_soc15 *clock_ranges)
{
	void *table = smu->smu_table.watermarks_table;

	if (!table)
		return -EINVAL;

	mutex_lock(&smu->mutex);

	if (!smu->disable_watermark &&
	    smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
	    smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
		smu_set_watermarks_table(smu, table, clock_ranges);
		smu->watermarks_bitmap |= WATERMARKS_EXIST;
		smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
	}

	mutex_unlock(&smu->mutex);

	return 0;
}
const struct amd_ip_funcs smu_ip_funcs = {
	.name = "smu",
	.early_init = smu_early_init,
	.late_init = smu_late_init,
	.sw_init = smu_sw_init,
	.sw_fini = smu_sw_fini,
	.hw_init = smu_hw_init,
	.hw_fini = smu_hw_fini,
	.suspend = smu_suspend,
	.resume = smu_resume,
	.is_idle = NULL,
	.check_soft_reset = NULL,
	.wait_for_idle = NULL,
	.soft_reset = NULL,
	.set_clockgating_state = smu_set_clockgating_state,
	.set_powergating_state = smu_set_powergating_state,
	.enable_umd_pstate = smu_enable_umd_pstate,
};

const struct amdgpu_ip_block_version smu_v11_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 11,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};

const struct amdgpu_ip_block_version smu_v12_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 12,
	.minor = 0,
	.rev = 0,
	.funcs = &smu_ip_funcs,
};
int smu_load_microcode(struct smu_context *smu)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->load_microcode)
		ret = smu->ppt_funcs->load_microcode(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_check_fw_status(struct smu_context *smu)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->check_fw_status)
		ret = smu->ppt_funcs->check_fw_status(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_gfx_cgpg)
		ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_fan_speed_rpm(struct smu_context *smu, uint32_t speed)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_fan_speed_rpm)
		ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_power_limit(struct smu_context *smu,
			uint32_t *limit,
			bool def,
			bool lock_needed)
{
	int ret = 0;

	if (lock_needed)
		mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_power_limit)
		ret = smu->ppt_funcs->get_power_limit(smu, limit, def);

	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_power_limit(struct smu_context *smu, uint32_t limit)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_power_limit)
		ret = smu->ppt_funcs->set_power_limit(smu, limit);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_print_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, char *buf)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->print_clk_levels)
		ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_od_percentage(struct smu_context *smu, enum smu_clk_type type)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_od_percentage)
		ret = smu->ppt_funcs->get_od_percentage(smu, type);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_od_percentage(struct smu_context *smu, enum smu_clk_type type, uint32_t value)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_od_percentage)
		ret = smu->ppt_funcs->set_od_percentage(smu, type, value);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_od_edit_dpm_table(struct smu_context *smu,
			  enum PP_OD_DPM_TABLE_COMMAND type,
			  long *input, uint32_t size)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->od_edit_dpm_table)
		ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_read_sensor(struct smu_context *smu,
		    enum amd_pp_sensors sensor,
		    void *data, uint32_t *size)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->read_sensor)
		ret = smu->ppt_funcs->read_sensor(smu, sensor, data, size);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_power_profile_mode(struct smu_context *smu, char *buf)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_power_profile_mode)
		ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_power_profile_mode(struct smu_context *smu,
			       long *param,
			       uint32_t param_size,
			       bool lock_needed)
{
	int ret = 0;

	if (lock_needed)
		mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_power_profile_mode)
		ret = smu->ppt_funcs->set_power_profile_mode(smu, param, param_size);

	if (lock_needed)
		mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_fan_control_mode(struct smu_context *smu)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_control_mode)
		ret = smu->ppt_funcs->get_fan_control_mode(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_fan_control_mode(struct smu_context *smu, int value)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_fan_control_mode)
		ret = smu->ppt_funcs->set_fan_control_mode(smu, value);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_fan_speed_percent(struct smu_context *smu, uint32_t *speed)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_speed_percent)
		ret = smu->ppt_funcs->get_fan_speed_percent(smu, speed);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_fan_speed_percent(struct smu_context *smu, uint32_t speed)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_fan_speed_percent)
		ret = smu->ppt_funcs->set_fan_speed_percent(smu, speed);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_fan_speed_rpm(struct smu_context *smu, uint32_t *speed)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_fan_speed_rpm)
		ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_deep_sleep_dcefclk(struct smu_context *smu, int clk)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_deep_sleep_dcefclk)
		ret = smu->ppt_funcs->set_deep_sleep_dcefclk(smu, clk);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_active_display_count(struct smu_context *smu, uint32_t count)
{
	int ret = 0;

	if (smu->ppt_funcs->set_active_display_count)
		ret = smu->ppt_funcs->set_active_display_count(smu, count);

	return ret;
}
int smu_get_clock_by_type(struct smu_context *smu,
			  enum amd_pp_clock_type type,
			  struct amd_pp_clocks *clocks)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type)
		ret = smu->ppt_funcs->get_clock_by_type(smu, type, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_max_high_clocks(struct smu_context *smu,
			    struct amd_pp_simple_clock_info *clocks)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_max_high_clocks)
		ret = smu->ppt_funcs->get_max_high_clocks(smu, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_clock_by_type_with_latency(struct smu_context *smu,
				       enum smu_clk_type clk_type,
				       struct pp_clock_levels_with_latency *clocks)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type_with_latency)
		ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_clock_by_type_with_voltage(struct smu_context *smu,
				       enum amd_pp_clock_type type,
				       struct pp_clock_levels_with_voltage *clocks)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_clock_by_type_with_voltage)
		ret = smu->ppt_funcs->get_clock_by_type_with_voltage(smu, type, clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_display_clock_voltage_request(struct smu_context *smu,
				      struct pp_display_clock_request *clock_req)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->display_clock_voltage_request)
		ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_display_disable_memory_clock_switch(struct smu_context *smu, bool disable_memory_clock_switch)
{
	int ret = -EINVAL;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->display_disable_memory_clock_switch)
		ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_notify_smu_enable_pwe(struct smu_context *smu)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->notify_smu_enable_pwe)
		ret = smu->ppt_funcs->notify_smu_enable_pwe(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_xgmi_pstate(struct smu_context *smu,
			uint32_t pstate)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_xgmi_pstate)
		ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_set_azalia_d3_pme(struct smu_context *smu)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->set_azalia_d3_pme)
		ret = smu->ppt_funcs->set_azalia_d3_pme(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}
bool smu_baco_is_support(struct smu_context *smu)
{
	bool ret = false;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
		ret = smu->ppt_funcs->baco_is_support(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_baco_get_state(struct smu_context *smu, enum smu_baco_state *state)
{
	if (!smu->ppt_funcs->baco_get_state)
		return -EINVAL;

	mutex_lock(&smu->mutex);
	*state = smu->ppt_funcs->baco_get_state(smu);
	mutex_unlock(&smu->mutex);

	return 0;
}

int smu_baco_enter(struct smu_context *smu)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->baco_enter)
		ret = smu->ppt_funcs->baco_enter(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_baco_exit(struct smu_context *smu)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->baco_exit)
		ret = smu->ppt_funcs->baco_exit(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_mode2_reset(struct smu_context *smu)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->mode2_reset)
		ret = smu->ppt_funcs->mode2_reset(smu);

	mutex_unlock(&smu->mutex);

	return ret;
}
int smu_get_max_sustainable_clocks_by_dc(struct smu_context *smu,
					 struct pp_smu_nv_clock_table *max_clocks)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
		ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);

	mutex_unlock(&smu->mutex);

	return ret;
}

int smu_get_uclk_dpm_states(struct smu_context *smu,
			    unsigned int *clock_values_in_khz,
			    unsigned int *num_states)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_uclk_dpm_states)
		ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);

	mutex_unlock(&smu->mutex);

	return ret;
}

enum amd_pm_state_type smu_get_current_power_state(struct smu_context *smu)
{
	enum amd_pm_state_type pm_state = POWER_STATE_TYPE_DEFAULT;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_current_power_state)
		pm_state = smu->ppt_funcs->get_current_power_state(smu);

	mutex_unlock(&smu->mutex);

	return pm_state;
}

int smu_get_dpm_clock_table(struct smu_context *smu,
			    struct dpm_clocks *clock_table)
{
	int ret = 0;

	mutex_lock(&smu->mutex);

	if (smu->ppt_funcs->get_dpm_clock_table)
		ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);

	mutex_unlock(&smu->mutex);

	return ret;
}

uint32_t smu_get_pptable_power_limit(struct smu_context *smu)
{
	uint32_t ret = 0;

	if (smu->ppt_funcs->get_pptable_power_limit)
		ret = smu->ppt_funcs->get_pptable_power_limit(smu);

	return ret;
}

int smu_send_smc_msg(struct smu_context *smu,
		     enum smu_message_type msg)
{
	int ret;

	ret = smu_send_smc_msg_with_param(smu, msg, 0);

	return ret;
}
, 0);