2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/slab.h>
27 #include "atom-types.h"
29 #include "processpptables.h"
30 #include "cgs_common.h"
33 #include "hardwaremanager.h"
35 #include "smu10_hwmgr.h"
36 #include "power_state.h"
37 #include "soc15_common.h"
40 #define SMU10_MAX_DEEPSLEEP_DIVIDER_ID 5
41 #define SMU10_MINIMUM_ENGINE_CLOCK 800 /* 8Mhz, the low boundary of engine clock allowed on this chip */
42 #define SCLK_MIN_DIV_INTV_SHIFT 12
43 #define SMU10_DISPCLK_BYPASS_THRESHOLD 10000 /* 100Mhz */
44 #define SMC_RAM_END 0x40000
46 #define mmPWR_MISC_CNTL_STATUS 0x0183
47 #define mmPWR_MISC_CNTL_STATUS_BASE_IDX 0
48 #define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT 0x0
49 #define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT 0x1
50 #define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK 0x00000001L
51 #define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK 0x00000006L
53 static const unsigned long SMU10_Magic
= (unsigned long) PHM_Rv_Magic
;
56 static int smu10_display_clock_voltage_request(struct pp_hwmgr
*hwmgr
,
57 struct pp_display_clock_request
*clock_req
)
59 struct smu10_hwmgr
*smu10_data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
60 enum amd_pp_clock_type clk_type
= clock_req
->clock_type
;
61 uint32_t clk_freq
= clock_req
->clock_freq_in_khz
/ 1000;
65 case amd_pp_dcf_clock
:
66 if (clk_freq
== smu10_data
->dcf_actual_hard_min_freq
)
68 msg
= PPSMC_MSG_SetHardMinDcefclkByFreq
;
69 smu10_data
->dcf_actual_hard_min_freq
= clk_freq
;
71 case amd_pp_soc_clock
:
72 msg
= PPSMC_MSG_SetHardMinSocclkByFreq
;
75 if (clk_freq
== smu10_data
->f_actual_hard_min_freq
)
77 smu10_data
->f_actual_hard_min_freq
= clk_freq
;
78 msg
= PPSMC_MSG_SetHardMinFclkByFreq
;
81 pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
84 smum_send_msg_to_smc_with_parameter(hwmgr
, msg
, clk_freq
);
89 static struct smu10_power_state
*cast_smu10_ps(struct pp_hw_power_state
*hw_ps
)
91 if (SMU10_Magic
!= hw_ps
->magic
)
94 return (struct smu10_power_state
*)hw_ps
;
97 static const struct smu10_power_state
*cast_const_smu10_ps(
98 const struct pp_hw_power_state
*hw_ps
)
100 if (SMU10_Magic
!= hw_ps
->magic
)
103 return (struct smu10_power_state
*)hw_ps
;
106 static int smu10_initialize_dpm_defaults(struct pp_hwmgr
*hwmgr
)
108 struct smu10_hwmgr
*smu10_data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
110 smu10_data
->dce_slow_sclk_threshold
= 30000;
111 smu10_data
->thermal_auto_throttling_treshold
= 0;
112 smu10_data
->is_nb_dpm_enabled
= 1;
113 smu10_data
->dpm_flags
= 1;
114 smu10_data
->need_min_deep_sleep_dcefclk
= true;
115 smu10_data
->num_active_display
= 0;
116 smu10_data
->deep_sleep_dcefclk
= 0;
118 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
119 PHM_PlatformCaps_SclkDeepSleep
);
121 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
122 PHM_PlatformCaps_SclkThrottleLowNotification
);
124 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
125 PHM_PlatformCaps_PowerPlaySupport
);
/* Stub: SMU10 does not populate the max power limits table. */
static int smu10_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
			struct phm_clock_and_voltage_limits *table)
{
	return 0;
}
135 static int smu10_init_dynamic_state_adjustment_rule_settings(
136 struct pp_hwmgr
*hwmgr
)
138 struct phm_clock_voltage_dependency_table
*table_clk_vlt
;
140 table_clk_vlt
= kzalloc(struct_size(table_clk_vlt
, entries
, 7),
143 if (NULL
== table_clk_vlt
) {
144 pr_err("Can not allocate memory!\n");
148 table_clk_vlt
->count
= 8;
149 table_clk_vlt
->entries
[0].clk
= PP_DAL_POWERLEVEL_0
;
150 table_clk_vlt
->entries
[0].v
= 0;
151 table_clk_vlt
->entries
[1].clk
= PP_DAL_POWERLEVEL_1
;
152 table_clk_vlt
->entries
[1].v
= 1;
153 table_clk_vlt
->entries
[2].clk
= PP_DAL_POWERLEVEL_2
;
154 table_clk_vlt
->entries
[2].v
= 2;
155 table_clk_vlt
->entries
[3].clk
= PP_DAL_POWERLEVEL_3
;
156 table_clk_vlt
->entries
[3].v
= 3;
157 table_clk_vlt
->entries
[4].clk
= PP_DAL_POWERLEVEL_4
;
158 table_clk_vlt
->entries
[4].v
= 4;
159 table_clk_vlt
->entries
[5].clk
= PP_DAL_POWERLEVEL_5
;
160 table_clk_vlt
->entries
[5].v
= 5;
161 table_clk_vlt
->entries
[6].clk
= PP_DAL_POWERLEVEL_6
;
162 table_clk_vlt
->entries
[6].v
= 6;
163 table_clk_vlt
->entries
[7].clk
= PP_DAL_POWERLEVEL_7
;
164 table_clk_vlt
->entries
[7].v
= 7;
165 hwmgr
->dyn_state
.vddc_dep_on_dal_pwrl
= table_clk_vlt
;
170 static int smu10_get_system_info_data(struct pp_hwmgr
*hwmgr
)
172 struct smu10_hwmgr
*smu10_data
= (struct smu10_hwmgr
*)hwmgr
->backend
;
174 smu10_data
->sys_info
.htc_hyst_lmt
= 5;
175 smu10_data
->sys_info
.htc_tmp_lmt
= 203;
177 if (smu10_data
->thermal_auto_throttling_treshold
== 0)
178 smu10_data
->thermal_auto_throttling_treshold
= 203;
180 smu10_construct_max_power_limits_table (hwmgr
,
181 &hwmgr
->dyn_state
.max_clock_voltage_on_ac
);
183 smu10_init_dynamic_state_adjustment_rule_settings(hwmgr
);
/* Stub: no boot state construction is needed on SMU10. */
static int smu10_construct_boot_state(struct pp_hwmgr *hwmgr)
{
	return 0;
}
193 static int smu10_set_clock_limit(struct pp_hwmgr
*hwmgr
, const void *input
)
195 struct PP_Clocks clocks
= {0};
196 struct pp_display_clock_request clock_req
;
198 clocks
.dcefClock
= hwmgr
->display_config
->min_dcef_set_clk
;
199 clock_req
.clock_type
= amd_pp_dcf_clock
;
200 clock_req
.clock_freq_in_khz
= clocks
.dcefClock
* 10;
202 PP_ASSERT_WITH_CODE(!smu10_display_clock_voltage_request(hwmgr
, &clock_req
),
203 "Attempt to set DCF Clock Failed!", return -EINVAL
);
208 static int smu10_set_min_deep_sleep_dcefclk(struct pp_hwmgr
*hwmgr
, uint32_t clock
)
210 struct smu10_hwmgr
*smu10_data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
212 if (smu10_data
->need_min_deep_sleep_dcefclk
&&
213 smu10_data
->deep_sleep_dcefclk
!= clock
) {
214 smu10_data
->deep_sleep_dcefclk
= clock
;
215 smum_send_msg_to_smc_with_parameter(hwmgr
,
216 PPSMC_MSG_SetMinDeepSleepDcefclk
,
217 smu10_data
->deep_sleep_dcefclk
);
222 static int smu10_set_hard_min_dcefclk_by_freq(struct pp_hwmgr
*hwmgr
, uint32_t clock
)
224 struct smu10_hwmgr
*smu10_data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
226 if (smu10_data
->dcf_actual_hard_min_freq
&&
227 smu10_data
->dcf_actual_hard_min_freq
!= clock
) {
228 smu10_data
->dcf_actual_hard_min_freq
= clock
;
229 smum_send_msg_to_smc_with_parameter(hwmgr
,
230 PPSMC_MSG_SetHardMinDcefclkByFreq
,
231 smu10_data
->dcf_actual_hard_min_freq
);
236 static int smu10_set_hard_min_fclk_by_freq(struct pp_hwmgr
*hwmgr
, uint32_t clock
)
238 struct smu10_hwmgr
*smu10_data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
240 if (smu10_data
->f_actual_hard_min_freq
&&
241 smu10_data
->f_actual_hard_min_freq
!= clock
) {
242 smu10_data
->f_actual_hard_min_freq
= clock
;
243 smum_send_msg_to_smc_with_parameter(hwmgr
,
244 PPSMC_MSG_SetHardMinFclkByFreq
,
245 smu10_data
->f_actual_hard_min_freq
);
250 static int smu10_set_active_display_count(struct pp_hwmgr
*hwmgr
, uint32_t count
)
252 struct smu10_hwmgr
*smu10_data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
254 if (smu10_data
->num_active_display
!= count
) {
255 smu10_data
->num_active_display
= count
;
256 smum_send_msg_to_smc_with_parameter(hwmgr
,
257 PPSMC_MSG_SetDisplayCount
,
258 smu10_data
->num_active_display
);
/* Power-state transition hook: only the DCF clock limit is applied. */
static int smu10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
	return smu10_set_clock_limit(hwmgr, input);
}
269 static int smu10_init_power_gate_state(struct pp_hwmgr
*hwmgr
)
271 struct smu10_hwmgr
*smu10_data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
272 struct amdgpu_device
*adev
= hwmgr
->adev
;
274 smu10_data
->vcn_power_gated
= true;
275 smu10_data
->isp_tileA_power_gated
= true;
276 smu10_data
->isp_tileB_power_gated
= true;
278 if (adev
->pg_flags
& AMD_PG_SUPPORT_GFX_PG
)
279 return smum_send_msg_to_smc_with_parameter(hwmgr
,
280 PPSMC_MSG_SetGfxCGPG
,
/* ASIC setup hook: initialize power-gate bookkeeping. */
static int smu10_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	return smu10_init_power_gate_state(hwmgr);
}
292 static int smu10_reset_cc6_data(struct pp_hwmgr
*hwmgr
)
294 struct smu10_hwmgr
*smu10_data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
296 smu10_data
->separation_time
= 0;
297 smu10_data
->cc6_disable
= false;
298 smu10_data
->pstate_disable
= false;
299 smu10_data
->cc6_setting_changed
= false;
/* Power-off hook: only CC6 state needs resetting on SMU10. */
static int smu10_power_off_asic(struct pp_hwmgr *hwmgr)
{
	return smu10_reset_cc6_data(hwmgr);
}
309 static bool smu10_is_gfx_on(struct pp_hwmgr
*hwmgr
)
312 struct amdgpu_device
*adev
= hwmgr
->adev
;
314 reg
= RREG32_SOC15(PWR
, 0, mmPWR_MISC_CNTL_STATUS
);
315 if ((reg
& PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK
) ==
316 (0x2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT
))
322 static int smu10_disable_gfx_off(struct pp_hwmgr
*hwmgr
)
324 struct amdgpu_device
*adev
= hwmgr
->adev
;
326 if (adev
->pm
.pp_feature
& PP_GFXOFF_MASK
) {
327 smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_DisableGfxOff
);
329 /* confirm gfx is back to "on" state */
330 while (!smu10_is_gfx_on(hwmgr
))
/* Stub: nothing to tear down for DPM on SMU10. */
static int smu10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	return 0;
}
342 static int smu10_enable_gfx_off(struct pp_hwmgr
*hwmgr
)
344 struct amdgpu_device
*adev
= hwmgr
->adev
;
346 if (adev
->pm
.pp_feature
& PP_GFXOFF_MASK
)
347 smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_EnableGfxOff
);
/* Stub: DPM is brought up by the SMU firmware itself on SMU10. */
static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	return 0;
}
357 static int smu10_gfx_off_control(struct pp_hwmgr
*hwmgr
, bool enable
)
360 return smu10_enable_gfx_off(hwmgr
);
362 return smu10_disable_gfx_off(hwmgr
);
/* Stub: no state adjustment rules apply on SMU10. */
static int smu10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
				struct pp_power_state *prequest_ps,
			const struct pp_power_state *pcurrent_ps)
{
	return 0;
}
372 /* temporary hardcoded clock voltage breakdown tables */
373 static const DpmClock_t VddDcfClk
[]= {
379 static const DpmClock_t VddSocClk
[]= {
385 static const DpmClock_t VddFClk
[]= {
391 static const DpmClock_t VddDispClk
[]= {
397 static const DpmClock_t VddDppClk
[]= {
403 static const DpmClock_t VddPhyClk
[]= {
409 static int smu10_get_clock_voltage_dependency_table(struct pp_hwmgr
*hwmgr
,
410 struct smu10_voltage_dependency_table
**pptable
,
411 uint32_t num_entry
, const DpmClock_t
*pclk_dependency_table
)
413 uint32_t table_size
, i
;
414 struct smu10_voltage_dependency_table
*ptable
;
416 table_size
= sizeof(uint32_t) + sizeof(struct smu10_voltage_dependency_table
) * num_entry
;
417 ptable
= kzalloc(table_size
, GFP_KERNEL
);
422 ptable
->count
= num_entry
;
424 for (i
= 0; i
< ptable
->count
; i
++) {
425 ptable
->entries
[i
].clk
= pclk_dependency_table
->Freq
* 100;
426 ptable
->entries
[i
].vol
= pclk_dependency_table
->Vol
;
427 pclk_dependency_table
++;
436 static int smu10_populate_clock_table(struct pp_hwmgr
*hwmgr
)
440 struct smu10_hwmgr
*smu10_data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
441 DpmClocks_t
*table
= &(smu10_data
->clock_table
);
442 struct smu10_clock_voltage_information
*pinfo
= &(smu10_data
->clock_vol_info
);
444 result
= smum_smc_table_manager(hwmgr
, (uint8_t *)table
, SMU10_CLOCKTABLE
, true);
446 PP_ASSERT_WITH_CODE((0 == result
),
447 "Attempt to copy clock table from smc failed",
450 if (0 == result
&& table
->DcefClocks
[0].Freq
!= 0) {
451 smu10_get_clock_voltage_dependency_table(hwmgr
, &pinfo
->vdd_dep_on_dcefclk
,
452 NUM_DCEFCLK_DPM_LEVELS
,
453 &smu10_data
->clock_table
.DcefClocks
[0]);
454 smu10_get_clock_voltage_dependency_table(hwmgr
, &pinfo
->vdd_dep_on_socclk
,
455 NUM_SOCCLK_DPM_LEVELS
,
456 &smu10_data
->clock_table
.SocClocks
[0]);
457 smu10_get_clock_voltage_dependency_table(hwmgr
, &pinfo
->vdd_dep_on_fclk
,
459 &smu10_data
->clock_table
.FClocks
[0]);
460 smu10_get_clock_voltage_dependency_table(hwmgr
, &pinfo
->vdd_dep_on_mclk
,
461 NUM_MEMCLK_DPM_LEVELS
,
462 &smu10_data
->clock_table
.MemClocks
[0]);
464 smu10_get_clock_voltage_dependency_table(hwmgr
, &pinfo
->vdd_dep_on_dcefclk
,
465 ARRAY_SIZE(VddDcfClk
),
467 smu10_get_clock_voltage_dependency_table(hwmgr
, &pinfo
->vdd_dep_on_socclk
,
468 ARRAY_SIZE(VddSocClk
),
470 smu10_get_clock_voltage_dependency_table(hwmgr
, &pinfo
->vdd_dep_on_fclk
,
474 smu10_get_clock_voltage_dependency_table(hwmgr
, &pinfo
->vdd_dep_on_dispclk
,
475 ARRAY_SIZE(VddDispClk
),
477 smu10_get_clock_voltage_dependency_table(hwmgr
, &pinfo
->vdd_dep_on_dppclk
,
478 ARRAY_SIZE(VddDppClk
), &VddDppClk
[0]);
479 smu10_get_clock_voltage_dependency_table(hwmgr
, &pinfo
->vdd_dep_on_phyclk
,
480 ARRAY_SIZE(VddPhyClk
), &VddPhyClk
[0]);
482 smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_GetMinGfxclkFrequency
);
483 result
= smum_get_argument(hwmgr
);
484 smu10_data
->gfx_min_freq_limit
= result
/ 10 * 1000;
486 smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_GetMaxGfxclkFrequency
);
487 result
= smum_get_argument(hwmgr
);
488 smu10_data
->gfx_max_freq_limit
= result
/ 10 * 1000;
493 static int smu10_hwmgr_backend_init(struct pp_hwmgr
*hwmgr
)
496 struct smu10_hwmgr
*data
;
498 data
= kzalloc(sizeof(struct smu10_hwmgr
), GFP_KERNEL
);
502 hwmgr
->backend
= data
;
504 result
= smu10_initialize_dpm_defaults(hwmgr
);
506 pr_err("smu10_initialize_dpm_defaults failed\n");
510 smu10_populate_clock_table(hwmgr
);
512 result
= smu10_get_system_info_data(hwmgr
);
514 pr_err("smu10_get_system_info_data failed\n");
518 smu10_construct_boot_state(hwmgr
);
520 hwmgr
->platform_descriptor
.hardwareActivityPerformanceLevels
=
521 SMU10_MAX_HARDWARE_POWERLEVELS
;
523 hwmgr
->platform_descriptor
.hardwarePerformanceLevels
=
524 SMU10_MAX_HARDWARE_POWERLEVELS
;
526 hwmgr
->platform_descriptor
.vbiosInterruptId
= 0;
528 hwmgr
->platform_descriptor
.clockStep
.engineClock
= 500;
530 hwmgr
->platform_descriptor
.clockStep
.memoryClock
= 500;
532 hwmgr
->platform_descriptor
.minimumClocksReductionPercentage
= 50;
534 hwmgr
->pstate_sclk
= SMU10_UMD_PSTATE_GFXCLK
* 100;
535 hwmgr
->pstate_mclk
= SMU10_UMD_PSTATE_FCLK
* 100;
540 static int smu10_hwmgr_backend_fini(struct pp_hwmgr
*hwmgr
)
542 struct smu10_hwmgr
*smu10_data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
543 struct smu10_clock_voltage_information
*pinfo
= &(smu10_data
->clock_vol_info
);
545 kfree(pinfo
->vdd_dep_on_dcefclk
);
546 pinfo
->vdd_dep_on_dcefclk
= NULL
;
547 kfree(pinfo
->vdd_dep_on_socclk
);
548 pinfo
->vdd_dep_on_socclk
= NULL
;
549 kfree(pinfo
->vdd_dep_on_fclk
);
550 pinfo
->vdd_dep_on_fclk
= NULL
;
551 kfree(pinfo
->vdd_dep_on_dispclk
);
552 pinfo
->vdd_dep_on_dispclk
= NULL
;
553 kfree(pinfo
->vdd_dep_on_dppclk
);
554 pinfo
->vdd_dep_on_dppclk
= NULL
;
555 kfree(pinfo
->vdd_dep_on_phyclk
);
556 pinfo
->vdd_dep_on_phyclk
= NULL
;
558 kfree(hwmgr
->dyn_state
.vddc_dep_on_dal_pwrl
);
559 hwmgr
->dyn_state
.vddc_dep_on_dal_pwrl
= NULL
;
561 kfree(hwmgr
->backend
);
562 hwmgr
->backend
= NULL
;
567 static int smu10_dpm_force_dpm_level(struct pp_hwmgr
*hwmgr
,
568 enum amd_dpm_forced_level level
)
570 struct smu10_hwmgr
*data
= hwmgr
->backend
;
571 uint32_t min_sclk
= hwmgr
->display_config
->min_core_set_clock
;
572 uint32_t min_mclk
= hwmgr
->display_config
->min_mem_set_clock
/100;
574 if (hwmgr
->smu_version
< 0x1E3700) {
575 pr_info("smu firmware version too old, can not set dpm level\n");
579 if (min_sclk
< data
->gfx_min_freq_limit
)
580 min_sclk
= data
->gfx_min_freq_limit
;
582 min_sclk
/= 100; /* transfer 10KHz to MHz */
583 if (min_mclk
< data
->clock_table
.FClocks
[0].Freq
)
584 min_mclk
= data
->clock_table
.FClocks
[0].Freq
;
587 case AMD_DPM_FORCED_LEVEL_HIGH
:
588 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
:
589 smum_send_msg_to_smc_with_parameter(hwmgr
,
590 PPSMC_MSG_SetHardMinGfxClk
,
591 data
->gfx_max_freq_limit
/100);
592 smum_send_msg_to_smc_with_parameter(hwmgr
,
593 PPSMC_MSG_SetHardMinFclkByFreq
,
594 SMU10_UMD_PSTATE_PEAK_FCLK
);
595 smum_send_msg_to_smc_with_parameter(hwmgr
,
596 PPSMC_MSG_SetHardMinSocclkByFreq
,
597 SMU10_UMD_PSTATE_PEAK_SOCCLK
);
598 smum_send_msg_to_smc_with_parameter(hwmgr
,
599 PPSMC_MSG_SetHardMinVcn
,
600 SMU10_UMD_PSTATE_VCE
);
602 smum_send_msg_to_smc_with_parameter(hwmgr
,
603 PPSMC_MSG_SetSoftMaxGfxClk
,
604 data
->gfx_max_freq_limit
/100);
605 smum_send_msg_to_smc_with_parameter(hwmgr
,
606 PPSMC_MSG_SetSoftMaxFclkByFreq
,
607 SMU10_UMD_PSTATE_PEAK_FCLK
);
608 smum_send_msg_to_smc_with_parameter(hwmgr
,
609 PPSMC_MSG_SetSoftMaxSocclkByFreq
,
610 SMU10_UMD_PSTATE_PEAK_SOCCLK
);
611 smum_send_msg_to_smc_with_parameter(hwmgr
,
612 PPSMC_MSG_SetSoftMaxVcn
,
613 SMU10_UMD_PSTATE_VCE
);
615 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK
:
616 smum_send_msg_to_smc_with_parameter(hwmgr
,
617 PPSMC_MSG_SetHardMinGfxClk
,
619 smum_send_msg_to_smc_with_parameter(hwmgr
,
620 PPSMC_MSG_SetSoftMaxGfxClk
,
623 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK
:
624 smum_send_msg_to_smc_with_parameter(hwmgr
,
625 PPSMC_MSG_SetHardMinFclkByFreq
,
627 smum_send_msg_to_smc_with_parameter(hwmgr
,
628 PPSMC_MSG_SetSoftMaxFclkByFreq
,
631 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD
:
632 smum_send_msg_to_smc_with_parameter(hwmgr
,
633 PPSMC_MSG_SetHardMinGfxClk
,
634 SMU10_UMD_PSTATE_GFXCLK
);
635 smum_send_msg_to_smc_with_parameter(hwmgr
,
636 PPSMC_MSG_SetHardMinFclkByFreq
,
637 SMU10_UMD_PSTATE_FCLK
);
638 smum_send_msg_to_smc_with_parameter(hwmgr
,
639 PPSMC_MSG_SetHardMinSocclkByFreq
,
640 SMU10_UMD_PSTATE_SOCCLK
);
641 smum_send_msg_to_smc_with_parameter(hwmgr
,
642 PPSMC_MSG_SetHardMinVcn
,
643 SMU10_UMD_PSTATE_VCE
);
645 smum_send_msg_to_smc_with_parameter(hwmgr
,
646 PPSMC_MSG_SetSoftMaxGfxClk
,
647 SMU10_UMD_PSTATE_GFXCLK
);
648 smum_send_msg_to_smc_with_parameter(hwmgr
,
649 PPSMC_MSG_SetSoftMaxFclkByFreq
,
650 SMU10_UMD_PSTATE_FCLK
);
651 smum_send_msg_to_smc_with_parameter(hwmgr
,
652 PPSMC_MSG_SetSoftMaxSocclkByFreq
,
653 SMU10_UMD_PSTATE_SOCCLK
);
654 smum_send_msg_to_smc_with_parameter(hwmgr
,
655 PPSMC_MSG_SetSoftMaxVcn
,
656 SMU10_UMD_PSTATE_VCE
);
658 case AMD_DPM_FORCED_LEVEL_AUTO
:
659 smum_send_msg_to_smc_with_parameter(hwmgr
,
660 PPSMC_MSG_SetHardMinGfxClk
,
662 smum_send_msg_to_smc_with_parameter(hwmgr
,
663 PPSMC_MSG_SetHardMinFclkByFreq
,
664 hwmgr
->display_config
->num_display
> 3 ?
665 SMU10_UMD_PSTATE_PEAK_FCLK
:
668 smum_send_msg_to_smc_with_parameter(hwmgr
,
669 PPSMC_MSG_SetHardMinSocclkByFreq
,
670 SMU10_UMD_PSTATE_MIN_SOCCLK
);
671 smum_send_msg_to_smc_with_parameter(hwmgr
,
672 PPSMC_MSG_SetHardMinVcn
,
673 SMU10_UMD_PSTATE_MIN_VCE
);
675 smum_send_msg_to_smc_with_parameter(hwmgr
,
676 PPSMC_MSG_SetSoftMaxGfxClk
,
677 data
->gfx_max_freq_limit
/100);
678 smum_send_msg_to_smc_with_parameter(hwmgr
,
679 PPSMC_MSG_SetSoftMaxFclkByFreq
,
680 SMU10_UMD_PSTATE_PEAK_FCLK
);
681 smum_send_msg_to_smc_with_parameter(hwmgr
,
682 PPSMC_MSG_SetSoftMaxSocclkByFreq
,
683 SMU10_UMD_PSTATE_PEAK_SOCCLK
);
684 smum_send_msg_to_smc_with_parameter(hwmgr
,
685 PPSMC_MSG_SetSoftMaxVcn
,
686 SMU10_UMD_PSTATE_VCE
);
688 case AMD_DPM_FORCED_LEVEL_LOW
:
689 smum_send_msg_to_smc_with_parameter(hwmgr
,
690 PPSMC_MSG_SetHardMinGfxClk
,
691 data
->gfx_min_freq_limit
/100);
692 smum_send_msg_to_smc_with_parameter(hwmgr
,
693 PPSMC_MSG_SetSoftMaxGfxClk
,
694 data
->gfx_min_freq_limit
/100);
695 smum_send_msg_to_smc_with_parameter(hwmgr
,
696 PPSMC_MSG_SetHardMinFclkByFreq
,
698 smum_send_msg_to_smc_with_parameter(hwmgr
,
699 PPSMC_MSG_SetSoftMaxFclkByFreq
,
702 case AMD_DPM_FORCED_LEVEL_MANUAL
:
703 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT
:
710 static uint32_t smu10_dpm_get_mclk(struct pp_hwmgr
*hwmgr
, bool low
)
712 struct smu10_hwmgr
*data
;
717 data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
720 return data
->clock_vol_info
.vdd_dep_on_fclk
->entries
[0].clk
;
722 return data
->clock_vol_info
.vdd_dep_on_fclk
->entries
[
723 data
->clock_vol_info
.vdd_dep_on_fclk
->count
- 1].clk
;
726 static uint32_t smu10_dpm_get_sclk(struct pp_hwmgr
*hwmgr
, bool low
)
728 struct smu10_hwmgr
*data
;
733 data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
736 return data
->gfx_min_freq_limit
;
738 return data
->gfx_max_freq_limit
;
/* Stub: the boot state needs no patching on SMU10. */
static int smu10_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
					struct pp_hw_power_state *hw_ps)
{
	return 0;
}
747 static int smu10_dpm_get_pp_table_entry_callback(
748 struct pp_hwmgr
*hwmgr
,
749 struct pp_hw_power_state
*hw_ps
,
751 const void *clock_info
)
753 struct smu10_power_state
*smu10_ps
= cast_smu10_ps(hw_ps
);
755 smu10_ps
->levels
[index
].engine_clock
= 0;
757 smu10_ps
->levels
[index
].vddc_index
= 0;
758 smu10_ps
->level
= index
+ 1;
760 if (phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
, PHM_PlatformCaps_SclkDeepSleep
)) {
761 smu10_ps
->levels
[index
].ds_divider_index
= 5;
762 smu10_ps
->levels
[index
].ss_divider_index
= 5;
/* Number of power-play table entries; 0 if the lookup fails. */
static int smu10_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
{
	int result;
	unsigned long ret = 0;

	result = pp_tables_get_num_of_entries(hwmgr, &ret);

	return result ? 0 : ret;
}
778 static int smu10_dpm_get_pp_table_entry(struct pp_hwmgr
*hwmgr
,
779 unsigned long entry
, struct pp_power_state
*ps
)
782 struct smu10_power_state
*smu10_ps
;
784 ps
->hardware
.magic
= SMU10_Magic
;
786 smu10_ps
= cast_smu10_ps(&(ps
->hardware
));
788 result
= pp_tables_get_entry(hwmgr
, entry
, ps
,
789 smu10_dpm_get_pp_table_entry_callback
);
791 smu10_ps
->uvd_clocks
.vclk
= ps
->uvd_clocks
.VCLK
;
792 smu10_ps
->uvd_clocks
.dclk
= ps
->uvd_clocks
.DCLK
;
797 static int smu10_get_power_state_size(struct pp_hwmgr
*hwmgr
)
799 return sizeof(struct smu10_power_state
);
/* Stub: CPU power state is not managed by this hwmgr. */
static int smu10_set_cpu_power_state(struct pp_hwmgr *hwmgr)
{
	return 0;
}
808 static int smu10_store_cc6_data(struct pp_hwmgr
*hwmgr
, uint32_t separation_time
,
809 bool cc6_disable
, bool pstate_disable
, bool pstate_switch_disable
)
811 struct smu10_hwmgr
*data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
813 if (separation_time
!= data
->separation_time
||
814 cc6_disable
!= data
->cc6_disable
||
815 pstate_disable
!= data
->pstate_disable
) {
816 data
->separation_time
= separation_time
;
817 data
->cc6_disable
= cc6_disable
;
818 data
->pstate_disable
= pstate_disable
;
819 data
->cc6_setting_changed
= true;
824 static int smu10_get_dal_power_level(struct pp_hwmgr
*hwmgr
,
825 struct amd_pp_simple_clock_info
*info
)
830 static int smu10_force_clock_level(struct pp_hwmgr
*hwmgr
,
831 enum pp_clock_type type
, uint32_t mask
)
833 struct smu10_hwmgr
*data
= hwmgr
->backend
;
834 struct smu10_voltage_dependency_table
*mclk_table
=
835 data
->clock_vol_info
.vdd_dep_on_fclk
;
838 low
= mask
? (ffs(mask
) - 1) : 0;
839 high
= mask
? (fls(mask
) - 1) : 0;
843 if (low
> 2 || high
> 2) {
844 pr_info("Currently sclk only support 3 levels on RV\n");
848 smum_send_msg_to_smc_with_parameter(hwmgr
,
849 PPSMC_MSG_SetHardMinGfxClk
,
850 low
== 2 ? data
->gfx_max_freq_limit
/100 :
851 low
== 1 ? SMU10_UMD_PSTATE_GFXCLK
:
852 data
->gfx_min_freq_limit
/100);
854 smum_send_msg_to_smc_with_parameter(hwmgr
,
855 PPSMC_MSG_SetSoftMaxGfxClk
,
856 high
== 0 ? data
->gfx_min_freq_limit
/100 :
857 high
== 1 ? SMU10_UMD_PSTATE_GFXCLK
:
858 data
->gfx_max_freq_limit
/100);
862 if (low
> mclk_table
->count
- 1 || high
> mclk_table
->count
- 1)
865 smum_send_msg_to_smc_with_parameter(hwmgr
,
866 PPSMC_MSG_SetHardMinFclkByFreq
,
867 mclk_table
->entries
[low
].clk
/100);
869 smum_send_msg_to_smc_with_parameter(hwmgr
,
870 PPSMC_MSG_SetSoftMaxFclkByFreq
,
871 mclk_table
->entries
[high
].clk
/100);
881 static int smu10_print_clock_levels(struct pp_hwmgr
*hwmgr
,
882 enum pp_clock_type type
, char *buf
)
884 struct smu10_hwmgr
*data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
885 struct smu10_voltage_dependency_table
*mclk_table
=
886 data
->clock_vol_info
.vdd_dep_on_fclk
;
887 uint32_t i
, now
, size
= 0;
891 smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_GetGfxclkFrequency
);
892 now
= smum_get_argument(hwmgr
);
894 /* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */
895 if (now
== data
->gfx_max_freq_limit
/100)
897 else if (now
== data
->gfx_min_freq_limit
/100)
902 size
+= sprintf(buf
+ size
, "0: %uMhz %s\n",
903 data
->gfx_min_freq_limit
/100,
905 size
+= sprintf(buf
+ size
, "1: %uMhz %s\n",
906 i
== 1 ? now
: SMU10_UMD_PSTATE_GFXCLK
,
908 size
+= sprintf(buf
+ size
, "2: %uMhz %s\n",
909 data
->gfx_max_freq_limit
/100,
913 smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_GetFclkFrequency
);
914 now
= smum_get_argument(hwmgr
);
916 for (i
= 0; i
< mclk_table
->count
; i
++)
917 size
+= sprintf(buf
+ size
, "%d: %uMhz %s\n",
919 mclk_table
->entries
[i
].clk
/ 100,
920 ((mclk_table
->entries
[i
].clk
/ 100)
930 static int smu10_get_performance_level(struct pp_hwmgr
*hwmgr
, const struct pp_hw_power_state
*state
,
931 PHM_PerformanceLevelDesignation designation
, uint32_t index
,
932 PHM_PerformanceLevel
*level
)
934 struct smu10_hwmgr
*data
;
936 if (level
== NULL
|| hwmgr
== NULL
|| state
== NULL
)
939 data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
942 level
->memory_clock
= data
->clock_vol_info
.vdd_dep_on_fclk
->entries
[0].clk
;
943 level
->coreClock
= data
->gfx_min_freq_limit
;
945 level
->memory_clock
= data
->clock_vol_info
.vdd_dep_on_fclk
->entries
[
946 data
->clock_vol_info
.vdd_dep_on_fclk
->count
- 1].clk
;
947 level
->coreClock
= data
->gfx_max_freq_limit
;
950 level
->nonLocalMemoryFreq
= 0;
951 level
->nonLocalMemoryWidth
= 0;
956 static int smu10_get_current_shallow_sleep_clocks(struct pp_hwmgr
*hwmgr
,
957 const struct pp_hw_power_state
*state
, struct pp_clock_info
*clock_info
)
959 const struct smu10_power_state
*ps
= cast_const_smu10_ps(state
);
961 clock_info
->min_eng_clk
= ps
->levels
[0].engine_clock
/ (1 << (ps
->levels
[0].ss_divider_index
));
962 clock_info
->max_eng_clk
= ps
->levels
[ps
->level
- 1].engine_clock
/ (1 << (ps
->levels
[ps
->level
- 1].ss_divider_index
));
967 #define MEM_FREQ_LOW_LATENCY 25000
968 #define MEM_FREQ_HIGH_LATENCY 80000
969 #define MEM_LATENCY_HIGH 245
970 #define MEM_LATENCY_LOW 35
971 #define MEM_LATENCY_ERR 0xFFFF
974 static uint32_t smu10_get_mem_latency(struct pp_hwmgr
*hwmgr
,
977 if (clock
>= MEM_FREQ_LOW_LATENCY
&&
978 clock
< MEM_FREQ_HIGH_LATENCY
)
979 return MEM_LATENCY_HIGH
;
980 else if (clock
>= MEM_FREQ_HIGH_LATENCY
)
981 return MEM_LATENCY_LOW
;
983 return MEM_LATENCY_ERR
;
986 static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr
*hwmgr
,
987 enum amd_pp_clock_type type
,
988 struct pp_clock_levels_with_latency
*clocks
)
991 struct smu10_hwmgr
*smu10_data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
992 struct smu10_clock_voltage_information
*pinfo
= &(smu10_data
->clock_vol_info
);
993 struct smu10_voltage_dependency_table
*pclk_vol_table
;
994 bool latency_required
= false;
1000 case amd_pp_mem_clock
:
1001 pclk_vol_table
= pinfo
->vdd_dep_on_mclk
;
1002 latency_required
= true;
1004 case amd_pp_f_clock
:
1005 pclk_vol_table
= pinfo
->vdd_dep_on_fclk
;
1006 latency_required
= true;
1008 case amd_pp_dcf_clock
:
1009 pclk_vol_table
= pinfo
->vdd_dep_on_dcefclk
;
1011 case amd_pp_disp_clock
:
1012 pclk_vol_table
= pinfo
->vdd_dep_on_dispclk
;
1014 case amd_pp_phy_clock
:
1015 pclk_vol_table
= pinfo
->vdd_dep_on_phyclk
;
1017 case amd_pp_dpp_clock
:
1018 pclk_vol_table
= pinfo
->vdd_dep_on_dppclk
;
1024 if (pclk_vol_table
== NULL
|| pclk_vol_table
->count
== 0)
1027 clocks
->num_levels
= 0;
1028 for (i
= 0; i
< pclk_vol_table
->count
; i
++) {
1029 clocks
->data
[i
].clocks_in_khz
= pclk_vol_table
->entries
[i
].clk
* 10;
1030 clocks
->data
[i
].latency_in_us
= latency_required
?
1031 smu10_get_mem_latency(hwmgr
,
1032 pclk_vol_table
->entries
[i
].clk
) :
1034 clocks
->num_levels
++;
1040 static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr
*hwmgr
,
1041 enum amd_pp_clock_type type
,
1042 struct pp_clock_levels_with_voltage
*clocks
)
1045 struct smu10_hwmgr
*smu10_data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
1046 struct smu10_clock_voltage_information
*pinfo
= &(smu10_data
->clock_vol_info
);
1047 struct smu10_voltage_dependency_table
*pclk_vol_table
= NULL
;
1053 case amd_pp_mem_clock
:
1054 pclk_vol_table
= pinfo
->vdd_dep_on_mclk
;
1056 case amd_pp_f_clock
:
1057 pclk_vol_table
= pinfo
->vdd_dep_on_fclk
;
1059 case amd_pp_dcf_clock
:
1060 pclk_vol_table
= pinfo
->vdd_dep_on_dcefclk
;
1062 case amd_pp_soc_clock
:
1063 pclk_vol_table
= pinfo
->vdd_dep_on_socclk
;
1065 case amd_pp_disp_clock
:
1066 pclk_vol_table
= pinfo
->vdd_dep_on_dispclk
;
1068 case amd_pp_phy_clock
:
1069 pclk_vol_table
= pinfo
->vdd_dep_on_phyclk
;
1075 if (pclk_vol_table
== NULL
|| pclk_vol_table
->count
== 0)
1078 clocks
->num_levels
= 0;
1079 for (i
= 0; i
< pclk_vol_table
->count
; i
++) {
1080 clocks
->data
[i
].clocks_in_khz
= pclk_vol_table
->entries
[i
].clk
* 10;
1081 clocks
->data
[i
].voltage_in_mv
= pclk_vol_table
->entries
[i
].vol
;
1082 clocks
->num_levels
++;
1090 static int smu10_get_max_high_clocks(struct pp_hwmgr
*hwmgr
, struct amd_pp_simple_clock_info
*clocks
)
1092 clocks
->engine_max_clock
= 80000; /* driver can't get engine clock, temp hard code to 800MHz */
1096 static int smu10_thermal_get_temperature(struct pp_hwmgr
*hwmgr
)
1098 struct amdgpu_device
*adev
= hwmgr
->adev
;
1099 uint32_t reg_value
= RREG32_SOC15(THM
, 0, mmTHM_TCON_CUR_TMP
);
1101 (reg_value
& THM_TCON_CUR_TMP__CUR_TEMP_MASK
) >> THM_TCON_CUR_TMP__CUR_TEMP__SHIFT
;
1103 if (cur_temp
& THM_TCON_CUR_TMP__CUR_TEMP_RANGE_SEL_MASK
)
1104 cur_temp
= ((cur_temp
/ 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES
;
1106 cur_temp
= (cur_temp
/ 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES
;
1111 static int smu10_read_sensor(struct pp_hwmgr
*hwmgr
, int idx
,
1112 void *value
, int *size
)
1114 struct smu10_hwmgr
*smu10_data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
1115 uint32_t sclk
, mclk
;
1119 case AMDGPU_PP_SENSOR_GFX_SCLK
:
1120 smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_GetGfxclkFrequency
);
1121 sclk
= smum_get_argument(hwmgr
);
1122 /* in units of 10KHZ */
1123 *((uint32_t *)value
) = sclk
* 100;
1126 case AMDGPU_PP_SENSOR_GFX_MCLK
:
1127 smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_GetFclkFrequency
);
1128 mclk
= smum_get_argument(hwmgr
);
1129 /* in units of 10KHZ */
1130 *((uint32_t *)value
) = mclk
* 100;
1133 case AMDGPU_PP_SENSOR_GPU_TEMP
:
1134 *((uint32_t *)value
) = smu10_thermal_get_temperature(hwmgr
);
1136 case AMDGPU_PP_SENSOR_VCN_POWER_STATE
:
1137 *(uint32_t *)value
= smu10_data
->vcn_power_gated
? 0 : 1;
1148 static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr
*hwmgr
,
1151 struct smu10_hwmgr
*data
= hwmgr
->backend
;
1152 struct dm_pp_wm_sets_with_clock_ranges_soc15
*wm_with_clock_ranges
= clock_ranges
;
1153 Watermarks_t
*table
= &(data
->water_marks_table
);
1155 smu_set_watermarks_for_clocks_ranges(table
,wm_with_clock_ranges
);
1156 smum_smc_table_manager(hwmgr
, (uint8_t *)table
, (uint16_t)SMU10_WMTABLE
, false);
1157 data
->water_marks_exist
= true;
1161 static int smu10_smus_notify_pwe(struct pp_hwmgr
*hwmgr
)
1164 return smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_SetRccPfcPmeRestoreRegister
);
1167 static int smu10_powergate_mmhub(struct pp_hwmgr
*hwmgr
)
1169 return smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_PowerGateMmHub
);
1172 static int smu10_powergate_sdma(struct pp_hwmgr
*hwmgr
, bool gate
)
1175 return smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_PowerDownSdma
);
1177 return smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_PowerUpSdma
);
1180 static void smu10_powergate_vcn(struct pp_hwmgr
*hwmgr
, bool bgate
)
1182 struct smu10_hwmgr
*smu10_data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
1185 amdgpu_device_ip_set_powergating_state(hwmgr
->adev
,
1186 AMD_IP_BLOCK_TYPE_VCN
,
1188 smum_send_msg_to_smc_with_parameter(hwmgr
,
1189 PPSMC_MSG_PowerDownVcn
, 0);
1190 smu10_data
->vcn_power_gated
= true;
1192 smum_send_msg_to_smc_with_parameter(hwmgr
,
1193 PPSMC_MSG_PowerUpVcn
, 0);
1194 amdgpu_device_ip_set_powergating_state(hwmgr
->adev
,
1195 AMD_IP_BLOCK_TYPE_VCN
,
1196 AMD_PG_STATE_UNGATE
);
1197 smu10_data
->vcn_power_gated
= false;
1201 static int conv_power_profile_to_pplib_workload(int power_profile
)
1203 int pplib_workload
= 0;
1205 switch (power_profile
) {
1206 case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT
:
1207 pplib_workload
= WORKLOAD_DEFAULT_BIT
;
1209 case PP_SMC_POWER_PROFILE_FULLSCREEN3D
:
1210 pplib_workload
= WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT
;
1212 case PP_SMC_POWER_PROFILE_POWERSAVING
:
1213 pplib_workload
= WORKLOAD_PPLIB_POWER_SAVING_BIT
;
1215 case PP_SMC_POWER_PROFILE_VIDEO
:
1216 pplib_workload
= WORKLOAD_PPLIB_VIDEO_BIT
;
1218 case PP_SMC_POWER_PROFILE_VR
:
1219 pplib_workload
= WORKLOAD_PPLIB_VR_BIT
;
1221 case PP_SMC_POWER_PROFILE_COMPUTE
:
1222 pplib_workload
= WORKLOAD_PPLIB_COMPUTE_BIT
;
1226 return pplib_workload
;
1229 static int smu10_get_power_profile_mode(struct pp_hwmgr
*hwmgr
, char *buf
)
1231 uint32_t i
, size
= 0;
1232 static const uint8_t
1233 profile_mode_setting
[6][4] = {{70, 60, 0, 0,},
1240 static const char *profile_name
[6] = {
1247 static const char *title
[6] = {"NUM",
1252 "MIN_ACTIVE_LEVEL"};
1257 size
+= sprintf(buf
+ size
, "%s %16s %s %s %s %s\n",title
[0],
1258 title
[1], title
[2], title
[3], title
[4], title
[5]);
1260 for (i
= 0; i
<= PP_SMC_POWER_PROFILE_COMPUTE
; i
++)
1261 size
+= sprintf(buf
+ size
, "%3d %14s%s: %14d %3d %10d %14d\n",
1262 i
, profile_name
[i
], (i
== hwmgr
->power_profile_mode
) ? "*" : " ",
1263 profile_mode_setting
[i
][0], profile_mode_setting
[i
][1],
1264 profile_mode_setting
[i
][2], profile_mode_setting
[i
][3]);
1269 static bool smu10_is_raven1_refresh(struct pp_hwmgr
*hwmgr
)
1271 struct amdgpu_device
*adev
= hwmgr
->adev
;
1272 if ((adev
->asic_type
== CHIP_RAVEN
) &&
1273 (adev
->rev_id
!= 0x15d8) &&
1274 (hwmgr
->smu_version
>= 0x41e2b))
1280 static int smu10_set_power_profile_mode(struct pp_hwmgr
*hwmgr
, long *input
, uint32_t size
)
1282 int workload_type
= 0;
1285 if (input
[size
] > PP_SMC_POWER_PROFILE_COMPUTE
) {
1286 pr_err("Invalid power profile mode %ld\n", input
[size
]);
1289 if (hwmgr
->power_profile_mode
== input
[size
])
1292 /* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
1294 conv_power_profile_to_pplib_workload(input
[size
]);
1295 if (workload_type
&&
1296 smu10_is_raven1_refresh(hwmgr
) &&
1297 !hwmgr
->gfxoff_state_changed_by_workload
) {
1298 smu10_gfx_off_control(hwmgr
, false);
1299 hwmgr
->gfxoff_state_changed_by_workload
= true;
1301 result
= smum_send_msg_to_smc_with_parameter(hwmgr
, PPSMC_MSG_ActiveProcessNotify
,
1302 1 << workload_type
);
1304 hwmgr
->power_profile_mode
= input
[size
];
1305 if (workload_type
&& hwmgr
->gfxoff_state_changed_by_workload
) {
1306 smu10_gfx_off_control(hwmgr
, true);
1307 hwmgr
->gfxoff_state_changed_by_workload
= false;
1313 static int smu10_asic_reset(struct pp_hwmgr
*hwmgr
, enum SMU_ASIC_RESET_MODE mode
)
1315 return smum_send_msg_to_smc_with_parameter(hwmgr
,
1316 PPSMC_MSG_DeviceDriverReset
,
1320 static const struct pp_hwmgr_func smu10_hwmgr_funcs
= {
1321 .backend_init
= smu10_hwmgr_backend_init
,
1322 .backend_fini
= smu10_hwmgr_backend_fini
,
1324 .apply_state_adjust_rules
= smu10_apply_state_adjust_rules
,
1325 .force_dpm_level
= smu10_dpm_force_dpm_level
,
1326 .get_power_state_size
= smu10_get_power_state_size
,
1327 .powerdown_uvd
= NULL
,
1328 .powergate_uvd
= smu10_powergate_vcn
,
1329 .powergate_vce
= NULL
,
1330 .get_mclk
= smu10_dpm_get_mclk
,
1331 .get_sclk
= smu10_dpm_get_sclk
,
1332 .patch_boot_state
= smu10_dpm_patch_boot_state
,
1333 .get_pp_table_entry
= smu10_dpm_get_pp_table_entry
,
1334 .get_num_of_pp_table_entries
= smu10_dpm_get_num_of_pp_table_entries
,
1335 .set_cpu_power_state
= smu10_set_cpu_power_state
,
1336 .store_cc6_data
= smu10_store_cc6_data
,
1337 .force_clock_level
= smu10_force_clock_level
,
1338 .print_clock_levels
= smu10_print_clock_levels
,
1339 .get_dal_power_level
= smu10_get_dal_power_level
,
1340 .get_performance_level
= smu10_get_performance_level
,
1341 .get_current_shallow_sleep_clocks
= smu10_get_current_shallow_sleep_clocks
,
1342 .get_clock_by_type_with_latency
= smu10_get_clock_by_type_with_latency
,
1343 .get_clock_by_type_with_voltage
= smu10_get_clock_by_type_with_voltage
,
1344 .set_watermarks_for_clocks_ranges
= smu10_set_watermarks_for_clocks_ranges
,
1345 .get_max_high_clocks
= smu10_get_max_high_clocks
,
1346 .read_sensor
= smu10_read_sensor
,
1347 .set_active_display_count
= smu10_set_active_display_count
,
1348 .set_min_deep_sleep_dcefclk
= smu10_set_min_deep_sleep_dcefclk
,
1349 .dynamic_state_management_enable
= smu10_enable_dpm_tasks
,
1350 .power_off_asic
= smu10_power_off_asic
,
1351 .asic_setup
= smu10_setup_asic_task
,
1352 .power_state_set
= smu10_set_power_state_tasks
,
1353 .dynamic_state_management_disable
= smu10_disable_dpm_tasks
,
1354 .powergate_mmhub
= smu10_powergate_mmhub
,
1355 .smus_notify_pwe
= smu10_smus_notify_pwe
,
1356 .display_clock_voltage_request
= smu10_display_clock_voltage_request
,
1357 .powergate_gfx
= smu10_gfx_off_control
,
1358 .powergate_sdma
= smu10_powergate_sdma
,
1359 .set_hard_min_dcefclk_by_freq
= smu10_set_hard_min_dcefclk_by_freq
,
1360 .set_hard_min_fclk_by_freq
= smu10_set_hard_min_fclk_by_freq
,
1361 .get_power_profile_mode
= smu10_get_power_profile_mode
,
1362 .set_power_profile_mode
= smu10_set_power_profile_mode
,
1363 .asic_reset
= smu10_asic_reset
,
1366 int smu10_init_function_pointers(struct pp_hwmgr
*hwmgr
)
1368 hwmgr
->hwmgr_func
= &smu10_hwmgr_funcs
;
1369 hwmgr
->pptable_func
= &pptable_funcs
;