/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "pp_debug.h"
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "atom-types.h"
#include "atombios.h"
#include "processpptables.h"
#include "cgs_common.h"
#include "smu/smu_8_0_d.h"
#include "smu8_fusion.h"
#include "smu/smu_8_0_sh_mask.h"
#include "smumgr.h"
#include "hwmgr.h"
#include "hardwaremanager.h"
#include "cz_ppsmc.h"
#include "smu8_hwmgr.h"
#include "power_state.h"
#include "pp_thermal.h"

#define ixSMUSVI_NB_CURRENTVID 0xD8230044
#define CURRENT_NB_VID_MASK 0xff000000
#define CURRENT_NB_VID__SHIFT 24
#define ixSMUSVI_GFX_CURRENTVID 0xD8230048
#define CURRENT_GFX_VID_MASK 0xff000000
#define CURRENT_GFX_VID__SHIFT 24

static const unsigned long smu8_magic = (unsigned long) PHM_Cz_Magic;

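/*
 * Power states are tagged with a magic value so that a generic
 * pp_hw_power_state can be safely downcast to the smu8-specific layout;
 * the cast helpers below return NULL when the tag does not match.
 */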
static struct smu8_power_state *cast_smu8_power_state(struct pp_hw_power_state *hw_ps)
{
	if (smu8_magic != hw_ps->magic)
		return NULL;

	return (struct smu8_power_state *)hw_ps;
}

static const struct smu8_power_state *cast_const_smu8_power_state(
				const struct pp_hw_power_state *hw_ps)
{
	if (smu8_magic != hw_ps->magic)
		return NULL;

	return (struct smu8_power_state *)hw_ps;
}

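/*
 * The smu8_get_*_level() helpers map a clock request onto a DPM level index
 * for the given SMU message: *Min messages pick the first table entry at or
 * above the requested clock, *Max messages pick the last entry at or below it.
 */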
static uint32_t smu8_get_eclk_level(struct pp_hwmgr *hwmgr,
			uint32_t clock, uint32_t msg)
{
	int i = 0;
	struct phm_vce_clock_voltage_dependency_table *ptable =
		hwmgr->dyn_state.vce_clock_voltage_dependency_table;

	switch (msg) {
	case PPSMC_MSG_SetEclkSoftMin:
	case PPSMC_MSG_SetEclkHardMin:
		for (i = 0; i < (int)ptable->count; i++) {
			if (clock <= ptable->entries[i].ecclk)
				break;
		}
		break;

	case PPSMC_MSG_SetEclkSoftMax:
	case PPSMC_MSG_SetEclkHardMax:
		for (i = ptable->count - 1; i >= 0; i--) {
			if (clock >= ptable->entries[i].ecclk)
				break;
		}
		break;

	default:
		break;
	}

	return i;
}

static uint32_t smu8_get_sclk_level(struct pp_hwmgr *hwmgr,
			uint32_t clock, uint32_t msg)
{
	int i = 0;
	struct phm_clock_voltage_dependency_table *table =
		hwmgr->dyn_state.vddc_dependency_on_sclk;

	switch (msg) {
	case PPSMC_MSG_SetSclkSoftMin:
	case PPSMC_MSG_SetSclkHardMin:
		for (i = 0; i < (int)table->count; i++) {
			if (clock <= table->entries[i].clk)
				break;
		}
		break;

	case PPSMC_MSG_SetSclkSoftMax:
	case PPSMC_MSG_SetSclkHardMax:
		for (i = table->count - 1; i >= 0; i--) {
			if (clock >= table->entries[i].clk)
				break;
		}
		break;

	default:
		break;
	}
	return i;
}

static uint32_t smu8_get_uvd_level(struct pp_hwmgr *hwmgr,
					uint32_t clock, uint32_t msg)
{
	int i = 0;
	struct phm_uvd_clock_voltage_dependency_table *ptable =
		hwmgr->dyn_state.uvd_clock_voltage_dependency_table;

	switch (msg) {
	case PPSMC_MSG_SetUvdSoftMin:
	case PPSMC_MSG_SetUvdHardMin:
		for (i = 0; i < (int)ptable->count; i++) {
			if (clock <= ptable->entries[i].vclk)
				break;
		}
		break;

	case PPSMC_MSG_SetUvdSoftMax:
	case PPSMC_MSG_SetUvdHardMax:
		for (i = ptable->count - 1; i >= 0; i--) {
			if (clock >= ptable->entries[i].vclk)
				break;
		}
		break;

	default:
		break;
	}

	return i;
}

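/*
 * The SMU reports the index of its highest SCLK DPM level; cache it once as
 * a level count so later lookups avoid the message round trip.
 */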
static uint32_t smu8_get_max_sclk_level(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	if (data->max_sclk_level == 0) {
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxSclkLevel);
		data->max_sclk_level = smum_get_argument(hwmgr) + 1;
	}

	return data->max_sclk_level;
}

static int smu8_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	struct amdgpu_device *adev = hwmgr->adev;

	data->gfx_ramp_step = 256*25/100;
	data->gfx_ramp_delay = 1; /* by default, we delay 1us */

	data->mgcg_cgtt_local0 = 0x00000000;
	data->mgcg_cgtt_local1 = 0x00000000;
	data->clock_slow_down_freq = 25000;
	data->skip_clock_slow_down = 1;
	data->enable_nb_ps_policy = 1; /* disable until UNB is ready, Enabled */
	data->voltage_drop_in_dce_power_gating = 0; /* disable until fully verified */
	data->voting_rights_clients = 0x00C00033;
	data->static_screen_threshold = 8;
	data->ddi_power_gating_disabled = 0;
	data->bapm_enabled = 1;
	data->voltage_drop_threshold = 0;
	data->gfx_power_gating_threshold = 500;
	data->vce_slow_sclk_threshold = 20000;
	data->dce_slow_sclk_threshold = 30000;
	data->disable_driver_thermal_policy = 1;
	data->disable_nb_ps3_in_battery = 0;

	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ABM);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_NonABMSupportInPPLib);

	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicM3Arbiter);

	data->override_dynamic_mgpg = 1;

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicPatchPowerState);

	data->thermal_auto_throttling_treshold = 0;
	data->tdr_clock = 0;
	data->disable_gfx_power_gating_in_uvd = 0;

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicUVDState);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UVDDPM);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_VCEDPM);

	data->cc6_settings.cpu_cc6_disable = false;
	data->cc6_settings.cpu_pstate_disable = false;
	data->cc6_settings.nb_pstate_switch_disable = false;
	data->cc6_settings.cpu_pstate_separation_time = 0;

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DisableVoltageIsland);

	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UVDPowerGating);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_VCEPowerGating);

	if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_UVDPowerGating);
	if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_VCEPowerGating);

	return 0;
}

/* convert from 8-bit VID to real voltage in mV*4 */
static uint32_t smu8_convert_8Bit_index_to_voltage(
			struct pp_hwmgr *hwmgr, uint16_t voltage)
{
	return 6200 - (voltage * 25);
}

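/*
 * Example: VID 0x40 (64) decodes to 6200 - 64 * 25 = 4600, i.e. 1150 mV once
 * the mV*4 value is divided by 4 as the sensor paths below do.
 */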
static int smu8_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
			struct phm_clock_and_voltage_limits *table)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	struct smu8_sys_info *sys_info = &data->sys_info;
	struct phm_clock_voltage_dependency_table *dep_table =
		hwmgr->dyn_state.vddc_dependency_on_sclk;

	if (dep_table->count > 0) {
		table->sclk = dep_table->entries[dep_table->count-1].clk;
		table->vddc = smu8_convert_8Bit_index_to_voltage(hwmgr,
			(uint16_t)dep_table->entries[dep_table->count-1].v);
	}
	table->mclk = sys_info->nbp_memory_clock[0];
	return 0;
}

static int smu8_init_dynamic_state_adjustment_rule_settings(
			struct pp_hwmgr *hwmgr,
			ATOM_CLK_VOLT_CAPABILITY *disp_voltage_table)
{
	uint32_t table_size =
		sizeof(struct phm_clock_voltage_dependency_table) +
		(7 * sizeof(struct phm_clock_voltage_dependency_record));

	struct phm_clock_voltage_dependency_table *table_clk_vlt =
					kzalloc(table_size, GFP_KERNEL);

	if (NULL == table_clk_vlt) {
		pr_err("Can not allocate memory!\n");
		return -ENOMEM;
	}

	table_clk_vlt->count = 8;
	table_clk_vlt->entries[0].clk = PP_DAL_POWERLEVEL_0;
	table_clk_vlt->entries[0].v = 0;
	table_clk_vlt->entries[1].clk = PP_DAL_POWERLEVEL_1;
	table_clk_vlt->entries[1].v = 1;
	table_clk_vlt->entries[2].clk = PP_DAL_POWERLEVEL_2;
	table_clk_vlt->entries[2].v = 2;
	table_clk_vlt->entries[3].clk = PP_DAL_POWERLEVEL_3;
	table_clk_vlt->entries[3].v = 3;
	table_clk_vlt->entries[4].clk = PP_DAL_POWERLEVEL_4;
	table_clk_vlt->entries[4].v = 4;
	table_clk_vlt->entries[5].clk = PP_DAL_POWERLEVEL_5;
	table_clk_vlt->entries[5].v = 5;
	table_clk_vlt->entries[6].clk = PP_DAL_POWERLEVEL_6;
	table_clk_vlt->entries[6].v = 6;
	table_clk_vlt->entries[7].clk = PP_DAL_POWERLEVEL_7;
	table_clk_vlt->entries[7].v = 7;
	hwmgr->dyn_state.vddc_dep_on_dal_pwrl = table_clk_vlt;

	return 0;
}

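/*
 * Pull the integrated system info table out of the VBIOS and mirror the
 * fields the driver cares about (boot clocks, NB P-state clocks/voltages,
 * display clocks, thermal limits) into the backend's sys_info cache.
 */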
static int smu8_get_system_info_data(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *info = NULL;
	uint32_t i;
	int result = 0;
	uint8_t frev, crev;
	uint16_t size;

	info = (ATOM_INTEGRATED_SYSTEM_INFO_V1_9 *)smu_atom_get_data_table(hwmgr->adev,
			GetIndexIntoMasterTable(DATA, IntegratedSystemInfo),
			&size, &frev, &crev);

	if (info == NULL) {
		pr_err("Could not retrieve the Integrated System Info Table!\n");
		return -EINVAL;
	}

	if (crev != 9) {
		pr_err("Unsupported IGP table: %d %d\n", frev, crev);
		return -EINVAL;
	}

	data->sys_info.bootup_uma_clock =
				le32_to_cpu(info->ulBootUpUMAClock);

	data->sys_info.bootup_engine_clock =
				le32_to_cpu(info->ulBootUpEngineClock);

	data->sys_info.dentist_vco_freq =
				le32_to_cpu(info->ulDentistVCOFreq);

	data->sys_info.system_config =
				le32_to_cpu(info->ulSystemConfig);

	data->sys_info.bootup_nb_voltage_index =
				le16_to_cpu(info->usBootUpNBVoltage);

	data->sys_info.htc_hyst_lmt =
				(info->ucHtcHystLmt == 0) ? 5 : info->ucHtcHystLmt;

	data->sys_info.htc_tmp_lmt =
				(info->ucHtcTmpLmt == 0) ? 203 : info->ucHtcTmpLmt;

	if (data->sys_info.htc_tmp_lmt <=
			data->sys_info.htc_hyst_lmt) {
		pr_err("The htcTmpLmt should be larger than htcHystLmt.\n");
		return -EINVAL;
	}

	data->sys_info.nb_dpm_enable =
				data->enable_nb_ps_policy &&
				(le32_to_cpu(info->ulSystemConfig) >> 3 & 0x1);

	for (i = 0; i < SMU8_NUM_NBPSTATES; i++) {
		if (i < SMU8_NUM_NBPMEMORYCLOCK) {
			data->sys_info.nbp_memory_clock[i] =
				le32_to_cpu(info->ulNbpStateMemclkFreq[i]);
		}
		data->sys_info.nbp_n_clock[i] =
				le32_to_cpu(info->ulNbpStateNClkFreq[i]);
	}

	for (i = 0; i < MAX_DISPLAY_CLOCK_LEVEL; i++) {
		data->sys_info.display_clock[i] =
			le32_to_cpu(info->sDispClkVoltageMapping[i].ulMaximumSupportedCLK);
	}

	/* Here we use 4 levels; make sure we do not exceed the array size */
	for (i = 0; i < SMU8_NUM_NBPSTATES; i++) {
		data->sys_info.nbp_voltage_index[i] =
			le16_to_cpu(info->usNBPStateVoltage[i]);
	}

	if (!data->sys_info.nb_dpm_enable) {
		for (i = 1; i < SMU8_NUM_NBPSTATES; i++) {
			if (i < SMU8_NUM_NBPMEMORYCLOCK) {
				data->sys_info.nbp_memory_clock[i] =
					data->sys_info.nbp_memory_clock[0];
			}
			data->sys_info.nbp_n_clock[i] =
					data->sys_info.nbp_n_clock[0];
			data->sys_info.nbp_voltage_index[i] =
					data->sys_info.nbp_voltage_index[0];
		}
	}

	if (le32_to_cpu(info->ulGPUCapInfo) &
		SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_EnableDFSBypass);
	}

	data->sys_info.uma_channel_number = info->ucUMAChannelNumber;

	smu8_construct_max_power_limits_table(hwmgr,
			&hwmgr->dyn_state.max_clock_voltage_on_ac);

	smu8_init_dynamic_state_adjustment_rule_settings(hwmgr,
			&info->sDISPCLK_Voltage[0]);

	return result;
}

static int smu8_construct_boot_state(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	data->boot_power_level.engineClock =
				data->sys_info.bootup_engine_clock;

	data->boot_power_level.vddcIndex =
			(uint8_t)data->sys_info.bootup_nb_voltage_index;

	data->boot_power_level.dsDividerIndex = 0;
	data->boot_power_level.ssDividerIndex = 0;
	data->boot_power_level.allowGnbSlow = 1;
	data->boot_power_level.forceNBPstate = 0;
	data->boot_power_level.hysteresis_up = 0;
	data->boot_power_level.numSIMDToPowerDown = 0;
	data->boot_power_level.display_wm = 0;
	data->boot_power_level.vce_wm = 0;

	return 0;
}

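/*
 * Patch the SMU's fusion clock table in place: download it, overwrite each
 * hardware power level's frequency/VID pairs from the dependency tables and
 * recompute the PLL post dividers, then upload the result back to the SMU.
 */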
static int smu8_upload_pptable_to_smu(struct pp_hwmgr *hwmgr)
{
	struct SMU8_Fusion_ClkTable *clock_table;
	int ret;
	uint32_t i;
	void *table = NULL;
	pp_atomctrl_clock_dividers_kong dividers;

	struct phm_clock_voltage_dependency_table *vddc_table =
		hwmgr->dyn_state.vddc_dependency_on_sclk;
	struct phm_clock_voltage_dependency_table *vdd_gfx_table =
		hwmgr->dyn_state.vdd_gfx_dependency_on_sclk;
	struct phm_acp_clock_voltage_dependency_table *acp_table =
		hwmgr->dyn_state.acp_clock_voltage_dependency_table;
	struct phm_uvd_clock_voltage_dependency_table *uvd_table =
		hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
	struct phm_vce_clock_voltage_dependency_table *vce_table =
		hwmgr->dyn_state.vce_clock_voltage_dependency_table;

	if (!hwmgr->need_pp_table_upload)
		return 0;

	ret = smum_download_powerplay_table(hwmgr, &table);

	PP_ASSERT_WITH_CODE((0 == ret && NULL != table),
			"Fail to get clock table from SMU!", return -EINVAL;);

	clock_table = (struct SMU8_Fusion_ClkTable *)table;

	/* patch clock table */
	PP_ASSERT_WITH_CODE((vddc_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
			"Dependency table entry exceeds max limit!", return -EINVAL;);
	PP_ASSERT_WITH_CODE((vdd_gfx_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
			"Dependency table entry exceeds max limit!", return -EINVAL;);
	PP_ASSERT_WITH_CODE((acp_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
			"Dependency table entry exceeds max limit!", return -EINVAL;);
	PP_ASSERT_WITH_CODE((uvd_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
			"Dependency table entry exceeds max limit!", return -EINVAL;);
	PP_ASSERT_WITH_CODE((vce_table->count <= SMU8_MAX_HARDWARE_POWERLEVELS),
			"Dependency table entry exceeds max limit!", return -EINVAL;);

	for (i = 0; i < SMU8_MAX_HARDWARE_POWERLEVELS; i++) {

		clock_table->SclkBreakdownTable.ClkLevel[i].GnbVid =
			(i < vddc_table->count) ? (uint8_t)vddc_table->entries[i].v : 0;
		clock_table->SclkBreakdownTable.ClkLevel[i].Frequency =
			(i < vddc_table->count) ? vddc_table->entries[i].clk : 0;

		atomctrl_get_engine_pll_dividers_kong(hwmgr,
			clock_table->SclkBreakdownTable.ClkLevel[i].Frequency,
			&dividers);

		clock_table->SclkBreakdownTable.ClkLevel[i].DfsDid =
			(uint8_t)dividers.pll_post_divider;

		clock_table->SclkBreakdownTable.ClkLevel[i].GfxVid =
			(i < vdd_gfx_table->count) ? (uint8_t)vdd_gfx_table->entries[i].v : 0;

		clock_table->AclkBreakdownTable.ClkLevel[i].GfxVid =
			(i < acp_table->count) ? (uint8_t)acp_table->entries[i].v : 0;
		clock_table->AclkBreakdownTable.ClkLevel[i].Frequency =
			(i < acp_table->count) ? acp_table->entries[i].acpclk : 0;

		atomctrl_get_engine_pll_dividers_kong(hwmgr,
			clock_table->AclkBreakdownTable.ClkLevel[i].Frequency,
			&dividers);

		clock_table->AclkBreakdownTable.ClkLevel[i].DfsDid =
			(uint8_t)dividers.pll_post_divider;

		clock_table->VclkBreakdownTable.ClkLevel[i].GfxVid =
			(i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
		clock_table->VclkBreakdownTable.ClkLevel[i].Frequency =
			(i < uvd_table->count) ? uvd_table->entries[i].vclk : 0;

		atomctrl_get_engine_pll_dividers_kong(hwmgr,
			clock_table->VclkBreakdownTable.ClkLevel[i].Frequency,
			&dividers);

		clock_table->VclkBreakdownTable.ClkLevel[i].DfsDid =
			(uint8_t)dividers.pll_post_divider;

		clock_table->DclkBreakdownTable.ClkLevel[i].GfxVid =
			(i < uvd_table->count) ? (uint8_t)uvd_table->entries[i].v : 0;
		clock_table->DclkBreakdownTable.ClkLevel[i].Frequency =
			(i < uvd_table->count) ? uvd_table->entries[i].dclk : 0;

		atomctrl_get_engine_pll_dividers_kong(hwmgr,
			clock_table->DclkBreakdownTable.ClkLevel[i].Frequency,
			&dividers);

		clock_table->DclkBreakdownTable.ClkLevel[i].DfsDid =
			(uint8_t)dividers.pll_post_divider;

		clock_table->EclkBreakdownTable.ClkLevel[i].GfxVid =
			(i < vce_table->count) ? (uint8_t)vce_table->entries[i].v : 0;
		clock_table->EclkBreakdownTable.ClkLevel[i].Frequency =
			(i < vce_table->count) ? vce_table->entries[i].ecclk : 0;

		atomctrl_get_engine_pll_dividers_kong(hwmgr,
			clock_table->EclkBreakdownTable.ClkLevel[i].Frequency,
			&dividers);

		clock_table->EclkBreakdownTable.ClkLevel[i].DfsDid =
			(uint8_t)dividers.pll_post_divider;
	}
	ret = smum_upload_powerplay_table(hwmgr);

	return ret;
}

static int smu8_init_sclk_limit(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	struct phm_clock_voltage_dependency_table *table =
				hwmgr->dyn_state.vddc_dependency_on_sclk;
	unsigned long clock = 0, level;

	if (NULL == table || table->count <= 0)
		return -EINVAL;

	data->sclk_dpm.soft_min_clk = table->entries[0].clk;
	data->sclk_dpm.hard_min_clk = table->entries[0].clk;

	level = smu8_get_max_sclk_level(hwmgr) - 1;

	if (level < table->count)
		clock = table->entries[level].clk;
	else
		clock = table->entries[table->count - 1].clk;

	data->sclk_dpm.soft_max_clk = clock;
	data->sclk_dpm.hard_max_clk = clock;

	return 0;
}

static int smu8_init_uvd_limit(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	struct phm_uvd_clock_voltage_dependency_table *table =
				hwmgr->dyn_state.uvd_clock_voltage_dependency_table;
	unsigned long clock = 0, level;

	if (NULL == table || table->count <= 0)
		return -EINVAL;

	data->uvd_dpm.soft_min_clk = 0;
	data->uvd_dpm.hard_min_clk = 0;

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxUvdLevel);
	level = smum_get_argument(hwmgr);

	if (level < table->count)
		clock = table->entries[level].vclk;
	else
		clock = table->entries[table->count - 1].vclk;

	data->uvd_dpm.soft_max_clk = clock;
	data->uvd_dpm.hard_max_clk = clock;

	return 0;
}

static int smu8_init_vce_limit(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	struct phm_vce_clock_voltage_dependency_table *table =
				hwmgr->dyn_state.vce_clock_voltage_dependency_table;
	unsigned long clock = 0, level;

	if (NULL == table || table->count <= 0)
		return -EINVAL;

	data->vce_dpm.soft_min_clk = 0;
	data->vce_dpm.hard_min_clk = 0;

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxEclkLevel);
	level = smum_get_argument(hwmgr);

	if (level < table->count)
		clock = table->entries[level].ecclk;
	else
		clock = table->entries[table->count - 1].ecclk;

	data->vce_dpm.soft_max_clk = clock;
	data->vce_dpm.hard_max_clk = clock;

	return 0;
}

static int smu8_init_acp_limit(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	struct phm_acp_clock_voltage_dependency_table *table =
				hwmgr->dyn_state.acp_clock_voltage_dependency_table;
	unsigned long clock = 0, level;

	if (NULL == table || table->count <= 0)
		return -EINVAL;

	data->acp_dpm.soft_min_clk = 0;
	data->acp_dpm.hard_min_clk = 0;

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetMaxAclkLevel);
	level = smum_get_argument(hwmgr);

	if (level < table->count)
		clock = table->entries[level].acpclk;
	else
		clock = table->entries[table->count - 1].acpclk;

	data->acp_dpm.soft_max_clk = clock;
	data->acp_dpm.hard_max_clk = clock;
	return 0;
}

static void smu8_init_power_gate_state(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	data->uvd_power_gated = false;
	data->vce_power_gated = false;
	data->samu_power_gated = false;
	data->acp_power_gated = false;
	data->pgacpinit = true;
}

static void smu8_init_sclk_threshold(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	data->low_sclk_interrupt_threshold = 0;
}

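/*
 * Re-clamp the SCLK DPM window at power-state commit time: the hard minimum
 * follows the display's requirement, and with the Stable P-State cap set the
 * soft minimum (and maximum) are floored at 75% of the maximum AC memory
 * clock.
 */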
static int smu8_update_sclk_limit(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	struct phm_clock_voltage_dependency_table *table =
				hwmgr->dyn_state.vddc_dependency_on_sclk;

	unsigned long clock = 0;
	unsigned long level;
	unsigned long stable_pstate_sclk;
	unsigned long percentage;

	data->sclk_dpm.soft_min_clk = table->entries[0].clk;
	level = smu8_get_max_sclk_level(hwmgr) - 1;

	if (level < table->count)
		data->sclk_dpm.soft_max_clk = table->entries[level].clk;
	else
		data->sclk_dpm.soft_max_clk = table->entries[table->count - 1].clk;

	clock = hwmgr->display_config->min_core_set_clock;
	if (clock == 0)
		pr_debug("min_core_set_clock not set\n");

	if (data->sclk_dpm.hard_min_clk != clock) {
		data->sclk_dpm.hard_min_clk = clock;

		smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetSclkHardMin,
					smu8_get_sclk_level(hwmgr,
						data->sclk_dpm.hard_min_clk,
						PPSMC_MSG_SetSclkHardMin));
	}

	clock = data->sclk_dpm.soft_min_clk;

	/* update minimum clocks for Stable P-State feature */
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_StablePState)) {
		percentage = 75;
		/* Sclk - calculate sclk value based on percentage and find FLOOR sclk from VddcDependencyOnSCLK table */
		stable_pstate_sclk = (hwmgr->dyn_state.max_clock_voltage_on_ac.mclk *
					percentage) / 100;

		if (clock < stable_pstate_sclk)
			clock = stable_pstate_sclk;
	}

	if (data->sclk_dpm.soft_min_clk != clock) {
		data->sclk_dpm.soft_min_clk = clock;
		smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetSclkSoftMin,
					smu8_get_sclk_level(hwmgr,
						data->sclk_dpm.soft_min_clk,
						PPSMC_MSG_SetSclkSoftMin));
	}

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_StablePState) &&
			data->sclk_dpm.soft_max_clk != clock) {
		data->sclk_dpm.soft_max_clk = clock;
		smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetSclkSoftMax,
					smu8_get_sclk_level(hwmgr,
						data->sclk_dpm.soft_max_clk,
						PPSMC_MSG_SetSclkSoftMax));
	}

	return 0;
}

static int smu8_set_deep_sleep_sclk_threshold(struct pp_hwmgr *hwmgr)
{
	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SclkDeepSleep)) {
		uint32_t clks = hwmgr->display_config->min_core_set_clock_in_sr;
		if (clks == 0)
			clks = SMU8_MIN_DEEP_SLEEP_SCLK;

		PP_DBG_LOG("Setting Deep Sleep Clock: %d\n", clks);

		smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetMinDeepSleepSclk,
				clks);
	}

	return 0;
}

static int smu8_set_watermark_threshold(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data =
				hwmgr->backend;

	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetWatermarkFrequency,
			data->sclk_dpm.soft_max_clk);

	return 0;
}

static int smu8_nbdpm_pstate_enable_disable(struct pp_hwmgr *hwmgr, bool enable, bool lock)
{
	struct smu8_hwmgr *hw_data = hwmgr->backend;

	if (hw_data->is_nb_dpm_enabled) {
		if (enable) {
			PP_DBG_LOG("enable Low Memory PState.\n");

			return smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_EnableLowMemoryPstate,
						(lock ? 1 : 0));
		} else {
			PP_DBG_LOG("disable Low Memory PState.\n");

			return smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_DisableLowMemoryPstate,
						(lock ? 1 : 0));
		}
	}

	return 0;
}

static int smu8_disable_nb_dpm(struct pp_hwmgr *hwmgr)
{
	int ret = 0;

	struct smu8_hwmgr *data = hwmgr->backend;
	unsigned long dpm_features = 0;

	if (data->is_nb_dpm_enabled) {
		smu8_nbdpm_pstate_enable_disable(hwmgr, true, true);
		dpm_features |= NB_DPM_MASK;
		ret = smum_send_msg_to_smc_with_parameter(
							  hwmgr,
							  PPSMC_MSG_DisableAllSmuFeatures,
							  dpm_features);
		if (ret == 0)
			data->is_nb_dpm_enabled = false;
	}

	return ret;
}

static int smu8_enable_nb_dpm(struct pp_hwmgr *hwmgr)
{
	int ret = 0;

	struct smu8_hwmgr *data = hwmgr->backend;
	unsigned long dpm_features = 0;

	if (!data->is_nb_dpm_enabled) {
		PP_DBG_LOG("enabling ALL SMU features.\n");
		dpm_features |= NB_DPM_MASK;
		ret = smum_send_msg_to_smc_with_parameter(
							  hwmgr,
							  PPSMC_MSG_EnableAllSmuFeatures,
							  dpm_features);
		if (ret == 0)
			data->is_nb_dpm_enabled = true;
	}

	return ret;
}

static int smu8_update_low_mem_pstate(struct pp_hwmgr *hwmgr, const void *input)
{
	bool disable_switch;
	bool enable_low_mem_state;
	struct smu8_hwmgr *hw_data = hwmgr->backend;
	const struct phm_set_power_state_input *states = (struct phm_set_power_state_input *)input;
	const struct smu8_power_state *pnew_state = cast_const_smu8_power_state(states->pnew_state);

	if (hw_data->sys_info.nb_dpm_enable) {
		disable_switch = hw_data->cc6_settings.nb_pstate_switch_disable ? true : false;
		enable_low_mem_state = hw_data->cc6_settings.nb_pstate_switch_disable ? false : true;

		if (pnew_state->action == FORCE_HIGH)
			smu8_nbdpm_pstate_enable_disable(hwmgr, false, disable_switch);
		else if (pnew_state->action == CANCEL_FORCE_HIGH)
			smu8_nbdpm_pstate_enable_disable(hwmgr, true, disable_switch);
		else
			smu8_nbdpm_pstate_enable_disable(hwmgr, enable_low_mem_state, disable_switch);
	}
	return 0;
}

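/*
 * Power-state commit: refresh the SCLK limits, deep-sleep threshold and
 * watermark, make sure NB DPM is running, then apply the low-memory P-state
 * decision carried in the new state's action field.
 */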
static int smu8_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
	int ret = 0;

	smu8_update_sclk_limit(hwmgr);
	smu8_set_deep_sleep_sclk_threshold(hwmgr);
	smu8_set_watermark_threshold(hwmgr);
	ret = smu8_enable_nb_dpm(hwmgr);
	if (ret)
		return ret;
	smu8_update_low_mem_pstate(hwmgr, input);

	return 0;
}

static int smu8_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	int ret = 0;

	ret = smu8_upload_pptable_to_smu(hwmgr);
	if (ret)
		return ret;
	ret = smu8_init_sclk_limit(hwmgr);
	if (ret)
		return ret;
	ret = smu8_init_uvd_limit(hwmgr);
	if (ret)
		return ret;
	ret = smu8_init_vce_limit(hwmgr);
	if (ret)
		return ret;
	ret = smu8_init_acp_limit(hwmgr);
	if (ret)
		return ret;

	smu8_init_power_gate_state(hwmgr);
	smu8_init_sclk_threshold(hwmgr);

	return 0;
}

static void smu8_power_up_display_clock_sys_pll(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *hw_data = hwmgr->backend;

	hw_data->disp_clk_bypass_pending = false;
	hw_data->disp_clk_bypass = false;
}

static void smu8_clear_nb_dpm_flag(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *hw_data = hwmgr->backend;

	hw_data->is_nb_dpm_enabled = false;
}

static void smu8_reset_cc6_data(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *hw_data = hwmgr->backend;

	hw_data->cc6_settings.cc6_setting_changed = false;
	hw_data->cc6_settings.cpu_pstate_separation_time = 0;
	hw_data->cc6_settings.cpu_cc6_disable = false;
	hw_data->cc6_settings.cpu_pstate_disable = false;
}

static int smu8_power_off_asic(struct pp_hwmgr *hwmgr)
{
	smu8_power_up_display_clock_sys_pll(hwmgr);
	smu8_clear_nb_dpm_flag(hwmgr);
	smu8_reset_cc6_data(hwmgr);
	return 0;
}

static void smu8_program_voting_clients(struct pp_hwmgr *hwmgr)
{
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixCG_FREQ_TRAN_VOTING_0,
				SMU8_VOTINGRIGHTSCLIENTS_DFLT0);
}

static void smu8_clear_voting_clients(struct pp_hwmgr *hwmgr)
{
	cgs_write_ind_register(hwmgr->device, CGS_IND_REG__SMC,
				ixCG_FREQ_TRAN_VOTING_0, 0);
}

static int smu8_start_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	data->dpm_flags |= DPMFlags_SCLK_Enabled;

	return smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_EnableAllSmuFeatures,
				SCLK_DPM_MASK);
}

static int smu8_stop_dpm(struct pp_hwmgr *hwmgr)
{
	int ret = 0;
	struct smu8_hwmgr *data = hwmgr->backend;
	unsigned long dpm_features = 0;

	if (data->dpm_flags & DPMFlags_SCLK_Enabled) {
		dpm_features |= SCLK_DPM_MASK;
		data->dpm_flags &= ~DPMFlags_SCLK_Enabled;
		ret = smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DisableAllSmuFeatures,
					dpm_features);
	}
	return ret;
}

static int smu8_program_bootup_state(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	data->sclk_dpm.soft_min_clk = data->sys_info.bootup_engine_clock;
	data->sclk_dpm.soft_max_clk = data->sys_info.bootup_engine_clock;

	smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetSclkSoftMin,
				smu8_get_sclk_level(hwmgr,
					data->sclk_dpm.soft_min_clk,
					PPSMC_MSG_SetSclkSoftMin));

	smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetSclkSoftMax,
				smu8_get_sclk_level(hwmgr,
					data->sclk_dpm.soft_max_clk,
					PPSMC_MSG_SetSclkSoftMax));

	return 0;
}

static void smu8_reset_acp_boot_level(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	data->acp_boot_level = 0xff;
}

static int smu8_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	smu8_disable_nb_dpm(hwmgr);

	smu8_clear_voting_clients(hwmgr);
	if (smu8_stop_dpm(hwmgr))
		return -EINVAL;

	return 0;
}

static int smu8_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	smu8_program_voting_clients(hwmgr);
	if (smu8_start_dpm(hwmgr))
		return -EINVAL;
	smu8_program_bootup_state(hwmgr);
	smu8_reset_acp_boot_level(hwmgr);

	return 0;
}

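/*
 * Decide how the NB P-state should move for the requested state: force the
 * high memory P-state when the display needs more bandwidth than the lowest
 * NB P-state memory clock provides (or 3+ displays are active), otherwise
 * cancel a previous force or do nothing.
 */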
static int smu8_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
				struct pp_power_state *prequest_ps,
				const struct pp_power_state *pcurrent_ps)
{
	struct smu8_power_state *smu8_ps =
				cast_smu8_power_state(&prequest_ps->hardware);

	const struct smu8_power_state *smu8_current_ps =
				cast_const_smu8_power_state(&pcurrent_ps->hardware);

	struct smu8_hwmgr *data = hwmgr->backend;
	struct PP_Clocks clocks = {0, 0, 0, 0};
	bool force_high;

	smu8_ps->need_dfs_bypass = true;

	data->battery_state = (PP_StateUILabel_Battery == prequest_ps->classification.ui_label);

	clocks.memoryClock = hwmgr->display_config->min_mem_set_clock != 0 ?
				hwmgr->display_config->min_mem_set_clock :
				data->sys_info.nbp_memory_clock[1];

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_StablePState))
		clocks.memoryClock = hwmgr->dyn_state.max_clock_voltage_on_ac.mclk;

	force_high = (clocks.memoryClock > data->sys_info.nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK - 1])
			|| (hwmgr->display_config->num_display >= 3);

	smu8_ps->action = smu8_current_ps->action;

	if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
		smu8_nbdpm_pstate_enable_disable(hwmgr, false, false);
	else if (hwmgr->request_dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD)
		smu8_nbdpm_pstate_enable_disable(hwmgr, false, true);
	else if (!force_high && (smu8_ps->action == FORCE_HIGH))
		smu8_ps->action = CANCEL_FORCE_HIGH;
	else if (force_high && (smu8_ps->action != FORCE_HIGH))
		smu8_ps->action = FORCE_HIGH;
	else
		smu8_ps->action = DO_NOTHING;

	return 0;
}

static int smu8_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
	int result = 0;
	struct smu8_hwmgr *data;

	data = kzalloc(sizeof(struct smu8_hwmgr), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	hwmgr->backend = data;

	result = smu8_initialize_dpm_defaults(hwmgr);
	if (result != 0) {
		pr_err("smu8_initialize_dpm_defaults failed\n");
		return result;
	}

	result = smu8_get_system_info_data(hwmgr);
	if (result != 0) {
		pr_err("smu8_get_system_info_data failed\n");
		return result;
	}

	smu8_construct_boot_state(hwmgr);

	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels = SMU8_MAX_HARDWARE_POWERLEVELS;

	return result;
}

static int smu8_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
	if (hwmgr != NULL) {
		kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
		hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;

		kfree(hwmgr->backend);
		hwmgr->backend = NULL;
	}
	return 0;
}

static int smu8_phm_force_dpm_highest(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetSclkSoftMin,
				smu8_get_sclk_level(hwmgr,
					data->sclk_dpm.soft_max_clk,
					PPSMC_MSG_SetSclkSoftMin));

	smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetSclkSoftMax,
				smu8_get_sclk_level(hwmgr,
					data->sclk_dpm.soft_max_clk,
					PPSMC_MSG_SetSclkSoftMax));

	return 0;
}

static int smu8_phm_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	struct phm_clock_voltage_dependency_table *table =
				hwmgr->dyn_state.vddc_dependency_on_sclk;
	unsigned long clock = 0, level;

	if (NULL == table || table->count <= 0)
		return -EINVAL;

	data->sclk_dpm.soft_min_clk = table->entries[0].clk;
	data->sclk_dpm.hard_min_clk = table->entries[0].clk;
	hwmgr->pstate_sclk = table->entries[0].clk;
	hwmgr->pstate_mclk = 0;

	level = smu8_get_max_sclk_level(hwmgr) - 1;

	if (level < table->count)
		clock = table->entries[level].clk;
	else
		clock = table->entries[table->count - 1].clk;

	data->sclk_dpm.soft_max_clk = clock;
	data->sclk_dpm.hard_max_clk = clock;

	smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetSclkSoftMin,
				smu8_get_sclk_level(hwmgr,
					data->sclk_dpm.soft_min_clk,
					PPSMC_MSG_SetSclkSoftMin));

	smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetSclkSoftMax,
				smu8_get_sclk_level(hwmgr,
					data->sclk_dpm.soft_max_clk,
					PPSMC_MSG_SetSclkSoftMax));

	return 0;
}

static int smu8_phm_force_dpm_lowest(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetSclkSoftMax,
			smu8_get_sclk_level(hwmgr,
				data->sclk_dpm.soft_min_clk,
				PPSMC_MSG_SetSclkSoftMax));

	smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetSclkSoftMin,
				smu8_get_sclk_level(hwmgr,
					data->sclk_dpm.soft_min_clk,
					PPSMC_MSG_SetSclkSoftMin));

	return 0;
}

static int smu8_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
				enum amd_dpm_forced_level level)
{
	int ret = 0;

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		ret = smu8_phm_force_dpm_highest(hwmgr);
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		ret = smu8_phm_force_dpm_lowest(hwmgr);
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		ret = smu8_phm_unforce_dpm_levels(hwmgr);
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
	default:
		break;
	}

	return ret;
}

static int smu8_dpm_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
	if (PP_CAP(PHM_PlatformCaps_UVDPowerGating))
		return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_UVDPowerOFF);
	return 0;
}

static int smu8_dpm_powerup_uvd(struct pp_hwmgr *hwmgr)
{
	if (PP_CAP(PHM_PlatformCaps_UVDPowerGating)) {
		return smum_send_msg_to_smc_with_parameter(
			hwmgr,
			PPSMC_MSG_UVDPowerON,
			PP_CAP(PHM_PlatformCaps_UVDDynamicPowerGating) ? 1 : 0);
	}

	return 0;
}

static int smu8_dpm_update_vce_dpm(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	struct phm_vce_clock_voltage_dependency_table *ptable =
		hwmgr->dyn_state.vce_clock_voltage_dependency_table;

	/* Stable Pstate is enabled and we need to set the VCE DPM to highest level */
	if (PP_CAP(PHM_PlatformCaps_StablePState) ||
	    hwmgr->en_umd_pstate) {
		data->vce_dpm.hard_min_clk =
			ptable->entries[ptable->count - 1].ecclk;

		smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetEclkHardMin,
			smu8_get_eclk_level(hwmgr,
				data->vce_dpm.hard_min_clk,
				PPSMC_MSG_SetEclkHardMin));
	} else {

		smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetEclkHardMin, 0);
		/* disable ECLK DPM 0. Otherwise VCE could hang if
		 * switching SCLK from DPM 0 to 6/7 */
		smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetEclkSoftMin, 1);
	}
	return 0;
}

static int smu8_dpm_powerdown_vce(struct pp_hwmgr *hwmgr)
{
	if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
		return smum_send_msg_to_smc(hwmgr,
						PPSMC_MSG_VCEPowerOFF);
	return 0;
}

static int smu8_dpm_powerup_vce(struct pp_hwmgr *hwmgr)
{
	if (PP_CAP(PHM_PlatformCaps_VCEPowerGating))
		return smum_send_msg_to_smc(hwmgr,
						PPSMC_MSG_VCEPowerON);
	return 0;
}

static uint32_t smu8_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	return data->sys_info.bootup_uma_clock;
}

static uint32_t smu8_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
{
	struct pp_power_state *ps;
	struct smu8_power_state *smu8_ps;

	if (hwmgr == NULL)
		return -EINVAL;

	ps = hwmgr->request_ps;

	if (ps == NULL)
		return -EINVAL;

	smu8_ps = cast_smu8_power_state(&ps->hardware);

	if (low)
		return smu8_ps->levels[0].engineClock;
	else
		return smu8_ps->levels[smu8_ps->level-1].engineClock;
}

static int smu8_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
					struct pp_hw_power_state *hw_ps)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	struct smu8_power_state *smu8_ps = cast_smu8_power_state(hw_ps);

	smu8_ps->level = 1;
	smu8_ps->nbps_flags = 0;
	smu8_ps->bapm_flags = 0;
	smu8_ps->levels[0] = data->boot_power_level;

	return 0;
}

static int smu8_dpm_get_pp_table_entry_callback(
						struct pp_hwmgr *hwmgr,
						struct pp_hw_power_state *hw_ps,
						unsigned int index,
						const void *clock_info)
{
	struct smu8_power_state *smu8_ps = cast_smu8_power_state(hw_ps);

	const ATOM_PPLIB_CZ_CLOCK_INFO *smu8_clock_info = clock_info;

	struct phm_clock_voltage_dependency_table *table =
				hwmgr->dyn_state.vddc_dependency_on_sclk;
	uint8_t clock_info_index = smu8_clock_info->index;

	if (clock_info_index > (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1))
		clock_info_index = (uint8_t)(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1);

	smu8_ps->levels[index].engineClock = table->entries[clock_info_index].clk;
	smu8_ps->levels[index].vddcIndex = (uint8_t)table->entries[clock_info_index].v;

	smu8_ps->level = index + 1;

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SclkDeepSleep)) {
		smu8_ps->levels[index].dsDividerIndex = 5;
		smu8_ps->levels[index].ssDividerIndex = 5;
	}

	return 0;
}

static int smu8_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
{
	int result;
	unsigned long ret = 0;

	result = pp_tables_get_num_of_entries(hwmgr, &ret);

	return result ? 0 : ret;
}

static int smu8_dpm_get_pp_table_entry(struct pp_hwmgr *hwmgr,
			unsigned long entry, struct pp_power_state *ps)
{
	int result;
	struct smu8_power_state *smu8_ps;

	ps->hardware.magic = smu8_magic;

	smu8_ps = cast_smu8_power_state(&(ps->hardware));

	result = pp_tables_get_entry(hwmgr, entry, ps,
			smu8_dpm_get_pp_table_entry_callback);

	smu8_ps->uvd_clocks.vclk = ps->uvd_clocks.VCLK;
	smu8_ps->uvd_clocks.dclk = ps->uvd_clocks.DCLK;

	return result;
}

static int smu8_get_power_state_size(struct pp_hwmgr *hwmgr)
{
	return sizeof(struct smu8_power_state);
}

static void smu8_hw_print_display_cfg(
				const struct cc6_settings *cc6_settings)
{
	PP_DBG_LOG("New Display Configuration:\n");

	PP_DBG_LOG("   cpu_cc6_disable: %d\n",
			cc6_settings->cpu_cc6_disable);
	PP_DBG_LOG("   cpu_pstate_disable: %d\n",
			cc6_settings->cpu_pstate_disable);
	PP_DBG_LOG("   nb_pstate_switch_disable: %d\n",
			cc6_settings->nb_pstate_switch_disable);
	PP_DBG_LOG("   cpu_pstate_separation_time: %d\n\n",
			cc6_settings->cpu_pstate_separation_time);
}

static int smu8_set_cpu_power_state(struct pp_hwmgr *hwmgr)
{
	struct smu8_hwmgr *hw_data = hwmgr->backend;
	uint32_t data = 0;

	if (hw_data->cc6_settings.cc6_setting_changed) {

		hw_data->cc6_settings.cc6_setting_changed = false;

		smu8_hw_print_display_cfg(&hw_data->cc6_settings);

		data |= (hw_data->cc6_settings.cpu_pstate_separation_time
			& PWRMGT_SEPARATION_TIME_MASK)
			<< PWRMGT_SEPARATION_TIME_SHIFT;

		data |= (hw_data->cc6_settings.cpu_cc6_disable ? 0x1 : 0x0)
			<< PWRMGT_DISABLE_CPU_CSTATES_SHIFT;

		data |= (hw_data->cc6_settings.cpu_pstate_disable ? 0x1 : 0x0)
			<< PWRMGT_DISABLE_CPU_PSTATES_SHIFT;

		PP_DBG_LOG("SetDisplaySizePowerParams data: 0x%X\n",
			data);

		smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetDisplaySizePowerParams,
						data);
	}

	return 0;
}

static int smu8_store_cc6_data(struct pp_hwmgr *hwmgr, uint32_t separation_time,
			bool cc6_disable, bool pstate_disable, bool pstate_switch_disable)
{
	struct smu8_hwmgr *hw_data = hwmgr->backend;

	if (separation_time !=
	    hw_data->cc6_settings.cpu_pstate_separation_time ||
	    cc6_disable != hw_data->cc6_settings.cpu_cc6_disable ||
	    pstate_disable != hw_data->cc6_settings.cpu_pstate_disable ||
	    pstate_switch_disable != hw_data->cc6_settings.nb_pstate_switch_disable) {

		hw_data->cc6_settings.cc6_setting_changed = true;

		hw_data->cc6_settings.cpu_pstate_separation_time =
			separation_time;
		hw_data->cc6_settings.cpu_cc6_disable =
			cc6_disable;
		hw_data->cc6_settings.cpu_pstate_disable =
			pstate_disable;
		hw_data->cc6_settings.nb_pstate_switch_disable =
			pstate_switch_disable;

	}

	return 0;
}

static int smu8_get_dal_power_level(struct pp_hwmgr *hwmgr,
		struct amd_pp_simple_clock_info *info)
{
	uint32_t i;
	const struct phm_clock_voltage_dependency_table *table =
			hwmgr->dyn_state.vddc_dep_on_dal_pwrl;
	const struct phm_clock_and_voltage_limits *limits =
			&hwmgr->dyn_state.max_clock_voltage_on_ac;

	info->engine_max_clock = limits->sclk;
	info->memory_max_clock = limits->mclk;

	for (i = table->count - 1; i > 0; i--) {
		if (limits->vddc >= table->entries[i].v) {
			info->level = table->entries[i].clk;
			return 0;
		}
	}
	return -EINVAL;
}

static int smu8_force_clock_level(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, uint32_t mask)
{
	switch (type) {
	case PP_SCLK:
		smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetSclkSoftMin,
				mask);
		smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetSclkSoftMax,
				mask);
		break;
	default:
		break;
	}

	return 0;
}

static int smu8_print_clock_levels(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, char *buf)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	struct phm_clock_voltage_dependency_table *sclk_table =
			hwmgr->dyn_state.vddc_dependency_on_sclk;
	int i, now, size = 0;

	switch (type) {
	case PP_SCLK:
		now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
				CGS_IND_REG__SMC,
				ixTARGET_AND_CURRENT_PROFILE_INDEX),
				TARGET_AND_CURRENT_PROFILE_INDEX,
				CURR_SCLK_INDEX);

		for (i = 0; i < sclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, sclk_table->entries[i].clk / 100,
					(i == now) ? "*" : "");
		break;
	case PP_MCLK:
		now = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device,
				CGS_IND_REG__SMC,
				ixTARGET_AND_CURRENT_PROFILE_INDEX),
				TARGET_AND_CURRENT_PROFILE_INDEX,
				CURR_MCLK_INDEX);

		for (i = SMU8_NUM_NBPMEMORYCLOCK; i > 0; i--)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					SMU8_NUM_NBPMEMORYCLOCK-i, data->sys_info.nbp_memory_clock[i-1] / 100,
					(SMU8_NUM_NBPMEMORYCLOCK-i == now) ? "*" : "");
		break;
	default:
		break;
	}
	return size;
}

static int smu8_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
				PHM_PerformanceLevelDesignation designation, uint32_t index,
				PHM_PerformanceLevel *level)
{
	const struct smu8_power_state *ps;
	struct smu8_hwmgr *data;
	uint32_t level_index;
	uint32_t i;

	if (level == NULL || hwmgr == NULL || state == NULL)
		return -EINVAL;

	data = hwmgr->backend;
	ps = cast_const_smu8_power_state(state);

	level_index = index > ps->level - 1 ? ps->level - 1 : index;
	level->coreClock = ps->levels[level_index].engineClock;

	if (designation == PHM_PerformanceLevelDesignation_PowerContainment) {
		for (i = 1; i < ps->level; i++) {
			if (ps->levels[i].engineClock > data->dce_slow_sclk_threshold) {
				level->coreClock = ps->levels[i].engineClock;
				break;
			}
		}
	}

	if (level_index == 0)
		level->memory_clock = data->sys_info.nbp_memory_clock[SMU8_NUM_NBPMEMORYCLOCK - 1];
	else
		level->memory_clock = data->sys_info.nbp_memory_clock[0];

	level->vddc = (smu8_convert_8Bit_index_to_voltage(hwmgr, ps->levels[level_index].vddcIndex) + 2) / 4;
	level->nonLocalMemoryFreq = 0;
	level->nonLocalMemoryWidth = 0;

	return 0;
}

static int smu8_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr,
	const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
{
	const struct smu8_power_state *ps = cast_const_smu8_power_state(state);

	clock_info->min_eng_clk = ps->levels[0].engineClock / (1 << (ps->levels[0].ssDividerIndex));
	clock_info->max_eng_clk = ps->levels[ps->level - 1].engineClock / (1 << (ps->levels[ps->level - 1].ssDividerIndex));

	return 0;
}

static int smu8_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type,
						struct amd_pp_clocks *clocks)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	int i;
	struct phm_clock_voltage_dependency_table *table;

	clocks->count = smu8_get_max_sclk_level(hwmgr);
	switch (type) {
	case amd_pp_disp_clock:
		for (i = 0; i < clocks->count; i++)
			clocks->clock[i] = data->sys_info.display_clock[i] * 10;
		break;
	case amd_pp_sys_clock:
		table = hwmgr->dyn_state.vddc_dependency_on_sclk;
		for (i = 0; i < clocks->count; i++)
			clocks->clock[i] = table->entries[i].clk * 10;
		break;
	case amd_pp_mem_clock:
		clocks->count = SMU8_NUM_NBPMEMORYCLOCK;
		for (i = 0; i < clocks->count; i++)
			clocks->clock[i] = data->sys_info.nbp_memory_clock[clocks->count - 1 - i] * 10;
		break;
	default:
		return -1;
	}

	return 0;
}

static int smu8_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
{
	struct phm_clock_voltage_dependency_table *table =
					hwmgr->dyn_state.vddc_dependency_on_sclk;
	unsigned long level;
	const struct phm_clock_and_voltage_limits *limits =
			&hwmgr->dyn_state.max_clock_voltage_on_ac;

	if ((NULL == table) || (table->count <= 0) || (clocks == NULL))
		return -EINVAL;

	level = smu8_get_max_sclk_level(hwmgr) - 1;

	if (level < table->count)
		clocks->engine_max_clock = table->entries[level].clk;
	else
		clocks->engine_max_clock = table->entries[table->count - 1].clk;

	clocks->memory_max_clock = limits->mclk;

	return 0;
}

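/*
 * THM_TCON_CUR_TMP reports the temperature in 1/8 degC units; when the
 * range-select bit is set the reading carries a -49 degC offset.
 */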
static int smu8_thermal_get_temperature(struct pp_hwmgr *hwmgr)
{
	int actual_temp = 0;
	uint32_t val = cgs_read_ind_register(hwmgr->device,
					     CGS_IND_REG__SMC, ixTHM_TCON_CUR_TMP);
	uint32_t temp = PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP);

	if (PHM_GET_FIELD(val, THM_TCON_CUR_TMP, CUR_TEMP_RANGE_SEL))
		actual_temp = ((temp / 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	else
		actual_temp = (temp / 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES;

	return actual_temp;
}

static int smu8_read_sensor(struct pp_hwmgr *hwmgr, int idx,
			  void *value, int *size)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	struct phm_clock_voltage_dependency_table *table =
				hwmgr->dyn_state.vddc_dependency_on_sclk;

	struct phm_vce_clock_voltage_dependency_table *vce_table =
		hwmgr->dyn_state.vce_clock_voltage_dependency_table;

	struct phm_uvd_clock_voltage_dependency_table *uvd_table =
		hwmgr->dyn_state.uvd_clock_voltage_dependency_table;

	uint32_t sclk_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX),
					TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
	uint32_t uvd_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
					TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
	uint32_t vce_index = PHM_GET_FIELD(cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
					TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);

	uint32_t sclk, vclk, dclk, ecclk, tmp, activity_percent;
	uint16_t vddnb, vddgfx;
	int result;

	/* size must be at least 4 bytes for all sensors */
	if (*size < 4)
		return -EINVAL;
	*size = 4;

	switch (idx) {
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		if (sclk_index < NUM_SCLK_LEVELS) {
			sclk = table->entries[sclk_index].clk;
			*((uint32_t *)value) = sclk;
			return 0;
		}
		return -EINVAL;
	case AMDGPU_PP_SENSOR_VDDNB:
		tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
			CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
		vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp) / 4;
		*((uint32_t *)value) = vddnb;
		return 0;
	case AMDGPU_PP_SENSOR_VDDGFX:
		tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
			CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
		vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp) / 4;
		*((uint32_t *)value) = vddgfx;
		return 0;
	case AMDGPU_PP_SENSOR_UVD_VCLK:
		if (!data->uvd_power_gated) {
			if (uvd_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
				return -EINVAL;
			} else {
				vclk = uvd_table->entries[uvd_index].vclk;
				*((uint32_t *)value) = vclk;
				return 0;
			}
		}
		*((uint32_t *)value) = 0;
		return 0;
	case AMDGPU_PP_SENSOR_UVD_DCLK:
		if (!data->uvd_power_gated) {
			if (uvd_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
				return -EINVAL;
			} else {
				dclk = uvd_table->entries[uvd_index].dclk;
				*((uint32_t *)value) = dclk;
				return 0;
			}
		}
		*((uint32_t *)value) = 0;
		return 0;
	case AMDGPU_PP_SENSOR_VCE_ECCLK:
		if (!data->vce_power_gated) {
			if (vce_index >= SMU8_MAX_HARDWARE_POWERLEVELS) {
				return -EINVAL;
			} else {
				ecclk = vce_table->entries[vce_index].ecclk;
				*((uint32_t *)value) = ecclk;
				return 0;
			}
		}
		*((uint32_t *)value) = 0;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		result = smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGraphicsActivity);
		if (0 == result) {
			activity_percent = cgs_read_register(hwmgr->device, mmSMU_MP1_SRBM2P_ARG_0);
			activity_percent = activity_percent > 100 ? 100 : activity_percent;
		} else {
			activity_percent = 50;
		}
		*((uint32_t *)value) = activity_percent;
		return 0;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		*((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
		return 0;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_TEMP:
		*((uint32_t *)value) = smu8_thermal_get_temperature(hwmgr);
		return 0;
	default:
		return -EINVAL;
	}
}

static int smu8_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
					uint32_t virtual_addr_low,
					uint32_t virtual_addr_hi,
					uint32_t mc_addr_low,
					uint32_t mc_addr_hi,
					uint32_t size)
{
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramAddrHiVirtual,
					mc_addr_hi);
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramAddrLoVirtual,
					mc_addr_low);
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramAddrHiPhysical,
					virtual_addr_hi);
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramAddrLoPhysical,
					virtual_addr_low);

	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramBufferSize,
					size);
	return 0;
}

static int smu8_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
		struct PP_TemperatureRange *thermal_data)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	memcpy(thermal_data, &SMU7ThermalPolicy[0], sizeof(struct PP_TemperatureRange));

	thermal_data->max = (data->thermal_auto_throttling_treshold +
			data->sys_info.htc_hyst_lmt) *
			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;

	return 0;
}

*hwmgr
, bool enable
)
1817 struct smu8_hwmgr
*data
= hwmgr
->backend
;
1818 uint32_t dpm_features
= 0;
1821 phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
,
1822 PHM_PlatformCaps_UVDDPM
)) {
1823 data
->dpm_flags
|= DPMFlags_UVD_Enabled
;
1824 dpm_features
|= UVD_DPM_MASK
;
1825 smum_send_msg_to_smc_with_parameter(hwmgr
,
1826 PPSMC_MSG_EnableAllSmuFeatures
, dpm_features
);
1828 dpm_features
|= UVD_DPM_MASK
;
1829 data
->dpm_flags
&= ~DPMFlags_UVD_Enabled
;
1830 smum_send_msg_to_smc_with_parameter(hwmgr
,
1831 PPSMC_MSG_DisableAllSmuFeatures
, dpm_features
);
1836 int smu8_dpm_update_uvd_dpm(struct pp_hwmgr
*hwmgr
, bool bgate
)
1838 struct smu8_hwmgr
*data
= hwmgr
->backend
;
1839 struct phm_uvd_clock_voltage_dependency_table
*ptable
=
1840 hwmgr
->dyn_state
.uvd_clock_voltage_dependency_table
;
1843 /* Stable Pstate is enabled and we need to set the UVD DPM to highest level */
1844 if (PP_CAP(PHM_PlatformCaps_StablePState
) ||
1845 hwmgr
->en_umd_pstate
) {
1846 data
->uvd_dpm
.hard_min_clk
=
1847 ptable
->entries
[ptable
->count
- 1].vclk
;
1849 smum_send_msg_to_smc_with_parameter(hwmgr
,
1850 PPSMC_MSG_SetUvdHardMin
,
1851 smu8_get_uvd_level(hwmgr
,
1852 data
->uvd_dpm
.hard_min_clk
,
1853 PPSMC_MSG_SetUvdHardMin
));
1855 smu8_enable_disable_uvd_dpm(hwmgr
, true);
1857 smu8_enable_disable_uvd_dpm(hwmgr
, true);
1860 smu8_enable_disable_uvd_dpm(hwmgr
, false);
static int smu8_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
	struct smu8_hwmgr *data = hwmgr->backend;
	uint32_t dpm_features = 0;

	if (enable && phm_cap_enabled(
				hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_VCEDPM)) {
		data->dpm_flags |= DPMFlags_VCE_Enabled;
		dpm_features |= VCE_DPM_MASK;
		smum_send_msg_to_smc_with_parameter(hwmgr,
			    PPSMC_MSG_EnableAllSmuFeatures, dpm_features);
	} else {
		dpm_features |= VCE_DPM_MASK;
		data->dpm_flags &= ~DPMFlags_VCE_Enabled;
		smum_send_msg_to_smc_with_parameter(hwmgr,
			   PPSMC_MSG_DisableAllSmuFeatures, dpm_features);
	}

	return 0;
}

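/*
 * Gating order matters in the powergate helpers below: when gating, DPM for
 * the block is quiesced before the IP is powered off; when ungating, the IP
 * is powered up and ungated first, then its DPM is re-enabled.
 */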
static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	data->uvd_power_gated = bgate;

	if (bgate) {
		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_UVD,
						AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_UVD,
						AMD_CG_STATE_GATE);
		smu8_dpm_update_uvd_dpm(hwmgr, true);
		smu8_dpm_powerdown_uvd(hwmgr);
	} else {
		smu8_dpm_powerup_uvd(hwmgr);
		amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_UVD,
						AMD_CG_STATE_UNGATE);
		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_UVD,
						AMD_PG_STATE_UNGATE);
		smu8_dpm_update_uvd_dpm(hwmgr, false);
	}
}

static void smu8_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct smu8_hwmgr *data = hwmgr->backend;

	if (bgate) {
		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_VCE,
						AMD_PG_STATE_GATE);
		amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_VCE,
						AMD_CG_STATE_GATE);
		smu8_enable_disable_vce_dpm(hwmgr, false);
		smu8_dpm_powerdown_vce(hwmgr);
		data->vce_power_gated = true;
	} else {
		smu8_dpm_powerup_vce(hwmgr);
		data->vce_power_gated = false;
		amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_VCE,
						AMD_CG_STATE_UNGATE);
		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_VCE,
						AMD_PG_STATE_UNGATE);
		smu8_dpm_update_vce_dpm(hwmgr);
		smu8_enable_disable_vce_dpm(hwmgr, true);
	}
}

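/* Dispatch table wiring the smu8 implementations into the hwmgr core. */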
static const struct pp_hwmgr_func smu8_hwmgr_funcs = {
	.backend_init = smu8_hwmgr_backend_init,
	.backend_fini = smu8_hwmgr_backend_fini,
	.apply_state_adjust_rules = smu8_apply_state_adjust_rules,
	.force_dpm_level = smu8_dpm_force_dpm_level,
	.get_power_state_size = smu8_get_power_state_size,
	.powerdown_uvd = smu8_dpm_powerdown_uvd,
	.powergate_uvd = smu8_dpm_powergate_uvd,
	.powergate_vce = smu8_dpm_powergate_vce,
	.get_mclk = smu8_dpm_get_mclk,
	.get_sclk = smu8_dpm_get_sclk,
	.patch_boot_state = smu8_dpm_patch_boot_state,
	.get_pp_table_entry = smu8_dpm_get_pp_table_entry,
	.get_num_of_pp_table_entries = smu8_dpm_get_num_of_pp_table_entries,
	.set_cpu_power_state = smu8_set_cpu_power_state,
	.store_cc6_data = smu8_store_cc6_data,
	.force_clock_level = smu8_force_clock_level,
	.print_clock_levels = smu8_print_clock_levels,
	.get_dal_power_level = smu8_get_dal_power_level,
	.get_performance_level = smu8_get_performance_level,
	.get_current_shallow_sleep_clocks = smu8_get_current_shallow_sleep_clocks,
	.get_clock_by_type = smu8_get_clock_by_type,
	.get_max_high_clocks = smu8_get_max_high_clocks,
	.read_sensor = smu8_read_sensor,
	.power_off_asic = smu8_power_off_asic,
	.asic_setup = smu8_setup_asic_task,
	.dynamic_state_management_enable = smu8_enable_dpm_tasks,
	.power_state_set = smu8_set_power_state_tasks,
	.dynamic_state_management_disable = smu8_disable_dpm_tasks,
	.notify_cac_buffer_info = smu8_notify_cac_buffer_info,
	.get_thermal_temperature_range = smu8_get_thermal_temperature_range,
};

int smu8_init_function_pointers(struct pp_hwmgr *hwmgr)
{
	hwmgr->hwmgr_func = &smu8_hwmgr_funcs;
	hwmgr->pptable_func = &pptable_funcs;
	return 0;
}