 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/delay.h>
26 #include <linux/module.h>
27 #include <linux/slab.h>
30 #include "amd_powerplay.h"
31 #include "vega12_smumgr.h"
32 #include "hardwaremanager.h"
33 #include "ppatomfwctrl.h"
34 #include "atomfirmware.h"
35 #include "cgs_common.h"
36 #include "vega12_inc.h"
37 #include "pppcielanes.h"
38 #include "vega12_hwmgr.h"
39 #include "vega12_processpptables.h"
40 #include "vega12_pptable.h"
41 #include "vega12_thermal.h"
42 #include "vega12_ppsmc.h"
44 #include "amd_pcie_helpers.h"
45 #include "ppinterrupt.h"
46 #include "pp_overdriver.h"
47 #include "pp_thermal.h"
48 #include "vega12_baco.h"
51 static int vega12_force_clock_level(struct pp_hwmgr
*hwmgr
,
52 enum pp_clock_type type
, uint32_t mask
);
53 static int vega12_get_clock_ranges(struct pp_hwmgr
*hwmgr
,
58 static void vega12_set_default_registry_data(struct pp_hwmgr
*hwmgr
)
60 struct vega12_hwmgr
*data
=
61 (struct vega12_hwmgr
*)(hwmgr
->backend
);
63 data
->gfxclk_average_alpha
= PPVEGA12_VEGA12GFXCLKAVERAGEALPHA_DFLT
;
64 data
->socclk_average_alpha
= PPVEGA12_VEGA12SOCCLKAVERAGEALPHA_DFLT
;
65 data
->uclk_average_alpha
= PPVEGA12_VEGA12UCLKCLKAVERAGEALPHA_DFLT
;
66 data
->gfx_activity_average_alpha
= PPVEGA12_VEGA12GFXACTIVITYAVERAGEALPHA_DFLT
;
67 data
->lowest_uclk_reserved_for_ulv
= PPVEGA12_VEGA12LOWESTUCLKRESERVEDFORULV_DFLT
;
69 data
->display_voltage_mode
= PPVEGA12_VEGA12DISPLAYVOLTAGEMODE_DFLT
;
70 data
->dcef_clk_quad_eqn_a
= PPREGKEY_VEGA12QUADRATICEQUATION_DFLT
;
71 data
->dcef_clk_quad_eqn_b
= PPREGKEY_VEGA12QUADRATICEQUATION_DFLT
;
72 data
->dcef_clk_quad_eqn_c
= PPREGKEY_VEGA12QUADRATICEQUATION_DFLT
;
73 data
->disp_clk_quad_eqn_a
= PPREGKEY_VEGA12QUADRATICEQUATION_DFLT
;
74 data
->disp_clk_quad_eqn_b
= PPREGKEY_VEGA12QUADRATICEQUATION_DFLT
;
75 data
->disp_clk_quad_eqn_c
= PPREGKEY_VEGA12QUADRATICEQUATION_DFLT
;
76 data
->pixel_clk_quad_eqn_a
= PPREGKEY_VEGA12QUADRATICEQUATION_DFLT
;
77 data
->pixel_clk_quad_eqn_b
= PPREGKEY_VEGA12QUADRATICEQUATION_DFLT
;
78 data
->pixel_clk_quad_eqn_c
= PPREGKEY_VEGA12QUADRATICEQUATION_DFLT
;
79 data
->phy_clk_quad_eqn_a
= PPREGKEY_VEGA12QUADRATICEQUATION_DFLT
;
80 data
->phy_clk_quad_eqn_b
= PPREGKEY_VEGA12QUADRATICEQUATION_DFLT
;
81 data
->phy_clk_quad_eqn_c
= PPREGKEY_VEGA12QUADRATICEQUATION_DFLT
;
83 data
->registry_data
.disallowed_features
= 0x0;
84 data
->registry_data
.od_state_in_dc_support
= 0;
85 data
->registry_data
.thermal_support
= 1;
86 data
->registry_data
.skip_baco_hardware
= 0;
88 data
->registry_data
.log_avfs_param
= 0;
89 data
->registry_data
.sclk_throttle_low_notification
= 1;
90 data
->registry_data
.force_dpm_high
= 0;
91 data
->registry_data
.stable_pstate_sclk_dpm_percentage
= 75;
93 data
->registry_data
.didt_support
= 0;
94 if (data
->registry_data
.didt_support
) {
95 data
->registry_data
.didt_mode
= 6;
96 data
->registry_data
.sq_ramping_support
= 1;
97 data
->registry_data
.db_ramping_support
= 0;
98 data
->registry_data
.td_ramping_support
= 0;
99 data
->registry_data
.tcp_ramping_support
= 0;
100 data
->registry_data
.dbr_ramping_support
= 0;
101 data
->registry_data
.edc_didt_support
= 1;
102 data
->registry_data
.gc_didt_support
= 0;
103 data
->registry_data
.psm_didt_support
= 0;
106 data
->registry_data
.pcie_lane_override
= 0xff;
107 data
->registry_data
.pcie_speed_override
= 0xff;
108 data
->registry_data
.pcie_clock_override
= 0xffffffff;
109 data
->registry_data
.regulator_hot_gpio_support
= 1;
110 data
->registry_data
.ac_dc_switch_gpio_support
= 0;
111 data
->registry_data
.quick_transition_support
= 0;
112 data
->registry_data
.zrpm_start_temp
= 0xffff;
113 data
->registry_data
.zrpm_stop_temp
= 0xffff;
114 data
->registry_data
.odn_feature_enable
= 1;
115 data
->registry_data
.disable_water_mark
= 0;
116 data
->registry_data
.disable_pp_tuning
= 0;
117 data
->registry_data
.disable_xlpp_tuning
= 0;
118 data
->registry_data
.disable_workload_policy
= 0;
119 data
->registry_data
.perf_ui_tuning_profile_turbo
= 0x19190F0F;
120 data
->registry_data
.perf_ui_tuning_profile_powerSave
= 0x19191919;
121 data
->registry_data
.perf_ui_tuning_profile_xl
= 0x00000F0A;
122 data
->registry_data
.force_workload_policy_mask
= 0;
123 data
->registry_data
.disable_3d_fs_detection
= 0;
124 data
->registry_data
.fps_support
= 1;
125 data
->registry_data
.disable_auto_wattman
= 1;
126 data
->registry_data
.auto_wattman_debug
= 0;
127 data
->registry_data
.auto_wattman_sample_period
= 100;
128 data
->registry_data
.auto_wattman_threshold
= 50;
131 static int vega12_set_features_platform_caps(struct pp_hwmgr
*hwmgr
)
133 struct vega12_hwmgr
*data
=
134 (struct vega12_hwmgr
*)(hwmgr
->backend
);
135 struct amdgpu_device
*adev
= hwmgr
->adev
;
137 if (data
->vddci_control
== VEGA12_VOLTAGE_CONTROL_NONE
)
138 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
139 PHM_PlatformCaps_ControlVDDCI
);
141 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
142 PHM_PlatformCaps_TablelessHardwareInterface
);
144 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
145 PHM_PlatformCaps_EnableSMU7ThermalManagement
);
147 if (adev
->pg_flags
& AMD_PG_SUPPORT_UVD
) {
148 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
149 PHM_PlatformCaps_UVDPowerGating
);
150 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
151 PHM_PlatformCaps_UVDDynamicPowerGating
);
154 if (adev
->pg_flags
& AMD_PG_SUPPORT_VCE
)
155 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
156 PHM_PlatformCaps_VCEPowerGating
);
158 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
159 PHM_PlatformCaps_UnTabledHardwareInterface
);
161 if (data
->registry_data
.odn_feature_enable
)
162 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
163 PHM_PlatformCaps_ODNinACSupport
);
165 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
166 PHM_PlatformCaps_OD6inACSupport
);
167 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
168 PHM_PlatformCaps_OD6PlusinACSupport
);
171 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
172 PHM_PlatformCaps_ActivityReporting
);
173 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
174 PHM_PlatformCaps_FanSpeedInTableIsRPM
);
176 if (data
->registry_data
.od_state_in_dc_support
) {
177 if (data
->registry_data
.odn_feature_enable
)
178 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
179 PHM_PlatformCaps_ODNinDCSupport
);
181 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
182 PHM_PlatformCaps_OD6inDCSupport
);
183 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
184 PHM_PlatformCaps_OD6PlusinDCSupport
);
188 if (data
->registry_data
.thermal_support
189 && data
->registry_data
.fuzzy_fan_control_support
190 && hwmgr
->thermal_controller
.advanceFanControlParameters
.usTMax
)
191 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
192 PHM_PlatformCaps_ODFuzzyFanControlSupport
);
194 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
195 PHM_PlatformCaps_DynamicPowerManagement
);
196 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
197 PHM_PlatformCaps_SMC
);
198 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
199 PHM_PlatformCaps_ThermalPolicyDelay
);
201 if (data
->registry_data
.force_dpm_high
)
202 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
203 PHM_PlatformCaps_ExclusiveModeAlwaysHigh
);
205 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
206 PHM_PlatformCaps_DynamicUVDState
);
208 if (data
->registry_data
.sclk_throttle_low_notification
)
209 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
210 PHM_PlatformCaps_SclkThrottleLowNotification
);
212 /* power tune caps */
213 /* assume disabled */
214 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
215 PHM_PlatformCaps_PowerContainment
);
216 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
217 PHM_PlatformCaps_DiDtSupport
);
218 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
219 PHM_PlatformCaps_SQRamping
);
220 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
221 PHM_PlatformCaps_DBRamping
);
222 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
223 PHM_PlatformCaps_TDRamping
);
224 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
225 PHM_PlatformCaps_TCPRamping
);
226 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
227 PHM_PlatformCaps_DBRRamping
);
228 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
229 PHM_PlatformCaps_DiDtEDCEnable
);
230 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
231 PHM_PlatformCaps_GCEDC
);
232 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
233 PHM_PlatformCaps_PSM
);
235 if (data
->registry_data
.didt_support
) {
236 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
, PHM_PlatformCaps_DiDtSupport
);
237 if (data
->registry_data
.sq_ramping_support
)
238 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
, PHM_PlatformCaps_SQRamping
);
239 if (data
->registry_data
.db_ramping_support
)
240 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
, PHM_PlatformCaps_DBRamping
);
241 if (data
->registry_data
.td_ramping_support
)
242 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
, PHM_PlatformCaps_TDRamping
);
243 if (data
->registry_data
.tcp_ramping_support
)
244 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
, PHM_PlatformCaps_TCPRamping
);
245 if (data
->registry_data
.dbr_ramping_support
)
246 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
, PHM_PlatformCaps_DBRRamping
);
247 if (data
->registry_data
.edc_didt_support
)
248 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
, PHM_PlatformCaps_DiDtEDCEnable
);
249 if (data
->registry_data
.gc_didt_support
)
250 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
, PHM_PlatformCaps_GCEDC
);
251 if (data
->registry_data
.psm_didt_support
)
252 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
, PHM_PlatformCaps_PSM
);
255 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
256 PHM_PlatformCaps_RegulatorHot
);
258 if (data
->registry_data
.ac_dc_switch_gpio_support
) {
259 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
260 PHM_PlatformCaps_AutomaticDCTransition
);
261 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
262 PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme
);
265 if (data
->registry_data
.quick_transition_support
) {
266 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
267 PHM_PlatformCaps_AutomaticDCTransition
);
268 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
269 PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme
);
270 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
271 PHM_PlatformCaps_Falcon_QuickTransition
);
274 if (data
->lowest_uclk_reserved_for_ulv
!= PPVEGA12_VEGA12LOWESTUCLKRESERVEDFORULV_DFLT
) {
275 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
276 PHM_PlatformCaps_LowestUclkReservedForUlv
);
277 if (data
->lowest_uclk_reserved_for_ulv
== 1)
278 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
279 PHM_PlatformCaps_LowestUclkReservedForUlv
);
282 if (data
->registry_data
.custom_fan_support
)
283 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
284 PHM_PlatformCaps_CustomFanControlSupport
);
289 static void vega12_init_dpm_defaults(struct pp_hwmgr
*hwmgr
)
291 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
292 struct amdgpu_device
*adev
= hwmgr
->adev
;
293 uint32_t top32
, bottom32
;
296 data
->smu_features
[GNLD_DPM_PREFETCHER
].smu_feature_id
=
297 FEATURE_DPM_PREFETCHER_BIT
;
298 data
->smu_features
[GNLD_DPM_GFXCLK
].smu_feature_id
=
299 FEATURE_DPM_GFXCLK_BIT
;
300 data
->smu_features
[GNLD_DPM_UCLK
].smu_feature_id
=
301 FEATURE_DPM_UCLK_BIT
;
302 data
->smu_features
[GNLD_DPM_SOCCLK
].smu_feature_id
=
303 FEATURE_DPM_SOCCLK_BIT
;
304 data
->smu_features
[GNLD_DPM_UVD
].smu_feature_id
=
306 data
->smu_features
[GNLD_DPM_VCE
].smu_feature_id
=
308 data
->smu_features
[GNLD_ULV
].smu_feature_id
=
310 data
->smu_features
[GNLD_DPM_MP0CLK
].smu_feature_id
=
311 FEATURE_DPM_MP0CLK_BIT
;
312 data
->smu_features
[GNLD_DPM_LINK
].smu_feature_id
=
313 FEATURE_DPM_LINK_BIT
;
314 data
->smu_features
[GNLD_DPM_DCEFCLK
].smu_feature_id
=
315 FEATURE_DPM_DCEFCLK_BIT
;
316 data
->smu_features
[GNLD_DS_GFXCLK
].smu_feature_id
=
317 FEATURE_DS_GFXCLK_BIT
;
318 data
->smu_features
[GNLD_DS_SOCCLK
].smu_feature_id
=
319 FEATURE_DS_SOCCLK_BIT
;
320 data
->smu_features
[GNLD_DS_LCLK
].smu_feature_id
=
322 data
->smu_features
[GNLD_PPT
].smu_feature_id
=
324 data
->smu_features
[GNLD_TDC
].smu_feature_id
=
326 data
->smu_features
[GNLD_THERMAL
].smu_feature_id
=
328 data
->smu_features
[GNLD_GFX_PER_CU_CG
].smu_feature_id
=
329 FEATURE_GFX_PER_CU_CG_BIT
;
330 data
->smu_features
[GNLD_RM
].smu_feature_id
=
332 data
->smu_features
[GNLD_DS_DCEFCLK
].smu_feature_id
=
333 FEATURE_DS_DCEFCLK_BIT
;
334 data
->smu_features
[GNLD_ACDC
].smu_feature_id
=
336 data
->smu_features
[GNLD_VR0HOT
].smu_feature_id
=
338 data
->smu_features
[GNLD_VR1HOT
].smu_feature_id
=
340 data
->smu_features
[GNLD_FW_CTF
].smu_feature_id
=
342 data
->smu_features
[GNLD_LED_DISPLAY
].smu_feature_id
=
343 FEATURE_LED_DISPLAY_BIT
;
344 data
->smu_features
[GNLD_FAN_CONTROL
].smu_feature_id
=
345 FEATURE_FAN_CONTROL_BIT
;
346 data
->smu_features
[GNLD_DIDT
].smu_feature_id
= FEATURE_GFX_EDC_BIT
;
347 data
->smu_features
[GNLD_GFXOFF
].smu_feature_id
= FEATURE_GFXOFF_BIT
;
348 data
->smu_features
[GNLD_CG
].smu_feature_id
= FEATURE_CG_BIT
;
349 data
->smu_features
[GNLD_ACG
].smu_feature_id
= FEATURE_ACG_BIT
;
351 for (i
= 0; i
< GNLD_FEATURES_MAX
; i
++) {
352 data
->smu_features
[i
].smu_feature_bitmap
=
353 (uint64_t)(1ULL << data
->smu_features
[i
].smu_feature_id
);
354 data
->smu_features
[i
].allowed
=
355 ((data
->registry_data
.disallowed_features
>> i
) & 1) ?
359 /* Get the SN to turn into a Unique ID */
360 smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_ReadSerialNumTop32
);
361 top32
= smum_get_argument(hwmgr
);
362 smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_ReadSerialNumBottom32
);
363 bottom32
= smum_get_argument(hwmgr
);
365 adev
->unique_id
= ((uint64_t)bottom32
<< 32) | top32
;
/* Placeholder: vega12 keeps no extra private data from the pptable. */
static int vega12_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
{
	return 0;
}
373 static int vega12_hwmgr_backend_fini(struct pp_hwmgr
*hwmgr
)
375 kfree(hwmgr
->backend
);
376 hwmgr
->backend
= NULL
;
381 static int vega12_hwmgr_backend_init(struct pp_hwmgr
*hwmgr
)
384 struct vega12_hwmgr
*data
;
385 struct amdgpu_device
*adev
= hwmgr
->adev
;
387 data
= kzalloc(sizeof(struct vega12_hwmgr
), GFP_KERNEL
);
391 hwmgr
->backend
= data
;
393 vega12_set_default_registry_data(hwmgr
);
395 data
->disable_dpm_mask
= 0xff;
396 data
->workload_mask
= 0xff;
398 /* need to set voltage control types before EVV patching */
399 data
->vddc_control
= VEGA12_VOLTAGE_CONTROL_NONE
;
400 data
->mvdd_control
= VEGA12_VOLTAGE_CONTROL_NONE
;
401 data
->vddci_control
= VEGA12_VOLTAGE_CONTROL_NONE
;
403 data
->water_marks_bitmap
= 0;
404 data
->avfs_exist
= false;
406 vega12_set_features_platform_caps(hwmgr
);
408 vega12_init_dpm_defaults(hwmgr
);
410 /* Parse pptable data read from VBIOS */
411 vega12_set_private_data_based_on_pptable(hwmgr
);
413 data
->is_tlu_enabled
= false;
415 hwmgr
->platform_descriptor
.hardwareActivityPerformanceLevels
=
416 VEGA12_MAX_HARDWARE_POWERLEVELS
;
417 hwmgr
->platform_descriptor
.hardwarePerformanceLevels
= 2;
418 hwmgr
->platform_descriptor
.minimumClocksReductionPercentage
= 50;
420 hwmgr
->platform_descriptor
.vbiosInterruptId
= 0x20000400; /* IRQ_SOURCE1_SW_INT */
421 /* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
422 hwmgr
->platform_descriptor
.clockStep
.engineClock
= 500;
423 hwmgr
->platform_descriptor
.clockStep
.memoryClock
= 500;
425 data
->total_active_cus
= adev
->gfx
.cu_info
.number
;
426 /* Setup default Overdrive Fan control settings */
427 data
->odn_fan_table
.target_fan_speed
=
428 hwmgr
->thermal_controller
.advanceFanControlParameters
.usMaxFanRPM
;
429 data
->odn_fan_table
.target_temperature
=
430 hwmgr
->thermal_controller
.advanceFanControlParameters
.ucTargetTemperature
;
431 data
->odn_fan_table
.min_performance_clock
=
432 hwmgr
->thermal_controller
.advanceFanControlParameters
.ulMinFanSCLKAcousticLimit
;
433 data
->odn_fan_table
.min_fan_limit
=
434 hwmgr
->thermal_controller
.advanceFanControlParameters
.usFanPWMMinLimit
*
435 hwmgr
->thermal_controller
.fanInfo
.ulMaxRPM
/ 100;
437 if (hwmgr
->feature_mask
& PP_GFXOFF_MASK
)
438 data
->gfxoff_controlled_by_driver
= true;
440 data
->gfxoff_controlled_by_driver
= false;
445 static int vega12_init_sclk_threshold(struct pp_hwmgr
*hwmgr
)
447 struct vega12_hwmgr
*data
=
448 (struct vega12_hwmgr
*)(hwmgr
->backend
);
450 data
->low_sclk_interrupt_threshold
= 0;
455 static int vega12_setup_asic_task(struct pp_hwmgr
*hwmgr
)
457 PP_ASSERT_WITH_CODE(!vega12_init_sclk_threshold(hwmgr
),
458 "Failed to init sclk threshold!",
465 * @fn vega12_init_dpm_state
466 * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
468 * @param dpm_state - the address of the DPM Table to initiailize.
471 static void vega12_init_dpm_state(struct vega12_dpm_state
*dpm_state
)
473 dpm_state
->soft_min_level
= 0x0;
474 dpm_state
->soft_max_level
= 0xffff;
475 dpm_state
->hard_min_level
= 0x0;
476 dpm_state
->hard_max_level
= 0xffff;
479 static int vega12_get_number_of_dpm_level(struct pp_hwmgr
*hwmgr
,
480 PPCLK_e clk_id
, uint32_t *num_of_levels
)
484 ret
= smum_send_msg_to_smc_with_parameter(hwmgr
,
485 PPSMC_MSG_GetDpmFreqByIndex
,
486 (clk_id
<< 16 | 0xFF));
487 PP_ASSERT_WITH_CODE(!ret
,
488 "[GetNumOfDpmLevel] failed to get dpm levels!",
491 *num_of_levels
= smum_get_argument(hwmgr
);
492 PP_ASSERT_WITH_CODE(*num_of_levels
> 0,
493 "[GetNumOfDpmLevel] number of clk levels is invalid!",
499 static int vega12_get_dpm_frequency_by_index(struct pp_hwmgr
*hwmgr
,
500 PPCLK_e clkID
, uint32_t index
, uint32_t *clock
)
503 *SMU expects the Clock ID to be in the top 16 bits.
504 *Lower 16 bits specify the level
506 PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr
,
507 PPSMC_MSG_GetDpmFreqByIndex
, (clkID
<< 16 | index
)) == 0,
508 "[GetDpmFrequencyByIndex] Failed to get dpm frequency from SMU!",
511 *clock
= smum_get_argument(hwmgr
);
516 static int vega12_setup_single_dpm_table(struct pp_hwmgr
*hwmgr
,
517 struct vega12_single_dpm_table
*dpm_table
, PPCLK_e clk_id
)
520 uint32_t i
, num_of_levels
, clk
;
522 ret
= vega12_get_number_of_dpm_level(hwmgr
, clk_id
, &num_of_levels
);
523 PP_ASSERT_WITH_CODE(!ret
,
524 "[SetupSingleDpmTable] failed to get clk levels!",
527 dpm_table
->count
= num_of_levels
;
529 for (i
= 0; i
< num_of_levels
; i
++) {
530 ret
= vega12_get_dpm_frequency_by_index(hwmgr
, clk_id
, i
, &clk
);
531 PP_ASSERT_WITH_CODE(!ret
,
532 "[SetupSingleDpmTable] failed to get clk of specific level!",
534 dpm_table
->dpm_levels
[i
].value
= clk
;
535 dpm_table
->dpm_levels
[i
].enabled
= true;
542 * This function is to initialize all DPM state tables
543 * for SMU based on the dependency table.
544 * Dynamic state patching function will then trim these
545 * state tables to the allowed range based
546 * on the power policy or external client requests,
547 * such as UVD request, etc.
549 static int vega12_setup_default_dpm_tables(struct pp_hwmgr
*hwmgr
)
552 struct vega12_hwmgr
*data
=
553 (struct vega12_hwmgr
*)(hwmgr
->backend
);
554 struct vega12_single_dpm_table
*dpm_table
;
557 memset(&data
->dpm_table
, 0, sizeof(data
->dpm_table
));
560 dpm_table
= &(data
->dpm_table
.soc_table
);
561 if (data
->smu_features
[GNLD_DPM_SOCCLK
].enabled
) {
562 ret
= vega12_setup_single_dpm_table(hwmgr
, dpm_table
, PPCLK_SOCCLK
);
563 PP_ASSERT_WITH_CODE(!ret
,
564 "[SetupDefaultDpmTable] failed to get socclk dpm levels!",
567 dpm_table
->count
= 1;
568 dpm_table
->dpm_levels
[0].value
= data
->vbios_boot_state
.soc_clock
/ 100;
570 vega12_init_dpm_state(&(dpm_table
->dpm_state
));
573 dpm_table
= &(data
->dpm_table
.gfx_table
);
574 if (data
->smu_features
[GNLD_DPM_GFXCLK
].enabled
) {
575 ret
= vega12_setup_single_dpm_table(hwmgr
, dpm_table
, PPCLK_GFXCLK
);
576 PP_ASSERT_WITH_CODE(!ret
,
577 "[SetupDefaultDpmTable] failed to get gfxclk dpm levels!",
580 dpm_table
->count
= 1;
581 dpm_table
->dpm_levels
[0].value
= data
->vbios_boot_state
.gfx_clock
/ 100;
583 vega12_init_dpm_state(&(dpm_table
->dpm_state
));
586 dpm_table
= &(data
->dpm_table
.mem_table
);
587 if (data
->smu_features
[GNLD_DPM_UCLK
].enabled
) {
588 ret
= vega12_setup_single_dpm_table(hwmgr
, dpm_table
, PPCLK_UCLK
);
589 PP_ASSERT_WITH_CODE(!ret
,
590 "[SetupDefaultDpmTable] failed to get memclk dpm levels!",
593 dpm_table
->count
= 1;
594 dpm_table
->dpm_levels
[0].value
= data
->vbios_boot_state
.mem_clock
/ 100;
596 vega12_init_dpm_state(&(dpm_table
->dpm_state
));
599 dpm_table
= &(data
->dpm_table
.eclk_table
);
600 if (data
->smu_features
[GNLD_DPM_VCE
].enabled
) {
601 ret
= vega12_setup_single_dpm_table(hwmgr
, dpm_table
, PPCLK_ECLK
);
602 PP_ASSERT_WITH_CODE(!ret
,
603 "[SetupDefaultDpmTable] failed to get eclk dpm levels!",
606 dpm_table
->count
= 1;
607 dpm_table
->dpm_levels
[0].value
= data
->vbios_boot_state
.eclock
/ 100;
609 vega12_init_dpm_state(&(dpm_table
->dpm_state
));
612 dpm_table
= &(data
->dpm_table
.vclk_table
);
613 if (data
->smu_features
[GNLD_DPM_UVD
].enabled
) {
614 ret
= vega12_setup_single_dpm_table(hwmgr
, dpm_table
, PPCLK_VCLK
);
615 PP_ASSERT_WITH_CODE(!ret
,
616 "[SetupDefaultDpmTable] failed to get vclk dpm levels!",
619 dpm_table
->count
= 1;
620 dpm_table
->dpm_levels
[0].value
= data
->vbios_boot_state
.vclock
/ 100;
622 vega12_init_dpm_state(&(dpm_table
->dpm_state
));
625 dpm_table
= &(data
->dpm_table
.dclk_table
);
626 if (data
->smu_features
[GNLD_DPM_UVD
].enabled
) {
627 ret
= vega12_setup_single_dpm_table(hwmgr
, dpm_table
, PPCLK_DCLK
);
628 PP_ASSERT_WITH_CODE(!ret
,
629 "[SetupDefaultDpmTable] failed to get dclk dpm levels!",
632 dpm_table
->count
= 1;
633 dpm_table
->dpm_levels
[0].value
= data
->vbios_boot_state
.dclock
/ 100;
635 vega12_init_dpm_state(&(dpm_table
->dpm_state
));
638 dpm_table
= &(data
->dpm_table
.dcef_table
);
639 if (data
->smu_features
[GNLD_DPM_DCEFCLK
].enabled
) {
640 ret
= vega12_setup_single_dpm_table(hwmgr
, dpm_table
, PPCLK_DCEFCLK
);
641 PP_ASSERT_WITH_CODE(!ret
,
642 "[SetupDefaultDpmTable] failed to get dcefclk dpm levels!",
645 dpm_table
->count
= 1;
646 dpm_table
->dpm_levels
[0].value
= data
->vbios_boot_state
.dcef_clock
/ 100;
648 vega12_init_dpm_state(&(dpm_table
->dpm_state
));
651 dpm_table
= &(data
->dpm_table
.pixel_table
);
652 if (data
->smu_features
[GNLD_DPM_DCEFCLK
].enabled
) {
653 ret
= vega12_setup_single_dpm_table(hwmgr
, dpm_table
, PPCLK_PIXCLK
);
654 PP_ASSERT_WITH_CODE(!ret
,
655 "[SetupDefaultDpmTable] failed to get pixclk dpm levels!",
658 dpm_table
->count
= 0;
659 vega12_init_dpm_state(&(dpm_table
->dpm_state
));
662 dpm_table
= &(data
->dpm_table
.display_table
);
663 if (data
->smu_features
[GNLD_DPM_DCEFCLK
].enabled
) {
664 ret
= vega12_setup_single_dpm_table(hwmgr
, dpm_table
, PPCLK_DISPCLK
);
665 PP_ASSERT_WITH_CODE(!ret
,
666 "[SetupDefaultDpmTable] failed to get dispclk dpm levels!",
669 dpm_table
->count
= 0;
670 vega12_init_dpm_state(&(dpm_table
->dpm_state
));
673 dpm_table
= &(data
->dpm_table
.phy_table
);
674 if (data
->smu_features
[GNLD_DPM_DCEFCLK
].enabled
) {
675 ret
= vega12_setup_single_dpm_table(hwmgr
, dpm_table
, PPCLK_PHYCLK
);
676 PP_ASSERT_WITH_CODE(!ret
,
677 "[SetupDefaultDpmTable] failed to get phyclk dpm levels!",
680 dpm_table
->count
= 0;
681 vega12_init_dpm_state(&(dpm_table
->dpm_state
));
683 /* save a copy of the default DPM table */
684 memcpy(&(data
->golden_dpm_table
), &(data
->dpm_table
),
685 sizeof(struct vega12_dpm_table
));
691 static int vega12_save_default_power_profile(struct pp_hwmgr
*hwmgr
)
693 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
694 struct vega12_single_dpm_table
*dpm_table
= &(data
->dpm_table
.gfx_table
);
697 hwmgr
->default_gfx_power_profile
.type
= AMD_PP_GFX_PROFILE
;
698 hwmgr
->default_compute_power_profile
.type
= AMD_PP_COMPUTE_PROFILE
;
700 /* Optimize compute power profile: Use only highest
701 * 2 power levels (if more than 2 are available)
703 if (dpm_table
->count
> 2)
704 min_level
= dpm_table
->count
- 2;
705 else if (dpm_table
->count
== 2)
710 hwmgr
->default_compute_power_profile
.min_sclk
=
711 dpm_table
->dpm_levels
[min_level
].value
;
713 hwmgr
->gfx_power_profile
= hwmgr
->default_gfx_power_profile
;
714 hwmgr
->compute_power_profile
= hwmgr
->default_compute_power_profile
;
721 * Initializes the SMC table and uploads it
723 * @param hwmgr the address of the powerplay hardware manager.
724 * @param pInput the pointer to input data (PowerState)
727 static int vega12_init_smc_table(struct pp_hwmgr
*hwmgr
)
730 struct vega12_hwmgr
*data
=
731 (struct vega12_hwmgr
*)(hwmgr
->backend
);
732 PPTable_t
*pp_table
= &(data
->smc_state_table
.pp_table
);
733 struct pp_atomfwctrl_bios_boot_up_values boot_up_values
;
734 struct phm_ppt_v3_information
*pptable_information
=
735 (struct phm_ppt_v3_information
*)hwmgr
->pptable
;
737 result
= pp_atomfwctrl_get_vbios_bootup_values(hwmgr
, &boot_up_values
);
739 data
->vbios_boot_state
.vddc
= boot_up_values
.usVddc
;
740 data
->vbios_boot_state
.vddci
= boot_up_values
.usVddci
;
741 data
->vbios_boot_state
.mvddc
= boot_up_values
.usMvddc
;
742 data
->vbios_boot_state
.gfx_clock
= boot_up_values
.ulGfxClk
;
743 data
->vbios_boot_state
.mem_clock
= boot_up_values
.ulUClk
;
744 data
->vbios_boot_state
.soc_clock
= boot_up_values
.ulSocClk
;
745 data
->vbios_boot_state
.dcef_clock
= boot_up_values
.ulDCEFClk
;
746 data
->vbios_boot_state
.uc_cooling_id
= boot_up_values
.ucCoolingID
;
747 data
->vbios_boot_state
.eclock
= boot_up_values
.ulEClk
;
748 data
->vbios_boot_state
.dclock
= boot_up_values
.ulDClk
;
749 data
->vbios_boot_state
.vclock
= boot_up_values
.ulVClk
;
750 smum_send_msg_to_smc_with_parameter(hwmgr
,
751 PPSMC_MSG_SetMinDeepSleepDcefclk
,
752 (uint32_t)(data
->vbios_boot_state
.dcef_clock
/ 100));
755 memcpy(pp_table
, pptable_information
->smc_pptable
, sizeof(PPTable_t
));
757 result
= smum_smc_table_manager(hwmgr
,
758 (uint8_t *)pp_table
, TABLE_PPTABLE
, false);
759 PP_ASSERT_WITH_CODE(!result
,
760 "Failed to upload PPtable!", return result
);
765 static int vega12_run_acg_btc(struct pp_hwmgr
*hwmgr
)
770 smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_RunAcgBtc
) == 0,
771 "[Run_ACG_BTC] Attempt to run ACG BTC failed!",
774 result
= smum_get_argument(hwmgr
);
775 PP_ASSERT_WITH_CODE(result
== 1,
776 "Failed to run ACG BTC!", return -EINVAL
);
781 static int vega12_set_allowed_featuresmask(struct pp_hwmgr
*hwmgr
)
783 struct vega12_hwmgr
*data
=
784 (struct vega12_hwmgr
*)(hwmgr
->backend
);
786 uint32_t allowed_features_low
= 0, allowed_features_high
= 0;
788 for (i
= 0; i
< GNLD_FEATURES_MAX
; i
++)
789 if (data
->smu_features
[i
].allowed
)
790 data
->smu_features
[i
].smu_feature_id
> 31 ?
791 (allowed_features_high
|= ((data
->smu_features
[i
].smu_feature_bitmap
>> SMU_FEATURES_HIGH_SHIFT
) & 0xFFFFFFFF)) :
792 (allowed_features_low
|= ((data
->smu_features
[i
].smu_feature_bitmap
>> SMU_FEATURES_LOW_SHIFT
) & 0xFFFFFFFF));
795 smum_send_msg_to_smc_with_parameter(hwmgr
, PPSMC_MSG_SetAllowedFeaturesMaskHigh
, allowed_features_high
) == 0,
796 "[SetAllowedFeaturesMask] Attempt to set allowed features mask (high) failed!",
800 smum_send_msg_to_smc_with_parameter(hwmgr
, PPSMC_MSG_SetAllowedFeaturesMaskLow
, allowed_features_low
) == 0,
801 "[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!",
807 static void vega12_init_powergate_state(struct pp_hwmgr
*hwmgr
)
809 struct vega12_hwmgr
*data
=
810 (struct vega12_hwmgr
*)(hwmgr
->backend
);
812 data
->uvd_power_gated
= true;
813 data
->vce_power_gated
= true;
815 if (data
->smu_features
[GNLD_DPM_UVD
].enabled
)
816 data
->uvd_power_gated
= false;
818 if (data
->smu_features
[GNLD_DPM_VCE
].enabled
)
819 data
->vce_power_gated
= false;
822 static int vega12_enable_all_smu_features(struct pp_hwmgr
*hwmgr
)
824 struct vega12_hwmgr
*data
=
825 (struct vega12_hwmgr
*)(hwmgr
->backend
);
826 uint64_t features_enabled
;
831 smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_EnableAllSmuFeatures
) == 0,
832 "[EnableAllSMUFeatures] Failed to enable all smu features!",
835 if (vega12_get_enabled_smc_features(hwmgr
, &features_enabled
) == 0) {
836 for (i
= 0; i
< GNLD_FEATURES_MAX
; i
++) {
837 enabled
= (features_enabled
& data
->smu_features
[i
].smu_feature_bitmap
) ? true : false;
838 data
->smu_features
[i
].enabled
= enabled
;
839 data
->smu_features
[i
].supported
= enabled
;
843 vega12_init_powergate_state(hwmgr
);
848 static int vega12_disable_all_smu_features(struct pp_hwmgr
*hwmgr
)
850 struct vega12_hwmgr
*data
=
851 (struct vega12_hwmgr
*)(hwmgr
->backend
);
852 uint64_t features_enabled
;
857 smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_DisableAllSmuFeatures
) == 0,
858 "[DisableAllSMUFeatures] Failed to disable all smu features!",
861 if (vega12_get_enabled_smc_features(hwmgr
, &features_enabled
) == 0) {
862 for (i
= 0; i
< GNLD_FEATURES_MAX
; i
++) {
863 enabled
= (features_enabled
& data
->smu_features
[i
].smu_feature_bitmap
) ? true : false;
864 data
->smu_features
[i
].enabled
= enabled
;
865 data
->smu_features
[i
].supported
= enabled
;
/* Placeholder: vega12 currently programs no overdrive (ODN) defaults. */
static int vega12_odn_initialize_default_settings(
		struct pp_hwmgr *hwmgr)
{
	return 0;
}
878 static int vega12_set_overdrive_target_percentage(struct pp_hwmgr
*hwmgr
,
879 uint32_t adjust_percent
)
881 return smum_send_msg_to_smc_with_parameter(hwmgr
,
882 PPSMC_MSG_OverDriveSetPercentage
, adjust_percent
);
885 static int vega12_power_control_set_level(struct pp_hwmgr
*hwmgr
)
887 int adjust_percent
, result
= 0;
889 if (PP_CAP(PHM_PlatformCaps_PowerContainment
)) {
891 hwmgr
->platform_descriptor
.TDPAdjustmentPolarity
?
892 hwmgr
->platform_descriptor
.TDPAdjustment
:
893 (-1 * hwmgr
->platform_descriptor
.TDPAdjustment
);
894 result
= vega12_set_overdrive_target_percentage(hwmgr
,
895 (uint32_t)adjust_percent
);
900 static int vega12_get_all_clock_ranges_helper(struct pp_hwmgr
*hwmgr
,
901 PPCLK_e clkid
, struct vega12_clock_range
*clock
)
905 smum_send_msg_to_smc_with_parameter(hwmgr
, PPSMC_MSG_GetMaxDpmFreq
, (clkid
<< 16)) == 0,
906 "[GetClockRanges] Failed to get max ac clock from SMC!",
908 clock
->ACMax
= smum_get_argument(hwmgr
);
912 smum_send_msg_to_smc_with_parameter(hwmgr
, PPSMC_MSG_GetMinDpmFreq
, (clkid
<< 16)) == 0,
913 "[GetClockRanges] Failed to get min ac clock from SMC!",
915 clock
->ACMin
= smum_get_argument(hwmgr
);
919 smum_send_msg_to_smc_with_parameter(hwmgr
, PPSMC_MSG_GetDcModeMaxDpmFreq
, (clkid
<< 16)) == 0,
920 "[GetClockRanges] Failed to get max dc clock from SMC!",
922 clock
->DCMax
= smum_get_argument(hwmgr
);
927 static int vega12_get_all_clock_ranges(struct pp_hwmgr
*hwmgr
)
929 struct vega12_hwmgr
*data
=
930 (struct vega12_hwmgr
*)(hwmgr
->backend
);
933 for (i
= 0; i
< PPCLK_COUNT
; i
++)
934 PP_ASSERT_WITH_CODE(!vega12_get_all_clock_ranges_helper(hwmgr
,
935 i
, &(data
->clk_range
[i
])),
936 "Failed to get clk range from SMC!",
942 static int vega12_enable_dpm_tasks(struct pp_hwmgr
*hwmgr
)
944 int tmp_result
, result
= 0;
946 smum_send_msg_to_smc_with_parameter(hwmgr
,
947 PPSMC_MSG_NumOfDisplays
, 0);
949 result
= vega12_set_allowed_featuresmask(hwmgr
);
950 PP_ASSERT_WITH_CODE(result
== 0,
951 "[EnableDPMTasks] Failed to set allowed featuresmask!\n",
954 tmp_result
= vega12_init_smc_table(hwmgr
);
955 PP_ASSERT_WITH_CODE(!tmp_result
,
956 "Failed to initialize SMC table!",
957 result
= tmp_result
);
959 tmp_result
= vega12_run_acg_btc(hwmgr
);
960 PP_ASSERT_WITH_CODE(!tmp_result
,
961 "Failed to run ACG BTC!",
962 result
= tmp_result
);
964 result
= vega12_enable_all_smu_features(hwmgr
);
965 PP_ASSERT_WITH_CODE(!result
,
966 "Failed to enable all smu features!",
969 tmp_result
= vega12_power_control_set_level(hwmgr
);
970 PP_ASSERT_WITH_CODE(!tmp_result
,
971 "Failed to power control set level!",
972 result
= tmp_result
);
974 result
= vega12_get_all_clock_ranges(hwmgr
);
975 PP_ASSERT_WITH_CODE(!result
,
976 "Failed to get all clock ranges!",
979 result
= vega12_odn_initialize_default_settings(hwmgr
);
980 PP_ASSERT_WITH_CODE(!result
,
981 "Failed to power control set level!",
984 result
= vega12_setup_default_dpm_tables(hwmgr
);
985 PP_ASSERT_WITH_CODE(!result
,
986 "Failed to setup default DPM tables!",
991 static int vega12_patch_boot_state(struct pp_hwmgr
*hwmgr
,
992 struct pp_hw_power_state
*hw_ps
)
997 static uint32_t vega12_find_lowest_dpm_level(
998 struct vega12_single_dpm_table
*table
)
1002 for (i
= 0; i
< table
->count
; i
++) {
1003 if (table
->dpm_levels
[i
].enabled
)
1007 if (i
>= table
->count
) {
1009 table
->dpm_levels
[i
].enabled
= true;
1015 static uint32_t vega12_find_highest_dpm_level(
1016 struct vega12_single_dpm_table
*table
)
1019 PP_ASSERT_WITH_CODE(table
->count
<= MAX_REGULAR_DPM_NUMBER
,
1020 "[FindHighestDPMLevel] DPM Table has too many entries!",
1021 return MAX_REGULAR_DPM_NUMBER
- 1);
1023 for (i
= table
->count
- 1; i
>= 0; i
--) {
1024 if (table
->dpm_levels
[i
].enabled
)
1030 table
->dpm_levels
[i
].enabled
= true;
1036 static int vega12_upload_dpm_min_level(struct pp_hwmgr
*hwmgr
)
1038 struct vega12_hwmgr
*data
= hwmgr
->backend
;
1042 if (data
->smu_features
[GNLD_DPM_GFXCLK
].enabled
) {
1043 min_freq
= data
->dpm_table
.gfx_table
.dpm_state
.soft_min_level
;
1044 PP_ASSERT_WITH_CODE(!(ret
= smum_send_msg_to_smc_with_parameter(
1045 hwmgr
, PPSMC_MSG_SetSoftMinByFreq
,
1046 (PPCLK_GFXCLK
<< 16) | (min_freq
& 0xffff))),
1047 "Failed to set soft min gfxclk !",
1051 if (data
->smu_features
[GNLD_DPM_UCLK
].enabled
) {
1052 min_freq
= data
->dpm_table
.mem_table
.dpm_state
.soft_min_level
;
1053 PP_ASSERT_WITH_CODE(!(ret
= smum_send_msg_to_smc_with_parameter(
1054 hwmgr
, PPSMC_MSG_SetSoftMinByFreq
,
1055 (PPCLK_UCLK
<< 16) | (min_freq
& 0xffff))),
1056 "Failed to set soft min memclk !",
1059 min_freq
= data
->dpm_table
.mem_table
.dpm_state
.hard_min_level
;
1060 PP_ASSERT_WITH_CODE(!(ret
= smum_send_msg_to_smc_with_parameter(
1061 hwmgr
, PPSMC_MSG_SetHardMinByFreq
,
1062 (PPCLK_UCLK
<< 16) | (min_freq
& 0xffff))),
1063 "Failed to set hard min memclk !",
1067 if (data
->smu_features
[GNLD_DPM_UVD
].enabled
) {
1068 min_freq
= data
->dpm_table
.vclk_table
.dpm_state
.soft_min_level
;
1070 PP_ASSERT_WITH_CODE(!(ret
= smum_send_msg_to_smc_with_parameter(
1071 hwmgr
, PPSMC_MSG_SetSoftMinByFreq
,
1072 (PPCLK_VCLK
<< 16) | (min_freq
& 0xffff))),
1073 "Failed to set soft min vclk!",
1076 min_freq
= data
->dpm_table
.dclk_table
.dpm_state
.soft_min_level
;
1078 PP_ASSERT_WITH_CODE(!(ret
= smum_send_msg_to_smc_with_parameter(
1079 hwmgr
, PPSMC_MSG_SetSoftMinByFreq
,
1080 (PPCLK_DCLK
<< 16) | (min_freq
& 0xffff))),
1081 "Failed to set soft min dclk!",
1085 if (data
->smu_features
[GNLD_DPM_VCE
].enabled
) {
1086 min_freq
= data
->dpm_table
.eclk_table
.dpm_state
.soft_min_level
;
1088 PP_ASSERT_WITH_CODE(!(ret
= smum_send_msg_to_smc_with_parameter(
1089 hwmgr
, PPSMC_MSG_SetSoftMinByFreq
,
1090 (PPCLK_ECLK
<< 16) | (min_freq
& 0xffff))),
1091 "Failed to set soft min eclk!",
1095 if (data
->smu_features
[GNLD_DPM_SOCCLK
].enabled
) {
1096 min_freq
= data
->dpm_table
.soc_table
.dpm_state
.soft_min_level
;
1098 PP_ASSERT_WITH_CODE(!(ret
= smum_send_msg_to_smc_with_parameter(
1099 hwmgr
, PPSMC_MSG_SetSoftMinByFreq
,
1100 (PPCLK_SOCCLK
<< 16) | (min_freq
& 0xffff))),
1101 "Failed to set soft min socclk!",
1105 if (data
->smu_features
[GNLD_DPM_DCEFCLK
].enabled
) {
1106 min_freq
= data
->dpm_table
.dcef_table
.dpm_state
.hard_min_level
;
1108 PP_ASSERT_WITH_CODE(!(ret
= smum_send_msg_to_smc_with_parameter(
1109 hwmgr
, PPSMC_MSG_SetHardMinByFreq
,
1110 (PPCLK_DCEFCLK
<< 16) | (min_freq
& 0xffff))),
1111 "Failed to set hard min dcefclk!",
1119 static int vega12_upload_dpm_max_level(struct pp_hwmgr
*hwmgr
)
1121 struct vega12_hwmgr
*data
= hwmgr
->backend
;
1125 if (data
->smu_features
[GNLD_DPM_GFXCLK
].enabled
) {
1126 max_freq
= data
->dpm_table
.gfx_table
.dpm_state
.soft_max_level
;
1128 PP_ASSERT_WITH_CODE(!(ret
= smum_send_msg_to_smc_with_parameter(
1129 hwmgr
, PPSMC_MSG_SetSoftMaxByFreq
,
1130 (PPCLK_GFXCLK
<< 16) | (max_freq
& 0xffff))),
1131 "Failed to set soft max gfxclk!",
1135 if (data
->smu_features
[GNLD_DPM_UCLK
].enabled
) {
1136 max_freq
= data
->dpm_table
.mem_table
.dpm_state
.soft_max_level
;
1138 PP_ASSERT_WITH_CODE(!(ret
= smum_send_msg_to_smc_with_parameter(
1139 hwmgr
, PPSMC_MSG_SetSoftMaxByFreq
,
1140 (PPCLK_UCLK
<< 16) | (max_freq
& 0xffff))),
1141 "Failed to set soft max memclk!",
1145 if (data
->smu_features
[GNLD_DPM_UVD
].enabled
) {
1146 max_freq
= data
->dpm_table
.vclk_table
.dpm_state
.soft_max_level
;
1148 PP_ASSERT_WITH_CODE(!(ret
= smum_send_msg_to_smc_with_parameter(
1149 hwmgr
, PPSMC_MSG_SetSoftMaxByFreq
,
1150 (PPCLK_VCLK
<< 16) | (max_freq
& 0xffff))),
1151 "Failed to set soft max vclk!",
1154 max_freq
= data
->dpm_table
.dclk_table
.dpm_state
.soft_max_level
;
1155 PP_ASSERT_WITH_CODE(!(ret
= smum_send_msg_to_smc_with_parameter(
1156 hwmgr
, PPSMC_MSG_SetSoftMaxByFreq
,
1157 (PPCLK_DCLK
<< 16) | (max_freq
& 0xffff))),
1158 "Failed to set soft max dclk!",
1162 if (data
->smu_features
[GNLD_DPM_VCE
].enabled
) {
1163 max_freq
= data
->dpm_table
.eclk_table
.dpm_state
.soft_max_level
;
1165 PP_ASSERT_WITH_CODE(!(ret
= smum_send_msg_to_smc_with_parameter(
1166 hwmgr
, PPSMC_MSG_SetSoftMaxByFreq
,
1167 (PPCLK_ECLK
<< 16) | (max_freq
& 0xffff))),
1168 "Failed to set soft max eclk!",
1172 if (data
->smu_features
[GNLD_DPM_SOCCLK
].enabled
) {
1173 max_freq
= data
->dpm_table
.soc_table
.dpm_state
.soft_max_level
;
1175 PP_ASSERT_WITH_CODE(!(ret
= smum_send_msg_to_smc_with_parameter(
1176 hwmgr
, PPSMC_MSG_SetSoftMaxByFreq
,
1177 (PPCLK_SOCCLK
<< 16) | (max_freq
& 0xffff))),
1178 "Failed to set soft max socclk!",
1185 int vega12_enable_disable_vce_dpm(struct pp_hwmgr
*hwmgr
, bool enable
)
1187 struct vega12_hwmgr
*data
=
1188 (struct vega12_hwmgr
*)(hwmgr
->backend
);
1190 if (data
->smu_features
[GNLD_DPM_VCE
].supported
) {
1191 PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr
,
1193 data
->smu_features
[GNLD_DPM_VCE
].smu_feature_bitmap
),
1194 "Attempt to Enable/Disable DPM VCE Failed!",
1196 data
->smu_features
[GNLD_DPM_VCE
].enabled
= enable
;
1202 static uint32_t vega12_dpm_get_sclk(struct pp_hwmgr
*hwmgr
, bool low
)
1204 struct vega12_hwmgr
*data
=
1205 (struct vega12_hwmgr
*)(hwmgr
->backend
);
1208 if (!data
->smu_features
[GNLD_DPM_GFXCLK
].enabled
)
1212 PP_ASSERT_WITH_CODE(
1213 vega12_get_clock_ranges(hwmgr
, &gfx_clk
, PPCLK_GFXCLK
, false) == 0,
1214 "[GetSclks]: fail to get min PPCLK_GFXCLK\n",
1217 PP_ASSERT_WITH_CODE(
1218 vega12_get_clock_ranges(hwmgr
, &gfx_clk
, PPCLK_GFXCLK
, true) == 0,
1219 "[GetSclks]: fail to get max PPCLK_GFXCLK\n",
1222 return (gfx_clk
* 100);
1225 static uint32_t vega12_dpm_get_mclk(struct pp_hwmgr
*hwmgr
, bool low
)
1227 struct vega12_hwmgr
*data
=
1228 (struct vega12_hwmgr
*)(hwmgr
->backend
);
1231 if (!data
->smu_features
[GNLD_DPM_UCLK
].enabled
)
1235 PP_ASSERT_WITH_CODE(
1236 vega12_get_clock_ranges(hwmgr
, &mem_clk
, PPCLK_UCLK
, false) == 0,
1237 "[GetMclks]: fail to get min PPCLK_UCLK\n",
1240 PP_ASSERT_WITH_CODE(
1241 vega12_get_clock_ranges(hwmgr
, &mem_clk
, PPCLK_UCLK
, true) == 0,
1242 "[GetMclks]: fail to get max PPCLK_UCLK\n",
1245 return (mem_clk
* 100);
1248 static int vega12_get_metrics_table(struct pp_hwmgr
*hwmgr
, SmuMetrics_t
*metrics_table
)
1250 struct vega12_hwmgr
*data
=
1251 (struct vega12_hwmgr
*)(hwmgr
->backend
);
1254 if (!data
->metrics_time
|| time_after(jiffies
, data
->metrics_time
+ HZ
/ 2)) {
1255 ret
= smum_smc_table_manager(hwmgr
, (uint8_t *)metrics_table
,
1256 TABLE_SMU_METRICS
, true);
1258 pr_info("Failed to export SMU metrics table!\n");
1261 memcpy(&data
->metrics_table
, metrics_table
, sizeof(SmuMetrics_t
));
1262 data
->metrics_time
= jiffies
;
1264 memcpy(metrics_table
, &data
->metrics_table
, sizeof(SmuMetrics_t
));
1269 static int vega12_get_gpu_power(struct pp_hwmgr
*hwmgr
, uint32_t *query
)
1271 SmuMetrics_t metrics_table
;
1274 ret
= vega12_get_metrics_table(hwmgr
, &metrics_table
);
1278 *query
= metrics_table
.CurrSocketPower
<< 8;
1283 static int vega12_get_current_gfx_clk_freq(struct pp_hwmgr
*hwmgr
, uint32_t *gfx_freq
)
1285 uint32_t gfx_clk
= 0;
1289 PP_ASSERT_WITH_CODE(smum_send_msg_to_smc_with_parameter(hwmgr
,
1290 PPSMC_MSG_GetDpmClockFreq
, (PPCLK_GFXCLK
<< 16)) == 0,
1291 "[GetCurrentGfxClkFreq] Attempt to get Current GFXCLK Frequency Failed!",
1293 gfx_clk
= smum_get_argument(hwmgr
);
1295 *gfx_freq
= gfx_clk
* 100;
1300 static int vega12_get_current_mclk_freq(struct pp_hwmgr
*hwmgr
, uint32_t *mclk_freq
)
1302 uint32_t mem_clk
= 0;
1306 PP_ASSERT_WITH_CODE(
1307 smum_send_msg_to_smc_with_parameter(hwmgr
, PPSMC_MSG_GetDpmClockFreq
, (PPCLK_UCLK
<< 16)) == 0,
1308 "[GetCurrentMClkFreq] Attempt to get Current MCLK Frequency Failed!",
1310 mem_clk
= smum_get_argument(hwmgr
);
1312 *mclk_freq
= mem_clk
* 100;
1317 static int vega12_get_current_activity_percent(
1318 struct pp_hwmgr
*hwmgr
,
1320 uint32_t *activity_percent
)
1322 SmuMetrics_t metrics_table
;
1325 ret
= vega12_get_metrics_table(hwmgr
, &metrics_table
);
1330 case AMDGPU_PP_SENSOR_GPU_LOAD
:
1331 *activity_percent
= metrics_table
.AverageGfxActivity
;
1333 case AMDGPU_PP_SENSOR_MEM_LOAD
:
1334 *activity_percent
= metrics_table
.AverageUclkActivity
;
1337 pr_err("Invalid index for retrieving clock activity\n");
1344 static int vega12_read_sensor(struct pp_hwmgr
*hwmgr
, int idx
,
1345 void *value
, int *size
)
1347 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
1348 SmuMetrics_t metrics_table
;
1352 case AMDGPU_PP_SENSOR_GFX_SCLK
:
1353 ret
= vega12_get_current_gfx_clk_freq(hwmgr
, (uint32_t *)value
);
1357 case AMDGPU_PP_SENSOR_GFX_MCLK
:
1358 ret
= vega12_get_current_mclk_freq(hwmgr
, (uint32_t *)value
);
1362 case AMDGPU_PP_SENSOR_GPU_LOAD
:
1363 case AMDGPU_PP_SENSOR_MEM_LOAD
:
1364 ret
= vega12_get_current_activity_percent(hwmgr
, idx
, (uint32_t *)value
);
1368 case AMDGPU_PP_SENSOR_GPU_TEMP
:
1369 *((uint32_t *)value
) = vega12_thermal_get_temperature(hwmgr
);
1372 case AMDGPU_PP_SENSOR_HOTSPOT_TEMP
:
1373 ret
= vega12_get_metrics_table(hwmgr
, &metrics_table
);
1377 *((uint32_t *)value
) = metrics_table
.TemperatureHotspot
*
1378 PP_TEMPERATURE_UNITS_PER_CENTIGRADES
;
1381 case AMDGPU_PP_SENSOR_MEM_TEMP
:
1382 ret
= vega12_get_metrics_table(hwmgr
, &metrics_table
);
1386 *((uint32_t *)value
) = metrics_table
.TemperatureHBM
*
1387 PP_TEMPERATURE_UNITS_PER_CENTIGRADES
;
1390 case AMDGPU_PP_SENSOR_UVD_POWER
:
1391 *((uint32_t *)value
) = data
->uvd_power_gated
? 0 : 1;
1394 case AMDGPU_PP_SENSOR_VCE_POWER
:
1395 *((uint32_t *)value
) = data
->vce_power_gated
? 0 : 1;
1398 case AMDGPU_PP_SENSOR_GPU_POWER
:
1399 ret
= vega12_get_gpu_power(hwmgr
, (uint32_t *)value
);
1403 case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK
:
1404 ret
= vega12_get_enabled_smc_features(hwmgr
, (uint64_t *)value
);
1415 static int vega12_notify_smc_display_change(struct pp_hwmgr
*hwmgr
,
1418 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
1420 if (data
->smu_features
[GNLD_DPM_UCLK
].enabled
)
1421 return smum_send_msg_to_smc_with_parameter(hwmgr
,
1422 PPSMC_MSG_SetUclkFastSwitch
,
1428 int vega12_display_clock_voltage_request(struct pp_hwmgr
*hwmgr
,
1429 struct pp_display_clock_request
*clock_req
)
1432 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
1433 enum amd_pp_clock_type clk_type
= clock_req
->clock_type
;
1434 uint32_t clk_freq
= clock_req
->clock_freq_in_khz
/ 1000;
1435 PPCLK_e clk_select
= 0;
1436 uint32_t clk_request
= 0;
1438 if (data
->smu_features
[GNLD_DPM_DCEFCLK
].enabled
) {
1440 case amd_pp_dcef_clock
:
1441 clk_select
= PPCLK_DCEFCLK
;
1443 case amd_pp_disp_clock
:
1444 clk_select
= PPCLK_DISPCLK
;
1446 case amd_pp_pixel_clock
:
1447 clk_select
= PPCLK_PIXCLK
;
1449 case amd_pp_phy_clock
:
1450 clk_select
= PPCLK_PHYCLK
;
1453 pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
1459 clk_request
= (clk_select
<< 16) | clk_freq
;
1460 result
= smum_send_msg_to_smc_with_parameter(hwmgr
,
1461 PPSMC_MSG_SetHardMinByFreq
,
1469 static int vega12_notify_smc_display_config_after_ps_adjustment(
1470 struct pp_hwmgr
*hwmgr
)
1472 struct vega12_hwmgr
*data
=
1473 (struct vega12_hwmgr
*)(hwmgr
->backend
);
1474 struct PP_Clocks min_clocks
= {0};
1475 struct pp_display_clock_request clock_req
;
1477 if ((hwmgr
->display_config
->num_display
> 1) &&
1478 !hwmgr
->display_config
->multi_monitor_in_sync
&&
1479 !hwmgr
->display_config
->nb_pstate_switch_disable
)
1480 vega12_notify_smc_display_change(hwmgr
, false);
1482 vega12_notify_smc_display_change(hwmgr
, true);
1484 min_clocks
.dcefClock
= hwmgr
->display_config
->min_dcef_set_clk
;
1485 min_clocks
.dcefClockInSR
= hwmgr
->display_config
->min_dcef_deep_sleep_set_clk
;
1486 min_clocks
.memoryClock
= hwmgr
->display_config
->min_mem_set_clock
;
1488 if (data
->smu_features
[GNLD_DPM_DCEFCLK
].supported
) {
1489 clock_req
.clock_type
= amd_pp_dcef_clock
;
1490 clock_req
.clock_freq_in_khz
= min_clocks
.dcefClock
/10;
1491 if (!vega12_display_clock_voltage_request(hwmgr
, &clock_req
)) {
1492 if (data
->smu_features
[GNLD_DS_DCEFCLK
].supported
)
1493 PP_ASSERT_WITH_CODE(
1494 !smum_send_msg_to_smc_with_parameter(
1495 hwmgr
, PPSMC_MSG_SetMinDeepSleepDcefclk
,
1496 min_clocks
.dcefClockInSR
/100),
1497 "Attempt to set divider for DCEFCLK Failed!",
1500 pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
1507 static int vega12_force_dpm_highest(struct pp_hwmgr
*hwmgr
)
1509 struct vega12_hwmgr
*data
=
1510 (struct vega12_hwmgr
*)(hwmgr
->backend
);
1512 uint32_t soft_level
;
1514 soft_level
= vega12_find_highest_dpm_level(&(data
->dpm_table
.gfx_table
));
1516 data
->dpm_table
.gfx_table
.dpm_state
.soft_min_level
=
1517 data
->dpm_table
.gfx_table
.dpm_state
.soft_max_level
=
1518 data
->dpm_table
.gfx_table
.dpm_levels
[soft_level
].value
;
1520 soft_level
= vega12_find_highest_dpm_level(&(data
->dpm_table
.mem_table
));
1522 data
->dpm_table
.mem_table
.dpm_state
.soft_min_level
=
1523 data
->dpm_table
.mem_table
.dpm_state
.soft_max_level
=
1524 data
->dpm_table
.mem_table
.dpm_levels
[soft_level
].value
;
1526 PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr
),
1527 "Failed to upload boot level to highest!",
1530 PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr
),
1531 "Failed to upload dpm max level to highest!",
1537 static int vega12_force_dpm_lowest(struct pp_hwmgr
*hwmgr
)
1539 struct vega12_hwmgr
*data
=
1540 (struct vega12_hwmgr
*)(hwmgr
->backend
);
1541 uint32_t soft_level
;
1543 soft_level
= vega12_find_lowest_dpm_level(&(data
->dpm_table
.gfx_table
));
1545 data
->dpm_table
.gfx_table
.dpm_state
.soft_min_level
=
1546 data
->dpm_table
.gfx_table
.dpm_state
.soft_max_level
=
1547 data
->dpm_table
.gfx_table
.dpm_levels
[soft_level
].value
;
1549 soft_level
= vega12_find_lowest_dpm_level(&(data
->dpm_table
.mem_table
));
1551 data
->dpm_table
.mem_table
.dpm_state
.soft_min_level
=
1552 data
->dpm_table
.mem_table
.dpm_state
.soft_max_level
=
1553 data
->dpm_table
.mem_table
.dpm_levels
[soft_level
].value
;
1555 PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr
),
1556 "Failed to upload boot level to highest!",
1559 PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr
),
1560 "Failed to upload dpm max level to highest!",
1567 static int vega12_unforce_dpm_levels(struct pp_hwmgr
*hwmgr
)
1569 PP_ASSERT_WITH_CODE(!vega12_upload_dpm_min_level(hwmgr
),
1570 "Failed to upload DPM Bootup Levels!",
1573 PP_ASSERT_WITH_CODE(!vega12_upload_dpm_max_level(hwmgr
),
1574 "Failed to upload DPM Max Levels!",
1580 static int vega12_get_profiling_clk_mask(struct pp_hwmgr
*hwmgr
, enum amd_dpm_forced_level level
,
1581 uint32_t *sclk_mask
, uint32_t *mclk_mask
, uint32_t *soc_mask
)
1583 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
1584 struct vega12_single_dpm_table
*gfx_dpm_table
= &(data
->dpm_table
.gfx_table
);
1585 struct vega12_single_dpm_table
*mem_dpm_table
= &(data
->dpm_table
.mem_table
);
1586 struct vega12_single_dpm_table
*soc_dpm_table
= &(data
->dpm_table
.soc_table
);
1592 if (gfx_dpm_table
->count
> VEGA12_UMD_PSTATE_GFXCLK_LEVEL
&&
1593 mem_dpm_table
->count
> VEGA12_UMD_PSTATE_MCLK_LEVEL
&&
1594 soc_dpm_table
->count
> VEGA12_UMD_PSTATE_SOCCLK_LEVEL
) {
1595 *sclk_mask
= VEGA12_UMD_PSTATE_GFXCLK_LEVEL
;
1596 *mclk_mask
= VEGA12_UMD_PSTATE_MCLK_LEVEL
;
1597 *soc_mask
= VEGA12_UMD_PSTATE_SOCCLK_LEVEL
;
1600 if (level
== AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK
) {
1602 } else if (level
== AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK
) {
1604 } else if (level
== AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
) {
1605 *sclk_mask
= gfx_dpm_table
->count
- 1;
1606 *mclk_mask
= mem_dpm_table
->count
- 1;
1607 *soc_mask
= soc_dpm_table
->count
- 1;
1613 static void vega12_set_fan_control_mode(struct pp_hwmgr
*hwmgr
, uint32_t mode
)
1616 case AMD_FAN_CTRL_NONE
:
1618 case AMD_FAN_CTRL_MANUAL
:
1619 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl
))
1620 vega12_fan_ctrl_stop_smc_fan_control(hwmgr
);
1622 case AMD_FAN_CTRL_AUTO
:
1623 if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl
))
1624 vega12_fan_ctrl_start_smc_fan_control(hwmgr
);
1631 static int vega12_dpm_force_dpm_level(struct pp_hwmgr
*hwmgr
,
1632 enum amd_dpm_forced_level level
)
1635 uint32_t sclk_mask
= 0;
1636 uint32_t mclk_mask
= 0;
1637 uint32_t soc_mask
= 0;
1640 case AMD_DPM_FORCED_LEVEL_HIGH
:
1641 ret
= vega12_force_dpm_highest(hwmgr
);
1643 case AMD_DPM_FORCED_LEVEL_LOW
:
1644 ret
= vega12_force_dpm_lowest(hwmgr
);
1646 case AMD_DPM_FORCED_LEVEL_AUTO
:
1647 ret
= vega12_unforce_dpm_levels(hwmgr
);
1649 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD
:
1650 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK
:
1651 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK
:
1652 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
:
1653 ret
= vega12_get_profiling_clk_mask(hwmgr
, level
, &sclk_mask
, &mclk_mask
, &soc_mask
);
1656 vega12_force_clock_level(hwmgr
, PP_SCLK
, 1 << sclk_mask
);
1657 vega12_force_clock_level(hwmgr
, PP_MCLK
, 1 << mclk_mask
);
1659 case AMD_DPM_FORCED_LEVEL_MANUAL
:
1660 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT
:
1668 static uint32_t vega12_get_fan_control_mode(struct pp_hwmgr
*hwmgr
)
1670 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
1672 if (data
->smu_features
[GNLD_FAN_CONTROL
].enabled
== false)
1673 return AMD_FAN_CTRL_MANUAL
;
1675 return AMD_FAN_CTRL_AUTO
;
/*
 * Fill @info with the maximum engine (sclk) and memory (mclk) clocks taken
 * from the pptable's AC max-clock/voltage limits, for DAL's use.
 */
static int vega12_get_dal_power_level(struct pp_hwmgr *hwmgr,
		struct amd_pp_simple_clock_info *info)
{
#if 0
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;
	struct phm_clock_and_voltage_limits *max_limits =
			&table_info->max_clock_voltage_on_ac;

	info->engine_max_clock = max_limits->sclk;
	info->memory_max_clock = max_limits->mclk;
#endif
	return 0;
}
1693 static int vega12_get_clock_ranges(struct pp_hwmgr
*hwmgr
,
1695 PPCLK_e clock_select
,
1698 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
1701 *clock
= data
->clk_range
[clock_select
].ACMax
;
1703 *clock
= data
->clk_range
[clock_select
].ACMin
;
1708 static int vega12_get_sclks(struct pp_hwmgr
*hwmgr
,
1709 struct pp_clock_levels_with_latency
*clocks
)
1711 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
1714 struct vega12_single_dpm_table
*dpm_table
;
1716 if (!data
->smu_features
[GNLD_DPM_GFXCLK
].enabled
)
1719 dpm_table
= &(data
->dpm_table
.gfx_table
);
1720 ucount
= (dpm_table
->count
> MAX_NUM_CLOCKS
) ?
1721 MAX_NUM_CLOCKS
: dpm_table
->count
;
1723 for (i
= 0; i
< ucount
; i
++) {
1724 clocks
->data
[i
].clocks_in_khz
=
1725 dpm_table
->dpm_levels
[i
].value
* 1000;
1727 clocks
->data
[i
].latency_in_us
= 0;
1730 clocks
->num_levels
= ucount
;
1735 static uint32_t vega12_get_mem_latency(struct pp_hwmgr
*hwmgr
,
1741 static int vega12_get_memclocks(struct pp_hwmgr
*hwmgr
,
1742 struct pp_clock_levels_with_latency
*clocks
)
1744 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
1747 struct vega12_single_dpm_table
*dpm_table
;
1748 if (!data
->smu_features
[GNLD_DPM_UCLK
].enabled
)
1751 dpm_table
= &(data
->dpm_table
.mem_table
);
1752 ucount
= (dpm_table
->count
> MAX_NUM_CLOCKS
) ?
1753 MAX_NUM_CLOCKS
: dpm_table
->count
;
1755 for (i
= 0; i
< ucount
; i
++) {
1756 clocks
->data
[i
].clocks_in_khz
= dpm_table
->dpm_levels
[i
].value
* 1000;
1757 data
->mclk_latency_table
.entries
[i
].frequency
= dpm_table
->dpm_levels
[i
].value
* 100;
1758 clocks
->data
[i
].latency_in_us
=
1759 data
->mclk_latency_table
.entries
[i
].latency
=
1760 vega12_get_mem_latency(hwmgr
, dpm_table
->dpm_levels
[i
].value
);
1763 clocks
->num_levels
= data
->mclk_latency_table
.count
= ucount
;
1768 static int vega12_get_dcefclocks(struct pp_hwmgr
*hwmgr
,
1769 struct pp_clock_levels_with_latency
*clocks
)
1771 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
1774 struct vega12_single_dpm_table
*dpm_table
;
1776 if (!data
->smu_features
[GNLD_DPM_DCEFCLK
].enabled
)
1780 dpm_table
= &(data
->dpm_table
.dcef_table
);
1781 ucount
= (dpm_table
->count
> MAX_NUM_CLOCKS
) ?
1782 MAX_NUM_CLOCKS
: dpm_table
->count
;
1784 for (i
= 0; i
< ucount
; i
++) {
1785 clocks
->data
[i
].clocks_in_khz
=
1786 dpm_table
->dpm_levels
[i
].value
* 1000;
1788 clocks
->data
[i
].latency_in_us
= 0;
1791 clocks
->num_levels
= ucount
;
1796 static int vega12_get_socclocks(struct pp_hwmgr
*hwmgr
,
1797 struct pp_clock_levels_with_latency
*clocks
)
1799 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
1802 struct vega12_single_dpm_table
*dpm_table
;
1804 if (!data
->smu_features
[GNLD_DPM_SOCCLK
].enabled
)
1808 dpm_table
= &(data
->dpm_table
.soc_table
);
1809 ucount
= (dpm_table
->count
> MAX_NUM_CLOCKS
) ?
1810 MAX_NUM_CLOCKS
: dpm_table
->count
;
1812 for (i
= 0; i
< ucount
; i
++) {
1813 clocks
->data
[i
].clocks_in_khz
=
1814 dpm_table
->dpm_levels
[i
].value
* 1000;
1816 clocks
->data
[i
].latency_in_us
= 0;
1819 clocks
->num_levels
= ucount
;
1825 static int vega12_get_clock_by_type_with_latency(struct pp_hwmgr
*hwmgr
,
1826 enum amd_pp_clock_type type
,
1827 struct pp_clock_levels_with_latency
*clocks
)
1832 case amd_pp_sys_clock
:
1833 ret
= vega12_get_sclks(hwmgr
, clocks
);
1835 case amd_pp_mem_clock
:
1836 ret
= vega12_get_memclocks(hwmgr
, clocks
);
1838 case amd_pp_dcef_clock
:
1839 ret
= vega12_get_dcefclocks(hwmgr
, clocks
);
1841 case amd_pp_soc_clock
:
1842 ret
= vega12_get_socclocks(hwmgr
, clocks
);
1851 static int vega12_get_clock_by_type_with_voltage(struct pp_hwmgr
*hwmgr
,
1852 enum amd_pp_clock_type type
,
1853 struct pp_clock_levels_with_voltage
*clocks
)
1855 clocks
->num_levels
= 0;
1860 static int vega12_set_watermarks_for_clocks_ranges(struct pp_hwmgr
*hwmgr
,
1863 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
1864 Watermarks_t
*table
= &(data
->smc_state_table
.water_marks_table
);
1865 struct dm_pp_wm_sets_with_clock_ranges_soc15
*wm_with_clock_ranges
= clock_ranges
;
1867 if (!data
->registry_data
.disable_water_mark
&&
1868 data
->smu_features
[GNLD_DPM_DCEFCLK
].supported
&&
1869 data
->smu_features
[GNLD_DPM_SOCCLK
].supported
) {
1870 smu_set_watermarks_for_clocks_ranges(table
, wm_with_clock_ranges
);
1871 data
->water_marks_bitmap
|= WaterMarksExist
;
1872 data
->water_marks_bitmap
&= ~WaterMarksLoaded
;
1878 static int vega12_force_clock_level(struct pp_hwmgr
*hwmgr
,
1879 enum pp_clock_type type
, uint32_t mask
)
1881 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
1882 uint32_t soft_min_level
, soft_max_level
, hard_min_level
;
1887 soft_min_level
= mask
? (ffs(mask
) - 1) : 0;
1888 soft_max_level
= mask
? (fls(mask
) - 1) : 0;
1890 data
->dpm_table
.gfx_table
.dpm_state
.soft_min_level
=
1891 data
->dpm_table
.gfx_table
.dpm_levels
[soft_min_level
].value
;
1892 data
->dpm_table
.gfx_table
.dpm_state
.soft_max_level
=
1893 data
->dpm_table
.gfx_table
.dpm_levels
[soft_max_level
].value
;
1895 ret
= vega12_upload_dpm_min_level(hwmgr
);
1896 PP_ASSERT_WITH_CODE(!ret
,
1897 "Failed to upload boot level to lowest!",
1900 ret
= vega12_upload_dpm_max_level(hwmgr
);
1901 PP_ASSERT_WITH_CODE(!ret
,
1902 "Failed to upload dpm max level to highest!",
1907 soft_min_level
= mask
? (ffs(mask
) - 1) : 0;
1908 soft_max_level
= mask
? (fls(mask
) - 1) : 0;
1910 data
->dpm_table
.mem_table
.dpm_state
.soft_min_level
=
1911 data
->dpm_table
.mem_table
.dpm_levels
[soft_min_level
].value
;
1912 data
->dpm_table
.mem_table
.dpm_state
.soft_max_level
=
1913 data
->dpm_table
.mem_table
.dpm_levels
[soft_max_level
].value
;
1915 ret
= vega12_upload_dpm_min_level(hwmgr
);
1916 PP_ASSERT_WITH_CODE(!ret
,
1917 "Failed to upload boot level to lowest!",
1920 ret
= vega12_upload_dpm_max_level(hwmgr
);
1921 PP_ASSERT_WITH_CODE(!ret
,
1922 "Failed to upload dpm max level to highest!",
1928 soft_min_level
= mask
? (ffs(mask
) - 1) : 0;
1929 soft_max_level
= mask
? (fls(mask
) - 1) : 0;
1931 if (soft_max_level
>= data
->dpm_table
.soc_table
.count
) {
1932 pr_err("Clock level specified %d is over max allowed %d\n",
1934 data
->dpm_table
.soc_table
.count
- 1);
1938 data
->dpm_table
.soc_table
.dpm_state
.soft_min_level
=
1939 data
->dpm_table
.soc_table
.dpm_levels
[soft_min_level
].value
;
1940 data
->dpm_table
.soc_table
.dpm_state
.soft_max_level
=
1941 data
->dpm_table
.soc_table
.dpm_levels
[soft_max_level
].value
;
1943 ret
= vega12_upload_dpm_min_level(hwmgr
);
1944 PP_ASSERT_WITH_CODE(!ret
,
1945 "Failed to upload boot level to lowest!",
1948 ret
= vega12_upload_dpm_max_level(hwmgr
);
1949 PP_ASSERT_WITH_CODE(!ret
,
1950 "Failed to upload dpm max level to highest!",
1956 hard_min_level
= mask
? (ffs(mask
) - 1) : 0;
1958 if (hard_min_level
>= data
->dpm_table
.dcef_table
.count
) {
1959 pr_err("Clock level specified %d is over max allowed %d\n",
1961 data
->dpm_table
.dcef_table
.count
- 1);
1965 data
->dpm_table
.dcef_table
.dpm_state
.hard_min_level
=
1966 data
->dpm_table
.dcef_table
.dpm_levels
[hard_min_level
].value
;
1968 ret
= vega12_upload_dpm_min_level(hwmgr
);
1969 PP_ASSERT_WITH_CODE(!ret
,
1970 "Failed to upload boot level to lowest!",
1973 //TODO: Setting DCEFCLK max dpm level is not supported
1987 static int vega12_get_ppfeature_status(struct pp_hwmgr
*hwmgr
, char *buf
)
1989 static const char *ppfeature_name
[] = {
2019 static const char *output_title
[] = {
2023 uint64_t features_enabled
;
2028 ret
= vega12_get_enabled_smc_features(hwmgr
, &features_enabled
);
2029 PP_ASSERT_WITH_CODE(!ret
,
2030 "[EnableAllSmuFeatures] Failed to get enabled smc features!",
2033 size
+= sprintf(buf
+ size
, "Current ppfeatures: 0x%016llx\n", features_enabled
);
2034 size
+= sprintf(buf
+ size
, "%-19s %-22s %s\n",
2038 for (i
= 0; i
< GNLD_FEATURES_MAX
; i
++) {
2039 size
+= sprintf(buf
+ size
, "%-19s 0x%016llx %6s\n",
2042 (features_enabled
& (1ULL << i
)) ? "Y" : "N");
2048 static int vega12_set_ppfeature_status(struct pp_hwmgr
*hwmgr
, uint64_t new_ppfeature_masks
)
2050 uint64_t features_enabled
;
2051 uint64_t features_to_enable
;
2052 uint64_t features_to_disable
;
2055 if (new_ppfeature_masks
>= (1ULL << GNLD_FEATURES_MAX
))
2058 ret
= vega12_get_enabled_smc_features(hwmgr
, &features_enabled
);
2062 features_to_disable
=
2063 features_enabled
& ~new_ppfeature_masks
;
2064 features_to_enable
=
2065 ~features_enabled
& new_ppfeature_masks
;
2067 pr_debug("features_to_disable 0x%llx\n", features_to_disable
);
2068 pr_debug("features_to_enable 0x%llx\n", features_to_enable
);
2070 if (features_to_disable
) {
2071 ret
= vega12_enable_smc_features(hwmgr
, false, features_to_disable
);
2076 if (features_to_enable
) {
2077 ret
= vega12_enable_smc_features(hwmgr
, true, features_to_enable
);
2085 static int vega12_print_clock_levels(struct pp_hwmgr
*hwmgr
,
2086 enum pp_clock_type type
, char *buf
)
2088 int i
, now
, size
= 0;
2089 struct pp_clock_levels_with_latency clocks
;
2093 PP_ASSERT_WITH_CODE(
2094 vega12_get_current_gfx_clk_freq(hwmgr
, &now
) == 0,
2095 "Attempt to get current gfx clk Failed!",
2098 PP_ASSERT_WITH_CODE(
2099 vega12_get_sclks(hwmgr
, &clocks
) == 0,
2100 "Attempt to get gfx clk levels Failed!",
2102 for (i
= 0; i
< clocks
.num_levels
; i
++)
2103 size
+= sprintf(buf
+ size
, "%d: %uMhz %s\n",
2104 i
, clocks
.data
[i
].clocks_in_khz
/ 1000,
2105 (clocks
.data
[i
].clocks_in_khz
/ 1000 == now
/ 100) ? "*" : "");
2109 PP_ASSERT_WITH_CODE(
2110 vega12_get_current_mclk_freq(hwmgr
, &now
) == 0,
2111 "Attempt to get current mclk freq Failed!",
2114 PP_ASSERT_WITH_CODE(
2115 vega12_get_memclocks(hwmgr
, &clocks
) == 0,
2116 "Attempt to get memory clk levels Failed!",
2118 for (i
= 0; i
< clocks
.num_levels
; i
++)
2119 size
+= sprintf(buf
+ size
, "%d: %uMhz %s\n",
2120 i
, clocks
.data
[i
].clocks_in_khz
/ 1000,
2121 (clocks
.data
[i
].clocks_in_khz
/ 1000 == now
/ 100) ? "*" : "");
2125 PP_ASSERT_WITH_CODE(
2126 smum_send_msg_to_smc_with_parameter(hwmgr
,
2127 PPSMC_MSG_GetDpmClockFreq
, (PPCLK_SOCCLK
<< 16)) == 0,
2128 "Attempt to get Current SOCCLK Frequency Failed!",
2130 now
= smum_get_argument(hwmgr
);
2132 PP_ASSERT_WITH_CODE(
2133 vega12_get_socclocks(hwmgr
, &clocks
) == 0,
2134 "Attempt to get soc clk levels Failed!",
2136 for (i
= 0; i
< clocks
.num_levels
; i
++)
2137 size
+= sprintf(buf
+ size
, "%d: %uMhz %s\n",
2138 i
, clocks
.data
[i
].clocks_in_khz
/ 1000,
2139 (clocks
.data
[i
].clocks_in_khz
/ 1000 == now
) ? "*" : "");
2143 PP_ASSERT_WITH_CODE(
2144 smum_send_msg_to_smc_with_parameter(hwmgr
,
2145 PPSMC_MSG_GetDpmClockFreq
, (PPCLK_DCEFCLK
<< 16)) == 0,
2146 "Attempt to get Current DCEFCLK Frequency Failed!",
2148 now
= smum_get_argument(hwmgr
);
2150 PP_ASSERT_WITH_CODE(
2151 vega12_get_dcefclocks(hwmgr
, &clocks
) == 0,
2152 "Attempt to get dcef clk levels Failed!",
2154 for (i
= 0; i
< clocks
.num_levels
; i
++)
2155 size
+= sprintf(buf
+ size
, "%d: %uMhz %s\n",
2156 i
, clocks
.data
[i
].clocks_in_khz
/ 1000,
2157 (clocks
.data
[i
].clocks_in_khz
/ 1000 == now
) ? "*" : "");
2169 static int vega12_apply_clocks_adjust_rules(struct pp_hwmgr
*hwmgr
)
2171 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
2172 struct vega12_single_dpm_table
*dpm_table
;
2173 bool vblank_too_short
= false;
2174 bool disable_mclk_switching
;
2175 uint32_t i
, latency
;
2177 disable_mclk_switching
= ((1 < hwmgr
->display_config
->num_display
) &&
2178 !hwmgr
->display_config
->multi_monitor_in_sync
) ||
2180 latency
= hwmgr
->display_config
->dce_tolerable_mclk_in_active_latency
;
2183 dpm_table
= &(data
->dpm_table
.gfx_table
);
2184 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[0].value
;
2185 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2186 dpm_table
->dpm_state
.hard_min_level
= dpm_table
->dpm_levels
[0].value
;
2187 dpm_table
->dpm_state
.hard_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2189 if (PP_CAP(PHM_PlatformCaps_UMDPState
)) {
2190 if (VEGA12_UMD_PSTATE_GFXCLK_LEVEL
< dpm_table
->count
) {
2191 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[VEGA12_UMD_PSTATE_GFXCLK_LEVEL
].value
;
2192 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[VEGA12_UMD_PSTATE_GFXCLK_LEVEL
].value
;
2195 if (hwmgr
->dpm_level
== AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK
) {
2196 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[0].value
;
2197 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[0].value
;
2200 if (hwmgr
->dpm_level
== AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
) {
2201 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2202 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2207 dpm_table
= &(data
->dpm_table
.mem_table
);
2208 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[0].value
;
2209 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2210 dpm_table
->dpm_state
.hard_min_level
= dpm_table
->dpm_levels
[0].value
;
2211 dpm_table
->dpm_state
.hard_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2213 if (PP_CAP(PHM_PlatformCaps_UMDPState
)) {
2214 if (VEGA12_UMD_PSTATE_MCLK_LEVEL
< dpm_table
->count
) {
2215 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[VEGA12_UMD_PSTATE_MCLK_LEVEL
].value
;
2216 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[VEGA12_UMD_PSTATE_MCLK_LEVEL
].value
;
2219 if (hwmgr
->dpm_level
== AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK
) {
2220 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[0].value
;
2221 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[0].value
;
2224 if (hwmgr
->dpm_level
== AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
) {
2225 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2226 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2230 /* honour DAL's UCLK Hardmin */
2231 if (dpm_table
->dpm_state
.hard_min_level
< (hwmgr
->display_config
->min_mem_set_clock
/ 100))
2232 dpm_table
->dpm_state
.hard_min_level
= hwmgr
->display_config
->min_mem_set_clock
/ 100;
2234 /* Hardmin is dependent on displayconfig */
2235 if (disable_mclk_switching
) {
2236 dpm_table
->dpm_state
.hard_min_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2237 for (i
= 0; i
< data
->mclk_latency_table
.count
- 1; i
++) {
2238 if (data
->mclk_latency_table
.entries
[i
].latency
<= latency
) {
2239 if (dpm_table
->dpm_levels
[i
].value
>= (hwmgr
->display_config
->min_mem_set_clock
/ 100)) {
2240 dpm_table
->dpm_state
.hard_min_level
= dpm_table
->dpm_levels
[i
].value
;
2247 if (hwmgr
->display_config
->nb_pstate_switch_disable
)
2248 dpm_table
->dpm_state
.hard_min_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2251 dpm_table
= &(data
->dpm_table
.vclk_table
);
2252 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[0].value
;
2253 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2254 dpm_table
->dpm_state
.hard_min_level
= dpm_table
->dpm_levels
[0].value
;
2255 dpm_table
->dpm_state
.hard_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2257 if (PP_CAP(PHM_PlatformCaps_UMDPState
)) {
2258 if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL
< dpm_table
->count
) {
2259 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[VEGA12_UMD_PSTATE_UVDCLK_LEVEL
].value
;
2260 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[VEGA12_UMD_PSTATE_UVDCLK_LEVEL
].value
;
2263 if (hwmgr
->dpm_level
== AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
) {
2264 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2265 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2270 dpm_table
= &(data
->dpm_table
.dclk_table
);
2271 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[0].value
;
2272 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2273 dpm_table
->dpm_state
.hard_min_level
= dpm_table
->dpm_levels
[0].value
;
2274 dpm_table
->dpm_state
.hard_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2276 if (PP_CAP(PHM_PlatformCaps_UMDPState
)) {
2277 if (VEGA12_UMD_PSTATE_UVDCLK_LEVEL
< dpm_table
->count
) {
2278 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[VEGA12_UMD_PSTATE_UVDCLK_LEVEL
].value
;
2279 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[VEGA12_UMD_PSTATE_UVDCLK_LEVEL
].value
;
2282 if (hwmgr
->dpm_level
== AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
) {
2283 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2284 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2289 dpm_table
= &(data
->dpm_table
.soc_table
);
2290 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[0].value
;
2291 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2292 dpm_table
->dpm_state
.hard_min_level
= dpm_table
->dpm_levels
[0].value
;
2293 dpm_table
->dpm_state
.hard_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2295 if (PP_CAP(PHM_PlatformCaps_UMDPState
)) {
2296 if (VEGA12_UMD_PSTATE_SOCCLK_LEVEL
< dpm_table
->count
) {
2297 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[VEGA12_UMD_PSTATE_SOCCLK_LEVEL
].value
;
2298 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[VEGA12_UMD_PSTATE_SOCCLK_LEVEL
].value
;
2301 if (hwmgr
->dpm_level
== AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
) {
2302 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2303 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2308 dpm_table
= &(data
->dpm_table
.eclk_table
);
2309 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[0].value
;
2310 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2311 dpm_table
->dpm_state
.hard_min_level
= dpm_table
->dpm_levels
[0].value
;
2312 dpm_table
->dpm_state
.hard_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2314 if (PP_CAP(PHM_PlatformCaps_UMDPState
)) {
2315 if (VEGA12_UMD_PSTATE_VCEMCLK_LEVEL
< dpm_table
->count
) {
2316 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL
].value
;
2317 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[VEGA12_UMD_PSTATE_VCEMCLK_LEVEL
].value
;
2320 if (hwmgr
->dpm_level
== AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
) {
2321 dpm_table
->dpm_state
.soft_min_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2322 dpm_table
->dpm_state
.soft_max_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2329 static int vega12_set_uclk_to_highest_dpm_level(struct pp_hwmgr
*hwmgr
,
2330 struct vega12_single_dpm_table
*dpm_table
)
2332 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
2335 if (data
->smu_features
[GNLD_DPM_UCLK
].enabled
) {
2336 PP_ASSERT_WITH_CODE(dpm_table
->count
> 0,
2337 "[SetUclkToHightestDpmLevel] Dpm table has no entry!",
2339 PP_ASSERT_WITH_CODE(dpm_table
->count
<= NUM_UCLK_DPM_LEVELS
,
2340 "[SetUclkToHightestDpmLevel] Dpm table has too many entries!",
2343 dpm_table
->dpm_state
.hard_min_level
= dpm_table
->dpm_levels
[dpm_table
->count
- 1].value
;
2344 PP_ASSERT_WITH_CODE(!(ret
= smum_send_msg_to_smc_with_parameter(hwmgr
,
2345 PPSMC_MSG_SetHardMinByFreq
,
2346 (PPCLK_UCLK
<< 16 ) | dpm_table
->dpm_state
.hard_min_level
)),
2347 "[SetUclkToHightestDpmLevel] Set hard min uclk failed!",
2354 static int vega12_pre_display_configuration_changed_task(struct pp_hwmgr
*hwmgr
)
2356 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
2359 smum_send_msg_to_smc_with_parameter(hwmgr
,
2360 PPSMC_MSG_NumOfDisplays
, 0);
2362 ret
= vega12_set_uclk_to_highest_dpm_level(hwmgr
,
2363 &data
->dpm_table
.mem_table
);
2368 static int vega12_display_configuration_changed_task(struct pp_hwmgr
*hwmgr
)
2370 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
2372 Watermarks_t
*wm_table
= &(data
->smc_state_table
.water_marks_table
);
2374 if ((data
->water_marks_bitmap
& WaterMarksExist
) &&
2375 !(data
->water_marks_bitmap
& WaterMarksLoaded
)) {
2376 result
= smum_smc_table_manager(hwmgr
,
2377 (uint8_t *)wm_table
, TABLE_WATERMARKS
, false);
2378 PP_ASSERT_WITH_CODE(result
, "Failed to update WMTABLE!", return EINVAL
);
2379 data
->water_marks_bitmap
|= WaterMarksLoaded
;
2382 if ((data
->water_marks_bitmap
& WaterMarksExist
) &&
2383 data
->smu_features
[GNLD_DPM_DCEFCLK
].supported
&&
2384 data
->smu_features
[GNLD_DPM_SOCCLK
].supported
)
2385 smum_send_msg_to_smc_with_parameter(hwmgr
,
2386 PPSMC_MSG_NumOfDisplays
, hwmgr
->display_config
->num_display
);
2391 int vega12_enable_disable_uvd_dpm(struct pp_hwmgr
*hwmgr
, bool enable
)
2393 struct vega12_hwmgr
*data
=
2394 (struct vega12_hwmgr
*)(hwmgr
->backend
);
2396 if (data
->smu_features
[GNLD_DPM_UVD
].supported
) {
2397 PP_ASSERT_WITH_CODE(!vega12_enable_smc_features(hwmgr
,
2399 data
->smu_features
[GNLD_DPM_UVD
].smu_feature_bitmap
),
2400 "Attempt to Enable/Disable DPM UVD Failed!",
2402 data
->smu_features
[GNLD_DPM_UVD
].enabled
= enable
;
2408 static void vega12_power_gate_vce(struct pp_hwmgr
*hwmgr
, bool bgate
)
2410 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
2412 if (data
->vce_power_gated
== bgate
)
2415 data
->vce_power_gated
= bgate
;
2416 vega12_enable_disable_vce_dpm(hwmgr
, !bgate
);
2419 static void vega12_power_gate_uvd(struct pp_hwmgr
*hwmgr
, bool bgate
)
2421 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
2423 if (data
->uvd_power_gated
== bgate
)
2426 data
->uvd_power_gated
= bgate
;
2427 vega12_enable_disable_uvd_dpm(hwmgr
, !bgate
);
2431 vega12_check_smc_update_required_for_display_configuration(struct pp_hwmgr
*hwmgr
)
2433 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
2434 bool is_update_required
= false;
2436 if (data
->display_timing
.num_existing_displays
!= hwmgr
->display_config
->num_display
)
2437 is_update_required
= true;
2439 if (data
->registry_data
.gfx_clk_deep_sleep_support
) {
2440 if (data
->display_timing
.min_clock_in_sr
!= hwmgr
->display_config
->min_core_set_clock_in_sr
)
2441 is_update_required
= true;
2444 return is_update_required
;
/* Disable all SMU features; logs on failure and returns the error code. */
static int vega12_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	tmp_result = vega12_disable_all_smu_features(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable all smu features!", result = tmp_result);

	return result;
}
2458 static int vega12_power_off_asic(struct pp_hwmgr
*hwmgr
)
2460 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
2463 result
= vega12_disable_dpm_tasks(hwmgr
);
2464 PP_ASSERT_WITH_CODE((0 == result
),
2465 "[disable_dpm_tasks] Failed to disable DPM!",
2467 data
->water_marks_bitmap
&= ~(WaterMarksLoaded
);
2473 static void vega12_find_min_clock_index(struct pp_hwmgr
*hwmgr
,
2474 uint32_t *sclk_idx
, uint32_t *mclk_idx
,
2475 uint32_t min_sclk
, uint32_t min_mclk
)
2477 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
2478 struct vega12_dpm_table
*dpm_table
= &(data
->dpm_table
);
2481 for (i
= 0; i
< dpm_table
->gfx_table
.count
; i
++) {
2482 if (dpm_table
->gfx_table
.dpm_levels
[i
].enabled
&&
2483 dpm_table
->gfx_table
.dpm_levels
[i
].value
>= min_sclk
) {
2489 for (i
= 0; i
< dpm_table
->mem_table
.count
; i
++) {
2490 if (dpm_table
->mem_table
.dpm_levels
[i
].enabled
&&
2491 dpm_table
->mem_table
.dpm_levels
[i
].value
>= min_mclk
) {
/*
 * Power-profile hook — not implemented for vega12.
 * NOTE(review): body was lost in extraction; presumed a bare stub
 * returning success — confirm against the original file.
 */
static int vega12_set_power_profile_state(struct pp_hwmgr *hwmgr,
		struct amd_pp_profile *request)
{
	return 0;
}
2506 static int vega12_get_sclk_od(struct pp_hwmgr
*hwmgr
)
2508 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
2509 struct vega12_single_dpm_table
*sclk_table
= &(data
->dpm_table
.gfx_table
);
2510 struct vega12_single_dpm_table
*golden_sclk_table
=
2511 &(data
->golden_dpm_table
.gfx_table
);
2512 int value
= sclk_table
->dpm_levels
[sclk_table
->count
- 1].value
;
2513 int golden_value
= golden_sclk_table
->dpm_levels
2514 [golden_sclk_table
->count
- 1].value
;
2516 value
-= golden_value
;
2517 value
= DIV_ROUND_UP(value
* 100, golden_value
);
2522 static int vega12_set_sclk_od(struct pp_hwmgr
*hwmgr
, uint32_t value
)
2527 static int vega12_get_mclk_od(struct pp_hwmgr
*hwmgr
)
2529 struct vega12_hwmgr
*data
= (struct vega12_hwmgr
*)(hwmgr
->backend
);
2530 struct vega12_single_dpm_table
*mclk_table
= &(data
->dpm_table
.mem_table
);
2531 struct vega12_single_dpm_table
*golden_mclk_table
=
2532 &(data
->golden_dpm_table
.mem_table
);
2533 int value
= mclk_table
->dpm_levels
[mclk_table
->count
- 1].value
;
2534 int golden_value
= golden_mclk_table
->dpm_levels
2535 [golden_mclk_table
->count
- 1].value
;
2537 value
-= golden_value
;
2538 value
= DIV_ROUND_UP(value
* 100, golden_value
);
2543 static int vega12_set_mclk_od(struct pp_hwmgr
*hwmgr
, uint32_t value
)
2549 static int vega12_notify_cac_buffer_info(struct pp_hwmgr
*hwmgr
,
2550 uint32_t virtual_addr_low
,
2551 uint32_t virtual_addr_hi
,
2552 uint32_t mc_addr_low
,
2553 uint32_t mc_addr_hi
,
2556 smum_send_msg_to_smc_with_parameter(hwmgr
,
2557 PPSMC_MSG_SetSystemVirtualDramAddrHigh
,
2559 smum_send_msg_to_smc_with_parameter(hwmgr
,
2560 PPSMC_MSG_SetSystemVirtualDramAddrLow
,
2562 smum_send_msg_to_smc_with_parameter(hwmgr
,
2563 PPSMC_MSG_DramLogSetDramAddrHigh
,
2566 smum_send_msg_to_smc_with_parameter(hwmgr
,
2567 PPSMC_MSG_DramLogSetDramAddrLow
,
2570 smum_send_msg_to_smc_with_parameter(hwmgr
,
2571 PPSMC_MSG_DramLogSetDramSize
,
2576 static int vega12_get_thermal_temperature_range(struct pp_hwmgr
*hwmgr
,
2577 struct PP_TemperatureRange
*thermal_data
)
2579 struct vega12_hwmgr
*data
=
2580 (struct vega12_hwmgr
*)(hwmgr
->backend
);
2581 PPTable_t
*pp_table
= &(data
->smc_state_table
.pp_table
);
2583 memcpy(thermal_data
, &SMU7ThermalWithDelayPolicy
[0], sizeof(struct PP_TemperatureRange
));
2585 thermal_data
->max
= pp_table
->TedgeLimit
*
2586 PP_TEMPERATURE_UNITS_PER_CENTIGRADES
;
2587 thermal_data
->edge_emergency_max
= (pp_table
->TedgeLimit
+ CTF_OFFSET_EDGE
) *
2588 PP_TEMPERATURE_UNITS_PER_CENTIGRADES
;
2589 thermal_data
->hotspot_crit_max
= pp_table
->ThotspotLimit
*
2590 PP_TEMPERATURE_UNITS_PER_CENTIGRADES
;
2591 thermal_data
->hotspot_emergency_max
= (pp_table
->ThotspotLimit
+ CTF_OFFSET_HOTSPOT
) *
2592 PP_TEMPERATURE_UNITS_PER_CENTIGRADES
;
2593 thermal_data
->mem_crit_max
= pp_table
->ThbmLimit
*
2594 PP_TEMPERATURE_UNITS_PER_CENTIGRADES
;
2595 thermal_data
->mem_emergency_max
= (pp_table
->ThbmLimit
+ CTF_OFFSET_HBM
)*
2596 PP_TEMPERATURE_UNITS_PER_CENTIGRADES
;
2601 static int vega12_enable_gfx_off(struct pp_hwmgr
*hwmgr
)
2603 struct vega12_hwmgr
*data
=
2604 (struct vega12_hwmgr
*)(hwmgr
->backend
);
2607 if (data
->gfxoff_controlled_by_driver
)
2608 ret
= smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_AllowGfxOff
);
2613 static int vega12_disable_gfx_off(struct pp_hwmgr
*hwmgr
)
2615 struct vega12_hwmgr
*data
=
2616 (struct vega12_hwmgr
*)(hwmgr
->backend
);
2619 if (data
->gfxoff_controlled_by_driver
)
2620 ret
= smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_DisallowGfxOff
);
2625 static int vega12_gfx_off_control(struct pp_hwmgr
*hwmgr
, bool enable
)
2628 return vega12_enable_gfx_off(hwmgr
);
2630 return vega12_disable_gfx_off(hwmgr
);
2633 static int vega12_get_performance_level(struct pp_hwmgr
*hwmgr
, const struct pp_hw_power_state
*state
,
2634 PHM_PerformanceLevelDesignation designation
, uint32_t index
,
2635 PHM_PerformanceLevel
*level
)
2640 static int vega12_set_mp1_state(struct pp_hwmgr
*hwmgr
,
2641 enum pp_mp1_state mp1_state
)
2646 switch (mp1_state
) {
2647 case PP_MP1_STATE_UNLOAD
:
2648 msg
= PPSMC_MSG_PrepareMp1ForUnload
;
2650 case PP_MP1_STATE_SHUTDOWN
:
2651 case PP_MP1_STATE_RESET
:
2652 case PP_MP1_STATE_NONE
:
2657 PP_ASSERT_WITH_CODE((ret
= smum_send_msg_to_smc(hwmgr
, msg
)) == 0,
2658 "[PrepareMp1] Failed!",
2664 static const struct pp_hwmgr_func vega12_hwmgr_funcs
= {
2665 .backend_init
= vega12_hwmgr_backend_init
,
2666 .backend_fini
= vega12_hwmgr_backend_fini
,
2667 .asic_setup
= vega12_setup_asic_task
,
2668 .dynamic_state_management_enable
= vega12_enable_dpm_tasks
,
2669 .dynamic_state_management_disable
= vega12_disable_dpm_tasks
,
2670 .patch_boot_state
= vega12_patch_boot_state
,
2671 .get_sclk
= vega12_dpm_get_sclk
,
2672 .get_mclk
= vega12_dpm_get_mclk
,
2673 .notify_smc_display_config_after_ps_adjustment
=
2674 vega12_notify_smc_display_config_after_ps_adjustment
,
2675 .force_dpm_level
= vega12_dpm_force_dpm_level
,
2676 .stop_thermal_controller
= vega12_thermal_stop_thermal_controller
,
2677 .get_fan_speed_info
= vega12_fan_ctrl_get_fan_speed_info
,
2678 .reset_fan_speed_to_default
=
2679 vega12_fan_ctrl_reset_fan_speed_to_default
,
2680 .get_fan_speed_rpm
= vega12_fan_ctrl_get_fan_speed_rpm
,
2681 .set_fan_control_mode
= vega12_set_fan_control_mode
,
2682 .get_fan_control_mode
= vega12_get_fan_control_mode
,
2683 .read_sensor
= vega12_read_sensor
,
2684 .get_dal_power_level
= vega12_get_dal_power_level
,
2685 .get_clock_by_type_with_latency
= vega12_get_clock_by_type_with_latency
,
2686 .get_clock_by_type_with_voltage
= vega12_get_clock_by_type_with_voltage
,
2687 .set_watermarks_for_clocks_ranges
= vega12_set_watermarks_for_clocks_ranges
,
2688 .display_clock_voltage_request
= vega12_display_clock_voltage_request
,
2689 .force_clock_level
= vega12_force_clock_level
,
2690 .print_clock_levels
= vega12_print_clock_levels
,
2691 .apply_clocks_adjust_rules
=
2692 vega12_apply_clocks_adjust_rules
,
2693 .pre_display_config_changed
=
2694 vega12_pre_display_configuration_changed_task
,
2695 .display_config_changed
= vega12_display_configuration_changed_task
,
2696 .powergate_uvd
= vega12_power_gate_uvd
,
2697 .powergate_vce
= vega12_power_gate_vce
,
2698 .check_smc_update_required_for_display_configuration
=
2699 vega12_check_smc_update_required_for_display_configuration
,
2700 .power_off_asic
= vega12_power_off_asic
,
2701 .disable_smc_firmware_ctf
= vega12_thermal_disable_alert
,
2703 .set_power_profile_state
= vega12_set_power_profile_state
,
2704 .get_sclk_od
= vega12_get_sclk_od
,
2705 .set_sclk_od
= vega12_set_sclk_od
,
2706 .get_mclk_od
= vega12_get_mclk_od
,
2707 .set_mclk_od
= vega12_set_mclk_od
,
2709 .notify_cac_buffer_info
= vega12_notify_cac_buffer_info
,
2710 .get_thermal_temperature_range
= vega12_get_thermal_temperature_range
,
2711 .register_irq_handlers
= smu9_register_irq_handlers
,
2712 .start_thermal_controller
= vega12_start_thermal_controller
,
2713 .powergate_gfx
= vega12_gfx_off_control
,
2714 .get_performance_level
= vega12_get_performance_level
,
2715 .get_asic_baco_capability
= smu9_baco_get_capability
,
2716 .get_asic_baco_state
= smu9_baco_get_state
,
2717 .set_asic_baco_state
= vega12_baco_set_state
,
2718 .get_ppfeature_status
= vega12_get_ppfeature_status
,
2719 .set_ppfeature_status
= vega12_set_ppfeature_status
,
2720 .set_mp1_state
= vega12_set_mp1_state
,
2723 int vega12_hwmgr_init(struct pp_hwmgr
*hwmgr
)
2725 hwmgr
->hwmgr_func
= &vega12_hwmgr_funcs
;
2726 hwmgr
->pptable_func
= &vega12_pptable_funcs
;