/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/delay.h>

#include <linux/module.h>
#include <linux/slab.h>

#include "amd_powerplay.h"
#include "vega20_smumgr.h"
#include "hardwaremanager.h"
#include "ppatomfwctrl.h"
#include "atomfirmware.h"
#include "cgs_common.h"
#include "vega20_powertune.h"
#include "vega20_inc.h"
#include "pppcielanes.h"
#include "vega20_hwmgr.h"
#include "vega20_processpptables.h"
#include "vega20_pptable.h"
#include "vega20_thermal.h"
#include "vega20_ppsmc.h"

#include "amd_pcie_helpers.h"
#include "ppinterrupt.h"
#include "pp_overdriver.h"
#include "pp_thermal.h"
#include "soc15_common.h"
#include "vega20_baco.h"
#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"
#include "nbio/nbio_7_4_sh_mask.h"
#define smnPCIE_LC_SPEED_CNTL			0x11140290
#define smnPCIE_LC_LINK_WIDTH_CNTL		0x11140288
static void vega20_set_default_registry_data(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);

	data->gfxclk_average_alpha = PPVEGA20_VEGA20GFXCLKAVERAGEALPHA_DFLT;
	data->socclk_average_alpha = PPVEGA20_VEGA20SOCCLKAVERAGEALPHA_DFLT;
	data->uclk_average_alpha = PPVEGA20_VEGA20UCLKCLKAVERAGEALPHA_DFLT;
	data->gfx_activity_average_alpha = PPVEGA20_VEGA20GFXACTIVITYAVERAGEALPHA_DFLT;
	data->lowest_uclk_reserved_for_ulv = PPVEGA20_VEGA20LOWESTUCLKRESERVEDFORULV_DFLT;

	data->display_voltage_mode = PPVEGA20_VEGA20DISPLAYVOLTAGEMODE_DFLT;
	data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->disp_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->disp_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->disp_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->phy_clk_quad_eqn_a = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->phy_clk_quad_eqn_b = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;
	data->phy_clk_quad_eqn_c = PPREGKEY_VEGA20QUADRATICEQUATION_DFLT;

	/*
	 * Disable the following features for now:
	 */
	data->registry_data.disallowed_features = 0xE0041C00;
	/* ECC feature should be disabled on old SMUs */
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
	hwmgr->smu_version = smum_get_argument(hwmgr);
	if (hwmgr->smu_version < 0x282100)
		data->registry_data.disallowed_features |= FEATURE_ECC_MASK;

	if (!(hwmgr->feature_mask & PP_PCIE_DPM_MASK))
		data->registry_data.disallowed_features |= FEATURE_DPM_LINK_MASK;

	if (!(hwmgr->feature_mask & PP_SCLK_DPM_MASK))
		data->registry_data.disallowed_features |= FEATURE_DPM_GFXCLK_MASK;

	if (!(hwmgr->feature_mask & PP_SOCCLK_DPM_MASK))
		data->registry_data.disallowed_features |= FEATURE_DPM_SOCCLK_MASK;

	if (!(hwmgr->feature_mask & PP_MCLK_DPM_MASK))
		data->registry_data.disallowed_features |= FEATURE_DPM_UCLK_MASK;

	if (!(hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK))
		data->registry_data.disallowed_features |= FEATURE_DPM_DCEFCLK_MASK;

	if (!(hwmgr->feature_mask & PP_ULV_MASK))
		data->registry_data.disallowed_features |= FEATURE_ULV_MASK;

	if (!(hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK))
		data->registry_data.disallowed_features |= FEATURE_DS_GFXCLK_MASK;

	data->registry_data.od_state_in_dc_support = 0;
	data->registry_data.thermal_support = 1;
	data->registry_data.skip_baco_hardware = 0;

	data->registry_data.log_avfs_param = 0;
	data->registry_data.sclk_throttle_low_notification = 1;
	data->registry_data.force_dpm_high = 0;
	data->registry_data.stable_pstate_sclk_dpm_percentage = 75;

	data->registry_data.didt_support = 0;
	if (data->registry_data.didt_support) {
		data->registry_data.didt_mode = 6;
		data->registry_data.sq_ramping_support = 1;
		data->registry_data.db_ramping_support = 0;
		data->registry_data.td_ramping_support = 0;
		data->registry_data.tcp_ramping_support = 0;
		data->registry_data.dbr_ramping_support = 0;
		data->registry_data.edc_didt_support = 1;
		data->registry_data.gc_didt_support = 0;
		data->registry_data.psm_didt_support = 0;
	}

	data->registry_data.pcie_lane_override = 0xff;
	data->registry_data.pcie_speed_override = 0xff;
	data->registry_data.pcie_clock_override = 0xffffffff;
	data->registry_data.regulator_hot_gpio_support = 1;
	data->registry_data.ac_dc_switch_gpio_support = 0;
	data->registry_data.quick_transition_support = 0;
	data->registry_data.zrpm_start_temp = 0xffff;
	data->registry_data.zrpm_stop_temp = 0xffff;
	data->registry_data.od8_feature_enable = 1;
	data->registry_data.disable_water_mark = 0;
	data->registry_data.disable_pp_tuning = 0;
	data->registry_data.disable_xlpp_tuning = 0;
	data->registry_data.disable_workload_policy = 0;
	data->registry_data.perf_ui_tuning_profile_turbo = 0x19190F0F;
	data->registry_data.perf_ui_tuning_profile_powerSave = 0x19191919;
	data->registry_data.perf_ui_tuning_profile_xl = 0x00000F0A;
	data->registry_data.force_workload_policy_mask = 0;
	data->registry_data.disable_3d_fs_detection = 0;
	data->registry_data.fps_support = 1;
	data->registry_data.disable_auto_wattman = 1;
	data->registry_data.auto_wattman_debug = 0;
	data->registry_data.auto_wattman_sample_period = 100;
	data->registry_data.fclk_gfxclk_ratio = 0;
	data->registry_data.auto_wattman_threshold = 50;
	data->registry_data.gfxoff_controlled_by_driver = 1;
	data->gfxoff_allowed = false;
	data->counter_gfxoff = 0;
}
static int vega20_set_features_platform_caps(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	struct amdgpu_device *adev = hwmgr->adev;

	if (data->vddci_control == VEGA20_VOLTAGE_CONTROL_NONE)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ControlVDDCI);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TablelessHardwareInterface);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_BACO);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EnableSMU7ThermalManagement);

	if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_UVDPowerGating);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_VCEPowerGating);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UnTabledHardwareInterface);

	if (data->registry_data.od8_feature_enable)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_OD8inACSupport);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ActivityReporting);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_FanSpeedInTableIsRPM);

	if (data->registry_data.od_state_in_dc_support) {
		if (data->registry_data.od8_feature_enable)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_OD8inDCSupport);
	}

	if (data->registry_data.thermal_support &&
	    data->registry_data.fuzzy_fan_control_support &&
	    hwmgr->thermal_controller.advanceFanControlParameters.usTMax)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ODFuzzyFanControlSupport);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicPowerManagement);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SMC);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalPolicyDelay);

	if (data->registry_data.force_dpm_high)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ExclusiveModeAlwaysHigh);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicUVDState);

	if (data->registry_data.sclk_throttle_low_notification)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SclkThrottleLowNotification);

	/* power tune caps */
	/* assume disabled */
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DiDtSupport);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DiDtEDCEnable);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_GCEDC);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PSM);

	if (data->registry_data.didt_support) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_DiDtSupport);
		if (data->registry_data.sq_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_SQRamping);
		if (data->registry_data.db_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_DBRamping);
		if (data->registry_data.td_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_TDRamping);
		if (data->registry_data.tcp_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_TCPRamping);
		if (data->registry_data.dbr_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_DBRRamping);
		if (data->registry_data.edc_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_DiDtEDCEnable);
		if (data->registry_data.gc_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_GCEDC);
		if (data->registry_data.psm_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_PSM);
	}

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_RegulatorHot);

	if (data->registry_data.ac_dc_switch_gpio_support) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_AutomaticDCTransition);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
	}

	if (data->registry_data.quick_transition_support) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_AutomaticDCTransition);
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme);
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_Falcon_QuickTransition);
	}

	if (data->lowest_uclk_reserved_for_ulv != PPVEGA20_VEGA20LOWESTUCLKRESERVEDFORULV_DFLT) {
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_LowestUclkReservedForUlv);
		if (data->lowest_uclk_reserved_for_ulv == 1)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps,
					PHM_PlatformCaps_LowestUclkReservedForUlv);
	}

	if (data->registry_data.custom_fan_support)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_CustomFanControlSupport);

	return 0;
}
static void vega20_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t top32, bottom32;
	int i;

	data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id =
			FEATURE_DPM_PREFETCHER_BIT;
	data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id =
			FEATURE_DPM_GFXCLK_BIT;
	data->smu_features[GNLD_DPM_UCLK].smu_feature_id =
			FEATURE_DPM_UCLK_BIT;
	data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id =
			FEATURE_DPM_SOCCLK_BIT;
	data->smu_features[GNLD_DPM_UVD].smu_feature_id =
			FEATURE_DPM_UVD_BIT;
	data->smu_features[GNLD_DPM_VCE].smu_feature_id =
			FEATURE_DPM_VCE_BIT;
	data->smu_features[GNLD_ULV].smu_feature_id =
			FEATURE_ULV_BIT;
	data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id =
			FEATURE_DPM_MP0CLK_BIT;
	data->smu_features[GNLD_DPM_LINK].smu_feature_id =
			FEATURE_DPM_LINK_BIT;
	data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id =
			FEATURE_DPM_DCEFCLK_BIT;
	data->smu_features[GNLD_DS_GFXCLK].smu_feature_id =
			FEATURE_DS_GFXCLK_BIT;
	data->smu_features[GNLD_DS_SOCCLK].smu_feature_id =
			FEATURE_DS_SOCCLK_BIT;
	data->smu_features[GNLD_DS_LCLK].smu_feature_id =
			FEATURE_DS_LCLK_BIT;
	data->smu_features[GNLD_PPT].smu_feature_id =
			FEATURE_PPT_BIT;
	data->smu_features[GNLD_TDC].smu_feature_id =
			FEATURE_TDC_BIT;
	data->smu_features[GNLD_THERMAL].smu_feature_id =
			FEATURE_THERMAL_BIT;
	data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id =
			FEATURE_GFX_PER_CU_CG_BIT;
	data->smu_features[GNLD_RM].smu_feature_id =
			FEATURE_RM_BIT;
	data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id =
			FEATURE_DS_DCEFCLK_BIT;
	data->smu_features[GNLD_ACDC].smu_feature_id =
			FEATURE_ACDC_BIT;
	data->smu_features[GNLD_VR0HOT].smu_feature_id =
			FEATURE_VR0HOT_BIT;
	data->smu_features[GNLD_VR1HOT].smu_feature_id =
			FEATURE_VR1HOT_BIT;
	data->smu_features[GNLD_FW_CTF].smu_feature_id =
			FEATURE_FW_CTF_BIT;
	data->smu_features[GNLD_LED_DISPLAY].smu_feature_id =
			FEATURE_LED_DISPLAY_BIT;
	data->smu_features[GNLD_FAN_CONTROL].smu_feature_id =
			FEATURE_FAN_CONTROL_BIT;
	data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
	data->smu_features[GNLD_GFXOFF].smu_feature_id = FEATURE_GFXOFF_BIT;
	data->smu_features[GNLD_CG].smu_feature_id = FEATURE_CG_BIT;
	data->smu_features[GNLD_DPM_FCLK].smu_feature_id = FEATURE_DPM_FCLK_BIT;
	data->smu_features[GNLD_DS_FCLK].smu_feature_id = FEATURE_DS_FCLK_BIT;
	data->smu_features[GNLD_DS_MP1CLK].smu_feature_id = FEATURE_DS_MP1CLK_BIT;
	data->smu_features[GNLD_DS_MP0CLK].smu_feature_id = FEATURE_DS_MP0CLK_BIT;
	data->smu_features[GNLD_XGMI].smu_feature_id = FEATURE_XGMI_BIT;
	data->smu_features[GNLD_ECC].smu_feature_id = FEATURE_ECC_BIT;

	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
		data->smu_features[i].smu_feature_bitmap =
			(uint64_t)(1ULL << data->smu_features[i].smu_feature_id);
		data->smu_features[i].allowed =
			((data->registry_data.disallowed_features >> i) & 1) ?
			false : true;
	}

	/* Get the SN to turn into a Unique ID */
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
	top32 = smum_get_argument(hwmgr);
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
	bottom32 = smum_get_argument(hwmgr);

	adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
}
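/*
 * Illustrative note (editor's addition, not from the original source): the
 * allowed-feature loop in vega20_init_dpm_defaults() treats bit i of
 * registry_data.disallowed_features as "the feature at GNLD index i is not
 * allowed". With the default mask of 0xE0041C00 set earlier, bit 10 is set,
 * for example, so the feature at GNLD index 10 ends up with allowed = false,
 * while any index whose bit is clear stays allowed.
 */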
static int vega20_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
{
	return 0;
}
static int vega20_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
	kfree(hwmgr->backend);
	hwmgr->backend = NULL;

	return 0;
}
static int vega20_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data;
	struct amdgpu_device *adev = hwmgr->adev;

	data = kzalloc(sizeof(struct vega20_hwmgr), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	hwmgr->backend = data;

	hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	vega20_set_default_registry_data(hwmgr);

	data->disable_dpm_mask = 0xff;

	/* need to set voltage control types before EVV patching */
	data->vddc_control = VEGA20_VOLTAGE_CONTROL_NONE;
	data->mvdd_control = VEGA20_VOLTAGE_CONTROL_NONE;
	data->vddci_control = VEGA20_VOLTAGE_CONTROL_NONE;

	data->water_marks_bitmap = 0;
	data->avfs_exist = false;

	vega20_set_features_platform_caps(hwmgr);

	vega20_init_dpm_defaults(hwmgr);

	/* Parse pptable data read from VBIOS */
	vega20_set_private_data_based_on_pptable(hwmgr);

	data->is_tlu_enabled = false;

	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
			VEGA20_MAX_HARDWARE_POWERLEVELS;
	hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
	hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;

	hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
	/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
	hwmgr->platform_descriptor.clockStep.engineClock = 500;
	hwmgr->platform_descriptor.clockStep.memoryClock = 500;

	data->total_active_cus = adev->gfx.cu_info.number;
	data->is_custom_profile_set = false;

	return 0;
}
static int vega20_init_sclk_threshold(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);

	data->low_sclk_interrupt_threshold = 0;

	return 0;
}
static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
	int ret = 0;

	ret = vega20_init_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE(!ret,
			"Failed to init sclk threshold!",
			return ret);

	if (adev->in_gpu_reset &&
	    (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) {
		ret = vega20_baco_apply_vdci_flush_workaround(hwmgr);
		if (ret)
			pr_err("Failed to apply vega20 baco workaround!\n");
	}

	return ret;
}
/*
 * @fn vega20_init_dpm_state
 * @brief Initialize the Soft Min/Max and Hard Min/Max levels to their
 *        defaults: 0 for the minimums and VG20_CLOCK_MAX_DEFAULT for the
 *        maximums.
 *
 * @param dpm_state - the address of the DPM Table to initialize.
 */
static void vega20_init_dpm_state(struct vega20_dpm_state *dpm_state)
{
	dpm_state->soft_min_level = 0x0;
	dpm_state->soft_max_level = VG20_CLOCK_MAX_DEFAULT;
	dpm_state->hard_min_level = 0x0;
	dpm_state->hard_max_level = VG20_CLOCK_MAX_DEFAULT;
}
static int vega20_get_number_of_dpm_level(struct pp_hwmgr *hwmgr,
		PPCLK_e clk_id, uint32_t *num_of_levels)
{
	int ret = 0;

	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_GetDpmFreqByIndex,
			(clk_id << 16 | 0xFF));
	PP_ASSERT_WITH_CODE(!ret,
			"[GetNumOfDpmLevel] failed to get dpm levels!",
			return ret);

	*num_of_levels = smum_get_argument(hwmgr);
	PP_ASSERT_WITH_CODE(*num_of_levels > 0,
			"[GetNumOfDpmLevel] number of clk levels is invalid!",
			return -EINVAL);

	return ret;
}
static int vega20_get_dpm_frequency_by_index(struct pp_hwmgr *hwmgr,
		PPCLK_e clk_id, uint32_t index, uint32_t *clk)
{
	int ret = 0;

	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_GetDpmFreqByIndex,
			(clk_id << 16 | index));
	PP_ASSERT_WITH_CODE(!ret,
			"[GetDpmFreqByIndex] failed to get dpm freq by index!",
			return ret);

	*clk = smum_get_argument(hwmgr);
	PP_ASSERT_WITH_CODE(*clk,
			"[GetDpmFreqByIndex] clk value is invalid!",
			return -EINVAL);

	return ret;
}
static int vega20_setup_single_dpm_table(struct pp_hwmgr *hwmgr,
		struct vega20_single_dpm_table *dpm_table, PPCLK_e clk_id)
{
	int ret = 0;
	uint32_t i, num_of_levels, clk;

	ret = vega20_get_number_of_dpm_level(hwmgr, clk_id, &num_of_levels);
	PP_ASSERT_WITH_CODE(!ret,
			"[SetupSingleDpmTable] failed to get clk levels!",
			return ret);

	dpm_table->count = num_of_levels;

	for (i = 0; i < num_of_levels; i++) {
		ret = vega20_get_dpm_frequency_by_index(hwmgr, clk_id, i, &clk);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupSingleDpmTable] failed to get clk of specific level!",
				return ret);
		dpm_table->dpm_levels[i].value = clk;
		dpm_table->dpm_levels[i].enabled = true;
	}

	return ret;
}
static int vega20_setup_gfxclk_dpm_table(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_single_dpm_table *dpm_table;
	int ret = 0;

	dpm_table = &(data->dpm_table.gfx_table);
	if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_GFXCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get gfxclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.gfx_clock / 100;
	}

	return ret;
}
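/*
 * Editor's note (assumption about units): when GFXCLK DPM is disabled,
 * vega20_setup_gfxclk_dpm_table() falls back to a single level derived from
 * the VBIOS boot value divided by 100. If vbios_boot_state.gfx_clock is stored
 * in 10 kHz units (e.g. 135000), the table entry becomes 1350, i.e. the boot
 * clock expressed in MHz.
 */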
static int vega20_setup_memclk_dpm_table(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_single_dpm_table *dpm_table;
	int ret = 0;

	dpm_table = &(data->dpm_table.mem_table);
	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_UCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get memclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.mem_clock / 100;
	}

	return ret;
}
/*
 * Initialize all DPM state tables for the SMU based on the dependency table.
 * The dynamic state patching function will then trim these state tables to
 * the allowed range based on the power policy or external client requests,
 * such as a UVD request, etc.
 */
static int vega20_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_single_dpm_table *dpm_table;
	int ret = 0;

	memset(&data->dpm_table, 0, sizeof(data->dpm_table));

	dpm_table = &(data->dpm_table.soc_table);
	if (data->smu_features[GNLD_DPM_SOCCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_SOCCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get socclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.soc_clock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	dpm_table = &(data->dpm_table.gfx_table);
	ret = vega20_setup_gfxclk_dpm_table(hwmgr);
	if (ret)
		return ret;
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	dpm_table = &(data->dpm_table.mem_table);
	ret = vega20_setup_memclk_dpm_table(hwmgr);
	if (ret)
		return ret;
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	dpm_table = &(data->dpm_table.eclk_table);
	if (data->smu_features[GNLD_DPM_VCE].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_ECLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get eclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.eclock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	dpm_table = &(data->dpm_table.vclk_table);
	if (data->smu_features[GNLD_DPM_UVD].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_VCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get vclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.vclock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	dpm_table = &(data->dpm_table.dclk_table);
	if (data->smu_features[GNLD_DPM_UVD].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get dclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.dclock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	dpm_table = &(data->dpm_table.dcef_table);
	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DCEFCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get dcefclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.dcef_clock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	dpm_table = &(data->dpm_table.pixel_table);
	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PIXCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get pixclk dpm levels!",
				return ret);
	} else
		dpm_table->count = 0;
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	dpm_table = &(data->dpm_table.display_table);
	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_DISPCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get dispclk dpm levels!",
				return ret);
	} else
		dpm_table->count = 0;
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	dpm_table = &(data->dpm_table.phy_table);
	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_PHYCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get phyclk dpm levels!",
				return ret);
	} else
		dpm_table->count = 0;
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	dpm_table = &(data->dpm_table.fclk_table);
	if (data->smu_features[GNLD_DPM_FCLK].enabled) {
		ret = vega20_setup_single_dpm_table(hwmgr, dpm_table, PPCLK_FCLK);
		PP_ASSERT_WITH_CODE(!ret,
				"[SetupDefaultDpmTable] failed to get fclk dpm levels!",
				return ret);
	} else {
		dpm_table->count = 1;
		dpm_table->dpm_levels[0].value = data->vbios_boot_state.fclock / 100;
	}
	vega20_init_dpm_state(&(dpm_table->dpm_state));

	/* save a copy of the default DPM table */
	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
			sizeof(struct vega20_dpm_table));

	return 0;
}
/**
 * Initializes the SMC table and uploads it
 *
 * @param hwmgr  the address of the powerplay hardware manager.
 * @param pInput the pointer to input data (PowerState)
 */
static int vega20_init_smc_table(struct pp_hwmgr *hwmgr)
{
	int result;
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct pp_atomfwctrl_bios_boot_up_values boot_up_values;
	struct phm_ppt_v3_information *pptable_information =
		(struct phm_ppt_v3_information *)hwmgr->pptable;

	result = pp_atomfwctrl_get_vbios_bootup_values(hwmgr, &boot_up_values);
	PP_ASSERT_WITH_CODE(!result,
			"[InitSMCTable] Failed to get vbios bootup values!",
			return result);

	data->vbios_boot_state.vddc      = boot_up_values.usVddc;
	data->vbios_boot_state.vddci     = boot_up_values.usVddci;
	data->vbios_boot_state.mvddc     = boot_up_values.usMvddc;
	data->vbios_boot_state.gfx_clock = boot_up_values.ulGfxClk;
	data->vbios_boot_state.mem_clock = boot_up_values.ulUClk;
	data->vbios_boot_state.soc_clock = boot_up_values.ulSocClk;
	data->vbios_boot_state.dcef_clock = boot_up_values.ulDCEFClk;
	data->vbios_boot_state.eclock = boot_up_values.ulEClk;
	data->vbios_boot_state.vclock = boot_up_values.ulVClk;
	data->vbios_boot_state.dclock = boot_up_values.ulDClk;
	data->vbios_boot_state.fclock = boot_up_values.ulFClk;
	data->vbios_boot_state.uc_cooling_id = boot_up_values.ucCoolingID;

	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetMinDeepSleepDcefclk,
			(uint32_t)(data->vbios_boot_state.dcef_clock / 100));

	memcpy(pp_table, pptable_information->smc_pptable, sizeof(PPTable_t));

	result = smum_smc_table_manager(hwmgr,
					(uint8_t *)pp_table, TABLE_PPTABLE, false);
	PP_ASSERT_WITH_CODE(!result,
			"[InitSMCTable] Failed to upload PPtable!",
			return result);

	return 0;
}
/*
 * Override PCIe link speed and link width for DPM Level 1. PPTable entries
 * reflect the ASIC capabilities and not the system capabilities. For example,
 * for a Vega20 board in a PCIe Gen3 system, the SMU would fail when trying to
 * switch to DPM1 because the system does not support Gen4.
 */
static int vega20_override_pcie_parameters(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	uint32_t pcie_gen = 0, pcie_width = 0, smu_pcie_arg;
	int ret;

	if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN4)
		pcie_gen = 3;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3)
		pcie_gen = 2;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2)
		pcie_gen = 1;
	else if (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1)
		pcie_gen = 0;

	if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X16)
		pcie_width = 6;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X12)
		pcie_width = 5;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X8)
		pcie_width = 4;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X4)
		pcie_width = 3;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X2)
		pcie_width = 2;
	else if (adev->pm.pcie_mlw_mask & CAIL_PCIE_LINK_WIDTH_SUPPORT_X1)
		pcie_width = 1;

	/* Bit 31:16: LCLK DPM level. 0 is DPM0, and 1 is DPM1
	 * Bit 15:8:  PCIE GEN, 0 to 3 corresponds to GEN1 to GEN4
	 * Bit 7:0:   PCIE lane width, 1 to 7 corresponds to x1 to x32
	 */
	smu_pcie_arg = (1 << 16) | (pcie_gen << 8) | pcie_width;
	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_OverridePcieParameters, smu_pcie_arg);
	PP_ASSERT_WITH_CODE(!ret,
			"[OverridePcieParameters] Attempt to override pcie params failed!",
			return ret);

	data->pcie_parameters_override = true;
	data->pcie_gen_level1 = pcie_gen;
	data->pcie_width_level1 = pcie_width;

	return 0;
}
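/*
 * Worked example (editor's addition, illustrative only): on a host limited to
 * PCIe Gen3 x16, the selection above yields pcie_gen = 2 and pcie_width = 6,
 * so the argument sent for DPM level 1 is (1 << 16) | (2 << 8) | 6 = 0x10206.
 */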
static int vega20_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	uint32_t allowed_features_low = 0, allowed_features_high = 0;
	int i;
	int ret = 0;

	for (i = 0; i < GNLD_FEATURES_MAX; i++)
		if (data->smu_features[i].allowed)
			data->smu_features[i].smu_feature_id > 31 ?
				(allowed_features_high |=
				 ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_HIGH_SHIFT)
				  & 0xFFFFFFFF)) :
				(allowed_features_low |=
				 ((data->smu_features[i].smu_feature_bitmap >> SMU_FEATURES_LOW_SHIFT)
				  & 0xFFFFFFFF));

	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetAllowedFeaturesMaskHigh, allowed_features_high);
	PP_ASSERT_WITH_CODE(!ret,
			"[SetAllowedFeaturesMask] Attempt to set allowed features mask(high) failed!",
			return ret);

	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetAllowedFeaturesMaskLow, allowed_features_low);
	PP_ASSERT_WITH_CODE(!ret,
			"[SetAllowedFeaturesMask] Attempt to set allowed features mask (low) failed!",
			return ret);

	return 0;
}
static int vega20_run_btc(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunBtc);
}

static int vega20_run_btc_afll(struct pp_hwmgr *hwmgr)
{
	return smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAfllBtc);
}
static int vega20_enable_all_smu_features(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	uint64_t features_enabled;
	int i;
	bool enabled;
	int ret = 0;

	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
			PPSMC_MSG_EnableAllSmuFeatures)) == 0,
			"[EnableAllSMUFeatures] Failed to enable all smu features!",
			return ret);

	ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
	PP_ASSERT_WITH_CODE(!ret,
			"[EnableAllSmuFeatures] Failed to get enabled smc features!",
			return ret);

	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
		enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
			true : false;
		data->smu_features[i].enabled = enabled;
		data->smu_features[i].supported = enabled;

		if (data->smu_features[i].allowed && !enabled)
			pr_info("[EnableAllSMUFeatures] feature %d is expected enabled!", i);
		else if (!data->smu_features[i].allowed && enabled)
			pr_info("[EnableAllSMUFeatures] feature %d is expected disabled!", i);
	}

	return 0;
}
static int vega20_notify_smc_display_change(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);

	if (data->smu_features[GNLD_DPM_UCLK].enabled)
		return smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetUclkFastSwitch,
			1);

	return 0;
}
static int vega20_send_clock_ratio(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);

	return smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetFclkGfxClkRatio,
			data->registry_data.fclk_gfxclk_ratio);
}
static int vega20_disable_all_smu_features(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	uint64_t features_enabled;
	int i;
	bool enabled;
	int ret = 0;

	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr,
			PPSMC_MSG_DisableAllSmuFeatures)) == 0,
			"[DisableAllSMUFeatures] Failed to disable all smu features!",
			return ret);

	ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
	PP_ASSERT_WITH_CODE(!ret,
			"[DisableAllSMUFeatures] Failed to get enabled smc features!",
			return ret);

	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
		enabled = (features_enabled & data->smu_features[i].smu_feature_bitmap) ?
			true : false;
		data->smu_features[i].enabled = enabled;
		data->smu_features[i].supported = enabled;
	}

	return 0;
}
static int vega20_od8_set_feature_capabilities(
		struct pp_hwmgr *hwmgr)
{
	struct phm_ppt_v3_information *pptable_information =
		(struct phm_ppt_v3_information *)hwmgr->pptable;
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct vega20_od8_settings *od_settings = &(data->od8_settings);

	od_settings->overdrive8_capabilities = 0;

	if (data->smu_features[GNLD_DPM_GFXCLK].enabled) {
		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_LIMITS] &&
		    pptable_information->od_settings_max[OD8_SETTING_GFXCLK_FMAX] > 0 &&
		    pptable_information->od_settings_min[OD8_SETTING_GFXCLK_FMIN] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_FMAX] >=
		     pptable_information->od_settings_min[OD8_SETTING_GFXCLK_FMIN]))
			od_settings->overdrive8_capabilities |= OD8_GFXCLK_LIMITS;

		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_GFXCLK_CURVE] &&
		    (pptable_information->od_settings_min[OD8_SETTING_GFXCLK_VOLTAGE1] >=
		     pp_table->MinVoltageGfx / VOLTAGE_SCALE) &&
		    (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_VOLTAGE3] <=
		     pp_table->MaxVoltageGfx / VOLTAGE_SCALE) &&
		    (pptable_information->od_settings_max[OD8_SETTING_GFXCLK_VOLTAGE3] >=
		     pptable_information->od_settings_min[OD8_SETTING_GFXCLK_VOLTAGE1]))
			od_settings->overdrive8_capabilities |= OD8_GFXCLK_CURVE;
	}

	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
		pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] =
			data->dpm_table.mem_table.dpm_levels[data->dpm_table.mem_table.count - 2].value;
		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_UCLK_MAX] &&
		    pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX] > 0 &&
		    pptable_information->od_settings_max[OD8_SETTING_UCLK_FMAX] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_UCLK_FMAX] >=
		     pptable_information->od_settings_min[OD8_SETTING_UCLK_FMAX]))
			od_settings->overdrive8_capabilities |= OD8_UCLK_MAX;
	}

	if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_POWER_LIMIT] &&
	    pptable_information->od_settings_max[OD8_SETTING_POWER_PERCENTAGE] > 0 &&
	    pptable_information->od_settings_max[OD8_SETTING_POWER_PERCENTAGE] <= 100 &&
	    pptable_information->od_settings_min[OD8_SETTING_POWER_PERCENTAGE] > 0 &&
	    pptable_information->od_settings_min[OD8_SETTING_POWER_PERCENTAGE] <= 100)
		od_settings->overdrive8_capabilities |= OD8_POWER_LIMIT;

	if (data->smu_features[GNLD_FAN_CONTROL].enabled) {
		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_ACOUSTIC_LIMIT] &&
		    pptable_information->od_settings_min[OD8_SETTING_FAN_ACOUSTIC_LIMIT] > 0 &&
		    pptable_information->od_settings_max[OD8_SETTING_FAN_ACOUSTIC_LIMIT] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_FAN_ACOUSTIC_LIMIT] >=
		     pptable_information->od_settings_min[OD8_SETTING_FAN_ACOUSTIC_LIMIT]))
			od_settings->overdrive8_capabilities |= OD8_ACOUSTIC_LIMIT_SCLK;

		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_SPEED_MIN] &&
		    (pptable_information->od_settings_min[OD8_SETTING_FAN_MIN_SPEED] >=
		     (pp_table->FanPwmMin * pp_table->FanMaximumRpm / 100)) &&
		    pptable_information->od_settings_max[OD8_SETTING_FAN_MIN_SPEED] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_FAN_MIN_SPEED] >=
		     pptable_information->od_settings_min[OD8_SETTING_FAN_MIN_SPEED]))
			od_settings->overdrive8_capabilities |= OD8_FAN_SPEED_MIN;
	}

	if (data->smu_features[GNLD_THERMAL].enabled) {
		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_TEMPERATURE_FAN] &&
		    pptable_information->od_settings_max[OD8_SETTING_FAN_TARGET_TEMP] > 0 &&
		    pptable_information->od_settings_min[OD8_SETTING_FAN_TARGET_TEMP] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_FAN_TARGET_TEMP] >=
		     pptable_information->od_settings_min[OD8_SETTING_FAN_TARGET_TEMP]))
			od_settings->overdrive8_capabilities |= OD8_TEMPERATURE_FAN;

		if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_TEMPERATURE_SYSTEM] &&
		    pptable_information->od_settings_max[OD8_SETTING_OPERATING_TEMP_MAX] > 0 &&
		    pptable_information->od_settings_min[OD8_SETTING_OPERATING_TEMP_MAX] > 0 &&
		    (pptable_information->od_settings_max[OD8_SETTING_OPERATING_TEMP_MAX] >=
		     pptable_information->od_settings_min[OD8_SETTING_OPERATING_TEMP_MAX]))
			od_settings->overdrive8_capabilities |= OD8_TEMPERATURE_SYSTEM;
	}

	if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_MEMORY_TIMING_TUNE])
		od_settings->overdrive8_capabilities |= OD8_MEMORY_TIMING_TUNE;

	if (pptable_information->od_feature_capabilities[ATOM_VEGA20_ODFEATURE_FAN_ZERO_RPM_CONTROL] &&
	    pp_table->FanZeroRpmEnable)
		od_settings->overdrive8_capabilities |= OD8_FAN_ZERO_RPM_CONTROL;

	if (!od_settings->overdrive8_capabilities)
		hwmgr->od_enabled = false;

	return 0;
}
static int vega20_od8_set_feature_id(
		struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_od8_settings *od_settings = &(data->od8_settings);

	if (od_settings->overdrive8_capabilities & OD8_GFXCLK_LIMITS) {
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id =
			OD8_GFXCLK_LIMITS;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id =
			OD8_GFXCLK_LIMITS;
	} else {
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].feature_id =
			0;
	}

	if (od_settings->overdrive8_capabilities & OD8_GFXCLK_CURVE) {
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id =
			OD8_GFXCLK_CURVE;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id =
			OD8_GFXCLK_CURVE;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id =
			OD8_GFXCLK_CURVE;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id =
			OD8_GFXCLK_CURVE;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id =
			OD8_GFXCLK_CURVE;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id =
			OD8_GFXCLK_CURVE;
	} else {
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].feature_id =
			0;
		od_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id =
			0;
	}

	if (od_settings->overdrive8_capabilities & OD8_UCLK_MAX)
		od_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id = OD8_UCLK_MAX;
	else
		od_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].feature_id = 0;

	if (od_settings->overdrive8_capabilities & OD8_POWER_LIMIT)
		od_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].feature_id = OD8_POWER_LIMIT;
	else
		od_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].feature_id = 0;

	if (od_settings->overdrive8_capabilities & OD8_ACOUSTIC_LIMIT_SCLK)
		od_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].feature_id =
			OD8_ACOUSTIC_LIMIT_SCLK;
	else
		od_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].feature_id =
			0;

	if (od_settings->overdrive8_capabilities & OD8_FAN_SPEED_MIN)
		od_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].feature_id =
			OD8_FAN_SPEED_MIN;
	else
		od_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].feature_id =
			0;

	if (od_settings->overdrive8_capabilities & OD8_TEMPERATURE_FAN)
		od_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].feature_id =
			OD8_TEMPERATURE_FAN;
	else
		od_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].feature_id =
			0;

	if (od_settings->overdrive8_capabilities & OD8_TEMPERATURE_SYSTEM)
		od_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].feature_id =
			OD8_TEMPERATURE_SYSTEM;
	else
		od_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].feature_id =
			0;

	return 0;
}
static int vega20_od8_get_gfx_clock_base_voltage(
		struct pp_hwmgr *hwmgr,
		uint32_t *voltage,
		uint32_t freq)
{
	int ret = 0;

	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_GetAVFSVoltageByDpm,
			((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq));
	PP_ASSERT_WITH_CODE(!ret,
			"[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!",
			return ret);

	*voltage = smum_get_argument(hwmgr);
	*voltage = *voltage / VOLTAGE_SCALE;

	return 0;
}
static int vega20_od8_initialize_default_settings(
		struct pp_hwmgr *hwmgr)
{
	struct phm_ppt_v3_information *pptable_information =
		(struct phm_ppt_v3_information *)hwmgr->pptable;
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_od8_settings *od8_settings = &(data->od8_settings);
	OverDriveTable_t *od_table = &(data->smc_state_table.overdrive_table);
	int i, ret = 0;

	/* Set Feature Capabilities */
	vega20_od8_set_feature_capabilities(hwmgr);

	/* Map FeatureID to individual settings */
	vega20_od8_set_feature_id(hwmgr);

	/* Set default values */
	ret = smum_smc_table_manager(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE, true);
	PP_ASSERT_WITH_CODE(!ret,
			"Failed to export over drive table!",
			return ret);

	if (od8_settings->overdrive8_capabilities & OD8_GFXCLK_LIMITS) {
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].default_value =
			od_table->GfxclkFmin;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].default_value =
			od_table->GfxclkFmax;
	} else {
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMIN].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FMAX].default_value =
			0;
	}

	if (od8_settings->overdrive8_capabilities & OD8_GFXCLK_CURVE) {
		od_table->GfxclkFreq1 = od_table->GfxclkFmin;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].default_value =
			od_table->GfxclkFreq1;

		od_table->GfxclkFreq3 = od_table->GfxclkFmax;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].default_value =
			od_table->GfxclkFreq3;

		od_table->GfxclkFreq2 = (od_table->GfxclkFreq1 + od_table->GfxclkFreq3) / 2;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].default_value =
			od_table->GfxclkFreq2;

		PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr,
				&(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value),
				od_table->GfxclkFreq1),
				"[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!",
				od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value = 0);
		od_table->GfxclkVolt1 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value
			* VOLTAGE_SCALE;

		PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr,
				&(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value),
				od_table->GfxclkFreq2),
				"[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!",
				od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value = 0);
		od_table->GfxclkVolt2 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value
			* VOLTAGE_SCALE;

		PP_ASSERT_WITH_CODE(!vega20_od8_get_gfx_clock_base_voltage(hwmgr,
				&(od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value),
				od_table->GfxclkFreq3),
				"[PhwVega20_OD8_InitializeDefaultSettings] Failed to get Base clock voltage from SMU!",
				od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value = 0);
		od_table->GfxclkVolt3 = od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value
			* VOLTAGE_SCALE;
	} else {
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ1].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE1].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ2].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE2].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_FREQ3].default_value =
			0;
		od8_settings->od8_settings_array[OD8_SETTING_GFXCLK_VOLTAGE3].default_value =
			0;
	}

	if (od8_settings->overdrive8_capabilities & OD8_UCLK_MAX)
		od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].default_value =
			od_table->UclkFmax;
	else
		od8_settings->od8_settings_array[OD8_SETTING_UCLK_FMAX].default_value =
			0;

	if (od8_settings->overdrive8_capabilities & OD8_POWER_LIMIT)
		od8_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].default_value =
			od_table->OverDrivePct;
	else
		od8_settings->od8_settings_array[OD8_SETTING_POWER_PERCENTAGE].default_value =
			0;

	if (od8_settings->overdrive8_capabilities & OD8_ACOUSTIC_LIMIT_SCLK)
		od8_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].default_value =
			od_table->FanMaximumRpm;
	else
		od8_settings->od8_settings_array[OD8_SETTING_FAN_ACOUSTIC_LIMIT].default_value =
			0;

	if (od8_settings->overdrive8_capabilities & OD8_FAN_SPEED_MIN)
		od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].default_value =
			od_table->FanMinimumPwm * data->smc_state_table.pp_table.FanMaximumRpm / 100;
	else
		od8_settings->od8_settings_array[OD8_SETTING_FAN_MIN_SPEED].default_value =
			0;

	if (od8_settings->overdrive8_capabilities & OD8_TEMPERATURE_FAN)
		od8_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].default_value =
			od_table->FanTargetTemperature;
	else
		od8_settings->od8_settings_array[OD8_SETTING_FAN_TARGET_TEMP].default_value =
			0;

	if (od8_settings->overdrive8_capabilities & OD8_TEMPERATURE_SYSTEM)
		od8_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].default_value =
			od_table->MaxOpTemp;
	else
		od8_settings->od8_settings_array[OD8_SETTING_OPERATING_TEMP_MAX].default_value =
			0;

	for (i = 0; i < OD8_SETTING_COUNT; i++) {
		if (od8_settings->od8_settings_array[i].feature_id) {
			od8_settings->od8_settings_array[i].min_value =
				pptable_information->od_settings_min[i];
			od8_settings->od8_settings_array[i].max_value =
				pptable_information->od_settings_max[i];
			od8_settings->od8_settings_array[i].current_value =
				od8_settings->od8_settings_array[i].default_value;
		} else {
			od8_settings->od8_settings_array[i].min_value =
				0;
			od8_settings->od8_settings_array[i].max_value =
				0;
			od8_settings->od8_settings_array[i].current_value =
				0;
		}
	}

	ret = smum_smc_table_manager(hwmgr, (uint8_t *)od_table, TABLE_OVERDRIVE, false);
	PP_ASSERT_WITH_CODE(!ret,
			"Failed to import over drive table!",
			return ret);

	return 0;
}
static int vega20_od8_set_settings(
		struct pp_hwmgr *hwmgr,
		uint32_t index,
		uint32_t value)
{
	OverDriveTable_t od_table;
	int ret = 0;
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_od8_single_setting *od8_settings =
			data->od8_settings.od8_settings_array;

	ret = smum_smc_table_manager(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE, true);
	PP_ASSERT_WITH_CODE(!ret,
			"Failed to export over drive table!",
			return ret);

	switch (index) {
	case OD8_SETTING_GFXCLK_FMIN:
		od_table.GfxclkFmin = (uint16_t)value;
		break;
	case OD8_SETTING_GFXCLK_FMAX:
		if (value < od8_settings[OD8_SETTING_GFXCLK_FMAX].min_value ||
		    value > od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value)
			return -EINVAL;

		od_table.GfxclkFmax = (uint16_t)value;
		break;
	case OD8_SETTING_GFXCLK_FREQ1:
		od_table.GfxclkFreq1 = (uint16_t)value;
		break;
	case OD8_SETTING_GFXCLK_VOLTAGE1:
		od_table.GfxclkVolt1 = (uint16_t)value;
		break;
	case OD8_SETTING_GFXCLK_FREQ2:
		od_table.GfxclkFreq2 = (uint16_t)value;
		break;
	case OD8_SETTING_GFXCLK_VOLTAGE2:
		od_table.GfxclkVolt2 = (uint16_t)value;
		break;
	case OD8_SETTING_GFXCLK_FREQ3:
		od_table.GfxclkFreq3 = (uint16_t)value;
		break;
	case OD8_SETTING_GFXCLK_VOLTAGE3:
		od_table.GfxclkVolt3 = (uint16_t)value;
		break;
	case OD8_SETTING_UCLK_FMAX:
		if (value < od8_settings[OD8_SETTING_UCLK_FMAX].min_value ||
		    value > od8_settings[OD8_SETTING_UCLK_FMAX].max_value)
			return -EINVAL;
		od_table.UclkFmax = (uint16_t)value;
		break;
	case OD8_SETTING_POWER_PERCENTAGE:
		od_table.OverDrivePct = (int16_t)value;
		break;
	case OD8_SETTING_FAN_ACOUSTIC_LIMIT:
		od_table.FanMaximumRpm = (uint16_t)value;
		break;
	case OD8_SETTING_FAN_MIN_SPEED:
		od_table.FanMinimumPwm = (uint16_t)value;
		break;
	case OD8_SETTING_FAN_TARGET_TEMP:
		od_table.FanTargetTemperature = (uint16_t)value;
		break;
	case OD8_SETTING_OPERATING_TEMP_MAX:
		od_table.MaxOpTemp = (uint16_t)value;
		break;
	}

	ret = smum_smc_table_manager(hwmgr, (uint8_t *)(&od_table), TABLE_OVERDRIVE, false);
	PP_ASSERT_WITH_CODE(!ret,
			"Failed to import over drive table!",
			return ret);

	return 0;
}
static int vega20_get_sclk_od(
		struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = hwmgr->backend;
	struct vega20_single_dpm_table *sclk_table =
			&(data->dpm_table.gfx_table);
	struct vega20_single_dpm_table *golden_sclk_table =
			&(data->golden_dpm_table.gfx_table);
	int value = sclk_table->dpm_levels[sclk_table->count - 1].value;
	int golden_value = golden_sclk_table->dpm_levels
			[golden_sclk_table->count - 1].value;

	value -= golden_value;
	value = DIV_ROUND_UP(value * 100, golden_value);

	return value;
}
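/*
 * Worked example (editor's addition, hypothetical numbers): if the golden
 * table tops out at 1800 MHz and the current table at 1890 MHz, then
 * value = DIV_ROUND_UP((1890 - 1800) * 100, 1800) = 5, i.e. a +5% overdrive
 * is reported.
 */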
static int vega20_set_sclk_od(
		struct pp_hwmgr *hwmgr, uint32_t value)
{
	struct vega20_hwmgr *data = hwmgr->backend;
	struct vega20_single_dpm_table *golden_sclk_table =
			&(data->golden_dpm_table.gfx_table);
	uint32_t od_sclk;
	int ret = 0;

	od_sclk = golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value * value;
	od_sclk /= 100;
	od_sclk += golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;

	ret = vega20_od8_set_settings(hwmgr, OD8_SETTING_GFXCLK_FMAX, od_sclk);
	PP_ASSERT_WITH_CODE(!ret,
			"[SetSclkOD] failed to set od gfxclk!",
			return ret);

	/* retrieve updated gfxclk table */
	ret = vega20_setup_gfxclk_dpm_table(hwmgr);
	PP_ASSERT_WITH_CODE(!ret,
			"[SetSclkOD] failed to refresh gfxclk table!",
			return ret);

	return 0;
}
static int vega20_get_mclk_od(
		struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = hwmgr->backend;
	struct vega20_single_dpm_table *mclk_table =
			&(data->dpm_table.mem_table);
	struct vega20_single_dpm_table *golden_mclk_table =
			&(data->golden_dpm_table.mem_table);
	int value = mclk_table->dpm_levels[mclk_table->count - 1].value;
	int golden_value = golden_mclk_table->dpm_levels
			[golden_mclk_table->count - 1].value;

	value -= golden_value;
	value = DIV_ROUND_UP(value * 100, golden_value);

	return value;
}
static int vega20_set_mclk_od(
		struct pp_hwmgr *hwmgr, uint32_t value)
{
	struct vega20_hwmgr *data = hwmgr->backend;
	struct vega20_single_dpm_table *golden_mclk_table =
			&(data->golden_dpm_table.mem_table);
	uint32_t od_mclk;
	int ret = 0;

	od_mclk = golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value * value;
	od_mclk /= 100;
	od_mclk += golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;

	ret = vega20_od8_set_settings(hwmgr, OD8_SETTING_UCLK_FMAX, od_mclk);
	PP_ASSERT_WITH_CODE(!ret,
			"[SetMclkOD] failed to set od memclk!",
			return ret);

	/* retrieve updated memclk table */
	ret = vega20_setup_memclk_dpm_table(hwmgr);
	PP_ASSERT_WITH_CODE(!ret,
			"[SetMclkOD] failed to refresh memclk table!",
			return ret);

	return 0;
}
static int vega20_populate_umdpstate_clocks(
		struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_single_dpm_table *gfx_table = &(data->dpm_table.gfx_table);
	struct vega20_single_dpm_table *mem_table = &(data->dpm_table.mem_table);

	hwmgr->pstate_sclk = gfx_table->dpm_levels[0].value;
	hwmgr->pstate_mclk = mem_table->dpm_levels[0].value;

	if (gfx_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL &&
	    mem_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL) {
		hwmgr->pstate_sclk = gfx_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
		hwmgr->pstate_mclk = mem_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
	}

	hwmgr->pstate_sclk = hwmgr->pstate_sclk * 100;
	hwmgr->pstate_mclk = hwmgr->pstate_mclk * 100;

	return 0;
}
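/*
 * Editor's note (assumption about units): the final multiply by 100 above
 * converts the DPM table values (MHz) into the 10 kHz units in which
 * pstate_sclk and pstate_mclk are reported elsewhere; a 1000 MHz level would
 * be stored as 100000.
 */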
static int vega20_get_max_sustainable_clock(struct pp_hwmgr *hwmgr,
		PP_Clock *clock, PPCLK_e clock_select)
{
	int ret = 0;

	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_GetDcModeMaxDpmFreq,
			(clock_select << 16))) == 0,
			"[GetMaxSustainableClock] Failed to get max DC clock from SMC!",
			return ret);
	*clock = smum_get_argument(hwmgr);

	/* if DC limit is zero, return AC limit */
	if (*clock == 0) {
		PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_GetMaxDpmFreq,
				(clock_select << 16))) == 0,
				"[GetMaxSustainableClock] failed to get max AC clock from SMC!",
				return ret);
		*clock = smum_get_argument(hwmgr);
	}

	return 0;
}
static int vega20_init_max_sustainable_clocks(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_max_sustainable_clocks *max_sustainable_clocks =
			&(data->max_sustainable_clocks);
	int ret = 0;

	max_sustainable_clocks->uclock = data->vbios_boot_state.mem_clock / 100;
	max_sustainable_clocks->soc_clock = data->vbios_boot_state.soc_clock / 100;
	max_sustainable_clocks->dcef_clock = data->vbios_boot_state.dcef_clock / 100;
	max_sustainable_clocks->display_clock = 0xFFFFFFFF;
	max_sustainable_clocks->phy_clock = 0xFFFFFFFF;
	max_sustainable_clocks->pixel_clock = 0xFFFFFFFF;

	if (data->smu_features[GNLD_DPM_UCLK].enabled)
		PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
				&(max_sustainable_clocks->uclock),
				PPCLK_UCLK)) == 0,
				"[InitMaxSustainableClocks] failed to get max UCLK from SMC!",
				return ret);

	if (data->smu_features[GNLD_DPM_SOCCLK].enabled)
		PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
				&(max_sustainable_clocks->soc_clock),
				PPCLK_SOCCLK)) == 0,
				"[InitMaxSustainableClocks] failed to get max SOCCLK from SMC!",
				return ret);

	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
				&(max_sustainable_clocks->dcef_clock),
				PPCLK_DCEFCLK)) == 0,
				"[InitMaxSustainableClocks] failed to get max DCEFCLK from SMC!",
				return ret);
		PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
				&(max_sustainable_clocks->display_clock),
				PPCLK_DISPCLK)) == 0,
				"[InitMaxSustainableClocks] failed to get max DISPCLK from SMC!",
				return ret);
		PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
				&(max_sustainable_clocks->phy_clock),
				PPCLK_PHYCLK)) == 0,
				"[InitMaxSustainableClocks] failed to get max PHYCLK from SMC!",
				return ret);
		PP_ASSERT_WITH_CODE((ret = vega20_get_max_sustainable_clock(hwmgr,
				&(max_sustainable_clocks->pixel_clock),
				PPCLK_PIXCLK)) == 0,
				"[InitMaxSustainableClocks] failed to get max PIXCLK from SMC!",
				return ret);
	}

	if (max_sustainable_clocks->soc_clock < max_sustainable_clocks->uclock)
		max_sustainable_clocks->uclock = max_sustainable_clocks->soc_clock;

	return 0;
}
static int vega20_enable_mgpu_fan_boost(struct pp_hwmgr *hwmgr)
{
	int result;

	result = smum_send_msg_to_smc(hwmgr,
			PPSMC_MSG_SetMGpuFanBoostLimitRpm);
	PP_ASSERT_WITH_CODE(!result,
			"[EnableMgpuFan] Failed to enable mgpu fan boost!",
			return result);

	return 0;
}

static void vega20_init_powergate_state(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);

	data->uvd_power_gated = true;
	data->vce_power_gated = true;

	if (data->smu_features[GNLD_DPM_UVD].enabled)
		data->uvd_power_gated = false;

	if (data->smu_features[GNLD_DPM_VCE].enabled)
		data->vce_power_gated = false;
}

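/*
 * One-shot DPM bring-up sequence: program the allowed feature mask and SMC
 * table, run BTC, enable all SMU features, then derive the default DPM
 * tables, sustainable clocks, OD8 defaults and the default PPT limit.
 */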
static int vega20_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int result = 0;

	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_NumOfDisplays, 0);

	result = vega20_set_allowed_featuresmask(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"[EnableDPMTasks] Failed to set allowed featuresmask!\n",
			return result);

	result = vega20_init_smc_table(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"[EnableDPMTasks] Failed to initialize SMC table!",
			return result);

	result = vega20_run_btc(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"[EnableDPMTasks] Failed to run btc!",
			return result);

	result = vega20_run_btc_afll(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"[EnableDPMTasks] Failed to run btc afll!",
			return result);

	result = vega20_enable_all_smu_features(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"[EnableDPMTasks] Failed to enable all smu features!",
			return result);

	result = vega20_override_pcie_parameters(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"[EnableDPMTasks] Failed to override pcie parameters!",
			return result);

	result = vega20_notify_smc_display_change(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"[EnableDPMTasks] Failed to notify smc display change!",
			return result);

	result = vega20_send_clock_ratio(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"[EnableDPMTasks] Failed to send clock ratio!",
			return result);

	/* Initialize UVD/VCE powergating state */
	vega20_init_powergate_state(hwmgr);

	result = vega20_setup_default_dpm_tables(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"[EnableDPMTasks] Failed to setup default DPM tables!",
			return result);

	result = vega20_init_max_sustainable_clocks(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"[EnableDPMTasks] Failed to get maximum sustainable clocks!",
			return result);

	result = vega20_power_control_set_level(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"[EnableDPMTasks] Failed to power control set level!",
			return result);

	result = vega20_od8_initialize_default_settings(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"[EnableDPMTasks] Failed to initialize odn settings!",
			return result);

	result = vega20_populate_umdpstate_clocks(hwmgr);
	PP_ASSERT_WITH_CODE(!result,
			"[EnableDPMTasks] Failed to populate umdpstate clocks!",
			return result);

	result = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetPptLimit,
			POWER_SOURCE_AC << 16);
	PP_ASSERT_WITH_CODE(!result,
			"[GetPptLimit] get default PPT limit failed!",
			return result);
	hwmgr->power_limit =
		hwmgr->default_power_limit = smum_get_argument(hwmgr);

	return 0;
}

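/* Helpers that pick the lowest/highest enabled entry of a single DPM table. */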
static uint32_t vega20_find_lowest_dpm_level(
		struct vega20_single_dpm_table *table)
{
	uint32_t i;

	for (i = 0; i < table->count; i++) {
		if (table->dpm_levels[i].enabled)
			break;
	}
	if (i >= table->count) {
		i = 0;
		table->dpm_levels[i].enabled = true;
	}

	return i;
}

static uint32_t vega20_find_highest_dpm_level(
		struct vega20_single_dpm_table *table)
{
	int i = 0;

	PP_ASSERT_WITH_CODE(table != NULL,
			"[FindHighestDPMLevel] DPM Table does not exist!",
			return 0);
	PP_ASSERT_WITH_CODE(table->count > 0,
			"[FindHighestDPMLevel] DPM Table has no entry!",
			return 0);
	PP_ASSERT_WITH_CODE(table->count <= MAX_REGULAR_DPM_NUMBER,
			"[FindHighestDPMLevel] DPM Table has too many entries!",
			return MAX_REGULAR_DPM_NUMBER - 1);

	for (i = table->count - 1; i >= 0; i--) {
		if (table->dpm_levels[i].enabled)
			break;
	}
	if (i < 0) {
		i = 0;
		table->dpm_levels[i].enabled = true;
	}

	return i;
}

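/*
 * Push the cached soft (or, for DCEFCLK, hard) minimum levels down to the
 * SMU for every clock domain selected in @feature_mask.
 */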
static int vega20_upload_dpm_min_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	uint32_t min_freq;
	int ret = 0;

	if (data->smu_features[GNLD_DPM_GFXCLK].enabled &&
	    (feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
		min_freq = data->dpm_table.gfx_table.dpm_state.soft_min_level;
		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
					(PPCLK_GFXCLK << 16) | (min_freq & 0xffff))),
					"Failed to set soft min gfxclk !",
					return ret);
	}

	if (data->smu_features[GNLD_DPM_UCLK].enabled &&
	    (feature_mask & FEATURE_DPM_UCLK_MASK)) {
		min_freq = data->dpm_table.mem_table.dpm_state.soft_min_level;
		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
					(PPCLK_UCLK << 16) | (min_freq & 0xffff))),
					"Failed to set soft min memclk !",
					return ret);
	}

	if (data->smu_features[GNLD_DPM_UVD].enabled &&
	    (feature_mask & FEATURE_DPM_UVD_MASK)) {
		min_freq = data->dpm_table.vclk_table.dpm_state.soft_min_level;
		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
					(PPCLK_VCLK << 16) | (min_freq & 0xffff))),
					"Failed to set soft min vclk!",
					return ret);

		min_freq = data->dpm_table.dclk_table.dpm_state.soft_min_level;
		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
					(PPCLK_DCLK << 16) | (min_freq & 0xffff))),
					"Failed to set soft min dclk!",
					return ret);
	}

	if (data->smu_features[GNLD_DPM_VCE].enabled &&
	    (feature_mask & FEATURE_DPM_VCE_MASK)) {
		min_freq = data->dpm_table.eclk_table.dpm_state.soft_min_level;
		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
					(PPCLK_ECLK << 16) | (min_freq & 0xffff))),
					"Failed to set soft min eclk!",
					return ret);
	}

	if (data->smu_features[GNLD_DPM_SOCCLK].enabled &&
	    (feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
		min_freq = data->dpm_table.soc_table.dpm_state.soft_min_level;
		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
					(PPCLK_SOCCLK << 16) | (min_freq & 0xffff))),
					"Failed to set soft min socclk!",
					return ret);
	}

	if (data->smu_features[GNLD_DPM_FCLK].enabled &&
	    (feature_mask & FEATURE_DPM_FCLK_MASK)) {
		min_freq = data->dpm_table.fclk_table.dpm_state.soft_min_level;
		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMinByFreq,
					(PPCLK_FCLK << 16) | (min_freq & 0xffff))),
					"Failed to set soft min fclk!",
					return ret);
	}

	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled &&
	    (feature_mask & FEATURE_DPM_DCEFCLK_MASK)) {
		min_freq = data->dpm_table.dcef_table.dpm_state.hard_min_level;
		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetHardMinByFreq,
					(PPCLK_DCEFCLK << 16) | (min_freq & 0xffff))),
					"Failed to set hard min dcefclk!",
					return ret);
	}

	return ret;
}

static int vega20_upload_dpm_max_level(struct pp_hwmgr *hwmgr, uint32_t feature_mask)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	uint32_t max_freq;
	int ret = 0;

	if (data->smu_features[GNLD_DPM_GFXCLK].enabled &&
	    (feature_mask & FEATURE_DPM_GFXCLK_MASK)) {
		max_freq = data->dpm_table.gfx_table.dpm_state.soft_max_level;
		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
					(PPCLK_GFXCLK << 16) | (max_freq & 0xffff))),
					"Failed to set soft max gfxclk!",
					return ret);
	}

	if (data->smu_features[GNLD_DPM_UCLK].enabled &&
	    (feature_mask & FEATURE_DPM_UCLK_MASK)) {
		max_freq = data->dpm_table.mem_table.dpm_state.soft_max_level;
		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
					(PPCLK_UCLK << 16) | (max_freq & 0xffff))),
					"Failed to set soft max memclk!",
					return ret);
	}

	if (data->smu_features[GNLD_DPM_UVD].enabled &&
	    (feature_mask & FEATURE_DPM_UVD_MASK)) {
		max_freq = data->dpm_table.vclk_table.dpm_state.soft_max_level;
		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
					(PPCLK_VCLK << 16) | (max_freq & 0xffff))),
					"Failed to set soft max vclk!",
					return ret);

		max_freq = data->dpm_table.dclk_table.dpm_state.soft_max_level;
		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
					(PPCLK_DCLK << 16) | (max_freq & 0xffff))),
					"Failed to set soft max dclk!",
					return ret);
	}

	if (data->smu_features[GNLD_DPM_VCE].enabled &&
	    (feature_mask & FEATURE_DPM_VCE_MASK)) {
		max_freq = data->dpm_table.eclk_table.dpm_state.soft_max_level;
		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
					(PPCLK_ECLK << 16) | (max_freq & 0xffff))),
					"Failed to set soft max eclk!",
					return ret);
	}

	if (data->smu_features[GNLD_DPM_SOCCLK].enabled &&
	    (feature_mask & FEATURE_DPM_SOCCLK_MASK)) {
		max_freq = data->dpm_table.soc_table.dpm_state.soft_max_level;
		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
					(PPCLK_SOCCLK << 16) | (max_freq & 0xffff))),
					"Failed to set soft max socclk!",
					return ret);
	}

	if (data->smu_features[GNLD_DPM_FCLK].enabled &&
	    (feature_mask & FEATURE_DPM_FCLK_MASK)) {
		max_freq = data->dpm_table.fclk_table.dpm_state.soft_max_level;
		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetSoftMaxByFreq,
					(PPCLK_FCLK << 16) | (max_freq & 0xffff))),
					"Failed to set soft max fclk!",
					return ret);
	}

	return ret;
}

int vega20_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	int ret = 0;

	if (data->smu_features[GNLD_DPM_VCE].supported) {
		if (data->smu_features[GNLD_DPM_VCE].enabled == enable) {
			if (enable)
				PP_DBG_LOG("[EnableDisableVCEDPM] feature VCE DPM already enabled!\n");
			else
				PP_DBG_LOG("[EnableDisableVCEDPM] feature VCE DPM already disabled!\n");
		}

		ret = vega20_enable_smc_features(hwmgr,
				enable,
				data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap);
		PP_ASSERT_WITH_CODE(!ret,
				"Attempt to Enable/Disable DPM VCE Failed!",
				return ret);
		data->smu_features[GNLD_DPM_VCE].enabled = enable;
	}

	return 0;
}

static int vega20_get_clock_ranges(struct pp_hwmgr *hwmgr,
		uint32_t *clock,
		PPCLK_e clock_select,
		bool max)
{
	int ret;

	if (max) {
		PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_GetMaxDpmFreq, (clock_select << 16))) == 0,
				"[GetClockRanges] Failed to get max clock from SMC!",
				return ret);
		*clock = smum_get_argument(hwmgr);
	} else {
		PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_GetMinDpmFreq,
				(clock_select << 16))) == 0,
				"[GetClockRanges] Failed to get min clock from SMC!",
				return ret);
		*clock = smum_get_argument(hwmgr);
	}

	return 0;
}

static uint32_t vega20_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	uint32_t gfx_clk;
	int ret = 0;

	PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_GFXCLK].enabled,
			"[GetSclks]: gfxclk dpm not enabled!\n",
			return -EPERM);

	if (low) {
		ret = vega20_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, false);
		PP_ASSERT_WITH_CODE(!ret,
				"[GetSclks]: fail to get min PPCLK_GFXCLK\n",
				return ret);
	} else {
		ret = vega20_get_clock_ranges(hwmgr, &gfx_clk, PPCLK_GFXCLK, true);
		PP_ASSERT_WITH_CODE(!ret,
				"[GetSclks]: fail to get max PPCLK_GFXCLK\n",
				return ret);
	}

	return (gfx_clk * 100);
}

static uint32_t vega20_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	uint32_t mem_clk;
	int ret = 0;

	PP_ASSERT_WITH_CODE(data->smu_features[GNLD_DPM_UCLK].enabled,
			"[MemMclks]: memclk dpm not enabled!\n",
			return -EPERM);

	if (low) {
		ret = vega20_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, false);
		PP_ASSERT_WITH_CODE(!ret,
				"[GetMclks]: fail to get min PPCLK_UCLK\n",
				return ret);
	} else {
		ret = vega20_get_clock_ranges(hwmgr, &mem_clk, PPCLK_UCLK, true);
		PP_ASSERT_WITH_CODE(!ret,
				"[GetMclks]: fail to get max PPCLK_UCLK\n",
				return ret);
	}

	return (mem_clk * 100);
}

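/*
 * Fetch the SMU metrics table, caching it in the backend and refreshing the
 * cache only when it is older than half a second.
 */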
static int vega20_get_metrics_table(struct pp_hwmgr *hwmgr, SmuMetrics_t *metrics_table)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	int ret = 0;

	if (!data->metrics_time || time_after(jiffies, data->metrics_time + HZ / 2)) {
		ret = smum_smc_table_manager(hwmgr, (uint8_t *)metrics_table,
				TABLE_SMU_METRICS, true);
		if (ret) {
			pr_info("Failed to export SMU metrics table!\n");
			return ret;
		}
		memcpy(&data->metrics_table, metrics_table, sizeof(SmuMetrics_t));
		data->metrics_time = jiffies;
	} else
		memcpy(metrics_table, &data->metrics_table, sizeof(SmuMetrics_t));

	return ret;
}

static int vega20_get_gpu_power(struct pp_hwmgr *hwmgr,
		uint32_t *query)
{
	int ret = 0;
	SmuMetrics_t metrics_table;

	ret = vega20_get_metrics_table(hwmgr, &metrics_table);
	if (ret)
		return ret;

	/* For the 40.46 release, they changed the value name */
	if (hwmgr->smu_version == 0x282e00)
		*query = metrics_table.AverageSocketPower << 8;
	else
		*query = metrics_table.CurrSocketPower << 8;

	return ret;
}

static int vega20_get_current_clk_freq(struct pp_hwmgr *hwmgr,
		PPCLK_e clk_id, uint32_t *clk_freq)
{
	int ret = 0;

	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_GetDpmClockFreq, (clk_id << 16))) == 0,
			"[GetCurrentClkFreq] Attempt to get Current Frequency Failed!",
			return ret);
	*clk_freq = smum_get_argument(hwmgr);

	*clk_freq = *clk_freq * 100;

	return 0;
}

static int vega20_get_current_activity_percent(struct pp_hwmgr *hwmgr,
		int idx,
		uint32_t *activity_percent)
{
	int ret = 0;
	SmuMetrics_t metrics_table;

	ret = vega20_get_metrics_table(hwmgr, &metrics_table);
	if (ret)
		return ret;

	switch (idx) {
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		*activity_percent = metrics_table.AverageGfxActivity;
		break;
	case AMDGPU_PP_SENSOR_MEM_LOAD:
		*activity_percent = metrics_table.AverageUclkActivity;
		break;
	default:
		pr_err("Invalid index for retrieving clock activity\n");
		return -EINVAL;
	}

	return ret;
}

static int vega20_read_sensor(struct pp_hwmgr *hwmgr, int idx,
			      void *value, int *size)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct amdgpu_device *adev = hwmgr->adev;
	SmuMetrics_t metrics_table;
	uint32_t val_vid;
	int ret = 0;

	switch (idx) {
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		ret = vega20_get_metrics_table(hwmgr, &metrics_table);
		if (ret)
			return ret;

		*((uint32_t *)value) = metrics_table.AverageGfxclkFrequency * 100;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		ret = vega20_get_current_clk_freq(hwmgr,
				PPCLK_UCLK,
				(uint32_t *)value);
		break;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
	case AMDGPU_PP_SENSOR_MEM_LOAD:
		ret = vega20_get_current_activity_percent(hwmgr, idx, (uint32_t *)value);
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
		*((uint32_t *)value) = vega20_thermal_get_temperature(hwmgr);
		break;
	case AMDGPU_PP_SENSOR_EDGE_TEMP:
		ret = vega20_get_metrics_table(hwmgr, &metrics_table);
		if (ret)
			return ret;

		*((uint32_t *)value) = metrics_table.TemperatureEdge *
			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case AMDGPU_PP_SENSOR_MEM_TEMP:
		ret = vega20_get_metrics_table(hwmgr, &metrics_table);
		if (ret)
			return ret;

		*((uint32_t *)value) = metrics_table.TemperatureHBM *
			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
		break;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		*((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
		break;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
		break;
	case AMDGPU_PP_SENSOR_GPU_POWER:
		ret = vega20_get_gpu_power(hwmgr, (uint32_t *)value);
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_TEL_PLANE0) &
			SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR_MASK) >>
			SMUSVI0_TEL_PLANE0__SVI0_PLANE0_VDDCOR__SHIFT;
		*((uint32_t *)value) =
			(uint32_t)convert_to_vddc((uint8_t)val_vid);
		break;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = vega20_get_enabled_smc_features(hwmgr, (uint64_t *)value);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

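/*
 * Translate a display clock request from DC into a hard-minimum frequency
 * message for the corresponding SMU clock domain.
 */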
int vega20_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
		struct pp_display_clock_request *clock_req)
{
	int result = 0;
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	enum amd_pp_clock_type clk_type = clock_req->clock_type;
	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
	PPCLK_e clk_select = 0;
	uint32_t clk_request = 0;

	if (data->smu_features[GNLD_DPM_DCEFCLK].enabled) {
		switch (clk_type) {
		case amd_pp_dcef_clock:
			clk_select = PPCLK_DCEFCLK;
			break;
		case amd_pp_disp_clock:
			clk_select = PPCLK_DISPCLK;
			break;
		case amd_pp_pixel_clock:
			clk_select = PPCLK_PIXCLK;
			break;
		case amd_pp_phy_clock:
			clk_select = PPCLK_PHYCLK;
			break;
		default:
			pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
			result = -EINVAL;
			break;
		}

		if (!result) {
			clk_request = (clk_select << 16) | clk_freq;
			result = smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetHardMinByFreq,
					clk_request);
		}
	}

	return result;
}

static int vega20_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
				PHM_PerformanceLevelDesignation designation, uint32_t index,
				PHM_PerformanceLevel *level)
{
	return 0;
}

static int vega20_notify_smc_display_config_after_ps_adjustment(
		struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_single_dpm_table *dpm_table =
			&data->dpm_table.mem_table;
	struct PP_Clocks min_clocks = {0};
	struct pp_display_clock_request clock_req;
	int ret = 0;

	min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
	min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
	min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;

	if (data->smu_features[GNLD_DPM_DCEFCLK].supported) {
		clock_req.clock_type = amd_pp_dcef_clock;
		clock_req.clock_freq_in_khz = min_clocks.dcefClock * 10;
		if (!vega20_display_clock_voltage_request(hwmgr, &clock_req)) {
			if (data->smu_features[GNLD_DS_DCEFCLK].supported)
				PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
					min_clocks.dcefClockInSR / 100)) == 0,
					"Attempt to set divider for DCEFCLK Failed!",
					return ret);
		} else {
			pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
		}
	}

	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
		dpm_table->dpm_state.hard_min_level = min_clocks.memoryClock / 100;
		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetHardMinByFreq,
				(PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level)),
				"[SetHardMinFreq] Set hard min uclk failed!",
				return ret);
	}

	return 0;
}

static int vega20_force_dpm_highest(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	uint32_t soft_level;
	int ret = 0;

	soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table));

	data->dpm_table.gfx_table.dpm_state.soft_min_level =
		data->dpm_table.gfx_table.dpm_state.soft_max_level =
		data->dpm_table.gfx_table.dpm_levels[soft_level].value;

	soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.mem_table));

	data->dpm_table.mem_table.dpm_state.soft_min_level =
		data->dpm_table.mem_table.dpm_state.soft_max_level =
		data->dpm_table.mem_table.dpm_levels[soft_level].value;

	soft_level = vega20_find_highest_dpm_level(&(data->dpm_table.soc_table));

	data->dpm_table.soc_table.dpm_state.soft_min_level =
		data->dpm_table.soc_table.dpm_state.soft_max_level =
		data->dpm_table.soc_table.dpm_levels[soft_level].value;

	ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
					 FEATURE_DPM_UCLK_MASK |
					 FEATURE_DPM_SOCCLK_MASK);
	PP_ASSERT_WITH_CODE(!ret,
			"Failed to upload boot level to highest!",
			return ret);

	ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
					 FEATURE_DPM_UCLK_MASK |
					 FEATURE_DPM_SOCCLK_MASK);
	PP_ASSERT_WITH_CODE(!ret,
			"Failed to upload dpm max level to highest!",
			return ret);

	return 0;
}

static int vega20_force_dpm_lowest(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	uint32_t soft_level;
	int ret = 0;

	soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table));

	data->dpm_table.gfx_table.dpm_state.soft_min_level =
		data->dpm_table.gfx_table.dpm_state.soft_max_level =
		data->dpm_table.gfx_table.dpm_levels[soft_level].value;

	soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table));

	data->dpm_table.mem_table.dpm_state.soft_min_level =
		data->dpm_table.mem_table.dpm_state.soft_max_level =
		data->dpm_table.mem_table.dpm_levels[soft_level].value;

	soft_level = vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table));

	data->dpm_table.soc_table.dpm_state.soft_min_level =
		data->dpm_table.soc_table.dpm_state.soft_max_level =
		data->dpm_table.soc_table.dpm_levels[soft_level].value;

	ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
					 FEATURE_DPM_UCLK_MASK |
					 FEATURE_DPM_SOCCLK_MASK);
	PP_ASSERT_WITH_CODE(!ret,
			"Failed to upload boot level to highest!",
			return ret);

	ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
					 FEATURE_DPM_UCLK_MASK |
					 FEATURE_DPM_SOCCLK_MASK);
	PP_ASSERT_WITH_CODE(!ret,
			"Failed to upload dpm max level to highest!",
			return ret);

	return 0;
}

static int vega20_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	uint32_t soft_min_level, soft_max_level;
	int ret = 0;

	/* gfxclk soft min/max settings */
	soft_min_level =
		vega20_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
	soft_max_level =
		vega20_find_highest_dpm_level(&(data->dpm_table.gfx_table));

	data->dpm_table.gfx_table.dpm_state.soft_min_level =
		data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
	data->dpm_table.gfx_table.dpm_state.soft_max_level =
		data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;

	/* uclk soft min/max settings */
	soft_min_level =
		vega20_find_lowest_dpm_level(&(data->dpm_table.mem_table));
	soft_max_level =
		vega20_find_highest_dpm_level(&(data->dpm_table.mem_table));

	data->dpm_table.mem_table.dpm_state.soft_min_level =
		data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
	data->dpm_table.mem_table.dpm_state.soft_max_level =
		data->dpm_table.mem_table.dpm_levels[soft_max_level].value;

	/* socclk soft min/max settings */
	soft_min_level =
		vega20_find_lowest_dpm_level(&(data->dpm_table.soc_table));
	soft_max_level =
		vega20_find_highest_dpm_level(&(data->dpm_table.soc_table));

	data->dpm_table.soc_table.dpm_state.soft_min_level =
		data->dpm_table.soc_table.dpm_levels[soft_min_level].value;
	data->dpm_table.soc_table.dpm_state.soft_max_level =
		data->dpm_table.soc_table.dpm_levels[soft_max_level].value;

	ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
					 FEATURE_DPM_UCLK_MASK |
					 FEATURE_DPM_SOCCLK_MASK);
	PP_ASSERT_WITH_CODE(!ret,
			"Failed to upload DPM Bootup Levels!",
			return ret);

	ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK |
					 FEATURE_DPM_UCLK_MASK |
					 FEATURE_DPM_SOCCLK_MASK);
	PP_ASSERT_WITH_CODE(!ret,
			"Failed to upload DPM Max Levels!",
			return ret);

	return 0;
}

static int vega20_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
				uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_single_dpm_table *gfx_dpm_table = &(data->dpm_table.gfx_table);
	struct vega20_single_dpm_table *mem_dpm_table = &(data->dpm_table.mem_table);
	struct vega20_single_dpm_table *soc_dpm_table = &(data->dpm_table.soc_table);

	*sclk_mask = 0;
	*mclk_mask = 0;
	*soc_mask = 0;

	if (gfx_dpm_table->count > VEGA20_UMD_PSTATE_GFXCLK_LEVEL &&
	    mem_dpm_table->count > VEGA20_UMD_PSTATE_MCLK_LEVEL &&
	    soc_dpm_table->count > VEGA20_UMD_PSTATE_SOCCLK_LEVEL) {
		*sclk_mask = VEGA20_UMD_PSTATE_GFXCLK_LEVEL;
		*mclk_mask = VEGA20_UMD_PSTATE_MCLK_LEVEL;
		*soc_mask = VEGA20_UMD_PSTATE_SOCCLK_LEVEL;
	}

	if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
		*sclk_mask = 0;
	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
		*mclk_mask = 0;
	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
		*sclk_mask = gfx_dpm_table->count - 1;
		*mclk_mask = mem_dpm_table->count - 1;
		*soc_mask = soc_dpm_table->count - 1;
	}

	return 0;
}

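/*
 * Clamp a single clock domain to the level range encoded in @mask and push
 * the new soft/hard limits to the SMU.
 */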
static int vega20_force_clock_level(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, uint32_t mask)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	uint32_t soft_min_level, soft_max_level, hard_min_level;
	int ret = 0;

	switch (type) {
	case PP_SCLK:
		soft_min_level = mask ? (ffs(mask) - 1) : 0;
		soft_max_level = mask ? (fls(mask) - 1) : 0;

		if (soft_max_level >= data->dpm_table.gfx_table.count) {
			pr_err("Clock level specified %d is over max allowed %d\n",
					soft_max_level,
					data->dpm_table.gfx_table.count - 1);
			return -EINVAL;
		}

		data->dpm_table.gfx_table.dpm_state.soft_min_level =
			data->dpm_table.gfx_table.dpm_levels[soft_min_level].value;
		data->dpm_table.gfx_table.dpm_state.soft_max_level =
			data->dpm_table.gfx_table.dpm_levels[soft_max_level].value;

		ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_GFXCLK_MASK);
		PP_ASSERT_WITH_CODE(!ret,
			"Failed to upload boot level to lowest!",
			return ret);

		ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_GFXCLK_MASK);
		PP_ASSERT_WITH_CODE(!ret,
			"Failed to upload dpm max level to highest!",
			return ret);
		break;

	case PP_MCLK:
		soft_min_level = mask ? (ffs(mask) - 1) : 0;
		soft_max_level = mask ? (fls(mask) - 1) : 0;

		if (soft_max_level >= data->dpm_table.mem_table.count) {
			pr_err("Clock level specified %d is over max allowed %d\n",
					soft_max_level,
					data->dpm_table.mem_table.count - 1);
			return -EINVAL;
		}

		data->dpm_table.mem_table.dpm_state.soft_min_level =
			data->dpm_table.mem_table.dpm_levels[soft_min_level].value;
		data->dpm_table.mem_table.dpm_state.soft_max_level =
			data->dpm_table.mem_table.dpm_levels[soft_max_level].value;

		ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_UCLK_MASK);
		PP_ASSERT_WITH_CODE(!ret,
			"Failed to upload boot level to lowest!",
			return ret);

		ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_UCLK_MASK);
		PP_ASSERT_WITH_CODE(!ret,
			"Failed to upload dpm max level to highest!",
			return ret);
		break;

	case PP_SOCCLK:
		soft_min_level = mask ? (ffs(mask) - 1) : 0;
		soft_max_level = mask ? (fls(mask) - 1) : 0;

		if (soft_max_level >= data->dpm_table.soc_table.count) {
			pr_err("Clock level specified %d is over max allowed %d\n",
					soft_max_level,
					data->dpm_table.soc_table.count - 1);
			return -EINVAL;
		}

		data->dpm_table.soc_table.dpm_state.soft_min_level =
			data->dpm_table.soc_table.dpm_levels[soft_min_level].value;
		data->dpm_table.soc_table.dpm_state.soft_max_level =
			data->dpm_table.soc_table.dpm_levels[soft_max_level].value;

		ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_SOCCLK_MASK);
		PP_ASSERT_WITH_CODE(!ret,
			"Failed to upload boot level to lowest!",
			return ret);

		ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_SOCCLK_MASK);
		PP_ASSERT_WITH_CODE(!ret,
			"Failed to upload dpm max level to highest!",
			return ret);
		break;

	case PP_FCLK:
		soft_min_level = mask ? (ffs(mask) - 1) : 0;
		soft_max_level = mask ? (fls(mask) - 1) : 0;

		if (soft_max_level >= data->dpm_table.fclk_table.count) {
			pr_err("Clock level specified %d is over max allowed %d\n",
					soft_max_level,
					data->dpm_table.fclk_table.count - 1);
			return -EINVAL;
		}

		data->dpm_table.fclk_table.dpm_state.soft_min_level =
			data->dpm_table.fclk_table.dpm_levels[soft_min_level].value;
		data->dpm_table.fclk_table.dpm_state.soft_max_level =
			data->dpm_table.fclk_table.dpm_levels[soft_max_level].value;

		ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_FCLK_MASK);
		PP_ASSERT_WITH_CODE(!ret,
			"Failed to upload boot level to lowest!",
			return ret);

		ret = vega20_upload_dpm_max_level(hwmgr, FEATURE_DPM_FCLK_MASK);
		PP_ASSERT_WITH_CODE(!ret,
			"Failed to upload dpm max level to highest!",
			return ret);
		break;

	case PP_DCEFCLK:
		hard_min_level = mask ? (ffs(mask) - 1) : 0;

		if (hard_min_level >= data->dpm_table.dcef_table.count) {
			pr_err("Clock level specified %d is over max allowed %d\n",
					hard_min_level,
					data->dpm_table.dcef_table.count - 1);
			return -EINVAL;
		}

		data->dpm_table.dcef_table.dpm_state.hard_min_level =
			data->dpm_table.dcef_table.dpm_levels[hard_min_level].value;

		ret = vega20_upload_dpm_min_level(hwmgr, FEATURE_DPM_DCEFCLK_MASK);
		PP_ASSERT_WITH_CODE(!ret,
			"Failed to upload boot level to lowest!",
			return ret);

		//TODO: Setting DCEFCLK max dpm level is not supported
		break;

	case PP_PCIE:
		soft_min_level = mask ? (ffs(mask) - 1) : 0;
		soft_max_level = mask ? (fls(mask) - 1) : 0;
		if (soft_min_level >= NUM_LINK_LEVELS ||
		    soft_max_level >= NUM_LINK_LEVELS)
			return -EINVAL;

		ret = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetMinLinkDpmByIndex, soft_min_level);
		PP_ASSERT_WITH_CODE(!ret,
			"Failed to set min link dpm level!",
			return ret);
		break;

	default:
		break;
	}

	return ret;
}

static int vega20_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
				enum amd_dpm_forced_level level)
{
	int ret = 0;
	uint32_t sclk_mask, mclk_mask, soc_mask;

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		ret = vega20_force_dpm_highest(hwmgr);
		break;

	case AMD_DPM_FORCED_LEVEL_LOW:
		ret = vega20_force_dpm_lowest(hwmgr);
		break;

	case AMD_DPM_FORCED_LEVEL_AUTO:
		ret = vega20_unforce_dpm_levels(hwmgr);
		break;

	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		ret = vega20_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
		if (ret)
			return ret;
		vega20_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask);
		vega20_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask);
		vega20_force_clock_level(hwmgr, PP_SOCCLK, 1 << soc_mask);
		break;

	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
	default:
		break;
	}

	return ret;
}

static uint32_t vega20_get_fan_control_mode(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);

	if (data->smu_features[GNLD_FAN_CONTROL].enabled == false)
		return AMD_FAN_CTRL_MANUAL;
	else
		return AMD_FAN_CTRL_AUTO;
}

static void vega20_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
	switch (mode) {
	case AMD_FAN_CTRL_NONE:
		vega20_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
		break;
	case AMD_FAN_CTRL_MANUAL:
		if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
			vega20_fan_ctrl_stop_smc_fan_control(hwmgr);
		break;
	case AMD_FAN_CTRL_AUTO:
		if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
			vega20_fan_ctrl_start_smc_fan_control(hwmgr);
		break;
	default:
		break;
	}
}

static int vega20_get_dal_power_level(struct pp_hwmgr *hwmgr,
		struct amd_pp_simple_clock_info *info)
{
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;
	struct phm_clock_and_voltage_limits *max_limits =
			&table_info->max_clock_voltage_on_ac;

	info->engine_max_clock = max_limits->sclk;
	info->memory_max_clock = max_limits->mclk;

	return 0;
}

static int vega20_get_sclks(struct pp_hwmgr *hwmgr,
		struct pp_clock_levels_with_latency *clocks)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
	int i, count;

	if (!data->smu_features[GNLD_DPM_GFXCLK].enabled)
		return -1;

	count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
	clocks->num_levels = count;

	for (i = 0; i < count; i++) {
		clocks->data[i].clocks_in_khz =
			dpm_table->dpm_levels[i].value * 1000;
		clocks->data[i].latency_in_us = 0;
	}

	return 0;
}

static uint32_t vega20_get_mem_latency(struct pp_hwmgr *hwmgr,
		uint32_t clock)
{
	return 25;
}

static int vega20_get_memclocks(struct pp_hwmgr *hwmgr,
		struct pp_clock_levels_with_latency *clocks)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.mem_table);
	int i, count;

	if (!data->smu_features[GNLD_DPM_UCLK].enabled)
		return -1;

	count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
	clocks->num_levels = data->mclk_latency_table.count = count;

	for (i = 0; i < count; i++) {
		clocks->data[i].clocks_in_khz =
			data->mclk_latency_table.entries[i].frequency =
			dpm_table->dpm_levels[i].value * 1000;
		clocks->data[i].latency_in_us =
			data->mclk_latency_table.entries[i].latency =
			vega20_get_mem_latency(hwmgr, dpm_table->dpm_levels[i].value);
	}

	return 0;
}

static int vega20_get_dcefclocks(struct pp_hwmgr *hwmgr,
		struct pp_clock_levels_with_latency *clocks)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.dcef_table);
	int i, count;

	if (!data->smu_features[GNLD_DPM_DCEFCLK].enabled)
		return -1;

	count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
	clocks->num_levels = count;

	for (i = 0; i < count; i++) {
		clocks->data[i].clocks_in_khz =
			dpm_table->dpm_levels[i].value * 1000;
		clocks->data[i].latency_in_us = 0;
	}

	return 0;
}

static int vega20_get_socclocks(struct pp_hwmgr *hwmgr,
		struct pp_clock_levels_with_latency *clocks)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.soc_table);
	int i, count;

	if (!data->smu_features[GNLD_DPM_SOCCLK].enabled)
		return -1;

	count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
	clocks->num_levels = count;

	for (i = 0; i < count; i++) {
		clocks->data[i].clocks_in_khz =
			dpm_table->dpm_levels[i].value * 1000;
		clocks->data[i].latency_in_us = 0;
	}

	return 0;
}

static int vega20_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_latency *clocks)
{
	int ret = 0;

	switch (type) {
	case amd_pp_sys_clock:
		ret = vega20_get_sclks(hwmgr, clocks);
		break;
	case amd_pp_mem_clock:
		ret = vega20_get_memclocks(hwmgr, clocks);
		break;
	case amd_pp_dcef_clock:
		ret = vega20_get_dcefclocks(hwmgr, clocks);
		break;
	case amd_pp_soc_clock:
		ret = vega20_get_socclocks(hwmgr, clocks);
		break;
	default:
		return -EINVAL;
	}

	return ret;
}

static int vega20_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_voltage *clocks)
{
	clocks->num_levels = 0;

	return 0;
}

static int vega20_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
		void *clock_ranges)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	Watermarks_t *table = &(data->smc_state_table.water_marks_table);
	struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_ranges;

	if (!data->registry_data.disable_water_mark &&
	    data->smu_features[GNLD_DPM_DCEFCLK].supported &&
	    data->smu_features[GNLD_DPM_SOCCLK].supported) {
		smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
		data->water_marks_bitmap |= WaterMarksExist;
		data->water_marks_bitmap &= ~WaterMarksLoaded;
	}

	return 0;
}

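/*
 * sysfs OD interface: validate and stage user overdrive edits (sclk/mclk
 * limits and the gfx voltage curve), and restore or commit the overdrive
 * table on request.
 */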
static int vega20_odn_edit_dpm_table(struct pp_hwmgr *hwmgr,
		enum PP_OD_DPM_TABLE_COMMAND type,
		long *input, uint32_t size)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_od8_single_setting *od8_settings =
			data->od8_settings.od8_settings_array;
	OverDriveTable_t *od_table =
			&(data->smc_state_table.overdrive_table);
	int32_t input_index, input_clk, input_vol, i;
	int od8_id;
	int ret = 0;

	PP_ASSERT_WITH_CODE(input, "NULL user input for clock and voltage",
			return -EINVAL);

	switch (type) {
	case PP_OD_EDIT_SCLK_VDDC_TABLE:
		if (!(od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
		    od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id)) {
			pr_info("Sclk min/max frequency overdrive not supported\n");
			return -EOPNOTSUPP;
		}

		for (i = 0; i < size; i += 2) {
			if (i + 2 > size) {
				pr_info("invalid number of input parameters %d\n",
						size);
				return -EINVAL;
			}

			input_index = input[i];
			input_clk = input[i + 1];

			if (input_index != 0 && input_index != 1) {
				pr_info("Invalid index %d\n", input_index);
				pr_info("Support min/max sclk frequency setting only which index by 0/1\n");
				return -EINVAL;
			}

			if (input_clk < od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value ||
			    input_clk > od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value) {
				pr_info("clock freq %d is not within allowed range [%d - %d]\n",
						input_clk,
						od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value,
						od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value);
				return -EINVAL;
			}

			if ((input_index == 0 && od_table->GfxclkFmin != input_clk) ||
			    (input_index == 1 && od_table->GfxclkFmax != input_clk))
				data->gfxclk_overdrive = true;

			if (input_index == 0)
				od_table->GfxclkFmin = input_clk;
			else
				od_table->GfxclkFmax = input_clk;
		}
		break;

	case PP_OD_EDIT_MCLK_VDDC_TABLE:
		if (!od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
			pr_info("Mclk max frequency overdrive not supported\n");
			return -EOPNOTSUPP;
		}

		for (i = 0; i < size; i += 2) {
			if (i + 2 > size) {
				pr_info("invalid number of input parameters %d\n",
						size);
				return -EINVAL;
			}

			input_index = input[i];
			input_clk = input[i + 1];

			if (input_index != 1) {
				pr_info("Invalid index %d\n", input_index);
				pr_info("Support max Mclk frequency setting only which index by 1\n");
				return -EINVAL;
			}

			if (input_clk < od8_settings[OD8_SETTING_UCLK_FMAX].min_value ||
			    input_clk > od8_settings[OD8_SETTING_UCLK_FMAX].max_value) {
				pr_info("clock freq %d is not within allowed range [%d - %d]\n",
						input_clk,
						od8_settings[OD8_SETTING_UCLK_FMAX].min_value,
						od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
				return -EINVAL;
			}

			if (input_index == 1 && od_table->UclkFmax != input_clk)
				data->memclk_overdrive = true;

			od_table->UclkFmax = input_clk;
		}
		break;

	case PP_OD_EDIT_VDDC_CURVE:
		if (!(od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id &&
		    od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id &&
		    od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id &&
		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id)) {
			pr_info("Voltage curve calibrate not supported\n");
			return -EOPNOTSUPP;
		}

		for (i = 0; i < size; i += 3) {
			if (i + 3 > size) {
				pr_info("invalid number of input parameters %d\n",
						size);
				return -EINVAL;
			}

			input_index = input[i];
			input_clk = input[i + 1];
			input_vol = input[i + 2];

			if (input_index > 2) {
				pr_info("Setting for point %d is not supported\n",
						input_index);
				pr_info("Three supported points index by 0, 1, 2\n");
				return -EINVAL;
			}

			od8_id = OD8_SETTING_GFXCLK_FREQ1 + 2 * input_index;
			if (input_clk < od8_settings[od8_id].min_value ||
			    input_clk > od8_settings[od8_id].max_value) {
				pr_info("clock freq %d is not within allowed range [%d - %d]\n",
						input_clk,
						od8_settings[od8_id].min_value,
						od8_settings[od8_id].max_value);
				return -EINVAL;
			}

			od8_id = OD8_SETTING_GFXCLK_VOLTAGE1 + 2 * input_index;
			if (input_vol < od8_settings[od8_id].min_value ||
			    input_vol > od8_settings[od8_id].max_value) {
				pr_info("clock voltage %d is not within allowed range [%d - %d]\n",
						input_vol,
						od8_settings[od8_id].min_value,
						od8_settings[od8_id].max_value);
				return -EINVAL;
			}

			switch (input_index) {
			case 0:
				od_table->GfxclkFreq1 = input_clk;
				od_table->GfxclkVolt1 = input_vol * VOLTAGE_SCALE;
				break;
			case 1:
				od_table->GfxclkFreq2 = input_clk;
				od_table->GfxclkVolt2 = input_vol * VOLTAGE_SCALE;
				break;
			case 2:
				od_table->GfxclkFreq3 = input_clk;
				od_table->GfxclkVolt3 = input_vol * VOLTAGE_SCALE;
				break;
			}
		}
		break;

	case PP_OD_RESTORE_DEFAULT_TABLE:
		data->gfxclk_overdrive = false;
		data->memclk_overdrive = false;

		ret = smum_smc_table_manager(hwmgr,
					     (uint8_t *)od_table,
					     TABLE_OVERDRIVE, true);
		PP_ASSERT_WITH_CODE(!ret,
				"Failed to export overdrive table!",
				return ret);
		break;

	case PP_OD_COMMIT_DPM_TABLE:
		ret = smum_smc_table_manager(hwmgr,
					     (uint8_t *)od_table,
					     TABLE_OVERDRIVE, false);
		PP_ASSERT_WITH_CODE(!ret,
				"Failed to import overdrive table!",
				return ret);

		/* retrieve updated gfxclk table */
		if (data->gfxclk_overdrive) {
			data->gfxclk_overdrive = false;

			ret = vega20_setup_gfxclk_dpm_table(hwmgr);
			if (ret)
				return ret;
		}

		/* retrieve updated memclk table */
		if (data->memclk_overdrive) {
			data->memclk_overdrive = false;

			ret = vega20_setup_memclk_dpm_table(hwmgr);
			if (ret)
				return ret;
		}
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int vega20_set_mp1_state(struct pp_hwmgr *hwmgr,
				enum pp_mp1_state mp1_state)
{
	uint16_t msg;
	int ret;

	switch (mp1_state) {
	case PP_MP1_STATE_SHUTDOWN:
		msg = PPSMC_MSG_PrepareMp1ForShutdown;
		break;
	case PP_MP1_STATE_UNLOAD:
		msg = PPSMC_MSG_PrepareMp1ForUnload;
		break;
	case PP_MP1_STATE_RESET:
		msg = PPSMC_MSG_PrepareMp1ForReset;
		break;
	case PP_MP1_STATE_NONE:
	default:
		return 0;
	}

	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0,
			    "[PrepareMp1] Failed!",
			    return ret);

	return 0;
}

static int vega20_get_ppfeature_status(struct pp_hwmgr *hwmgr, char *buf)
{
	static const char *ppfeature_name[] = {
		/* ... one entry per GNLD_* SMU feature ... */
	};
	static const char *output_title[] = {
		/* ... column titles ... */
	};
	uint64_t features_enabled;
	int i;
	int ret = 0;
	int size = 0;

	ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
	PP_ASSERT_WITH_CODE(!ret,
			"[EnableAllSmuFeatures] Failed to get enabled smc features!",
			return ret);

	size += sprintf(buf + size, "Current ppfeatures: 0x%016llx\n", features_enabled);
	size += sprintf(buf + size, "%-19s %-22s %s\n",
			output_title[0],
			output_title[1],
			output_title[2]);
	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
		size += sprintf(buf + size, "%-19s 0x%016llx %6s\n",
				ppfeature_name[i],
				1ULL << i,
				(features_enabled & (1ULL << i)) ? "Y" : "N");
	}

	return size;
}

static int vega20_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks)
{
	uint64_t features_enabled;
	uint64_t features_to_enable;
	uint64_t features_to_disable;
	int ret = 0;

	if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX))
		return -EINVAL;

	ret = vega20_get_enabled_smc_features(hwmgr, &features_enabled);
	if (ret)
		return ret;

	features_to_disable =
		features_enabled & ~new_ppfeature_masks;
	features_to_enable =
		~features_enabled & new_ppfeature_masks;

	pr_debug("features_to_disable 0x%llx\n", features_to_disable);
	pr_debug("features_to_enable 0x%llx\n", features_to_enable);

	if (features_to_disable) {
		ret = vega20_enable_smc_features(hwmgr, false, features_to_disable);
		if (ret)
			return ret;
	}

	if (features_to_enable) {
		ret = vega20_enable_smc_features(hwmgr, true, features_to_enable);
		if (ret)
			return ret;
	}

	return 0;
}

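/*
 * Back-end for the pp_dpm_* and pp_od_clk_voltage sysfs reads: prints the
 * level table for the requested clock type, marking the current level.
 */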
static int vega20_print_clock_levels(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, char *buf)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_od8_single_setting *od8_settings =
			data->od8_settings.od8_settings_array;
	OverDriveTable_t *od_table =
			&(data->smc_state_table.overdrive_table);
	struct phm_ppt_v3_information *pptable_information =
			(struct phm_ppt_v3_information *)hwmgr->pptable;
	PPTable_t *pptable = (PPTable_t *)pptable_information->smc_pptable;
	struct amdgpu_device *adev = hwmgr->adev;
	struct pp_clock_levels_with_latency clocks;
	struct vega20_single_dpm_table *fclk_dpm_table =
			&(data->dpm_table.fclk_table);
	int i, now, size = 0;
	int ret = 0;
	uint32_t gen_speed, lane_width, current_gen_speed, current_lane_width;

	switch (type) {
	case PP_SCLK:
		ret = vega20_get_current_clk_freq(hwmgr, PPCLK_GFXCLK, &now);
		PP_ASSERT_WITH_CODE(!ret,
				"Attempt to get current gfx clk Failed!",
				return ret);

		if (vega20_get_sclks(hwmgr, &clocks)) {
			size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
				now / 100);
			break;
		}

		for (i = 0; i < clocks.num_levels; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
				i, clocks.data[i].clocks_in_khz / 1000,
				(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
		break;

	case PP_MCLK:
		ret = vega20_get_current_clk_freq(hwmgr, PPCLK_UCLK, &now);
		PP_ASSERT_WITH_CODE(!ret,
				"Attempt to get current mclk freq Failed!",
				return ret);

		if (vega20_get_memclocks(hwmgr, &clocks)) {
			size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
				now / 100);
			break;
		}

		for (i = 0; i < clocks.num_levels; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
				i, clocks.data[i].clocks_in_khz / 1000,
				(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
		break;

	case PP_SOCCLK:
		ret = vega20_get_current_clk_freq(hwmgr, PPCLK_SOCCLK, &now);
		PP_ASSERT_WITH_CODE(!ret,
				"Attempt to get current socclk freq Failed!",
				return ret);

		if (vega20_get_socclocks(hwmgr, &clocks)) {
			size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
				now / 100);
			break;
		}

		for (i = 0; i < clocks.num_levels; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
				i, clocks.data[i].clocks_in_khz / 1000,
				(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
		break;

	case PP_FCLK:
		ret = vega20_get_current_clk_freq(hwmgr, PPCLK_FCLK, &now);
		PP_ASSERT_WITH_CODE(!ret,
				"Attempt to get current fclk freq Failed!",
				return ret);

		for (i = 0; i < fclk_dpm_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
				i, fclk_dpm_table->dpm_levels[i].value,
				fclk_dpm_table->dpm_levels[i].value == (now / 100) ? "*" : "");
		break;

	case PP_DCEFCLK:
		ret = vega20_get_current_clk_freq(hwmgr, PPCLK_DCEFCLK, &now);
		PP_ASSERT_WITH_CODE(!ret,
				"Attempt to get current dcefclk freq Failed!",
				return ret);

		if (vega20_get_dcefclocks(hwmgr, &clocks)) {
			size += sprintf(buf + size, "0: %uMhz * (DPM disabled)\n",
				now / 100);
			break;
		}

		for (i = 0; i < clocks.num_levels; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
				i, clocks.data[i].clocks_in_khz / 1000,
				(clocks.data[i].clocks_in_khz == now * 10) ? "*" : "");
		break;

	case PP_PCIE:
		current_gen_speed = (RREG32_PCIE(smnPCIE_LC_SPEED_CNTL) &
			     PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK)
			    >> PSWUSP0_PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;
		current_lane_width = (RREG32_PCIE(smnPCIE_LC_LINK_WIDTH_CNTL) &
			      PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK)
			    >> PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;
		for (i = 0; i < NUM_LINK_LEVELS; i++) {
			if (i == 1 && data->pcie_parameters_override) {
				gen_speed = data->pcie_gen_level1;
				lane_width = data->pcie_width_level1;
			} else {
				gen_speed = pptable->PcieGenSpeed[i];
				lane_width = pptable->PcieLaneCount[i];
			}
			size += sprintf(buf + size, "%d: %s %s %dMhz %s\n", i,
					(gen_speed == 0) ? "2.5GT/s," :
					(gen_speed == 1) ? "5.0GT/s," :
					(gen_speed == 2) ? "8.0GT/s," :
					(gen_speed == 3) ? "16.0GT/s," : "",
					(lane_width == 1) ? "x1" :
					(lane_width == 2) ? "x2" :
					(lane_width == 3) ? "x4" :
					(lane_width == 4) ? "x8" :
					(lane_width == 5) ? "x12" :
					(lane_width == 6) ? "x16" : "",
					pptable->LclkFreq[i],
					(current_gen_speed == gen_speed) &&
					(current_lane_width == lane_width) ?
					"*" : "");
		}
		break;

	case OD_SCLK:
		if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
		    od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
			size = sprintf(buf, "%s:\n", "OD_SCLK");
			size += sprintf(buf + size, "0: %10uMhz\n",
				od_table->GfxclkFmin);
			size += sprintf(buf + size, "1: %10uMhz\n",
				od_table->GfxclkFmax);
		}
		break;

	case OD_MCLK:
		if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
			size = sprintf(buf, "%s:\n", "OD_MCLK");
			size += sprintf(buf + size, "1: %10uMhz\n",
				od_table->UclkFmax);
		}
		break;

	case OD_VDDC_CURVE:
		if (od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id &&
		    od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id &&
		    od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id &&
		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
			size = sprintf(buf, "%s:\n", "OD_VDDC_CURVE");
			size += sprintf(buf + size, "0: %10uMhz %10dmV\n",
				od_table->GfxclkFreq1,
				od_table->GfxclkVolt1 / VOLTAGE_SCALE);
			size += sprintf(buf + size, "1: %10uMhz %10dmV\n",
				od_table->GfxclkFreq2,
				od_table->GfxclkVolt2 / VOLTAGE_SCALE);
			size += sprintf(buf + size, "2: %10uMhz %10dmV\n",
				od_table->GfxclkFreq3,
				od_table->GfxclkVolt3 / VOLTAGE_SCALE);
		}
		break;

	case OD_RANGE:
		size = sprintf(buf, "%s:\n", "OD_RANGE");

		if (od8_settings[OD8_SETTING_GFXCLK_FMIN].feature_id &&
		    od8_settings[OD8_SETTING_GFXCLK_FMAX].feature_id) {
			size += sprintf(buf + size, "SCLK: %7uMhz %10uMhz\n",
				od8_settings[OD8_SETTING_GFXCLK_FMIN].min_value,
				od8_settings[OD8_SETTING_GFXCLK_FMAX].max_value);
		}

		if (od8_settings[OD8_SETTING_UCLK_FMAX].feature_id) {
			size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
				od8_settings[OD8_SETTING_UCLK_FMAX].min_value,
				od8_settings[OD8_SETTING_UCLK_FMAX].max_value);
		}

		if (od8_settings[OD8_SETTING_GFXCLK_FREQ1].feature_id &&
		    od8_settings[OD8_SETTING_GFXCLK_FREQ2].feature_id &&
		    od8_settings[OD8_SETTING_GFXCLK_FREQ3].feature_id &&
		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].feature_id &&
		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].feature_id &&
		    od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].feature_id) {
			size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
				od8_settings[OD8_SETTING_GFXCLK_FREQ1].min_value,
				od8_settings[OD8_SETTING_GFXCLK_FREQ1].max_value);
			size += sprintf(buf + size, "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
				od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].min_value,
				od8_settings[OD8_SETTING_GFXCLK_VOLTAGE1].max_value);
			size += sprintf(buf + size, "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
				od8_settings[OD8_SETTING_GFXCLK_FREQ2].min_value,
				od8_settings[OD8_SETTING_GFXCLK_FREQ2].max_value);
			size += sprintf(buf + size, "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
				od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].min_value,
				od8_settings[OD8_SETTING_GFXCLK_VOLTAGE2].max_value);
			size += sprintf(buf + size, "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
				od8_settings[OD8_SETTING_GFXCLK_FREQ3].min_value,
				od8_settings[OD8_SETTING_GFXCLK_FREQ3].max_value);
			size += sprintf(buf + size, "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
				od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].min_value,
				od8_settings[OD8_SETTING_GFXCLK_VOLTAGE3].max_value);
		}
		break;

	default:
		break;
	}

	return size;
}

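/*
 * Pin UCLK (hard min) and FCLK (soft min) to their highest DPM levels;
 * used before display configuration changes.
 */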
static int vega20_set_uclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr,
		struct vega20_single_dpm_table *dpm_table)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	int ret = 0;

	if (data->smu_features[GNLD_DPM_UCLK].enabled) {
		PP_ASSERT_WITH_CODE(dpm_table->count > 0,
				"[SetUclkToHightestDpmLevel] Dpm table has no entry!",
				return -EINVAL);
		PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_UCLK_DPM_LEVELS,
				"[SetUclkToHightestDpmLevel] Dpm table has too many entries!",
				return -EINVAL);

		dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetHardMinByFreq,
				(PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level)),
				"[SetUclkToHightestDpmLevel] Set hard min uclk failed!",
				return ret);
	}

	return ret;
}

static int vega20_set_fclk_to_highest_dpm_level(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_single_dpm_table *dpm_table = &(data->dpm_table.fclk_table);
	int ret = 0;

	if (data->smu_features[GNLD_DPM_FCLK].enabled) {
		PP_ASSERT_WITH_CODE(dpm_table->count > 0,
				"[SetFclkToHightestDpmLevel] Dpm table has no entry!",
				return -EINVAL);
		PP_ASSERT_WITH_CODE(dpm_table->count <= NUM_FCLK_DPM_LEVELS,
				"[SetFclkToHightestDpmLevel] Dpm table has too many entries!",
				return -EINVAL);

		dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		PP_ASSERT_WITH_CODE(!(ret = smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetSoftMinByFreq,
				(PPCLK_FCLK << 16) | dpm_table->dpm_state.soft_min_level)),
				"[SetFclkToHightestDpmLevel] Set soft min fclk failed!",
				return ret);
	}

	return ret;
}

static int vega20_pre_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	int ret = 0;

	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_NumOfDisplays, 0);

	ret = vega20_set_uclk_to_highest_dpm_level(hwmgr,
			&data->dpm_table.mem_table);
	if (ret)
		return ret;

	return vega20_set_fclk_to_highest_dpm_level(hwmgr);
}

static int vega20_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	int result = 0;
	Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);

	if ((data->water_marks_bitmap & WaterMarksExist) &&
	    !(data->water_marks_bitmap & WaterMarksLoaded)) {
		result = smum_smc_table_manager(hwmgr,
						(uint8_t *)wm_table, TABLE_WATERMARKS, false);
		PP_ASSERT_WITH_CODE(!result,
				"Failed to update WMTABLE!",
				return result);
		data->water_marks_bitmap |= WaterMarksLoaded;
	}

	if ((data->water_marks_bitmap & WaterMarksExist) &&
	    data->smu_features[GNLD_DPM_DCEFCLK].supported &&
	    data->smu_features[GNLD_DPM_SOCCLK].supported) {
		result = smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_NumOfDisplays,
			hwmgr->display_config->num_display);
	}

	return result;
}

int vega20_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	int ret = 0;

	if (data->smu_features[GNLD_DPM_UVD].supported) {
		if (data->smu_features[GNLD_DPM_UVD].enabled == enable) {
			if (enable)
				PP_DBG_LOG("[EnableDisableUVDDPM] feature DPM UVD already enabled!\n");
			else
				PP_DBG_LOG("[EnableDisableUVDDPM] feature DPM UVD already disabled!\n");
		}

		ret = vega20_enable_smc_features(hwmgr,
				enable,
				data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap);
		PP_ASSERT_WITH_CODE(!ret,
				"[EnableDisableUVDDPM] Attempt to Enable/Disable DPM UVD Failed!",
				return ret);
		data->smu_features[GNLD_DPM_UVD].enabled = enable;
	}

	return 0;
}

static void vega20_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);

	if (data->vce_power_gated == bgate)
		return;

	data->vce_power_gated = bgate;
	if (bgate) {
		vega20_enable_disable_vce_dpm(hwmgr, !bgate);
		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_VCE,
						AMD_PG_STATE_GATE);
	} else {
		amdgpu_device_ip_set_powergating_state(hwmgr->adev,
						AMD_IP_BLOCK_TYPE_VCE,
						AMD_PG_STATE_UNGATE);
		vega20_enable_disable_vce_dpm(hwmgr, !bgate);
	}
}

static void vega20_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);

	if (data->uvd_power_gated == bgate)
		return;

	data->uvd_power_gated = bgate;
	vega20_enable_disable_uvd_dpm(hwmgr, !bgate);
}

static int vega20_apply_clocks_adjust_rules(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	struct vega20_single_dpm_table *dpm_table;
	bool vblank_too_short = false;
	bool disable_mclk_switching;
	bool disable_fclk_switching;
	uint32_t i, latency;

	disable_mclk_switching = ((1 < hwmgr->display_config->num_display) &&
				  !hwmgr->display_config->multi_monitor_in_sync) ||
				  vblank_too_short;
	latency = hwmgr->display_config->dce_tolerable_mclk_in_active_latency;

	/* gfxclk */
	dpm_table = &(data->dpm_table.gfx_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		if (VEGA20_UMD_PSTATE_GFXCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_GFXCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	/* memclk */
	dpm_table = &(data->dpm_table.mem_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		if (VEGA20_UMD_PSTATE_MCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_MCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[0].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	/* honour DAL's UCLK Hardmin */
	if (dpm_table->dpm_state.hard_min_level < (hwmgr->display_config->min_mem_set_clock / 100))
		dpm_table->dpm_state.hard_min_level = hwmgr->display_config->min_mem_set_clock / 100;

	/* Hardmin is dependent on displayconfig */
	if (disable_mclk_switching) {
		dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		for (i = 0; i < data->mclk_latency_table.count - 1; i++) {
			if (data->mclk_latency_table.entries[i].latency <= latency) {
				if (dpm_table->dpm_levels[i].value >= (hwmgr->display_config->min_mem_set_clock / 100)) {
					dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[i].value;
					break;
				}
			}
		}
	}

	if (hwmgr->display_config->nb_pstate_switch_disable)
		dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;

	if ((disable_mclk_switching &&
	    (dpm_table->dpm_state.hard_min_level == dpm_table->dpm_levels[dpm_table->count - 1].value)) ||
	     hwmgr->display_config->min_mem_set_clock / 100 >= dpm_table->dpm_levels[dpm_table->count - 1].value)
		disable_fclk_switching = true;
	else
		disable_fclk_switching = false;

	/* fclk */
	dpm_table = &(data->dpm_table.fclk_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;
	if (hwmgr->display_config->nb_pstate_switch_disable || disable_fclk_switching)
		dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;

	/* vclk */
	dpm_table = &(data->dpm_table.vclk_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	/* dclk */
	dpm_table = &(data->dpm_table.dclk_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		if (VEGA20_UMD_PSTATE_UVDCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_UVDCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	/* socclk */
	dpm_table = &(data->dpm_table.soc_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		if (VEGA20_UMD_PSTATE_SOCCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_SOCCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_SOCCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	/* eclk */
	dpm_table = &(data->dpm_table.eclk_table);
	dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.soft_max_level = VG20_CLOCK_MAX_DEFAULT;
	dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[0].value;
	dpm_table->dpm_state.hard_max_level = VG20_CLOCK_MAX_DEFAULT;

	if (PP_CAP(PHM_PlatformCaps_UMDPState)) {
		if (VEGA20_UMD_PSTATE_VCEMCLK_LEVEL < dpm_table->count) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_VCEMCLK_LEVEL].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[VEGA20_UMD_PSTATE_VCEMCLK_LEVEL].value;
		}

		if (hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
			dpm_table->dpm_state.soft_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
			dpm_table->dpm_state.soft_max_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
		}
	}

	return 0;
}

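/*
 * Report whether the SMU needs to be updated for the new display
 * configuration: either the display count or, with gfx deep sleep enabled,
 * the minimum clock in self refresh has changed.
 */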
static bool
vega20_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	bool is_update_required = false;

	if (data->display_timing.num_existing_displays !=
			hwmgr->display_config->num_display)
		is_update_required = true;

	if (data->registry_data.gfx_clk_deep_sleep_support &&
	    (data->display_timing.min_clock_in_sr !=
	     hwmgr->display_config->min_core_set_clock_in_sr))
		is_update_required = true;

	return is_update_required;
}

static int vega20_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int ret = 0;

	ret = vega20_disable_all_smu_features(hwmgr);
	PP_ASSERT_WITH_CODE(!ret,
			"[DisableDpmTasks] Failed to disable all smu features!",
			return ret);

	return 0;
}

static int vega20_power_off_asic(struct pp_hwmgr *hwmgr)
{
	struct vega20_hwmgr *data = (struct vega20_hwmgr *)(hwmgr->backend);
	int result;

	result = vega20_disable_dpm_tasks(hwmgr);
	PP_ASSERT_WITH_CODE((0 == result),
			"[PowerOffAsic] Failed to disable DPM!",
			);
	data->water_marks_bitmap &= ~(WaterMarksLoaded);

	return result;
}

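/* Map a PP_SMC_POWER_PROFILE_* index to its WORKLOAD_*_BIT position. */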
static int conv_power_profile_to_pplib_workload(int power_profile)
{
	int pplib_workload = 0;

	switch (power_profile) {
	case PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT:
		pplib_workload = WORKLOAD_DEFAULT_BIT;
		break;
	case PP_SMC_POWER_PROFILE_FULLSCREEN3D:
		pplib_workload = WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT;
		break;
	case PP_SMC_POWER_PROFILE_POWERSAVING:
		pplib_workload = WORKLOAD_PPLIB_POWER_SAVING_BIT;
		break;
	case PP_SMC_POWER_PROFILE_VIDEO:
		pplib_workload = WORKLOAD_PPLIB_VIDEO_BIT;
		break;
	case PP_SMC_POWER_PROFILE_VR:
		pplib_workload = WORKLOAD_PPLIB_VR_BIT;
		break;
	case PP_SMC_POWER_PROFILE_COMPUTE:
		pplib_workload = WORKLOAD_PPLIB_COMPUTE_BIT;
		break;
	case PP_SMC_POWER_PROFILE_CUSTOM:
		pplib_workload = WORKLOAD_PPLIB_CUSTOM_BIT;
		break;
	}

	return pplib_workload;
}

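/*
 * Print a table of all power profile modes into @buf: for each profile,
 * fetch the activity monitor coefficients from the SMU and emit one line per
 * clock domain (GFXCLK, SOCCLK, UCLK, FCLK), marking the active profile
 * with '*'.
 */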
static int vega20_get_power_profile_mode(struct pp_hwmgr *hwmgr, char *buf)
{
	DpmActivityMonitorCoeffInt_t activity_monitor;
	uint32_t i, size = 0;
	uint16_t workload_type = 0;
	static const char *profile_name[] = {
					"BOOTUP_DEFAULT",
					"3D_FULL_SCREEN",
					"POWER_SAVING",
					"VIDEO",
					"VR",
					"COMPUTE",
					"CUSTOM"};
	static const char *title[] = {
			"PROFILE_INDEX(NAME)",
			"CLOCK_TYPE(NAME)",
			"FPS",
			"UseRlcBusy",
			"MinActiveFreqType",
			"MinActiveFreq",
			"BoosterFreqType",
			"BoosterFreq",
			"PD_Data_limit_c",
			"PD_Data_error_coeff",
			"PD_Data_error_rate_coeff"};
	int result = 0;

	if (!buf)
		return -EINVAL;

	size += sprintf(buf + size, "%16s %s %s %s %s %s %s %s %s %s %s\n",
			title[0], title[1], title[2], title[3], title[4], title[5],
			title[6], title[7], title[8], title[9], title[10]);

	for (i = 0; i <= PP_SMC_POWER_PROFILE_CUSTOM; i++) {
		/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
		workload_type = conv_power_profile_to_pplib_workload(i);
		result = vega20_get_activity_monitor_coeff(hwmgr,
				(uint8_t *)(&activity_monitor), workload_type);
		PP_ASSERT_WITH_CODE(!result,
				"[GetPowerProfile] Failed to get activity monitor!",
				return result);

		size += sprintf(buf + size, "%2d %14s%s:\n",
			i, profile_name[i], (i == hwmgr->power_profile_mode) ? "*" : " ");

		size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
			" ",
			0,
			"GFXCLK",
			activity_monitor.Gfx_FPS,
			activity_monitor.Gfx_UseRlcBusy,
			activity_monitor.Gfx_MinActiveFreqType,
			activity_monitor.Gfx_MinActiveFreq,
			activity_monitor.Gfx_BoosterFreqType,
			activity_monitor.Gfx_BoosterFreq,
			activity_monitor.Gfx_PD_Data_limit_c,
			activity_monitor.Gfx_PD_Data_error_coeff,
			activity_monitor.Gfx_PD_Data_error_rate_coeff);

		size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
			" ",
			1,
			"SOCCLK",
			activity_monitor.Soc_FPS,
			activity_monitor.Soc_UseRlcBusy,
			activity_monitor.Soc_MinActiveFreqType,
			activity_monitor.Soc_MinActiveFreq,
			activity_monitor.Soc_BoosterFreqType,
			activity_monitor.Soc_BoosterFreq,
			activity_monitor.Soc_PD_Data_limit_c,
			activity_monitor.Soc_PD_Data_error_coeff,
			activity_monitor.Soc_PD_Data_error_rate_coeff);

		size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
			" ",
			2,
			"UCLK",
			activity_monitor.Mem_FPS,
			activity_monitor.Mem_UseRlcBusy,
			activity_monitor.Mem_MinActiveFreqType,
			activity_monitor.Mem_MinActiveFreq,
			activity_monitor.Mem_BoosterFreqType,
			activity_monitor.Mem_BoosterFreq,
			activity_monitor.Mem_PD_Data_limit_c,
			activity_monitor.Mem_PD_Data_error_coeff,
			activity_monitor.Mem_PD_Data_error_rate_coeff);

		size += sprintf(buf + size, "%19s %d(%13s) %7d %7d %7d %7d %7d %7d %7d %7d %7d\n",
			" ",
			3,
			"FCLK",
			activity_monitor.Fclk_FPS,
			activity_monitor.Fclk_UseRlcBusy,
			activity_monitor.Fclk_MinActiveFreqType,
			activity_monitor.Fclk_MinActiveFreq,
			activity_monitor.Fclk_BoosterFreqType,
			activity_monitor.Fclk_BoosterFreq,
			activity_monitor.Fclk_PD_Data_limit_c,
			activity_monitor.Fclk_PD_Data_error_coeff,
			activity_monitor.Fclk_PD_Data_error_rate_coeff);
	}

	return size;
}

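/*
 * Select a power profile mode. @input[@size] holds the profile index; for
 * PP_SMC_POWER_PROFILE_CUSTOM with @size >= 10, input[0] selects the clock
 * domain (0 gfxclk, 1 socclk, 2 uclk, 3 fclk) and input[1..9] supply the
 * activity monitor coefficients, while @size == 0 re-applies a previously
 * configured custom profile.
 */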
static int vega20_set_power_profile_mode(struct pp_hwmgr *hwmgr, long *input, uint32_t size)
{
	DpmActivityMonitorCoeffInt_t activity_monitor;
	int workload_type, result = 0;
	uint32_t power_profile_mode = input[size];

	if (power_profile_mode > PP_SMC_POWER_PROFILE_CUSTOM) {
		pr_err("Invalid power profile mode %d\n", power_profile_mode);
		return -EINVAL;
	}

	if (power_profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) {
		struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
		if (size == 0 && !data->is_custom_profile_set)
			return -EINVAL;
		if (size < 10 && size != 0)
			return -EINVAL;

		result = vega20_get_activity_monitor_coeff(hwmgr,
				(uint8_t *)(&activity_monitor),
				WORKLOAD_PPLIB_CUSTOM_BIT);
		PP_ASSERT_WITH_CODE(!result,
				"[SetPowerProfile] Failed to get activity monitor!",
				return result);

		/* If size==0, then we want to apply the already-configured
		 * CUSTOM profile again. Just apply it, since we checked its
		 * validity above
		 */
		if (size == 0)
			goto out;

		switch (input[0]) {
		case 0: /* Gfxclk */
			activity_monitor.Gfx_FPS = input[1];
			activity_monitor.Gfx_UseRlcBusy = input[2];
			activity_monitor.Gfx_MinActiveFreqType = input[3];
			activity_monitor.Gfx_MinActiveFreq = input[4];
			activity_monitor.Gfx_BoosterFreqType = input[5];
			activity_monitor.Gfx_BoosterFreq = input[6];
			activity_monitor.Gfx_PD_Data_limit_c = input[7];
			activity_monitor.Gfx_PD_Data_error_coeff = input[8];
			activity_monitor.Gfx_PD_Data_error_rate_coeff = input[9];
			break;
		case 1: /* Socclk */
			activity_monitor.Soc_FPS = input[1];
			activity_monitor.Soc_UseRlcBusy = input[2];
			activity_monitor.Soc_MinActiveFreqType = input[3];
			activity_monitor.Soc_MinActiveFreq = input[4];
			activity_monitor.Soc_BoosterFreqType = input[5];
			activity_monitor.Soc_BoosterFreq = input[6];
			activity_monitor.Soc_PD_Data_limit_c = input[7];
			activity_monitor.Soc_PD_Data_error_coeff = input[8];
			activity_monitor.Soc_PD_Data_error_rate_coeff = input[9];
			break;
		case 2: /* Uclk */
			activity_monitor.Mem_FPS = input[1];
			activity_monitor.Mem_UseRlcBusy = input[2];
			activity_monitor.Mem_MinActiveFreqType = input[3];
			activity_monitor.Mem_MinActiveFreq = input[4];
			activity_monitor.Mem_BoosterFreqType = input[5];
			activity_monitor.Mem_BoosterFreq = input[6];
			activity_monitor.Mem_PD_Data_limit_c = input[7];
			activity_monitor.Mem_PD_Data_error_coeff = input[8];
			activity_monitor.Mem_PD_Data_error_rate_coeff = input[9];
			break;
		case 3: /* Fclk */
			activity_monitor.Fclk_FPS = input[1];
			activity_monitor.Fclk_UseRlcBusy = input[2];
			activity_monitor.Fclk_MinActiveFreqType = input[3];
			activity_monitor.Fclk_MinActiveFreq = input[4];
			activity_monitor.Fclk_BoosterFreqType = input[5];
			activity_monitor.Fclk_BoosterFreq = input[6];
			activity_monitor.Fclk_PD_Data_limit_c = input[7];
			activity_monitor.Fclk_PD_Data_error_coeff = input[8];
			activity_monitor.Fclk_PD_Data_error_rate_coeff = input[9];
			break;
		}

		result = vega20_set_activity_monitor_coeff(hwmgr,
				(uint8_t *)(&activity_monitor),
				WORKLOAD_PPLIB_CUSTOM_BIT);
		data->is_custom_profile_set = true;
		PP_ASSERT_WITH_CODE(!result,
				"[SetPowerProfile] Failed to set activity monitor!",
				return result);
	}

out:
	/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
	workload_type =
		conv_power_profile_to_pplib_workload(power_profile_mode);
	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetWorkloadMask,
					    1 << workload_type);

	hwmgr->power_profile_mode = power_profile_mode;

	return 0;
}

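/* Pass the CAC log buffer addresses (virtual and MC) and its size to the SMU. */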
static int vega20_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
					uint32_t virtual_addr_low,
					uint32_t virtual_addr_hi,
					uint32_t mc_addr_low,
					uint32_t mc_addr_hi,
					uint32_t size)
{
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetSystemVirtualDramAddrHigh,
					virtual_addr_hi);
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetSystemVirtualDramAddrLow,
					virtual_addr_low);
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramLogSetDramAddrHigh,
					mc_addr_hi);
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramLogSetDramAddrLow,
					mc_addr_low);
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramLogSetDramSize,
					size);

	return 0;
}

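/*
 * Build the thermal trip points, starting from the SMU7ThermalWithDelayPolicy
 * defaults and scaling the pptable limits (edge, hotspot, HBM) by
 * PP_TEMPERATURE_UNITS_PER_CENTIGRADES.
 */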
static int vega20_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
		struct PP_TemperatureRange *thermal_data)
{
	struct vega20_hwmgr *data =
			(struct vega20_hwmgr *)(hwmgr->backend);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);

	memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));

	thermal_data->max = pp_table->TedgeLimit *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	thermal_data->edge_emergency_max = (pp_table->TedgeLimit + CTF_OFFSET_EDGE) *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	thermal_data->hotspot_crit_max = pp_table->ThotspotLimit *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	thermal_data->hotspot_emergency_max = (pp_table->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	thermal_data->mem_crit_max = pp_table->ThbmLimit *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM) *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;

	return 0;
}

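/*
 * Request or release SMU ownership of the I2C bus; returns success quietly
 * when the SMC is not running yet, since I2C access can happen very early.
 */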
static int vega20_smu_i2c_bus_access(struct pp_hwmgr *hwmgr, bool acquire)
{
	int res;

	/* I2C bus access can happen very early, when SMU not loaded yet */
	if (!vega20_is_smc_ram_running(hwmgr))
		return 0;

	res = smum_send_msg_to_smc_with_parameter(hwmgr,
						  (acquire ?
						  PPSMC_MSG_RequestI2CBus :
						  PPSMC_MSG_ReleaseI2CBus),
						  0);

	PP_ASSERT_WITH_CODE(!res, "[SmuI2CAccessBus] Failed to access bus!", return res);
	return res;
}

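/* Forward a data-fabric C-state request (enum pp_df_cstate) to the SMU. */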
static int vega20_set_df_cstate(struct pp_hwmgr *hwmgr,
				enum pp_df_cstate state)
{
	int ret;

	/* PPSMC_MSG_DFCstateControl is supported with 40.50 and later fws */
	if (hwmgr->smu_version < 0x283200) {
		pr_err("Df cstate control is supported with 40.50 and later SMC fw!\n");
		return -EINVAL;
	}

	ret = smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_DFCstateControl, state);
	if (ret)
		pr_err("SetDfCstate failed!\n");

	return ret;
}

static int vega20_set_xgmi_pstate(struct pp_hwmgr *hwmgr,
				  uint32_t pstate)
{
	int ret;

	ret = smum_send_msg_to_smc_with_parameter(hwmgr,
						  PPSMC_MSG_SetXgmiMode,
						  pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3);
	if (ret)
		pr_err("SetXgmiPstate failed!\n");

	return ret;
}

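/*
 * Powerplay callback table for Vega20, registered by vega20_hwmgr_init()
 * below.
 */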
static const struct pp_hwmgr_func vega20_hwmgr_funcs = {
	/* init/fini related */
	.backend_init = vega20_hwmgr_backend_init,
	.backend_fini = vega20_hwmgr_backend_fini,
	.asic_setup = vega20_setup_asic_task,
	.power_off_asic = vega20_power_off_asic,
	.dynamic_state_management_enable = vega20_enable_dpm_tasks,
	.dynamic_state_management_disable = vega20_disable_dpm_tasks,
	/* power state related */
	.apply_clocks_adjust_rules = vega20_apply_clocks_adjust_rules,
	.pre_display_config_changed = vega20_pre_display_configuration_changed_task,
	.display_config_changed = vega20_display_configuration_changed_task,
	.check_smc_update_required_for_display_configuration =
		vega20_check_smc_update_required_for_display_configuration,
	.notify_smc_display_config_after_ps_adjustment =
		vega20_notify_smc_display_config_after_ps_adjustment,
	/* export to DAL */
	.get_sclk = vega20_dpm_get_sclk,
	.get_mclk = vega20_dpm_get_mclk,
	.get_dal_power_level = vega20_get_dal_power_level,
	.get_clock_by_type_with_latency = vega20_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = vega20_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = vega20_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = vega20_display_clock_voltage_request,
	.get_performance_level = vega20_get_performance_level,
	/* UMD pstate, profile related */
	.force_dpm_level = vega20_dpm_force_dpm_level,
	.get_power_profile_mode = vega20_get_power_profile_mode,
	.set_power_profile_mode = vega20_set_power_profile_mode,
	/* od related */
	.set_power_limit = vega20_set_power_limit,
	.get_sclk_od = vega20_get_sclk_od,
	.set_sclk_od = vega20_set_sclk_od,
	.get_mclk_od = vega20_get_mclk_od,
	.set_mclk_od = vega20_set_mclk_od,
	.odn_edit_dpm_table = vega20_odn_edit_dpm_table,
	/* for sysfs to retrive/set gfxclk/memclk */
	.force_clock_level = vega20_force_clock_level,
	.print_clock_levels = vega20_print_clock_levels,
	.read_sensor = vega20_read_sensor,
	.get_ppfeature_status = vega20_get_ppfeature_status,
	.set_ppfeature_status = vega20_set_ppfeature_status,
	/* powergate related */
	.powergate_uvd = vega20_power_gate_uvd,
	.powergate_vce = vega20_power_gate_vce,
	/* thermal related */
	.start_thermal_controller = vega20_start_thermal_controller,
	.stop_thermal_controller = vega20_thermal_stop_thermal_controller,
	.get_thermal_temperature_range = vega20_get_thermal_temperature_range,
	.register_irq_handlers = smu9_register_irq_handlers,
	.disable_smc_firmware_ctf = vega20_thermal_disable_alert,
	/* fan control related */
	.get_fan_speed_percent = vega20_fan_ctrl_get_fan_speed_percent,
	.set_fan_speed_percent = vega20_fan_ctrl_set_fan_speed_percent,
	.get_fan_speed_info = vega20_fan_ctrl_get_fan_speed_info,
	.get_fan_speed_rpm = vega20_fan_ctrl_get_fan_speed_rpm,
	.set_fan_speed_rpm = vega20_fan_ctrl_set_fan_speed_rpm,
	.get_fan_control_mode = vega20_get_fan_control_mode,
	.set_fan_control_mode = vega20_set_fan_control_mode,
	/* smu memory related */
	.notify_cac_buffer_info = vega20_notify_cac_buffer_info,
	.enable_mgpu_fan_boost = vega20_enable_mgpu_fan_boost,
	/* BACO related */
	.get_asic_baco_capability = vega20_baco_get_capability,
	.get_asic_baco_state = vega20_baco_get_state,
	.set_asic_baco_state = vega20_baco_set_state,
	.set_mp1_state = vega20_set_mp1_state,
	.smu_i2c_bus_access = vega20_smu_i2c_bus_access,
	.set_df_cstate = vega20_set_df_cstate,
	.set_xgmi_pstate = vega20_set_xgmi_pstate,
};

int vega20_hwmgr_init(struct pp_hwmgr *hwmgr)
{
	hwmgr->hwmgr_func = &vega20_hwmgr_funcs;
	hwmgr->pptable_func = &vega20_pptable_funcs