/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include "amd_powerplay.h"
#include "hardwaremanager.h"
#include "ppatomfwctrl.h"
#include "atomfirmware.h"
#include "cgs_common.h"
#include "vega10_powertune.h"
#include "smu9_driver_if.h"
#include "vega10_inc.h"
#include "soc15_common.h"
#include "pppcielanes.h"
#include "vega10_hwmgr.h"
#include "vega10_smumgr.h"
#include "vega10_processpptables.h"
#include "vega10_pptable.h"
#include "vega10_thermal.h"
#include "amd_pcie_helpers.h"
#include "ppinterrupt.h"
#include "pp_overdriver.h"
#include "pp_thermal.h"
#include "vega10_baco.h"

#include "smuio/smuio_9_0_offset.h"
#include "smuio/smuio_9_0_sh_mask.h"
#define HBM_MEMORY_CHANNEL_WIDTH	128

static const uint32_t channel_number[] = {1, 2, 0, 4, 0, 8, 0, 16, 2};

#define mmDF_CS_AON0_DramBaseAddress0						0x0044
#define mmDF_CS_AON0_DramBaseAddress0_BASE_IDX					0

//DF_CS_AON0_DramBaseAddress0
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal__SHIFT				0x0
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn__SHIFT			0x1
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT			0x4
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel__SHIFT			0x8
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr__SHIFT			0xc
#define DF_CS_AON0_DramBaseAddress0__AddrRngVal_MASK				0x00000001L
#define DF_CS_AON0_DramBaseAddress0__LgcyMmioHoleEn_MASK			0x00000002L
#define DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK				0x000000F0L
#define DF_CS_AON0_DramBaseAddress0__IntLvAddrSel_MASK				0x00000700L
#define DF_CS_AON0_DramBaseAddress0__DramBaseAddr_MASK				0xFFFFF000L
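
/*
 * Worked example (hypothetical register value): if DramBaseAddress0 reads
 * 0x00000051, then (0x51 & IntLvNumChan_MASK) >> IntLvNumChan__SHIFT = 5,
 * and channel_number[5] = 8, i.e. the DF reports an 8-channel interleave.
 */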
static const ULONG PhwVega10_Magic = (ULONG)(PHM_VIslands_Magic);
struct vega10_power_state *cast_phw_vega10_power_state(
				  struct pp_hw_power_state *hw_ps)
{
	PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
				"Invalid Powerstate Type!",
				 return NULL;);

	return (struct vega10_power_state *)hw_ps;
}
const struct vega10_power_state *cast_const_phw_vega10_power_state(
				const struct pp_hw_power_state *hw_ps)
{
	PP_ASSERT_WITH_CODE((PhwVega10_Magic == hw_ps->magic),
				"Invalid Powerstate Type!",
				 return NULL;);

	return (const struct vega10_power_state *)hw_ps;
}
static void vega10_set_default_registry_data(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	data->registry_data.sclk_dpm_key_disabled =
			hwmgr->feature_mask & PP_SCLK_DPM_MASK ? false : true;
	data->registry_data.socclk_dpm_key_disabled =
			hwmgr->feature_mask & PP_SOCCLK_DPM_MASK ? false : true;
	data->registry_data.mclk_dpm_key_disabled =
			hwmgr->feature_mask & PP_MCLK_DPM_MASK ? false : true;
	data->registry_data.pcie_dpm_key_disabled =
			hwmgr->feature_mask & PP_PCIE_DPM_MASK ? false : true;

	data->registry_data.dcefclk_dpm_key_disabled =
			hwmgr->feature_mask & PP_DCEFCLK_DPM_MASK ? false : true;

	if (hwmgr->feature_mask & PP_POWER_CONTAINMENT_MASK) {
		data->registry_data.power_containment_support = 1;
		data->registry_data.enable_pkg_pwr_tracking_feature = 1;
		data->registry_data.enable_tdc_limit_feature = 1;
	}

	data->registry_data.clock_stretcher_support =
			hwmgr->feature_mask & PP_CLOCK_STRETCH_MASK ? true : false;

	data->registry_data.ulv_support =
			hwmgr->feature_mask & PP_ULV_MASK ? true : false;

	data->registry_data.sclk_deep_sleep_support =
			hwmgr->feature_mask & PP_SCLK_DEEP_SLEEP_MASK ? true : false;

	data->registry_data.disable_water_mark = 0;

	data->registry_data.fan_control_support = 1;
	data->registry_data.thermal_support = 1;
	data->registry_data.fw_ctf_enabled = 1;

	data->registry_data.avfs_support =
			hwmgr->feature_mask & PP_AVFS_MASK ? true : false;
	data->registry_data.led_dpm_enabled = 1;

	data->registry_data.vr0hot_enabled = 1;
	data->registry_data.vr1hot_enabled = 1;
	data->registry_data.regulator_hot_gpio_support = 1;

	data->registry_data.didt_support = 1;
	if (data->registry_data.didt_support) {
		data->registry_data.didt_mode = 6;
		data->registry_data.sq_ramping_support = 1;
		data->registry_data.db_ramping_support = 0;
		data->registry_data.td_ramping_support = 0;
		data->registry_data.tcp_ramping_support = 0;
		data->registry_data.dbr_ramping_support = 0;
		data->registry_data.edc_didt_support = 1;
		data->registry_data.gc_didt_support = 0;
		data->registry_data.psm_didt_support = 0;
	}

	data->display_voltage_mode = PPVEGA10_VEGA10DISPLAYVOLTAGEMODE_DFLT;
	data->dcef_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
	data->dcef_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
	data->dcef_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
	data->disp_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
	data->disp_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
	data->disp_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
	data->pixel_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
	data->pixel_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
	data->pixel_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
	data->phy_clk_quad_eqn_a = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
	data->phy_clk_quad_eqn_b = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;
	data->phy_clk_quad_eqn_c = PPREGKEY_VEGA10QUADRATICEQUATION_DFLT;

	data->gfxclk_average_alpha = PPVEGA10_VEGA10GFXCLKAVERAGEALPHA_DFLT;
	data->socclk_average_alpha = PPVEGA10_VEGA10SOCCLKAVERAGEALPHA_DFLT;
	data->uclk_average_alpha = PPVEGA10_VEGA10UCLKCLKAVERAGEALPHA_DFLT;
	data->gfx_activity_average_alpha = PPVEGA10_VEGA10GFXACTIVITYAVERAGEALPHA_DFLT;
}
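
/*
 * Illustrative note: a module parameter that clears PP_SCLK_DPM_MASK in
 * hwmgr->feature_mask makes sclk_dpm_key_disabled true above, which in turn
 * leaves GNLD_DPM_GFXCLK unsupported in vega10_init_dpm_defaults() below.
 */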
static int vega10_set_features_platform_caps(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;
	struct amdgpu_device *adev = hwmgr->adev;

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SclkDeepSleep);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicPatchPowerState);

	if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE)
		phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ControlVDDCI);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_EnableSMU7ThermalManagement);

	if (adev->pg_flags & AMD_PG_SUPPORT_UVD)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_UVDPowerGating);

	if (adev->pg_flags & AMD_PG_SUPPORT_VCE)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_VCEPowerGating);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UnTabledHardwareInterface);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_FanSpeedInTableIsRPM);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ODFuzzyFanControlSupport);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DynamicPowerManagement);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SMC);

	/* power tune caps */
	/* assume disabled */
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PowerContainment);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DiDtSupport);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_SQRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TDRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_TCPRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DBRRamping);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_DiDtEDCEnable);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_GCEDC);
	phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_PSM);

	if (data->registry_data.didt_support) {
		phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtSupport);
		if (data->registry_data.sq_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_SQRamping);
		if (data->registry_data.db_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRamping);
		if (data->registry_data.td_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TDRamping);
		if (data->registry_data.tcp_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_TCPRamping);
		if (data->registry_data.dbr_ramping_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DBRRamping);
		if (data->registry_data.edc_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DiDtEDCEnable);
		if (data->registry_data.gc_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_GCEDC);
		if (data->registry_data.psm_didt_support)
			phm_cap_set(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_PSM);
	}

	if (data->registry_data.power_containment_support)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_PowerContainment);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_CAC);

	if (table_info->tdp_table->usClockStretchAmount &&
			data->registry_data.clock_stretcher_support)
		phm_cap_set(hwmgr->platform_descriptor.platformCaps,
				PHM_PlatformCaps_ClockStretcher);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_RegulatorHot);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_AutomaticDCTransition);

	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_UVDDPM);
	phm_cap_set(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_VCEDPM);

	return 0;
}
static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
	struct vega10_odn_vddc_lookup_table *od_lookup_table;
	struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table[3];
	struct phm_ppt_v1_clock_voltage_dependency_table *od_table[3];
	struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
	uint32_t i;
	int result;

	result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
	if (!result) {
		data->odn_dpm_table.max_vddc = avfs_params.ulMaxVddc;
		data->odn_dpm_table.min_vddc = avfs_params.ulMinVddc;
	}

	od_lookup_table = &odn_table->vddc_lookup_table;
	vddc_lookup_table = table_info->vddc_lookup_table;

	for (i = 0; i < vddc_lookup_table->count; i++)
		od_lookup_table->entries[i].us_vdd = vddc_lookup_table->entries[i].us_vdd;

	od_lookup_table->count = vddc_lookup_table->count;

	dep_table[0] = table_info->vdd_dep_on_sclk;
	dep_table[1] = table_info->vdd_dep_on_mclk;
	dep_table[2] = table_info->vdd_dep_on_socclk;
	od_table[0] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_sclk;
	od_table[1] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_mclk;
	od_table[2] = (struct phm_ppt_v1_clock_voltage_dependency_table *)&odn_table->vdd_dep_on_socclk;

	for (i = 0; i < 3; i++)
		smu_get_voltage_dependency_table_ppt_v1(dep_table[i], od_table[i]);

	if (odn_table->max_vddc == 0 || odn_table->max_vddc > 2000)
		odn_table->max_vddc = dep_table[0]->entries[dep_table[0]->count - 1].vddc;
	if (odn_table->min_vddc == 0 || odn_table->min_vddc > 2000)
		odn_table->min_vddc = dep_table[0]->entries[0].vddc;

	i = od_table[2]->count - 1;
	od_table[2]->entries[i].clk = hwmgr->platform_descriptor.overdriveLimit.memoryClock > od_table[2]->entries[i].clk ?
					hwmgr->platform_descriptor.overdriveLimit.memoryClock :
					od_table[2]->entries[i].clk;
	od_table[2]->entries[i].vddc = odn_table->max_vddc > od_table[2]->entries[i].vddc ?
					odn_table->max_vddc :
					od_table[2]->entries[i].vddc;

	return 0;
}
static void vega10_init_dpm_defaults(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	int i;
	uint32_t sub_vendor_id, hw_revision;
	uint32_t top32, bottom32;
	struct amdgpu_device *adev = hwmgr->adev;

	vega10_initialize_power_tune_defaults(hwmgr);

	for (i = 0; i < GNLD_FEATURES_MAX; i++) {
		data->smu_features[i].smu_feature_id = 0xffff;
		data->smu_features[i].smu_feature_bitmap = 1 << i;
		data->smu_features[i].enabled = false;
		data->smu_features[i].supported = false;
	}

	data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_id = FEATURE_DPM_PREFETCHER_BIT;
	data->smu_features[GNLD_DPM_GFXCLK].smu_feature_id = FEATURE_DPM_GFXCLK_BIT;
	data->smu_features[GNLD_DPM_UCLK].smu_feature_id = FEATURE_DPM_UCLK_BIT;
	data->smu_features[GNLD_DPM_SOCCLK].smu_feature_id = FEATURE_DPM_SOCCLK_BIT;
	data->smu_features[GNLD_DPM_UVD].smu_feature_id = FEATURE_DPM_UVD_BIT;
	data->smu_features[GNLD_DPM_VCE].smu_feature_id = FEATURE_DPM_VCE_BIT;
	data->smu_features[GNLD_DPM_MP0CLK].smu_feature_id = FEATURE_DPM_MP0CLK_BIT;
	data->smu_features[GNLD_DPM_LINK].smu_feature_id = FEATURE_DPM_LINK_BIT;
	data->smu_features[GNLD_DPM_DCEFCLK].smu_feature_id = FEATURE_DPM_DCEFCLK_BIT;
	data->smu_features[GNLD_ULV].smu_feature_id = FEATURE_ULV_BIT;
	data->smu_features[GNLD_AVFS].smu_feature_id = FEATURE_AVFS_BIT;
	data->smu_features[GNLD_DS_GFXCLK].smu_feature_id = FEATURE_DS_GFXCLK_BIT;
	data->smu_features[GNLD_DS_SOCCLK].smu_feature_id = FEATURE_DS_SOCCLK_BIT;
	data->smu_features[GNLD_DS_LCLK].smu_feature_id = FEATURE_DS_LCLK_BIT;
	data->smu_features[GNLD_PPT].smu_feature_id = FEATURE_PPT_BIT;
	data->smu_features[GNLD_TDC].smu_feature_id = FEATURE_TDC_BIT;
	data->smu_features[GNLD_THERMAL].smu_feature_id = FEATURE_THERMAL_BIT;
	data->smu_features[GNLD_GFX_PER_CU_CG].smu_feature_id = FEATURE_GFX_PER_CU_CG_BIT;
	data->smu_features[GNLD_RM].smu_feature_id = FEATURE_RM_BIT;
	data->smu_features[GNLD_DS_DCEFCLK].smu_feature_id = FEATURE_DS_DCEFCLK_BIT;
	data->smu_features[GNLD_ACDC].smu_feature_id = FEATURE_ACDC_BIT;
	data->smu_features[GNLD_VR0HOT].smu_feature_id = FEATURE_VR0HOT_BIT;
	data->smu_features[GNLD_VR1HOT].smu_feature_id = FEATURE_VR1HOT_BIT;
	data->smu_features[GNLD_FW_CTF].smu_feature_id = FEATURE_FW_CTF_BIT;
	data->smu_features[GNLD_LED_DISPLAY].smu_feature_id = FEATURE_LED_DISPLAY_BIT;
	data->smu_features[GNLD_FAN_CONTROL].smu_feature_id = FEATURE_FAN_CONTROL_BIT;
	data->smu_features[GNLD_ACG].smu_feature_id = FEATURE_ACG_BIT;
	data->smu_features[GNLD_DIDT].smu_feature_id = FEATURE_GFX_EDC_BIT;
	data->smu_features[GNLD_PCC_LIMIT].smu_feature_id = FEATURE_PCC_LIMIT_CONTROL_BIT;

	if (!data->registry_data.prefetcher_dpm_key_disabled)
		data->smu_features[GNLD_DPM_PREFETCHER].supported = true;

	if (!data->registry_data.sclk_dpm_key_disabled)
		data->smu_features[GNLD_DPM_GFXCLK].supported = true;

	if (!data->registry_data.mclk_dpm_key_disabled)
		data->smu_features[GNLD_DPM_UCLK].supported = true;

	if (!data->registry_data.socclk_dpm_key_disabled)
		data->smu_features[GNLD_DPM_SOCCLK].supported = true;

	if (PP_CAP(PHM_PlatformCaps_UVDDPM))
		data->smu_features[GNLD_DPM_UVD].supported = true;

	if (PP_CAP(PHM_PlatformCaps_VCEDPM))
		data->smu_features[GNLD_DPM_VCE].supported = true;

	if (!data->registry_data.pcie_dpm_key_disabled)
		data->smu_features[GNLD_DPM_LINK].supported = true;

	if (!data->registry_data.dcefclk_dpm_key_disabled)
		data->smu_features[GNLD_DPM_DCEFCLK].supported = true;

	if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep) &&
	    data->registry_data.sclk_deep_sleep_support) {
		data->smu_features[GNLD_DS_GFXCLK].supported = true;
		data->smu_features[GNLD_DS_SOCCLK].supported = true;
		data->smu_features[GNLD_DS_LCLK].supported = true;
		data->smu_features[GNLD_DS_DCEFCLK].supported = true;
	}

	if (data->registry_data.enable_pkg_pwr_tracking_feature)
		data->smu_features[GNLD_PPT].supported = true;

	if (data->registry_data.enable_tdc_limit_feature)
		data->smu_features[GNLD_TDC].supported = true;

	if (data->registry_data.thermal_support)
		data->smu_features[GNLD_THERMAL].supported = true;

	if (data->registry_data.fan_control_support)
		data->smu_features[GNLD_FAN_CONTROL].supported = true;

	if (data->registry_data.fw_ctf_enabled)
		data->smu_features[GNLD_FW_CTF].supported = true;

	if (data->registry_data.avfs_support)
		data->smu_features[GNLD_AVFS].supported = true;

	if (data->registry_data.led_dpm_enabled)
		data->smu_features[GNLD_LED_DISPLAY].supported = true;

	if (data->registry_data.vr1hot_enabled)
		data->smu_features[GNLD_VR1HOT].supported = true;

	if (data->registry_data.vr0hot_enabled)
		data->smu_features[GNLD_VR0HOT].supported = true;

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetSmuVersion);
	hwmgr->smu_version = smum_get_argument(hwmgr);
	/* ACG firmware has major version 5 */
	if ((hwmgr->smu_version & 0xff000000) == 0x5000000)
		data->smu_features[GNLD_ACG].supported = true;
	if (data->registry_data.didt_support)
		data->smu_features[GNLD_DIDT].supported = true;

	hw_revision = adev->pdev->revision;
	sub_vendor_id = adev->pdev->subsystem_vendor;

	if ((hwmgr->chip_id == 0x6862 ||
		hwmgr->chip_id == 0x6861 ||
		hwmgr->chip_id == 0x6868) &&
		(hw_revision == 0) &&
		(sub_vendor_id != 0x1002))
		data->smu_features[GNLD_PCC_LIMIT].supported = true;

	/* Get the SN to turn into a Unique ID */
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
	top32 = smum_get_argument(hwmgr);
	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
	bottom32 = smum_get_argument(hwmgr);

	adev->unique_id = ((uint64_t)bottom32 << 32) | top32;
}
#ifdef PPLIB_VEGA10_EVV_SUPPORT
static int vega10_get_socclk_for_voltage_evv(struct pp_hwmgr *hwmgr,
	phm_ppt_v1_voltage_lookup_table *lookup_table,
	uint16_t virtual_voltage_id, int32_t *socclk)
{
	uint8_t entry_id;
	uint8_t voltage_id;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);

	PP_ASSERT_WITH_CODE(lookup_table->count != 0,
			"Lookup table is empty",
			return -EINVAL);

	/* search for leakage voltage ID 0xff01 ~ 0xff08 and sclk */
	for (entry_id = 0; entry_id < table_info->vdd_dep_on_sclk->count; entry_id++) {
		voltage_id = table_info->vdd_dep_on_socclk->entries[entry_id].vddInd;
		if (lookup_table->entries[voltage_id].us_vdd == virtual_voltage_id)
			break;
	}

	PP_ASSERT_WITH_CODE(entry_id < table_info->vdd_dep_on_socclk->count,
			"Can't find requested voltage id in vdd_dep_on_socclk table!",
			return -EINVAL);

	*socclk = table_info->vdd_dep_on_socclk->entries[entry_id].clk;

	return 0;
}
#define ATOM_VIRTUAL_VOLTAGE_ID0	0xff01

/**
 * Get Leakage VDDC based on leakage ID.
 *
 * @param hwmgr  the address of the powerplay hardware manager.
 * @return always 0.
 */
static int vega10_get_evv_voltages(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	uint16_t vv_id;
	uint32_t vddc = 0;
	uint16_t i, j;
	uint32_t sclk = 0;
	int result;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *socclk_table =
			table_info->vdd_dep_on_socclk;

	for (i = 0; i < VEGA10_MAX_LEAKAGE_COUNT; i++) {
		vv_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;

		if (!vega10_get_socclk_for_voltage_evv(hwmgr,
				table_info->vddc_lookup_table, vv_id, &sclk)) {
			if (PP_CAP(PHM_PlatformCaps_ClockStretcher)) {
				for (j = 1; j < socclk_table->count; j++) {
					if (socclk_table->entries[j].clk == sclk &&
							socclk_table->entries[j].cks_enable == 0) {
						sclk += 5000;
						break;
					}
				}
			}

			PP_ASSERT_WITH_CODE(!atomctrl_get_voltage_evv_on_sclk_ai(hwmgr,
					VOLTAGE_TYPE_VDDC, sclk, vv_id, &vddc),
					"Error retrieving EVV voltage value!",
					continue);

			/* need to make sure vddc is less than 2v or else, it could burn the ASIC. */
			PP_ASSERT_WITH_CODE((vddc < 2000 && vddc != 0),
					"Invalid VDDC value", result = -EINVAL;);

			/* the voltage should not be zero nor equal to leakage ID */
			if (vddc != 0 && vddc != vv_id) {
				data->vddc_leakage.actual_voltage[data->vddc_leakage.count] = (uint16_t)(vddc/100);
				data->vddc_leakage.leakage_id[data->vddc_leakage.count] = vv_id;
				data->vddc_leakage.count++;
			}
		}
	}

	return 0;
}
/**
 * Change virtual leakage voltage to actual value.
 *
 * @param hwmgr  the address of the powerplay hardware manager.
 * @param voltage  pointer to the changing voltage
 * @param leakage_table  pointer to the leakage table
 */
static void vega10_patch_with_vdd_leakage(struct pp_hwmgr *hwmgr,
		uint16_t *voltage, struct vega10_leakage_voltage *leakage_table)
{
	uint32_t index;

	/* search for leakage voltage ID 0xff01 ~ 0xff08 */
	for (index = 0; index < leakage_table->count; index++) {
		/* if this voltage matches a leakage voltage ID,
		 * patch with the actual leakage voltage
		 */
		if (leakage_table->leakage_id[index] == *voltage) {
			*voltage = leakage_table->actual_voltage[index];
			break;
		}
	}

	if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
		pr_info("Voltage value looks like a Leakage ID but it's not patched\n");
}
/**
 * Patch voltage lookup table by EVV leakages.
 *
 * @param hwmgr  the address of the powerplay hardware manager.
 * @param lookup_table  pointer to the voltage lookup table
 * @param leakage_table  pointer to the leakage table
 * @return always 0
 */
static int vega10_patch_lookup_table_with_leakage(struct pp_hwmgr *hwmgr,
		phm_ppt_v1_voltage_lookup_table *lookup_table,
		struct vega10_leakage_voltage *leakage_table)
{
	uint32_t i;

	for (i = 0; i < lookup_table->count; i++)
		vega10_patch_with_vdd_leakage(hwmgr,
				&lookup_table->entries[i].us_vdd, leakage_table);

	return 0;
}
static int vega10_patch_clock_voltage_limits_with_vddc_leakage(
		struct pp_hwmgr *hwmgr, struct vega10_leakage_voltage *leakage_table,
		uint32_t *vddc)
{
	vega10_patch_with_vdd_leakage(hwmgr, (uint16_t *)vddc, leakage_table);

	return 0;
}
#endif
static int vega10_patch_voltage_dependency_tables_with_lookup_table(
		struct pp_hwmgr *hwmgr)
{
	uint8_t entry_id, voltage_id;
	unsigned int i;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_table =
			table_info->mm_dep_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table =
			table_info->vdd_dep_on_mclk;

	for (i = 0; i < 6; i++) {
		struct phm_ppt_v1_clock_voltage_dependency_table *vdt;

		switch (i) {
		case 0: vdt = table_info->vdd_dep_on_socclk; break;
		case 1: vdt = table_info->vdd_dep_on_sclk; break;
		case 2: vdt = table_info->vdd_dep_on_dcefclk; break;
		case 3: vdt = table_info->vdd_dep_on_pixclk; break;
		case 4: vdt = table_info->vdd_dep_on_dispclk; break;
		case 5: vdt = table_info->vdd_dep_on_phyclk; break;
		}

		for (entry_id = 0; entry_id < vdt->count; entry_id++) {
			voltage_id = vdt->entries[entry_id].vddInd;
			vdt->entries[entry_id].vddc =
				table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
		}
	}

	for (entry_id = 0; entry_id < mm_table->count; ++entry_id) {
		voltage_id = mm_table->entries[entry_id].vddcInd;
		mm_table->entries[entry_id].vddc =
			table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
	}

	for (entry_id = 0; entry_id < mclk_table->count; ++entry_id) {
		voltage_id = mclk_table->entries[entry_id].vddInd;
		mclk_table->entries[entry_id].vddc =
			table_info->vddc_lookup_table->entries[voltage_id].us_vdd;
		voltage_id = mclk_table->entries[entry_id].vddciInd;
		mclk_table->entries[entry_id].vddci =
			table_info->vddci_lookup_table->entries[voltage_id].us_vdd;
		voltage_id = mclk_table->entries[entry_id].mvddInd;
		mclk_table->entries[entry_id].mvdd =
			table_info->vddmem_lookup_table->entries[voltage_id].us_vdd;
	}

	return 0;
}
static int vega10_sort_lookup_table(struct pp_hwmgr *hwmgr,
		struct phm_ppt_v1_voltage_lookup_table *lookup_table)
{
	uint32_t table_size, i, j;

	PP_ASSERT_WITH_CODE(lookup_table && lookup_table->count,
		"Lookup table is empty", return -EINVAL);

	table_size = lookup_table->count;

	/* Sorting voltages */
	for (i = 0; i < table_size - 1; i++) {
		for (j = i + 1; j > 0; j--) {
			if (lookup_table->entries[j].us_vdd <
					lookup_table->entries[j - 1].us_vdd) {
				swap(lookup_table->entries[j - 1],
				     lookup_table->entries[j]);
			}
		}
	}

	return 0;
}
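
/*
 * Illustrative example: a lookup table with us_vdd = {900, 800, 1000} is
 * reordered in place to {800, 900, 1000}; the inner j-loop bubbles each new
 * element down until the entries below it are already in ascending order.
 */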
static int vega10_complete_dependency_tables(struct pp_hwmgr *hwmgr)
{
	int result = 0;
	int tmp_result;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
#ifdef PPLIB_VEGA10_EVV_SUPPORT
	struct vega10_hwmgr *data = hwmgr->backend;

	tmp_result = vega10_patch_lookup_table_with_leakage(hwmgr,
			table_info->vddc_lookup_table, &(data->vddc_leakage));
	if (tmp_result)
		result = tmp_result;

	tmp_result = vega10_patch_clock_voltage_limits_with_vddc_leakage(hwmgr,
			&(data->vddc_leakage), &table_info->max_clock_voltage_on_dc.vddc);
	if (tmp_result)
		result = tmp_result;
#endif

	tmp_result = vega10_patch_voltage_dependency_tables_with_lookup_table(hwmgr);
	if (tmp_result)
		result = tmp_result;

	tmp_result = vega10_sort_lookup_table(hwmgr, table_info->vddc_lookup_table);
	if (tmp_result)
		result = tmp_result;

	return result;
}
static int vega10_set_private_data_based_on_pptable(struct pp_hwmgr *hwmgr)
{
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *allowed_sclk_vdd_table =
			table_info->vdd_dep_on_socclk;
	struct phm_ppt_v1_clock_voltage_dependency_table *allowed_mclk_vdd_table =
			table_info->vdd_dep_on_mclk;

	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table,
		"VDD dependency on SCLK table is missing. This table is mandatory", return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_sclk_vdd_table->count >= 1,
		"VDD dependency on SCLK table is empty. This table is mandatory", return -EINVAL);

	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table,
		"VDD dependency on MCLK table is missing. This table is mandatory", return -EINVAL);
	PP_ASSERT_WITH_CODE(allowed_mclk_vdd_table->count >= 1,
		"VDD dependency on MCLK table is empty. This table is mandatory", return -EINVAL);

	table_info->max_clock_voltage_on_ac.sclk =
		allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].clk;
	table_info->max_clock_voltage_on_ac.mclk =
		allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].clk;
	table_info->max_clock_voltage_on_ac.vddc =
		allowed_sclk_vdd_table->entries[allowed_sclk_vdd_table->count - 1].vddc;
	table_info->max_clock_voltage_on_ac.vddci =
		allowed_mclk_vdd_table->entries[allowed_mclk_vdd_table->count - 1].vddci;

	hwmgr->dyn_state.max_clock_voltage_on_ac.sclk =
		table_info->max_clock_voltage_on_ac.sclk;
	hwmgr->dyn_state.max_clock_voltage_on_ac.mclk =
		table_info->max_clock_voltage_on_ac.mclk;
	hwmgr->dyn_state.max_clock_voltage_on_ac.vddc =
		table_info->max_clock_voltage_on_ac.vddc;
	hwmgr->dyn_state.max_clock_voltage_on_ac.vddci =
		table_info->max_clock_voltage_on_ac.vddci;

	return 0;
}
static int vega10_hwmgr_backend_fini(struct pp_hwmgr *hwmgr)
{
	kfree(hwmgr->dyn_state.vddc_dep_on_dal_pwrl);
	hwmgr->dyn_state.vddc_dep_on_dal_pwrl = NULL;

	kfree(hwmgr->backend);
	hwmgr->backend = NULL;

	return 0;
}
static int vega10_hwmgr_backend_init(struct pp_hwmgr *hwmgr)
{
	int result = 0;
	struct vega10_hwmgr *data;
	uint32_t config_telemetry = 0;
	struct pp_atomfwctrl_voltage_table vol_table;
	struct amdgpu_device *adev = hwmgr->adev;

	data = kzalloc(sizeof(struct vega10_hwmgr), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	hwmgr->backend = data;

	hwmgr->workload_mask = 1 << hwmgr->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
	hwmgr->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
	hwmgr->default_power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;

	vega10_set_default_registry_data(hwmgr);
	data->disable_dpm_mask = 0xff;

	/* need to set voltage control types before EVV patching */
	data->vddc_control = VEGA10_VOLTAGE_CONTROL_NONE;
	data->mvdd_control = VEGA10_VOLTAGE_CONTROL_NONE;
	data->vddci_control = VEGA10_VOLTAGE_CONTROL_NONE;

	/* VDDCR_SOC */
	if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
			VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2)) {
		if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
				VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2,
				&vol_table)) {
			config_telemetry = ((vol_table.telemetry_slope << 8) & 0xff00) |
					(vol_table.telemetry_offset & 0xff);
			data->vddc_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
		}
	} else {
		kfree(hwmgr->backend);
		hwmgr->backend = NULL;
		PP_ASSERT_WITH_CODE(false,
				"VDDCR_SOC is not SVID2!",
				return -1);
	}

	/* MVDDC */
	if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
			VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2)) {
		if (!pp_atomfwctrl_get_voltage_table_v4(hwmgr,
				VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2,
				&vol_table)) {
			config_telemetry |=
					((vol_table.telemetry_slope << 24) & 0xff000000) |
					((vol_table.telemetry_offset << 16) & 0xff0000);
			data->mvdd_control = VEGA10_VOLTAGE_CONTROL_BY_SVID2;
		}
	}

	/* VDDCI_MEM */
	if (PP_CAP(PHM_PlatformCaps_ControlVDDCI)) {
		if (pp_atomfwctrl_is_voltage_controlled_by_gpio_v4(hwmgr,
				VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
			data->vddci_control = VEGA10_VOLTAGE_CONTROL_BY_GPIO;
	}

	data->config_telemetry = config_telemetry;
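
	/*
	 * Illustrative example (hypothetical table values): with a VDDC
	 * telemetry slope of 0x12 and offset of 0x34, the first block above
	 * packs 0x1234 into the low 16 bits of config_telemetry; an MVDD
	 * slope/offset of 0x56/0x78 is then packed into the high 16 bits,
	 * giving 0x56781234.
	 */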
	vega10_set_features_platform_caps(hwmgr);

	vega10_init_dpm_defaults(hwmgr);

#ifdef PPLIB_VEGA10_EVV_SUPPORT
	/* Get leakage voltage based on leakage ID. */
	PP_ASSERT_WITH_CODE(!vega10_get_evv_voltages(hwmgr),
			"Get EVV Voltage Failed. Abort Driver loading!",
			return -1);
#endif

	/* Patch our voltage dependency table with actual leakage voltage.
	 * We need to perform leakage translation before it's used by other functions.
	 */
	vega10_complete_dependency_tables(hwmgr);

	/* Parse pptable data read from VBIOS */
	vega10_set_private_data_based_on_pptable(hwmgr);

	data->is_tlu_enabled = false;

	hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
			VEGA10_MAX_HARDWARE_POWERLEVELS;
	hwmgr->platform_descriptor.hardwarePerformanceLevels = 2;
	hwmgr->platform_descriptor.minimumClocksReductionPercentage = 50;

	hwmgr->platform_descriptor.vbiosInterruptId = 0x20000400; /* IRQ_SOURCE1_SW_INT */
	/* The true clock step depends on the frequency, typically 4.5 or 9 MHz. Here we use 5. */
	hwmgr->platform_descriptor.clockStep.engineClock = 500;
	hwmgr->platform_descriptor.clockStep.memoryClock = 500;

	data->total_active_cus = adev->gfx.cu_info.number;

	/* Setup default Overdrive Fan control settings */
	data->odn_fan_table.target_fan_speed =
			hwmgr->thermal_controller.advanceFanControlParameters.usMaxFanRPM;
	data->odn_fan_table.target_temperature =
			hwmgr->thermal_controller.advanceFanControlParameters.ucTargetTemperature;
	data->odn_fan_table.min_performance_clock =
			hwmgr->thermal_controller.advanceFanControlParameters.ulMinFanSCLKAcousticLimit;
	data->odn_fan_table.min_fan_limit =
			hwmgr->thermal_controller.advanceFanControlParameters.usFanPWMMinLimit *
			hwmgr->thermal_controller.fanInfo.ulMaxRPM / 100;

	data->mem_channels = (RREG32_SOC15(DF, 0, mmDF_CS_AON0_DramBaseAddress0) &
			DF_CS_AON0_DramBaseAddress0__IntLvNumChan_MASK) >>
			DF_CS_AON0_DramBaseAddress0__IntLvNumChan__SHIFT;
	PP_ASSERT_WITH_CODE(data->mem_channels < ARRAY_SIZE(channel_number),
			"Mem Channel Index Exceeded maximum!",
			return -EINVAL);

	return result;
}
static int vega10_init_sclk_threshold(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	data->low_sclk_interrupt_threshold = 0;

	return 0;
}
static int vega10_setup_dpm_led_config(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);

	struct pp_atomfwctrl_voltage_table table;
	uint8_t i, j;
	uint32_t mask = 0;
	uint32_t tmp;
	int32_t ret = 0;

	ret = pp_atomfwctrl_get_voltage_table_v4(hwmgr, VOLTAGE_TYPE_LEDDPM,
						VOLTAGE_OBJ_GPIO_LUT, &table);

	if (!ret) {
		tmp = table.mask_low;
		for (i = 0, j = 0; i < 32; i++) {
			if (tmp & 1) {
				mask |= (uint32_t)(i << (8 * j));
				if (++j >= 3)
					break;
			}
			tmp >>= 1;
		}
	}

	pp_table->LedPin0 = (uint8_t)(mask & 0xff);
	pp_table->LedPin1 = (uint8_t)((mask >> 8) & 0xff);
	pp_table->LedPin2 = (uint8_t)((mask >> 16) & 0xff);
	return 0;
}
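
/*
 * Worked example (hypothetical mask_low): if bits 3, 11 and 19 are set in
 * table.mask_low, the loop collects i = 3, 11, 19 into mask = 0x00130b03,
 * so LedPin0 = 0x03, LedPin1 = 0x0b and LedPin2 = 0x13.
 */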
static int vega10_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	PP_ASSERT_WITH_CODE(!vega10_init_sclk_threshold(hwmgr),
			"Failed to init sclk threshold!",
			return -EINVAL);

	PP_ASSERT_WITH_CODE(!vega10_setup_dpm_led_config(hwmgr),
			"Failed to set up led dpm config!",
			return -EINVAL);

	smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_NumOfDisplays, 0);

	return 0;
}
/**
 * Remove repeated voltage values and create table with unique values.
 *
 * @param hwmgr  the address of the powerplay hardware manager.
 * @param vol_table  the pointer to the changing voltage table
 * @return 0 on success
 */
static int vega10_trim_voltage_table(struct pp_hwmgr *hwmgr,
		struct pp_atomfwctrl_voltage_table *vol_table)
{
	uint32_t i, j;
	uint16_t vvalue;
	bool found = false;
	struct pp_atomfwctrl_voltage_table *table;

	PP_ASSERT_WITH_CODE(vol_table,
			"Voltage Table empty.", return -EINVAL);
	table = kzalloc(sizeof(struct pp_atomfwctrl_voltage_table),
			GFP_KERNEL);

	if (!table)
		return -ENOMEM;

	table->mask_low = vol_table->mask_low;
	table->phase_delay = vol_table->phase_delay;

	for (i = 0; i < vol_table->count; i++) {
		vvalue = vol_table->entries[i].value;
		found = false;

		for (j = 0; j < table->count; j++) {
			if (vvalue == table->entries[j].value) {
				found = true;
				break;
			}
		}

		if (!found) {
			table->entries[table->count].value = vvalue;
			table->entries[table->count].smio_low =
					vol_table->entries[i].smio_low;
			table->count++;
		}
	}

	memcpy(vol_table, table, sizeof(struct pp_atomfwctrl_voltage_table));
	kfree(table);

	return 0;
}
*hwmgr
,
1054 phm_ppt_v1_clock_voltage_dependency_table
*dep_table
,
1055 struct pp_atomfwctrl_voltage_table
*vol_table
)
1059 PP_ASSERT_WITH_CODE(dep_table
->count
,
1060 "Voltage Dependency Table empty.",
1063 vol_table
->mask_low
= 0;
1064 vol_table
->phase_delay
= 0;
1065 vol_table
->count
= dep_table
->count
;
1067 for (i
= 0; i
< vol_table
->count
; i
++) {
1068 vol_table
->entries
[i
].value
= dep_table
->entries
[i
].mvdd
;
1069 vol_table
->entries
[i
].smio_low
= 0;
1072 PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr
,
1074 "Failed to trim MVDD Table!",
static int vega10_get_vddci_voltage_table(struct pp_hwmgr *hwmgr,
		phm_ppt_v1_clock_voltage_dependency_table *dep_table,
		struct pp_atomfwctrl_voltage_table *vol_table)
{
	uint32_t i;

	PP_ASSERT_WITH_CODE(dep_table->count,
			"Voltage Dependency Table empty.",
			return -EINVAL);

	vol_table->mask_low = 0;
	vol_table->phase_delay = 0;
	vol_table->count = dep_table->count;

	for (i = 0; i < dep_table->count; i++) {
		vol_table->entries[i].value = dep_table->entries[i].vddci;
		vol_table->entries[i].smio_low = 0;
	}

	PP_ASSERT_WITH_CODE(!vega10_trim_voltage_table(hwmgr, vol_table),
			"Failed to trim VDDCI table.",
			return -1);

	return 0;
}
static int vega10_get_vdd_voltage_table(struct pp_hwmgr *hwmgr,
		phm_ppt_v1_clock_voltage_dependency_table *dep_table,
		struct pp_atomfwctrl_voltage_table *vol_table)
{
	int i;

	PP_ASSERT_WITH_CODE(dep_table->count,
			"Voltage Dependency Table empty.",
			return -EINVAL);

	vol_table->mask_low = 0;
	vol_table->phase_delay = 0;
	vol_table->count = dep_table->count;

	for (i = 0; i < vol_table->count; i++) {
		vol_table->entries[i].value = dep_table->entries[i].vddc;
		vol_table->entries[i].smio_low = 0;
	}

	return 0;
}
/* ---- Voltage Tables ----
 * If the voltage table would be bigger than
 * what will fit into the state table on
 * the SMC keep only the higher entries.
 */
static void vega10_trim_voltage_table_to_fit_state_table(
		struct pp_hwmgr *hwmgr,
		uint32_t max_vol_steps,
		struct pp_atomfwctrl_voltage_table *vol_table)
{
	unsigned int i, diff;

	if (vol_table->count <= max_vol_steps)
		return;

	diff = vol_table->count - max_vol_steps;

	for (i = 0; i < max_vol_steps; i++)
		vol_table->entries[i] = vol_table->entries[i + diff];

	vol_table->count = max_vol_steps;
}
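
/*
 * Illustrative example: with 20 entries and max_vol_steps = 16, diff = 4 and
 * entries[4..19] are shifted down to [0..15], i.e. the four lowest voltage
 * steps are dropped and only the 16 highest remain.
 */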
/**
 * Create Voltage Tables.
 *
 * @param hwmgr  the address of the powerplay hardware manager.
 * @return always 0
 */
static int vega10_construct_voltage_tables(struct pp_hwmgr *hwmgr)
{
	int result;
	struct vega10_hwmgr *data = hwmgr->backend;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;

	if (data->mvdd_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
			data->mvdd_control == VEGA10_VOLTAGE_CONTROL_NONE) {
		result = vega10_get_mvdd_voltage_table(hwmgr,
				table_info->vdd_dep_on_mclk,
				&(data->mvdd_voltage_table));
		PP_ASSERT_WITH_CODE(!result,
				"Failed to retrieve MVDDC table!",
				return result);
	}

	if (data->vddci_control == VEGA10_VOLTAGE_CONTROL_NONE) {
		result = vega10_get_vddci_voltage_table(hwmgr,
				table_info->vdd_dep_on_mclk,
				&(data->vddci_voltage_table));
		PP_ASSERT_WITH_CODE(!result,
				"Failed to retrieve VDDCI_MEM table!",
				return result);
	}

	if (data->vddc_control == VEGA10_VOLTAGE_CONTROL_BY_SVID2 ||
			data->vddc_control == VEGA10_VOLTAGE_CONTROL_NONE) {
		result = vega10_get_vdd_voltage_table(hwmgr,
				table_info->vdd_dep_on_sclk,
				&(data->vddc_voltage_table));
		PP_ASSERT_WITH_CODE(!result,
				"Failed to retrieve VDDCR_SOC table!",
				return result);
	}

	PP_ASSERT_WITH_CODE(data->vddc_voltage_table.count <= 16,
			"Too many voltage values for VDDC. Trimming to fit state table.",
			vega10_trim_voltage_table_to_fit_state_table(hwmgr,
					16, &(data->vddc_voltage_table)));

	PP_ASSERT_WITH_CODE(data->vddci_voltage_table.count <= 16,
			"Too many voltage values for VDDCI. Trimming to fit state table.",
			vega10_trim_voltage_table_to_fit_state_table(hwmgr,
					16, &(data->vddci_voltage_table)));

	PP_ASSERT_WITH_CODE(data->mvdd_voltage_table.count <= 16,
			"Too many voltage values for MVDD. Trimming to fit state table.",
			vega10_trim_voltage_table_to_fit_state_table(hwmgr,
					16, &(data->mvdd_voltage_table)));

	return 0;
}
/*
 * @fn vega10_init_dpm_state
 * @brief Function to initialize all Soft Min/Max and Hard Min/Max to 0xff.
 *
 * @param dpm_state - the address of the DPM Table to initialize.
 * @return None.
 */
static void vega10_init_dpm_state(struct vega10_dpm_state *dpm_state)
{
	dpm_state->soft_min_level = 0xff;
	dpm_state->soft_max_level = 0xff;
	dpm_state->hard_min_level = 0xff;
	dpm_state->hard_max_level = 0xff;
}
static void vega10_setup_default_single_dpm_table(struct pp_hwmgr *hwmgr,
		struct vega10_single_dpm_table *dpm_table,
		struct phm_ppt_v1_clock_voltage_dependency_table *dep_table)
{
	int i;

	dpm_table->count = 0;

	for (i = 0; i < dep_table->count; i++) {
		if (i == 0 || dpm_table->dpm_levels[dpm_table->count - 1].value <=
				dep_table->entries[i].clk) {
			dpm_table->dpm_levels[dpm_table->count].value =
					dep_table->entries[i].clk;
			dpm_table->dpm_levels[dpm_table->count].enabled = true;
			dpm_table->count++;
		}
	}
}
static int vega10_setup_default_pcie_table(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct vega10_pcie_table *pcie_table = &(data->dpm_table.pcie_table);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_pcie_table *bios_pcie_table =
			table_info->pcie_table;
	uint32_t i;

	PP_ASSERT_WITH_CODE(bios_pcie_table->count,
			"Incorrect number of PCIE States from VBIOS!",
			return -1);

	for (i = 0; i < NUM_LINK_LEVELS; i++) {
		if (data->registry_data.pcieSpeedOverride)
			pcie_table->pcie_gen[i] =
					data->registry_data.pcieSpeedOverride;
		else
			pcie_table->pcie_gen[i] =
					bios_pcie_table->entries[i].gen_speed;

		if (data->registry_data.pcieLaneOverride)
			pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width(
					data->registry_data.pcieLaneOverride);
		else
			pcie_table->pcie_lane[i] = (uint8_t)encode_pcie_lane_width(
					bios_pcie_table->entries[i].lane_width);

		if (data->registry_data.pcieClockOverride)
			pcie_table->lclk[i] =
					data->registry_data.pcieClockOverride;
		else
			pcie_table->lclk[i] =
					bios_pcie_table->entries[i].pcie_sclk;
	}

	pcie_table->count = NUM_LINK_LEVELS;

	return 0;
}
/*
 * This function is to initialize all DPM state tables
 * for SMU based on the dependency table.
 * Dynamic state patching function will then trim these
 * state tables to the allowed range based
 * on the power policy or external client requests,
 * such as UVD request, etc.
 */
static int vega10_setup_default_dpm_tables(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct vega10_single_dpm_table *dpm_table;
	uint32_t i;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_soc_table =
			table_info->vdd_dep_on_socclk;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_gfx_table =
			table_info->vdd_dep_on_sclk;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_mclk_table =
			table_info->vdd_dep_on_mclk;
	struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_mm_table =
			table_info->mm_dep_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_dcef_table =
			table_info->vdd_dep_on_dcefclk;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_pix_table =
			table_info->vdd_dep_on_pixclk;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_disp_table =
			table_info->vdd_dep_on_dispclk;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_phy_table =
			table_info->vdd_dep_on_phyclk;

	PP_ASSERT_WITH_CODE(dep_soc_table,
			"SOCCLK dependency table is missing. This table is mandatory",
			return -EINVAL);
	PP_ASSERT_WITH_CODE(dep_soc_table->count >= 1,
			"SOCCLK dependency table is empty. This table is mandatory",
			return -EINVAL);

	PP_ASSERT_WITH_CODE(dep_gfx_table,
			"GFXCLK dependency table is missing. This table is mandatory",
			return -EINVAL);
	PP_ASSERT_WITH_CODE(dep_gfx_table->count >= 1,
			"GFXCLK dependency table is empty. This table is mandatory",
			return -EINVAL);

	PP_ASSERT_WITH_CODE(dep_mclk_table,
			"MCLK dependency table is missing. This table is mandatory",
			return -EINVAL);
	PP_ASSERT_WITH_CODE(dep_mclk_table->count >= 1,
			"MCLK dependency table is empty. This table is mandatory",
			return -EINVAL);

	/* Initialize Sclk DPM table based on allowed Sclk values */
	dpm_table = &(data->dpm_table.soc_table);
	vega10_setup_default_single_dpm_table(hwmgr,
			dpm_table,
			dep_soc_table);

	vega10_init_dpm_state(&(dpm_table->dpm_state));

	dpm_table = &(data->dpm_table.gfx_table);
	vega10_setup_default_single_dpm_table(hwmgr,
			dpm_table,
			dep_gfx_table);
	if (hwmgr->platform_descriptor.overdriveLimit.engineClock == 0)
		hwmgr->platform_descriptor.overdriveLimit.engineClock =
			dpm_table->dpm_levels[dpm_table->count-1].value;
	vega10_init_dpm_state(&(dpm_table->dpm_state));

	/* Initialize Mclk DPM table based on allowed Mclk values */
	data->dpm_table.mem_table.count = 0;
	dpm_table = &(data->dpm_table.mem_table);
	vega10_setup_default_single_dpm_table(hwmgr,
			dpm_table,
			dep_mclk_table);
	if (hwmgr->platform_descriptor.overdriveLimit.memoryClock == 0)
		hwmgr->platform_descriptor.overdriveLimit.memoryClock =
			dpm_table->dpm_levels[dpm_table->count-1].value;
	vega10_init_dpm_state(&(dpm_table->dpm_state));

	data->dpm_table.eclk_table.count = 0;
	dpm_table = &(data->dpm_table.eclk_table);
	for (i = 0; i < dep_mm_table->count; i++) {
		if (i == 0 || dpm_table->dpm_levels
				[dpm_table->count - 1].value <=
				dep_mm_table->entries[i].eclk) {
			dpm_table->dpm_levels[dpm_table->count].value =
					dep_mm_table->entries[i].eclk;
			dpm_table->dpm_levels[dpm_table->count].enabled =
					(i == 0) ? true : false;
			dpm_table->count++;
		}
	}

	vega10_init_dpm_state(&(dpm_table->dpm_state));

	data->dpm_table.vclk_table.count = 0;
	data->dpm_table.dclk_table.count = 0;
	dpm_table = &(data->dpm_table.vclk_table);
	for (i = 0; i < dep_mm_table->count; i++) {
		if (i == 0 || dpm_table->dpm_levels
				[dpm_table->count - 1].value <=
				dep_mm_table->entries[i].vclk) {
			dpm_table->dpm_levels[dpm_table->count].value =
					dep_mm_table->entries[i].vclk;
			dpm_table->dpm_levels[dpm_table->count].enabled =
					(i == 0) ? true : false;
			dpm_table->count++;
		}
	}

	vega10_init_dpm_state(&(dpm_table->dpm_state));

	dpm_table = &(data->dpm_table.dclk_table);
	for (i = 0; i < dep_mm_table->count; i++) {
		if (i == 0 || dpm_table->dpm_levels
				[dpm_table->count - 1].value <=
				dep_mm_table->entries[i].dclk) {
			dpm_table->dpm_levels[dpm_table->count].value =
					dep_mm_table->entries[i].dclk;
			dpm_table->dpm_levels[dpm_table->count].enabled =
					(i == 0) ? true : false;
			dpm_table->count++;
		}
	}

	vega10_init_dpm_state(&(dpm_table->dpm_state));

	/* Assume there is no headless Vega10 for now */
	dpm_table = &(data->dpm_table.dcef_table);
	vega10_setup_default_single_dpm_table(hwmgr,
			dpm_table,
			dep_dcef_table);

	vega10_init_dpm_state(&(dpm_table->dpm_state));

	dpm_table = &(data->dpm_table.pixel_table);
	vega10_setup_default_single_dpm_table(hwmgr,
			dpm_table,
			dep_pix_table);

	vega10_init_dpm_state(&(dpm_table->dpm_state));

	dpm_table = &(data->dpm_table.display_table);
	vega10_setup_default_single_dpm_table(hwmgr,
			dpm_table,
			dep_disp_table);

	vega10_init_dpm_state(&(dpm_table->dpm_state));

	dpm_table = &(data->dpm_table.phy_table);
	vega10_setup_default_single_dpm_table(hwmgr,
			dpm_table,
			dep_phy_table);

	vega10_init_dpm_state(&(dpm_table->dpm_state));

	vega10_setup_default_pcie_table(hwmgr);

	/* Zero out the saved copy of the CUSTOM profile.
	 * This will be checked when trying to set the profile
	 * and will require that new values be passed in.
	 */
	data->custom_profile_mode[0] = 0;
	data->custom_profile_mode[1] = 0;
	data->custom_profile_mode[2] = 0;
	data->custom_profile_mode[3] = 0;

	/* save a copy of the default DPM table */
	memcpy(&(data->golden_dpm_table), &(data->dpm_table),
			sizeof(struct vega10_dpm_table));

	return 0;
}
/*
 * @fn vega10_populate_ulv_state
 * @brief Function to provide parameters for Ultra Low Voltage state to SMC.
 *
 * @param hwmgr - the address of the hardware manager.
 * @return Always 0.
 */
static int vega10_populate_ulv_state(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);

	data->smc_state_table.pp_table.UlvOffsetVid =
			(uint8_t)table_info->us_ulv_voltage_offset;

	data->smc_state_table.pp_table.UlvSmnclkDid =
			(uint8_t)(table_info->us_ulv_smnclk_did);
	data->smc_state_table.pp_table.UlvMp1clkDid =
			(uint8_t)(table_info->us_ulv_mp1clk_did);
	data->smc_state_table.pp_table.UlvGfxclkBypass =
			(uint8_t)(table_info->us_ulv_gfxclk_bypass);
	data->smc_state_table.pp_table.UlvPhaseSheddingPsi0 =
			(uint8_t)(data->vddc_voltage_table.psi0_enable);
	data->smc_state_table.pp_table.UlvPhaseSheddingPsi1 =
			(uint8_t)(data->vddc_voltage_table.psi1_enable);

	return 0;
}
static int vega10_populate_single_lclk_level(struct pp_hwmgr *hwmgr,
		uint32_t lclock, uint8_t *curr_lclk_did)
{
	struct pp_atomfwctrl_clock_dividers_soc15 dividers;

	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
			hwmgr,
			COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
			lclock, &dividers),
			"Failed to get LCLK clock settings from VBIOS!",
			return -1);

	*curr_lclk_did = dividers.ulDid;

	return 0;
}
static int vega10_populate_smc_link_levels(struct pp_hwmgr *hwmgr)
{
	int result = -1;
	struct vega10_hwmgr *data = hwmgr->backend;
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct vega10_pcie_table *pcie_table =
			&(data->dpm_table.pcie_table);
	uint32_t i, j;

	for (i = 0; i < pcie_table->count; i++) {
		pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[i];
		pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[i];

		result = vega10_populate_single_lclk_level(hwmgr,
				pcie_table->lclk[i], &(pp_table->LclkDid[i]));
		if (result) {
			pr_info("Populate LClock Level %d Failed!\n", i);
			return result;
		}
	}

	j = i - 1;
	while (i < NUM_LINK_LEVELS) {
		pp_table->PcieGenSpeed[i] = pcie_table->pcie_gen[j];
		pp_table->PcieLaneCount[i] = pcie_table->pcie_lane[j];

		result = vega10_populate_single_lclk_level(hwmgr,
				pcie_table->lclk[j], &(pp_table->LclkDid[i]));
		if (result) {
			pr_info("Populate LClock Level %d Failed!\n", i);
			return result;
		}
		i++;
	}

	return result;
}
/**
 * Populates a single SMC GFXCLK structure using the provided engine clock.
 *
 * @param hwmgr  the address of the hardware manager
 * @param gfx_clock  the GFX clock to use to populate the structure.
 * @param current_gfxclk_level  location in PPTable for the SMC GFXCLK structure.
 */
static int vega10_populate_single_gfx_level(struct pp_hwmgr *hwmgr,
		uint32_t gfx_clock, PllSetting_t *current_gfxclk_level,
		uint32_t *acg_freq)
{
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_sclk;
	struct vega10_hwmgr *data = hwmgr->backend;
	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
	uint32_t gfx_max_clock =
			hwmgr->platform_descriptor.overdriveLimit.engineClock;
	uint32_t i = 0;

	if (hwmgr->od_enabled)
		dep_on_sclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
					&(data->odn_dpm_table.vdd_dep_on_sclk);
	else
		dep_on_sclk = table_info->vdd_dep_on_sclk;

	PP_ASSERT_WITH_CODE(dep_on_sclk,
			"Invalid SOC_VDD-GFX_CLK Dependency Table!",
			return -EINVAL);

	if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
		gfx_clock = gfx_clock > gfx_max_clock ? gfx_max_clock : gfx_clock;

	for (i = 0; i < dep_on_sclk->count; i++) {
		if (dep_on_sclk->entries[i].clk == gfx_clock)
			break;
	}

	PP_ASSERT_WITH_CODE(dep_on_sclk->count > i,
			"Cannot find gfx_clk in SOC_VDD-GFX_CLK!",
			return -EINVAL);

	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
			COMPUTE_GPUCLK_INPUT_FLAG_GFXCLK,
			gfx_clock, &dividers),
			"Failed to get GFX Clock settings from VBIOS!",
			return -EINVAL);

	/* Feedback Multiplier: bit 0:8 int, bit 15:12 post_div, bit 31:16 frac */
	current_gfxclk_level->FbMult =
			cpu_to_le32(dividers.ulPll_fb_mult);
	/* Spread FB Multiplier bit: bit 0:8 int, bit 31:16 frac */
	current_gfxclk_level->SsOn = dividers.ucPll_ss_enable;
	current_gfxclk_level->SsFbMult =
			cpu_to_le32(dividers.ulPll_ss_fbsmult);
	current_gfxclk_level->SsSlewFrac =
			cpu_to_le16(dividers.usPll_ss_slew_frac);
	current_gfxclk_level->Did = (uint8_t)(dividers.ulDid);

	*acg_freq = gfx_clock / 100; /* 100 Khz to Mhz conversion */

	return 0;
}
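
/*
 * Illustrative example (assuming the clock is expressed in 10 kHz units, as
 * is common elsewhere in powerplay): gfx_clock = 150000 corresponds to
 * 1500 MHz, and the division by 100 above stores acg_freq = 1500.
 */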
/**
 * @brief Populates a single SMC SOCCLK structure using the provided clock.
 *
 * @param hwmgr - the address of the hardware manager.
 * @param soc_clock - the SOC clock to use to populate the structure.
 * @param current_soc_did - location for the SOCCLK divider ID.
 * @param current_vol_index - location for the SOC voltage index.
 * @return 0 on success.
 */
static int vega10_populate_single_soc_level(struct pp_hwmgr *hwmgr,
		uint32_t soc_clock, uint8_t *current_soc_did,
		uint8_t *current_vol_index)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_soc;
	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
	uint32_t i;

	if (hwmgr->od_enabled) {
		dep_on_soc = (struct phm_ppt_v1_clock_voltage_dependency_table *)
						&data->odn_dpm_table.vdd_dep_on_socclk;
		for (i = 0; i < dep_on_soc->count; i++) {
			if (dep_on_soc->entries[i].clk >= soc_clock)
				break;
		}
	} else {
		dep_on_soc = table_info->vdd_dep_on_socclk;
		for (i = 0; i < dep_on_soc->count; i++) {
			if (dep_on_soc->entries[i].clk == soc_clock)
				break;
		}
	}

	PP_ASSERT_WITH_CODE(dep_on_soc->count > i,
			"Cannot find SOC_CLK in SOC_VDD-SOC_CLK Dependency Table",
			return -EINVAL);

	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
			COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
			soc_clock, &dividers),
			"Failed to get SOC Clock settings from VBIOS!",
			return -EINVAL);

	*current_soc_did = (uint8_t)dividers.ulDid;
	*current_vol_index = (uint8_t)(dep_on_soc->entries[i].vddInd);

	return 0;
}
/**
 * Populates all SMC SCLK levels' structure based on the trimmed allowed dpm engine clock states.
 *
 * @param hwmgr  the address of the hardware manager
 */
static int vega10_populate_all_graphic_levels(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.gfx_table);
	int result = 0;
	uint32_t i, j;

	for (i = 0; i < dpm_table->count; i++) {
		result = vega10_populate_single_gfx_level(hwmgr,
				dpm_table->dpm_levels[i].value,
				&(pp_table->GfxclkLevel[i]),
				&(pp_table->AcgFreqTable[i]));
		if (result)
			return result;
	}

	j = i - 1;
	while (i < NUM_GFXCLK_DPM_LEVELS) {
		result = vega10_populate_single_gfx_level(hwmgr,
				dpm_table->dpm_levels[j].value,
				&(pp_table->GfxclkLevel[i]),
				&(pp_table->AcgFreqTable[i]));
		if (result)
			return result;
		i++;
	}

	pp_table->GfxclkSlewRate =
			cpu_to_le16(table_info->us_gfxclk_slew_rate);

	dpm_table = &(data->dpm_table.soc_table);
	for (i = 0; i < dpm_table->count; i++) {
		result = vega10_populate_single_soc_level(hwmgr,
				dpm_table->dpm_levels[i].value,
				&(pp_table->SocclkDid[i]),
				&(pp_table->SocDpmVoltageIndex[i]));
		if (result)
			return result;
	}

	j = i - 1;
	while (i < NUM_SOCCLK_DPM_LEVELS) {
		result = vega10_populate_single_soc_level(hwmgr,
				dpm_table->dpm_levels[j].value,
				&(pp_table->SocclkDid[i]),
				&(pp_table->SocDpmVoltageIndex[i]));
		if (result)
			return result;
		i++;
	}

	return result;
}
static void vega10_populate_vddc_soc_levels(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct phm_ppt_v2_information *table_info = hwmgr->pptable;
	struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;

	uint8_t soc_vid = 0;
	uint32_t i, max_vddc_level;

	if (hwmgr->od_enabled)
		vddc_lookup_table = (struct phm_ppt_v1_voltage_lookup_table *)&data->odn_dpm_table.vddc_lookup_table;
	else
		vddc_lookup_table = table_info->vddc_lookup_table;

	max_vddc_level = vddc_lookup_table->count;
	for (i = 0; i < max_vddc_level; i++) {
		soc_vid = (uint8_t)convert_to_vid(vddc_lookup_table->entries[i].us_vdd);
		pp_table->SocVid[i] = soc_vid;
	}
	while (i < MAX_REGULAR_DPM_NUMBER) {
		pp_table->SocVid[i] = soc_vid;
		i++;
	}
}
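/* Remaining SocVid slots up to MAX_REGULAR_DPM_NUMBER repeat the last computed VID. */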
/**
 * @brief Populates a single SMC memory (UCLK) level structure using the provided memory clock.
 *
 * @param    hwmgr - the address of the hardware manager.
 * @param    mem_clock - the memory clock to use to populate the structure.
 * @return   0 on success.
 */
static int vega10_populate_single_memory_level(struct pp_hwmgr *hwmgr,
		uint32_t mem_clock, uint8_t *current_mem_vid,
		PllSetting_t *current_memclk_level, uint8_t *current_mem_soc_vind)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_on_mclk;
	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
	uint32_t mem_max_clock =
			hwmgr->platform_descriptor.overdriveLimit.memoryClock;
	uint32_t i = 0;

	if (hwmgr->od_enabled)
		dep_on_mclk = (struct phm_ppt_v1_clock_voltage_dependency_table *)
					&data->odn_dpm_table.vdd_dep_on_mclk;
	else
		dep_on_mclk = table_info->vdd_dep_on_mclk;

	PP_ASSERT_WITH_CODE(dep_on_mclk,
			"Invalid SOC_VDD-UCLK Dependency Table!",
			return -EINVAL);

	if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
		mem_clock = mem_clock > mem_max_clock ? mem_max_clock : mem_clock;
	} else {
		for (i = 0; i < dep_on_mclk->count; i++) {
			if (dep_on_mclk->entries[i].clk == mem_clock)
				break;
		}

		PP_ASSERT_WITH_CODE(dep_on_mclk->count > i,
				"Cannot find UCLK in SOC_VDD-UCLK Dependency Table!",
				return -EINVAL);
	}

	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(
			hwmgr, COMPUTE_GPUCLK_INPUT_FLAG_UCLK, mem_clock, &dividers),
			"Failed to get UCLK settings from VBIOS!",
			return -EINVAL);

	*current_mem_vid =
			(uint8_t)(convert_to_vid(dep_on_mclk->entries[i].mvdd));
	*current_mem_soc_vind =
			(uint8_t)(dep_on_mclk->entries[i].vddInd);
	current_memclk_level->FbMult = cpu_to_le32(dividers.ulPll_fb_mult);
	current_memclk_level->Did = (uint8_t)(dividers.ulDid);

	PP_ASSERT_WITH_CODE(current_memclk_level->Did >= 1,
			"Invalid Divider ID!",
			return -EINVAL);

	return 0;
}
/**
 * @brief Populates all SMC MCLK levels' structure based on the trimmed allowed dpm memory clock states.
 *
 * @param    hwmgr - the address of the hardware manager.
 * @return   PP_Result_OK on success.
 */
static int vega10_populate_all_memory_levels(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct vega10_single_dpm_table *dpm_table =
			&(data->dpm_table.mem_table);
	int result = 0;
	uint32_t i, j;

	for (i = 0; i < dpm_table->count; i++) {
		result = vega10_populate_single_memory_level(hwmgr,
				dpm_table->dpm_levels[i].value,
				&(pp_table->MemVid[i]),
				&(pp_table->UclkLevel[i]),
				&(pp_table->MemSocVoltageIndex[i]));
		if (result)
			return result;
	}

	j = i - 1;
	while (i < NUM_UCLK_DPM_LEVELS) {
		result = vega10_populate_single_memory_level(hwmgr,
				dpm_table->dpm_levels[j].value,
				&(pp_table->MemVid[i]),
				&(pp_table->UclkLevel[i]),
				&(pp_table->MemSocVoltageIndex[i]));
		if (result)
			return result;
		i++;
	}

	pp_table->NumMemoryChannels = (uint16_t)(data->mem_channels);
	pp_table->MemoryChannelWidth =
			(uint16_t)(HBM_MEMORY_CHANNEL_WIDTH *
					channel_number[data->mem_channels]);

	pp_table->LowestUclkReservedForUlv =
			(uint8_t)(data->lowest_uclk_reserved_for_ulv);

	return result;
}
static int vega10_populate_single_display_type(struct pp_hwmgr *hwmgr,
		DSPCLK_e disp_clock)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)
			hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
	uint32_t i;
	uint16_t clk = 0, vddc = 0;
	uint8_t vid = 0;

	switch (disp_clock) {
	case DSPCLK_DCEFCLK:
		dep_table = table_info->vdd_dep_on_dcefclk;
		break;
	case DSPCLK_DISPCLK:
		dep_table = table_info->vdd_dep_on_dispclk;
		break;
	case DSPCLK_PIXCLK:
		dep_table = table_info->vdd_dep_on_pixclk;
		break;
	case DSPCLK_PHYCLK:
		dep_table = table_info->vdd_dep_on_phyclk;
		break;
	default:
		return -EINVAL;
	}

	PP_ASSERT_WITH_CODE(dep_table->count <= NUM_DSPCLK_LEVELS,
			"Number Of Entries Exceeded maximum!",
			return -EINVAL);

	for (i = 0; i < dep_table->count; i++) {
		clk = (uint16_t)(dep_table->entries[i].clk / 100);
		vddc = table_info->vddc_lookup_table->
				entries[dep_table->entries[i].vddInd].us_vdd;
		vid = (uint8_t)convert_to_vid(vddc);
		pp_table->DisplayClockTable[disp_clock][i].Freq =
				cpu_to_le16(clk);
		pp_table->DisplayClockTable[disp_clock][i].Vid =
				cpu_to_le16(vid);
	}

	while (i < NUM_DSPCLK_LEVELS) {
		pp_table->DisplayClockTable[disp_clock][i].Freq =
				cpu_to_le16(clk);
		pp_table->DisplayClockTable[disp_clock][i].Vid =
				cpu_to_le16(vid);
		i++;
	}

	return 0;
}
static int vega10_populate_all_display_clock_levels(struct pp_hwmgr *hwmgr)
{
	uint32_t i;

	for (i = 0; i < DSPCLK_COUNT; i++) {
		PP_ASSERT_WITH_CODE(!vega10_populate_single_display_type(hwmgr, i),
				"Failed to populate Clock in DisplayClockTable!",
				return -EINVAL);
	}

	return 0;
}
static int vega10_populate_single_eclock_level(struct pp_hwmgr *hwmgr,
		uint32_t eclock, uint8_t *current_eclk_did,
		uint8_t *current_soc_vol)
{
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
			table_info->mm_dep_table;
	struct pp_atomfwctrl_clock_dividers_soc15 dividers;
	uint32_t i;

	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
			COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
			eclock, &dividers),
			"Failed to get ECLK clock settings from VBIOS!",
			return -EINVAL);

	*current_eclk_did = (uint8_t)dividers.ulDid;

	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].eclk == eclock)
			*current_soc_vol = dep_table->entries[i].vddcInd;
	}

	return 0;
}
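/*
 * The ECLK divider comes from the VBIOS PLL computation, while the SOC
 * voltage index is taken from the MM dependency entry whose eclk matches;
 * the loop has no break, so the last matching entry wins.
 */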
static int vega10_populate_smc_vce_levels(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct vega10_single_dpm_table *dpm_table = &(data->dpm_table.eclk_table);
	int result = -EINVAL;
	uint32_t i, j;

	for (i = 0; i < dpm_table->count; i++) {
		result = vega10_populate_single_eclock_level(hwmgr,
				dpm_table->dpm_levels[i].value,
				&(pp_table->EclkDid[i]),
				&(pp_table->VceDpmVoltageIndex[i]));
		if (result)
			return result;
	}

	j = i - 1;
	while (i < NUM_VCE_DPM_LEVELS) {
		result = vega10_populate_single_eclock_level(hwmgr,
				dpm_table->dpm_levels[j].value,
				&(pp_table->EclkDid[i]),
				&(pp_table->VceDpmVoltageIndex[i]));
		if (result)
			return result;
		i++;
	}

	return result;
}
static int vega10_populate_single_vclock_level(struct pp_hwmgr *hwmgr,
		uint32_t vclock, uint8_t *current_vclk_did)
{
	struct pp_atomfwctrl_clock_dividers_soc15 dividers;

	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
			COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
			vclock, &dividers),
			"Failed to get VCLK clock settings from VBIOS!",
			return -EINVAL);

	*current_vclk_did = (uint8_t)dividers.ulDid;

	return 0;
}
static int vega10_populate_single_dclock_level(struct pp_hwmgr *hwmgr,
		uint32_t dclock, uint8_t *current_dclk_did)
{
	struct pp_atomfwctrl_clock_dividers_soc15 dividers;

	PP_ASSERT_WITH_CODE(!pp_atomfwctrl_get_gpu_pll_dividers_vega10(hwmgr,
			COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
			dclock, &dividers),
			"Failed to get DCLK clock settings from VBIOS!",
			return -EINVAL);

	*current_dclk_did = (uint8_t)dividers.ulDid;

	return 0;
}
static int vega10_populate_smc_uvd_levels(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct vega10_single_dpm_table *vclk_dpm_table =
			&(data->dpm_table.vclk_table);
	struct vega10_single_dpm_table *dclk_dpm_table =
			&(data->dpm_table.dclk_table);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_mm_clock_voltage_dependency_table *dep_table =
			table_info->mm_dep_table;
	int result = -EINVAL;
	uint32_t i, j;

	for (i = 0; i < vclk_dpm_table->count; i++) {
		result = vega10_populate_single_vclock_level(hwmgr,
				vclk_dpm_table->dpm_levels[i].value,
				&(pp_table->VclkDid[i]));
		if (result)
			return result;
	}

	j = i - 1;
	while (i < NUM_UVD_DPM_LEVELS) {
		result = vega10_populate_single_vclock_level(hwmgr,
				vclk_dpm_table->dpm_levels[j].value,
				&(pp_table->VclkDid[i]));
		if (result)
			return result;
		i++;
	}

	for (i = 0; i < dclk_dpm_table->count; i++) {
		result = vega10_populate_single_dclock_level(hwmgr,
				dclk_dpm_table->dpm_levels[i].value,
				&(pp_table->DclkDid[i]));
		if (result)
			return result;
	}

	j = i - 1;
	while (i < NUM_UVD_DPM_LEVELS) {
		result = vega10_populate_single_dclock_level(hwmgr,
				dclk_dpm_table->dpm_levels[j].value,
				&(pp_table->DclkDid[i]));
		if (result)
			return result;
		i++;
	}

	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].vclk ==
				vclk_dpm_table->dpm_levels[i].value &&
			dep_table->entries[i].dclk ==
				dclk_dpm_table->dpm_levels[i].value)
			pp_table->UvdDpmVoltageIndex[i] =
					dep_table->entries[i].vddcInd;
		else
			return -EINVAL;
	}

	j = i - 1;
	while (i < NUM_UVD_DPM_LEVELS) {
		pp_table->UvdDpmVoltageIndex[i] = dep_table->entries[j].vddcInd;
		i++;
	}

	return 0;
}
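/*
 * UVD voltage indices are only valid when the VCLK/DCLK DPM levels line up
 * one-to-one with the MM dependency table entries; a mismatch aborts the
 * populate with an error.
 */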
static int vega10_populate_clock_stretcher_table(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table =
			table_info->vdd_dep_on_sclk;
	uint32_t i;

	for (i = 0; i < dep_table->count; i++) {
		pp_table->CksEnable[i] = dep_table->entries[i].cks_enable;
		pp_table->CksVidOffset[i] = (uint8_t)(dep_table->entries[i].cks_voffset
				* VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
	}

	return 0;
}
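/*
 * The CKS VID offset is rescaled from VOLTAGE_VID_OFFSET_SCALE1 units to
 * VOLTAGE_VID_OFFSET_SCALE2 units before it is handed to the SMC.
 */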
2105 static int vega10_populate_avfs_parameters(struct pp_hwmgr
*hwmgr
)
2107 struct vega10_hwmgr
*data
= hwmgr
->backend
;
2108 PPTable_t
*pp_table
= &(data
->smc_state_table
.pp_table
);
2109 struct phm_ppt_v2_information
*table_info
=
2110 (struct phm_ppt_v2_information
*)(hwmgr
->pptable
);
2111 struct phm_ppt_v1_clock_voltage_dependency_table
*dep_table
=
2112 table_info
->vdd_dep_on_sclk
;
2113 struct pp_atomfwctrl_avfs_parameters avfs_params
= {0};
2117 pp_table
->MinVoltageVid
= (uint8_t)0xff;
2118 pp_table
->MaxVoltageVid
= (uint8_t)0;
2120 if (data
->smu_features
[GNLD_AVFS
].supported
) {
2121 result
= pp_atomfwctrl_get_avfs_information(hwmgr
, &avfs_params
);
2123 pp_table
->MinVoltageVid
= (uint8_t)
2124 convert_to_vid((uint16_t)(avfs_params
.ulMinVddc
));
2125 pp_table
->MaxVoltageVid
= (uint8_t)
2126 convert_to_vid((uint16_t)(avfs_params
.ulMaxVddc
));
2128 pp_table
->AConstant
[0] = cpu_to_le32(avfs_params
.ulMeanNsigmaAcontant0
);
2129 pp_table
->AConstant
[1] = cpu_to_le32(avfs_params
.ulMeanNsigmaAcontant1
);
2130 pp_table
->AConstant
[2] = cpu_to_le32(avfs_params
.ulMeanNsigmaAcontant2
);
2131 pp_table
->DC_tol_sigma
= cpu_to_le16(avfs_params
.usMeanNsigmaDcTolSigma
);
2132 pp_table
->Platform_mean
= cpu_to_le16(avfs_params
.usMeanNsigmaPlatformMean
);
2133 pp_table
->Platform_sigma
= cpu_to_le16(avfs_params
.usMeanNsigmaDcTolSigma
);
2134 pp_table
->PSM_Age_CompFactor
= cpu_to_le16(avfs_params
.usPsmAgeComfactor
);
2136 pp_table
->BtcGbVdroopTableCksOff
.a0
=
2137 cpu_to_le32(avfs_params
.ulGbVdroopTableCksoffA0
);
2138 pp_table
->BtcGbVdroopTableCksOff
.a0_shift
= 20;
2139 pp_table
->BtcGbVdroopTableCksOff
.a1
=
2140 cpu_to_le32(avfs_params
.ulGbVdroopTableCksoffA1
);
2141 pp_table
->BtcGbVdroopTableCksOff
.a1_shift
= 20;
2142 pp_table
->BtcGbVdroopTableCksOff
.a2
=
2143 cpu_to_le32(avfs_params
.ulGbVdroopTableCksoffA2
);
2144 pp_table
->BtcGbVdroopTableCksOff
.a2_shift
= 20;
2146 pp_table
->OverrideBtcGbCksOn
= avfs_params
.ucEnableGbVdroopTableCkson
;
2147 pp_table
->BtcGbVdroopTableCksOn
.a0
=
2148 cpu_to_le32(avfs_params
.ulGbVdroopTableCksonA0
);
2149 pp_table
->BtcGbVdroopTableCksOn
.a0_shift
= 20;
2150 pp_table
->BtcGbVdroopTableCksOn
.a1
=
2151 cpu_to_le32(avfs_params
.ulGbVdroopTableCksonA1
);
2152 pp_table
->BtcGbVdroopTableCksOn
.a1_shift
= 20;
2153 pp_table
->BtcGbVdroopTableCksOn
.a2
=
2154 cpu_to_le32(avfs_params
.ulGbVdroopTableCksonA2
);
2155 pp_table
->BtcGbVdroopTableCksOn
.a2_shift
= 20;
2157 pp_table
->AvfsGbCksOn
.m1
=
2158 cpu_to_le32(avfs_params
.ulGbFuseTableCksonM1
);
2159 pp_table
->AvfsGbCksOn
.m2
=
2160 cpu_to_le32(avfs_params
.ulGbFuseTableCksonM2
);
2161 pp_table
->AvfsGbCksOn
.b
=
2162 cpu_to_le32(avfs_params
.ulGbFuseTableCksonB
);
2163 pp_table
->AvfsGbCksOn
.m1_shift
= 24;
2164 pp_table
->AvfsGbCksOn
.m2_shift
= 12;
2165 pp_table
->AvfsGbCksOn
.b_shift
= 0;
2167 pp_table
->OverrideAvfsGbCksOn
=
2168 avfs_params
.ucEnableGbFuseTableCkson
;
2169 pp_table
->AvfsGbCksOff
.m1
=
2170 cpu_to_le32(avfs_params
.ulGbFuseTableCksoffM1
);
2171 pp_table
->AvfsGbCksOff
.m2
=
2172 cpu_to_le32(avfs_params
.ulGbFuseTableCksoffM2
);
2173 pp_table
->AvfsGbCksOff
.b
=
2174 cpu_to_le32(avfs_params
.ulGbFuseTableCksoffB
);
2175 pp_table
->AvfsGbCksOff
.m1_shift
= 24;
2176 pp_table
->AvfsGbCksOff
.m2_shift
= 12;
2177 pp_table
->AvfsGbCksOff
.b_shift
= 0;
2179 for (i
= 0; i
< dep_table
->count
; i
++)
2180 pp_table
->StaticVoltageOffsetVid
[i
] =
2181 convert_to_vid((uint8_t)(dep_table
->entries
[i
].sclk_offset
));
2183 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT
!=
2184 data
->disp_clk_quad_eqn_a
) &&
2185 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT
!=
2186 data
->disp_clk_quad_eqn_b
)) {
2187 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DISPCLK
].m1
=
2188 (int32_t)data
->disp_clk_quad_eqn_a
;
2189 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DISPCLK
].m2
=
2190 (int32_t)data
->disp_clk_quad_eqn_b
;
2191 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DISPCLK
].b
=
2192 (int32_t)data
->disp_clk_quad_eqn_c
;
2194 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DISPCLK
].m1
=
2195 (int32_t)avfs_params
.ulDispclk2GfxclkM1
;
2196 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DISPCLK
].m2
=
2197 (int32_t)avfs_params
.ulDispclk2GfxclkM2
;
2198 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DISPCLK
].b
=
2199 (int32_t)avfs_params
.ulDispclk2GfxclkB
;
2202 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DISPCLK
].m1_shift
= 24;
2203 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DISPCLK
].m2_shift
= 12;
2204 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DISPCLK
].b_shift
= 12;
2206 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT
!=
2207 data
->dcef_clk_quad_eqn_a
) &&
2208 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT
!=
2209 data
->dcef_clk_quad_eqn_b
)) {
2210 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DCEFCLK
].m1
=
2211 (int32_t)data
->dcef_clk_quad_eqn_a
;
2212 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DCEFCLK
].m2
=
2213 (int32_t)data
->dcef_clk_quad_eqn_b
;
2214 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DCEFCLK
].b
=
2215 (int32_t)data
->dcef_clk_quad_eqn_c
;
2217 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DCEFCLK
].m1
=
2218 (int32_t)avfs_params
.ulDcefclk2GfxclkM1
;
2219 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DCEFCLK
].m2
=
2220 (int32_t)avfs_params
.ulDcefclk2GfxclkM2
;
2221 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DCEFCLK
].b
=
2222 (int32_t)avfs_params
.ulDcefclk2GfxclkB
;
2225 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DCEFCLK
].m1_shift
= 24;
2226 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DCEFCLK
].m2_shift
= 12;
2227 pp_table
->DisplayClock2Gfxclk
[DSPCLK_DCEFCLK
].b_shift
= 12;
2229 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT
!=
2230 data
->pixel_clk_quad_eqn_a
) &&
2231 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT
!=
2232 data
->pixel_clk_quad_eqn_b
)) {
2233 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PIXCLK
].m1
=
2234 (int32_t)data
->pixel_clk_quad_eqn_a
;
2235 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PIXCLK
].m2
=
2236 (int32_t)data
->pixel_clk_quad_eqn_b
;
2237 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PIXCLK
].b
=
2238 (int32_t)data
->pixel_clk_quad_eqn_c
;
2240 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PIXCLK
].m1
=
2241 (int32_t)avfs_params
.ulPixelclk2GfxclkM1
;
2242 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PIXCLK
].m2
=
2243 (int32_t)avfs_params
.ulPixelclk2GfxclkM2
;
2244 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PIXCLK
].b
=
2245 (int32_t)avfs_params
.ulPixelclk2GfxclkB
;
2248 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PIXCLK
].m1_shift
= 24;
2249 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PIXCLK
].m2_shift
= 12;
2250 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PIXCLK
].b_shift
= 12;
2251 if ((PPREGKEY_VEGA10QUADRATICEQUATION_DFLT
!=
2252 data
->phy_clk_quad_eqn_a
) &&
2253 (PPREGKEY_VEGA10QUADRATICEQUATION_DFLT
!=
2254 data
->phy_clk_quad_eqn_b
)) {
2255 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PHYCLK
].m1
=
2256 (int32_t)data
->phy_clk_quad_eqn_a
;
2257 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PHYCLK
].m2
=
2258 (int32_t)data
->phy_clk_quad_eqn_b
;
2259 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PHYCLK
].b
=
2260 (int32_t)data
->phy_clk_quad_eqn_c
;
2262 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PHYCLK
].m1
=
2263 (int32_t)avfs_params
.ulPhyclk2GfxclkM1
;
2264 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PHYCLK
].m2
=
2265 (int32_t)avfs_params
.ulPhyclk2GfxclkM2
;
2266 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PHYCLK
].b
=
2267 (int32_t)avfs_params
.ulPhyclk2GfxclkB
;
2270 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PHYCLK
].m1_shift
= 24;
2271 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PHYCLK
].m2_shift
= 12;
2272 pp_table
->DisplayClock2Gfxclk
[DSPCLK_PHYCLK
].b_shift
= 12;
2274 pp_table
->AcgBtcGbVdroopTable
.a0
= avfs_params
.ulAcgGbVdroopTableA0
;
2275 pp_table
->AcgBtcGbVdroopTable
.a0_shift
= 20;
2276 pp_table
->AcgBtcGbVdroopTable
.a1
= avfs_params
.ulAcgGbVdroopTableA1
;
2277 pp_table
->AcgBtcGbVdroopTable
.a1_shift
= 20;
2278 pp_table
->AcgBtcGbVdroopTable
.a2
= avfs_params
.ulAcgGbVdroopTableA2
;
2279 pp_table
->AcgBtcGbVdroopTable
.a2_shift
= 20;
2281 pp_table
->AcgAvfsGb
.m1
= avfs_params
.ulAcgGbFuseTableM1
;
2282 pp_table
->AcgAvfsGb
.m2
= avfs_params
.ulAcgGbFuseTableM2
;
2283 pp_table
->AcgAvfsGb
.b
= avfs_params
.ulAcgGbFuseTableB
;
2284 pp_table
->AcgAvfsGb
.m1_shift
= 24;
2285 pp_table
->AcgAvfsGb
.m2_shift
= 12;
2286 pp_table
->AcgAvfsGb
.b_shift
= 0;
2289 data
->smu_features
[GNLD_AVFS
].supported
= false;
static int vega10_acg_enable(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	uint32_t agc_btc_response;

	if (data->smu_features[GNLD_ACG].supported) {
		if (0 == vega10_enable_smc_features(hwmgr, true,
				data->smu_features[GNLD_DPM_PREFETCHER].smu_feature_bitmap))
			data->smu_features[GNLD_DPM_PREFETCHER].enabled = true;

		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_InitializeAcg);

		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc);
		agc_btc_response = smum_get_argument(hwmgr);

		if (1 == agc_btc_response) {
			if (1 == data->acg_loop_state)
				smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInClosedLoop);
			else if (2 == data->acg_loop_state)
				smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgInOpenLoop);
			if (0 == vega10_enable_smc_features(hwmgr, true,
					data->smu_features[GNLD_ACG].smu_feature_bitmap))
				data->smu_features[GNLD_ACG].enabled = true;
		} else {
			pr_info("[ACG_Enable] ACG BTC Returned Failed Status!\n");
			data->smu_features[GNLD_ACG].enabled = false;
		}
	}

	return 0;
}
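/*
 * ACG bring-up sequence: enable the DPM prefetcher, initialize ACG, then run
 * the built-in calibration (BTC). A response of 1 means the calibration
 * passed; the loop mode (closed or open) is then selected from
 * acg_loop_state before the ACG feature itself is enabled.
 */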
static int vega10_acg_disable(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	if (data->smu_features[GNLD_ACG].supported &&
	    data->smu_features[GNLD_ACG].enabled)
		if (!vega10_enable_smc_features(hwmgr, false,
				data->smu_features[GNLD_ACG].smu_feature_bitmap))
			data->smu_features[GNLD_ACG].enabled = false;

	return 0;
}
static int vega10_populate_gpio_parameters(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);
	struct pp_atomfwctrl_gpio_parameters gpio_params = {0};
	int result;

	result = pp_atomfwctrl_get_gpio_information(hwmgr, &gpio_params);
	if (!result) {
		if (PP_CAP(PHM_PlatformCaps_RegulatorHot) &&
		    data->registry_data.regulator_hot_gpio_support) {
			pp_table->VR0HotGpio = gpio_params.ucVR0HotGpio;
			pp_table->VR0HotPolarity = gpio_params.ucVR0HotPolarity;
			pp_table->VR1HotGpio = gpio_params.ucVR1HotGpio;
			pp_table->VR1HotPolarity = gpio_params.ucVR1HotPolarity;
		} else {
			pp_table->VR0HotGpio = 0;
			pp_table->VR0HotPolarity = 0;
			pp_table->VR1HotGpio = 0;
			pp_table->VR1HotPolarity = 0;
		}

		if (PP_CAP(PHM_PlatformCaps_AutomaticDCTransition) &&
		    data->registry_data.ac_dc_switch_gpio_support) {
			pp_table->AcDcGpio = gpio_params.ucAcDcGpio;
			pp_table->AcDcPolarity = gpio_params.ucAcDcPolarity;
		} else {
			pp_table->AcDcGpio = 0;
			pp_table->AcDcPolarity = 0;
		}
	}

	return result;
}
static int vega10_avfs_enable(struct pp_hwmgr *hwmgr, bool enable)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	if (data->smu_features[GNLD_AVFS].supported) {
		/* Already enabled or disabled */
		if (!(enable ^ data->smu_features[GNLD_AVFS].enabled))
			return 0;

		if (enable) {
			PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
					true,
					data->smu_features[GNLD_AVFS].smu_feature_bitmap),
					"[avfs_control] Attempt to Enable AVFS feature Failed!",
					return -EINVAL);
			data->smu_features[GNLD_AVFS].enabled = true;
		} else {
			PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
					false,
					data->smu_features[GNLD_AVFS].smu_feature_bitmap),
					"[avfs_control] Attempt to Disable AVFS feature Failed!",
					return -EINVAL);
			data->smu_features[GNLD_AVFS].enabled = false;
		}
	}

	return 0;
}
static int vega10_update_avfs(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	if (data->need_update_dpm_table & DPMTABLE_OD_UPDATE_VDDC) {
		vega10_avfs_enable(hwmgr, false);
	} else if (data->need_update_dpm_table) {
		vega10_avfs_enable(hwmgr, false);
		vega10_avfs_enable(hwmgr, true);
	} else {
		vega10_avfs_enable(hwmgr, true);
	}

	return 0;
}
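/*
 * An overdrive VDDC override forces AVFS off; any other pending DPM table
 * change cycles AVFS off and back on so the voltage/frequency curves are
 * re-evaluated against the updated tables.
 */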
static int vega10_populate_and_upload_avfs_fuse_override(struct pp_hwmgr *hwmgr)
{
	int result = 0;

	uint64_t serial_number = 0;
	uint32_t top32, bottom32;
	struct phm_fuses_default fuse;

	struct vega10_hwmgr *data = hwmgr->backend;
	AvfsFuseOverride_t *avfs_fuse_table = &(data->smc_state_table.avfs_fuse_override_table);

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumTop32);
	top32 = smum_get_argument(hwmgr);

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ReadSerialNumBottom32);
	bottom32 = smum_get_argument(hwmgr);

	serial_number = ((uint64_t)bottom32 << 32) | top32;

	if (pp_override_get_default_fuse_value(serial_number, &fuse) == 0) {
		avfs_fuse_table->VFT0_b  = fuse.VFT0_b;
		avfs_fuse_table->VFT0_m1 = fuse.VFT0_m1;
		avfs_fuse_table->VFT0_m2 = fuse.VFT0_m2;
		avfs_fuse_table->VFT1_b  = fuse.VFT1_b;
		avfs_fuse_table->VFT1_m1 = fuse.VFT1_m1;
		avfs_fuse_table->VFT1_m2 = fuse.VFT1_m2;
		avfs_fuse_table->VFT2_b  = fuse.VFT2_b;
		avfs_fuse_table->VFT2_m1 = fuse.VFT2_m1;
		avfs_fuse_table->VFT2_m2 = fuse.VFT2_m2;
		result = smum_smc_table_manager(hwmgr, (uint8_t *)avfs_fuse_table,
						AVFSFUSETABLE, false);
		PP_ASSERT_WITH_CODE(!result,
				"Failed to upload FuseOverride!",
				);
	}

	return result;
}
static void vega10_check_dpm_table_updated(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct vega10_odn_dpm_table *odn_table = &(data->odn_dpm_table);
	struct phm_ppt_v2_information *table_info = hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *dep_table;
	struct phm_ppt_v1_clock_voltage_dependency_table *odn_dep_table;
	uint32_t i;

	dep_table = table_info->vdd_dep_on_mclk;
	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_mclk);

	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
			data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_MCLK;
			return;
		}
	}

	dep_table = table_info->vdd_dep_on_sclk;
	odn_dep_table = (struct phm_ppt_v1_clock_voltage_dependency_table *)&(odn_table->vdd_dep_on_sclk);
	for (i = 0; i < dep_table->count; i++) {
		if (dep_table->entries[i].vddc != odn_dep_table->entries[i].vddc) {
			data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC | DPMTABLE_OD_UPDATE_SCLK;
			return;
		}
	}
}
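/*
 * Only VDDC deltas between the stock and ODN dependency tables are detected
 * here; overdrive clock edits set their DPMTABLE_OD_UPDATE_* flags elsewhere.
 */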
/**
 * Initializes the SMC table and uploads it
 *
 * @param    hwmgr  the address of the powerplay hardware manager.
 * @return   0 on success.
 */
static int vega10_init_smc_table(struct pp_hwmgr *hwmgr)
{
2499 struct vega10_hwmgr
*data
= hwmgr
->backend
;
2500 struct phm_ppt_v2_information
*table_info
=
2501 (struct phm_ppt_v2_information
*)(hwmgr
->pptable
);
2502 PPTable_t
*pp_table
= &(data
->smc_state_table
.pp_table
);
2503 struct pp_atomfwctrl_voltage_table voltage_table
;
2504 struct pp_atomfwctrl_bios_boot_up_values boot_up_values
;
2505 struct vega10_odn_dpm_table
*odn_table
= &(data
->odn_dpm_table
);
2507 result
= vega10_setup_default_dpm_tables(hwmgr
);
2508 PP_ASSERT_WITH_CODE(!result
,
2509 "Failed to setup default DPM tables!",
2515 /* initialize ODN table */
2516 if (hwmgr
->od_enabled
) {
2517 if (odn_table
->max_vddc
) {
2518 data
->need_update_dpm_table
|= DPMTABLE_OD_UPDATE_SCLK
| DPMTABLE_OD_UPDATE_MCLK
;
2519 vega10_check_dpm_table_updated(hwmgr
);
2521 vega10_odn_initial_default_setting(hwmgr
);
2525 pp_atomfwctrl_get_voltage_table_v4(hwmgr
, VOLTAGE_TYPE_VDDC
,
2526 VOLTAGE_OBJ_SVID2
, &voltage_table
);
2527 pp_table
->MaxVidStep
= voltage_table
.max_vid_step
;
2529 pp_table
->GfxDpmVoltageMode
=
2530 (uint8_t)(table_info
->uc_gfx_dpm_voltage_mode
);
2531 pp_table
->SocDpmVoltageMode
=
2532 (uint8_t)(table_info
->uc_soc_dpm_voltage_mode
);
2533 pp_table
->UclkDpmVoltageMode
=
2534 (uint8_t)(table_info
->uc_uclk_dpm_voltage_mode
);
2535 pp_table
->UvdDpmVoltageMode
=
2536 (uint8_t)(table_info
->uc_uvd_dpm_voltage_mode
);
2537 pp_table
->VceDpmVoltageMode
=
2538 (uint8_t)(table_info
->uc_vce_dpm_voltage_mode
);
2539 pp_table
->Mp0DpmVoltageMode
=
2540 (uint8_t)(table_info
->uc_mp0_dpm_voltage_mode
);
2542 pp_table
->DisplayDpmVoltageMode
=
2543 (uint8_t)(table_info
->uc_dcef_dpm_voltage_mode
);
2545 data
->vddc_voltage_table
.psi0_enable
= voltage_table
.psi0_enable
;
2546 data
->vddc_voltage_table
.psi1_enable
= voltage_table
.psi1_enable
;
2548 if (data
->registry_data
.ulv_support
&&
2549 table_info
->us_ulv_voltage_offset
) {
2550 result
= vega10_populate_ulv_state(hwmgr
);
2551 PP_ASSERT_WITH_CODE(!result
,
2552 "Failed to initialize ULV state!",
2556 result
= vega10_populate_smc_link_levels(hwmgr
);
2557 PP_ASSERT_WITH_CODE(!result
,
2558 "Failed to initialize Link Level!",
2561 result
= vega10_populate_all_graphic_levels(hwmgr
);
2562 PP_ASSERT_WITH_CODE(!result
,
2563 "Failed to initialize Graphics Level!",
2566 result
= vega10_populate_all_memory_levels(hwmgr
);
2567 PP_ASSERT_WITH_CODE(!result
,
2568 "Failed to initialize Memory Level!",
2571 vega10_populate_vddc_soc_levels(hwmgr
);
2573 result
= vega10_populate_all_display_clock_levels(hwmgr
);
2574 PP_ASSERT_WITH_CODE(!result
,
2575 "Failed to initialize Display Level!",
2578 result
= vega10_populate_smc_vce_levels(hwmgr
);
2579 PP_ASSERT_WITH_CODE(!result
,
2580 "Failed to initialize VCE Level!",
2583 result
= vega10_populate_smc_uvd_levels(hwmgr
);
2584 PP_ASSERT_WITH_CODE(!result
,
2585 "Failed to initialize UVD Level!",
2588 if (data
->registry_data
.clock_stretcher_support
) {
2589 result
= vega10_populate_clock_stretcher_table(hwmgr
);
2590 PP_ASSERT_WITH_CODE(!result
,
2591 "Failed to populate Clock Stretcher Table!",
2595 result
= pp_atomfwctrl_get_vbios_bootup_values(hwmgr
, &boot_up_values
);
2597 data
->vbios_boot_state
.vddc
= boot_up_values
.usVddc
;
2598 data
->vbios_boot_state
.vddci
= boot_up_values
.usVddci
;
2599 data
->vbios_boot_state
.mvddc
= boot_up_values
.usMvddc
;
2600 data
->vbios_boot_state
.gfx_clock
= boot_up_values
.ulGfxClk
;
2601 data
->vbios_boot_state
.mem_clock
= boot_up_values
.ulUClk
;
2602 pp_atomfwctrl_get_clk_information_by_clkid(hwmgr
,
2603 SMU9_SYSPLL0_SOCCLK_ID
, 0, &boot_up_values
.ulSocClk
);
2605 pp_atomfwctrl_get_clk_information_by_clkid(hwmgr
,
2606 SMU9_SYSPLL0_DCEFCLK_ID
, 0, &boot_up_values
.ulDCEFClk
);
2608 data
->vbios_boot_state
.soc_clock
= boot_up_values
.ulSocClk
;
2609 data
->vbios_boot_state
.dcef_clock
= boot_up_values
.ulDCEFClk
;
2610 if (0 != boot_up_values
.usVddc
) {
2611 smum_send_msg_to_smc_with_parameter(hwmgr
,
2612 PPSMC_MSG_SetFloorSocVoltage
,
2613 (boot_up_values
.usVddc
* 4));
2614 data
->vbios_boot_state
.bsoc_vddc_lock
= true;
2616 data
->vbios_boot_state
.bsoc_vddc_lock
= false;
2618 smum_send_msg_to_smc_with_parameter(hwmgr
,
2619 PPSMC_MSG_SetMinDeepSleepDcefclk
,
2620 (uint32_t)(data
->vbios_boot_state
.dcef_clock
/ 100));
2623 result
= vega10_populate_avfs_parameters(hwmgr
);
2624 PP_ASSERT_WITH_CODE(!result
,
2625 "Failed to initialize AVFS Parameters!",
2628 result
= vega10_populate_gpio_parameters(hwmgr
);
2629 PP_ASSERT_WITH_CODE(!result
,
2630 "Failed to initialize GPIO Parameters!",
2633 pp_table
->GfxclkAverageAlpha
= (uint8_t)
2634 (data
->gfxclk_average_alpha
);
2635 pp_table
->SocclkAverageAlpha
= (uint8_t)
2636 (data
->socclk_average_alpha
);
2637 pp_table
->UclkAverageAlpha
= (uint8_t)
2638 (data
->uclk_average_alpha
);
2639 pp_table
->GfxActivityAverageAlpha
= (uint8_t)
2640 (data
->gfx_activity_average_alpha
);
2642 vega10_populate_and_upload_avfs_fuse_override(hwmgr
);
2644 result
= smum_smc_table_manager(hwmgr
, (uint8_t *)pp_table
, PPTABLE
, false);
2646 PP_ASSERT_WITH_CODE(!result
,
2647 "Failed to upload PPtable!", return result
);
2649 result
= vega10_avfs_enable(hwmgr
, true);
2650 PP_ASSERT_WITH_CODE(!result
, "Attempt to enable AVFS feature Failed!",
2652 vega10_acg_enable(hwmgr
);
static int vega10_enable_thermal_protection(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	if (data->smu_features[GNLD_THERMAL].supported) {
		if (data->smu_features[GNLD_THERMAL].enabled)
			pr_info("THERMAL Feature Already enabled!");

		PP_ASSERT_WITH_CODE(
				!vega10_enable_smc_features(hwmgr,
				true,
				data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
				"Enable THERMAL Feature Failed!",
				return -EINVAL);
		data->smu_features[GNLD_THERMAL].enabled = true;
	}

	return 0;
}
static int vega10_disable_thermal_protection(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	if (data->smu_features[GNLD_THERMAL].supported) {
		if (!data->smu_features[GNLD_THERMAL].enabled)
			pr_info("THERMAL Feature Already disabled!");

		PP_ASSERT_WITH_CODE(
				!vega10_enable_smc_features(hwmgr,
				false,
				data->smu_features[GNLD_THERMAL].smu_feature_bitmap),
				"disable THERMAL Feature Failed!",
				return -EINVAL);
		data->smu_features[GNLD_THERMAL].enabled = false;
	}

	return 0;
}
static int vega10_enable_vrhot_feature(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	if (PP_CAP(PHM_PlatformCaps_RegulatorHot)) {
		if (data->smu_features[GNLD_VR0HOT].supported) {
			PP_ASSERT_WITH_CODE(
					!vega10_enable_smc_features(hwmgr,
					true,
					data->smu_features[GNLD_VR0HOT].smu_feature_bitmap),
					"Attempt to Enable VR0 Hot feature Failed!",
					return -EINVAL);
			data->smu_features[GNLD_VR0HOT].enabled = true;
		} else {
			if (data->smu_features[GNLD_VR1HOT].supported) {
				PP_ASSERT_WITH_CODE(
						!vega10_enable_smc_features(hwmgr,
						true,
						data->smu_features[GNLD_VR1HOT].smu_feature_bitmap),
						"Attempt to Enable VR1 Hot feature Failed!",
						return -EINVAL);
				data->smu_features[GNLD_VR1HOT].enabled = true;
			}
		}
	}

	return 0;
}
static int vega10_enable_ulv(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	if (data->registry_data.ulv_support) {
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
				true, data->smu_features[GNLD_ULV].smu_feature_bitmap),
				"Enable ULV Feature Failed!",
				return -EINVAL);
		data->smu_features[GNLD_ULV].enabled = true;
	}

	return 0;
}
static int vega10_disable_ulv(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	if (data->registry_data.ulv_support) {
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
				false, data->smu_features[GNLD_ULV].smu_feature_bitmap),
				"disable ULV Feature Failed!",
				return -EINVAL);
		data->smu_features[GNLD_ULV].enabled = false;
	}

	return 0;
}
2755 static int vega10_enable_deep_sleep_master_switch(struct pp_hwmgr
*hwmgr
)
2757 struct vega10_hwmgr
*data
= hwmgr
->backend
;
2759 if (data
->smu_features
[GNLD_DS_GFXCLK
].supported
) {
2760 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr
,
2761 true, data
->smu_features
[GNLD_DS_GFXCLK
].smu_feature_bitmap
),
2762 "Attempt to Enable DS_GFXCLK Feature Failed!",
2764 data
->smu_features
[GNLD_DS_GFXCLK
].enabled
= true;
2767 if (data
->smu_features
[GNLD_DS_SOCCLK
].supported
) {
2768 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr
,
2769 true, data
->smu_features
[GNLD_DS_SOCCLK
].smu_feature_bitmap
),
2770 "Attempt to Enable DS_SOCCLK Feature Failed!",
2772 data
->smu_features
[GNLD_DS_SOCCLK
].enabled
= true;
2775 if (data
->smu_features
[GNLD_DS_LCLK
].supported
) {
2776 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr
,
2777 true, data
->smu_features
[GNLD_DS_LCLK
].smu_feature_bitmap
),
2778 "Attempt to Enable DS_LCLK Feature Failed!",
2780 data
->smu_features
[GNLD_DS_LCLK
].enabled
= true;
2783 if (data
->smu_features
[GNLD_DS_DCEFCLK
].supported
) {
2784 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr
,
2785 true, data
->smu_features
[GNLD_DS_DCEFCLK
].smu_feature_bitmap
),
2786 "Attempt to Enable DS_DCEFCLK Feature Failed!",
2788 data
->smu_features
[GNLD_DS_DCEFCLK
].enabled
= true;
2794 static int vega10_disable_deep_sleep_master_switch(struct pp_hwmgr
*hwmgr
)
2796 struct vega10_hwmgr
*data
= hwmgr
->backend
;
2798 if (data
->smu_features
[GNLD_DS_GFXCLK
].supported
) {
2799 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr
,
2800 false, data
->smu_features
[GNLD_DS_GFXCLK
].smu_feature_bitmap
),
2801 "Attempt to disable DS_GFXCLK Feature Failed!",
2803 data
->smu_features
[GNLD_DS_GFXCLK
].enabled
= false;
2806 if (data
->smu_features
[GNLD_DS_SOCCLK
].supported
) {
2807 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr
,
2808 false, data
->smu_features
[GNLD_DS_SOCCLK
].smu_feature_bitmap
),
2809 "Attempt to disable DS_ Feature Failed!",
2811 data
->smu_features
[GNLD_DS_SOCCLK
].enabled
= false;
2814 if (data
->smu_features
[GNLD_DS_LCLK
].supported
) {
2815 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr
,
2816 false, data
->smu_features
[GNLD_DS_LCLK
].smu_feature_bitmap
),
2817 "Attempt to disable DS_LCLK Feature Failed!",
2819 data
->smu_features
[GNLD_DS_LCLK
].enabled
= false;
2822 if (data
->smu_features
[GNLD_DS_DCEFCLK
].supported
) {
2823 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr
,
2824 false, data
->smu_features
[GNLD_DS_DCEFCLK
].smu_feature_bitmap
),
2825 "Attempt to disable DS_DCEFCLK Feature Failed!",
2827 data
->smu_features
[GNLD_DS_DCEFCLK
].enabled
= false;
static int vega10_stop_dpm(struct pp_hwmgr *hwmgr, uint32_t bitmap)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	uint32_t i, feature_mask = 0;

	if (!hwmgr->not_vf)
		return 0;

	if (data->smu_features[GNLD_LED_DISPLAY].supported == true) {
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
				false, data->smu_features[GNLD_LED_DISPLAY].smu_feature_bitmap),
				"Attempt to disable LED DPM feature failed!", return -EINVAL);
		data->smu_features[GNLD_LED_DISPLAY].enabled = false;
	}

	for (i = 0; i < GNLD_DPM_MAX; i++) {
		if (data->smu_features[i].smu_feature_bitmap & bitmap) {
			if (data->smu_features[i].supported) {
				if (data->smu_features[i].enabled) {
					feature_mask |= data->smu_features[i].
							smu_feature_bitmap;
					data->smu_features[i].enabled = false;
				}
			}
		}
	}

	vega10_enable_smc_features(hwmgr, false, feature_mask);

	return 0;
}
/**
 * @brief Tell SMC to enable the supported DPMs.
 *
 * @param    hwmgr - the address of the powerplay hardware manager.
 * @param    bitmap - bitmap of the features to enable.
 * @return   0 if at least one DPM is successfully enabled.
 */
2872 static int vega10_start_dpm(struct pp_hwmgr
*hwmgr
, uint32_t bitmap
)
2874 struct vega10_hwmgr
*data
= hwmgr
->backend
;
2875 uint32_t i
, feature_mask
= 0;
2877 for (i
= 0; i
< GNLD_DPM_MAX
; i
++) {
2878 if (data
->smu_features
[i
].smu_feature_bitmap
& bitmap
) {
2879 if (data
->smu_features
[i
].supported
) {
2880 if (!data
->smu_features
[i
].enabled
) {
2881 feature_mask
|= data
->smu_features
[i
].
2883 data
->smu_features
[i
].enabled
= true;
2889 if (vega10_enable_smc_features(hwmgr
,
2890 true, feature_mask
)) {
2891 for (i
= 0; i
< GNLD_DPM_MAX
; i
++) {
2892 if (data
->smu_features
[i
].smu_feature_bitmap
&
2894 data
->smu_features
[i
].enabled
= false;
2898 if(data
->smu_features
[GNLD_LED_DISPLAY
].supported
== true){
2899 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr
,
2900 true, data
->smu_features
[GNLD_LED_DISPLAY
].smu_feature_bitmap
),
2901 "Attempt to Enable LED DPM feature Failed!", return -EINVAL
);
2902 data
->smu_features
[GNLD_LED_DISPLAY
].enabled
= true;
2905 if (data
->vbios_boot_state
.bsoc_vddc_lock
) {
2906 smum_send_msg_to_smc_with_parameter(hwmgr
,
2907 PPSMC_MSG_SetFloorSocVoltage
, 0);
2908 data
->vbios_boot_state
.bsoc_vddc_lock
= false;
2911 if (PP_CAP(PHM_PlatformCaps_Falcon_QuickTransition
)) {
2912 if (data
->smu_features
[GNLD_ACDC
].supported
) {
2913 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr
,
2914 true, data
->smu_features
[GNLD_ACDC
].smu_feature_bitmap
),
2915 "Attempt to Enable DS_GFXCLK Feature Failed!",
2917 data
->smu_features
[GNLD_ACDC
].enabled
= true;
static int vega10_enable_disable_PCC_limit_feature(struct pp_hwmgr *hwmgr, bool enable)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	if (data->smu_features[GNLD_PCC_LIMIT].supported) {
		if (enable == data->smu_features[GNLD_PCC_LIMIT].enabled)
			pr_info("GNLD_PCC_LIMIT has been %s \n", enable ? "enabled" : "disabled");
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
				enable, data->smu_features[GNLD_PCC_LIMIT].smu_feature_bitmap),
				"Attempt to Enable PCC Limit feature Failed!",
				return -EINVAL);
		data->smu_features[GNLD_PCC_LIMIT].enabled = enable;
	}

	return 0;
}
2941 static int vega10_enable_dpm_tasks(struct pp_hwmgr
*hwmgr
)
2943 struct vega10_hwmgr
*data
= hwmgr
->backend
;
2944 int tmp_result
, result
= 0;
2946 if (hwmgr
->not_vf
) {
2947 vega10_enable_disable_PCC_limit_feature(hwmgr
, true);
2949 smum_send_msg_to_smc_with_parameter(hwmgr
,
2950 PPSMC_MSG_ConfigureTelemetry
, data
->config_telemetry
);
2952 tmp_result
= vega10_construct_voltage_tables(hwmgr
);
2953 PP_ASSERT_WITH_CODE(!tmp_result
,
2954 "Failed to construct voltage tables!",
2955 result
= tmp_result
);
2958 if (hwmgr
->not_vf
|| hwmgr
->pp_one_vf
) {
2959 tmp_result
= vega10_init_smc_table(hwmgr
);
2960 PP_ASSERT_WITH_CODE(!tmp_result
,
2961 "Failed to initialize SMC table!",
2962 result
= tmp_result
);
2965 if (hwmgr
->not_vf
) {
2966 if (PP_CAP(PHM_PlatformCaps_ThermalController
)) {
2967 tmp_result
= vega10_enable_thermal_protection(hwmgr
);
2968 PP_ASSERT_WITH_CODE(!tmp_result
,
2969 "Failed to enable thermal protection!",
2970 result
= tmp_result
);
2973 tmp_result
= vega10_enable_vrhot_feature(hwmgr
);
2974 PP_ASSERT_WITH_CODE(!tmp_result
,
2975 "Failed to enable VR hot feature!",
2976 result
= tmp_result
);
2978 tmp_result
= vega10_enable_deep_sleep_master_switch(hwmgr
);
2979 PP_ASSERT_WITH_CODE(!tmp_result
,
2980 "Failed to enable deep sleep master switch!",
2981 result
= tmp_result
);
2984 if (hwmgr
->not_vf
) {
2985 tmp_result
= vega10_start_dpm(hwmgr
, SMC_DPM_FEATURES
);
2986 PP_ASSERT_WITH_CODE(!tmp_result
,
2987 "Failed to start DPM!", result
= tmp_result
);
2990 if (hwmgr
->not_vf
) {
2991 /* enable didt, do not abort if failed didt */
2992 tmp_result
= vega10_enable_didt_config(hwmgr
);
2993 PP_ASSERT(!tmp_result
,
2994 "Failed to enable didt config!");
2997 tmp_result
= vega10_enable_power_containment(hwmgr
);
2998 PP_ASSERT_WITH_CODE(!tmp_result
,
2999 "Failed to enable power containment!",
3000 result
= tmp_result
);
3002 if (hwmgr
->not_vf
) {
3003 tmp_result
= vega10_power_control_set_level(hwmgr
);
3004 PP_ASSERT_WITH_CODE(!tmp_result
,
3005 "Failed to power control set level!",
3006 result
= tmp_result
);
3008 tmp_result
= vega10_enable_ulv(hwmgr
);
3009 PP_ASSERT_WITH_CODE(!tmp_result
,
3010 "Failed to enable ULV!",
3011 result
= tmp_result
);
static int vega10_get_power_state_size(struct pp_hwmgr *hwmgr)
{
	return sizeof(struct vega10_power_state);
}
3022 static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr
*hwmgr
,
3023 void *state
, struct pp_power_state
*power_state
,
3024 void *pp_table
, uint32_t classification_flag
)
3026 ATOM_Vega10_GFXCLK_Dependency_Record_V2
*patom_record_V2
;
3027 struct vega10_power_state
*vega10_power_state
=
3028 cast_phw_vega10_power_state(&(power_state
->hardware
));
3029 struct vega10_performance_level
*performance_level
;
3030 ATOM_Vega10_State
*state_entry
= (ATOM_Vega10_State
*)state
;
3031 ATOM_Vega10_POWERPLAYTABLE
*powerplay_table
=
3032 (ATOM_Vega10_POWERPLAYTABLE
*)pp_table
;
3033 ATOM_Vega10_SOCCLK_Dependency_Table
*socclk_dep_table
=
3034 (ATOM_Vega10_SOCCLK_Dependency_Table
*)
3035 (((unsigned long)powerplay_table
) +
3036 le16_to_cpu(powerplay_table
->usSocclkDependencyTableOffset
));
3037 ATOM_Vega10_GFXCLK_Dependency_Table
*gfxclk_dep_table
=
3038 (ATOM_Vega10_GFXCLK_Dependency_Table
*)
3039 (((unsigned long)powerplay_table
) +
3040 le16_to_cpu(powerplay_table
->usGfxclkDependencyTableOffset
));
3041 ATOM_Vega10_MCLK_Dependency_Table
*mclk_dep_table
=
3042 (ATOM_Vega10_MCLK_Dependency_Table
*)
3043 (((unsigned long)powerplay_table
) +
3044 le16_to_cpu(powerplay_table
->usMclkDependencyTableOffset
));
3047 /* The following fields are not initialized here:
3048 * id orderedList allStatesList
3050 power_state
->classification
.ui_label
=
3051 (le16_to_cpu(state_entry
->usClassification
) &
3052 ATOM_PPLIB_CLASSIFICATION_UI_MASK
) >>
3053 ATOM_PPLIB_CLASSIFICATION_UI_SHIFT
;
3054 power_state
->classification
.flags
= classification_flag
;
3055 /* NOTE: There is a classification2 flag in BIOS
3056 * that is not being used right now
3058 power_state
->classification
.temporary_state
= false;
3059 power_state
->classification
.to_be_deleted
= false;
3061 power_state
->validation
.disallowOnDC
=
3062 ((le32_to_cpu(state_entry
->ulCapsAndSettings
) &
3063 ATOM_Vega10_DISALLOW_ON_DC
) != 0);
3065 power_state
->display
.disableFrameModulation
= false;
3066 power_state
->display
.limitRefreshrate
= false;
3067 power_state
->display
.enableVariBright
=
3068 ((le32_to_cpu(state_entry
->ulCapsAndSettings
) &
3069 ATOM_Vega10_ENABLE_VARIBRIGHT
) != 0);
3071 power_state
->validation
.supportedPowerLevels
= 0;
3072 power_state
->uvd_clocks
.VCLK
= 0;
3073 power_state
->uvd_clocks
.DCLK
= 0;
3074 power_state
->temperatures
.min
= 0;
3075 power_state
->temperatures
.max
= 0;
3077 performance_level
= &(vega10_power_state
->performance_levels
3078 [vega10_power_state
->performance_level_count
++]);
3080 PP_ASSERT_WITH_CODE(
3081 (vega10_power_state
->performance_level_count
<
3082 NUM_GFXCLK_DPM_LEVELS
),
3083 "Performance levels exceeds SMC limit!",
3086 PP_ASSERT_WITH_CODE(
3087 (vega10_power_state
->performance_level_count
<=
3088 hwmgr
->platform_descriptor
.
3089 hardwareActivityPerformanceLevels
),
3090 "Performance levels exceeds Driver limit!",
3093 /* Performance levels are arranged from low to high. */
3094 performance_level
->soc_clock
= socclk_dep_table
->entries
3095 [state_entry
->ucSocClockIndexLow
].ulClk
;
3096 performance_level
->gfx_clock
= gfxclk_dep_table
->entries
3097 [state_entry
->ucGfxClockIndexLow
].ulClk
;
3098 performance_level
->mem_clock
= mclk_dep_table
->entries
3099 [state_entry
->ucMemClockIndexLow
].ulMemClk
;
3101 performance_level
= &(vega10_power_state
->performance_levels
3102 [vega10_power_state
->performance_level_count
++]);
3103 performance_level
->soc_clock
= socclk_dep_table
->entries
3104 [state_entry
->ucSocClockIndexHigh
].ulClk
;
3105 if (gfxclk_dep_table
->ucRevId
== 0) {
3106 /* under vega10 pp one vf mode, the gfx clk dpm need be lower
3107 * to level-4 due to the limited 110w-power
3109 if (hwmgr
->pp_one_vf
&& (state_entry
->ucGfxClockIndexHigh
> 0))
3110 performance_level
->gfx_clock
=
3111 gfxclk_dep_table
->entries
[4].ulClk
;
3113 performance_level
->gfx_clock
= gfxclk_dep_table
->entries
3114 [state_entry
->ucGfxClockIndexHigh
].ulClk
;
3115 } else if (gfxclk_dep_table
->ucRevId
== 1) {
3116 patom_record_V2
= (ATOM_Vega10_GFXCLK_Dependency_Record_V2
*)gfxclk_dep_table
->entries
;
3117 if (hwmgr
->pp_one_vf
&& (state_entry
->ucGfxClockIndexHigh
> 0))
3118 performance_level
->gfx_clock
= patom_record_V2
[4].ulClk
;
3120 performance_level
->gfx_clock
=
3121 patom_record_V2
[state_entry
->ucGfxClockIndexHigh
].ulClk
;
3124 performance_level
->mem_clock
= mclk_dep_table
->entries
3125 [state_entry
->ucMemClockIndexHigh
].ulMemClk
;
static int vega10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
		unsigned long entry_index, struct pp_power_state *state)
{
	int result;
	struct vega10_power_state *ps;

	state->hardware.magic = PhwVega10_Magic;

	ps = cast_phw_vega10_power_state(&state->hardware);

	result = vega10_get_powerplay_table_entry(hwmgr, entry_index, state,
			vega10_get_pp_table_entry_callback_func);

	/*
	 * This is the earliest time we have all the dependency table
	 * and the VBIOS boot state
	 */
	/* set DC compatible flag if this state supports DC */
	if (!state->validation.disallowOnDC)
		ps->dc_compatible = true;

	ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
	ps->uvd_clks.dclk = state->uvd_clocks.DCLK;

	return 0;
}
static int vega10_patch_boot_state(struct pp_hwmgr *hwmgr,
		struct pp_hw_power_state *hw_ps)
{
	return 0;
}
3162 static int vega10_apply_state_adjust_rules(struct pp_hwmgr
*hwmgr
,
3163 struct pp_power_state
*request_ps
,
3164 const struct pp_power_state
*current_ps
)
3166 struct amdgpu_device
*adev
= hwmgr
->adev
;
3167 struct vega10_power_state
*vega10_ps
=
3168 cast_phw_vega10_power_state(&request_ps
->hardware
);
3171 struct PP_Clocks minimum_clocks
= {0};
3172 bool disable_mclk_switching
;
3173 bool disable_mclk_switching_for_frame_lock
;
3174 bool disable_mclk_switching_for_vr
;
3175 bool force_mclk_high
;
3176 const struct phm_clock_and_voltage_limits
*max_limits
;
3178 struct vega10_hwmgr
*data
= hwmgr
->backend
;
3179 struct phm_ppt_v2_information
*table_info
=
3180 (struct phm_ppt_v2_information
*)(hwmgr
->pptable
);
3182 uint32_t stable_pstate_sclk_dpm_percentage
;
3183 uint32_t stable_pstate_sclk
= 0, stable_pstate_mclk
= 0;
3186 data
->battery_state
= (PP_StateUILabel_Battery
==
3187 request_ps
->classification
.ui_label
);
	if (vega10_ps->performance_level_count != 2)
		pr_info("Vega10 should always have 2 performance levels");
3192 max_limits
= adev
->pm
.ac_power
?
3193 &(hwmgr
->dyn_state
.max_clock_voltage_on_ac
) :
3194 &(hwmgr
->dyn_state
.max_clock_voltage_on_dc
);
3196 /* Cap clock DPM tables at DC MAX if it is in DC. */
3197 if (!adev
->pm
.ac_power
) {
3198 for (i
= 0; i
< vega10_ps
->performance_level_count
; i
++) {
3199 if (vega10_ps
->performance_levels
[i
].mem_clock
>
3201 vega10_ps
->performance_levels
[i
].mem_clock
=
3203 if (vega10_ps
->performance_levels
[i
].gfx_clock
>
3205 vega10_ps
->performance_levels
[i
].gfx_clock
=
3210 /* result = PHM_CheckVBlankTime(hwmgr, &vblankTooShort);*/
3211 minimum_clocks
.engineClock
= hwmgr
->display_config
->min_core_set_clock
;
3212 minimum_clocks
.memoryClock
= hwmgr
->display_config
->min_mem_set_clock
;
3214 if (PP_CAP(PHM_PlatformCaps_StablePState
)) {
3215 stable_pstate_sclk_dpm_percentage
=
3216 data
->registry_data
.stable_pstate_sclk_dpm_percentage
;
3217 PP_ASSERT_WITH_CODE(
3218 data
->registry_data
.stable_pstate_sclk_dpm_percentage
>= 1 &&
3219 data
->registry_data
.stable_pstate_sclk_dpm_percentage
<= 100,
3220 "percent sclk value must range from 1% to 100%, setting default value",
3221 stable_pstate_sclk_dpm_percentage
= 75);
3223 max_limits
= &(hwmgr
->dyn_state
.max_clock_voltage_on_ac
);
3224 stable_pstate_sclk
= (max_limits
->sclk
*
3225 stable_pstate_sclk_dpm_percentage
) / 100;
3227 for (count
= table_info
->vdd_dep_on_sclk
->count
- 1;
3228 count
>= 0; count
--) {
3229 if (stable_pstate_sclk
>=
3230 table_info
->vdd_dep_on_sclk
->entries
[count
].clk
) {
3231 stable_pstate_sclk
=
3232 table_info
->vdd_dep_on_sclk
->entries
[count
].clk
;
3238 stable_pstate_sclk
= table_info
->vdd_dep_on_sclk
->entries
[0].clk
;
3240 stable_pstate_mclk
= max_limits
->mclk
;
3242 minimum_clocks
.engineClock
= stable_pstate_sclk
;
3243 minimum_clocks
.memoryClock
= stable_pstate_mclk
;
3246 disable_mclk_switching_for_frame_lock
=
3247 PP_CAP(PHM_PlatformCaps_DisableMclkSwitchingForFrameLock
);
3248 disable_mclk_switching_for_vr
=
3249 PP_CAP(PHM_PlatformCaps_DisableMclkSwitchForVR
);
3250 force_mclk_high
= PP_CAP(PHM_PlatformCaps_ForceMclkHigh
);
3252 if (hwmgr
->display_config
->num_display
== 0)
3253 disable_mclk_switching
= false;
3255 disable_mclk_switching
= ((1 < hwmgr
->display_config
->num_display
) &&
3256 !hwmgr
->display_config
->multi_monitor_in_sync
) ||
3257 disable_mclk_switching_for_frame_lock
||
3258 disable_mclk_switching_for_vr
||
3261 sclk
= vega10_ps
->performance_levels
[0].gfx_clock
;
3262 mclk
= vega10_ps
->performance_levels
[0].mem_clock
;
3264 if (sclk
< minimum_clocks
.engineClock
)
3265 sclk
= (minimum_clocks
.engineClock
> max_limits
->sclk
) ?
3266 max_limits
->sclk
: minimum_clocks
.engineClock
;
3268 if (mclk
< minimum_clocks
.memoryClock
)
3269 mclk
= (minimum_clocks
.memoryClock
> max_limits
->mclk
) ?
3270 max_limits
->mclk
: minimum_clocks
.memoryClock
;
3272 vega10_ps
->performance_levels
[0].gfx_clock
= sclk
;
3273 vega10_ps
->performance_levels
[0].mem_clock
= mclk
;
3275 if (vega10_ps
->performance_levels
[1].gfx_clock
<
3276 vega10_ps
->performance_levels
[0].gfx_clock
)
3277 vega10_ps
->performance_levels
[0].gfx_clock
=
3278 vega10_ps
->performance_levels
[1].gfx_clock
;
3280 if (disable_mclk_switching
) {
3281 /* Set Mclk the max of level 0 and level 1 */
3282 if (mclk
< vega10_ps
->performance_levels
[1].mem_clock
)
3283 mclk
= vega10_ps
->performance_levels
[1].mem_clock
;
3285 /* Find the lowest MCLK frequency that is within
3286 * the tolerable latency defined in DAL
3288 latency
= hwmgr
->display_config
->dce_tolerable_mclk_in_active_latency
;
3289 for (i
= 0; i
< data
->mclk_latency_table
.count
; i
++) {
3290 if ((data
->mclk_latency_table
.entries
[i
].latency
<= latency
) &&
3291 (data
->mclk_latency_table
.entries
[i
].frequency
>=
3292 vega10_ps
->performance_levels
[0].mem_clock
) &&
3293 (data
->mclk_latency_table
.entries
[i
].frequency
<=
3294 vega10_ps
->performance_levels
[1].mem_clock
))
3295 mclk
= data
->mclk_latency_table
.entries
[i
].frequency
;
3297 vega10_ps
->performance_levels
[0].mem_clock
= mclk
;
3299 if (vega10_ps
->performance_levels
[1].mem_clock
<
3300 vega10_ps
->performance_levels
[0].mem_clock
)
3301 vega10_ps
->performance_levels
[0].mem_clock
=
3302 vega10_ps
->performance_levels
[1].mem_clock
;
3305 if (PP_CAP(PHM_PlatformCaps_StablePState
)) {
3306 for (i
= 0; i
< vega10_ps
->performance_level_count
; i
++) {
3307 vega10_ps
->performance_levels
[i
].gfx_clock
= stable_pstate_sclk
;
3308 vega10_ps
->performance_levels
[i
].mem_clock
= stable_pstate_mclk
;
3315 static int vega10_find_dpm_states_clocks_in_dpm_table(struct pp_hwmgr
*hwmgr
, const void *input
)
3317 struct vega10_hwmgr
*data
= hwmgr
->backend
;
3318 const struct phm_set_power_state_input
*states
=
3319 (const struct phm_set_power_state_input
*)input
;
3320 const struct vega10_power_state
*vega10_ps
=
3321 cast_const_phw_vega10_power_state(states
->pnew_state
);
3322 struct vega10_single_dpm_table
*sclk_table
= &(data
->dpm_table
.gfx_table
);
3323 uint32_t sclk
= vega10_ps
->performance_levels
3324 [vega10_ps
->performance_level_count
- 1].gfx_clock
;
3325 struct vega10_single_dpm_table
*mclk_table
= &(data
->dpm_table
.mem_table
);
3326 uint32_t mclk
= vega10_ps
->performance_levels
3327 [vega10_ps
->performance_level_count
- 1].mem_clock
;
3330 for (i
= 0; i
< sclk_table
->count
; i
++) {
3331 if (sclk
== sclk_table
->dpm_levels
[i
].value
)
3335 if (i
>= sclk_table
->count
) {
3336 if (sclk
> sclk_table
->dpm_levels
[i
-1].value
) {
3337 data
->need_update_dpm_table
|= DPMTABLE_OD_UPDATE_SCLK
;
3338 sclk_table
->dpm_levels
[i
-1].value
= sclk
;
3342 for (i
= 0; i
< mclk_table
->count
; i
++) {
3343 if (mclk
== mclk_table
->dpm_levels
[i
].value
)
3347 if (i
>= mclk_table
->count
) {
3348 if (mclk
> mclk_table
->dpm_levels
[i
-1].value
) {
3349 data
->need_update_dpm_table
|= DPMTABLE_OD_UPDATE_MCLK
;
3350 mclk_table
->dpm_levels
[i
-1].value
= mclk
;
3354 if (data
->display_timing
.num_existing_displays
!= hwmgr
->display_config
->num_display
)
3355 data
->need_update_dpm_table
|= DPMTABLE_UPDATE_MCLK
;
3360 static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
3361 struct pp_hwmgr
*hwmgr
, const void *input
)
3364 struct vega10_hwmgr
*data
= hwmgr
->backend
;
3365 struct vega10_dpm_table
*dpm_table
= &data
->dpm_table
;
3366 struct vega10_odn_dpm_table
*odn_table
= &data
->odn_dpm_table
;
3367 struct vega10_odn_clock_voltage_dependency_table
*odn_clk_table
= &odn_table
->vdd_dep_on_sclk
;
3370 if (!data
->need_update_dpm_table
)
3373 if (hwmgr
->od_enabled
&& data
->need_update_dpm_table
& DPMTABLE_OD_UPDATE_SCLK
) {
3374 for (count
= 0; count
< dpm_table
->gfx_table
.count
; count
++)
3375 dpm_table
->gfx_table
.dpm_levels
[count
].value
= odn_clk_table
->entries
[count
].clk
;
3378 odn_clk_table
= &odn_table
->vdd_dep_on_mclk
;
3379 if (hwmgr
->od_enabled
&& data
->need_update_dpm_table
& DPMTABLE_OD_UPDATE_MCLK
) {
3380 for (count
= 0; count
< dpm_table
->mem_table
.count
; count
++)
3381 dpm_table
->mem_table
.dpm_levels
[count
].value
= odn_clk_table
->entries
[count
].clk
;
3384 if (data
->need_update_dpm_table
&
3385 (DPMTABLE_OD_UPDATE_SCLK
+ DPMTABLE_UPDATE_SCLK
+ DPMTABLE_UPDATE_SOCCLK
)) {
3386 result
= vega10_populate_all_graphic_levels(hwmgr
);
3387 PP_ASSERT_WITH_CODE((0 == result
),
3388 "Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
3392 if (data
->need_update_dpm_table
&
3393 (DPMTABLE_OD_UPDATE_MCLK
+ DPMTABLE_UPDATE_MCLK
)) {
3394 result
= vega10_populate_all_memory_levels(hwmgr
);
3395 PP_ASSERT_WITH_CODE((0 == result
),
3396 "Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
3400 vega10_populate_vddc_soc_levels(hwmgr
);
static int vega10_trim_single_dpm_states(struct pp_hwmgr *hwmgr,
		struct vega10_single_dpm_table *dpm_table,
		uint32_t low_limit, uint32_t high_limit)
{
	uint32_t i;

	for (i = 0; i < dpm_table->count; i++) {
		if ((dpm_table->dpm_levels[i].value < low_limit) ||
		    (dpm_table->dpm_levels[i].value > high_limit))
			dpm_table->dpm_levels[i].enabled = false;
		else
			dpm_table->dpm_levels[i].enabled = true;
	}

	return 0;
}
static int vega10_trim_single_dpm_states_with_mask(struct pp_hwmgr *hwmgr,
		struct vega10_single_dpm_table *dpm_table,
		uint32_t low_limit, uint32_t high_limit,
		uint32_t disable_dpm_mask)
{
	uint32_t i;

	for (i = 0; i < dpm_table->count; i++) {
		if ((dpm_table->dpm_levels[i].value < low_limit) ||
		    (dpm_table->dpm_levels[i].value > high_limit))
			dpm_table->dpm_levels[i].enabled = false;
		else if (!((1 << i) & disable_dpm_mask))
			dpm_table->dpm_levels[i].enabled = false;
		else
			dpm_table->dpm_levels[i].enabled = true;
	}

	return 0;
}
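/*
 * The masked variant additionally drops levels whose bit is cleared in
 * disable_dpm_mask, letting callers exclude specific gfx DPM levels on top
 * of the power-state clock window applied by the plain trim.
 */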
static int vega10_trim_dpm_states(struct pp_hwmgr *hwmgr,
		const struct vega10_power_state *vega10_ps)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	uint32_t high_limit_count;

	PP_ASSERT_WITH_CODE((vega10_ps->performance_level_count >= 1),
			"power state did not have any performance level",
			return -EINVAL);

	high_limit_count = (vega10_ps->performance_level_count == 1) ? 0 : 1;

	vega10_trim_single_dpm_states(hwmgr,
			&(data->dpm_table.soc_table),
			vega10_ps->performance_levels[0].soc_clock,
			vega10_ps->performance_levels[high_limit_count].soc_clock);

	vega10_trim_single_dpm_states_with_mask(hwmgr,
			&(data->dpm_table.gfx_table),
			vega10_ps->performance_levels[0].gfx_clock,
			vega10_ps->performance_levels[high_limit_count].gfx_clock,
			data->disable_dpm_mask);

	vega10_trim_single_dpm_states(hwmgr,
			&(data->dpm_table.mem_table),
			vega10_ps->performance_levels[0].mem_clock,
			vega10_ps->performance_levels[high_limit_count].mem_clock);

	return 0;
}
static uint32_t vega10_find_lowest_dpm_level(
		struct vega10_single_dpm_table *table)
{
	uint32_t i;

	for (i = 0; i < table->count; i++) {
		if (table->dpm_levels[i].enabled)
			break;
	}

	return i;
}
3484 static uint32_t vega10_find_highest_dpm_level(
3485 struct vega10_single_dpm_table
*table
)
3489 if (table
->count
<= MAX_REGULAR_DPM_NUMBER
) {
3490 for (i
= table
->count
; i
> 0; i
--) {
3491 if (table
->dpm_levels
[i
- 1].enabled
)
3495 pr_info("DPM Table Has Too Many Entries!");
3496 return MAX_REGULAR_DPM_NUMBER
- 1;
static void vega10_apply_dal_minimum_voltage_request(
		struct pp_hwmgr *hwmgr)
{
	return;
}

static int vega10_get_soc_index_for_max_uclk(struct pp_hwmgr *hwmgr)
{
	struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_table_on_mclk;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);

	vdd_dep_table_on_mclk = table_info->vdd_dep_on_mclk;

	return vdd_dep_table_on_mclk->entries[NUM_UCLK_DPM_LEVELS - 1].vddInd + 1;
}

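/*
 * Push the cached boot (soft minimum) levels down to the SMU. Each clock
 * domain is skipped when its DPM key is disabled in the registry data, and
 * a message is only sent when the requested level differs from the soft
 * minimum already programmed into the DPM table.
 */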
static int vega10_upload_dpm_bootup_level(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	uint32_t socclk_idx;

	vega10_apply_dal_minimum_voltage_request(hwmgr);

	if (!data->registry_data.sclk_dpm_key_disabled) {
		if (data->smc_state_table.gfx_boot_level !=
		    data->dpm_table.gfx_table.dpm_state.soft_min_level) {
			smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetSoftMinGfxclkByIndex,
				data->smc_state_table.gfx_boot_level);

			data->dpm_table.gfx_table.dpm_state.soft_min_level =
					data->smc_state_table.gfx_boot_level;
		}
	}

	if (!data->registry_data.mclk_dpm_key_disabled) {
		if (data->smc_state_table.mem_boot_level !=
		    data->dpm_table.mem_table.dpm_state.soft_min_level) {
			if ((data->smc_state_table.mem_boot_level == NUM_UCLK_DPM_LEVELS - 1)
			    && hwmgr->not_vf) {
				socclk_idx = vega10_get_soc_index_for_max_uclk(hwmgr);
				smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMinSocclkByIndex,
						socclk_idx);
			} else {
				smum_send_msg_to_smc_with_parameter(hwmgr,
						PPSMC_MSG_SetSoftMinUclkByIndex,
						data->smc_state_table.mem_boot_level);
			}
			data->dpm_table.mem_table.dpm_state.soft_min_level =
					data->smc_state_table.mem_boot_level;
		}
	}

	if (!data->registry_data.socclk_dpm_key_disabled) {
		if (data->smc_state_table.soc_boot_level !=
		    data->dpm_table.soc_table.dpm_state.soft_min_level) {
			smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetSoftMinSocclkByIndex,
				data->smc_state_table.soc_boot_level);
			data->dpm_table.soc_table.dpm_state.soft_min_level =
					data->smc_state_table.soc_boot_level;
		}
	}

	return 0;
}

static int vega10_upload_dpm_max_level(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	vega10_apply_dal_minimum_voltage_request(hwmgr);

	if (!data->registry_data.sclk_dpm_key_disabled) {
		if (data->smc_state_table.gfx_max_level !=
		    data->dpm_table.gfx_table.dpm_state.soft_max_level) {
			smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetSoftMaxGfxclkByIndex,
				data->smc_state_table.gfx_max_level);
			data->dpm_table.gfx_table.dpm_state.soft_max_level =
					data->smc_state_table.gfx_max_level;
		}
	}

	if (!data->registry_data.mclk_dpm_key_disabled) {
		if (data->smc_state_table.mem_max_level !=
		    data->dpm_table.mem_table.dpm_state.soft_max_level) {
			smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetSoftMaxUclkByIndex,
					data->smc_state_table.mem_max_level);
			data->dpm_table.mem_table.dpm_state.soft_max_level =
					data->smc_state_table.mem_max_level;
		}
	}

	if (!data->registry_data.socclk_dpm_key_disabled) {
		if (data->smc_state_table.soc_max_level !=
		    data->dpm_table.soc_table.dpm_state.soft_max_level) {
			smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetSoftMaxSocclkByIndex,
				data->smc_state_table.soc_max_level);
			data->dpm_table.soc_table.dpm_state.soft_max_level =
					data->smc_state_table.soc_max_level;
		}
	}

	return 0;
}

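/*
 * Build the per-domain enable masks for the new power state: trim the DPM
 * tables against the state's performance levels, derive the boot (lowest)
 * and max (highest) enabled levels, upload both to the SMU, and finally
 * mark every level in between as enabled.
 */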
static int vega10_generate_dpm_level_enable_mask(
		struct pp_hwmgr *hwmgr, const void *input)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	const struct phm_set_power_state_input *states =
			(const struct phm_set_power_state_input *)input;
	const struct vega10_power_state *vega10_ps =
			cast_const_phw_vega10_power_state(states->pnew_state);
	int i;

	PP_ASSERT_WITH_CODE(!vega10_trim_dpm_states(hwmgr, vega10_ps),
			"Attempt to Trim DPM States Failed!",
			return -1);

	data->smc_state_table.gfx_boot_level =
			vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
	data->smc_state_table.gfx_max_level =
			vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
	data->smc_state_table.mem_boot_level =
			vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
	data->smc_state_table.mem_max_level =
			vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));
	data->smc_state_table.soc_boot_level =
			vega10_find_lowest_dpm_level(&(data->dpm_table.soc_table));
	data->smc_state_table.soc_max_level =
			vega10_find_highest_dpm_level(&(data->dpm_table.soc_table));

	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
			"Attempt to upload DPM Bootup Levels Failed!",
			return -1);
	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
			"Attempt to upload DPM Max Levels Failed!",
			return -1);

	for (i = data->smc_state_table.gfx_boot_level;
	     i < data->smc_state_table.gfx_max_level; i++)
		data->dpm_table.gfx_table.dpm_levels[i].enabled = true;

	for (i = data->smc_state_table.mem_boot_level;
	     i < data->smc_state_table.mem_max_level; i++)
		data->dpm_table.mem_table.dpm_levels[i].enabled = true;

	for (i = data->smc_state_table.soc_boot_level;
	     i < data->smc_state_table.soc_max_level; i++)
		data->dpm_table.soc_table.dpm_levels[i].enabled = true;

	return 0;
}

int vega10_enable_disable_vce_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	if (data->smu_features[GNLD_DPM_VCE].supported) {
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
				enable,
				data->smu_features[GNLD_DPM_VCE].smu_feature_bitmap),
				"Attempt to Enable/Disable DPM VCE Failed!",
				return -1);
		data->smu_features[GNLD_DPM_VCE].enabled = enable;
	}

	return 0;
}

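/*
 * Program the low-GFXCLK interrupt threshold used for SCLK throttle-low
 * notification, but only when the platform cap is set and a non-zero
 * threshold has been configured.
 */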
static int vega10_update_sclk_threshold(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	uint32_t low_sclk_interrupt_threshold = 0;

	if (PP_CAP(PHM_PlatformCaps_SclkThrottleLowNotification) &&
	    (data->low_sclk_interrupt_threshold != 0)) {
		low_sclk_interrupt_threshold =
				data->low_sclk_interrupt_threshold;

		data->smc_state_table.pp_table.LowGfxclkInterruptThreshold =
				cpu_to_le32(low_sclk_interrupt_threshold);

		/* This message will also enable SmcToHost Interrupt */
		smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_SetLowGfxclkInterruptThreshold,
				(uint32_t)low_sclk_interrupt_threshold);
	}

	return 0;
}

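/*
 * Top-level "set power state" sequence: locate the new state's clocks in
 * the DPM table, repopulate and upload the SCLK/MCLK levels, regenerate the
 * level enable masks, refresh the SCLK threshold, and push the updated
 * PPTable to the SMU before re-evaluating AVFS.
 */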
static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
		const void *input)
{
	int tmp_result, result = 0;
	struct vega10_hwmgr *data = hwmgr->backend;
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);

	tmp_result = vega10_find_dpm_states_clocks_in_dpm_table(hwmgr, input);
	PP_ASSERT_WITH_CODE(!tmp_result,
			"Failed to find DPM states clocks in DPM table!",
			result = tmp_result);

	tmp_result = vega10_populate_and_upload_sclk_mclk_dpm_levels(hwmgr, input);
	PP_ASSERT_WITH_CODE(!tmp_result,
			"Failed to populate and upload SCLK MCLK DPM levels!",
			result = tmp_result);

	tmp_result = vega10_generate_dpm_level_enable_mask(hwmgr, input);
	PP_ASSERT_WITH_CODE(!tmp_result,
			"Failed to generate DPM level enabled mask!",
			result = tmp_result);

	tmp_result = vega10_update_sclk_threshold(hwmgr);
	PP_ASSERT_WITH_CODE(!tmp_result,
			"Failed to update SCLK threshold!",
			result = tmp_result);

	result = smum_smc_table_manager(hwmgr, (uint8_t *)pp_table, PPTABLE, false);
	PP_ASSERT_WITH_CODE(!result,
			"Failed to upload PPtable!", return result);

	/*
	 * If a custom pp table is loaded, set the DPMTABLE_OD_UPDATE_VDDC flag.
	 * That effectively disables the AVFS feature.
	 */
	if (hwmgr->hardcode_pp_table != NULL)
		data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;

	vega10_update_avfs(hwmgr);

	/*
	 * Clear all OD flags except DPMTABLE_OD_UPDATE_VDDC.
	 * That will help to keep AVFS disabled.
	 */
	data->need_update_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;

	return 0;
}

static uint32_t vega10_dpm_get_sclk(struct pp_hwmgr *hwmgr, bool low)
{
	struct pp_power_state *ps;
	struct vega10_power_state *vega10_ps;

	if (hwmgr == NULL)
		return -EINVAL;

	ps = hwmgr->request_ps;

	if (ps == NULL)
		return -EINVAL;

	vega10_ps = cast_phw_vega10_power_state(&ps->hardware);

	if (low)
		return vega10_ps->performance_levels[0].gfx_clock;
	else
		return vega10_ps->performance_levels
				[vega10_ps->performance_level_count - 1].gfx_clock;
}

static uint32_t vega10_dpm_get_mclk(struct pp_hwmgr *hwmgr, bool low)
{
	struct pp_power_state *ps;
	struct vega10_power_state *vega10_ps;

	if (hwmgr == NULL)
		return -EINVAL;

	ps = hwmgr->request_ps;

	if (ps == NULL)
		return -EINVAL;

	vega10_ps = cast_phw_vega10_power_state(&ps->hardware);

	if (low)
		return vega10_ps->performance_levels[0].mem_clock;
	else
		return vega10_ps->performance_levels
				[vega10_ps->performance_level_count - 1].mem_clock;
}

static int vega10_get_gpu_power(struct pp_hwmgr *hwmgr,
		uint32_t *query)
{
	uint32_t value;

	if (!query)
		return -EINVAL;

	smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrPkgPwr);
	value = smum_get_argument(hwmgr);

	/* The SMC returns actual watts; keep consistent with legacy asics by
	 * using the low 8 bits as 8 fractional bits.
	 */
	*query = value << 8;

	return 0;
}

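/*
 * Sensor readback conventions used below: clock values appear to be
 * reported in 10 kHz units (MHz values and DPM level entries multiplied by
 * 100), temperatures are scaled by PP_TEMPERATURE_UNITS_PER_CENTIGRADES,
 * and GPU power uses the 24.8 fixed-point format produced by
 * vega10_get_gpu_power() above.
 */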
static int vega10_read_sensor(struct pp_hwmgr *hwmgr, int idx,
			      void *value, int *size)
{
	struct amdgpu_device *adev = hwmgr->adev;
	uint32_t sclk_mhz, mclk_idx, activity_percent = 0;
	struct vega10_hwmgr *data = hwmgr->backend;
	struct vega10_dpm_table *dpm_table = &data->dpm_table;
	int ret = 0;
	uint32_t val_vid;

	switch (idx) {
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetAverageGfxclkActualFrequency);
		sclk_mhz = smum_get_argument(hwmgr);
		*((uint32_t *)value) = sclk_mhz * 100;
		break;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetCurrentUclkIndex);
		mclk_idx = smum_get_argument(hwmgr);
		if (mclk_idx < dpm_table->mem_table.count) {
			*((uint32_t *)value) = dpm_table->mem_table.dpm_levels[mclk_idx].value;
			*size = 4;
		} else {
			ret = -EINVAL;
		}
		break;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_GetAverageGfxActivity, 0);
		activity_percent = smum_get_argument(hwmgr);
		*((uint32_t *)value) = activity_percent > 100 ? 100 : activity_percent;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_TEMP:
		*((uint32_t *)value) = vega10_thermal_get_temperature(hwmgr);
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHotspot);
		*((uint32_t *)value) = smum_get_argument(hwmgr) *
			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_MEM_TEMP:
		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_GetTemperatureHBM);
		*((uint32_t *)value) = smum_get_argument(hwmgr) *
			PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_UVD_POWER:
		*((uint32_t *)value) = data->uvd_power_gated ? 0 : 1;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_VCE_POWER:
		*((uint32_t *)value) = data->vce_power_gated ? 0 : 1;
		*size = 4;
		break;
	case AMDGPU_PP_SENSOR_GPU_POWER:
		ret = vega10_get_gpu_power(hwmgr, (uint32_t *)value);
		break;
	case AMDGPU_PP_SENSOR_VDDGFX:
		val_vid = (RREG32_SOC15(SMUIO, 0, mmSMUSVI0_PLANE0_CURRENTVID) &
			SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID_MASK) >>
			SMUSVI0_PLANE0_CURRENTVID__CURRENT_SVI0_PLANE0_VID__SHIFT;
		*((uint32_t *)value) = (uint32_t)convert_to_vddc((uint8_t)val_vid);
		return 0;
	case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
		ret = vega10_get_enabled_smc_features(hwmgr, (uint64_t *)value);
		if (!ret)
			*size = 8;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

static void vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
		bool has_disp)
{
	smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_SetUclkFastSwitch,
			has_disp ? 1 : 0);
}

int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
		struct pp_display_clock_request *clock_req)
{
	int result = 0;
	enum amd_pp_clock_type clk_type = clock_req->clock_type;
	uint32_t clk_freq = clock_req->clock_freq_in_khz / 1000;
	DSPCLK_e clk_select = 0;
	uint32_t clk_request = 0;

	switch (clk_type) {
	case amd_pp_dcef_clock:
		clk_select = DSPCLK_DCEFCLK;
		break;
	case amd_pp_disp_clock:
		clk_select = DSPCLK_DISPCLK;
		break;
	case amd_pp_pixel_clock:
		clk_select = DSPCLK_PIXCLK;
		break;
	case amd_pp_phy_clock:
		clk_select = DSPCLK_PHYCLK;
		break;
	default:
		pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
		result = -1;
		break;
	}

	if (!result) {
		clk_request = (clk_freq << 16) | clk_select;
		smum_send_msg_to_smc_with_parameter(hwmgr,
				PPSMC_MSG_RequestDisplayClockByFreq,
				clk_request);
	}

	return result;
}

static uint8_t vega10_get_uclk_index(struct pp_hwmgr *hwmgr,
		struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table,
		uint32_t frequency)
{
	uint8_t count;
	uint8_t i;

	if (mclk_table == NULL || mclk_table->count == 0)
		return 0;

	count = (uint8_t)(mclk_table->count);

	for (i = 0; i < count; i++) {
		if (mclk_table->entries[i].clk >= frequency)
			return i;
	}

	return i - 1;
}

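/*
 * After a power-state adjustment, re-align the SMU with the current display
 * configuration: decide whether UCLK fast switching is safe, request a hard
 * minimum for DCEFCLK plus a deep-sleep DCEFCLK floor, and raise the UCLK
 * soft minimum to cover the display's memory-clock requirement.
 */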
static int vega10_notify_smc_display_config_after_ps_adjustment(
		struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct vega10_single_dpm_table *dpm_table =
			&data->dpm_table.dcef_table;
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;
	struct phm_ppt_v1_clock_voltage_dependency_table *mclk_table = table_info->vdd_dep_on_mclk;
	uint32_t idx;
	struct PP_Clocks min_clocks = {0};
	uint32_t i;
	struct pp_display_clock_request clock_req;

	if ((hwmgr->display_config->num_display > 1) &&
	    !hwmgr->display_config->multi_monitor_in_sync &&
	    !hwmgr->display_config->nb_pstate_switch_disable)
		vega10_notify_smc_display_change(hwmgr, false);
	else
		vega10_notify_smc_display_change(hwmgr, true);

	min_clocks.dcefClock = hwmgr->display_config->min_dcef_set_clk;
	min_clocks.dcefClockInSR = hwmgr->display_config->min_dcef_deep_sleep_set_clk;
	min_clocks.memoryClock = hwmgr->display_config->min_mem_set_clock;

	for (i = 0; i < dpm_table->count; i++) {
		if (dpm_table->dpm_levels[i].value == min_clocks.dcefClock)
			break;
	}

	if (i < dpm_table->count) {
		clock_req.clock_type = amd_pp_dcef_clock;
		clock_req.clock_freq_in_khz = dpm_table->dpm_levels[i].value * 10;
		if (!vega10_display_clock_voltage_request(hwmgr, &clock_req)) {
			smum_send_msg_to_smc_with_parameter(
					hwmgr, PPSMC_MSG_SetMinDeepSleepDcefclk,
					min_clocks.dcefClockInSR / 100);
		} else {
			pr_info("Attempt to set Hard Min for DCEFCLK Failed!");
		}
	} else {
		pr_debug("Cannot find requested DCEFCLK!");
	}

	if (min_clocks.memoryClock != 0) {
		idx = vega10_get_uclk_index(hwmgr, mclk_table, min_clocks.memoryClock);
		smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMinUclkByIndex, idx);
		data->dpm_table.mem_table.dpm_state.soft_min_level = idx;
	}

	return 0;
}

static int vega10_force_dpm_highest(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	data->smc_state_table.gfx_boot_level =
	data->smc_state_table.gfx_max_level =
			vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
	data->smc_state_table.mem_boot_level =
	data->smc_state_table.mem_max_level =
			vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));

	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
			"Failed to upload boot level to highest!",
			return -1);

	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
			"Failed to upload dpm max level to highest!",
			return -1);

	return 0;
}

static int vega10_force_dpm_lowest(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	data->smc_state_table.gfx_boot_level =
	data->smc_state_table.gfx_max_level =
			vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
	data->smc_state_table.mem_boot_level =
	data->smc_state_table.mem_max_level =
			vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));

	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
			"Failed to upload boot level to highest!",
			return -1);

	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
			"Failed to upload dpm max level to highest!",
			return -1);

	return 0;
}

static int vega10_unforce_dpm_levels(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	data->smc_state_table.gfx_boot_level =
			vega10_find_lowest_dpm_level(&(data->dpm_table.gfx_table));
	data->smc_state_table.gfx_max_level =
			vega10_find_highest_dpm_level(&(data->dpm_table.gfx_table));
	data->smc_state_table.mem_boot_level =
			vega10_find_lowest_dpm_level(&(data->dpm_table.mem_table));
	data->smc_state_table.mem_max_level =
			vega10_find_highest_dpm_level(&(data->dpm_table.mem_table));

	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
			"Failed to upload DPM Bootup Levels!",
			return -1);

	PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
			"Failed to upload DPM Max Levels!",
			return -1);

	return 0;
}

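/*
 * Profiling/UMD pstate handling: by default the VEGA10_UMD_PSTATE_* levels
 * are used as the stable pstate clocks; the PROFILE_MIN_* and PROFILE_PEAK
 * levels then override the relevant mask with the lowest or highest entry
 * of the corresponding voltage-dependency table.
 */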
static int vega10_get_profiling_clk_mask(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level,
				uint32_t *sclk_mask, uint32_t *mclk_mask, uint32_t *soc_mask)
{
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)(hwmgr->pptable);

	if (table_info->vdd_dep_on_sclk->count > VEGA10_UMD_PSTATE_GFXCLK_LEVEL &&
	    table_info->vdd_dep_on_socclk->count > VEGA10_UMD_PSTATE_SOCCLK_LEVEL &&
	    table_info->vdd_dep_on_mclk->count > VEGA10_UMD_PSTATE_MCLK_LEVEL) {
		*sclk_mask = VEGA10_UMD_PSTATE_GFXCLK_LEVEL;
		*soc_mask = VEGA10_UMD_PSTATE_SOCCLK_LEVEL;
		*mclk_mask = VEGA10_UMD_PSTATE_MCLK_LEVEL;
		hwmgr->pstate_sclk = table_info->vdd_dep_on_sclk->entries[VEGA10_UMD_PSTATE_GFXCLK_LEVEL].clk;
		hwmgr->pstate_mclk = table_info->vdd_dep_on_mclk->entries[VEGA10_UMD_PSTATE_MCLK_LEVEL].clk;
	}

	if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) {
		*sclk_mask = 0;
	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) {
		*mclk_mask = 0;
	} else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) {
		/* under vega10 pp one vf mode, the gfx clk dpm needs to be
		 * lowered to level 4 due to the limited power
		 */
		if (hwmgr->pp_one_vf)
			*sclk_mask = 4;
		else
			*sclk_mask = table_info->vdd_dep_on_sclk->count - 1;
		*soc_mask = table_info->vdd_dep_on_socclk->count - 1;
		*mclk_mask = table_info->vdd_dep_on_mclk->count - 1;
	}

	return 0;
}

static void vega10_set_fan_control_mode(struct pp_hwmgr *hwmgr, uint32_t mode)
{
	if (!hwmgr->not_vf)
		return;

	switch (mode) {
	case AMD_FAN_CTRL_NONE:
		vega10_fan_ctrl_set_fan_speed_percent(hwmgr, 100);
		break;
	case AMD_FAN_CTRL_MANUAL:
		if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
			vega10_fan_ctrl_stop_smc_fan_control(hwmgr);
		break;
	case AMD_FAN_CTRL_AUTO:
		if (PP_CAP(PHM_PlatformCaps_MicrocodeFanControl))
			vega10_fan_ctrl_start_smc_fan_control(hwmgr);
		break;
	default:
		break;
	}
}

static int vega10_force_clock_level(struct pp_hwmgr *hwmgr,
		enum pp_clock_type type, uint32_t mask)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	switch (type) {
	case PP_SCLK:
		data->smc_state_table.gfx_boot_level = mask ? (ffs(mask) - 1) : 0;
		data->smc_state_table.gfx_max_level = mask ? (fls(mask) - 1) : 0;

		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
			"Failed to upload boot level to lowest!",
			return -EINVAL);

		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
			"Failed to upload dpm max level to highest!",
			return -EINVAL);
		break;

	case PP_MCLK:
		data->smc_state_table.mem_boot_level = mask ? (ffs(mask) - 1) : 0;
		data->smc_state_table.mem_max_level = mask ? (fls(mask) - 1) : 0;

		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
			"Failed to upload boot level to lowest!",
			return -EINVAL);

		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
			"Failed to upload dpm max level to highest!",
			return -EINVAL);
		break;

	case PP_SOCCLK:
		data->smc_state_table.soc_boot_level = mask ? (ffs(mask) - 1) : 0;
		data->smc_state_table.soc_max_level = mask ? (fls(mask) - 1) : 0;

		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_bootup_level(hwmgr),
			"Failed to upload boot level to lowest!",
			return -EINVAL);

		PP_ASSERT_WITH_CODE(!vega10_upload_dpm_max_level(hwmgr),
			"Failed to upload dpm max level to highest!",
			return -EINVAL);
		break;

	case PP_DCEFCLK:
		pr_info("Setting DCEFCLK min/max dpm level is not supported!\n");
		break;

	case PP_PCIE:
	default:
		break;
	}

	return 0;
}

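/*
 * Forced DPM level dispatch: HIGH/LOW pin both boot and max levels to one
 * end of the table, AUTO restores the full range, and the PROFILE_* levels
 * force explicit SCLK/MCLK masks. Entering or leaving PROFILE_PEAK also
 * toggles the fan between full speed and automatic control.
 */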
static int vega10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr,
				enum amd_dpm_forced_level level)
{
	int ret = 0;
	uint32_t sclk_mask = 0;
	uint32_t mclk_mask = 0;
	uint32_t soc_mask = 0;

	if (hwmgr->pstate_sclk == 0)
		vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);

	switch (level) {
	case AMD_DPM_FORCED_LEVEL_HIGH:
		ret = vega10_force_dpm_highest(hwmgr);
		break;
	case AMD_DPM_FORCED_LEVEL_LOW:
		ret = vega10_force_dpm_lowest(hwmgr);
		break;
	case AMD_DPM_FORCED_LEVEL_AUTO:
		ret = vega10_unforce_dpm_levels(hwmgr);
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		ret = vega10_get_profiling_clk_mask(hwmgr, level, &sclk_mask, &mclk_mask, &soc_mask);
		if (ret)
			return ret;
		vega10_force_clock_level(hwmgr, PP_SCLK, 1 << sclk_mask);
		vega10_force_clock_level(hwmgr, PP_MCLK, 1 << mclk_mask);
		break;
	case AMD_DPM_FORCED_LEVEL_MANUAL:
	case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
	default:
		break;
	}

	if (!ret) {
		if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK &&
		    hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
			vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_NONE);
		else if (level != AMD_DPM_FORCED_LEVEL_PROFILE_PEAK &&
			 hwmgr->dpm_level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)
			vega10_set_fan_control_mode(hwmgr, AMD_FAN_CTRL_AUTO);
	}

	return ret;
}

static uint32_t vega10_get_fan_control_mode(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	if (data->smu_features[GNLD_FAN_CONTROL].enabled == false)
		return AMD_FAN_CTRL_MANUAL;
	else
		return AMD_FAN_CTRL_AUTO;
}

static int vega10_get_dal_power_level(struct pp_hwmgr *hwmgr,
		struct amd_pp_simple_clock_info *info)
{
	struct phm_ppt_v2_information *table_info =
			(struct phm_ppt_v2_information *)hwmgr->pptable;
	struct phm_clock_and_voltage_limits *max_limits =
			&table_info->max_clock_voltage_on_ac;

	info->engine_max_clock = max_limits->sclk;
	info->memory_max_clock = max_limits->mclk;

	return 0;
}

4268 static void vega10_get_sclks(struct pp_hwmgr
*hwmgr
,
4269 struct pp_clock_levels_with_latency
*clocks
)
4271 struct phm_ppt_v2_information
*table_info
=
4272 (struct phm_ppt_v2_information
*)hwmgr
->pptable
;
4273 struct phm_ppt_v1_clock_voltage_dependency_table
*dep_table
=
4274 table_info
->vdd_dep_on_sclk
;
4277 clocks
->num_levels
= 0;
4278 for (i
= 0; i
< dep_table
->count
; i
++) {
4279 if (dep_table
->entries
[i
].clk
) {
4280 clocks
->data
[clocks
->num_levels
].clocks_in_khz
=
4281 dep_table
->entries
[i
].clk
* 10;
4282 clocks
->num_levels
++;
4288 static void vega10_get_memclocks(struct pp_hwmgr
*hwmgr
,
4289 struct pp_clock_levels_with_latency
*clocks
)
4291 struct phm_ppt_v2_information
*table_info
=
4292 (struct phm_ppt_v2_information
*)hwmgr
->pptable
;
4293 struct phm_ppt_v1_clock_voltage_dependency_table
*dep_table
=
4294 table_info
->vdd_dep_on_mclk
;
4295 struct vega10_hwmgr
*data
= hwmgr
->backend
;
4299 for (i
= 0; i
< dep_table
->count
; i
++) {
4300 if (dep_table
->entries
[i
].clk
) {
4302 clocks
->data
[j
].clocks_in_khz
=
4303 dep_table
->entries
[i
].clk
* 10;
4304 data
->mclk_latency_table
.entries
[j
].frequency
=
4305 dep_table
->entries
[i
].clk
;
4306 clocks
->data
[j
].latency_in_us
=
4307 data
->mclk_latency_table
.entries
[j
].latency
= 25;
4311 clocks
->num_levels
= data
->mclk_latency_table
.count
= j
;
4314 static void vega10_get_dcefclocks(struct pp_hwmgr
*hwmgr
,
4315 struct pp_clock_levels_with_latency
*clocks
)
4317 struct phm_ppt_v2_information
*table_info
=
4318 (struct phm_ppt_v2_information
*)hwmgr
->pptable
;
4319 struct phm_ppt_v1_clock_voltage_dependency_table
*dep_table
=
4320 table_info
->vdd_dep_on_dcefclk
;
4323 for (i
= 0; i
< dep_table
->count
; i
++) {
4324 clocks
->data
[i
].clocks_in_khz
= dep_table
->entries
[i
].clk
* 10;
4325 clocks
->data
[i
].latency_in_us
= 0;
4326 clocks
->num_levels
++;
4330 static void vega10_get_socclocks(struct pp_hwmgr
*hwmgr
,
4331 struct pp_clock_levels_with_latency
*clocks
)
4333 struct phm_ppt_v2_information
*table_info
=
4334 (struct phm_ppt_v2_information
*)hwmgr
->pptable
;
4335 struct phm_ppt_v1_clock_voltage_dependency_table
*dep_table
=
4336 table_info
->vdd_dep_on_socclk
;
4339 for (i
= 0; i
< dep_table
->count
; i
++) {
4340 clocks
->data
[i
].clocks_in_khz
= dep_table
->entries
[i
].clk
* 10;
4341 clocks
->data
[i
].latency_in_us
= 0;
4342 clocks
->num_levels
++;
4346 static int vega10_get_clock_by_type_with_latency(struct pp_hwmgr
*hwmgr
,
4347 enum amd_pp_clock_type type
,
4348 struct pp_clock_levels_with_latency
*clocks
)
4351 case amd_pp_sys_clock
:
4352 vega10_get_sclks(hwmgr
, clocks
);
4354 case amd_pp_mem_clock
:
4355 vega10_get_memclocks(hwmgr
, clocks
);
4357 case amd_pp_dcef_clock
:
4358 vega10_get_dcefclocks(hwmgr
, clocks
);
4360 case amd_pp_soc_clock
:
4361 vega10_get_socclocks(hwmgr
, clocks
);
4370 static int vega10_get_clock_by_type_with_voltage(struct pp_hwmgr
*hwmgr
,
4371 enum amd_pp_clock_type type
,
4372 struct pp_clock_levels_with_voltage
*clocks
)
4374 struct phm_ppt_v2_information
*table_info
=
4375 (struct phm_ppt_v2_information
*)hwmgr
->pptable
;
4376 struct phm_ppt_v1_clock_voltage_dependency_table
*dep_table
;
4380 case amd_pp_mem_clock
:
4381 dep_table
= table_info
->vdd_dep_on_mclk
;
4383 case amd_pp_dcef_clock
:
4384 dep_table
= table_info
->vdd_dep_on_dcefclk
;
4386 case amd_pp_disp_clock
:
4387 dep_table
= table_info
->vdd_dep_on_dispclk
;
4389 case amd_pp_pixel_clock
:
4390 dep_table
= table_info
->vdd_dep_on_pixclk
;
4392 case amd_pp_phy_clock
:
4393 dep_table
= table_info
->vdd_dep_on_phyclk
;
4399 for (i
= 0; i
< dep_table
->count
; i
++) {
4400 clocks
->data
[i
].clocks_in_khz
= dep_table
->entries
[i
].clk
* 10;
4401 clocks
->data
[i
].voltage_in_mv
= (uint32_t)(table_info
->vddc_lookup_table
->
4402 entries
[dep_table
->entries
[i
].vddInd
].us_vdd
);
4403 clocks
->num_levels
++;
4406 if (i
< dep_table
->count
)

static int vega10_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
							void *clock_range)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	struct dm_pp_wm_sets_with_clock_ranges_soc15 *wm_with_clock_ranges = clock_range;
	Watermarks_t *table = &(data->smc_state_table.water_marks_table);

	if (!data->registry_data.disable_water_mark) {
		smu_set_watermarks_for_clocks_ranges(table, wm_with_clock_ranges);
		data->water_marks_bitmap = WaterMarksExist;
	}

	return 0;
}

4427 static int vega10_get_ppfeature_status(struct pp_hwmgr
*hwmgr
, char *buf
)
4429 static const char *ppfeature_name
[] = {
4460 static const char *output_title
[] = {
4464 uint64_t features_enabled
;
4469 ret
= vega10_get_enabled_smc_features(hwmgr
, &features_enabled
);
4470 PP_ASSERT_WITH_CODE(!ret
,
4471 "[EnableAllSmuFeatures] Failed to get enabled smc features!",
4474 size
+= sprintf(buf
+ size
, "Current ppfeatures: 0x%016llx\n", features_enabled
);
4475 size
+= sprintf(buf
+ size
, "%-19s %-22s %s\n",
4479 for (i
= 0; i
< GNLD_FEATURES_MAX
; i
++) {
4480 size
+= sprintf(buf
+ size
, "%-19s 0x%016llx %6s\n",
4483 (features_enabled
& (1ULL << i
)) ? "Y" : "N");
static int vega10_set_ppfeature_status(struct pp_hwmgr *hwmgr, uint64_t new_ppfeature_masks)
{
	uint64_t features_enabled;
	uint64_t features_to_enable;
	uint64_t features_to_disable;
	int ret = 0;

	if (new_ppfeature_masks >= (1ULL << GNLD_FEATURES_MAX))
		return -EINVAL;

	ret = vega10_get_enabled_smc_features(hwmgr, &features_enabled);
	if (ret)
		return ret;

	features_to_disable =
		features_enabled & ~new_ppfeature_masks;
	features_to_enable =
		~features_enabled & new_ppfeature_masks;

	pr_debug("features_to_disable 0x%llx\n", features_to_disable);
	pr_debug("features_to_enable 0x%llx\n", features_to_enable);

	if (features_to_disable) {
		ret = vega10_enable_smc_features(hwmgr, false, features_to_disable);
		if (ret)
			return ret;
	}

	if (features_to_enable) {
		ret = vega10_enable_smc_features(hwmgr, true, features_to_enable);
		if (ret)
			return ret;
	}

	return 0;
}

4526 static int vega10_print_clock_levels(struct pp_hwmgr
*hwmgr
,
4527 enum pp_clock_type type
, char *buf
)
4529 struct vega10_hwmgr
*data
= hwmgr
->backend
;
4530 struct vega10_single_dpm_table
*sclk_table
= &(data
->dpm_table
.gfx_table
);
4531 struct vega10_single_dpm_table
*mclk_table
= &(data
->dpm_table
.mem_table
);
4532 struct vega10_single_dpm_table
*soc_table
= &(data
->dpm_table
.soc_table
);
4533 struct vega10_single_dpm_table
*dcef_table
= &(data
->dpm_table
.dcef_table
);
4534 struct vega10_pcie_table
*pcie_table
= &(data
->dpm_table
.pcie_table
);
4535 struct vega10_odn_clock_voltage_dependency_table
*podn_vdd_dep
= NULL
;
4537 int i
, now
, size
= 0, count
= 0;
4541 if (data
->registry_data
.sclk_dpm_key_disabled
)
4544 smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_GetCurrentGfxclkIndex
);
4545 now
= smum_get_argument(hwmgr
);
4547 if (hwmgr
->pp_one_vf
&&
4548 (hwmgr
->dpm_level
== AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
))
4551 count
= sclk_table
->count
;
4552 for (i
= 0; i
< count
; i
++)
4553 size
+= sprintf(buf
+ size
, "%d: %uMhz %s\n",
4554 i
, sclk_table
->dpm_levels
[i
].value
/ 100,
4555 (i
== now
) ? "*" : "");
4558 if (data
->registry_data
.mclk_dpm_key_disabled
)
4561 smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_GetCurrentUclkIndex
);
4562 now
= smum_get_argument(hwmgr
);
4564 for (i
= 0; i
< mclk_table
->count
; i
++)
4565 size
+= sprintf(buf
+ size
, "%d: %uMhz %s\n",
4566 i
, mclk_table
->dpm_levels
[i
].value
/ 100,
4567 (i
== now
) ? "*" : "");
4570 if (data
->registry_data
.socclk_dpm_key_disabled
)
4573 smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_GetCurrentSocclkIndex
);
4574 now
= smum_get_argument(hwmgr
);
4576 for (i
= 0; i
< soc_table
->count
; i
++)
4577 size
+= sprintf(buf
+ size
, "%d: %uMhz %s\n",
4578 i
, soc_table
->dpm_levels
[i
].value
/ 100,
4579 (i
== now
) ? "*" : "");
4582 if (data
->registry_data
.dcefclk_dpm_key_disabled
)
4585 smum_send_msg_to_smc_with_parameter(hwmgr
,
4586 PPSMC_MSG_GetClockFreqMHz
, CLK_DCEFCLK
);
4587 now
= smum_get_argument(hwmgr
);
4589 for (i
= 0; i
< dcef_table
->count
; i
++)
4590 size
+= sprintf(buf
+ size
, "%d: %uMhz %s\n",
4591 i
, dcef_table
->dpm_levels
[i
].value
/ 100,
4592 (dcef_table
->dpm_levels
[i
].value
/ 100 == now
) ?
4596 smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_GetCurrentLinkIndex
);
4597 now
= smum_get_argument(hwmgr
);
4599 for (i
= 0; i
< pcie_table
->count
; i
++)
4600 size
+= sprintf(buf
+ size
, "%d: %s %s\n", i
,
4601 (pcie_table
->pcie_gen
[i
] == 0) ? "2.5GT/s, x1" :
4602 (pcie_table
->pcie_gen
[i
] == 1) ? "5.0GT/s, x16" :
4603 (pcie_table
->pcie_gen
[i
] == 2) ? "8.0GT/s, x16" : "",
4604 (i
== now
) ? "*" : "");
4607 if (hwmgr
->od_enabled
) {
4608 size
= sprintf(buf
, "%s:\n", "OD_SCLK");
4609 podn_vdd_dep
= &data
->odn_dpm_table
.vdd_dep_on_sclk
;
4610 for (i
= 0; i
< podn_vdd_dep
->count
; i
++)
4611 size
+= sprintf(buf
+ size
, "%d: %10uMhz %10umV\n",
4612 i
, podn_vdd_dep
->entries
[i
].clk
/ 100,
4613 podn_vdd_dep
->entries
[i
].vddc
);
4617 if (hwmgr
->od_enabled
) {
4618 size
= sprintf(buf
, "%s:\n", "OD_MCLK");
4619 podn_vdd_dep
= &data
->odn_dpm_table
.vdd_dep_on_mclk
;
4620 for (i
= 0; i
< podn_vdd_dep
->count
; i
++)
4621 size
+= sprintf(buf
+ size
, "%d: %10uMhz %10umV\n",
4622 i
, podn_vdd_dep
->entries
[i
].clk
/100,
4623 podn_vdd_dep
->entries
[i
].vddc
);
4627 if (hwmgr
->od_enabled
) {
4628 size
= sprintf(buf
, "%s:\n", "OD_RANGE");
4629 size
+= sprintf(buf
+ size
, "SCLK: %7uMHz %10uMHz\n",
4630 data
->golden_dpm_table
.gfx_table
.dpm_levels
[0].value
/100,
4631 hwmgr
->platform_descriptor
.overdriveLimit
.engineClock
/100);
4632 size
+= sprintf(buf
+ size
, "MCLK: %7uMHz %10uMHz\n",
4633 data
->golden_dpm_table
.mem_table
.dpm_levels
[0].value
/100,
4634 hwmgr
->platform_descriptor
.overdriveLimit
.memoryClock
/100);
4635 size
+= sprintf(buf
+ size
, "VDDC: %7umV %11umV\n",
4636 data
->odn_dpm_table
.min_vddc
,
4637 data
->odn_dpm_table
.max_vddc
);

static int vega10_display_configuration_changed_task(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	Watermarks_t *wm_table = &(data->smc_state_table.water_marks_table);
	int result = 0;

	if ((data->water_marks_bitmap & WaterMarksExist) &&
	    !(data->water_marks_bitmap & WaterMarksLoaded)) {
		result = smum_smc_table_manager(hwmgr, (uint8_t *)wm_table, WMTABLE, false);
		PP_ASSERT_WITH_CODE(result, "Failed to update WMTABLE!", return EINVAL);
		data->water_marks_bitmap |= WaterMarksLoaded;
	}

	if (data->water_marks_bitmap & WaterMarksLoaded) {
		smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_NumOfDisplays, hwmgr->display_config->num_display);
	}

	return result;
}

int vega10_enable_disable_uvd_dpm(struct pp_hwmgr *hwmgr, bool enable)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	if (data->smu_features[GNLD_DPM_UVD].supported) {
		PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr,
				enable,
				data->smu_features[GNLD_DPM_UVD].smu_feature_bitmap),
				"Attempt to Enable/Disable DPM UVD Failed!",
				return -1);
		data->smu_features[GNLD_DPM_UVD].enabled = enable;
	}

	return 0;
}

static void vega10_power_gate_vce(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	data->vce_power_gated = bgate;
	vega10_enable_disable_vce_dpm(hwmgr, !bgate);
}

static void vega10_power_gate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
	struct vega10_hwmgr *data = hwmgr->backend;

	data->uvd_power_gated = bgate;
	vega10_enable_disable_uvd_dpm(hwmgr, !bgate);
}

static inline bool vega10_are_power_levels_equal(
				const struct vega10_performance_level *pl1,
				const struct vega10_performance_level *pl2)
{
	return ((pl1->soc_clock == pl2->soc_clock) &&
			(pl1->gfx_clock == pl2->gfx_clock) &&
			(pl1->mem_clock == pl2->mem_clock));
}

static int vega10_check_states_equal(struct pp_hwmgr *hwmgr,
				const struct pp_hw_power_state *pstate1,
				const struct pp_hw_power_state *pstate2, bool *equal)
{
	const struct vega10_power_state *psa;
	const struct vega10_power_state *psb;
	int i;

	if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
		return -EINVAL;

	psa = cast_const_phw_vega10_power_state(pstate1);
	psb = cast_const_phw_vega10_power_state(pstate2);
	/* If the two states don't even have the same number of performance
	 * levels they cannot be the same state.
	 */
	if (psa->performance_level_count != psb->performance_level_count) {
		*equal = false;
		return 0;
	}

	for (i = 0; i < psa->performance_level_count; i++) {
		if (!vega10_are_power_levels_equal(&(psa->performance_levels[i]),
						   &(psb->performance_levels[i]))) {
			/* If we have found even one performance level pair
			 * that is different the states are different.
			 */
			*equal = false;
			return 0;
		}
	}

	/* If all performance levels are the same try to use the UVD clocks to break the tie.*/
	*equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) &&
		  (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
	*equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) &&
		   (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
	*equal &= (psa->sclk_threshold == psb->sclk_threshold);

	return 0;
}

static bool
vega10_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	bool is_update_required = false;

	if (data->display_timing.num_existing_displays !=
	    hwmgr->display_config->num_display)
		is_update_required = true;

	if (PP_CAP(PHM_PlatformCaps_SclkDeepSleep)) {
		if (data->display_timing.min_clock_in_sr !=
		    hwmgr->display_config->min_core_set_clock_in_sr)
			is_update_required = true;
	}

	return is_update_required;
}

static int vega10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	int tmp_result, result = 0;

	if (!hwmgr->not_vf)
		return 0;

	if (PP_CAP(PHM_PlatformCaps_ThermalController))
		vega10_disable_thermal_protection(hwmgr);

	tmp_result = vega10_disable_power_containment(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable power containment!", result = tmp_result);

	tmp_result = vega10_disable_didt_config(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable didt config!", result = tmp_result);

	tmp_result = vega10_avfs_enable(hwmgr, false);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable AVFS!", result = tmp_result);

	tmp_result = vega10_stop_dpm(hwmgr, SMC_DPM_FEATURES);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to stop DPM!", result = tmp_result);

	tmp_result = vega10_disable_deep_sleep_master_switch(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable deep sleep!", result = tmp_result);

	tmp_result = vega10_disable_ulv(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable ulv!", result = tmp_result);

	tmp_result = vega10_acg_disable(hwmgr);
	PP_ASSERT_WITH_CODE((tmp_result == 0),
			"Failed to disable acg!", result = tmp_result);

	vega10_enable_disable_PCC_limit_feature(hwmgr, false);

	return result;
}

static int vega10_power_off_asic(struct pp_hwmgr *hwmgr)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	int result;

	result = vega10_disable_dpm_tasks(hwmgr);
	PP_ASSERT_WITH_CODE((0 == result),
			"[disable_dpm_tasks] Failed to disable DPM!",
			);
	data->water_marks_bitmap &= ~(WaterMarksLoaded);

	return result;
}

4815 static int vega10_get_sclk_od(struct pp_hwmgr
*hwmgr
)
4817 struct vega10_hwmgr
*data
= hwmgr
->backend
;
4818 struct vega10_single_dpm_table
*sclk_table
= &(data
->dpm_table
.gfx_table
);
4819 struct vega10_single_dpm_table
*golden_sclk_table
=
4820 &(data
->golden_dpm_table
.gfx_table
);
4821 int value
= sclk_table
->dpm_levels
[sclk_table
->count
- 1].value
;
4822 int golden_value
= golden_sclk_table
->dpm_levels
4823 [golden_sclk_table
->count
- 1].value
;
4825 value
-= golden_value
;
4826 value
= DIV_ROUND_UP(value
* 100, golden_value
);
4831 static int vega10_set_sclk_od(struct pp_hwmgr
*hwmgr
, uint32_t value
)
4833 struct vega10_hwmgr
*data
= hwmgr
->backend
;
4834 struct vega10_single_dpm_table
*golden_sclk_table
=
4835 &(data
->golden_dpm_table
.gfx_table
);
4836 struct pp_power_state
*ps
;
4837 struct vega10_power_state
*vega10_ps
;
4839 ps
= hwmgr
->request_ps
;
4844 vega10_ps
= cast_phw_vega10_power_state(&ps
->hardware
);
4846 vega10_ps
->performance_levels
4847 [vega10_ps
->performance_level_count
- 1].gfx_clock
=
4848 golden_sclk_table
->dpm_levels
4849 [golden_sclk_table
->count
- 1].value
*
4851 golden_sclk_table
->dpm_levels
4852 [golden_sclk_table
->count
- 1].value
;
4854 if (vega10_ps
->performance_levels
4855 [vega10_ps
->performance_level_count
- 1].gfx_clock
>
4856 hwmgr
->platform_descriptor
.overdriveLimit
.engineClock
) {
4857 vega10_ps
->performance_levels
4858 [vega10_ps
->performance_level_count
- 1].gfx_clock
=
4859 hwmgr
->platform_descriptor
.overdriveLimit
.engineClock
;
4860 pr_warn("max sclk supported by vbios is %d\n",
4861 hwmgr
->platform_descriptor
.overdriveLimit
.engineClock
);
4866 static int vega10_get_mclk_od(struct pp_hwmgr
*hwmgr
)
4868 struct vega10_hwmgr
*data
= hwmgr
->backend
;
4869 struct vega10_single_dpm_table
*mclk_table
= &(data
->dpm_table
.mem_table
);
4870 struct vega10_single_dpm_table
*golden_mclk_table
=
4871 &(data
->golden_dpm_table
.mem_table
);
4872 int value
= mclk_table
->dpm_levels
[mclk_table
->count
- 1].value
;
4873 int golden_value
= golden_mclk_table
->dpm_levels
4874 [golden_mclk_table
->count
- 1].value
;
4876 value
-= golden_value
;
4877 value
= DIV_ROUND_UP(value
* 100, golden_value
);
4882 static int vega10_set_mclk_od(struct pp_hwmgr
*hwmgr
, uint32_t value
)
4884 struct vega10_hwmgr
*data
= hwmgr
->backend
;
4885 struct vega10_single_dpm_table
*golden_mclk_table
=
4886 &(data
->golden_dpm_table
.mem_table
);
4887 struct pp_power_state
*ps
;
4888 struct vega10_power_state
*vega10_ps
;
4890 ps
= hwmgr
->request_ps
;
4895 vega10_ps
= cast_phw_vega10_power_state(&ps
->hardware
);
4897 vega10_ps
->performance_levels
4898 [vega10_ps
->performance_level_count
- 1].mem_clock
=
4899 golden_mclk_table
->dpm_levels
4900 [golden_mclk_table
->count
- 1].value
*
4902 golden_mclk_table
->dpm_levels
4903 [golden_mclk_table
->count
- 1].value
;
4905 if (vega10_ps
->performance_levels
4906 [vega10_ps
->performance_level_count
- 1].mem_clock
>
4907 hwmgr
->platform_descriptor
.overdriveLimit
.memoryClock
) {
4908 vega10_ps
->performance_levels
4909 [vega10_ps
->performance_level_count
- 1].mem_clock
=
4910 hwmgr
->platform_descriptor
.overdriveLimit
.memoryClock
;
4911 pr_warn("max mclk supported by vbios is %d\n",
4912 hwmgr
->platform_descriptor
.overdriveLimit
.memoryClock
);
static int vega10_notify_cac_buffer_info(struct pp_hwmgr *hwmgr,
					uint32_t virtual_addr_low,
					uint32_t virtual_addr_hi,
					uint32_t mc_addr_low,
					uint32_t mc_addr_hi,
					uint32_t size)
{
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetSystemVirtualDramAddrHigh,
					virtual_addr_hi);
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_SetSystemVirtualDramAddrLow,
					virtual_addr_low);
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramLogSetDramAddrHigh,
					mc_addr_hi);
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramLogSetDramAddrLow,
					mc_addr_low);
	smum_send_msg_to_smc_with_parameter(hwmgr,
					PPSMC_MSG_DramLogSetDramSize,
					size);

	return 0;
}

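/*
 * Thermal ranges come from the SMU PPTable limits (Tedge/Thotspot/Thbm),
 * converted to the power-play centi-degree unit, with the CTF_OFFSET_*
 * values added on top for the emergency maximums.
 */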
static int vega10_get_thermal_temperature_range(struct pp_hwmgr *hwmgr,
		struct PP_TemperatureRange *thermal_data)
{
	struct vega10_hwmgr *data = hwmgr->backend;
	PPTable_t *pp_table = &(data->smc_state_table.pp_table);

	memcpy(thermal_data, &SMU7ThermalWithDelayPolicy[0], sizeof(struct PP_TemperatureRange));

	thermal_data->max = pp_table->TedgeLimit *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	thermal_data->edge_emergency_max = (pp_table->TedgeLimit + CTF_OFFSET_EDGE) *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	thermal_data->hotspot_crit_max = pp_table->ThotspotLimit *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	thermal_data->hotspot_emergency_max = (pp_table->ThotspotLimit + CTF_OFFSET_HOTSPOT) *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	thermal_data->mem_crit_max = pp_table->ThbmLimit *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;
	thermal_data->mem_emergency_max = (pp_table->ThbmLimit + CTF_OFFSET_HBM) *
		PP_TEMPERATURE_UNITS_PER_CENTIGRADES;

	return 0;
}

4969 static int vega10_get_power_profile_mode(struct pp_hwmgr
*hwmgr
, char *buf
)
4971 struct vega10_hwmgr
*data
= hwmgr
->backend
;
4972 uint32_t i
, size
= 0;
4973 static const uint8_t profile_mode_setting
[6][4] = {{70, 60, 0, 0,},
4980 static const char *profile_name
[7] = {"BOOTUP_DEFAULT",
4987 static const char *title
[6] = {"NUM",
4992 "MIN_ACTIVE_LEVEL"};
4997 size
+= sprintf(buf
+ size
, "%s %16s %s %s %s %s\n",title
[0],
4998 title
[1], title
[2], title
[3], title
[4], title
[5]);
5000 for (i
= 0; i
< PP_SMC_POWER_PROFILE_CUSTOM
; i
++)
5001 size
+= sprintf(buf
+ size
, "%3d %14s%s: %14d %3d %10d %14d\n",
5002 i
, profile_name
[i
], (i
== hwmgr
->power_profile_mode
) ? "*" : " ",
5003 profile_mode_setting
[i
][0], profile_mode_setting
[i
][1],
5004 profile_mode_setting
[i
][2], profile_mode_setting
[i
][3]);
5005 size
+= sprintf(buf
+ size
, "%3d %14s%s: %14d %3d %10d %14d\n", i
,
5006 profile_name
[i
], (i
== hwmgr
->power_profile_mode
) ? "*" : " ",
5007 data
->custom_profile_mode
[0], data
->custom_profile_mode
[1],
5008 data
->custom_profile_mode
[2], data
->custom_profile_mode
[3]);
5012 static int vega10_set_power_profile_mode(struct pp_hwmgr
*hwmgr
, long *input
, uint32_t size
)
5014 struct vega10_hwmgr
*data
= hwmgr
->backend
;
5015 uint8_t busy_set_point
;
5017 uint8_t use_rlc_busy
;
5018 uint8_t min_active_level
;
5019 uint32_t power_profile_mode
= input
[size
];
5021 if (power_profile_mode
== PP_SMC_POWER_PROFILE_CUSTOM
) {
5022 if (size
!= 0 && size
!= 4)
5025 /* If size = 0 and the CUSTOM profile has been set already
5026 * then just apply the profile. The copy stored in the hwmgr
5027 * is zeroed out on init
5030 if (data
->custom_profile_mode
[0] != 0)
5036 data
->custom_profile_mode
[0] = busy_set_point
= input
[0];
5037 data
->custom_profile_mode
[1] = FPS
= input
[1];
5038 data
->custom_profile_mode
[2] = use_rlc_busy
= input
[2];
5039 data
->custom_profile_mode
[3] = min_active_level
= input
[3];
5040 smum_send_msg_to_smc_with_parameter(hwmgr
,
5041 PPSMC_MSG_SetCustomGfxDpmParameters
,
5042 busy_set_point
| FPS
<<8 |
5043 use_rlc_busy
<< 16 | min_active_level
<<24);
5047 smum_send_msg_to_smc_with_parameter(hwmgr
, PPSMC_MSG_SetWorkloadMask
,
5048 1 << power_profile_mode
);
5049 hwmgr
->power_profile_mode
= power_profile_mode
;
5055 static bool vega10_check_clk_voltage_valid(struct pp_hwmgr
*hwmgr
,
5056 enum PP_OD_DPM_TABLE_COMMAND type
,
5060 struct vega10_hwmgr
*data
= hwmgr
->backend
;
5061 struct vega10_odn_dpm_table
*odn_table
= &(data
->odn_dpm_table
);
5062 struct vega10_single_dpm_table
*golden_table
;
5064 if (voltage
< odn_table
->min_vddc
|| voltage
> odn_table
->max_vddc
) {
5065 pr_info("OD voltage is out of range [%d - %d] mV\n", odn_table
->min_vddc
, odn_table
->max_vddc
);
5069 if (type
== PP_OD_EDIT_SCLK_VDDC_TABLE
) {
5070 golden_table
= &(data
->golden_dpm_table
.gfx_table
);
5071 if (golden_table
->dpm_levels
[0].value
> clk
||
5072 hwmgr
->platform_descriptor
.overdriveLimit
.engineClock
< clk
) {
5073 pr_info("OD engine clock is out of range [%d - %d] MHz\n",
5074 golden_table
->dpm_levels
[0].value
/100,
5075 hwmgr
->platform_descriptor
.overdriveLimit
.engineClock
/100);
5078 } else if (type
== PP_OD_EDIT_MCLK_VDDC_TABLE
) {
5079 golden_table
= &(data
->golden_dpm_table
.mem_table
);
5080 if (golden_table
->dpm_levels
[0].value
> clk
||
5081 hwmgr
->platform_descriptor
.overdriveLimit
.memoryClock
< clk
) {
5082 pr_info("OD memory clock is out of range [%d - %d] MHz\n",
5083 golden_table
->dpm_levels
[0].value
/100,
5084 hwmgr
->platform_descriptor
.overdriveLimit
.memoryClock
/100);
5094 static void vega10_odn_update_power_state(struct pp_hwmgr
*hwmgr
)
5096 struct vega10_hwmgr
*data
= hwmgr
->backend
;
5097 struct pp_power_state
*ps
= hwmgr
->request_ps
;
5098 struct vega10_power_state
*vega10_ps
;
5099 struct vega10_single_dpm_table
*gfx_dpm_table
=
5100 &data
->dpm_table
.gfx_table
;
5101 struct vega10_single_dpm_table
*soc_dpm_table
=
5102 &data
->dpm_table
.soc_table
;
5103 struct vega10_single_dpm_table
*mem_dpm_table
=
5104 &data
->dpm_table
.mem_table
;
5110 vega10_ps
= cast_phw_vega10_power_state(&ps
->hardware
);
5111 max_level
= vega10_ps
->performance_level_count
- 1;
5113 if (vega10_ps
->performance_levels
[max_level
].gfx_clock
!=
5114 gfx_dpm_table
->dpm_levels
[gfx_dpm_table
->count
- 1].value
)
5115 vega10_ps
->performance_levels
[max_level
].gfx_clock
=
5116 gfx_dpm_table
->dpm_levels
[gfx_dpm_table
->count
- 1].value
;
5118 if (vega10_ps
->performance_levels
[max_level
].soc_clock
!=
5119 soc_dpm_table
->dpm_levels
[soc_dpm_table
->count
- 1].value
)
5120 vega10_ps
->performance_levels
[max_level
].soc_clock
=
5121 soc_dpm_table
->dpm_levels
[soc_dpm_table
->count
- 1].value
;
5123 if (vega10_ps
->performance_levels
[max_level
].mem_clock
!=
5124 mem_dpm_table
->dpm_levels
[mem_dpm_table
->count
- 1].value
)
5125 vega10_ps
->performance_levels
[max_level
].mem_clock
=
5126 mem_dpm_table
->dpm_levels
[mem_dpm_table
->count
- 1].value
;
5131 ps
= (struct pp_power_state
*)((unsigned long)(hwmgr
->ps
) + hwmgr
->ps_size
* (hwmgr
->num_ps
- 1));
5132 vega10_ps
= cast_phw_vega10_power_state(&ps
->hardware
);
5133 max_level
= vega10_ps
->performance_level_count
- 1;
5135 if (vega10_ps
->performance_levels
[max_level
].gfx_clock
!=
5136 gfx_dpm_table
->dpm_levels
[gfx_dpm_table
->count
- 1].value
)
5137 vega10_ps
->performance_levels
[max_level
].gfx_clock
=
5138 gfx_dpm_table
->dpm_levels
[gfx_dpm_table
->count
- 1].value
;
5140 if (vega10_ps
->performance_levels
[max_level
].soc_clock
!=
5141 soc_dpm_table
->dpm_levels
[soc_dpm_table
->count
- 1].value
)
5142 vega10_ps
->performance_levels
[max_level
].soc_clock
=
5143 soc_dpm_table
->dpm_levels
[soc_dpm_table
->count
- 1].value
;
5145 if (vega10_ps
->performance_levels
[max_level
].mem_clock
!=
5146 mem_dpm_table
->dpm_levels
[mem_dpm_table
->count
- 1].value
)
5147 vega10_ps
->performance_levels
[max_level
].mem_clock
=
5148 mem_dpm_table
->dpm_levels
[mem_dpm_table
->count
- 1].value
;
5151 static void vega10_odn_update_soc_table(struct pp_hwmgr
*hwmgr
,
5152 enum PP_OD_DPM_TABLE_COMMAND type
)
5154 struct vega10_hwmgr
*data
= hwmgr
->backend
;
5155 struct phm_ppt_v2_information
*table_info
= hwmgr
->pptable
;
5156 struct phm_ppt_v1_clock_voltage_dependency_table
*dep_table
= table_info
->vdd_dep_on_socclk
;
5157 struct vega10_single_dpm_table
*dpm_table
= &data
->golden_dpm_table
.mem_table
;
5159 struct vega10_odn_clock_voltage_dependency_table
*podn_vdd_dep_on_socclk
=
5160 &data
->odn_dpm_table
.vdd_dep_on_socclk
;
5161 struct vega10_odn_vddc_lookup_table
*od_vddc_lookup_table
= &data
->odn_dpm_table
.vddc_lookup_table
;
5163 struct vega10_odn_clock_voltage_dependency_table
*podn_vdd_dep
;
5166 if (type
== PP_OD_EDIT_SCLK_VDDC_TABLE
) {
5167 podn_vdd_dep
= &data
->odn_dpm_table
.vdd_dep_on_sclk
;
5168 for (i
= 0; i
< podn_vdd_dep
->count
; i
++)
5169 od_vddc_lookup_table
->entries
[i
].us_vdd
= podn_vdd_dep
->entries
[i
].vddc
;
5170 } else if (type
== PP_OD_EDIT_MCLK_VDDC_TABLE
) {
5171 podn_vdd_dep
= &data
->odn_dpm_table
.vdd_dep_on_mclk
;
5172 for (i
= 0; i
< dpm_table
->count
; i
++) {
5173 for (j
= 0; j
< od_vddc_lookup_table
->count
; j
++) {
5174 if (od_vddc_lookup_table
->entries
[j
].us_vdd
>
5175 podn_vdd_dep
->entries
[i
].vddc
)
5178 if (j
== od_vddc_lookup_table
->count
) {
5179 j
= od_vddc_lookup_table
->count
- 1;
5180 od_vddc_lookup_table
->entries
[j
].us_vdd
=
5181 podn_vdd_dep
->entries
[i
].vddc
;
5182 data
->need_update_dpm_table
|= DPMTABLE_OD_UPDATE_VDDC
;
5184 podn_vdd_dep
->entries
[i
].vddInd
= j
;
5186 dpm_table
= &data
->dpm_table
.soc_table
;
5187 for (i
= 0; i
< dep_table
->count
; i
++) {
5188 if (dep_table
->entries
[i
].vddInd
== podn_vdd_dep
->entries
[podn_vdd_dep
->count
-1].vddInd
&&
5189 dep_table
->entries
[i
].clk
< podn_vdd_dep
->entries
[podn_vdd_dep
->count
-1].clk
) {
5190 data
->need_update_dpm_table
|= DPMTABLE_UPDATE_SOCCLK
;
5191 for (; (i
< dep_table
->count
) &&
5192 (dep_table
->entries
[i
].clk
< podn_vdd_dep
->entries
[podn_vdd_dep
->count
- 1].clk
); i
++) {
5193 podn_vdd_dep_on_socclk
->entries
[i
].clk
= podn_vdd_dep
->entries
[podn_vdd_dep
->count
-1].clk
;
5194 dpm_table
->dpm_levels
[i
].value
= podn_vdd_dep_on_socclk
->entries
[i
].clk
;
5198 dpm_table
->dpm_levels
[i
].value
= dep_table
->entries
[i
].clk
;
5199 podn_vdd_dep_on_socclk
->entries
[i
].vddc
= dep_table
->entries
[i
].vddc
;
5200 podn_vdd_dep_on_socclk
->entries
[i
].vddInd
= dep_table
->entries
[i
].vddInd
;
5201 podn_vdd_dep_on_socclk
->entries
[i
].clk
= dep_table
->entries
[i
].clk
;
5204 if (podn_vdd_dep_on_socclk
->entries
[podn_vdd_dep_on_socclk
->count
- 1].clk
<
5205 podn_vdd_dep
->entries
[podn_vdd_dep
->count
- 1].clk
) {
5206 data
->need_update_dpm_table
|= DPMTABLE_UPDATE_SOCCLK
;
5207 podn_vdd_dep_on_socclk
->entries
[podn_vdd_dep_on_socclk
->count
- 1].clk
=
5208 podn_vdd_dep
->entries
[podn_vdd_dep
->count
- 1].clk
;
5209 dpm_table
->dpm_levels
[podn_vdd_dep_on_socclk
->count
- 1].value
=
5210 podn_vdd_dep
->entries
[podn_vdd_dep
->count
- 1].clk
;
5212 if (podn_vdd_dep_on_socclk
->entries
[podn_vdd_dep_on_socclk
->count
- 1].vddInd
<
5213 podn_vdd_dep
->entries
[podn_vdd_dep
->count
- 1].vddInd
) {
5214 data
->need_update_dpm_table
|= DPMTABLE_UPDATE_SOCCLK
;
5215 podn_vdd_dep_on_socclk
->entries
[podn_vdd_dep_on_socclk
->count
- 1].vddInd
=
5216 podn_vdd_dep
->entries
[podn_vdd_dep
->count
- 1].vddInd
;
5219 vega10_odn_update_power_state(hwmgr
);
5222 static int vega10_odn_edit_dpm_table(struct pp_hwmgr
*hwmgr
,
5223 enum PP_OD_DPM_TABLE_COMMAND type
,
5224 long *input
, uint32_t size
)
5226 struct vega10_hwmgr
*data
= hwmgr
->backend
;
5227 struct vega10_odn_clock_voltage_dependency_table
*podn_vdd_dep_table
;
5228 struct vega10_single_dpm_table
*dpm_table
;
5232 uint32_t input_level
;
5235 PP_ASSERT_WITH_CODE(input
, "NULL user input for clock and voltage",
5238 if (!hwmgr
->od_enabled
) {
5239 pr_info("OverDrive feature not enabled\n");
5243 if (PP_OD_EDIT_SCLK_VDDC_TABLE
== type
) {
5244 dpm_table
= &data
->dpm_table
.gfx_table
;
5245 podn_vdd_dep_table
= &data
->odn_dpm_table
.vdd_dep_on_sclk
;
5246 data
->need_update_dpm_table
|= DPMTABLE_OD_UPDATE_SCLK
;
5247 } else if (PP_OD_EDIT_MCLK_VDDC_TABLE
== type
) {
5248 dpm_table
= &data
->dpm_table
.mem_table
;
5249 podn_vdd_dep_table
= &data
->odn_dpm_table
.vdd_dep_on_mclk
;
5250 data
->need_update_dpm_table
|= DPMTABLE_OD_UPDATE_MCLK
;
5251 } else if (PP_OD_RESTORE_DEFAULT_TABLE
== type
) {
5252 memcpy(&(data
->dpm_table
), &(data
->golden_dpm_table
), sizeof(struct vega10_dpm_table
));
5253 vega10_odn_initial_default_setting(hwmgr
);
5254 vega10_odn_update_power_state(hwmgr
);
5255 /* force to update all clock tables */
5256 data
->need_update_dpm_table
= DPMTABLE_UPDATE_SCLK
|
5257 DPMTABLE_UPDATE_MCLK
|
5258 DPMTABLE_UPDATE_SOCCLK
;
5260 } else if (PP_OD_COMMIT_DPM_TABLE
== type
) {
5261 vega10_check_dpm_table_updated(hwmgr
);
5267 for (i
= 0; i
< size
; i
+= 3) {
5268 if (i
+ 3 > size
|| input
[i
] >= podn_vdd_dep_table
->count
) {
5269 pr_info("invalid clock voltage input\n");
5272 input_level
= input
[i
];
5273 input_clk
= input
[i
+1] * 100;
5274 input_vol
= input
[i
+2];
5276 if (vega10_check_clk_voltage_valid(hwmgr
, type
, input_clk
, input_vol
)) {
5277 dpm_table
->dpm_levels
[input_level
].value
= input_clk
;
5278 podn_vdd_dep_table
->entries
[input_level
].clk
= input_clk
;
5279 podn_vdd_dep_table
->entries
[input_level
].vddc
= input_vol
;
5284 vega10_odn_update_soc_table(hwmgr
, type
);
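
/*
 * MP1 state control: only the UNLOAD transition needs an explicit
 * PPSMC_MSG_PrepareMp1ForUnload message on Vega10; the other states are
 * treated as no-ops here.
 */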
static int vega10_set_mp1_state(struct pp_hwmgr *hwmgr,
				enum pp_mp1_state mp1_state)
{
	uint16_t msg;
	int ret;

	switch (mp1_state) {
	case PP_MP1_STATE_UNLOAD:
		msg = PPSMC_MSG_PrepareMp1ForUnload;
		break;
	case PP_MP1_STATE_SHUTDOWN:
	case PP_MP1_STATE_RESET:
	case PP_MP1_STATE_NONE:
	default:
		return 0;
	}

	PP_ASSERT_WITH_CODE((ret = smum_send_msg_to_smc(hwmgr, msg)) == 0,
			    "[PrepareMp1] Failed!",
			    return ret);

	return 0;
}

static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
				PHM_PerformanceLevelDesignation designation, uint32_t index,
				PHM_PerformanceLevel *level)
{
	const struct vega10_power_state *ps;
	uint32_t i;

	if (level == NULL || hwmgr == NULL || state == NULL)
		return -EINVAL;

	ps = cast_const_phw_vega10_power_state(state);

	i = index > ps->performance_level_count - 1 ?
			ps->performance_level_count - 1 : index;

	level->coreClock = ps->performance_levels[i].gfx_clock;
	level->memory_clock = ps->performance_levels[i].mem_clock;

	return 0;
}

*hwmgr
, bool disable
)
5335 struct vega10_hwmgr
*data
= hwmgr
->backend
;
5336 uint32_t feature_mask
= 0;
5339 feature_mask
|= data
->smu_features
[GNLD_ULV
].enabled
?
5340 data
->smu_features
[GNLD_ULV
].smu_feature_bitmap
: 0;
5341 feature_mask
|= data
->smu_features
[GNLD_DS_GFXCLK
].enabled
?
5342 data
->smu_features
[GNLD_DS_GFXCLK
].smu_feature_bitmap
: 0;
5343 feature_mask
|= data
->smu_features
[GNLD_DS_SOCCLK
].enabled
?
5344 data
->smu_features
[GNLD_DS_SOCCLK
].smu_feature_bitmap
: 0;
5345 feature_mask
|= data
->smu_features
[GNLD_DS_LCLK
].enabled
?
5346 data
->smu_features
[GNLD_DS_LCLK
].smu_feature_bitmap
: 0;
5347 feature_mask
|= data
->smu_features
[GNLD_DS_DCEFCLK
].enabled
?
5348 data
->smu_features
[GNLD_DS_DCEFCLK
].smu_feature_bitmap
: 0;
5350 feature_mask
|= (!data
->smu_features
[GNLD_ULV
].enabled
) ?
5351 data
->smu_features
[GNLD_ULV
].smu_feature_bitmap
: 0;
5352 feature_mask
|= (!data
->smu_features
[GNLD_DS_GFXCLK
].enabled
) ?
5353 data
->smu_features
[GNLD_DS_GFXCLK
].smu_feature_bitmap
: 0;
5354 feature_mask
|= (!data
->smu_features
[GNLD_DS_SOCCLK
].enabled
) ?
5355 data
->smu_features
[GNLD_DS_SOCCLK
].smu_feature_bitmap
: 0;
5356 feature_mask
|= (!data
->smu_features
[GNLD_DS_LCLK
].enabled
) ?
5357 data
->smu_features
[GNLD_DS_LCLK
].smu_feature_bitmap
: 0;
5358 feature_mask
|= (!data
->smu_features
[GNLD_DS_DCEFCLK
].enabled
) ?
5359 data
->smu_features
[GNLD_DS_DCEFCLK
].smu_feature_bitmap
: 0;
5363 PP_ASSERT_WITH_CODE(!vega10_enable_smc_features(hwmgr
,
5364 !disable
, feature_mask
),
5365 "enable/disable power features for compute performance Failed!",
5369 data
->smu_features
[GNLD_ULV
].enabled
= false;
5370 data
->smu_features
[GNLD_DS_GFXCLK
].enabled
= false;
5371 data
->smu_features
[GNLD_DS_SOCCLK
].enabled
= false;
5372 data
->smu_features
[GNLD_DS_LCLK
].enabled
= false;
5373 data
->smu_features
[GNLD_DS_DCEFCLK
].enabled
= false;
5375 data
->smu_features
[GNLD_ULV
].enabled
= true;
5376 data
->smu_features
[GNLD_DS_GFXCLK
].enabled
= true;
5377 data
->smu_features
[GNLD_DS_SOCCLK
].enabled
= true;
5378 data
->smu_features
[GNLD_DS_LCLK
].enabled
= true;
5379 data
->smu_features
[GNLD_DS_DCEFCLK
].enabled
= true;
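
/*
 * Added commentary (assumption about intent, not original text): when
 * disable is true the mask collects the ULV and deep-sleep features
 * that are currently enabled so a single vega10_enable_smc_features()
 * call can turn them all off for compute workloads; when disable is
 * false it collects the ones currently off so the same call re-enables
 * them, and the cached ->enabled flags are updated to match.
 */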

static const struct pp_hwmgr_func vega10_hwmgr_funcs = {
	.backend_init = vega10_hwmgr_backend_init,
	.backend_fini = vega10_hwmgr_backend_fini,
	.asic_setup = vega10_setup_asic_task,
	.dynamic_state_management_enable = vega10_enable_dpm_tasks,
	.dynamic_state_management_disable = vega10_disable_dpm_tasks,
	.get_num_of_pp_table_entries =
			vega10_get_number_of_powerplay_table_entries,
	.get_power_state_size = vega10_get_power_state_size,
	.get_pp_table_entry = vega10_get_pp_table_entry,
	.patch_boot_state = vega10_patch_boot_state,
	.apply_state_adjust_rules = vega10_apply_state_adjust_rules,
	.power_state_set = vega10_set_power_state_tasks,
	.get_sclk = vega10_dpm_get_sclk,
	.get_mclk = vega10_dpm_get_mclk,
	.notify_smc_display_config_after_ps_adjustment =
			vega10_notify_smc_display_config_after_ps_adjustment,
	.force_dpm_level = vega10_dpm_force_dpm_level,
	.stop_thermal_controller = vega10_thermal_stop_thermal_controller,
	.get_fan_speed_info = vega10_fan_ctrl_get_fan_speed_info,
	.get_fan_speed_percent = vega10_fan_ctrl_get_fan_speed_percent,
	.set_fan_speed_percent = vega10_fan_ctrl_set_fan_speed_percent,
	.reset_fan_speed_to_default =
			vega10_fan_ctrl_reset_fan_speed_to_default,
	.get_fan_speed_rpm = vega10_fan_ctrl_get_fan_speed_rpm,
	.set_fan_speed_rpm = vega10_fan_ctrl_set_fan_speed_rpm,
	.uninitialize_thermal_controller =
			vega10_thermal_ctrl_uninitialize_thermal_controller,
	.set_fan_control_mode = vega10_set_fan_control_mode,
	.get_fan_control_mode = vega10_get_fan_control_mode,
	.read_sensor = vega10_read_sensor,
	.get_dal_power_level = vega10_get_dal_power_level,
	.get_clock_by_type_with_latency = vega10_get_clock_by_type_with_latency,
	.get_clock_by_type_with_voltage = vega10_get_clock_by_type_with_voltage,
	.set_watermarks_for_clocks_ranges = vega10_set_watermarks_for_clocks_ranges,
	.display_clock_voltage_request = vega10_display_clock_voltage_request,
	.force_clock_level = vega10_force_clock_level,
	.print_clock_levels = vega10_print_clock_levels,
	.display_config_changed = vega10_display_configuration_changed_task,
	.powergate_uvd = vega10_power_gate_uvd,
	.powergate_vce = vega10_power_gate_vce,
	.check_states_equal = vega10_check_states_equal,
	.check_smc_update_required_for_display_configuration =
			vega10_check_smc_update_required_for_display_configuration,
	.power_off_asic = vega10_power_off_asic,
	.disable_smc_firmware_ctf = vega10_thermal_disable_alert,
	.get_sclk_od = vega10_get_sclk_od,
	.set_sclk_od = vega10_set_sclk_od,
	.get_mclk_od = vega10_get_mclk_od,
	.set_mclk_od = vega10_set_mclk_od,
	.avfs_control = vega10_avfs_enable,
	.notify_cac_buffer_info = vega10_notify_cac_buffer_info,
	.get_thermal_temperature_range = vega10_get_thermal_temperature_range,
	.register_irq_handlers = smu9_register_irq_handlers,
	.start_thermal_controller = vega10_start_thermal_controller,
	.get_power_profile_mode = vega10_get_power_profile_mode,
	.set_power_profile_mode = vega10_set_power_profile_mode,
	.set_power_limit = vega10_set_power_limit,
	.odn_edit_dpm_table = vega10_odn_edit_dpm_table,
	.get_performance_level = vega10_get_performance_level,
	.get_asic_baco_capability = smu9_baco_get_capability,
	.get_asic_baco_state = smu9_baco_get_state,
	.set_asic_baco_state = vega10_baco_set_state,
	.enable_mgpu_fan_boost = vega10_enable_mgpu_fan_boost,
	.get_ppfeature_status = vega10_get_ppfeature_status,
	.set_ppfeature_status = vega10_set_ppfeature_status,
	.set_mp1_state = vega10_set_mp1_state,
	.disable_power_features_for_compute_performance =
			vega10_disable_power_features_for_compute_performance,
};
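
/*
 * Illustrative only: vega10_hwmgr_funcs is the per-ASIC vtable that the
 * generic powerplay layer dispatches through once vega10_hwmgr_init()
 * has installed it.  A caller holding a struct pp_hwmgr could do
 * something along these lines (sketch, not code from this file):
 *
 *	uint32_t sclk = 0;
 *	if (hwmgr->hwmgr_func->get_sclk)
 *		sclk = hwmgr->hwmgr_func->get_sclk(hwmgr, false);
 *
 * Hooks left unset stay NULL, which is why callers are expected to
 * check the pointer before dispatching.
 */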

int vega10_hwmgr_init(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = hwmgr->adev;

	hwmgr->hwmgr_func = &vega10_hwmgr_funcs;
	hwmgr->pptable_func = &vega10_pptable_funcs;
	if (amdgpu_passthrough(adev))
		return vega10_baco_set_cap(hwmgr);