/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/slab.h>
27 #include "atom-types.h"
29 #include "processpptables.h"
30 #include "cgs_common.h"
33 #include "hardwaremanager.h"
35 #include "smu10_hwmgr.h"
36 #include "power_state.h"
37 #include "soc15_common.h"
39 #define SMU10_MAX_DEEPSLEEP_DIVIDER_ID 5
40 #define SMU10_MINIMUM_ENGINE_CLOCK 800 /* 8Mhz, the low boundary of engine clock allowed on this chip */
41 #define SCLK_MIN_DIV_INTV_SHIFT 12
42 #define SMU10_DISPCLK_BYPASS_THRESHOLD 10000 /* 100Mhz */
43 #define SMC_RAM_END 0x40000
45 #define mmPWR_MISC_CNTL_STATUS 0x0183
46 #define mmPWR_MISC_CNTL_STATUS_BASE_IDX 0
47 #define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN__SHIFT 0x0
48 #define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT 0x1
49 #define PWR_MISC_CNTL_STATUS__PWR_GFX_RLC_CGPG_EN_MASK 0x00000001L
50 #define PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK 0x00000006L
52 static const unsigned long SMU10_Magic
= (unsigned long) PHM_Rv_Magic
;
55 static int smu10_display_clock_voltage_request(struct pp_hwmgr
*hwmgr
,
56 struct pp_display_clock_request
*clock_req
)
58 struct smu10_hwmgr
*smu10_data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
59 enum amd_pp_clock_type clk_type
= clock_req
->clock_type
;
60 uint32_t clk_freq
= clock_req
->clock_freq_in_khz
/ 1000;
64 case amd_pp_dcf_clock
:
65 if (clk_freq
== smu10_data
->dcf_actual_hard_min_freq
)
67 msg
= PPSMC_MSG_SetHardMinDcefclkByFreq
;
68 smu10_data
->dcf_actual_hard_min_freq
= clk_freq
;
70 case amd_pp_soc_clock
:
71 msg
= PPSMC_MSG_SetHardMinSocclkByFreq
;
74 if (clk_freq
== smu10_data
->f_actual_hard_min_freq
)
76 smu10_data
->f_actual_hard_min_freq
= clk_freq
;
77 msg
= PPSMC_MSG_SetHardMinFclkByFreq
;
80 pr_info("[DisplayClockVoltageRequest]Invalid Clock Type!");
83 smum_send_msg_to_smc_with_parameter(hwmgr
, msg
, clk_freq
);
88 static struct smu10_power_state
*cast_smu10_ps(struct pp_hw_power_state
*hw_ps
)
90 if (SMU10_Magic
!= hw_ps
->magic
)
93 return (struct smu10_power_state
*)hw_ps
;
96 static const struct smu10_power_state
*cast_const_smu10_ps(
97 const struct pp_hw_power_state
*hw_ps
)
99 if (SMU10_Magic
!= hw_ps
->magic
)
102 return (struct smu10_power_state
*)hw_ps
;
105 static int smu10_initialize_dpm_defaults(struct pp_hwmgr
*hwmgr
)
107 struct smu10_hwmgr
*smu10_data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
109 smu10_data
->dce_slow_sclk_threshold
= 30000;
110 smu10_data
->thermal_auto_throttling_treshold
= 0;
111 smu10_data
->is_nb_dpm_enabled
= 1;
112 smu10_data
->dpm_flags
= 1;
113 smu10_data
->need_min_deep_sleep_dcefclk
= true;
114 smu10_data
->num_active_display
= 0;
115 smu10_data
->deep_sleep_dcefclk
= 0;
117 if (hwmgr
->feature_mask
& PP_GFXOFF_MASK
)
118 smu10_data
->gfx_off_controled_by_driver
= true;
120 smu10_data
->gfx_off_controled_by_driver
= false;
122 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
123 PHM_PlatformCaps_SclkDeepSleep
);
125 phm_cap_unset(hwmgr
->platform_descriptor
.platformCaps
,
126 PHM_PlatformCaps_SclkThrottleLowNotification
);
128 phm_cap_set(hwmgr
->platform_descriptor
.platformCaps
,
129 PHM_PlatformCaps_PowerPlaySupport
);
/*
 * Stub: SMU10 does not populate a max power limits table.
 * Kept so callers share one code path across hwmgr generations.
 */
static int smu10_construct_max_power_limits_table(struct pp_hwmgr *hwmgr,
			struct phm_clock_and_voltage_limits *table)
{
	return 0;
}
139 static int smu10_init_dynamic_state_adjustment_rule_settings(
140 struct pp_hwmgr
*hwmgr
)
142 uint32_t table_size
=
143 sizeof(struct phm_clock_voltage_dependency_table
) +
144 (7 * sizeof(struct phm_clock_voltage_dependency_record
));
146 struct phm_clock_voltage_dependency_table
*table_clk_vlt
=
147 kzalloc(table_size
, GFP_KERNEL
);
149 if (NULL
== table_clk_vlt
) {
150 pr_err("Can not allocate memory!\n");
154 table_clk_vlt
->count
= 8;
155 table_clk_vlt
->entries
[0].clk
= PP_DAL_POWERLEVEL_0
;
156 table_clk_vlt
->entries
[0].v
= 0;
157 table_clk_vlt
->entries
[1].clk
= PP_DAL_POWERLEVEL_1
;
158 table_clk_vlt
->entries
[1].v
= 1;
159 table_clk_vlt
->entries
[2].clk
= PP_DAL_POWERLEVEL_2
;
160 table_clk_vlt
->entries
[2].v
= 2;
161 table_clk_vlt
->entries
[3].clk
= PP_DAL_POWERLEVEL_3
;
162 table_clk_vlt
->entries
[3].v
= 3;
163 table_clk_vlt
->entries
[4].clk
= PP_DAL_POWERLEVEL_4
;
164 table_clk_vlt
->entries
[4].v
= 4;
165 table_clk_vlt
->entries
[5].clk
= PP_DAL_POWERLEVEL_5
;
166 table_clk_vlt
->entries
[5].v
= 5;
167 table_clk_vlt
->entries
[6].clk
= PP_DAL_POWERLEVEL_6
;
168 table_clk_vlt
->entries
[6].v
= 6;
169 table_clk_vlt
->entries
[7].clk
= PP_DAL_POWERLEVEL_7
;
170 table_clk_vlt
->entries
[7].v
= 7;
171 hwmgr
->dyn_state
.vddc_dep_on_dal_pwrl
= table_clk_vlt
;
176 static int smu10_get_system_info_data(struct pp_hwmgr
*hwmgr
)
178 struct smu10_hwmgr
*smu10_data
= (struct smu10_hwmgr
*)hwmgr
->backend
;
180 smu10_data
->sys_info
.htc_hyst_lmt
= 5;
181 smu10_data
->sys_info
.htc_tmp_lmt
= 203;
183 if (smu10_data
->thermal_auto_throttling_treshold
== 0)
184 smu10_data
->thermal_auto_throttling_treshold
= 203;
186 smu10_construct_max_power_limits_table (hwmgr
,
187 &hwmgr
->dyn_state
.max_clock_voltage_on_ac
);
189 smu10_init_dynamic_state_adjustment_rule_settings(hwmgr
);
/* Stub: no boot state needs constructing on SMU10. */
static int smu10_construct_boot_state(struct pp_hwmgr *hwmgr)
{
	return 0;
}
199 static int smu10_set_clock_limit(struct pp_hwmgr
*hwmgr
, const void *input
)
201 struct PP_Clocks clocks
= {0};
202 struct pp_display_clock_request clock_req
;
204 clocks
.dcefClock
= hwmgr
->display_config
->min_dcef_set_clk
;
205 clock_req
.clock_type
= amd_pp_dcf_clock
;
206 clock_req
.clock_freq_in_khz
= clocks
.dcefClock
* 10;
208 PP_ASSERT_WITH_CODE(!smu10_display_clock_voltage_request(hwmgr
, &clock_req
),
209 "Attempt to set DCF Clock Failed!", return -EINVAL
);
214 static int smu10_set_deep_sleep_dcefclk(struct pp_hwmgr
*hwmgr
, uint32_t clock
)
216 struct smu10_hwmgr
*smu10_data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
218 if (smu10_data
->need_min_deep_sleep_dcefclk
&& smu10_data
->deep_sleep_dcefclk
!= clock
/100) {
219 smu10_data
->deep_sleep_dcefclk
= clock
/100;
220 smum_send_msg_to_smc_with_parameter(hwmgr
,
221 PPSMC_MSG_SetMinDeepSleepDcefclk
,
222 smu10_data
->deep_sleep_dcefclk
);
227 static int smu10_set_active_display_count(struct pp_hwmgr
*hwmgr
, uint32_t count
)
229 struct smu10_hwmgr
*smu10_data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
231 if (smu10_data
->num_active_display
!= count
) {
232 smu10_data
->num_active_display
= count
;
233 smum_send_msg_to_smc_with_parameter(hwmgr
,
234 PPSMC_MSG_SetDisplayCount
,
235 smu10_data
->num_active_display
);
/* Power-state transition work on SMU10 reduces to refreshing clock limits. */
static int smu10_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
{
	return smu10_set_clock_limit(hwmgr, input);
}
246 static int smu10_init_power_gate_state(struct pp_hwmgr
*hwmgr
)
248 struct smu10_hwmgr
*smu10_data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
249 struct amdgpu_device
*adev
= hwmgr
->adev
;
251 smu10_data
->vcn_power_gated
= true;
252 smu10_data
->isp_tileA_power_gated
= true;
253 smu10_data
->isp_tileB_power_gated
= true;
255 if (adev
->pg_flags
& AMD_PG_SUPPORT_GFX_PG
)
256 return smum_send_msg_to_smc_with_parameter(hwmgr
,
257 PPSMC_MSG_SetGfxCGPG
,
/* ASIC setup on SMU10 only needs the power-gate state initialized. */
static int smu10_setup_asic_task(struct pp_hwmgr *hwmgr)
{
	return smu10_init_power_gate_state(hwmgr);
}
269 static int smu10_reset_cc6_data(struct pp_hwmgr
*hwmgr
)
271 struct smu10_hwmgr
*smu10_data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
273 smu10_data
->separation_time
= 0;
274 smu10_data
->cc6_disable
= false;
275 smu10_data
->pstate_disable
= false;
276 smu10_data
->cc6_setting_changed
= false;
/* Powering off the ASIC only requires resetting the cached CC6 data. */
static int smu10_power_off_asic(struct pp_hwmgr *hwmgr)
{
	return smu10_reset_cc6_data(hwmgr);
}
286 static bool smu10_is_gfx_on(struct pp_hwmgr
*hwmgr
)
289 struct amdgpu_device
*adev
= hwmgr
->adev
;
291 reg
= RREG32_SOC15(PWR
, 0, mmPWR_MISC_CNTL_STATUS
);
292 if ((reg
& PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS_MASK
) ==
293 (0x2 << PWR_MISC_CNTL_STATUS__PWR_GFXOFF_STATUS__SHIFT
))
299 static int smu10_disable_gfx_off(struct pp_hwmgr
*hwmgr
)
301 struct smu10_hwmgr
*smu10_data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
303 if (smu10_data
->gfx_off_controled_by_driver
) {
304 smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_DisableGfxOff
);
306 /* confirm gfx is back to "on" state */
307 while (!smu10_is_gfx_on(hwmgr
))
/* Stub: SMU10 firmware manages DPM; nothing to disable from the driver. */
static int smu10_disable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	return 0;
}
319 static int smu10_enable_gfx_off(struct pp_hwmgr
*hwmgr
)
321 struct smu10_hwmgr
*smu10_data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
323 if (smu10_data
->gfx_off_controled_by_driver
)
324 smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_EnableGfxOff
);
/* Stub: SMU10 firmware manages DPM; nothing to enable from the driver. */
static int smu10_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
{
	return 0;
}
334 static int smu10_gfx_off_control(struct pp_hwmgr
*hwmgr
, bool enable
)
337 return smu10_enable_gfx_off(hwmgr
);
339 return smu10_disable_gfx_off(hwmgr
);
/* Stub: no state-adjustment rules are applied on SMU10. */
static int smu10_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
				struct pp_power_state *prequest_ps,
			const struct pp_power_state *pcurrent_ps)
{
	return 0;
}
349 /* temporary hardcoded clock voltage breakdown tables */
350 static const DpmClock_t VddDcfClk
[]= {
356 static const DpmClock_t VddSocClk
[]= {
362 static const DpmClock_t VddFClk
[]= {
368 static const DpmClock_t VddDispClk
[]= {
374 static const DpmClock_t VddDppClk
[]= {
380 static const DpmClock_t VddPhyClk
[]= {
386 static int smu10_get_clock_voltage_dependency_table(struct pp_hwmgr
*hwmgr
,
387 struct smu10_voltage_dependency_table
**pptable
,
388 uint32_t num_entry
, const DpmClock_t
*pclk_dependency_table
)
390 uint32_t table_size
, i
;
391 struct smu10_voltage_dependency_table
*ptable
;
393 table_size
= sizeof(uint32_t) + sizeof(struct smu10_voltage_dependency_table
) * num_entry
;
394 ptable
= kzalloc(table_size
, GFP_KERNEL
);
399 ptable
->count
= num_entry
;
401 for (i
= 0; i
< ptable
->count
; i
++) {
402 ptable
->entries
[i
].clk
= pclk_dependency_table
->Freq
* 100;
403 ptable
->entries
[i
].vol
= pclk_dependency_table
->Vol
;
404 pclk_dependency_table
++;
413 static int smu10_populate_clock_table(struct pp_hwmgr
*hwmgr
)
417 struct smu10_hwmgr
*smu10_data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
418 DpmClocks_t
*table
= &(smu10_data
->clock_table
);
419 struct smu10_clock_voltage_information
*pinfo
= &(smu10_data
->clock_vol_info
);
421 result
= smum_smc_table_manager(hwmgr
, (uint8_t *)table
, SMU10_CLOCKTABLE
, true);
423 PP_ASSERT_WITH_CODE((0 == result
),
424 "Attempt to copy clock table from smc failed",
427 if (0 == result
&& table
->DcefClocks
[0].Freq
!= 0) {
428 smu10_get_clock_voltage_dependency_table(hwmgr
, &pinfo
->vdd_dep_on_dcefclk
,
429 NUM_DCEFCLK_DPM_LEVELS
,
430 &smu10_data
->clock_table
.DcefClocks
[0]);
431 smu10_get_clock_voltage_dependency_table(hwmgr
, &pinfo
->vdd_dep_on_socclk
,
432 NUM_SOCCLK_DPM_LEVELS
,
433 &smu10_data
->clock_table
.SocClocks
[0]);
434 smu10_get_clock_voltage_dependency_table(hwmgr
, &pinfo
->vdd_dep_on_fclk
,
436 &smu10_data
->clock_table
.FClocks
[0]);
437 smu10_get_clock_voltage_dependency_table(hwmgr
, &pinfo
->vdd_dep_on_mclk
,
438 NUM_MEMCLK_DPM_LEVELS
,
439 &smu10_data
->clock_table
.MemClocks
[0]);
441 smu10_get_clock_voltage_dependency_table(hwmgr
, &pinfo
->vdd_dep_on_dcefclk
,
442 ARRAY_SIZE(VddDcfClk
),
444 smu10_get_clock_voltage_dependency_table(hwmgr
, &pinfo
->vdd_dep_on_socclk
,
445 ARRAY_SIZE(VddSocClk
),
447 smu10_get_clock_voltage_dependency_table(hwmgr
, &pinfo
->vdd_dep_on_fclk
,
451 smu10_get_clock_voltage_dependency_table(hwmgr
, &pinfo
->vdd_dep_on_dispclk
,
452 ARRAY_SIZE(VddDispClk
),
454 smu10_get_clock_voltage_dependency_table(hwmgr
, &pinfo
->vdd_dep_on_dppclk
,
455 ARRAY_SIZE(VddDppClk
), &VddDppClk
[0]);
456 smu10_get_clock_voltage_dependency_table(hwmgr
, &pinfo
->vdd_dep_on_phyclk
,
457 ARRAY_SIZE(VddPhyClk
), &VddPhyClk
[0]);
459 smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_GetMinGfxclkFrequency
);
460 result
= smum_get_argument(hwmgr
);
461 smu10_data
->gfx_min_freq_limit
= result
/ 10 * 1000;
463 smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_GetMaxGfxclkFrequency
);
464 result
= smum_get_argument(hwmgr
);
465 smu10_data
->gfx_max_freq_limit
= result
/ 10 * 1000;
470 static int smu10_hwmgr_backend_init(struct pp_hwmgr
*hwmgr
)
473 struct smu10_hwmgr
*data
;
475 data
= kzalloc(sizeof(struct smu10_hwmgr
), GFP_KERNEL
);
479 hwmgr
->backend
= data
;
481 result
= smu10_initialize_dpm_defaults(hwmgr
);
483 pr_err("smu10_initialize_dpm_defaults failed\n");
487 smu10_populate_clock_table(hwmgr
);
489 result
= smu10_get_system_info_data(hwmgr
);
491 pr_err("smu10_get_system_info_data failed\n");
495 smu10_construct_boot_state(hwmgr
);
497 hwmgr
->platform_descriptor
.hardwareActivityPerformanceLevels
=
498 SMU10_MAX_HARDWARE_POWERLEVELS
;
500 hwmgr
->platform_descriptor
.hardwarePerformanceLevels
=
501 SMU10_MAX_HARDWARE_POWERLEVELS
;
503 hwmgr
->platform_descriptor
.vbiosInterruptId
= 0;
505 hwmgr
->platform_descriptor
.clockStep
.engineClock
= 500;
507 hwmgr
->platform_descriptor
.clockStep
.memoryClock
= 500;
509 hwmgr
->platform_descriptor
.minimumClocksReductionPercentage
= 50;
511 hwmgr
->pstate_sclk
= SMU10_UMD_PSTATE_GFXCLK
* 100;
512 hwmgr
->pstate_mclk
= SMU10_UMD_PSTATE_FCLK
* 100;
517 static int smu10_hwmgr_backend_fini(struct pp_hwmgr
*hwmgr
)
519 struct smu10_hwmgr
*smu10_data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
520 struct smu10_clock_voltage_information
*pinfo
= &(smu10_data
->clock_vol_info
);
522 kfree(pinfo
->vdd_dep_on_dcefclk
);
523 pinfo
->vdd_dep_on_dcefclk
= NULL
;
524 kfree(pinfo
->vdd_dep_on_socclk
);
525 pinfo
->vdd_dep_on_socclk
= NULL
;
526 kfree(pinfo
->vdd_dep_on_fclk
);
527 pinfo
->vdd_dep_on_fclk
= NULL
;
528 kfree(pinfo
->vdd_dep_on_dispclk
);
529 pinfo
->vdd_dep_on_dispclk
= NULL
;
530 kfree(pinfo
->vdd_dep_on_dppclk
);
531 pinfo
->vdd_dep_on_dppclk
= NULL
;
532 kfree(pinfo
->vdd_dep_on_phyclk
);
533 pinfo
->vdd_dep_on_phyclk
= NULL
;
535 kfree(hwmgr
->dyn_state
.vddc_dep_on_dal_pwrl
);
536 hwmgr
->dyn_state
.vddc_dep_on_dal_pwrl
= NULL
;
538 kfree(hwmgr
->backend
);
539 hwmgr
->backend
= NULL
;
544 static int smu10_dpm_force_dpm_level(struct pp_hwmgr
*hwmgr
,
545 enum amd_dpm_forced_level level
)
547 struct smu10_hwmgr
*data
= hwmgr
->backend
;
549 if (hwmgr
->smu_version
< 0x1E3700) {
550 pr_info("smu firmware version too old, can not set dpm level\n");
555 case AMD_DPM_FORCED_LEVEL_HIGH
:
556 case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
:
557 smum_send_msg_to_smc_with_parameter(hwmgr
,
558 PPSMC_MSG_SetHardMinGfxClk
,
559 data
->gfx_max_freq_limit
/100);
560 smum_send_msg_to_smc_with_parameter(hwmgr
,
561 PPSMC_MSG_SetHardMinFclkByFreq
,
562 SMU10_UMD_PSTATE_PEAK_FCLK
);
563 smum_send_msg_to_smc_with_parameter(hwmgr
,
564 PPSMC_MSG_SetHardMinSocclkByFreq
,
565 SMU10_UMD_PSTATE_PEAK_SOCCLK
);
566 smum_send_msg_to_smc_with_parameter(hwmgr
,
567 PPSMC_MSG_SetHardMinVcn
,
568 SMU10_UMD_PSTATE_VCE
);
570 smum_send_msg_to_smc_with_parameter(hwmgr
,
571 PPSMC_MSG_SetSoftMaxGfxClk
,
572 data
->gfx_max_freq_limit
/100);
573 smum_send_msg_to_smc_with_parameter(hwmgr
,
574 PPSMC_MSG_SetSoftMaxFclkByFreq
,
575 SMU10_UMD_PSTATE_PEAK_FCLK
);
576 smum_send_msg_to_smc_with_parameter(hwmgr
,
577 PPSMC_MSG_SetSoftMaxSocclkByFreq
,
578 SMU10_UMD_PSTATE_PEAK_SOCCLK
);
579 smum_send_msg_to_smc_with_parameter(hwmgr
,
580 PPSMC_MSG_SetSoftMaxVcn
,
581 SMU10_UMD_PSTATE_VCE
);
583 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK
:
584 smum_send_msg_to_smc_with_parameter(hwmgr
,
585 PPSMC_MSG_SetHardMinGfxClk
,
586 data
->gfx_min_freq_limit
/100);
587 smum_send_msg_to_smc_with_parameter(hwmgr
,
588 PPSMC_MSG_SetSoftMaxGfxClk
,
589 data
->gfx_min_freq_limit
/100);
591 case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK
:
592 smum_send_msg_to_smc_with_parameter(hwmgr
,
593 PPSMC_MSG_SetHardMinFclkByFreq
,
594 SMU10_UMD_PSTATE_MIN_FCLK
);
595 smum_send_msg_to_smc_with_parameter(hwmgr
,
596 PPSMC_MSG_SetSoftMaxFclkByFreq
,
597 SMU10_UMD_PSTATE_MIN_FCLK
);
599 case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD
:
600 smum_send_msg_to_smc_with_parameter(hwmgr
,
601 PPSMC_MSG_SetHardMinGfxClk
,
602 SMU10_UMD_PSTATE_GFXCLK
);
603 smum_send_msg_to_smc_with_parameter(hwmgr
,
604 PPSMC_MSG_SetHardMinFclkByFreq
,
605 SMU10_UMD_PSTATE_FCLK
);
606 smum_send_msg_to_smc_with_parameter(hwmgr
,
607 PPSMC_MSG_SetHardMinSocclkByFreq
,
608 SMU10_UMD_PSTATE_SOCCLK
);
609 smum_send_msg_to_smc_with_parameter(hwmgr
,
610 PPSMC_MSG_SetHardMinVcn
,
611 SMU10_UMD_PSTATE_VCE
);
613 smum_send_msg_to_smc_with_parameter(hwmgr
,
614 PPSMC_MSG_SetSoftMaxGfxClk
,
615 SMU10_UMD_PSTATE_GFXCLK
);
616 smum_send_msg_to_smc_with_parameter(hwmgr
,
617 PPSMC_MSG_SetSoftMaxFclkByFreq
,
618 SMU10_UMD_PSTATE_FCLK
);
619 smum_send_msg_to_smc_with_parameter(hwmgr
,
620 PPSMC_MSG_SetSoftMaxSocclkByFreq
,
621 SMU10_UMD_PSTATE_SOCCLK
);
622 smum_send_msg_to_smc_with_parameter(hwmgr
,
623 PPSMC_MSG_SetSoftMaxVcn
,
624 SMU10_UMD_PSTATE_VCE
);
626 case AMD_DPM_FORCED_LEVEL_AUTO
:
627 smum_send_msg_to_smc_with_parameter(hwmgr
,
628 PPSMC_MSG_SetHardMinGfxClk
,
629 data
->gfx_min_freq_limit
/100);
630 smum_send_msg_to_smc_with_parameter(hwmgr
,
631 PPSMC_MSG_SetHardMinFclkByFreq
,
632 hwmgr
->display_config
->num_display
> 3 ?
633 SMU10_UMD_PSTATE_PEAK_FCLK
:
634 SMU10_UMD_PSTATE_MIN_FCLK
);
636 smum_send_msg_to_smc_with_parameter(hwmgr
,
637 PPSMC_MSG_SetHardMinSocclkByFreq
,
638 SMU10_UMD_PSTATE_MIN_SOCCLK
);
639 smum_send_msg_to_smc_with_parameter(hwmgr
,
640 PPSMC_MSG_SetHardMinVcn
,
641 SMU10_UMD_PSTATE_MIN_VCE
);
643 smum_send_msg_to_smc_with_parameter(hwmgr
,
644 PPSMC_MSG_SetSoftMaxGfxClk
,
645 data
->gfx_max_freq_limit
/100);
646 smum_send_msg_to_smc_with_parameter(hwmgr
,
647 PPSMC_MSG_SetSoftMaxFclkByFreq
,
648 SMU10_UMD_PSTATE_PEAK_FCLK
);
649 smum_send_msg_to_smc_with_parameter(hwmgr
,
650 PPSMC_MSG_SetSoftMaxSocclkByFreq
,
651 SMU10_UMD_PSTATE_PEAK_SOCCLK
);
652 smum_send_msg_to_smc_with_parameter(hwmgr
,
653 PPSMC_MSG_SetSoftMaxVcn
,
654 SMU10_UMD_PSTATE_VCE
);
656 case AMD_DPM_FORCED_LEVEL_LOW
:
657 smum_send_msg_to_smc_with_parameter(hwmgr
,
658 PPSMC_MSG_SetHardMinGfxClk
,
659 data
->gfx_min_freq_limit
/100);
660 smum_send_msg_to_smc_with_parameter(hwmgr
,
661 PPSMC_MSG_SetSoftMaxGfxClk
,
662 data
->gfx_min_freq_limit
/100);
663 smum_send_msg_to_smc_with_parameter(hwmgr
,
664 PPSMC_MSG_SetHardMinFclkByFreq
,
665 SMU10_UMD_PSTATE_MIN_FCLK
);
666 smum_send_msg_to_smc_with_parameter(hwmgr
,
667 PPSMC_MSG_SetSoftMaxFclkByFreq
,
668 SMU10_UMD_PSTATE_MIN_FCLK
);
670 case AMD_DPM_FORCED_LEVEL_MANUAL
:
671 case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT
:
678 static uint32_t smu10_dpm_get_mclk(struct pp_hwmgr
*hwmgr
, bool low
)
680 struct smu10_hwmgr
*data
;
685 data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
688 return data
->clock_vol_info
.vdd_dep_on_fclk
->entries
[0].clk
;
690 return data
->clock_vol_info
.vdd_dep_on_fclk
->entries
[
691 data
->clock_vol_info
.vdd_dep_on_fclk
->count
- 1].clk
;
694 static uint32_t smu10_dpm_get_sclk(struct pp_hwmgr
*hwmgr
, bool low
)
696 struct smu10_hwmgr
*data
;
701 data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
704 return data
->gfx_min_freq_limit
;
706 return data
->gfx_max_freq_limit
;
/* Stub: the boot state needs no patching on SMU10. */
static int smu10_dpm_patch_boot_state(struct pp_hwmgr *hwmgr,
					struct pp_hw_power_state *hw_ps)
{
	return 0;
}
715 static int smu10_dpm_get_pp_table_entry_callback(
716 struct pp_hwmgr
*hwmgr
,
717 struct pp_hw_power_state
*hw_ps
,
719 const void *clock_info
)
721 struct smu10_power_state
*smu10_ps
= cast_smu10_ps(hw_ps
);
723 smu10_ps
->levels
[index
].engine_clock
= 0;
725 smu10_ps
->levels
[index
].vddc_index
= 0;
726 smu10_ps
->level
= index
+ 1;
728 if (phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
, PHM_PlatformCaps_SclkDeepSleep
)) {
729 smu10_ps
->levels
[index
].ds_divider_index
= 5;
730 smu10_ps
->levels
[index
].ss_divider_index
= 5;
/* Query the powerplay table for its entry count; 0 on lookup failure. */
static int smu10_dpm_get_num_of_pp_table_entries(struct pp_hwmgr *hwmgr)
{
	int result;
	unsigned long ret = 0;

	result = pp_tables_get_num_of_entries(hwmgr, &ret);

	return result ? 0 : ret;
}
746 static int smu10_dpm_get_pp_table_entry(struct pp_hwmgr
*hwmgr
,
747 unsigned long entry
, struct pp_power_state
*ps
)
750 struct smu10_power_state
*smu10_ps
;
752 ps
->hardware
.magic
= SMU10_Magic
;
754 smu10_ps
= cast_smu10_ps(&(ps
->hardware
));
756 result
= pp_tables_get_entry(hwmgr
, entry
, ps
,
757 smu10_dpm_get_pp_table_entry_callback
);
759 smu10_ps
->uvd_clocks
.vclk
= ps
->uvd_clocks
.VCLK
;
760 smu10_ps
->uvd_clocks
.dclk
= ps
->uvd_clocks
.DCLK
;
765 static int smu10_get_power_state_size(struct pp_hwmgr
*hwmgr
)
767 return sizeof(struct smu10_power_state
);
/* Stub: CPU power state is not driver-managed on SMU10. */
static int smu10_set_cpu_power_state(struct pp_hwmgr *hwmgr)
{
	return 0;
}
776 static int smu10_store_cc6_data(struct pp_hwmgr
*hwmgr
, uint32_t separation_time
,
777 bool cc6_disable
, bool pstate_disable
, bool pstate_switch_disable
)
779 struct smu10_hwmgr
*data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
781 if (separation_time
!= data
->separation_time
||
782 cc6_disable
!= data
->cc6_disable
||
783 pstate_disable
!= data
->pstate_disable
) {
784 data
->separation_time
= separation_time
;
785 data
->cc6_disable
= cc6_disable
;
786 data
->pstate_disable
= pstate_disable
;
787 data
->cc6_setting_changed
= true;
792 static int smu10_get_dal_power_level(struct pp_hwmgr
*hwmgr
,
793 struct amd_pp_simple_clock_info
*info
)
798 static int smu10_force_clock_level(struct pp_hwmgr
*hwmgr
,
799 enum pp_clock_type type
, uint32_t mask
)
801 struct smu10_hwmgr
*data
= hwmgr
->backend
;
802 struct smu10_voltage_dependency_table
*mclk_table
=
803 data
->clock_vol_info
.vdd_dep_on_fclk
;
806 low
= mask
? (ffs(mask
) - 1) : 0;
807 high
= mask
? (fls(mask
) - 1) : 0;
811 if (low
> 2 || high
> 2) {
812 pr_info("Currently sclk only support 3 levels on RV\n");
816 smum_send_msg_to_smc_with_parameter(hwmgr
,
817 PPSMC_MSG_SetHardMinGfxClk
,
818 low
== 2 ? data
->gfx_max_freq_limit
/100 :
819 low
== 1 ? SMU10_UMD_PSTATE_GFXCLK
:
820 data
->gfx_min_freq_limit
/100);
822 smum_send_msg_to_smc_with_parameter(hwmgr
,
823 PPSMC_MSG_SetSoftMaxGfxClk
,
824 high
== 0 ? data
->gfx_min_freq_limit
/100 :
825 high
== 1 ? SMU10_UMD_PSTATE_GFXCLK
:
826 data
->gfx_max_freq_limit
/100);
830 if (low
> mclk_table
->count
- 1 || high
> mclk_table
->count
- 1)
833 smum_send_msg_to_smc_with_parameter(hwmgr
,
834 PPSMC_MSG_SetHardMinFclkByFreq
,
835 mclk_table
->entries
[low
].clk
/100);
837 smum_send_msg_to_smc_with_parameter(hwmgr
,
838 PPSMC_MSG_SetSoftMaxFclkByFreq
,
839 mclk_table
->entries
[high
].clk
/100);
849 static int smu10_print_clock_levels(struct pp_hwmgr
*hwmgr
,
850 enum pp_clock_type type
, char *buf
)
852 struct smu10_hwmgr
*data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
853 struct smu10_voltage_dependency_table
*mclk_table
=
854 data
->clock_vol_info
.vdd_dep_on_fclk
;
855 uint32_t i
, now
, size
= 0;
859 smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_GetGfxclkFrequency
);
860 now
= smum_get_argument(hwmgr
);
862 /* driver only know min/max gfx_clk, Add level 1 for all other gfx clks */
863 if (now
== data
->gfx_max_freq_limit
/100)
865 else if (now
== data
->gfx_min_freq_limit
/100)
870 size
+= sprintf(buf
+ size
, "0: %uMhz %s\n",
871 data
->gfx_min_freq_limit
/100,
873 size
+= sprintf(buf
+ size
, "1: %uMhz %s\n",
874 i
== 1 ? now
: SMU10_UMD_PSTATE_GFXCLK
,
876 size
+= sprintf(buf
+ size
, "2: %uMhz %s\n",
877 data
->gfx_max_freq_limit
/100,
881 smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_GetFclkFrequency
);
882 now
= smum_get_argument(hwmgr
);
884 for (i
= 0; i
< mclk_table
->count
; i
++)
885 size
+= sprintf(buf
+ size
, "%d: %uMhz %s\n",
887 mclk_table
->entries
[i
].clk
/ 100,
888 ((mclk_table
->entries
[i
].clk
/ 100)
898 static int smu10_get_performance_level(struct pp_hwmgr
*hwmgr
, const struct pp_hw_power_state
*state
,
899 PHM_PerformanceLevelDesignation designation
, uint32_t index
,
900 PHM_PerformanceLevel
*level
)
902 struct smu10_hwmgr
*data
;
904 if (level
== NULL
|| hwmgr
== NULL
|| state
== NULL
)
907 data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
910 level
->memory_clock
= data
->clock_vol_info
.vdd_dep_on_fclk
->entries
[0].clk
;
911 level
->coreClock
= data
->gfx_min_freq_limit
;
913 level
->memory_clock
= data
->clock_vol_info
.vdd_dep_on_fclk
->entries
[
914 data
->clock_vol_info
.vdd_dep_on_fclk
->count
- 1].clk
;
915 level
->coreClock
= data
->gfx_max_freq_limit
;
918 level
->nonLocalMemoryFreq
= 0;
919 level
->nonLocalMemoryWidth
= 0;
924 static int smu10_get_current_shallow_sleep_clocks(struct pp_hwmgr
*hwmgr
,
925 const struct pp_hw_power_state
*state
, struct pp_clock_info
*clock_info
)
927 const struct smu10_power_state
*ps
= cast_const_smu10_ps(state
);
929 clock_info
->min_eng_clk
= ps
->levels
[0].engine_clock
/ (1 << (ps
->levels
[0].ss_divider_index
));
930 clock_info
->max_eng_clk
= ps
->levels
[ps
->level
- 1].engine_clock
/ (1 << (ps
->levels
[ps
->level
- 1].ss_divider_index
));
935 #define MEM_FREQ_LOW_LATENCY 25000
936 #define MEM_FREQ_HIGH_LATENCY 80000
937 #define MEM_LATENCY_HIGH 245
938 #define MEM_LATENCY_LOW 35
939 #define MEM_LATENCY_ERR 0xFFFF
942 static uint32_t smu10_get_mem_latency(struct pp_hwmgr
*hwmgr
,
945 if (clock
>= MEM_FREQ_LOW_LATENCY
&&
946 clock
< MEM_FREQ_HIGH_LATENCY
)
947 return MEM_LATENCY_HIGH
;
948 else if (clock
>= MEM_FREQ_HIGH_LATENCY
)
949 return MEM_LATENCY_LOW
;
951 return MEM_LATENCY_ERR
;
954 static int smu10_get_clock_by_type_with_latency(struct pp_hwmgr
*hwmgr
,
955 enum amd_pp_clock_type type
,
956 struct pp_clock_levels_with_latency
*clocks
)
959 struct smu10_hwmgr
*smu10_data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
960 struct smu10_clock_voltage_information
*pinfo
= &(smu10_data
->clock_vol_info
);
961 struct smu10_voltage_dependency_table
*pclk_vol_table
;
962 bool latency_required
= false;
968 case amd_pp_mem_clock
:
969 pclk_vol_table
= pinfo
->vdd_dep_on_mclk
;
970 latency_required
= true;
973 pclk_vol_table
= pinfo
->vdd_dep_on_fclk
;
974 latency_required
= true;
976 case amd_pp_dcf_clock
:
977 pclk_vol_table
= pinfo
->vdd_dep_on_dcefclk
;
979 case amd_pp_disp_clock
:
980 pclk_vol_table
= pinfo
->vdd_dep_on_dispclk
;
982 case amd_pp_phy_clock
:
983 pclk_vol_table
= pinfo
->vdd_dep_on_phyclk
;
985 case amd_pp_dpp_clock
:
986 pclk_vol_table
= pinfo
->vdd_dep_on_dppclk
;
992 if (pclk_vol_table
== NULL
|| pclk_vol_table
->count
== 0)
995 clocks
->num_levels
= 0;
996 for (i
= 0; i
< pclk_vol_table
->count
; i
++) {
997 if (pclk_vol_table
->entries
[i
].clk
) {
998 clocks
->data
[clocks
->num_levels
].clocks_in_khz
=
999 pclk_vol_table
->entries
[i
].clk
* 10;
1000 clocks
->data
[clocks
->num_levels
].latency_in_us
= latency_required
?
1001 smu10_get_mem_latency(hwmgr
,
1002 pclk_vol_table
->entries
[i
].clk
) :
1004 clocks
->num_levels
++;
1011 static int smu10_get_clock_by_type_with_voltage(struct pp_hwmgr
*hwmgr
,
1012 enum amd_pp_clock_type type
,
1013 struct pp_clock_levels_with_voltage
*clocks
)
1016 struct smu10_hwmgr
*smu10_data
= (struct smu10_hwmgr
*)(hwmgr
->backend
);
1017 struct smu10_clock_voltage_information
*pinfo
= &(smu10_data
->clock_vol_info
);
1018 struct smu10_voltage_dependency_table
*pclk_vol_table
= NULL
;
1024 case amd_pp_mem_clock
:
1025 pclk_vol_table
= pinfo
->vdd_dep_on_mclk
;
1027 case amd_pp_f_clock
:
1028 pclk_vol_table
= pinfo
->vdd_dep_on_fclk
;
1030 case amd_pp_dcf_clock
:
1031 pclk_vol_table
= pinfo
->vdd_dep_on_dcefclk
;
1033 case amd_pp_soc_clock
:
1034 pclk_vol_table
= pinfo
->vdd_dep_on_socclk
;
1036 case amd_pp_disp_clock
:
1037 pclk_vol_table
= pinfo
->vdd_dep_on_dispclk
;
1039 case amd_pp_phy_clock
:
1040 pclk_vol_table
= pinfo
->vdd_dep_on_phyclk
;
1046 if (pclk_vol_table
== NULL
|| pclk_vol_table
->count
== 0)
1049 clocks
->num_levels
= 0;
1050 for (i
= 0; i
< pclk_vol_table
->count
; i
++) {
1051 if (pclk_vol_table
->entries
[i
].clk
) {
1052 clocks
->data
[clocks
->num_levels
].clocks_in_khz
= pclk_vol_table
->entries
[i
].clk
* 10;
1053 clocks
->data
[clocks
->num_levels
].voltage_in_mv
= pclk_vol_table
->entries
[i
].vol
;
1054 clocks
->num_levels
++;
1063 static int smu10_get_max_high_clocks(struct pp_hwmgr
*hwmgr
, struct amd_pp_simple_clock_info
*clocks
)
1065 clocks
->engine_max_clock
= 80000; /* driver can't get engine clock, temp hard code to 800MHz */
1069 static int smu10_thermal_get_temperature(struct pp_hwmgr
*hwmgr
)
1071 struct amdgpu_device
*adev
= hwmgr
->adev
;
1072 uint32_t reg_value
= RREG32_SOC15(THM
, 0, mmTHM_TCON_CUR_TMP
);
1074 (reg_value
& THM_TCON_CUR_TMP__CUR_TEMP_MASK
) >> THM_TCON_CUR_TMP__CUR_TEMP__SHIFT
;
1076 if (cur_temp
& THM_TCON_CUR_TMP__CUR_TEMP_RANGE_SEL_MASK
)
1077 cur_temp
= ((cur_temp
/ 8) - 49) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES
;
1079 cur_temp
= (cur_temp
/ 8) * PP_TEMPERATURE_UNITS_PER_CENTIGRADES
;
1084 static int smu10_read_sensor(struct pp_hwmgr
*hwmgr
, int idx
,
1085 void *value
, int *size
)
1087 uint32_t sclk
, mclk
;
1091 case AMDGPU_PP_SENSOR_GFX_SCLK
:
1092 smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_GetGfxclkFrequency
);
1093 sclk
= smum_get_argument(hwmgr
);
1094 /* in units of 10KHZ */
1095 *((uint32_t *)value
) = sclk
* 100;
1098 case AMDGPU_PP_SENSOR_GFX_MCLK
:
1099 smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_GetFclkFrequency
);
1100 mclk
= smum_get_argument(hwmgr
);
1101 /* in units of 10KHZ */
1102 *((uint32_t *)value
) = mclk
* 100;
1105 case AMDGPU_PP_SENSOR_GPU_TEMP
:
1106 *((uint32_t *)value
) = smu10_thermal_get_temperature(hwmgr
);
1116 static int smu10_set_watermarks_for_clocks_ranges(struct pp_hwmgr
*hwmgr
,
1119 struct smu10_hwmgr
*data
= hwmgr
->backend
;
1120 struct dm_pp_wm_sets_with_clock_ranges_soc15
*wm_with_clock_ranges
= clock_ranges
;
1121 Watermarks_t
*table
= &(data
->water_marks_table
);
1124 smu_set_watermarks_for_clocks_ranges(table
,wm_with_clock_ranges
);
1125 smum_smc_table_manager(hwmgr
, (uint8_t *)table
, (uint16_t)SMU10_WMTABLE
, false);
1126 data
->water_marks_exist
= true;
1130 static int smu10_smus_notify_pwe(struct pp_hwmgr
*hwmgr
)
1133 return smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_SetRccPfcPmeRestoreRegister
);
1136 static int smu10_powergate_mmhub(struct pp_hwmgr
*hwmgr
)
1138 return smum_send_msg_to_smc(hwmgr
, PPSMC_MSG_PowerGateMmHub
);
1141 static void smu10_powergate_vcn(struct pp_hwmgr
*hwmgr
, bool bgate
)
1144 amdgpu_device_ip_set_powergating_state(hwmgr
->adev
,
1145 AMD_IP_BLOCK_TYPE_VCN
,
1147 smum_send_msg_to_smc_with_parameter(hwmgr
,
1148 PPSMC_MSG_PowerDownVcn
, 0);
1150 smum_send_msg_to_smc_with_parameter(hwmgr
,
1151 PPSMC_MSG_PowerUpVcn
, 0);
1152 amdgpu_device_ip_set_powergating_state(hwmgr
->adev
,
1153 AMD_IP_BLOCK_TYPE_VCN
,
1154 AMD_PG_STATE_UNGATE
);
1158 static const struct pp_hwmgr_func smu10_hwmgr_funcs
= {
1159 .backend_init
= smu10_hwmgr_backend_init
,
1160 .backend_fini
= smu10_hwmgr_backend_fini
,
1162 .apply_state_adjust_rules
= smu10_apply_state_adjust_rules
,
1163 .force_dpm_level
= smu10_dpm_force_dpm_level
,
1164 .get_power_state_size
= smu10_get_power_state_size
,
1165 .powerdown_uvd
= NULL
,
1166 .powergate_uvd
= smu10_powergate_vcn
,
1167 .powergate_vce
= NULL
,
1168 .get_mclk
= smu10_dpm_get_mclk
,
1169 .get_sclk
= smu10_dpm_get_sclk
,
1170 .patch_boot_state
= smu10_dpm_patch_boot_state
,
1171 .get_pp_table_entry
= smu10_dpm_get_pp_table_entry
,
1172 .get_num_of_pp_table_entries
= smu10_dpm_get_num_of_pp_table_entries
,
1173 .set_cpu_power_state
= smu10_set_cpu_power_state
,
1174 .store_cc6_data
= smu10_store_cc6_data
,
1175 .force_clock_level
= smu10_force_clock_level
,
1176 .print_clock_levels
= smu10_print_clock_levels
,
1177 .get_dal_power_level
= smu10_get_dal_power_level
,
1178 .get_performance_level
= smu10_get_performance_level
,
1179 .get_current_shallow_sleep_clocks
= smu10_get_current_shallow_sleep_clocks
,
1180 .get_clock_by_type_with_latency
= smu10_get_clock_by_type_with_latency
,
1181 .get_clock_by_type_with_voltage
= smu10_get_clock_by_type_with_voltage
,
1182 .set_watermarks_for_clocks_ranges
= smu10_set_watermarks_for_clocks_ranges
,
1183 .get_max_high_clocks
= smu10_get_max_high_clocks
,
1184 .read_sensor
= smu10_read_sensor
,
1185 .set_active_display_count
= smu10_set_active_display_count
,
1186 .set_deep_sleep_dcefclk
= smu10_set_deep_sleep_dcefclk
,
1187 .dynamic_state_management_enable
= smu10_enable_dpm_tasks
,
1188 .power_off_asic
= smu10_power_off_asic
,
1189 .asic_setup
= smu10_setup_asic_task
,
1190 .power_state_set
= smu10_set_power_state_tasks
,
1191 .dynamic_state_management_disable
= smu10_disable_dpm_tasks
,
1192 .powergate_mmhub
= smu10_powergate_mmhub
,
1193 .smus_notify_pwe
= smu10_smus_notify_pwe
,
1194 .gfx_off_control
= smu10_gfx_off_control
,
1195 .display_clock_voltage_request
= smu10_display_clock_voltage_request
,
1196 .powergate_gfx
= smu10_gfx_off_control
,
1199 int smu10_init_function_pointers(struct pp_hwmgr
*hwmgr
)
1201 hwmgr
->hwmgr_func
= &smu10_hwmgr_funcs
;
1202 hwmgr
->pptable_func
= &pptable_funcs
;