2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/gfp.h>
27 #include <linux/slab.h>
28 #include <linux/firmware.h>
29 #include "amd_shared.h"
30 #include "amd_powerplay.h"
31 #include "power_state.h"
36 static const struct amd_pm_funcs pp_dpm_funcs
;
38 static int amd_powerplay_create(struct amdgpu_device
*adev
)
40 struct pp_hwmgr
*hwmgr
;
45 hwmgr
= kzalloc(sizeof(struct pp_hwmgr
), GFP_KERNEL
);
50 hwmgr
->not_vf
= !amdgpu_sriov_vf(adev
);
51 hwmgr
->device
= amdgpu_cgs_create_device(adev
);
52 mutex_init(&hwmgr
->smu_lock
);
53 hwmgr
->chip_family
= adev
->family
;
54 hwmgr
->chip_id
= adev
->asic_type
;
55 hwmgr
->feature_mask
= adev
->pm
.pp_feature
;
56 hwmgr
->display_config
= &adev
->pm
.pm_display_cfg
;
57 adev
->powerplay
.pp_handle
= hwmgr
;
58 adev
->powerplay
.pp_funcs
= &pp_dpm_funcs
;
63 static void amd_powerplay_destroy(struct amdgpu_device
*adev
)
65 struct pp_hwmgr
*hwmgr
= adev
->powerplay
.pp_handle
;
67 kfree(hwmgr
->hardcode_pp_table
);
68 hwmgr
->hardcode_pp_table
= NULL
;
74 static int pp_early_init(void *handle
)
77 struct amdgpu_device
*adev
= handle
;
79 ret
= amd_powerplay_create(adev
);
84 ret
= hwmgr_early_init(adev
->powerplay
.pp_handle
);
91 static int pp_sw_init(void *handle
)
93 struct amdgpu_device
*adev
= handle
;
94 struct pp_hwmgr
*hwmgr
= adev
->powerplay
.pp_handle
;
97 ret
= hwmgr_sw_init(hwmgr
);
99 pr_debug("powerplay sw init %s\n", ret
? "failed" : "successfully");
104 static int pp_sw_fini(void *handle
)
106 struct amdgpu_device
*adev
= handle
;
107 struct pp_hwmgr
*hwmgr
= adev
->powerplay
.pp_handle
;
109 hwmgr_sw_fini(hwmgr
);
111 release_firmware(adev
->pm
.fw
);
117 static int pp_hw_init(void *handle
)
120 struct amdgpu_device
*adev
= handle
;
121 struct pp_hwmgr
*hwmgr
= adev
->powerplay
.pp_handle
;
123 ret
= hwmgr_hw_init(hwmgr
);
126 pr_err("powerplay hw init failed\n");
131 static int pp_hw_fini(void *handle
)
133 struct amdgpu_device
*adev
= handle
;
134 struct pp_hwmgr
*hwmgr
= adev
->powerplay
.pp_handle
;
136 hwmgr_hw_fini(hwmgr
);
141 static void pp_reserve_vram_for_smu(struct amdgpu_device
*adev
)
144 void *cpu_ptr
= NULL
;
146 struct pp_hwmgr
*hwmgr
= adev
->powerplay
.pp_handle
;
148 if (amdgpu_bo_create_kernel(adev
, adev
->pm
.smu_prv_buffer_size
,
149 PAGE_SIZE
, AMDGPU_GEM_DOMAIN_GTT
,
150 &adev
->pm
.smu_prv_buffer
,
153 DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
157 if (hwmgr
->hwmgr_func
->notify_cac_buffer_info
)
158 r
= hwmgr
->hwmgr_func
->notify_cac_buffer_info(hwmgr
,
159 lower_32_bits((unsigned long)cpu_ptr
),
160 upper_32_bits((unsigned long)cpu_ptr
),
161 lower_32_bits(gpu_addr
),
162 upper_32_bits(gpu_addr
),
163 adev
->pm
.smu_prv_buffer_size
);
166 amdgpu_bo_free_kernel(&adev
->pm
.smu_prv_buffer
, NULL
, NULL
);
167 adev
->pm
.smu_prv_buffer
= NULL
;
168 DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
172 static int pp_late_init(void *handle
)
174 struct amdgpu_device
*adev
= handle
;
175 struct pp_hwmgr
*hwmgr
= adev
->powerplay
.pp_handle
;
177 if (hwmgr
&& hwmgr
->pm_en
) {
178 mutex_lock(&hwmgr
->smu_lock
);
179 hwmgr_handle_task(hwmgr
,
180 AMD_PP_TASK_COMPLETE_INIT
, NULL
);
181 mutex_unlock(&hwmgr
->smu_lock
);
183 if (adev
->pm
.smu_prv_buffer_size
!= 0)
184 pp_reserve_vram_for_smu(adev
);
189 static void pp_late_fini(void *handle
)
191 struct amdgpu_device
*adev
= handle
;
193 if (adev
->pm
.smu_prv_buffer
)
194 amdgpu_bo_free_kernel(&adev
->pm
.smu_prv_buffer
, NULL
, NULL
);
195 amd_powerplay_destroy(adev
);
199 static bool pp_is_idle(void *handle
)
204 static int pp_wait_for_idle(void *handle
)
209 static int pp_sw_reset(void *handle
)
214 static int pp_set_powergating_state(void *handle
,
215 enum amd_powergating_state state
)
220 static int pp_suspend(void *handle
)
222 struct amdgpu_device
*adev
= handle
;
223 struct pp_hwmgr
*hwmgr
= adev
->powerplay
.pp_handle
;
225 return hwmgr_suspend(hwmgr
);
228 static int pp_resume(void *handle
)
230 struct amdgpu_device
*adev
= handle
;
231 struct pp_hwmgr
*hwmgr
= adev
->powerplay
.pp_handle
;
233 return hwmgr_resume(hwmgr
);
236 static int pp_set_clockgating_state(void *handle
,
237 enum amd_clockgating_state state
)
242 static const struct amd_ip_funcs pp_ip_funcs
= {
244 .early_init
= pp_early_init
,
245 .late_init
= pp_late_init
,
246 .sw_init
= pp_sw_init
,
247 .sw_fini
= pp_sw_fini
,
248 .hw_init
= pp_hw_init
,
249 .hw_fini
= pp_hw_fini
,
250 .late_fini
= pp_late_fini
,
251 .suspend
= pp_suspend
,
253 .is_idle
= pp_is_idle
,
254 .wait_for_idle
= pp_wait_for_idle
,
255 .soft_reset
= pp_sw_reset
,
256 .set_clockgating_state
= pp_set_clockgating_state
,
257 .set_powergating_state
= pp_set_powergating_state
,
260 const struct amdgpu_ip_block_version pp_smu_ip_block
=
262 .type
= AMD_IP_BLOCK_TYPE_SMC
,
266 .funcs
= &pp_ip_funcs
,
269 /* This interface only be supported On Vi,
270 * because only smu7/8 can help to load gfx/sdma fw,
271 * smu need to be enabled before load other ip's fw.
272 * so call start smu to load smu7 fw and other ip's fw
274 static int pp_dpm_load_fw(void *handle
)
276 struct pp_hwmgr
*hwmgr
= handle
;
278 if (!hwmgr
|| !hwmgr
->smumgr_funcs
|| !hwmgr
->smumgr_funcs
->start_smu
)
281 if (hwmgr
->smumgr_funcs
->start_smu(hwmgr
)) {
282 pr_err("fw load failed\n");
/* pp_dpm_fw_loading_complete - stub: nothing to do after fw load. */
static int pp_dpm_fw_loading_complete(void *handle)
{
	return 0;
}
294 static int pp_set_clockgating_by_smu(void *handle
, uint32_t msg_id
)
296 struct pp_hwmgr
*hwmgr
= handle
;
298 if (!hwmgr
|| !hwmgr
->pm_en
)
301 if (hwmgr
->hwmgr_func
->update_clock_gatings
== NULL
) {
302 pr_info_ratelimited("%s was not implemented.\n", __func__
);
306 return hwmgr
->hwmgr_func
->update_clock_gatings(hwmgr
, &msg_id
);
309 static void pp_dpm_en_umd_pstate(struct pp_hwmgr
*hwmgr
,
310 enum amd_dpm_forced_level
*level
)
312 uint32_t profile_mode_mask
= AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD
|
313 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK
|
314 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK
|
315 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
;
317 if (!(hwmgr
->dpm_level
& profile_mode_mask
)) {
318 /* enter umd pstate, save current level, disable gfx cg*/
319 if (*level
& profile_mode_mask
) {
320 hwmgr
->saved_dpm_level
= hwmgr
->dpm_level
;
321 hwmgr
->en_umd_pstate
= true;
322 amdgpu_device_ip_set_clockgating_state(hwmgr
->adev
,
323 AMD_IP_BLOCK_TYPE_GFX
,
324 AMD_CG_STATE_UNGATE
);
325 amdgpu_device_ip_set_powergating_state(hwmgr
->adev
,
326 AMD_IP_BLOCK_TYPE_GFX
,
327 AMD_PG_STATE_UNGATE
);
330 /* exit umd pstate, restore level, enable gfx cg*/
331 if (!(*level
& profile_mode_mask
)) {
332 if (*level
== AMD_DPM_FORCED_LEVEL_PROFILE_EXIT
)
333 *level
= hwmgr
->saved_dpm_level
;
334 hwmgr
->en_umd_pstate
= false;
335 amdgpu_device_ip_set_clockgating_state(hwmgr
->adev
,
336 AMD_IP_BLOCK_TYPE_GFX
,
338 amdgpu_device_ip_set_powergating_state(hwmgr
->adev
,
339 AMD_IP_BLOCK_TYPE_GFX
,
345 static int pp_dpm_force_performance_level(void *handle
,
346 enum amd_dpm_forced_level level
)
348 struct pp_hwmgr
*hwmgr
= handle
;
350 if (!hwmgr
|| !hwmgr
->pm_en
)
353 if (level
== hwmgr
->dpm_level
)
356 mutex_lock(&hwmgr
->smu_lock
);
357 pp_dpm_en_umd_pstate(hwmgr
, &level
);
358 hwmgr
->request_dpm_level
= level
;
359 hwmgr_handle_task(hwmgr
, AMD_PP_TASK_READJUST_POWER_STATE
, NULL
);
360 mutex_unlock(&hwmgr
->smu_lock
);
365 static enum amd_dpm_forced_level
pp_dpm_get_performance_level(
368 struct pp_hwmgr
*hwmgr
= handle
;
369 enum amd_dpm_forced_level level
;
371 if (!hwmgr
|| !hwmgr
->pm_en
)
374 mutex_lock(&hwmgr
->smu_lock
);
375 level
= hwmgr
->dpm_level
;
376 mutex_unlock(&hwmgr
->smu_lock
);
380 static uint32_t pp_dpm_get_sclk(void *handle
, bool low
)
382 struct pp_hwmgr
*hwmgr
= handle
;
385 if (!hwmgr
|| !hwmgr
->pm_en
)
388 if (hwmgr
->hwmgr_func
->get_sclk
== NULL
) {
389 pr_info_ratelimited("%s was not implemented.\n", __func__
);
392 mutex_lock(&hwmgr
->smu_lock
);
393 clk
= hwmgr
->hwmgr_func
->get_sclk(hwmgr
, low
);
394 mutex_unlock(&hwmgr
->smu_lock
);
398 static uint32_t pp_dpm_get_mclk(void *handle
, bool low
)
400 struct pp_hwmgr
*hwmgr
= handle
;
403 if (!hwmgr
|| !hwmgr
->pm_en
)
406 if (hwmgr
->hwmgr_func
->get_mclk
== NULL
) {
407 pr_info_ratelimited("%s was not implemented.\n", __func__
);
410 mutex_lock(&hwmgr
->smu_lock
);
411 clk
= hwmgr
->hwmgr_func
->get_mclk(hwmgr
, low
);
412 mutex_unlock(&hwmgr
->smu_lock
);
416 static void pp_dpm_powergate_vce(void *handle
, bool gate
)
418 struct pp_hwmgr
*hwmgr
= handle
;
420 if (!hwmgr
|| !hwmgr
->pm_en
)
423 if (hwmgr
->hwmgr_func
->powergate_vce
== NULL
) {
424 pr_info_ratelimited("%s was not implemented.\n", __func__
);
427 mutex_lock(&hwmgr
->smu_lock
);
428 hwmgr
->hwmgr_func
->powergate_vce(hwmgr
, gate
);
429 mutex_unlock(&hwmgr
->smu_lock
);
432 static void pp_dpm_powergate_uvd(void *handle
, bool gate
)
434 struct pp_hwmgr
*hwmgr
= handle
;
436 if (!hwmgr
|| !hwmgr
->pm_en
)
439 if (hwmgr
->hwmgr_func
->powergate_uvd
== NULL
) {
440 pr_info_ratelimited("%s was not implemented.\n", __func__
);
443 mutex_lock(&hwmgr
->smu_lock
);
444 hwmgr
->hwmgr_func
->powergate_uvd(hwmgr
, gate
);
445 mutex_unlock(&hwmgr
->smu_lock
);
448 static int pp_dpm_dispatch_tasks(void *handle
, enum amd_pp_task task_id
,
449 enum amd_pm_state_type
*user_state
)
452 struct pp_hwmgr
*hwmgr
= handle
;
454 if (!hwmgr
|| !hwmgr
->pm_en
)
457 mutex_lock(&hwmgr
->smu_lock
);
458 ret
= hwmgr_handle_task(hwmgr
, task_id
, user_state
);
459 mutex_unlock(&hwmgr
->smu_lock
);
464 static enum amd_pm_state_type
pp_dpm_get_current_power_state(void *handle
)
466 struct pp_hwmgr
*hwmgr
= handle
;
467 struct pp_power_state
*state
;
468 enum amd_pm_state_type pm_type
;
470 if (!hwmgr
|| !hwmgr
->pm_en
|| !hwmgr
->current_ps
)
473 mutex_lock(&hwmgr
->smu_lock
);
475 state
= hwmgr
->current_ps
;
477 switch (state
->classification
.ui_label
) {
478 case PP_StateUILabel_Battery
:
479 pm_type
= POWER_STATE_TYPE_BATTERY
;
481 case PP_StateUILabel_Balanced
:
482 pm_type
= POWER_STATE_TYPE_BALANCED
;
484 case PP_StateUILabel_Performance
:
485 pm_type
= POWER_STATE_TYPE_PERFORMANCE
;
488 if (state
->classification
.flags
& PP_StateClassificationFlag_Boot
)
489 pm_type
= POWER_STATE_TYPE_INTERNAL_BOOT
;
491 pm_type
= POWER_STATE_TYPE_DEFAULT
;
494 mutex_unlock(&hwmgr
->smu_lock
);
499 static void pp_dpm_set_fan_control_mode(void *handle
, uint32_t mode
)
501 struct pp_hwmgr
*hwmgr
= handle
;
503 if (!hwmgr
|| !hwmgr
->pm_en
)
506 if (hwmgr
->hwmgr_func
->set_fan_control_mode
== NULL
) {
507 pr_info_ratelimited("%s was not implemented.\n", __func__
);
510 mutex_lock(&hwmgr
->smu_lock
);
511 hwmgr
->hwmgr_func
->set_fan_control_mode(hwmgr
, mode
);
512 mutex_unlock(&hwmgr
->smu_lock
);
515 static uint32_t pp_dpm_get_fan_control_mode(void *handle
)
517 struct pp_hwmgr
*hwmgr
= handle
;
520 if (!hwmgr
|| !hwmgr
->pm_en
)
523 if (hwmgr
->hwmgr_func
->get_fan_control_mode
== NULL
) {
524 pr_info_ratelimited("%s was not implemented.\n", __func__
);
527 mutex_lock(&hwmgr
->smu_lock
);
528 mode
= hwmgr
->hwmgr_func
->get_fan_control_mode(hwmgr
);
529 mutex_unlock(&hwmgr
->smu_lock
);
533 static int pp_dpm_set_fan_speed_percent(void *handle
, uint32_t percent
)
535 struct pp_hwmgr
*hwmgr
= handle
;
538 if (!hwmgr
|| !hwmgr
->pm_en
)
541 if (hwmgr
->hwmgr_func
->set_fan_speed_percent
== NULL
) {
542 pr_info_ratelimited("%s was not implemented.\n", __func__
);
545 mutex_lock(&hwmgr
->smu_lock
);
546 ret
= hwmgr
->hwmgr_func
->set_fan_speed_percent(hwmgr
, percent
);
547 mutex_unlock(&hwmgr
->smu_lock
);
551 static int pp_dpm_get_fan_speed_percent(void *handle
, uint32_t *speed
)
553 struct pp_hwmgr
*hwmgr
= handle
;
556 if (!hwmgr
|| !hwmgr
->pm_en
)
559 if (hwmgr
->hwmgr_func
->get_fan_speed_percent
== NULL
) {
560 pr_info_ratelimited("%s was not implemented.\n", __func__
);
564 mutex_lock(&hwmgr
->smu_lock
);
565 ret
= hwmgr
->hwmgr_func
->get_fan_speed_percent(hwmgr
, speed
);
566 mutex_unlock(&hwmgr
->smu_lock
);
570 static int pp_dpm_get_fan_speed_rpm(void *handle
, uint32_t *rpm
)
572 struct pp_hwmgr
*hwmgr
= handle
;
575 if (!hwmgr
|| !hwmgr
->pm_en
)
578 if (hwmgr
->hwmgr_func
->get_fan_speed_rpm
== NULL
)
581 mutex_lock(&hwmgr
->smu_lock
);
582 ret
= hwmgr
->hwmgr_func
->get_fan_speed_rpm(hwmgr
, rpm
);
583 mutex_unlock(&hwmgr
->smu_lock
);
587 static int pp_dpm_set_fan_speed_rpm(void *handle
, uint32_t rpm
)
589 struct pp_hwmgr
*hwmgr
= handle
;
592 if (!hwmgr
|| !hwmgr
->pm_en
)
595 if (hwmgr
->hwmgr_func
->set_fan_speed_rpm
== NULL
) {
596 pr_info_ratelimited("%s was not implemented.\n", __func__
);
599 mutex_lock(&hwmgr
->smu_lock
);
600 ret
= hwmgr
->hwmgr_func
->set_fan_speed_rpm(hwmgr
, rpm
);
601 mutex_unlock(&hwmgr
->smu_lock
);
605 static int pp_dpm_get_pp_num_states(void *handle
,
606 struct pp_states_info
*data
)
608 struct pp_hwmgr
*hwmgr
= handle
;
611 memset(data
, 0, sizeof(*data
));
613 if (!hwmgr
|| !hwmgr
->pm_en
||!hwmgr
->ps
)
616 mutex_lock(&hwmgr
->smu_lock
);
618 data
->nums
= hwmgr
->num_ps
;
620 for (i
= 0; i
< hwmgr
->num_ps
; i
++) {
621 struct pp_power_state
*state
= (struct pp_power_state
*)
622 ((unsigned long)hwmgr
->ps
+ i
* hwmgr
->ps_size
);
623 switch (state
->classification
.ui_label
) {
624 case PP_StateUILabel_Battery
:
625 data
->states
[i
] = POWER_STATE_TYPE_BATTERY
;
627 case PP_StateUILabel_Balanced
:
628 data
->states
[i
] = POWER_STATE_TYPE_BALANCED
;
630 case PP_StateUILabel_Performance
:
631 data
->states
[i
] = POWER_STATE_TYPE_PERFORMANCE
;
634 if (state
->classification
.flags
& PP_StateClassificationFlag_Boot
)
635 data
->states
[i
] = POWER_STATE_TYPE_INTERNAL_BOOT
;
637 data
->states
[i
] = POWER_STATE_TYPE_DEFAULT
;
640 mutex_unlock(&hwmgr
->smu_lock
);
644 static int pp_dpm_get_pp_table(void *handle
, char **table
)
646 struct pp_hwmgr
*hwmgr
= handle
;
649 if (!hwmgr
|| !hwmgr
->pm_en
||!hwmgr
->soft_pp_table
)
652 mutex_lock(&hwmgr
->smu_lock
);
653 *table
= (char *)hwmgr
->soft_pp_table
;
654 size
= hwmgr
->soft_pp_table_size
;
655 mutex_unlock(&hwmgr
->smu_lock
);
659 static int amd_powerplay_reset(void *handle
)
661 struct pp_hwmgr
*hwmgr
= handle
;
664 ret
= hwmgr_hw_fini(hwmgr
);
668 ret
= hwmgr_hw_init(hwmgr
);
672 return hwmgr_handle_task(hwmgr
, AMD_PP_TASK_COMPLETE_INIT
, NULL
);
675 static int pp_dpm_set_pp_table(void *handle
, const char *buf
, size_t size
)
677 struct pp_hwmgr
*hwmgr
= handle
;
680 if (!hwmgr
|| !hwmgr
->pm_en
)
683 mutex_lock(&hwmgr
->smu_lock
);
684 if (!hwmgr
->hardcode_pp_table
) {
685 hwmgr
->hardcode_pp_table
= kmemdup(hwmgr
->soft_pp_table
,
686 hwmgr
->soft_pp_table_size
,
688 if (!hwmgr
->hardcode_pp_table
)
692 memcpy(hwmgr
->hardcode_pp_table
, buf
, size
);
694 hwmgr
->soft_pp_table
= hwmgr
->hardcode_pp_table
;
696 ret
= amd_powerplay_reset(handle
);
700 if (hwmgr
->hwmgr_func
->avfs_control
) {
701 ret
= hwmgr
->hwmgr_func
->avfs_control(hwmgr
, false);
705 mutex_unlock(&hwmgr
->smu_lock
);
708 mutex_unlock(&hwmgr
->smu_lock
);
712 static int pp_dpm_force_clock_level(void *handle
,
713 enum pp_clock_type type
, uint32_t mask
)
715 struct pp_hwmgr
*hwmgr
= handle
;
718 if (!hwmgr
|| !hwmgr
->pm_en
)
721 if (hwmgr
->hwmgr_func
->force_clock_level
== NULL
) {
722 pr_info_ratelimited("%s was not implemented.\n", __func__
);
726 if (hwmgr
->dpm_level
!= AMD_DPM_FORCED_LEVEL_MANUAL
) {
727 pr_debug("force clock level is for dpm manual mode only.\n");
731 mutex_lock(&hwmgr
->smu_lock
);
732 ret
= hwmgr
->hwmgr_func
->force_clock_level(hwmgr
, type
, mask
);
733 mutex_unlock(&hwmgr
->smu_lock
);
737 static int pp_dpm_print_clock_levels(void *handle
,
738 enum pp_clock_type type
, char *buf
)
740 struct pp_hwmgr
*hwmgr
= handle
;
743 if (!hwmgr
|| !hwmgr
->pm_en
)
746 if (hwmgr
->hwmgr_func
->print_clock_levels
== NULL
) {
747 pr_info_ratelimited("%s was not implemented.\n", __func__
);
750 mutex_lock(&hwmgr
->smu_lock
);
751 ret
= hwmgr
->hwmgr_func
->print_clock_levels(hwmgr
, type
, buf
);
752 mutex_unlock(&hwmgr
->smu_lock
);
756 static int pp_dpm_get_sclk_od(void *handle
)
758 struct pp_hwmgr
*hwmgr
= handle
;
761 if (!hwmgr
|| !hwmgr
->pm_en
)
764 if (hwmgr
->hwmgr_func
->get_sclk_od
== NULL
) {
765 pr_info_ratelimited("%s was not implemented.\n", __func__
);
768 mutex_lock(&hwmgr
->smu_lock
);
769 ret
= hwmgr
->hwmgr_func
->get_sclk_od(hwmgr
);
770 mutex_unlock(&hwmgr
->smu_lock
);
774 static int pp_dpm_set_sclk_od(void *handle
, uint32_t value
)
776 struct pp_hwmgr
*hwmgr
= handle
;
779 if (!hwmgr
|| !hwmgr
->pm_en
)
782 if (hwmgr
->hwmgr_func
->set_sclk_od
== NULL
) {
783 pr_info_ratelimited("%s was not implemented.\n", __func__
);
787 mutex_lock(&hwmgr
->smu_lock
);
788 ret
= hwmgr
->hwmgr_func
->set_sclk_od(hwmgr
, value
);
789 mutex_unlock(&hwmgr
->smu_lock
);
793 static int pp_dpm_get_mclk_od(void *handle
)
795 struct pp_hwmgr
*hwmgr
= handle
;
798 if (!hwmgr
|| !hwmgr
->pm_en
)
801 if (hwmgr
->hwmgr_func
->get_mclk_od
== NULL
) {
802 pr_info_ratelimited("%s was not implemented.\n", __func__
);
805 mutex_lock(&hwmgr
->smu_lock
);
806 ret
= hwmgr
->hwmgr_func
->get_mclk_od(hwmgr
);
807 mutex_unlock(&hwmgr
->smu_lock
);
811 static int pp_dpm_set_mclk_od(void *handle
, uint32_t value
)
813 struct pp_hwmgr
*hwmgr
= handle
;
816 if (!hwmgr
|| !hwmgr
->pm_en
)
819 if (hwmgr
->hwmgr_func
->set_mclk_od
== NULL
) {
820 pr_info_ratelimited("%s was not implemented.\n", __func__
);
823 mutex_lock(&hwmgr
->smu_lock
);
824 ret
= hwmgr
->hwmgr_func
->set_mclk_od(hwmgr
, value
);
825 mutex_unlock(&hwmgr
->smu_lock
);
829 static int pp_dpm_read_sensor(void *handle
, int idx
,
830 void *value
, int *size
)
832 struct pp_hwmgr
*hwmgr
= handle
;
835 if (!hwmgr
|| !hwmgr
->pm_en
|| !value
)
839 case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK
:
840 *((uint32_t *)value
) = hwmgr
->pstate_sclk
;
842 case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK
:
843 *((uint32_t *)value
) = hwmgr
->pstate_mclk
;
845 case AMDGPU_PP_SENSOR_MIN_FAN_RPM
:
846 *((uint32_t *)value
) = hwmgr
->thermal_controller
.fanInfo
.ulMinRPM
;
848 case AMDGPU_PP_SENSOR_MAX_FAN_RPM
:
849 *((uint32_t *)value
) = hwmgr
->thermal_controller
.fanInfo
.ulMaxRPM
;
852 mutex_lock(&hwmgr
->smu_lock
);
853 ret
= hwmgr
->hwmgr_func
->read_sensor(hwmgr
, idx
, value
, size
);
854 mutex_unlock(&hwmgr
->smu_lock
);
859 static struct amd_vce_state
*
860 pp_dpm_get_vce_clock_state(void *handle
, unsigned idx
)
862 struct pp_hwmgr
*hwmgr
= handle
;
864 if (!hwmgr
|| !hwmgr
->pm_en
)
867 if (idx
< hwmgr
->num_vce_state_tables
)
868 return &hwmgr
->vce_states
[idx
];
872 static int pp_get_power_profile_mode(void *handle
, char *buf
)
874 struct pp_hwmgr
*hwmgr
= handle
;
876 if (!hwmgr
|| !hwmgr
->pm_en
|| !buf
)
879 if (hwmgr
->hwmgr_func
->get_power_profile_mode
== NULL
) {
880 pr_info_ratelimited("%s was not implemented.\n", __func__
);
881 return snprintf(buf
, PAGE_SIZE
, "\n");
884 return hwmgr
->hwmgr_func
->get_power_profile_mode(hwmgr
, buf
);
887 static int pp_set_power_profile_mode(void *handle
, long *input
, uint32_t size
)
889 struct pp_hwmgr
*hwmgr
= handle
;
892 if (!hwmgr
|| !hwmgr
->pm_en
)
895 if (hwmgr
->hwmgr_func
->set_power_profile_mode
== NULL
) {
896 pr_info_ratelimited("%s was not implemented.\n", __func__
);
900 if (hwmgr
->dpm_level
!= AMD_DPM_FORCED_LEVEL_MANUAL
) {
901 pr_debug("power profile setting is for manual dpm mode only.\n");
905 mutex_lock(&hwmgr
->smu_lock
);
906 ret
= hwmgr
->hwmgr_func
->set_power_profile_mode(hwmgr
, input
, size
);
907 mutex_unlock(&hwmgr
->smu_lock
);
911 static int pp_odn_edit_dpm_table(void *handle
, uint32_t type
, long *input
, uint32_t size
)
913 struct pp_hwmgr
*hwmgr
= handle
;
915 if (!hwmgr
|| !hwmgr
->pm_en
)
918 if (hwmgr
->hwmgr_func
->odn_edit_dpm_table
== NULL
) {
919 pr_info_ratelimited("%s was not implemented.\n", __func__
);
923 return hwmgr
->hwmgr_func
->odn_edit_dpm_table(hwmgr
, type
, input
, size
);
926 static int pp_dpm_set_mp1_state(void *handle
, enum pp_mp1_state mp1_state
)
928 struct pp_hwmgr
*hwmgr
= handle
;
936 if (hwmgr
->hwmgr_func
->set_mp1_state
)
937 return hwmgr
->hwmgr_func
->set_mp1_state(hwmgr
, mp1_state
);
942 static int pp_dpm_switch_power_profile(void *handle
,
943 enum PP_SMC_POWER_PROFILE type
, bool en
)
945 struct pp_hwmgr
*hwmgr
= handle
;
949 if (!hwmgr
|| !hwmgr
->pm_en
)
952 if (hwmgr
->hwmgr_func
->set_power_profile_mode
== NULL
) {
953 pr_info_ratelimited("%s was not implemented.\n", __func__
);
957 if (!(type
< PP_SMC_POWER_PROFILE_CUSTOM
))
960 mutex_lock(&hwmgr
->smu_lock
);
963 hwmgr
->workload_mask
&= ~(1 << hwmgr
->workload_prority
[type
]);
964 index
= fls(hwmgr
->workload_mask
);
965 index
= index
> 0 && index
<= Workload_Policy_Max
? index
- 1 : 0;
966 workload
= hwmgr
->workload_setting
[index
];
968 hwmgr
->workload_mask
|= (1 << hwmgr
->workload_prority
[type
]);
969 index
= fls(hwmgr
->workload_mask
);
970 index
= index
<= Workload_Policy_Max
? index
- 1 : 0;
971 workload
= hwmgr
->workload_setting
[index
];
974 if (type
== PP_SMC_POWER_PROFILE_COMPUTE
&&
975 hwmgr
->hwmgr_func
->disable_power_features_for_compute_performance
) {
976 if (hwmgr
->hwmgr_func
->disable_power_features_for_compute_performance(hwmgr
, en
)) {
977 mutex_unlock(&hwmgr
->smu_lock
);
982 if (hwmgr
->dpm_level
!= AMD_DPM_FORCED_LEVEL_MANUAL
)
983 hwmgr
->hwmgr_func
->set_power_profile_mode(hwmgr
, &workload
, 0);
984 mutex_unlock(&hwmgr
->smu_lock
);
989 static int pp_set_power_limit(void *handle
, uint32_t limit
)
991 struct pp_hwmgr
*hwmgr
= handle
;
992 uint32_t max_power_limit
;
994 if (!hwmgr
|| !hwmgr
->pm_en
)
997 if (hwmgr
->hwmgr_func
->set_power_limit
== NULL
) {
998 pr_info_ratelimited("%s was not implemented.\n", __func__
);
1003 limit
= hwmgr
->default_power_limit
;
1005 max_power_limit
= hwmgr
->default_power_limit
;
1006 if (hwmgr
->od_enabled
) {
1007 max_power_limit
*= (100 + hwmgr
->platform_descriptor
.TDPODLimit
);
1008 max_power_limit
/= 100;
1011 if (limit
> max_power_limit
)
1014 mutex_lock(&hwmgr
->smu_lock
);
1015 hwmgr
->hwmgr_func
->set_power_limit(hwmgr
, limit
);
1016 hwmgr
->power_limit
= limit
;
1017 mutex_unlock(&hwmgr
->smu_lock
);
1021 static int pp_get_power_limit(void *handle
, uint32_t *limit
, bool default_limit
)
1023 struct pp_hwmgr
*hwmgr
= handle
;
1025 if (!hwmgr
|| !hwmgr
->pm_en
||!limit
)
1028 mutex_lock(&hwmgr
->smu_lock
);
1030 if (default_limit
) {
1031 *limit
= hwmgr
->default_power_limit
;
1032 if (hwmgr
->od_enabled
) {
1033 *limit
*= (100 + hwmgr
->platform_descriptor
.TDPODLimit
);
1038 *limit
= hwmgr
->power_limit
;
1040 mutex_unlock(&hwmgr
->smu_lock
);
1045 static int pp_display_configuration_change(void *handle
,
1046 const struct amd_pp_display_configuration
*display_config
)
1048 struct pp_hwmgr
*hwmgr
= handle
;
1050 if (!hwmgr
|| !hwmgr
->pm_en
)
1053 mutex_lock(&hwmgr
->smu_lock
);
1054 phm_store_dal_configuration_data(hwmgr
, display_config
);
1055 mutex_unlock(&hwmgr
->smu_lock
);
1059 static int pp_get_display_power_level(void *handle
,
1060 struct amd_pp_simple_clock_info
*output
)
1062 struct pp_hwmgr
*hwmgr
= handle
;
1065 if (!hwmgr
|| !hwmgr
->pm_en
||!output
)
1068 mutex_lock(&hwmgr
->smu_lock
);
1069 ret
= phm_get_dal_power_level(hwmgr
, output
);
1070 mutex_unlock(&hwmgr
->smu_lock
);
1074 static int pp_get_current_clocks(void *handle
,
1075 struct amd_pp_clock_info
*clocks
)
1077 struct amd_pp_simple_clock_info simple_clocks
= { 0 };
1078 struct pp_clock_info hw_clocks
;
1079 struct pp_hwmgr
*hwmgr
= handle
;
1082 if (!hwmgr
|| !hwmgr
->pm_en
)
1085 mutex_lock(&hwmgr
->smu_lock
);
1087 phm_get_dal_power_level(hwmgr
, &simple_clocks
);
1089 if (phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
,
1090 PHM_PlatformCaps_PowerContainment
))
1091 ret
= phm_get_clock_info(hwmgr
, &hwmgr
->current_ps
->hardware
,
1092 &hw_clocks
, PHM_PerformanceLevelDesignation_PowerContainment
);
1094 ret
= phm_get_clock_info(hwmgr
, &hwmgr
->current_ps
->hardware
,
1095 &hw_clocks
, PHM_PerformanceLevelDesignation_Activity
);
1098 pr_debug("Error in phm_get_clock_info \n");
1099 mutex_unlock(&hwmgr
->smu_lock
);
1103 clocks
->min_engine_clock
= hw_clocks
.min_eng_clk
;
1104 clocks
->max_engine_clock
= hw_clocks
.max_eng_clk
;
1105 clocks
->min_memory_clock
= hw_clocks
.min_mem_clk
;
1106 clocks
->max_memory_clock
= hw_clocks
.max_mem_clk
;
1107 clocks
->min_bus_bandwidth
= hw_clocks
.min_bus_bandwidth
;
1108 clocks
->max_bus_bandwidth
= hw_clocks
.max_bus_bandwidth
;
1110 clocks
->max_engine_clock_in_sr
= hw_clocks
.max_eng_clk
;
1111 clocks
->min_engine_clock_in_sr
= hw_clocks
.min_eng_clk
;
1113 if (simple_clocks
.level
== 0)
1114 clocks
->max_clocks_state
= PP_DAL_POWERLEVEL_7
;
1116 clocks
->max_clocks_state
= simple_clocks
.level
;
1118 if (0 == phm_get_current_shallow_sleep_clocks(hwmgr
, &hwmgr
->current_ps
->hardware
, &hw_clocks
)) {
1119 clocks
->max_engine_clock_in_sr
= hw_clocks
.max_eng_clk
;
1120 clocks
->min_engine_clock_in_sr
= hw_clocks
.min_eng_clk
;
1122 mutex_unlock(&hwmgr
->smu_lock
);
1126 static int pp_get_clock_by_type(void *handle
, enum amd_pp_clock_type type
, struct amd_pp_clocks
*clocks
)
1128 struct pp_hwmgr
*hwmgr
= handle
;
1131 if (!hwmgr
|| !hwmgr
->pm_en
)
1137 mutex_lock(&hwmgr
->smu_lock
);
1138 ret
= phm_get_clock_by_type(hwmgr
, type
, clocks
);
1139 mutex_unlock(&hwmgr
->smu_lock
);
1143 static int pp_get_clock_by_type_with_latency(void *handle
,
1144 enum amd_pp_clock_type type
,
1145 struct pp_clock_levels_with_latency
*clocks
)
1147 struct pp_hwmgr
*hwmgr
= handle
;
1150 if (!hwmgr
|| !hwmgr
->pm_en
||!clocks
)
1153 mutex_lock(&hwmgr
->smu_lock
);
1154 ret
= phm_get_clock_by_type_with_latency(hwmgr
, type
, clocks
);
1155 mutex_unlock(&hwmgr
->smu_lock
);
1159 static int pp_get_clock_by_type_with_voltage(void *handle
,
1160 enum amd_pp_clock_type type
,
1161 struct pp_clock_levels_with_voltage
*clocks
)
1163 struct pp_hwmgr
*hwmgr
= handle
;
1166 if (!hwmgr
|| !hwmgr
->pm_en
||!clocks
)
1169 mutex_lock(&hwmgr
->smu_lock
);
1171 ret
= phm_get_clock_by_type_with_voltage(hwmgr
, type
, clocks
);
1173 mutex_unlock(&hwmgr
->smu_lock
);
1177 static int pp_set_watermarks_for_clocks_ranges(void *handle
,
1180 struct pp_hwmgr
*hwmgr
= handle
;
1183 if (!hwmgr
|| !hwmgr
->pm_en
|| !clock_ranges
)
1186 mutex_lock(&hwmgr
->smu_lock
);
1187 ret
= phm_set_watermarks_for_clocks_ranges(hwmgr
,
1189 mutex_unlock(&hwmgr
->smu_lock
);
1194 static int pp_display_clock_voltage_request(void *handle
,
1195 struct pp_display_clock_request
*clock
)
1197 struct pp_hwmgr
*hwmgr
= handle
;
1200 if (!hwmgr
|| !hwmgr
->pm_en
||!clock
)
1203 mutex_lock(&hwmgr
->smu_lock
);
1204 ret
= phm_display_clock_voltage_request(hwmgr
, clock
);
1205 mutex_unlock(&hwmgr
->smu_lock
);
1210 static int pp_get_display_mode_validation_clocks(void *handle
,
1211 struct amd_pp_simple_clock_info
*clocks
)
1213 struct pp_hwmgr
*hwmgr
= handle
;
1216 if (!hwmgr
|| !hwmgr
->pm_en
||!clocks
)
1219 clocks
->level
= PP_DAL_POWERLEVEL_7
;
1221 mutex_lock(&hwmgr
->smu_lock
);
1223 if (phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
, PHM_PlatformCaps_DynamicPatchPowerState
))
1224 ret
= phm_get_max_high_clocks(hwmgr
, clocks
);
1226 mutex_unlock(&hwmgr
->smu_lock
);
1230 static int pp_dpm_powergate_mmhub(void *handle
)
1232 struct pp_hwmgr
*hwmgr
= handle
;
1234 if (!hwmgr
|| !hwmgr
->pm_en
)
1237 if (hwmgr
->hwmgr_func
->powergate_mmhub
== NULL
) {
1238 pr_info_ratelimited("%s was not implemented.\n", __func__
);
1242 return hwmgr
->hwmgr_func
->powergate_mmhub(hwmgr
);
1245 static int pp_dpm_powergate_gfx(void *handle
, bool gate
)
1247 struct pp_hwmgr
*hwmgr
= handle
;
1249 if (!hwmgr
|| !hwmgr
->pm_en
)
1252 if (hwmgr
->hwmgr_func
->powergate_gfx
== NULL
) {
1253 pr_info_ratelimited("%s was not implemented.\n", __func__
);
1257 return hwmgr
->hwmgr_func
->powergate_gfx(hwmgr
, gate
);
1260 static void pp_dpm_powergate_acp(void *handle
, bool gate
)
1262 struct pp_hwmgr
*hwmgr
= handle
;
1264 if (!hwmgr
|| !hwmgr
->pm_en
)
1267 if (hwmgr
->hwmgr_func
->powergate_acp
== NULL
) {
1268 pr_info_ratelimited("%s was not implemented.\n", __func__
);
1272 hwmgr
->hwmgr_func
->powergate_acp(hwmgr
, gate
);
1275 static void pp_dpm_powergate_sdma(void *handle
, bool gate
)
1277 struct pp_hwmgr
*hwmgr
= handle
;
1282 if (hwmgr
->hwmgr_func
->powergate_sdma
== NULL
) {
1283 pr_info_ratelimited("%s was not implemented.\n", __func__
);
1287 hwmgr
->hwmgr_func
->powergate_sdma(hwmgr
, gate
);
1290 static int pp_set_powergating_by_smu(void *handle
,
1291 uint32_t block_type
, bool gate
)
1295 switch (block_type
) {
1296 case AMD_IP_BLOCK_TYPE_UVD
:
1297 case AMD_IP_BLOCK_TYPE_VCN
:
1298 pp_dpm_powergate_uvd(handle
, gate
);
1300 case AMD_IP_BLOCK_TYPE_VCE
:
1301 pp_dpm_powergate_vce(handle
, gate
);
1303 case AMD_IP_BLOCK_TYPE_GMC
:
1304 pp_dpm_powergate_mmhub(handle
);
1306 case AMD_IP_BLOCK_TYPE_GFX
:
1307 ret
= pp_dpm_powergate_gfx(handle
, gate
);
1309 case AMD_IP_BLOCK_TYPE_ACP
:
1310 pp_dpm_powergate_acp(handle
, gate
);
1312 case AMD_IP_BLOCK_TYPE_SDMA
:
1313 pp_dpm_powergate_sdma(handle
, gate
);
1321 static int pp_notify_smu_enable_pwe(void *handle
)
1323 struct pp_hwmgr
*hwmgr
= handle
;
1325 if (!hwmgr
|| !hwmgr
->pm_en
)
1328 if (hwmgr
->hwmgr_func
->smus_notify_pwe
== NULL
) {
1329 pr_info_ratelimited("%s was not implemented.\n", __func__
);
1333 mutex_lock(&hwmgr
->smu_lock
);
1334 hwmgr
->hwmgr_func
->smus_notify_pwe(hwmgr
);
1335 mutex_unlock(&hwmgr
->smu_lock
);
1340 static int pp_enable_mgpu_fan_boost(void *handle
)
1342 struct pp_hwmgr
*hwmgr
= handle
;
1347 if (!hwmgr
->pm_en
||
1348 hwmgr
->hwmgr_func
->enable_mgpu_fan_boost
== NULL
)
1351 mutex_lock(&hwmgr
->smu_lock
);
1352 hwmgr
->hwmgr_func
->enable_mgpu_fan_boost(hwmgr
);
1353 mutex_unlock(&hwmgr
->smu_lock
);
1358 static int pp_set_min_deep_sleep_dcefclk(void *handle
, uint32_t clock
)
1360 struct pp_hwmgr
*hwmgr
= handle
;
1362 if (!hwmgr
|| !hwmgr
->pm_en
)
1365 if (hwmgr
->hwmgr_func
->set_min_deep_sleep_dcefclk
== NULL
) {
1366 pr_debug("%s was not implemented.\n", __func__
);
1370 mutex_lock(&hwmgr
->smu_lock
);
1371 hwmgr
->hwmgr_func
->set_min_deep_sleep_dcefclk(hwmgr
, clock
);
1372 mutex_unlock(&hwmgr
->smu_lock
);
1377 static int pp_set_hard_min_dcefclk_by_freq(void *handle
, uint32_t clock
)
1379 struct pp_hwmgr
*hwmgr
= handle
;
1381 if (!hwmgr
|| !hwmgr
->pm_en
)
1384 if (hwmgr
->hwmgr_func
->set_hard_min_dcefclk_by_freq
== NULL
) {
1385 pr_debug("%s was not implemented.\n", __func__
);
1389 mutex_lock(&hwmgr
->smu_lock
);
1390 hwmgr
->hwmgr_func
->set_hard_min_dcefclk_by_freq(hwmgr
, clock
);
1391 mutex_unlock(&hwmgr
->smu_lock
);
1396 static int pp_set_hard_min_fclk_by_freq(void *handle
, uint32_t clock
)
1398 struct pp_hwmgr
*hwmgr
= handle
;
1400 if (!hwmgr
|| !hwmgr
->pm_en
)
1403 if (hwmgr
->hwmgr_func
->set_hard_min_fclk_by_freq
== NULL
) {
1404 pr_debug("%s was not implemented.\n", __func__
);
1408 mutex_lock(&hwmgr
->smu_lock
);
1409 hwmgr
->hwmgr_func
->set_hard_min_fclk_by_freq(hwmgr
, clock
);
1410 mutex_unlock(&hwmgr
->smu_lock
);
1415 static int pp_set_active_display_count(void *handle
, uint32_t count
)
1417 struct pp_hwmgr
*hwmgr
= handle
;
1420 if (!hwmgr
|| !hwmgr
->pm_en
)
1423 mutex_lock(&hwmgr
->smu_lock
);
1424 ret
= phm_set_active_display_count(hwmgr
, count
);
1425 mutex_unlock(&hwmgr
->smu_lock
);
1430 static int pp_get_asic_baco_capability(void *handle
, bool *cap
)
1432 struct pp_hwmgr
*hwmgr
= handle
;
1438 if (!hwmgr
->pm_en
|| !hwmgr
->hwmgr_func
->get_asic_baco_capability
)
1441 mutex_lock(&hwmgr
->smu_lock
);
1442 hwmgr
->hwmgr_func
->get_asic_baco_capability(hwmgr
, cap
);
1443 mutex_unlock(&hwmgr
->smu_lock
);
1448 static int pp_get_asic_baco_state(void *handle
, int *state
)
1450 struct pp_hwmgr
*hwmgr
= handle
;
1455 if (!hwmgr
->pm_en
|| !hwmgr
->hwmgr_func
->get_asic_baco_state
)
1458 mutex_lock(&hwmgr
->smu_lock
);
1459 hwmgr
->hwmgr_func
->get_asic_baco_state(hwmgr
, (enum BACO_STATE
*)state
);
1460 mutex_unlock(&hwmgr
->smu_lock
);
1465 static int pp_set_asic_baco_state(void *handle
, int state
)
1467 struct pp_hwmgr
*hwmgr
= handle
;
1472 if (!hwmgr
->pm_en
|| !hwmgr
->hwmgr_func
->set_asic_baco_state
)
1475 mutex_lock(&hwmgr
->smu_lock
);
1476 hwmgr
->hwmgr_func
->set_asic_baco_state(hwmgr
, (enum BACO_STATE
)state
);
1477 mutex_unlock(&hwmgr
->smu_lock
);
1482 static int pp_get_ppfeature_status(void *handle
, char *buf
)
1484 struct pp_hwmgr
*hwmgr
= handle
;
1487 if (!hwmgr
|| !hwmgr
->pm_en
|| !buf
)
1490 if (hwmgr
->hwmgr_func
->get_ppfeature_status
== NULL
) {
1491 pr_info_ratelimited("%s was not implemented.\n", __func__
);
1495 mutex_lock(&hwmgr
->smu_lock
);
1496 ret
= hwmgr
->hwmgr_func
->get_ppfeature_status(hwmgr
, buf
);
1497 mutex_unlock(&hwmgr
->smu_lock
);
1502 static int pp_set_ppfeature_status(void *handle
, uint64_t ppfeature_masks
)
1504 struct pp_hwmgr
*hwmgr
= handle
;
1507 if (!hwmgr
|| !hwmgr
->pm_en
)
1510 if (hwmgr
->hwmgr_func
->set_ppfeature_status
== NULL
) {
1511 pr_info_ratelimited("%s was not implemented.\n", __func__
);
1515 mutex_lock(&hwmgr
->smu_lock
);
1516 ret
= hwmgr
->hwmgr_func
->set_ppfeature_status(hwmgr
, ppfeature_masks
);
1517 mutex_unlock(&hwmgr
->smu_lock
);
1522 static int pp_asic_reset_mode_2(void *handle
)
1524 struct pp_hwmgr
*hwmgr
= handle
;
1527 if (!hwmgr
|| !hwmgr
->pm_en
)
1530 if (hwmgr
->hwmgr_func
->asic_reset
== NULL
) {
1531 pr_info_ratelimited("%s was not implemented.\n", __func__
);
1535 mutex_lock(&hwmgr
->smu_lock
);
1536 ret
= hwmgr
->hwmgr_func
->asic_reset(hwmgr
, SMU_ASIC_RESET_MODE_2
);
1537 mutex_unlock(&hwmgr
->smu_lock
);
1542 static int pp_smu_i2c_bus_access(void *handle
, bool acquire
)
1544 struct pp_hwmgr
*hwmgr
= handle
;
1547 if (!hwmgr
|| !hwmgr
->pm_en
)
1550 if (hwmgr
->hwmgr_func
->smu_i2c_bus_access
== NULL
) {
1551 pr_info_ratelimited("%s was not implemented.\n", __func__
);
1555 mutex_lock(&hwmgr
->smu_lock
);
1556 ret
= hwmgr
->hwmgr_func
->smu_i2c_bus_access(hwmgr
, acquire
);
1557 mutex_unlock(&hwmgr
->smu_lock
);
1562 static int pp_set_df_cstate(void *handle
, enum pp_df_cstate state
)
1564 struct pp_hwmgr
*hwmgr
= handle
;
1569 if (!hwmgr
->pm_en
|| !hwmgr
->hwmgr_func
->set_df_cstate
)
1572 mutex_lock(&hwmgr
->smu_lock
);
1573 hwmgr
->hwmgr_func
->set_df_cstate(hwmgr
, state
);
1574 mutex_unlock(&hwmgr
->smu_lock
);
1579 static int pp_set_xgmi_pstate(void *handle
, uint32_t pstate
)
1581 struct pp_hwmgr
*hwmgr
= handle
;
1586 if (!hwmgr
->pm_en
|| !hwmgr
->hwmgr_func
->set_xgmi_pstate
)
1589 mutex_lock(&hwmgr
->smu_lock
);
1590 hwmgr
->hwmgr_func
->set_xgmi_pstate(hwmgr
, pstate
);
1591 mutex_unlock(&hwmgr
->smu_lock
);
1596 static const struct amd_pm_funcs pp_dpm_funcs
= {
1597 .load_firmware
= pp_dpm_load_fw
,
1598 .wait_for_fw_loading_complete
= pp_dpm_fw_loading_complete
,
1599 .force_performance_level
= pp_dpm_force_performance_level
,
1600 .get_performance_level
= pp_dpm_get_performance_level
,
1601 .get_current_power_state
= pp_dpm_get_current_power_state
,
1602 .dispatch_tasks
= pp_dpm_dispatch_tasks
,
1603 .set_fan_control_mode
= pp_dpm_set_fan_control_mode
,
1604 .get_fan_control_mode
= pp_dpm_get_fan_control_mode
,
1605 .set_fan_speed_percent
= pp_dpm_set_fan_speed_percent
,
1606 .get_fan_speed_percent
= pp_dpm_get_fan_speed_percent
,
1607 .get_fan_speed_rpm
= pp_dpm_get_fan_speed_rpm
,
1608 .set_fan_speed_rpm
= pp_dpm_set_fan_speed_rpm
,
1609 .get_pp_num_states
= pp_dpm_get_pp_num_states
,
1610 .get_pp_table
= pp_dpm_get_pp_table
,
1611 .set_pp_table
= pp_dpm_set_pp_table
,
1612 .force_clock_level
= pp_dpm_force_clock_level
,
1613 .print_clock_levels
= pp_dpm_print_clock_levels
,
1614 .get_sclk_od
= pp_dpm_get_sclk_od
,
1615 .set_sclk_od
= pp_dpm_set_sclk_od
,
1616 .get_mclk_od
= pp_dpm_get_mclk_od
,
1617 .set_mclk_od
= pp_dpm_set_mclk_od
,
1618 .read_sensor
= pp_dpm_read_sensor
,
1619 .get_vce_clock_state
= pp_dpm_get_vce_clock_state
,
1620 .switch_power_profile
= pp_dpm_switch_power_profile
,
1621 .set_clockgating_by_smu
= pp_set_clockgating_by_smu
,
1622 .set_powergating_by_smu
= pp_set_powergating_by_smu
,
1623 .get_power_profile_mode
= pp_get_power_profile_mode
,
1624 .set_power_profile_mode
= pp_set_power_profile_mode
,
1625 .odn_edit_dpm_table
= pp_odn_edit_dpm_table
,
1626 .set_mp1_state
= pp_dpm_set_mp1_state
,
1627 .set_power_limit
= pp_set_power_limit
,
1628 .get_power_limit
= pp_get_power_limit
,
1630 .get_sclk
= pp_dpm_get_sclk
,
1631 .get_mclk
= pp_dpm_get_mclk
,
1632 .display_configuration_change
= pp_display_configuration_change
,
1633 .get_display_power_level
= pp_get_display_power_level
,
1634 .get_current_clocks
= pp_get_current_clocks
,
1635 .get_clock_by_type
= pp_get_clock_by_type
,
1636 .get_clock_by_type_with_latency
= pp_get_clock_by_type_with_latency
,
1637 .get_clock_by_type_with_voltage
= pp_get_clock_by_type_with_voltage
,
1638 .set_watermarks_for_clocks_ranges
= pp_set_watermarks_for_clocks_ranges
,
1639 .display_clock_voltage_request
= pp_display_clock_voltage_request
,
1640 .get_display_mode_validation_clocks
= pp_get_display_mode_validation_clocks
,
1641 .notify_smu_enable_pwe
= pp_notify_smu_enable_pwe
,
1642 .enable_mgpu_fan_boost
= pp_enable_mgpu_fan_boost
,
1643 .set_active_display_count
= pp_set_active_display_count
,
1644 .set_min_deep_sleep_dcefclk
= pp_set_min_deep_sleep_dcefclk
,
1645 .set_hard_min_dcefclk_by_freq
= pp_set_hard_min_dcefclk_by_freq
,
1646 .set_hard_min_fclk_by_freq
= pp_set_hard_min_fclk_by_freq
,
1647 .get_asic_baco_capability
= pp_get_asic_baco_capability
,
1648 .get_asic_baco_state
= pp_get_asic_baco_state
,
1649 .set_asic_baco_state
= pp_set_asic_baco_state
,
1650 .get_ppfeature_status
= pp_get_ppfeature_status
,
1651 .set_ppfeature_status
= pp_set_ppfeature_status
,
1652 .asic_reset_mode_2
= pp_asic_reset_mode_2
,
1653 .smu_i2c_bus_access
= pp_smu_i2c_bus_access
,
1654 .set_df_cstate
= pp_set_df_cstate
,
1655 .set_xgmi_pstate
= pp_set_xgmi_pstate
,