2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/gfp.h>
27 #include <linux/slab.h>
28 #include <linux/firmware.h>
29 #include "amd_shared.h"
30 #include "amd_powerplay.h"
31 #include "power_state.h"
36 static const struct amd_pm_funcs pp_dpm_funcs
;
38 static int amd_powerplay_create(struct amdgpu_device
*adev
)
40 struct pp_hwmgr
*hwmgr
;
45 hwmgr
= kzalloc(sizeof(struct pp_hwmgr
), GFP_KERNEL
);
50 hwmgr
->not_vf
= !amdgpu_sriov_vf(adev
);
51 hwmgr
->device
= amdgpu_cgs_create_device(adev
);
52 mutex_init(&hwmgr
->smu_lock
);
53 mutex_init(&hwmgr
->msg_lock
);
54 hwmgr
->chip_family
= adev
->family
;
55 hwmgr
->chip_id
= adev
->asic_type
;
56 hwmgr
->feature_mask
= adev
->pm
.pp_feature
;
57 hwmgr
->display_config
= &adev
->pm
.pm_display_cfg
;
58 adev
->powerplay
.pp_handle
= hwmgr
;
59 adev
->powerplay
.pp_funcs
= &pp_dpm_funcs
;
64 static void amd_powerplay_destroy(struct amdgpu_device
*adev
)
66 struct pp_hwmgr
*hwmgr
= adev
->powerplay
.pp_handle
;
68 mutex_destroy(&hwmgr
->msg_lock
);
70 kfree(hwmgr
->hardcode_pp_table
);
71 hwmgr
->hardcode_pp_table
= NULL
;
77 static int pp_early_init(void *handle
)
80 struct amdgpu_device
*adev
= handle
;
82 ret
= amd_powerplay_create(adev
);
87 ret
= hwmgr_early_init(adev
->powerplay
.pp_handle
);
94 static int pp_sw_init(void *handle
)
96 struct amdgpu_device
*adev
= handle
;
97 struct pp_hwmgr
*hwmgr
= adev
->powerplay
.pp_handle
;
100 ret
= hwmgr_sw_init(hwmgr
);
102 pr_debug("powerplay sw init %s\n", ret
? "failed" : "successfully");
107 static int pp_sw_fini(void *handle
)
109 struct amdgpu_device
*adev
= handle
;
110 struct pp_hwmgr
*hwmgr
= adev
->powerplay
.pp_handle
;
112 hwmgr_sw_fini(hwmgr
);
114 release_firmware(adev
->pm
.fw
);
120 static int pp_hw_init(void *handle
)
123 struct amdgpu_device
*adev
= handle
;
124 struct pp_hwmgr
*hwmgr
= adev
->powerplay
.pp_handle
;
126 ret
= hwmgr_hw_init(hwmgr
);
129 pr_err("powerplay hw init failed\n");
134 static int pp_hw_fini(void *handle
)
136 struct amdgpu_device
*adev
= handle
;
137 struct pp_hwmgr
*hwmgr
= adev
->powerplay
.pp_handle
;
139 hwmgr_hw_fini(hwmgr
);
144 static void pp_reserve_vram_for_smu(struct amdgpu_device
*adev
)
147 void *cpu_ptr
= NULL
;
149 struct pp_hwmgr
*hwmgr
= adev
->powerplay
.pp_handle
;
151 if (amdgpu_bo_create_kernel(adev
, adev
->pm
.smu_prv_buffer_size
,
152 PAGE_SIZE
, AMDGPU_GEM_DOMAIN_GTT
,
153 &adev
->pm
.smu_prv_buffer
,
156 DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
160 if (hwmgr
->hwmgr_func
->notify_cac_buffer_info
)
161 r
= hwmgr
->hwmgr_func
->notify_cac_buffer_info(hwmgr
,
162 lower_32_bits((unsigned long)cpu_ptr
),
163 upper_32_bits((unsigned long)cpu_ptr
),
164 lower_32_bits(gpu_addr
),
165 upper_32_bits(gpu_addr
),
166 adev
->pm
.smu_prv_buffer_size
);
169 amdgpu_bo_free_kernel(&adev
->pm
.smu_prv_buffer
, NULL
, NULL
);
170 adev
->pm
.smu_prv_buffer
= NULL
;
171 DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
175 static int pp_late_init(void *handle
)
177 struct amdgpu_device
*adev
= handle
;
178 struct pp_hwmgr
*hwmgr
= adev
->powerplay
.pp_handle
;
180 if (hwmgr
&& hwmgr
->pm_en
) {
181 mutex_lock(&hwmgr
->smu_lock
);
182 hwmgr_handle_task(hwmgr
,
183 AMD_PP_TASK_COMPLETE_INIT
, NULL
);
184 mutex_unlock(&hwmgr
->smu_lock
);
186 if (adev
->pm
.smu_prv_buffer_size
!= 0)
187 pp_reserve_vram_for_smu(adev
);
192 static void pp_late_fini(void *handle
)
194 struct amdgpu_device
*adev
= handle
;
196 if (adev
->pm
.smu_prv_buffer
)
197 amdgpu_bo_free_kernel(&adev
->pm
.smu_prv_buffer
, NULL
, NULL
);
198 amd_powerplay_destroy(adev
);
202 static bool pp_is_idle(void *handle
)
/* IP-block wait_for_idle stub: nothing to wait for. */
static int pp_wait_for_idle(void *handle)
{
	return 0;
}
/* IP-block soft_reset stub: powerplay has no soft-reset action. */
static int pp_sw_reset(void *handle)
{
	return 0;
}
217 static int pp_set_powergating_state(void *handle
,
218 enum amd_powergating_state state
)
223 static int pp_suspend(void *handle
)
225 struct amdgpu_device
*adev
= handle
;
226 struct pp_hwmgr
*hwmgr
= adev
->powerplay
.pp_handle
;
228 return hwmgr_suspend(hwmgr
);
231 static int pp_resume(void *handle
)
233 struct amdgpu_device
*adev
= handle
;
234 struct pp_hwmgr
*hwmgr
= adev
->powerplay
.pp_handle
;
236 return hwmgr_resume(hwmgr
);
239 static int pp_set_clockgating_state(void *handle
,
240 enum amd_clockgating_state state
)
245 static const struct amd_ip_funcs pp_ip_funcs
= {
247 .early_init
= pp_early_init
,
248 .late_init
= pp_late_init
,
249 .sw_init
= pp_sw_init
,
250 .sw_fini
= pp_sw_fini
,
251 .hw_init
= pp_hw_init
,
252 .hw_fini
= pp_hw_fini
,
253 .late_fini
= pp_late_fini
,
254 .suspend
= pp_suspend
,
256 .is_idle
= pp_is_idle
,
257 .wait_for_idle
= pp_wait_for_idle
,
258 .soft_reset
= pp_sw_reset
,
259 .set_clockgating_state
= pp_set_clockgating_state
,
260 .set_powergating_state
= pp_set_powergating_state
,
263 const struct amdgpu_ip_block_version pp_smu_ip_block
=
265 .type
= AMD_IP_BLOCK_TYPE_SMC
,
269 .funcs
= &pp_ip_funcs
,
/* This interface is only supported on VI,
 * because only smu7/8 can help to load gfx/sdma fw,
 * smu needs to be enabled before loading other IPs' fw,
 * so call start_smu to load smu7 fw and the other IPs' fw.
 */
277 static int pp_dpm_load_fw(void *handle
)
279 struct pp_hwmgr
*hwmgr
= handle
;
281 if (!hwmgr
|| !hwmgr
->smumgr_funcs
|| !hwmgr
->smumgr_funcs
->start_smu
)
284 if (hwmgr
->smumgr_funcs
->start_smu(hwmgr
)) {
285 pr_err("fw load failed\n");
/* Firmware-loading-complete hook: nothing to do for powerplay. */
static int pp_dpm_fw_loading_complete(void *handle)
{
	return 0;
}
297 static int pp_set_clockgating_by_smu(void *handle
, uint32_t msg_id
)
299 struct pp_hwmgr
*hwmgr
= handle
;
301 if (!hwmgr
|| !hwmgr
->pm_en
)
304 if (hwmgr
->hwmgr_func
->update_clock_gatings
== NULL
) {
305 pr_info_ratelimited("%s was not implemented.\n", __func__
);
309 return hwmgr
->hwmgr_func
->update_clock_gatings(hwmgr
, &msg_id
);
312 static void pp_dpm_en_umd_pstate(struct pp_hwmgr
*hwmgr
,
313 enum amd_dpm_forced_level
*level
)
315 uint32_t profile_mode_mask
= AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD
|
316 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK
|
317 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK
|
318 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
;
320 if (!(hwmgr
->dpm_level
& profile_mode_mask
)) {
321 /* enter umd pstate, save current level, disable gfx cg*/
322 if (*level
& profile_mode_mask
) {
323 hwmgr
->saved_dpm_level
= hwmgr
->dpm_level
;
324 hwmgr
->en_umd_pstate
= true;
325 amdgpu_device_ip_set_powergating_state(hwmgr
->adev
,
326 AMD_IP_BLOCK_TYPE_GFX
,
327 AMD_PG_STATE_UNGATE
);
328 amdgpu_device_ip_set_clockgating_state(hwmgr
->adev
,
329 AMD_IP_BLOCK_TYPE_GFX
,
330 AMD_CG_STATE_UNGATE
);
333 /* exit umd pstate, restore level, enable gfx cg*/
334 if (!(*level
& profile_mode_mask
)) {
335 if (*level
== AMD_DPM_FORCED_LEVEL_PROFILE_EXIT
)
336 *level
= hwmgr
->saved_dpm_level
;
337 hwmgr
->en_umd_pstate
= false;
338 amdgpu_device_ip_set_clockgating_state(hwmgr
->adev
,
339 AMD_IP_BLOCK_TYPE_GFX
,
341 amdgpu_device_ip_set_powergating_state(hwmgr
->adev
,
342 AMD_IP_BLOCK_TYPE_GFX
,
348 static int pp_dpm_force_performance_level(void *handle
,
349 enum amd_dpm_forced_level level
)
351 struct pp_hwmgr
*hwmgr
= handle
;
353 if (!hwmgr
|| !hwmgr
->pm_en
)
356 if (level
== hwmgr
->dpm_level
)
359 mutex_lock(&hwmgr
->smu_lock
);
360 pp_dpm_en_umd_pstate(hwmgr
, &level
);
361 hwmgr
->request_dpm_level
= level
;
362 hwmgr_handle_task(hwmgr
, AMD_PP_TASK_READJUST_POWER_STATE
, NULL
);
363 mutex_unlock(&hwmgr
->smu_lock
);
368 static enum amd_dpm_forced_level
pp_dpm_get_performance_level(
371 struct pp_hwmgr
*hwmgr
= handle
;
372 enum amd_dpm_forced_level level
;
374 if (!hwmgr
|| !hwmgr
->pm_en
)
377 mutex_lock(&hwmgr
->smu_lock
);
378 level
= hwmgr
->dpm_level
;
379 mutex_unlock(&hwmgr
->smu_lock
);
383 static uint32_t pp_dpm_get_sclk(void *handle
, bool low
)
385 struct pp_hwmgr
*hwmgr
= handle
;
388 if (!hwmgr
|| !hwmgr
->pm_en
)
391 if (hwmgr
->hwmgr_func
->get_sclk
== NULL
) {
392 pr_info_ratelimited("%s was not implemented.\n", __func__
);
395 mutex_lock(&hwmgr
->smu_lock
);
396 clk
= hwmgr
->hwmgr_func
->get_sclk(hwmgr
, low
);
397 mutex_unlock(&hwmgr
->smu_lock
);
401 static uint32_t pp_dpm_get_mclk(void *handle
, bool low
)
403 struct pp_hwmgr
*hwmgr
= handle
;
406 if (!hwmgr
|| !hwmgr
->pm_en
)
409 if (hwmgr
->hwmgr_func
->get_mclk
== NULL
) {
410 pr_info_ratelimited("%s was not implemented.\n", __func__
);
413 mutex_lock(&hwmgr
->smu_lock
);
414 clk
= hwmgr
->hwmgr_func
->get_mclk(hwmgr
, low
);
415 mutex_unlock(&hwmgr
->smu_lock
);
419 static void pp_dpm_powergate_vce(void *handle
, bool gate
)
421 struct pp_hwmgr
*hwmgr
= handle
;
423 if (!hwmgr
|| !hwmgr
->pm_en
)
426 if (hwmgr
->hwmgr_func
->powergate_vce
== NULL
) {
427 pr_info_ratelimited("%s was not implemented.\n", __func__
);
430 mutex_lock(&hwmgr
->smu_lock
);
431 hwmgr
->hwmgr_func
->powergate_vce(hwmgr
, gate
);
432 mutex_unlock(&hwmgr
->smu_lock
);
435 static void pp_dpm_powergate_uvd(void *handle
, bool gate
)
437 struct pp_hwmgr
*hwmgr
= handle
;
439 if (!hwmgr
|| !hwmgr
->pm_en
)
442 if (hwmgr
->hwmgr_func
->powergate_uvd
== NULL
) {
443 pr_info_ratelimited("%s was not implemented.\n", __func__
);
446 mutex_lock(&hwmgr
->smu_lock
);
447 hwmgr
->hwmgr_func
->powergate_uvd(hwmgr
, gate
);
448 mutex_unlock(&hwmgr
->smu_lock
);
451 static int pp_dpm_dispatch_tasks(void *handle
, enum amd_pp_task task_id
,
452 enum amd_pm_state_type
*user_state
)
455 struct pp_hwmgr
*hwmgr
= handle
;
457 if (!hwmgr
|| !hwmgr
->pm_en
)
460 mutex_lock(&hwmgr
->smu_lock
);
461 ret
= hwmgr_handle_task(hwmgr
, task_id
, user_state
);
462 mutex_unlock(&hwmgr
->smu_lock
);
467 static enum amd_pm_state_type
pp_dpm_get_current_power_state(void *handle
)
469 struct pp_hwmgr
*hwmgr
= handle
;
470 struct pp_power_state
*state
;
471 enum amd_pm_state_type pm_type
;
473 if (!hwmgr
|| !hwmgr
->pm_en
|| !hwmgr
->current_ps
)
476 mutex_lock(&hwmgr
->smu_lock
);
478 state
= hwmgr
->current_ps
;
480 switch (state
->classification
.ui_label
) {
481 case PP_StateUILabel_Battery
:
482 pm_type
= POWER_STATE_TYPE_BATTERY
;
484 case PP_StateUILabel_Balanced
:
485 pm_type
= POWER_STATE_TYPE_BALANCED
;
487 case PP_StateUILabel_Performance
:
488 pm_type
= POWER_STATE_TYPE_PERFORMANCE
;
491 if (state
->classification
.flags
& PP_StateClassificationFlag_Boot
)
492 pm_type
= POWER_STATE_TYPE_INTERNAL_BOOT
;
494 pm_type
= POWER_STATE_TYPE_DEFAULT
;
497 mutex_unlock(&hwmgr
->smu_lock
);
502 static void pp_dpm_set_fan_control_mode(void *handle
, uint32_t mode
)
504 struct pp_hwmgr
*hwmgr
= handle
;
506 if (!hwmgr
|| !hwmgr
->pm_en
)
509 if (hwmgr
->hwmgr_func
->set_fan_control_mode
== NULL
) {
510 pr_info_ratelimited("%s was not implemented.\n", __func__
);
513 mutex_lock(&hwmgr
->smu_lock
);
514 hwmgr
->hwmgr_func
->set_fan_control_mode(hwmgr
, mode
);
515 mutex_unlock(&hwmgr
->smu_lock
);
518 static uint32_t pp_dpm_get_fan_control_mode(void *handle
)
520 struct pp_hwmgr
*hwmgr
= handle
;
523 if (!hwmgr
|| !hwmgr
->pm_en
)
526 if (hwmgr
->hwmgr_func
->get_fan_control_mode
== NULL
) {
527 pr_info_ratelimited("%s was not implemented.\n", __func__
);
530 mutex_lock(&hwmgr
->smu_lock
);
531 mode
= hwmgr
->hwmgr_func
->get_fan_control_mode(hwmgr
);
532 mutex_unlock(&hwmgr
->smu_lock
);
536 static int pp_dpm_set_fan_speed_percent(void *handle
, uint32_t percent
)
538 struct pp_hwmgr
*hwmgr
= handle
;
541 if (!hwmgr
|| !hwmgr
->pm_en
)
544 if (hwmgr
->hwmgr_func
->set_fan_speed_percent
== NULL
) {
545 pr_info_ratelimited("%s was not implemented.\n", __func__
);
548 mutex_lock(&hwmgr
->smu_lock
);
549 ret
= hwmgr
->hwmgr_func
->set_fan_speed_percent(hwmgr
, percent
);
550 mutex_unlock(&hwmgr
->smu_lock
);
554 static int pp_dpm_get_fan_speed_percent(void *handle
, uint32_t *speed
)
556 struct pp_hwmgr
*hwmgr
= handle
;
559 if (!hwmgr
|| !hwmgr
->pm_en
)
562 if (hwmgr
->hwmgr_func
->get_fan_speed_percent
== NULL
) {
563 pr_info_ratelimited("%s was not implemented.\n", __func__
);
567 mutex_lock(&hwmgr
->smu_lock
);
568 ret
= hwmgr
->hwmgr_func
->get_fan_speed_percent(hwmgr
, speed
);
569 mutex_unlock(&hwmgr
->smu_lock
);
573 static int pp_dpm_get_fan_speed_rpm(void *handle
, uint32_t *rpm
)
575 struct pp_hwmgr
*hwmgr
= handle
;
578 if (!hwmgr
|| !hwmgr
->pm_en
)
581 if (hwmgr
->hwmgr_func
->get_fan_speed_rpm
== NULL
)
584 mutex_lock(&hwmgr
->smu_lock
);
585 ret
= hwmgr
->hwmgr_func
->get_fan_speed_rpm(hwmgr
, rpm
);
586 mutex_unlock(&hwmgr
->smu_lock
);
590 static int pp_dpm_set_fan_speed_rpm(void *handle
, uint32_t rpm
)
592 struct pp_hwmgr
*hwmgr
= handle
;
595 if (!hwmgr
|| !hwmgr
->pm_en
)
598 if (hwmgr
->hwmgr_func
->set_fan_speed_rpm
== NULL
) {
599 pr_info_ratelimited("%s was not implemented.\n", __func__
);
602 mutex_lock(&hwmgr
->smu_lock
);
603 ret
= hwmgr
->hwmgr_func
->set_fan_speed_rpm(hwmgr
, rpm
);
604 mutex_unlock(&hwmgr
->smu_lock
);
608 static int pp_dpm_get_pp_num_states(void *handle
,
609 struct pp_states_info
*data
)
611 struct pp_hwmgr
*hwmgr
= handle
;
614 memset(data
, 0, sizeof(*data
));
616 if (!hwmgr
|| !hwmgr
->pm_en
||!hwmgr
->ps
)
619 mutex_lock(&hwmgr
->smu_lock
);
621 data
->nums
= hwmgr
->num_ps
;
623 for (i
= 0; i
< hwmgr
->num_ps
; i
++) {
624 struct pp_power_state
*state
= (struct pp_power_state
*)
625 ((unsigned long)hwmgr
->ps
+ i
* hwmgr
->ps_size
);
626 switch (state
->classification
.ui_label
) {
627 case PP_StateUILabel_Battery
:
628 data
->states
[i
] = POWER_STATE_TYPE_BATTERY
;
630 case PP_StateUILabel_Balanced
:
631 data
->states
[i
] = POWER_STATE_TYPE_BALANCED
;
633 case PP_StateUILabel_Performance
:
634 data
->states
[i
] = POWER_STATE_TYPE_PERFORMANCE
;
637 if (state
->classification
.flags
& PP_StateClassificationFlag_Boot
)
638 data
->states
[i
] = POWER_STATE_TYPE_INTERNAL_BOOT
;
640 data
->states
[i
] = POWER_STATE_TYPE_DEFAULT
;
643 mutex_unlock(&hwmgr
->smu_lock
);
647 static int pp_dpm_get_pp_table(void *handle
, char **table
)
649 struct pp_hwmgr
*hwmgr
= handle
;
652 if (!hwmgr
|| !hwmgr
->pm_en
||!hwmgr
->soft_pp_table
)
655 mutex_lock(&hwmgr
->smu_lock
);
656 *table
= (char *)hwmgr
->soft_pp_table
;
657 size
= hwmgr
->soft_pp_table_size
;
658 mutex_unlock(&hwmgr
->smu_lock
);
662 static int amd_powerplay_reset(void *handle
)
664 struct pp_hwmgr
*hwmgr
= handle
;
667 ret
= hwmgr_hw_fini(hwmgr
);
671 ret
= hwmgr_hw_init(hwmgr
);
675 return hwmgr_handle_task(hwmgr
, AMD_PP_TASK_COMPLETE_INIT
, NULL
);
678 static int pp_dpm_set_pp_table(void *handle
, const char *buf
, size_t size
)
680 struct pp_hwmgr
*hwmgr
= handle
;
683 if (!hwmgr
|| !hwmgr
->pm_en
)
686 mutex_lock(&hwmgr
->smu_lock
);
687 if (!hwmgr
->hardcode_pp_table
) {
688 hwmgr
->hardcode_pp_table
= kmemdup(hwmgr
->soft_pp_table
,
689 hwmgr
->soft_pp_table_size
,
691 if (!hwmgr
->hardcode_pp_table
)
695 memcpy(hwmgr
->hardcode_pp_table
, buf
, size
);
697 hwmgr
->soft_pp_table
= hwmgr
->hardcode_pp_table
;
699 ret
= amd_powerplay_reset(handle
);
703 if (hwmgr
->hwmgr_func
->avfs_control
) {
704 ret
= hwmgr
->hwmgr_func
->avfs_control(hwmgr
, false);
708 mutex_unlock(&hwmgr
->smu_lock
);
711 mutex_unlock(&hwmgr
->smu_lock
);
715 static int pp_dpm_force_clock_level(void *handle
,
716 enum pp_clock_type type
, uint32_t mask
)
718 struct pp_hwmgr
*hwmgr
= handle
;
721 if (!hwmgr
|| !hwmgr
->pm_en
)
724 if (hwmgr
->hwmgr_func
->force_clock_level
== NULL
) {
725 pr_info_ratelimited("%s was not implemented.\n", __func__
);
729 if (hwmgr
->dpm_level
!= AMD_DPM_FORCED_LEVEL_MANUAL
) {
730 pr_debug("force clock level is for dpm manual mode only.\n");
734 mutex_lock(&hwmgr
->smu_lock
);
735 ret
= hwmgr
->hwmgr_func
->force_clock_level(hwmgr
, type
, mask
);
736 mutex_unlock(&hwmgr
->smu_lock
);
740 static int pp_dpm_print_clock_levels(void *handle
,
741 enum pp_clock_type type
, char *buf
)
743 struct pp_hwmgr
*hwmgr
= handle
;
746 if (!hwmgr
|| !hwmgr
->pm_en
)
749 if (hwmgr
->hwmgr_func
->print_clock_levels
== NULL
) {
750 pr_info_ratelimited("%s was not implemented.\n", __func__
);
753 mutex_lock(&hwmgr
->smu_lock
);
754 ret
= hwmgr
->hwmgr_func
->print_clock_levels(hwmgr
, type
, buf
);
755 mutex_unlock(&hwmgr
->smu_lock
);
759 static int pp_dpm_get_sclk_od(void *handle
)
761 struct pp_hwmgr
*hwmgr
= handle
;
764 if (!hwmgr
|| !hwmgr
->pm_en
)
767 if (hwmgr
->hwmgr_func
->get_sclk_od
== NULL
) {
768 pr_info_ratelimited("%s was not implemented.\n", __func__
);
771 mutex_lock(&hwmgr
->smu_lock
);
772 ret
= hwmgr
->hwmgr_func
->get_sclk_od(hwmgr
);
773 mutex_unlock(&hwmgr
->smu_lock
);
777 static int pp_dpm_set_sclk_od(void *handle
, uint32_t value
)
779 struct pp_hwmgr
*hwmgr
= handle
;
782 if (!hwmgr
|| !hwmgr
->pm_en
)
785 if (hwmgr
->hwmgr_func
->set_sclk_od
== NULL
) {
786 pr_info_ratelimited("%s was not implemented.\n", __func__
);
790 mutex_lock(&hwmgr
->smu_lock
);
791 ret
= hwmgr
->hwmgr_func
->set_sclk_od(hwmgr
, value
);
792 mutex_unlock(&hwmgr
->smu_lock
);
796 static int pp_dpm_get_mclk_od(void *handle
)
798 struct pp_hwmgr
*hwmgr
= handle
;
801 if (!hwmgr
|| !hwmgr
->pm_en
)
804 if (hwmgr
->hwmgr_func
->get_mclk_od
== NULL
) {
805 pr_info_ratelimited("%s was not implemented.\n", __func__
);
808 mutex_lock(&hwmgr
->smu_lock
);
809 ret
= hwmgr
->hwmgr_func
->get_mclk_od(hwmgr
);
810 mutex_unlock(&hwmgr
->smu_lock
);
814 static int pp_dpm_set_mclk_od(void *handle
, uint32_t value
)
816 struct pp_hwmgr
*hwmgr
= handle
;
819 if (!hwmgr
|| !hwmgr
->pm_en
)
822 if (hwmgr
->hwmgr_func
->set_mclk_od
== NULL
) {
823 pr_info_ratelimited("%s was not implemented.\n", __func__
);
826 mutex_lock(&hwmgr
->smu_lock
);
827 ret
= hwmgr
->hwmgr_func
->set_mclk_od(hwmgr
, value
);
828 mutex_unlock(&hwmgr
->smu_lock
);
832 static int pp_dpm_read_sensor(void *handle
, int idx
,
833 void *value
, int *size
)
835 struct pp_hwmgr
*hwmgr
= handle
;
838 if (!hwmgr
|| !hwmgr
->pm_en
|| !value
)
842 case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK
:
843 *((uint32_t *)value
) = hwmgr
->pstate_sclk
;
845 case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK
:
846 *((uint32_t *)value
) = hwmgr
->pstate_mclk
;
848 case AMDGPU_PP_SENSOR_MIN_FAN_RPM
:
849 *((uint32_t *)value
) = hwmgr
->thermal_controller
.fanInfo
.ulMinRPM
;
851 case AMDGPU_PP_SENSOR_MAX_FAN_RPM
:
852 *((uint32_t *)value
) = hwmgr
->thermal_controller
.fanInfo
.ulMaxRPM
;
855 mutex_lock(&hwmgr
->smu_lock
);
856 ret
= hwmgr
->hwmgr_func
->read_sensor(hwmgr
, idx
, value
, size
);
857 mutex_unlock(&hwmgr
->smu_lock
);
862 static struct amd_vce_state
*
863 pp_dpm_get_vce_clock_state(void *handle
, unsigned idx
)
865 struct pp_hwmgr
*hwmgr
= handle
;
867 if (!hwmgr
|| !hwmgr
->pm_en
)
870 if (idx
< hwmgr
->num_vce_state_tables
)
871 return &hwmgr
->vce_states
[idx
];
875 static int pp_get_power_profile_mode(void *handle
, char *buf
)
877 struct pp_hwmgr
*hwmgr
= handle
;
879 if (!hwmgr
|| !hwmgr
->pm_en
|| !buf
)
882 if (hwmgr
->hwmgr_func
->get_power_profile_mode
== NULL
) {
883 pr_info_ratelimited("%s was not implemented.\n", __func__
);
884 return snprintf(buf
, PAGE_SIZE
, "\n");
887 return hwmgr
->hwmgr_func
->get_power_profile_mode(hwmgr
, buf
);
890 static int pp_set_power_profile_mode(void *handle
, long *input
, uint32_t size
)
892 struct pp_hwmgr
*hwmgr
= handle
;
895 if (!hwmgr
|| !hwmgr
->pm_en
)
898 if (hwmgr
->hwmgr_func
->set_power_profile_mode
== NULL
) {
899 pr_info_ratelimited("%s was not implemented.\n", __func__
);
903 if (hwmgr
->dpm_level
!= AMD_DPM_FORCED_LEVEL_MANUAL
) {
904 pr_debug("power profile setting is for manual dpm mode only.\n");
908 mutex_lock(&hwmgr
->smu_lock
);
909 ret
= hwmgr
->hwmgr_func
->set_power_profile_mode(hwmgr
, input
, size
);
910 mutex_unlock(&hwmgr
->smu_lock
);
914 static int pp_set_fine_grain_clk_vol(void *handle
, uint32_t type
, long *input
, uint32_t size
)
916 struct pp_hwmgr
*hwmgr
= handle
;
918 if (!hwmgr
|| !hwmgr
->pm_en
)
921 if (hwmgr
->hwmgr_func
->set_fine_grain_clk_vol
== NULL
)
924 return hwmgr
->hwmgr_func
->set_fine_grain_clk_vol(hwmgr
, type
, input
, size
);
927 static int pp_odn_edit_dpm_table(void *handle
, uint32_t type
, long *input
, uint32_t size
)
929 struct pp_hwmgr
*hwmgr
= handle
;
931 if (!hwmgr
|| !hwmgr
->pm_en
)
934 if (hwmgr
->hwmgr_func
->odn_edit_dpm_table
== NULL
) {
935 pr_info_ratelimited("%s was not implemented.\n", __func__
);
939 return hwmgr
->hwmgr_func
->odn_edit_dpm_table(hwmgr
, type
, input
, size
);
942 static int pp_dpm_set_mp1_state(void *handle
, enum pp_mp1_state mp1_state
)
944 struct pp_hwmgr
*hwmgr
= handle
;
952 if (hwmgr
->hwmgr_func
->set_mp1_state
)
953 return hwmgr
->hwmgr_func
->set_mp1_state(hwmgr
, mp1_state
);
958 static int pp_dpm_switch_power_profile(void *handle
,
959 enum PP_SMC_POWER_PROFILE type
, bool en
)
961 struct pp_hwmgr
*hwmgr
= handle
;
965 if (!hwmgr
|| !hwmgr
->pm_en
)
968 if (hwmgr
->hwmgr_func
->set_power_profile_mode
== NULL
) {
969 pr_info_ratelimited("%s was not implemented.\n", __func__
);
973 if (!(type
< PP_SMC_POWER_PROFILE_CUSTOM
))
976 mutex_lock(&hwmgr
->smu_lock
);
979 hwmgr
->workload_mask
&= ~(1 << hwmgr
->workload_prority
[type
]);
980 index
= fls(hwmgr
->workload_mask
);
981 index
= index
> 0 && index
<= Workload_Policy_Max
? index
- 1 : 0;
982 workload
= hwmgr
->workload_setting
[index
];
984 hwmgr
->workload_mask
|= (1 << hwmgr
->workload_prority
[type
]);
985 index
= fls(hwmgr
->workload_mask
);
986 index
= index
<= Workload_Policy_Max
? index
- 1 : 0;
987 workload
= hwmgr
->workload_setting
[index
];
990 if (type
== PP_SMC_POWER_PROFILE_COMPUTE
&&
991 hwmgr
->hwmgr_func
->disable_power_features_for_compute_performance
) {
992 if (hwmgr
->hwmgr_func
->disable_power_features_for_compute_performance(hwmgr
, en
)) {
993 mutex_unlock(&hwmgr
->smu_lock
);
998 if (hwmgr
->dpm_level
!= AMD_DPM_FORCED_LEVEL_MANUAL
)
999 hwmgr
->hwmgr_func
->set_power_profile_mode(hwmgr
, &workload
, 0);
1000 mutex_unlock(&hwmgr
->smu_lock
);
1005 static int pp_set_power_limit(void *handle
, uint32_t limit
)
1007 struct pp_hwmgr
*hwmgr
= handle
;
1008 uint32_t max_power_limit
;
1010 if (!hwmgr
|| !hwmgr
->pm_en
)
1013 if (hwmgr
->hwmgr_func
->set_power_limit
== NULL
) {
1014 pr_info_ratelimited("%s was not implemented.\n", __func__
);
1019 limit
= hwmgr
->default_power_limit
;
1021 max_power_limit
= hwmgr
->default_power_limit
;
1022 if (hwmgr
->od_enabled
) {
1023 max_power_limit
*= (100 + hwmgr
->platform_descriptor
.TDPODLimit
);
1024 max_power_limit
/= 100;
1027 if (limit
> max_power_limit
)
1030 mutex_lock(&hwmgr
->smu_lock
);
1031 hwmgr
->hwmgr_func
->set_power_limit(hwmgr
, limit
);
1032 hwmgr
->power_limit
= limit
;
1033 mutex_unlock(&hwmgr
->smu_lock
);
1037 static int pp_get_power_limit(void *handle
, uint32_t *limit
, bool default_limit
)
1039 struct pp_hwmgr
*hwmgr
= handle
;
1041 if (!hwmgr
|| !hwmgr
->pm_en
||!limit
)
1044 mutex_lock(&hwmgr
->smu_lock
);
1046 if (default_limit
) {
1047 *limit
= hwmgr
->default_power_limit
;
1048 if (hwmgr
->od_enabled
) {
1049 *limit
*= (100 + hwmgr
->platform_descriptor
.TDPODLimit
);
1054 *limit
= hwmgr
->power_limit
;
1056 mutex_unlock(&hwmgr
->smu_lock
);
1061 static int pp_display_configuration_change(void *handle
,
1062 const struct amd_pp_display_configuration
*display_config
)
1064 struct pp_hwmgr
*hwmgr
= handle
;
1066 if (!hwmgr
|| !hwmgr
->pm_en
)
1069 mutex_lock(&hwmgr
->smu_lock
);
1070 phm_store_dal_configuration_data(hwmgr
, display_config
);
1071 mutex_unlock(&hwmgr
->smu_lock
);
1075 static int pp_get_display_power_level(void *handle
,
1076 struct amd_pp_simple_clock_info
*output
)
1078 struct pp_hwmgr
*hwmgr
= handle
;
1081 if (!hwmgr
|| !hwmgr
->pm_en
||!output
)
1084 mutex_lock(&hwmgr
->smu_lock
);
1085 ret
= phm_get_dal_power_level(hwmgr
, output
);
1086 mutex_unlock(&hwmgr
->smu_lock
);
1090 static int pp_get_current_clocks(void *handle
,
1091 struct amd_pp_clock_info
*clocks
)
1093 struct amd_pp_simple_clock_info simple_clocks
= { 0 };
1094 struct pp_clock_info hw_clocks
;
1095 struct pp_hwmgr
*hwmgr
= handle
;
1098 if (!hwmgr
|| !hwmgr
->pm_en
)
1101 mutex_lock(&hwmgr
->smu_lock
);
1103 phm_get_dal_power_level(hwmgr
, &simple_clocks
);
1105 if (phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
,
1106 PHM_PlatformCaps_PowerContainment
))
1107 ret
= phm_get_clock_info(hwmgr
, &hwmgr
->current_ps
->hardware
,
1108 &hw_clocks
, PHM_PerformanceLevelDesignation_PowerContainment
);
1110 ret
= phm_get_clock_info(hwmgr
, &hwmgr
->current_ps
->hardware
,
1111 &hw_clocks
, PHM_PerformanceLevelDesignation_Activity
);
1114 pr_debug("Error in phm_get_clock_info \n");
1115 mutex_unlock(&hwmgr
->smu_lock
);
1119 clocks
->min_engine_clock
= hw_clocks
.min_eng_clk
;
1120 clocks
->max_engine_clock
= hw_clocks
.max_eng_clk
;
1121 clocks
->min_memory_clock
= hw_clocks
.min_mem_clk
;
1122 clocks
->max_memory_clock
= hw_clocks
.max_mem_clk
;
1123 clocks
->min_bus_bandwidth
= hw_clocks
.min_bus_bandwidth
;
1124 clocks
->max_bus_bandwidth
= hw_clocks
.max_bus_bandwidth
;
1126 clocks
->max_engine_clock_in_sr
= hw_clocks
.max_eng_clk
;
1127 clocks
->min_engine_clock_in_sr
= hw_clocks
.min_eng_clk
;
1129 if (simple_clocks
.level
== 0)
1130 clocks
->max_clocks_state
= PP_DAL_POWERLEVEL_7
;
1132 clocks
->max_clocks_state
= simple_clocks
.level
;
1134 if (0 == phm_get_current_shallow_sleep_clocks(hwmgr
, &hwmgr
->current_ps
->hardware
, &hw_clocks
)) {
1135 clocks
->max_engine_clock_in_sr
= hw_clocks
.max_eng_clk
;
1136 clocks
->min_engine_clock_in_sr
= hw_clocks
.min_eng_clk
;
1138 mutex_unlock(&hwmgr
->smu_lock
);
1142 static int pp_get_clock_by_type(void *handle
, enum amd_pp_clock_type type
, struct amd_pp_clocks
*clocks
)
1144 struct pp_hwmgr
*hwmgr
= handle
;
1147 if (!hwmgr
|| !hwmgr
->pm_en
)
1153 mutex_lock(&hwmgr
->smu_lock
);
1154 ret
= phm_get_clock_by_type(hwmgr
, type
, clocks
);
1155 mutex_unlock(&hwmgr
->smu_lock
);
1159 static int pp_get_clock_by_type_with_latency(void *handle
,
1160 enum amd_pp_clock_type type
,
1161 struct pp_clock_levels_with_latency
*clocks
)
1163 struct pp_hwmgr
*hwmgr
= handle
;
1166 if (!hwmgr
|| !hwmgr
->pm_en
||!clocks
)
1169 mutex_lock(&hwmgr
->smu_lock
);
1170 ret
= phm_get_clock_by_type_with_latency(hwmgr
, type
, clocks
);
1171 mutex_unlock(&hwmgr
->smu_lock
);
1175 static int pp_get_clock_by_type_with_voltage(void *handle
,
1176 enum amd_pp_clock_type type
,
1177 struct pp_clock_levels_with_voltage
*clocks
)
1179 struct pp_hwmgr
*hwmgr
= handle
;
1182 if (!hwmgr
|| !hwmgr
->pm_en
||!clocks
)
1185 mutex_lock(&hwmgr
->smu_lock
);
1187 ret
= phm_get_clock_by_type_with_voltage(hwmgr
, type
, clocks
);
1189 mutex_unlock(&hwmgr
->smu_lock
);
1193 static int pp_set_watermarks_for_clocks_ranges(void *handle
,
1196 struct pp_hwmgr
*hwmgr
= handle
;
1199 if (!hwmgr
|| !hwmgr
->pm_en
|| !clock_ranges
)
1202 mutex_lock(&hwmgr
->smu_lock
);
1203 ret
= phm_set_watermarks_for_clocks_ranges(hwmgr
,
1205 mutex_unlock(&hwmgr
->smu_lock
);
1210 static int pp_display_clock_voltage_request(void *handle
,
1211 struct pp_display_clock_request
*clock
)
1213 struct pp_hwmgr
*hwmgr
= handle
;
1216 if (!hwmgr
|| !hwmgr
->pm_en
||!clock
)
1219 mutex_lock(&hwmgr
->smu_lock
);
1220 ret
= phm_display_clock_voltage_request(hwmgr
, clock
);
1221 mutex_unlock(&hwmgr
->smu_lock
);
1226 static int pp_get_display_mode_validation_clocks(void *handle
,
1227 struct amd_pp_simple_clock_info
*clocks
)
1229 struct pp_hwmgr
*hwmgr
= handle
;
1232 if (!hwmgr
|| !hwmgr
->pm_en
||!clocks
)
1235 clocks
->level
= PP_DAL_POWERLEVEL_7
;
1237 mutex_lock(&hwmgr
->smu_lock
);
1239 if (phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
, PHM_PlatformCaps_DynamicPatchPowerState
))
1240 ret
= phm_get_max_high_clocks(hwmgr
, clocks
);
1242 mutex_unlock(&hwmgr
->smu_lock
);
1246 static int pp_dpm_powergate_mmhub(void *handle
)
1248 struct pp_hwmgr
*hwmgr
= handle
;
1250 if (!hwmgr
|| !hwmgr
->pm_en
)
1253 if (hwmgr
->hwmgr_func
->powergate_mmhub
== NULL
) {
1254 pr_info_ratelimited("%s was not implemented.\n", __func__
);
1258 return hwmgr
->hwmgr_func
->powergate_mmhub(hwmgr
);
1261 static int pp_dpm_powergate_gfx(void *handle
, bool gate
)
1263 struct pp_hwmgr
*hwmgr
= handle
;
1265 if (!hwmgr
|| !hwmgr
->pm_en
)
1268 if (hwmgr
->hwmgr_func
->powergate_gfx
== NULL
) {
1269 pr_info_ratelimited("%s was not implemented.\n", __func__
);
1273 return hwmgr
->hwmgr_func
->powergate_gfx(hwmgr
, gate
);
1276 static void pp_dpm_powergate_acp(void *handle
, bool gate
)
1278 struct pp_hwmgr
*hwmgr
= handle
;
1280 if (!hwmgr
|| !hwmgr
->pm_en
)
1283 if (hwmgr
->hwmgr_func
->powergate_acp
== NULL
) {
1284 pr_info_ratelimited("%s was not implemented.\n", __func__
);
1288 hwmgr
->hwmgr_func
->powergate_acp(hwmgr
, gate
);
1291 static void pp_dpm_powergate_sdma(void *handle
, bool gate
)
1293 struct pp_hwmgr
*hwmgr
= handle
;
1298 if (hwmgr
->hwmgr_func
->powergate_sdma
== NULL
) {
1299 pr_info_ratelimited("%s was not implemented.\n", __func__
);
1303 hwmgr
->hwmgr_func
->powergate_sdma(hwmgr
, gate
);
1306 static int pp_set_powergating_by_smu(void *handle
,
1307 uint32_t block_type
, bool gate
)
1311 switch (block_type
) {
1312 case AMD_IP_BLOCK_TYPE_UVD
:
1313 case AMD_IP_BLOCK_TYPE_VCN
:
1314 pp_dpm_powergate_uvd(handle
, gate
);
1316 case AMD_IP_BLOCK_TYPE_VCE
:
1317 pp_dpm_powergate_vce(handle
, gate
);
1319 case AMD_IP_BLOCK_TYPE_GMC
:
1320 pp_dpm_powergate_mmhub(handle
);
1322 case AMD_IP_BLOCK_TYPE_GFX
:
1323 ret
= pp_dpm_powergate_gfx(handle
, gate
);
1325 case AMD_IP_BLOCK_TYPE_ACP
:
1326 pp_dpm_powergate_acp(handle
, gate
);
1328 case AMD_IP_BLOCK_TYPE_SDMA
:
1329 pp_dpm_powergate_sdma(handle
, gate
);
1337 static int pp_notify_smu_enable_pwe(void *handle
)
1339 struct pp_hwmgr
*hwmgr
= handle
;
1341 if (!hwmgr
|| !hwmgr
->pm_en
)
1344 if (hwmgr
->hwmgr_func
->smus_notify_pwe
== NULL
) {
1345 pr_info_ratelimited("%s was not implemented.\n", __func__
);
1349 mutex_lock(&hwmgr
->smu_lock
);
1350 hwmgr
->hwmgr_func
->smus_notify_pwe(hwmgr
);
1351 mutex_unlock(&hwmgr
->smu_lock
);
1356 static int pp_enable_mgpu_fan_boost(void *handle
)
1358 struct pp_hwmgr
*hwmgr
= handle
;
1363 if (!hwmgr
->pm_en
||
1364 hwmgr
->hwmgr_func
->enable_mgpu_fan_boost
== NULL
)
1367 mutex_lock(&hwmgr
->smu_lock
);
1368 hwmgr
->hwmgr_func
->enable_mgpu_fan_boost(hwmgr
);
1369 mutex_unlock(&hwmgr
->smu_lock
);
1374 static int pp_set_min_deep_sleep_dcefclk(void *handle
, uint32_t clock
)
1376 struct pp_hwmgr
*hwmgr
= handle
;
1378 if (!hwmgr
|| !hwmgr
->pm_en
)
1381 if (hwmgr
->hwmgr_func
->set_min_deep_sleep_dcefclk
== NULL
) {
1382 pr_debug("%s was not implemented.\n", __func__
);
1386 mutex_lock(&hwmgr
->smu_lock
);
1387 hwmgr
->hwmgr_func
->set_min_deep_sleep_dcefclk(hwmgr
, clock
);
1388 mutex_unlock(&hwmgr
->smu_lock
);
1393 static int pp_set_hard_min_dcefclk_by_freq(void *handle
, uint32_t clock
)
1395 struct pp_hwmgr
*hwmgr
= handle
;
1397 if (!hwmgr
|| !hwmgr
->pm_en
)
1400 if (hwmgr
->hwmgr_func
->set_hard_min_dcefclk_by_freq
== NULL
) {
1401 pr_debug("%s was not implemented.\n", __func__
);
1405 mutex_lock(&hwmgr
->smu_lock
);
1406 hwmgr
->hwmgr_func
->set_hard_min_dcefclk_by_freq(hwmgr
, clock
);
1407 mutex_unlock(&hwmgr
->smu_lock
);
1412 static int pp_set_hard_min_fclk_by_freq(void *handle
, uint32_t clock
)
1414 struct pp_hwmgr
*hwmgr
= handle
;
1416 if (!hwmgr
|| !hwmgr
->pm_en
)
1419 if (hwmgr
->hwmgr_func
->set_hard_min_fclk_by_freq
== NULL
) {
1420 pr_debug("%s was not implemented.\n", __func__
);
1424 mutex_lock(&hwmgr
->smu_lock
);
1425 hwmgr
->hwmgr_func
->set_hard_min_fclk_by_freq(hwmgr
, clock
);
1426 mutex_unlock(&hwmgr
->smu_lock
);
1431 static int pp_set_active_display_count(void *handle
, uint32_t count
)
1433 struct pp_hwmgr
*hwmgr
= handle
;
1436 if (!hwmgr
|| !hwmgr
->pm_en
)
1439 mutex_lock(&hwmgr
->smu_lock
);
1440 ret
= phm_set_active_display_count(hwmgr
, count
);
1441 mutex_unlock(&hwmgr
->smu_lock
);
1446 static int pp_get_asic_baco_capability(void *handle
, bool *cap
)
1448 struct pp_hwmgr
*hwmgr
= handle
;
1454 if (!(hwmgr
->not_vf
&& amdgpu_dpm
) ||
1455 !hwmgr
->hwmgr_func
->get_asic_baco_capability
)
1458 mutex_lock(&hwmgr
->smu_lock
);
1459 hwmgr
->hwmgr_func
->get_asic_baco_capability(hwmgr
, cap
);
1460 mutex_unlock(&hwmgr
->smu_lock
);
1465 static int pp_get_asic_baco_state(void *handle
, int *state
)
1467 struct pp_hwmgr
*hwmgr
= handle
;
1472 if (!hwmgr
->pm_en
|| !hwmgr
->hwmgr_func
->get_asic_baco_state
)
1475 mutex_lock(&hwmgr
->smu_lock
);
1476 hwmgr
->hwmgr_func
->get_asic_baco_state(hwmgr
, (enum BACO_STATE
*)state
);
1477 mutex_unlock(&hwmgr
->smu_lock
);
1482 static int pp_set_asic_baco_state(void *handle
, int state
)
1484 struct pp_hwmgr
*hwmgr
= handle
;
1489 if (!(hwmgr
->not_vf
&& amdgpu_dpm
) ||
1490 !hwmgr
->hwmgr_func
->set_asic_baco_state
)
1493 mutex_lock(&hwmgr
->smu_lock
);
1494 hwmgr
->hwmgr_func
->set_asic_baco_state(hwmgr
, (enum BACO_STATE
)state
);
1495 mutex_unlock(&hwmgr
->smu_lock
);
1500 static int pp_get_ppfeature_status(void *handle
, char *buf
)
1502 struct pp_hwmgr
*hwmgr
= handle
;
1505 if (!hwmgr
|| !hwmgr
->pm_en
|| !buf
)
1508 if (hwmgr
->hwmgr_func
->get_ppfeature_status
== NULL
) {
1509 pr_info_ratelimited("%s was not implemented.\n", __func__
);
1513 mutex_lock(&hwmgr
->smu_lock
);
1514 ret
= hwmgr
->hwmgr_func
->get_ppfeature_status(hwmgr
, buf
);
1515 mutex_unlock(&hwmgr
->smu_lock
);
1520 static int pp_set_ppfeature_status(void *handle
, uint64_t ppfeature_masks
)
1522 struct pp_hwmgr
*hwmgr
= handle
;
1525 if (!hwmgr
|| !hwmgr
->pm_en
)
1528 if (hwmgr
->hwmgr_func
->set_ppfeature_status
== NULL
) {
1529 pr_info_ratelimited("%s was not implemented.\n", __func__
);
1533 mutex_lock(&hwmgr
->smu_lock
);
1534 ret
= hwmgr
->hwmgr_func
->set_ppfeature_status(hwmgr
, ppfeature_masks
);
1535 mutex_unlock(&hwmgr
->smu_lock
);
1540 static int pp_asic_reset_mode_2(void *handle
)
1542 struct pp_hwmgr
*hwmgr
= handle
;
1545 if (!hwmgr
|| !hwmgr
->pm_en
)
1548 if (hwmgr
->hwmgr_func
->asic_reset
== NULL
) {
1549 pr_info_ratelimited("%s was not implemented.\n", __func__
);
1553 mutex_lock(&hwmgr
->smu_lock
);
1554 ret
= hwmgr
->hwmgr_func
->asic_reset(hwmgr
, SMU_ASIC_RESET_MODE_2
);
1555 mutex_unlock(&hwmgr
->smu_lock
);
1560 static int pp_smu_i2c_bus_access(void *handle
, bool acquire
)
1562 struct pp_hwmgr
*hwmgr
= handle
;
1565 if (!hwmgr
|| !hwmgr
->pm_en
)
1568 if (hwmgr
->hwmgr_func
->smu_i2c_bus_access
== NULL
) {
1569 pr_info_ratelimited("%s was not implemented.\n", __func__
);
1573 mutex_lock(&hwmgr
->smu_lock
);
1574 ret
= hwmgr
->hwmgr_func
->smu_i2c_bus_access(hwmgr
, acquire
);
1575 mutex_unlock(&hwmgr
->smu_lock
);
1580 static int pp_set_df_cstate(void *handle
, enum pp_df_cstate state
)
1582 struct pp_hwmgr
*hwmgr
= handle
;
1587 if (!hwmgr
->pm_en
|| !hwmgr
->hwmgr_func
->set_df_cstate
)
1590 mutex_lock(&hwmgr
->smu_lock
);
1591 hwmgr
->hwmgr_func
->set_df_cstate(hwmgr
, state
);
1592 mutex_unlock(&hwmgr
->smu_lock
);
1597 static int pp_set_xgmi_pstate(void *handle
, uint32_t pstate
)
1599 struct pp_hwmgr
*hwmgr
= handle
;
1604 if (!hwmgr
->pm_en
|| !hwmgr
->hwmgr_func
->set_xgmi_pstate
)
1607 mutex_lock(&hwmgr
->smu_lock
);
1608 hwmgr
->hwmgr_func
->set_xgmi_pstate(hwmgr
, pstate
);
1609 mutex_unlock(&hwmgr
->smu_lock
);
1614 static ssize_t
pp_get_gpu_metrics(void *handle
, void **table
)
1616 struct pp_hwmgr
*hwmgr
= handle
;
1622 if (!hwmgr
->pm_en
|| !hwmgr
->hwmgr_func
->get_gpu_metrics
)
1625 mutex_lock(&hwmgr
->smu_lock
);
1626 size
= hwmgr
->hwmgr_func
->get_gpu_metrics(hwmgr
, table
);
1627 mutex_unlock(&hwmgr
->smu_lock
);
1632 static int pp_gfx_state_change_set(void *handle
, uint32_t state
)
1634 struct pp_hwmgr
*hwmgr
= handle
;
1636 if (!hwmgr
|| !hwmgr
->pm_en
)
1639 if (hwmgr
->hwmgr_func
->gfx_state_change
== NULL
) {
1640 pr_info_ratelimited("%s was not implemented.\n", __func__
);
1644 mutex_lock(&hwmgr
->smu_lock
);
1645 hwmgr
->hwmgr_func
->gfx_state_change(hwmgr
, state
);
1646 mutex_unlock(&hwmgr
->smu_lock
);
1650 static const struct amd_pm_funcs pp_dpm_funcs
= {
1651 .load_firmware
= pp_dpm_load_fw
,
1652 .wait_for_fw_loading_complete
= pp_dpm_fw_loading_complete
,
1653 .force_performance_level
= pp_dpm_force_performance_level
,
1654 .get_performance_level
= pp_dpm_get_performance_level
,
1655 .get_current_power_state
= pp_dpm_get_current_power_state
,
1656 .dispatch_tasks
= pp_dpm_dispatch_tasks
,
1657 .set_fan_control_mode
= pp_dpm_set_fan_control_mode
,
1658 .get_fan_control_mode
= pp_dpm_get_fan_control_mode
,
1659 .set_fan_speed_percent
= pp_dpm_set_fan_speed_percent
,
1660 .get_fan_speed_percent
= pp_dpm_get_fan_speed_percent
,
1661 .get_fan_speed_rpm
= pp_dpm_get_fan_speed_rpm
,
1662 .set_fan_speed_rpm
= pp_dpm_set_fan_speed_rpm
,
1663 .get_pp_num_states
= pp_dpm_get_pp_num_states
,
1664 .get_pp_table
= pp_dpm_get_pp_table
,
1665 .set_pp_table
= pp_dpm_set_pp_table
,
1666 .force_clock_level
= pp_dpm_force_clock_level
,
1667 .print_clock_levels
= pp_dpm_print_clock_levels
,
1668 .get_sclk_od
= pp_dpm_get_sclk_od
,
1669 .set_sclk_od
= pp_dpm_set_sclk_od
,
1670 .get_mclk_od
= pp_dpm_get_mclk_od
,
1671 .set_mclk_od
= pp_dpm_set_mclk_od
,
1672 .read_sensor
= pp_dpm_read_sensor
,
1673 .get_vce_clock_state
= pp_dpm_get_vce_clock_state
,
1674 .switch_power_profile
= pp_dpm_switch_power_profile
,
1675 .set_clockgating_by_smu
= pp_set_clockgating_by_smu
,
1676 .set_powergating_by_smu
= pp_set_powergating_by_smu
,
1677 .get_power_profile_mode
= pp_get_power_profile_mode
,
1678 .set_power_profile_mode
= pp_set_power_profile_mode
,
1679 .set_fine_grain_clk_vol
= pp_set_fine_grain_clk_vol
,
1680 .odn_edit_dpm_table
= pp_odn_edit_dpm_table
,
1681 .set_mp1_state
= pp_dpm_set_mp1_state
,
1682 .set_power_limit
= pp_set_power_limit
,
1683 .get_power_limit
= pp_get_power_limit
,
1685 .get_sclk
= pp_dpm_get_sclk
,
1686 .get_mclk
= pp_dpm_get_mclk
,
1687 .display_configuration_change
= pp_display_configuration_change
,
1688 .get_display_power_level
= pp_get_display_power_level
,
1689 .get_current_clocks
= pp_get_current_clocks
,
1690 .get_clock_by_type
= pp_get_clock_by_type
,
1691 .get_clock_by_type_with_latency
= pp_get_clock_by_type_with_latency
,
1692 .get_clock_by_type_with_voltage
= pp_get_clock_by_type_with_voltage
,
1693 .set_watermarks_for_clocks_ranges
= pp_set_watermarks_for_clocks_ranges
,
1694 .display_clock_voltage_request
= pp_display_clock_voltage_request
,
1695 .get_display_mode_validation_clocks
= pp_get_display_mode_validation_clocks
,
1696 .notify_smu_enable_pwe
= pp_notify_smu_enable_pwe
,
1697 .enable_mgpu_fan_boost
= pp_enable_mgpu_fan_boost
,
1698 .set_active_display_count
= pp_set_active_display_count
,
1699 .set_min_deep_sleep_dcefclk
= pp_set_min_deep_sleep_dcefclk
,
1700 .set_hard_min_dcefclk_by_freq
= pp_set_hard_min_dcefclk_by_freq
,
1701 .set_hard_min_fclk_by_freq
= pp_set_hard_min_fclk_by_freq
,
1702 .get_asic_baco_capability
= pp_get_asic_baco_capability
,
1703 .get_asic_baco_state
= pp_get_asic_baco_state
,
1704 .set_asic_baco_state
= pp_set_asic_baco_state
,
1705 .get_ppfeature_status
= pp_get_ppfeature_status
,
1706 .set_ppfeature_status
= pp_set_ppfeature_status
,
1707 .asic_reset_mode_2
= pp_asic_reset_mode_2
,
1708 .smu_i2c_bus_access
= pp_smu_i2c_bus_access
,
1709 .set_df_cstate
= pp_set_df_cstate
,
1710 .set_xgmi_pstate
= pp_set_xgmi_pstate
,
1711 .get_gpu_metrics
= pp_get_gpu_metrics
,
1712 .gfx_state_change_set
= pp_gfx_state_change_set
,