2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/gfp.h>
27 #include <linux/slab.h>
28 #include <linux/firmware.h>
29 #include "amd_shared.h"
30 #include "amd_powerplay.h"
31 #include "power_state.h"
36 static const struct amd_pm_funcs pp_dpm_funcs
;
38 static int amd_powerplay_create(struct amdgpu_device
*adev
)
40 struct pp_hwmgr
*hwmgr
;
45 hwmgr
= kzalloc(sizeof(struct pp_hwmgr
), GFP_KERNEL
);
50 hwmgr
->not_vf
= !amdgpu_sriov_vf(adev
);
51 hwmgr
->pm_en
= (amdgpu_dpm
&& hwmgr
->not_vf
) ? true : false;
52 hwmgr
->device
= amdgpu_cgs_create_device(adev
);
53 mutex_init(&hwmgr
->smu_lock
);
54 hwmgr
->chip_family
= adev
->family
;
55 hwmgr
->chip_id
= adev
->asic_type
;
56 hwmgr
->feature_mask
= adev
->powerplay
.pp_feature
;
57 hwmgr
->display_config
= &adev
->pm
.pm_display_cfg
;
58 adev
->powerplay
.pp_handle
= hwmgr
;
59 adev
->powerplay
.pp_funcs
= &pp_dpm_funcs
;
64 static void amd_powerplay_destroy(struct amdgpu_device
*adev
)
66 struct pp_hwmgr
*hwmgr
= adev
->powerplay
.pp_handle
;
68 kfree(hwmgr
->hardcode_pp_table
);
69 hwmgr
->hardcode_pp_table
= NULL
;
75 static int pp_early_init(void *handle
)
78 struct amdgpu_device
*adev
= handle
;
80 ret
= amd_powerplay_create(adev
);
85 ret
= hwmgr_early_init(adev
->powerplay
.pp_handle
);
92 static int pp_sw_init(void *handle
)
94 struct amdgpu_device
*adev
= handle
;
95 struct pp_hwmgr
*hwmgr
= adev
->powerplay
.pp_handle
;
98 ret
= hwmgr_sw_init(hwmgr
);
100 pr_debug("powerplay sw init %s\n", ret
? "failed" : "successfully");
105 static int pp_sw_fini(void *handle
)
107 struct amdgpu_device
*adev
= handle
;
108 struct pp_hwmgr
*hwmgr
= adev
->powerplay
.pp_handle
;
110 hwmgr_sw_fini(hwmgr
);
112 if (adev
->firmware
.load_type
== AMDGPU_FW_LOAD_SMU
) {
113 release_firmware(adev
->pm
.fw
);
115 amdgpu_ucode_fini_bo(adev
);
121 static int pp_hw_init(void *handle
)
124 struct amdgpu_device
*adev
= handle
;
125 struct pp_hwmgr
*hwmgr
= adev
->powerplay
.pp_handle
;
127 if (adev
->firmware
.load_type
== AMDGPU_FW_LOAD_SMU
)
128 amdgpu_ucode_init_bo(adev
);
130 ret
= hwmgr_hw_init(hwmgr
);
133 pr_err("powerplay hw init failed\n");
138 static int pp_hw_fini(void *handle
)
140 struct amdgpu_device
*adev
= handle
;
141 struct pp_hwmgr
*hwmgr
= adev
->powerplay
.pp_handle
;
143 hwmgr_hw_fini(hwmgr
);
148 static void pp_reserve_vram_for_smu(struct amdgpu_device
*adev
)
151 void *cpu_ptr
= NULL
;
153 struct pp_hwmgr
*hwmgr
= adev
->powerplay
.pp_handle
;
155 if (amdgpu_bo_create_kernel(adev
, adev
->pm
.smu_prv_buffer_size
,
156 PAGE_SIZE
, AMDGPU_GEM_DOMAIN_GTT
,
157 &adev
->pm
.smu_prv_buffer
,
160 DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
164 if (hwmgr
->hwmgr_func
->notify_cac_buffer_info
)
165 r
= hwmgr
->hwmgr_func
->notify_cac_buffer_info(hwmgr
,
166 lower_32_bits((unsigned long)cpu_ptr
),
167 upper_32_bits((unsigned long)cpu_ptr
),
168 lower_32_bits(gpu_addr
),
169 upper_32_bits(gpu_addr
),
170 adev
->pm
.smu_prv_buffer_size
);
173 amdgpu_bo_free_kernel(&adev
->pm
.smu_prv_buffer
, NULL
, NULL
);
174 adev
->pm
.smu_prv_buffer
= NULL
;
175 DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
179 static int pp_late_init(void *handle
)
181 struct amdgpu_device
*adev
= handle
;
182 struct pp_hwmgr
*hwmgr
= adev
->powerplay
.pp_handle
;
184 if (hwmgr
&& hwmgr
->pm_en
) {
185 mutex_lock(&hwmgr
->smu_lock
);
186 hwmgr_handle_task(hwmgr
,
187 AMD_PP_TASK_COMPLETE_INIT
, NULL
);
188 mutex_unlock(&hwmgr
->smu_lock
);
190 if (adev
->pm
.smu_prv_buffer_size
!= 0)
191 pp_reserve_vram_for_smu(adev
);
196 static void pp_late_fini(void *handle
)
198 struct amdgpu_device
*adev
= handle
;
200 if (adev
->pm
.smu_prv_buffer
)
201 amdgpu_bo_free_kernel(&adev
->pm
.smu_prv_buffer
, NULL
, NULL
);
202 amd_powerplay_destroy(adev
);
206 static bool pp_is_idle(void *handle
)
/* IP-block wait_for_idle stub: nothing to wait for. */
static int pp_wait_for_idle(void *handle)
{
	return 0;
}
/* IP-block soft_reset stub: powerplay has no soft-reset action. */
static int pp_sw_reset(void *handle)
{
	return 0;
}
221 static int pp_set_powergating_state(void *handle
,
222 enum amd_powergating_state state
)
227 static int pp_suspend(void *handle
)
229 struct amdgpu_device
*adev
= handle
;
230 struct pp_hwmgr
*hwmgr
= adev
->powerplay
.pp_handle
;
232 return hwmgr_suspend(hwmgr
);
235 static int pp_resume(void *handle
)
237 struct amdgpu_device
*adev
= handle
;
238 struct pp_hwmgr
*hwmgr
= adev
->powerplay
.pp_handle
;
240 return hwmgr_resume(hwmgr
);
243 static int pp_set_clockgating_state(void *handle
,
244 enum amd_clockgating_state state
)
249 static const struct amd_ip_funcs pp_ip_funcs
= {
251 .early_init
= pp_early_init
,
252 .late_init
= pp_late_init
,
253 .sw_init
= pp_sw_init
,
254 .sw_fini
= pp_sw_fini
,
255 .hw_init
= pp_hw_init
,
256 .hw_fini
= pp_hw_fini
,
257 .late_fini
= pp_late_fini
,
258 .suspend
= pp_suspend
,
260 .is_idle
= pp_is_idle
,
261 .wait_for_idle
= pp_wait_for_idle
,
262 .soft_reset
= pp_sw_reset
,
263 .set_clockgating_state
= pp_set_clockgating_state
,
264 .set_powergating_state
= pp_set_powergating_state
,
267 const struct amdgpu_ip_block_version pp_smu_ip_block
=
269 .type
= AMD_IP_BLOCK_TYPE_SMC
,
273 .funcs
= &pp_ip_funcs
,
/* amd_pm_funcs load_firmware stub: firmware load is handled elsewhere. */
static int pp_dpm_load_fw(void *handle)
{
	return 0;
}
/* amd_pm_funcs wait_for_fw_loading_complete stub: nothing to wait for. */
static int pp_dpm_fw_loading_complete(void *handle)
{
	return 0;
}
286 static int pp_set_clockgating_by_smu(void *handle
, uint32_t msg_id
)
288 struct pp_hwmgr
*hwmgr
= handle
;
290 if (!hwmgr
|| !hwmgr
->pm_en
)
293 if (hwmgr
->hwmgr_func
->update_clock_gatings
== NULL
) {
294 pr_info("%s was not implemented.\n", __func__
);
298 return hwmgr
->hwmgr_func
->update_clock_gatings(hwmgr
, &msg_id
);
301 static void pp_dpm_en_umd_pstate(struct pp_hwmgr
*hwmgr
,
302 enum amd_dpm_forced_level
*level
)
304 uint32_t profile_mode_mask
= AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD
|
305 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK
|
306 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK
|
307 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK
;
309 if (!(hwmgr
->dpm_level
& profile_mode_mask
)) {
310 /* enter umd pstate, save current level, disable gfx cg*/
311 if (*level
& profile_mode_mask
) {
312 hwmgr
->saved_dpm_level
= hwmgr
->dpm_level
;
313 hwmgr
->en_umd_pstate
= true;
314 amdgpu_device_ip_set_clockgating_state(hwmgr
->adev
,
315 AMD_IP_BLOCK_TYPE_GFX
,
316 AMD_CG_STATE_UNGATE
);
317 amdgpu_device_ip_set_powergating_state(hwmgr
->adev
,
318 AMD_IP_BLOCK_TYPE_GFX
,
319 AMD_PG_STATE_UNGATE
);
322 /* exit umd pstate, restore level, enable gfx cg*/
323 if (!(*level
& profile_mode_mask
)) {
324 if (*level
== AMD_DPM_FORCED_LEVEL_PROFILE_EXIT
)
325 *level
= hwmgr
->saved_dpm_level
;
326 hwmgr
->en_umd_pstate
= false;
327 amdgpu_device_ip_set_clockgating_state(hwmgr
->adev
,
328 AMD_IP_BLOCK_TYPE_GFX
,
330 amdgpu_device_ip_set_powergating_state(hwmgr
->adev
,
331 AMD_IP_BLOCK_TYPE_GFX
,
337 static int pp_dpm_force_performance_level(void *handle
,
338 enum amd_dpm_forced_level level
)
340 struct pp_hwmgr
*hwmgr
= handle
;
342 if (!hwmgr
|| !hwmgr
->pm_en
)
345 if (level
== hwmgr
->dpm_level
)
348 mutex_lock(&hwmgr
->smu_lock
);
349 pp_dpm_en_umd_pstate(hwmgr
, &level
);
350 hwmgr
->request_dpm_level
= level
;
351 hwmgr_handle_task(hwmgr
, AMD_PP_TASK_READJUST_POWER_STATE
, NULL
);
352 mutex_unlock(&hwmgr
->smu_lock
);
357 static enum amd_dpm_forced_level
pp_dpm_get_performance_level(
360 struct pp_hwmgr
*hwmgr
= handle
;
361 enum amd_dpm_forced_level level
;
363 if (!hwmgr
|| !hwmgr
->pm_en
)
366 mutex_lock(&hwmgr
->smu_lock
);
367 level
= hwmgr
->dpm_level
;
368 mutex_unlock(&hwmgr
->smu_lock
);
372 static uint32_t pp_dpm_get_sclk(void *handle
, bool low
)
374 struct pp_hwmgr
*hwmgr
= handle
;
377 if (!hwmgr
|| !hwmgr
->pm_en
)
380 if (hwmgr
->hwmgr_func
->get_sclk
== NULL
) {
381 pr_info("%s was not implemented.\n", __func__
);
384 mutex_lock(&hwmgr
->smu_lock
);
385 clk
= hwmgr
->hwmgr_func
->get_sclk(hwmgr
, low
);
386 mutex_unlock(&hwmgr
->smu_lock
);
390 static uint32_t pp_dpm_get_mclk(void *handle
, bool low
)
392 struct pp_hwmgr
*hwmgr
= handle
;
395 if (!hwmgr
|| !hwmgr
->pm_en
)
398 if (hwmgr
->hwmgr_func
->get_mclk
== NULL
) {
399 pr_info("%s was not implemented.\n", __func__
);
402 mutex_lock(&hwmgr
->smu_lock
);
403 clk
= hwmgr
->hwmgr_func
->get_mclk(hwmgr
, low
);
404 mutex_unlock(&hwmgr
->smu_lock
);
408 static void pp_dpm_powergate_vce(void *handle
, bool gate
)
410 struct pp_hwmgr
*hwmgr
= handle
;
412 if (!hwmgr
|| !hwmgr
->pm_en
)
415 if (hwmgr
->hwmgr_func
->powergate_vce
== NULL
) {
416 pr_info("%s was not implemented.\n", __func__
);
419 mutex_lock(&hwmgr
->smu_lock
);
420 hwmgr
->hwmgr_func
->powergate_vce(hwmgr
, gate
);
421 mutex_unlock(&hwmgr
->smu_lock
);
424 static void pp_dpm_powergate_uvd(void *handle
, bool gate
)
426 struct pp_hwmgr
*hwmgr
= handle
;
428 if (!hwmgr
|| !hwmgr
->pm_en
)
431 if (hwmgr
->hwmgr_func
->powergate_uvd
== NULL
) {
432 pr_info("%s was not implemented.\n", __func__
);
435 mutex_lock(&hwmgr
->smu_lock
);
436 hwmgr
->hwmgr_func
->powergate_uvd(hwmgr
, gate
);
437 mutex_unlock(&hwmgr
->smu_lock
);
440 static int pp_dpm_dispatch_tasks(void *handle
, enum amd_pp_task task_id
,
441 enum amd_pm_state_type
*user_state
)
444 struct pp_hwmgr
*hwmgr
= handle
;
446 if (!hwmgr
|| !hwmgr
->pm_en
)
449 mutex_lock(&hwmgr
->smu_lock
);
450 ret
= hwmgr_handle_task(hwmgr
, task_id
, user_state
);
451 mutex_unlock(&hwmgr
->smu_lock
);
456 static enum amd_pm_state_type
pp_dpm_get_current_power_state(void *handle
)
458 struct pp_hwmgr
*hwmgr
= handle
;
459 struct pp_power_state
*state
;
460 enum amd_pm_state_type pm_type
;
462 if (!hwmgr
|| !hwmgr
->pm_en
|| !hwmgr
->current_ps
)
465 mutex_lock(&hwmgr
->smu_lock
);
467 state
= hwmgr
->current_ps
;
469 switch (state
->classification
.ui_label
) {
470 case PP_StateUILabel_Battery
:
471 pm_type
= POWER_STATE_TYPE_BATTERY
;
473 case PP_StateUILabel_Balanced
:
474 pm_type
= POWER_STATE_TYPE_BALANCED
;
476 case PP_StateUILabel_Performance
:
477 pm_type
= POWER_STATE_TYPE_PERFORMANCE
;
480 if (state
->classification
.flags
& PP_StateClassificationFlag_Boot
)
481 pm_type
= POWER_STATE_TYPE_INTERNAL_BOOT
;
483 pm_type
= POWER_STATE_TYPE_DEFAULT
;
486 mutex_unlock(&hwmgr
->smu_lock
);
491 static void pp_dpm_set_fan_control_mode(void *handle
, uint32_t mode
)
493 struct pp_hwmgr
*hwmgr
= handle
;
495 if (!hwmgr
|| !hwmgr
->pm_en
)
498 if (hwmgr
->hwmgr_func
->set_fan_control_mode
== NULL
) {
499 pr_info("%s was not implemented.\n", __func__
);
502 mutex_lock(&hwmgr
->smu_lock
);
503 hwmgr
->hwmgr_func
->set_fan_control_mode(hwmgr
, mode
);
504 mutex_unlock(&hwmgr
->smu_lock
);
507 static uint32_t pp_dpm_get_fan_control_mode(void *handle
)
509 struct pp_hwmgr
*hwmgr
= handle
;
512 if (!hwmgr
|| !hwmgr
->pm_en
)
515 if (hwmgr
->hwmgr_func
->get_fan_control_mode
== NULL
) {
516 pr_info("%s was not implemented.\n", __func__
);
519 mutex_lock(&hwmgr
->smu_lock
);
520 mode
= hwmgr
->hwmgr_func
->get_fan_control_mode(hwmgr
);
521 mutex_unlock(&hwmgr
->smu_lock
);
525 static int pp_dpm_set_fan_speed_percent(void *handle
, uint32_t percent
)
527 struct pp_hwmgr
*hwmgr
= handle
;
530 if (!hwmgr
|| !hwmgr
->pm_en
)
533 if (hwmgr
->hwmgr_func
->set_fan_speed_percent
== NULL
) {
534 pr_info("%s was not implemented.\n", __func__
);
537 mutex_lock(&hwmgr
->smu_lock
);
538 ret
= hwmgr
->hwmgr_func
->set_fan_speed_percent(hwmgr
, percent
);
539 mutex_unlock(&hwmgr
->smu_lock
);
543 static int pp_dpm_get_fan_speed_percent(void *handle
, uint32_t *speed
)
545 struct pp_hwmgr
*hwmgr
= handle
;
548 if (!hwmgr
|| !hwmgr
->pm_en
)
551 if (hwmgr
->hwmgr_func
->get_fan_speed_percent
== NULL
) {
552 pr_info("%s was not implemented.\n", __func__
);
556 mutex_lock(&hwmgr
->smu_lock
);
557 ret
= hwmgr
->hwmgr_func
->get_fan_speed_percent(hwmgr
, speed
);
558 mutex_unlock(&hwmgr
->smu_lock
);
562 static int pp_dpm_get_fan_speed_rpm(void *handle
, uint32_t *rpm
)
564 struct pp_hwmgr
*hwmgr
= handle
;
567 if (!hwmgr
|| !hwmgr
->pm_en
)
570 if (hwmgr
->hwmgr_func
->get_fan_speed_rpm
== NULL
)
573 mutex_lock(&hwmgr
->smu_lock
);
574 ret
= hwmgr
->hwmgr_func
->get_fan_speed_rpm(hwmgr
, rpm
);
575 mutex_unlock(&hwmgr
->smu_lock
);
579 static int pp_dpm_get_pp_num_states(void *handle
,
580 struct pp_states_info
*data
)
582 struct pp_hwmgr
*hwmgr
= handle
;
585 memset(data
, 0, sizeof(*data
));
587 if (!hwmgr
|| !hwmgr
->pm_en
||!hwmgr
->ps
)
590 mutex_lock(&hwmgr
->smu_lock
);
592 data
->nums
= hwmgr
->num_ps
;
594 for (i
= 0; i
< hwmgr
->num_ps
; i
++) {
595 struct pp_power_state
*state
= (struct pp_power_state
*)
596 ((unsigned long)hwmgr
->ps
+ i
* hwmgr
->ps_size
);
597 switch (state
->classification
.ui_label
) {
598 case PP_StateUILabel_Battery
:
599 data
->states
[i
] = POWER_STATE_TYPE_BATTERY
;
601 case PP_StateUILabel_Balanced
:
602 data
->states
[i
] = POWER_STATE_TYPE_BALANCED
;
604 case PP_StateUILabel_Performance
:
605 data
->states
[i
] = POWER_STATE_TYPE_PERFORMANCE
;
608 if (state
->classification
.flags
& PP_StateClassificationFlag_Boot
)
609 data
->states
[i
] = POWER_STATE_TYPE_INTERNAL_BOOT
;
611 data
->states
[i
] = POWER_STATE_TYPE_DEFAULT
;
614 mutex_unlock(&hwmgr
->smu_lock
);
618 static int pp_dpm_get_pp_table(void *handle
, char **table
)
620 struct pp_hwmgr
*hwmgr
= handle
;
623 if (!hwmgr
|| !hwmgr
->pm_en
||!hwmgr
->soft_pp_table
)
626 mutex_lock(&hwmgr
->smu_lock
);
627 *table
= (char *)hwmgr
->soft_pp_table
;
628 size
= hwmgr
->soft_pp_table_size
;
629 mutex_unlock(&hwmgr
->smu_lock
);
633 static int amd_powerplay_reset(void *handle
)
635 struct pp_hwmgr
*hwmgr
= handle
;
638 ret
= hwmgr_hw_fini(hwmgr
);
642 ret
= hwmgr_hw_init(hwmgr
);
646 return hwmgr_handle_task(hwmgr
, AMD_PP_TASK_COMPLETE_INIT
, NULL
);
649 static int pp_dpm_set_pp_table(void *handle
, const char *buf
, size_t size
)
651 struct pp_hwmgr
*hwmgr
= handle
;
654 if (!hwmgr
|| !hwmgr
->pm_en
)
657 mutex_lock(&hwmgr
->smu_lock
);
658 if (!hwmgr
->hardcode_pp_table
) {
659 hwmgr
->hardcode_pp_table
= kmemdup(hwmgr
->soft_pp_table
,
660 hwmgr
->soft_pp_table_size
,
662 if (!hwmgr
->hardcode_pp_table
)
666 memcpy(hwmgr
->hardcode_pp_table
, buf
, size
);
668 hwmgr
->soft_pp_table
= hwmgr
->hardcode_pp_table
;
670 ret
= amd_powerplay_reset(handle
);
674 if (hwmgr
->hwmgr_func
->avfs_control
) {
675 ret
= hwmgr
->hwmgr_func
->avfs_control(hwmgr
, false);
679 mutex_unlock(&hwmgr
->smu_lock
);
682 mutex_unlock(&hwmgr
->smu_lock
);
686 static int pp_dpm_force_clock_level(void *handle
,
687 enum pp_clock_type type
, uint32_t mask
)
689 struct pp_hwmgr
*hwmgr
= handle
;
692 if (!hwmgr
|| !hwmgr
->pm_en
)
695 if (hwmgr
->hwmgr_func
->force_clock_level
== NULL
) {
696 pr_info("%s was not implemented.\n", __func__
);
699 mutex_lock(&hwmgr
->smu_lock
);
700 if (hwmgr
->dpm_level
== AMD_DPM_FORCED_LEVEL_MANUAL
)
701 ret
= hwmgr
->hwmgr_func
->force_clock_level(hwmgr
, type
, mask
);
704 mutex_unlock(&hwmgr
->smu_lock
);
708 static int pp_dpm_print_clock_levels(void *handle
,
709 enum pp_clock_type type
, char *buf
)
711 struct pp_hwmgr
*hwmgr
= handle
;
714 if (!hwmgr
|| !hwmgr
->pm_en
)
717 if (hwmgr
->hwmgr_func
->print_clock_levels
== NULL
) {
718 pr_info("%s was not implemented.\n", __func__
);
721 mutex_lock(&hwmgr
->smu_lock
);
722 ret
= hwmgr
->hwmgr_func
->print_clock_levels(hwmgr
, type
, buf
);
723 mutex_unlock(&hwmgr
->smu_lock
);
727 static int pp_dpm_get_sclk_od(void *handle
)
729 struct pp_hwmgr
*hwmgr
= handle
;
732 if (!hwmgr
|| !hwmgr
->pm_en
)
735 if (hwmgr
->hwmgr_func
->get_sclk_od
== NULL
) {
736 pr_info("%s was not implemented.\n", __func__
);
739 mutex_lock(&hwmgr
->smu_lock
);
740 ret
= hwmgr
->hwmgr_func
->get_sclk_od(hwmgr
);
741 mutex_unlock(&hwmgr
->smu_lock
);
745 static int pp_dpm_set_sclk_od(void *handle
, uint32_t value
)
747 struct pp_hwmgr
*hwmgr
= handle
;
750 if (!hwmgr
|| !hwmgr
->pm_en
)
753 if (hwmgr
->hwmgr_func
->set_sclk_od
== NULL
) {
754 pr_info("%s was not implemented.\n", __func__
);
758 mutex_lock(&hwmgr
->smu_lock
);
759 ret
= hwmgr
->hwmgr_func
->set_sclk_od(hwmgr
, value
);
760 mutex_unlock(&hwmgr
->smu_lock
);
764 static int pp_dpm_get_mclk_od(void *handle
)
766 struct pp_hwmgr
*hwmgr
= handle
;
769 if (!hwmgr
|| !hwmgr
->pm_en
)
772 if (hwmgr
->hwmgr_func
->get_mclk_od
== NULL
) {
773 pr_info("%s was not implemented.\n", __func__
);
776 mutex_lock(&hwmgr
->smu_lock
);
777 ret
= hwmgr
->hwmgr_func
->get_mclk_od(hwmgr
);
778 mutex_unlock(&hwmgr
->smu_lock
);
782 static int pp_dpm_set_mclk_od(void *handle
, uint32_t value
)
784 struct pp_hwmgr
*hwmgr
= handle
;
787 if (!hwmgr
|| !hwmgr
->pm_en
)
790 if (hwmgr
->hwmgr_func
->set_mclk_od
== NULL
) {
791 pr_info("%s was not implemented.\n", __func__
);
794 mutex_lock(&hwmgr
->smu_lock
);
795 ret
= hwmgr
->hwmgr_func
->set_mclk_od(hwmgr
, value
);
796 mutex_unlock(&hwmgr
->smu_lock
);
800 static int pp_dpm_read_sensor(void *handle
, int idx
,
801 void *value
, int *size
)
803 struct pp_hwmgr
*hwmgr
= handle
;
806 if (!hwmgr
|| !hwmgr
->pm_en
|| !value
)
810 case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK
:
811 *((uint32_t *)value
) = hwmgr
->pstate_sclk
;
813 case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK
:
814 *((uint32_t *)value
) = hwmgr
->pstate_mclk
;
817 mutex_lock(&hwmgr
->smu_lock
);
818 ret
= hwmgr
->hwmgr_func
->read_sensor(hwmgr
, idx
, value
, size
);
819 mutex_unlock(&hwmgr
->smu_lock
);
824 static struct amd_vce_state
*
825 pp_dpm_get_vce_clock_state(void *handle
, unsigned idx
)
827 struct pp_hwmgr
*hwmgr
= handle
;
829 if (!hwmgr
|| !hwmgr
->pm_en
)
832 if (idx
< hwmgr
->num_vce_state_tables
)
833 return &hwmgr
->vce_states
[idx
];
837 static int pp_get_power_profile_mode(void *handle
, char *buf
)
839 struct pp_hwmgr
*hwmgr
= handle
;
841 if (!hwmgr
|| !hwmgr
->pm_en
|| !buf
)
844 if (hwmgr
->hwmgr_func
->get_power_profile_mode
== NULL
) {
845 pr_info("%s was not implemented.\n", __func__
);
846 return snprintf(buf
, PAGE_SIZE
, "\n");
849 return hwmgr
->hwmgr_func
->get_power_profile_mode(hwmgr
, buf
);
852 static int pp_set_power_profile_mode(void *handle
, long *input
, uint32_t size
)
854 struct pp_hwmgr
*hwmgr
= handle
;
857 if (!hwmgr
|| !hwmgr
->pm_en
)
860 if (hwmgr
->hwmgr_func
->set_power_profile_mode
== NULL
) {
861 pr_info("%s was not implemented.\n", __func__
);
864 mutex_lock(&hwmgr
->smu_lock
);
865 if (hwmgr
->dpm_level
== AMD_DPM_FORCED_LEVEL_MANUAL
)
866 ret
= hwmgr
->hwmgr_func
->set_power_profile_mode(hwmgr
, input
, size
);
867 mutex_unlock(&hwmgr
->smu_lock
);
871 static int pp_odn_edit_dpm_table(void *handle
, uint32_t type
, long *input
, uint32_t size
)
873 struct pp_hwmgr
*hwmgr
= handle
;
875 if (!hwmgr
|| !hwmgr
->pm_en
)
878 if (hwmgr
->hwmgr_func
->odn_edit_dpm_table
== NULL
) {
879 pr_info("%s was not implemented.\n", __func__
);
883 return hwmgr
->hwmgr_func
->odn_edit_dpm_table(hwmgr
, type
, input
, size
);
886 static int pp_dpm_switch_power_profile(void *handle
,
887 enum PP_SMC_POWER_PROFILE type
, bool en
)
889 struct pp_hwmgr
*hwmgr
= handle
;
893 if (!hwmgr
|| !hwmgr
->pm_en
)
896 if (hwmgr
->hwmgr_func
->set_power_profile_mode
== NULL
) {
897 pr_info("%s was not implemented.\n", __func__
);
901 if (!(type
< PP_SMC_POWER_PROFILE_CUSTOM
))
904 mutex_lock(&hwmgr
->smu_lock
);
907 hwmgr
->workload_mask
&= ~(1 << hwmgr
->workload_prority
[type
]);
908 index
= fls(hwmgr
->workload_mask
);
909 index
= index
> 0 && index
<= Workload_Policy_Max
? index
- 1 : 0;
910 workload
= hwmgr
->workload_setting
[index
];
912 hwmgr
->workload_mask
|= (1 << hwmgr
->workload_prority
[type
]);
913 index
= fls(hwmgr
->workload_mask
);
914 index
= index
<= Workload_Policy_Max
? index
- 1 : 0;
915 workload
= hwmgr
->workload_setting
[index
];
918 if (hwmgr
->dpm_level
!= AMD_DPM_FORCED_LEVEL_MANUAL
)
919 hwmgr
->hwmgr_func
->set_power_profile_mode(hwmgr
, &workload
, 0);
920 mutex_unlock(&hwmgr
->smu_lock
);
925 static int pp_set_power_limit(void *handle
, uint32_t limit
)
927 struct pp_hwmgr
*hwmgr
= handle
;
929 if (!hwmgr
|| !hwmgr
->pm_en
)
932 if (hwmgr
->hwmgr_func
->set_power_limit
== NULL
) {
933 pr_info("%s was not implemented.\n", __func__
);
938 limit
= hwmgr
->default_power_limit
;
940 if (limit
> hwmgr
->default_power_limit
)
943 mutex_lock(&hwmgr
->smu_lock
);
944 hwmgr
->hwmgr_func
->set_power_limit(hwmgr
, limit
);
945 hwmgr
->power_limit
= limit
;
946 mutex_unlock(&hwmgr
->smu_lock
);
950 static int pp_get_power_limit(void *handle
, uint32_t *limit
, bool default_limit
)
952 struct pp_hwmgr
*hwmgr
= handle
;
954 if (!hwmgr
|| !hwmgr
->pm_en
||!limit
)
957 mutex_lock(&hwmgr
->smu_lock
);
960 *limit
= hwmgr
->default_power_limit
;
962 *limit
= hwmgr
->power_limit
;
964 mutex_unlock(&hwmgr
->smu_lock
);
969 static int pp_display_configuration_change(void *handle
,
970 const struct amd_pp_display_configuration
*display_config
)
972 struct pp_hwmgr
*hwmgr
= handle
;
974 if (!hwmgr
|| !hwmgr
->pm_en
)
977 mutex_lock(&hwmgr
->smu_lock
);
978 phm_store_dal_configuration_data(hwmgr
, display_config
);
979 mutex_unlock(&hwmgr
->smu_lock
);
983 static int pp_get_display_power_level(void *handle
,
984 struct amd_pp_simple_clock_info
*output
)
986 struct pp_hwmgr
*hwmgr
= handle
;
989 if (!hwmgr
|| !hwmgr
->pm_en
||!output
)
992 mutex_lock(&hwmgr
->smu_lock
);
993 ret
= phm_get_dal_power_level(hwmgr
, output
);
994 mutex_unlock(&hwmgr
->smu_lock
);
998 static int pp_get_current_clocks(void *handle
,
999 struct amd_pp_clock_info
*clocks
)
1001 struct amd_pp_simple_clock_info simple_clocks
= { 0 };
1002 struct pp_clock_info hw_clocks
;
1003 struct pp_hwmgr
*hwmgr
= handle
;
1006 if (!hwmgr
|| !hwmgr
->pm_en
)
1009 mutex_lock(&hwmgr
->smu_lock
);
1011 phm_get_dal_power_level(hwmgr
, &simple_clocks
);
1013 if (phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
,
1014 PHM_PlatformCaps_PowerContainment
))
1015 ret
= phm_get_clock_info(hwmgr
, &hwmgr
->current_ps
->hardware
,
1016 &hw_clocks
, PHM_PerformanceLevelDesignation_PowerContainment
);
1018 ret
= phm_get_clock_info(hwmgr
, &hwmgr
->current_ps
->hardware
,
1019 &hw_clocks
, PHM_PerformanceLevelDesignation_Activity
);
1022 pr_info("Error in phm_get_clock_info \n");
1023 mutex_unlock(&hwmgr
->smu_lock
);
1027 clocks
->min_engine_clock
= hw_clocks
.min_eng_clk
;
1028 clocks
->max_engine_clock
= hw_clocks
.max_eng_clk
;
1029 clocks
->min_memory_clock
= hw_clocks
.min_mem_clk
;
1030 clocks
->max_memory_clock
= hw_clocks
.max_mem_clk
;
1031 clocks
->min_bus_bandwidth
= hw_clocks
.min_bus_bandwidth
;
1032 clocks
->max_bus_bandwidth
= hw_clocks
.max_bus_bandwidth
;
1034 clocks
->max_engine_clock_in_sr
= hw_clocks
.max_eng_clk
;
1035 clocks
->min_engine_clock_in_sr
= hw_clocks
.min_eng_clk
;
1037 if (simple_clocks
.level
== 0)
1038 clocks
->max_clocks_state
= PP_DAL_POWERLEVEL_7
;
1040 clocks
->max_clocks_state
= simple_clocks
.level
;
1042 if (0 == phm_get_current_shallow_sleep_clocks(hwmgr
, &hwmgr
->current_ps
->hardware
, &hw_clocks
)) {
1043 clocks
->max_engine_clock_in_sr
= hw_clocks
.max_eng_clk
;
1044 clocks
->min_engine_clock_in_sr
= hw_clocks
.min_eng_clk
;
1046 mutex_unlock(&hwmgr
->smu_lock
);
1050 static int pp_get_clock_by_type(void *handle
, enum amd_pp_clock_type type
, struct amd_pp_clocks
*clocks
)
1052 struct pp_hwmgr
*hwmgr
= handle
;
1055 if (!hwmgr
|| !hwmgr
->pm_en
)
1061 mutex_lock(&hwmgr
->smu_lock
);
1062 ret
= phm_get_clock_by_type(hwmgr
, type
, clocks
);
1063 mutex_unlock(&hwmgr
->smu_lock
);
1067 static int pp_get_clock_by_type_with_latency(void *handle
,
1068 enum amd_pp_clock_type type
,
1069 struct pp_clock_levels_with_latency
*clocks
)
1071 struct pp_hwmgr
*hwmgr
= handle
;
1074 if (!hwmgr
|| !hwmgr
->pm_en
||!clocks
)
1077 mutex_lock(&hwmgr
->smu_lock
);
1078 ret
= phm_get_clock_by_type_with_latency(hwmgr
, type
, clocks
);
1079 mutex_unlock(&hwmgr
->smu_lock
);
1083 static int pp_get_clock_by_type_with_voltage(void *handle
,
1084 enum amd_pp_clock_type type
,
1085 struct pp_clock_levels_with_voltage
*clocks
)
1087 struct pp_hwmgr
*hwmgr
= handle
;
1090 if (!hwmgr
|| !hwmgr
->pm_en
||!clocks
)
1093 mutex_lock(&hwmgr
->smu_lock
);
1095 ret
= phm_get_clock_by_type_with_voltage(hwmgr
, type
, clocks
);
1097 mutex_unlock(&hwmgr
->smu_lock
);
1101 static int pp_set_watermarks_for_clocks_ranges(void *handle
,
1104 struct pp_hwmgr
*hwmgr
= handle
;
1107 if (!hwmgr
|| !hwmgr
->pm_en
|| !clock_ranges
)
1110 mutex_lock(&hwmgr
->smu_lock
);
1111 ret
= phm_set_watermarks_for_clocks_ranges(hwmgr
,
1113 mutex_unlock(&hwmgr
->smu_lock
);
1118 static int pp_display_clock_voltage_request(void *handle
,
1119 struct pp_display_clock_request
*clock
)
1121 struct pp_hwmgr
*hwmgr
= handle
;
1124 if (!hwmgr
|| !hwmgr
->pm_en
||!clock
)
1127 mutex_lock(&hwmgr
->smu_lock
);
1128 ret
= phm_display_clock_voltage_request(hwmgr
, clock
);
1129 mutex_unlock(&hwmgr
->smu_lock
);
1134 static int pp_get_display_mode_validation_clocks(void *handle
,
1135 struct amd_pp_simple_clock_info
*clocks
)
1137 struct pp_hwmgr
*hwmgr
= handle
;
1140 if (!hwmgr
|| !hwmgr
->pm_en
||!clocks
)
1143 clocks
->level
= PP_DAL_POWERLEVEL_7
;
1145 mutex_lock(&hwmgr
->smu_lock
);
1147 if (phm_cap_enabled(hwmgr
->platform_descriptor
.platformCaps
, PHM_PlatformCaps_DynamicPatchPowerState
))
1148 ret
= phm_get_max_high_clocks(hwmgr
, clocks
);
1150 mutex_unlock(&hwmgr
->smu_lock
);
1154 static int pp_dpm_powergate_mmhub(void *handle
)
1156 struct pp_hwmgr
*hwmgr
= handle
;
1158 if (!hwmgr
|| !hwmgr
->pm_en
)
1161 if (hwmgr
->hwmgr_func
->powergate_mmhub
== NULL
) {
1162 pr_info("%s was not implemented.\n", __func__
);
1166 return hwmgr
->hwmgr_func
->powergate_mmhub(hwmgr
);
1169 static int pp_dpm_powergate_gfx(void *handle
, bool gate
)
1171 struct pp_hwmgr
*hwmgr
= handle
;
1173 if (!hwmgr
|| !hwmgr
->pm_en
)
1176 if (hwmgr
->hwmgr_func
->powergate_gfx
== NULL
) {
1177 pr_info("%s was not implemented.\n", __func__
);
1181 return hwmgr
->hwmgr_func
->powergate_gfx(hwmgr
, gate
);
1184 static int pp_set_powergating_by_smu(void *handle
,
1185 uint32_t block_type
, bool gate
)
1189 switch (block_type
) {
1190 case AMD_IP_BLOCK_TYPE_UVD
:
1191 case AMD_IP_BLOCK_TYPE_VCN
:
1192 pp_dpm_powergate_uvd(handle
, gate
);
1194 case AMD_IP_BLOCK_TYPE_VCE
:
1195 pp_dpm_powergate_vce(handle
, gate
);
1197 case AMD_IP_BLOCK_TYPE_GMC
:
1198 pp_dpm_powergate_mmhub(handle
);
1200 case AMD_IP_BLOCK_TYPE_GFX
:
1201 ret
= pp_dpm_powergate_gfx(handle
, gate
);
1209 static int pp_notify_smu_enable_pwe(void *handle
)
1211 struct pp_hwmgr
*hwmgr
= handle
;
1213 if (!hwmgr
|| !hwmgr
->pm_en
)
1216 if (hwmgr
->hwmgr_func
->smus_notify_pwe
== NULL
) {
1217 pr_info("%s was not implemented.\n", __func__
);
1221 mutex_lock(&hwmgr
->smu_lock
);
1222 hwmgr
->hwmgr_func
->smus_notify_pwe(hwmgr
);
1223 mutex_unlock(&hwmgr
->smu_lock
);
1228 static const struct amd_pm_funcs pp_dpm_funcs
= {
1229 .load_firmware
= pp_dpm_load_fw
,
1230 .wait_for_fw_loading_complete
= pp_dpm_fw_loading_complete
,
1231 .force_performance_level
= pp_dpm_force_performance_level
,
1232 .get_performance_level
= pp_dpm_get_performance_level
,
1233 .get_current_power_state
= pp_dpm_get_current_power_state
,
1234 .dispatch_tasks
= pp_dpm_dispatch_tasks
,
1235 .set_fan_control_mode
= pp_dpm_set_fan_control_mode
,
1236 .get_fan_control_mode
= pp_dpm_get_fan_control_mode
,
1237 .set_fan_speed_percent
= pp_dpm_set_fan_speed_percent
,
1238 .get_fan_speed_percent
= pp_dpm_get_fan_speed_percent
,
1239 .get_fan_speed_rpm
= pp_dpm_get_fan_speed_rpm
,
1240 .get_pp_num_states
= pp_dpm_get_pp_num_states
,
1241 .get_pp_table
= pp_dpm_get_pp_table
,
1242 .set_pp_table
= pp_dpm_set_pp_table
,
1243 .force_clock_level
= pp_dpm_force_clock_level
,
1244 .print_clock_levels
= pp_dpm_print_clock_levels
,
1245 .get_sclk_od
= pp_dpm_get_sclk_od
,
1246 .set_sclk_od
= pp_dpm_set_sclk_od
,
1247 .get_mclk_od
= pp_dpm_get_mclk_od
,
1248 .set_mclk_od
= pp_dpm_set_mclk_od
,
1249 .read_sensor
= pp_dpm_read_sensor
,
1250 .get_vce_clock_state
= pp_dpm_get_vce_clock_state
,
1251 .switch_power_profile
= pp_dpm_switch_power_profile
,
1252 .set_clockgating_by_smu
= pp_set_clockgating_by_smu
,
1253 .set_powergating_by_smu
= pp_set_powergating_by_smu
,
1254 .get_power_profile_mode
= pp_get_power_profile_mode
,
1255 .set_power_profile_mode
= pp_set_power_profile_mode
,
1256 .odn_edit_dpm_table
= pp_odn_edit_dpm_table
,
1257 .set_power_limit
= pp_set_power_limit
,
1258 .get_power_limit
= pp_get_power_limit
,
1260 .get_sclk
= pp_dpm_get_sclk
,
1261 .get_mclk
= pp_dpm_get_mclk
,
1262 .display_configuration_change
= pp_display_configuration_change
,
1263 .get_display_power_level
= pp_get_display_power_level
,
1264 .get_current_clocks
= pp_get_current_clocks
,
1265 .get_clock_by_type
= pp_get_clock_by_type
,
1266 .get_clock_by_type_with_latency
= pp_get_clock_by_type_with_latency
,
1267 .get_clock_by_type_with_voltage
= pp_get_clock_by_type_with_voltage
,
1268 .set_watermarks_for_clocks_ranges
= pp_set_watermarks_for_clocks_ranges
,
1269 .display_clock_voltage_request
= pp_display_clock_voltage_request
,
1270 .get_display_mode_validation_clocks
= pp_get_display_mode_validation_clocks
,
1271 .notify_smu_enable_pwe
= pp_notify_smu_enable_pwe
,