treewide: remove redundant IS_ERR() before error code check
[linux/fpc-iii.git] / drivers / gpu / drm / amd / powerplay / amd_powerplay.c
blob c195575366a3b2e4ea48c0748cd520dc2d2fbd26
1 /*
2 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
23 #include "pp_debug.h"
24 #include <linux/types.h>
25 #include <linux/kernel.h>
26 #include <linux/gfp.h>
27 #include <linux/slab.h>
28 #include <linux/firmware.h>
29 #include "amd_shared.h"
30 #include "amd_powerplay.h"
31 #include "power_state.h"
32 #include "amdgpu.h"
33 #include "hwmgr.h"
36 static const struct amd_pm_funcs pp_dpm_funcs;
38 static int amd_powerplay_create(struct amdgpu_device *adev)
40 struct pp_hwmgr *hwmgr;
42 if (adev == NULL)
43 return -EINVAL;
45 hwmgr = kzalloc(sizeof(struct pp_hwmgr), GFP_KERNEL);
46 if (hwmgr == NULL)
47 return -ENOMEM;
49 hwmgr->adev = adev;
50 hwmgr->not_vf = !amdgpu_sriov_vf(adev);
51 hwmgr->device = amdgpu_cgs_create_device(adev);
52 mutex_init(&hwmgr->smu_lock);
53 hwmgr->chip_family = adev->family;
54 hwmgr->chip_id = adev->asic_type;
55 hwmgr->feature_mask = adev->pm.pp_feature;
56 hwmgr->display_config = &adev->pm.pm_display_cfg;
57 adev->powerplay.pp_handle = hwmgr;
58 adev->powerplay.pp_funcs = &pp_dpm_funcs;
59 return 0;
63 static void amd_powerplay_destroy(struct amdgpu_device *adev)
65 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
67 kfree(hwmgr->hardcode_pp_table);
68 hwmgr->hardcode_pp_table = NULL;
70 kfree(hwmgr);
71 hwmgr = NULL;
74 static int pp_early_init(void *handle)
76 int ret;
77 struct amdgpu_device *adev = handle;
79 ret = amd_powerplay_create(adev);
81 if (ret != 0)
82 return ret;
84 ret = hwmgr_early_init(adev->powerplay.pp_handle);
85 if (ret)
86 return -EINVAL;
88 return 0;
91 static int pp_sw_init(void *handle)
93 struct amdgpu_device *adev = handle;
94 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
95 int ret = 0;
97 ret = hwmgr_sw_init(hwmgr);
99 pr_debug("powerplay sw init %s\n", ret ? "failed" : "successfully");
101 return ret;
104 static int pp_sw_fini(void *handle)
106 struct amdgpu_device *adev = handle;
107 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
109 hwmgr_sw_fini(hwmgr);
111 release_firmware(adev->pm.fw);
112 adev->pm.fw = NULL;
114 return 0;
117 static int pp_hw_init(void *handle)
119 int ret = 0;
120 struct amdgpu_device *adev = handle;
121 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
123 ret = hwmgr_hw_init(hwmgr);
125 if (ret)
126 pr_err("powerplay hw init failed\n");
128 return ret;
131 static int pp_hw_fini(void *handle)
133 struct amdgpu_device *adev = handle;
134 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
136 hwmgr_hw_fini(hwmgr);
138 return 0;
141 static void pp_reserve_vram_for_smu(struct amdgpu_device *adev)
143 int r = -EINVAL;
144 void *cpu_ptr = NULL;
145 uint64_t gpu_addr;
146 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
148 if (amdgpu_bo_create_kernel(adev, adev->pm.smu_prv_buffer_size,
149 PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT,
150 &adev->pm.smu_prv_buffer,
151 &gpu_addr,
152 &cpu_ptr)) {
153 DRM_ERROR("amdgpu: failed to create smu prv buffer\n");
154 return;
157 if (hwmgr->hwmgr_func->notify_cac_buffer_info)
158 r = hwmgr->hwmgr_func->notify_cac_buffer_info(hwmgr,
159 lower_32_bits((unsigned long)cpu_ptr),
160 upper_32_bits((unsigned long)cpu_ptr),
161 lower_32_bits(gpu_addr),
162 upper_32_bits(gpu_addr),
163 adev->pm.smu_prv_buffer_size);
165 if (r) {
166 amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
167 adev->pm.smu_prv_buffer = NULL;
168 DRM_ERROR("amdgpu: failed to notify SMU buffer address\n");
172 static int pp_late_init(void *handle)
174 struct amdgpu_device *adev = handle;
175 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
177 if (hwmgr && hwmgr->pm_en) {
178 mutex_lock(&hwmgr->smu_lock);
179 hwmgr_handle_task(hwmgr,
180 AMD_PP_TASK_COMPLETE_INIT, NULL);
181 mutex_unlock(&hwmgr->smu_lock);
183 if (adev->pm.smu_prv_buffer_size != 0)
184 pp_reserve_vram_for_smu(adev);
186 return 0;
189 static void pp_late_fini(void *handle)
191 struct amdgpu_device *adev = handle;
193 if (adev->pm.smu_prv_buffer)
194 amdgpu_bo_free_kernel(&adev->pm.smu_prv_buffer, NULL, NULL);
195 amd_powerplay_destroy(adev);
/* Powerplay exposes no idle state; always report busy. */
static bool pp_is_idle(void *handle)
{
	return false;
}
/* Nothing to wait for; unconditional success. */
static int pp_wait_for_idle(void *handle)
{
	return 0;
}
/* Soft reset is a no-op for powerplay. */
static int pp_sw_reset(void *handle)
{
	return 0;
}
214 static int pp_set_powergating_state(void *handle,
215 enum amd_powergating_state state)
217 return 0;
220 static int pp_suspend(void *handle)
222 struct amdgpu_device *adev = handle;
223 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
225 return hwmgr_suspend(hwmgr);
228 static int pp_resume(void *handle)
230 struct amdgpu_device *adev = handle;
231 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
233 return hwmgr_resume(hwmgr);
236 static int pp_set_clockgating_state(void *handle,
237 enum amd_clockgating_state state)
239 return 0;
242 static const struct amd_ip_funcs pp_ip_funcs = {
243 .name = "powerplay",
244 .early_init = pp_early_init,
245 .late_init = pp_late_init,
246 .sw_init = pp_sw_init,
247 .sw_fini = pp_sw_fini,
248 .hw_init = pp_hw_init,
249 .hw_fini = pp_hw_fini,
250 .late_fini = pp_late_fini,
251 .suspend = pp_suspend,
252 .resume = pp_resume,
253 .is_idle = pp_is_idle,
254 .wait_for_idle = pp_wait_for_idle,
255 .soft_reset = pp_sw_reset,
256 .set_clockgating_state = pp_set_clockgating_state,
257 .set_powergating_state = pp_set_powergating_state,
260 const struct amdgpu_ip_block_version pp_smu_ip_block =
262 .type = AMD_IP_BLOCK_TYPE_SMC,
263 .major = 1,
264 .minor = 0,
265 .rev = 0,
266 .funcs = &pp_ip_funcs,
269 /* This interface only be supported On Vi,
270 * because only smu7/8 can help to load gfx/sdma fw,
271 * smu need to be enabled before load other ip's fw.
272 * so call start smu to load smu7 fw and other ip's fw
274 static int pp_dpm_load_fw(void *handle)
276 struct pp_hwmgr *hwmgr = handle;
278 if (!hwmgr || !hwmgr->smumgr_funcs || !hwmgr->smumgr_funcs->start_smu)
279 return -EINVAL;
281 if (hwmgr->smumgr_funcs->start_smu(hwmgr)) {
282 pr_err("fw load failed\n");
283 return -EINVAL;
286 return 0;
/* Firmware loading needs no completion step; unconditional success. */
static int pp_dpm_fw_loading_complete(void *handle)
{
	return 0;
}
294 static int pp_set_clockgating_by_smu(void *handle, uint32_t msg_id)
296 struct pp_hwmgr *hwmgr = handle;
298 if (!hwmgr || !hwmgr->pm_en)
299 return -EINVAL;
301 if (hwmgr->hwmgr_func->update_clock_gatings == NULL) {
302 pr_info_ratelimited("%s was not implemented.\n", __func__);
303 return 0;
306 return hwmgr->hwmgr_func->update_clock_gatings(hwmgr, &msg_id);
309 static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
310 enum amd_dpm_forced_level *level)
312 uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
313 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
314 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
315 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
317 if (!(hwmgr->dpm_level & profile_mode_mask)) {
318 /* enter umd pstate, save current level, disable gfx cg*/
319 if (*level & profile_mode_mask) {
320 hwmgr->saved_dpm_level = hwmgr->dpm_level;
321 hwmgr->en_umd_pstate = true;
322 amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
323 AMD_IP_BLOCK_TYPE_GFX,
324 AMD_CG_STATE_UNGATE);
325 amdgpu_device_ip_set_powergating_state(hwmgr->adev,
326 AMD_IP_BLOCK_TYPE_GFX,
327 AMD_PG_STATE_UNGATE);
329 } else {
330 /* exit umd pstate, restore level, enable gfx cg*/
331 if (!(*level & profile_mode_mask)) {
332 if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
333 *level = hwmgr->saved_dpm_level;
334 hwmgr->en_umd_pstate = false;
335 amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
336 AMD_IP_BLOCK_TYPE_GFX,
337 AMD_CG_STATE_GATE);
338 amdgpu_device_ip_set_powergating_state(hwmgr->adev,
339 AMD_IP_BLOCK_TYPE_GFX,
340 AMD_PG_STATE_GATE);
345 static int pp_dpm_force_performance_level(void *handle,
346 enum amd_dpm_forced_level level)
348 struct pp_hwmgr *hwmgr = handle;
350 if (!hwmgr || !hwmgr->pm_en)
351 return -EINVAL;
353 if (level == hwmgr->dpm_level)
354 return 0;
356 mutex_lock(&hwmgr->smu_lock);
357 pp_dpm_en_umd_pstate(hwmgr, &level);
358 hwmgr->request_dpm_level = level;
359 hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
360 mutex_unlock(&hwmgr->smu_lock);
362 return 0;
365 static enum amd_dpm_forced_level pp_dpm_get_performance_level(
366 void *handle)
368 struct pp_hwmgr *hwmgr = handle;
369 enum amd_dpm_forced_level level;
371 if (!hwmgr || !hwmgr->pm_en)
372 return -EINVAL;
374 mutex_lock(&hwmgr->smu_lock);
375 level = hwmgr->dpm_level;
376 mutex_unlock(&hwmgr->smu_lock);
377 return level;
380 static uint32_t pp_dpm_get_sclk(void *handle, bool low)
382 struct pp_hwmgr *hwmgr = handle;
383 uint32_t clk = 0;
385 if (!hwmgr || !hwmgr->pm_en)
386 return 0;
388 if (hwmgr->hwmgr_func->get_sclk == NULL) {
389 pr_info_ratelimited("%s was not implemented.\n", __func__);
390 return 0;
392 mutex_lock(&hwmgr->smu_lock);
393 clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
394 mutex_unlock(&hwmgr->smu_lock);
395 return clk;
398 static uint32_t pp_dpm_get_mclk(void *handle, bool low)
400 struct pp_hwmgr *hwmgr = handle;
401 uint32_t clk = 0;
403 if (!hwmgr || !hwmgr->pm_en)
404 return 0;
406 if (hwmgr->hwmgr_func->get_mclk == NULL) {
407 pr_info_ratelimited("%s was not implemented.\n", __func__);
408 return 0;
410 mutex_lock(&hwmgr->smu_lock);
411 clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
412 mutex_unlock(&hwmgr->smu_lock);
413 return clk;
416 static void pp_dpm_powergate_vce(void *handle, bool gate)
418 struct pp_hwmgr *hwmgr = handle;
420 if (!hwmgr || !hwmgr->pm_en)
421 return;
423 if (hwmgr->hwmgr_func->powergate_vce == NULL) {
424 pr_info_ratelimited("%s was not implemented.\n", __func__);
425 return;
427 mutex_lock(&hwmgr->smu_lock);
428 hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
429 mutex_unlock(&hwmgr->smu_lock);
432 static void pp_dpm_powergate_uvd(void *handle, bool gate)
434 struct pp_hwmgr *hwmgr = handle;
436 if (!hwmgr || !hwmgr->pm_en)
437 return;
439 if (hwmgr->hwmgr_func->powergate_uvd == NULL) {
440 pr_info_ratelimited("%s was not implemented.\n", __func__);
441 return;
443 mutex_lock(&hwmgr->smu_lock);
444 hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
445 mutex_unlock(&hwmgr->smu_lock);
448 static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
449 enum amd_pm_state_type *user_state)
451 int ret = 0;
452 struct pp_hwmgr *hwmgr = handle;
454 if (!hwmgr || !hwmgr->pm_en)
455 return -EINVAL;
457 mutex_lock(&hwmgr->smu_lock);
458 ret = hwmgr_handle_task(hwmgr, task_id, user_state);
459 mutex_unlock(&hwmgr->smu_lock);
461 return ret;
464 static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
466 struct pp_hwmgr *hwmgr = handle;
467 struct pp_power_state *state;
468 enum amd_pm_state_type pm_type;
470 if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
471 return -EINVAL;
473 mutex_lock(&hwmgr->smu_lock);
475 state = hwmgr->current_ps;
477 switch (state->classification.ui_label) {
478 case PP_StateUILabel_Battery:
479 pm_type = POWER_STATE_TYPE_BATTERY;
480 break;
481 case PP_StateUILabel_Balanced:
482 pm_type = POWER_STATE_TYPE_BALANCED;
483 break;
484 case PP_StateUILabel_Performance:
485 pm_type = POWER_STATE_TYPE_PERFORMANCE;
486 break;
487 default:
488 if (state->classification.flags & PP_StateClassificationFlag_Boot)
489 pm_type = POWER_STATE_TYPE_INTERNAL_BOOT;
490 else
491 pm_type = POWER_STATE_TYPE_DEFAULT;
492 break;
494 mutex_unlock(&hwmgr->smu_lock);
496 return pm_type;
499 static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
501 struct pp_hwmgr *hwmgr = handle;
503 if (!hwmgr || !hwmgr->pm_en)
504 return;
506 if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
507 pr_info_ratelimited("%s was not implemented.\n", __func__);
508 return;
510 mutex_lock(&hwmgr->smu_lock);
511 hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
512 mutex_unlock(&hwmgr->smu_lock);
515 static uint32_t pp_dpm_get_fan_control_mode(void *handle)
517 struct pp_hwmgr *hwmgr = handle;
518 uint32_t mode = 0;
520 if (!hwmgr || !hwmgr->pm_en)
521 return 0;
523 if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
524 pr_info_ratelimited("%s was not implemented.\n", __func__);
525 return 0;
527 mutex_lock(&hwmgr->smu_lock);
528 mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
529 mutex_unlock(&hwmgr->smu_lock);
530 return mode;
533 static int pp_dpm_set_fan_speed_percent(void *handle, uint32_t percent)
535 struct pp_hwmgr *hwmgr = handle;
536 int ret = 0;
538 if (!hwmgr || !hwmgr->pm_en)
539 return -EINVAL;
541 if (hwmgr->hwmgr_func->set_fan_speed_percent == NULL) {
542 pr_info_ratelimited("%s was not implemented.\n", __func__);
543 return 0;
545 mutex_lock(&hwmgr->smu_lock);
546 ret = hwmgr->hwmgr_func->set_fan_speed_percent(hwmgr, percent);
547 mutex_unlock(&hwmgr->smu_lock);
548 return ret;
551 static int pp_dpm_get_fan_speed_percent(void *handle, uint32_t *speed)
553 struct pp_hwmgr *hwmgr = handle;
554 int ret = 0;
556 if (!hwmgr || !hwmgr->pm_en)
557 return -EINVAL;
559 if (hwmgr->hwmgr_func->get_fan_speed_percent == NULL) {
560 pr_info_ratelimited("%s was not implemented.\n", __func__);
561 return 0;
564 mutex_lock(&hwmgr->smu_lock);
565 ret = hwmgr->hwmgr_func->get_fan_speed_percent(hwmgr, speed);
566 mutex_unlock(&hwmgr->smu_lock);
567 return ret;
570 static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
572 struct pp_hwmgr *hwmgr = handle;
573 int ret = 0;
575 if (!hwmgr || !hwmgr->pm_en)
576 return -EINVAL;
578 if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
579 return -EINVAL;
581 mutex_lock(&hwmgr->smu_lock);
582 ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
583 mutex_unlock(&hwmgr->smu_lock);
584 return ret;
587 static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
589 struct pp_hwmgr *hwmgr = handle;
590 int ret = 0;
592 if (!hwmgr || !hwmgr->pm_en)
593 return -EINVAL;
595 if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL) {
596 pr_info_ratelimited("%s was not implemented.\n", __func__);
597 return 0;
599 mutex_lock(&hwmgr->smu_lock);
600 ret = hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
601 mutex_unlock(&hwmgr->smu_lock);
602 return ret;
605 static int pp_dpm_get_pp_num_states(void *handle,
606 struct pp_states_info *data)
608 struct pp_hwmgr *hwmgr = handle;
609 int i;
611 memset(data, 0, sizeof(*data));
613 if (!hwmgr || !hwmgr->pm_en ||!hwmgr->ps)
614 return -EINVAL;
616 mutex_lock(&hwmgr->smu_lock);
618 data->nums = hwmgr->num_ps;
620 for (i = 0; i < hwmgr->num_ps; i++) {
621 struct pp_power_state *state = (struct pp_power_state *)
622 ((unsigned long)hwmgr->ps + i * hwmgr->ps_size);
623 switch (state->classification.ui_label) {
624 case PP_StateUILabel_Battery:
625 data->states[i] = POWER_STATE_TYPE_BATTERY;
626 break;
627 case PP_StateUILabel_Balanced:
628 data->states[i] = POWER_STATE_TYPE_BALANCED;
629 break;
630 case PP_StateUILabel_Performance:
631 data->states[i] = POWER_STATE_TYPE_PERFORMANCE;
632 break;
633 default:
634 if (state->classification.flags & PP_StateClassificationFlag_Boot)
635 data->states[i] = POWER_STATE_TYPE_INTERNAL_BOOT;
636 else
637 data->states[i] = POWER_STATE_TYPE_DEFAULT;
640 mutex_unlock(&hwmgr->smu_lock);
641 return 0;
644 static int pp_dpm_get_pp_table(void *handle, char **table)
646 struct pp_hwmgr *hwmgr = handle;
647 int size = 0;
649 if (!hwmgr || !hwmgr->pm_en ||!hwmgr->soft_pp_table)
650 return -EINVAL;
652 mutex_lock(&hwmgr->smu_lock);
653 *table = (char *)hwmgr->soft_pp_table;
654 size = hwmgr->soft_pp_table_size;
655 mutex_unlock(&hwmgr->smu_lock);
656 return size;
659 static int amd_powerplay_reset(void *handle)
661 struct pp_hwmgr *hwmgr = handle;
662 int ret;
664 ret = hwmgr_hw_fini(hwmgr);
665 if (ret)
666 return ret;
668 ret = hwmgr_hw_init(hwmgr);
669 if (ret)
670 return ret;
672 return hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL);
675 static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
677 struct pp_hwmgr *hwmgr = handle;
678 int ret = -ENOMEM;
680 if (!hwmgr || !hwmgr->pm_en)
681 return -EINVAL;
683 mutex_lock(&hwmgr->smu_lock);
684 if (!hwmgr->hardcode_pp_table) {
685 hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
686 hwmgr->soft_pp_table_size,
687 GFP_KERNEL);
688 if (!hwmgr->hardcode_pp_table)
689 goto err;
692 memcpy(hwmgr->hardcode_pp_table, buf, size);
694 hwmgr->soft_pp_table = hwmgr->hardcode_pp_table;
696 ret = amd_powerplay_reset(handle);
697 if (ret)
698 goto err;
700 if (hwmgr->hwmgr_func->avfs_control) {
701 ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
702 if (ret)
703 goto err;
705 mutex_unlock(&hwmgr->smu_lock);
706 return 0;
707 err:
708 mutex_unlock(&hwmgr->smu_lock);
709 return ret;
712 static int pp_dpm_force_clock_level(void *handle,
713 enum pp_clock_type type, uint32_t mask)
715 struct pp_hwmgr *hwmgr = handle;
716 int ret = 0;
718 if (!hwmgr || !hwmgr->pm_en)
719 return -EINVAL;
721 if (hwmgr->hwmgr_func->force_clock_level == NULL) {
722 pr_info_ratelimited("%s was not implemented.\n", __func__);
723 return 0;
726 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
727 pr_debug("force clock level is for dpm manual mode only.\n");
728 return -EINVAL;
731 mutex_lock(&hwmgr->smu_lock);
732 ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
733 mutex_unlock(&hwmgr->smu_lock);
734 return ret;
737 static int pp_dpm_print_clock_levels(void *handle,
738 enum pp_clock_type type, char *buf)
740 struct pp_hwmgr *hwmgr = handle;
741 int ret = 0;
743 if (!hwmgr || !hwmgr->pm_en)
744 return -EINVAL;
746 if (hwmgr->hwmgr_func->print_clock_levels == NULL) {
747 pr_info_ratelimited("%s was not implemented.\n", __func__);
748 return 0;
750 mutex_lock(&hwmgr->smu_lock);
751 ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
752 mutex_unlock(&hwmgr->smu_lock);
753 return ret;
756 static int pp_dpm_get_sclk_od(void *handle)
758 struct pp_hwmgr *hwmgr = handle;
759 int ret = 0;
761 if (!hwmgr || !hwmgr->pm_en)
762 return -EINVAL;
764 if (hwmgr->hwmgr_func->get_sclk_od == NULL) {
765 pr_info_ratelimited("%s was not implemented.\n", __func__);
766 return 0;
768 mutex_lock(&hwmgr->smu_lock);
769 ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
770 mutex_unlock(&hwmgr->smu_lock);
771 return ret;
774 static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
776 struct pp_hwmgr *hwmgr = handle;
777 int ret = 0;
779 if (!hwmgr || !hwmgr->pm_en)
780 return -EINVAL;
782 if (hwmgr->hwmgr_func->set_sclk_od == NULL) {
783 pr_info_ratelimited("%s was not implemented.\n", __func__);
784 return 0;
787 mutex_lock(&hwmgr->smu_lock);
788 ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
789 mutex_unlock(&hwmgr->smu_lock);
790 return ret;
793 static int pp_dpm_get_mclk_od(void *handle)
795 struct pp_hwmgr *hwmgr = handle;
796 int ret = 0;
798 if (!hwmgr || !hwmgr->pm_en)
799 return -EINVAL;
801 if (hwmgr->hwmgr_func->get_mclk_od == NULL) {
802 pr_info_ratelimited("%s was not implemented.\n", __func__);
803 return 0;
805 mutex_lock(&hwmgr->smu_lock);
806 ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
807 mutex_unlock(&hwmgr->smu_lock);
808 return ret;
811 static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
813 struct pp_hwmgr *hwmgr = handle;
814 int ret = 0;
816 if (!hwmgr || !hwmgr->pm_en)
817 return -EINVAL;
819 if (hwmgr->hwmgr_func->set_mclk_od == NULL) {
820 pr_info_ratelimited("%s was not implemented.\n", __func__);
821 return 0;
823 mutex_lock(&hwmgr->smu_lock);
824 ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
825 mutex_unlock(&hwmgr->smu_lock);
826 return ret;
829 static int pp_dpm_read_sensor(void *handle, int idx,
830 void *value, int *size)
832 struct pp_hwmgr *hwmgr = handle;
833 int ret = 0;
835 if (!hwmgr || !hwmgr->pm_en || !value)
836 return -EINVAL;
838 switch (idx) {
839 case AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK:
840 *((uint32_t *)value) = hwmgr->pstate_sclk;
841 return 0;
842 case AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK:
843 *((uint32_t *)value) = hwmgr->pstate_mclk;
844 return 0;
845 case AMDGPU_PP_SENSOR_MIN_FAN_RPM:
846 *((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMinRPM;
847 return 0;
848 case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
849 *((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
850 return 0;
851 default:
852 mutex_lock(&hwmgr->smu_lock);
853 ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
854 mutex_unlock(&hwmgr->smu_lock);
855 return ret;
859 static struct amd_vce_state*
860 pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
862 struct pp_hwmgr *hwmgr = handle;
864 if (!hwmgr || !hwmgr->pm_en)
865 return NULL;
867 if (idx < hwmgr->num_vce_state_tables)
868 return &hwmgr->vce_states[idx];
869 return NULL;
872 static int pp_get_power_profile_mode(void *handle, char *buf)
874 struct pp_hwmgr *hwmgr = handle;
876 if (!hwmgr || !hwmgr->pm_en || !buf)
877 return -EINVAL;
879 if (hwmgr->hwmgr_func->get_power_profile_mode == NULL) {
880 pr_info_ratelimited("%s was not implemented.\n", __func__);
881 return snprintf(buf, PAGE_SIZE, "\n");
884 return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
887 static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
889 struct pp_hwmgr *hwmgr = handle;
890 int ret = -EINVAL;
892 if (!hwmgr || !hwmgr->pm_en)
893 return ret;
895 if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
896 pr_info_ratelimited("%s was not implemented.\n", __func__);
897 return ret;
900 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
901 pr_debug("power profile setting is for manual dpm mode only.\n");
902 return ret;
905 mutex_lock(&hwmgr->smu_lock);
906 ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
907 mutex_unlock(&hwmgr->smu_lock);
908 return ret;
911 static int pp_odn_edit_dpm_table(void *handle, uint32_t type, long *input, uint32_t size)
913 struct pp_hwmgr *hwmgr = handle;
915 if (!hwmgr || !hwmgr->pm_en)
916 return -EINVAL;
918 if (hwmgr->hwmgr_func->odn_edit_dpm_table == NULL) {
919 pr_info_ratelimited("%s was not implemented.\n", __func__);
920 return -EINVAL;
923 return hwmgr->hwmgr_func->odn_edit_dpm_table(hwmgr, type, input, size);
926 static int pp_dpm_set_mp1_state(void *handle, enum pp_mp1_state mp1_state)
928 struct pp_hwmgr *hwmgr = handle;
930 if (!hwmgr)
931 return -EINVAL;
933 if (!hwmgr->pm_en)
934 return 0;
936 if (hwmgr->hwmgr_func->set_mp1_state)
937 return hwmgr->hwmgr_func->set_mp1_state(hwmgr, mp1_state);
939 return 0;
942 static int pp_dpm_switch_power_profile(void *handle,
943 enum PP_SMC_POWER_PROFILE type, bool en)
945 struct pp_hwmgr *hwmgr = handle;
946 long workload;
947 uint32_t index;
949 if (!hwmgr || !hwmgr->pm_en)
950 return -EINVAL;
952 if (hwmgr->hwmgr_func->set_power_profile_mode == NULL) {
953 pr_info_ratelimited("%s was not implemented.\n", __func__);
954 return -EINVAL;
957 if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
958 return -EINVAL;
960 mutex_lock(&hwmgr->smu_lock);
962 if (!en) {
963 hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
964 index = fls(hwmgr->workload_mask);
965 index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
966 workload = hwmgr->workload_setting[index];
967 } else {
968 hwmgr->workload_mask |= (1 << hwmgr->workload_prority[type]);
969 index = fls(hwmgr->workload_mask);
970 index = index <= Workload_Policy_Max ? index - 1 : 0;
971 workload = hwmgr->workload_setting[index];
974 if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
975 hwmgr->hwmgr_func->disable_power_features_for_compute_performance) {
976 if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en)) {
977 mutex_unlock(&hwmgr->smu_lock);
978 return -EINVAL;
982 if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
983 hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
984 mutex_unlock(&hwmgr->smu_lock);
986 return 0;
989 static int pp_set_power_limit(void *handle, uint32_t limit)
991 struct pp_hwmgr *hwmgr = handle;
992 uint32_t max_power_limit;
994 if (!hwmgr || !hwmgr->pm_en)
995 return -EINVAL;
997 if (hwmgr->hwmgr_func->set_power_limit == NULL) {
998 pr_info_ratelimited("%s was not implemented.\n", __func__);
999 return -EINVAL;
1002 if (limit == 0)
1003 limit = hwmgr->default_power_limit;
1005 max_power_limit = hwmgr->default_power_limit;
1006 if (hwmgr->od_enabled) {
1007 max_power_limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
1008 max_power_limit /= 100;
1011 if (limit > max_power_limit)
1012 return -EINVAL;
1014 mutex_lock(&hwmgr->smu_lock);
1015 hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
1016 hwmgr->power_limit = limit;
1017 mutex_unlock(&hwmgr->smu_lock);
1018 return 0;
1021 static int pp_get_power_limit(void *handle, uint32_t *limit, bool default_limit)
1023 struct pp_hwmgr *hwmgr = handle;
1025 if (!hwmgr || !hwmgr->pm_en ||!limit)
1026 return -EINVAL;
1028 mutex_lock(&hwmgr->smu_lock);
1030 if (default_limit) {
1031 *limit = hwmgr->default_power_limit;
1032 if (hwmgr->od_enabled) {
1033 *limit *= (100 + hwmgr->platform_descriptor.TDPODLimit);
1034 *limit /= 100;
1037 else
1038 *limit = hwmgr->power_limit;
1040 mutex_unlock(&hwmgr->smu_lock);
1042 return 0;
1045 static int pp_display_configuration_change(void *handle,
1046 const struct amd_pp_display_configuration *display_config)
1048 struct pp_hwmgr *hwmgr = handle;
1050 if (!hwmgr || !hwmgr->pm_en)
1051 return -EINVAL;
1053 mutex_lock(&hwmgr->smu_lock);
1054 phm_store_dal_configuration_data(hwmgr, display_config);
1055 mutex_unlock(&hwmgr->smu_lock);
1056 return 0;
1059 static int pp_get_display_power_level(void *handle,
1060 struct amd_pp_simple_clock_info *output)
1062 struct pp_hwmgr *hwmgr = handle;
1063 int ret = 0;
1065 if (!hwmgr || !hwmgr->pm_en ||!output)
1066 return -EINVAL;
1068 mutex_lock(&hwmgr->smu_lock);
1069 ret = phm_get_dal_power_level(hwmgr, output);
1070 mutex_unlock(&hwmgr->smu_lock);
1071 return ret;
1074 static int pp_get_current_clocks(void *handle,
1075 struct amd_pp_clock_info *clocks)
1077 struct amd_pp_simple_clock_info simple_clocks = { 0 };
1078 struct pp_clock_info hw_clocks;
1079 struct pp_hwmgr *hwmgr = handle;
1080 int ret = 0;
1082 if (!hwmgr || !hwmgr->pm_en)
1083 return -EINVAL;
1085 mutex_lock(&hwmgr->smu_lock);
1087 phm_get_dal_power_level(hwmgr, &simple_clocks);
1089 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
1090 PHM_PlatformCaps_PowerContainment))
1091 ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
1092 &hw_clocks, PHM_PerformanceLevelDesignation_PowerContainment);
1093 else
1094 ret = phm_get_clock_info(hwmgr, &hwmgr->current_ps->hardware,
1095 &hw_clocks, PHM_PerformanceLevelDesignation_Activity);
1097 if (ret) {
1098 pr_debug("Error in phm_get_clock_info \n");
1099 mutex_unlock(&hwmgr->smu_lock);
1100 return -EINVAL;
1103 clocks->min_engine_clock = hw_clocks.min_eng_clk;
1104 clocks->max_engine_clock = hw_clocks.max_eng_clk;
1105 clocks->min_memory_clock = hw_clocks.min_mem_clk;
1106 clocks->max_memory_clock = hw_clocks.max_mem_clk;
1107 clocks->min_bus_bandwidth = hw_clocks.min_bus_bandwidth;
1108 clocks->max_bus_bandwidth = hw_clocks.max_bus_bandwidth;
1110 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1111 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1113 if (simple_clocks.level == 0)
1114 clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
1115 else
1116 clocks->max_clocks_state = simple_clocks.level;
1118 if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
1119 clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
1120 clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
1122 mutex_unlock(&hwmgr->smu_lock);
1123 return 0;
1126 static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
1128 struct pp_hwmgr *hwmgr = handle;
1129 int ret = 0;
1131 if (!hwmgr || !hwmgr->pm_en)
1132 return -EINVAL;
1134 if (clocks == NULL)
1135 return -EINVAL;
1137 mutex_lock(&hwmgr->smu_lock);
1138 ret = phm_get_clock_by_type(hwmgr, type, clocks);
1139 mutex_unlock(&hwmgr->smu_lock);
1140 return ret;
1143 static int pp_get_clock_by_type_with_latency(void *handle,
1144 enum amd_pp_clock_type type,
1145 struct pp_clock_levels_with_latency *clocks)
1147 struct pp_hwmgr *hwmgr = handle;
1148 int ret = 0;
1150 if (!hwmgr || !hwmgr->pm_en ||!clocks)
1151 return -EINVAL;
1153 mutex_lock(&hwmgr->smu_lock);
1154 ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
1155 mutex_unlock(&hwmgr->smu_lock);
1156 return ret;
1159 static int pp_get_clock_by_type_with_voltage(void *handle,
1160 enum amd_pp_clock_type type,
1161 struct pp_clock_levels_with_voltage *clocks)
1163 struct pp_hwmgr *hwmgr = handle;
1164 int ret = 0;
1166 if (!hwmgr || !hwmgr->pm_en ||!clocks)
1167 return -EINVAL;
1169 mutex_lock(&hwmgr->smu_lock);
1171 ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
1173 mutex_unlock(&hwmgr->smu_lock);
1174 return ret;
1177 static int pp_set_watermarks_for_clocks_ranges(void *handle,
1178 void *clock_ranges)
1180 struct pp_hwmgr *hwmgr = handle;
1181 int ret = 0;
1183 if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
1184 return -EINVAL;
1186 mutex_lock(&hwmgr->smu_lock);
1187 ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
1188 clock_ranges);
1189 mutex_unlock(&hwmgr->smu_lock);
1191 return ret;
1194 static int pp_display_clock_voltage_request(void *handle,
1195 struct pp_display_clock_request *clock)
1197 struct pp_hwmgr *hwmgr = handle;
1198 int ret = 0;
1200 if (!hwmgr || !hwmgr->pm_en ||!clock)
1201 return -EINVAL;
1203 mutex_lock(&hwmgr->smu_lock);
1204 ret = phm_display_clock_voltage_request(hwmgr, clock);
1205 mutex_unlock(&hwmgr->smu_lock);
1207 return ret;
1210 static int pp_get_display_mode_validation_clocks(void *handle,
1211 struct amd_pp_simple_clock_info *clocks)
1213 struct pp_hwmgr *hwmgr = handle;
1214 int ret = 0;
1216 if (!hwmgr || !hwmgr->pm_en ||!clocks)
1217 return -EINVAL;
1219 clocks->level = PP_DAL_POWERLEVEL_7;
1221 mutex_lock(&hwmgr->smu_lock);
1223 if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
1224 ret = phm_get_max_high_clocks(hwmgr, clocks);
1226 mutex_unlock(&hwmgr->smu_lock);
1227 return ret;
1230 static int pp_dpm_powergate_mmhub(void *handle)
1232 struct pp_hwmgr *hwmgr = handle;
1234 if (!hwmgr || !hwmgr->pm_en)
1235 return -EINVAL;
1237 if (hwmgr->hwmgr_func->powergate_mmhub == NULL) {
1238 pr_info_ratelimited("%s was not implemented.\n", __func__);
1239 return 0;
1242 return hwmgr->hwmgr_func->powergate_mmhub(hwmgr);
1245 static int pp_dpm_powergate_gfx(void *handle, bool gate)
1247 struct pp_hwmgr *hwmgr = handle;
1249 if (!hwmgr || !hwmgr->pm_en)
1250 return 0;
1252 if (hwmgr->hwmgr_func->powergate_gfx == NULL) {
1253 pr_info_ratelimited("%s was not implemented.\n", __func__);
1254 return 0;
1257 return hwmgr->hwmgr_func->powergate_gfx(hwmgr, gate);
1260 static void pp_dpm_powergate_acp(void *handle, bool gate)
1262 struct pp_hwmgr *hwmgr = handle;
1264 if (!hwmgr || !hwmgr->pm_en)
1265 return;
1267 if (hwmgr->hwmgr_func->powergate_acp == NULL) {
1268 pr_info_ratelimited("%s was not implemented.\n", __func__);
1269 return;
1272 hwmgr->hwmgr_func->powergate_acp(hwmgr, gate);
1275 static void pp_dpm_powergate_sdma(void *handle, bool gate)
1277 struct pp_hwmgr *hwmgr = handle;
1279 if (!hwmgr)
1280 return;
1282 if (hwmgr->hwmgr_func->powergate_sdma == NULL) {
1283 pr_info_ratelimited("%s was not implemented.\n", __func__);
1284 return;
1287 hwmgr->hwmgr_func->powergate_sdma(hwmgr, gate);
1290 static int pp_set_powergating_by_smu(void *handle,
1291 uint32_t block_type, bool gate)
1293 int ret = 0;
1295 switch (block_type) {
1296 case AMD_IP_BLOCK_TYPE_UVD:
1297 case AMD_IP_BLOCK_TYPE_VCN:
1298 pp_dpm_powergate_uvd(handle, gate);
1299 break;
1300 case AMD_IP_BLOCK_TYPE_VCE:
1301 pp_dpm_powergate_vce(handle, gate);
1302 break;
1303 case AMD_IP_BLOCK_TYPE_GMC:
1304 pp_dpm_powergate_mmhub(handle);
1305 break;
1306 case AMD_IP_BLOCK_TYPE_GFX:
1307 ret = pp_dpm_powergate_gfx(handle, gate);
1308 break;
1309 case AMD_IP_BLOCK_TYPE_ACP:
1310 pp_dpm_powergate_acp(handle, gate);
1311 break;
1312 case AMD_IP_BLOCK_TYPE_SDMA:
1313 pp_dpm_powergate_sdma(handle, gate);
1314 break;
1315 default:
1316 break;
1318 return ret;
1321 static int pp_notify_smu_enable_pwe(void *handle)
1323 struct pp_hwmgr *hwmgr = handle;
1325 if (!hwmgr || !hwmgr->pm_en)
1326 return -EINVAL;
1328 if (hwmgr->hwmgr_func->smus_notify_pwe == NULL) {
1329 pr_info_ratelimited("%s was not implemented.\n", __func__);
1330 return -EINVAL;
1333 mutex_lock(&hwmgr->smu_lock);
1334 hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
1335 mutex_unlock(&hwmgr->smu_lock);
1337 return 0;
1340 static int pp_enable_mgpu_fan_boost(void *handle)
1342 struct pp_hwmgr *hwmgr = handle;
1344 if (!hwmgr)
1345 return -EINVAL;
1347 if (!hwmgr->pm_en ||
1348 hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
1349 return 0;
1351 mutex_lock(&hwmgr->smu_lock);
1352 hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
1353 mutex_unlock(&hwmgr->smu_lock);
1355 return 0;
1358 static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
1360 struct pp_hwmgr *hwmgr = handle;
1362 if (!hwmgr || !hwmgr->pm_en)
1363 return -EINVAL;
1365 if (hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk == NULL) {
1366 pr_debug("%s was not implemented.\n", __func__);
1367 return -EINVAL;
1370 mutex_lock(&hwmgr->smu_lock);
1371 hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
1372 mutex_unlock(&hwmgr->smu_lock);
1374 return 0;
1377 static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
1379 struct pp_hwmgr *hwmgr = handle;
1381 if (!hwmgr || !hwmgr->pm_en)
1382 return -EINVAL;
1384 if (hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq == NULL) {
1385 pr_debug("%s was not implemented.\n", __func__);
1386 return -EINVAL;
1389 mutex_lock(&hwmgr->smu_lock);
1390 hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
1391 mutex_unlock(&hwmgr->smu_lock);
1393 return 0;
1396 static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
1398 struct pp_hwmgr *hwmgr = handle;
1400 if (!hwmgr || !hwmgr->pm_en)
1401 return -EINVAL;
1403 if (hwmgr->hwmgr_func->set_hard_min_fclk_by_freq == NULL) {
1404 pr_debug("%s was not implemented.\n", __func__);
1405 return -EINVAL;
1408 mutex_lock(&hwmgr->smu_lock);
1409 hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
1410 mutex_unlock(&hwmgr->smu_lock);
1412 return 0;
1415 static int pp_set_active_display_count(void *handle, uint32_t count)
1417 struct pp_hwmgr *hwmgr = handle;
1418 int ret = 0;
1420 if (!hwmgr || !hwmgr->pm_en)
1421 return -EINVAL;
1423 mutex_lock(&hwmgr->smu_lock);
1424 ret = phm_set_active_display_count(hwmgr, count);
1425 mutex_unlock(&hwmgr->smu_lock);
1427 return ret;
1430 static int pp_get_asic_baco_capability(void *handle, bool *cap)
1432 struct pp_hwmgr *hwmgr = handle;
1434 *cap = false;
1435 if (!hwmgr)
1436 return -EINVAL;
1438 if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_capability)
1439 return 0;
1441 mutex_lock(&hwmgr->smu_lock);
1442 hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap);
1443 mutex_unlock(&hwmgr->smu_lock);
1445 return 0;
1448 static int pp_get_asic_baco_state(void *handle, int *state)
1450 struct pp_hwmgr *hwmgr = handle;
1452 if (!hwmgr)
1453 return -EINVAL;
1455 if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
1456 return 0;
1458 mutex_lock(&hwmgr->smu_lock);
1459 hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum BACO_STATE *)state);
1460 mutex_unlock(&hwmgr->smu_lock);
1462 return 0;
1465 static int pp_set_asic_baco_state(void *handle, int state)
1467 struct pp_hwmgr *hwmgr = handle;
1469 if (!hwmgr)
1470 return -EINVAL;
1472 if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_asic_baco_state)
1473 return 0;
1475 mutex_lock(&hwmgr->smu_lock);
1476 hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum BACO_STATE)state);
1477 mutex_unlock(&hwmgr->smu_lock);
1479 return 0;
1482 static int pp_get_ppfeature_status(void *handle, char *buf)
1484 struct pp_hwmgr *hwmgr = handle;
1485 int ret = 0;
1487 if (!hwmgr || !hwmgr->pm_en || !buf)
1488 return -EINVAL;
1490 if (hwmgr->hwmgr_func->get_ppfeature_status == NULL) {
1491 pr_info_ratelimited("%s was not implemented.\n", __func__);
1492 return -EINVAL;
1495 mutex_lock(&hwmgr->smu_lock);
1496 ret = hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
1497 mutex_unlock(&hwmgr->smu_lock);
1499 return ret;
1502 static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
1504 struct pp_hwmgr *hwmgr = handle;
1505 int ret = 0;
1507 if (!hwmgr || !hwmgr->pm_en)
1508 return -EINVAL;
1510 if (hwmgr->hwmgr_func->set_ppfeature_status == NULL) {
1511 pr_info_ratelimited("%s was not implemented.\n", __func__);
1512 return -EINVAL;
1515 mutex_lock(&hwmgr->smu_lock);
1516 ret = hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks);
1517 mutex_unlock(&hwmgr->smu_lock);
1519 return ret;
1522 static int pp_asic_reset_mode_2(void *handle)
1524 struct pp_hwmgr *hwmgr = handle;
1525 int ret = 0;
1527 if (!hwmgr || !hwmgr->pm_en)
1528 return -EINVAL;
1530 if (hwmgr->hwmgr_func->asic_reset == NULL) {
1531 pr_info_ratelimited("%s was not implemented.\n", __func__);
1532 return -EINVAL;
1535 mutex_lock(&hwmgr->smu_lock);
1536 ret = hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
1537 mutex_unlock(&hwmgr->smu_lock);
1539 return ret;
1542 static int pp_smu_i2c_bus_access(void *handle, bool acquire)
1544 struct pp_hwmgr *hwmgr = handle;
1545 int ret = 0;
1547 if (!hwmgr || !hwmgr->pm_en)
1548 return -EINVAL;
1550 if (hwmgr->hwmgr_func->smu_i2c_bus_access == NULL) {
1551 pr_info_ratelimited("%s was not implemented.\n", __func__);
1552 return -EINVAL;
1555 mutex_lock(&hwmgr->smu_lock);
1556 ret = hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
1557 mutex_unlock(&hwmgr->smu_lock);
1559 return ret;
1562 static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
1564 struct pp_hwmgr *hwmgr = handle;
1566 if (!hwmgr)
1567 return -EINVAL;
1569 if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
1570 return 0;
1572 mutex_lock(&hwmgr->smu_lock);
1573 hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
1574 mutex_unlock(&hwmgr->smu_lock);
1576 return 0;
1579 static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
1581 struct pp_hwmgr *hwmgr = handle;
1583 if (!hwmgr)
1584 return -EINVAL;
1586 if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate)
1587 return 0;
1589 mutex_lock(&hwmgr->smu_lock);
1590 hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);
1591 mutex_unlock(&hwmgr->smu_lock);
1593 return 0;
1596 static const struct amd_pm_funcs pp_dpm_funcs = {
1597 .load_firmware = pp_dpm_load_fw,
1598 .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
1599 .force_performance_level = pp_dpm_force_performance_level,
1600 .get_performance_level = pp_dpm_get_performance_level,
1601 .get_current_power_state = pp_dpm_get_current_power_state,
1602 .dispatch_tasks = pp_dpm_dispatch_tasks,
1603 .set_fan_control_mode = pp_dpm_set_fan_control_mode,
1604 .get_fan_control_mode = pp_dpm_get_fan_control_mode,
1605 .set_fan_speed_percent = pp_dpm_set_fan_speed_percent,
1606 .get_fan_speed_percent = pp_dpm_get_fan_speed_percent,
1607 .get_fan_speed_rpm = pp_dpm_get_fan_speed_rpm,
1608 .set_fan_speed_rpm = pp_dpm_set_fan_speed_rpm,
1609 .get_pp_num_states = pp_dpm_get_pp_num_states,
1610 .get_pp_table = pp_dpm_get_pp_table,
1611 .set_pp_table = pp_dpm_set_pp_table,
1612 .force_clock_level = pp_dpm_force_clock_level,
1613 .print_clock_levels = pp_dpm_print_clock_levels,
1614 .get_sclk_od = pp_dpm_get_sclk_od,
1615 .set_sclk_od = pp_dpm_set_sclk_od,
1616 .get_mclk_od = pp_dpm_get_mclk_od,
1617 .set_mclk_od = pp_dpm_set_mclk_od,
1618 .read_sensor = pp_dpm_read_sensor,
1619 .get_vce_clock_state = pp_dpm_get_vce_clock_state,
1620 .switch_power_profile = pp_dpm_switch_power_profile,
1621 .set_clockgating_by_smu = pp_set_clockgating_by_smu,
1622 .set_powergating_by_smu = pp_set_powergating_by_smu,
1623 .get_power_profile_mode = pp_get_power_profile_mode,
1624 .set_power_profile_mode = pp_set_power_profile_mode,
1625 .odn_edit_dpm_table = pp_odn_edit_dpm_table,
1626 .set_mp1_state = pp_dpm_set_mp1_state,
1627 .set_power_limit = pp_set_power_limit,
1628 .get_power_limit = pp_get_power_limit,
1629 /* export to DC */
1630 .get_sclk = pp_dpm_get_sclk,
1631 .get_mclk = pp_dpm_get_mclk,
1632 .display_configuration_change = pp_display_configuration_change,
1633 .get_display_power_level = pp_get_display_power_level,
1634 .get_current_clocks = pp_get_current_clocks,
1635 .get_clock_by_type = pp_get_clock_by_type,
1636 .get_clock_by_type_with_latency = pp_get_clock_by_type_with_latency,
1637 .get_clock_by_type_with_voltage = pp_get_clock_by_type_with_voltage,
1638 .set_watermarks_for_clocks_ranges = pp_set_watermarks_for_clocks_ranges,
1639 .display_clock_voltage_request = pp_display_clock_voltage_request,
1640 .get_display_mode_validation_clocks = pp_get_display_mode_validation_clocks,
1641 .notify_smu_enable_pwe = pp_notify_smu_enable_pwe,
1642 .enable_mgpu_fan_boost = pp_enable_mgpu_fan_boost,
1643 .set_active_display_count = pp_set_active_display_count,
1644 .set_min_deep_sleep_dcefclk = pp_set_min_deep_sleep_dcefclk,
1645 .set_hard_min_dcefclk_by_freq = pp_set_hard_min_dcefclk_by_freq,
1646 .set_hard_min_fclk_by_freq = pp_set_hard_min_fclk_by_freq,
1647 .get_asic_baco_capability = pp_get_asic_baco_capability,
1648 .get_asic_baco_state = pp_get_asic_baco_state,
1649 .set_asic_baco_state = pp_set_asic_baco_state,
1650 .get_ppfeature_status = pp_get_ppfeature_status,
1651 .set_ppfeature_status = pp_set_ppfeature_status,
1652 .asic_reset_mode_2 = pp_asic_reset_mode_2,
1653 .smu_i2c_bus_access = pp_smu_i2c_bus_access,
1654 .set_df_cstate = pp_set_df_cstate,
1655 .set_xgmi_pstate = pp_set_xgmi_pstate,