/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "pp_debug.h"
#include <linux/errno.h>
#include "hwmgr.h"
#include "hardwaremanager.h"
#include "power_state.h"
#define TEMP_RANGE_MIN (0)
#define TEMP_RANGE_MAX (80 * 1000)
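
/*
 * Most of the entry points below just validate their arguments and then
 * dispatch to the ASIC-specific backend through hwmgr->hwmgr_func.
 * PHM_FUNC_CHECK centralizes the NULL checks for that dispatch.
 */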
#define PHM_FUNC_CHECK(hw) \
	do {							\
		if ((hw) == NULL || (hw)->hwmgr_func == NULL)	\
			return -EINVAL;				\
	} while (0)
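
/* Run the backend's one-time ASIC setup hook, if it provides one. */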
int phm_setup_asic(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (NULL != hwmgr->hwmgr_func->asic_setup)
		return hwmgr->hwmgr_func->asic_setup(hwmgr);

	return 0;
}
int phm_power_down_asic(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (NULL != hwmgr->hwmgr_func->power_off_asic)
		return hwmgr->hwmgr_func->power_off_asic(hwmgr);

	return 0;
}
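
/*
 * Hand the current and requested hardware power states to the backend as a
 * single phm_set_power_state_input so it can program the transition.
 */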
int phm_set_power_state(struct pp_hwmgr *hwmgr,
		    const struct pp_hw_power_state *pcurrent_state,
		    const struct pp_hw_power_state *pnew_power_state)
{
	struct phm_set_power_state_input states;

	PHM_FUNC_CHECK(hwmgr);

	states.pcurrent_state = pcurrent_state;
	states.pnew_state = pnew_power_state;

	if (NULL != hwmgr->hwmgr_func->power_state_set)
		return hwmgr->hwmgr_func->power_state_set(hwmgr, &states);

	return 0;
}
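
/*
 * Enable dynamic power management (DPM). When resuming from suspend on a
 * non-passthrough, non-one-VF setup with DPM already running in the SMU,
 * the enable sequence is skipped (see the check below).
 */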
int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
{
	struct amdgpu_device *adev = NULL;
	int ret = -EINVAL;

	PHM_FUNC_CHECK(hwmgr);
	adev = hwmgr->adev;

	/* Skip for suspend/resume case */
	if (!hwmgr->pp_one_vf && smum_is_dpm_running(hwmgr)
	    && !amdgpu_passthrough(adev) && adev->in_suspend) {
		pr_info("dpm has been enabled\n");
		return 0;
	}

	if (NULL != hwmgr->hwmgr_func->dynamic_state_management_enable)
		ret = hwmgr->hwmgr_func->dynamic_state_management_enable(hwmgr);

	return ret;
}
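
/* Tear down DPM; nothing to do if the SMU reports DPM is not running. */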
int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr)
{
	int ret = -EINVAL;

	PHM_FUNC_CHECK(hwmgr);

	if (!smum_is_dpm_running(hwmgr)) {
		pr_info("dpm has been disabled\n");
		return 0;
	}

	if (hwmgr->hwmgr_func->dynamic_state_management_disable)
		ret = hwmgr->hwmgr_func->dynamic_state_management_disable(hwmgr);

	return ret;
}
int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level)
{
	int ret = 0;

	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->force_dpm_level != NULL)
		ret = hwmgr->hwmgr_func->force_dpm_level(hwmgr, level);

	return ret;
}
int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
				struct pp_power_state *adjusted_ps,
				const struct pp_power_state *current_ps)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->apply_state_adjust_rules != NULL)
		return hwmgr->hwmgr_func->apply_state_adjust_rules(
					hwmgr, adjusted_ps, current_ps);
	return 0;
}
int phm_apply_clock_adjust_rules(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->apply_clocks_adjust_rules != NULL)
		return hwmgr->hwmgr_func->apply_clocks_adjust_rules(hwmgr);
	return 0;
}
int phm_powerdown_uvd(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->powerdown_uvd != NULL)
		return hwmgr->hwmgr_func->powerdown_uvd(hwmgr);
	return 0;
}
int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (NULL != hwmgr->hwmgr_func->disable_clock_power_gating)
		return hwmgr->hwmgr_func->disable_clock_power_gating(hwmgr);

	return 0;
}
int phm_pre_display_configuration_changed(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (NULL != hwmgr->hwmgr_func->pre_display_config_changed)
		hwmgr->hwmgr_func->pre_display_config_changed(hwmgr);

	return 0;
}
int phm_display_configuration_changed(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (NULL != hwmgr->hwmgr_func->display_config_changed)
		hwmgr->hwmgr_func->display_config_changed(hwmgr);

	return 0;
}
int phm_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (NULL != hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment)
		hwmgr->hwmgr_func->notify_smc_display_config_after_ps_adjustment(hwmgr);

	return 0;
}
int phm_stop_thermal_controller(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->stop_thermal_controller == NULL)
		return -EINVAL;

	return hwmgr->hwmgr_func->stop_thermal_controller(hwmgr);
}
int phm_register_irq_handlers(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->register_irq_handlers != NULL)
		return hwmgr->hwmgr_func->register_irq_handlers(hwmgr);

	return 0;
}
/**
 * Initializes the thermal controller subsystem.
 *
 * @param    pHwMgr  the address of the powerplay hardware manager.
 * @exception PP_Result_Failed if any of the parameters is NULL, otherwise the return value from the dispatcher.
 */
int phm_start_thermal_controller(struct pp_hwmgr *hwmgr)
{
	int ret = 0;
	struct PP_TemperatureRange range = {
		TEMP_RANGE_MIN,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MIN,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MIN,
		TEMP_RANGE_MAX,
		TEMP_RANGE_MAX};
	struct amdgpu_device *adev = hwmgr->adev;

	if (hwmgr->hwmgr_func->get_thermal_temperature_range)
		hwmgr->hwmgr_func->get_thermal_temperature_range(
				hwmgr, &range);

	if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
			PHM_PlatformCaps_ThermalController)
			&& hwmgr->hwmgr_func->start_thermal_controller != NULL)
		ret = hwmgr->hwmgr_func->start_thermal_controller(hwmgr, &range);

	adev->pm.dpm.thermal.min_temp = range.min;
	adev->pm.dpm.thermal.max_temp = range.max;
	adev->pm.dpm.thermal.max_edge_emergency_temp = range.edge_emergency_max;
	adev->pm.dpm.thermal.min_hotspot_temp = range.hotspot_min;
	adev->pm.dpm.thermal.max_hotspot_crit_temp = range.hotspot_crit_max;
	adev->pm.dpm.thermal.max_hotspot_emergency_temp = range.hotspot_emergency_max;
	adev->pm.dpm.thermal.min_mem_temp = range.mem_min;
	adev->pm.dpm.thermal.max_mem_crit_temp = range.mem_crit_max;
	adev->pm.dpm.thermal.max_mem_emergency_temp = range.mem_emergency_max;

	return ret;
}
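
/*
 * Ask the backend whether the SMC must be reprogrammed for a new display
 * configuration; SR-IOV one-VF mode never requires it.
 */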
bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->pp_one_vf)
		return false;

	if (hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration == NULL)
		return false;

	return hwmgr->hwmgr_func->check_smc_update_required_for_display_configuration(hwmgr);
}
int phm_check_states_equal(struct pp_hwmgr *hwmgr,
				const struct pp_hw_power_state *pstate1,
				const struct pp_hw_power_state *pstate2,
				bool *equal)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->check_states_equal == NULL)
		return -EINVAL;

	return hwmgr->hwmgr_func->check_states_equal(hwmgr, pstate1, pstate2, equal);
}
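
/*
 * Push the display (DAL) configuration down to the backend: the deep-sleep
 * DCEF clock floor, the active display count and the CC6 parameters.
 */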
int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr,
		    const struct amd_pp_display_configuration *display_config)
{
	int index = 0;
	int number_of_active_display = 0;

	PHM_FUNC_CHECK(hwmgr);

	if (display_config == NULL)
		return -EINVAL;

	if (NULL != hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk)
		hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, display_config->min_dcef_deep_sleep_set_clk);

	for (index = 0; index < display_config->num_path_including_non_display; index++) {
		if (display_config->displays[index].controller_id != 0)
			number_of_active_display++;
	}

	if (NULL != hwmgr->hwmgr_func->set_active_display_count)
		hwmgr->hwmgr_func->set_active_display_count(hwmgr, number_of_active_display);

	if (hwmgr->hwmgr_func->store_cc6_data == NULL)
		return -EINVAL;

	/* TODO: pass other display configuration in the future */

	if (hwmgr->hwmgr_func->store_cc6_data)
		hwmgr->hwmgr_func->store_cc6_data(hwmgr,
				display_config->cpu_pstate_separation_time,
				display_config->cpu_cc6_disable,
				display_config->cpu_pstate_disable,
				display_config->nb_pstate_switch_disable);

	return 0;
}
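
/* Report the simple clock limits DAL uses to pick its power level. */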
int phm_get_dal_power_level(struct pp_hwmgr *hwmgr,
		struct amd_pp_simple_clock_info *info)
{
	PHM_FUNC_CHECK(hwmgr);

	if (info == NULL || hwmgr->hwmgr_func->get_dal_power_level == NULL)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_dal_power_level(hwmgr, info);
}
int phm_set_cpu_power_state(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->set_cpu_power_state != NULL)
		return hwmgr->hwmgr_func->set_cpu_power_state(hwmgr);

	return 0;
}
int phm_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state,
				PHM_PerformanceLevelDesignation designation, uint32_t index,
				PHM_PerformanceLevel *level)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->get_performance_level == NULL)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_performance_level(hwmgr, state, designation, index, level);
}
/**
 * Gets the clock information for a given hardware power state.
 *
 * @param    pHwMgr  the address of the powerplay hardware manager.
 * @param    pPowerState the address of the Power State structure.
 * @param    pClockInfo the address of PP_ClockInfo structure where the result will be returned.
 * @exception PP_Result_Failed if any of the parameters is NULL, otherwise the return value from the back-end.
 */
int phm_get_clock_info(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, struct pp_clock_info *pclock_info,
			PHM_PerformanceLevelDesignation designation)
{
	int result;
	PHM_PerformanceLevel performance_level = {0};

	PHM_FUNC_CHECK(hwmgr);

	PP_ASSERT_WITH_CODE((NULL != state), "Invalid Input!", return -EINVAL);
	PP_ASSERT_WITH_CODE((NULL != pclock_info), "Invalid Input!", return -EINVAL);

	result = phm_get_performance_level(hwmgr, state, PHM_PerformanceLevelDesignation_Activity, 0, &performance_level);

	PP_ASSERT_WITH_CODE((0 == result), "Failed to retrieve minimum clocks.", return result);

	pclock_info->min_mem_clk = performance_level.memory_clock;
	pclock_info->min_eng_clk = performance_level.coreClock;
	pclock_info->min_bus_bandwidth = performance_level.nonLocalMemoryFreq * performance_level.nonLocalMemoryWidth;

	result = phm_get_performance_level(hwmgr, state, designation,
			(hwmgr->platform_descriptor.hardwareActivityPerformanceLevels - 1), &performance_level);

	PP_ASSERT_WITH_CODE((0 == result), "Failed to retrieve maximum clocks.", return result);

	pclock_info->max_mem_clk = performance_level.memory_clock;
	pclock_info->max_eng_clk = performance_level.coreClock;
	pclock_info->max_bus_bandwidth = performance_level.nonLocalMemoryFreq * performance_level.nonLocalMemoryWidth;

	return 0;
}
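
/* Clock-query wrappers: each simply forwards to the matching backend hook. */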
int phm_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, struct pp_clock_info *clock_info)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->get_current_shallow_sleep_clocks == NULL)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_current_shallow_sleep_clocks(hwmgr, state, clock_info);
}
int phm_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->get_clock_by_type == NULL)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_clock_by_type(hwmgr, type, clocks);
}
int phm_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr,
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_latency *clocks)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->get_clock_by_type_with_latency == NULL)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_clock_by_type_with_latency(hwmgr, type, clocks);
}
int phm_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr,
		enum amd_pp_clock_type type,
		struct pp_clock_levels_with_voltage *clocks)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->get_clock_by_type_with_voltage == NULL)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_clock_by_type_with_voltage(hwmgr, type, clocks);
}
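
/* Pass the display watermark settings for the given clock ranges to the backend. */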
int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr,
					void *clock_ranges)
{
	PHM_FUNC_CHECK(hwmgr);

	if (!hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges)
		return -EINVAL;

	return hwmgr->hwmgr_func->set_watermarks_for_clocks_ranges(hwmgr,
			clock_ranges);
}
int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
		struct pp_display_clock_request *clock)
{
	PHM_FUNC_CHECK(hwmgr);

	if (!hwmgr->hwmgr_func->display_clock_voltage_request)
		return -EINVAL;

	return hwmgr->hwmgr_func->display_clock_voltage_request(hwmgr, clock);
}
int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->get_max_high_clocks == NULL)
		return -EINVAL;

	return hwmgr->hwmgr_func->get_max_high_clocks(hwmgr, clocks);
}
int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr)
{
	PHM_FUNC_CHECK(hwmgr);

	if (hwmgr->hwmgr_func->disable_smc_firmware_ctf == NULL)
		return -EINVAL;

	return hwmgr->hwmgr_func->disable_smc_firmware_ctf(hwmgr);
}
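
/* Simple pass-through setters for the active display count and minimum clock levels. */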
int phm_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count)
{
	PHM_FUNC_CHECK(hwmgr);

	if (!hwmgr->hwmgr_func->set_active_display_count)
		return -EINVAL;

	return hwmgr->hwmgr_func->set_active_display_count(hwmgr, count);
}
int phm_set_min_deep_sleep_dcefclk(struct pp_hwmgr *hwmgr, uint32_t clock)
{
	PHM_FUNC_CHECK(hwmgr);

	if (!hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk)
		return -EINVAL;

	return hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
}
int phm_set_hard_min_dcefclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
{
	PHM_FUNC_CHECK(hwmgr);

	if (!hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq)
		return -EINVAL;

	return hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
}
int phm_set_hard_min_fclk_by_freq(struct pp_hwmgr *hwmgr, uint32_t clock)
{
	PHM_FUNC_CHECK(hwmgr);

	if (!hwmgr->hwmgr_func->set_hard_min_fclk_by_freq)
		return -EINVAL;

	return hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
}