Linux 4.19.133
[linux/fpc-iii.git] / drivers/gpu/drm/amd/powerplay/hwmgr/pp_psm.c
blob 56437866d1206c163f36593e2764bfb6bfd96170
/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include "pp_psm.h"
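/*
 * Allocate the power state table (hwmgr->ps) plus the current and requested
 * state buffers, then fill the table from the ASIC-specific backend via
 * get_pp_table_entry(). The boot state is copied into both buffers.
 */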
int psm_init_power_state_table(struct pp_hwmgr *hwmgr)
{
        int result;
        unsigned int i;
        unsigned int table_entries;
        struct pp_power_state *state;
        int size;

        if (hwmgr->hwmgr_func->get_num_of_pp_table_entries == NULL)
                return 0;

        if (hwmgr->hwmgr_func->get_power_state_size == NULL)
                return 0;

        hwmgr->num_ps = table_entries = hwmgr->hwmgr_func->get_num_of_pp_table_entries(hwmgr);

        hwmgr->ps_size = size = hwmgr->hwmgr_func->get_power_state_size(hwmgr) +
                                        sizeof(struct pp_power_state);

        if (table_entries == 0 || size == 0) {
                pr_warn("Please check whether power state management is supported on this asic\n");
                return 0;
        }

        hwmgr->ps = kcalloc(table_entries, size, GFP_KERNEL);
        if (hwmgr->ps == NULL)
                return -ENOMEM;

        hwmgr->request_ps = kzalloc(size, GFP_KERNEL);
        if (hwmgr->request_ps == NULL) {
                kfree(hwmgr->ps);
                hwmgr->ps = NULL;
                return -ENOMEM;
        }

        hwmgr->current_ps = kzalloc(size, GFP_KERNEL);
        if (hwmgr->current_ps == NULL) {
                kfree(hwmgr->request_ps);
                kfree(hwmgr->ps);
                hwmgr->request_ps = NULL;
                hwmgr->ps = NULL;
                return -ENOMEM;
        }

        state = hwmgr->ps;

        for (i = 0; i < table_entries; i++) {
                result = hwmgr->hwmgr_func->get_pp_table_entry(hwmgr, i, state);

                if (state->classification.flags & PP_StateClassificationFlag_Boot) {
                        hwmgr->boot_ps = state;
                        memcpy(hwmgr->current_ps, state, size);
                        memcpy(hwmgr->request_ps, state, size);
                }

                state->id = i + 1; /* assigned unique num for every power state id */

                if (state->classification.flags & PP_StateClassificationFlag_Uvd)
                        hwmgr->uvd_ps = state;
                state = (struct pp_power_state *)((unsigned long)state + size);
        }

        return 0;
}
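/* Free the power state table and the current/requested state buffers. */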
int psm_fini_power_state_table(struct pp_hwmgr *hwmgr)
{
        if (hwmgr == NULL)
                return -EINVAL;

        if (!hwmgr->ps)
                return 0;

        kfree(hwmgr->current_ps);
        kfree(hwmgr->request_ps);
        kfree(hwmgr->ps);
        hwmgr->request_ps = NULL;
        hwmgr->ps = NULL;
        hwmgr->current_ps = NULL;
        return 0;
}
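/* Look up the id of the first power state whose UI label matches ui_label. */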
static int psm_get_ui_state(struct pp_hwmgr *hwmgr,
                                enum PP_StateUILabel ui_label,
                                unsigned long *state_id)
{
        struct pp_power_state *state;
        int table_entries;
        int i;

        table_entries = hwmgr->num_ps;
        state = hwmgr->ps;

        for (i = 0; i < table_entries; i++) {
                if (state->classification.ui_label & ui_label) {
                        *state_id = state->id;
                        return 0;
                }
                state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
        }
        return -EINVAL;
}
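/* Look up the id of the first power state carrying the given classification flag. */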
static int psm_get_state_by_classification(struct pp_hwmgr *hwmgr,
                                enum PP_StateClassificationFlag flag,
                                unsigned long *state_id)
{
        struct pp_power_state *state;
        int table_entries;
        int i;

        table_entries = hwmgr->num_ps;
        state = hwmgr->ps;

        for (i = 0; i < table_entries; i++) {
                if (state->classification.flags & flag) {
                        *state_id = state->id;
                        return 0;
                }
                state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
        }
        return -EINVAL;
}
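/* Copy the power state with the given id into hwmgr->request_ps. */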
static int psm_set_states(struct pp_hwmgr *hwmgr, unsigned long state_id)
{
        struct pp_power_state *state;
        int table_entries;
        int i;

        table_entries = hwmgr->num_ps;

        state = hwmgr->ps;

        for (i = 0; i < table_entries; i++) {
                if (state->id == state_id) {
                        memcpy(hwmgr->request_ps, state, hwmgr->ps_size);
                        return 0;
                }
                state = (struct pp_power_state *)((unsigned long)state + hwmgr->ps_size);
        }
        return -EINVAL;
}
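/* Request the boot-classified power state, if one exists in the table. */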
int psm_set_boot_states(struct pp_hwmgr *hwmgr)
{
        unsigned long state_id;
        int ret = -EINVAL;

        if (!hwmgr->ps)
                return 0;

        if (!psm_get_state_by_classification(hwmgr, PP_StateClassificationFlag_Boot,
                                        &state_id))
                ret = psm_set_states(hwmgr, state_id);

        return ret;
}
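/* Request the power state carrying the "Performance" UI label, if one exists. */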
int psm_set_performance_states(struct pp_hwmgr *hwmgr)
{
        unsigned long state_id;
        int ret = -EINVAL;

        if (!hwmgr->ps)
                return 0;

        if (!psm_get_ui_state(hwmgr, PP_StateUILabel_Performance,
                                        &state_id))
                ret = psm_set_states(hwmgr, state_id);

        return ret;
}
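/*
 * Find the power state matching the requested UI label; battery and balanced
 * requests fall back to the performance state when no direct match exists.
 */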
int psm_set_user_performance_state(struct pp_hwmgr *hwmgr,
                                enum PP_StateUILabel label_id,
                                struct pp_power_state **state)
{
        int table_entries;
        int i;

        if (!hwmgr->ps)
                return 0;

        table_entries = hwmgr->num_ps;
        *state = hwmgr->ps;

restart_search:
        for (i = 0; i < table_entries; i++) {
                if ((*state)->classification.ui_label & label_id)
                        return 0;
                *state = (struct pp_power_state *)((uintptr_t)*state + hwmgr->ps_size);
        }

        switch (label_id) {
        case PP_StateUILabel_Battery:
        case PP_StateUILabel_Balanced:
                label_id = PP_StateUILabel_Performance;
                goto restart_search;
        default:
                break;
        }
        return -EINVAL;
}
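/*
 * Apply the state adjust rules to the requested state and, when it differs
 * from the current state or the display configuration requires an SMC update,
 * program it via phm_set_power_state() and record it as the current state.
 */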
static void power_state_management(struct pp_hwmgr *hwmgr,
                                                struct pp_power_state *new_ps)
{
        struct pp_power_state *pcurrent;
        struct pp_power_state *requested;
        bool equal;

        if (new_ps != NULL)
                requested = new_ps;
        else
                requested = hwmgr->request_ps;

        pcurrent = hwmgr->current_ps;

        phm_apply_state_adjust_rules(hwmgr, requested, pcurrent);
        if (pcurrent == NULL || (0 != phm_check_states_equal(hwmgr,
                        &pcurrent->hardware, &requested->hardware, &equal)))
                equal = false;

        if (!equal || phm_check_smc_update_required_for_display_configuration(hwmgr)) {
                phm_set_power_state(hwmgr, &pcurrent->hardware, &requested->hardware);
                memcpy(hwmgr->current_ps, hwmgr->request_ps, hwmgr->ps_size);
        }
}
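/*
 * Re-evaluate the power state after a display configuration change: run the
 * state (or clock) adjust rules, notify the SMC, re-apply any requested DPM
 * level and, when not in manual mode, select the power profile corresponding
 * to the highest bit set in workload_mask.
 */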
int psm_adjust_power_state_dynamic(struct pp_hwmgr *hwmgr, bool skip,
                                                struct pp_power_state *new_ps)
{
        uint32_t index;
        long workload;

        if (skip)
                return 0;

        phm_display_configuration_changed(hwmgr);

        if (hwmgr->ps)
                power_state_management(hwmgr, new_ps);
        else
                /*
                 * for vega12/vega20 which does not support power state manager
                 * DAL clock limits should also be honoured
                 */
                phm_apply_clock_adjust_rules(hwmgr);

        phm_notify_smc_display_config_after_ps_adjustment(hwmgr);

        if (!phm_force_dpm_levels(hwmgr, hwmgr->request_dpm_level))
                hwmgr->dpm_level = hwmgr->request_dpm_level;

        if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
                index = fls(hwmgr->workload_mask);
                index = index > 0 && index <= Workload_Policy_Max ? index - 1 : 0;
                workload = hwmgr->workload_setting[index];

                if (hwmgr->power_profile_mode != workload && hwmgr->hwmgr_func->set_power_profile_mode)
                        hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
        }

        return 0;
}