/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"

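/**
 * amdgpu_dpm_print_class_info - dump a power state's classification flags
 * @class: ATOM_PPLIB classification word (UI class plus internal flags)
 * @class2: ATOM_PPLIB classification2 word (extended internal flags)
 *
 * Decodes the bitfields and prints the UI class followed by every
 * internal class flag that is set.
 */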
void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
	const char *s;

	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		s = "none";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		s = "battery";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		s = "balanced";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		s = "performance";
		break;
	}
	printk("\tui class: %s\n", s);
	printk("\tinternal class:");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		pr_cont(" none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			pr_cont(" boot");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			pr_cont(" thermal");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			pr_cont(" limited_pwr");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			pr_cont(" rest");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			pr_cont(" forced");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			pr_cont(" 3d_perf");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			pr_cont(" ovrdrv");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			pr_cont(" uvd");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			pr_cont(" 3d_low");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			pr_cont(" acpi");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			pr_cont(" uvd_hd2");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			pr_cont(" uvd_hd");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			pr_cont(" uvd_sd");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			pr_cont(" limited_pwr2");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			pr_cont(" ulv");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			pr_cont(" uvd_mvc");
	}
	pr_cont("\n");
}

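/**
 * amdgpu_dpm_print_cap_info - dump a power state's capability flags
 * @caps: ATOM_PPLIB platform capability bits for the state
 */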
void amdgpu_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps:");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		pr_cont(" single_disp");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		pr_cont(" video");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		pr_cont(" no_dc");
	pr_cont("\n");
}

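/**
 * amdgpu_dpm_print_ps_status - show where a power state is currently used
 * @adev: amdgpu device pointer
 * @rps: power state to check
 *
 * Prints "c", "r" and/or "b" depending on whether @rps is the current,
 * requested and/or boot state.
 */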
void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
				struct amdgpu_ps *rps)
{
	printk("\tstatus:");
	if (rps == adev->pm.dpm.current_ps)
		pr_cont(" c");
	if (rps == adev->pm.dpm.requested_ps)
		pr_cont(" r");
	if (rps == adev->pm.dpm.boot_ps)
		pr_cont(" b");
	pr_cont("\n");
}

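/**
 * amdgpu_dpm_get_active_displays - cache the set of enabled CRTCs
 * @adev: amdgpu device pointer
 *
 * Records a bitmask of the enabled CRTCs and their count in
 * adev->pm.dpm.new_active_crtcs / new_active_crtc_count, so the DPM code
 * can compare the new display configuration against the active one.
 */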
void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	adev->pm.dpm.new_active_crtcs = 0;
	adev->pm.dpm.new_active_crtc_count = 0;
	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (amdgpu_crtc->enabled) {
				adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
				adev->pm.dpm.new_active_crtc_count++;
			}
		}
	}
}

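/**
 * amdgpu_dpm_get_vblank_time - length of the vertical blank, in microseconds
 * @adev: amdgpu device pointer
 *
 * Uses the first enabled CRTC with a valid mode.  The blank interval in
 * pixels is htotal * (vblank_end - vdisplay + 2 * v_border); since the
 * pixel clock is in kHz, pixels * 1000 / clock gives microseconds.
 * Returns 0xffffffff when no display is enabled, so callers can treat
 * the no-display case as unconstrained.
 */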
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vblank_in_pixels;
	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vblank_in_pixels =
					amdgpu_crtc->hw_mode.crtc_htotal *
					(amdgpu_crtc->hw_mode.crtc_vblank_end -
					 amdgpu_crtc->hw_mode.crtc_vdisplay +
					 (amdgpu_crtc->v_border * 2));

				vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
				break;
			}
		}
	}

	return vblank_time_us;
}

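/**
 * amdgpu_dpm_get_vrefresh - vertical refresh rate of the active display
 * @adev: amdgpu device pointer
 *
 * Returns the refresh rate in Hz of the first enabled CRTC with a valid
 * mode, or 0 if no display is enabled.
 */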
u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vrefresh = 0;

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
				break;
			}
		}
	}

	return vrefresh;
}

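/**
 * amdgpu_is_internal_thermal_sensor - is the thermal sensor on the die?
 * @sensor: thermal controller type from the PowerPlay table
 *
 * Returns true for the on-chip sensors; the combined external chips
 * (ADT7473/EMC2103 with internal sensor) report false because they need
 * special handling.
 */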
bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
{
	switch (sensor) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		return true;
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		return false;
	}
}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};

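/*
 * Copy one ATOM clock/voltage dependency table into the driver's own
 * representation.  Clocks are stored in the VBIOS as a little-endian
 * 16-bit low word plus an 8-bit high byte, so the 24-bit value is
 * reassembled as low | (high << 16).  For example (hypothetical values):
 * usClockLow = 0x1a80 and ucClockHigh = 0x01 give 0x011a80 = 72320,
 * i.e. 723.20 MHz in the table's 10 kHz units.
 */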
static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct amdgpu_clock_voltage_dependency_entry);
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
	if (!amdgpu_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	amdgpu_table->count = atom_table->ucNumEntries;

	return 0;
}

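/**
 * amdgpu_get_platform_caps - read the PPLib platform capabilities
 * @adev: amdgpu device pointer
 *
 * Caches the platform capability flags and the backbias/voltage response
 * times from the PowerPlay table.  Returns 0 on success, -EINVAL if the
 * table header cannot be parsed.
 */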
int amdgpu_get_platform_caps(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26

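/**
 * amdgpu_parse_extended_power_table - pull the optional PPLib sub-tables
 * out of the VBIOS PowerPlay table
 * @adev: amdgpu device pointer
 *
 * Walks the fan table, the clock/voltage dependency tables, the CAC
 * leakage data and the extended header (VCE/UVD/SAMU/ACP, PPM and
 * PowerTune tables), allocating kernel copies as it goes.  Each sub-table
 * is guarded by a size/offset check, so tables from VBIOS versions that
 * predate it are simply skipped.  On allocation failure, everything
 * parsed so far is freed via amdgpu_free_extended_power_table() and an
 * errno is returned.
 */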
int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				adev->pm.dpm.fan.t_max = 10900;
			adev->pm.dpm.fan.cycle_delay = 100000;
			if (fan_info->fan.ucFanTableFormat >= 3) {
				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
				adev->pm.dpm.fan.default_max_fan_pwm =
					le16_to_cpu(fan_info->fan3.usFanPWMMax);
				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
				adev->pm.dpm.fan.fan_output_sensitivity =
					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
			}
			adev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

	/* clock dependency tables, shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kcalloc(psl->ucNumEntries,
					sizeof(struct amdgpu_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (adev->pm.dpm.tdp_od_limit)
			adev->pm.dpm.power_control = true;
		else
			adev->pm.dpm.power_control = false;
		adev->pm.dpm.tdp_adjustment = 0;
		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);

			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}

	/* ext tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));

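		/* vce clock/voltage dependency table and vce state table */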
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
		    ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);

			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			adev->pm.dpm.num_of_vce_states =
				states->numEntries > AMD_MAX_VCE_LEVELS ?
				AMD_MAX_VCE_LEVELS : states->numEntries;
			for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				adev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}

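		/* uvd clock/voltage dependency table */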
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
		    ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);

			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}

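		/* samu clock/voltage dependency table */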
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
		    ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);

			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}

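		/* ppm (platform power management) table */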
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));

			adev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.ppm_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			adev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}

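		/* acp clock/voltage dependency table */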
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
		    ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);

			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}

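		/* powertune (cac/tdp) table */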
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
		    ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;

			adev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					ppt->usMaximumPowerDeliveryLimit;
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}

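		/* sclk-to-vddgfx dependency table */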
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
		    ext_hdr->usSclkVddgfxTableOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(
					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
					dep_table);
			if (ret) {
				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
				return ret;
			}
		}
	}

	return 0;
}

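/**
 * amdgpu_free_extended_power_table - free everything allocated by
 * amdgpu_parse_extended_power_table()
 * @adev: amdgpu device pointer
 *
 * kfree() is NULL-safe, so this is also safe to call on a partially
 * parsed table.
 */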
void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
}

static const char *pp_lib_thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"RV6xx",
	"RV770",
	"adt7473",
	"NONE",
	"External GPIO",
	"Evergreen",
	"emc2103",
	"Sumo",
	"Northern Islands",
	"Southern Islands",
	"lm96163",
	"Sea Islands",
	"Kaveri/Kabini",
};

void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	ATOM_PPLIB_POWERPLAYTABLE *power_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	ATOM_PPLIB_THERMALCONTROLLER *controller;
	struct amdgpu_i2c_bus_rec i2c_bus;
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return;
	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
		(mode_info->atom_context->bios + data_offset);
	controller = &power_table->sThermalController;

	/* add the i2c bus for thermal/fan chip */
	if (controller->ucType > 0) {
		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
			adev->pm.no_fan = true;
		adev->pm.fan_pulses_per_revolution =
			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
		if (adev->pm.fan_pulses_per_revolution) {
			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
		}
		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
			DRM_INFO("External GPIO thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
				 pp_lib_thermal_controller_names[controller->ucType],
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
			if (adev->pm.i2c_bus) {
				struct i2c_board_info info = { };
				const char *name = pp_lib_thermal_controller_names[controller->ucType];

				info.addr = controller->ucI2cAddress >> 1;
				strlcpy(info.type, name, sizeof(info.type));
				i2c_new_device(&adev->pm.i2c_bus->adapter, &info);
			}
		} else {
			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
				 controller->ucType,
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
		}
	}
}

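/**
 * amdgpu_get_pcie_gen_support - pick a supported PCIE gen
 * @adev: amdgpu device pointer
 * @sys_mask: mask of CAIL_PCIE_LINK_SPEED_SUPPORT_* speeds the platform supports
 * @asic_gen: current speed of the ASIC
 * @default_gen: speed to aim for
 *
 * If the ASIC already runs at a specific gen, keep it; otherwise return
 * @default_gen when the platform mask also allows it, falling back to
 * gen 1.
 */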
enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
						 u32 sys_mask,
						 enum amdgpu_pcie_gen asic_gen,
						 enum amdgpu_pcie_gen default_gen)
{
	switch (asic_gen) {
	case AMDGPU_PCIE_GEN1:
		return AMDGPU_PCIE_GEN1;
	case AMDGPU_PCIE_GEN2:
		return AMDGPU_PCIE_GEN2;
	case AMDGPU_PCIE_GEN3:
		return AMDGPU_PCIE_GEN3;
	default:
		if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
		    (default_gen == AMDGPU_PCIE_GEN3))
			return AMDGPU_PCIE_GEN3;
		else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
			 (default_gen == AMDGPU_PCIE_GEN2))
			return AMDGPU_PCIE_GEN2;
		else
			return AMDGPU_PCIE_GEN1;
	}
	return AMDGPU_PCIE_GEN1;
}

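/**
 * amdgpu_get_vce_clock_state - fetch one of the cached VCE states
 * @handle: amdgpu device pointer (as void *)
 * @idx: index into the VCE state array
 *
 * Returns the state parsed from the PowerPlay VCE state table, or NULL
 * if @idx is out of range.
 */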
struct amd_vce_state*
amdgpu_get_vce_clock_state(void *handle, u32 idx)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (idx < adev->pm.dpm.num_of_vce_states)
		return &adev->pm.dpm.vce_states[idx];

	return NULL;
}