/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "amdgpu_i2c.h"
#include "amdgpu_dpm.h"
#include "atom.h"
#include "amd_pcie.h"
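
/*
 * Debug helpers: the three print functions below decode the ATOM PPLib
 * classification, capability and power-state status bits into
 * human-readable dmesg output.
 */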
void amdgpu_dpm_print_class_info(u32 class, u32 class2)
{
	const char *s;

	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		s = "none";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		s = "battery";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		s = "balanced";
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		s = "performance";
		break;
	}
	printk("\tui class: %s\n", s);
	printk("\tinternal class:");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		pr_cont(" none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			pr_cont(" boot");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			pr_cont(" thermal");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			pr_cont(" limited_pwr");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			pr_cont(" rest");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			pr_cont(" forced");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			pr_cont(" 3d_perf");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			pr_cont(" ovrdrv");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			pr_cont(" uvd");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			pr_cont(" 3d_low");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			pr_cont(" acpi");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			pr_cont(" uvd_hd2");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			pr_cont(" uvd_hd");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			pr_cont(" uvd_sd");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			pr_cont(" limited_pwr2");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			pr_cont(" ulv");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			pr_cont(" uvd_mvc");
	}
	pr_cont("\n");
}

void amdgpu_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps:");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		pr_cont(" single_disp");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		pr_cont(" video");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		pr_cont(" no_dc");
	pr_cont("\n");
}

void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
				struct amdgpu_ps *rps)
{
	printk("\tstatus:");
	if (rps == adev->pm.dpm.current_ps)
		pr_cont(" c");
	if (rps == adev->pm.dpm.requested_ps)
		pr_cont(" r");
	if (rps == adev->pm.dpm.boot_ps)
		pr_cont(" b");
	pr_cont("\n");
}
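
/*
 * Build the mask and count of currently enabled CRTCs so the per-ASIC DPM
 * code can take the active displays into account when selecting power
 * states.
 */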
void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
{
	struct drm_device *ddev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;

	adev->pm.dpm.new_active_crtcs = 0;
	adev->pm.dpm.new_active_crtc_count = 0;
	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc,
				    &ddev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (amdgpu_crtc->enabled) {
				adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
				adev->pm.dpm.new_active_crtc_count++;
			}
		}
	}
}
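
/*
 * Worst-case vblank duration of the first active CRTC, in microseconds:
 * pixels per vblank (htotal * vblank lines, including borders) divided by
 * the pixel clock (hw_mode.clock is in kHz, hence the * 1000).
 */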
u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vblank_in_pixels;
	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vblank_in_pixels =
					amdgpu_crtc->hw_mode.crtc_htotal *
					(amdgpu_crtc->hw_mode.crtc_vblank_end -
					 amdgpu_crtc->hw_mode.crtc_vdisplay +
					 (amdgpu_crtc->v_border * 2));

				vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
				break;
			}
		}
	}

	return vblank_time_us;
}

u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
{
	struct drm_device *dev = adev->ddev;
	struct drm_crtc *crtc;
	struct amdgpu_crtc *amdgpu_crtc;
	u32 vrefresh = 0;

	if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
		list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
			amdgpu_crtc = to_amdgpu_crtc(crtc);
			if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
				vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
				break;
			}
		}
	}

	return vrefresh;
}

bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
{
	switch (sensor) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		return true;
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		return false;
	}
}
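
/*
 * The PowerPlay data in the VBIOS is a single blob whose layout depends on
 * the table revision; these unions overlay the possible table and fan-table
 * formats on the same bytes so the parser can pick fields by revision.
 */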
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};

static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
					      ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct amdgpu_clock_voltage_dependency_entry);
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
	if (!amdgpu_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	amdgpu_table->count = atom_table->ucNumEntries;

	return 0;
}
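
/* Read the PowerPlay platform caps and response times out of the VBIOS. */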
int amdgpu_get_platform_caps(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
	adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
	adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);

	return 0;
}

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26
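
/*
 * Parse the optional PowerPlay tables (fan table, clock/voltage dependency
 * and phase-shedding tables, CAC data, and the extended-header tables) into
 * adev->pm.dpm.  On any allocation or parse failure the partially filled
 * tables are released before returning.
 */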
int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				adev->pm.dpm.fan.t_max = 10900;
			adev->pm.dpm.fan.cycle_delay = 100000;
			if (fan_info->fan.ucFanTableFormat >= 3) {
				adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
				adev->pm.dpm.fan.default_max_fan_pwm =
					le16_to_cpu(fan_info->fan3.usFanPWMMax);
				adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
				adev->pm.dpm.fan.fan_output_sensitivity =
					le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
			}
			adev->pm.dpm.fan.ucode_fan_control = true;
		}
	}

	/* clock dependency tables, shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
								 dep_table);
			if (ret) {
				amdgpu_free_extended_power_table(adev);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kcalloc(psl->ucNumEntries,
					sizeof(struct amdgpu_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}

	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
		adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (adev->pm.dpm.tdp_od_limit)
			adev->pm.dpm.power_control = true;
		else
			adev->pm.dpm.power_control = false;
		adev->pm.dpm.tdp_adjustment = 0;
		adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
			adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}
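
	/*
	 * Extended header tables (VCE/UVD/SAMU/ACP clock-voltage limits, PPM
	 * and PowerTune data, vddgfx dependency); each table is only parsed
	 * when the extended header is large enough to contain its offset.
	 */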
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
		    ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_State_Table *states =
				(ATOM_PPLIB_VCE_State_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
				 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			ATOM_PPLIB_VCE_State_Record *state_entry;
			VCEClockInfo *vce_clk;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			state_entry = &states->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
			adev->pm.dpm.num_of_vce_states =
				states->numEntries > AMD_MAX_VCE_LEVELS ?
				AMD_MAX_VCE_LEVELS : states->numEntries;
			for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
				vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				adev->pm.dpm.vce_states[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				adev->pm.dpm.vce_states[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				adev->pm.dpm.vce_states[i].clk_idx =
					state_entry->ucClockInfoIndex & 0x3f;
				adev->pm.dpm.vce_states[i].pstate =
					(state_entry->ucClockInfoIndex & 0xc0) >> 6;
				state_entry = (ATOM_PPLIB_VCE_State_Record *)
					((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
		    ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
		    ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			adev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.ppm_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			adev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
		    ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct amdgpu_clock_voltage_dependency_entry);
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
		    ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;
			adev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
			if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
				amdgpu_free_extended_power_table(adev);
				return -ENOMEM;
			}
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					ppt->usMaximumPowerDeliveryLimit;
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
		    ext_hdr->usSclkVddgfxTableOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
			ret = amdgpu_parse_clk_voltage_dep_table(
					&adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
					dep_table);
			if (ret) {
				kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
				return ret;
			}
		}
	}

	return 0;
}

void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
	kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
}
static const char *pp_lib_thermal_controller_names[] = {
	"NONE",
	"lm63",
	"adm1032",
	"adm1030",
	"max6649",
	"lm64",
	"f75375",
	"RV6xx",
	"RV770",
	"adt7473",
	"NONE",
	"External GPIO",
	"Evergreen",
	"emc2103",
	"Sumo",
	"Northern Islands",
	"Southern Islands",
	"lm96163",
	"Sea Islands",
	"Kaveri/Kabini",
};

void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	ATOM_PPLIB_POWERPLAYTABLE *power_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	ATOM_PPLIB_THERMALCONTROLLER *controller;
	struct amdgpu_i2c_bus_rec i2c_bus;
	u16 data_offset;
	u8 frev, crev;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return;
	power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
		(mode_info->atom_context->bios + data_offset);
	controller = &power_table->sThermalController;

	/* add the i2c bus for thermal/fan chip */
	if (controller->ucType > 0) {
		if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
			adev->pm.no_fan = true;
		adev->pm.fan_pulses_per_revolution =
			controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
		if (adev->pm.fan_pulses_per_revolution) {
			adev->pm.fan_min_rpm = controller->ucFanMinRPM;
			adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
		}
		if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_NI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_SI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_CI;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
			DRM_INFO("Internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_KV;
		} else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
			DRM_INFO("External GPIO thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
			DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
		} else if (controller->ucType ==
			   ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
			DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
		} else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
			DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
				 pp_lib_thermal_controller_names[controller->ucType],
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
			adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
			i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
			adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
			if (adev->pm.i2c_bus) {
				struct i2c_board_info info = { };
				const char *name = pp_lib_thermal_controller_names[controller->ucType];
				info.addr = controller->ucI2cAddress >> 1;
				strlcpy(info.type, name, sizeof(info.type));
				i2c_new_device(&adev->pm.i2c_bus->adapter, &info);
			}
		} else {
			DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
				 controller->ucType,
				 controller->ucI2cAddress >> 1,
				 (controller->ucFanParameters &
				  ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
		}
	}
}
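
/*
 * Clamp the requested PCIe gen to what both the ASIC and the platform
 * support: an explicit asic_gen wins, otherwise fall back to the fastest
 * speed allowed by sys_mask that matches the requested default.
 */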
enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
						 u32 sys_mask,
						 enum amdgpu_pcie_gen asic_gen,
						 enum amdgpu_pcie_gen default_gen)
{
	switch (asic_gen) {
	case AMDGPU_PCIE_GEN1:
		return AMDGPU_PCIE_GEN1;
	case AMDGPU_PCIE_GEN2:
		return AMDGPU_PCIE_GEN2;
	case AMDGPU_PCIE_GEN3:
		return AMDGPU_PCIE_GEN3;
	default:
		if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
		    (default_gen == AMDGPU_PCIE_GEN3))
			return AMDGPU_PCIE_GEN3;
		else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
			 (default_gen == AMDGPU_PCIE_GEN2))
			return AMDGPU_PCIE_GEN2;
		else
			return AMDGPU_PCIE_GEN1;
	}
	return AMDGPU_PCIE_GEN1;
}

struct amd_vce_state *
amdgpu_get_vce_clock_state(void *handle, u32 idx)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (idx < adev->pm.dpm.num_of_vce_states)
		return &adev->pm.dpm.vce_states[idx];

	return NULL;
}
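
/*
 * Clock getters: on swSMU parts the min or max GFXCLK/UCLK is queried from
 * the SMU and multiplied by 100 (the SMU reports MHz, so this should put
 * the value in the 10 kHz units the powerplay path returns); otherwise the
 * query goes through the powerplay callbacks.
 */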
int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	uint32_t clk_freq;
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		ret = smu_get_dpm_freq_range(&adev->smu, SMU_GFXCLK,
					     low ? &clk_freq : NULL,
					     !low ? &clk_freq : NULL,
					     true);
		if (ret)
			return 0;
		return clk_freq * 100;
	} else {
		return (adev)->powerplay.pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
	}
}

int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	uint32_t clk_freq;
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		ret = smu_get_dpm_freq_range(&adev->smu, SMU_UCLK,
					     low ? &clk_freq : NULL,
					     !low ? &clk_freq : NULL,
					     true);
		if (ret)
			return 0;
		return clk_freq * 100;
	} else {
		return (adev)->powerplay.pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
	}
}

int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
{
	int ret = 0;
	bool swsmu = is_support_sw_smu(adev);

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
	case AMD_IP_BLOCK_TYPE_VCE:
		if (swsmu) {
			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
		} else if (adev->powerplay.pp_funcs &&
			   adev->powerplay.pp_funcs->set_powergating_by_smu) {
			/*
			 * TODO: need a better lock mechanism
			 *
			 * Here adev->pm.mutex lock protection is enforced on
			 * UVD and VCE cases only. Since for other cases, there
			 * may be already lock protection in amdgpu_pm.c.
			 * This is a quick fix for the deadlock issue below.
			 *     NFO: task ocltst:2028 blocked for more than 120 seconds.
			 *     Tainted: G           OE     5.0.0-37-generic #40~18.04.1-Ubuntu
			 *     echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
			 *     cltst          D    0  2028   2026 0x00000000
			 *     __schedule+0x2c0/0x870
			 *     schedule_preempt_disabled+0xe/0x10
			 *     __mutex_lock.isra.9+0x26d/0x4e0
			 *     __mutex_lock_slowpath+0x13/0x20
			 *     ? __mutex_lock_slowpath+0x13/0x20
			 *     mutex_lock+0x2f/0x40
			 *     amdgpu_dpm_set_powergating_by_smu+0x64/0xe0 [amdgpu]
			 *     gfx_v8_0_enable_gfx_static_mg_power_gating+0x3c/0x70 [amdgpu]
			 *     gfx_v8_0_set_powergating_state+0x66/0x260 [amdgpu]
			 *     amdgpu_device_ip_set_powergating_state+0x62/0xb0 [amdgpu]
			 *     pp_dpm_force_performance_level+0xe7/0x100 [amdgpu]
			 *     amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu]
			 */
			mutex_lock(&adev->pm.mutex);
			ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
			mutex_unlock(&adev->pm.mutex);
		}
		break;
	case AMD_IP_BLOCK_TYPE_GFX:
	case AMD_IP_BLOCK_TYPE_VCN:
	case AMD_IP_BLOCK_TYPE_SDMA:
		if (swsmu)
			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
		else if (adev->powerplay.pp_funcs &&
			 adev->powerplay.pp_funcs->set_powergating_by_smu)
			ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
		break;
	case AMD_IP_BLOCK_TYPE_JPEG:
		if (swsmu)
			ret = smu_dpm_set_power_gate(&adev->smu, block_type, gate);
		break;
	case AMD_IP_BLOCK_TYPE_GMC:
	case AMD_IP_BLOCK_TYPE_ACP:
		if (adev->powerplay.pp_funcs &&
		    adev->powerplay.pp_funcs->set_powergating_by_smu)
			ret = ((adev)->powerplay.pp_funcs->set_powergating_by_smu(
				(adev)->powerplay.pp_handle, block_type, gate));
		break;
	default:
		break;
	}

	return ret;
}
int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		ret = smu_baco_enter(smu);
	} else {
		if (!pp_funcs || !pp_funcs->set_asic_baco_state)
			return -ENOENT;

		/* enter BACO state */
		ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
	}

	return ret;
}

int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		ret = smu_baco_exit(smu);
	} else {
		if (!pp_funcs || !pp_funcs->set_asic_baco_state)
			return -ENOENT;

		/* exit BACO state */
		ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
	}

	return ret;
}

int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
			     enum pp_mp1_state mp1_state)
{
	int ret = 0;

	if (is_support_sw_smu(adev)) {
		ret = smu_set_mp1_state(&adev->smu, mp1_state);
	} else if (adev->powerplay.pp_funcs &&
		   adev->powerplay.pp_funcs->set_mp1_state) {
		ret = adev->powerplay.pp_funcs->set_mp1_state(
				adev->powerplay.pp_handle,
				mp1_state);
	}

	return ret;
}

bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	struct smu_context *smu = &adev->smu;
	bool baco_cap;

	if (is_support_sw_smu(adev)) {
		return smu_baco_is_support(smu);
	} else {
		if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
			return false;

		if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
			return false;

		return baco_cap ? true : false;
	}
}
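
/* ASIC resets: mode2 reset and a full BACO enter/exit cycle. */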
int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	struct smu_context *smu = &adev->smu;

	if (is_support_sw_smu(adev)) {
		return smu_mode2_reset(smu);
	} else {
		if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
			return -ENOENT;

		return pp_funcs->asic_reset_mode_2(pp_handle);
	}
}

int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
	void *pp_handle = adev->powerplay.pp_handle;
	struct smu_context *smu = &adev->smu;
	int ret = 0;

	dev_info(adev->dev, "GPU BACO reset\n");

	if (is_support_sw_smu(adev)) {
		ret = smu_baco_enter(smu);
		if (ret)
			return ret;

		ret = smu_baco_exit(smu);
		if (ret)
			return ret;
	} else {
		if (!pp_funcs
		    || !pp_funcs->set_asic_baco_state)
			return -ENOENT;

		/* enter BACO state */
		ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
		if (ret)
			return ret;

		/* exit BACO state */
		ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
		if (ret)
			return ret;
	}

	return 0;
}

int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
				    enum PP_SMC_POWER_PROFILE type,
				    bool en)
{
	int ret = 0;

	if (is_support_sw_smu(adev))
		ret = smu_switch_power_profile(&adev->smu, type, en);
	else if (adev->powerplay.pp_funcs &&
		 adev->powerplay.pp_funcs->switch_power_profile)
		ret = adev->powerplay.pp_funcs->switch_power_profile(
			adev->powerplay.pp_handle, type, en);

	return ret;
}

int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
			       uint32_t pstate)
{
	int ret = 0;

	if (is_support_sw_smu_xgmi(adev))
		ret = smu_set_xgmi_pstate(&adev->smu, pstate);
	else if (adev->powerplay.pp_funcs &&
		 adev->powerplay.pp_funcs->set_xgmi_pstate)
		ret = adev->powerplay.pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
								pstate);

	return ret;
}