/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "radeon.h"
#include "radeon_asic.h"
#include "r600d.h"
#include "r600_dpm.h"
#include "atom.h"
const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
{
	R600_UTC_DFLT_00, R600_UTC_DFLT_01, R600_UTC_DFLT_02, R600_UTC_DFLT_03,
	R600_UTC_DFLT_04, R600_UTC_DFLT_05, R600_UTC_DFLT_06, R600_UTC_DFLT_07,
	R600_UTC_DFLT_08, R600_UTC_DFLT_09, R600_UTC_DFLT_10, R600_UTC_DFLT_11,
	R600_UTC_DFLT_12, R600_UTC_DFLT_13, R600_UTC_DFLT_14,
};

const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =
{
	R600_DTC_DFLT_00, R600_DTC_DFLT_01, R600_DTC_DFLT_02, R600_DTC_DFLT_03,
	R600_DTC_DFLT_04, R600_DTC_DFLT_05, R600_DTC_DFLT_06, R600_DTC_DFLT_07,
	R600_DTC_DFLT_08, R600_DTC_DFLT_09, R600_DTC_DFLT_10, R600_DTC_DFLT_11,
	R600_DTC_DFLT_12, R600_DTC_DFLT_13, R600_DTC_DFLT_14,
};
void r600_dpm_print_class_info(u32 class, u32 class2)
{
	printk("\tui class: ");
	switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
	default:
		printk("none\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		printk("battery\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
		printk("balanced\n");
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		printk("performance\n");
		break;
	}
	printk("\tinternal class: ");
	if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
	    (class2 == 0))
		printk("none");
	else {
		if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
			printk("boot ");
		if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
			printk("thermal ");
		if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
			printk("limited_pwr ");
		if (class & ATOM_PPLIB_CLASSIFICATION_REST)
			printk("rest ");
		if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
			printk("forced ");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
			printk("3d_perf ");
		if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
			printk("ovrdrv ");
		if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
			printk("uvd ");
		if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
			printk("3d_low ");
		if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
			printk("acpi ");
		if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
			printk("uvd_hd2 ");
		if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
			printk("uvd_hd ");
		if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
			printk("uvd_sd ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
			printk("limited_pwr2 ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
			printk("ulv ");
		if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
			printk("mvc ");
	}
	printk("\n");
}
void r600_dpm_print_cap_info(u32 caps)
{
	printk("\tcaps: ");
	if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
		printk("single_disp ");
	if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
		printk("video ");
	if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
		printk("no_dc ");
	printk("\n");
}
void r600_dpm_print_ps_status(struct radeon_device *rdev,
			      struct radeon_ps *rps)
{
	printk("\tstatus: ");
	if (rps == rdev->pm.dpm.current_ps)
		printk("c ");
	if (rps == rdev->pm.dpm.requested_ps)
		printk("r ");
	if (rps == rdev->pm.dpm.boot_ps)
		printk("b ");
	printk("\n");
}
u32 r600_dpm_get_vblank_time(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 line_time_us, vblank_lines;
	u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
			line_time_us = (radeon_crtc->hw_mode.crtc_htotal * 1000) /
				radeon_crtc->hw_mode.clock;
			vblank_lines = radeon_crtc->hw_mode.crtc_vblank_end -
				radeon_crtc->hw_mode.crtc_vdisplay +
				(radeon_crtc->v_border * 2);
			vblank_time_us = vblank_lines * line_time_us;
			break;
		}
	}

	return vblank_time_us;
}
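
/*
 * Worked example (hypothetical numbers, not taken from any real mode):
 * a 1920x1080 timing with crtc_htotal = 2200, hw_mode.clock = 148500 kHz,
 * crtc_vblank_end = 1125, crtc_vdisplay = 1080 and no vertical border gives
 * line_time_us = (2200 * 1000) / 148500 = 14 us and vblank_lines = 45, so
 * vblank_time_us = 45 * 14 = 630 us.  The loop takes the first enabled
 * crtc; with every display off the 0xffffffff default is returned instead.
 */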
u32 r600_dpm_get_vrefresh(struct radeon_device *rdev)
{
	struct drm_device *dev = rdev->ddev;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	u32 vrefresh = 0;

	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
		radeon_crtc = to_radeon_crtc(crtc);
		if (crtc->enabled && radeon_crtc->enabled && radeon_crtc->hw_mode.clock) {
			vrefresh = radeon_crtc->hw_mode.vrefresh;
			break;
		}
	}

	return vrefresh;
}
void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
			    u32 *p, u32 *u)
{
	u32 b_c = 0;
	u32 i_c;
	u32 tmp;

	i_c = (i * r_c) / 100;
	tmp = i_c >> p_b;

	while (tmp) {
		b_c++;
		tmp >>= 1;
	}

	*u = (b_c + 1) / 2;
	*p = i_c / (1 << (2 * (*u)));
}
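
/*
 * Worked example (hypothetical inputs): with i = 1000, r_c = 50 and p_b = 3,
 * i_c = (1000 * 50) / 100 = 500.  500 >> 3 = 62, which takes 6 shifts to
 * reach zero, so b_c = 6, *u = (6 + 1) / 2 = 3 and *p = 500 / (1 << 6) = 7.
 */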
int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
{
	u32 k, a, ah, al;
	u32 t1;

	if ((fl == 0) || (fh == 0) || (fl > fh))
		return -EINVAL;

	k = (100 * fh) / fl;
	t1 = (t * (k - 100));
	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
	a = (a + 5) / 10;
	ah = ((a * t) + 5000) / 10000;
	al = a - ah;

	*th = t - ah;
	*tl = t + al;

	return 0;
}
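
/*
 * Worked example (hypothetical inputs): t = 3000, h = 30, fh = 800, fl = 400
 * gives k = 200, t1 = 3000 * 100 = 300000,
 * a = (1000 * (3000 + 300000)) / (10000 + 3000) = 23307, then
 * a = (23307 + 5) / 10 = 2331, ah = ((2331 * 3000) + 5000) / 10000 = 699,
 * al = 2331 - 699 = 1632, so *th = 2301 and *tl = 4632.
 */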
void r600_gfx_clockgating_enable(struct radeon_device *rdev, bool enable)
{
	int i;

	if (enable) {
		WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
	} else {
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);

		WREG32(CG_RLC_REQ_AND_RSP, 0x2);

		for (i = 0; i < rdev->usec_timeout; i++) {
			if (((RREG32(CG_RLC_REQ_AND_RSP) & CG_RLC_RSP_TYPE_MASK) >> CG_RLC_RSP_TYPE_SHIFT) == 1)
				break;
			udelay(1);
		}

		WREG32(CG_RLC_REQ_AND_RSP, 0x0);

		WREG32(GRBM_PWR_CNTL, 0x1);
		RREG32(GRBM_PWR_CNTL);
	}
}
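
/*
 * A note on the helpers below: WREG32_P(reg, val, mask) is radeon's
 * read-modify-write macro.  It keeps the register bits that are set in
 * 'mask' and replaces the rest with 'val', which is why callers pass the
 * complement of a field mask (e.g. ~DYN_GFX_CLK_OFF_EN) to update just
 * that field.
 */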
void r600_dynamicpm_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
	else
		WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
}

void r600_enable_thermal_protection(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
	else
		WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
}

void r600_enable_acpi_pm(struct radeon_device *rdev)
{
	WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
}

void r600_enable_dynamic_pcie_gen2(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
	else
		WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
}

bool r600_dynamicpm_enabled(struct radeon_device *rdev)
{
	if (RREG32(GENERAL_PWRMGT) & GLOBAL_PWRMGT_EN)
		return true;
	else
		return false;
}
void r600_enable_sclk_control(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
	else
		WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
}

void r600_enable_mclk_control(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF);
	else
		WREG32_P(MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF);
}

void r600_enable_spll_bypass(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32_P(CG_SPLL_FUNC_CNTL, SPLL_BYPASS_EN, ~SPLL_BYPASS_EN);
	else
		WREG32_P(CG_SPLL_FUNC_CNTL, 0, ~SPLL_BYPASS_EN);
}
void r600_wait_for_spll_change(struct radeon_device *rdev)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (RREG32(CG_SPLL_FUNC_CNTL) & SPLL_CHG_STATUS)
			break;
		udelay(1);
	}
}
void r600_set_bsp(struct radeon_device *rdev, u32 u, u32 p)
{
	WREG32(CG_BSP, BSP(p) | BSU(u));
}

void r600_set_at(struct radeon_device *rdev,
		 u32 l_to_m, u32 m_to_h,
		 u32 h_to_m, u32 m_to_l)
{
	WREG32(CG_RT, FLS(l_to_m) | FMS(m_to_h));
	WREG32(CG_LT, FHS(h_to_m) | FMS(m_to_l));
}

void r600_set_tc(struct radeon_device *rdev,
		 u32 index, u32 u_t, u32 d_t)
{
	WREG32(CG_FFCT_0 + (index * 4), UTC_0(u_t) | DTC_0(d_t));
}
void r600_select_td(struct radeon_device *rdev,
		    enum r600_td td)
{
	if (td == R600_TD_AUTO)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
	else
		WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
	if (td == R600_TD_UP)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
	if (td == R600_TD_DOWN)
		WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
}

void r600_set_vrc(struct radeon_device *rdev, u32 vrv)
{
	WREG32(CG_FTV, vrv);
}
void r600_set_tpu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_TPC, TPU(u), ~TPU_MASK);
}

void r600_set_tpc(struct radeon_device *rdev, u32 c)
{
	WREG32_P(CG_TPC, TPCC(c), ~TPCC_MASK);
}

void r600_set_sstu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_SSP, CG_SSTU(u), ~CG_SSTU_MASK);
}

void r600_set_sst(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_SSP, CG_SST(t), ~CG_SST_MASK);
}

void r600_set_git(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_GIT, CG_GICST(t), ~CG_GICST_MASK);
}

void r600_set_fctu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_FC_T, FC_TU(u), ~FC_TU_MASK);
}

void r600_set_fct(struct radeon_device *rdev, u32 t)
{
	WREG32_P(CG_FC_T, FC_T(t), ~FC_T_MASK);
}
void r600_set_ctxcgtt3d_rphc(struct radeon_device *rdev, u32 p)
{
	WREG32_P(CG_CTX_CGTT3D_R, PHC(p), ~PHC_MASK);
}

void r600_set_ctxcgtt3d_rsdc(struct radeon_device *rdev, u32 s)
{
	WREG32_P(CG_CTX_CGTT3D_R, SDC(s), ~SDC_MASK);
}

void r600_set_vddc3d_oorsu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(CG_VDDC3D_OOR, SU(u), ~SU_MASK);
}

void r600_set_vddc3d_oorphc(struct radeon_device *rdev, u32 p)
{
	WREG32_P(CG_VDDC3D_OOR, PHC(p), ~PHC_MASK);
}

void r600_set_vddc3d_oorsdc(struct radeon_device *rdev, u32 s)
{
	WREG32_P(CG_VDDC3D_OOR, SDC(s), ~SDC_MASK);
}

void r600_set_mpll_lock_time(struct radeon_device *rdev, u32 lock_time)
{
	WREG32_P(MPLL_TIME, MPLL_LOCK_TIME(lock_time), ~MPLL_LOCK_TIME_MASK);
}

void r600_set_mpll_reset_time(struct radeon_device *rdev, u32 reset_time)
{
	WREG32_P(MPLL_TIME, MPLL_RESET_TIME(reset_time), ~MPLL_RESET_TIME_MASK);
}
void r600_engine_clock_entry_enable(struct radeon_device *rdev,
				    u32 index, bool enable)
{
	if (enable)
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 STEP_0_SPLL_ENTRY_VALID, ~STEP_0_SPLL_ENTRY_VALID);
	else
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 0, ~STEP_0_SPLL_ENTRY_VALID);
}

void r600_engine_clock_entry_enable_pulse_skipping(struct radeon_device *rdev,
						   u32 index, bool enable)
{
	if (enable)
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 STEP_0_SPLL_STEP_ENABLE, ~STEP_0_SPLL_STEP_ENABLE);
	else
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 0, ~STEP_0_SPLL_STEP_ENABLE);
}

void r600_engine_clock_entry_enable_post_divider(struct radeon_device *rdev,
						 u32 index, bool enable)
{
	if (enable)
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 STEP_0_POST_DIV_EN, ~STEP_0_POST_DIV_EN);
	else
		WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART2 + (index * 4 * 2),
			 0, ~STEP_0_POST_DIV_EN);
}

void r600_engine_clock_entry_set_post_divider(struct radeon_device *rdev,
					      u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_POST_DIV(divider), ~STEP_0_SPLL_POST_DIV_MASK);
}

void r600_engine_clock_entry_set_reference_divider(struct radeon_device *rdev,
						   u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_REF_DIV(divider), ~STEP_0_SPLL_REF_DIV_MASK);
}

void r600_engine_clock_entry_set_feedback_divider(struct radeon_device *rdev,
						  u32 index, u32 divider)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_FB_DIV(divider), ~STEP_0_SPLL_FB_DIV_MASK);
}

void r600_engine_clock_entry_set_step_time(struct radeon_device *rdev,
					   u32 index, u32 step_time)
{
	WREG32_P(SCLK_FREQ_SETTING_STEP_0_PART1 + (index * 4 * 2),
		 STEP_0_SPLL_STEP_TIME(step_time), ~STEP_0_SPLL_STEP_TIME_MASK);
}
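
/*
 * The SCLK_FREQ_SETTING_STEP_n registers come in PART1/PART2 pairs, so each
 * step entry occupies two 32-bit registers; that is the reason for the
 * (index * 4 * 2) byte offset used by the entry helpers above.
 */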
void r600_vid_rt_set_ssu(struct radeon_device *rdev, u32 u)
{
	WREG32_P(VID_RT, SSTU(u), ~SSTU_MASK);
}

void r600_vid_rt_set_vru(struct radeon_device *rdev, u32 u)
{
	WREG32_P(VID_RT, VID_CRTU(u), ~VID_CRTU_MASK);
}

void r600_vid_rt_set_vrt(struct radeon_device *rdev, u32 rt)
{
	WREG32_P(VID_RT, VID_CRT(rt), ~VID_CRT_MASK);
}
void r600_voltage_control_enable_pins(struct radeon_device *rdev,
				      u64 mask)
{
	WREG32(LOWER_GPIO_ENABLE, mask & 0xffffffff);
	WREG32(UPPER_GPIO_ENABLE, upper_32_bits(mask));
}
void r600_voltage_control_program_voltages(struct radeon_device *rdev,
					   enum r600_power_level index, u64 pins)
{
	u32 tmp, mask;
	u32 ix = 3 - (3 & index);

	WREG32(CTXSW_VID_LOWER_GPIO_CNTL + (ix * 4), pins & 0xffffffff);

	mask = 7 << (3 * ix);
	tmp = RREG32(VID_UPPER_GPIO_CNTL);
	tmp = (tmp & ~mask) | ((pins >> (32 - (3 * ix))) & mask);
	WREG32(VID_UPPER_GPIO_CNTL, tmp);
}
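
/*
 * The hardware apparently numbers its four CTXSW power-level slots in the
 * opposite order from the r600_power_level enum, so ix = 3 - (3 & index)
 * converts one into the other (LOW maps to slot 3, CTXSW to slot 0); the
 * same conversion recurs in the r600_power_level_* helpers below.  Each
 * level also owns a 3-bit field in VID_UPPER_GPIO_CNTL for the GPIO pins
 * above bit 31, which is what the mask/shift arithmetic above selects.
 */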
void r600_voltage_control_deactivate_static_control(struct radeon_device *rdev,
						    u64 mask)
{
	u32 gpio;

	gpio = RREG32(GPIOPAD_MASK);
	gpio &= ~mask;
	WREG32(GPIOPAD_MASK, gpio);

	gpio = RREG32(GPIOPAD_EN);
	gpio &= ~mask;
	WREG32(GPIOPAD_EN, gpio);

	gpio = RREG32(GPIOPAD_A);
	gpio &= ~mask;
	WREG32(GPIOPAD_A, gpio);
}
void r600_power_level_enable(struct radeon_device *rdev,
			     enum r600_power_level index, bool enable)
{
	u32 ix = 3 - (3 & index);

	if (enable)
		WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), CTXSW_FREQ_STATE_ENABLE,
			 ~CTXSW_FREQ_STATE_ENABLE);
	else
		WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), 0,
			 ~CTXSW_FREQ_STATE_ENABLE);
}
void r600_power_level_set_voltage_index(struct radeon_device *rdev,
					enum r600_power_level index, u32 voltage_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_VIDS_CFG_INDEX(voltage_index), ~CTXSW_FREQ_VIDS_CFG_INDEX_MASK);
}

void r600_power_level_set_mem_clock_index(struct radeon_device *rdev,
					  enum r600_power_level index, u32 mem_clock_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_MCLK_CFG_INDEX(mem_clock_index), ~CTXSW_FREQ_MCLK_CFG_INDEX_MASK);
}

void r600_power_level_set_eng_clock_index(struct radeon_device *rdev,
					  enum r600_power_level index, u32 eng_clock_index)
{
	u32 ix = 3 - (3 & index);

	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4),
		 CTXSW_FREQ_SCLK_CFG_INDEX(eng_clock_index), ~CTXSW_FREQ_SCLK_CFG_INDEX_MASK);
}
void r600_power_level_set_watermark_id(struct radeon_device *rdev,
				       enum r600_power_level index,
				       enum r600_display_watermark watermark_id)
{
	u32 ix = 3 - (3 & index);
	u32 tmp = 0;

	if (watermark_id == R600_DISPLAY_WATERMARK_HIGH)
		tmp = CTXSW_FREQ_DISPLAY_WATERMARK;
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_DISPLAY_WATERMARK);
}

void r600_power_level_set_pcie_gen2(struct radeon_device *rdev,
				    enum r600_power_level index, bool compatible)
{
	u32 ix = 3 - (3 & index);
	u32 tmp = 0;

	if (compatible)
		tmp = CTXSW_FREQ_GEN2PCIE_VOLT;
	WREG32_P(CTXSW_PROFILE_INDEX + (ix * 4), tmp, ~CTXSW_FREQ_GEN2PCIE_VOLT);
}
enum r600_power_level r600_power_level_get_current_index(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK;
	tmp >>= CURRENT_PROFILE_INDEX_SHIFT;
	return tmp;
}

enum r600_power_level r600_power_level_get_target_index(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & TARGET_PROFILE_INDEX_MASK;
	tmp >>= TARGET_PROFILE_INDEX_SHIFT;
	return tmp;
}

void r600_power_level_set_enter_index(struct radeon_device *rdev,
				      enum r600_power_level index)
{
	WREG32_P(TARGET_AND_CURRENT_PROFILE_INDEX, DYN_PWR_ENTER_INDEX(index),
		 ~DYN_PWR_ENTER_INDEX_MASK);
}
void r600_wait_for_power_level_unequal(struct radeon_device *rdev,
				       enum r600_power_level index)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_target_index(rdev) != index)
			break;
		udelay(1);
	}

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_current_index(rdev) != index)
			break;
		udelay(1);
	}
}

void r600_wait_for_power_level(struct radeon_device *rdev,
			       enum r600_power_level index)
{
	int i;

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_target_index(rdev) == index)
			break;
		udelay(1);
	}

	for (i = 0; i < rdev->usec_timeout; i++) {
		if (r600_power_level_get_current_index(rdev) == index)
			break;
		udelay(1);
	}
}
void r600_start_dpm(struct radeon_device *rdev)
{
	r600_enable_sclk_control(rdev, false);
	r600_enable_mclk_control(rdev, false);

	r600_dynamicpm_enable(rdev, true);

	radeon_wait_for_vblank(rdev, 0);
	radeon_wait_for_vblank(rdev, 1);

	r600_enable_spll_bypass(rdev, true);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, false);
	r600_wait_for_spll_change(rdev);

	r600_enable_spll_bypass(rdev, true);
	r600_wait_for_spll_change(rdev);
	r600_enable_spll_bypass(rdev, false);
	r600_wait_for_spll_change(rdev);

	r600_enable_sclk_control(rdev, true);
	r600_enable_mclk_control(rdev, true);
}
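
/*
 * The bring-up order above matters: engine and memory clock control are
 * parked first, global power management is switched on, vblank is waited
 * out on both crtcs (presumably so an in-flight display update can settle),
 * the SPLL is cycled through bypass twice to force it to a known state, and
 * only then is clock control handed back to the hardware.
 */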
void r600_stop_dpm(struct radeon_device *rdev)
{
	r600_dynamicpm_enable(rdev, false);
}

int r600_dpm_pre_set_power_state(struct radeon_device *rdev)
{
	return 0;
}

void r600_dpm_post_set_power_state(struct radeon_device *rdev)
{

}

bool r600_is_uvd_state(u32 class, u32 class2)
{
	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
		return true;
	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
		return true;
	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
		return true;
	return false;
}
static int r600_set_thermal_temperature_range(struct radeon_device *rdev,
					      int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
	WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);

	rdev->pm.dpm.thermal.min_temp = low_temp;
	rdev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}
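
/*
 * Temperatures here are in millidegrees C while the DIG_THERM fields take
 * whole degrees, hence the /1000.  With the R600_TEMP_RANGE_MIN/MAX limits
 * passed in by r600_dpm_late_enable() below (assuming the usual
 * 90 * 1000 and 120 * 1000 definitions), the clamped window normally ends
 * up as 90C..120C.
 */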
bool r600_is_internal_thermal_sensor(enum radeon_int_thermal_type sensor)
{
	switch (sensor) {
	case THERMAL_TYPE_RV6XX:
	case THERMAL_TYPE_RV770:
	case THERMAL_TYPE_EVERGREEN:
	case THERMAL_TYPE_SUMO:
	case THERMAL_TYPE_NI:
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_CI:
	case THERMAL_TYPE_KV:
		return true;
	case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		return false; /* need special handling */
	case THERMAL_TYPE_NONE:
	case THERMAL_TYPE_EXTERNAL:
	case THERMAL_TYPE_EXTERNAL_GPIO:
	default:
		return false;
	}
}
int r600_dpm_late_enable(struct radeon_device *rdev)
{
	int ret;

	if (rdev->irq.installed &&
	    r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
		ret = r600_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
		if (ret)
			return ret;
		rdev->irq.dpm_thermal = true;
		radeon_irq_set(rdev);
	}

	return 0;
}
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};

union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
};
static int r600_parse_clk_voltage_dep_table(struct radeon_clock_voltage_dependency_table *radeon_table,
					    ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
{
	u32 size = atom_table->ucNumEntries *
		sizeof(struct radeon_clock_voltage_dependency_entry);
	int i;
	ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;

	radeon_table->entries = kzalloc(size, GFP_KERNEL);
	if (!radeon_table->entries)
		return -ENOMEM;

	entry = &atom_table->entries[0];
	for (i = 0; i < atom_table->ucNumEntries; i++) {
		radeon_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
			(entry->ucClockHigh << 16);
		radeon_table->entries[i].v = le16_to_cpu(entry->usVoltage);
		entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
			((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
	}
	radeon_table->count = atom_table->ucNumEntries;

	return 0;
}
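
/*
 * Each ATOM record stores a clock as a 16-bit low word plus an 8-bit high
 * byte, so usClockLow | (ucClockHigh << 16) reassembles a 24-bit value
 * (in 10 kHz units, as elsewhere in the driver).  The records are walked
 * with explicit byte arithmetic rather than array indexing, presumably to
 * stay safe against the packed BIOS table layout.
 */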
/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
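
/*
 * Explicit per-revision sizes are used instead of
 * sizeof(ATOM_PPLIB_EXTENDEDHEADER) because the extended header grew a new
 * table offset field with each revision; comparing usSize against the size
 * for a given revision tells us whether that revision's offset is present.
 */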
int r600_parse_extended_power_table(struct radeon_device *rdev)
{
	struct radeon_mode_info *mode_info = &rdev->mode_info;
	union power_info *power_info;
	union fan_info *fan_info;
	ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	int ret, i;

	if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
				    &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
	/* fan table */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		if (power_info->pplib3.usFanTableOffset) {
			fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
						      le16_to_cpu(power_info->pplib3.usFanTableOffset));
			rdev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
			rdev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
			rdev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
			rdev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
			rdev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
			rdev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
			rdev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
			if (fan_info->fan.ucFanTableFormat >= 2)
				rdev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
			else
				rdev->pm.dpm.fan.t_max = 10900;
			rdev->pm.dpm.fan.cycle_delay = 100000;
			rdev->pm.dpm.fan.ucode_fan_control = true;
		}
	}
	/* clock dependency tables, shedding tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
		if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
							       dep_table);
			if (ret)
				return ret;
		}
		if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
			dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
			ret = r600_parse_clk_voltage_dep_table(&rdev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
							       dep_table);
			if (ret) {
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries);
				kfree(rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries);
				return ret;
			}
		}
		if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
			ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
				(ATOM_PPLIB_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
			if (clk_v->ucNumEntries) {
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
					le16_to_cpu(clk_v->entries[0].usSclkLow) |
					(clk_v->entries[0].ucSclkHigh << 16);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
					le16_to_cpu(clk_v->entries[0].usMclkLow) |
					(clk_v->entries[0].ucMclkHigh << 16);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
					le16_to_cpu(clk_v->entries[0].usVddc);
				rdev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
					le16_to_cpu(clk_v->entries[0].usVddci);
			}
		}
		if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
			ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
				(ATOM_PPLIB_PhaseSheddingLimits_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
			ATOM_PPLIB_PhaseSheddingLimits_Record *entry;

			rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
				kzalloc(psl->ucNumEntries *
					sizeof(struct radeon_phase_shedding_limits_entry),
					GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}

			entry = &psl->entries[0];
			for (i = 0; i < psl->ucNumEntries; i++) {
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
					le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
					le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
				rdev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
			}
			rdev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
				psl->ucNumEntries;
		}
	}
	/* cac data */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
		rdev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
		rdev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
		rdev->pm.dpm.near_tdp_limit_adjusted = rdev->pm.dpm.near_tdp_limit;
		rdev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
		if (rdev->pm.dpm.tdp_od_limit)
			rdev->pm.dpm.power_control = true;
		else
			rdev->pm.dpm.power_control = false;
		rdev->pm.dpm.tdp_adjustment = 0;
		rdev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
		rdev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
		rdev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
		if (power_info->pplib5.usCACLeakageTableOffset) {
			ATOM_PPLIB_CAC_Leakage_Table *cac_table =
				(ATOM_PPLIB_CAC_Leakage_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
			ATOM_PPLIB_CAC_Leakage_Record *entry;
			u32 size = cac_table->ucNumEntries * sizeof(struct radeon_cac_leakage_table);
			rdev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.cac_leakage_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			entry = &cac_table->entries[0];
			for (i = 0; i < cac_table->ucNumEntries; i++) {
				if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
						le16_to_cpu(entry->usVddc1);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
						le16_to_cpu(entry->usVddc2);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
						le16_to_cpu(entry->usVddc3);
				} else {
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
						le16_to_cpu(entry->usVddc);
					rdev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
						le32_to_cpu(entry->ulLeakageValue);
				}
				entry = (ATOM_PPLIB_CAC_Leakage_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
			}
			rdev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
		}
	}
	/* ext tables */
	if (le16_to_cpu(power_info->pplib.usTableSize) >=
	    sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
		ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
			(mode_info->atom_context->bios + data_offset +
			 le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
		    ext_hdr->usVCETableOffset) {
			VCEClockInfoArray *array = (VCEClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
				 1 + array->ucNumEntries * sizeof(VCEClockInfo));
			ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_vce_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				VCEClockInfo *vce_clk = (VCEClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
					le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
					le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
				rdev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
		    ext_hdr->usUVDTableOffset) {
			UVDClockInfoArray *array = (UVDClockInfoArray *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
				 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
			ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_uvd_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				UVDClockInfo *uvd_clk = (UVDClockInfo *)
					((u8 *)&array->entries[0] +
					 (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
					le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
					le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
				rdev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
		    ext_hdr->usSAMUTableOffset) {
			ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
			ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
				rdev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
		    ext_hdr->usPPMTableOffset) {
			ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usPPMTableOffset));
			rdev->pm.dpm.dyn_state.ppm_table =
				kzalloc(sizeof(struct radeon_ppm_table), GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.ppm_table) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
			rdev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
				le16_to_cpu(ppm->usCpuCoreNumber);
			rdev->pm.dpm.dyn_state.ppm_table->platform_tdp =
				le32_to_cpu(ppm->ulPlatformTDP);
			rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
				le32_to_cpu(ppm->ulSmallACPlatformTDP);
			rdev->pm.dpm.dyn_state.ppm_table->platform_tdc =
				le32_to_cpu(ppm->ulPlatformTDC);
			rdev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
				le32_to_cpu(ppm->ulSmallACPlatformTDC);
			rdev->pm.dpm.dyn_state.ppm_table->apu_tdp =
				le32_to_cpu(ppm->ulApuTDP);
			rdev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
				le32_to_cpu(ppm->ulDGpuTDP);
			rdev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
				le32_to_cpu(ppm->ulDGpuUlvPower);
			rdev->pm.dpm.dyn_state.ppm_table->tj_max =
				le32_to_cpu(ppm->ulTjmax);
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
		    ext_hdr->usACPTableOffset) {
			ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
				(ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
				(mode_info->atom_context->bios + data_offset +
				 le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
			ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
			u32 size = limits->numEntries *
				sizeof(struct radeon_clock_voltage_dependency_entry);
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
				kzalloc(size, GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
				limits->numEntries;
			entry = &limits->entries[0];
			for (i = 0; i < limits->numEntries; i++) {
				rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
					le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
				rdev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
					le16_to_cpu(entry->usVoltage);
				entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
					((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
			}
		}
		if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
		    ext_hdr->usPowerTuneTableOffset) {
			u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
			ATOM_PowerTune_Table *pt;
			rdev->pm.dpm.dyn_state.cac_tdp_table =
				kzalloc(sizeof(struct radeon_cac_tdp_table), GFP_KERNEL);
			if (!rdev->pm.dpm.dyn_state.cac_tdp_table) {
				r600_free_extended_power_table(rdev);
				return -ENOMEM;
			}
			if (rev > 0) {
				ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
					ppt->usMaximumPowerDeliveryLimit;
				pt = &ppt->power_tune_table;
			} else {
				ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
					(mode_info->atom_context->bios + data_offset +
					 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
				rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
				pt = &ppt->power_tune_table;
			}
			rdev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
			rdev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
				le16_to_cpu(pt->usConfigurableTDP);
			rdev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
			rdev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
				le16_to_cpu(pt->usBatteryPowerLimit);
			rdev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
				le16_to_cpu(pt->usSmallPowerLimit);
			rdev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
				le16_to_cpu(pt->usLowCACLeakage);
			rdev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
				le16_to_cpu(pt->usHighCACLeakage);
		}
	}

	return 0;
}
void r600_free_extended_power_table(struct radeon_device *rdev)
{
	struct radeon_dpm_dynamic_state *dyn_state = &rdev->pm.dpm.dyn_state;

	kfree(dyn_state->vddc_dependency_on_sclk.entries);
	kfree(dyn_state->vddci_dependency_on_mclk.entries);
	kfree(dyn_state->vddc_dependency_on_mclk.entries);
	kfree(dyn_state->mvdd_dependency_on_mclk.entries);
	kfree(dyn_state->cac_leakage_table.entries);
	kfree(dyn_state->phase_shedding_limits_table.entries);
	kfree(dyn_state->ppm_table);
	kfree(dyn_state->cac_tdp_table);
	kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
	kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
	kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
	kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
}
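
/*
 * kfree(NULL) is a no-op, so the unconditional kfree() calls above are safe
 * even for tables that were never parsed; this is also why
 * r600_parse_extended_power_table() can call this function on any partial
 * failure without tracking which allocations succeeded.
 */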
enum radeon_pcie_gen r600_get_pcie_gen_support(struct radeon_device *rdev,
					       u32 sys_mask,
					       enum radeon_pcie_gen asic_gen,
					       enum radeon_pcie_gen default_gen)
{
	switch (asic_gen) {
	case RADEON_PCIE_GEN1:
		return RADEON_PCIE_GEN1;
	case RADEON_PCIE_GEN2:
		return RADEON_PCIE_GEN2;
	case RADEON_PCIE_GEN3:
		return RADEON_PCIE_GEN3;
	default:
		if ((sys_mask & DRM_PCIE_SPEED_80) && (default_gen == RADEON_PCIE_GEN3))
			return RADEON_PCIE_GEN3;
		else if ((sys_mask & DRM_PCIE_SPEED_50) && (default_gen == RADEON_PCIE_GEN2))
			return RADEON_PCIE_GEN2;
		else
			return RADEON_PCIE_GEN1;
	}
	return RADEON_PCIE_GEN1;
}
u16 r600_get_pcie_lane_support(struct radeon_device *rdev,
			       u16 asic_lanes,
			       u16 default_lanes)
{
	switch (asic_lanes) {
	case 0:
	default:
		return default_lanes;
	case 1:
		return 1;
	case 2:
		return 2;
	case 4:
		return 4;
	case 8:
		return 8;
	case 12:
		return 12;
	case 16:
		return 16;
	}
}
u8 r600_encode_pci_lane_width(u32 lanes)
{
	u8 encoded_lanes[] = { 0, 1, 2, 0, 3, 0, 0, 0, 4, 0, 0, 0, 5, 0, 0, 0, 6 };

	if (lanes > 16)
		return 0;

	return encoded_lanes[lanes];
}
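
/*
 * The lookup table maps a lane count to the 3-bit encoding used by the PCIE
 * link width fields: 1, 2, 4, 8, 12 and 16 lanes become 1, 2, 3, 4, 5 and 6
 * respectively, e.g. r600_encode_pci_lane_width(8) returns 4.  Unsupported
 * counts map to 0.
 */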