/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */

#include "radeon.h"
#include "radeon_asic.h"
#include "rv770.h"
#include "rv770d.h"
#include "r600_dpm.h"
#include "rv770_dpm.h"
#include "cypress_dpm.h"
#include "atom.h"
#include "evergreen.h"
#include <linux/seq_file.h>

#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

#define MC_CG_SEQ_DRAMCONF_S0       0x05
#define MC_CG_SEQ_DRAMCONF_S1       0x06

#define PCIE_BUS_CLK                10000
#define TCLK                        (PCIE_BUS_CLK / 10)

#define SMC_RAM_END 0xC000
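
/* helpers to fetch the driver-private power state and power info structures */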
struct rv7xx_ps *rv770_get_ps(struct radeon_ps *rps)
{
	struct rv7xx_ps *ps = rps->ps_priv;

	return ps;
}

struct rv7xx_power_info *rv770_get_pi(struct radeon_device *rdev)
{
	struct rv7xx_power_info *pi = rdev->pm.dpm.priv;

	return pi;
}

struct evergreen_power_info *evergreen_get_pi(struct radeon_device *rdev)
{
	struct evergreen_power_info *pi = rdev->pm.dpm.priv;

	return pi;
}

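/*
 * Let the BIF hardware manage dynamic PCIe gen2 switching.  The register is
 * only written back if the other side has ever sent or supports gen2.
 */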
static void rv770_enable_bif_dynamic_pcie_gen2(struct radeon_device *rdev,
					       bool enable)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	u32 tmp;

	tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
	if (enable) {
		tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
		tmp |= LC_HW_VOLTAGE_IF_CONTROL(1);
		tmp |= LC_GEN2_EN_STRAP;
	} else {
		if (!pi->boot_in_gen2) {
			tmp &= ~LC_HW_VOLTAGE_IF_CONTROL_MASK;
			tmp &= ~LC_GEN2_EN_STRAP;
		}
	}
	if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) ||
	    (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2))
		WREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL, tmp);
}

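/* program the ASPM L0s inactivity timer */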
static void rv770_enable_l0s(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL) & ~LC_L0S_INACTIVITY_MASK;
	tmp |= LC_L0S_INACTIVITY(3);
	WREG32_PCIE_PORT(PCIE_LC_CNTL, tmp);
}

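/* program the L1 inactivity timer and allow PMI and ASPM initiated L1 entry */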
static void rv770_enable_l1(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL);
	tmp &= ~LC_L1_INACTIVITY_MASK;
	tmp |= LC_L1_INACTIVITY(4);
	tmp &= ~LC_PMI_TO_L1_DIS;
	tmp &= ~LC_ASPM_TO_L1_DIS;
	WREG32_PCIE_PORT(PCIE_LC_CNTL, tmp);
}

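/* shorten the L1 inactivity timer and let the PCIE PLL power down in L1/L23 */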
static void rv770_enable_pll_sleep_in_l1(struct radeon_device *rdev)
{
	u32 tmp;

	tmp = RREG32_PCIE_PORT(PCIE_LC_CNTL) & ~LC_L1_INACTIVITY_MASK;
	tmp |= LC_L1_INACTIVITY(8);
	WREG32_PCIE_PORT(PCIE_LC_CNTL, tmp);

	/* NOTE, this is a PCIE indirect reg, not PCIE PORT */
	tmp = RREG32_PCIE(PCIE_P_CNTL);
	tmp |= P_PLL_PWRDN_IN_L1L23;
	tmp &= ~P_PLL_BUF_PDNB;
	tmp &= ~P_PLL_PDNB;
	tmp |= P_ALLOW_PRX_FRONTEND_SHUTOFF;
	WREG32_PCIE(PCIE_P_CNTL, tmp);
}

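/* dynamic GFX clock gating; when disabling, briefly force the GFX clock on */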
static void rv770_gfx_clock_gating_enable(struct radeon_device *rdev,
					  bool enable)
{
	if (enable)
		WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
	else {
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
		WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
		RREG32(GB_TILING_CONFIG);
	}
}

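/* medium grain clock gating via the CGTT local registers */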
static void rv770_mg_clock_gating_enable(struct radeon_device *rdev,
					 bool enable)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);

	if (enable) {
		u32 mgcg_cgtt_local0;

		if (rdev->family == CHIP_RV770)
			mgcg_cgtt_local0 = RV770_MGCGTTLOCAL0_DFLT;
		else
			mgcg_cgtt_local0 = RV7XX_MGCGTTLOCAL0_DFLT;

		WREG32(CG_CGTT_LOCAL_0, mgcg_cgtt_local0);
		WREG32(CG_CGTT_LOCAL_1, (RV770_MGCGTTLOCAL1_DFLT & 0xFFFFCFFF));

		if (pi->mgcgtssm)
			WREG32(CGTS_SM_CTRL_REG, RV770_MGCGCGTSSMCTRL_DFLT);
	} else {
		WREG32(CG_CGTT_LOCAL_0, 0xFFFFFFFF);
		WREG32(CG_CGTT_LOCAL_1, 0xFFFFCFFF);
	}
}

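/* re-enable dynamic GFX clock gating if DPM is active but gating was left disabled */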
void rv770_restore_cgcg(struct radeon_device *rdev)
{
	bool dpm_en = false, cg_en = false;

	if (RREG32(GENERAL_PWRMGT) & GLOBAL_PWRMGT_EN)
		dpm_en = true;
	if (RREG32(SCLK_PWRMGT_CNTL) & DYN_GFX_CLK_OFF_EN)
		cg_en = true;

	if (dpm_en && !cg_en)
		WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
}

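/* clear the SCLK/MPLL power management off bits and enable global power management */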
static void rv770_start_dpm(struct radeon_device *rdev)
{
	WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);

	WREG32_P(MCLK_PWRMGT_CNTL, 0, ~MPLL_PWRMGT_OFF);

	WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
}

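/* ask the SMC to drop to the lowest level, then disable global power management */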
void rv770_stop_dpm(struct radeon_device *rdev)
{
	PPSMC_Result result;

	result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_TwoLevelsDisabled);

	if (result != PPSMC_Result_OK)
		DRM_DEBUG("Could not force DPM to low.\n");

	WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);

	WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);

	WREG32_P(MCLK_PWRMGT_CNTL, MPLL_PWRMGT_OFF, ~MPLL_PWRMGT_OFF);
}

bool rv770_dpm_enabled(struct radeon_device *rdev)
{
	if (RREG32(GENERAL_PWRMGT) & GLOBAL_PWRMGT_EN)
		return true;
	else
		return false;
}

void rv770_enable_thermal_protection(struct radeon_device *rdev,
				     bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
	else
		WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
}

void rv770_enable_acpi_pm(struct radeon_device *rdev)
{
	WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
}

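/* pick the MC sequencer DRAM config for a level: S0 for low-power states, S1 otherwise */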
u8 rv770_get_seq_value(struct radeon_device *rdev,
		       struct rv7xx_pl *pl)
{
	return (pl->flags & ATOM_PPLIB_R600_FLAGS_LOWPOWER) ?
		MC_CG_SEQ_DRAMCONF_S0 : MC_CG_SEQ_DRAMCONF_S1;
}

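/*
 * "Soft registers" live in SMC SRAM starting at soft_regs_start and are used
 * to pass parameters (delays, timeouts, etc.) from the driver to the SMC.
 */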
#if 0
int rv770_read_smc_soft_register(struct radeon_device *rdev,
				 u16 reg_offset, u32 *value)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);

	return rv770_read_smc_sram_dword(rdev,
					 pi->soft_regs_start + reg_offset,
					 value, pi->sram_end);
}
#endif

int rv770_write_smc_soft_register(struct radeon_device *rdev,
				  u16 reg_offset, u32 value)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);

	return rv770_write_smc_sram_dword(rdev,
					  pi->soft_regs_start + reg_offset,
					  value, pi->sram_end);
}

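/*
 * Derive the per-level aT values (CG_R/CG_L weights) of an SMC software
 * state from the ratios of the low/medium/high engine clocks and the
 * bsp/pbsp settings.
 */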
int rv770_populate_smc_t(struct radeon_device *rdev,
			 struct radeon_ps *radeon_state,
			 RV770_SMC_SWSTATE *smc_state)
{
	struct rv7xx_ps *state = rv770_get_ps(radeon_state);
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	int i;
	int a_n;
	int a_d;
	u8 l[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
	u8 r[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE];
	u32 a_t;

	l[0] = 0;
	r[2] = 100;

	a_n = (int)state->medium.sclk * pi->lmp +
		(int)state->low.sclk * (R600_AH_DFLT - pi->rlp);
	a_d = (int)state->low.sclk * (100 - (int)pi->rlp) +
		(int)state->medium.sclk * pi->lmp;

	l[1] = (u8)(pi->lmp - (int)pi->lmp * a_n / a_d);
	r[0] = (u8)(pi->rlp + (100 - (int)pi->rlp) * a_n / a_d);

	a_n = (int)state->high.sclk * pi->lhp + (int)state->medium.sclk *
		(R600_AH_DFLT - pi->rmp);
	a_d = (int)state->medium.sclk * (100 - (int)pi->rmp) +
		(int)state->high.sclk * pi->lhp;

	l[2] = (u8)(pi->lhp - (int)pi->lhp * a_n / a_d);
	r[1] = (u8)(pi->rmp + (100 - (int)pi->rmp) * a_n / a_d);

	for (i = 0; i < (RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1); i++) {
		a_t = CG_R(r[i] * pi->bsp / 200) | CG_L(l[i] * pi->bsp / 200);
		smc_state->levels[i].aT = cpu_to_be32(a_t);
	}

	a_t = CG_R(r[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1] * pi->pbsp / 200) |
		CG_L(l[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1] * pi->pbsp / 200);

	smc_state->levels[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1].aT =
		cpu_to_be32(a_t);

	return 0;
}

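/* fill in the per-level bSP values: dsp for the lower levels, psp for the top level */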
int rv770_populate_smc_sp(struct radeon_device *rdev,
			  struct radeon_ps *radeon_state,
			  RV770_SMC_SWSTATE *smc_state)
{
	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
	int i;

	for (i = 0; i < (RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1); i++)
		smc_state->levels[i].bSP = cpu_to_be32(pi->dsp);

	smc_state->levels[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1].bSP =
		cpu_to_be32(pi->psp);

	return 0;
}

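/*
 * Compute the integer (clkf) and fractional-eighths (clkfrac) parts of the
 * MPLL feedback divider for the requested memory clock.
 */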
static void rv770_calculate_fractional_mpll_feedback_divider(u32 memory_clock,
							      u32 reference_clock,
							      bool gddr5,
							      struct atom_clock_dividers *dividers,
							      u32 *clkf,
							      u32 *clkfrac)
{
	u32 post_divider, reference_divider, feedback_divider8;
	u32 fyclk;

	if (gddr5)
		fyclk = (memory_clock * 8) / 2;
	else
		fyclk = (memory_clock * 4) / 2;

	post_divider = dividers->post_div;
	reference_divider = dividers->ref_div;

	feedback_divider8 =
		(8 * fyclk * reference_divider * post_divider) / reference_clock;

	*clkf = feedback_divider8 / 8;
	*clkfrac = feedback_divider8 % 8;
}

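/* encode a power-of-two YCLK post divider (1..16) into its register field value */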
static int rv770_encode_yclk_post_div(u32 postdiv, u32 *encoded_postdiv)
{
	int ret = 0;

	switch (postdiv) {
	case 1:
		*encoded_postdiv = 0;
		break;
	case 2:
		*encoded_postdiv = 1;
		break;
	case 4:
		*encoded_postdiv = 2;
		break;
	case 8:
		*encoded_postdiv = 3;
		break;
	case 16:
		*encoded_postdiv = 4;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}

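/* map the MPLL feedback divider range to the IBIAS register field value */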
u32 rv770_map_clkf_to_ibias(struct radeon_device *rdev, u32 clkf)
{
	if (clkf <= 0x10)
		return 0x4B;
	if (clkf <= 0x19)
		return 0x5B;
	if (clkf <= 0x21)
		return 0x2B;
	if (clkf <= 0x27)
		return 0x6C;
	if (clkf <= 0x31)
		return 0x9D;
	return 0xC6;
}

387 static int rv770_populate_mclk_value(struct radeon_device *rdev,
388 u32 engine_clock, u32 memory_clock,
389 RV7XX_SMC_MCLK_VALUE *mclk)
391 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
392 u8 encoded_reference_dividers[] = { 0, 16, 17, 20, 21 };
393 u32 mpll_ad_func_cntl =
394 pi->clk_regs.rv770.mpll_ad_func_cntl;
395 u32 mpll_ad_func_cntl_2 =
396 pi->clk_regs.rv770.mpll_ad_func_cntl_2;
397 u32 mpll_dq_func_cntl =
398 pi->clk_regs.rv770.mpll_dq_func_cntl;
399 u32 mpll_dq_func_cntl_2 =
400 pi->clk_regs.rv770.mpll_dq_func_cntl_2;
401 u32 mclk_pwrmgt_cntl =
402 pi->clk_regs.rv770.mclk_pwrmgt_cntl;
403 u32 dll_cntl = pi->clk_regs.rv770.dll_cntl;
404 struct atom_clock_dividers dividers;
405 u32 reference_clock = rdev->clock.mpll.reference_freq;
406 u32 clkf, clkfrac;
407 u32 postdiv_yclk;
408 u32 ibias;
409 int ret;
411 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_MEMORY_PLL_PARAM,
412 memory_clock, false, &dividers);
413 if (ret)
414 return ret;
416 if ((dividers.ref_div < 1) || (dividers.ref_div > 5))
417 return -EINVAL;
419 rv770_calculate_fractional_mpll_feedback_divider(memory_clock, reference_clock,
420 pi->mem_gddr5,
421 &dividers, &clkf, &clkfrac);
423 ret = rv770_encode_yclk_post_div(dividers.post_div, &postdiv_yclk);
424 if (ret)
425 return ret;
427 ibias = rv770_map_clkf_to_ibias(rdev, clkf);
429 mpll_ad_func_cntl &= ~(CLKR_MASK |
430 YCLK_POST_DIV_MASK |
431 CLKF_MASK |
432 CLKFRAC_MASK |
433 IBIAS_MASK);
434 mpll_ad_func_cntl |= CLKR(encoded_reference_dividers[dividers.ref_div - 1]);
435 mpll_ad_func_cntl |= YCLK_POST_DIV(postdiv_yclk);
436 mpll_ad_func_cntl |= CLKF(clkf);
437 mpll_ad_func_cntl |= CLKFRAC(clkfrac);
438 mpll_ad_func_cntl |= IBIAS(ibias);
440 if (dividers.vco_mode)
441 mpll_ad_func_cntl_2 |= VCO_MODE;
442 else
443 mpll_ad_func_cntl_2 &= ~VCO_MODE;
445 if (pi->mem_gddr5) {
446 rv770_calculate_fractional_mpll_feedback_divider(memory_clock,
447 reference_clock,
448 pi->mem_gddr5,
449 &dividers, &clkf, &clkfrac);
451 ibias = rv770_map_clkf_to_ibias(rdev, clkf);
453 ret = rv770_encode_yclk_post_div(dividers.post_div, &postdiv_yclk);
454 if (ret)
455 return ret;
457 mpll_dq_func_cntl &= ~(CLKR_MASK |
458 YCLK_POST_DIV_MASK |
459 CLKF_MASK |
460 CLKFRAC_MASK |
461 IBIAS_MASK);
462 mpll_dq_func_cntl |= CLKR(encoded_reference_dividers[dividers.ref_div - 1]);
463 mpll_dq_func_cntl |= YCLK_POST_DIV(postdiv_yclk);
464 mpll_dq_func_cntl |= CLKF(clkf);
465 mpll_dq_func_cntl |= CLKFRAC(clkfrac);
466 mpll_dq_func_cntl |= IBIAS(ibias);
468 if (dividers.vco_mode)
469 mpll_dq_func_cntl_2 |= VCO_MODE;
470 else
471 mpll_dq_func_cntl_2 &= ~VCO_MODE;
474 mclk->mclk770.mclk_value = cpu_to_be32(memory_clock);
475 mclk->mclk770.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
476 mclk->mclk770.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
477 mclk->mclk770.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
478 mclk->mclk770.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
479 mclk->mclk770.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
480 mclk->mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl);
482 return 0;
485 static int rv770_populate_sclk_value(struct radeon_device *rdev,
486 u32 engine_clock,
487 RV770_SMC_SCLK_VALUE *sclk)
489 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
490 struct atom_clock_dividers dividers;
491 u32 spll_func_cntl =
492 pi->clk_regs.rv770.cg_spll_func_cntl;
493 u32 spll_func_cntl_2 =
494 pi->clk_regs.rv770.cg_spll_func_cntl_2;
495 u32 spll_func_cntl_3 =
496 pi->clk_regs.rv770.cg_spll_func_cntl_3;
497 u32 cg_spll_spread_spectrum =
498 pi->clk_regs.rv770.cg_spll_spread_spectrum;
499 u32 cg_spll_spread_spectrum_2 =
500 pi->clk_regs.rv770.cg_spll_spread_spectrum_2;
501 u64 tmp;
502 u32 reference_clock = rdev->clock.spll.reference_freq;
503 u32 reference_divider, post_divider;
504 u32 fbdiv;
505 int ret;
507 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
508 engine_clock, false, &dividers);
509 if (ret)
510 return ret;
512 reference_divider = 1 + dividers.ref_div;
514 if (dividers.enable_post_div)
515 post_divider = (0x0f & (dividers.post_div >> 4)) + (0x0f & dividers.post_div) + 2;
516 else
517 post_divider = 1;
519 tmp = (u64) engine_clock * reference_divider * post_divider * 16384;
520 do_div(tmp, reference_clock);
521 fbdiv = (u32) tmp;
523 if (dividers.enable_post_div)
524 spll_func_cntl |= SPLL_DIVEN;
525 else
526 spll_func_cntl &= ~SPLL_DIVEN;
527 spll_func_cntl &= ~(SPLL_HILEN_MASK | SPLL_LOLEN_MASK | SPLL_REF_DIV_MASK);
528 spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
529 spll_func_cntl |= SPLL_HILEN((dividers.post_div >> 4) & 0xf);
530 spll_func_cntl |= SPLL_LOLEN(dividers.post_div & 0xf);
532 spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
533 spll_func_cntl_2 |= SCLK_MUX_SEL(2);
535 spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
536 spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
537 spll_func_cntl_3 |= SPLL_DITHEN;
539 if (pi->sclk_ss) {
540 struct radeon_atom_ss ss;
541 u32 vco_freq = engine_clock * post_divider;
543 if (radeon_atombios_get_asic_ss_info(rdev, &ss,
544 ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
545 u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
546 u32 clk_v = ss.percentage * fbdiv / (clk_s * 10000);
548 cg_spll_spread_spectrum &= ~CLKS_MASK;
549 cg_spll_spread_spectrum |= CLKS(clk_s);
550 cg_spll_spread_spectrum |= SSEN;
552 cg_spll_spread_spectrum_2 &= ~CLKV_MASK;
553 cg_spll_spread_spectrum_2 |= CLKV(clk_v);
557 sclk->sclk_value = cpu_to_be32(engine_clock);
558 sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
559 sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
560 sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
561 sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(cg_spll_spread_spectrum);
562 sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(cg_spll_spread_spectrum_2);
564 return 0;
567 int rv770_populate_vddc_value(struct radeon_device *rdev, u16 vddc,
568 RV770_SMC_VOLTAGE_VALUE *voltage)
570 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
571 int i;
573 if (!pi->voltage_control) {
574 voltage->index = 0;
575 voltage->value = 0;
576 return 0;
579 for (i = 0; i < pi->valid_vddc_entries; i++) {
580 if (vddc <= pi->vddc_table[i].vddc) {
581 voltage->index = pi->vddc_table[i].vddc_index;
582 voltage->value = cpu_to_be16(vddc);
583 break;
587 if (i == pi->valid_vddc_entries)
588 return -EINVAL;
590 return 0;
593 int rv770_populate_mvdd_value(struct radeon_device *rdev, u32 mclk,
594 RV770_SMC_VOLTAGE_VALUE *voltage)
596 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
598 if (!pi->mvdd_control) {
599 voltage->index = MVDD_HIGH_INDEX;
600 voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
601 return 0;
604 if (mclk <= pi->mvdd_split_frequency) {
605 voltage->index = MVDD_LOW_INDEX;
606 voltage->value = cpu_to_be16(MVDD_LOW_VALUE);
607 } else {
608 voltage->index = MVDD_HIGH_INDEX;
609 voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
612 return 0;
615 static int rv770_convert_power_level_to_smc(struct radeon_device *rdev,
616 struct rv7xx_pl *pl,
617 RV770_SMC_HW_PERFORMANCE_LEVEL *level,
618 u8 watermark_level)
620 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
621 int ret;
623 level->gen2PCIE = pi->pcie_gen2 ?
624 ((pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? 1 : 0) : 0;
625 level->gen2XSP = (pl->flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2) ? 1 : 0;
626 level->backbias = (pl->flags & ATOM_PPLIB_R600_FLAGS_BACKBIASENABLE) ? 1 : 0;
627 level->displayWatermark = watermark_level;
629 if (rdev->family == CHIP_RV740)
630 ret = rv740_populate_sclk_value(rdev, pl->sclk,
631 &level->sclk);
632 else if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
633 ret = rv730_populate_sclk_value(rdev, pl->sclk,
634 &level->sclk);
635 else
636 ret = rv770_populate_sclk_value(rdev, pl->sclk,
637 &level->sclk);
638 if (ret)
639 return ret;
641 if (rdev->family == CHIP_RV740) {
642 if (pi->mem_gddr5) {
643 if (pl->mclk <= pi->mclk_strobe_mode_threshold)
644 level->strobeMode =
645 rv740_get_mclk_frequency_ratio(pl->mclk) | 0x10;
646 else
647 level->strobeMode = 0;
649 if (pl->mclk > pi->mclk_edc_enable_threshold)
650 level->mcFlags = SMC_MC_EDC_RD_FLAG | SMC_MC_EDC_WR_FLAG;
651 else
652 level->mcFlags = 0;
654 ret = rv740_populate_mclk_value(rdev, pl->sclk,
655 pl->mclk, &level->mclk);
656 } else if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
657 ret = rv730_populate_mclk_value(rdev, pl->sclk,
658 pl->mclk, &level->mclk);
659 else
660 ret = rv770_populate_mclk_value(rdev, pl->sclk,
661 pl->mclk, &level->mclk);
662 if (ret)
663 return ret;
665 ret = rv770_populate_vddc_value(rdev, pl->vddc,
666 &level->vddc);
667 if (ret)
668 return ret;
670 ret = rv770_populate_mvdd_value(rdev, pl->mclk, &level->mvdd);
672 return ret;
675 static int rv770_convert_power_state_to_smc(struct radeon_device *rdev,
676 struct radeon_ps *radeon_state,
677 RV770_SMC_SWSTATE *smc_state)
679 struct rv7xx_ps *state = rv770_get_ps(radeon_state);
680 int ret;
682 if (!(radeon_state->caps & ATOM_PPLIB_DISALLOW_ON_DC))
683 smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;
685 ret = rv770_convert_power_level_to_smc(rdev,
686 &state->low,
687 &smc_state->levels[0],
688 PPSMC_DISPLAY_WATERMARK_LOW);
689 if (ret)
690 return ret;
692 ret = rv770_convert_power_level_to_smc(rdev,
693 &state->medium,
694 &smc_state->levels[1],
695 PPSMC_DISPLAY_WATERMARK_LOW);
696 if (ret)
697 return ret;
699 ret = rv770_convert_power_level_to_smc(rdev,
700 &state->high,
701 &smc_state->levels[2],
702 PPSMC_DISPLAY_WATERMARK_HIGH);
703 if (ret)
704 return ret;
706 smc_state->levels[0].arbValue = MC_CG_ARB_FREQ_F1;
707 smc_state->levels[1].arbValue = MC_CG_ARB_FREQ_F2;
708 smc_state->levels[2].arbValue = MC_CG_ARB_FREQ_F3;
710 smc_state->levels[0].seqValue = rv770_get_seq_value(rdev,
711 &state->low);
712 smc_state->levels[1].seqValue = rv770_get_seq_value(rdev,
713 &state->medium);
714 smc_state->levels[2].seqValue = rv770_get_seq_value(rdev,
715 &state->high);
717 rv770_populate_smc_sp(rdev, radeon_state, smc_state);
719 return rv770_populate_smc_t(rdev, radeon_state, smc_state);
723 u32 rv770_calculate_memory_refresh_rate(struct radeon_device *rdev,
724 u32 engine_clock)
726 u32 dram_rows;
727 u32 dram_refresh_rate;
728 u32 mc_arb_rfsh_rate;
729 u32 tmp;
731 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
732 dram_rows = 1 << (tmp + 10);
733 tmp = RREG32(MC_SEQ_MISC0) & 3;
734 dram_refresh_rate = 1 << (tmp + 3);
735 mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;
737 return mc_arb_rfsh_rate;
740 static void rv770_program_memory_timing_parameters(struct radeon_device *rdev,
741 struct radeon_ps *radeon_state)
743 struct rv7xx_ps *state = rv770_get_ps(radeon_state);
744 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
745 u32 sqm_ratio;
746 u32 arb_refresh_rate;
747 u32 high_clock;
749 if (state->high.sclk < (state->low.sclk * 0xFF / 0x40))
750 high_clock = state->high.sclk;
751 else
752 high_clock = (state->low.sclk * 0xFF / 0x40);
754 radeon_atom_set_engine_dram_timings(rdev, high_clock,
755 state->high.mclk);
757 sqm_ratio =
758 STATE0(64 * high_clock / pi->boot_sclk) |
759 STATE1(64 * high_clock / state->low.sclk) |
760 STATE2(64 * high_clock / state->medium.sclk) |
761 STATE3(64 * high_clock / state->high.sclk);
762 WREG32(MC_ARB_SQM_RATIO, sqm_ratio);
764 arb_refresh_rate =
765 POWERMODE0(rv770_calculate_memory_refresh_rate(rdev, pi->boot_sclk)) |
766 POWERMODE1(rv770_calculate_memory_refresh_rate(rdev, state->low.sclk)) |
767 POWERMODE2(rv770_calculate_memory_refresh_rate(rdev, state->medium.sclk)) |
768 POWERMODE3(rv770_calculate_memory_refresh_rate(rdev, state->high.sclk));
769 WREG32(MC_ARB_RFSH_RATE, arb_refresh_rate);
772 void rv770_enable_backbias(struct radeon_device *rdev,
773 bool enable)
775 if (enable)
776 WREG32_P(GENERAL_PWRMGT, BACKBIAS_PAD_EN, ~BACKBIAS_PAD_EN);
777 else
778 WREG32_P(GENERAL_PWRMGT, 0, ~(BACKBIAS_VALUE | BACKBIAS_PAD_EN));
781 static void rv770_enable_spread_spectrum(struct radeon_device *rdev,
782 bool enable)
784 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
786 if (enable) {
787 if (pi->sclk_ss)
788 WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN);
790 if (pi->mclk_ss) {
791 if (rdev->family == CHIP_RV740)
792 rv740_enable_mclk_spread_spectrum(rdev, true);
794 } else {
795 WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN);
797 WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN);
799 WREG32_P(CG_MPLL_SPREAD_SPECTRUM, 0, ~SSEN);
801 if (rdev->family == CHIP_RV740)
802 rv740_enable_mclk_spread_spectrum(rdev, false);
806 static void rv770_program_mpll_timing_parameters(struct radeon_device *rdev)
808 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
810 if ((rdev->family == CHIP_RV770) && !pi->mem_gddr5) {
811 WREG32(MPLL_TIME,
812 (MPLL_LOCK_TIME(R600_MPLLLOCKTIME_DFLT * pi->ref_div) |
813 MPLL_RESET_TIME(R600_MPLLRESETTIME_DFLT)));
817 void rv770_setup_bsp(struct radeon_device *rdev)
819 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
820 u32 xclk = radeon_get_xclk(rdev);
822 r600_calculate_u_and_p(pi->asi,
823 xclk,
825 &pi->bsp,
826 &pi->bsu);
828 r600_calculate_u_and_p(pi->pasi,
829 xclk,
831 &pi->pbsp,
832 &pi->pbsu);
834 pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
835 pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);
837 WREG32(CG_BSP, pi->dsp);
841 void rv770_program_git(struct radeon_device *rdev)
843 WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK);
846 void rv770_program_tp(struct radeon_device *rdev)
848 int i;
849 enum r600_td td = R600_TD_DFLT;
851 for (i = 0; i < R600_PM_NUMBER_OF_TC; i++)
852 WREG32(CG_FFCT_0 + (i * 4), (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i])));
854 if (td == R600_TD_AUTO)
855 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
856 else
857 WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
858 if (td == R600_TD_UP)
859 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
860 if (td == R600_TD_DOWN)
861 WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
864 void rv770_program_tpp(struct radeon_device *rdev)
866 WREG32(CG_TPC, R600_TPC_DFLT);
869 void rv770_program_sstp(struct radeon_device *rdev)
871 WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
874 void rv770_program_engine_speed_parameters(struct radeon_device *rdev)
876 WREG32_P(SPLL_CNTL_MODE, SPLL_DIV_SYNC, ~SPLL_DIV_SYNC);
879 static void rv770_enable_display_gap(struct radeon_device *rdev)
881 u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
883 tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
884 tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE) |
885 DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
886 WREG32(CG_DISPLAY_GAP_CNTL, tmp);
889 void rv770_program_vc(struct radeon_device *rdev)
891 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
893 WREG32(CG_FTV, pi->vrc);
896 void rv770_clear_vc(struct radeon_device *rdev)
898 WREG32(CG_FTV, 0);
901 int rv770_upload_firmware(struct radeon_device *rdev)
903 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
904 int ret;
906 rv770_reset_smc(rdev);
907 rv770_stop_smc_clock(rdev);
909 ret = rv770_load_smc_ucode(rdev, pi->sram_end);
910 if (ret)
911 return ret;
913 return 0;
916 static int rv770_populate_smc_acpi_state(struct radeon_device *rdev,
917 RV770_SMC_STATETABLE *table)
919 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
921 u32 mpll_ad_func_cntl =
922 pi->clk_regs.rv770.mpll_ad_func_cntl;
923 u32 mpll_ad_func_cntl_2 =
924 pi->clk_regs.rv770.mpll_ad_func_cntl_2;
925 u32 mpll_dq_func_cntl =
926 pi->clk_regs.rv770.mpll_dq_func_cntl;
927 u32 mpll_dq_func_cntl_2 =
928 pi->clk_regs.rv770.mpll_dq_func_cntl_2;
929 u32 spll_func_cntl =
930 pi->clk_regs.rv770.cg_spll_func_cntl;
931 u32 spll_func_cntl_2 =
932 pi->clk_regs.rv770.cg_spll_func_cntl_2;
933 u32 spll_func_cntl_3 =
934 pi->clk_regs.rv770.cg_spll_func_cntl_3;
935 u32 mclk_pwrmgt_cntl;
936 u32 dll_cntl;
938 table->ACPIState = table->initialState;
940 table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
942 if (pi->acpi_vddc) {
943 rv770_populate_vddc_value(rdev, pi->acpi_vddc,
944 &table->ACPIState.levels[0].vddc);
945 if (pi->pcie_gen2) {
946 if (pi->acpi_pcie_gen2)
947 table->ACPIState.levels[0].gen2PCIE = 1;
948 else
949 table->ACPIState.levels[0].gen2PCIE = 0;
950 } else
951 table->ACPIState.levels[0].gen2PCIE = 0;
952 if (pi->acpi_pcie_gen2)
953 table->ACPIState.levels[0].gen2XSP = 1;
954 else
955 table->ACPIState.levels[0].gen2XSP = 0;
956 } else {
957 rv770_populate_vddc_value(rdev, pi->min_vddc_in_table,
958 &table->ACPIState.levels[0].vddc);
959 table->ACPIState.levels[0].gen2PCIE = 0;
963 mpll_ad_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN;
965 mpll_dq_func_cntl_2 |= BIAS_GEN_PDNB | RESET_EN;
967 mclk_pwrmgt_cntl = (MRDCKA0_RESET |
968 MRDCKA1_RESET |
969 MRDCKB0_RESET |
970 MRDCKB1_RESET |
971 MRDCKC0_RESET |
972 MRDCKC1_RESET |
973 MRDCKD0_RESET |
974 MRDCKD1_RESET);
976 dll_cntl = 0xff000000;
978 spll_func_cntl |= SPLL_RESET | SPLL_SLEEP | SPLL_BYPASS_EN;
980 spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
981 spll_func_cntl_2 |= SCLK_MUX_SEL(4);
983 table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
984 table->ACPIState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 = cpu_to_be32(mpll_ad_func_cntl_2);
985 table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
986 table->ACPIState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 = cpu_to_be32(mpll_dq_func_cntl_2);
988 table->ACPIState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
989 table->ACPIState.levels[0].mclk.mclk770.vDLL_CNTL = cpu_to_be32(dll_cntl);
991 table->ACPIState.levels[0].mclk.mclk770.mclk_value = 0;
993 table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
994 table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
995 table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
997 table->ACPIState.levels[0].sclk.sclk_value = 0;
999 rv770_populate_mvdd_value(rdev, 0, &table->ACPIState.levels[0].mvdd);
1001 table->ACPIState.levels[1] = table->ACPIState.levels[0];
1002 table->ACPIState.levels[2] = table->ACPIState.levels[0];
1004 return 0;
1007 int rv770_populate_initial_mvdd_value(struct radeon_device *rdev,
1008 RV770_SMC_VOLTAGE_VALUE *voltage)
1010 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1012 if ((pi->s0_vid_lower_smio_cntl & pi->mvdd_mask_low) ==
1013 (pi->mvdd_low_smio[MVDD_LOW_INDEX] & pi->mvdd_mask_low) ) {
1014 voltage->index = MVDD_LOW_INDEX;
1015 voltage->value = cpu_to_be16(MVDD_LOW_VALUE);
1016 } else {
1017 voltage->index = MVDD_HIGH_INDEX;
1018 voltage->value = cpu_to_be16(MVDD_HIGH_VALUE);
1021 return 0;
1024 static int rv770_populate_smc_initial_state(struct radeon_device *rdev,
1025 struct radeon_ps *radeon_state,
1026 RV770_SMC_STATETABLE *table)
1028 struct rv7xx_ps *initial_state = rv770_get_ps(radeon_state);
1029 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1030 u32 a_t;
1032 table->initialState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL =
1033 cpu_to_be32(pi->clk_regs.rv770.mpll_ad_func_cntl);
1034 table->initialState.levels[0].mclk.mclk770.vMPLL_AD_FUNC_CNTL_2 =
1035 cpu_to_be32(pi->clk_regs.rv770.mpll_ad_func_cntl_2);
1036 table->initialState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL =
1037 cpu_to_be32(pi->clk_regs.rv770.mpll_dq_func_cntl);
1038 table->initialState.levels[0].mclk.mclk770.vMPLL_DQ_FUNC_CNTL_2 =
1039 cpu_to_be32(pi->clk_regs.rv770.mpll_dq_func_cntl_2);
1040 table->initialState.levels[0].mclk.mclk770.vMCLK_PWRMGT_CNTL =
1041 cpu_to_be32(pi->clk_regs.rv770.mclk_pwrmgt_cntl);
1042 table->initialState.levels[0].mclk.mclk770.vDLL_CNTL =
1043 cpu_to_be32(pi->clk_regs.rv770.dll_cntl);
1045 table->initialState.levels[0].mclk.mclk770.vMPLL_SS =
1046 cpu_to_be32(pi->clk_regs.rv770.mpll_ss1);
1047 table->initialState.levels[0].mclk.mclk770.vMPLL_SS2 =
1048 cpu_to_be32(pi->clk_regs.rv770.mpll_ss2);
1050 table->initialState.levels[0].mclk.mclk770.mclk_value =
1051 cpu_to_be32(initial_state->low.mclk);
1053 table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
1054 cpu_to_be32(pi->clk_regs.rv770.cg_spll_func_cntl);
1055 table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
1056 cpu_to_be32(pi->clk_regs.rv770.cg_spll_func_cntl_2);
1057 table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
1058 cpu_to_be32(pi->clk_regs.rv770.cg_spll_func_cntl_3);
1059 table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
1060 cpu_to_be32(pi->clk_regs.rv770.cg_spll_spread_spectrum);
1061 table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
1062 cpu_to_be32(pi->clk_regs.rv770.cg_spll_spread_spectrum_2);
1064 table->initialState.levels[0].sclk.sclk_value =
1065 cpu_to_be32(initial_state->low.sclk);
1067 table->initialState.levels[0].arbValue = MC_CG_ARB_FREQ_F0;
1069 table->initialState.levels[0].seqValue =
1070 rv770_get_seq_value(rdev, &initial_state->low);
1072 rv770_populate_vddc_value(rdev,
1073 initial_state->low.vddc,
1074 &table->initialState.levels[0].vddc);
1075 rv770_populate_initial_mvdd_value(rdev,
1076 &table->initialState.levels[0].mvdd);
1078 a_t = CG_R(0xffff) | CG_L(0);
1079 table->initialState.levels[0].aT = cpu_to_be32(a_t);
1081 table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
1083 if (pi->boot_in_gen2)
1084 table->initialState.levels[0].gen2PCIE = 1;
1085 else
1086 table->initialState.levels[0].gen2PCIE = 0;
1087 if (initial_state->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2)
1088 table->initialState.levels[0].gen2XSP = 1;
1089 else
1090 table->initialState.levels[0].gen2XSP = 0;
1092 if (rdev->family == CHIP_RV740) {
1093 if (pi->mem_gddr5) {
1094 if (initial_state->low.mclk <= pi->mclk_strobe_mode_threshold)
1095 table->initialState.levels[0].strobeMode =
1096 rv740_get_mclk_frequency_ratio(initial_state->low.mclk) | 0x10;
1097 else
1098 table->initialState.levels[0].strobeMode = 0;
1100 if (initial_state->low.mclk >= pi->mclk_edc_enable_threshold)
1101 table->initialState.levels[0].mcFlags = SMC_MC_EDC_RD_FLAG | SMC_MC_EDC_WR_FLAG;
1102 else
1103 table->initialState.levels[0].mcFlags = 0;
1107 table->initialState.levels[1] = table->initialState.levels[0];
1108 table->initialState.levels[2] = table->initialState.levels[0];
1110 table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
1112 return 0;
1115 static int rv770_populate_smc_vddc_table(struct radeon_device *rdev,
1116 RV770_SMC_STATETABLE *table)
1118 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1119 int i;
1121 for (i = 0; i < pi->valid_vddc_entries; i++) {
1122 table->highSMIO[pi->vddc_table[i].vddc_index] =
1123 pi->vddc_table[i].high_smio;
1124 table->lowSMIO[pi->vddc_table[i].vddc_index] =
1125 cpu_to_be32(pi->vddc_table[i].low_smio);
1128 table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDC] = 0;
1129 table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDC] =
1130 cpu_to_be32(pi->vddc_mask_low);
1132 for (i = 0;
1133 ((i < pi->valid_vddc_entries) &&
1134 (pi->max_vddc_in_table >
1135 pi->vddc_table[i].vddc));
1136 i++);
1138 table->maxVDDCIndexInPPTable =
1139 pi->vddc_table[i].vddc_index;
1141 return 0;
1144 static int rv770_populate_smc_mvdd_table(struct radeon_device *rdev,
1145 RV770_SMC_STATETABLE *table)
1147 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1149 if (pi->mvdd_control) {
1150 table->lowSMIO[MVDD_HIGH_INDEX] |=
1151 cpu_to_be32(pi->mvdd_low_smio[MVDD_HIGH_INDEX]);
1152 table->lowSMIO[MVDD_LOW_INDEX] |=
1153 cpu_to_be32(pi->mvdd_low_smio[MVDD_LOW_INDEX]);
1155 table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_MVDD] = 0;
1156 table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_MVDD] =
1157 cpu_to_be32(pi->mvdd_mask_low);
1160 return 0;
1163 static int rv770_init_smc_table(struct radeon_device *rdev,
1164 struct radeon_ps *radeon_boot_state)
1166 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1167 struct rv7xx_ps *boot_state = rv770_get_ps(radeon_boot_state);
1168 RV770_SMC_STATETABLE *table = &pi->smc_statetable;
1169 int ret;
1171 memset(table, 0, sizeof(RV770_SMC_STATETABLE));
1173 pi->boot_sclk = boot_state->low.sclk;
1175 rv770_populate_smc_vddc_table(rdev, table);
1176 rv770_populate_smc_mvdd_table(rdev, table);
1178 switch (rdev->pm.int_thermal_type) {
1179 case THERMAL_TYPE_RV770:
1180 case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
1181 table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
1182 break;
1183 case THERMAL_TYPE_NONE:
1184 table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
1185 break;
1186 case THERMAL_TYPE_EXTERNAL_GPIO:
1187 default:
1188 table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
1189 break;
1192 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC) {
1193 table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
1195 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_DONT_WAIT_FOR_VBLANK_ON_ALERT)
1196 table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK;
1198 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_GOTO_BOOT_ON_ALERT)
1199 table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE;
1202 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
1203 table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
1205 if (pi->mem_gddr5)
1206 table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
1208 if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
1209 ret = rv730_populate_smc_initial_state(rdev, radeon_boot_state, table);
1210 else
1211 ret = rv770_populate_smc_initial_state(rdev, radeon_boot_state, table);
1212 if (ret)
1213 return ret;
1215 if (rdev->family == CHIP_RV740)
1216 ret = rv740_populate_smc_acpi_state(rdev, table);
1217 else if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
1218 ret = rv730_populate_smc_acpi_state(rdev, table);
1219 else
1220 ret = rv770_populate_smc_acpi_state(rdev, table);
1221 if (ret)
1222 return ret;
1224 table->driverState = table->initialState;
1226 return rv770_copy_bytes_to_smc(rdev,
1227 pi->state_table_start,
1228 (const u8 *)table,
1229 sizeof(RV770_SMC_STATETABLE),
1230 pi->sram_end);
1233 static int rv770_construct_vddc_table(struct radeon_device *rdev)
1235 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1236 u16 min, max, step;
1237 u32 steps = 0;
1238 u8 vddc_index = 0;
1239 u32 i;
1241 radeon_atom_get_min_voltage(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, &min);
1242 radeon_atom_get_max_voltage(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, &max);
1243 radeon_atom_get_voltage_step(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, &step);
1245 steps = (max - min) / step + 1;
1247 if (steps > MAX_NO_VREG_STEPS)
1248 return -EINVAL;
1250 for (i = 0; i < steps; i++) {
1251 u32 gpio_pins, gpio_mask;
1253 pi->vddc_table[i].vddc = (u16)(min + i * step);
1254 radeon_atom_get_voltage_gpio_settings(rdev,
1255 pi->vddc_table[i].vddc,
1256 SET_VOLTAGE_TYPE_ASIC_VDDC,
1257 &gpio_pins, &gpio_mask);
1258 pi->vddc_table[i].low_smio = gpio_pins & gpio_mask;
1259 pi->vddc_table[i].high_smio = 0;
1260 pi->vddc_mask_low = gpio_mask;
1261 if (i > 0) {
1262 if ((pi->vddc_table[i].low_smio !=
1263 pi->vddc_table[i - 1].low_smio ) ||
1264 (pi->vddc_table[i].high_smio !=
1265 pi->vddc_table[i - 1].high_smio))
1266 vddc_index++;
1268 pi->vddc_table[i].vddc_index = vddc_index;
1271 pi->valid_vddc_entries = (u8)steps;
1273 return 0;
1276 static u32 rv770_get_mclk_split_point(struct atom_memory_info *memory_info)
1278 if (memory_info->mem_type == MEM_TYPE_GDDR3)
1279 return 30000;
1281 return 0;
1284 static int rv770_get_mvdd_pin_configuration(struct radeon_device *rdev)
1286 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1287 u32 gpio_pins, gpio_mask;
1289 radeon_atom_get_voltage_gpio_settings(rdev,
1290 MVDD_HIGH_VALUE, SET_VOLTAGE_TYPE_ASIC_MVDDC,
1291 &gpio_pins, &gpio_mask);
1292 pi->mvdd_mask_low = gpio_mask;
1293 pi->mvdd_low_smio[MVDD_HIGH_INDEX] =
1294 gpio_pins & gpio_mask;
1296 radeon_atom_get_voltage_gpio_settings(rdev,
1297 MVDD_LOW_VALUE, SET_VOLTAGE_TYPE_ASIC_MVDDC,
1298 &gpio_pins, &gpio_mask);
1299 pi->mvdd_low_smio[MVDD_LOW_INDEX] =
1300 gpio_pins & gpio_mask;
1302 return 0;
1305 u8 rv770_get_memory_module_index(struct radeon_device *rdev)
1307 return (u8) ((RREG32(BIOS_SCRATCH_4) >> 16) & 0xff);
1310 static int rv770_get_mvdd_configuration(struct radeon_device *rdev)
1312 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1313 u8 memory_module_index;
1314 struct atom_memory_info memory_info;
1316 memory_module_index = rv770_get_memory_module_index(rdev);
1318 if (radeon_atom_get_memory_info(rdev, memory_module_index, &memory_info)) {
1319 pi->mvdd_control = false;
1320 return 0;
1323 pi->mvdd_split_frequency =
1324 rv770_get_mclk_split_point(&memory_info);
1326 if (pi->mvdd_split_frequency == 0) {
1327 pi->mvdd_control = false;
1328 return 0;
1331 return rv770_get_mvdd_pin_configuration(rdev);
1334 void rv770_enable_voltage_control(struct radeon_device *rdev,
1335 bool enable)
1337 if (enable)
1338 WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
1339 else
1340 WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
1343 static void rv770_program_display_gap(struct radeon_device *rdev)
1345 u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
1347 tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
1348 if (rdev->pm.dpm.new_active_crtcs & 1) {
1349 tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK);
1350 tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
1351 } else if (rdev->pm.dpm.new_active_crtcs & 2) {
1352 tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
1353 tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK);
1354 } else {
1355 tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
1356 tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
1358 WREG32(CG_DISPLAY_GAP_CNTL, tmp);
1361 static void rv770_enable_dynamic_pcie_gen2(struct radeon_device *rdev,
1362 bool enable)
1364 rv770_enable_bif_dynamic_pcie_gen2(rdev, enable);
1366 if (enable)
1367 WREG32_P(GENERAL_PWRMGT, ENABLE_GEN2PCIE, ~ENABLE_GEN2PCIE);
1368 else
1369 WREG32_P(GENERAL_PWRMGT, 0, ~ENABLE_GEN2PCIE);
1372 static void r7xx_program_memory_timing_parameters(struct radeon_device *rdev,
1373 struct radeon_ps *radeon_new_state)
1375 if ((rdev->family == CHIP_RV730) ||
1376 (rdev->family == CHIP_RV710) ||
1377 (rdev->family == CHIP_RV740))
1378 rv730_program_memory_timing_parameters(rdev, radeon_new_state);
1379 else
1380 rv770_program_memory_timing_parameters(rdev, radeon_new_state);
1383 static int rv770_upload_sw_state(struct radeon_device *rdev,
1384 struct radeon_ps *radeon_new_state)
1386 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1387 u16 address = pi->state_table_start +
1388 offsetof(RV770_SMC_STATETABLE, driverState);
1389 RV770_SMC_SWSTATE state = { 0 };
1390 int ret;
1392 ret = rv770_convert_power_state_to_smc(rdev, radeon_new_state, &state);
1393 if (ret)
1394 return ret;
1396 return rv770_copy_bytes_to_smc(rdev, address, (const u8 *)&state,
1397 sizeof(RV770_SMC_SWSTATE),
1398 pi->sram_end);
1401 int rv770_halt_smc(struct radeon_device *rdev)
1403 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_Halt) != PPSMC_Result_OK)
1404 return -EINVAL;
1406 if (rv770_wait_for_smc_inactive(rdev) != PPSMC_Result_OK)
1407 return -EINVAL;
1409 return 0;
1412 int rv770_resume_smc(struct radeon_device *rdev)
1414 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_Resume) != PPSMC_Result_OK)
1415 return -EINVAL;
1416 return 0;
1419 int rv770_set_sw_state(struct radeon_device *rdev)
1421 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToSwState) != PPSMC_Result_OK)
1422 DRM_DEBUG("rv770_set_sw_state failed\n");
1423 return 0;
1426 int rv770_set_boot_state(struct radeon_device *rdev)
1428 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_SwitchToInitialState) != PPSMC_Result_OK)
1429 return -EINVAL;
1430 return 0;
1433 void rv770_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
1434 struct radeon_ps *new_ps,
1435 struct radeon_ps *old_ps)
1437 struct rv7xx_ps *new_state = rv770_get_ps(new_ps);
1438 struct rv7xx_ps *current_state = rv770_get_ps(old_ps);
1440 if ((new_ps->vclk == old_ps->vclk) &&
1441 (new_ps->dclk == old_ps->dclk))
1442 return;
1444 if (new_state->high.sclk >= current_state->high.sclk)
1445 return;
1447 radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk);
1450 void rv770_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
1451 struct radeon_ps *new_ps,
1452 struct radeon_ps *old_ps)
1454 struct rv7xx_ps *new_state = rv770_get_ps(new_ps);
1455 struct rv7xx_ps *current_state = rv770_get_ps(old_ps);
1457 if ((new_ps->vclk == old_ps->vclk) &&
1458 (new_ps->dclk == old_ps->dclk))
1459 return;
1461 if (new_state->high.sclk < current_state->high.sclk)
1462 return;
1464 radeon_set_uvd_clocks(rdev, new_ps->vclk, new_ps->dclk);
1467 int rv770_restrict_performance_levels_before_switch(struct radeon_device *rdev)
1469 if (rv770_send_msg_to_smc(rdev, (PPSMC_Msg)(PPSMC_MSG_NoForcedLevel)) != PPSMC_Result_OK)
1470 return -EINVAL;
1472 if (rv770_send_msg_to_smc(rdev, (PPSMC_Msg)(PPSMC_MSG_TwoLevelsDisabled)) != PPSMC_Result_OK)
1473 return -EINVAL;
1475 return 0;
1478 int rv770_dpm_force_performance_level(struct radeon_device *rdev,
1479 enum radeon_dpm_forced_level level)
1481 PPSMC_Msg msg;
1483 if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
1484 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_ZeroLevelsDisabled) != PPSMC_Result_OK)
1485 return -EINVAL;
1486 msg = PPSMC_MSG_ForceHigh;
1487 } else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
1488 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK)
1489 return -EINVAL;
1490 msg = (PPSMC_Msg)(PPSMC_MSG_TwoLevelsDisabled);
1491 } else {
1492 if (rv770_send_msg_to_smc(rdev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK)
1493 return -EINVAL;
1494 msg = (PPSMC_Msg)(PPSMC_MSG_ZeroLevelsDisabled);
1497 if (rv770_send_msg_to_smc(rdev, msg) != PPSMC_Result_OK)
1498 return -EINVAL;
1500 rdev->pm.dpm.forced_level = level;
1502 return 0;
1505 void r7xx_start_smc(struct radeon_device *rdev)
1507 rv770_start_smc(rdev);
1508 rv770_start_smc_clock(rdev);
1512 void r7xx_stop_smc(struct radeon_device *rdev)
1514 rv770_reset_smc(rdev);
1515 rv770_stop_smc_clock(rdev);
1518 static void rv770_read_clock_registers(struct radeon_device *rdev)
1520 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1522 pi->clk_regs.rv770.cg_spll_func_cntl =
1523 RREG32(CG_SPLL_FUNC_CNTL);
1524 pi->clk_regs.rv770.cg_spll_func_cntl_2 =
1525 RREG32(CG_SPLL_FUNC_CNTL_2);
1526 pi->clk_regs.rv770.cg_spll_func_cntl_3 =
1527 RREG32(CG_SPLL_FUNC_CNTL_3);
1528 pi->clk_regs.rv770.cg_spll_spread_spectrum =
1529 RREG32(CG_SPLL_SPREAD_SPECTRUM);
1530 pi->clk_regs.rv770.cg_spll_spread_spectrum_2 =
1531 RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
1532 pi->clk_regs.rv770.mpll_ad_func_cntl =
1533 RREG32(MPLL_AD_FUNC_CNTL);
1534 pi->clk_regs.rv770.mpll_ad_func_cntl_2 =
1535 RREG32(MPLL_AD_FUNC_CNTL_2);
1536 pi->clk_regs.rv770.mpll_dq_func_cntl =
1537 RREG32(MPLL_DQ_FUNC_CNTL);
1538 pi->clk_regs.rv770.mpll_dq_func_cntl_2 =
1539 RREG32(MPLL_DQ_FUNC_CNTL_2);
1540 pi->clk_regs.rv770.mclk_pwrmgt_cntl =
1541 RREG32(MCLK_PWRMGT_CNTL);
1542 pi->clk_regs.rv770.dll_cntl = RREG32(DLL_CNTL);
1545 static void r7xx_read_clock_registers(struct radeon_device *rdev)
1547 if (rdev->family == CHIP_RV740)
1548 rv740_read_clock_registers(rdev);
1549 else if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
1550 rv730_read_clock_registers(rdev);
1551 else
1552 rv770_read_clock_registers(rdev);
1555 void rv770_read_voltage_smio_registers(struct radeon_device *rdev)
1557 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1559 pi->s0_vid_lower_smio_cntl =
1560 RREG32(S0_VID_LOWER_SMIO_CNTL);
1563 void rv770_reset_smio_status(struct radeon_device *rdev)
1565 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1566 u32 sw_smio_index, vid_smio_cntl;
1568 sw_smio_index =
1569 (RREG32(GENERAL_PWRMGT) & SW_SMIO_INDEX_MASK) >> SW_SMIO_INDEX_SHIFT;
1570 switch (sw_smio_index) {
1571 case 3:
1572 vid_smio_cntl = RREG32(S3_VID_LOWER_SMIO_CNTL);
1573 break;
1574 case 2:
1575 vid_smio_cntl = RREG32(S2_VID_LOWER_SMIO_CNTL);
1576 break;
1577 case 1:
1578 vid_smio_cntl = RREG32(S1_VID_LOWER_SMIO_CNTL);
1579 break;
1580 case 0:
1581 return;
1582 default:
1583 vid_smio_cntl = pi->s0_vid_lower_smio_cntl;
1584 break;
1587 WREG32(S0_VID_LOWER_SMIO_CNTL, vid_smio_cntl);
1588 WREG32_P(GENERAL_PWRMGT, SW_SMIO_INDEX(0), ~SW_SMIO_INDEX_MASK);
1591 void rv770_get_memory_type(struct radeon_device *rdev)
1593 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1594 u32 tmp;
1596 tmp = RREG32(MC_SEQ_MISC0);
1598 if (((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT) ==
1599 MC_SEQ_MISC0_GDDR5_VALUE)
1600 pi->mem_gddr5 = true;
1601 else
1602 pi->mem_gddr5 = false;
1606 void rv770_get_pcie_gen2_status(struct radeon_device *rdev)
1608 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1609 u32 tmp;
1611 tmp = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL);
1613 if ((tmp & LC_OTHER_SIDE_EVER_SENT_GEN2) &&
1614 (tmp & LC_OTHER_SIDE_SUPPORTS_GEN2))
1615 pi->pcie_gen2 = true;
1616 else
1617 pi->pcie_gen2 = false;
1619 if (pi->pcie_gen2) {
1620 if (tmp & LC_CURRENT_DATA_RATE)
1621 pi->boot_in_gen2 = true;
1622 else
1623 pi->boot_in_gen2 = false;
1624 } else
1625 pi->boot_in_gen2 = false;
1628 #if 0
1629 static int rv770_enter_ulp_state(struct radeon_device *rdev)
1631 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1633 if (pi->gfx_clock_gating) {
1634 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~DYN_GFX_CLK_OFF_EN);
1635 WREG32_P(SCLK_PWRMGT_CNTL, GFX_CLK_FORCE_ON, ~GFX_CLK_FORCE_ON);
1636 WREG32_P(SCLK_PWRMGT_CNTL, 0, ~GFX_CLK_FORCE_ON);
1637 RREG32(GB_TILING_CONFIG);
1640 WREG32_P(SMC_MSG, HOST_SMC_MSG(PPSMC_MSG_SwitchToMinimumPower),
1641 ~HOST_SMC_MSG_MASK);
1643 udelay(7000);
1645 return 0;
1648 static int rv770_exit_ulp_state(struct radeon_device *rdev)
1650 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1651 int i;
1653 WREG32_P(SMC_MSG, HOST_SMC_MSG(PPSMC_MSG_ResumeFromMinimumPower),
1654 ~HOST_SMC_MSG_MASK);
1656 udelay(7000);
1658 for (i = 0; i < rdev->usec_timeout; i++) {
1659 if (((RREG32(SMC_MSG) & HOST_SMC_RESP_MASK) >> HOST_SMC_RESP_SHIFT) == 1)
1660 break;
1661 udelay(1000);
1664 if (pi->gfx_clock_gating)
1665 WREG32_P(SCLK_PWRMGT_CNTL, DYN_GFX_CLK_OFF_EN, ~DYN_GFX_CLK_OFF_EN);
1667 return 0;
1669 #endif
1671 static void rv770_get_mclk_odt_threshold(struct radeon_device *rdev)
1673 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1674 u8 memory_module_index;
1675 struct atom_memory_info memory_info;
1677 pi->mclk_odt_threshold = 0;
1679 if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710)) {
1680 memory_module_index = rv770_get_memory_module_index(rdev);
1682 if (radeon_atom_get_memory_info(rdev, memory_module_index, &memory_info))
1683 return;
1685 if (memory_info.mem_type == MEM_TYPE_DDR2 ||
1686 memory_info.mem_type == MEM_TYPE_DDR3)
1687 pi->mclk_odt_threshold = 30000;
1691 void rv770_get_max_vddc(struct radeon_device *rdev)
1693 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1694 u16 vddc;
1696 if (radeon_atom_get_max_vddc(rdev, 0, 0, &vddc))
1697 pi->max_vddc = 0;
1698 else
1699 pi->max_vddc = vddc;
1702 void rv770_program_response_times(struct radeon_device *rdev)
1704 u32 voltage_response_time, backbias_response_time;
1705 u32 acpi_delay_time, vbi_time_out;
1706 u32 vddc_dly, bb_dly, acpi_dly, vbi_dly;
1707 u32 reference_clock;
1709 voltage_response_time = (u32)rdev->pm.dpm.voltage_response_time;
1710 backbias_response_time = (u32)rdev->pm.dpm.backbias_response_time;
1712 if (voltage_response_time == 0)
1713 voltage_response_time = 1000;
1715 if (backbias_response_time == 0)
1716 backbias_response_time = 1000;
1718 acpi_delay_time = 15000;
1719 vbi_time_out = 100000;
1721 reference_clock = radeon_get_xclk(rdev);
1723 vddc_dly = (voltage_response_time * reference_clock) / 1600;
1724 bb_dly = (backbias_response_time * reference_clock) / 1600;
1725 acpi_dly = (acpi_delay_time * reference_clock) / 1600;
1726 vbi_dly = (vbi_time_out * reference_clock) / 1600;
1728 rv770_write_smc_soft_register(rdev,
1729 RV770_SMC_SOFT_REGISTER_delay_vreg, vddc_dly);
1730 rv770_write_smc_soft_register(rdev,
1731 RV770_SMC_SOFT_REGISTER_delay_bbias, bb_dly);
1732 rv770_write_smc_soft_register(rdev,
1733 RV770_SMC_SOFT_REGISTER_delay_acpi, acpi_dly);
1734 rv770_write_smc_soft_register(rdev,
1735 RV770_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly);
1736 #if 0
1737 /* XXX look up hw revision */
1738 if (WEKIVA_A21)
1739 rv770_write_smc_soft_register(rdev,
1740 RV770_SMC_SOFT_REGISTER_baby_step_timer,
1741 0x10);
1742 #endif
1745 static void rv770_program_dcodt_before_state_switch(struct radeon_device *rdev,
1746 struct radeon_ps *radeon_new_state,
1747 struct radeon_ps *radeon_current_state)
1749 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1750 struct rv7xx_ps *new_state = rv770_get_ps(radeon_new_state);
1751 struct rv7xx_ps *current_state = rv770_get_ps(radeon_current_state);
1752 bool current_use_dc = false;
1753 bool new_use_dc = false;
1755 if (pi->mclk_odt_threshold == 0)
1756 return;
1758 if (current_state->high.mclk <= pi->mclk_odt_threshold)
1759 current_use_dc = true;
1761 if (new_state->high.mclk <= pi->mclk_odt_threshold)
1762 new_use_dc = true;
1764 if (current_use_dc == new_use_dc)
1765 return;
1767 if (!current_use_dc && new_use_dc)
1768 return;
1770 if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
1771 rv730_program_dcodt(rdev, new_use_dc);
1774 static void rv770_program_dcodt_after_state_switch(struct radeon_device *rdev,
1775 struct radeon_ps *radeon_new_state,
1776 struct radeon_ps *radeon_current_state)
1778 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1779 struct rv7xx_ps *new_state = rv770_get_ps(radeon_new_state);
1780 struct rv7xx_ps *current_state = rv770_get_ps(radeon_current_state);
1781 bool current_use_dc = false;
1782 bool new_use_dc = false;
1784 if (pi->mclk_odt_threshold == 0)
1785 return;
1787 if (current_state->high.mclk <= pi->mclk_odt_threshold)
1788 current_use_dc = true;
1790 if (new_state->high.mclk <= pi->mclk_odt_threshold)
1791 new_use_dc = true;
1793 if (current_use_dc == new_use_dc)
1794 return;
1796 if (current_use_dc && !new_use_dc)
1797 return;
1799 if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
1800 rv730_program_dcodt(rdev, new_use_dc);
1803 static void rv770_retrieve_odt_values(struct radeon_device *rdev)
1805 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1807 if (pi->mclk_odt_threshold == 0)
1808 return;
1810 if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
1811 rv730_get_odt_values(rdev);
1814 static void rv770_set_dpm_event_sources(struct radeon_device *rdev, u32 sources)
1816 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1817 bool want_thermal_protection;
1818 enum radeon_dpm_event_src dpm_event_src;
1820 switch (sources) {
1821 case 0:
1822 default:
1823 want_thermal_protection = false;
1824 break;
1825 case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL):
1826 want_thermal_protection = true;
1827 dpm_event_src = RADEON_DPM_EVENT_SRC_DIGITAL;
1828 break;
1830 case (1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
1831 want_thermal_protection = true;
1832 dpm_event_src = RADEON_DPM_EVENT_SRC_EXTERNAL;
1833 break;
1835 case ((1 << RADEON_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
1836 (1 << RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL)):
1837 want_thermal_protection = true;
1838 dpm_event_src = RADEON_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
1839 break;
1842 if (want_thermal_protection) {
1843 WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK);
1844 if (pi->thermal_protection)
1845 WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
1846 } else {
1847 WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
1851 void rv770_enable_auto_throttle_source(struct radeon_device *rdev,
1852 enum radeon_dpm_auto_throttle_src source,
1853 bool enable)
1855 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1857 if (enable) {
1858 if (!(pi->active_auto_throttle_sources & (1 << source))) {
1859 pi->active_auto_throttle_sources |= 1 << source;
1860 rv770_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
1862 } else {
1863 if (pi->active_auto_throttle_sources & (1 << source)) {
1864 pi->active_auto_throttle_sources &= ~(1 << source);
1865 rv770_set_dpm_event_sources(rdev, pi->active_auto_throttle_sources);
1870 static int rv770_set_thermal_temperature_range(struct radeon_device *rdev,
1871 int min_temp, int max_temp)
1873 int low_temp = 0 * 1000;
1874 int high_temp = 255 * 1000;
1876 if (low_temp < min_temp)
1877 low_temp = min_temp;
1878 if (high_temp > max_temp)
1879 high_temp = max_temp;
1880 if (high_temp < low_temp) {
1881 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
1882 return -EINVAL;
1885 WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
1886 WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
1887 WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);
1889 rdev->pm.dpm.thermal.min_temp = low_temp;
1890 rdev->pm.dpm.thermal.max_temp = high_temp;
1892 return 0;
1895 int rv770_dpm_enable(struct radeon_device *rdev)
1897 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
1898 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
1899 int ret;
1901 if (pi->gfx_clock_gating)
1902 rv770_restore_cgcg(rdev);
1904 if (rv770_dpm_enabled(rdev))
1905 return -EINVAL;
1907 if (pi->voltage_control) {
1908 rv770_enable_voltage_control(rdev, true);
1909 ret = rv770_construct_vddc_table(rdev);
1910 if (ret) {
1911 DRM_ERROR("rv770_construct_vddc_table failed\n");
1912 return ret;
1916 if (pi->dcodt)
1917 rv770_retrieve_odt_values(rdev);
1919 if (pi->mvdd_control) {
1920 ret = rv770_get_mvdd_configuration(rdev);
1921 if (ret) {
1922 DRM_ERROR("rv770_get_mvdd_configuration failed\n");
1923 return ret;
1927 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_BACKBIAS)
1928 rv770_enable_backbias(rdev, true);
1930 rv770_enable_spread_spectrum(rdev, true);
1932 if (pi->thermal_protection)
1933 rv770_enable_thermal_protection(rdev, true);
1935 rv770_program_mpll_timing_parameters(rdev);
1936 rv770_setup_bsp(rdev);
1937 rv770_program_git(rdev);
1938 rv770_program_tp(rdev);
1939 rv770_program_tpp(rdev);
1940 rv770_program_sstp(rdev);
1941 rv770_program_engine_speed_parameters(rdev);
1942 rv770_enable_display_gap(rdev);
1943 rv770_program_vc(rdev);
1945 if (pi->dynamic_pcie_gen2)
1946 rv770_enable_dynamic_pcie_gen2(rdev, true);
1948 ret = rv770_upload_firmware(rdev);
1949 if (ret) {
1950 DRM_ERROR("rv770_upload_firmware failed\n");
1951 return ret;
1953 ret = rv770_init_smc_table(rdev, boot_ps);
1954 if (ret) {
1955 DRM_ERROR("rv770_init_smc_table failed\n");
1956 return ret;
1959 rv770_program_response_times(rdev);
1960 r7xx_start_smc(rdev);
1962 if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
1963 rv730_start_dpm(rdev);
1964 else
1965 rv770_start_dpm(rdev);
1967 if (pi->gfx_clock_gating)
1968 rv770_gfx_clock_gating_enable(rdev, true);
1970 if (pi->mg_clock_gating)
1971 rv770_mg_clock_gating_enable(rdev, true);
1973 rv770_enable_auto_throttle_source(rdev, RADEON_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
1975 return 0;
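/* Late enable: once interrupts are installed and an internal thermal
 * sensor is present, program the thermal range, enable the thermal DPM
 * interrupt and ask the SMC to deliver it.
 */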
1978 int rv770_dpm_late_enable(struct radeon_device *rdev)
1980 int ret;
1982 if (rdev->irq.installed &&
1983 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
1984 PPSMC_Result result;
1986 ret = rv770_set_thermal_temperature_range(rdev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
1987 if (ret)
1988 return ret;
1989 rdev->irq.dpm_thermal = true;
1990 radeon_irq_set(rdev);
1991 result = rv770_send_msg_to_smc(rdev, PPSMC_MSG_EnableThermalInterrupt);
1993 if (result != PPSMC_Result_OK)
1994 DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
1997 return 0;
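/* Tear DPM down again, roughly in the reverse order of rv770_dpm_enable(). */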
2000 void rv770_dpm_disable(struct radeon_device *rdev)
2002 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2004 if (!rv770_dpm_enabled(rdev))
2005 return;
2007 rv770_clear_vc(rdev);
2009 if (pi->thermal_protection)
2010 rv770_enable_thermal_protection(rdev, false);
2012 rv770_enable_spread_spectrum(rdev, false);
2014 if (pi->dynamic_pcie_gen2)
2015 rv770_enable_dynamic_pcie_gen2(rdev, false);
2017 if (rdev->irq.installed &&
2018 r600_is_internal_thermal_sensor(rdev->pm.int_thermal_type)) {
2019 rdev->irq.dpm_thermal = false;
2020 radeon_irq_set(rdev);
2023 if (pi->gfx_clock_gating)
2024 rv770_gfx_clock_gating_enable(rdev, false);
2026 if (pi->mg_clock_gating)
2027 rv770_mg_clock_gating_enable(rdev, false);
2029 if ((rdev->family == CHIP_RV730) || (rdev->family == CHIP_RV710))
2030 rv730_stop_dpm(rdev);
2031 else
2032 rv770_stop_dpm(rdev);
2034 r7xx_stop_smc(rdev);
2035 rv770_reset_smio_status(rdev);
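/* Switch to the requested power state: restrict the performance levels,
 * halt the SMC, upload the new software state and memory timings, resume
 * the SMC and commit the state.  UVD clocks and dcodt are adjusted around
 * the engine clock change.
 */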
2038 int rv770_dpm_set_power_state(struct radeon_device *rdev)
2040 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2041 struct radeon_ps *new_ps = rdev->pm.dpm.requested_ps;
2042 struct radeon_ps *old_ps = rdev->pm.dpm.current_ps;
2043 int ret;
2045 ret = rv770_restrict_performance_levels_before_switch(rdev);
2046 if (ret) {
2047 DRM_ERROR("rv770_restrict_performance_levels_before_switch failed\n");
2048 return ret;
2050 rv770_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);
2051 ret = rv770_halt_smc(rdev);
2052 if (ret) {
2053 DRM_ERROR("rv770_halt_smc failed\n");
2054 return ret;
2056 ret = rv770_upload_sw_state(rdev, new_ps);
2057 if (ret) {
2058 DRM_ERROR("rv770_upload_sw_state failed\n");
2059 return ret;
2061 r7xx_program_memory_timing_parameters(rdev, new_ps);
2062 if (pi->dcodt)
2063 rv770_program_dcodt_before_state_switch(rdev, new_ps, old_ps);
2064 ret = rv770_resume_smc(rdev);
2065 if (ret) {
2066 DRM_ERROR("rv770_resume_smc failed\n");
2067 return ret;
2069 ret = rv770_set_sw_state(rdev);
2070 if (ret) {
2071 DRM_ERROR("rv770_set_sw_state failed\n");
2072 return ret;
2074 if (pi->dcodt)
2075 rv770_program_dcodt_after_state_switch(rdev, new_ps, old_ps);
2076 rv770_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);
2078 return 0;
2081 #if 0
2082 void rv770_dpm_reset_asic(struct radeon_device *rdev)
2084 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2085 struct radeon_ps *boot_ps = rdev->pm.dpm.boot_ps;
2087 rv770_restrict_performance_levels_before_switch(rdev);
2088 if (pi->dcodt)
2089 rv770_program_dcodt_before_state_switch(rdev, boot_ps, boot_ps);
2090 rv770_set_boot_state(rdev);
2091 if (pi->dcodt)
2092 rv770_program_dcodt_after_state_switch(rdev, boot_ps, boot_ps);
2094 #endif
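/* One-time ASIC setup: cache the clock and voltage SMIO registers, enable
 * ACPI power management and, when radeon_aspm is non-zero, the ASPM
 * L0s/L1/PLL-sleep features advertised by the platform caps.
 */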
2096 void rv770_dpm_setup_asic(struct radeon_device *rdev)
2098 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2100 r7xx_read_clock_registers(rdev);
2101 rv770_read_voltage_smio_registers(rdev);
2102 rv770_get_memory_type(rdev);
2103 if (pi->dcodt)
2104 rv770_get_mclk_odt_threshold(rdev);
2105 rv770_get_pcie_gen2_status(rdev);
2107 rv770_enable_acpi_pm(rdev);
2109 if (radeon_aspm != 0) {
2110 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L0s)
2111 rv770_enable_l0s(rdev);
2112 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_ASPM_L1)
2113 rv770_enable_l1(rdev);
2114 if (rdev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_TURNOFFPLL_ASPML1)
2115 rv770_enable_pll_sleep_in_l1(rdev);
2119 void rv770_dpm_display_configuration_changed(struct radeon_device *rdev)
2121 rv770_program_display_gap(rdev);
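/* Overlay unions for the different ATOM PowerPlay table, clock info and
 * power state layouts found in the vBIOS.
 */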
2124 union power_info {
2125 struct _ATOM_POWERPLAY_INFO info;
2126 struct _ATOM_POWERPLAY_INFO_V2 info_2;
2127 struct _ATOM_POWERPLAY_INFO_V3 info_3;
2128 struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
2129 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
2130 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
2133 union pplib_clock_info {
2134 struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
2135 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
2136 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
2137 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
2140 union pplib_power_state {
2141 struct _ATOM_PPLIB_STATE v1;
2142 struct _ATOM_PPLIB_STATE_V2 v2;
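/* Fill in the generic (non-clock) fields of a power state and remember
 * the boot and UVD states.  Older table revisions carry no VCLK/DCLK;
 * UVD states with zero clocks fall back to the RV770 defaults.
 */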
2145 static void rv7xx_parse_pplib_non_clock_info(struct radeon_device *rdev,
2146 struct radeon_ps *rps,
2147 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
2148 u8 table_rev)
2150 rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
2151 rps->class = le16_to_cpu(non_clock_info->usClassification);
2152 rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
2154 if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
2155 rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
2156 rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
2157 } else {
2158 rps->vclk = 0;
2159 rps->dclk = 0;
2162 if (r600_is_uvd_state(rps->class, rps->class2)) {
2163 if ((rps->vclk == 0) || (rps->dclk == 0)) {
2164 rps->vclk = RV770_DEFAULT_VCLK_FREQ;
2165 rps->dclk = RV770_DEFAULT_DCLK_FREQ;
2169 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
2170 rdev->pm.dpm.boot_ps = rps;
2171 if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
2172 rdev->pm.dpm.uvd_ps = rps;
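/* Decode one performance level (low/medium/high) from the pplib clock
 * info.  Engine/memory clocks are split into a 16-bit low word and an
 * 8-bit high byte, e.g. sclk = usEngineClockLow | (ucEngineClockHigh << 16).
 * Also records the ACPI and ULV levels, patches up the boot state with
 * the default clocks/voltages, and tracks the min/max VDDC seen.
 */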
2175 static void rv7xx_parse_pplib_clock_info(struct radeon_device *rdev,
2176 struct radeon_ps *rps, int index,
2177 union pplib_clock_info *clock_info)
2179 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2180 struct evergreen_power_info *eg_pi = evergreen_get_pi(rdev);
2181 struct rv7xx_ps *ps = rv770_get_ps(rps);
2182 u32 sclk, mclk;
2183 struct rv7xx_pl *pl;
2185 switch (index) {
2186 case 0:
2187 pl = &ps->low;
2188 break;
2189 case 1:
2190 pl = &ps->medium;
2191 break;
2192 case 2:
2193 default:
2194 pl = &ps->high;
2195 break;
2198 if (rdev->family >= CHIP_CEDAR) {
2199 sclk = le16_to_cpu(clock_info->evergreen.usEngineClockLow);
2200 sclk |= clock_info->evergreen.ucEngineClockHigh << 16;
2201 mclk = le16_to_cpu(clock_info->evergreen.usMemoryClockLow);
2202 mclk |= clock_info->evergreen.ucMemoryClockHigh << 16;
2204 pl->vddc = le16_to_cpu(clock_info->evergreen.usVDDC);
2205 pl->vddci = le16_to_cpu(clock_info->evergreen.usVDDCI);
2206 pl->flags = le32_to_cpu(clock_info->evergreen.ulFlags);
2207 } else {
2208 sclk = le16_to_cpu(clock_info->r600.usEngineClockLow);
2209 sclk |= clock_info->r600.ucEngineClockHigh << 16;
2210 mclk = le16_to_cpu(clock_info->r600.usMemoryClockLow);
2211 mclk |= clock_info->r600.ucMemoryClockHigh << 16;
2213 pl->vddc = le16_to_cpu(clock_info->r600.usVDDC);
2214 pl->flags = le32_to_cpu(clock_info->r600.ulFlags);
2217 pl->mclk = mclk;
2218 pl->sclk = sclk;
2220 /* patch up vddc if necessary */
2221 if (pl->vddc == 0xff01) {
2222 if (pi->max_vddc)
2223 pl->vddc = pi->max_vddc;
2226 if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
2227 pi->acpi_vddc = pl->vddc;
2228 if (rdev->family >= CHIP_CEDAR)
2229 eg_pi->acpi_vddci = pl->vddci;
2230 if (ps->low.flags & ATOM_PPLIB_R600_FLAGS_PCIEGEN2)
2231 pi->acpi_pcie_gen2 = true;
2232 else
2233 pi->acpi_pcie_gen2 = false;
2236 if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
2237 if (rdev->family >= CHIP_BARTS) {
2238 eg_pi->ulv.supported = true;
2239 eg_pi->ulv.pl = pl;
2243 if (pi->min_vddc_in_table > pl->vddc)
2244 pi->min_vddc_in_table = pl->vddc;
2246 if (pi->max_vddc_in_table < pl->vddc)
2247 pi->max_vddc_in_table = pl->vddc;
2249 /* patch up boot state */
2250 if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
2251 u16 vddc, vddci, mvdd;
2252 radeon_atombios_get_default_voltages(rdev, &vddc, &vddci, &mvdd);
2253 pl->mclk = rdev->clock.default_mclk;
2254 pl->sclk = rdev->clock.default_sclk;
2255 pl->vddc = vddc;
2256 pl->vddci = vddci;
2259 if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
2260 ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
2261 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
2262 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
2263 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
2264 rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
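/* Walk the PowerPlay state array from the vBIOS, allocating an rv7xx_ps
 * for each state and parsing its non-clock info and per-level clock info.
 */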
2268 int rv7xx_parse_power_table(struct radeon_device *rdev)
2270 struct radeon_mode_info *mode_info = &rdev->mode_info;
2271 struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
2272 union pplib_power_state *power_state;
2273 int i, j;
2274 union pplib_clock_info *clock_info;
2275 union power_info *power_info;
2276 int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
2277 u16 data_offset;
2278 u8 frev, crev;
2279 struct rv7xx_ps *ps;
2281 if (!atom_parse_data_header(mode_info->atom_context, index, NULL,
2282 &frev, &crev, &data_offset))
2283 return -EINVAL;
2284 power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
2286 rdev->pm.dpm.ps = kcalloc(power_info->pplib.ucNumStates,
2287 sizeof(struct radeon_ps),
2288 GFP_KERNEL);
2289 if (!rdev->pm.dpm.ps)
2290 return -ENOMEM;
2292 for (i = 0; i < power_info->pplib.ucNumStates; i++) {
2293 power_state = (union pplib_power_state *)
2294 (mode_info->atom_context->bios + data_offset +
2295 le16_to_cpu(power_info->pplib.usStateArrayOffset) +
2296 i * power_info->pplib.ucStateEntrySize);
2297 non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
2298 (mode_info->atom_context->bios + data_offset +
2299 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset) +
2300 (power_state->v1.ucNonClockStateIndex *
2301 power_info->pplib.ucNonClockSize));
2302 if (power_info->pplib.ucStateEntrySize - 1) {
2303 u8 *idx;
2304 ps = kzalloc(sizeof(struct rv7xx_ps), GFP_KERNEL);
2305 if (ps == NULL) {
2306 kfree(rdev->pm.dpm.ps);
2307 return -ENOMEM;
2309 rdev->pm.dpm.ps[i].ps_priv = ps;
2310 rv7xx_parse_pplib_non_clock_info(rdev, &rdev->pm.dpm.ps[i],
2311 non_clock_info,
2312 power_info->pplib.ucNonClockSize);
2313 idx = (u8 *)&power_state->v1.ucClockStateIndices[0];
2314 for (j = 0; j < (power_info->pplib.ucStateEntrySize - 1); j++) {
2315 clock_info = (union pplib_clock_info *)
2316 (mode_info->atom_context->bios + data_offset +
2317 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset) +
2318 (idx[j] * power_info->pplib.ucClockInfoSize));
2319 rv7xx_parse_pplib_clock_info(rdev,
2320 &rdev->pm.dpm.ps[i], j,
2321 clock_info);
2325 rdev->pm.dpm.num_ps = power_info->pplib.ucNumStates;
2326 return 0;
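/* Query the vBIOS for engine and memory spread spectrum support and note
 * whether dynamic spread spectrum can be used at all.
 */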
2329 void rv770_get_engine_memory_ss(struct radeon_device *rdev)
2331 struct rv7xx_power_info *pi = rv770_get_pi(rdev);
2332 struct radeon_atom_ss ss;
2334 pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
2335 ASIC_INTERNAL_ENGINE_SS, 0);
2336 pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
2337 ASIC_INTERNAL_MEMORY_SS, 0);
2339 if (pi->sclk_ss || pi->mclk_ss)
2340 pi->dynamic_ss = true;
2341 else
2342 pi->dynamic_ss = false;
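/* Allocate the private power info, parse the platform caps and power
 * table, and initialise the rv7xx defaults: clock thresholds, clock
 * gating, thermal protection, dcodt on mobility parts and the SMC table
 * addresses.
 */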
2345 int rv770_dpm_init(struct radeon_device *rdev)
2347 struct rv7xx_power_info *pi;
2348 struct atom_clock_dividers dividers;
2349 int ret;
2351 pi = kzalloc(sizeof(struct rv7xx_power_info), GFP_KERNEL);
2352 if (pi == NULL)
2353 return -ENOMEM;
2354 rdev->pm.dpm.priv = pi;
2356 rv770_get_max_vddc(rdev);
2358 pi->acpi_vddc = 0;
2359 pi->min_vddc_in_table = 0;
2360 pi->max_vddc_in_table = 0;
2362 ret = r600_get_platform_caps(rdev);
2363 if (ret)
2364 return ret;
2366 ret = rv7xx_parse_power_table(rdev);
2367 if (ret)
2368 return ret;
2370 if (rdev->pm.dpm.voltage_response_time == 0)
2371 rdev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
2372 if (rdev->pm.dpm.backbias_response_time == 0)
2373 rdev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;
2375 ret = radeon_atom_get_clock_dividers(rdev, COMPUTE_ENGINE_PLL_PARAM,
2376 0, false, &dividers);
2377 if (ret)
2378 pi->ref_div = dividers.ref_div + 1;
2379 else
2380 pi->ref_div = R600_REFERENCEDIVIDER_DFLT;
2382 pi->mclk_strobe_mode_threshold = 30000;
2383 pi->mclk_edc_enable_threshold = 30000;
2385 pi->rlp = RV770_RLP_DFLT;
2386 pi->rmp = RV770_RMP_DFLT;
2387 pi->lhp = RV770_LHP_DFLT;
2388 pi->lmp = RV770_LMP_DFLT;
2390 pi->voltage_control =
2391 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, 0);
2393 pi->mvdd_control =
2394 radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0);
2396 rv770_get_engine_memory_ss(rdev);
2398 pi->asi = RV770_ASI_DFLT;
2399 pi->pasi = RV770_HASI_DFLT;
2400 pi->vrc = RV770_VRC_DFLT;
2402 pi->power_gating = false;
2404 pi->gfx_clock_gating = true;
2406 pi->mg_clock_gating = true;
2407 pi->mgcgtssm = true;
2409 pi->dynamic_pcie_gen2 = true;
2411 if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
2412 pi->thermal_protection = true;
2413 else
2414 pi->thermal_protection = false;
2416 pi->display_gap = true;
2418 if (rdev->flags & RADEON_IS_MOBILITY)
2419 pi->dcodt = true;
2420 else
2421 pi->dcodt = false;
2423 pi->ulps = true;
2425 pi->mclk_stutter_mode_threshold = 0;
2427 pi->sram_end = SMC_RAM_END;
2428 pi->state_table_start = RV770_SMC_TABLE_ADDRESS;
2429 pi->soft_regs_start = RV770_SMC_SOFT_REGISTERS_START;
2431 return 0;
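/* Dump a power state to the kernel log; families from CEDAR onwards also
 * report vddci for each level.
 */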
2434 void rv770_dpm_print_power_state(struct radeon_device *rdev,
2435 struct radeon_ps *rps)
2437 struct rv7xx_ps *ps = rv770_get_ps(rps);
2438 struct rv7xx_pl *pl;
2440 r600_dpm_print_class_info(rps->class, rps->class2);
2441 r600_dpm_print_cap_info(rps->caps);
2442 printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
2443 if (rdev->family >= CHIP_CEDAR) {
2444 pl = &ps->low;
2445 printk("\t\tpower level 0 sclk: %u mclk: %u vddc: %u vddci: %u\n",
2446 pl->sclk, pl->mclk, pl->vddc, pl->vddci);
2447 pl = &ps->medium;
2448 printk("\t\tpower level 1 sclk: %u mclk: %u vddc: %u vddci: %u\n",
2449 pl->sclk, pl->mclk, pl->vddc, pl->vddci);
2450 pl = &ps->high;
2451 printk("\t\tpower level 2 sclk: %u mclk: %u vddc: %u vddci: %u\n",
2452 pl->sclk, pl->mclk, pl->vddc, pl->vddci);
2453 } else {
2454 pl = &ps->low;
2455 printk("\t\tpower level 0 sclk: %u mclk: %u vddc: %u\n",
2456 pl->sclk, pl->mclk, pl->vddc);
2457 pl = &ps->medium;
2458 printk("\t\tpower level 1 sclk: %u mclk: %u vddc: %u\n",
2459 pl->sclk, pl->mclk, pl->vddc);
2460 pl = &ps->high;
2461 printk("\t\tpower level 2 sclk: %u mclk: %u vddc: %u\n",
2462 pl->sclk, pl->mclk, pl->vddc);
2464 r600_dpm_print_ps_status(rdev, rps);
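/* debugfs helper: read the current profile index from
 * TARGET_AND_CURRENT_PROFILE_INDEX and print the matching level of the
 * current power state.
 */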
2467 void rv770_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
2468 struct seq_file *m)
2470 struct radeon_ps *rps = rdev->pm.dpm.current_ps;
2471 struct rv7xx_ps *ps = rv770_get_ps(rps);
2472 struct rv7xx_pl *pl;
2473 u32 current_index =
2474 (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
2475 CURRENT_PROFILE_INDEX_SHIFT;
2477 if (current_index > 2) {
2478 seq_printf(m, "invalid dpm profile %u\n", current_index);
2479 } else {
2480 if (current_index == 0)
2481 pl = &ps->low;
2482 else if (current_index == 1)
2483 pl = &ps->medium;
2484 else /* current_index == 2 */
2485 pl = &ps->high;
2486 seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
2487 if (rdev->family >= CHIP_CEDAR) {
2488 seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u\n",
2489 current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
2490 } else {
2491 seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u\n",
2492 current_index, pl->sclk, pl->mclk, pl->vddc);
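/* Report the sclk (and, in the helper below, the mclk) of the hardware's
 * currently selected performance level, or 0 if the index is out of range.
 */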
2497 u32 rv770_dpm_get_current_sclk(struct radeon_device *rdev)
2499 struct radeon_ps *rps = rdev->pm.dpm.current_ps;
2500 struct rv7xx_ps *ps = rv770_get_ps(rps);
2501 struct rv7xx_pl *pl;
2502 u32 current_index =
2503 (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
2504 CURRENT_PROFILE_INDEX_SHIFT;
2506 if (current_index > 2) {
2507 return 0;
2508 } else {
2509 if (current_index == 0)
2510 pl = &ps->low;
2511 else if (current_index == 1)
2512 pl = &ps->medium;
2513 else /* current_index == 2 */
2514 pl = &ps->high;
2515 return pl->sclk;
2519 u32 rv770_dpm_get_current_mclk(struct radeon_device *rdev)
2521 struct radeon_ps *rps = rdev->pm.dpm.current_ps;
2522 struct rv7xx_ps *ps = rv770_get_ps(rps);
2523 struct rv7xx_pl *pl;
2524 u32 current_index =
2525 (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_PROFILE_INDEX_MASK) >>
2526 CURRENT_PROFILE_INDEX_SHIFT;
2528 if (current_index > 2) {
2529 return 0;
2530 } else {
2531 if (current_index == 0)
2532 pl = &ps->low;
2533 else if (current_index == 1)
2534 pl = &ps->medium;
2535 else /* current_index == 2 */
2536 pl = &ps->high;
2537 return pl->mclk;
2541 void rv770_dpm_fini(struct radeon_device *rdev)
2543 int i;
2545 for (i = 0; i < rdev->pm.dpm.num_ps; i++) {
2546 kfree(rdev->pm.dpm.ps[i].ps_priv);
2548 kfree(rdev->pm.dpm.ps);
2549 kfree(rdev->pm.dpm.priv);
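/* Return the low or high engine/memory clock of the requested state for
 * the core power management code.
 */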
2552 u32 rv770_dpm_get_sclk(struct radeon_device *rdev, bool low)
2554 struct rv7xx_ps *requested_state = rv770_get_ps(rdev->pm.dpm.requested_ps);
2556 if (low)
2557 return requested_state->low.sclk;
2558 else
2559 return requested_state->high.sclk;
2562 u32 rv770_dpm_get_mclk(struct radeon_device *rdev, bool low)
2564 struct rv7xx_ps *requested_state = rv770_get_ps(rdev->pm.dpm.requested_ps);
2566 if (low)
2567 return requested_state->low.mclk;
2568 else
2569 return requested_state->high.mclk;
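/* Decide whether the vblank period is too short for mclk switching:
 * returns true when the measured vblank time is below the switch limit.
 * Desktop RV770 boards disable mclk switching entirely as it does not
 * work reliably there.
 */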
2572 bool rv770_dpm_vblank_too_short(struct radeon_device *rdev)
2574 u32 vblank_time = r600_dpm_get_vblank_time(rdev);
2575 u32 switch_limit = 200; /* 300 */
2577 /* RV770 */
2578 /* mclk switching doesn't seem to work reliably on desktop RV770s */
2579 if ((rdev->family == CHIP_RV770) &&
2580 !(rdev->flags & RADEON_IS_MOBILITY))
2581 switch_limit = 0xffffffff; /* disable mclk switching */
2583 if (vblank_time < switch_limit)
2584 return true;
2585 else
2586 return false;