/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include <linux/seq_file.h>

#include "amdgpu_pm.h"
#include "amdgpu_ucode.h"
#include "amdgpu_dpm.h"

#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"
MODULE_FIRMWARE("amdgpu/bonaire_smc.bin");
MODULE_FIRMWARE("amdgpu/bonaire_k_smc.bin");
MODULE_FIRMWARE("amdgpu/hawaii_smc.bin");
MODULE_FIRMWARE("amdgpu/hawaii_k_smc.bin");

#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

#define SMC_RAM_END 0x40000

#define VOLTAGE_SCALE               4
#define VOLTAGE_VID_OFFSET_SCALE1    625
#define VOLTAGE_VID_OFFSET_SCALE2    100

static const struct amd_pm_funcs ci_dpm_funcs;
static const struct ci_pt_defaults defaults_hawaii_xt = {
        1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
        { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
        { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_hawaii_pro = {
        1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
        { 0x2E, 0x00, 0x00, 0x88, 0x00, 0x00, 0x72, 0x60, 0x51, 0xA7, 0x79, 0x6B, 0x90, 0xBD, 0x79 },
        { 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_bonaire_xt = {
        1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
        { 0x79, 0x253, 0x25D, 0xAE, 0x72, 0x80, 0x83, 0x86, 0x6F, 0xC8, 0xC9, 0xC9, 0x2F, 0x4D, 0x61 },
        { 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

static const struct ci_pt_defaults defaults_bonaire_pro = {
        1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
        { 0x8C, 0x23F, 0x244, 0xA6, 0x83, 0x85, 0x86, 0x86, 0x83, 0xDB, 0xDB, 0xDA, 0x67, 0x60, 0x5F },
        { 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
};

static const struct ci_pt_defaults defaults_saturn_xt = {
        1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
        { 0x8C, 0x247, 0x249, 0xA6, 0x80, 0x81, 0x8B, 0x89, 0x86, 0xC9, 0xCA, 0xC9, 0x4D, 0x4D, 0x4D },
        { 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_saturn_pro = {
        1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
        { 0x96, 0x21D, 0x23B, 0xA1, 0x85, 0x87, 0x83, 0x84, 0x81, 0xE6, 0xE6, 0xE6, 0x71, 0x6A, 0x6A },
        { 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
};
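
/* DIdT configuration list: { offset, mask, shift, value, type } tuples that
 * ci_program_pt_config_registers() applies through the DIDT indirect
 * register space; the 0xFFFFFFFF offset entry terminates the list.
 */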
static const struct ci_pt_config_reg didt_config_ci[] = {
        { 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
        { 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
        { 0xFFFFFFFF }
};
static u8 ci_get_memory_module_index(struct amdgpu_device *adev)
{
        return (u8) ((RREG32(mmBIOS_SCRATCH_4) >> 16) & 0xff);
}
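
/* Copy the DRAM timing/timing2 registers and the per-state burst time from
 * one MC arbiter register set to another, then request a switch to the
 * destination set via MC_ARB_CG.
 */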
static int ci_copy_and_switch_arb_sets(struct amdgpu_device *adev,
                                       u32 arb_freq_src, u32 arb_freq_dest)
{
        u32 mc_arb_dram_timing;
        u32 mc_arb_dram_timing2;
        u32 burst_time;
        u32 mc_cg_config;

        switch (arb_freq_src) {
        case MC_CG_ARB_FREQ_F0:
                mc_arb_dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING);
                mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
                burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK) >>
                        MC_ARB_BURST_TIME__STATE0__SHIFT;
                break;
        case MC_CG_ARB_FREQ_F1:
                mc_arb_dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING_1);
                mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2_1);
                burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE1_MASK) >>
                        MC_ARB_BURST_TIME__STATE1__SHIFT;
                break;
        default:
                return -EINVAL;
        }

        switch (arb_freq_dest) {
        case MC_CG_ARB_FREQ_F0:
                WREG32(mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
                WREG32(mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
                WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE0__SHIFT),
                         ~MC_ARB_BURST_TIME__STATE0_MASK);
                break;
        case MC_CG_ARB_FREQ_F1:
                WREG32(mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
                WREG32(mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
                WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE1__SHIFT),
                         ~MC_ARB_BURST_TIME__STATE1_MASK);
                break;
        default:
                return -EINVAL;
        }

        mc_cg_config = RREG32(mmMC_CG_CONFIG) | 0x0000000F;
        WREG32(mmMC_CG_CONFIG, mc_cg_config);
        WREG32_P(mmMC_ARB_CG, (arb_freq_dest) << MC_ARB_CG__CG_ARB_REQ__SHIFT,
                 ~MC_ARB_CG__CG_ARB_REQ_MASK);

        return 0;
}
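
/* Translate a memory clock into the MC parameter index the memory
 * controller expects; the clocks here appear to be in 10 kHz units, with
 * separate encodings for DDR3 and for GDDR5 strobe/non-strobe mode.
 */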
static u8 ci_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
{
        u8 mc_para_index;

        if (memory_clock < 10000)
                mc_para_index = 0;
        else if (memory_clock >= 80000)
                mc_para_index = 0x0f;
        else
                mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
        return mc_para_index;
}
static u8 ci_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
{
        u8 mc_para_index;

        if (strobe_mode) {
                if (memory_clock < 12500)
                        mc_para_index = 0x00;
                else if (memory_clock > 47500)
                        mc_para_index = 0x0f;
                else
                        mc_para_index = (u8)((memory_clock - 10000) / 2500);
        } else {
                if (memory_clock < 65000)
                        mc_para_index = 0x00;
                else if (memory_clock > 135000)
                        mc_para_index = 0x0f;
                else
                        mc_para_index = (u8)((memory_clock - 60000) / 5000);
        }
        return mc_para_index;
}
static void ci_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
                                                     u32 max_voltage_steps,
                                                     struct atom_voltage_table *voltage_table)
{
        unsigned int i, diff;

        if (voltage_table->count <= max_voltage_steps)
                return;

        diff = voltage_table->count - max_voltage_steps;

        for (i = 0; i < max_voltage_steps; i++)
                voltage_table->entries[i] = voltage_table->entries[i + diff];

        voltage_table->count = max_voltage_steps;
}
static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
                                         struct atom_voltage_table_entry *voltage_table,
                                         u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
static int ci_set_power_limit(struct amdgpu_device *adev, u32 n);
static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
                                       u32 target_tdp);
static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate);
static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev);

static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
                                                             PPSMC_Msg msg, u32 parameter);
static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev);
static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev);
static struct ci_power_info *ci_get_pi(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = adev->pm.dpm.priv;

        return pi;
}
static struct ci_ps *ci_get_ps(struct amdgpu_ps *rps)
{
        struct ci_ps *ps = rps->ps_priv;

        return ps;
}
static void ci_initialize_powertune_defaults(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);

        switch (adev->pdev->device) {
        case 0x6649:
        case 0x6650:
        case 0x6651:
        case 0x6658:
        case 0x665C:
        case 0x665D:
                pi->powertune_defaults = &defaults_bonaire_xt;
                break;
        case 0x6640:
        case 0x6641:
        case 0x6646:
        case 0x6647:
                pi->powertune_defaults = &defaults_saturn_xt;
                break;
        case 0x67B8:
        case 0x67B0:
                pi->powertune_defaults = &defaults_hawaii_xt;
                break;
        case 0x67B1:
                pi->powertune_defaults = &defaults_hawaii_pro;
                break;
        default:
                pi->powertune_defaults = &defaults_bonaire_xt;
                break;
        }

        pi->dte_tj_offset = 0;

        pi->caps_power_containment = true;
        pi->caps_cac = false;
        pi->caps_sq_ramping = false;
        pi->caps_db_ramping = false;
        pi->caps_td_ramping = false;
        pi->caps_tcp_ramping = false;

        if (pi->caps_power_containment) {
                pi->caps_cac = true;
                if (adev->asic_type == CHIP_HAWAII)
                        pi->enable_bapm_feature = false;
                else
                        pi->enable_bapm_feature = true;
                pi->enable_tdc_limit_feature = true;
                pi->enable_pkg_pwr_tracking_feature = true;
        }
}
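
/* Convert a VDDC value to an SVI VID code: with VOLTAGE_SCALE = 4 this is
 * (6200 - 4*vddc)/25, i.e. VID 0 corresponds to 1.55 V and each VID step
 * is 6.25 mV (assuming vddc is given in mV).
 */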
static u8 ci_convert_to_vid(u16 vddc)
{
        return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
}
static int ci_populate_bapm_vddc_vid_sidd(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
        u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
        u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
        u32 i;

        if (adev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
                return -EINVAL;
        if (adev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
                return -EINVAL;
        if (adev->pm.dpm.dyn_state.cac_leakage_table.count !=
            adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
                return -EINVAL;

        for (i = 0; i < adev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
                if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
                        lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
                        hi_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
                        hi2_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
                } else {
                        lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
                        hi_vid[i] = ci_convert_to_vid((u16)adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
                }
        }

        return 0;
}
static int ci_populate_vddc_vid(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u8 *vid = pi->smc_powertune_table.VddCVid;
        u32 i;

        if (pi->vddc_voltage_table.count > 8)
                return -EINVAL;

        for (i = 0; i < pi->vddc_voltage_table.count; i++)
                vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);

        return 0;
}
static int ci_populate_svi_load_line(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;

        pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
        pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
        pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
        pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;

        return 0;
}
static int ci_populate_tdc_limit(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
        u16 tdc_limit;

        tdc_limit = adev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
        pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
        pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
                pt_defaults->tdc_vddc_throttle_release_limit_perc;
        pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;

        return 0;
}
static int ci_populate_dw8(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
        int ret;

        ret = amdgpu_ci_read_smc_sram_dword(adev,
                                            SMU7_FIRMWARE_HEADER_LOCATION +
                                            offsetof(SMU7_Firmware_Header, PmFuseTable) +
                                            offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
                                            (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
                                            pi->sram_end);
        if (ret)
                return -EINVAL;
        else
                pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;

        return 0;
}
static int ci_populate_fuzzy_fan(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);

        if ((adev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
            (adev->pm.dpm.fan.fan_output_sensitivity == 0))
                adev->pm.dpm.fan.fan_output_sensitivity =
                        adev->pm.dpm.fan.default_fan_output_sensitivity;

        pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
                cpu_to_be16(adev->pm.dpm.fan.fan_output_sensitivity);

        return 0;
}
static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
        u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
        u8 i, min, max;

        min = max = hi_vid[0];
        for (i = 0; i < 8; i++) {
                if (0 != hi_vid[i]) {
                        if (min > hi_vid[i])
                                min = hi_vid[i];
                        if (max < hi_vid[i])
                                max = hi_vid[i];
                }

                if (0 != lo_vid[i]) {
                        if (min > lo_vid[i])
                                min = lo_vid[i];
                        if (max < lo_vid[i])
                                max = lo_vid[i];
                }
        }

        if ((min == 0) || (max == 0))
                return -EINVAL;
        pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
        pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;

        return 0;
}
static int ci_populate_bapm_vddc_base_leakage_sidd(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
        u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
        struct amdgpu_cac_tdp_table *cac_tdp_table =
                adev->pm.dpm.dyn_state.cac_tdp_table;

        hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
        lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;

        pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
        pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);

        return 0;
}
static int ci_populate_bapm_parameters_in_dpm_table(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
        SMU7_Discrete_DpmTable *dpm_table = &pi->smc_state_table;
        struct amdgpu_cac_tdp_table *cac_tdp_table =
                adev->pm.dpm.dyn_state.cac_tdp_table;
        struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
        const u16 *def1, *def2;
        int i, j, k;

        dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
        dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;

        dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
        dpm_table->GpuTjMax =
                (u8)(pi->thermal_temp_setting.temperature_high / 1000);
        dpm_table->GpuTjHyst = 8;

        dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;

        if (ppm) {
                dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
                dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
        } else {
                dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
                dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
        }

        dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
        def1 = pt_defaults->bapmti_r;
        def2 = pt_defaults->bapmti_rc;

        for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
                for (j = 0; j < SMU7_DTE_SOURCES; j++) {
                        for (k = 0; k < SMU7_DTE_SINKS; k++) {
                                dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
                                dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
                                def1++;
                                def2++;
                        }
                }
        }

        return 0;
}
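
/* Collect all PowerTune fuse values populated by the helpers above and
 * copy the assembled SMU7_Discrete_PmFuses image into SMC SRAM at the
 * PmFuseTable offset published in the firmware header.
 */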
static int ci_populate_pm_base(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u32 pm_fuse_table_offset;
        int ret;

        if (pi->caps_power_containment) {
                ret = amdgpu_ci_read_smc_sram_dword(adev,
                                                    SMU7_FIRMWARE_HEADER_LOCATION +
                                                    offsetof(SMU7_Firmware_Header, PmFuseTable),
                                                    &pm_fuse_table_offset, pi->sram_end);
                if (ret)
                        return ret;
                ret = ci_populate_bapm_vddc_vid_sidd(adev);
                if (ret)
                        return ret;
                ret = ci_populate_vddc_vid(adev);
                if (ret)
                        return ret;
                ret = ci_populate_svi_load_line(adev);
                if (ret)
                        return ret;
                ret = ci_populate_tdc_limit(adev);
                if (ret)
                        return ret;
                ret = ci_populate_dw8(adev);
                if (ret)
                        return ret;
                ret = ci_populate_fuzzy_fan(adev);
                if (ret)
                        return ret;
                ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(adev);
                if (ret)
                        return ret;
                ret = ci_populate_bapm_vddc_base_leakage_sidd(adev);
                if (ret)
                        return ret;
                ret = amdgpu_ci_copy_bytes_to_smc(adev, pm_fuse_table_offset,
                                                  (u8 *)&pi->smc_powertune_table,
                                                  sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
                if (ret)
                        return ret;
        }

        return 0;
}
static void ci_do_enable_didt(struct amdgpu_device *adev, const bool enable)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u32 data;

        if (pi->caps_sq_ramping) {
                data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
                if (enable)
                        data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
                else
                        data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
                WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
        }

        if (pi->caps_db_ramping) {
                data = RREG32_DIDT(ixDIDT_DB_CTRL0);
                if (enable)
                        data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
                else
                        data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
                WREG32_DIDT(ixDIDT_DB_CTRL0, data);
        }

        if (pi->caps_td_ramping) {
                data = RREG32_DIDT(ixDIDT_TD_CTRL0);
                if (enable)
                        data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
                else
                        data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
                WREG32_DIDT(ixDIDT_TD_CTRL0, data);
        }

        if (pi->caps_tcp_ramping) {
                data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
                if (enable)
                        data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
                else
                        data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
                WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
        }
}
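
/* Walk a 0xFFFFFFFF-terminated ci_pt_config_reg list and read-modify-write
 * each register through the address space selected by its type; CACHE
 * entries only accumulate bits that are OR'ed into the next real write.
 */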
static int ci_program_pt_config_registers(struct amdgpu_device *adev,
                                          const struct ci_pt_config_reg *cac_config_regs)
{
        const struct ci_pt_config_reg *config_regs = cac_config_regs;
        u32 data;
        u32 cache = 0;

        if (config_regs == NULL)
                return -EINVAL;

        while (config_regs->offset != 0xFFFFFFFF) {
                if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
                        cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
                } else {
                        switch (config_regs->type) {
                        case CISLANDS_CONFIGREG_SMC_IND:
                                data = RREG32_SMC(config_regs->offset);
                                break;
                        case CISLANDS_CONFIGREG_DIDT_IND:
                                data = RREG32_DIDT(config_regs->offset);
                                break;
                        default:
                                data = RREG32(config_regs->offset);
                                break;
                        }

                        data &= ~config_regs->mask;
                        data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
                        data |= cache;

                        switch (config_regs->type) {
                        case CISLANDS_CONFIGREG_SMC_IND:
                                WREG32_SMC(config_regs->offset, data);
                                break;
                        case CISLANDS_CONFIGREG_DIDT_IND:
                                WREG32_DIDT(config_regs->offset, data);
                                break;
                        default:
                                WREG32(config_regs->offset, data);
                                break;
                        }
                        cache = 0;
                }
                config_regs++;
        }
        return 0;
}
static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        int ret;

        if (pi->caps_sq_ramping || pi->caps_db_ramping ||
            pi->caps_td_ramping || pi->caps_tcp_ramping) {
                adev->gfx.rlc.funcs->enter_safe_mode(adev);

                if (enable) {
                        ret = ci_program_pt_config_registers(adev, didt_config_ci);
                        if (ret) {
                                adev->gfx.rlc.funcs->exit_safe_mode(adev);
                                return ret;
                        }
                }

                ci_do_enable_didt(adev, enable);

                adev->gfx.rlc.funcs->exit_safe_mode(adev);
        }

        return 0;
}
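
/* Enable or disable the SMC power containment features (DTE/BAPM, TDC
 * limit, package power limit), recording which ones actually engaged in
 * pi->power_containment_features so the disable path can undo them.
 */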
static int ci_enable_power_containment(struct amdgpu_device *adev, bool enable)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        PPSMC_Result smc_result;
        int ret = 0;

        if (enable) {
                pi->power_containment_features = 0;
                if (pi->caps_power_containment) {
                        if (pi->enable_bapm_feature) {
                                smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
                                if (smc_result != PPSMC_Result_OK)
                                        ret = -EINVAL;
                                else
                                        pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
                        }

                        if (pi->enable_tdc_limit_feature) {
                                smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitEnable);
                                if (smc_result != PPSMC_Result_OK)
                                        ret = -EINVAL;
                                else
                                        pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
                        }

                        if (pi->enable_pkg_pwr_tracking_feature) {
                                smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitEnable);
                                if (smc_result != PPSMC_Result_OK) {
                                        ret = -EINVAL;
                                } else {
                                        struct amdgpu_cac_tdp_table *cac_tdp_table =
                                                adev->pm.dpm.dyn_state.cac_tdp_table;
                                        u32 default_pwr_limit =
                                                (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);

                                        pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;

                                        ci_set_power_limit(adev, default_pwr_limit);
                                }
                        }
                }
        } else {
                if (pi->caps_power_containment && pi->power_containment_features) {
                        if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
                                amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitDisable);

                        if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
                                amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);

                        if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
                                amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitDisable);
                        pi->power_containment_features = 0;
                }
        }

        return ret;
}
static int ci_enable_smc_cac(struct amdgpu_device *adev, bool enable)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        PPSMC_Result smc_result;
        int ret = 0;

        if (pi->caps_cac) {
                if (enable) {
                        smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
                        if (smc_result != PPSMC_Result_OK) {
                                ret = -EINVAL;
                                pi->cac_enabled = false;
                        } else {
                                pi->cac_enabled = true;
                        }
                } else if (pi->cac_enabled) {
                        amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
                        pi->cac_enabled = false;
                }
        }

        return ret;
}
static int ci_enable_thermal_based_sclk_dpm(struct amdgpu_device *adev,
                                            bool enable)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        PPSMC_Result smc_result = PPSMC_Result_OK;

        if (pi->thermal_sclk_dpm_enabled) {
                if (enable)
                        smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ENABLE_THERMAL_DPM);
                else
                        smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DISABLE_THERMAL_DPM);
        }

        if (smc_result == PPSMC_Result_OK)
                return 0;
        else
                return -EINVAL;
}
static int ci_power_control_set_level(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        struct amdgpu_cac_tdp_table *cac_tdp_table =
                adev->pm.dpm.dyn_state.cac_tdp_table;
        s32 adjust_percent;
        s32 target_tdp;
        int ret = 0;
        bool adjust_polarity = false; /* ??? */

        if (pi->caps_power_containment) {
                adjust_percent = adjust_polarity ?
                        adev->pm.dpm.tdp_adjustment : (-1 * adev->pm.dpm.tdp_adjustment);
                target_tdp = ((100 + adjust_percent) *
                              (s32)cac_tdp_table->configurable_tdp) / 100;

                ret = ci_set_overdrive_target_tdp(adev, (u32)target_tdp);
        }

        return ret;
}
static void ci_dpm_powergate_uvd(void *handle, bool gate)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct ci_power_info *pi = ci_get_pi(adev);

        pi->uvd_power_gated = gate;

        if (gate) {
                /* stop the UVD block */
                amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
                                                       AMD_PG_STATE_GATE);
                ci_update_uvd_dpm(adev, gate);
        } else {
                amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD,
                                                       AMD_PG_STATE_UNGATE);
                ci_update_uvd_dpm(adev, gate);
        }
}
static bool ci_dpm_vblank_too_short(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
        u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;

        /* disable mclk switching if the refresh is >120Hz, even if the
         * blanking period would allow it
         */
        if (amdgpu_dpm_get_vrefresh(adev) > 120)
                return true;

        if (vblank_time < switch_limit)
                return true;
        else
                return false;
}
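
/* Clamp the requested power state to the current constraints: apply the
 * DC limits when on battery, pin mclk to the highest level when memory
 * clock switching is unsafe, and honor display and VCE clock minimums.
 */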
static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
                                        struct amdgpu_ps *rps)
{
        struct ci_ps *ps = ci_get_ps(rps);
        struct ci_power_info *pi = ci_get_pi(adev);
        struct amdgpu_clock_and_voltage_limits *max_limits;
        bool disable_mclk_switching;
        u32 sclk, mclk;
        int i;

        if (rps->vce_active) {
                rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
                rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
        } else {
                rps->evclk = 0;
                rps->ecclk = 0;
        }

        if ((adev->pm.dpm.new_active_crtc_count > 1) ||
            ci_dpm_vblank_too_short(adev))
                disable_mclk_switching = true;
        else
                disable_mclk_switching = false;

        if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
                pi->battery_state = true;
        else
                pi->battery_state = false;

        if (adev->pm.ac_power)
                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
        else
                max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

        if (adev->pm.ac_power == false) {
                for (i = 0; i < ps->performance_level_count; i++) {
                        if (ps->performance_levels[i].mclk > max_limits->mclk)
                                ps->performance_levels[i].mclk = max_limits->mclk;
                        if (ps->performance_levels[i].sclk > max_limits->sclk)
                                ps->performance_levels[i].sclk = max_limits->sclk;
                }
        }

        /* XXX validate the min clocks required for display */

        if (disable_mclk_switching) {
                mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
                sclk = ps->performance_levels[0].sclk;
        } else {
                mclk = ps->performance_levels[0].mclk;
                sclk = ps->performance_levels[0].sclk;
        }

        if (adev->pm.pm_display_cfg.min_core_set_clock > sclk)
                sclk = adev->pm.pm_display_cfg.min_core_set_clock;

        if (adev->pm.pm_display_cfg.min_mem_set_clock > mclk)
                mclk = adev->pm.pm_display_cfg.min_mem_set_clock;

        if (rps->vce_active) {
                if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
                        sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
                if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
                        mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
        }

        ps->performance_levels[0].sclk = sclk;
        ps->performance_levels[0].mclk = mclk;

        if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
                ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

        if (disable_mclk_switching) {
                if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
                        ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
        } else {
                if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
                        ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
        }
}
static int ci_thermal_set_temperature_range(struct amdgpu_device *adev,
                                            int min_temp, int max_temp)
{
        int low_temp = 0 * 1000;
        int high_temp = 255 * 1000;
        u32 tmp;

        if (low_temp < min_temp)
                low_temp = min_temp;
        if (high_temp > max_temp)
                high_temp = max_temp;
        if (high_temp < low_temp) {
                DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
                return -EINVAL;
        }

        tmp = RREG32_SMC(ixCG_THERMAL_INT);
        tmp &= ~(CG_THERMAL_INT__DIG_THERM_INTH_MASK | CG_THERMAL_INT__DIG_THERM_INTL_MASK);
        tmp |= ((high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT) |
                ((low_temp / 1000)) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT;
        WREG32_SMC(ixCG_THERMAL_INT, tmp);

#if 0
        /* XXX: need to figure out how to handle this properly */
        tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
        tmp &= DIG_THERM_DPM_MASK;
        tmp |= DIG_THERM_DPM(high_temp / 1000);
        WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
#endif

        adev->pm.dpm.thermal.min_temp = low_temp;
        adev->pm.dpm.thermal.max_temp = high_temp;

        return 0;
}
static int ci_thermal_enable_alert(struct amdgpu_device *adev,
                                   bool enable)
{
        u32 thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
        PPSMC_Result result;

        if (enable) {
                thermal_int &= ~(CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
                                 CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
                WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
                result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Enable);
                if (result != PPSMC_Result_OK) {
                        DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
                        return -EINVAL;
                }
        } else {
                thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
                        CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
                WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
                result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Disable);
                if (result != PPSMC_Result_OK) {
                        DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
                        return -EINVAL;
                }
        }

        return 0;
}
static void ci_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u32 tmp;

        if (pi->fan_ctrl_is_in_default_mode) {
                tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK)
                        >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
                pi->fan_ctrl_default_mode = tmp;
                tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK)
                        >> CG_FDO_CTRL2__TMIN__SHIFT;
                pi->t_min = tmp;
                pi->fan_ctrl_is_in_default_mode = false;
        }

        tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
        tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT;
        WREG32_SMC(ixCG_FDO_CTRL2, tmp);

        tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
        tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
        WREG32_SMC(ixCG_FDO_CTRL2, tmp);
}
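
/* Build the SMU7 fan table from the VBIOS fan profile (the temperature and
 * PWM points appear to be in 0.01 unit steps, hence the +50/100 rounding
 * below) and upload it to SMC SRAM at pi->fan_table_start.
 */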
static int ci_thermal_setup_fan_table(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
        u32 duty100;
        u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
        u16 fdo_min, slope1, slope2;
        u32 reference_clock, tmp;
        int ret;
        u64 tmp64;

        if (!pi->fan_table_start) {
                adev->pm.dpm.fan.ucode_fan_control = false;
                return 0;
        }

        duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
                >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;

        if (duty100 == 0) {
                adev->pm.dpm.fan.ucode_fan_control = false;
                return 0;
        }

        tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
        do_div(tmp64, 10000);
        fdo_min = (u16)tmp64;

        t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
        t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;

        pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
        pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;

        slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
        slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

        fan_table.TempMin = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
        fan_table.TempMed = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
        fan_table.TempMax = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);

        fan_table.Slope1 = cpu_to_be16(slope1);
        fan_table.Slope2 = cpu_to_be16(slope2);

        fan_table.FdoMin = cpu_to_be16(fdo_min);

        fan_table.HystDown = cpu_to_be16(adev->pm.dpm.fan.t_hyst);

        fan_table.HystUp = cpu_to_be16(1);

        fan_table.HystSlope = cpu_to_be16(1);

        fan_table.TempRespLim = cpu_to_be16(5);

        reference_clock = amdgpu_asic_get_xclk(adev);

        fan_table.RefreshPeriod = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
                                               reference_clock) / 1600);

        fan_table.FdoMax = cpu_to_be16((u16)duty100);

        tmp = (RREG32_SMC(ixCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK)
                >> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT;
        fan_table.TempSrc = (uint8_t)tmp;

        ret = amdgpu_ci_copy_bytes_to_smc(adev,
                                          pi->fan_table_start,
                                          (u8 *)(&fan_table),
                                          sizeof(fan_table),
                                          pi->sram_end);

        if (ret) {
                DRM_ERROR("Failed to load fan table to the SMC.");
                adev->pm.dpm.fan.ucode_fan_control = false;
        }

        return 0;
}
static int ci_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        PPSMC_Result ret;

        if (pi->caps_od_fuzzy_fan_control_support) {
                ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
                                                               PPSMC_StartFanControl,
                                                               FAN_CONTROL_FUZZY);
                if (ret != PPSMC_Result_OK)
                        return -EINVAL;
                ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
                                                               PPSMC_MSG_SetFanPwmMax,
                                                               adev->pm.dpm.fan.default_max_fan_pwm);
                if (ret != PPSMC_Result_OK)
                        return -EINVAL;
        } else {
                ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
                                                               PPSMC_StartFanControl,
                                                               FAN_CONTROL_TABLE);
                if (ret != PPSMC_Result_OK)
                        return -EINVAL;
        }

        pi->fan_is_controlled_by_smc = true;
        return 0;
}
static int ci_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
{
        PPSMC_Result ret;
        struct ci_power_info *pi = ci_get_pi(adev);

        ret = amdgpu_ci_send_msg_to_smc(adev, PPSMC_StopFanControl);
        if (ret == PPSMC_Result_OK) {
                pi->fan_is_controlled_by_smc = false;
                return 0;
        } else {
                return -EINVAL;
        }
}
static int ci_dpm_get_fan_speed_percent(void *handle,
                                        u32 *speed)
{
        u32 duty, duty100;
        u64 tmp64;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (adev->pm.no_fan)
                return -ENOENT;

        duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
                >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
        duty = (RREG32_SMC(ixCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK)
                >> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT;

        if (duty100 == 0)
                return -EINVAL;

        tmp64 = (u64)duty * 100;
        do_div(tmp64, duty100);
        *speed = (u32)tmp64;

        if (*speed > 100)
                *speed = 100;

        return 0;
}
static int ci_dpm_set_fan_speed_percent(void *handle,
                                        u32 speed)
{
        u32 tmp;
        u32 duty, duty100;
        u64 tmp64;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct ci_power_info *pi = ci_get_pi(adev);

        if (adev->pm.no_fan)
                return -ENOENT;

        if (pi->fan_is_controlled_by_smc)
                return -EINVAL;

        if (speed > 100)
                return -EINVAL;

        duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
                >> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;

        if (duty100 == 0)
                return -EINVAL;

        tmp64 = (u64)speed * duty100;
        do_div(tmp64, 100);
        duty = (u32)tmp64;

        tmp = RREG32_SMC(ixCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK;
        tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT;
        WREG32_SMC(ixCG_FDO_CTRL0, tmp);

        return 0;
}
static void ci_dpm_set_fan_control_mode(void *handle, u32 mode)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        switch (mode) {
        case AMD_FAN_CTRL_NONE:
                if (adev->pm.dpm.fan.ucode_fan_control)
                        ci_fan_ctrl_stop_smc_fan_control(adev);
                ci_dpm_set_fan_speed_percent(adev, 100);
                break;
        case AMD_FAN_CTRL_MANUAL:
                if (adev->pm.dpm.fan.ucode_fan_control)
                        ci_fan_ctrl_stop_smc_fan_control(adev);
                break;
        case AMD_FAN_CTRL_AUTO:
                if (adev->pm.dpm.fan.ucode_fan_control)
                        ci_thermal_start_smc_fan_control(adev);
                break;
        default:
                break;
        }
}
static u32 ci_dpm_get_fan_control_mode(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct ci_power_info *pi = ci_get_pi(adev);

        if (pi->fan_is_controlled_by_smc)
                return AMD_FAN_CTRL_AUTO;
        else
                return AMD_FAN_CTRL_MANUAL;
}
static int ci_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
                                         u32 *speed)
{
        u32 tach_period;
        u32 xclk = amdgpu_asic_get_xclk(adev);

        if (adev->pm.no_fan)
                return -ENOENT;

        if (adev->pm.fan_pulses_per_revolution == 0)
                return -ENOENT;

        tach_period = (RREG32_SMC(ixCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK)
                >> CG_TACH_STATUS__TACH_PERIOD__SHIFT;
        if (tach_period == 0)
                return -ENOENT;

        *speed = 60 * xclk * 10000 / tach_period;

        return 0;
}
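
/* Program a static fan speed in RPM: the target tachometer period is
 * expressed in xclk cycles, and the conversion below assumes 8 tach
 * edges per fan revolution.
 */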
static int ci_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
                                         u32 speed)
{
        u32 tach_period, tmp;
        u32 xclk = amdgpu_asic_get_xclk(adev);

        if (adev->pm.no_fan)
                return -ENOENT;

        if (adev->pm.fan_pulses_per_revolution == 0)
                return -ENOENT;

        if ((speed < adev->pm.fan_min_rpm) ||
            (speed > adev->pm.fan_max_rpm))
                return -EINVAL;

        if (adev->pm.dpm.fan.ucode_fan_control)
                ci_fan_ctrl_stop_smc_fan_control(adev);

        tach_period = 60 * xclk * 10000 / (8 * speed);
        tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK;
        tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT;
        WREG32_SMC(ixCG_TACH_CTRL, tmp);

        ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);

        return 0;
}
static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u32 tmp;

        if (!pi->fan_ctrl_is_in_default_mode) {
                tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
                tmp |= pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
                WREG32_SMC(ixCG_FDO_CTRL2, tmp);

                tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
                tmp |= pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT;
                WREG32_SMC(ixCG_FDO_CTRL2, tmp);
                pi->fan_ctrl_is_in_default_mode = true;
        }
}
static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev)
{
        if (adev->pm.dpm.fan.ucode_fan_control) {
                ci_fan_ctrl_start_smc_fan_control(adev);
                ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
        }
}
static void ci_thermal_initialize(struct amdgpu_device *adev)
{
        u32 tmp;

        if (adev->pm.fan_pulses_per_revolution) {
                tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK;
                tmp |= (adev->pm.fan_pulses_per_revolution - 1)
                        << CG_TACH_CTRL__EDGE_PER_REV__SHIFT;
                WREG32_SMC(ixCG_TACH_CTRL, tmp);
        }

        tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK;
        tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT;
        WREG32_SMC(ixCG_FDO_CTRL2, tmp);
}
static int ci_thermal_start_thermal_controller(struct amdgpu_device *adev)
{
        int ret;

        ci_thermal_initialize(adev);
        ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN, CISLANDS_TEMP_RANGE_MAX);
        if (ret)
                return ret;
        ret = ci_thermal_enable_alert(adev, true);
        if (ret)
                return ret;
        if (adev->pm.dpm.fan.ucode_fan_control) {
                ret = ci_thermal_setup_fan_table(adev);
                if (ret)
                        return ret;
                ci_thermal_start_smc_fan_control(adev);
        }

        return 0;
}
static void ci_thermal_stop_thermal_controller(struct amdgpu_device *adev)
{
        if (!adev->pm.no_fan)
                ci_fan_ctrl_set_default_mode(adev);
}
static int ci_read_smc_soft_register(struct amdgpu_device *adev,
                                     u16 reg_offset, u32 *value)
{
        struct ci_power_info *pi = ci_get_pi(adev);

        return amdgpu_ci_read_smc_sram_dword(adev,
                                             pi->soft_regs_start + reg_offset,
                                             value, pi->sram_end);
}

static int ci_write_smc_soft_register(struct amdgpu_device *adev,
                                      u16 reg_offset, u32 value)
{
        struct ci_power_info *pi = ci_get_pi(adev);

        return amdgpu_ci_write_smc_sram_dword(adev,
                                              pi->soft_regs_start + reg_offset,
                                              value, pi->sram_end);
}
static void ci_init_fps_limits(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        SMU7_Discrete_DpmTable *table = &pi->smc_state_table;

        if (pi->caps_fps) {
                u16 tmp;

                tmp = 45;
                table->FpsHighT = cpu_to_be16(tmp);

                tmp = 30;
                table->FpsLowT = cpu_to_be16(tmp);
        }
}
static int ci_update_sclk_t(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        int ret = 0;
        u32 low_sclk_interrupt_t = 0;

        if (pi->caps_sclk_throttle_low_notification) {
                low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

                ret = amdgpu_ci_copy_bytes_to_smc(adev,
                                                  pi->dpm_table_start +
                                                  offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
                                                  (u8 *)&low_sclk_interrupt_t,
                                                  sizeof(u32), pi->sram_end);
        }

        return ret;
}
static void ci_get_leakage_voltages(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u16 leakage_id, virtual_voltage_id;
        u16 vddc, vddci;
        int i;

        pi->vddc_leakage.count = 0;
        pi->vddci_leakage.count = 0;

        if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
                for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
                        virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
                        if (amdgpu_atombios_get_voltage_evv(adev, virtual_voltage_id, &vddc) != 0)
                                continue;
                        if (vddc != 0 && vddc != virtual_voltage_id) {
                                pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
                                pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
                                pi->vddc_leakage.count++;
                        }
                }
        } else if (amdgpu_atombios_get_leakage_id_from_vbios(adev, &leakage_id) == 0) {
                for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
                        virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
                        if (amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(adev, &vddc, &vddci,
                                                                                     virtual_voltage_id,
                                                                                     leakage_id) == 0) {
                                if (vddc != 0 && vddc != virtual_voltage_id) {
                                        pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
                                        pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
                                        pi->vddc_leakage.count++;
                                }
                                if (vddci != 0 && vddci != virtual_voltage_id) {
                                        pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
                                        pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
                                        pi->vddci_leakage.count++;
                                }
                        }
                }
        }
}
static void ci_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        bool want_thermal_protection;
        enum amdgpu_dpm_event_src dpm_event_src;
        u32 tmp;

        switch (sources) {
        case 0:
        default:
                want_thermal_protection = false;
                break;
        case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
                want_thermal_protection = true;
                dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
                break;
        case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
                want_thermal_protection = true;
                dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
                break;
        case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
              (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
                want_thermal_protection = true;
                dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
                break;
        }

        if (want_thermal_protection) {
#if 0
                /* XXX: need to figure out how to handle this properly */
                tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
                tmp &= DPM_EVENT_SRC_MASK;
                tmp |= DPM_EVENT_SRC(dpm_event_src);
                WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
#endif

                tmp = RREG32_SMC(ixGENERAL_PWRMGT);
                if (pi->thermal_protection)
                        tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
                else
                        tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
                WREG32_SMC(ixGENERAL_PWRMGT, tmp);
        } else {
                tmp = RREG32_SMC(ixGENERAL_PWRMGT);
                tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
                WREG32_SMC(ixGENERAL_PWRMGT, tmp);
        }
}
static void ci_enable_auto_throttle_source(struct amdgpu_device *adev,
                                           enum amdgpu_dpm_auto_throttle_src source,
                                           bool enable)
{
        struct ci_power_info *pi = ci_get_pi(adev);

        if (enable) {
                if (!(pi->active_auto_throttle_sources & (1 << source))) {
                        pi->active_auto_throttle_sources |= 1 << source;
                        ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
                }
        } else {
                if (pi->active_auto_throttle_sources & (1 << source)) {
                        pi->active_auto_throttle_sources &= ~(1 << source);
                        ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
                }
        }
}
static void ci_enable_vr_hot_gpio_interrupt(struct amdgpu_device *adev)
{
        if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
                amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
}
static int ci_unfreeze_sclk_mclk_dpm(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        PPSMC_Result smc_result;

        if (!pi->need_update_smu7_dpm_table)
                return 0;

        if ((!pi->sclk_dpm_key_disabled) &&
            (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
                smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
                if (smc_result != PPSMC_Result_OK)
                        return -EINVAL;
        }

        if ((!pi->mclk_dpm_key_disabled) &&
            (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
                smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
                if (smc_result != PPSMC_Result_OK)
                        return -EINVAL;
        }

        pi->need_update_smu7_dpm_table = 0;
        return 0;
}
static int ci_enable_sclk_mclk_dpm(struct amdgpu_device *adev, bool enable)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        PPSMC_Result smc_result;

        if (enable) {
                if (!pi->sclk_dpm_key_disabled) {
                        smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Enable);
                        if (smc_result != PPSMC_Result_OK)
                                return -EINVAL;
                }

                if (!pi->mclk_dpm_key_disabled) {
                        smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Enable);
                        if (smc_result != PPSMC_Result_OK)
                                return -EINVAL;

                        WREG32_P(mmMC_SEQ_CNTL_3, MC_SEQ_CNTL_3__CAC_EN_MASK,
                                 ~MC_SEQ_CNTL_3__CAC_EN_MASK);

                        WREG32_SMC(ixLCAC_MC0_CNTL, 0x05);
                        WREG32_SMC(ixLCAC_MC1_CNTL, 0x05);
                        WREG32_SMC(ixLCAC_CPL_CNTL, 0x100005);

                        udelay(10);

                        WREG32_SMC(ixLCAC_MC0_CNTL, 0x400005);
                        WREG32_SMC(ixLCAC_MC1_CNTL, 0x400005);
                        WREG32_SMC(ixLCAC_CPL_CNTL, 0x500005);
                }
        } else {
                if (!pi->sclk_dpm_key_disabled) {
                        smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Disable);
                        if (smc_result != PPSMC_Result_OK)
                                return -EINVAL;
                }

                if (!pi->mclk_dpm_key_disabled) {
                        smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Disable);
                        if (smc_result != PPSMC_Result_OK)
                                return -EINVAL;
                }
        }

        return 0;
}
static int ci_start_dpm(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        PPSMC_Result smc_result;
        int ret;
        u32 tmp;

        tmp = RREG32_SMC(ixGENERAL_PWRMGT);
        tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
        WREG32_SMC(ixGENERAL_PWRMGT, tmp);

        tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
        tmp |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
        WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);

        ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);

        WREG32_P(mmBIF_LNCNT_RESET, 0, ~BIF_LNCNT_RESET__RESET_LNCNT_EN_MASK);

        smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Enable);
        if (smc_result != PPSMC_Result_OK)
                return -EINVAL;

        ret = ci_enable_sclk_mclk_dpm(adev, true);
        if (ret)
                return ret;

        if (!pi->pcie_dpm_key_disabled) {
                smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Enable);
                if (smc_result != PPSMC_Result_OK)
                        return -EINVAL;
        }

        return 0;
}
static int ci_freeze_sclk_mclk_dpm(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        PPSMC_Result smc_result;

        if (!pi->need_update_smu7_dpm_table)
                return 0;

        if ((!pi->sclk_dpm_key_disabled) &&
            (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
                smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_FreezeLevel);
                if (smc_result != PPSMC_Result_OK)
                        return -EINVAL;
        }

        if ((!pi->mclk_dpm_key_disabled) &&
            (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
                smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_FreezeLevel);
                if (smc_result != PPSMC_Result_OK)
                        return -EINVAL;
        }

        return 0;
}
static int ci_stop_dpm(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        PPSMC_Result smc_result;
        int ret;
        u32 tmp;

        tmp = RREG32_SMC(ixGENERAL_PWRMGT);
        tmp &= ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
        WREG32_SMC(ixGENERAL_PWRMGT, tmp);

        tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
        tmp &= ~SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
        WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);

        if (!pi->pcie_dpm_key_disabled) {
                smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Disable);
                if (smc_result != PPSMC_Result_OK)
                        return -EINVAL;
        }

        ret = ci_enable_sclk_mclk_dpm(adev, false);
        if (ret)
                return ret;

        smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Disable);
        if (smc_result != PPSMC_Result_OK)
                return -EINVAL;

        return 0;
}
static void ci_enable_sclk_control(struct amdgpu_device *adev, bool enable)
{
        u32 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);

        if (enable)
                tmp &= ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
        else
                tmp |= SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
        WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
}
static int ci_notify_hw_of_power_source(struct amdgpu_device *adev,
                                        bool ac_power)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        struct amdgpu_cac_tdp_table *cac_tdp_table =
                adev->pm.dpm.dyn_state.cac_tdp_table;
        u32 power_limit;

        if (ac_power)
                power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
        else
                power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);

        ci_set_power_limit(adev, power_limit);

        if (pi->caps_automatic_dc_transition) {
                if (ac_power)
                        amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC);
                else
                        amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Remove_DC_Clamp);
        }

        return 0;
}
static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
                                                             PPSMC_Msg msg, u32 parameter)
{
        WREG32(mmSMC_MSG_ARG_0, parameter);
        return amdgpu_ci_send_msg_to_smc(adev, msg);
}

static PPSMC_Result amdgpu_ci_send_msg_to_smc_return_parameter(struct amdgpu_device *adev,
                                                               PPSMC_Msg msg, u32 *parameter)
{
        PPSMC_Result smc_result;

        smc_result = amdgpu_ci_send_msg_to_smc(adev, msg);

        if ((smc_result == PPSMC_Result_OK) && parameter)
                *parameter = RREG32(mmSMC_MSG_ARG_0);

        return smc_result;
}
static int ci_dpm_force_state_sclk(struct amdgpu_device *adev, u32 n)
{
        struct ci_power_info *pi = ci_get_pi(adev);

        if (!pi->sclk_dpm_key_disabled) {
                PPSMC_Result smc_result =
                        amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
                if (smc_result != PPSMC_Result_OK)
                        return -EINVAL;
        }

        return 0;
}

static int ci_dpm_force_state_mclk(struct amdgpu_device *adev, u32 n)
{
        struct ci_power_info *pi = ci_get_pi(adev);

        if (!pi->mclk_dpm_key_disabled) {
                PPSMC_Result smc_result =
                        amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
                if (smc_result != PPSMC_Result_OK)
                        return -EINVAL;
        }

        return 0;
}

static int ci_dpm_force_state_pcie(struct amdgpu_device *adev, u32 n)
{
        struct ci_power_info *pi = ci_get_pi(adev);

        if (!pi->pcie_dpm_key_disabled) {
                PPSMC_Result smc_result =
                        amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
                if (smc_result != PPSMC_Result_OK)
                        return -EINVAL;
        }

        return 0;
}
static int ci_set_power_limit(struct amdgpu_device *adev, u32 n)
{
        struct ci_power_info *pi = ci_get_pi(adev);

        if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
                PPSMC_Result smc_result =
                        amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PkgPwrSetLimit, n);
                if (smc_result != PPSMC_Result_OK)
                        return -EINVAL;
        }

        return 0;
}

static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
                                       u32 target_tdp)
{
        PPSMC_Result smc_result =
                amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
        if (smc_result != PPSMC_Result_OK)
                return -EINVAL;
        return 0;
}

static int ci_set_boot_state(struct amdgpu_device *adev)
{
        return ci_enable_sclk_mclk_dpm(adev, false);
}
static u32 ci_get_average_sclk_freq(struct amdgpu_device *adev)
{
        u32 sclk_freq;
        PPSMC_Result smc_result =
                amdgpu_ci_send_msg_to_smc_return_parameter(adev,
                                                           PPSMC_MSG_API_GetSclkFrequency,
                                                           &sclk_freq);
        if (smc_result != PPSMC_Result_OK)
                sclk_freq = 0;

        return sclk_freq;
}

static u32 ci_get_average_mclk_freq(struct amdgpu_device *adev)
{
        u32 mclk_freq;
        PPSMC_Result smc_result =
                amdgpu_ci_send_msg_to_smc_return_parameter(adev,
                                                           PPSMC_MSG_API_GetMclkFrequency,
                                                           &mclk_freq);
        if (smc_result != PPSMC_Result_OK)
                mclk_freq = 0;

        return mclk_freq;
}
static void ci_dpm_start_smc(struct amdgpu_device *adev)
{
        int i;

        amdgpu_ci_program_jump_on_start(adev);
        amdgpu_ci_start_smc_clock(adev);
        amdgpu_ci_start_smc(adev);
        for (i = 0; i < adev->usec_timeout; i++) {
                if (RREG32_SMC(ixFIRMWARE_FLAGS) & FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
                        break;
        }
}
static void ci_dpm_stop_smc(struct amdgpu_device *adev)
{
        amdgpu_ci_reset_smc(adev);
        amdgpu_ci_stop_smc_clock(adev);
}
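
/* Read the SMC firmware header and cache the SRAM offsets of the DPM
 * table, soft registers, MC register table, fan table and MC arb table
 * so later uploads know where to write.
 */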
static int ci_process_firmware_header(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);
        u32 tmp;
        int ret;

        ret = amdgpu_ci_read_smc_sram_dword(adev,
                                            SMU7_FIRMWARE_HEADER_LOCATION +
                                            offsetof(SMU7_Firmware_Header, DpmTable),
                                            &tmp, pi->sram_end);
        if (ret)
                return ret;

        pi->dpm_table_start = tmp;

        ret = amdgpu_ci_read_smc_sram_dword(adev,
                                            SMU7_FIRMWARE_HEADER_LOCATION +
                                            offsetof(SMU7_Firmware_Header, SoftRegisters),
                                            &tmp, pi->sram_end);
        if (ret)
                return ret;

        pi->soft_regs_start = tmp;

        ret = amdgpu_ci_read_smc_sram_dword(adev,
                                            SMU7_FIRMWARE_HEADER_LOCATION +
                                            offsetof(SMU7_Firmware_Header, mcRegisterTable),
                                            &tmp, pi->sram_end);
        if (ret)
                return ret;

        pi->mc_reg_table_start = tmp;

        ret = amdgpu_ci_read_smc_sram_dword(adev,
                                            SMU7_FIRMWARE_HEADER_LOCATION +
                                            offsetof(SMU7_Firmware_Header, FanTable),
                                            &tmp, pi->sram_end);
        if (ret)
                return ret;

        pi->fan_table_start = tmp;

        ret = amdgpu_ci_read_smc_sram_dword(adev,
                                            SMU7_FIRMWARE_HEADER_LOCATION +
                                            offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
                                            &tmp, pi->sram_end);
        if (ret)
                return ret;

        pi->arb_table_start = tmp;

        return 0;
}
static void ci_read_clock_registers(struct amdgpu_device *adev)
{
        struct ci_power_info *pi = ci_get_pi(adev);

        pi->clock_registers.cg_spll_func_cntl =
                RREG32_SMC(ixCG_SPLL_FUNC_CNTL);
        pi->clock_registers.cg_spll_func_cntl_2 =
                RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2);
        pi->clock_registers.cg_spll_func_cntl_3 =
                RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3);
        pi->clock_registers.cg_spll_func_cntl_4 =
                RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4);
        pi->clock_registers.cg_spll_spread_spectrum =
                RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
        pi->clock_registers.cg_spll_spread_spectrum_2 =
                RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2);
        pi->clock_registers.dll_cntl = RREG32(mmDLL_CNTL);
        pi->clock_registers.mclk_pwrmgt_cntl = RREG32(mmMCLK_PWRMGT_CNTL);
        pi->clock_registers.mpll_ad_func_cntl = RREG32(mmMPLL_AD_FUNC_CNTL);
        pi->clock_registers.mpll_dq_func_cntl = RREG32(mmMPLL_DQ_FUNC_CNTL);
        pi->clock_registers.mpll_func_cntl = RREG32(mmMPLL_FUNC_CNTL);
        pi->clock_registers.mpll_func_cntl_1 = RREG32(mmMPLL_FUNC_CNTL_1);
        pi->clock_registers.mpll_func_cntl_2 = RREG32(mmMPLL_FUNC_CNTL_2);
        pi->clock_registers.mpll_ss1 = RREG32(mmMPLL_SS1);
        pi->clock_registers.mpll_ss2 = RREG32(mmMPLL_SS2);
}
2029 static void ci_init_sclk_t(struct amdgpu_device
*adev
)
2031 struct ci_power_info
*pi
= ci_get_pi(adev
);
2033 pi
->low_sclk_interrupt_t
= 0;
static void ci_enable_thermal_protection(struct amdgpu_device *adev,
					 bool enable)
{
	u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);

	if (enable)
		tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
	else
		tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
	WREG32_SMC(ixGENERAL_PWRMGT, tmp);
}

static void ci_enable_acpi_power_management(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);

	tmp |= GENERAL_PWRMGT__STATIC_PM_EN_MASK;

	WREG32_SMC(ixGENERAL_PWRMGT, tmp);
}
static int ci_enter_ulp_state(struct amdgpu_device *adev)
{
	WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);

	udelay(25000);

	return 0;
}

static int ci_exit_ulp_state(struct amdgpu_device *adev)
{
	int i;

	WREG32(mmSMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);

	udelay(7000);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmSMC_RESP_0) == 1)
			break;
		udelay(1000);
	}

	return 0;
}
static int ci_notify_smc_display_change(struct amdgpu_device *adev,
					bool has_display)
{
	PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;

	return (amdgpu_ci_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL;
}

static int ci_enable_ds_master_switch(struct amdgpu_device *adev,
				      bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if (enable) {
		if (pi->caps_sclk_ds) {
			if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
				return -EINVAL;
		} else {
			if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
				return -EINVAL;
		}
	} else {
		if (pi->caps_sclk_ds) {
			if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}
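/*
 * The display-gap programming below derives its timeout values from the
 * active display timing. Worked example (assumed numbers, not from any
 * specific panel): at a 60 Hz refresh rate, frame_time_in_us =
 * 1000000 / 60 = 16666 us; with a 500 us vblank,
 * pre_vbi_time_in_us = 16666 - 200 - 500 = 15966 us, and the
 * CG_DISPLAY_GAP_CNTL2 count is that time scaled by the SPLL reference
 * clock in 100 kHz units.
 */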
static void ci_program_display_gap(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
	u32 pre_vbi_time_in_us;
	u32 frame_time_in_us;
	u32 ref_clock = adev->clock.spll.reference_freq;
	u32 refresh_rate = amdgpu_dpm_get_vrefresh(adev);
	u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);

	tmp &= ~CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK;
	if (adev->pm.dpm.new_active_crtc_count > 0)
		tmp |= (AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
	else
		tmp |= (AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
	WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);

	if (refresh_rate == 0)
		refresh_rate = 60;
	if (vblank_time == 0xffffffff)
		vblank_time = 500;
	frame_time_in_us = 1000000 / refresh_rate;
	pre_vbi_time_in_us =
		frame_time_in_us - 200 - vblank_time;
	tmp = pre_vbi_time_in_us * (ref_clock / 100);

	WREG32_SMC(ixCG_DISPLAY_GAP_CNTL2, tmp);
	ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
	ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));

	ci_notify_smc_display_change(adev, (adev->pm.dpm.new_active_crtc_count == 1));
}
static void ci_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;

	if (enable) {
		if (pi->caps_sclk_ss_support) {
			tmp = RREG32_SMC(ixGENERAL_PWRMGT);
			tmp |= GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
			WREG32_SMC(ixGENERAL_PWRMGT, tmp);
		}
	} else {
		tmp = RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
		tmp &= ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK;
		WREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM, tmp);

		tmp = RREG32_SMC(ixGENERAL_PWRMGT);
		tmp &= ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
		WREG32_SMC(ixGENERAL_PWRMGT, tmp);
	}
}

static void ci_program_sstp(struct amdgpu_device *adev)
{
	WREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER,
		   ((CISLANDS_SSTU_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT__SHIFT) |
		    (CISLANDS_SST_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD__SHIFT)));
}

static void ci_enable_display_gap(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);

	tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK |
		 CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG_MASK);
	tmp |= ((AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT) |
		(AMDGPU_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG__SHIFT));

	WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
}
static void ci_program_vc(struct amdgpu_device *adev)
{
	u32 tmp;

	tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
	tmp &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
	WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, CISLANDS_VRC_DFLT0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, CISLANDS_VRC_DFLT1);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, CISLANDS_VRC_DFLT2);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, CISLANDS_VRC_DFLT3);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, CISLANDS_VRC_DFLT4);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, CISLANDS_VRC_DFLT5);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, CISLANDS_VRC_DFLT6);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, CISLANDS_VRC_DFLT7);
}

static void ci_clear_vc(struct amdgpu_device *adev)
{
	u32 tmp;

	tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
	tmp |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
	WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, 0);
}
static int ci_upload_firmware(struct amdgpu_device *adev)
{
	int i, ret;

	if (amdgpu_ci_is_smc_running(adev)) {
		DRM_INFO("smc is running, no need to load smc firmware\n");
		return 0;
	}

	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK)
			break;
	}
	WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, 1);

	amdgpu_ci_stop_smc_clock(adev);
	amdgpu_ci_reset_smc(adev);

	ret = amdgpu_ci_load_smc_ucode(adev, SMC_RAM_END);

	return ret;
}
static int ci_get_svi2_voltage_table(struct amdgpu_device *adev,
				     struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
				     struct atom_voltage_table *voltage_table)
{
	u32 i;

	if (voltage_dependency_table == NULL)
		return -EINVAL;

	voltage_table->mask_low = 0;
	voltage_table->phase_delay = 0;

	voltage_table->count = voltage_dependency_table->count;
	for (i = 0; i < voltage_table->count; i++) {
		voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
		voltage_table->entries[i].smio_low = 0;
	}

	return 0;
}
static int ci_construct_voltage_tables(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret;

	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
							VOLTAGE_OBJ_GPIO_LUT,
							&pi->vddc_voltage_table);
		if (ret)
			return ret;
	} else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		ret = ci_get_svi2_voltage_table(adev,
						&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
						&pi->vddc_voltage_table);
		if (ret)
			return ret;
	}

	if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
		ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDC,
							 &pi->vddc_voltage_table);

	if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
							VOLTAGE_OBJ_GPIO_LUT,
							&pi->vddci_voltage_table);
		if (ret)
			return ret;
	} else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		ret = ci_get_svi2_voltage_table(adev,
						&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
						&pi->vddci_voltage_table);
		if (ret)
			return ret;
	}

	if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
		ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDCI,
							 &pi->vddci_voltage_table);

	if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
							VOLTAGE_OBJ_GPIO_LUT,
							&pi->mvdd_voltage_table);
		if (ret)
			return ret;
	} else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		ret = ci_get_svi2_voltage_table(adev,
						&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
						&pi->mvdd_voltage_table);
		if (ret)
			return ret;
	}

	if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
		ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_MVDD,
							 &pi->mvdd_voltage_table);

	return 0;
}
static void ci_populate_smc_voltage_table(struct amdgpu_device *adev,
					  struct atom_voltage_table_entry *voltage_table,
					  SMU7_Discrete_VoltageLevel *smc_voltage_table)
{
	int ret;

	ret = ci_get_std_voltage_value_sidd(adev, voltage_table,
					    &smc_voltage_table->StdVoltageHiSidd,
					    &smc_voltage_table->StdVoltageLoSidd);

	if (ret) {
		smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
		smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
	}

	smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
	smc_voltage_table->StdVoltageHiSidd =
		cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
	smc_voltage_table->StdVoltageLoSidd =
		cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
}
static int ci_populate_smc_vddc_table(struct amdgpu_device *adev,
				      SMU7_Discrete_DpmTable *table)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	unsigned int count;

	table->VddcLevelCount = pi->vddc_voltage_table.count;
	for (count = 0; count < table->VddcLevelCount; count++) {
		ci_populate_smc_voltage_table(adev,
					      &pi->vddc_voltage_table.entries[count],
					      &table->VddcLevel[count]);

		if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
			table->VddcLevel[count].Smio |=
				pi->vddc_voltage_table.entries[count].smio_low;
		else
			table->VddcLevel[count].Smio = 0;
	}
	table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);

	return 0;
}

static int ci_populate_smc_vddci_table(struct amdgpu_device *adev,
				       SMU7_Discrete_DpmTable *table)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	unsigned int count;

	table->VddciLevelCount = pi->vddci_voltage_table.count;
	for (count = 0; count < table->VddciLevelCount; count++) {
		ci_populate_smc_voltage_table(adev,
					      &pi->vddci_voltage_table.entries[count],
					      &table->VddciLevel[count]);

		if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
			table->VddciLevel[count].Smio |=
				pi->vddci_voltage_table.entries[count].smio_low;
		else
			table->VddciLevel[count].Smio = 0;
	}
	table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);

	return 0;
}

static int ci_populate_smc_mvdd_table(struct amdgpu_device *adev,
				      SMU7_Discrete_DpmTable *table)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	unsigned int count;

	table->MvddLevelCount = pi->mvdd_voltage_table.count;
	for (count = 0; count < table->MvddLevelCount; count++) {
		ci_populate_smc_voltage_table(adev,
					      &pi->mvdd_voltage_table.entries[count],
					      &table->MvddLevel[count]);

		if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
			table->MvddLevel[count].Smio |=
				pi->mvdd_voltage_table.entries[count].smio_low;
		else
			table->MvddLevel[count].Smio = 0;
	}
	table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);

	return 0;
}
static int ci_populate_smc_voltage_tables(struct amdgpu_device *adev,
					  SMU7_Discrete_DpmTable *table)
{
	int ret;

	ret = ci_populate_smc_vddc_table(adev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_vddci_table(adev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_mvdd_table(adev, table);
	if (ret)
		return ret;

	return 0;
}
static int ci_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
				  SMU7_Discrete_VoltageLevel *voltage)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 i = 0;

	if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		for (i = 0; i < adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
			if (mclk <= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
				voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
				break;
			}
		}

		if (i >= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
			return -EINVAL;

		return 0;
	}

	/* no MVDD control: report failure so the caller falls back to 0 */
	return -EINVAL;
}
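/*
 * The SIDD lookup below maps a VDDC value to its "standard" hi/lo leakage
 * voltages: first an exact match is tried against the sclk/vddc dependency
 * table, then a fallback to the first entry with an equal-or-higher voltage,
 * clamping the index into the CAC leakage table when the two tables differ
 * in length. If no leakage data exists at all, the scaled input voltage is
 * kept as both values.
 */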
static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
					 struct atom_voltage_table_entry *voltage_table,
					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
{
	u16 v_index, idx;
	bool voltage_found = false;
	*std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
	*std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;

	if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
		return -EINVAL;

	if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
		for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
			if (voltage_table->value ==
			    adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
				voltage_found = true;
				if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
					idx = v_index;
				else
					idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
				*std_voltage_lo_sidd =
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
				*std_voltage_hi_sidd =
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
				break;
			}
		}

		if (!voltage_found) {
			for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
				if (voltage_table->value <=
				    adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
					voltage_found = true;
					if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
						idx = v_index;
					else
						idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
					*std_voltage_lo_sidd =
						adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
					*std_voltage_hi_sidd =
						adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
					break;
				}
			}
		}
	}

	return 0;
}
static void ci_populate_phase_value_based_on_sclk(struct amdgpu_device *adev,
						  const struct amdgpu_phase_shedding_limits_table *limits,
						  u32 sclk,
						  u32 *phase_shedding)
{
	unsigned int i;

	*phase_shedding = 1;

	for (i = 0; i < limits->count; i++) {
		if (sclk < limits->entries[i].sclk) {
			*phase_shedding = i;
			break;
		}
	}
}

static void ci_populate_phase_value_based_on_mclk(struct amdgpu_device *adev,
						  const struct amdgpu_phase_shedding_limits_table *limits,
						  u32 mclk,
						  u32 *phase_shedding)
{
	unsigned int i;

	*phase_shedding = 1;

	for (i = 0; i < limits->count; i++) {
		if (mclk < limits->entries[i].mclk) {
			*phase_shedding = i;
			break;
		}
	}
}
static int ci_init_arb_table_index(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;
	int ret;

	ret = amdgpu_ci_read_smc_sram_dword(adev, pi->arb_table_start,
					    &tmp, pi->sram_end);
	if (ret)
		return ret;

	tmp &= 0x00FFFFFF;
	tmp |= MC_CG_ARB_FREQ_F1 << 24;

	return amdgpu_ci_write_smc_sram_dword(adev, pi->arb_table_start,
					      tmp, pi->sram_end);
}
static int ci_get_dependency_volt_by_clk(struct amdgpu_device *adev,
					 struct amdgpu_clock_voltage_dependency_table *allowed_clock_voltage_table,
					 u32 clock, u32 *voltage)
{
	u32 i = 0;

	if (allowed_clock_voltage_table->count == 0)
		return -EINVAL;

	for (i = 0; i < allowed_clock_voltage_table->count; i++) {
		if (allowed_clock_voltage_table->entries[i].clk >= clock) {
			*voltage = allowed_clock_voltage_table->entries[i].v;
			return 0;
		}
	}

	*voltage = allowed_clock_voltage_table->entries[i-1].v;

	return 0;
}
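/*
 * The deep-sleep divider ID computed below is the largest power-of-two
 * shift that keeps the divided engine clock at or above the floor. Assumed
 * example: sclk = 60000 (600 MHz in 10 kHz units) with a 5000 floor gives
 * 60000 >> 4 = 3750, which undershoots, while 60000 >> 3 = 7500 does not,
 * so divider ID 3 is selected.
 */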
static u8 ci_get_sleep_divider_id_from_clock(u32 sclk, u32 min_sclk_in_sr)
{
	u32 i;
	u32 tmp;
	u32 min = max(min_sclk_in_sr, (u32)CISLAND_MINIMUM_ENGINE_CLOCK);

	if (sclk < min)
		return 0;

	for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
		tmp = sclk >> i;
		if (tmp >= min || i == 0)
			break;
	}

	return (u8)i;
}

static int ci_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
{
	return ci_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}
static int ci_reset_to_default(struct amdgpu_device *adev)
{
	return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

static int ci_force_switch_to_arb_f0(struct amdgpu_device *adev)
{
	u32 tmp;

	tmp = (RREG32_SMC(ixSMC_SCRATCH9) & 0x0000ff00) >> 8;

	if (tmp == MC_CG_ARB_FREQ_F0)
		return 0;

	return ci_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
}
static void ci_register_patching_mc_arb(struct amdgpu_device *adev,
					const u32 engine_clock,
					const u32 memory_clock,
					u32 *dram_timimg2)
{
	bool patch;
	u32 tmp, tmp2;

	tmp = RREG32(mmMC_SEQ_MISC0);
	patch = ((tmp & 0x0000f00) == 0x300) ? true : false;

	if (patch &&
	    ((adev->pdev->device == 0x67B0) ||
	     (adev->pdev->device == 0x67B1))) {
		if ((memory_clock > 100000) && (memory_clock <= 125000)) {
			tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
			*dram_timimg2 &= ~0x00ff0000;
			*dram_timimg2 |= tmp2 << 16;
		} else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
			tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
			*dram_timimg2 &= ~0x00ff0000;
			*dram_timimg2 |= tmp2 << 16;
		}
	}
}
static int ci_populate_memory_timing_parameters(struct amdgpu_device *adev,
						u32 sclk,
						u32 mclk,
						SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
{
	u32 dram_timing;
	u32 dram_timing2;
	u32 burst_time;

	amdgpu_atombios_set_engine_dram_timings(adev, sclk, mclk);

	dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING);
	dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
	burst_time = RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK;

	ci_register_patching_mc_arb(adev, sclk, mclk, &dram_timing2);

	arb_regs->McArbDramTiming  = cpu_to_be32(dram_timing);
	arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
	arb_regs->McArbBurstTime = (u8)burst_time;

	return 0;
}
static int ci_do_program_memory_timing_parameters(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	SMU7_Discrete_MCArbDramTimingTable arb_regs;
	u32 i, j;
	int ret = 0;

	memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));

	for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
		for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
			ret = ci_populate_memory_timing_parameters(adev,
								   pi->dpm_table.sclk_table.dpm_levels[i].value,
								   pi->dpm_table.mclk_table.dpm_levels[j].value,
								   &arb_regs.entries[i][j]);
			if (ret)
				break;
		}
	}

	if (ret == 0)
		ret = amdgpu_ci_copy_bytes_to_smc(adev,
						  pi->arb_table_start,
						  (u8 *)&arb_regs,
						  sizeof(SMU7_Discrete_MCArbDramTimingTable),
						  pi->sram_end);

	return ret;
}

static int ci_program_memory_timing_parameters(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if (pi->need_update_smu7_dpm_table == 0)
		return 0;

	return ci_do_program_memory_timing_parameters(adev);
}
static void ci_populate_smc_initial_state(struct amdgpu_device *adev,
					  struct amdgpu_ps *amdgpu_boot_state)
{
	struct ci_ps *boot_state = ci_get_ps(amdgpu_boot_state);
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 level = 0;

	for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
		if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
		    boot_state->performance_levels[0].sclk) {
			pi->smc_state_table.GraphicsBootLevel = level;
			break;
		}
	}

	for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
		if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
		    boot_state->performance_levels[0].mclk) {
			pi->smc_state_table.MemoryBootLevel = level;
			break;
		}
	}
}
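/*
 * The mask builder below walks the level table from the top down, shifting
 * the mask left once per level and setting bit 0 when that level is enabled,
 * so bit n of the result corresponds to dpm_levels[n]. Assumed example:
 * with three levels where levels 0 and 2 are enabled and level 1 is not,
 * the result is 0b101 = 0x5.
 */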
static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
{
	u32 i;
	u32 mask_value = 0;

	for (i = dpm_table->count; i > 0; i--) {
		mask_value = mask_value << 1;
		if (dpm_table->dpm_levels[i-1].enabled)
			mask_value |= 0x1;
		else
			mask_value &= 0xFFFFFFFE;
	}

	return mask_value;
}
static void ci_populate_smc_link_level(struct amdgpu_device *adev,
				       SMU7_Discrete_DpmTable *table)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_dpm_table *dpm_table = &pi->dpm_table;
	u32 i;

	for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
		table->LinkLevel[i].PcieGenSpeed =
			(u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
		table->LinkLevel[i].PcieLaneCount =
			amdgpu_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
		table->LinkLevel[i].EnabledForActivity = 1;
		table->LinkLevel[i].DownT = cpu_to_be32(5);
		table->LinkLevel[i].UpT = cpu_to_be32(30);
	}

	pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
}
static int ci_populate_smc_uvd_level(struct amdgpu_device *adev,
				     SMU7_Discrete_DpmTable *table)
{
	u32 count;
	struct atom_clock_dividers dividers;
	int ret = -EINVAL;

	table->UvdLevelCount =
		adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;

	for (count = 0; count < table->UvdLevelCount; count++) {
		table->UvdLevel[count].VclkFrequency =
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
		table->UvdLevel[count].DclkFrequency =
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
		table->UvdLevel[count].MinVddc =
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
		table->UvdLevel[count].MinVddcPhases = 1;

		ret = amdgpu_atombios_get_clock_dividers(adev,
							 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
							 table->UvdLevel[count].VclkFrequency, false, &dividers);
		if (ret)
			return ret;

		table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;

		ret = amdgpu_atombios_get_clock_dividers(adev,
							 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
							 table->UvdLevel[count].DclkFrequency, false, &dividers);
		if (ret)
			return ret;

		table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;

		table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
		table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
		table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
	}

	return ret;
}
static int ci_populate_smc_vce_level(struct amdgpu_device *adev,
				     SMU7_Discrete_DpmTable *table)
{
	u32 count;
	struct atom_clock_dividers dividers;
	int ret = -EINVAL;

	table->VceLevelCount =
		adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;

	for (count = 0; count < table->VceLevelCount; count++) {
		table->VceLevel[count].Frequency =
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
		table->VceLevel[count].MinVoltage =
			(u16)adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
		table->VceLevel[count].MinPhases = 1;

		ret = amdgpu_atombios_get_clock_dividers(adev,
							 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
							 table->VceLevel[count].Frequency, false, &dividers);
		if (ret)
			return ret;

		table->VceLevel[count].Divider = (u8)dividers.post_divider;

		table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
		table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
	}

	return ret;
}
static int ci_populate_smc_acp_level(struct amdgpu_device *adev,
				     SMU7_Discrete_DpmTable *table)
{
	u32 count;
	struct atom_clock_dividers dividers;
	int ret = -EINVAL;

	table->AcpLevelCount = (u8)
		(adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);

	for (count = 0; count < table->AcpLevelCount; count++) {
		table->AcpLevel[count].Frequency =
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
		table->AcpLevel[count].MinVoltage =
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
		table->AcpLevel[count].MinPhases = 1;

		ret = amdgpu_atombios_get_clock_dividers(adev,
							 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
							 table->AcpLevel[count].Frequency, false, &dividers);
		if (ret)
			return ret;

		table->AcpLevel[count].Divider = (u8)dividers.post_divider;

		table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
		table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
	}

	return ret;
}
static int ci_populate_smc_samu_level(struct amdgpu_device *adev,
				      SMU7_Discrete_DpmTable *table)
{
	u32 count;
	struct atom_clock_dividers dividers;
	int ret = -EINVAL;

	table->SamuLevelCount =
		adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;

	for (count = 0; count < table->SamuLevelCount; count++) {
		table->SamuLevel[count].Frequency =
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
		table->SamuLevel[count].MinVoltage =
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
		table->SamuLevel[count].MinPhases = 1;

		ret = amdgpu_atombios_get_clock_dividers(adev,
							 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
							 table->SamuLevel[count].Frequency, false, &dividers);
		if (ret)
			return ret;

		table->SamuLevel[count].Divider = (u8)dividers.post_divider;

		table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
		table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
	}

	return ret;
}
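/*
 * Memory PLL spread-spectrum setup in the function below: the nominal VCO
 * frequency is the memory clock scaled by the post divider and by 2x/4x for
 * DDR/QDR mode; CLKS encodes the modulation period from the reference clock
 * and ss.rate, and CLKV the modulation depth from ss.percentage. The
 * constants (5, 131) appear to be fixed-point scaling factors for the
 * MPLL_SS1/MPLL_SS2 register fields rather than derived quantities.
 */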
static int ci_calculate_mclk_params(struct amdgpu_device *adev,
				    u32 memory_clock,
				    SMU7_Discrete_MemoryLevel *mclk,
				    bool strobe_mode,
				    bool dll_state_on)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32  dll_cntl = pi->clock_registers.dll_cntl;
	u32  mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
	u32  mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
	u32  mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
	u32  mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
	u32  mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
	u32  mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
	u32  mpll_ss1 = pi->clock_registers.mpll_ss1;
	u32  mpll_ss2 = pi->clock_registers.mpll_ss2;
	struct atom_mpll_param mpll_param;
	int ret;

	ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
	if (ret)
		return ret;

	mpll_func_cntl &= ~MPLL_FUNC_CNTL__BWCTRL_MASK;
	mpll_func_cntl |= (mpll_param.bwcntl << MPLL_FUNC_CNTL__BWCTRL__SHIFT);

	mpll_func_cntl_1 &= ~(MPLL_FUNC_CNTL_1__CLKF_MASK | MPLL_FUNC_CNTL_1__CLKFRAC_MASK |
			      MPLL_FUNC_CNTL_1__VCO_MODE_MASK);
	mpll_func_cntl_1 |= (mpll_param.clkf << MPLL_FUNC_CNTL_1__CLKF__SHIFT) |
		(mpll_param.clkfrac << MPLL_FUNC_CNTL_1__CLKFRAC__SHIFT) |
		(mpll_param.vco_mode << MPLL_FUNC_CNTL_1__VCO_MODE__SHIFT);

	mpll_ad_func_cntl &= ~MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK;
	mpll_ad_func_cntl |= (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);

	if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
		mpll_dq_func_cntl &= ~(MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK |
				       MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK);
		mpll_dq_func_cntl |= (mpll_param.yclk_sel << MPLL_DQ_FUNC_CNTL__YCLK_SEL__SHIFT) |
			(mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
	}

	if (pi->caps_mclk_ss_support) {
		struct amdgpu_atom_ss ss;
		u32 freq_nom;
		u32 tmp;
		u32 reference_clock = adev->clock.mpll.reference_freq;

		if (mpll_param.qdr == 1)
			freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
		else
			freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);

		tmp = (freq_nom / reference_clock);
		tmp = tmp * tmp;
		if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
						     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
			u32 clks = reference_clock * 5 / ss.rate;
			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);

			mpll_ss1 &= ~MPLL_SS1__CLKV_MASK;
			mpll_ss1 |= (clkv << MPLL_SS1__CLKV__SHIFT);

			mpll_ss2 &= ~MPLL_SS2__CLKS_MASK;
			mpll_ss2 |= (clks << MPLL_SS2__CLKS__SHIFT);
		}
	}

	mclk_pwrmgt_cntl &= ~MCLK_PWRMGT_CNTL__DLL_SPEED_MASK;
	mclk_pwrmgt_cntl |= (mpll_param.dll_speed << MCLK_PWRMGT_CNTL__DLL_SPEED__SHIFT);

	if (dll_state_on)
		mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
			MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK;
	else
		mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
				      MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);

	mclk->MclkFrequency = memory_clock;
	mclk->MpllFuncCntl = mpll_func_cntl;
	mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
	mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
	mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
	mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
	mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
	mclk->DllCntl = dll_cntl;
	mclk->MpllSs1 = mpll_ss1;
	mclk->MpllSs2 = mpll_ss2;

	return 0;
}
static int ci_populate_single_memory_level(struct amdgpu_device *adev,
					   u32 memory_clock,
					   SMU7_Discrete_MemoryLevel *memory_level)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret;
	bool dll_state_on;

	if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
		ret = ci_get_dependency_volt_by_clk(adev,
						    &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
						    memory_clock, &memory_level->MinVddc);
		if (ret)
			return ret;
	}

	if (adev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
		ret = ci_get_dependency_volt_by_clk(adev,
						    &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
						    memory_clock, &memory_level->MinVddci);
		if (ret)
			return ret;
	}

	if (adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
		ret = ci_get_dependency_volt_by_clk(adev,
						    &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
						    memory_clock, &memory_level->MinMvdd);
		if (ret)
			return ret;
	}

	memory_level->MinVddcPhases = 1;

	if (pi->vddc_phase_shed_control)
		ci_populate_phase_value_based_on_mclk(adev,
						      &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
						      memory_clock,
						      &memory_level->MinVddcPhases);

	memory_level->EnabledForActivity = 1;
	memory_level->EnabledForThrottle = 1;
	memory_level->UpH = 0;
	memory_level->DownH = 100;
	memory_level->VoltageDownH = 0;
	memory_level->ActivityLevel = (u16)pi->mclk_activity_target;

	memory_level->StutterEnable = false;
	memory_level->StrobeEnable = false;
	memory_level->EdcReadEnable = false;
	memory_level->EdcWriteEnable = false;
	memory_level->RttEnable = false;

	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	if (pi->mclk_stutter_mode_threshold &&
	    (memory_clock <= pi->mclk_stutter_mode_threshold) &&
	    (!pi->uvd_enabled) &&
	    (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
	    (adev->pm.dpm.new_active_crtc_count <= 2))
		memory_level->StutterEnable = true;

	if (pi->mclk_strobe_mode_threshold &&
	    (memory_clock <= pi->mclk_strobe_mode_threshold))
		memory_level->StrobeEnable = 1;

	if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
		memory_level->StrobeRatio =
			ci_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
		if (pi->mclk_edc_enable_threshold &&
		    (memory_clock > pi->mclk_edc_enable_threshold))
			memory_level->EdcReadEnable = true;

		if (pi->mclk_edc_wr_enable_threshold &&
		    (memory_clock > pi->mclk_edc_wr_enable_threshold))
			memory_level->EdcWriteEnable = true;

		if (memory_level->StrobeEnable) {
			if (ci_get_mclk_frequency_ratio(memory_clock, true) >=
			    ((RREG32(mmMC_SEQ_MISC7) >> 16) & 0xf))
				dll_state_on = ((RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
			else
				dll_state_on = ((RREG32(mmMC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
		} else {
			dll_state_on = pi->dll_default_on;
		}
	} else {
		memory_level->StrobeRatio = ci_get_ddr3_mclk_frequency_ratio(memory_clock);
		dll_state_on = ((RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
	}

	ret = ci_calculate_mclk_params(adev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
	if (ret)
		return ret;

	memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
	memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
	memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
	memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);

	memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
	memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
	memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
	memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
	memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
	memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
	memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
	memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
	memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
	memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
	memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);

	return 0;
}
static int ci_populate_smc_acpi_level(struct amdgpu_device *adev,
				      SMU7_Discrete_DpmTable *table)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct atom_clock_dividers dividers;
	SMU7_Discrete_VoltageLevel voltage_level;
	u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
	u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
	u32 dll_cntl = pi->clock_registers.dll_cntl;
	u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
	int ret;

	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;

	if (pi->acpi_vddc)
		table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
	else
		table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);

	table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;

	table->ACPILevel.SclkFrequency = adev->clock.spll.reference_freq;

	ret = amdgpu_atombios_get_clock_dividers(adev,
						 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
						 table->ACPILevel.SclkFrequency, false, &dividers);
	if (ret)
		return ret;

	table->ACPILevel.SclkDid = (u8)dividers.post_divider;
	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
	table->ACPILevel.DeepSleepDivId = 0;

	spll_func_cntl &= ~CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK;
	spll_func_cntl |= CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK;

	spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= (4 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT);

	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
	table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
	table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
	table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
	table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
	table->ACPILevel.CcPwrDynRm = 0;
	table->ACPILevel.CcPwrDynRm1 = 0;

	table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
	table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
	table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
	table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
	table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
	table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
	table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
	table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
	table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
	table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
	table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);

	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;

	if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		if (pi->acpi_vddci)
			table->MemoryACPILevel.MinVddci =
				cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
		else
			table->MemoryACPILevel.MinVddci =
				cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
	}

	if (ci_populate_mvdd_value(adev, 0, &voltage_level))
		table->MemoryACPILevel.MinMvdd = 0;
	else
		table->MemoryACPILevel.MinMvdd =
			cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);

	mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_RESET_MASK |
		MCLK_PWRMGT_CNTL__MRDCK1_RESET_MASK;
	mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
			      MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);

	dll_cntl &= ~(DLL_CNTL__MRDCK0_BYPASS_MASK | DLL_CNTL__MRDCK1_BYPASS_MASK);

	table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
	table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
	table->MemoryACPILevel.MpllAdFuncCntl =
		cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
	table->MemoryACPILevel.MpllDqFuncCntl =
		cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
	table->MemoryACPILevel.MpllFuncCntl =
		cpu_to_be32(pi->clock_registers.mpll_func_cntl);
	table->MemoryACPILevel.MpllFuncCntl_1 =
		cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
	table->MemoryACPILevel.MpllFuncCntl_2 =
		cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
	table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
	table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);

	table->MemoryACPILevel.EnabledForThrottle = 0;
	table->MemoryACPILevel.EnabledForActivity = 0;
	table->MemoryACPILevel.UpH = 0;
	table->MemoryACPILevel.DownH = 100;
	table->MemoryACPILevel.VoltageDownH = 0;
	table->MemoryACPILevel.ActivityLevel =
		cpu_to_be16((u16)pi->mclk_activity_target);

	table->MemoryACPILevel.StutterEnable = false;
	table->MemoryACPILevel.StrobeEnable = false;
	table->MemoryACPILevel.EdcReadEnable = false;
	table->MemoryACPILevel.EdcWriteEnable = false;
	table->MemoryACPILevel.RttEnable = false;

	return 0;
}
static int ci_enable_ulv(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ulv_parm *ulv = &pi->ulv;

	if (ulv->supported) {
		if (enable)
			return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
				0 : -EINVAL;
		else
			return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
				0 : -EINVAL;
	}

	return 0;
}
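/*
 * ULV offset programming below: on SVI2 parts the offset is converted from
 * a voltage delta into VID steps of 6.25 mV, i.e. delta *
 * VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1 = delta * 100 / 625.
 * Assumed example: a 50 mV delta encodes as 50 * 100 / 625 = 8 VID steps.
 */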
static int ci_populate_ulv_level(struct amdgpu_device *adev,
				 SMU7_Discrete_Ulv *state)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u16 ulv_voltage = adev->pm.dpm.backbias_response_time;

	state->CcPwrDynRm = 0;
	state->CcPwrDynRm1 = 0;

	if (ulv_voltage == 0) {
		pi->ulv.supported = false;
		return 0;
	}

	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
			state->VddcOffset = 0;
		else
			state->VddcOffset =
				adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
	} else {
		if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
			state->VddcOffsetVid = 0;
		else
			state->VddcOffsetVid = (u8)
				((adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
				 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
	}
	state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;

	state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
	state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
	state->VddcOffset = cpu_to_be16(state->VddcOffset);

	return 0;
}
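/*
 * Engine PLL parameter setup in the function below: the feedback divider
 * from the VBIOS divider query is programmed into a 26-bit fixed-point
 * field, and spread spectrum, when supported, derives CLKS from the
 * reference clock and ss.rate and CLKV from ss.percentage and the feedback
 * divider, mirroring the memory PLL path above.
 */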
static int ci_calculate_sclk_params(struct amdgpu_device *adev,
				    u32 engine_clock,
				    SMU7_Discrete_GraphicsLevel *sclk)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct atom_clock_dividers dividers;
	u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
	u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
	u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
	u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 reference_divider;
	u32 fbdiv;
	int ret;

	ret = amdgpu_atombios_get_clock_dividers(adev,
						 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
						 engine_clock, false, &dividers);
	if (ret)
		return ret;

	reference_divider = 1 + dividers.ref_div;
	fbdiv = dividers.fb_div & 0x3FFFFFF;

	spll_func_cntl_3 &= ~CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK;
	spll_func_cntl_3 |= (fbdiv << CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT);
	spll_func_cntl_3 |= CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK;

	if (pi->caps_sclk_ss_support) {
		struct amdgpu_atom_ss ss;
		u32 vco_freq = engine_clock * dividers.post_div;

		if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);

			cg_spll_spread_spectrum &= ~(CG_SPLL_SPREAD_SPECTRUM__CLKS_MASK | CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK);
			cg_spll_spread_spectrum |= (clk_s << CG_SPLL_SPREAD_SPECTRUM__CLKS__SHIFT);
			cg_spll_spread_spectrum |= (1 << CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT);

			cg_spll_spread_spectrum_2 &= ~CG_SPLL_SPREAD_SPECTRUM_2__CLKV_MASK;
			cg_spll_spread_spectrum_2 |= (clk_v << CG_SPLL_SPREAD_SPECTRUM_2__CLKV__SHIFT);
		}
	}

	sclk->SclkFrequency = engine_clock;
	sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
	sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
	sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
	sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
	sclk->SclkDid = (u8)dividers.post_divider;

	return 0;
}
static int ci_populate_single_graphic_level(struct amdgpu_device *adev,
					    u32 engine_clock,
					    u16 sclk_activity_level_t,
					    SMU7_Discrete_GraphicsLevel *graphic_level)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret;

	ret = ci_calculate_sclk_params(adev, engine_clock, graphic_level);
	if (ret)
		return ret;

	ret = ci_get_dependency_volt_by_clk(adev,
					    &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
					    engine_clock, &graphic_level->MinVddc);
	if (ret)
		return ret;

	graphic_level->SclkFrequency = engine_clock;

	graphic_level->Flags = 0;
	graphic_level->MinVddcPhases = 1;

	if (pi->vddc_phase_shed_control)
		ci_populate_phase_value_based_on_sclk(adev,
						      &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
						      engine_clock,
						      &graphic_level->MinVddcPhases);

	graphic_level->ActivityLevel = sclk_activity_level_t;

	graphic_level->CcPwrDynRm = 0;
	graphic_level->CcPwrDynRm1 = 0;
	graphic_level->EnabledForThrottle = 1;
	graphic_level->UpH = 0;
	graphic_level->DownH = 0;
	graphic_level->VoltageDownH = 0;
	graphic_level->PowerThrottle = 0;

	if (pi->caps_sclk_ds)
		graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(engine_clock,
										   CISLAND_MINIMUM_ENGINE_CLOCK);

	graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
	graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
	graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
	graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
	graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
	graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
	graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
	graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
	graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
	graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
	graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);

	return 0;
}
static int ci_populate_all_graphic_levels(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_dpm_table *dpm_table = &pi->dpm_table;
	u32 level_array_address = pi->dpm_table_start +
		offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
	u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
		SMU7_MAX_LEVELS_GRAPHICS;
	SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
	u32 i, ret;

	memset(levels, 0, level_array_size);

	for (i = 0; i < dpm_table->sclk_table.count; i++) {
		ret = ci_populate_single_graphic_level(adev,
						       dpm_table->sclk_table.dpm_levels[i].value,
						       (u16)pi->activity_target[i],
						       &pi->smc_state_table.GraphicsLevel[i]);
		if (ret)
			return ret;
		if (i > 1)
			pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
		if (i == (dpm_table->sclk_table.count - 1))
			pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
				PPSMC_DISPLAY_WATERMARK_HIGH;
	}
	pi->smc_state_table.GraphicsLevel[0].EnabledForActivity = 1;

	pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);

	ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
					  (u8 *)levels, level_array_size,
					  pi->sram_end);
	if (ret)
		return ret;

	return 0;
}

static int ci_populate_ulv_state(struct amdgpu_device *adev,
				 SMU7_Discrete_Ulv *ulv_level)
{
	return ci_populate_ulv_level(adev, ulv_level);
}
static int ci_populate_all_memory_levels(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_dpm_table *dpm_table = &pi->dpm_table;
	u32 level_array_address = pi->dpm_table_start +
		offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
	u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
		SMU7_MAX_LEVELS_MEMORY;
	SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
	u32 i, ret;

	memset(levels, 0, level_array_size);

	for (i = 0; i < dpm_table->mclk_table.count; i++) {
		if (dpm_table->mclk_table.dpm_levels[i].value == 0)
			return -EINVAL;
		ret = ci_populate_single_memory_level(adev,
						      dpm_table->mclk_table.dpm_levels[i].value,
						      &pi->smc_state_table.MemoryLevel[i]);
		if (ret)
			return ret;
	}

	if ((dpm_table->mclk_table.count >= 2) &&
	    ((adev->pdev->device == 0x67B0) || (adev->pdev->device == 0x67B1))) {
		pi->smc_state_table.MemoryLevel[1].MinVddc =
			pi->smc_state_table.MemoryLevel[0].MinVddc;
		pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
			pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
	}

	pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);

	pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);

	pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
		PPSMC_DISPLAY_WATERMARK_HIGH;

	ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
					  (u8 *)levels, level_array_size,
					  pi->sram_end);
	if (ret)
		return ret;

	return 0;
}
static void ci_reset_single_dpm_table(struct amdgpu_device *adev,
				      struct ci_single_dpm_table *dpm_table,
				      u32 count)
{
	u32 i;

	dpm_table->count = count;
	for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
		dpm_table->dpm_levels[i].enabled = false;
}

static void ci_setup_pcie_table_entry(struct ci_single_dpm_table *dpm_table,
				      u32 index, u32 pcie_gen, u32 pcie_lanes)
{
	dpm_table->dpm_levels[index].value = pcie_gen;
	dpm_table->dpm_levels[index].param1 = pcie_lanes;
	dpm_table->dpm_levels[index].enabled = true;
}
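/*
 * The default PCIe DPM table built below always carries six entries,
 * alternating between the power-saving and performance gen/lane
 * combinations from their respective minimums up to their maximums (with
 * Bonaire using the maximum lane count already at entry 0), so the SMC has
 * a link state to pair with each performance level.
 */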
static int ci_setup_default_pcie_tables(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
		return -EINVAL;

	if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
		pi->pcie_gen_powersaving = pi->pcie_gen_performance;
		pi->pcie_lane_powersaving = pi->pcie_lane_performance;
	} else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
		pi->pcie_gen_performance = pi->pcie_gen_powersaving;
		pi->pcie_lane_performance = pi->pcie_lane_powersaving;
	}

	ci_reset_single_dpm_table(adev,
				  &pi->dpm_table.pcie_speed_table,
				  SMU7_MAX_LEVELS_LINK);

	if (adev->asic_type == CHIP_BONAIRE)
		ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
					  pi->pcie_gen_powersaving.min,
					  pi->pcie_lane_powersaving.max);
	else
		ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
					  pi->pcie_gen_powersaving.min,
					  pi->pcie_lane_powersaving.min);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
				  pi->pcie_gen_performance.min,
				  pi->pcie_lane_performance.min);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
				  pi->pcie_gen_powersaving.min,
				  pi->pcie_lane_powersaving.max);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
				  pi->pcie_gen_performance.min,
				  pi->pcie_lane_performance.max);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
				  pi->pcie_gen_powersaving.max,
				  pi->pcie_lane_powersaving.max);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
				  pi->pcie_gen_performance.max,
				  pi->pcie_lane_performance.max);

	pi->dpm_table.pcie_speed_table.count = 6;

	return 0;
}
static int ci_setup_default_dpm_tables(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	struct amdgpu_clock_voltage_dependency_table *allowed_mclk_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
	struct amdgpu_cac_leakage_table *std_voltage_table =
		&adev->pm.dpm.dyn_state.cac_leakage_table;
	u32 i;

	if (allowed_sclk_vddc_table == NULL)
		return -EINVAL;
	if (allowed_sclk_vddc_table->count < 1)
		return -EINVAL;
	if (allowed_mclk_table == NULL)
		return -EINVAL;
	if (allowed_mclk_table->count < 1)
		return -EINVAL;

	memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));

	ci_reset_single_dpm_table(adev,
				  &pi->dpm_table.sclk_table,
				  SMU7_MAX_LEVELS_GRAPHICS);
	ci_reset_single_dpm_table(adev,
				  &pi->dpm_table.mclk_table,
				  SMU7_MAX_LEVELS_MEMORY);
	ci_reset_single_dpm_table(adev,
				  &pi->dpm_table.vddc_table,
				  SMU7_MAX_LEVELS_VDDC);
	ci_reset_single_dpm_table(adev,
				  &pi->dpm_table.vddci_table,
				  SMU7_MAX_LEVELS_VDDCI);
	ci_reset_single_dpm_table(adev,
				  &pi->dpm_table.mvdd_table,
				  SMU7_MAX_LEVELS_MVDD);

	pi->dpm_table.sclk_table.count = 0;
	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
		if ((i == 0) ||
		    (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
		     allowed_sclk_vddc_table->entries[i].clk)) {
			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
				allowed_sclk_vddc_table->entries[i].clk;
			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
				(i == 0) ? true : false;
			pi->dpm_table.sclk_table.count++;
		}
	}

	pi->dpm_table.mclk_table.count = 0;
	for (i = 0; i < allowed_mclk_table->count; i++) {
		if ((i == 0) ||
		    (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
		     allowed_mclk_table->entries[i].clk)) {
			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
				allowed_mclk_table->entries[i].clk;
			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
				(i == 0) ? true : false;
			pi->dpm_table.mclk_table.count++;
		}
	}

	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
		pi->dpm_table.vddc_table.dpm_levels[i].value =
			allowed_sclk_vddc_table->entries[i].v;
		pi->dpm_table.vddc_table.dpm_levels[i].param1 =
			std_voltage_table->entries[i].leakage;
		pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
	}
	pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;

	allowed_mclk_table = &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
	if (allowed_mclk_table) {
		for (i = 0; i < allowed_mclk_table->count; i++) {
			pi->dpm_table.vddci_table.dpm_levels[i].value =
				allowed_mclk_table->entries[i].v;
			pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
		}
		pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
	}

	allowed_mclk_table = &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
	if (allowed_mclk_table) {
		for (i = 0; i < allowed_mclk_table->count; i++) {
			pi->dpm_table.mvdd_table.dpm_levels[i].value =
				allowed_mclk_table->entries[i].v;
			pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
		}
		pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
	}

	ci_setup_default_pcie_tables(adev);

	/* save a copy of the default DPM table */
	memcpy(&(pi->golden_dpm_table), &(pi->dpm_table),
	       sizeof(struct ci_dpm_table));

	return 0;
}
static int ci_find_boot_level(struct ci_single_dpm_table *table,
			      u32 value, u32 *boot_level)
{
	u32 i;
	int ret = -EINVAL;

	for(i = 0; i < table->count; i++) {
		if (value == table->dpm_levels[i].value) {
			*boot_level = i;
			ret = 0;
		}
	}

	return ret;
}
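/*
 * The SMC consumes its state tables in big-endian layout, which is why
 * every multi-byte field assembled in the function below passes through
 * cpu_to_be16()/cpu_to_be32() before the block is copied into SMC RAM;
 * only byte-sized fields are stored directly.
 */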
static int ci_init_smc_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ulv_parm *ulv = &pi->ulv;
	struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;
	int ret;

	ret = ci_setup_default_dpm_tables(adev);
	if (ret)
		return ret;

	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE)
		ci_populate_smc_voltage_tables(adev, table);

	ci_init_fps_limits(adev);

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;

	if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

	if (ulv->supported) {
		ret = ci_populate_ulv_state(adev, &pi->smc_state_table.Ulv);
		if (ret)
			return ret;
		WREG32_SMC(ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
	}

	ret = ci_populate_all_graphic_levels(adev);
	if (ret)
		return ret;

	ret = ci_populate_all_memory_levels(adev);
	if (ret)
		return ret;

	ci_populate_smc_link_level(adev, table);

	ret = ci_populate_smc_acpi_level(adev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_vce_level(adev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_acp_level(adev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_samu_level(adev, table);
	if (ret)
		return ret;

	ret = ci_do_program_memory_timing_parameters(adev);
	if (ret)
		return ret;

	ret = ci_populate_smc_uvd_level(adev, table);
	if (ret)
		return ret;

	table->UvdBootLevel  = 0;
	table->VceBootLevel  = 0;
	table->AcpBootLevel  = 0;
	table->SamuBootLevel  = 0;
	table->GraphicsBootLevel  = 0;
	table->MemoryBootLevel  = 0;

	ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
				 pi->vbios_boot_state.sclk_bootup_value,
				 (u32 *)&pi->smc_state_table.GraphicsBootLevel);

	ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
				 pi->vbios_boot_state.mclk_bootup_value,
				 (u32 *)&pi->smc_state_table.MemoryBootLevel);

	table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
	table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
	table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;

	ci_populate_smc_initial_state(adev, amdgpu_boot_state);

	ret = ci_populate_bapm_parameters_in_dpm_table(adev);
	if (ret)
		return ret;

	table->UVDInterval = 1;
	table->VCEInterval = 1;
	table->ACPInterval = 1;
	table->SAMUInterval = 1;
	table->GraphicsVoltageChangeEnable = 1;
	table->GraphicsThermThrottleEnable = 1;
	table->GraphicsInterval = 1;
	table->VoltageInterval = 1;
	table->ThermalInterval = 1;
	table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
					     CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
	table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
					    CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
	table->MemoryVoltageChangeEnable = 1;
	table->MemoryInterval = 1;
	table->VoltageResponseTime = 0;
	table->VddcVddciDelta = 4000;
	table->PhaseResponseTime = 0;
	table->MemoryThermThrottleEnable = 1;
	table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
	table->PCIeGenInterval = 1;
	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
		table->SVI2Enable  = 1;
	else
		table->SVI2Enable  = 0;

	table->ThermGpio  = 17;
	table->SclkStepSize = 0x4000;

	table->SystemFlags = cpu_to_be32(table->SystemFlags);
	table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
	table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
	table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
	table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
	table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
	table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
	table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
	table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
	table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
	table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
	table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
	table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
	table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);

	ret = amdgpu_ci_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Discrete_DpmTable, SystemFlags),
					  (u8 *)&table->SystemFlags,
					  sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
					  pi->sram_end);
	if (ret)
		return ret;

	return 0;
}

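/*
 * State trimming: disable any DPM levels that fall outside the requested
 * power state's low/high limits so only usable levels are advertised to
 * the SMC; duplicate PCIe speed/lane entries are also dropped.
 */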
static void ci_trim_single_dpm_states(struct amdgpu_device *adev,
				      struct ci_single_dpm_table *dpm_table,
				      u32 low_limit, u32 high_limit)
{
	u32 i;

	for (i = 0; i < dpm_table->count; i++) {
		if ((dpm_table->dpm_levels[i].value < low_limit) ||
		    (dpm_table->dpm_levels[i].value > high_limit))
			dpm_table->dpm_levels[i].enabled = false;
		else
			dpm_table->dpm_levels[i].enabled = true;
	}
}

static void ci_trim_pcie_dpm_states(struct amdgpu_device *adev,
				    u32 speed_low, u32 lanes_low,
				    u32 speed_high, u32 lanes_high)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
	u32 i, j;

	for (i = 0; i < pcie_table->count; i++) {
		if ((pcie_table->dpm_levels[i].value < speed_low) ||
		    (pcie_table->dpm_levels[i].param1 < lanes_low) ||
		    (pcie_table->dpm_levels[i].value > speed_high) ||
		    (pcie_table->dpm_levels[i].param1 > lanes_high))
			pcie_table->dpm_levels[i].enabled = false;
		else
			pcie_table->dpm_levels[i].enabled = true;
	}

	for (i = 0; i < pcie_table->count; i++) {
		if (pcie_table->dpm_levels[i].enabled) {
			for (j = i + 1; j < pcie_table->count; j++) {
				if (pcie_table->dpm_levels[j].enabled) {
					if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
					    (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
						pcie_table->dpm_levels[j].enabled = false;
				}
			}
		}
	}
}

static int ci_trim_dpm_states(struct amdgpu_device *adev,
			      struct amdgpu_ps *amdgpu_state)
{
	struct ci_ps *state = ci_get_ps(amdgpu_state);
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 high_limit_count;

	if (state->performance_level_count < 1)
		return -EINVAL;

	if (state->performance_level_count == 1)
		high_limit_count = 0;
	else
		high_limit_count = 1;

	ci_trim_single_dpm_states(adev,
				  &pi->dpm_table.sclk_table,
				  state->performance_levels[0].sclk,
				  state->performance_levels[high_limit_count].sclk);

	ci_trim_single_dpm_states(adev,
				  &pi->dpm_table.mclk_table,
				  state->performance_levels[0].mclk,
				  state->performance_levels[high_limit_count].mclk);

	ci_trim_pcie_dpm_states(adev,
				state->performance_levels[0].pcie_gen,
				state->performance_levels[0].pcie_lane,
				state->performance_levels[high_limit_count].pcie_gen,
				state->performance_levels[high_limit_count].pcie_lane);

	return 0;
}

static int ci_apply_disp_minimum_voltage_request(struct amdgpu_device *adev)
{
	struct amdgpu_clock_voltage_dependency_table *disp_voltage_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
	struct amdgpu_clock_voltage_dependency_table *vddc_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 requested_voltage = 0;
	u32 i;

	if (disp_voltage_table == NULL)
		return -EINVAL;
	if (!disp_voltage_table->count)
		return -EINVAL;

	for (i = 0; i < disp_voltage_table->count; i++) {
		if (adev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
			requested_voltage = disp_voltage_table->entries[i].v;
	}

	for (i = 0; i < vddc_table->count; i++) {
		if (requested_voltage <= vddc_table->entries[i].v) {
			requested_voltage = vddc_table->entries[i].v;
			return (amdgpu_ci_send_msg_to_smc_with_parameter(adev,
									 PPSMC_MSG_VddC_Request,
									 requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
				0 : -EINVAL;
		}
	}

	return -EINVAL;
}

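/*
 * Push the sclk/mclk/pcie level-enable bitmasks to the SMC; each mask is
 * skipped if the corresponding DPM key is disabled or the mask is empty.
 */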
static int ci_upload_dpm_level_enable_mask(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result result;

	ci_apply_disp_minimum_voltage_request(adev);

	if (!pi->sclk_dpm_key_disabled) {
		if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
			result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
									  PPSMC_MSG_SCLKDPM_SetEnabledMask,
									  pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
			if (result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	if (!pi->mclk_dpm_key_disabled) {
		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
			result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
									  PPSMC_MSG_MCLKDPM_SetEnabledMask,
									  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
			if (result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	if (!pi->pcie_dpm_key_disabled) {
		if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
			result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
									  PPSMC_MSG_PCIeDPM_SetEnabledMask,
									  pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
			if (result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}

static void ci_find_dpm_states_clocks_in_dpm_table(struct amdgpu_device *adev,
						   struct amdgpu_ps *amdgpu_state)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *state = ci_get_ps(amdgpu_state);
	struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
	struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
	u32 i;

	pi->need_update_smu7_dpm_table = 0;

	for (i = 0; i < sclk_table->count; i++) {
		if (sclk == sclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= sclk_table->count) {
		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
	} else {
		/* XXX check display min clock requirements */
		if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
			pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
	}

	for (i = 0; i < mclk_table->count; i++) {
		if (mclk == mclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= mclk_table->count)
		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;

	if (adev->pm.dpm.current_active_crtc_count !=
	    adev->pm.dpm.new_active_crtc_count)
		pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
}

static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct amdgpu_device *adev,
						       struct amdgpu_ps *amdgpu_state)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *state = ci_get_ps(amdgpu_state);
	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
	struct ci_dpm_table *dpm_table = &pi->dpm_table;
	int ret;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
		dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;

	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
		dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;

	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
		ret = ci_populate_all_graphic_levels(adev);
		if (ret)
			return ret;
	}

	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
		ret = ci_populate_all_memory_levels(adev);
		if (ret)
			return ret;
	}

	return 0;
}

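/*
 * The UVD/VCE/SAMU/ACP helpers below all follow the same pattern: build an
 * enable mask of dependency-table levels whose voltage fits under the
 * current AC/DC limit, hand the mask to the SMC, then send the matching
 * DPM enable/disable message.
 */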
static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct amdgpu_clock_and_voltage_limits *max_limits;
	int i;

	if (adev->pm.ac_power)
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;

		for (i = adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;

				if (!pi->caps_uvd_dpm)
					break;
			}
		}

		amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							 PPSMC_MSG_UVDDPM_SetEnabledMask,
							 pi->dpm_level_enable_mask.uvd_dpm_enable_mask);

		if (pi->last_mclk_dpm_enable_mask & 0x1) {
			pi->uvd_enabled = true;
			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
								 PPSMC_MSG_MCLKDPM_SetEnabledMask,
								 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
		}
	} else {
		if (pi->uvd_enabled) {
			pi->uvd_enabled = false;
			pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
								 PPSMC_MSG_MCLKDPM_SetEnabledMask,
								 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
		}
	}

	return (amdgpu_ci_send_msg_to_smc(adev, enable ?
					  PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

static int ci_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct amdgpu_clock_and_voltage_limits *max_limits;
	int i;

	if (adev->pm.ac_power)
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
		for (i = adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;

				if (!pi->caps_vce_dpm)
					break;
			}
		}

		amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							 PPSMC_MSG_VCEDPM_SetEnabledMask,
							 pi->dpm_level_enable_mask.vce_dpm_enable_mask);
	}

	return (amdgpu_ci_send_msg_to_smc(adev, enable ?
					  PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

static int ci_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct amdgpu_clock_and_voltage_limits *max_limits;
	int i;

	if (adev->pm.ac_power)
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
		for (i = adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;

				if (!pi->caps_samu_dpm)
					break;
			}
		}

		amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							 PPSMC_MSG_SAMUDPM_SetEnabledMask,
							 pi->dpm_level_enable_mask.samu_dpm_enable_mask);
	}

	return (amdgpu_ci_send_msg_to_smc(adev, enable ?
					  PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

static int ci_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct amdgpu_clock_and_voltage_limits *max_limits;
	int i;

	if (adev->pm.ac_power)
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
		for (i = adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;

				if (!pi->caps_acp_dpm)
					break;
			}
		}

		amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							 PPSMC_MSG_ACPDPM_SetEnabledMask,
							 pi->dpm_level_enable_mask.acp_dpm_enable_mask);
	}

	return (amdgpu_ci_send_msg_to_smc(adev, enable ?
					  PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;
	int ret = 0;

	if (!gate) {
		/* turn the clocks on when decoding */
		if (pi->caps_uvd_dpm ||
		    (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
			pi->smc_state_table.UvdBootLevel = 0;
		else
			pi->smc_state_table.UvdBootLevel =
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;

		tmp = RREG32_SMC(ixDPM_TABLE_475);
		tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK;
		tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT);
		WREG32_SMC(ixDPM_TABLE_475, tmp);
		ret = ci_enable_uvd_dpm(adev, true);
	} else {
		ret = ci_enable_uvd_dpm(adev, false);
	}

	return ret;
}

static u8 ci_get_vce_boot_level(struct amdgpu_device *adev)
{
	u32 i;
	u32 min_evclk = 30000; /* ??? */
	struct amdgpu_vce_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].evclk >= min_evclk)
			return i;
	}

	return table->count - 1;
}

static int ci_update_vce_dpm(struct amdgpu_device *adev,
			     struct amdgpu_ps *amdgpu_new_state,
			     struct amdgpu_ps *amdgpu_current_state)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret = 0;
	u32 tmp;

	if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) {
		if (amdgpu_new_state->evclk) {
			pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev);
			tmp = RREG32_SMC(ixDPM_TABLE_475);
			tmp &= ~DPM_TABLE_475__VceBootLevel_MASK;
			tmp |= (pi->smc_state_table.VceBootLevel << DPM_TABLE_475__VceBootLevel__SHIFT);
			WREG32_SMC(ixDPM_TABLE_475, tmp);

			ret = ci_enable_vce_dpm(adev, true);
		} else {
			ret = ci_enable_vce_dpm(adev, false);
		}
	}
	return ret;
}

static int ci_update_samu_dpm(struct amdgpu_device *adev, bool gate)
{
	return ci_enable_samu_dpm(adev, gate);
}

static int ci_update_acp_dpm(struct amdgpu_device *adev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;

	if (!gate) {
		pi->smc_state_table.AcpBootLevel = 0;

		tmp = RREG32_SMC(ixDPM_TABLE_475);
		tmp &= ~AcpBootLevel_MASK;
		tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
		WREG32_SMC(ixDPM_TABLE_475, tmp);
	}

	return ci_enable_acp_dpm(adev, !gate);
}

static int ci_generate_dpm_level_enable_mask(struct amdgpu_device *adev,
					     struct amdgpu_ps *amdgpu_state)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret;

	ret = ci_trim_dpm_states(adev, amdgpu_state);
	if (ret)
		return ret;

	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
	pi->last_mclk_dpm_enable_mask =
		pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
	if (pi->uvd_enabled) {
		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
	}
	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);

	return 0;
}

static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev,
				       u32 level_mask)
{
	u32 level = 0;

	while ((level_mask & (1 << level)) == 0)
		level++;

	return level;
}

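/*
 * Forcing a performance level sends the SMC a forced-state message and then
 * polls TARGET_AND_CURRENT_PROFILE_INDEX (bounded by usec_timeout) until the
 * current index matches the requested level.
 */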
static int ci_dpm_force_performance_level(void *handle,
					  enum amd_dpm_forced_level level)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp, levels, i;
	int ret;

	if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
		if ((!pi->pcie_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
			levels = 0;
			tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
			while (tmp >>= 1)
				levels++;
			if (levels) {
				ret = ci_dpm_force_state_pcie(adev, level);
				if (ret)
					return ret;
				for (i = 0; i < adev->usec_timeout; i++) {
					tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
					       TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
						TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
					if (tmp == levels)
						break;
					udelay(1);
				}
			}
		}
		if ((!pi->sclk_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
			levels = 0;
			tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
			while (tmp >>= 1)
				levels++;
			if (levels) {
				ret = ci_dpm_force_state_sclk(adev, levels);
				if (ret)
					return ret;
				for (i = 0; i < adev->usec_timeout; i++) {
					tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
					       TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
						TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
					if (tmp == levels)
						break;
					udelay(1);
				}
			}
		}
		if ((!pi->mclk_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
			levels = 0;
			tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
			while (tmp >>= 1)
				levels++;
			if (levels) {
				ret = ci_dpm_force_state_mclk(adev, levels);
				if (ret)
					return ret;
				for (i = 0; i < adev->usec_timeout; i++) {
					tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
					       TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
						TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
					if (tmp == levels)
						break;
					udelay(1);
				}
			}
		}
	} else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
		if ((!pi->sclk_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
			levels = ci_get_lowest_enabled_level(adev,
							     pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
			ret = ci_dpm_force_state_sclk(adev, levels);
			if (ret)
				return ret;
			for (i = 0; i < adev->usec_timeout; i++) {
				tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
				       TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
					TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
				if (tmp == levels)
					break;
				udelay(1);
			}
		}
		if ((!pi->mclk_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
			levels = ci_get_lowest_enabled_level(adev,
							     pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
			ret = ci_dpm_force_state_mclk(adev, levels);
			if (ret)
				return ret;
			for (i = 0; i < adev->usec_timeout; i++) {
				tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
				       TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
					TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
				if (tmp == levels)
					break;
				udelay(1);
			}
		}
		if ((!pi->pcie_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
			levels = ci_get_lowest_enabled_level(adev,
							     pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
			ret = ci_dpm_force_state_pcie(adev, levels);
			if (ret)
				return ret;
			for (i = 0; i < adev->usec_timeout; i++) {
				tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
				       TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
					TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
				if (tmp == levels)
					break;
				udelay(1);
			}
		}
	} else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
		if (!pi->pcie_dpm_key_disabled) {
			PPSMC_Result smc_result;

			smc_result = amdgpu_ci_send_msg_to_smc(adev,
							       PPSMC_MSG_PCIeDPM_UnForceLevel);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}
		ret = ci_upload_dpm_level_enable_mask(adev);
		if (ret)
			return ret;
	}

	adev->pm.dpm.forced_level = level;

	return 0;
}

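/*
 * MC register table handling: mirror selected MC sequencer registers into
 * their _LP shadow slots and derive the extra EMRS/MRS/MRS1 entries the SMC
 * needs for dynamic AC timing.
 */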
static int ci_set_mc_special_registers(struct amdgpu_device *adev,
				       struct ci_mc_reg_table *table)
{
	u8 i, j, k;
	u32 temp_reg;

	for (i = 0, j = table->last; i < table->last; i++) {
		if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
			return -EINVAL;
		switch(table->mc_reg_address[i].s1) {
		case mmMC_SEQ_MISC1:
			temp_reg = RREG32(mmMC_PMG_CMD_EMRS);
			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
			}
			j++;

			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;
			temp_reg = RREG32(mmMC_PMG_CMD_MRS);
			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
				if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
			}
			j++;

			if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
				if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
					return -EINVAL;
				table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
				table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
				for (k = 0; k < table->num_entries; k++) {
					table->mc_reg_table_entry[k].mc_data[j] =
						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
				}
				j++;
			}
			break;
		case mmMC_SEQ_RESERVE_M:
			temp_reg = RREG32(mmMC_PMG_CMD_MRS1);
			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
			}
			j++;
			break;
		default:
			break;
		}
	}

	table->last = j;

	return 0;
}

static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
{
	bool result = true;

	switch(in_reg) {
	case mmMC_SEQ_RAS_TIMING:
		*out_reg = mmMC_SEQ_RAS_TIMING_LP;
		break;
	case mmMC_SEQ_DLL_STBY:
		*out_reg = mmMC_SEQ_DLL_STBY_LP;
		break;
	case mmMC_SEQ_G5PDX_CMD0:
		*out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
		break;
	case mmMC_SEQ_G5PDX_CMD1:
		*out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
		break;
	case mmMC_SEQ_G5PDX_CTRL:
		*out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
		break;
	case mmMC_SEQ_CAS_TIMING:
		*out_reg = mmMC_SEQ_CAS_TIMING_LP;
		break;
	case mmMC_SEQ_MISC_TIMING:
		*out_reg = mmMC_SEQ_MISC_TIMING_LP;
		break;
	case mmMC_SEQ_MISC_TIMING2:
		*out_reg = mmMC_SEQ_MISC_TIMING2_LP;
		break;
	case mmMC_SEQ_PMG_DVS_CMD:
		*out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
		break;
	case mmMC_SEQ_PMG_DVS_CTL:
		*out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
		break;
	case mmMC_SEQ_RD_CTL_D0:
		*out_reg = mmMC_SEQ_RD_CTL_D0_LP;
		break;
	case mmMC_SEQ_RD_CTL_D1:
		*out_reg = mmMC_SEQ_RD_CTL_D1_LP;
		break;
	case mmMC_SEQ_WR_CTL_D0:
		*out_reg = mmMC_SEQ_WR_CTL_D0_LP;
		break;
	case mmMC_SEQ_WR_CTL_D1:
		*out_reg = mmMC_SEQ_WR_CTL_D1_LP;
		break;
	case mmMC_PMG_CMD_EMRS:
		*out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
		break;
	case mmMC_PMG_CMD_MRS:
		*out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
		break;
	case mmMC_PMG_CMD_MRS1:
		*out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
		break;
	case mmMC_SEQ_PMG_TIMING:
		*out_reg = mmMC_SEQ_PMG_TIMING_LP;
		break;
	case mmMC_PMG_CMD_MRS2:
		*out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
		break;
	case mmMC_SEQ_WR_CTL_2:
		*out_reg = mmMC_SEQ_WR_CTL_2_LP;
		break;
	default:
		result = false;
		break;
	}

	return result;
}

static void ci_set_valid_flag(struct ci_mc_reg_table *table)
{
	u8 i, j;

	for (i = 0; i < table->last; i++) {
		for (j = 1; j < table->num_entries; j++) {
			if (table->mc_reg_table_entry[j-1].mc_data[i] !=
			    table->mc_reg_table_entry[j].mc_data[i]) {
				table->valid_flag |= 1 << i;
				break;
			}
		}
	}
}

static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
{
	u32 i;
	u16 address;

	for (i = 0; i < table->last; i++) {
		table->mc_reg_address[i].s0 =
			ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
			address : table->mc_reg_address[i].s1;
	}
}

static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
				      struct ci_mc_reg_table *ci_table)
{
	u8 i, j;

	if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
		return -EINVAL;
	if (table->num_entries > MAX_AC_TIMING_ENTRIES)
		return -EINVAL;

	for (i = 0; i < table->last; i++)
		ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;

	ci_table->last = table->last;

	for (i = 0; i < table->num_entries; i++) {
		ci_table->mc_reg_table_entry[i].mclk_max =
			table->mc_reg_table_entry[i].mclk_max;
		for (j = 0; j < table->last; j++)
			ci_table->mc_reg_table_entry[i].mc_data[j] =
				table->mc_reg_table_entry[i].mc_data[j];
	}
	ci_table->num_entries = table->num_entries;

	return 0;
}

static int ci_register_patching_mc_seq(struct amdgpu_device *adev,
				       struct ci_mc_reg_table *table)
{
	u8 i, k;
	u32 tmp;
	bool patch;

	tmp = RREG32(mmMC_SEQ_MISC0);
	patch = ((tmp & 0x0000f00) == 0x300) ? true : false;

	if (patch &&
	    ((adev->pdev->device == 0x67B0) ||
	     (adev->pdev->device == 0x67B1))) {
		for (i = 0; i < table->last; i++) {
			if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;
			switch (table->mc_reg_address[i].s1) {
			case mmMC_SEQ_MISC1:
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
							0x00000007;
				}
				break;
			case mmMC_SEQ_WR_CTL_D0:
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
							0x0000D0E0;
				}
				break;
			case mmMC_SEQ_WR_CTL_D1:
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
							0x0000D0E0;
				}
				break;
			case mmMC_SEQ_WR_CTL_2:
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] = 0;
				}
				break;
			case mmMC_SEQ_CAS_TIMING:
				for (k = 0; k < table->num_entries; k++) {
					if (table->mc_reg_table_entry[k].mclk_max == 125000)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
							0x000C0140;
					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
							0x000C0150;
				}
				break;
			case mmMC_SEQ_MISC_TIMING:
				for (k = 0; k < table->num_entries; k++) {
					if (table->mc_reg_table_entry[k].mclk_max == 125000)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
							0x00000030;
					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
							0x0000000A;
				}
				break;
			default:
				break;
			}
		}

		WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
		tmp = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
		tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
		WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
		WREG32(mmMC_SEQ_IO_DEBUG_DATA, tmp);
	}

	return 0;
}

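/*
 * Build the driver-side MC register table: seed the _LP shadow registers,
 * pull the table from the VBIOS, then patch and validate it before it is
 * converted for the SMC.
 */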
static int ci_initialize_mc_reg_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct atom_mc_reg_table *table;
	struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
	u8 module_index = ci_get_memory_module_index(adev);
	int ret;

	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	WREG32(mmMC_SEQ_RAS_TIMING_LP, RREG32(mmMC_SEQ_RAS_TIMING));
	WREG32(mmMC_SEQ_CAS_TIMING_LP, RREG32(mmMC_SEQ_CAS_TIMING));
	WREG32(mmMC_SEQ_DLL_STBY_LP, RREG32(mmMC_SEQ_DLL_STBY));
	WREG32(mmMC_SEQ_G5PDX_CMD0_LP, RREG32(mmMC_SEQ_G5PDX_CMD0));
	WREG32(mmMC_SEQ_G5PDX_CMD1_LP, RREG32(mmMC_SEQ_G5PDX_CMD1));
	WREG32(mmMC_SEQ_G5PDX_CTRL_LP, RREG32(mmMC_SEQ_G5PDX_CTRL));
	WREG32(mmMC_SEQ_PMG_DVS_CMD_LP, RREG32(mmMC_SEQ_PMG_DVS_CMD));
	WREG32(mmMC_SEQ_PMG_DVS_CTL_LP, RREG32(mmMC_SEQ_PMG_DVS_CTL));
	WREG32(mmMC_SEQ_MISC_TIMING_LP, RREG32(mmMC_SEQ_MISC_TIMING));
	WREG32(mmMC_SEQ_MISC_TIMING2_LP, RREG32(mmMC_SEQ_MISC_TIMING2));
	WREG32(mmMC_SEQ_PMG_CMD_EMRS_LP, RREG32(mmMC_PMG_CMD_EMRS));
	WREG32(mmMC_SEQ_PMG_CMD_MRS_LP, RREG32(mmMC_PMG_CMD_MRS));
	WREG32(mmMC_SEQ_PMG_CMD_MRS1_LP, RREG32(mmMC_PMG_CMD_MRS1));
	WREG32(mmMC_SEQ_WR_CTL_D0_LP, RREG32(mmMC_SEQ_WR_CTL_D0));
	WREG32(mmMC_SEQ_WR_CTL_D1_LP, RREG32(mmMC_SEQ_WR_CTL_D1));
	WREG32(mmMC_SEQ_RD_CTL_D0_LP, RREG32(mmMC_SEQ_RD_CTL_D0));
	WREG32(mmMC_SEQ_RD_CTL_D1_LP, RREG32(mmMC_SEQ_RD_CTL_D1));
	WREG32(mmMC_SEQ_PMG_TIMING_LP, RREG32(mmMC_SEQ_PMG_TIMING));
	WREG32(mmMC_SEQ_PMG_CMD_MRS2_LP, RREG32(mmMC_PMG_CMD_MRS2));
	WREG32(mmMC_SEQ_WR_CTL_2_LP, RREG32(mmMC_SEQ_WR_CTL_2));

	ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
	if (ret)
		goto init_mc_done;

	ret = ci_copy_vbios_mc_reg_table(table, ci_table);
	if (ret)
		goto init_mc_done;

	ci_set_s0_mc_reg_index(ci_table);

	ret = ci_register_patching_mc_seq(adev, ci_table);
	if (ret)
		goto init_mc_done;

	ret = ci_set_mc_special_registers(adev, ci_table);
	if (ret)
		goto init_mc_done;

	ci_set_valid_flag(ci_table);

init_mc_done:
	kfree(table);

	return ret;
}

static int ci_populate_mc_reg_addresses(struct amdgpu_device *adev,
					SMU7_Discrete_MCRegisters *mc_reg_table)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 i, j;

	for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
		if (pi->mc_reg_table.valid_flag & (1 << j)) {
			if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;
			mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
			mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
			i++;
		}
	}

	mc_reg_table->last = (u8)i;

	return 0;
}

static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
				    SMU7_Discrete_MCRegisterSet *data,
				    u32 num_entries, u32 valid_flag)
{
	u32 i, j;

	for (i = 0, j = 0; j < num_entries; j++) {
		if (valid_flag & (1 << j)) {
			data->value[i] = cpu_to_be32(entry->mc_data[j]);
			i++;
		}
	}
}

static void ci_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
						 const u32 memory_clock,
						 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 i = 0;

	for(i = 0; i < pi->mc_reg_table.num_entries; i++) {
		if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
			break;
	}

	if ((i == pi->mc_reg_table.num_entries) && (i > 0))
		--i;

	ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
				mc_reg_table_data, pi->mc_reg_table.last,
				pi->mc_reg_table.valid_flag);
}

static void ci_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
					   SMU7_Discrete_MCRegisters *mc_reg_table)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 i;

	for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
		ci_convert_mc_reg_table_entry_to_smc(adev,
						     pi->dpm_table.mclk_table.dpm_levels[i].value,
						     &mc_reg_table->data[i]);
}

static int ci_populate_initial_mc_reg_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret;

	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));

	ret = ci_populate_mc_reg_addresses(adev, &pi->smc_mc_reg_table);
	if (ret)
		return ret;

	ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);

	return amdgpu_ci_copy_bytes_to_smc(adev,
					   pi->mc_reg_table_start,
					   (u8 *)&pi->smc_mc_reg_table,
					   sizeof(SMU7_Discrete_MCRegisters),
					   pi->sram_end);
}

static int ci_update_and_upload_mc_reg_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
		return 0;

	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));

	ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);

	return amdgpu_ci_copy_bytes_to_smc(adev,
					   pi->mc_reg_table_start +
					   offsetof(SMU7_Discrete_MCRegisters, data[0]),
					   (u8 *)&pi->smc_mc_reg_table.data[0],
					   sizeof(SMU7_Discrete_MCRegisterSet) *
					   pi->dpm_table.mclk_table.count,
					   pi->sram_end);
}

static void ci_enable_voltage_control(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);

	tmp |= GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK;
	WREG32_SMC(ixGENERAL_PWRMGT, tmp);
}

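/*
 * Link-speed helpers: the current PCIe speed and lane count are read back
 * from the PCIE_LC registers and compared against the highest gen/lane
 * values requested by a power state.
 */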
static enum amdgpu_pcie_gen ci_get_maximum_link_speed(struct amdgpu_device *adev,
						      struct amdgpu_ps *amdgpu_state)
{
	struct ci_ps *state = ci_get_ps(amdgpu_state);
	int i;
	u16 pcie_speed, max_speed = 0;

	for (i = 0; i < state->performance_level_count; i++) {
		pcie_speed = state->performance_levels[i].pcie_gen;
		if (max_speed < pcie_speed)
			max_speed = pcie_speed;
	}

	return max_speed;
}

static u16 ci_get_current_pcie_speed(struct amdgpu_device *adev)
{
	u32 speed_cntl = 0;

	speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL) &
		PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK;
	speed_cntl >>= PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;

	return (u16)speed_cntl;
}

static int ci_get_current_pcie_lane_number(struct amdgpu_device *adev)
{
	u32 link_width = 0;

	link_width = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL) &
		PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK;
	link_width >>= PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;

	switch (link_width) {
	case 1:
		return 1;
	case 2:
		return 2;
	case 3:
		return 4;
	case 4:
		return 8;
	case 5:
		return 12;
	case 6:
	case 7:
		return 16;
	default:
		return 0;
	}
}

static void ci_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
							     struct amdgpu_ps *amdgpu_new_state,
							     struct amdgpu_ps *amdgpu_current_state)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	enum amdgpu_pcie_gen target_link_speed =
		ci_get_maximum_link_speed(adev, amdgpu_new_state);
	enum amdgpu_pcie_gen current_link_speed;

	if (pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
		current_link_speed = ci_get_maximum_link_speed(adev, amdgpu_current_state);
	else
		current_link_speed = pi->force_pcie_gen;

	pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
	pi->pspp_notify_required = false;
	if (target_link_speed > current_link_speed) {
		switch (target_link_speed) {
#ifdef CONFIG_ACPI
		case AMDGPU_PCIE_GEN3:
			if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
				break;
			pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
			if (current_link_speed == AMDGPU_PCIE_GEN2)
				break;
		case AMDGPU_PCIE_GEN2:
			if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
				break;
#endif
		default:
			pi->force_pcie_gen = ci_get_current_pcie_speed(adev);
			break;
		}
	} else {
		if (target_link_speed < current_link_speed)
			pi->pspp_notify_required = true;
	}
}

static void ci_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
							   struct amdgpu_ps *amdgpu_new_state,
							   struct amdgpu_ps *amdgpu_current_state)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	enum amdgpu_pcie_gen target_link_speed =
		ci_get_maximum_link_speed(adev, amdgpu_new_state);
	u8 request;

	if (pi->pspp_notify_required) {
		if (target_link_speed == AMDGPU_PCIE_GEN3)
			request = PCIE_PERF_REQ_PECI_GEN3;
		else if (target_link_speed == AMDGPU_PCIE_GEN2)
			request = PCIE_PERF_REQ_PECI_GEN2;
		else
			request = PCIE_PERF_REQ_PECI_GEN1;

		if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
		    (ci_get_current_pcie_speed(adev) > 0))
			return;

#ifdef CONFIG_ACPI
		amdgpu_acpi_pcie_performance_request(adev, request, false);
#endif
	}
}

static int ci_set_private_data_variables_based_on_pptable(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddc_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
	struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddci_table =
		&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;

	if (allowed_sclk_vddc_table == NULL)
		return -EINVAL;
	if (allowed_sclk_vddc_table->count < 1)
		return -EINVAL;
	if (allowed_mclk_vddc_table == NULL)
		return -EINVAL;
	if (allowed_mclk_vddc_table->count < 1)
		return -EINVAL;
	if (allowed_mclk_vddci_table == NULL)
		return -EINVAL;
	if (allowed_mclk_vddci_table->count < 1)
		return -EINVAL;

	pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
	pi->max_vddc_in_pp_table =
		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;

	pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
	pi->max_vddci_in_pp_table =
		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;

	adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
	adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
		allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
	adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
	adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;

	return 0;
}

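/*
 * Leakage patching: VBIOS tables may contain virtual leakage voltage IDs;
 * replace them with the actual voltages recorded in the leakage tables so
 * later lookups use real values.
 */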
static void ci_patch_with_vddc_leakage(struct amdgpu_device *adev, u16 *vddc)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
	u32 leakage_index;

	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
		if (leakage_table->leakage_id[leakage_index] == *vddc) {
			*vddc = leakage_table->actual_voltage[leakage_index];
			break;
		}
	}
}

static void ci_patch_with_vddci_leakage(struct amdgpu_device *adev, u16 *vddci)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
	u32 leakage_index;

	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
		if (leakage_table->leakage_id[leakage_index] == *vddci) {
			*vddci = leakage_table->actual_voltage[leakage_index];
			break;
		}
	}
}

static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
								      struct amdgpu_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
	}
}

static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct amdgpu_device *adev,
								       struct amdgpu_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddci_leakage(adev, &table->entries[i].v);
	}
}

static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
									  struct amdgpu_vce_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
	}
}

static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
									  struct amdgpu_uvd_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
	}
}

static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct amdgpu_device *adev,
								   struct amdgpu_phase_shedding_limits_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(adev, &table->entries[i].voltage);
	}
}

static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct amdgpu_device *adev,
							    struct amdgpu_clock_and_voltage_limits *table)
{
	if (table) {
		ci_patch_with_vddc_leakage(adev, (u16 *)&table->vddc);
		ci_patch_with_vddci_leakage(adev, (u16 *)&table->vddci);
	}
}

static void ci_patch_cac_leakage_table_with_vddc_leakage(struct amdgpu_device *adev,
							 struct amdgpu_cac_leakage_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(adev, &table->entries[i].vddc);
	}
}

static void ci_patch_dependency_tables_with_leakage(struct amdgpu_device *adev)
{
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
								  &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
								  &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
								  &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
	ci_patch_clock_voltage_dependency_table_with_vddci_leakage(adev,
								   &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
	ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(adev,
								      &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
	ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(adev,
								      &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
								  &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
								  &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
	ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(adev,
							       &adev->pm.dpm.dyn_state.phase_shedding_limits_table);
	ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
							&adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
	ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
							&adev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
	ci_patch_cac_leakage_table_with_vddc_leakage(adev,
						     &adev->pm.dpm.dyn_state.cac_leakage_table);
}

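/*
 * Power-state bookkeeping: the driver keeps private copies of the current
 * and requested states and repoints ps_priv at them so the core DPM code
 * always sees a stable snapshot.
 */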
static void ci_update_current_ps(struct amdgpu_device *adev,
				 struct amdgpu_ps *rps)
{
	struct ci_ps *new_ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(adev);

	pi->current_rps = *rps;
	pi->current_ps = *new_ps;
	pi->current_rps.ps_priv = &pi->current_ps;
	adev->pm.dpm.current_ps = &pi->current_rps;
}

static void ci_update_requested_ps(struct amdgpu_device *adev,
				   struct amdgpu_ps *rps)
{
	struct ci_ps *new_ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(adev);

	pi->requested_rps = *rps;
	pi->requested_ps = *new_ps;
	pi->requested_rps.ps_priv = &pi->requested_ps;
	adev->pm.dpm.requested_ps = &pi->requested_rps;
}

static int ci_dpm_pre_set_power_state(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
	struct amdgpu_ps *new_ps = &requested_ps;

	ci_update_requested_ps(adev, new_ps);

	ci_apply_state_adjust_rules(adev, &pi->requested_rps);

	return 0;
}

static void ci_dpm_post_set_power_state(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_ps *new_ps = &pi->requested_rps;

	ci_update_current_ps(adev, new_ps);
}

static void ci_dpm_setup_asic(struct amdgpu_device *adev)
{
	ci_read_clock_registers(adev);
	ci_enable_acpi_power_management(adev);
	ci_init_sclk_t(adev);
}

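/*
 * Full DPM bring-up sequence; the ordering matters: voltage control and
 * firmware upload come first, the SMC tables next, then the individual
 * features (ULV, deep sleep, DIDT, CAC, power containment) are switched on.
 */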
static int ci_dpm_enable(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
	int ret;

	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		ci_enable_voltage_control(adev);
		ret = ci_construct_voltage_tables(adev);
		if (ret) {
			DRM_ERROR("ci_construct_voltage_tables failed\n");
			return ret;
		}
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_initialize_mc_reg_table(adev);
		if (ret)
			pi->caps_dynamic_ac_timing = false;
	}
	ci_enable_spread_spectrum(adev, true);
	if (pi->thermal_protection)
		ci_enable_thermal_protection(adev, true);
	ci_program_sstp(adev);
	ci_enable_display_gap(adev);
	ci_program_vc(adev);
	ret = ci_upload_firmware(adev);
	if (ret) {
		DRM_ERROR("ci_upload_firmware failed\n");
		return ret;
	}
	ret = ci_process_firmware_header(adev);
	if (ret) {
		DRM_ERROR("ci_process_firmware_header failed\n");
		return ret;
	}
	ret = ci_initial_switch_from_arb_f0_to_f1(adev);
	if (ret) {
		DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
		return ret;
	}
	ret = ci_init_smc_table(adev);
	if (ret) {
		DRM_ERROR("ci_init_smc_table failed\n");
		return ret;
	}
	ret = ci_init_arb_table_index(adev);
	if (ret) {
		DRM_ERROR("ci_init_arb_table_index failed\n");
		return ret;
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_populate_initial_mc_reg_table(adev);
		if (ret) {
			DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = ci_populate_pm_base(adev);
	if (ret) {
		DRM_ERROR("ci_populate_pm_base failed\n");
		return ret;
	}
	ci_dpm_start_smc(adev);
	ci_enable_vr_hot_gpio_interrupt(adev);
	ret = ci_notify_smc_display_change(adev, false);
	if (ret) {
		DRM_ERROR("ci_notify_smc_display_change failed\n");
		return ret;
	}
	ci_enable_sclk_control(adev, true);
	ret = ci_enable_ulv(adev, true);
	if (ret) {
		DRM_ERROR("ci_enable_ulv failed\n");
		return ret;
	}
	ret = ci_enable_ds_master_switch(adev, true);
	if (ret) {
		DRM_ERROR("ci_enable_ds_master_switch failed\n");
		return ret;
	}
	ret = ci_start_dpm(adev);
	if (ret) {
		DRM_ERROR("ci_start_dpm failed\n");
		return ret;
	}
	ret = ci_enable_didt(adev, true);
	if (ret) {
		DRM_ERROR("ci_enable_didt failed\n");
		return ret;
	}
	ret = ci_enable_smc_cac(adev, true);
	if (ret) {
		DRM_ERROR("ci_enable_smc_cac failed\n");
		return ret;
	}
	ret = ci_enable_power_containment(adev, true);
	if (ret) {
		DRM_ERROR("ci_enable_power_containment failed\n");
		return ret;
	}

	ret = ci_power_control_set_level(adev);
	if (ret) {
		DRM_ERROR("ci_power_control_set_level failed\n");
		return ret;
	}

	ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);

	ret = ci_enable_thermal_based_sclk_dpm(adev, true);
	if (ret) {
		DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
		return ret;
	}

	ci_thermal_start_thermal_controller(adev);

	ci_update_current_ps(adev, boot_ps);

	return 0;
}

static void ci_dpm_disable(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;

	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
		       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
		       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);

	ci_dpm_powergate_uvd(adev, true);

	if (!amdgpu_ci_is_smc_running(adev))
		return;

	ci_thermal_stop_thermal_controller(adev);

	if (pi->thermal_protection)
		ci_enable_thermal_protection(adev, false);
	ci_enable_power_containment(adev, false);
	ci_enable_smc_cac(adev, false);
	ci_enable_didt(adev, false);
	ci_enable_spread_spectrum(adev, false);
	ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);

	ci_enable_ds_master_switch(adev, false);
	ci_enable_ulv(adev, false);

	ci_reset_to_default(adev);
	ci_dpm_stop_smc(adev);
	ci_force_switch_to_arb_f0(adev);
	ci_enable_thermal_based_sclk_dpm(adev, false);

	ci_update_current_ps(adev, boot_ps);
}

static int ci_dpm_set_power_state(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_ps *new_ps = &pi->requested_rps;
	struct amdgpu_ps *old_ps = &pi->current_rps;
	int ret;

	ci_find_dpm_states_clocks_in_dpm_table(adev, new_ps);
	if (pi->pcie_performance_request)
		ci_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
	ret = ci_freeze_sclk_mclk_dpm(adev);
	if (ret) {
		DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
		return ret;
	}
	ret = ci_populate_and_upload_sclk_mclk_dpm_levels(adev, new_ps);
	if (ret) {
		DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
		return ret;
	}
	ret = ci_generate_dpm_level_enable_mask(adev, new_ps);
	if (ret) {
		DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
		return ret;
	}

	ret = ci_update_vce_dpm(adev, new_ps, old_ps);
	if (ret) {
		DRM_ERROR("ci_update_vce_dpm failed\n");
		return ret;
	}

	ret = ci_update_sclk_t(adev);
	if (ret) {
		DRM_ERROR("ci_update_sclk_t failed\n");
		return ret;
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_update_and_upload_mc_reg_table(adev);
		if (ret) {
			DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = ci_program_memory_timing_parameters(adev);
	if (ret) {
		DRM_ERROR("ci_program_memory_timing_parameters failed\n");
		return ret;
	}
	ret = ci_unfreeze_sclk_mclk_dpm(adev);
	if (ret) {
		DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
		return ret;
	}
	ret = ci_upload_dpm_level_enable_mask(adev);
	if (ret) {
		DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
		return ret;
	}
	if (pi->pcie_performance_request)
		ci_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);

	return 0;
}

static void ci_dpm_reset_asic(struct amdgpu_device *adev)
{
	ci_set_boot_state(adev);
}

static void ci_dpm_display_configuration_changed(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	ci_program_display_gap(adev);
}

union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
	struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
};

union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};

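/*
 * PPLib parsing: walk the ATOM state/clock/non-clock arrays out of the
 * VBIOS PowerPlay table and translate each entry into amdgpu_ps/ci_ps
 * structures, patching the boot state with the VBIOS bootup values.
 */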
static void ci_parse_pplib_non_clock_info(struct amdgpu_device *adev,
					  struct amdgpu_ps *rps,
					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					  u8 table_rev)
{
	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
		adev->pm.dpm.boot_ps = rps;
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		adev->pm.dpm.uvd_ps = rps;
}

static void ci_parse_pplib_clock_info(struct amdgpu_device *adev,
				      struct amdgpu_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_pl *pl = &ps->performance_levels[index];

	ps->performance_level_count = index + 1;

	pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
	pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
	pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
	pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;

	pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
						   pi->sys_pcie_mask,
						   pi->vbios_boot_state.pcie_gen_bootup_value,
						   clock_info->ci.ucPCIEGen);
	pl->pcie_lane = amdgpu_get_pcie_lane_support(adev,
						     pi->vbios_boot_state.pcie_lane_bootup_value,
						     le16_to_cpu(clock_info->ci.usPCIELane));

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
		pi->acpi_pcie_gen = pl->pcie_gen;
	}

	if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
		pi->ulv.supported = true;
		pi->ulv.pl = *pl;
		pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
	}

	/* patch up boot state */
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
		pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
		pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
		pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
	}

	switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		pi->use_pcie_powersaving_levels = true;
		if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
			pi->pcie_gen_powersaving.max = pl->pcie_gen;
		if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
			pi->pcie_gen_powersaving.min = pl->pcie_gen;
		if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
			pi->pcie_lane_powersaving.max = pl->pcie_lane;
		if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
			pi->pcie_lane_powersaving.min = pl->pcie_lane;
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		pi->use_pcie_performance_levels = true;
		if (pi->pcie_gen_performance.max < pl->pcie_gen)
			pi->pcie_gen_performance.max = pl->pcie_gen;
		if (pi->pcie_gen_performance.min > pl->pcie_gen)
			pi->pcie_gen_performance.min = pl->pcie_gen;
		if (pi->pcie_lane_performance.max < pl->pcie_lane)
			pi->pcie_lane_performance.max = pl->pcie_lane;
		if (pi->pcie_lane_performance.min > pl->pcie_lane)
			pi->pcie_lane_performance.min = pl->pcie_lane;
		break;
	default:
		break;
	}
}

5648 static int ci_parse_power_table(struct amdgpu_device
*adev
)
5650 struct amdgpu_mode_info
*mode_info
= &adev
->mode_info
;
5651 struct _ATOM_PPLIB_NONCLOCK_INFO
*non_clock_info
;
5652 union pplib_power_state
*power_state
;
5653 int i
, j
, k
, non_clock_array_index
, clock_array_index
;
5654 union pplib_clock_info
*clock_info
;
5655 struct _StateArray
*state_array
;
5656 struct _ClockInfoArray
*clock_info_array
;
5657 struct _NonClockInfoArray
*non_clock_info_array
;
5658 union power_info
*power_info
;
5659 int index
= GetIndexIntoMasterTable(DATA
, PowerPlayInfo
);
5662 u8
*power_state_offset
;
5665 if (!amdgpu_atom_parse_data_header(mode_info
->atom_context
, index
, NULL
,
5666 &frev
, &crev
, &data_offset
))
5668 power_info
= (union power_info
*)(mode_info
->atom_context
->bios
+ data_offset
);
5670 amdgpu_add_thermal_controller(adev
);
5672 state_array
= (struct _StateArray
*)
5673 (mode_info
->atom_context
->bios
+ data_offset
+
5674 le16_to_cpu(power_info
->pplib
.usStateArrayOffset
));
5675 clock_info_array
= (struct _ClockInfoArray
*)
5676 (mode_info
->atom_context
->bios
+ data_offset
+
5677 le16_to_cpu(power_info
->pplib
.usClockInfoArrayOffset
));
5678 non_clock_info_array
= (struct _NonClockInfoArray
*)
5679 (mode_info
->atom_context
->bios
+ data_offset
+
5680 le16_to_cpu(power_info
->pplib
.usNonClockInfoArrayOffset
));
5682 adev
->pm
.dpm
.ps
= kcalloc(state_array
->ucNumEntries
,
5683 sizeof(struct amdgpu_ps
),
5685 if (!adev
->pm
.dpm
.ps
)
5687 power_state_offset
= (u8
*)state_array
->states
;
5688 for (i
= 0; i
< state_array
->ucNumEntries
; i
++) {
5690 power_state
= (union pplib_power_state
*)power_state_offset
;
5691 non_clock_array_index
= power_state
->v2
.nonClockInfoIndex
;
5692 non_clock_info
= (struct _ATOM_PPLIB_NONCLOCK_INFO
*)
5693 &non_clock_info_array
->nonClockInfo
[non_clock_array_index
];
5694 ps
= kzalloc(sizeof(struct ci_ps
), GFP_KERNEL
);
5696 kfree(adev
->pm
.dpm
.ps
);
5699 adev
->pm
.dpm
.ps
[i
].ps_priv
= ps
;
5700 ci_parse_pplib_non_clock_info(adev
, &adev
->pm
.dpm
.ps
[i
],
5702 non_clock_info_array
->ucEntrySize
);
5704 idx
= (u8
*)&power_state
->v2
.clockInfoIndex
[0];
5705 for (j
= 0; j
< power_state
->v2
.ucNumDPMLevels
; j
++) {
5706 clock_array_index
= idx
[j
];
5707 if (clock_array_index
>= clock_info_array
->ucNumEntries
)
5709 if (k
>= CISLANDS_MAX_HARDWARE_POWERLEVELS
)
5711 clock_info
= (union pplib_clock_info
*)
5712 ((u8
*)&clock_info_array
->clockInfo
[0] +
5713 (clock_array_index
* clock_info_array
->ucEntrySize
));
5714 ci_parse_pplib_clock_info(adev
,
5715 &adev
->pm
.dpm
.ps
[i
], k
,
5719 power_state_offset
+= 2 + power_state
->v2
.ucNumDPMLevels
;
5721 adev
->pm
.dpm
.num_ps
= state_array
->ucNumEntries
;
	/* fill in the vce power states */
	for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
		u32 sclk, mclk;
		clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
		clock_info = (union pplib_clock_info *)
			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
		sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
		sclk |= clock_info->ci.ucEngineClockHigh << 16;
		mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
		mclk |= clock_info->ci.ucMemoryClockHigh << 16;
		adev->pm.dpm.vce_states[i].sclk = sclk;
		adev->pm.dpm.vce_states[i].mclk = mclk;
	}

	return 0;
}
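/*
 * ci_get_vbios_boot_values - capture the power state programmed at POST
 *
 * Reads the FirmwareInfo table for the bootup voltages and default
 * engine/memory clocks, and samples the current PCIe speed and lane
 * width, so DPM always has a known-good boot state to fall back on.
 */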
static int ci_get_vbios_boot_values(struct amdgpu_device *adev,
				    struct ci_vbios_boot_state *boot_state)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
	ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
	u8 frev, crev;
	u16 data_offset;

	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		firmware_info =
			(ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
						    data_offset);
		boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
		boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
		boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
		boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(adev);
		boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(adev);
		boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
		boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);
		return 0;
	}

	return -EINVAL;
}
static void ci_dpm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		kfree(adev->pm.dpm.ps[i].ps_priv);
	}
	kfree(adev->pm.dpm.ps);
	kfree(adev->pm.dpm.priv);
	kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
	amdgpu_free_extended_power_table(adev);
}
/**
 * ci_dpm_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int ci_dpm_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		if ((adev->pdev->revision == 0x80) ||
		    (adev->pdev->revision == 0x81) ||
		    (adev->pdev->device == 0x665f))
			chip_name = "bonaire_k";
		else
			chip_name = "bonaire";
		break;
	case CHIP_HAWAII:
		if (adev->pdev->revision == 0x80)
			chip_name = "hawaii_k";
		else
			chip_name = "hawaii";
		break;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
	err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->pm.fw);

out:
	if (err) {
		pr_err("cik_smc: Failed to load firmware \"%s\"\n", fw_name);
		release_firmware(adev->pm.fw);
		adev->pm.fw = NULL;
	}
	return err;
}
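/*
 * ci_dpm_init - one-time software-side DPM setup
 *
 * Allocates the ci_power_info, pulls the boot values and power tables
 * out of the VBIOS, seeds the default thresholds and feature caps, and
 * probes the board GPIOs and voltage-control method.
 */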
static int ci_dpm_init(struct amdgpu_device *adev)
{
	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
	SMU7_Discrete_DpmTable *dpm_table;
	struct amdgpu_gpio_rec gpio;
	u16 data_offset, size;
	u8 frev, crev;
	struct ci_power_info *pi;
	int ret;

	pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
	if (pi == NULL)
		return -ENOMEM;
	adev->pm.dpm.priv = pi;

	pi->sys_pcie_mask =
		adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK;

	pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;

	/* start the gen/lane bounds inverted so the first parsed
	 * power level overwrites both min and max */
	pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1;
	pi->pcie_gen_performance.min = AMDGPU_PCIE_GEN3;
	pi->pcie_gen_powersaving.max = AMDGPU_PCIE_GEN1;
	pi->pcie_gen_powersaving.min = AMDGPU_PCIE_GEN3;

	pi->pcie_lane_performance.max = 0;
	pi->pcie_lane_performance.min = 16;
	pi->pcie_lane_powersaving.max = 0;
	pi->pcie_lane_powersaving.min = 16;

	ret = ci_get_vbios_boot_values(adev, &pi->vbios_boot_state);
	if (ret) {
		ci_dpm_fini(adev);
		return ret;
	}

	ret = amdgpu_get_platform_caps(adev);
	if (ret) {
		ci_dpm_fini(adev);
		return ret;
	}

	ret = amdgpu_parse_extended_power_table(adev);
	if (ret) {
		ci_dpm_fini(adev);
		return ret;
	}

	ret = ci_parse_power_table(adev);
	if (ret) {
		ci_dpm_fini(adev);
		return ret;
	}
	pi->dll_default_on = false;
	pi->sram_end = SMC_RAM_END;

	pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;

	pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;

	pi->sclk_dpm_key_disabled = 0;
	pi->mclk_dpm_key_disabled = 0;
	pi->pcie_dpm_key_disabled = 0;
	pi->thermal_sclk_dpm_enabled = 0;

	if (adev->powerplay.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
		pi->caps_sclk_ds = true;
	else
		pi->caps_sclk_ds = false;

	pi->mclk_strobe_mode_threshold = 40000;
	pi->mclk_stutter_mode_threshold = 40000;
	pi->mclk_edc_enable_threshold = 40000;
	pi->mclk_edc_wr_enable_threshold = 40000;

	ci_initialize_powertune_defaults(adev);

	pi->caps_fps = false;

	pi->caps_sclk_throttle_low_notification = false;

	pi->caps_uvd_dpm = true;
	pi->caps_vce_dpm = true;

	ci_get_leakage_voltages(adev);
	ci_patch_dependency_tables_with_leakage(adev);
	ci_set_private_data_variables_based_on_pptable(adev);

	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
		kcalloc(4,
			sizeof(struct amdgpu_clock_voltage_dependency_entry),
			GFP_KERNEL);
	if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
		ci_dpm_fini(adev);
		return -ENOMEM;
	}
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;

	adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
	adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
	adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;

	adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
	adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
	adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
	adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
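	/*
	 * Board-dependent bits follow: thermal trip points differ
	 * between Hawaii and Bonaire, and the VRHot, AC/DC switch and
	 * PCC GPIOs are only wired up on some designs, so each one is
	 * probed before use.
	 */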
	if (adev->asic_type == CHIP_HAWAII) {
		pi->thermal_temp_setting.temperature_low = 94500;
		pi->thermal_temp_setting.temperature_high = 95000;
		pi->thermal_temp_setting.temperature_shutdown = 104000;
	} else {
		pi->thermal_temp_setting.temperature_low = 99500;
		pi->thermal_temp_setting.temperature_high = 100000;
		pi->thermal_temp_setting.temperature_shutdown = 104000;
	}

	pi->uvd_enabled = false;

	dpm_table = &pi->smc_state_table;

	gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_VRHOT_GPIO_PINID);
	if (gpio.valid) {
		dpm_table->VRHotGpio = gpio.shift;
		adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
	} else {
		dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
		adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
	}

	gpio = amdgpu_atombios_lookup_gpio(adev, PP_AC_DC_SWITCH_GPIO_PINID);
	if (gpio.valid) {
		dpm_table->AcDcGpio = gpio.shift;
		adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
	} else {
		dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
		adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
	}

	gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_PCC_GPIO_PINID);
	if (gpio.valid) {
		u32 tmp = RREG32_SMC(ixCNB_PWRMGT_CNTL);

		switch (gpio.shift) {
		case 0:
			tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
			tmp |= 1 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
			break;
		case 1:
			tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
			tmp |= 2 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
			break;
		case 2:
			tmp |= CNB_PWRMGT_CNTL__GNB_SLOW_MASK;
			break;
		case 3:
			tmp |= CNB_PWRMGT_CNTL__FORCE_NB_PS1_MASK;
			break;
		case 4:
			tmp |= CNB_PWRMGT_CNTL__DPM_ENABLED_MASK;
			break;
		default:
			DRM_INFO("Invalid PCC GPIO: %u!\n", gpio.shift);
			break;
		}
		WREG32_SMC(ixCNB_PWRMGT_CNTL, tmp);
	}
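	/*
	 * Default every rail to "no control", then upgrade to GPIO or
	 * SVID2 control if the VBIOS voltage objects say the board
	 * supports it; the VDDCI and MVDD paths additionally require
	 * the matching platform cap, which is cleared again when no
	 * usable voltage object is found.
	 */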
	pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
	else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
		if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
		else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
		else
			adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
	}

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
		if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
		else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
		else
			adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
	}

	pi->vddc_phase_shed_control = true;

#if defined(CONFIG_ACPI)
	pi->pcie_performance_request =
		amdgpu_acpi_is_pcie_performance_request_supported(adev);
#else
	pi->pcie_performance_request = false;
#endif

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
					  &frev, &crev, &data_offset)) {
		pi->caps_sclk_ss_support = true;
		pi->caps_mclk_ss_support = true;
		pi->dynamic_ss = true;
	} else {
		pi->caps_sclk_ss_support = false;
		pi->caps_mclk_ss_support = false;
		pi->dynamic_ss = true;
	}

	if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
		pi->thermal_protection = true;
	else
		pi->thermal_protection = false;

	pi->caps_dynamic_ac_timing = true;

	pi->uvd_power_gated = true;

	/* make sure dc limits are valid */
	if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
	    (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
		adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
			adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

	pi->fan_ctrl_is_in_default_mode = true;

	return 0;
}
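/*
 * Status reporting: the SMC exports average graphics activity through
 * its soft-register file (AverageGraphicsA); the +0x80/>>8 arithmetic
 * below treats it as an 8.8 fixed-point value, rounding it down to an
 * integer percentage that is then clamped to 100.
 */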
static void
ci_dpm_debugfs_print_current_performance_level(void *handle,
					       struct seq_file *m)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_ps *rps = &pi->current_rps;
	u32 sclk = ci_get_average_sclk_freq(adev);
	u32 mclk = ci_get_average_mclk_freq(adev);
	u32 activity_percent = 50;
	int ret;

	ret = ci_read_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, AverageGraphicsA),
					&activity_percent);

	if (ret == 0) {
		activity_percent += 0x80;
		activity_percent >>= 8;
		activity_percent = activity_percent > 100 ? 100 : activity_percent;
	}

	seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
	seq_printf(m, "vce %sabled\n", rps->vce_active ? "en" : "dis");
	seq_printf(m, "power level avg sclk: %u mclk: %u\n",
		   sclk, mclk);
	seq_printf(m, "GPU load: %u %%\n", activity_percent);
}
static void ci_dpm_print_power_state(void *handle, void *current_ps)
{
	struct amdgpu_ps *rps = (struct amdgpu_ps *)current_ps;
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_pl *pl;
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_dpm_print_class_info(rps->class, rps->class2);
	amdgpu_dpm_print_cap_info(rps->caps);
	printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
	for (i = 0; i < ps->performance_level_count; i++) {
		pl = &ps->performance_levels[i];
		printk("\t\tpower level %d sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
		       i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
	}
	amdgpu_dpm_print_ps_status(adev, rps);
}
static inline bool ci_are_power_levels_equal(const struct ci_pl *ci_cpl1,
					     const struct ci_pl *ci_cpl2)
{
	return ((ci_cpl1->mclk == ci_cpl2->mclk) &&
		(ci_cpl1->sclk == ci_cpl2->sclk) &&
		(ci_cpl1->pcie_gen == ci_cpl2->pcie_gen) &&
		(ci_cpl1->pcie_lane == ci_cpl2->pcie_lane));
}
static int ci_check_state_equal(void *handle,
				void *current_ps,
				void *request_ps,
				bool *equal)
{
	struct ci_ps *ci_cps;
	struct ci_ps *ci_rps;
	int i;
	struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
	struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
		return -EINVAL;

	ci_cps = ci_get_ps((struct amdgpu_ps *)cps);
	ci_rps = ci_get_ps((struct amdgpu_ps *)rps);

	if (ci_cps == NULL) {
		*equal = false;
		return 0;
	}

	if (ci_cps->performance_level_count != ci_rps->performance_level_count) {
		*equal = false;
		return 0;
	}

	for (i = 0; i < ci_cps->performance_level_count; i++) {
		if (!ci_are_power_levels_equal(&(ci_cps->performance_levels[i]),
					       &(ci_rps->performance_levels[i]))) {
			*equal = false;
			return 0;
		}
	}

	/* If all performance levels are the same try to use the UVD clocks to break the tie.*/
	*equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
	*equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));

	return 0;
}
static u32 ci_dpm_get_sclk(void *handle, bool low)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->performance_levels[0].sclk;
	else
		return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
}
static u32 ci_dpm_get_mclk(void *handle, bool low)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->performance_levels[0].mclk;
	else
		return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
}
/* get temperature in millidegrees */
static int ci_dpm_get_temp(void *handle)
{
	u32 temp;
	int actual_temp = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	temp = (RREG32_SMC(ixCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
		CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;

	if (temp & 0x200)
		actual_temp = 255;
	else
		actual_temp = temp & 0x1ff;

	actual_temp = actual_temp * 1000;

	return actual_temp;
}
static int ci_set_temperature_range(struct amdgpu_device *adev)
{
	int ret;

	ret = ci_thermal_enable_alert(adev, false);
	if (ret)
		return ret;
	ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN,
					       CISLANDS_TEMP_RANGE_MAX);
	if (ret)
		return ret;
	ret = ci_thermal_enable_alert(adev, true);
	if (ret)
		return ret;

	return ret;
}
static int ci_dpm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->powerplay.pp_funcs = &ci_dpm_funcs;
	adev->powerplay.pp_handle = adev;
	ci_dpm_set_irq_funcs(adev);

	return 0;
}
static int ci_dpm_late_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->pm.dpm_enabled)
		return 0;

	/* init the sysfs and debugfs files late */
	ret = amdgpu_pm_sysfs_init(adev);
	if (ret)
		return ret;

	ret = ci_set_temperature_range(adev);
	if (ret)
		return ret;

	return 0;
}
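/*
 * ci_dpm_sw_init wires up the two thermal interrupt sources (src_id
 * 230 = low-to-high, 231 = high-to-low), seeds the default balanced
 * power state, loads the SMC microcode, and then runs ci_dpm_init()
 * under the pm mutex; amdgpu_dpm=0 stops after the microcode load.
 */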
static int ci_dpm_sw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 230,
				&adev->pm.dpm.thermal.irq);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 231,
				&adev->pm.dpm.thermal.irq);
	if (ret)
		return ret;

	/* default to balanced state */
	adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
	adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
	adev->pm.default_sclk = adev->clock.default_sclk;
	adev->pm.default_mclk = adev->clock.default_mclk;
	adev->pm.current_sclk = adev->clock.default_sclk;
	adev->pm.current_mclk = adev->clock.default_mclk;
	adev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	ret = ci_dpm_init_microcode(adev);
	if (ret)
		return ret;

	if (amdgpu_dpm == 0)
		return 0;

	INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
	mutex_lock(&adev->pm.mutex);
	ret = ci_dpm_init(adev);
	if (ret)
		goto dpm_failed;
	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
	if (amdgpu_dpm == 1)
		amdgpu_pm_print_power_states(adev);
	mutex_unlock(&adev->pm.mutex);
	DRM_INFO("amdgpu: dpm initialized\n");

	return 0;

dpm_failed:
	ci_dpm_fini(adev);
	mutex_unlock(&adev->pm.mutex);
	DRM_ERROR("amdgpu: dpm initialization failed\n");
	return ret;
}
static int ci_dpm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	flush_work(&adev->pm.dpm.thermal.work);

	mutex_lock(&adev->pm.mutex);
	ci_dpm_fini(adev);
	mutex_unlock(&adev->pm.mutex);

	release_firmware(adev->pm.fw);
	adev->pm.fw = NULL;

	return 0;
}
static int ci_dpm_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_dpm) {
		ret = ci_upload_firmware(adev);
		if (ret) {
			DRM_ERROR("ci_upload_firmware failed\n");
			return ret;
		}
		ci_dpm_start_smc(adev);
		return 0;
	}

	mutex_lock(&adev->pm.mutex);
	ci_dpm_setup_asic(adev);
	ret = ci_dpm_enable(adev);
	if (ret)
		adev->pm.dpm_enabled = false;
	else
		adev->pm.dpm_enabled = true;
	mutex_unlock(&adev->pm.mutex);

	return ret;
}
static int ci_dpm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		ci_dpm_disable(adev);
		mutex_unlock(&adev->pm.mutex);
	} else {
		ci_dpm_stop_smc(adev);
	}

	return 0;
}
static int ci_dpm_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
			       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
		amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
			       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
		adev->pm.dpm.last_user_state = adev->pm.dpm.user_state;
		adev->pm.dpm.last_state = adev->pm.dpm.state;
		adev->pm.dpm.user_state = POWER_STATE_TYPE_INTERNAL_BOOT;
		adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_BOOT;
		mutex_unlock(&adev->pm.mutex);
		amdgpu_pm_compute_clocks(adev);
	}

	return 0;
}
static int ci_dpm_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		/* asic init will reset to the boot state */
		mutex_lock(&adev->pm.mutex);
		ci_dpm_setup_asic(adev);
		ret = ci_dpm_enable(adev);
		if (ret)
			adev->pm.dpm_enabled = false;
		else
			adev->pm.dpm_enabled = true;
		adev->pm.dpm.user_state = adev->pm.dpm.last_user_state;
		adev->pm.dpm.state = adev->pm.dpm.last_state;
		mutex_unlock(&adev->pm.mutex);
		if (adev->pm.dpm_enabled)
			amdgpu_pm_compute_clocks(adev);
	}

	return 0;
}
static bool ci_dpm_is_idle(void *handle)
{
	return true;
}

static int ci_dpm_wait_for_idle(void *handle)
{
	return 0;
}

static int ci_dpm_soft_reset(void *handle)
{
	return 0;
}
static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      unsigned type,
				      enum amdgpu_interrupt_state state)
{
	u32 cg_thermal_int;

	switch (type) {
	case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
			break;
		default:
			break;
		}
		break;
	case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
	return 0;
}
static int ci_dpm_process_interrupt(struct amdgpu_device *adev,
				    struct amdgpu_irq_src *source,
				    struct amdgpu_iv_entry *entry)
{
	bool queue_thermal = false;

	if (entry == NULL)
		return -EINVAL;

	switch (entry->src_id) {
	case 230: /* thermal low to high */
		DRM_DEBUG("IH: thermal low to high\n");
		adev->pm.dpm.thermal.high_to_low = false;
		queue_thermal = true;
		break;
	case 231: /* thermal high to low */
		DRM_DEBUG("IH: thermal high to low\n");
		adev->pm.dpm.thermal.high_to_low = true;
		queue_thermal = true;
		break;
	default:
		break;
	}

	if (queue_thermal)
		schedule_work(&adev->pm.dpm.thermal.work);

	return 0;
}
static int ci_dpm_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	return 0;
}

static int ci_dpm_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}
static int ci_dpm_print_clock_levels(void *handle,
		enum pp_clock_type type, char *buf)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
	struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
	struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
	int i, now, size = 0;
	uint32_t clock, pcie_speed;

	switch (type) {
	case PP_SCLK:
		amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetSclkFrequency);
		clock = RREG32(mmSMC_MSG_ARG_0);

		for (i = 0; i < sclk_table->count; i++) {
			if (clock > sclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < sclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, sclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_MCLK:
		amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_API_GetMclkFrequency);
		clock = RREG32(mmSMC_MSG_ARG_0);

		for (i = 0; i < mclk_table->count; i++) {
			if (clock > mclk_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < mclk_table->count; i++)
			size += sprintf(buf + size, "%d: %uMhz %s\n",
					i, mclk_table->dpm_levels[i].value / 100,
					(i == now) ? "*" : "");
		break;
	case PP_PCIE:
		pcie_speed = ci_get_current_pcie_speed(adev);
		for (i = 0; i < pcie_table->count; i++) {
			if (pcie_speed != pcie_table->dpm_levels[i].value)
				continue;
			break;
		}
		now = i;

		for (i = 0; i < pcie_table->count; i++)
			size += sprintf(buf + size, "%d: %s %s\n", i,
					(pcie_table->dpm_levels[i].value == 0) ? "2.5GT/s, x1" :
					(pcie_table->dpm_levels[i].value == 1) ? "5.0GT/s, x16" :
					(pcie_table->dpm_levels[i].value == 2) ? "8.0GT/s, x16" : "",
					(i == now) ? "*" : "");
		break;
	default:
		break;
	}

	return size;
}
static int ci_dpm_force_clock_level(void *handle,
		enum pp_clock_type type, uint32_t mask)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);

	if (adev->pm.dpm.forced_level != AMD_DPM_FORCED_LEVEL_MANUAL)
		return -EINVAL;

	switch (type) {
	case PP_SCLK:
		if (!pi->sclk_dpm_key_disabled)
			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
					PPSMC_MSG_SCLKDPM_SetEnabledMask,
					pi->dpm_level_enable_mask.sclk_dpm_enable_mask & mask);
		break;

	case PP_MCLK:
		if (!pi->mclk_dpm_key_disabled)
			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
					PPSMC_MSG_MCLKDPM_SetEnabledMask,
					pi->dpm_level_enable_mask.mclk_dpm_enable_mask & mask);
		break;

	case PP_PCIE:
	{
		uint32_t tmp = mask & pi->dpm_level_enable_mask.pcie_dpm_enable_mask;

		if (!pi->pcie_dpm_key_disabled) {
			if (fls(tmp) != ffs(tmp))
				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_UnForceLevel);
			else
				amdgpu_ci_send_msg_to_smc_with_parameter(adev,
					PPSMC_MSG_PCIeDPM_ForceLevel,
					fls(tmp) - 1);
		}
		break;
	}
	default:
		break;
	}

	return 0;
}
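/*
 * The sclk/mclk overdrive getters report how far the top DPM level
 * has been raised above the golden (stock) table, as a percentage:
 * od = (current_max - golden_max) * 100 / golden_max. The setters
 * apply the inverse: new_max = golden_max * (100 + od) / 100.
 */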
static int ci_dpm_get_sclk_od(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_single_dpm_table *sclk_table = &(pi->dpm_table.sclk_table);
	struct ci_single_dpm_table *golden_sclk_table =
			&(pi->golden_dpm_table.sclk_table);
	int value;

	value = (sclk_table->dpm_levels[sclk_table->count - 1].value -
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value) *
			100 /
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;

	return value;
}
static int ci_dpm_set_sclk_od(void *handle, uint32_t value)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
	struct ci_single_dpm_table *golden_sclk_table =
			&(pi->golden_dpm_table.sclk_table);

	if (value > 20)
		value = 20;

	ps->performance_levels[ps->performance_level_count - 1].sclk =
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value *
			value / 100 +
			golden_sclk_table->dpm_levels[golden_sclk_table->count - 1].value;

	return 0;
}
static int ci_dpm_get_mclk_od(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_single_dpm_table *mclk_table = &(pi->dpm_table.mclk_table);
	struct ci_single_dpm_table *golden_mclk_table =
			&(pi->golden_dpm_table.mclk_table);
	int value;

	value = (mclk_table->dpm_levels[mclk_table->count - 1].value -
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value) *
			100 /
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;

	return value;
}
static int ci_dpm_set_mclk_od(void *handle, uint32_t value)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *ps = ci_get_ps(adev->pm.dpm.requested_ps);
	struct ci_single_dpm_table *golden_mclk_table =
			&(pi->golden_dpm_table.mclk_table);

	if (value > 20)
		value = 20;

	ps->performance_levels[ps->performance_level_count - 1].mclk =
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value *
			value / 100 +
			golden_mclk_table->dpm_levels[golden_mclk_table->count - 1].value;

	return 0;
}
static int ci_dpm_read_sensor(void *handle, int idx,
			      void *value, int *size)
{
	u32 activity_percent = 50;
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* size must be at least 4 bytes for all sensors */
	if (*size < 4)
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		*((uint32_t *)value) = ci_get_average_sclk_freq(adev);
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		*((uint32_t *)value) = ci_get_average_mclk_freq(adev);
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_TEMP:
		*((uint32_t *)value) = ci_dpm_get_temp(adev);
		*size = 4;
		return 0;
	case AMDGPU_PP_SENSOR_GPU_LOAD:
		ret = ci_read_smc_soft_register(adev,
						offsetof(SMU7_SoftRegisters,
							 AverageGraphicsA),
						&activity_percent);
		if (ret == 0) {
			activity_percent += 0x80;
			activity_percent >>= 8;
			activity_percent =
				activity_percent > 100 ? 100 : activity_percent;
		}
		*((uint32_t *)value) = activity_percent;
		*size = 4;
		return 0;
	default:
		return -EINVAL;
	}
}
static int ci_set_powergating_by_smu(void *handle,
				     uint32_t block_type, bool gate)
{
	int ret = 0;

	switch (block_type) {
	case AMD_IP_BLOCK_TYPE_UVD:
		ci_dpm_powergate_uvd(handle, gate);
		break;
	default:
		break;
	}
	return ret;
}
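/*
 * Glue tables: ci_dpm_ip_funcs hooks this file into the amdgpu IP
 * block lifecycle, while ci_dpm_funcs exposes the powerplay API that
 * the rest of the driver reaches through adev->powerplay.pp_funcs
 * (set up in ci_dpm_early_init above).
 */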
static const struct amd_ip_funcs ci_dpm_ip_funcs = {
	.name = "ci_dpm",
	.early_init = ci_dpm_early_init,
	.late_init = ci_dpm_late_init,
	.sw_init = ci_dpm_sw_init,
	.sw_fini = ci_dpm_sw_fini,
	.hw_init = ci_dpm_hw_init,
	.hw_fini = ci_dpm_hw_fini,
	.suspend = ci_dpm_suspend,
	.resume = ci_dpm_resume,
	.is_idle = ci_dpm_is_idle,
	.wait_for_idle = ci_dpm_wait_for_idle,
	.soft_reset = ci_dpm_soft_reset,
	.set_clockgating_state = ci_dpm_set_clockgating_state,
	.set_powergating_state = ci_dpm_set_powergating_state,
};

const struct amdgpu_ip_block_version ci_smu_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.major = 7,
	.minor = 0,
	.rev = 0,
	.funcs = &ci_dpm_ip_funcs,
};
static const struct amd_pm_funcs ci_dpm_funcs = {
	.pre_set_power_state = &ci_dpm_pre_set_power_state,
	.set_power_state = &ci_dpm_set_power_state,
	.post_set_power_state = &ci_dpm_post_set_power_state,
	.display_configuration_changed = &ci_dpm_display_configuration_changed,
	.get_sclk = &ci_dpm_get_sclk,
	.get_mclk = &ci_dpm_get_mclk,
	.print_power_state = &ci_dpm_print_power_state,
	.debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
	.force_performance_level = &ci_dpm_force_performance_level,
	.vblank_too_short = &ci_dpm_vblank_too_short,
	.set_powergating_by_smu = &ci_set_powergating_by_smu,
	.set_fan_control_mode = &ci_dpm_set_fan_control_mode,
	.get_fan_control_mode = &ci_dpm_get_fan_control_mode,
	.set_fan_speed_percent = &ci_dpm_set_fan_speed_percent,
	.get_fan_speed_percent = &ci_dpm_get_fan_speed_percent,
	.print_clock_levels = ci_dpm_print_clock_levels,
	.force_clock_level = ci_dpm_force_clock_level,
	.get_sclk_od = ci_dpm_get_sclk_od,
	.set_sclk_od = ci_dpm_set_sclk_od,
	.get_mclk_od = ci_dpm_get_mclk_od,
	.set_mclk_od = ci_dpm_set_mclk_od,
	.check_state_equal = ci_check_state_equal,
	.get_vce_clock_state = amdgpu_get_vce_clock_state,
	.read_sensor = ci_dpm_read_sensor,
};
static const struct amdgpu_irq_src_funcs ci_dpm_irq_funcs = {
	.set = ci_dpm_set_interrupt_state,
	.process = ci_dpm_process_interrupt,
};

static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
	adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs