/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include "drmP.h"
#include "amdgpu.h"
#include "amdgpu_pm.h"
#include "amdgpu_ucode.h"
#include "cikd.h"
#include "amdgpu_dpm.h"
#include "ci_dpm.h"
#include "gfx_v7_0.h"
#include "atom.h"
#include <linux/seq_file.h>

#include "smu/smu_7_0_1_d.h"
#include "smu/smu_7_0_1_sh_mask.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

MODULE_FIRMWARE("radeon/bonaire_smc.bin");
MODULE_FIRMWARE("radeon/hawaii_smc.bin");

#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

#define SMC_RAM_END 0x40000

#define VOLTAGE_SCALE               4
#define VOLTAGE_VID_OFFSET_SCALE1    625
#define VOLTAGE_VID_OFFSET_SCALE2    100
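
/*
 * Per-SKU PowerTune defaults. The two trailing arrays feed the
 * BAPMTI_R / BAPMTI_RC coefficient tables consumed by
 * ci_populate_bapm_parameters_in_dpm_table().
 */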
static const struct ci_pt_defaults defaults_hawaii_xt = {
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0xB0000,
	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_hawaii_pro = {
	1, 0xF, 0xFD, 0x19, 5, 0x14, 0, 0x65062,
	{ 0x2E,  0x00,  0x00,  0x88,  0x00,  0x00,  0x72,  0x60,  0x51,  0xA7,  0x79,  0x6B,  0x90,  0xBD,  0x79  },
	{ 0x217, 0x217, 0x217, 0x242, 0x242, 0x242, 0x269, 0x269, 0x269, 0x2A1, 0x2A1, 0x2A1, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_bonaire_xt = {
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0xB0000,
	{ 0x79,  0x253, 0x25D, 0xAE,  0x72,  0x80,  0x83,  0x86,  0x6F,  0xC8,  0xC9,  0xC9,  0x2F,  0x4D,  0x61  },
	{ 0x17C, 0x172, 0x180, 0x1BC, 0x1B3, 0x1BD, 0x206, 0x200, 0x203, 0x25D, 0x25A, 0x255, 0x2C3, 0x2C5, 0x2B4 }
};

static const struct ci_pt_defaults defaults_bonaire_pro = {
	1, 0xF, 0xFD, 0x19, 5, 45, 0, 0x65062,
	{ 0x8C,  0x23F, 0x244, 0xA6,  0x83,  0x85,  0x86,  0x86,  0x83,  0xDB,  0xDB,  0xDA,  0x67,  0x60,  0x5F  },
	{ 0x187, 0x193, 0x193, 0x1C7, 0x1D1, 0x1D1, 0x210, 0x219, 0x219, 0x266, 0x26C, 0x26C, 0x2C9, 0x2CB, 0x2CB }
};

static const struct ci_pt_defaults defaults_saturn_xt = {
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x70000,
	{ 0x8C,  0x247, 0x249, 0xA6,  0x80,  0x81,  0x8B,  0x89,  0x86,  0xC9,  0xCA,  0xC9,  0x4D,  0x4D,  0x4D  },
	{ 0x187, 0x187, 0x187, 0x1C7, 0x1C7, 0x1C7, 0x210, 0x210, 0x210, 0x266, 0x266, 0x266, 0x2C9, 0x2C9, 0x2C9 }
};

static const struct ci_pt_defaults defaults_saturn_pro = {
	1, 0xF, 0xFD, 0x19, 5, 55, 0, 0x30000,
	{ 0x96,  0x21D, 0x23B, 0xA1,  0x85,  0x87,  0x83,  0x84,  0x81,  0xE6,  0xE6,  0xE6,  0x71,  0x6A,  0x6A  },
	{ 0x193, 0x19E, 0x19E, 0x1D2, 0x1DC, 0x1DC, 0x21A, 0x223, 0x223, 0x26E, 0x27E, 0x274, 0x2CF, 0x2D2, 0x2D2 }
};
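
/*
 * DIdT (di/dt throttling) setup: each entry is { offset, mask, shift,
 * value, type } and is applied by ci_program_pt_config_registers();
 * the list is terminated by an entry with offset 0xFFFFFFFF.
 */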
static const struct ci_pt_config_reg didt_config_ci[] = {
	{ 0x10, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x10, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x11, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x12, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x2, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x1, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x0, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x30, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x31, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x32, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x22, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x21, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x20, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x50, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x51, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x52, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x42, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x41, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x40, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x70, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x71, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x000000ff, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x0000ff00, 8, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0x00ff0000, 16, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x72, 0xff000000, 24, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x00003fff, 0, 0x4, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x03ff0000, 16, 0x80, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x62, 0x78000000, 27, 0x3, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0x0000ffff, 0, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x61, 0xffff0000, 16, 0x3FFF, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0x60, 0x00000001, 0, 0x0, CISLANDS_CONFIGREG_DIDT_IND },
	{ 0xFFFFFFFF }
};
static u8 ci_get_memory_module_index(struct amdgpu_device *adev)
{
	return (u8) ((RREG32(mmBIOS_SCRATCH_4) >> 16) & 0xff);
}
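/*
 * Copy the DRAM timing and burst-time parameters from one MC arbiter
 * register set to another, then request that the MC switch to the
 * destination set.
 */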
static int ci_copy_and_switch_arb_sets(struct amdgpu_device *adev,
				       u32 arb_freq_src, u32 arb_freq_dest)
{
	u32 mc_arb_dram_timing;
	u32 mc_arb_dram_timing2;
	u32 burst_time;
	u32 mc_cg_config;

	switch (arb_freq_src) {
	case MC_CG_ARB_FREQ_F0:
		mc_arb_dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING);
		mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
		burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK) >>
			MC_ARB_BURST_TIME__STATE0__SHIFT;
		break;
	case MC_CG_ARB_FREQ_F1:
		mc_arb_dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING_1);
		mc_arb_dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2_1);
		burst_time = (RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE1_MASK) >>
			MC_ARB_BURST_TIME__STATE1__SHIFT;
		break;
	default:
		return -EINVAL;
	}

	switch (arb_freq_dest) {
	case MC_CG_ARB_FREQ_F0:
		WREG32(mmMC_ARB_DRAM_TIMING, mc_arb_dram_timing);
		WREG32(mmMC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
		WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE0__SHIFT),
			 ~MC_ARB_BURST_TIME__STATE0_MASK);
		break;
	case MC_CG_ARB_FREQ_F1:
		WREG32(mmMC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
		WREG32(mmMC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
		WREG32_P(mmMC_ARB_BURST_TIME, (burst_time << MC_ARB_BURST_TIME__STATE1__SHIFT),
			 ~MC_ARB_BURST_TIME__STATE1_MASK);
		break;
	default:
		return -EINVAL;
	}

	mc_cg_config = RREG32(mmMC_CG_CONFIG) | 0x0000000F;
	WREG32(mmMC_CG_CONFIG, mc_cg_config);
	WREG32_P(mmMC_ARB_CG, (arb_freq_dest) << MC_ARB_CG__CG_ARB_REQ__SHIFT,
		 ~MC_ARB_CG__CG_ARB_REQ_MASK);

	return 0;
}
static u8 ci_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
{
	u8 mc_para_index;

	if (memory_clock < 10000)
		mc_para_index = 0;
	else if (memory_clock >= 80000)
		mc_para_index = 0x0f;
	else
		mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
	return mc_para_index;
}

static u8 ci_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
{
	u8 mc_para_index;

	if (strobe_mode) {
		if (memory_clock < 12500)
			mc_para_index = 0x00;
		else if (memory_clock > 47500)
			mc_para_index = 0x0f;
		else
			mc_para_index = (u8)((memory_clock - 10000) / 2500);
	} else {
		if (memory_clock < 65000)
			mc_para_index = 0x00;
		else if (memory_clock > 135000)
			mc_para_index = 0x0f;
		else
			mc_para_index = (u8)((memory_clock - 60000) / 5000);
	}
	return mc_para_index;
}
static void ci_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
						     u32 max_voltage_steps,
						     struct atom_voltage_table *voltage_table)
{
	unsigned int i, diff;

	if (voltage_table->count <= max_voltage_steps)
		return;

	diff = voltage_table->count - max_voltage_steps;

	for (i = 0; i < max_voltage_steps; i++)
		voltage_table->entries[i] = voltage_table->entries[i + diff];

	voltage_table->count = max_voltage_steps;
}

static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
					 struct atom_voltage_table_entry *voltage_table,
					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd);
static int ci_set_power_limit(struct amdgpu_device *adev, u32 n);
static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
				       u32 target_tdp);
static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate);
static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev);
static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev);

static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
							     PPSMC_Msg msg, u32 parameter);
static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev);
static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev);
static struct ci_power_info *ci_get_pi(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = adev->pm.dpm.priv;

	return pi;
}

static struct ci_ps *ci_get_ps(struct amdgpu_ps *rps)
{
	struct ci_ps *ps = rps->ps_priv;

	return ps;
}
static void ci_initialize_powertune_defaults(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	/* defaults keyed off the PCI device ID (IDs per the radeon ci_dpm table) */
	switch (adev->pdev->device) {
	case 0x6649:
	case 0x6650:
	case 0x6651:
	case 0x6658:
	case 0x665C:
	case 0x665D:
	default:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	case 0x6640:
	case 0x6641:
	case 0x6646:
	case 0x6647:
		pi->powertune_defaults = &defaults_saturn_xt;
		break;
	case 0x67B8:
	case 0x67B0:
		pi->powertune_defaults = &defaults_hawaii_xt;
		break;
	case 0x67BA:
	case 0x67B1:
		pi->powertune_defaults = &defaults_hawaii_pro;
		break;
	case 0x67A0:
	case 0x67A1:
	case 0x67A2:
	case 0x67A8:
	case 0x67A9:
	case 0x67AA:
	case 0x67B9:
	case 0x67BE:
		pi->powertune_defaults = &defaults_bonaire_xt;
		break;
	}

	pi->dte_tj_offset = 0;

	pi->caps_power_containment = true;
	pi->caps_cac = false;
	pi->caps_sq_ramping = false;
	pi->caps_db_ramping = false;
	pi->caps_td_ramping = false;
	pi->caps_tcp_ramping = false;

	if (pi->caps_power_containment) {
		pi->caps_cac = true;
		if (adev->asic_type == CHIP_HAWAII)
			pi->enable_bapm_feature = false;
		else
			pi->enable_bapm_feature = true;
		pi->enable_tdc_limit_feature = true;
		pi->enable_pkg_pwr_tracking_feature = true;
	}
}
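
/*
 * SVI2 VID encoding: VID = (1550 mV - voltage) / 6.25 mV, done in
 * integer math as (6200 - vddc * 4) / 25 (assuming vddc is in mV).
 */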
static u8 ci_convert_to_vid(u16 vddc)
{
	return (6200 - (vddc * VOLTAGE_SCALE)) / 25;
}
static int ci_populate_bapm_vddc_vid_sidd(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	u8 *hi2_vid = pi->smc_powertune_table.BapmVddCVidHiSidd2;
	u32 i;

	if (adev->pm.dpm.dyn_state.cac_leakage_table.entries == NULL)
		return -EINVAL;
	if (adev->pm.dpm.dyn_state.cac_leakage_table.count > 8)
		return -EINVAL;
	if (adev->pm.dpm.dyn_state.cac_leakage_table.count !=
	    adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count)
		return -EINVAL;

	for (i = 0; i < adev->pm.dpm.dyn_state.cac_leakage_table.count; i++) {
		if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
			lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1);
			hi_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2);
			hi2_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3);
		} else {
			lo_vid[i] = ci_convert_to_vid(adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc);
			hi_vid[i] = ci_convert_to_vid((u16)adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage);
		}
	}

	return 0;
}

static int ci_populate_vddc_vid(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u8 *vid = pi->smc_powertune_table.VddCVid;
	u32 i;

	if (pi->vddc_voltage_table.count > 8)
		return -EINVAL;

	for (i = 0; i < pi->vddc_voltage_table.count; i++)
		vid[i] = ci_convert_to_vid(pi->vddc_voltage_table.entries[i].value);

	return 0;
}

static int ci_populate_svi_load_line(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;

	pi->smc_powertune_table.SviLoadLineEn = pt_defaults->svi_load_line_en;
	pi->smc_powertune_table.SviLoadLineVddC = pt_defaults->svi_load_line_vddc;
	pi->smc_powertune_table.SviLoadLineTrimVddC = 3;
	pi->smc_powertune_table.SviLoadLineOffsetVddC = 0;

	return 0;
}
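
/*
 * The SMC takes its TDC/TDP limits in 8.8 fixed point, hence the
 * * 256 scaling of the VBIOS cac_tdp_table values below.
 */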
static int ci_populate_tdc_limit(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	u16 tdc_limit;

	tdc_limit = adev->pm.dpm.dyn_state.cac_tdp_table->tdc * 256;
	pi->smc_powertune_table.TDC_VDDC_PkgLimit = cpu_to_be16(tdc_limit);
	pi->smc_powertune_table.TDC_VDDC_ThrottleReleaseLimitPerc =
		pt_defaults->tdc_vddc_throttle_release_limit_perc;
	pi->smc_powertune_table.TDC_MAWt = pt_defaults->tdc_mawt;

	return 0;
}

static int ci_populate_dw8(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	int ret;

	ret = amdgpu_ci_read_smc_sram_dword(adev,
					    SMU7_FIRMWARE_HEADER_LOCATION +
					    offsetof(SMU7_Firmware_Header, PmFuseTable) +
					    offsetof(SMU7_Discrete_PmFuses, TdcWaterfallCtl),
					    (u32 *)&pi->smc_powertune_table.TdcWaterfallCtl,
					    pi->sram_end);
	if (ret)
		return -EINVAL;
	else
		pi->smc_powertune_table.TdcWaterfallCtl = pt_defaults->tdc_waterfall_ctl;

	return 0;
}

static int ci_populate_fuzzy_fan(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if ((adev->pm.dpm.fan.fan_output_sensitivity & (1 << 15)) ||
	    (adev->pm.dpm.fan.fan_output_sensitivity == 0))
		adev->pm.dpm.fan.fan_output_sensitivity =
			adev->pm.dpm.fan.default_fan_output_sensitivity;

	pi->smc_powertune_table.FuzzyFan_PwmSetDelta =
		cpu_to_be16(adev->pm.dpm.fan.fan_output_sensitivity);

	return 0;
}

static int ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u8 *hi_vid = pi->smc_powertune_table.BapmVddCVidHiSidd;
	u8 *lo_vid = pi->smc_powertune_table.BapmVddCVidLoSidd;
	int i, min, max;

	min = max = hi_vid[0];
	for (i = 0; i < 8; i++) {
		if (0 != hi_vid[i]) {
			if (min > hi_vid[i])
				min = hi_vid[i];
			if (max < hi_vid[i])
				max = hi_vid[i];
		}

		if (0 != lo_vid[i]) {
			if (min > lo_vid[i])
				min = lo_vid[i];
			if (max < lo_vid[i])
				max = lo_vid[i];
		}
	}

	if ((min == 0) || (max == 0))
		return -EINVAL;
	pi->smc_powertune_table.GnbLPMLMaxVid = (u8)max;
	pi->smc_powertune_table.GnbLPMLMinVid = (u8)min;

	return 0;
}

static int ci_populate_bapm_vddc_base_leakage_sidd(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u16 hi_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd;
	u16 lo_sidd = pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd;
	struct amdgpu_cac_tdp_table *cac_tdp_table =
		adev->pm.dpm.dyn_state.cac_tdp_table;

	hi_sidd = cac_tdp_table->high_cac_leakage / 100 * 256;
	lo_sidd = cac_tdp_table->low_cac_leakage / 100 * 256;

	pi->smc_powertune_table.BapmVddCBaseLeakageHiSidd = cpu_to_be16(hi_sidd);
	pi->smc_powertune_table.BapmVddCBaseLeakageLoSidd = cpu_to_be16(lo_sidd);

	return 0;
}

static int ci_populate_bapm_parameters_in_dpm_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct ci_pt_defaults *pt_defaults = pi->powertune_defaults;
	SMU7_Discrete_DpmTable  *dpm_table = &pi->smc_state_table;
	struct amdgpu_cac_tdp_table *cac_tdp_table =
		adev->pm.dpm.dyn_state.cac_tdp_table;
	struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
	const u16 *def1, *def2;
	int i, j, k;

	dpm_table->DefaultTdp = cac_tdp_table->tdp * 256;
	dpm_table->TargetTdp = cac_tdp_table->configurable_tdp * 256;

	dpm_table->DTETjOffset = (u8)pi->dte_tj_offset;
	dpm_table->GpuTjMax =
		(u8)(pi->thermal_temp_setting.temperature_high / 1000);
	dpm_table->GpuTjHyst = 8;

	dpm_table->DTEAmbientTempBase = pt_defaults->dte_ambient_temp_base;

	if (ppm) {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16((u16)ppm->dgpu_tdp * 256 / 1000);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16((u16)ppm->tj_max * 256);
	} else {
		dpm_table->PPM_PkgPwrLimit = cpu_to_be16(0);
		dpm_table->PPM_TemperatureLimit = cpu_to_be16(0);
	}

	dpm_table->BAPM_TEMP_GRADIENT = cpu_to_be32(pt_defaults->bapm_temp_gradient);
	def1 = pt_defaults->bapmti_r;
	def2 = pt_defaults->bapmti_rc;

	for (i = 0; i < SMU7_DTE_ITERATIONS; i++) {
		for (j = 0; j < SMU7_DTE_SOURCES; j++) {
			for (k = 0; k < SMU7_DTE_SINKS; k++) {
				dpm_table->BAPMTI_R[i][j][k] = cpu_to_be16(*def1);
				dpm_table->BAPMTI_RC[i][j][k] = cpu_to_be16(*def2);
				def1++;
				def2++;
			}
		}
	}

	return 0;
}

static int ci_populate_pm_base(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 pm_fuse_table_offset;
	int ret;

	if (pi->caps_power_containment) {
		ret = amdgpu_ci_read_smc_sram_dword(adev,
						    SMU7_FIRMWARE_HEADER_LOCATION +
						    offsetof(SMU7_Firmware_Header, PmFuseTable),
						    &pm_fuse_table_offset, pi->sram_end);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_vid_sidd(adev);
		if (ret)
			return ret;
		ret = ci_populate_vddc_vid(adev);
		if (ret)
			return ret;
		ret = ci_populate_svi_load_line(adev);
		if (ret)
			return ret;
		ret = ci_populate_tdc_limit(adev);
		if (ret)
			return ret;
		ret = ci_populate_dw8(adev);
		if (ret)
			return ret;
		ret = ci_populate_fuzzy_fan(adev);
		if (ret)
			return ret;
		ret = ci_min_max_v_gnbl_pm_lid_from_bapm_vddc(adev);
		if (ret)
			return ret;
		ret = ci_populate_bapm_vddc_base_leakage_sidd(adev);
		if (ret)
			return ret;
		ret = amdgpu_ci_copy_bytes_to_smc(adev, pm_fuse_table_offset,
						  (u8 *)&pi->smc_powertune_table,
						  sizeof(SMU7_Discrete_PmFuses), pi->sram_end);
		if (ret)
			return ret;
	}

	return 0;
}
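
/*
 * Toggle the DIDT_CTRL_EN bit for each shader block (SQ/DB/TD/TCP)
 * whose ramping feature is enabled.
 */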
static void ci_do_enable_didt(struct amdgpu_device *adev, const bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 data;

	if (pi->caps_sq_ramping) {
		data = RREG32_DIDT(ixDIDT_SQ_CTRL0);
		if (enable)
			data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_SQ_CTRL0, data);
	}

	if (pi->caps_db_ramping) {
		data = RREG32_DIDT(ixDIDT_DB_CTRL0);
		if (enable)
			data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_DB_CTRL0, data);
	}

	if (pi->caps_td_ramping) {
		data = RREG32_DIDT(ixDIDT_TD_CTRL0);
		if (enable)
			data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_TD_CTRL0, data);
	}

	if (pi->caps_tcp_ramping) {
		data = RREG32_DIDT(ixDIDT_TCP_CTRL0);
		if (enable)
			data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
		else
			data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK;
		WREG32_DIDT(ixDIDT_TCP_CTRL0, data);
	}
}

static int ci_program_pt_config_registers(struct amdgpu_device *adev,
					  const struct ci_pt_config_reg *cac_config_regs)
{
	const struct ci_pt_config_reg *config_regs = cac_config_regs;
	u32 data;
	u32 cache = 0;

	if (config_regs == NULL)
		return -EINVAL;

	while (config_regs->offset != 0xFFFFFFFF) {
		if (config_regs->type == CISLANDS_CONFIGREG_CACHE) {
			cache |= ((config_regs->value << config_regs->shift) & config_regs->mask);
		} else {
			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				data = RREG32_SMC(config_regs->offset);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				data = RREG32_DIDT(config_regs->offset);
				break;
			default:
				data = RREG32(config_regs->offset);
				break;
			}

			data &= ~config_regs->mask;
			data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
			data |= cache;

			switch (config_regs->type) {
			case CISLANDS_CONFIGREG_SMC_IND:
				WREG32_SMC(config_regs->offset, data);
				break;
			case CISLANDS_CONFIGREG_DIDT_IND:
				WREG32_DIDT(config_regs->offset, data);
				break;
			default:
				WREG32(config_regs->offset, data);
				break;
			}
			cache = 0;
		}
		config_regs++;
	}
	return 0;
}
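
/*
 * DIdT registers may only be touched while the RLC is in safe mode,
 * so programming and enabling are bracketed by the
 * gfx_v7_0_enter/exit_rlc_safe_mode() calls below.
 */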
static int ci_enable_didt(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret;

	if (pi->caps_sq_ramping || pi->caps_db_ramping ||
	    pi->caps_td_ramping || pi->caps_tcp_ramping) {
		gfx_v7_0_enter_rlc_safe_mode(adev);

		if (enable) {
			ret = ci_program_pt_config_registers(adev, didt_config_ci);
			if (ret) {
				gfx_v7_0_exit_rlc_safe_mode(adev);
				return ret;
			}
		}

		ci_do_enable_didt(adev, enable);

		gfx_v7_0_exit_rlc_safe_mode(adev);
	}

	return 0;
}

static int ci_enable_power_containment(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (enable) {
		pi->power_containment_features = 0;
		if (pi->caps_power_containment) {
			if (pi->enable_bapm_feature) {
				smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_BAPM;
			}

			if (pi->enable_tdc_limit_feature) {
				smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitEnable);
				if (smc_result != PPSMC_Result_OK)
					ret = -EINVAL;
				else
					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_TDCLimit;
			}

			if (pi->enable_pkg_pwr_tracking_feature) {
				smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitEnable);
				if (smc_result != PPSMC_Result_OK) {
					ret = -EINVAL;
				} else {
					struct amdgpu_cac_tdp_table *cac_tdp_table =
						adev->pm.dpm.dyn_state.cac_tdp_table;
					u32 default_pwr_limit =
						(u32)(cac_tdp_table->maximum_power_delivery_limit * 256);

					pi->power_containment_features |= POWERCONTAINMENT_FEATURE_PkgPwrLimit;

					ci_set_power_limit(adev, default_pwr_limit);
				}
			}
		}
	} else {
		if (pi->caps_power_containment && pi->power_containment_features) {
			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_TDCLimit)
				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_TDCLimitDisable);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_BAPM)
				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);

			if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit)
				amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PkgPwrLimitDisable);
			pi->power_containment_features = 0;
		}
	}

	return ret;
}

static int ci_enable_smc_cac(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;
	int ret = 0;

	if (pi->caps_cac) {
		if (enable) {
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
			if (smc_result != PPSMC_Result_OK) {
				ret = -EINVAL;
				pi->cac_enabled = false;
			} else {
				pi->cac_enabled = true;
			}
		} else if (pi->cac_enabled) {
			amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
			pi->cac_enabled = false;
		}
	}

	return ret;
}

static int ci_enable_thermal_based_sclk_dpm(struct amdgpu_device *adev,
					    bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result = PPSMC_Result_OK;

	if (pi->thermal_sclk_dpm_enabled) {
		if (enable)
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ENABLE_THERMAL_DPM);
		else
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DISABLE_THERMAL_DPM);
	}

	if (smc_result == PPSMC_Result_OK)
		return 0;
	else
		return -EINVAL;
}
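
/*
 * Scale the configurable TDP by the user's tdp_adjustment percentage
 * and hand the result to the SMC as the new OverDrive target.
 */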
static int ci_power_control_set_level(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_cac_tdp_table *cac_tdp_table =
		adev->pm.dpm.dyn_state.cac_tdp_table;
	s32 adjust_percent;
	s32 target_tdp;
	int ret = 0;
	bool adjust_polarity = false; /* ??? */

	if (pi->caps_power_containment) {
		adjust_percent = adjust_polarity ?
			adev->pm.dpm.tdp_adjustment : (-1 * adev->pm.dpm.tdp_adjustment);
		target_tdp = ((100 + adjust_percent) *
			      (s32)cac_tdp_table->configurable_tdp) / 100;

		ret = ci_set_overdrive_target_tdp(adev, (u32)target_tdp);
	}

	return ret;
}

static void ci_dpm_powergate_uvd(struct amdgpu_device *adev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if (pi->uvd_power_gated == gate)
		return;

	pi->uvd_power_gated = gate;

	ci_update_uvd_dpm(adev, gate);
}
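
/*
 * An MCLK switch has to complete within the vblank window; require at
 * least 450 us of vblank for GDDR5 (300 us otherwise) before allowing it.
 */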
static bool ci_dpm_vblank_too_short(struct amdgpu_device *adev)
{
	u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
	u32 switch_limit = adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 300;

	if (vblank_time < switch_limit)
		return true;
	else
		return false;
}
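
/*
 * Clamp the requested power state against the AC/DC limits, pin MCLK
 * high when MCLK switching is unsafe (multiple active CRTCs or a short
 * vblank), and raise the clocks to the active VCE state's requirements.
 */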
static void ci_apply_state_adjust_rules(struct amdgpu_device *adev,
					struct amdgpu_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching;
	u32 sclk, mclk;
	int i;

	if (rps->vce_active) {
		rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
		rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
	} else {
		rps->evclk = 0;
		rps->ecclk = 0;
	}

	if ((adev->pm.dpm.new_active_crtc_count > 1) ||
	    ci_dpm_vblank_too_short(adev))
		disable_mclk_switching = true;
	else
		disable_mclk_switching = false;

	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY)
		pi->battery_state = true;
	else
		pi->battery_state = false;

	if (adev->pm.dpm.ac_power)
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (adev->pm.dpm.ac_power == false) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
		}
	}

	/* XXX validate the min clocks required for display */

	if (disable_mclk_switching) {
		mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
		sclk = ps->performance_levels[0].sclk;
	} else {
		mclk = ps->performance_levels[0].mclk;
		sclk = ps->performance_levels[0].sclk;
	}

	if (rps->vce_active) {
		if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
			sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
		if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
			mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
	}

	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;

	if (ps->performance_levels[1].sclk < ps->performance_levels[0].sclk)
		ps->performance_levels[1].sclk = ps->performance_levels[0].sclk;

	if (disable_mclk_switching) {
		if (ps->performance_levels[0].mclk < ps->performance_levels[1].mclk)
			ps->performance_levels[0].mclk = ps->performance_levels[1].mclk;
	} else {
		if (ps->performance_levels[1].mclk < ps->performance_levels[0].mclk)
			ps->performance_levels[1].mclk = ps->performance_levels[0].mclk;
	}
}
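
/*
 * min_temp and max_temp are in millidegrees Celsius; the DIG_THERM
 * interrupt thresholds are programmed in whole degrees.
 */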
static int ci_thermal_set_temperature_range(struct amdgpu_device *adev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;
	u32 tmp;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	tmp = RREG32_SMC(ixCG_THERMAL_INT);
	tmp &= ~(CG_THERMAL_INT__DIG_THERM_INTH_MASK | CG_THERMAL_INT__DIG_THERM_INTL_MASK);
	tmp |= ((high_temp / 1000) << CG_THERMAL_INT__DIG_THERM_INTH__SHIFT) |
		((low_temp / 1000)) << CG_THERMAL_INT__DIG_THERM_INTL__SHIFT;
	WREG32_SMC(ixCG_THERMAL_INT, tmp);

#if 0
	/* XXX: need to figure out how to handle this properly */
	tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
	tmp &= DIG_THERM_DPM_MASK;
	tmp |= DIG_THERM_DPM(high_temp / 1000);
	WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
#endif

	adev->pm.dpm.thermal.min_temp = low_temp;
	adev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

static int ci_thermal_enable_alert(struct amdgpu_device *adev,
				   bool enable)
{
	u32 thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
	PPSMC_Result result;

	if (enable) {
		thermal_int &= ~(CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
				 CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK);
		WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
		result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Enable);
		if (result != PPSMC_Result_OK) {
			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
			return -EINVAL;
		}
	} else {
		thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK |
			CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
		WREG32_SMC(ixCG_THERMAL_INT, thermal_int);
		result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Thermal_Cntl_Disable);
		if (result != PPSMC_Result_OK) {
			DRM_DEBUG_KMS("Could not disable thermal interrupts.\n");
			return -EINVAL;
		}
	}

	return 0;
}

static void ci_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;

	if (pi->fan_ctrl_is_in_default_mode) {
		tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK)
			>> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
		pi->fan_ctrl_default_mode = tmp;
		tmp = (RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__TMIN_MASK)
			>> CG_FDO_CTRL2__TMIN__SHIFT;
		pi->t_min = tmp;
		pi->fan_ctrl_is_in_default_mode = false;
	}

	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
	tmp |= 0 << CG_FDO_CTRL2__TMIN__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL2, tmp);

	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
	tmp |= mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL2, tmp);
}
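
/*
 * VBIOS fan-table temperatures are in units of 0.01 C; adding 50 before
 * dividing by 100 rounds them to the nearest degree for the SMC.
 */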
static int ci_thermal_setup_fan_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	SMU7_Discrete_FanTable fan_table = { FDO_MODE_HARDWARE };
	u32 duty100;
	u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
	u16 fdo_min, slope1, slope2;
	u32 reference_clock, tmp;
	int ret;
	u64 tmp64;

	if (!pi->fan_table_start) {
		adev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
		>> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;

	if (duty100 == 0) {
		adev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
	do_div(tmp64, 10000);
	fdo_min = (u16)tmp64;

	t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
	t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;

	pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
	pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;

	slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

	fan_table.TempMin = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
	fan_table.TempMed = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
	fan_table.TempMax = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);

	fan_table.Slope1 = cpu_to_be16(slope1);
	fan_table.Slope2 = cpu_to_be16(slope2);

	fan_table.FdoMin = cpu_to_be16(fdo_min);

	fan_table.HystDown = cpu_to_be16(adev->pm.dpm.fan.t_hyst);

	fan_table.HystUp = cpu_to_be16(1);

	fan_table.HystSlope = cpu_to_be16(1);

	fan_table.TempRespLim = cpu_to_be16(5);

	reference_clock = amdgpu_asic_get_xclk(adev);

	fan_table.RefreshPeriod = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
					       reference_clock) / 1600);

	fan_table.FdoMax = cpu_to_be16((u16)duty100);

	tmp = (RREG32_SMC(ixCG_MULT_THERMAL_CTRL) & CG_MULT_THERMAL_CTRL__TEMP_SEL_MASK)
		>> CG_MULT_THERMAL_CTRL__TEMP_SEL__SHIFT;
	fan_table.TempSrc = (uint8_t)tmp;

	ret = amdgpu_ci_copy_bytes_to_smc(adev,
					  pi->fan_table_start,
					  (u8 *)(&fan_table),
					  sizeof(fan_table),
					  pi->sram_end);

	if (ret) {
		DRM_ERROR("Failed to load fan table to the SMC.");
		adev->pm.dpm.fan.ucode_fan_control = false;
	}

	return 0;
}
static int ci_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result ret;

	if (pi->caps_od_fuzzy_fan_control_support) {
		ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							       PPSMC_StartFanControl,
							       FAN_CONTROL_FUZZY);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
		ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							       PPSMC_MSG_SetFanPwmMax,
							       adev->pm.dpm.fan.default_max_fan_pwm);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
	} else {
		ret = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							       PPSMC_StartFanControl,
							       FAN_CONTROL_TABLE);
		if (ret != PPSMC_Result_OK)
			return -EINVAL;
	}

	pi->fan_is_controlled_by_smc = true;
	return 0;
}

static int ci_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
{
	PPSMC_Result ret;
	struct ci_power_info *pi = ci_get_pi(adev);

	ret = amdgpu_ci_send_msg_to_smc(adev, PPSMC_StopFanControl);
	if (ret == PPSMC_Result_OK) {
		pi->fan_is_controlled_by_smc = false;
		return 0;
	} else {
		return -EINVAL;
	}
}

static int ci_dpm_get_fan_speed_percent(struct amdgpu_device *adev,
					u32 *speed)
{
	u32 duty, duty100;
	u64 tmp64;

	if (adev->pm.no_fan)
		return -ENOENT;

	duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
		>> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;
	duty = (RREG32_SMC(ixCG_THERMAL_STATUS) & CG_THERMAL_STATUS__FDO_PWM_DUTY_MASK)
		>> CG_THERMAL_STATUS__FDO_PWM_DUTY__SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	tmp64 = (u64)duty * 100;
	do_div(tmp64, duty100);
	*speed = (u32)tmp64;

	if (*speed > 100)
		*speed = 100;

	return 0;
}

static int ci_dpm_set_fan_speed_percent(struct amdgpu_device *adev,
					u32 speed)
{
	u32 tmp;
	u32 duty, duty100;
	u64 tmp64;
	struct ci_power_info *pi = ci_get_pi(adev);

	if (adev->pm.no_fan)
		return -ENOENT;

	if (pi->fan_is_controlled_by_smc)
		return -EINVAL;

	if (speed > 100)
		return -EINVAL;

	duty100 = (RREG32_SMC(ixCG_FDO_CTRL1) & CG_FDO_CTRL1__FMAX_DUTY100_MASK)
		>> CG_FDO_CTRL1__FMAX_DUTY100__SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	tmp64 = (u64)speed * duty100;
	do_div(tmp64, 100);
	duty = (u32)tmp64;

	tmp = RREG32_SMC(ixCG_FDO_CTRL0) & ~CG_FDO_CTRL0__FDO_STATIC_DUTY_MASK;
	tmp |= duty << CG_FDO_CTRL0__FDO_STATIC_DUTY__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL0, tmp);

	return 0;
}

static void ci_dpm_set_fan_control_mode(struct amdgpu_device *adev, u32 mode)
{
	if (mode) {
		/* stop auto-manage */
		if (adev->pm.dpm.fan.ucode_fan_control)
			ci_fan_ctrl_stop_smc_fan_control(adev);
		ci_fan_ctrl_set_static_mode(adev, mode);
	} else {
		/* restart auto-manage */
		if (adev->pm.dpm.fan.ucode_fan_control)
			ci_thermal_start_smc_fan_control(adev);
		else
			ci_fan_ctrl_set_default_mode(adev);
	}
}

static u32 ci_dpm_get_fan_control_mode(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;

	if (pi->fan_is_controlled_by_smc)
		return 0;

	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
	return (tmp >> CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT);
}

static int ci_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
					 u32 *speed)
{
	u32 tach_period;
	u32 xclk = amdgpu_asic_get_xclk(adev);

	if (adev->pm.no_fan)
		return -ENOENT;

	if (adev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	tach_period = (RREG32_SMC(ixCG_TACH_STATUS) & CG_TACH_STATUS__TACH_PERIOD_MASK)
		>> CG_TACH_STATUS__TACH_PERIOD__SHIFT;
	if (tach_period == 0)
		return -ENOENT;

	*speed = 60 * xclk * 10000 / tach_period;

	return 0;
}
static int ci_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
					 u32 speed)
{
	u32 tach_period, tmp;
	u32 xclk = amdgpu_asic_get_xclk(adev);

	if (adev->pm.no_fan)
		return -ENOENT;

	if (adev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	if ((speed < adev->pm.fan_min_rpm) ||
	    (speed > adev->pm.fan_max_rpm))
		return -EINVAL;

	if (adev->pm.dpm.fan.ucode_fan_control)
		ci_fan_ctrl_stop_smc_fan_control(adev);

	tach_period = 60 * xclk * 10000 / (8 * speed);
	tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__TARGET_PERIOD_MASK;
	tmp |= tach_period << CG_TACH_CTRL__TARGET_PERIOD__SHIFT;
	WREG32_SMC(ixCG_TACH_CTRL, tmp);

	ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);

	return 0;
}
static void ci_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;

	if (!pi->fan_ctrl_is_in_default_mode) {
		tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__FDO_PWM_MODE_MASK;
		tmp |= pi->fan_ctrl_default_mode << CG_FDO_CTRL2__FDO_PWM_MODE__SHIFT;
		WREG32_SMC(ixCG_FDO_CTRL2, tmp);

		tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TMIN_MASK;
		tmp |= pi->t_min << CG_FDO_CTRL2__TMIN__SHIFT;
		WREG32_SMC(ixCG_FDO_CTRL2, tmp);
		pi->fan_ctrl_is_in_default_mode = true;
	}
}

static void ci_thermal_start_smc_fan_control(struct amdgpu_device *adev)
{
	if (adev->pm.dpm.fan.ucode_fan_control) {
		ci_fan_ctrl_start_smc_fan_control(adev);
		ci_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
	}
}

static void ci_thermal_initialize(struct amdgpu_device *adev)
{
	u32 tmp;

	if (adev->pm.fan_pulses_per_revolution) {
		tmp = RREG32_SMC(ixCG_TACH_CTRL) & ~CG_TACH_CTRL__EDGE_PER_REV_MASK;
		tmp |= (adev->pm.fan_pulses_per_revolution - 1)
			<< CG_TACH_CTRL__EDGE_PER_REV__SHIFT;
		WREG32_SMC(ixCG_TACH_CTRL, tmp);
	}

	tmp = RREG32_SMC(ixCG_FDO_CTRL2) & ~CG_FDO_CTRL2__TACH_PWM_RESP_RATE_MASK;
	tmp |= 0x28 << CG_FDO_CTRL2__TACH_PWM_RESP_RATE__SHIFT;
	WREG32_SMC(ixCG_FDO_CTRL2, tmp);
}

static int ci_thermal_start_thermal_controller(struct amdgpu_device *adev)
{
	int ret;

	ci_thermal_initialize(adev);
	ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN, CISLANDS_TEMP_RANGE_MAX);
	if (ret)
		return ret;
	ret = ci_thermal_enable_alert(adev, true);
	if (ret)
		return ret;
	if (adev->pm.dpm.fan.ucode_fan_control) {
		ret = ci_thermal_setup_fan_table(adev);
		if (ret)
			return ret;
		ci_thermal_start_smc_fan_control(adev);
	}

	return 0;
}

static void ci_thermal_stop_thermal_controller(struct amdgpu_device *adev)
{
	if (!adev->pm.no_fan)
		ci_fan_ctrl_set_default_mode(adev);
}

static int ci_read_smc_soft_register(struct amdgpu_device *adev,
				     u16 reg_offset, u32 *value)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	return amdgpu_ci_read_smc_sram_dword(adev,
					     pi->soft_regs_start + reg_offset,
					     value, pi->sram_end);
}

static int ci_write_smc_soft_register(struct amdgpu_device *adev,
				      u16 reg_offset, u32 value)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	return amdgpu_ci_write_smc_sram_dword(adev,
					      pi->soft_regs_start + reg_offset,
					      value, pi->sram_end);
}

static void ci_init_fps_limits(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	SMU7_Discrete_DpmTable *table = &pi->smc_state_table;

	if (pi->caps_fps) {
		u16 tmp;

		tmp = 45;
		table->FpsHighT = cpu_to_be16(tmp);

		tmp = 30;
		table->FpsLowT = cpu_to_be16(tmp);
	}
}

static int ci_update_sclk_t(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret = 0;
	u32 low_sclk_interrupt_t = 0;

	if (pi->caps_sclk_throttle_low_notification) {
		low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t);

		ret = amdgpu_ci_copy_bytes_to_smc(adev,
						  pi->dpm_table_start +
						  offsetof(SMU7_Discrete_DpmTable, LowSclkInterruptT),
						  (u8 *)&low_sclk_interrupt_t,
						  sizeof(u32), pi->sram_end);
	}

	return ret;
}
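
/*
 * Build the map from virtual (leakage) voltage IDs to real voltages,
 * using EVV when the platform supports it and the VBIOS leakage
 * tables otherwise.
 */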
static void ci_get_leakage_voltages(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u16 leakage_id, virtual_voltage_id;
	u16 vddc, vddci;
	int i;

	pi->vddc_leakage.count = 0;
	pi->vddci_leakage.count = 0;

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (amdgpu_atombios_get_voltage_evv(adev, virtual_voltage_id, &vddc) != 0)
				continue;
			if (vddc != 0 && vddc != virtual_voltage_id) {
				pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
				pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
				pi->vddc_leakage.count++;
			}
		}
	} else if (amdgpu_atombios_get_leakage_id_from_vbios(adev, &leakage_id) == 0) {
		for (i = 0; i < CISLANDS_MAX_LEAKAGE_COUNT; i++) {
			virtual_voltage_id = ATOM_VIRTUAL_VOLTAGE_ID0 + i;
			if (amdgpu_atombios_get_leakage_vddc_based_on_leakage_params(adev, &vddc, &vddci,
										     virtual_voltage_id,
										     leakage_id) == 0) {
				if (vddc != 0 && vddc != virtual_voltage_id) {
					pi->vddc_leakage.actual_voltage[pi->vddc_leakage.count] = vddc;
					pi->vddc_leakage.leakage_id[pi->vddc_leakage.count] = virtual_voltage_id;
					pi->vddc_leakage.count++;
				}
				if (vddci != 0 && vddci != virtual_voltage_id) {
					pi->vddci_leakage.actual_voltage[pi->vddci_leakage.count] = vddci;
					pi->vddci_leakage.leakage_id[pi->vddci_leakage.count] = virtual_voltage_id;
					pi->vddci_leakage.count++;
				}
			}
		}
	}
}
static void ci_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	bool want_thermal_protection;
	enum amdgpu_dpm_event_src dpm_event_src;
	u32 tmp;

	switch (sources) {
	case 0:
	default:
		want_thermal_protection = false;
		break;
	case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
		want_thermal_protection = true;
		dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
		break;
	case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
		want_thermal_protection = true;
		dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
		break;
	case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
	      (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
		want_thermal_protection = true;
		dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
		break;
	}

	if (want_thermal_protection) {
#if 0
		/* XXX: need to figure out how to handle this properly */
		tmp = RREG32_SMC(ixCG_THERMAL_CTRL);
		tmp &= DPM_EVENT_SRC_MASK;
		tmp |= DPM_EVENT_SRC(dpm_event_src);
		WREG32_SMC(ixCG_THERMAL_CTRL, tmp);
#endif

		tmp = RREG32_SMC(ixGENERAL_PWRMGT);
		if (pi->thermal_protection)
			tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
		else
			tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
		WREG32_SMC(ixGENERAL_PWRMGT, tmp);
	} else {
		tmp = RREG32_SMC(ixGENERAL_PWRMGT);
		tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
		WREG32_SMC(ixGENERAL_PWRMGT, tmp);
	}
}

static void ci_enable_auto_throttle_source(struct amdgpu_device *adev,
					   enum amdgpu_dpm_auto_throttle_src source,
					   bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if (enable) {
		if (!(pi->active_auto_throttle_sources & (1 << source))) {
			pi->active_auto_throttle_sources |= 1 << source;
			ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
		}
	} else {
		if (pi->active_auto_throttle_sources & (1 << source)) {
			pi->active_auto_throttle_sources &= ~(1 << source);
			ci_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
		}
	}
}

static void ci_enable_vr_hot_gpio_interrupt(struct amdgpu_device *adev)
{
	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT)
		amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableVRHotGPIOInterrupt);
}

static int ci_unfreeze_sclk_mclk_dpm(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_UnfreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	pi->need_update_smu7_dpm_table = 0;
	return 0;
}

static int ci_enable_sclk_mclk_dpm(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;

	if (enable) {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Enable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;

			WREG32_P(mmMC_SEQ_CNTL_3, MC_SEQ_CNTL_3__CAC_EN_MASK,
				 ~MC_SEQ_CNTL_3__CAC_EN_MASK);

			WREG32_SMC(ixLCAC_MC0_CNTL, 0x05);
			WREG32_SMC(ixLCAC_MC1_CNTL, 0x05);
			WREG32_SMC(ixLCAC_CPL_CNTL, 0x100005);

			udelay(5);

			WREG32_SMC(ixLCAC_MC0_CNTL, 0x400005);
			WREG32_SMC(ixLCAC_MC1_CNTL, 0x400005);
			WREG32_SMC(ixLCAC_CPL_CNTL, 0x500005);
		}
	} else {
		if (!pi->sclk_dpm_key_disabled) {
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}

		if (!pi->mclk_dpm_key_disabled) {
			smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_Disable);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}
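
/*
 * Bring up dynamic power management: enable global/dynamic PM, arm the
 * voltage controller, then enable SCLK/MCLK DPM and, if allowed,
 * PCIe DPM.
 */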
static int ci_start_dpm(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(ixGENERAL_PWRMGT);
	tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
	WREG32_SMC(ixGENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
	tmp |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
	WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);

	ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VoltageChangeTimeout), 0x1000);

	WREG32_P(mmBIF_LNCNT_RESET, 0, ~BIF_LNCNT_RESET__RESET_LNCNT_EN_MASK);

	smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Enable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	ret = ci_enable_sclk_mclk_dpm(adev, true);
	if (ret)
		return ret;

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Enable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_freeze_sclk_mclk_dpm(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if ((!pi->sclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK))) {
		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_SCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	if ((!pi->mclk_dpm_key_disabled) &&
	    (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)) {
		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MCLKDPM_FreezeLevel);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_stop_dpm(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result smc_result;
	int ret;
	u32 tmp;

	tmp = RREG32_SMC(ixGENERAL_PWRMGT);
	tmp &= ~GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK;
	WREG32_SMC(ixGENERAL_PWRMGT, tmp);

	tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
	tmp &= ~SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK;
	WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);

	if (!pi->pcie_dpm_key_disabled) {
		smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_PCIeDPM_Disable);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	ret = ci_enable_sclk_mclk_dpm(adev, false);
	if (ret)
		return ret;

	smc_result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Voltage_Cntl_Disable);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;

	return 0;
}

static void ci_enable_sclk_control(struct amdgpu_device *adev, bool enable)
{
	u32 tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);

	if (enable)
		tmp &= ~SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
	else
		tmp |= SCLK_PWRMGT_CNTL__SCLK_PWRMGT_OFF_MASK;
	WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);
}

static int ci_notify_hw_of_power_source(struct amdgpu_device *adev,
					bool ac_power)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_cac_tdp_table *cac_tdp_table =
		adev->pm.dpm.dyn_state.cac_tdp_table;
	u32 power_limit;

	if (ac_power)
		power_limit = (u32)(cac_tdp_table->maximum_power_delivery_limit * 256);
	else
		power_limit = (u32)(cac_tdp_table->battery_power_limit * 256);

	ci_set_power_limit(adev, power_limit);

	if (pi->caps_automatic_dc_transition) {
		if (ac_power)
			amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC);
		else
			amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_Remove_DC_Clamp);
	}

	return 0;
}
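
/*
 * The SMC mailbox passes a single u32 argument through SMC_MSG_ARG_0;
 * these helpers wrap amdgpu_ci_send_msg_to_smc() for messages that
 * send or return a parameter that way.
 */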
static PPSMC_Result amdgpu_ci_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
							     PPSMC_Msg msg, u32 parameter)
{
	WREG32(mmSMC_MSG_ARG_0, parameter);
	return amdgpu_ci_send_msg_to_smc(adev, msg);
}

static PPSMC_Result amdgpu_ci_send_msg_to_smc_return_parameter(struct amdgpu_device *adev,
							       PPSMC_Msg msg, u32 *parameter)
{
	PPSMC_Result smc_result;

	smc_result = amdgpu_ci_send_msg_to_smc(adev, msg);

	if ((smc_result == PPSMC_Result_OK) && parameter)
		*parameter = RREG32(mmSMC_MSG_ARG_0);

	return smc_result;
}

static int ci_dpm_force_state_sclk(struct amdgpu_device *adev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if (!pi->sclk_dpm_key_disabled) {
		PPSMC_Result smc_result =
			amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SCLKDPM_SetEnabledMask, 1 << n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_dpm_force_state_mclk(struct amdgpu_device *adev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if (!pi->mclk_dpm_key_disabled) {
		PPSMC_Result smc_result =
			amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_MCLKDPM_SetEnabledMask, 1 << n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_dpm_force_state_pcie(struct amdgpu_device *adev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if (!pi->pcie_dpm_key_disabled) {
		PPSMC_Result smc_result =
			amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PCIeDPM_ForceLevel, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_set_power_limit(struct amdgpu_device *adev, u32 n)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if (pi->power_containment_features & POWERCONTAINMENT_FEATURE_PkgPwrLimit) {
		PPSMC_Result smc_result =
			amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_PkgPwrSetLimit, n);
		if (smc_result != PPSMC_Result_OK)
			return -EINVAL;
	}

	return 0;
}

static int ci_set_overdrive_target_tdp(struct amdgpu_device *adev,
				       u32 target_tdp)
{
	PPSMC_Result smc_result =
		amdgpu_ci_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_OverDriveSetTargetTdp, target_tdp);
	if (smc_result != PPSMC_Result_OK)
		return -EINVAL;
	return 0;
}

static int ci_set_boot_state(struct amdgpu_device *adev)
{
	return ci_enable_sclk_mclk_dpm(adev, false);
}

static u32 ci_get_average_sclk_freq(struct amdgpu_device *adev)
{
	u32 sclk_freq;
	PPSMC_Result smc_result =
		amdgpu_ci_send_msg_to_smc_return_parameter(adev,
							   PPSMC_MSG_API_GetSclkFrequency,
							   &sclk_freq);
	if (smc_result != PPSMC_Result_OK)
		sclk_freq = 0;

	return sclk_freq;
}

static u32 ci_get_average_mclk_freq(struct amdgpu_device *adev)
{
	u32 mclk_freq;
	PPSMC_Result smc_result =
		amdgpu_ci_send_msg_to_smc_return_parameter(adev,
							   PPSMC_MSG_API_GetMclkFrequency,
							   &mclk_freq);
	if (smc_result != PPSMC_Result_OK)
		mclk_freq = 0;

	return mclk_freq;
}

static void ci_dpm_start_smc(struct amdgpu_device *adev)
{
	int i;

	amdgpu_ci_program_jump_on_start(adev);
	amdgpu_ci_start_smc_clock(adev);
	amdgpu_ci_start_smc(adev);
	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32_SMC(ixFIRMWARE_FLAGS) & FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK)
			break;
	}
}

static void ci_dpm_stop_smc(struct amdgpu_device *adev)
{
	amdgpu_ci_reset_smc(adev);
	amdgpu_ci_stop_smc_clock(adev);
}
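
/*
 * The SMC firmware header at SMU7_FIRMWARE_HEADER_LOCATION in SMC SRAM
 * holds the offsets of the DPM, soft-register, MC-register, fan and
 * MC-arb tables; cache them for later table uploads.
 */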
static int ci_process_firmware_header(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;
	int ret;

	ret = amdgpu_ci_read_smc_sram_dword(adev,
					    SMU7_FIRMWARE_HEADER_LOCATION +
					    offsetof(SMU7_Firmware_Header, DpmTable),
					    &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->dpm_table_start = tmp;

	ret = amdgpu_ci_read_smc_sram_dword(adev,
					    SMU7_FIRMWARE_HEADER_LOCATION +
					    offsetof(SMU7_Firmware_Header, SoftRegisters),
					    &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->soft_regs_start = tmp;

	ret = amdgpu_ci_read_smc_sram_dword(adev,
					    SMU7_FIRMWARE_HEADER_LOCATION +
					    offsetof(SMU7_Firmware_Header, mcRegisterTable),
					    &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->mc_reg_table_start = tmp;

	ret = amdgpu_ci_read_smc_sram_dword(adev,
					    SMU7_FIRMWARE_HEADER_LOCATION +
					    offsetof(SMU7_Firmware_Header, FanTable),
					    &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->fan_table_start = tmp;

	ret = amdgpu_ci_read_smc_sram_dword(adev,
					    SMU7_FIRMWARE_HEADER_LOCATION +
					    offsetof(SMU7_Firmware_Header, mcArbDramTimingTable),
					    &tmp, pi->sram_end);
	if (ret)
		return ret;

	pi->arb_table_start = tmp;

	return 0;
}
static void ci_read_clock_registers(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	pi->clock_registers.cg_spll_func_cntl =
		RREG32_SMC(ixCG_SPLL_FUNC_CNTL);
	pi->clock_registers.cg_spll_func_cntl_2 =
		RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2);
	pi->clock_registers.cg_spll_func_cntl_3 =
		RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3);
	pi->clock_registers.cg_spll_func_cntl_4 =
		RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4);
	pi->clock_registers.cg_spll_spread_spectrum =
		RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
	pi->clock_registers.cg_spll_spread_spectrum_2 =
		RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2);
	pi->clock_registers.dll_cntl = RREG32(mmDLL_CNTL);
	pi->clock_registers.mclk_pwrmgt_cntl = RREG32(mmMCLK_PWRMGT_CNTL);
	pi->clock_registers.mpll_ad_func_cntl = RREG32(mmMPLL_AD_FUNC_CNTL);
	pi->clock_registers.mpll_dq_func_cntl = RREG32(mmMPLL_DQ_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl = RREG32(mmMPLL_FUNC_CNTL);
	pi->clock_registers.mpll_func_cntl_1 = RREG32(mmMPLL_FUNC_CNTL_1);
	pi->clock_registers.mpll_func_cntl_2 = RREG32(mmMPLL_FUNC_CNTL_2);
	pi->clock_registers.mpll_ss1 = RREG32(mmMPLL_SS1);
	pi->clock_registers.mpll_ss2 = RREG32(mmMPLL_SS2);
}

static void ci_init_sclk_t(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	pi->low_sclk_interrupt_t = 0;
}

static void ci_enable_thermal_protection(struct amdgpu_device *adev,
					 bool enable)
{
	u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);

	if (enable)
		tmp &= ~GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
	else
		tmp |= GENERAL_PWRMGT__THERMAL_PROTECTION_DIS_MASK;
	WREG32_SMC(ixGENERAL_PWRMGT, tmp);
}

static void ci_enable_acpi_power_management(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);

	tmp |= GENERAL_PWRMGT__STATIC_PM_EN_MASK;

	WREG32_SMC(ixGENERAL_PWRMGT, tmp);
}
2024 static int ci_enter_ulp_state(struct amdgpu_device
*adev
)
2027 WREG32(mmSMC_MESSAGE_0
, PPSMC_MSG_SwitchToMinimumPower
);
2034 static int ci_exit_ulp_state(struct amdgpu_device
*adev
)
2038 WREG32(mmSMC_MESSAGE_0
, PPSMC_MSG_ResumeFromMinimumPower
);
2042 for (i
= 0; i
< adev
->usec_timeout
; i
++) {
2043 if (RREG32(mmSMC_RESP_0
) == 1)
static int ci_notify_smc_display_change(struct amdgpu_device *adev,
					bool has_display)
{
	PPSMC_Msg msg = has_display ? PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;

	return (amdgpu_ci_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ? 0 : -EINVAL;
}
static int ci_enable_ds_master_switch(struct amdgpu_device *adev,
				      bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if (enable) {
		if (pi->caps_sclk_ds) {
			if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_ON) != PPSMC_Result_OK)
				return -EINVAL;
		} else {
			if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
				return -EINVAL;
		}
	} else {
		if (pi->caps_sclk_ds) {
			if (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_MASTER_DeepSleep_OFF) != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}
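/*
 * Program the display gap heuristics: pick VBLANK_OR_WM or IGNORE based on
 * the active CRTC count, then derive the pre-vblank gap and vblank timeout
 * from the refresh rate and vblank time and hand them to the SMC via soft
 * registers.
 */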
static void ci_program_display_gap(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);
	u32 pre_vbi_time_in_us;
	u32 frame_time_in_us;
	u32 ref_clock = adev->clock.spll.reference_freq;
	u32 refresh_rate = amdgpu_dpm_get_vrefresh(adev);
	u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);

	tmp &= ~CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK;
	if (adev->pm.dpm.new_active_crtc_count > 0)
		tmp |= (AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
	else
		tmp |= (AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT);
	WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);

	if (refresh_rate == 0)
		refresh_rate = 60;
	if (vblank_time == 0xffffffff)
		vblank_time = 500;
	frame_time_in_us = 1000000 / refresh_rate;
	pre_vbi_time_in_us =
		frame_time_in_us - 200 - vblank_time;
	tmp = pre_vbi_time_in_us * (ref_clock / 100);

	WREG32_SMC(ixCG_DISPLAY_GAP_CNTL2, tmp);
	ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, PreVBlankGap), 0x64);
	ci_write_smc_soft_register(adev, offsetof(SMU7_SoftRegisters, VBlankTimeout), (frame_time_in_us - pre_vbi_time_in_us));

	ci_notify_smc_display_change(adev, (adev->pm.dpm.new_active_crtc_count == 1));
}
static void ci_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;

	if (enable) {
		if (pi->caps_sclk_ss_support) {
			tmp = RREG32_SMC(ixGENERAL_PWRMGT);
			tmp |= GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
			WREG32_SMC(ixGENERAL_PWRMGT, tmp);
		}
	} else {
		tmp = RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM);
		tmp &= ~CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK;
		WREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM, tmp);

		tmp = RREG32_SMC(ixGENERAL_PWRMGT);
		tmp &= ~GENERAL_PWRMGT__DYN_SPREAD_SPECTRUM_EN_MASK;
		WREG32_SMC(ixGENERAL_PWRMGT, tmp);
	}
}
static void ci_program_sstp(struct amdgpu_device *adev)
{
	WREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER,
		   ((CISLANDS_SSTU_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD_UNIT__SHIFT) |
		    (CISLANDS_SST_DFLT << CG_STATIC_SCREEN_PARAMETER__STATIC_SCREEN_THRESHOLD__SHIFT)));
}
static void ci_enable_display_gap(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SMC(ixCG_DISPLAY_GAP_CNTL);

	tmp &= ~(CG_DISPLAY_GAP_CNTL__DISP_GAP_MASK |
		 CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG_MASK);
	tmp |= ((AMDGPU_PM_DISPLAY_GAP_IGNORE << CG_DISPLAY_GAP_CNTL__DISP_GAP__SHIFT) |
		(AMDGPU_PM_DISPLAY_GAP_VBLANK << CG_DISPLAY_GAP_CNTL__DISP_GAP_MCHG__SHIFT));

	WREG32_SMC(ixCG_DISPLAY_GAP_CNTL, tmp);
}
static void ci_program_vc(struct amdgpu_device *adev)
{
	u32 tmp;

	tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
	tmp &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
	WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, CISLANDS_VRC_DFLT0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, CISLANDS_VRC_DFLT1);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, CISLANDS_VRC_DFLT2);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, CISLANDS_VRC_DFLT3);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, CISLANDS_VRC_DFLT4);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, CISLANDS_VRC_DFLT5);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, CISLANDS_VRC_DFLT6);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, CISLANDS_VRC_DFLT7);
}
static void ci_clear_vc(struct amdgpu_device *adev)
{
	u32 tmp;

	tmp = RREG32_SMC(ixSCLK_PWRMGT_CNTL);
	tmp |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK);
	WREG32_SMC(ixSCLK_PWRMGT_CNTL, tmp);

	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_1, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_2, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_3, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_4, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_5, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_6, 0);
	WREG32_SMC(ixCG_FREQ_TRAN_VOTING_7, 0);
}
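/*
 * Wait for the SMC boot sequence to finish, then hold the SMC in reset with
 * its clock stopped while the new firmware image is loaded into SMC SRAM.
 */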
static int ci_upload_firmware(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int i, ret;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32_SMC(ixRCU_UC_EVENTS) & RCU_UC_EVENTS__boot_seq_done_MASK)
			break;
	}
	WREG32_SMC(ixSMC_SYSCON_MISC_CNTL, 1);

	amdgpu_ci_stop_smc_clock(adev);
	amdgpu_ci_reset_smc(adev);

	ret = amdgpu_ci_load_smc_ucode(adev, pi->sram_end);

	return ret;
}
static int ci_get_svi2_voltage_table(struct amdgpu_device *adev,
				     struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
				     struct atom_voltage_table *voltage_table)
{
	u32 i;

	if (voltage_dependency_table == NULL)
		return -EINVAL;

	voltage_table->mask_low = 0;
	voltage_table->phase_delay = 0;

	voltage_table->count = voltage_dependency_table->count;
	for (i = 0; i < voltage_table->count; i++) {
		voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
		voltage_table->entries[i].smio_low = 0;
	}

	return 0;
}
static int ci_construct_voltage_tables(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret;

	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
							VOLTAGE_OBJ_GPIO_LUT,
							&pi->vddc_voltage_table);
		if (ret)
			return ret;
	} else if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		ret = ci_get_svi2_voltage_table(adev,
						&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
						&pi->vddc_voltage_table);
		if (ret)
			return ret;
	}

	if (pi->vddc_voltage_table.count > SMU7_MAX_LEVELS_VDDC)
		ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDC,
							 &pi->vddc_voltage_table);

	if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
							VOLTAGE_OBJ_GPIO_LUT,
							&pi->vddci_voltage_table);
		if (ret)
			return ret;
	} else if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		ret = ci_get_svi2_voltage_table(adev,
						&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
						&pi->vddci_voltage_table);
		if (ret)
			return ret;
	}

	if (pi->vddci_voltage_table.count > SMU7_MAX_LEVELS_VDDCI)
		ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_VDDCI,
							 &pi->vddci_voltage_table);

	if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO) {
		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
							VOLTAGE_OBJ_GPIO_LUT,
							&pi->mvdd_voltage_table);
		if (ret)
			return ret;
	} else if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		ret = ci_get_svi2_voltage_table(adev,
						&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
						&pi->mvdd_voltage_table);
		if (ret)
			return ret;
	}

	if (pi->mvdd_voltage_table.count > SMU7_MAX_LEVELS_MVDD)
		ci_trim_voltage_table_to_fit_state_table(adev, SMU7_MAX_LEVELS_MVDD,
							 &pi->mvdd_voltage_table);

	return 0;
}
static void ci_populate_smc_voltage_table(struct amdgpu_device *adev,
					  struct atom_voltage_table_entry *voltage_table,
					  SMU7_Discrete_VoltageLevel *smc_voltage_table)
{
	int ret;

	ret = ci_get_std_voltage_value_sidd(adev, voltage_table,
					    &smc_voltage_table->StdVoltageHiSidd,
					    &smc_voltage_table->StdVoltageLoSidd);

	if (ret) {
		smc_voltage_table->StdVoltageHiSidd = voltage_table->value * VOLTAGE_SCALE;
		smc_voltage_table->StdVoltageLoSidd = voltage_table->value * VOLTAGE_SCALE;
	}

	smc_voltage_table->Voltage = cpu_to_be16(voltage_table->value * VOLTAGE_SCALE);
	smc_voltage_table->StdVoltageHiSidd =
		cpu_to_be16(smc_voltage_table->StdVoltageHiSidd);
	smc_voltage_table->StdVoltageLoSidd =
		cpu_to_be16(smc_voltage_table->StdVoltageLoSidd);
}
static int ci_populate_smc_vddc_table(struct amdgpu_device *adev,
				      SMU7_Discrete_DpmTable *table)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	unsigned int count;

	table->VddcLevelCount = pi->vddc_voltage_table.count;
	for (count = 0; count < table->VddcLevelCount; count++) {
		ci_populate_smc_voltage_table(adev,
					      &pi->vddc_voltage_table.entries[count],
					      &table->VddcLevel[count]);

		if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
			table->VddcLevel[count].Smio |=
				pi->vddc_voltage_table.entries[count].smio_low;
		else
			table->VddcLevel[count].Smio = 0;
	}
	table->VddcLevelCount = cpu_to_be32(table->VddcLevelCount);

	return 0;
}
static int ci_populate_smc_vddci_table(struct amdgpu_device *adev,
				       SMU7_Discrete_DpmTable *table)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	unsigned int count;

	table->VddciLevelCount = pi->vddci_voltage_table.count;
	for (count = 0; count < table->VddciLevelCount; count++) {
		ci_populate_smc_voltage_table(adev,
					      &pi->vddci_voltage_table.entries[count],
					      &table->VddciLevel[count]);

		if (pi->vddci_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
			table->VddciLevel[count].Smio |=
				pi->vddci_voltage_table.entries[count].smio_low;
		else
			table->VddciLevel[count].Smio = 0;
	}
	table->VddciLevelCount = cpu_to_be32(table->VddciLevelCount);

	return 0;
}
static int ci_populate_smc_mvdd_table(struct amdgpu_device *adev,
				      SMU7_Discrete_DpmTable *table)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	unsigned int count;

	table->MvddLevelCount = pi->mvdd_voltage_table.count;
	for (count = 0; count < table->MvddLevelCount; count++) {
		ci_populate_smc_voltage_table(adev,
					      &pi->mvdd_voltage_table.entries[count],
					      &table->MvddLevel[count]);

		if (pi->mvdd_control == CISLANDS_VOLTAGE_CONTROL_BY_GPIO)
			table->MvddLevel[count].Smio |=
				pi->mvdd_voltage_table.entries[count].smio_low;
		else
			table->MvddLevel[count].Smio = 0;
	}
	table->MvddLevelCount = cpu_to_be32(table->MvddLevelCount);

	return 0;
}
static int ci_populate_smc_voltage_tables(struct amdgpu_device *adev,
					  SMU7_Discrete_DpmTable *table)
{
	int ret;

	ret = ci_populate_smc_vddc_table(adev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_vddci_table(adev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_mvdd_table(adev, table);
	if (ret)
		return ret;

	return 0;
}
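/*
 * Look up the MVDD level for the given mclk from the mvdd-on-mclk
 * dependency table; a non-zero return tells the caller that no usable
 * entry exists (e.g. MVDD is not voltage controlled) and that MinMvdd
 * should be left at 0.
 */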
static int ci_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
				  SMU7_Discrete_VoltageLevel *voltage)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 i = 0;

	if (pi->mvdd_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		for (i = 0; i < adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count; i++) {
			if (mclk <= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries[i].clk) {
				voltage->Voltage = pi->mvdd_voltage_table.entries[i].value;
				break;
			}
		}

		if (i >= adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.count)
			return -EINVAL;

		return 0;
	}

	return -EINVAL;
}
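/*
 * Derive the "standard" high/low SIDD voltages for a VDDC value: find the
 * matching (or next higher) entry in the vddc-on-sclk table and use the
 * corresponding CAC leakage table entry, falling back to the raw voltage
 * when no leakage data is available.
 */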
static int ci_get_std_voltage_value_sidd(struct amdgpu_device *adev,
					 struct atom_voltage_table_entry *voltage_table,
					 u16 *std_voltage_hi_sidd, u16 *std_voltage_lo_sidd)
{
	u16 v_index, idx;
	bool voltage_found = false;
	*std_voltage_hi_sidd = voltage_table->value * VOLTAGE_SCALE;
	*std_voltage_lo_sidd = voltage_table->value * VOLTAGE_SCALE;

	if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
		return -EINVAL;

	if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
		for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
			if (voltage_table->value ==
			    adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
				voltage_found = true;
				if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
					idx = v_index;
				else
					idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
				*std_voltage_lo_sidd =
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
				*std_voltage_hi_sidd =
					adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
				break;
			}
		}

		if (!voltage_found) {
			for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
				if (voltage_table->value <=
				    adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
					voltage_found = true;
					if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
						idx = v_index;
					else
						idx = adev->pm.dpm.dyn_state.cac_leakage_table.count - 1;
					*std_voltage_lo_sidd =
						adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].vddc * VOLTAGE_SCALE;
					*std_voltage_hi_sidd =
						adev->pm.dpm.dyn_state.cac_leakage_table.entries[idx].leakage * VOLTAGE_SCALE;
					break;
				}
			}
		}
	}

	return 0;
}
static void ci_populate_phase_value_based_on_sclk(struct amdgpu_device *adev,
						  const struct amdgpu_phase_shedding_limits_table *limits,
						  u32 sclk,
						  u32 *phase_shedding)
{
	unsigned int i;

	*phase_shedding = 1;

	for (i = 0; i < limits->count; i++) {
		if (sclk < limits->entries[i].sclk) {
			*phase_shedding = i;
			break;
		}
	}
}
static void ci_populate_phase_value_based_on_mclk(struct amdgpu_device *adev,
						  const struct amdgpu_phase_shedding_limits_table *limits,
						  u32 mclk,
						  u32 *phase_shedding)
{
	unsigned int i;

	*phase_shedding = 1;

	for (i = 0; i < limits->count; i++) {
		if (mclk < limits->entries[i].mclk) {
			*phase_shedding = i;
			break;
		}
	}
}
static int ci_init_arb_table_index(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;
	int ret;

	ret = amdgpu_ci_read_smc_sram_dword(adev, pi->arb_table_start,
					    &tmp, pi->sram_end);
	if (ret)
		return ret;

	tmp &= 0x00FFFFFF;
	tmp |= MC_CG_ARB_FREQ_F1 << 24;

	return amdgpu_ci_write_smc_sram_dword(adev, pi->arb_table_start,
					      tmp, pi->sram_end);
}
static int ci_get_dependency_volt_by_clk(struct amdgpu_device *adev,
					 struct amdgpu_clock_voltage_dependency_table *allowed_clock_voltage_table,
					 u32 clock, u32 *voltage)
{
	u32 i = 0;

	if (allowed_clock_voltage_table->count == 0)
		return -EINVAL;

	for (i = 0; i < allowed_clock_voltage_table->count; i++) {
		if (allowed_clock_voltage_table->entries[i].clk >= clock) {
			*voltage = allowed_clock_voltage_table->entries[i].v;
			return 0;
		}
	}

	*voltage = allowed_clock_voltage_table->entries[i-1].v;

	return 0;
}
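/*
 * Pick the largest deep sleep divider id such that sclk / (1 << id) stays
 * at or above both the requested minimum and CISLAND_MINIMUM_ENGINE_CLOCK.
 */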
static u8 ci_get_sleep_divider_id_from_clock(struct amdgpu_device *adev,
					     u32 sclk, u32 min_sclk_in_sr)
{
	u32 i;
	u32 tmp;
	u32 min = (min_sclk_in_sr > CISLAND_MINIMUM_ENGINE_CLOCK) ?
		min_sclk_in_sr : CISLAND_MINIMUM_ENGINE_CLOCK;

	if (sclk < min)
		return 0;

	for (i = CISLAND_MAX_DEEPSLEEP_DIVIDER_ID;  ; i--) {
		tmp = sclk / (1 << i);
		if (tmp >= min || i == 0)
			break;
	}

	return (u8)i;
}
static int ci_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
{
	return ci_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}
static int ci_reset_to_default(struct amdgpu_device *adev)
{
	return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}
static int ci_force_switch_to_arb_f0(struct amdgpu_device *adev)
{
	u32 tmp;

	tmp = (RREG32_SMC(ixSMC_SCRATCH9) & 0x0000ff00) >> 8;

	if (tmp == MC_CG_ARB_FREQ_F0)
		return 0;

	return ci_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
}
static void ci_register_patching_mc_arb(struct amdgpu_device *adev,
					const u32 engine_clock,
					const u32 memory_clock,
					u32 *dram_timimg2)
{
	u32 tmp, tmp2;
	bool patch;

	tmp = RREG32(mmMC_SEQ_MISC0);
	patch = ((tmp & 0x0000f00) == 0x300) ? true : false;

	if (patch &&
	    ((adev->pdev->device == 0x67B0) ||
	     (adev->pdev->device == 0x67B1))) {
		if ((memory_clock > 100000) && (memory_clock <= 125000)) {
			tmp2 = (((0x31 * engine_clock) / 125000) - 1) & 0xff;
			*dram_timimg2 &= ~0x00ff0000;
			*dram_timimg2 |= tmp2 << 16;
		} else if ((memory_clock > 125000) && (memory_clock <= 137500)) {
			tmp2 = (((0x36 * engine_clock) / 137500) - 1) & 0xff;
			*dram_timimg2 &= ~0x00ff0000;
			*dram_timimg2 |= tmp2 << 16;
		}
	}
}
static int ci_populate_memory_timing_parameters(struct amdgpu_device *adev,
						u32 sclk,
						u32 mclk,
						SMU7_Discrete_MCArbDramTimingTableEntry *arb_regs)
{
	u32 dram_timing;
	u32 dram_timing2;
	u32 burst_time;

	amdgpu_atombios_set_engine_dram_timings(adev, sclk, mclk);

	dram_timing  = RREG32(mmMC_ARB_DRAM_TIMING);
	dram_timing2 = RREG32(mmMC_ARB_DRAM_TIMING2);
	burst_time = RREG32(mmMC_ARB_BURST_TIME) & MC_ARB_BURST_TIME__STATE0_MASK;

	ci_register_patching_mc_arb(adev, sclk, mclk, &dram_timing2);

	arb_regs->McArbDramTiming  = cpu_to_be32(dram_timing);
	arb_regs->McArbDramTiming2 = cpu_to_be32(dram_timing2);
	arb_regs->McArbBurstTime = (u8)burst_time;

	return 0;
}
static int ci_do_program_memory_timing_parameters(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	SMU7_Discrete_MCArbDramTimingTable arb_regs;
	u32 i, j;
	int ret = 0;

	memset(&arb_regs, 0, sizeof(SMU7_Discrete_MCArbDramTimingTable));

	for (i = 0; i < pi->dpm_table.sclk_table.count; i++) {
		for (j = 0; j < pi->dpm_table.mclk_table.count; j++) {
			ret = ci_populate_memory_timing_parameters(adev,
								   pi->dpm_table.sclk_table.dpm_levels[i].value,
								   pi->dpm_table.mclk_table.dpm_levels[j].value,
								   &arb_regs.entries[i][j]);
			if (ret)
				break;
		}
	}

	if (ret == 0)
		ret = amdgpu_ci_copy_bytes_to_smc(adev,
						  pi->arb_table_start,
						  (u8 *)&arb_regs,
						  sizeof(SMU7_Discrete_MCArbDramTimingTable),
						  pi->sram_end);

	return ret;
}
static int ci_program_memory_timing_parameters(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if (pi->need_update_smu7_dpm_table == 0)
		return 0;

	return ci_do_program_memory_timing_parameters(adev);
}
static void ci_populate_smc_initial_state(struct amdgpu_device *adev,
					  struct amdgpu_ps *amdgpu_boot_state)
{
	struct ci_ps *boot_state = ci_get_ps(amdgpu_boot_state);
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 level = 0;

	for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; level++) {
		if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[level].clk >=
		    boot_state->performance_levels[0].sclk) {
			pi->smc_state_table.GraphicsBootLevel = level;
			break;
		}
	}

	for (level = 0; level < adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.count; level++) {
		if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries[level].clk >=
		    boot_state->performance_levels[0].mclk) {
			pi->smc_state_table.MemoryBootLevel = level;
			break;
		}
	}
}
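/*
 * Build a bitmask of the enabled DPM levels (bit 0 = level 0); the SMC
 * uses this to restrict which levels it may select.
 */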
static u32 ci_get_dpm_level_enable_mask_value(struct ci_single_dpm_table *dpm_table)
{
	u32 i;
	u32 mask_value = 0;

	for (i = dpm_table->count; i > 0; i--) {
		mask_value = mask_value << 1;
		if (dpm_table->dpm_levels[i-1].enabled)
			mask_value |= 0x1;
		else
			mask_value &= 0xFFFFFFFE;
	}

	return mask_value;
}
static void ci_populate_smc_link_level(struct amdgpu_device *adev,
				       SMU7_Discrete_DpmTable *table)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_dpm_table *dpm_table = &pi->dpm_table;
	u32 i;

	for (i = 0; i < dpm_table->pcie_speed_table.count; i++) {
		table->LinkLevel[i].PcieGenSpeed =
			(u8)dpm_table->pcie_speed_table.dpm_levels[i].value;
		table->LinkLevel[i].PcieLaneCount =
			amdgpu_encode_pci_lane_width(dpm_table->pcie_speed_table.dpm_levels[i].param1);
		table->LinkLevel[i].EnabledForActivity = 1;
		table->LinkLevel[i].DownT = cpu_to_be32(5);
		table->LinkLevel[i].UpT = cpu_to_be32(30);
	}

	pi->smc_state_table.LinkLevelCount = (u8)dpm_table->pcie_speed_table.count;
	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&dpm_table->pcie_speed_table);
}
static int ci_populate_smc_uvd_level(struct amdgpu_device *adev,
				     SMU7_Discrete_DpmTable *table)
{
	u32 count;
	struct atom_clock_dividers dividers;
	int ret = -EINVAL;

	table->UvdLevelCount =
		adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count;

	for (count = 0; count < table->UvdLevelCount; count++) {
		table->UvdLevel[count].VclkFrequency =
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].vclk;
		table->UvdLevel[count].DclkFrequency =
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].dclk;
		table->UvdLevel[count].MinVddc =
			adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
		table->UvdLevel[count].MinVddcPhases = 1;

		ret = amdgpu_atombios_get_clock_dividers(adev,
							 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
							 table->UvdLevel[count].VclkFrequency, false, &dividers);
		if (ret)
			return ret;

		table->UvdLevel[count].VclkDivider = (u8)dividers.post_divider;

		ret = amdgpu_atombios_get_clock_dividers(adev,
							 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
							 table->UvdLevel[count].DclkFrequency, false, &dividers);
		if (ret)
			return ret;

		table->UvdLevel[count].DclkDivider = (u8)dividers.post_divider;

		table->UvdLevel[count].VclkFrequency = cpu_to_be32(table->UvdLevel[count].VclkFrequency);
		table->UvdLevel[count].DclkFrequency = cpu_to_be32(table->UvdLevel[count].DclkFrequency);
		table->UvdLevel[count].MinVddc = cpu_to_be16(table->UvdLevel[count].MinVddc);
	}

	return ret;
}
static int ci_populate_smc_vce_level(struct amdgpu_device *adev,
				     SMU7_Discrete_DpmTable *table)
{
	u32 count;
	struct atom_clock_dividers dividers;
	int ret = -EINVAL;

	table->VceLevelCount =
		adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count;

	for (count = 0; count < table->VceLevelCount; count++) {
		table->VceLevel[count].Frequency =
			adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].evclk;
		table->VceLevel[count].MinVoltage =
			(u16)adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
		table->VceLevel[count].MinPhases = 1;

		ret = amdgpu_atombios_get_clock_dividers(adev,
							 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
							 table->VceLevel[count].Frequency, false, &dividers);
		if (ret)
			return ret;

		table->VceLevel[count].Divider = (u8)dividers.post_divider;

		table->VceLevel[count].Frequency = cpu_to_be32(table->VceLevel[count].Frequency);
		table->VceLevel[count].MinVoltage = cpu_to_be16(table->VceLevel[count].MinVoltage);
	}

	return ret;
}
static int ci_populate_smc_acp_level(struct amdgpu_device *adev,
				     SMU7_Discrete_DpmTable *table)
{
	u32 count;
	struct atom_clock_dividers dividers;
	int ret = -EINVAL;

	table->AcpLevelCount = (u8)
		(adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count);

	for (count = 0; count < table->AcpLevelCount; count++) {
		table->AcpLevel[count].Frequency =
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].clk;
		table->AcpLevel[count].MinVoltage =
			adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[count].v;
		table->AcpLevel[count].MinPhases = 1;

		ret = amdgpu_atombios_get_clock_dividers(adev,
							 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
							 table->AcpLevel[count].Frequency, false, &dividers);
		if (ret)
			return ret;

		table->AcpLevel[count].Divider = (u8)dividers.post_divider;

		table->AcpLevel[count].Frequency = cpu_to_be32(table->AcpLevel[count].Frequency);
		table->AcpLevel[count].MinVoltage = cpu_to_be16(table->AcpLevel[count].MinVoltage);
	}

	return ret;
}
static int ci_populate_smc_samu_level(struct amdgpu_device *adev,
				      SMU7_Discrete_DpmTable *table)
{
	u32 count;
	struct atom_clock_dividers dividers;
	int ret = -EINVAL;

	table->SamuLevelCount =
		adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count;

	for (count = 0; count < table->SamuLevelCount; count++) {
		table->SamuLevel[count].Frequency =
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].clk;
		table->SamuLevel[count].MinVoltage =
			adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[count].v * VOLTAGE_SCALE;
		table->SamuLevel[count].MinPhases = 1;

		ret = amdgpu_atombios_get_clock_dividers(adev,
							 COMPUTE_GPUCLK_INPUT_FLAG_DEFAULT_GPUCLK,
							 table->SamuLevel[count].Frequency, false, &dividers);
		if (ret)
			return ret;

		table->SamuLevel[count].Divider = (u8)dividers.post_divider;

		table->SamuLevel[count].Frequency = cpu_to_be32(table->SamuLevel[count].Frequency);
		table->SamuLevel[count].MinVoltage = cpu_to_be16(table->SamuLevel[count].MinVoltage);
	}

	return ret;
}
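/*
 * Compute the MPLL register set for a target memory clock: program the
 * bandwidth/feedback/post dividers from the atombios-provided parameters,
 * optionally apply memory spread spectrum, and select the DLL speed and
 * power-down behaviour before mirroring everything into the SMC memory
 * level.
 */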
static int ci_calculate_mclk_params(struct amdgpu_device *adev,
				    u32 memory_clock,
				    SMU7_Discrete_MemoryLevel *mclk,
				    bool strobe_mode,
				    bool dll_state_on)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32  dll_cntl = pi->clock_registers.dll_cntl;
	u32  mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
	u32  mpll_ad_func_cntl = pi->clock_registers.mpll_ad_func_cntl;
	u32  mpll_dq_func_cntl = pi->clock_registers.mpll_dq_func_cntl;
	u32  mpll_func_cntl = pi->clock_registers.mpll_func_cntl;
	u32  mpll_func_cntl_1 = pi->clock_registers.mpll_func_cntl_1;
	u32  mpll_func_cntl_2 = pi->clock_registers.mpll_func_cntl_2;
	u32  mpll_ss1 = pi->clock_registers.mpll_ss1;
	u32  mpll_ss2 = pi->clock_registers.mpll_ss2;
	struct atom_mpll_param mpll_param;
	int ret;

	ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
	if (ret)
		return ret;

	mpll_func_cntl &= ~MPLL_FUNC_CNTL__BWCTRL_MASK;
	mpll_func_cntl |= (mpll_param.bwcntl << MPLL_FUNC_CNTL__BWCTRL__SHIFT);

	mpll_func_cntl_1 &= ~(MPLL_FUNC_CNTL_1__CLKF_MASK | MPLL_FUNC_CNTL_1__CLKFRAC_MASK |
			      MPLL_FUNC_CNTL_1__VCO_MODE_MASK);
	mpll_func_cntl_1 |= (mpll_param.clkf) << MPLL_FUNC_CNTL_1__CLKF__SHIFT |
		(mpll_param.clkfrac << MPLL_FUNC_CNTL_1__CLKFRAC__SHIFT) |
		(mpll_param.vco_mode << MPLL_FUNC_CNTL_1__VCO_MODE__SHIFT);

	mpll_ad_func_cntl &= ~MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK;
	mpll_ad_func_cntl |= (mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);

	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
		mpll_dq_func_cntl &= ~(MPLL_DQ_FUNC_CNTL__YCLK_SEL_MASK |
				       MPLL_AD_FUNC_CNTL__YCLK_POST_DIV_MASK);
		mpll_dq_func_cntl |= (mpll_param.yclk_sel << MPLL_DQ_FUNC_CNTL__YCLK_SEL__SHIFT) |
			(mpll_param.post_div << MPLL_AD_FUNC_CNTL__YCLK_POST_DIV__SHIFT);
	}

	if (pi->caps_mclk_ss_support) {
		struct amdgpu_atom_ss ss;
		u32 freq_nom;
		u32 tmp;
		u32 reference_clock = adev->clock.mpll.reference_freq;

		if (mpll_param.qdr == 1)
			freq_nom = memory_clock * 4 * (1 << mpll_param.post_div);
		else
			freq_nom = memory_clock * 2 * (1 << mpll_param.post_div);

		tmp = (freq_nom / reference_clock);
		tmp = tmp * tmp;
		if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
						     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
			u32 clks = reference_clock * 5 / ss.rate;
			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);

			mpll_ss1 &= ~MPLL_SS1__CLKV_MASK;
			mpll_ss1 |= (clkv << MPLL_SS1__CLKV__SHIFT);

			mpll_ss2 &= ~MPLL_SS2__CLKS_MASK;
			mpll_ss2 |= (clks << MPLL_SS2__CLKS__SHIFT);
		}
	}

	mclk_pwrmgt_cntl &= ~MCLK_PWRMGT_CNTL__DLL_SPEED_MASK;
	mclk_pwrmgt_cntl |= (mpll_param.dll_speed << MCLK_PWRMGT_CNTL__DLL_SPEED__SHIFT);

	if (dll_state_on)
		mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
			MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK;
	else
		mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
				      MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);

	mclk->MclkFrequency = memory_clock;
	mclk->MpllFuncCntl = mpll_func_cntl;
	mclk->MpllFuncCntl_1 = mpll_func_cntl_1;
	mclk->MpllFuncCntl_2 = mpll_func_cntl_2;
	mclk->MpllAdFuncCntl = mpll_ad_func_cntl;
	mclk->MpllDqFuncCntl = mpll_dq_func_cntl;
	mclk->MclkPwrmgtCntl = mclk_pwrmgt_cntl;
	mclk->DllCntl = dll_cntl;
	mclk->MpllSs1 = mpll_ss1;
	mclk->MpllSs2 = mpll_ss2;

	return 0;
}
static int ci_populate_single_memory_level(struct amdgpu_device *adev,
					   u32 memory_clock,
					   SMU7_Discrete_MemoryLevel *memory_level)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret;
	bool dll_state_on;

	if (adev->pm.dpm.dyn_state.vddc_dependency_on_mclk.entries) {
		ret = ci_get_dependency_volt_by_clk(adev,
						    &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
						    memory_clock, &memory_level->MinVddc);
		if (ret)
			return ret;
	}

	if (adev->pm.dpm.dyn_state.vddci_dependency_on_mclk.entries) {
		ret = ci_get_dependency_volt_by_clk(adev,
						    &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
						    memory_clock, &memory_level->MinVddci);
		if (ret)
			return ret;
	}

	if (adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk.entries) {
		ret = ci_get_dependency_volt_by_clk(adev,
						    &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
						    memory_clock, &memory_level->MinMvdd);
		if (ret)
			return ret;
	}

	memory_level->MinVddcPhases = 1;

	if (pi->vddc_phase_shed_control)
		ci_populate_phase_value_based_on_mclk(adev,
						      &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
						      memory_clock,
						      &memory_level->MinVddcPhases);

	memory_level->EnabledForThrottle = 1;
	memory_level->EnabledForActivity = 1;
	memory_level->UpH = 0;
	memory_level->DownH = 100;
	memory_level->VoltageDownH = 0;
	memory_level->ActivityLevel = (u16)pi->mclk_activity_target;

	memory_level->StutterEnable = false;
	memory_level->StrobeEnable = false;
	memory_level->EdcReadEnable = false;
	memory_level->EdcWriteEnable = false;
	memory_level->RttEnable = false;

	memory_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	if (pi->mclk_stutter_mode_threshold &&
	    (memory_clock <= pi->mclk_stutter_mode_threshold) &&
	    (pi->uvd_enabled == false) &&
	    (RREG32(mmDPG_PIPE_STUTTER_CONTROL) & DPG_PIPE_STUTTER_CONTROL__STUTTER_ENABLE_MASK) &&
	    (adev->pm.dpm.new_active_crtc_count <= 2))
		memory_level->StutterEnable = true;

	if (pi->mclk_strobe_mode_threshold &&
	    (memory_clock <= pi->mclk_strobe_mode_threshold))
		memory_level->StrobeEnable = 1;

	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
		memory_level->StrobeRatio =
			ci_get_mclk_frequency_ratio(memory_clock, memory_level->StrobeEnable);
		if (pi->mclk_edc_enable_threshold &&
		    (memory_clock > pi->mclk_edc_enable_threshold))
			memory_level->EdcReadEnable = true;

		if (pi->mclk_edc_wr_enable_threshold &&
		    (memory_clock > pi->mclk_edc_wr_enable_threshold))
			memory_level->EdcWriteEnable = true;

		if (memory_level->StrobeEnable) {
			if (ci_get_mclk_frequency_ratio(memory_clock, true) >=
			    ((RREG32(mmMC_SEQ_MISC7) >> 16) & 0xf))
				dll_state_on = ((RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
			else
				dll_state_on = ((RREG32(mmMC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
		} else {
			dll_state_on = pi->dll_default_on;
		}
	} else {
		memory_level->StrobeRatio = ci_get_ddr3_mclk_frequency_ratio(memory_clock);
		dll_state_on = ((RREG32(mmMC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
	}

	ret = ci_calculate_mclk_params(adev, memory_clock, memory_level, memory_level->StrobeEnable, dll_state_on);
	if (ret)
		return ret;

	memory_level->MinVddc = cpu_to_be32(memory_level->MinVddc * VOLTAGE_SCALE);
	memory_level->MinVddcPhases = cpu_to_be32(memory_level->MinVddcPhases);
	memory_level->MinVddci = cpu_to_be32(memory_level->MinVddci * VOLTAGE_SCALE);
	memory_level->MinMvdd = cpu_to_be32(memory_level->MinMvdd * VOLTAGE_SCALE);

	memory_level->MclkFrequency = cpu_to_be32(memory_level->MclkFrequency);
	memory_level->ActivityLevel = cpu_to_be16(memory_level->ActivityLevel);
	memory_level->MpllFuncCntl = cpu_to_be32(memory_level->MpllFuncCntl);
	memory_level->MpllFuncCntl_1 = cpu_to_be32(memory_level->MpllFuncCntl_1);
	memory_level->MpllFuncCntl_2 = cpu_to_be32(memory_level->MpllFuncCntl_2);
	memory_level->MpllAdFuncCntl = cpu_to_be32(memory_level->MpllAdFuncCntl);
	memory_level->MpllDqFuncCntl = cpu_to_be32(memory_level->MpllDqFuncCntl);
	memory_level->MclkPwrmgtCntl = cpu_to_be32(memory_level->MclkPwrmgtCntl);
	memory_level->DllCntl = cpu_to_be32(memory_level->DllCntl);
	memory_level->MpllSs1 = cpu_to_be32(memory_level->MpllSs1);
	memory_level->MpllSs2 = cpu_to_be32(memory_level->MpllSs2);

	return 0;
}
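/*
 * Fill in the ACPI (lowest power) level: run the SPLL at the reference
 * clock with the PLL powered down and held in reset, and park the memory
 * level with the DLLs reset and the MRDCK power-downs disabled.
 */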
static int ci_populate_smc_acpi_level(struct amdgpu_device *adev,
				      SMU7_Discrete_DpmTable *table)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct atom_clock_dividers dividers;
	SMU7_Discrete_VoltageLevel voltage_level;
	u32 spll_func_cntl = pi->clock_registers.cg_spll_func_cntl;
	u32 spll_func_cntl_2 = pi->clock_registers.cg_spll_func_cntl_2;
	u32 dll_cntl = pi->clock_registers.dll_cntl;
	u32 mclk_pwrmgt_cntl = pi->clock_registers.mclk_pwrmgt_cntl;
	int ret;

	table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;

	if (pi->acpi_vddc)
		table->ACPILevel.MinVddc = cpu_to_be32(pi->acpi_vddc * VOLTAGE_SCALE);
	else
		table->ACPILevel.MinVddc = cpu_to_be32(pi->min_vddc_in_pp_table * VOLTAGE_SCALE);

	table->ACPILevel.MinVddcPhases = pi->vddc_phase_shed_control ? 0 : 1;

	table->ACPILevel.SclkFrequency = adev->clock.spll.reference_freq;

	ret = amdgpu_atombios_get_clock_dividers(adev,
						 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
						 table->ACPILevel.SclkFrequency, false, &dividers);
	if (ret)
		return ret;

	table->ACPILevel.SclkDid = (u8)dividers.post_divider;
	table->ACPILevel.DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;
	table->ACPILevel.DeepSleepDivId = 0;

	spll_func_cntl &= ~CG_SPLL_FUNC_CNTL__SPLL_PWRON_MASK;
	spll_func_cntl |= CG_SPLL_FUNC_CNTL__SPLL_RESET_MASK;

	spll_func_cntl_2 &= ~CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= (4 << CG_SPLL_FUNC_CNTL_2__SCLK_MUX_SEL__SHIFT);

	table->ACPILevel.CgSpllFuncCntl = spll_func_cntl;
	table->ACPILevel.CgSpllFuncCntl2 = spll_func_cntl_2;
	table->ACPILevel.CgSpllFuncCntl3 = pi->clock_registers.cg_spll_func_cntl_3;
	table->ACPILevel.CgSpllFuncCntl4 = pi->clock_registers.cg_spll_func_cntl_4;
	table->ACPILevel.SpllSpreadSpectrum = pi->clock_registers.cg_spll_spread_spectrum;
	table->ACPILevel.SpllSpreadSpectrum2 = pi->clock_registers.cg_spll_spread_spectrum_2;
	table->ACPILevel.CcPwrDynRm = 0;
	table->ACPILevel.CcPwrDynRm1 = 0;

	table->ACPILevel.Flags = cpu_to_be32(table->ACPILevel.Flags);
	table->ACPILevel.MinVddcPhases = cpu_to_be32(table->ACPILevel.MinVddcPhases);
	table->ACPILevel.SclkFrequency = cpu_to_be32(table->ACPILevel.SclkFrequency);
	table->ACPILevel.CgSpllFuncCntl = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl);
	table->ACPILevel.CgSpllFuncCntl2 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl2);
	table->ACPILevel.CgSpllFuncCntl3 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl3);
	table->ACPILevel.CgSpllFuncCntl4 = cpu_to_be32(table->ACPILevel.CgSpllFuncCntl4);
	table->ACPILevel.SpllSpreadSpectrum = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum);
	table->ACPILevel.SpllSpreadSpectrum2 = cpu_to_be32(table->ACPILevel.SpllSpreadSpectrum2);
	table->ACPILevel.CcPwrDynRm = cpu_to_be32(table->ACPILevel.CcPwrDynRm);
	table->ACPILevel.CcPwrDynRm1 = cpu_to_be32(table->ACPILevel.CcPwrDynRm1);

	table->MemoryACPILevel.MinVddc = table->ACPILevel.MinVddc;
	table->MemoryACPILevel.MinVddcPhases = table->ACPILevel.MinVddcPhases;

	if (pi->vddci_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		if (pi->acpi_vddci)
			table->MemoryACPILevel.MinVddci =
				cpu_to_be32(pi->acpi_vddci * VOLTAGE_SCALE);
		else
			table->MemoryACPILevel.MinVddci =
				cpu_to_be32(pi->min_vddci_in_pp_table * VOLTAGE_SCALE);
	}

	if (ci_populate_mvdd_value(adev, 0, &voltage_level))
		table->MemoryACPILevel.MinMvdd = 0;
	else
		table->MemoryACPILevel.MinMvdd =
			cpu_to_be32(voltage_level.Voltage * VOLTAGE_SCALE);

	mclk_pwrmgt_cntl |= MCLK_PWRMGT_CNTL__MRDCK0_RESET_MASK |
		MCLK_PWRMGT_CNTL__MRDCK1_RESET_MASK;
	mclk_pwrmgt_cntl &= ~(MCLK_PWRMGT_CNTL__MRDCK0_PDNB_MASK |
			      MCLK_PWRMGT_CNTL__MRDCK1_PDNB_MASK);

	dll_cntl &= ~(DLL_CNTL__MRDCK0_BYPASS_MASK | DLL_CNTL__MRDCK1_BYPASS_MASK);

	table->MemoryACPILevel.DllCntl = cpu_to_be32(dll_cntl);
	table->MemoryACPILevel.MclkPwrmgtCntl = cpu_to_be32(mclk_pwrmgt_cntl);
	table->MemoryACPILevel.MpllAdFuncCntl =
		cpu_to_be32(pi->clock_registers.mpll_ad_func_cntl);
	table->MemoryACPILevel.MpllDqFuncCntl =
		cpu_to_be32(pi->clock_registers.mpll_dq_func_cntl);
	table->MemoryACPILevel.MpllFuncCntl =
		cpu_to_be32(pi->clock_registers.mpll_func_cntl);
	table->MemoryACPILevel.MpllFuncCntl_1 =
		cpu_to_be32(pi->clock_registers.mpll_func_cntl_1);
	table->MemoryACPILevel.MpllFuncCntl_2 =
		cpu_to_be32(pi->clock_registers.mpll_func_cntl_2);
	table->MemoryACPILevel.MpllSs1 = cpu_to_be32(pi->clock_registers.mpll_ss1);
	table->MemoryACPILevel.MpllSs2 = cpu_to_be32(pi->clock_registers.mpll_ss2);

	table->MemoryACPILevel.EnabledForThrottle = 0;
	table->MemoryACPILevel.EnabledForActivity = 0;
	table->MemoryACPILevel.UpH = 0;
	table->MemoryACPILevel.DownH = 100;
	table->MemoryACPILevel.VoltageDownH = 0;
	table->MemoryACPILevel.ActivityLevel =
		cpu_to_be16((u16)pi->mclk_activity_target);

	table->MemoryACPILevel.StutterEnable = false;
	table->MemoryACPILevel.StrobeEnable = false;
	table->MemoryACPILevel.EdcReadEnable = false;
	table->MemoryACPILevel.EdcWriteEnable = false;
	table->MemoryACPILevel.RttEnable = false;

	return 0;
}
static int ci_enable_ulv(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ulv_parm *ulv = &pi->ulv;

	if (ulv->supported) {
		if (enable)
			return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
				0 : -EINVAL;
		else
			return (amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
				0 : -EINVAL;
	}

	return 0;
}
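/*
 * The ULV VDDC offset is derived from the first vddc-on-sclk entry; for
 * SVID2-controlled parts it is instead encoded as a VID step count using
 * the VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1 ratio.
 */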
static int ci_populate_ulv_level(struct amdgpu_device *adev,
				 SMU7_Discrete_Ulv *state)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u16 ulv_voltage = adev->pm.dpm.backbias_response_time;

	state->CcPwrDynRm = 0;
	state->CcPwrDynRm1 = 0;

	if (ulv_voltage == 0) {
		pi->ulv.supported = false;
		return 0;
	}

	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_BY_SVID2) {
		if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
			state->VddcOffset = 0;
		else
			state->VddcOffset =
				adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage;
	} else {
		if (ulv_voltage > adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v)
			state->VddcOffsetVid = 0;
		else
			state->VddcOffsetVid = (u8)
				((adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[0].v - ulv_voltage) *
				 VOLTAGE_VID_OFFSET_SCALE2 / VOLTAGE_VID_OFFSET_SCALE1);
	}
	state->VddcPhase = pi->vddc_phase_shed_control ? 0 : 1;

	state->CcPwrDynRm = cpu_to_be32(state->CcPwrDynRm);
	state->CcPwrDynRm1 = cpu_to_be32(state->CcPwrDynRm1);
	state->VddcOffset = cpu_to_be16(state->VddcOffset);

	return 0;
}
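/*
 * Compute the SPLL register set for a target engine clock: take the
 * reference/feedback dividers from atombios, enable dithering, and apply
 * engine spread spectrum when SS data exists for the VCO frequency.
 */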
static int ci_calculate_sclk_params(struct amdgpu_device *adev,
				    u32 engine_clock,
				    SMU7_Discrete_GraphicsLevel *sclk)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct atom_clock_dividers dividers;
	u32 spll_func_cntl_3 = pi->clock_registers.cg_spll_func_cntl_3;
	u32 spll_func_cntl_4 = pi->clock_registers.cg_spll_func_cntl_4;
	u32 cg_spll_spread_spectrum = pi->clock_registers.cg_spll_spread_spectrum;
	u32 cg_spll_spread_spectrum_2 = pi->clock_registers.cg_spll_spread_spectrum_2;
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 reference_divider;
	u32 fbdiv;
	int ret;

	ret = amdgpu_atombios_get_clock_dividers(adev,
						 COMPUTE_GPUCLK_INPUT_FLAG_SCLK,
						 engine_clock, false, &dividers);
	if (ret)
		return ret;

	reference_divider = 1 + dividers.ref_div;
	fbdiv = dividers.fb_div & 0x3FFFFFF;

	spll_func_cntl_3 &= ~CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV_MASK;
	spll_func_cntl_3 |= (fbdiv << CG_SPLL_FUNC_CNTL_3__SPLL_FB_DIV__SHIFT);
	spll_func_cntl_3 |= CG_SPLL_FUNC_CNTL_3__SPLL_DITHEN_MASK;

	if (pi->caps_sclk_ss_support) {
		struct amdgpu_atom_ss ss;
		u32 vco_freq = engine_clock * dividers.post_div;

		if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);

			cg_spll_spread_spectrum &= ~(CG_SPLL_SPREAD_SPECTRUM__CLKS_MASK | CG_SPLL_SPREAD_SPECTRUM__SSEN_MASK);
			cg_spll_spread_spectrum |= (clk_s << CG_SPLL_SPREAD_SPECTRUM__CLKS__SHIFT);
			cg_spll_spread_spectrum |= (1 << CG_SPLL_SPREAD_SPECTRUM__SSEN__SHIFT);

			cg_spll_spread_spectrum_2 &= ~CG_SPLL_SPREAD_SPECTRUM_2__CLKV_MASK;
			cg_spll_spread_spectrum_2 |= (clk_v << CG_SPLL_SPREAD_SPECTRUM_2__CLKV__SHIFT);
		}
	}

	sclk->SclkFrequency = engine_clock;
	sclk->CgSpllFuncCntl3 = spll_func_cntl_3;
	sclk->CgSpllFuncCntl4 = spll_func_cntl_4;
	sclk->SpllSpreadSpectrum = cg_spll_spread_spectrum;
	sclk->SpllSpreadSpectrum2 = cg_spll_spread_spectrum_2;
	sclk->SclkDid = (u8)dividers.post_divider;

	return 0;
}
static int ci_populate_single_graphic_level(struct amdgpu_device *adev,
					    u32 engine_clock,
					    u16 sclk_activity_level_t,
					    SMU7_Discrete_GraphicsLevel *graphic_level)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret;

	ret = ci_calculate_sclk_params(adev, engine_clock, graphic_level);
	if (ret)
		return ret;

	ret = ci_get_dependency_volt_by_clk(adev,
					    &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
					    engine_clock, &graphic_level->MinVddc);
	if (ret)
		return ret;

	graphic_level->SclkFrequency = engine_clock;

	graphic_level->Flags = 0;
	graphic_level->MinVddcPhases = 1;

	if (pi->vddc_phase_shed_control)
		ci_populate_phase_value_based_on_sclk(adev,
						      &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
						      engine_clock,
						      &graphic_level->MinVddcPhases);

	graphic_level->ActivityLevel = sclk_activity_level_t;

	graphic_level->CcPwrDynRm = 0;
	graphic_level->CcPwrDynRm1 = 0;
	graphic_level->EnabledForThrottle = 1;
	graphic_level->UpH = 0;
	graphic_level->DownH = 0;
	graphic_level->VoltageDownH = 0;
	graphic_level->PowerThrottle = 0;

	if (pi->caps_sclk_ds)
		graphic_level->DeepSleepDivId = ci_get_sleep_divider_id_from_clock(adev,
										   engine_clock,
										   CISLAND_MINIMUM_ENGINE_CLOCK);

	graphic_level->DisplayWatermark = PPSMC_DISPLAY_WATERMARK_LOW;

	graphic_level->Flags = cpu_to_be32(graphic_level->Flags);
	graphic_level->MinVddc = cpu_to_be32(graphic_level->MinVddc * VOLTAGE_SCALE);
	graphic_level->MinVddcPhases = cpu_to_be32(graphic_level->MinVddcPhases);
	graphic_level->SclkFrequency = cpu_to_be32(graphic_level->SclkFrequency);
	graphic_level->ActivityLevel = cpu_to_be16(graphic_level->ActivityLevel);
	graphic_level->CgSpllFuncCntl3 = cpu_to_be32(graphic_level->CgSpllFuncCntl3);
	graphic_level->CgSpllFuncCntl4 = cpu_to_be32(graphic_level->CgSpllFuncCntl4);
	graphic_level->SpllSpreadSpectrum = cpu_to_be32(graphic_level->SpllSpreadSpectrum);
	graphic_level->SpllSpreadSpectrum2 = cpu_to_be32(graphic_level->SpllSpreadSpectrum2);
	graphic_level->CcPwrDynRm = cpu_to_be32(graphic_level->CcPwrDynRm);
	graphic_level->CcPwrDynRm1 = cpu_to_be32(graphic_level->CcPwrDynRm1);
	graphic_level->EnabledForActivity = 1;

	return 0;
}
static int ci_populate_all_graphic_levels(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_dpm_table *dpm_table = &pi->dpm_table;
	u32 level_array_address = pi->dpm_table_start +
		offsetof(SMU7_Discrete_DpmTable, GraphicsLevel);
	u32 level_array_size = sizeof(SMU7_Discrete_GraphicsLevel) *
		SMU7_MAX_LEVELS_GRAPHICS;
	SMU7_Discrete_GraphicsLevel *levels = pi->smc_state_table.GraphicsLevel;
	u32 i;
	int ret;

	memset(levels, 0, level_array_size);

	for (i = 0; i < dpm_table->sclk_table.count; i++) {
		ret = ci_populate_single_graphic_level(adev,
						       dpm_table->sclk_table.dpm_levels[i].value,
						       (u16)pi->activity_target[i],
						       &pi->smc_state_table.GraphicsLevel[i]);
		if (ret)
			return ret;
		if (i > 1)
			pi->smc_state_table.GraphicsLevel[i].DeepSleepDivId = 0;
		if (i == (dpm_table->sclk_table.count - 1))
			pi->smc_state_table.GraphicsLevel[i].DisplayWatermark =
				PPSMC_DISPLAY_WATERMARK_HIGH;
	}

	pi->smc_state_table.GraphicsDpmLevelCount = (u8)dpm_table->sclk_table.count;
	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&dpm_table->sclk_table);

	ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
					  (u8 *)levels, level_array_size,
					  pi->sram_end);
	if (ret)
		return ret;

	return 0;
}
static int ci_populate_ulv_state(struct amdgpu_device *adev,
				 SMU7_Discrete_Ulv *ulv_level)
{
	return ci_populate_ulv_level(adev, ulv_level);
}
static int ci_populate_all_memory_levels(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_dpm_table *dpm_table = &pi->dpm_table;
	u32 level_array_address = pi->dpm_table_start +
		offsetof(SMU7_Discrete_DpmTable, MemoryLevel);
	u32 level_array_size = sizeof(SMU7_Discrete_MemoryLevel) *
		SMU7_MAX_LEVELS_MEMORY;
	SMU7_Discrete_MemoryLevel *levels = pi->smc_state_table.MemoryLevel;
	u32 i;
	int ret;

	memset(levels, 0, level_array_size);

	for (i = 0; i < dpm_table->mclk_table.count; i++) {
		if (dpm_table->mclk_table.dpm_levels[i].value == 0)
			return -EINVAL;
		ret = ci_populate_single_memory_level(adev,
						      dpm_table->mclk_table.dpm_levels[i].value,
						      &pi->smc_state_table.MemoryLevel[i]);
		if (ret)
			return ret;
	}

	if ((dpm_table->mclk_table.count >= 2) &&
	    ((adev->pdev->device == 0x67B0) || (adev->pdev->device == 0x67B1))) {
		pi->smc_state_table.MemoryLevel[1].MinVddc =
			pi->smc_state_table.MemoryLevel[0].MinVddc;
		pi->smc_state_table.MemoryLevel[1].MinVddcPhases =
			pi->smc_state_table.MemoryLevel[0].MinVddcPhases;
	}

	pi->smc_state_table.MemoryLevel[0].ActivityLevel = cpu_to_be16(0x1F);

	pi->smc_state_table.MemoryDpmLevelCount = (u8)dpm_table->mclk_table.count;
	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&dpm_table->mclk_table);

	pi->smc_state_table.MemoryLevel[dpm_table->mclk_table.count - 1].DisplayWatermark =
		PPSMC_DISPLAY_WATERMARK_HIGH;

	ret = amdgpu_ci_copy_bytes_to_smc(adev, level_array_address,
					  (u8 *)levels, level_array_size,
					  pi->sram_end);
	if (ret)
		return ret;

	return 0;
}
static void ci_reset_single_dpm_table(struct amdgpu_device *adev,
				      struct ci_single_dpm_table *dpm_table,
				      u32 count)
{
	u32 i;

	dpm_table->count = count;
	for (i = 0; i < MAX_REGULAR_DPM_NUMBER; i++)
		dpm_table->dpm_levels[i].enabled = false;
}
static void ci_setup_pcie_table_entry(struct ci_single_dpm_table *dpm_table,
				      u32 index, u32 pcie_gen, u32 pcie_lanes)
{
	dpm_table->dpm_levels[index].value = pcie_gen;
	dpm_table->dpm_levels[index].param1 = pcie_lanes;
	dpm_table->dpm_levels[index].enabled = true;
}
static int ci_setup_default_pcie_tables(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if (!pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels)
		return -EINVAL;

	if (pi->use_pcie_performance_levels && !pi->use_pcie_powersaving_levels) {
		pi->pcie_gen_powersaving = pi->pcie_gen_performance;
		pi->pcie_lane_powersaving = pi->pcie_lane_performance;
	} else if (!pi->use_pcie_performance_levels && pi->use_pcie_powersaving_levels) {
		pi->pcie_gen_performance = pi->pcie_gen_powersaving;
		pi->pcie_lane_performance = pi->pcie_lane_powersaving;
	}

	ci_reset_single_dpm_table(adev,
				  &pi->dpm_table.pcie_speed_table,
				  SMU7_MAX_LEVELS_LINK);

	if (adev->asic_type == CHIP_BONAIRE)
		ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
					  pi->pcie_gen_powersaving.min,
					  pi->pcie_lane_powersaving.max);
	else
		ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 0,
					  pi->pcie_gen_powersaving.min,
					  pi->pcie_lane_powersaving.min);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 1,
				  pi->pcie_gen_performance.min,
				  pi->pcie_lane_performance.min);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 2,
				  pi->pcie_gen_powersaving.min,
				  pi->pcie_lane_powersaving.max);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 3,
				  pi->pcie_gen_performance.min,
				  pi->pcie_lane_performance.max);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 4,
				  pi->pcie_gen_powersaving.max,
				  pi->pcie_lane_powersaving.max);
	ci_setup_pcie_table_entry(&pi->dpm_table.pcie_speed_table, 5,
				  pi->pcie_gen_performance.max,
				  pi->pcie_lane_performance.max);

	pi->dpm_table.pcie_speed_table.count = 6;

	return 0;
}
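/*
 * Build the default single DPM tables from the atombios dependency tables:
 * one sclk/mclk level per distinct clock, plus the vddc/vddci/mvdd voltage
 * tables and the fixed six-entry PCIe speed table.
 */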
static int ci_setup_default_dpm_tables(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	struct amdgpu_clock_voltage_dependency_table *allowed_mclk_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
	struct amdgpu_cac_leakage_table *std_voltage_table =
		&adev->pm.dpm.dyn_state.cac_leakage_table;
	u32 i;

	if (allowed_sclk_vddc_table == NULL)
		return -EINVAL;
	if (allowed_sclk_vddc_table->count < 1)
		return -EINVAL;
	if (allowed_mclk_table == NULL)
		return -EINVAL;
	if (allowed_mclk_table->count < 1)
		return -EINVAL;

	memset(&pi->dpm_table, 0, sizeof(struct ci_dpm_table));

	ci_reset_single_dpm_table(adev,
				  &pi->dpm_table.sclk_table,
				  SMU7_MAX_LEVELS_GRAPHICS);
	ci_reset_single_dpm_table(adev,
				  &pi->dpm_table.mclk_table,
				  SMU7_MAX_LEVELS_MEMORY);
	ci_reset_single_dpm_table(adev,
				  &pi->dpm_table.vddc_table,
				  SMU7_MAX_LEVELS_VDDC);
	ci_reset_single_dpm_table(adev,
				  &pi->dpm_table.vddci_table,
				  SMU7_MAX_LEVELS_VDDCI);
	ci_reset_single_dpm_table(adev,
				  &pi->dpm_table.mvdd_table,
				  SMU7_MAX_LEVELS_MVDD);

	pi->dpm_table.sclk_table.count = 0;
	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
		if ((i == 0) ||
		    (pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count-1].value !=
		     allowed_sclk_vddc_table->entries[i].clk)) {
			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].value =
				allowed_sclk_vddc_table->entries[i].clk;
			pi->dpm_table.sclk_table.dpm_levels[pi->dpm_table.sclk_table.count].enabled =
				(i == 0) ? true : false;
			pi->dpm_table.sclk_table.count++;
		}
	}

	pi->dpm_table.mclk_table.count = 0;
	for (i = 0; i < allowed_mclk_table->count; i++) {
		if ((i == 0) ||
		    (pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count-1].value !=
		     allowed_mclk_table->entries[i].clk)) {
			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].value =
				allowed_mclk_table->entries[i].clk;
			pi->dpm_table.mclk_table.dpm_levels[pi->dpm_table.mclk_table.count].enabled =
				(i == 0) ? true : false;
			pi->dpm_table.mclk_table.count++;
		}
	}

	for (i = 0; i < allowed_sclk_vddc_table->count; i++) {
		pi->dpm_table.vddc_table.dpm_levels[i].value =
			allowed_sclk_vddc_table->entries[i].v;
		pi->dpm_table.vddc_table.dpm_levels[i].param1 =
			std_voltage_table->entries[i].leakage;
		pi->dpm_table.vddc_table.dpm_levels[i].enabled = true;
	}
	pi->dpm_table.vddc_table.count = allowed_sclk_vddc_table->count;

	allowed_mclk_table = &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;
	if (allowed_mclk_table) {
		for (i = 0; i < allowed_mclk_table->count; i++) {
			pi->dpm_table.vddci_table.dpm_levels[i].value =
				allowed_mclk_table->entries[i].v;
			pi->dpm_table.vddci_table.dpm_levels[i].enabled = true;
		}
		pi->dpm_table.vddci_table.count = allowed_mclk_table->count;
	}

	allowed_mclk_table = &adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk;
	if (allowed_mclk_table) {
		for (i = 0; i < allowed_mclk_table->count; i++) {
			pi->dpm_table.mvdd_table.dpm_levels[i].value =
				allowed_mclk_table->entries[i].v;
			pi->dpm_table.mvdd_table.dpm_levels[i].enabled = true;
		}
		pi->dpm_table.mvdd_table.count = allowed_mclk_table->count;
	}

	ci_setup_default_pcie_tables(adev);

	return 0;
}
static int ci_find_boot_level(struct ci_single_dpm_table *table,
			      u32 value, u32 *boot_level)
{
	u32 i;
	int ret = -EINVAL;

	for(i = 0; i < table->count; i++) {
		if (value == table->dpm_levels[i].value) {
			*boot_level = i;
			ret = 0;
		}
	}

	return ret;
}
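/*
 * Top-level construction of the SMU7 discrete DPM table: build the default
 * tables, populate every level type (graphics, memory, link, ACPI, UVD,
 * VCE, ACP, SAMU), fill in the boot levels and global control fields,
 * byte-swap the scalars to the SMC's big-endian layout, and upload the
 * result (minus the trailing PID controllers) to SMC SRAM.
 */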
3661 static int ci_init_smc_table(struct amdgpu_device
*adev
)
3663 struct ci_power_info
*pi
= ci_get_pi(adev
);
3664 struct ci_ulv_parm
*ulv
= &pi
->ulv
;
3665 struct amdgpu_ps
*amdgpu_boot_state
= adev
->pm
.dpm
.boot_ps
;
3666 SMU7_Discrete_DpmTable
*table
= &pi
->smc_state_table
;
3669 ret
= ci_setup_default_dpm_tables(adev
);
3673 if (pi
->voltage_control
!= CISLANDS_VOLTAGE_CONTROL_NONE
)
3674 ci_populate_smc_voltage_tables(adev
, table
);
3676 ci_init_fps_limits(adev
);
	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;

	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
		table->SystemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

	if (ulv->supported) {
		ret = ci_populate_ulv_state(adev, &pi->smc_state_table.Ulv);
		if (ret)
			return ret;
		WREG32_SMC(ixCG_ULV_PARAMETER, ulv->cg_ulv_parameter);
	}

	ret = ci_populate_all_graphic_levels(adev);
	if (ret)
		return ret;

	ret = ci_populate_all_memory_levels(adev);
	if (ret)
		return ret;

	ci_populate_smc_link_level(adev, table);

	ret = ci_populate_smc_acpi_level(adev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_vce_level(adev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_acp_level(adev, table);
	if (ret)
		return ret;

	ret = ci_populate_smc_samu_level(adev, table);
	if (ret)
		return ret;

	ret = ci_do_program_memory_timing_parameters(adev);
	if (ret)
		return ret;

	ret = ci_populate_smc_uvd_level(adev, table);
	if (ret)
		return ret;

	table->UvdBootLevel = 0;
	table->VceBootLevel = 0;
	table->AcpBootLevel = 0;
	table->SamuBootLevel = 0;
	table->GraphicsBootLevel = 0;
	table->MemoryBootLevel = 0;

	ret = ci_find_boot_level(&pi->dpm_table.sclk_table,
				 pi->vbios_boot_state.sclk_bootup_value,
				 (u32 *)&pi->smc_state_table.GraphicsBootLevel);

	ret = ci_find_boot_level(&pi->dpm_table.mclk_table,
				 pi->vbios_boot_state.mclk_bootup_value,
				 (u32 *)&pi->smc_state_table.MemoryBootLevel);

	table->BootVddc = pi->vbios_boot_state.vddc_bootup_value;
	table->BootVddci = pi->vbios_boot_state.vddci_bootup_value;
	table->BootMVdd = pi->vbios_boot_state.mvdd_bootup_value;

	ci_populate_smc_initial_state(adev, amdgpu_boot_state);

	ret = ci_populate_bapm_parameters_in_dpm_table(adev);
	if (ret)
		return ret;

	table->UVDInterval = 1;
	table->VCEInterval = 1;
	table->ACPInterval = 1;
	table->SAMUInterval = 1;
	table->GraphicsVoltageChangeEnable = 1;
	table->GraphicsThermThrottleEnable = 1;
	table->GraphicsInterval = 1;
	table->VoltageInterval = 1;
	table->ThermalInterval = 1;
	table->TemperatureLimitHigh = (u16)((pi->thermal_temp_setting.temperature_high *
					     CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
	table->TemperatureLimitLow = (u16)((pi->thermal_temp_setting.temperature_low *
					    CISLANDS_Q88_FORMAT_CONVERSION_UNIT) / 1000);
	table->MemoryVoltageChangeEnable = 1;
	table->MemoryInterval = 1;
	table->VoltageResponseTime = 0;
	table->VddcVddciDelta = 4000;
	table->PhaseResponseTime = 0;
	table->MemoryThermThrottleEnable = 1;
	table->PCIeBootLinkLevel = pi->dpm_table.pcie_speed_table.count - 1;
	table->PCIeGenInterval = 1;
	if (pi->voltage_control == CISLANDS_VOLTAGE_CONTROL_BY_SVID2)
		table->SVI2Enable = 1;
	else
		table->SVI2Enable = 0;

	table->ThermGpio = 17;
	table->SclkStepSize = 0x4000;

	table->SystemFlags = cpu_to_be32(table->SystemFlags);
	table->SmioMaskVddcVid = cpu_to_be32(table->SmioMaskVddcVid);
	table->SmioMaskVddcPhase = cpu_to_be32(table->SmioMaskVddcPhase);
	table->SmioMaskVddciVid = cpu_to_be32(table->SmioMaskVddciVid);
	table->SmioMaskMvddVid = cpu_to_be32(table->SmioMaskMvddVid);
	table->SclkStepSize = cpu_to_be32(table->SclkStepSize);
	table->TemperatureLimitHigh = cpu_to_be16(table->TemperatureLimitHigh);
	table->TemperatureLimitLow = cpu_to_be16(table->TemperatureLimitLow);
	table->VddcVddciDelta = cpu_to_be16(table->VddcVddciDelta);
	table->VoltageResponseTime = cpu_to_be16(table->VoltageResponseTime);
	table->PhaseResponseTime = cpu_to_be16(table->PhaseResponseTime);
	table->BootVddc = cpu_to_be16(table->BootVddc * VOLTAGE_SCALE);
	table->BootVddci = cpu_to_be16(table->BootVddci * VOLTAGE_SCALE);
	table->BootMVdd = cpu_to_be16(table->BootMVdd * VOLTAGE_SCALE);

	ret = amdgpu_ci_copy_bytes_to_smc(adev,
					  pi->dpm_table_start +
					  offsetof(SMU7_Discrete_DpmTable, SystemFlags),
					  (u8 *)&table->SystemFlags,
					  sizeof(SMU7_Discrete_DpmTable) - 3 * sizeof(SMU7_PIDController),
					  pi->sram_end);
	if (ret)
		return ret;

	return 0;
}
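/*
 * The helpers below trim the single DPM tables against the limits of the
 * requested power state: levels outside [low_limit, high_limit] are
 * disabled so that only usable entries are reported to the SMC.
 */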
static void ci_trim_single_dpm_states(struct amdgpu_device *adev,
				      struct ci_single_dpm_table *dpm_table,
				      u32 low_limit, u32 high_limit)
{
	u32 i;

	for (i = 0; i < dpm_table->count; i++) {
		if ((dpm_table->dpm_levels[i].value < low_limit) ||
		    (dpm_table->dpm_levels[i].value > high_limit))
			dpm_table->dpm_levels[i].enabled = false;
		else
			dpm_table->dpm_levels[i].enabled = true;
	}
}
static void ci_trim_pcie_dpm_states(struct amdgpu_device *adev,
				    u32 speed_low, u32 lanes_low,
				    u32 speed_high, u32 lanes_high)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_single_dpm_table *pcie_table = &pi->dpm_table.pcie_speed_table;
	u32 i, j;

	for (i = 0; i < pcie_table->count; i++) {
		if ((pcie_table->dpm_levels[i].value < speed_low) ||
		    (pcie_table->dpm_levels[i].param1 < lanes_low) ||
		    (pcie_table->dpm_levels[i].value > speed_high) ||
		    (pcie_table->dpm_levels[i].param1 > lanes_high))
			pcie_table->dpm_levels[i].enabled = false;
		else
			pcie_table->dpm_levels[i].enabled = true;
	}

	for (i = 0; i < pcie_table->count; i++) {
		if (pcie_table->dpm_levels[i].enabled) {
			for (j = i + 1; j < pcie_table->count; j++) {
				if (pcie_table->dpm_levels[j].enabled) {
					if ((pcie_table->dpm_levels[i].value == pcie_table->dpm_levels[j].value) &&
					    (pcie_table->dpm_levels[i].param1 == pcie_table->dpm_levels[j].param1))
						pcie_table->dpm_levels[j].enabled = false;
				}
			}
		}
	}
}
static int ci_trim_dpm_states(struct amdgpu_device *adev,
			      struct amdgpu_ps *amdgpu_state)
{
	struct ci_ps *state = ci_get_ps(amdgpu_state);
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 high_limit_count;

	if (state->performance_level_count < 1)
		return -EINVAL;

	if (state->performance_level_count == 1)
		high_limit_count = 0;
	else
		high_limit_count = 1;

	ci_trim_single_dpm_states(adev,
				  &pi->dpm_table.sclk_table,
				  state->performance_levels[0].sclk,
				  state->performance_levels[high_limit_count].sclk);

	ci_trim_single_dpm_states(adev,
				  &pi->dpm_table.mclk_table,
				  state->performance_levels[0].mclk,
				  state->performance_levels[high_limit_count].mclk);

	ci_trim_pcie_dpm_states(adev,
				state->performance_levels[0].pcie_gen,
				state->performance_levels[0].pcie_lane,
				state->performance_levels[high_limit_count].pcie_gen,
				state->performance_levels[high_limit_count].pcie_lane);

	return 0;
}
static int ci_apply_disp_minimum_voltage_request(struct amdgpu_device *adev)
{
	struct amdgpu_clock_voltage_dependency_table *disp_voltage_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk;
	struct amdgpu_clock_voltage_dependency_table *vddc_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	u32 requested_voltage = 0;
	u32 i;

	if (disp_voltage_table == NULL)
		return -EINVAL;
	if (!disp_voltage_table->count)
		return -EINVAL;

	for (i = 0; i < disp_voltage_table->count; i++) {
		if (adev->clock.current_dispclk == disp_voltage_table->entries[i].clk)
			requested_voltage = disp_voltage_table->entries[i].v;
	}

	for (i = 0; i < vddc_table->count; i++) {
		if (requested_voltage <= vddc_table->entries[i].v) {
			requested_voltage = vddc_table->entries[i].v;
			return (amdgpu_ci_send_msg_to_smc_with_parameter(adev,
									 PPSMC_MSG_VddC_Request,
									 requested_voltage * VOLTAGE_SCALE) == PPSMC_Result_OK) ?
				0 : -EINVAL;
		}
	}

	return -EINVAL;
}
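/*
 * Push the current sclk/mclk/pcie level enable masks to the SMC. A failed
 * PPSMC_MSG_*DPM_SetEnabledMask message is treated as fatal for the state
 * change.
 */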
static int ci_upload_dpm_level_enable_mask(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	PPSMC_Result result;

	ci_apply_disp_minimum_voltage_request(adev);

	if (!pi->sclk_dpm_key_disabled) {
		if (pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
			result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
									  PPSMC_MSG_SCLKDPM_SetEnabledMask,
									  pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
			if (result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	if (!pi->mclk_dpm_key_disabled) {
		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
			result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
									  PPSMC_MSG_MCLKDPM_SetEnabledMask,
									  pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
			if (result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	if (!pi->pcie_dpm_key_disabled) {
		if (pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
			result = amdgpu_ci_send_msg_to_smc_with_parameter(adev,
									  PPSMC_MSG_PCIeDPM_SetEnabledMask,
									  pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
			if (result != PPSMC_Result_OK)
				return -EINVAL;
		}
	}

	return 0;
}
static void ci_find_dpm_states_clocks_in_dpm_table(struct amdgpu_device *adev,
						   struct amdgpu_ps *amdgpu_state)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *state = ci_get_ps(amdgpu_state);
	struct ci_single_dpm_table *sclk_table = &pi->dpm_table.sclk_table;
	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
	struct ci_single_dpm_table *mclk_table = &pi->dpm_table.mclk_table;
	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
	u32 i;

	pi->need_update_smu7_dpm_table = 0;

	for (i = 0; i < sclk_table->count; i++) {
		if (sclk == sclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= sclk_table->count) {
		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_SCLK;
	} else {
		/* XXX check display min clock requirements */
		if (CISLAND_MINIMUM_ENGINE_CLOCK != CISLAND_MINIMUM_ENGINE_CLOCK)
			pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_SCLK;
	}

	for (i = 0; i < mclk_table->count; i++) {
		if (mclk == mclk_table->dpm_levels[i].value)
			break;
	}

	if (i >= mclk_table->count)
		pi->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_MCLK;

	if (adev->pm.dpm.current_active_crtc_count !=
	    adev->pm.dpm.new_active_crtc_count)
		pi->need_update_smu7_dpm_table |= DPMTABLE_UPDATE_MCLK;
}
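/*
 * Re-populate and upload only the graphics/memory levels that
 * ci_find_dpm_states_clocks_in_dpm_table() flagged as changed
 * (overdrive or display-driven updates).
 */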
static int ci_populate_and_upload_sclk_mclk_dpm_levels(struct amdgpu_device *adev,
						       struct amdgpu_ps *amdgpu_state)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *state = ci_get_ps(amdgpu_state);
	u32 sclk = state->performance_levels[state->performance_level_count-1].sclk;
	u32 mclk = state->performance_levels[state->performance_level_count-1].mclk;
	struct ci_dpm_table *dpm_table = &pi->dpm_table;
	int ret;

	if (!pi->need_update_smu7_dpm_table)
		return 0;

	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_SCLK)
		dpm_table->sclk_table.dpm_levels[dpm_table->sclk_table.count-1].value = sclk;

	if (pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK)
		dpm_table->mclk_table.dpm_levels[dpm_table->mclk_table.count-1].value = mclk;

	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK)) {
		ret = ci_populate_all_graphic_levels(adev);
		if (ret)
			return ret;
	}

	if (pi->need_update_smu7_dpm_table & (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
		ret = ci_populate_all_memory_levels(adev);
		if (ret)
			return ret;
	}

	return 0;
}
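/*
 * The ci_enable_*_dpm() helpers below share one pattern: build an enable
 * mask from the block's clock/voltage dependency table (capped by the AC
 * or DC vddc limit), hand it to the SMC, then send the block's DPM
 * enable/disable message.
 */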
static int ci_enable_uvd_dpm(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct amdgpu_clock_and_voltage_limits *max_limits;
	int i;

	if (adev->pm.dpm.ac_power)
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		pi->dpm_level_enable_mask.uvd_dpm_enable_mask = 0;

		for (i = adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.uvd_dpm_enable_mask |= 1 << i;

				if (!pi->caps_uvd_dpm)
					break;
			}
		}

		amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							 PPSMC_MSG_UVDDPM_SetEnabledMask,
							 pi->dpm_level_enable_mask.uvd_dpm_enable_mask);

		if (pi->last_mclk_dpm_enable_mask & 0x1) {
			pi->uvd_enabled = true;
			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
								 PPSMC_MSG_MCLKDPM_SetEnabledMask,
								 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
		}
	} else {
		if (pi->last_mclk_dpm_enable_mask & 0x1) {
			pi->uvd_enabled = false;
			pi->dpm_level_enable_mask.mclk_dpm_enable_mask |= 1;
			amdgpu_ci_send_msg_to_smc_with_parameter(adev,
								 PPSMC_MSG_MCLKDPM_SetEnabledMask,
								 pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
		}
	}

	return (amdgpu_ci_send_msg_to_smc(adev, enable ?
					  PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

static int ci_enable_vce_dpm(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct amdgpu_clock_and_voltage_limits *max_limits;
	int i;

	if (adev->pm.dpm.ac_power)
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		pi->dpm_level_enable_mask.vce_dpm_enable_mask = 0;
		for (i = adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.vce_dpm_enable_mask |= 1 << i;

				if (!pi->caps_vce_dpm)
					break;
			}
		}

		amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							 PPSMC_MSG_VCEDPM_SetEnabledMask,
							 pi->dpm_level_enable_mask.vce_dpm_enable_mask);
	}

	return (amdgpu_ci_send_msg_to_smc(adev, enable ?
					  PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

static int ci_enable_samu_dpm(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct amdgpu_clock_and_voltage_limits *max_limits;
	int i;

	if (adev->pm.dpm.ac_power)
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		pi->dpm_level_enable_mask.samu_dpm_enable_mask = 0;
		for (i = adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.samu_dpm_enable_mask |= 1 << i;

				if (!pi->caps_samu_dpm)
					break;
			}
		}

		amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							 PPSMC_MSG_SAMUDPM_SetEnabledMask,
							 pi->dpm_level_enable_mask.samu_dpm_enable_mask);
	}

	return (amdgpu_ci_send_msg_to_smc(adev, enable ?
					  PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}

static int ci_enable_acp_dpm(struct amdgpu_device *adev, bool enable)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	const struct amdgpu_clock_and_voltage_limits *max_limits;
	int i;

	if (adev->pm.dpm.ac_power)
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
	else
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	if (enable) {
		pi->dpm_level_enable_mask.acp_dpm_enable_mask = 0;
		for (i = adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count - 1; i >= 0; i--) {
			if (adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v <= max_limits->vddc) {
				pi->dpm_level_enable_mask.acp_dpm_enable_mask |= 1 << i;

				if (!pi->caps_acp_dpm)
					break;
			}
		}

		amdgpu_ci_send_msg_to_smc_with_parameter(adev,
							 PPSMC_MSG_ACPDPM_SetEnabledMask,
							 pi->dpm_level_enable_mask.acp_dpm_enable_mask);
	}

	return (amdgpu_ci_send_msg_to_smc(adev, enable ?
					  PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}
static int ci_update_uvd_dpm(struct amdgpu_device *adev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;

	if (!gate) {
		if (pi->caps_uvd_dpm ||
		    (adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count <= 0))
			pi->smc_state_table.UvdBootLevel = 0;
		else
			pi->smc_state_table.UvdBootLevel =
				adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count - 1;

		tmp = RREG32_SMC(ixDPM_TABLE_475);
		tmp &= ~DPM_TABLE_475__UvdBootLevel_MASK;
		tmp |= (pi->smc_state_table.UvdBootLevel << DPM_TABLE_475__UvdBootLevel__SHIFT);
		WREG32_SMC(ixDPM_TABLE_475, tmp);
	}

	return ci_enable_uvd_dpm(adev, !gate);
}

static u8 ci_get_vce_boot_level(struct amdgpu_device *adev)
{
	u8 i;
	u32 min_evclk = 30000; /* ??? */
	struct amdgpu_vce_clock_voltage_dependency_table *table =
		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;

	for (i = 0; i < table->count; i++) {
		if (table->entries[i].evclk >= min_evclk)
			return i;
	}

	return table->count - 1;
}

static int ci_update_vce_dpm(struct amdgpu_device *adev,
			     struct amdgpu_ps *amdgpu_new_state,
			     struct amdgpu_ps *amdgpu_current_state)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret = 0;
	u32 tmp;

	if (amdgpu_current_state->evclk != amdgpu_new_state->evclk) {
		if (amdgpu_new_state->evclk) {
			/* turn the clocks on when encoding */
			ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							   AMD_CG_STATE_UNGATE);
			if (ret)
				return ret;

			pi->smc_state_table.VceBootLevel = ci_get_vce_boot_level(adev);
			tmp = RREG32_SMC(ixDPM_TABLE_475);
			tmp &= ~DPM_TABLE_475__VceBootLevel_MASK;
			tmp |= (pi->smc_state_table.VceBootLevel << DPM_TABLE_475__VceBootLevel__SHIFT);
			WREG32_SMC(ixDPM_TABLE_475, tmp);

			ret = ci_enable_vce_dpm(adev, true);
		} else {
			/* turn the clocks off when not encoding */
			ret = amdgpu_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
							   AMD_CG_STATE_GATE);
			if (ret)
				return ret;

			ret = ci_enable_vce_dpm(adev, false);
		}
	}
	return ret;
}

static int ci_update_samu_dpm(struct amdgpu_device *adev, bool gate)
{
	return ci_enable_samu_dpm(adev, gate);
}

static int ci_update_acp_dpm(struct amdgpu_device *adev, bool gate)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp;

	if (!gate) {
		pi->smc_state_table.AcpBootLevel = 0;

		tmp = RREG32_SMC(ixDPM_TABLE_475);
		tmp &= ~AcpBootLevel_MASK;
		tmp |= AcpBootLevel(pi->smc_state_table.AcpBootLevel);
		WREG32_SMC(ixDPM_TABLE_475, tmp);
	}

	return ci_enable_acp_dpm(adev, !gate);
}
static int ci_generate_dpm_level_enable_mask(struct amdgpu_device *adev,
					     struct amdgpu_ps *amdgpu_state)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret;

	ret = ci_trim_dpm_states(adev, amdgpu_state);
	if (ret)
		return ret;

	pi->dpm_level_enable_mask.sclk_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.sclk_table);
	pi->dpm_level_enable_mask.mclk_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.mclk_table);
	pi->last_mclk_dpm_enable_mask =
		pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
	if (pi->uvd_enabled) {
		if (pi->dpm_level_enable_mask.mclk_dpm_enable_mask & 1)
			pi->dpm_level_enable_mask.mclk_dpm_enable_mask &= 0xFFFFFFFE;
	}
	pi->dpm_level_enable_mask.pcie_dpm_enable_mask =
		ci_get_dpm_level_enable_mask_value(&pi->dpm_table.pcie_speed_table);

	return 0;
}
static u32 ci_get_lowest_enabled_level(struct amdgpu_device *adev,
				       u32 level_mask)
{
	u32 level = 0;

	while ((level_mask & (1 << level)) == 0)
		level++;

	return level;
}
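/*
 * Forcing a performance level: for "high" the driver forces the top
 * enabled pcie/sclk/mclk level and polls TARGET_AND_CURRENT_PROFILE_INDEX
 * until the SMC reports the switch; for "low" it forces the lowest
 * enabled level; for "auto" it unforces and re-uploads the enable masks.
 */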
static int ci_dpm_force_performance_level(struct amdgpu_device *adev,
					  enum amdgpu_dpm_forced_level level)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 tmp, levels, i;
	int ret;

	if (level == AMDGPU_DPM_FORCED_LEVEL_HIGH) {
		if ((!pi->pcie_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
			levels = 0;
			tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
			while (tmp >>= 1)
				levels++;
			if (levels) {
				ret = ci_dpm_force_state_pcie(adev, level);
				if (ret)
					return ret;
				for (i = 0; i < adev->usec_timeout; i++) {
					tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
					       TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
						TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
					if (tmp == levels)
						break;
					udelay(1);
				}
			}
		}
		if ((!pi->sclk_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
			levels = 0;
			tmp = pi->dpm_level_enable_mask.sclk_dpm_enable_mask;
			while (tmp >>= 1)
				levels++;
			if (levels) {
				ret = ci_dpm_force_state_sclk(adev, levels);
				if (ret)
					return ret;
				for (i = 0; i < adev->usec_timeout; i++) {
					tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
					       TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
						TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
					if (tmp == levels)
						break;
					udelay(1);
				}
			}
		}
		if ((!pi->mclk_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
			levels = 0;
			tmp = pi->dpm_level_enable_mask.mclk_dpm_enable_mask;
			while (tmp >>= 1)
				levels++;
			if (levels) {
				ret = ci_dpm_force_state_mclk(adev, levels);
				if (ret)
					return ret;
				for (i = 0; i < adev->usec_timeout; i++) {
					tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
					       TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
						TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
					if (tmp == levels)
						break;
					udelay(1);
				}
			}
		}
		if ((!pi->pcie_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
			levels = 0;
			tmp = pi->dpm_level_enable_mask.pcie_dpm_enable_mask;
			while (tmp >>= 1)
				levels++;
			if (levels) {
				ret = ci_dpm_force_state_pcie(adev, level);
				if (ret)
					return ret;
				for (i = 0; i < adev->usec_timeout; i++) {
					tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
					       TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
						TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
					if (tmp == levels)
						break;
					udelay(1);
				}
			}
		}
	} else if (level == AMDGPU_DPM_FORCED_LEVEL_LOW) {
		if ((!pi->sclk_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.sclk_dpm_enable_mask) {
			levels = ci_get_lowest_enabled_level(adev,
							     pi->dpm_level_enable_mask.sclk_dpm_enable_mask);
			ret = ci_dpm_force_state_sclk(adev, levels);
			if (ret)
				return ret;
			for (i = 0; i < adev->usec_timeout; i++) {
				tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
				       TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >>
					TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT;
				if (tmp == levels)
					break;
				udelay(1);
			}
		}
		if ((!pi->mclk_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.mclk_dpm_enable_mask) {
			levels = ci_get_lowest_enabled_level(adev,
							     pi->dpm_level_enable_mask.mclk_dpm_enable_mask);
			ret = ci_dpm_force_state_mclk(adev, levels);
			if (ret)
				return ret;
			for (i = 0; i < adev->usec_timeout; i++) {
				tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) &
				       TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX_MASK) >>
					TARGET_AND_CURRENT_PROFILE_INDEX__CURR_MCLK_INDEX__SHIFT;
				if (tmp == levels)
					break;
				udelay(1);
			}
		}
		if ((!pi->pcie_dpm_key_disabled) &&
		    pi->dpm_level_enable_mask.pcie_dpm_enable_mask) {
			levels = ci_get_lowest_enabled_level(adev,
							     pi->dpm_level_enable_mask.pcie_dpm_enable_mask);
			ret = ci_dpm_force_state_pcie(adev, levels);
			if (ret)
				return ret;
			for (i = 0; i < adev->usec_timeout; i++) {
				tmp = (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_1) &
				       TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX_MASK) >>
					TARGET_AND_CURRENT_PROFILE_INDEX_1__CURR_PCIE_INDEX__SHIFT;
				if (tmp == levels)
					break;
				udelay(1);
			}
		}
	} else if (level == AMDGPU_DPM_FORCED_LEVEL_AUTO) {
		if (!pi->pcie_dpm_key_disabled) {
			PPSMC_Result smc_result;

			smc_result = amdgpu_ci_send_msg_to_smc(adev,
							       PPSMC_MSG_PCIeDPM_UnForceLevel);
			if (smc_result != PPSMC_Result_OK)
				return -EINVAL;
		}
		ret = ci_upload_dpm_level_enable_mask(adev);
		if (ret)
			return ret;
	}

	adev->pm.dpm.forced_level = level;

	return 0;
}
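/*
 * MC (memory controller) register shadow-table handling. The driver copies
 * the vbios-provided AC timing registers, appends the derived EMRS/MRS/MRS1
 * entries, and uploads the result so the SMC can reprogram memory timings
 * on mclk switches.
 */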
static int ci_set_mc_special_registers(struct amdgpu_device *adev,
				       struct ci_mc_reg_table *table)
{
	u8 i, j, k;
	u32 temp_reg;

	for (i = 0, j = table->last; i < table->last; i++) {
		if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
			return -EINVAL;
		switch(table->mc_reg_address[i].s1) {
		case mmMC_SEQ_MISC1:
			temp_reg = RREG32(mmMC_PMG_CMD_EMRS);
			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_EMRS;
			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_EMRS_LP;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					((temp_reg & 0xffff0000)) | ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
			}
			j++;
			if (j >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;

			temp_reg = RREG32(mmMC_PMG_CMD_MRS);
			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS;
			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS_LP;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
				if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
			}
			j++;
			if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;

			if (adev->mc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
				table->mc_reg_address[j].s1 = mmMC_PMG_AUTO_CMD;
				table->mc_reg_address[j].s0 = mmMC_PMG_AUTO_CMD;
				for (k = 0; k < table->num_entries; k++) {
					table->mc_reg_table_entry[k].mc_data[j] =
						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
				}
				j++;
				if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
					return -EINVAL;
			}
			break;
		case mmMC_SEQ_RESERVE_M:
			temp_reg = RREG32(mmMC_PMG_CMD_MRS1);
			table->mc_reg_address[j].s1 = mmMC_PMG_CMD_MRS1;
			table->mc_reg_address[j].s0 = mmMC_SEQ_PMG_CMD_MRS1_LP;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) | (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
			}
			j++;
			if (j > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;
			break;
		default:
			break;
		}
	}

	table->last = j;

	return 0;
}
static bool ci_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
{
	bool result = true;

	switch(in_reg) {
	case mmMC_SEQ_RAS_TIMING:
		*out_reg = mmMC_SEQ_RAS_TIMING_LP;
		break;
	case mmMC_SEQ_DLL_STBY:
		*out_reg = mmMC_SEQ_DLL_STBY_LP;
		break;
	case mmMC_SEQ_G5PDX_CMD0:
		*out_reg = mmMC_SEQ_G5PDX_CMD0_LP;
		break;
	case mmMC_SEQ_G5PDX_CMD1:
		*out_reg = mmMC_SEQ_G5PDX_CMD1_LP;
		break;
	case mmMC_SEQ_G5PDX_CTRL:
		*out_reg = mmMC_SEQ_G5PDX_CTRL_LP;
		break;
	case mmMC_SEQ_CAS_TIMING:
		*out_reg = mmMC_SEQ_CAS_TIMING_LP;
		break;
	case mmMC_SEQ_MISC_TIMING:
		*out_reg = mmMC_SEQ_MISC_TIMING_LP;
		break;
	case mmMC_SEQ_MISC_TIMING2:
		*out_reg = mmMC_SEQ_MISC_TIMING2_LP;
		break;
	case mmMC_SEQ_PMG_DVS_CMD:
		*out_reg = mmMC_SEQ_PMG_DVS_CMD_LP;
		break;
	case mmMC_SEQ_PMG_DVS_CTL:
		*out_reg = mmMC_SEQ_PMG_DVS_CTL_LP;
		break;
	case mmMC_SEQ_RD_CTL_D0:
		*out_reg = mmMC_SEQ_RD_CTL_D0_LP;
		break;
	case mmMC_SEQ_RD_CTL_D1:
		*out_reg = mmMC_SEQ_RD_CTL_D1_LP;
		break;
	case mmMC_SEQ_WR_CTL_D0:
		*out_reg = mmMC_SEQ_WR_CTL_D0_LP;
		break;
	case mmMC_SEQ_WR_CTL_D1:
		*out_reg = mmMC_SEQ_WR_CTL_D1_LP;
		break;
	case mmMC_PMG_CMD_EMRS:
		*out_reg = mmMC_SEQ_PMG_CMD_EMRS_LP;
		break;
	case mmMC_PMG_CMD_MRS:
		*out_reg = mmMC_SEQ_PMG_CMD_MRS_LP;
		break;
	case mmMC_PMG_CMD_MRS1:
		*out_reg = mmMC_SEQ_PMG_CMD_MRS1_LP;
		break;
	case mmMC_SEQ_PMG_TIMING:
		*out_reg = mmMC_SEQ_PMG_TIMING_LP;
		break;
	case mmMC_PMG_CMD_MRS2:
		*out_reg = mmMC_SEQ_PMG_CMD_MRS2_LP;
		break;
	case mmMC_SEQ_WR_CTL_2:
		*out_reg = mmMC_SEQ_WR_CTL_2_LP;
		break;
	default:
		result = false;
		break;
	}

	return result;
}
static void ci_set_valid_flag(struct ci_mc_reg_table *table)
{
	u8 i, j;

	for (i = 0; i < table->last; i++) {
		for (j = 1; j < table->num_entries; j++) {
			if (table->mc_reg_table_entry[j-1].mc_data[i] !=
			    table->mc_reg_table_entry[j].mc_data[i]) {
				table->valid_flag |= 1 << i;
				break;
			}
		}
	}
}
static void ci_set_s0_mc_reg_index(struct ci_mc_reg_table *table)
{
	u32 i;
	u16 address;

	for (i = 0; i < table->last; i++) {
		table->mc_reg_address[i].s0 =
			ci_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
			address : table->mc_reg_address[i].s1;
	}
}
static int ci_copy_vbios_mc_reg_table(const struct atom_mc_reg_table *table,
				      struct ci_mc_reg_table *ci_table)
{
	u8 i, j;

	if (table->last > SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
		return -EINVAL;
	if (table->num_entries > MAX_AC_TIMING_ENTRIES)
		return -EINVAL;

	for (i = 0; i < table->last; i++)
		ci_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;

	ci_table->last = table->last;

	for (i = 0; i < table->num_entries; i++) {
		ci_table->mc_reg_table_entry[i].mclk_max =
			table->mc_reg_table_entry[i].mclk_max;
		for (j = 0; j < table->last; j++)
			ci_table->mc_reg_table_entry[i].mc_data[j] =
				table->mc_reg_table_entry[i].mc_data[j];
	}
	ci_table->num_entries = table->num_entries;

	return 0;
}
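/*
 * Note: the masked-in patch constants in the function below (0x00000007,
 * 0x0000D0DD, 0x000C0140/0x000C0150, 0x00000030/0x0000002A) were dropped
 * by the extraction and have been restored from the upstream radeon/amdgpu
 * driver; treat them as an assumption if this copy diverges from upstream.
 */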
static int ci_register_patching_mc_seq(struct amdgpu_device *adev,
				       struct ci_mc_reg_table *table)
{
	u8 i, k;
	u32 tmp;
	bool patch;

	tmp = RREG32(mmMC_SEQ_MISC0);
	patch = ((tmp & 0x0000f00) == 0x300) ? true : false;

	if (patch &&
	    ((adev->pdev->device == 0x67B0) ||
	     (adev->pdev->device == 0x67B1))) {
		for (i = 0; i < table->last; i++) {
			if (table->last >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;
			switch (table->mc_reg_address[i].s1) {
			case mmMC_SEQ_MISC1:
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFF8) |
							0x00000007;
				}
				break;
			case mmMC_SEQ_WR_CTL_D0:
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
							0x0000D0DD;
				}
				break;
			case mmMC_SEQ_WR_CTL_D1:
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFF0F00) |
							0x0000D0DD;
				}
				break;
			case mmMC_SEQ_WR_CTL_2:
				for (k = 0; k < table->num_entries; k++) {
					if ((table->mc_reg_table_entry[k].mclk_max == 125000) ||
					    (table->mc_reg_table_entry[k].mclk_max == 137500))
						table->mc_reg_table_entry[k].mc_data[i] = 0;
				}
				break;
			case mmMC_SEQ_CAS_TIMING:
				for (k = 0; k < table->num_entries; k++) {
					if (table->mc_reg_table_entry[k].mclk_max == 125000)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
							0x000C0140;
					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFE0FE0F) |
							0x000C0150;
				}
				break;
			case mmMC_SEQ_MISC_TIMING:
				for (k = 0; k < table->num_entries; k++) {
					if (table->mc_reg_table_entry[k].mclk_max == 125000)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
							0x00000030;
					else if (table->mc_reg_table_entry[k].mclk_max == 137500)
						table->mc_reg_table_entry[k].mc_data[i] =
							(table->mc_reg_table_entry[k].mc_data[i] & 0xFFFFFFE0) |
							0x0000002A;
				}
				break;
			default:
				break;
			}
		}

		WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
		tmp = RREG32(mmMC_SEQ_IO_DEBUG_DATA);
		tmp = (tmp & 0xFFF8FFFF) | (1 << 16);
		WREG32(mmMC_SEQ_IO_DEBUG_INDEX, 3);
		WREG32(mmMC_SEQ_IO_DEBUG_DATA, tmp);
	}

	return 0;
}
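/*
 * Build the MC register table: mirror the *_LP shadow registers, pull the
 * AC timing table from the vbios, patch it for affected Hawaii boards, and
 * derive the SMC-visible register list.
 */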
static int ci_initialize_mc_reg_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct atom_mc_reg_table *table;
	struct ci_mc_reg_table *ci_table = &pi->mc_reg_table;
	u8 module_index = ci_get_memory_module_index(adev);
	int ret;

	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	WREG32(mmMC_SEQ_RAS_TIMING_LP, RREG32(mmMC_SEQ_RAS_TIMING));
	WREG32(mmMC_SEQ_CAS_TIMING_LP, RREG32(mmMC_SEQ_CAS_TIMING));
	WREG32(mmMC_SEQ_DLL_STBY_LP, RREG32(mmMC_SEQ_DLL_STBY));
	WREG32(mmMC_SEQ_G5PDX_CMD0_LP, RREG32(mmMC_SEQ_G5PDX_CMD0));
	WREG32(mmMC_SEQ_G5PDX_CMD1_LP, RREG32(mmMC_SEQ_G5PDX_CMD1));
	WREG32(mmMC_SEQ_G5PDX_CTRL_LP, RREG32(mmMC_SEQ_G5PDX_CTRL));
	WREG32(mmMC_SEQ_PMG_DVS_CMD_LP, RREG32(mmMC_SEQ_PMG_DVS_CMD));
	WREG32(mmMC_SEQ_PMG_DVS_CTL_LP, RREG32(mmMC_SEQ_PMG_DVS_CTL));
	WREG32(mmMC_SEQ_MISC_TIMING_LP, RREG32(mmMC_SEQ_MISC_TIMING));
	WREG32(mmMC_SEQ_MISC_TIMING2_LP, RREG32(mmMC_SEQ_MISC_TIMING2));
	WREG32(mmMC_SEQ_PMG_CMD_EMRS_LP, RREG32(mmMC_PMG_CMD_EMRS));
	WREG32(mmMC_SEQ_PMG_CMD_MRS_LP, RREG32(mmMC_PMG_CMD_MRS));
	WREG32(mmMC_SEQ_PMG_CMD_MRS1_LP, RREG32(mmMC_PMG_CMD_MRS1));
	WREG32(mmMC_SEQ_WR_CTL_D0_LP, RREG32(mmMC_SEQ_WR_CTL_D0));
	WREG32(mmMC_SEQ_WR_CTL_D1_LP, RREG32(mmMC_SEQ_WR_CTL_D1));
	WREG32(mmMC_SEQ_RD_CTL_D0_LP, RREG32(mmMC_SEQ_RD_CTL_D0));
	WREG32(mmMC_SEQ_RD_CTL_D1_LP, RREG32(mmMC_SEQ_RD_CTL_D1));
	WREG32(mmMC_SEQ_PMG_TIMING_LP, RREG32(mmMC_SEQ_PMG_TIMING));
	WREG32(mmMC_SEQ_PMG_CMD_MRS2_LP, RREG32(mmMC_PMG_CMD_MRS2));
	WREG32(mmMC_SEQ_WR_CTL_2_LP, RREG32(mmMC_SEQ_WR_CTL_2));

	ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
	if (ret)
		goto init_mc_done;

	ret = ci_copy_vbios_mc_reg_table(table, ci_table);
	if (ret)
		goto init_mc_done;

	ci_set_s0_mc_reg_index(ci_table);

	ret = ci_register_patching_mc_seq(adev, ci_table);
	if (ret)
		goto init_mc_done;

	ret = ci_set_mc_special_registers(adev, ci_table);
	if (ret)
		goto init_mc_done;

	ci_set_valid_flag(ci_table);

init_mc_done:
	kfree(table);

	return ret;
}
static int ci_populate_mc_reg_addresses(struct amdgpu_device *adev,
					SMU7_Discrete_MCRegisters *mc_reg_table)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 i, j;

	for (i = 0, j = 0; j < pi->mc_reg_table.last; j++) {
		if (pi->mc_reg_table.valid_flag & (1 << j)) {
			if (i >= SMU7_DISCRETE_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;
			mc_reg_table->address[i].s0 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s0);
			mc_reg_table->address[i].s1 = cpu_to_be16(pi->mc_reg_table.mc_reg_address[j].s1);
			i++;
		}
	}

	mc_reg_table->last = (u8)i;

	return 0;
}
static void ci_convert_mc_registers(const struct ci_mc_reg_entry *entry,
				    SMU7_Discrete_MCRegisterSet *data,
				    u32 num_entries, u32 valid_flag)
{
	u32 i, j;

	for (i = 0, j = 0; j < num_entries; j++) {
		if (valid_flag & (1 << j)) {
			data->value[i] = cpu_to_be32(entry->mc_data[j]);
			i++;
		}
	}
}
static void ci_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
						 const u32 memory_clock,
						 SMU7_Discrete_MCRegisterSet *mc_reg_table_data)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 i = 0;

	for(i = 0; i < pi->mc_reg_table.num_entries; i++) {
		if (memory_clock <= pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
			break;
	}

	if ((i == pi->mc_reg_table.num_entries) && (i > 0))
		--i;

	ci_convert_mc_registers(&pi->mc_reg_table.mc_reg_table_entry[i],
				mc_reg_table_data, pi->mc_reg_table.last,
				pi->mc_reg_table.valid_flag);
}
static void ci_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
					   SMU7_Discrete_MCRegisters *mc_reg_table)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	u32 i;

	for (i = 0; i < pi->dpm_table.mclk_table.count; i++)
		ci_convert_mc_reg_table_entry_to_smc(adev,
						     pi->dpm_table.mclk_table.dpm_levels[i].value,
						     &mc_reg_table->data[i]);
}
static int ci_populate_initial_mc_reg_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	int ret;

	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));

	ret = ci_populate_mc_reg_addresses(adev, &pi->smc_mc_reg_table);
	if (ret)
		return ret;

	ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);

	return amdgpu_ci_copy_bytes_to_smc(adev,
					   pi->mc_reg_table_start,
					   (u8 *)&pi->smc_mc_reg_table,
					   sizeof(SMU7_Discrete_MCRegisters),
					   pi->sram_end);
}
static int ci_update_and_upload_mc_reg_table(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);

	if (!(pi->need_update_smu7_dpm_table & DPMTABLE_OD_UPDATE_MCLK))
		return 0;

	memset(&pi->smc_mc_reg_table, 0, sizeof(SMU7_Discrete_MCRegisters));

	ci_convert_mc_reg_table_to_smc(adev, &pi->smc_mc_reg_table);

	return amdgpu_ci_copy_bytes_to_smc(adev,
					   pi->mc_reg_table_start +
					   offsetof(SMU7_Discrete_MCRegisters, data[0]),
					   (u8 *)&pi->smc_mc_reg_table.data[0],
					   sizeof(SMU7_Discrete_MCRegisterSet) *
					   pi->dpm_table.mclk_table.count,
					   pi->sram_end);
}
static void ci_enable_voltage_control(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT);

	tmp |= GENERAL_PWRMGT__VOLT_PWRMGT_EN_MASK;
	WREG32_SMC(ixGENERAL_PWRMGT, tmp);
}
static enum amdgpu_pcie_gen ci_get_maximum_link_speed(struct amdgpu_device *adev,
						      struct amdgpu_ps *amdgpu_state)
{
	struct ci_ps *state = ci_get_ps(amdgpu_state);
	int i;
	u16 pcie_speed, max_speed = 0;

	for (i = 0; i < state->performance_level_count; i++) {
		pcie_speed = state->performance_levels[i].pcie_gen;
		if (max_speed < pcie_speed)
			max_speed = pcie_speed;
	}

	return max_speed;
}
static u16 ci_get_current_pcie_speed(struct amdgpu_device *adev)
{
	u32 speed_cntl;

	speed_cntl = RREG32_PCIE(ixPCIE_LC_SPEED_CNTL) &
		PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE_MASK;
	speed_cntl >>= PCIE_LC_SPEED_CNTL__LC_CURRENT_DATA_RATE__SHIFT;

	return (u16)speed_cntl;
}
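/*
 * Note: the body of the link-width switch below was lost in extraction;
 * the encoding-to-lane-count mapping is restored from the upstream driver
 * and should be treated as an assumption.
 */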
static int ci_get_current_pcie_lane_number(struct amdgpu_device *adev)
{
	u32 link_width;

	link_width = RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL) &
		PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD_MASK;
	link_width >>= PCIE_LC_LINK_WIDTH_CNTL__LC_LINK_WIDTH_RD__SHIFT;

	switch (link_width) {
	case 1:
		return 1;
	case 2:
		return 2;
	case 3:
		return 4;
	case 4:
		return 8;
	case 5:
		return 12;
	case 6:
		return 16;
	case 7:
		return 32;
	default:
		return 16;
	}
}
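/*
 * PCIe link-speed (PSPP) handling: before a state change the driver may
 * request a higher link speed via ACPI; after the change it notifies the
 * platform if a lower speed is acceptable.
 */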
static void ci_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
							     struct amdgpu_ps *amdgpu_new_state,
							     struct amdgpu_ps *amdgpu_current_state)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	enum amdgpu_pcie_gen target_link_speed =
		ci_get_maximum_link_speed(adev, amdgpu_new_state);
	enum amdgpu_pcie_gen current_link_speed;

	if (pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
		current_link_speed = ci_get_maximum_link_speed(adev, amdgpu_current_state);
	else
		current_link_speed = pi->force_pcie_gen;

	pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
	pi->pspp_notify_required = false;
	if (target_link_speed > current_link_speed) {
		switch (target_link_speed) {
#ifdef CONFIG_ACPI
		case AMDGPU_PCIE_GEN3:
			if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
				break;
			pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
			if (current_link_speed == AMDGPU_PCIE_GEN2)
				break;
		case AMDGPU_PCIE_GEN2:
			if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
				break;
#endif
		default:
			pi->force_pcie_gen = ci_get_current_pcie_speed(adev);
			break;
		}
	} else {
		if (target_link_speed < current_link_speed)
			pi->pspp_notify_required = true;
	}
}
static void ci_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
							   struct amdgpu_ps *amdgpu_new_state,
							   struct amdgpu_ps *amdgpu_current_state)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	enum amdgpu_pcie_gen target_link_speed =
		ci_get_maximum_link_speed(adev, amdgpu_new_state);
	u8 request;

	if (pi->pspp_notify_required) {
		if (target_link_speed == AMDGPU_PCIE_GEN3)
			request = PCIE_PERF_REQ_PECI_GEN3;
		else if (target_link_speed == AMDGPU_PCIE_GEN2)
			request = PCIE_PERF_REQ_PECI_GEN2;
		else
			request = PCIE_PERF_REQ_PECI_GEN1;

		if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
		    (ci_get_current_pcie_speed(adev) > 0))
			return;

#ifdef CONFIG_ACPI
		amdgpu_acpi_pcie_performance_request(adev, request, false);
#endif
	}
}
static int ci_set_private_data_variables_based_on_pptable(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_clock_voltage_dependency_table *allowed_sclk_vddc_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
	struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddc_table =
		&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk;
	struct amdgpu_clock_voltage_dependency_table *allowed_mclk_vddci_table =
		&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk;

	if (allowed_sclk_vddc_table == NULL)
		return -EINVAL;
	if (allowed_sclk_vddc_table->count < 1)
		return -EINVAL;
	if (allowed_mclk_vddc_table == NULL)
		return -EINVAL;
	if (allowed_mclk_vddc_table->count < 1)
		return -EINVAL;
	if (allowed_mclk_vddci_table == NULL)
		return -EINVAL;
	if (allowed_mclk_vddci_table->count < 1)
		return -EINVAL;

	pi->min_vddc_in_pp_table = allowed_sclk_vddc_table->entries[0].v;
	pi->max_vddc_in_pp_table =
		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;

	pi->min_vddci_in_pp_table = allowed_mclk_vddci_table->entries[0].v;
	pi->max_vddci_in_pp_table =
		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;

	adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk =
		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
	adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk =
		allowed_mclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].clk;
	adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc =
		allowed_sclk_vddc_table->entries[allowed_sclk_vddc_table->count - 1].v;
	adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci =
		allowed_mclk_vddci_table->entries[allowed_mclk_vddci_table->count - 1].v;

	return 0;
}
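/*
 * The vbios may encode leakage voltage indices instead of real voltages;
 * the patch helpers below substitute the measured actual voltage for
 * every table that can carry a leakage id.
 */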
static void ci_patch_with_vddc_leakage(struct amdgpu_device *adev, u16 *vddc)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_leakage_voltage *leakage_table = &pi->vddc_leakage;
	u32 leakage_index;

	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
		if (leakage_table->leakage_id[leakage_index] == *vddc) {
			*vddc = leakage_table->actual_voltage[leakage_index];
			break;
		}
	}
}

static void ci_patch_with_vddci_leakage(struct amdgpu_device *adev, u16 *vddci)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_leakage_voltage *leakage_table = &pi->vddci_leakage;
	u32 leakage_index;

	for (leakage_index = 0; leakage_index < leakage_table->count; leakage_index++) {
		if (leakage_table->leakage_id[leakage_index] == *vddci) {
			*vddci = leakage_table->actual_voltage[leakage_index];
			break;
		}
	}
}
static void ci_patch_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
								      struct amdgpu_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
	}
}

static void ci_patch_clock_voltage_dependency_table_with_vddci_leakage(struct amdgpu_device *adev,
								       struct amdgpu_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddci_leakage(adev, &table->entries[i].v);
	}
}

static void ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
									  struct amdgpu_vce_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
	}
}

static void ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(struct amdgpu_device *adev,
									  struct amdgpu_uvd_clock_voltage_dependency_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(adev, &table->entries[i].v);
	}
}

static void ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(struct amdgpu_device *adev,
								   struct amdgpu_phase_shedding_limits_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(adev, &table->entries[i].voltage);
	}
}

static void ci_patch_clock_voltage_limits_with_vddc_leakage(struct amdgpu_device *adev,
							    struct amdgpu_clock_and_voltage_limits *table)
{
	if (table) {
		ci_patch_with_vddc_leakage(adev, (u16 *)&table->vddc);
		ci_patch_with_vddci_leakage(adev, (u16 *)&table->vddci);
	}
}

static void ci_patch_cac_leakage_table_with_vddc_leakage(struct amdgpu_device *adev,
							 struct amdgpu_cac_leakage_table *table)
{
	u32 i;

	if (table) {
		for (i = 0; i < table->count; i++)
			ci_patch_with_vddc_leakage(adev, &table->entries[i].vddc);
	}
}
static void ci_patch_dependency_tables_with_leakage(struct amdgpu_device *adev)
{
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
								  &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
								  &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
								  &adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk);
	ci_patch_clock_voltage_dependency_table_with_vddci_leakage(adev,
								   &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
	ci_patch_vce_clock_voltage_dependency_table_with_vddc_leakage(adev,
								      &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table);
	ci_patch_uvd_clock_voltage_dependency_table_with_vddc_leakage(adev,
								      &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
								  &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table);
	ci_patch_clock_voltage_dependency_table_with_vddc_leakage(adev,
								  &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table);
	ci_patch_vddc_phase_shed_limit_table_with_vddc_leakage(adev,
							       &adev->pm.dpm.dyn_state.phase_shedding_limits_table);
	ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
							&adev->pm.dpm.dyn_state.max_clock_voltage_on_ac);
	ci_patch_clock_voltage_limits_with_vddc_leakage(adev,
							&adev->pm.dpm.dyn_state.max_clock_voltage_on_dc);
	ci_patch_cac_leakage_table_with_vddc_leakage(adev,
						     &adev->pm.dpm.dyn_state.cac_leakage_table);
}
static void ci_update_current_ps(struct amdgpu_device *adev,
				 struct amdgpu_ps *rps)
{
	struct ci_ps *new_ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(adev);

	pi->current_rps = *rps;
	pi->current_ps = *new_ps;
	pi->current_rps.ps_priv = &pi->current_ps;
}

static void ci_update_requested_ps(struct amdgpu_device *adev,
				   struct amdgpu_ps *rps)
{
	struct ci_ps *new_ps = ci_get_ps(rps);
	struct ci_power_info *pi = ci_get_pi(adev);

	pi->requested_rps = *rps;
	pi->requested_ps = *new_ps;
	pi->requested_rps.ps_priv = &pi->requested_ps;
}
static int ci_dpm_pre_set_power_state(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
	struct amdgpu_ps *new_ps = &requested_ps;

	ci_update_requested_ps(adev, new_ps);

	ci_apply_state_adjust_rules(adev, &pi->requested_rps);

	return 0;
}

static void ci_dpm_post_set_power_state(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_ps *new_ps = &pi->requested_rps;

	ci_update_current_ps(adev, new_ps);
}
static void ci_dpm_setup_asic(struct amdgpu_device *adev)
{
	ci_read_clock_registers(adev);
	ci_enable_acpi_power_management(adev);
	ci_init_sclk_t(adev);
}
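/*
 * ci_dpm_enable() brings the SMC-based DPM stack up in dependency order:
 * voltage control, MC register table, firmware upload and header parse,
 * SMC tables, then the individual DPM features. Any failure is reported
 * and aborts the bring-up.
 */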
static int ci_dpm_enable(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
	int ret;

	if (amdgpu_ci_is_smc_running(adev))
		return -EINVAL;
	if (pi->voltage_control != CISLANDS_VOLTAGE_CONTROL_NONE) {
		ci_enable_voltage_control(adev);
		ret = ci_construct_voltage_tables(adev);
		if (ret) {
			DRM_ERROR("ci_construct_voltage_tables failed\n");
			return ret;
		}
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_initialize_mc_reg_table(adev);
		if (ret)
			pi->caps_dynamic_ac_timing = false;
	}
	ci_enable_spread_spectrum(adev, true);
	if (pi->thermal_protection)
		ci_enable_thermal_protection(adev, true);
	ci_program_sstp(adev);
	ci_enable_display_gap(adev);
	ci_program_vc(adev);
	ret = ci_upload_firmware(adev);
	if (ret) {
		DRM_ERROR("ci_upload_firmware failed\n");
		return ret;
	}
	ret = ci_process_firmware_header(adev);
	if (ret) {
		DRM_ERROR("ci_process_firmware_header failed\n");
		return ret;
	}
	ret = ci_initial_switch_from_arb_f0_to_f1(adev);
	if (ret) {
		DRM_ERROR("ci_initial_switch_from_arb_f0_to_f1 failed\n");
		return ret;
	}
	ret = ci_init_smc_table(adev);
	if (ret) {
		DRM_ERROR("ci_init_smc_table failed\n");
		return ret;
	}
	ret = ci_init_arb_table_index(adev);
	if (ret) {
		DRM_ERROR("ci_init_arb_table_index failed\n");
		return ret;
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_populate_initial_mc_reg_table(adev);
		if (ret) {
			DRM_ERROR("ci_populate_initial_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = ci_populate_pm_base(adev);
	if (ret) {
		DRM_ERROR("ci_populate_pm_base failed\n");
		return ret;
	}
	ci_dpm_start_smc(adev);
	ci_enable_vr_hot_gpio_interrupt(adev);
	ret = ci_notify_smc_display_change(adev, false);
	if (ret) {
		DRM_ERROR("ci_notify_smc_display_change failed\n");
		return ret;
	}
	ci_enable_sclk_control(adev, true);
	ret = ci_enable_ulv(adev, true);
	if (ret) {
		DRM_ERROR("ci_enable_ulv failed\n");
		return ret;
	}
	ret = ci_enable_ds_master_switch(adev, true);
	if (ret) {
		DRM_ERROR("ci_enable_ds_master_switch failed\n");
		return ret;
	}
	ret = ci_start_dpm(adev);
	if (ret) {
		DRM_ERROR("ci_start_dpm failed\n");
		return ret;
	}
	ret = ci_enable_didt(adev, true);
	if (ret) {
		DRM_ERROR("ci_enable_didt failed\n");
		return ret;
	}
	ret = ci_enable_smc_cac(adev, true);
	if (ret) {
		DRM_ERROR("ci_enable_smc_cac failed\n");
		return ret;
	}
	ret = ci_enable_power_containment(adev, true);
	if (ret) {
		DRM_ERROR("ci_enable_power_containment failed\n");
		return ret;
	}

	ret = ci_power_control_set_level(adev);
	if (ret) {
		DRM_ERROR("ci_power_control_set_level failed\n");
		return ret;
	}

	ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);

	ret = ci_enable_thermal_based_sclk_dpm(adev, true);
	if (ret) {
		DRM_ERROR("ci_enable_thermal_based_sclk_dpm failed\n");
		return ret;
	}

	ci_thermal_start_thermal_controller(adev);

	ci_update_current_ps(adev, boot_ps);

	if (adev->irq.installed &&
	    amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
#if 0
		PPSMC_Result result;
#endif
		ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN,
						       CISLANDS_TEMP_RANGE_MAX);
		if (ret) {
			DRM_ERROR("ci_thermal_set_temperature_range failed\n");
			return ret;
		}
		amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
			       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
		amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq,
			       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);
#if 0
		result = amdgpu_ci_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);

		if (result != PPSMC_Result_OK)
			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
#endif
	}

	return 0;
}
static void ci_dpm_disable(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;

	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
		       AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
	amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
		       AMDGPU_THERMAL_IRQ_HIGH_TO_LOW);

	ci_dpm_powergate_uvd(adev, false);

	if (!amdgpu_ci_is_smc_running(adev))
		return;

	ci_thermal_stop_thermal_controller(adev);

	if (pi->thermal_protection)
		ci_enable_thermal_protection(adev, false);
	ci_enable_power_containment(adev, false);
	ci_enable_smc_cac(adev, false);
	ci_enable_didt(adev, false);
	ci_enable_spread_spectrum(adev, false);
	ci_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
	ci_stop_dpm(adev);
	ci_enable_ds_master_switch(adev, false);
	ci_enable_ulv(adev, false);
	ci_clear_vc(adev);
	ci_reset_to_default(adev);
	ci_dpm_stop_smc(adev);
	ci_force_switch_to_arb_f0(adev);
	ci_enable_thermal_based_sclk_dpm(adev, false);

	ci_update_current_ps(adev, boot_ps);
}
static int ci_dpm_set_power_state(struct amdgpu_device *adev)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_ps *new_ps = &pi->requested_rps;
	struct amdgpu_ps *old_ps = &pi->current_rps;
	int ret;

	ci_find_dpm_states_clocks_in_dpm_table(adev, new_ps);
	if (pi->pcie_performance_request)
		ci_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
	ret = ci_freeze_sclk_mclk_dpm(adev);
	if (ret) {
		DRM_ERROR("ci_freeze_sclk_mclk_dpm failed\n");
		return ret;
	}
	ret = ci_populate_and_upload_sclk_mclk_dpm_levels(adev, new_ps);
	if (ret) {
		DRM_ERROR("ci_populate_and_upload_sclk_mclk_dpm_levels failed\n");
		return ret;
	}
	ret = ci_generate_dpm_level_enable_mask(adev, new_ps);
	if (ret) {
		DRM_ERROR("ci_generate_dpm_level_enable_mask failed\n");
		return ret;
	}

	ret = ci_update_vce_dpm(adev, new_ps, old_ps);
	if (ret) {
		DRM_ERROR("ci_update_vce_dpm failed\n");
		return ret;
	}

	ret = ci_update_sclk_t(adev);
	if (ret) {
		DRM_ERROR("ci_update_sclk_t failed\n");
		return ret;
	}
	if (pi->caps_dynamic_ac_timing) {
		ret = ci_update_and_upload_mc_reg_table(adev);
		if (ret) {
			DRM_ERROR("ci_update_and_upload_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = ci_program_memory_timing_parameters(adev);
	if (ret) {
		DRM_ERROR("ci_program_memory_timing_parameters failed\n");
		return ret;
	}
	ret = ci_unfreeze_sclk_mclk_dpm(adev);
	if (ret) {
		DRM_ERROR("ci_unfreeze_sclk_mclk_dpm failed\n");
		return ret;
	}
	ret = ci_upload_dpm_level_enable_mask(adev);
	if (ret) {
		DRM_ERROR("ci_upload_dpm_level_enable_mask failed\n");
		return ret;
	}
	if (pi->pcie_performance_request)
		ci_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);

	return 0;
}
static void ci_dpm_reset_asic(struct amdgpu_device *adev)
{
	ci_set_boot_state(adev);
}

static void ci_dpm_display_configuration_changed(struct amdgpu_device *adev)
{
	ci_program_display_gap(adev);
}
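/*
 * ATOM PowerPlay table parsing. The unions below overlay the different
 * table revisions found in the vbios image.
 */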
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
};

union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
	struct _ATOM_PPLIB_CI_CLOCK_INFO ci;
};

union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};
static void ci_parse_pplib_non_clock_info(struct amdgpu_device *adev,
					  struct amdgpu_ps *rps,
					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					  u8 table_rev)
{
	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
		adev->pm.dpm.boot_ps = rps;
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		adev->pm.dpm.uvd_ps = rps;
}
static void ci_parse_pplib_clock_info(struct amdgpu_device *adev,
				      struct amdgpu_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_pl *pl = &ps->performance_levels[index];

	ps->performance_level_count = index + 1;

	pl->sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
	pl->sclk |= clock_info->ci.ucEngineClockHigh << 16;
	pl->mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
	pl->mclk |= clock_info->ci.ucMemoryClockHigh << 16;

	pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
						   pi->sys_pcie_mask,
						   pi->vbios_boot_state.pcie_gen_bootup_value,
						   clock_info->ci.ucPCIEGen);
	pl->pcie_lane = amdgpu_get_pcie_lane_support(adev,
						     pi->vbios_boot_state.pcie_lane_bootup_value,
						     le16_to_cpu(clock_info->ci.usPCIELane));

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
		pi->acpi_pcie_gen = pl->pcie_gen;
	}

	if (rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) {
		pi->ulv.supported = true;
		pi->ulv.pl = *pl;
		pi->ulv.cg_ulv_parameter = CISLANDS_CGULVPARAMETER_DFLT;
	}

	/* patch up boot state */
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		pl->mclk = pi->vbios_boot_state.mclk_bootup_value;
		pl->sclk = pi->vbios_boot_state.sclk_bootup_value;
		pl->pcie_gen = pi->vbios_boot_state.pcie_gen_bootup_value;
		pl->pcie_lane = pi->vbios_boot_state.pcie_lane_bootup_value;
	}

	switch (rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
	case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
		pi->use_pcie_powersaving_levels = true;
		if (pi->pcie_gen_powersaving.max < pl->pcie_gen)
			pi->pcie_gen_powersaving.max = pl->pcie_gen;
		if (pi->pcie_gen_powersaving.min > pl->pcie_gen)
			pi->pcie_gen_powersaving.min = pl->pcie_gen;
		if (pi->pcie_lane_powersaving.max < pl->pcie_lane)
			pi->pcie_lane_powersaving.max = pl->pcie_lane;
		if (pi->pcie_lane_powersaving.min > pl->pcie_lane)
			pi->pcie_lane_powersaving.min = pl->pcie_lane;
		break;
	case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
		pi->use_pcie_performance_levels = true;
		if (pi->pcie_gen_performance.max < pl->pcie_gen)
			pi->pcie_gen_performance.max = pl->pcie_gen;
		if (pi->pcie_gen_performance.min > pl->pcie_gen)
			pi->pcie_gen_performance.min = pl->pcie_gen;
		if (pi->pcie_lane_performance.max < pl->pcie_lane)
			pi->pcie_lane_performance.max = pl->pcie_lane;
		if (pi->pcie_lane_performance.min > pl->pcie_lane)
			pi->pcie_lane_performance.min = pl->pcie_lane;
		break;
	default:
		break;
	}
}
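/*
 * Walk the vbios state array, allocate a ci_ps for each power state, and
 * translate each DPM level via ci_parse_pplib_clock_info(); the VCE states
 * are filled in from the clock info array afterwards.
 */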

static int ci_parse_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
	union pplib_power_state *power_state;
	int i, j, k, non_clock_array_index, clock_array_index;
	union pplib_clock_info *clock_info;
	struct _StateArray *state_array;
	struct _ClockInfoArray *clock_info_array;
	struct _NonClockInfoArray *non_clock_info_array;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	u8 *power_state_offset;
	struct ci_ps *ps;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	amdgpu_add_thermal_controller(adev);

	state_array = (struct _StateArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
	clock_info_array = (struct _ClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
	non_clock_info_array = (struct _NonClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));

	adev->pm.dpm.ps = kzalloc(sizeof(struct amdgpu_ps) *
				  state_array->ucNumEntries, GFP_KERNEL);
	if (!adev->pm.dpm.ps)
		return -ENOMEM;
	power_state_offset = (u8 *)state_array->states;
	for (i = 0; i < state_array->ucNumEntries; i++) {
		u8 *idx;
		power_state = (union pplib_power_state *)power_state_offset;
		non_clock_array_index = power_state->v2.nonClockInfoIndex;
		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
			&non_clock_info_array->nonClockInfo[non_clock_array_index];
		ps = kzalloc(sizeof(struct ci_ps), GFP_KERNEL);
		if (ps == NULL) {
			kfree(adev->pm.dpm.ps);
			return -ENOMEM;
		}
		adev->pm.dpm.ps[i].ps_priv = ps;
		ci_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
					      non_clock_info,
					      non_clock_info_array->ucEntrySize);
		k = 0;
		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
			clock_array_index = idx[j];
			if (clock_array_index >= clock_info_array->ucNumEntries)
				continue;
			if (k >= CISLANDS_MAX_HARDWARE_POWERLEVELS)
				break;
			clock_info = (union pplib_clock_info *)
				((u8 *)&clock_info_array->clockInfo[0] +
				 (clock_array_index * clock_info_array->ucEntrySize));
			ci_parse_pplib_clock_info(adev,
						  &adev->pm.dpm.ps[i], k,
						  clock_info);
			k++;
		}
		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
	}
	adev->pm.dpm.num_ps = state_array->ucNumEntries;

	/* fill in the vce power states */
	for (i = 0; i < AMDGPU_MAX_VCE_LEVELS; i++) {
		u32 sclk, mclk;
		clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
		clock_info = (union pplib_clock_info *)
			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
		sclk = le16_to_cpu(clock_info->ci.usEngineClockLow);
		sclk |= clock_info->ci.ucEngineClockHigh << 16;
		mclk = le16_to_cpu(clock_info->ci.usMemoryClockLow);
		mclk |= clock_info->ci.ucMemoryClockHigh << 16;
		adev->pm.dpm.vce_states[i].sclk = sclk;
		adev->pm.dpm.vce_states[i].mclk = mclk;
	}

	return 0;
}
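
/*
 * Layout note (from the ATOM pplib headers, stated here as an
 * assumption): a v2 power state is a variable-length record -- one byte
 * each for ucNumDPMLevels and nonClockInfoIndex, followed by one
 * clock-info index byte per DPM level -- so a state with 3 levels
 * occupies 2 + 3 = 5 bytes. That is why the walk above advances
 * power_state_offset by "2 + ucNumDPMLevels" rather than a fixed
 * sizeof().
 */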

static int ci_get_vbios_boot_values(struct amdgpu_device *adev,
				    struct ci_vbios_boot_state *boot_state)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	int index = GetIndexIntoMasterTable(DATA, FirmwareInfo);
	ATOM_FIRMWARE_INFO_V2_2 *firmware_info;
	u8 frev, crev;
	u16 data_offset;

	if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					  &frev, &crev, &data_offset)) {
		firmware_info =
			(ATOM_FIRMWARE_INFO_V2_2 *)(mode_info->atom_context->bios +
						    data_offset);
		boot_state->mvdd_bootup_value = le16_to_cpu(firmware_info->usBootUpMVDDCVoltage);
		boot_state->vddc_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCVoltage);
		boot_state->vddci_bootup_value = le16_to_cpu(firmware_info->usBootUpVDDCIVoltage);
		boot_state->pcie_gen_bootup_value = ci_get_current_pcie_speed(adev);
		boot_state->pcie_lane_bootup_value = ci_get_current_pcie_lane_number(adev);
		boot_state->sclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultEngineClock);
		boot_state->mclk_bootup_value = le32_to_cpu(firmware_info->ulDefaultMemoryClock);

		return 0;
	}

	return -EINVAL;
}
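
/*
 * These VBIOS defaults serve as the boot-state values; assuming the
 * usual dpm flow, states flagged as boot states in the power table are
 * patched with these sclk/mclk and PCIe settings rather than whatever
 * the table entry itself carries.
 */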

static void ci_dpm_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->pm.dpm.num_ps; i++) {
		kfree(adev->pm.dpm.ps[i].ps_priv);
	}
	kfree(adev->pm.dpm.ps);
	kfree(adev->pm.dpm.priv);
	kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
	amdgpu_free_extended_power_table(adev);
}

/**
 * ci_dpm_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int ci_dpm_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		chip_name = "bonaire";
		break;
	case CHIP_HAWAII:
		chip_name = "hawaii";
		break;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
	err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->pm.fw);

out:
	if (err) {
		printk(KERN_ERR
		       "cik_smc: Failed to load firmware \"%s\"\n",
		       fw_name);
		release_firmware(adev->pm.fw);
		adev->pm.fw = NULL;
	}
	return err;
}
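
/*
 * The SMC image is fetched through the standard kernel firmware loader,
 * so the "radeon/%s_smc.bin" name built above must resolve to a file in
 * the firmware search path (typically /lib/firmware/radeon/).
 */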

static int ci_dpm_init(struct amdgpu_device *adev)
{
	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
	SMU7_Discrete_DpmTable *dpm_table;
	struct amdgpu_gpio_rec gpio;
	u16 data_offset, size;
	u8 frev, crev;
	struct ci_power_info *pi;
	int ret;
	u32 mask;

	pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
	if (pi == NULL)
		return -ENOMEM;
	adev->pm.dpm.priv = pi;

	ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
	if (ret)
		pi->sys_pcie_mask = 0;
	else
		pi->sys_pcie_mask = mask;
	pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;

	pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1;
	pi->pcie_gen_performance.min = AMDGPU_PCIE_GEN3;
	pi->pcie_gen_powersaving.max = AMDGPU_PCIE_GEN1;
	pi->pcie_gen_powersaving.min = AMDGPU_PCIE_GEN3;

	pi->pcie_lane_performance.max = 0;
	pi->pcie_lane_performance.min = 16;
	pi->pcie_lane_powersaving.max = 0;
	pi->pcie_lane_powersaving.min = 16;

	ret = ci_get_vbios_boot_values(adev, &pi->vbios_boot_state);
	if (ret) {
		ci_dpm_fini(adev);
		return ret;
	}

	ret = amdgpu_get_platform_caps(adev);
	if (ret) {
		ci_dpm_fini(adev);
		return ret;
	}

	ret = amdgpu_parse_extended_power_table(adev);
	if (ret) {
		ci_dpm_fini(adev);
		return ret;
	}

	ret = ci_parse_power_table(adev);
	if (ret) {
		ci_dpm_fini(adev);
		return ret;
	}

	pi->dll_default_on = false;
	pi->sram_end = SMC_RAM_END;

	pi->activity_target[0] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[1] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[2] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[3] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[4] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[5] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[6] = CISLAND_TARGETACTIVITY_DFLT;
	pi->activity_target[7] = CISLAND_TARGETACTIVITY_DFLT;

	pi->mclk_activity_target = CISLAND_MCLK_TARGETACTIVITY_DFLT;

	pi->sclk_dpm_key_disabled = 0;
	pi->mclk_dpm_key_disabled = 0;
	pi->pcie_dpm_key_disabled = 0;
	pi->thermal_sclk_dpm_enabled = 0;

	pi->caps_sclk_ds = true;

	pi->mclk_strobe_mode_threshold = 40000;
	pi->mclk_stutter_mode_threshold = 40000;
	pi->mclk_edc_enable_threshold = 40000;
	pi->mclk_edc_wr_enable_threshold = 40000;

	ci_initialize_powertune_defaults(adev);

	pi->caps_fps = false;

	pi->caps_sclk_throttle_low_notification = false;

	pi->caps_uvd_dpm = true;
	pi->caps_vce_dpm = true;

	ci_get_leakage_voltages(adev);
	ci_patch_dependency_tables_with_leakage(adev);
	ci_set_private_data_variables_based_on_pptable(adev);

	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
		kzalloc(4 * sizeof(struct amdgpu_clock_voltage_dependency_entry), GFP_KERNEL);
	if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
		ci_dpm_fini(adev);
		return -ENOMEM;
	}
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;

	adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
	adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
	adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;

	adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
	adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
	adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
	adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;

	if (adev->asic_type == CHIP_HAWAII) {
		pi->thermal_temp_setting.temperature_low = 94500;
		pi->thermal_temp_setting.temperature_high = 95000;
		pi->thermal_temp_setting.temperature_shutdown = 104000;
	} else {
		pi->thermal_temp_setting.temperature_low = 99500;
		pi->thermal_temp_setting.temperature_high = 100000;
		pi->thermal_temp_setting.temperature_shutdown = 104000;
	}

	pi->uvd_enabled = false;

	dpm_table = &pi->smc_state_table;

	gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_VRHOT_GPIO_PINID);
	if (gpio.valid) {
		dpm_table->VRHotGpio = gpio.shift;
		adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
	} else {
		dpm_table->VRHotGpio = CISLANDS_UNUSED_GPIO_PIN;
		adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_REGULATOR_HOT;
	}

	gpio = amdgpu_atombios_lookup_gpio(adev, PP_AC_DC_SWITCH_GPIO_PINID);
	if (gpio.valid) {
		dpm_table->AcDcGpio = gpio.shift;
		adev->pm.dpm.platform_caps |= ATOM_PP_PLATFORM_CAP_HARDWAREDC;
	} else {
		dpm_table->AcDcGpio = CISLANDS_UNUSED_GPIO_PIN;
		adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_HARDWAREDC;
	}

	gpio = amdgpu_atombios_lookup_gpio(adev, VDDC_PCC_GPIO_PINID);
	if (gpio.valid) {
		u32 tmp = RREG32_SMC(ixCNB_PWRMGT_CNTL);

		switch (gpio.shift) {
		case 0:
			tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
			tmp |= 1 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
			break;
		case 1:
			tmp &= ~CNB_PWRMGT_CNTL__GNB_SLOW_MODE_MASK;
			tmp |= 2 << CNB_PWRMGT_CNTL__GNB_SLOW_MODE__SHIFT;
			break;
		case 2:
			tmp |= CNB_PWRMGT_CNTL__GNB_SLOW_MASK;
			break;
		case 3:
			tmp |= CNB_PWRMGT_CNTL__FORCE_NB_PS1_MASK;
			break;
		case 4:
			tmp |= CNB_PWRMGT_CNTL__DPM_ENABLED_MASK;
			break;
		default:
			DRM_ERROR("Invalid PCC GPIO: %u!\n", gpio.shift);
			break;
		}
		WREG32_SMC(ixCNB_PWRMGT_CNTL, tmp);
	}

	pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_NONE;
	if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_GPIO_LUT))
		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
	else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDC, VOLTAGE_OBJ_SVID2))
		pi->voltage_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL) {
		if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_GPIO_LUT))
			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
		else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_VDDCI, VOLTAGE_OBJ_SVID2))
			pi->vddci_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
		else
			adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_VDDCI_CONTROL;
	}

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_MVDDCONTROL) {
		if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_GPIO_LUT))
			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_GPIO;
		else if (amdgpu_atombios_is_voltage_gpio(adev, VOLTAGE_TYPE_MVDDC, VOLTAGE_OBJ_SVID2))
			pi->mvdd_control = CISLANDS_VOLTAGE_CONTROL_BY_SVID2;
		else
			adev->pm.dpm.platform_caps &= ~ATOM_PP_PLATFORM_CAP_MVDDCONTROL;
	}

	pi->vddc_phase_shed_control = true;

#if defined(CONFIG_ACPI)
	pi->pcie_performance_request =
		amdgpu_acpi_is_pcie_performance_request_supported(adev);
#else
	pi->pcie_performance_request = false;
#endif

	if (amdgpu_atom_parse_data_header(adev->mode_info.atom_context, index, &size,
					  &frev, &crev, &data_offset)) {
		pi->caps_sclk_ss_support = true;
		pi->caps_mclk_ss_support = true;
		pi->dynamic_ss = true;
	} else {
		pi->caps_sclk_ss_support = false;
		pi->caps_mclk_ss_support = false;
		pi->dynamic_ss = true;
	}

	if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
		pi->thermal_protection = true;
	else
		pi->thermal_protection = false;

	pi->caps_dynamic_ac_timing = true;

	pi->uvd_power_gated = false;

	/* make sure dc limits are valid */
	if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
	    (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
		adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
			adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

	pi->fan_ctrl_is_in_default_mode = true;

	return 0;
}
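
/*
 * Units note (an assumption, matching the conventions used elsewhere in
 * this driver): the clocks in the fallback dispclk table built in
 * ci_dpm_init() are in 10 kHz units (36000 == 360 MHz) and the voltages
 * are in mV.
 */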

static void
ci_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
					       struct seq_file *m)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct amdgpu_ps *rps = &pi->current_rps;
	u32 sclk = ci_get_average_sclk_freq(adev);
	u32 mclk = ci_get_average_mclk_freq(adev);

	seq_printf(m, "uvd    %sabled\n", pi->uvd_enabled ? "en" : "dis");
	seq_printf(m, "vce    %sabled\n", rps->vce_active ? "en" : "dis");
	seq_printf(m, "power level avg    sclk: %u mclk: %u\n",
		   sclk, mclk);
}

static void ci_dpm_print_power_state(struct amdgpu_device *adev,
				     struct amdgpu_ps *rps)
{
	struct ci_ps *ps = ci_get_ps(rps);
	struct ci_pl *pl;
	int i;

	amdgpu_dpm_print_class_info(rps->class, rps->class2);
	amdgpu_dpm_print_cap_info(rps->caps);
	printk("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
	for (i = 0; i < ps->performance_level_count; i++) {
		pl = &ps->performance_levels[i];
		printk("\t\tpower level %d    sclk: %u mclk: %u pcie gen: %u pcie lanes: %u\n",
		       i, pl->sclk, pl->mclk, pl->pcie_gen + 1, pl->pcie_lane);
	}
	amdgpu_dpm_print_ps_status(adev, rps);
}

static u32 ci_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->performance_levels[0].sclk;
	else
		return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
}

static u32 ci_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
	struct ci_power_info *pi = ci_get_pi(adev);
	struct ci_ps *requested_state = ci_get_ps(&pi->requested_rps);

	if (low)
		return requested_state->performance_levels[0].mclk;
	else
		return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
}

/* get temperature in millidegrees */
static int ci_dpm_get_temp(struct amdgpu_device *adev)
{
	u32 temp;
	int actual_temp = 0;

	temp = (RREG32_SMC(ixCG_MULT_THERMAL_STATUS) & CG_MULT_THERMAL_STATUS__CTF_TEMP_MASK) >>
		CG_MULT_THERMAL_STATUS__CTF_TEMP__SHIFT;

	if (temp & 0x200)
		actual_temp = 255;
	else
		actual_temp = temp & 0x1ff;

	actual_temp = actual_temp * 1000;

	return actual_temp;
}
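
/*
 * Decode sketch for the function above: CTF_TEMP is a 9-bit value in
 * degrees C; readings with bit 9 (0x200) set are clamped to 255 C, and
 * the result is scaled to millidegrees for the hwmon interface. E.g. a
 * raw field of 0x05A decodes to 90 C and is reported as 90000.
 */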

static int ci_set_temperature_range(struct amdgpu_device *adev)
{
	int ret;

	ret = ci_thermal_enable_alert(adev, false);
	if (ret)
		return ret;
	ret = ci_thermal_set_temperature_range(adev, CISLANDS_TEMP_RANGE_MIN,
					       CISLANDS_TEMP_RANGE_MAX);
	if (ret)
		return ret;
	ret = ci_thermal_enable_alert(adev, true);
	if (ret)
		return ret;

	return ret;
}
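
/*
 * The alert is masked while the trip points are reprogrammed,
 * presumably so that a half-updated low/high window cannot fire a
 * spurious thermal interrupt.
 */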

static int ci_dpm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	ci_dpm_set_dpm_funcs(adev);
	ci_dpm_set_irq_funcs(adev);

	return 0;
}

static int ci_dpm_late_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_dpm)
		return 0;

	ret = ci_set_temperature_range(adev);
	if (ret)
		return ret;

	ci_dpm_powergate_uvd(adev, true);

	return 0;
}

static int ci_dpm_sw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	ret = amdgpu_irq_add_id(adev, 230, &adev->pm.dpm.thermal.irq);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, 231, &adev->pm.dpm.thermal.irq);
	if (ret)
		return ret;

	/* default to balanced state */
	adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
	adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	adev->pm.dpm.forced_level = AMDGPU_DPM_FORCED_LEVEL_AUTO;
	adev->pm.default_sclk = adev->clock.default_sclk;
	adev->pm.default_mclk = adev->clock.default_mclk;
	adev->pm.current_sclk = adev->clock.default_sclk;
	adev->pm.current_mclk = adev->clock.default_mclk;
	adev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	if (amdgpu_dpm == 0)
		return 0;

	ret = ci_dpm_init_microcode(adev);
	if (ret)
		return ret;

	INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
	mutex_lock(&adev->pm.mutex);
	ret = ci_dpm_init(adev);
	if (ret)
		goto dpm_failed;
	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
	if (amdgpu_dpm == 1)
		amdgpu_pm_print_power_states(adev);
	ret = amdgpu_pm_sysfs_init(adev);
	if (ret)
		goto dpm_failed;
	mutex_unlock(&adev->pm.mutex);
	DRM_INFO("amdgpu: dpm initialized\n");

	return 0;

dpm_failed:
	ci_dpm_fini(adev);
	mutex_unlock(&adev->pm.mutex);
	DRM_ERROR("amdgpu: dpm initialization failed\n");
	return ret;
}
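
/*
 * The two interrupt sources registered in ci_dpm_sw_init() (src_id 230
 * and 231) are the CIK thermal low-to-high and high-to-low triggers;
 * they are demultiplexed by src_id in ci_dpm_process_interrupt() below.
 */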

static int ci_dpm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	mutex_lock(&adev->pm.mutex);
	amdgpu_pm_sysfs_fini(adev);
	ci_dpm_fini(adev);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}

static int ci_dpm_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_dpm)
		return 0;

	mutex_lock(&adev->pm.mutex);
	ci_dpm_setup_asic(adev);
	ret = ci_dpm_enable(adev);
	if (ret)
		adev->pm.dpm_enabled = false;
	else
		adev->pm.dpm_enabled = true;
	mutex_unlock(&adev->pm.mutex);

	return ret;
}

static int ci_dpm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		ci_dpm_disable(adev);
		mutex_unlock(&adev->pm.mutex);
	}

	return 0;
}

static int ci_dpm_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		/* disable dpm */
		ci_dpm_disable(adev);
		/* reset the power state */
		adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
		mutex_unlock(&adev->pm.mutex);
	}

	return 0;
}

static int ci_dpm_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		/* asic init will reset to the boot state */
		mutex_lock(&adev->pm.mutex);
		ci_dpm_setup_asic(adev);
		ret = ci_dpm_enable(adev);
		if (ret)
			adev->pm.dpm_enabled = false;
		else
			adev->pm.dpm_enabled = true;
		mutex_unlock(&adev->pm.mutex);
		if (adev->pm.dpm_enabled)
			amdgpu_pm_compute_clocks(adev);
	}

	return 0;
}

static bool ci_dpm_is_idle(void *handle)
{
	/* XXX */
	return true;
}

static int ci_dpm_wait_for_idle(void *handle)
{
	/* XXX */
	return 0;
}

static void ci_dpm_print_status(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	dev_info(adev->dev, "CIK DPM registers\n");
	dev_info(adev->dev, "  BIOS_SCRATCH_4=0x%08X\n",
		 RREG32(mmBIOS_SCRATCH_4));
	dev_info(adev->dev, "  MC_ARB_DRAM_TIMING=0x%08X\n",
		 RREG32(mmMC_ARB_DRAM_TIMING));
	dev_info(adev->dev, "  MC_ARB_DRAM_TIMING2=0x%08X\n",
		 RREG32(mmMC_ARB_DRAM_TIMING2));
	dev_info(adev->dev, "  MC_ARB_BURST_TIME=0x%08X\n",
		 RREG32(mmMC_ARB_BURST_TIME));
	dev_info(adev->dev, "  MC_ARB_DRAM_TIMING_1=0x%08X\n",
		 RREG32(mmMC_ARB_DRAM_TIMING_1));
	dev_info(adev->dev, "  MC_ARB_DRAM_TIMING2_1=0x%08X\n",
		 RREG32(mmMC_ARB_DRAM_TIMING2_1));
	dev_info(adev->dev, "  MC_CG_CONFIG=0x%08X\n",
		 RREG32(mmMC_CG_CONFIG));
	dev_info(adev->dev, "  MC_ARB_CG=0x%08X\n",
		 RREG32(mmMC_ARB_CG));
	dev_info(adev->dev, "  DIDT_SQ_CTRL0=0x%08X\n",
		 RREG32_DIDT(ixDIDT_SQ_CTRL0));
	dev_info(adev->dev, "  DIDT_DB_CTRL0=0x%08X\n",
		 RREG32_DIDT(ixDIDT_DB_CTRL0));
	dev_info(adev->dev, "  DIDT_TD_CTRL0=0x%08X\n",
		 RREG32_DIDT(ixDIDT_TD_CTRL0));
	dev_info(adev->dev, "  DIDT_TCP_CTRL0=0x%08X\n",
		 RREG32_DIDT(ixDIDT_TCP_CTRL0));
	dev_info(adev->dev, "  CG_THERMAL_INT=0x%08X\n",
		 RREG32_SMC(ixCG_THERMAL_INT));
	dev_info(adev->dev, "  CG_THERMAL_CTRL=0x%08X\n",
		 RREG32_SMC(ixCG_THERMAL_CTRL));
	dev_info(adev->dev, "  GENERAL_PWRMGT=0x%08X\n",
		 RREG32_SMC(ixGENERAL_PWRMGT));
	dev_info(adev->dev, "  MC_SEQ_CNTL_3=0x%08X\n",
		 RREG32(mmMC_SEQ_CNTL_3));
	dev_info(adev->dev, "  LCAC_MC0_CNTL=0x%08X\n",
		 RREG32_SMC(ixLCAC_MC0_CNTL));
	dev_info(adev->dev, "  LCAC_MC1_CNTL=0x%08X\n",
		 RREG32_SMC(ixLCAC_MC1_CNTL));
	dev_info(adev->dev, "  LCAC_CPL_CNTL=0x%08X\n",
		 RREG32_SMC(ixLCAC_CPL_CNTL));
	dev_info(adev->dev, "  SCLK_PWRMGT_CNTL=0x%08X\n",
		 RREG32_SMC(ixSCLK_PWRMGT_CNTL));
	dev_info(adev->dev, "  BIF_LNCNT_RESET=0x%08X\n",
		 RREG32(mmBIF_LNCNT_RESET));
	dev_info(adev->dev, "  FIRMWARE_FLAGS=0x%08X\n",
		 RREG32_SMC(ixFIRMWARE_FLAGS));
	dev_info(adev->dev, "  CG_SPLL_FUNC_CNTL=0x%08X\n",
		 RREG32_SMC(ixCG_SPLL_FUNC_CNTL));
	dev_info(adev->dev, "  CG_SPLL_FUNC_CNTL_2=0x%08X\n",
		 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_2));
	dev_info(adev->dev, "  CG_SPLL_FUNC_CNTL_3=0x%08X\n",
		 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_3));
	dev_info(adev->dev, "  CG_SPLL_FUNC_CNTL_4=0x%08X\n",
		 RREG32_SMC(ixCG_SPLL_FUNC_CNTL_4));
	dev_info(adev->dev, "  CG_SPLL_SPREAD_SPECTRUM=0x%08X\n",
		 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM));
	dev_info(adev->dev, "  CG_SPLL_SPREAD_SPECTRUM_2=0x%08X\n",
		 RREG32_SMC(ixCG_SPLL_SPREAD_SPECTRUM_2));
	dev_info(adev->dev, "  DLL_CNTL=0x%08X\n",
		 RREG32(mmDLL_CNTL));
	dev_info(adev->dev, "  MCLK_PWRMGT_CNTL=0x%08X\n",
		 RREG32(mmMCLK_PWRMGT_CNTL));
	dev_info(adev->dev, "  MPLL_AD_FUNC_CNTL=0x%08X\n",
		 RREG32(mmMPLL_AD_FUNC_CNTL));
	dev_info(adev->dev, "  MPLL_DQ_FUNC_CNTL=0x%08X\n",
		 RREG32(mmMPLL_DQ_FUNC_CNTL));
	dev_info(adev->dev, "  MPLL_FUNC_CNTL=0x%08X\n",
		 RREG32(mmMPLL_FUNC_CNTL));
	dev_info(adev->dev, "  MPLL_FUNC_CNTL_1=0x%08X\n",
		 RREG32(mmMPLL_FUNC_CNTL_1));
	dev_info(adev->dev, "  MPLL_FUNC_CNTL_2=0x%08X\n",
		 RREG32(mmMPLL_FUNC_CNTL_2));
	dev_info(adev->dev, "  MPLL_SS1=0x%08X\n",
		 RREG32(mmMPLL_SS1));
	dev_info(adev->dev, "  MPLL_SS2=0x%08X\n",
		 RREG32(mmMPLL_SS2));
	dev_info(adev->dev, "  CG_DISPLAY_GAP_CNTL=0x%08X\n",
		 RREG32_SMC(ixCG_DISPLAY_GAP_CNTL));
	dev_info(adev->dev, "  CG_DISPLAY_GAP_CNTL2=0x%08X\n",
		 RREG32_SMC(ixCG_DISPLAY_GAP_CNTL2));
	dev_info(adev->dev, "  CG_STATIC_SCREEN_PARAMETER=0x%08X\n",
		 RREG32_SMC(ixCG_STATIC_SCREEN_PARAMETER));
	dev_info(adev->dev, "  CG_FREQ_TRAN_VOTING_0=0x%08X\n",
		 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_0));
	dev_info(adev->dev, "  CG_FREQ_TRAN_VOTING_1=0x%08X\n",
		 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_1));
	dev_info(adev->dev, "  CG_FREQ_TRAN_VOTING_2=0x%08X\n",
		 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_2));
	dev_info(adev->dev, "  CG_FREQ_TRAN_VOTING_3=0x%08X\n",
		 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_3));
	dev_info(adev->dev, "  CG_FREQ_TRAN_VOTING_4=0x%08X\n",
		 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_4));
	dev_info(adev->dev, "  CG_FREQ_TRAN_VOTING_5=0x%08X\n",
		 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_5));
	dev_info(adev->dev, "  CG_FREQ_TRAN_VOTING_6=0x%08X\n",
		 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_6));
	dev_info(adev->dev, "  CG_FREQ_TRAN_VOTING_7=0x%08X\n",
		 RREG32_SMC(ixCG_FREQ_TRAN_VOTING_7));
	dev_info(adev->dev, "  RCU_UC_EVENTS=0x%08X\n",
		 RREG32_SMC(ixRCU_UC_EVENTS));
	dev_info(adev->dev, "  DPM_TABLE_475=0x%08X\n",
		 RREG32_SMC(ixDPM_TABLE_475));
	dev_info(adev->dev, "  MC_SEQ_RAS_TIMING_LP=0x%08X\n",
		 RREG32(mmMC_SEQ_RAS_TIMING_LP));
	dev_info(adev->dev, "  MC_SEQ_RAS_TIMING=0x%08X\n",
		 RREG32(mmMC_SEQ_RAS_TIMING));
	dev_info(adev->dev, "  MC_SEQ_CAS_TIMING_LP=0x%08X\n",
		 RREG32(mmMC_SEQ_CAS_TIMING_LP));
	dev_info(adev->dev, "  MC_SEQ_CAS_TIMING=0x%08X\n",
		 RREG32(mmMC_SEQ_CAS_TIMING));
	dev_info(adev->dev, "  MC_SEQ_DLL_STBY_LP=0x%08X\n",
		 RREG32(mmMC_SEQ_DLL_STBY_LP));
	dev_info(adev->dev, "  MC_SEQ_DLL_STBY=0x%08X\n",
		 RREG32(mmMC_SEQ_DLL_STBY));
	dev_info(adev->dev, "  MC_SEQ_G5PDX_CMD0_LP=0x%08X\n",
		 RREG32(mmMC_SEQ_G5PDX_CMD0_LP));
	dev_info(adev->dev, "  MC_SEQ_G5PDX_CMD0=0x%08X\n",
		 RREG32(mmMC_SEQ_G5PDX_CMD0));
	dev_info(adev->dev, "  MC_SEQ_G5PDX_CMD1_LP=0x%08X\n",
		 RREG32(mmMC_SEQ_G5PDX_CMD1_LP));
	dev_info(adev->dev, "  MC_SEQ_G5PDX_CMD1=0x%08X\n",
		 RREG32(mmMC_SEQ_G5PDX_CMD1));
	dev_info(adev->dev, "  MC_SEQ_G5PDX_CTRL_LP=0x%08X\n",
		 RREG32(mmMC_SEQ_G5PDX_CTRL_LP));
	dev_info(adev->dev, "  MC_SEQ_G5PDX_CTRL=0x%08X\n",
		 RREG32(mmMC_SEQ_G5PDX_CTRL));
	dev_info(adev->dev, "  MC_SEQ_PMG_DVS_CMD_LP=0x%08X\n",
		 RREG32(mmMC_SEQ_PMG_DVS_CMD_LP));
	dev_info(adev->dev, "  MC_SEQ_PMG_DVS_CMD=0x%08X\n",
		 RREG32(mmMC_SEQ_PMG_DVS_CMD));
	dev_info(adev->dev, "  MC_SEQ_PMG_DVS_CTL_LP=0x%08X\n",
		 RREG32(mmMC_SEQ_PMG_DVS_CTL_LP));
	dev_info(adev->dev, "  MC_SEQ_PMG_DVS_CTL=0x%08X\n",
		 RREG32(mmMC_SEQ_PMG_DVS_CTL));
	dev_info(adev->dev, "  MC_SEQ_MISC_TIMING_LP=0x%08X\n",
		 RREG32(mmMC_SEQ_MISC_TIMING_LP));
	dev_info(adev->dev, "  MC_SEQ_MISC_TIMING=0x%08X\n",
		 RREG32(mmMC_SEQ_MISC_TIMING));
	dev_info(adev->dev, "  MC_SEQ_MISC_TIMING2_LP=0x%08X\n",
		 RREG32(mmMC_SEQ_MISC_TIMING2_LP));
	dev_info(adev->dev, "  MC_SEQ_MISC_TIMING2=0x%08X\n",
		 RREG32(mmMC_SEQ_MISC_TIMING2));
	dev_info(adev->dev, "  MC_SEQ_PMG_CMD_EMRS_LP=0x%08X\n",
		 RREG32(mmMC_SEQ_PMG_CMD_EMRS_LP));
	dev_info(adev->dev, "  MC_PMG_CMD_EMRS=0x%08X\n",
		 RREG32(mmMC_PMG_CMD_EMRS));
	dev_info(adev->dev, "  MC_SEQ_PMG_CMD_MRS_LP=0x%08X\n",
		 RREG32(mmMC_SEQ_PMG_CMD_MRS_LP));
	dev_info(adev->dev, "  MC_PMG_CMD_MRS=0x%08X\n",
		 RREG32(mmMC_PMG_CMD_MRS));
	dev_info(adev->dev, "  MC_SEQ_PMG_CMD_MRS1_LP=0x%08X\n",
		 RREG32(mmMC_SEQ_PMG_CMD_MRS1_LP));
	dev_info(adev->dev, "  MC_PMG_CMD_MRS1=0x%08X\n",
		 RREG32(mmMC_PMG_CMD_MRS1));
	dev_info(adev->dev, "  MC_SEQ_WR_CTL_D0_LP=0x%08X\n",
		 RREG32(mmMC_SEQ_WR_CTL_D0_LP));
	dev_info(adev->dev, "  MC_SEQ_WR_CTL_D0=0x%08X\n",
		 RREG32(mmMC_SEQ_WR_CTL_D0));
	dev_info(adev->dev, "  MC_SEQ_WR_CTL_D1_LP=0x%08X\n",
		 RREG32(mmMC_SEQ_WR_CTL_D1_LP));
	dev_info(adev->dev, "  MC_SEQ_WR_CTL_D1=0x%08X\n",
		 RREG32(mmMC_SEQ_WR_CTL_D1));
	dev_info(adev->dev, "  MC_SEQ_RD_CTL_D0_LP=0x%08X\n",
		 RREG32(mmMC_SEQ_RD_CTL_D0_LP));
	dev_info(adev->dev, "  MC_SEQ_RD_CTL_D0=0x%08X\n",
		 RREG32(mmMC_SEQ_RD_CTL_D0));
	dev_info(adev->dev, "  MC_SEQ_RD_CTL_D1_LP=0x%08X\n",
		 RREG32(mmMC_SEQ_RD_CTL_D1_LP));
	dev_info(adev->dev, "  MC_SEQ_RD_CTL_D1=0x%08X\n",
		 RREG32(mmMC_SEQ_RD_CTL_D1));
	dev_info(adev->dev, "  MC_SEQ_PMG_TIMING_LP=0x%08X\n",
		 RREG32(mmMC_SEQ_PMG_TIMING_LP));
	dev_info(adev->dev, "  MC_SEQ_PMG_TIMING=0x%08X\n",
		 RREG32(mmMC_SEQ_PMG_TIMING));
	dev_info(adev->dev, "  MC_SEQ_PMG_CMD_MRS2_LP=0x%08X\n",
		 RREG32(mmMC_SEQ_PMG_CMD_MRS2_LP));
	dev_info(adev->dev, "  MC_PMG_CMD_MRS2=0x%08X\n",
		 RREG32(mmMC_PMG_CMD_MRS2));
	dev_info(adev->dev, "  MC_SEQ_WR_CTL_2_LP=0x%08X\n",
		 RREG32(mmMC_SEQ_WR_CTL_2_LP));
	dev_info(adev->dev, "  MC_SEQ_WR_CTL_2=0x%08X\n",
		 RREG32(mmMC_SEQ_WR_CTL_2));
	dev_info(adev->dev, "  PCIE_LC_SPEED_CNTL=0x%08X\n",
		 RREG32_PCIE(ixPCIE_LC_SPEED_CNTL));
	dev_info(adev->dev, "  PCIE_LC_LINK_WIDTH_CNTL=0x%08X\n",
		 RREG32_PCIE(ixPCIE_LC_LINK_WIDTH_CNTL));
	dev_info(adev->dev, "  SMC_IND_INDEX_0=0x%08X\n",
		 RREG32(mmSMC_IND_INDEX_0));
	dev_info(adev->dev, "  SMC_IND_DATA_0=0x%08X\n",
		 RREG32(mmSMC_IND_DATA_0));
	dev_info(adev->dev, "  SMC_IND_ACCESS_CNTL=0x%08X\n",
		 RREG32(mmSMC_IND_ACCESS_CNTL));
	dev_info(adev->dev, "  SMC_RESP_0=0x%08X\n",
		 RREG32(mmSMC_RESP_0));
	dev_info(adev->dev, "  SMC_MESSAGE_0=0x%08X\n",
		 RREG32(mmSMC_MESSAGE_0));
	dev_info(adev->dev, "  SMC_SYSCON_RESET_CNTL=0x%08X\n",
		 RREG32_SMC(ixSMC_SYSCON_RESET_CNTL));
	dev_info(adev->dev, "  SMC_SYSCON_CLOCK_CNTL_0=0x%08X\n",
		 RREG32_SMC(ixSMC_SYSCON_CLOCK_CNTL_0));
	dev_info(adev->dev, "  SMC_SYSCON_MISC_CNTL=0x%08X\n",
		 RREG32_SMC(ixSMC_SYSCON_MISC_CNTL));
	dev_info(adev->dev, "  SMC_PC_C=0x%08X\n",
		 RREG32_SMC(ixSMC_PC_C));
}

static int ci_dpm_soft_reset(void *handle)
{
	return 0;
}

static int ci_dpm_set_interrupt_state(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      unsigned type,
				      enum amdgpu_interrupt_state state)
{
	u32 cg_thermal_int;

	switch (type) {
	case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
			break;
		default:
			break;
		}
		break;

	case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
			break;
		default:
			break;
		}
		break;

	default:
		break;
	}

	return 0;
}
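
/*
 * Polarity note: despite the "MASK" in their names, the THERM_INTH and
 * THERM_INTL bits are set on AMDGPU_IRQ_STATE_ENABLE and cleared on
 * AMDGPU_IRQ_STATE_DISABLE above, i.e. they behave as interrupt enables
 * in this register.
 */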

static int ci_dpm_process_interrupt(struct amdgpu_device *adev,
				    struct amdgpu_irq_src *source,
				    struct amdgpu_iv_entry *entry)
{
	bool queue_thermal = false;

	if (entry == NULL)
		return -EINVAL;

	switch (entry->src_id) {
	case 230: /* thermal low to high */
		DRM_DEBUG("IH: thermal low to high\n");
		adev->pm.dpm.thermal.high_to_low = false;
		queue_thermal = true;
		break;
	case 231: /* thermal high to low */
		DRM_DEBUG("IH: thermal high to low\n");
		adev->pm.dpm.thermal.high_to_low = true;
		queue_thermal = true;
		break;
	default:
		break;
	}

	if (queue_thermal)
		schedule_work(&adev->pm.dpm.thermal.work);

	return 0;
}

static int ci_dpm_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	return 0;
}

static int ci_dpm_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

const struct amd_ip_funcs ci_dpm_ip_funcs = {
	.early_init = ci_dpm_early_init,
	.late_init = ci_dpm_late_init,
	.sw_init = ci_dpm_sw_init,
	.sw_fini = ci_dpm_sw_fini,
	.hw_init = ci_dpm_hw_init,
	.hw_fini = ci_dpm_hw_fini,
	.suspend = ci_dpm_suspend,
	.resume = ci_dpm_resume,
	.is_idle = ci_dpm_is_idle,
	.wait_for_idle = ci_dpm_wait_for_idle,
	.soft_reset = ci_dpm_soft_reset,
	.print_status = ci_dpm_print_status,
	.set_clockgating_state = ci_dpm_set_clockgating_state,
	.set_powergating_state = ci_dpm_set_powergating_state,
};

static const struct amdgpu_dpm_funcs ci_dpm_funcs = {
	.get_temperature = &ci_dpm_get_temp,
	.pre_set_power_state = &ci_dpm_pre_set_power_state,
	.set_power_state = &ci_dpm_set_power_state,
	.post_set_power_state = &ci_dpm_post_set_power_state,
	.display_configuration_changed = &ci_dpm_display_configuration_changed,
	.get_sclk = &ci_dpm_get_sclk,
	.get_mclk = &ci_dpm_get_mclk,
	.print_power_state = &ci_dpm_print_power_state,
	.debugfs_print_current_performance_level = &ci_dpm_debugfs_print_current_performance_level,
	.force_performance_level = &ci_dpm_force_performance_level,
	.vblank_too_short = &ci_dpm_vblank_too_short,
	.powergate_uvd = &ci_dpm_powergate_uvd,
	.set_fan_control_mode = &ci_dpm_set_fan_control_mode,
	.get_fan_control_mode = &ci_dpm_get_fan_control_mode,
	.set_fan_speed_percent = &ci_dpm_set_fan_speed_percent,
	.get_fan_speed_percent = &ci_dpm_get_fan_speed_percent,
};

static void ci_dpm_set_dpm_funcs(struct amdgpu_device *adev)
{
	if (adev->pm.funcs == NULL)
		adev->pm.funcs = &ci_dpm_funcs;
}

static const struct amdgpu_irq_src_funcs ci_dpm_irq_funcs = {
	.set = ci_dpm_set_interrupt_state,
	.process = ci_dpm_process_interrupt,
};

static void ci_dpm_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
	adev->pm.dpm.thermal.irq.funcs = &ci_dpm_irq_funcs;
}