2 * Copyright 2013 Advanced Micro Devices, Inc.
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
17 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20 * OTHER DEALINGS IN THE SOFTWARE.
26 #include "amdgpu_pm.h"
27 #include "amdgpu_dpm.h"
28 #include "amdgpu_atombios.h"
34 #include "../include/pptable.h"
35 #include <linux/math64.h>
36 #include <linux/seq_file.h>
37 #include <linux/firmware.h>
/* MC arbiter frequency register slots used when switching memory timings */
#define MC_CG_ARB_FREQ_F0           0x0a
#define MC_CG_ARB_FREQ_F1           0x0b
#define MC_CG_ARB_FREQ_F2           0x0c
#define MC_CG_ARB_FREQ_F3           0x0d

/* Upper bound of addressable SMC RAM (exclusive) */
#define SMC_RAM_END                 0x20000

/* Minimum sclk (10 kHz units) at which deep sleep is allowed */
#define SCLK_MIN_DEEPSLEEP_FREQ     1350


/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) for each extended-header revision */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22

#define BIOS_SCRATCH_4                                    0x5cd
59 MODULE_FIRMWARE("amdgpu/tahiti_smc.bin");
60 MODULE_FIRMWARE("amdgpu/pitcairn_smc.bin");
61 MODULE_FIRMWARE("amdgpu/pitcairn_k_smc.bin");
62 MODULE_FIRMWARE("amdgpu/verde_smc.bin");
63 MODULE_FIRMWARE("amdgpu/verde_k_smc.bin");
64 MODULE_FIRMWARE("amdgpu/oland_smc.bin");
65 MODULE_FIRMWARE("amdgpu/oland_k_smc.bin");
66 MODULE_FIRMWARE("amdgpu/hainan_smc.bin");
67 MODULE_FIRMWARE("amdgpu/hainan_k_smc.bin");
68 MODULE_FIRMWARE("amdgpu/banks_k_2_smc.bin");
70 static const struct amd_pm_funcs si_dpm_funcs
;
73 struct _ATOM_POWERPLAY_INFO info
;
74 struct _ATOM_POWERPLAY_INFO_V2 info_2
;
75 struct _ATOM_POWERPLAY_INFO_V3 info_3
;
76 struct _ATOM_PPLIB_POWERPLAYTABLE pplib
;
77 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2
;
78 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3
;
79 struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4
;
80 struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5
;
84 struct _ATOM_PPLIB_FANTABLE fan
;
85 struct _ATOM_PPLIB_FANTABLE2 fan2
;
86 struct _ATOM_PPLIB_FANTABLE3 fan3
;
89 union pplib_clock_info
{
90 struct _ATOM_PPLIB_R600_CLOCK_INFO r600
;
91 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780
;
92 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen
;
93 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo
;
94 struct _ATOM_PPLIB_SI_CLOCK_INFO si
;
97 static const u32 r600_utc
[R600_PM_NUMBER_OF_TC
] =
116 static const u32 r600_dtc
[R600_PM_NUMBER_OF_TC
] =
135 static const struct si_cac_config_reg cac_weights_tahiti
[] =
137 { 0x0, 0x0000ffff, 0, 0xc, SISLANDS_CACCONFIG_CGIND
},
138 { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
139 { 0x1, 0x0000ffff, 0, 0x101, SISLANDS_CACCONFIG_CGIND
},
140 { 0x1, 0xffff0000, 16, 0xc, SISLANDS_CACCONFIG_CGIND
},
141 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
142 { 0x3, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
143 { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
144 { 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
145 { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
146 { 0x5, 0x0000ffff, 0, 0x8fc, SISLANDS_CACCONFIG_CGIND
},
147 { 0x5, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
148 { 0x6, 0x0000ffff, 0, 0x95, SISLANDS_CACCONFIG_CGIND
},
149 { 0x6, 0xffff0000, 16, 0x34e, SISLANDS_CACCONFIG_CGIND
},
150 { 0x18f, 0x0000ffff, 0, 0x1a1, SISLANDS_CACCONFIG_CGIND
},
151 { 0x7, 0x0000ffff, 0, 0xda, SISLANDS_CACCONFIG_CGIND
},
152 { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
153 { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
154 { 0x8, 0xffff0000, 16, 0x46, SISLANDS_CACCONFIG_CGIND
},
155 { 0x9, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
156 { 0xa, 0x0000ffff, 0, 0x208, SISLANDS_CACCONFIG_CGIND
},
157 { 0xb, 0x0000ffff, 0, 0xe7, SISLANDS_CACCONFIG_CGIND
},
158 { 0xb, 0xffff0000, 16, 0x948, SISLANDS_CACCONFIG_CGIND
},
159 { 0xc, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
160 { 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
161 { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
162 { 0xe, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
163 { 0xf, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
164 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
165 { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
166 { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
167 { 0x11, 0x0000ffff, 0, 0x167, SISLANDS_CACCONFIG_CGIND
},
168 { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
169 { 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
170 { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
171 { 0x13, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND
},
172 { 0x14, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
173 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
174 { 0x15, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND
},
175 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
176 { 0x16, 0x0000ffff, 0, 0x31, SISLANDS_CACCONFIG_CGIND
},
177 { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
178 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
179 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
180 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
181 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
182 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
183 { 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
184 { 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
185 { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
186 { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
187 { 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
188 { 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
189 { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
190 { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
191 { 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
192 { 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
193 { 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
194 { 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
195 { 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
196 { 0x6d, 0x0000ffff, 0, 0x18e, SISLANDS_CACCONFIG_CGIND
},
200 static const struct si_cac_config_reg lcac_tahiti
[] =
202 { 0x143, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND
},
203 { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
204 { 0x146, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND
},
205 { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
206 { 0x149, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND
},
207 { 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
208 { 0x14c, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND
},
209 { 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
210 { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
211 { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
212 { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
213 { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
214 { 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
215 { 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
216 { 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
217 { 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
218 { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
219 { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
220 { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
221 { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
222 { 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
223 { 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
224 { 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
225 { 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
226 { 0x8c, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
227 { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
228 { 0x8f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
229 { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
230 { 0x92, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
231 { 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
232 { 0x95, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
233 { 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
234 { 0x14f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
235 { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
236 { 0x152, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
237 { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
238 { 0x155, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
239 { 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
240 { 0x158, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
241 { 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
242 { 0x110, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
243 { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
244 { 0x113, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
245 { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
246 { 0x116, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
247 { 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
248 { 0x119, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
249 { 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
250 { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
251 { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
252 { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
253 { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
254 { 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
255 { 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
256 { 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
257 { 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
258 { 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
259 { 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
260 { 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
261 { 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
262 { 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
263 { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
264 { 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
265 { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
266 { 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
267 { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
268 { 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
269 { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
270 { 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
271 { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
272 { 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
273 { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
274 { 0x16d, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND
},
275 { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
276 { 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
277 { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
278 { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
279 { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
280 { 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
281 { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
282 { 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
283 { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
284 { 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
285 { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
286 { 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
287 { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
292 static const struct si_cac_config_reg cac_override_tahiti
[] =
297 static const struct si_powertune_data powertune_data_tahiti
=
328 static const struct si_dte_data dte_data_tahiti
=
330 { 1159409, 0, 0, 0, 0 },
339 { 27, 31, 35, 39, 43, 47, 54, 61, 67, 74, 81, 88, 95, 0, 0, 0 },
340 { 240888759, 221057860, 235370597, 162287531, 158510299, 131423027, 116673180, 103067515, 87941937, 76209048, 68209175, 64090048, 58301890, 0, 0, 0 },
341 { 12024, 11189, 11451, 8411, 7939, 6666, 5681, 4905, 4241, 3720, 3354, 3122, 2890, 0, 0, 0 },
347 static const struct si_dte_data dte_data_tahiti_le
=
349 { 0x1E8480, 0x7A1200, 0x2160EC0, 0x3938700, 0 },
350 { 0x7D, 0x7D, 0x4E4, 0xB00, 0 },
358 { 0x78, 0x7C, 0x82, 0x88, 0x8E, 0x94, 0x9A, 0xA0, 0xA6, 0xAC, 0xB0, 0xB4, 0xB8, 0xBC, 0xC0, 0xC4 },
359 { 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700 },
360 { 0x2AF8, 0x2AF8, 0x29BB, 0x27F9, 0x2637, 0x2475, 0x22B3, 0x20F1, 0x1F2F, 0x1D6D, 0x1734, 0x1414, 0x10F4, 0xDD4, 0xAB4, 0x794 },
366 static const struct si_dte_data dte_data_tahiti_pro
=
368 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
369 { 0x0, 0x0, 0x0, 0x0, 0x0 },
377 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
378 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
379 { 0x7D0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
384 static const struct si_dte_data dte_data_new_zealand
=
386 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0 },
387 { 0x29B, 0x3E9, 0x537, 0x7D2, 0 },
395 { 0x82, 0xA0, 0xB4, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE },
396 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
397 { 0xDAC, 0x1388, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685 },
402 static const struct si_dte_data dte_data_aruba_pro
=
404 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
405 { 0x0, 0x0, 0x0, 0x0, 0x0 },
413 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
414 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
415 { 0x1000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
420 static const struct si_dte_data dte_data_malta
=
422 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
423 { 0x0, 0x0, 0x0, 0x0, 0x0 },
431 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
432 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
433 { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
438 static const struct si_cac_config_reg cac_weights_pitcairn
[] =
440 { 0x0, 0x0000ffff, 0, 0x8a, SISLANDS_CACCONFIG_CGIND
},
441 { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
442 { 0x1, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
443 { 0x1, 0xffff0000, 16, 0x24d, SISLANDS_CACCONFIG_CGIND
},
444 { 0x2, 0x0000ffff, 0, 0x19, SISLANDS_CACCONFIG_CGIND
},
445 { 0x3, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND
},
446 { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
447 { 0x4, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND
},
448 { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
449 { 0x5, 0x0000ffff, 0, 0xc11, SISLANDS_CACCONFIG_CGIND
},
450 { 0x5, 0xffff0000, 16, 0x7f3, SISLANDS_CACCONFIG_CGIND
},
451 { 0x6, 0x0000ffff, 0, 0x403, SISLANDS_CACCONFIG_CGIND
},
452 { 0x6, 0xffff0000, 16, 0x367, SISLANDS_CACCONFIG_CGIND
},
453 { 0x18f, 0x0000ffff, 0, 0x4c9, SISLANDS_CACCONFIG_CGIND
},
454 { 0x7, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
455 { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
456 { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
457 { 0x8, 0xffff0000, 16, 0x45d, SISLANDS_CACCONFIG_CGIND
},
458 { 0x9, 0x0000ffff, 0, 0x36d, SISLANDS_CACCONFIG_CGIND
},
459 { 0xa, 0x0000ffff, 0, 0x534, SISLANDS_CACCONFIG_CGIND
},
460 { 0xb, 0x0000ffff, 0, 0x5da, SISLANDS_CACCONFIG_CGIND
},
461 { 0xb, 0xffff0000, 16, 0x880, SISLANDS_CACCONFIG_CGIND
},
462 { 0xc, 0x0000ffff, 0, 0x201, SISLANDS_CACCONFIG_CGIND
},
463 { 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
464 { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
465 { 0xe, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND
},
466 { 0xf, 0x0000ffff, 0, 0x1f, SISLANDS_CACCONFIG_CGIND
},
467 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
468 { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
469 { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
470 { 0x11, 0x0000ffff, 0, 0x5de, SISLANDS_CACCONFIG_CGIND
},
471 { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
472 { 0x12, 0x0000ffff, 0, 0x7b, SISLANDS_CACCONFIG_CGIND
},
473 { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
474 { 0x13, 0xffff0000, 16, 0x13, SISLANDS_CACCONFIG_CGIND
},
475 { 0x14, 0x0000ffff, 0, 0xf9, SISLANDS_CACCONFIG_CGIND
},
476 { 0x15, 0x0000ffff, 0, 0x66, SISLANDS_CACCONFIG_CGIND
},
477 { 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
478 { 0x4e, 0x0000ffff, 0, 0x13, SISLANDS_CACCONFIG_CGIND
},
479 { 0x16, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
480 { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
481 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
482 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
483 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
484 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
485 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
486 { 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
487 { 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
488 { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
489 { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
490 { 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
491 { 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
492 { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
493 { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
494 { 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
495 { 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
496 { 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
497 { 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
498 { 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
499 { 0x6d, 0x0000ffff, 0, 0x186, SISLANDS_CACCONFIG_CGIND
},
503 static const struct si_cac_config_reg lcac_pitcairn
[] =
505 { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
506 { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
507 { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
508 { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
509 { 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
510 { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
511 { 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
512 { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
513 { 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
514 { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
515 { 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
516 { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
517 { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
518 { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
519 { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
520 { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
521 { 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
522 { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
523 { 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
524 { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
525 { 0x8f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
526 { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
527 { 0x146, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
528 { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
529 { 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
530 { 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
531 { 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
532 { 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
533 { 0x116, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
534 { 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
535 { 0x155, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
536 { 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
537 { 0x92, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
538 { 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
539 { 0x149, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
540 { 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
541 { 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
542 { 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
543 { 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
544 { 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
545 { 0x119, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
546 { 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
547 { 0x158, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
548 { 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
549 { 0x95, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
550 { 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
551 { 0x14c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
552 { 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
553 { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
554 { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
555 { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
556 { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
557 { 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
558 { 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
559 { 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
560 { 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
561 { 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
562 { 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
563 { 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
564 { 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
565 { 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
566 { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
567 { 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
568 { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
569 { 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
570 { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
571 { 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
572 { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
573 { 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
574 { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
575 { 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
576 { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
577 { 0x16d, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
578 { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
579 { 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
580 { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
581 { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
582 { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
583 { 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
584 { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
585 { 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
586 { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
587 { 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
588 { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
589 { 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
590 { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
594 static const struct si_cac_config_reg cac_override_pitcairn
[] =
599 static const struct si_powertune_data powertune_data_pitcairn
=
630 static const struct si_dte_data dte_data_pitcairn
=
641 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
642 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
643 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
648 static const struct si_dte_data dte_data_curacao_xt
=
650 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
651 { 0x0, 0x0, 0x0, 0x0, 0x0 },
659 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
660 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
661 { 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
666 static const struct si_dte_data dte_data_curacao_pro
=
668 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
669 { 0x0, 0x0, 0x0, 0x0, 0x0 },
677 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
678 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
679 { 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
684 static const struct si_dte_data dte_data_neptune_xt
=
686 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
687 { 0x0, 0x0, 0x0, 0x0, 0x0 },
695 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
696 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
697 { 0x3A2F, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
702 static const struct si_cac_config_reg cac_weights_chelsea_pro
[] =
704 { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND
},
705 { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
706 { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND
},
707 { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND
},
708 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
709 { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
710 { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
711 { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
712 { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND
},
713 { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND
},
714 { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND
},
715 { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND
},
716 { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND
},
717 { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
718 { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND
},
719 { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND
},
720 { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND
},
721 { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND
},
722 { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND
},
723 { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND
},
724 { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND
},
725 { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND
},
726 { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND
},
727 { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND
},
728 { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND
},
729 { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
730 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND
},
731 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
732 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
733 { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND
},
734 { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
735 { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND
},
736 { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND
},
737 { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND
},
738 { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND
},
739 { 0x14, 0x0000ffff, 0, 0x2BD, SISLANDS_CACCONFIG_CGIND
},
740 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
741 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND
},
742 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
743 { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
744 { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND
},
745 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
746 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
747 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
748 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
749 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
750 { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
751 { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
752 { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
753 { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
754 { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
755 { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
756 { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
757 { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
758 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
759 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
760 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
761 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
762 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
763 { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND
},
767 static const struct si_cac_config_reg cac_weights_chelsea_xt
[] =
769 { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND
},
770 { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
771 { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND
},
772 { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND
},
773 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
774 { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
775 { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
776 { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
777 { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND
},
778 { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND
},
779 { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND
},
780 { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND
},
781 { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND
},
782 { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
783 { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND
},
784 { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND
},
785 { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND
},
786 { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND
},
787 { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND
},
788 { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND
},
789 { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND
},
790 { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND
},
791 { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND
},
792 { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND
},
793 { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND
},
794 { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
795 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND
},
796 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
797 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
798 { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND
},
799 { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
800 { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND
},
801 { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND
},
802 { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND
},
803 { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND
},
804 { 0x14, 0x0000ffff, 0, 0x30A, SISLANDS_CACCONFIG_CGIND
},
805 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
806 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND
},
807 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
808 { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
809 { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND
},
810 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
811 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
812 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
813 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
814 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
815 { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
816 { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
817 { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
818 { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
819 { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
820 { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
821 { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
822 { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
823 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
824 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
825 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
826 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
827 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
828 { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND
},
832 static const struct si_cac_config_reg cac_weights_heathrow
[] =
834 { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND
},
835 { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
836 { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND
},
837 { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND
},
838 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
839 { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
840 { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
841 { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
842 { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND
},
843 { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND
},
844 { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND
},
845 { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND
},
846 { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND
},
847 { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
848 { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND
},
849 { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND
},
850 { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND
},
851 { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND
},
852 { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND
},
853 { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND
},
854 { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND
},
855 { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND
},
856 { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND
},
857 { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND
},
858 { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND
},
859 { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
860 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND
},
861 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
862 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
863 { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND
},
864 { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
865 { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND
},
866 { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND
},
867 { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND
},
868 { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND
},
869 { 0x14, 0x0000ffff, 0, 0x362, SISLANDS_CACCONFIG_CGIND
},
870 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
871 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND
},
872 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
873 { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
874 { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND
},
875 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
876 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
877 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
878 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
879 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
880 { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
881 { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
882 { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
883 { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
884 { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
885 { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
886 { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
887 { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
888 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
889 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
890 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
891 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
892 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
893 { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND
},
897 static const struct si_cac_config_reg cac_weights_cape_verde_pro
[] =
899 { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND
},
900 { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
901 { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND
},
902 { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND
},
903 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
904 { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
905 { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
906 { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
907 { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND
},
908 { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND
},
909 { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND
},
910 { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND
},
911 { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND
},
912 { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
913 { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND
},
914 { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND
},
915 { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND
},
916 { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND
},
917 { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND
},
918 { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND
},
919 { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND
},
920 { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND
},
921 { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND
},
922 { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND
},
923 { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND
},
924 { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
925 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND
},
926 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
927 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
928 { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND
},
929 { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
930 { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND
},
931 { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND
},
932 { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND
},
933 { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND
},
934 { 0x14, 0x0000ffff, 0, 0x315, SISLANDS_CACCONFIG_CGIND
},
935 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
936 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND
},
937 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
938 { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
939 { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND
},
940 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
941 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
942 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
943 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
944 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
945 { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
946 { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
947 { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
948 { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
949 { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
950 { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
951 { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
952 { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
953 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
954 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
955 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
956 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
957 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
958 { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND
},
962 static const struct si_cac_config_reg cac_weights_cape_verde
[] =
964 { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND
},
965 { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
966 { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND
},
967 { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND
},
968 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
969 { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
970 { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
971 { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
972 { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND
},
973 { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND
},
974 { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND
},
975 { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND
},
976 { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND
},
977 { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
978 { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND
},
979 { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND
},
980 { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND
},
981 { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND
},
982 { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND
},
983 { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND
},
984 { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND
},
985 { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND
},
986 { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND
},
987 { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND
},
988 { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND
},
989 { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
990 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND
},
991 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
992 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
993 { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND
},
994 { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
995 { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND
},
996 { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND
},
997 { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND
},
998 { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND
},
999 { 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND
},
1000 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1001 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND
},
1002 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1003 { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
1004 { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND
},
1005 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1006 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1007 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1008 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1009 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1010 { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1011 { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1012 { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1013 { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1014 { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1015 { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1016 { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1017 { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1018 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1019 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1020 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1021 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1022 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1023 { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND
},
1027 static const struct si_cac_config_reg lcac_cape_verde
[] =
1029 { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1030 { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1031 { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1032 { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1033 { 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
1034 { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1035 { 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
1036 { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1037 { 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
1038 { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1039 { 0x143, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1040 { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1041 { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1042 { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1043 { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1044 { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1045 { 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
1046 { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1047 { 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
1048 { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1049 { 0x8f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1050 { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1051 { 0x146, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1052 { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1053 { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1054 { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1055 { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1056 { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1057 { 0x164, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1058 { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1059 { 0x167, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1060 { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1061 { 0x16a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1062 { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1063 { 0x15e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1064 { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1065 { 0x161, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1066 { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1067 { 0x15b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1068 { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1069 { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1070 { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1071 { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1072 { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1073 { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1074 { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1075 { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1076 { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1077 { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1078 { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1079 { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1080 { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1081 { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1082 { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1086 static const struct si_cac_config_reg cac_override_cape_verde
[] =
1091 static const struct si_powertune_data powertune_data_cape_verde
=
1093 ((1 << 16) | 0x6993),
1122 static const struct si_dte_data dte_data_cape_verde
=
1133 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1134 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1135 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1140 static const struct si_dte_data dte_data_venus_xtx
=
1142 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1143 { 0x71C, 0xAAB, 0xE39, 0x11C7, 0x0 },
1151 { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1152 { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1153 { 0xD6D8, 0x88B8, 0x1555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1158 static const struct si_dte_data dte_data_venus_xt
=
1160 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1161 { 0xBDA, 0x11C7, 0x17B4, 0x1DA1, 0x0 },
1169 { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1170 { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1171 { 0xAFC8, 0x88B8, 0x238E, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1176 static const struct si_dte_data dte_data_venus_pro
=
1178 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1179 { 0x11C7, 0x1AAB, 0x238E, 0x2C72, 0x0 },
1187 { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1188 { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1189 { 0x88B8, 0x88B8, 0x3555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1194 static const struct si_cac_config_reg cac_weights_oland
[] =
1196 { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND
},
1197 { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
1198 { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND
},
1199 { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND
},
1200 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1201 { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
1202 { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
1203 { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
1204 { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND
},
1205 { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND
},
1206 { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND
},
1207 { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND
},
1208 { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND
},
1209 { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
1210 { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND
},
1211 { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND
},
1212 { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND
},
1213 { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND
},
1214 { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND
},
1215 { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND
},
1216 { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND
},
1217 { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND
},
1218 { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND
},
1219 { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND
},
1220 { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND
},
1221 { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
1222 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND
},
1223 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1224 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1225 { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND
},
1226 { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
1227 { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND
},
1228 { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND
},
1229 { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND
},
1230 { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND
},
1231 { 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND
},
1232 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1233 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND
},
1234 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1235 { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
1236 { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND
},
1237 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1238 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1239 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1240 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1241 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1242 { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1243 { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1244 { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1245 { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1246 { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1247 { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1248 { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1249 { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1250 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1251 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1252 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1253 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1254 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1255 { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND
},
1259 static const struct si_cac_config_reg cac_weights_mars_pro
[] =
1261 { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND
},
1262 { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND
},
1263 { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND
},
1264 { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND
},
1265 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1266 { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND
},
1267 { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND
},
1268 { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND
},
1269 { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND
},
1270 { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND
},
1271 { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND
},
1272 { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND
},
1273 { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND
},
1274 { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND
},
1275 { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND
},
1276 { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND
},
1277 { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND
},
1278 { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND
},
1279 { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND
},
1280 { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND
},
1281 { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND
},
1282 { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND
},
1283 { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND
},
1284 { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
1285 { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND
},
1286 { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND
},
1287 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND
},
1288 { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND
},
1289 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1290 { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND
},
1291 { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND
},
1292 { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND
},
1293 { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND
},
1294 { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND
},
1295 { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND
},
1296 { 0x14, 0x0000ffff, 0, 0x2, SISLANDS_CACCONFIG_CGIND
},
1297 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1298 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND
},
1299 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1300 { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND
},
1301 { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND
},
1302 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1303 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1304 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1305 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1306 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1307 { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND
},
1308 { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND
},
1309 { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1310 { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1311 { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND
},
1312 { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND
},
1313 { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1314 { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1315 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1316 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1317 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1318 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1319 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1320 { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND
},
1324 static const struct si_cac_config_reg cac_weights_mars_xt
[] =
1326 { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND
},
1327 { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND
},
1328 { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND
},
1329 { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND
},
1330 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1331 { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND
},
1332 { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND
},
1333 { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND
},
1334 { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND
},
1335 { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND
},
1336 { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND
},
1337 { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND
},
1338 { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND
},
1339 { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND
},
1340 { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND
},
1341 { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND
},
1342 { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND
},
1343 { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND
},
1344 { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND
},
1345 { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND
},
1346 { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND
},
1347 { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND
},
1348 { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND
},
1349 { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
1350 { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND
},
1351 { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND
},
1352 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND
},
1353 { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND
},
1354 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1355 { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND
},
1356 { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND
},
1357 { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND
},
1358 { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND
},
1359 { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND
},
1360 { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND
},
1361 { 0x14, 0x0000ffff, 0, 0x60, SISLANDS_CACCONFIG_CGIND
},
1362 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1363 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND
},
1364 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1365 { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND
},
1366 { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND
},
1367 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1368 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1369 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1370 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1371 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1372 { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND
},
1373 { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND
},
1374 { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1375 { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1376 { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND
},
1377 { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND
},
1378 { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1379 { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1380 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1381 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1382 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1383 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1384 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1385 { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND
},
1389 static const struct si_cac_config_reg cac_weights_oland_pro
[] =
1391 { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND
},
1392 { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND
},
1393 { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND
},
1394 { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND
},
1395 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1396 { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND
},
1397 { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND
},
1398 { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND
},
1399 { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND
},
1400 { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND
},
1401 { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND
},
1402 { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND
},
1403 { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND
},
1404 { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND
},
1405 { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND
},
1406 { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND
},
1407 { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND
},
1408 { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND
},
1409 { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND
},
1410 { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND
},
1411 { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND
},
1412 { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND
},
1413 { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND
},
1414 { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
1415 { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND
},
1416 { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND
},
1417 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND
},
1418 { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND
},
1419 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1420 { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND
},
1421 { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND
},
1422 { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND
},
1423 { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND
},
1424 { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND
},
1425 { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND
},
1426 { 0x14, 0x0000ffff, 0, 0x90, SISLANDS_CACCONFIG_CGIND
},
1427 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1428 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND
},
1429 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1430 { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND
},
1431 { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND
},
1432 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1433 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1434 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1435 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1436 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1437 { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND
},
1438 { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND
},
1439 { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1440 { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1441 { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND
},
1442 { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND
},
1443 { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1444 { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1445 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1446 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1447 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1448 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1449 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1450 { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND
},
1454 static const struct si_cac_config_reg cac_weights_oland_xt
[] =
1456 { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND
},
1457 { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND
},
1458 { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND
},
1459 { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND
},
1460 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1461 { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND
},
1462 { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND
},
1463 { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND
},
1464 { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND
},
1465 { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND
},
1466 { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND
},
1467 { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND
},
1468 { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND
},
1469 { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND
},
1470 { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND
},
1471 { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND
},
1472 { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND
},
1473 { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND
},
1474 { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND
},
1475 { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND
},
1476 { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND
},
1477 { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND
},
1478 { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND
},
1479 { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
1480 { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND
},
1481 { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND
},
1482 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND
},
1483 { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND
},
1484 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1485 { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND
},
1486 { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND
},
1487 { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND
},
1488 { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND
},
1489 { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND
},
1490 { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND
},
1491 { 0x14, 0x0000ffff, 0, 0x120, SISLANDS_CACCONFIG_CGIND
},
1492 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1493 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND
},
1494 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1495 { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND
},
1496 { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND
},
1497 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1498 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1499 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1500 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1501 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1502 { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND
},
1503 { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND
},
1504 { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1505 { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1506 { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND
},
1507 { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND
},
1508 { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1509 { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1510 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1511 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1512 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1513 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1514 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1515 { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND
},
1519 static const struct si_cac_config_reg lcac_oland
[] =
1521 { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1522 { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1523 { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1524 { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1525 { 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND
},
1526 { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1527 { 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND
},
1528 { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1529 { 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND
},
1530 { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1531 { 0x143, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
1532 { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1533 { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1534 { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1535 { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1536 { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1537 { 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1538 { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1539 { 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1540 { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1541 { 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1542 { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1543 { 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1544 { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1545 { 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1546 { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1547 { 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1548 { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1549 { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1550 { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1551 { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1552 { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1553 { 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1554 { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1555 { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1556 { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1557 { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1558 { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1559 { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1560 { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1561 { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1562 { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1566 static const struct si_cac_config_reg lcac_mars_pro
[] =
1568 { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1569 { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1570 { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1571 { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1572 { 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND
},
1573 { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1574 { 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND
},
1575 { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1576 { 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND
},
1577 { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1578 { 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1579 { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1580 { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1581 { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1582 { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1583 { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1584 { 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1585 { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1586 { 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1587 { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1588 { 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1589 { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1590 { 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1591 { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1592 { 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1593 { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1594 { 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1595 { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1596 { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1597 { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1598 { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1599 { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1600 { 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1601 { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1602 { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1603 { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1604 { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1605 { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1606 { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1607 { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1608 { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1609 { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1613 static const struct si_cac_config_reg cac_override_oland
[] =
1618 static const struct si_powertune_data powertune_data_oland
=
1620 ((1 << 16) | 0x6993),
1649 static const struct si_powertune_data powertune_data_mars_pro
=
1651 ((1 << 16) | 0x6993),
1680 static const struct si_dte_data dte_data_oland
=
1691 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1692 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1693 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1698 static const struct si_dte_data dte_data_mars_pro
=
1700 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1701 { 0x0, 0x0, 0x0, 0x0, 0x0 },
1709 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
1710 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
1711 { 0xF627, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1716 static const struct si_dte_data dte_data_sun_xt
=
1718 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1719 { 0x0, 0x0, 0x0, 0x0, 0x0 },
1727 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
1728 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
1729 { 0xD555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1735 static const struct si_cac_config_reg cac_weights_hainan
[] =
1737 { 0x0, 0x0000ffff, 0, 0x2d9, SISLANDS_CACCONFIG_CGIND
},
1738 { 0x0, 0xffff0000, 16, 0x22b, SISLANDS_CACCONFIG_CGIND
},
1739 { 0x1, 0x0000ffff, 0, 0x21c, SISLANDS_CACCONFIG_CGIND
},
1740 { 0x1, 0xffff0000, 16, 0x1dc, SISLANDS_CACCONFIG_CGIND
},
1741 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1742 { 0x3, 0x0000ffff, 0, 0x24e, SISLANDS_CACCONFIG_CGIND
},
1743 { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1744 { 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1745 { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1746 { 0x5, 0x0000ffff, 0, 0x35e, SISLANDS_CACCONFIG_CGIND
},
1747 { 0x5, 0xffff0000, 16, 0x1143, SISLANDS_CACCONFIG_CGIND
},
1748 { 0x6, 0x0000ffff, 0, 0xe17, SISLANDS_CACCONFIG_CGIND
},
1749 { 0x6, 0xffff0000, 16, 0x441, SISLANDS_CACCONFIG_CGIND
},
1750 { 0x18f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1751 { 0x7, 0x0000ffff, 0, 0x28b, SISLANDS_CACCONFIG_CGIND
},
1752 { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1753 { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1754 { 0x8, 0xffff0000, 16, 0xabe, SISLANDS_CACCONFIG_CGIND
},
1755 { 0x9, 0x0000ffff, 0, 0xf11, SISLANDS_CACCONFIG_CGIND
},
1756 { 0xa, 0x0000ffff, 0, 0x907, SISLANDS_CACCONFIG_CGIND
},
1757 { 0xb, 0x0000ffff, 0, 0xb45, SISLANDS_CACCONFIG_CGIND
},
1758 { 0xb, 0xffff0000, 16, 0xd1e, SISLANDS_CACCONFIG_CGIND
},
1759 { 0xc, 0x0000ffff, 0, 0xa2c, SISLANDS_CACCONFIG_CGIND
},
1760 { 0xd, 0x0000ffff, 0, 0x62, SISLANDS_CACCONFIG_CGIND
},
1761 { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1762 { 0xe, 0x0000ffff, 0, 0x1f3, SISLANDS_CACCONFIG_CGIND
},
1763 { 0xf, 0x0000ffff, 0, 0x42, SISLANDS_CACCONFIG_CGIND
},
1764 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1765 { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1766 { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1767 { 0x11, 0x0000ffff, 0, 0x709, SISLANDS_CACCONFIG_CGIND
},
1768 { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1769 { 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1770 { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1771 { 0x13, 0xffff0000, 16, 0x3a, SISLANDS_CACCONFIG_CGIND
},
1772 { 0x14, 0x0000ffff, 0, 0x357, SISLANDS_CACCONFIG_CGIND
},
1773 { 0x15, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND
},
1774 { 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1775 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1776 { 0x16, 0x0000ffff, 0, 0x314, SISLANDS_CACCONFIG_CGIND
},
1777 { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1778 { 0x17, 0x0000ffff, 0, 0x6d, SISLANDS_CACCONFIG_CGIND
},
1779 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1780 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1781 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1782 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1783 { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1784 { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1785 { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1786 { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1787 { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1788 { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1789 { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1790 { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1791 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1792 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1793 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1794 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1795 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1796 { 0x6d, 0x0000ffff, 0, 0x1b9, SISLANDS_CACCONFIG_CGIND
},
1800 static const struct si_powertune_data powertune_data_hainan
=
1802 ((1 << 16) | 0x6993),
1831 static struct rv7xx_power_info
*rv770_get_pi(struct amdgpu_device
*adev
);
1832 static struct evergreen_power_info
*evergreen_get_pi(struct amdgpu_device
*adev
);
1833 static struct ni_power_info
*ni_get_pi(struct amdgpu_device
*adev
);
1834 static struct si_ps
*si_get_ps(struct amdgpu_ps
*rps
);
1836 static int si_populate_voltage_value(struct amdgpu_device
*adev
,
1837 const struct atom_voltage_table
*table
,
1838 u16 value
, SISLANDS_SMC_VOLTAGE_VALUE
*voltage
);
1839 static int si_get_std_voltage_value(struct amdgpu_device
*adev
,
1840 SISLANDS_SMC_VOLTAGE_VALUE
*voltage
,
1842 static int si_write_smc_soft_register(struct amdgpu_device
*adev
,
1843 u16 reg_offset
, u32 value
);
1844 static int si_convert_power_level_to_smc(struct amdgpu_device
*adev
,
1845 struct rv7xx_pl
*pl
,
1846 SISLANDS_SMC_HW_PERFORMANCE_LEVEL
*level
);
1847 static int si_calculate_sclk_params(struct amdgpu_device
*adev
,
1849 SISLANDS_SMC_SCLK_VALUE
*sclk
);
1851 static void si_thermal_start_smc_fan_control(struct amdgpu_device
*adev
);
1852 static void si_fan_ctrl_set_default_mode(struct amdgpu_device
*adev
);
1853 static void si_dpm_set_irq_funcs(struct amdgpu_device
*adev
);
1855 static struct si_power_info
*si_get_pi(struct amdgpu_device
*adev
)
1857 struct si_power_info
*pi
= adev
->pm
.dpm
.priv
;
1861 static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients
*coeff
,
1862 u16 v
, s32 t
, u32 ileakage
, u32
*leakage
)
1864 s64 kt
, kv
, leakage_w
, i_leakage
, vddc
;
1865 s64 temperature
, t_slope
, t_intercept
, av
, bv
, t_ref
;
1868 i_leakage
= div64_s64(drm_int2fixp(ileakage
), 100);
1869 vddc
= div64_s64(drm_int2fixp(v
), 1000);
1870 temperature
= div64_s64(drm_int2fixp(t
), 1000);
1872 t_slope
= div64_s64(drm_int2fixp(coeff
->t_slope
), 100000000);
1873 t_intercept
= div64_s64(drm_int2fixp(coeff
->t_intercept
), 100000000);
1874 av
= div64_s64(drm_int2fixp(coeff
->av
), 100000000);
1875 bv
= div64_s64(drm_int2fixp(coeff
->bv
), 100000000);
1876 t_ref
= drm_int2fixp(coeff
->t_ref
);
1878 tmp
= drm_fixp_mul(t_slope
, vddc
) + t_intercept
;
1879 kt
= drm_fixp_exp(drm_fixp_mul(tmp
, temperature
));
1880 kt
= drm_fixp_div(kt
, drm_fixp_exp(drm_fixp_mul(tmp
, t_ref
)));
1881 kv
= drm_fixp_mul(av
, drm_fixp_exp(drm_fixp_mul(bv
, vddc
)));
1883 leakage_w
= drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage
, kt
), kv
), vddc
);
1885 *leakage
= drm_fixp2int(leakage_w
* 1000);
1888 static void si_calculate_leakage_for_v_and_t(struct amdgpu_device
*adev
,
1889 const struct ni_leakage_coeffients
*coeff
,
1895 si_calculate_leakage_for_v_and_t_formula(coeff
, v
, t
, i_leakage
, leakage
);
1898 static void si_calculate_leakage_for_v_formula(const struct ni_leakage_coeffients
*coeff
,
1899 const u32 fixed_kt
, u16 v
,
1900 u32 ileakage
, u32
*leakage
)
1902 s64 kt
, kv
, leakage_w
, i_leakage
, vddc
;
1904 i_leakage
= div64_s64(drm_int2fixp(ileakage
), 100);
1905 vddc
= div64_s64(drm_int2fixp(v
), 1000);
1907 kt
= div64_s64(drm_int2fixp(fixed_kt
), 100000000);
1908 kv
= drm_fixp_mul(div64_s64(drm_int2fixp(coeff
->av
), 100000000),
1909 drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff
->bv
), 100000000), vddc
)));
1911 leakage_w
= drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage
, kt
), kv
), vddc
);
1913 *leakage
= drm_fixp2int(leakage_w
* 1000);
1916 static void si_calculate_leakage_for_v(struct amdgpu_device
*adev
,
1917 const struct ni_leakage_coeffients
*coeff
,
1923 si_calculate_leakage_for_v_formula(coeff
, fixed_kt
, v
, i_leakage
, leakage
);
1927 static void si_update_dte_from_pl2(struct amdgpu_device
*adev
,
1928 struct si_dte_data
*dte_data
)
1930 u32 p_limit1
= adev
->pm
.dpm
.tdp_limit
;
1931 u32 p_limit2
= adev
->pm
.dpm
.near_tdp_limit
;
1932 u32 k
= dte_data
->k
;
1933 u32 t_max
= dte_data
->max_t
;
1934 u32 t_split
[5] = { 10, 15, 20, 25, 30 };
1935 u32 t_0
= dte_data
->t0
;
1938 if (p_limit2
!= 0 && p_limit2
<= p_limit1
) {
1939 dte_data
->tdep_count
= 3;
1941 for (i
= 0; i
< k
; i
++) {
1943 (t_split
[i
] * (t_max
- t_0
/(u32
)1000) * (1 << 14)) /
1944 (p_limit2
* (u32
)100);
1947 dte_data
->tdep_r
[1] = dte_data
->r
[4] * 2;
1949 for (i
= 2; i
< SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE
; i
++) {
1950 dte_data
->tdep_r
[i
] = dte_data
->r
[4];
1953 DRM_ERROR("Invalid PL2! DTE will not be updated.\n");
1957 static struct rv7xx_power_info
*rv770_get_pi(struct amdgpu_device
*adev
)
1959 struct rv7xx_power_info
*pi
= adev
->pm
.dpm
.priv
;
1964 static struct ni_power_info
*ni_get_pi(struct amdgpu_device
*adev
)
1966 struct ni_power_info
*pi
= adev
->pm
.dpm
.priv
;
1971 static struct si_ps
*si_get_ps(struct amdgpu_ps
*aps
)
1973 struct si_ps
*ps
= aps
->ps_priv
;
1978 static void si_initialize_powertune_defaults(struct amdgpu_device
*adev
)
1980 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
1981 struct si_power_info
*si_pi
= si_get_pi(adev
);
1982 bool update_dte_from_pl2
= false;
1984 if (adev
->asic_type
== CHIP_TAHITI
) {
1985 si_pi
->cac_weights
= cac_weights_tahiti
;
1986 si_pi
->lcac_config
= lcac_tahiti
;
1987 si_pi
->cac_override
= cac_override_tahiti
;
1988 si_pi
->powertune_data
= &powertune_data_tahiti
;
1989 si_pi
->dte_data
= dte_data_tahiti
;
1991 switch (adev
->pdev
->device
) {
1993 si_pi
->dte_data
.enable_dte_by_default
= true;
1996 si_pi
->dte_data
= dte_data_new_zealand
;
2002 si_pi
->dte_data
= dte_data_aruba_pro
;
2003 update_dte_from_pl2
= true;
2006 si_pi
->dte_data
= dte_data_malta
;
2007 update_dte_from_pl2
= true;
2010 si_pi
->dte_data
= dte_data_tahiti_pro
;
2011 update_dte_from_pl2
= true;
2014 if (si_pi
->dte_data
.enable_dte_by_default
== true)
2015 DRM_ERROR("DTE is not enabled!\n");
2018 } else if (adev
->asic_type
== CHIP_PITCAIRN
) {
2019 si_pi
->cac_weights
= cac_weights_pitcairn
;
2020 si_pi
->lcac_config
= lcac_pitcairn
;
2021 si_pi
->cac_override
= cac_override_pitcairn
;
2022 si_pi
->powertune_data
= &powertune_data_pitcairn
;
2024 switch (adev
->pdev
->device
) {
2027 si_pi
->dte_data
= dte_data_curacao_xt
;
2028 update_dte_from_pl2
= true;
2032 si_pi
->dte_data
= dte_data_curacao_pro
;
2033 update_dte_from_pl2
= true;
2037 si_pi
->dte_data
= dte_data_neptune_xt
;
2038 update_dte_from_pl2
= true;
2041 si_pi
->dte_data
= dte_data_pitcairn
;
2044 } else if (adev
->asic_type
== CHIP_VERDE
) {
2045 si_pi
->lcac_config
= lcac_cape_verde
;
2046 si_pi
->cac_override
= cac_override_cape_verde
;
2047 si_pi
->powertune_data
= &powertune_data_cape_verde
;
2049 switch (adev
->pdev
->device
) {
2054 si_pi
->cac_weights
= cac_weights_cape_verde_pro
;
2055 si_pi
->dte_data
= dte_data_cape_verde
;
2058 si_pi
->cac_weights
= cac_weights_cape_verde_pro
;
2059 si_pi
->dte_data
= dte_data_sun_xt
;
2060 update_dte_from_pl2
= true;
2064 si_pi
->cac_weights
= cac_weights_heathrow
;
2065 si_pi
->dte_data
= dte_data_cape_verde
;
2069 si_pi
->cac_weights
= cac_weights_chelsea_xt
;
2070 si_pi
->dte_data
= dte_data_cape_verde
;
2073 si_pi
->cac_weights
= cac_weights_chelsea_pro
;
2074 si_pi
->dte_data
= dte_data_cape_verde
;
2077 si_pi
->cac_weights
= cac_weights_heathrow
;
2078 si_pi
->dte_data
= dte_data_venus_xtx
;
2081 si_pi
->cac_weights
= cac_weights_heathrow
;
2082 si_pi
->dte_data
= dte_data_venus_xt
;
2088 si_pi
->cac_weights
= cac_weights_chelsea_pro
;
2089 si_pi
->dte_data
= dte_data_venus_pro
;
2092 si_pi
->cac_weights
= cac_weights_cape_verde
;
2093 si_pi
->dte_data
= dte_data_cape_verde
;
2096 } else if (adev
->asic_type
== CHIP_OLAND
) {
2097 si_pi
->lcac_config
= lcac_mars_pro
;
2098 si_pi
->cac_override
= cac_override_oland
;
2099 si_pi
->powertune_data
= &powertune_data_mars_pro
;
2100 si_pi
->dte_data
= dte_data_mars_pro
;
2102 switch (adev
->pdev
->device
) {
2107 si_pi
->cac_weights
= cac_weights_mars_pro
;
2108 update_dte_from_pl2
= true;
2114 si_pi
->cac_weights
= cac_weights_mars_xt
;
2115 update_dte_from_pl2
= true;
2120 si_pi
->cac_weights
= cac_weights_oland_pro
;
2121 update_dte_from_pl2
= true;
2124 si_pi
->cac_weights
= cac_weights_oland_xt
;
2125 update_dte_from_pl2
= true;
2128 si_pi
->cac_weights
= cac_weights_oland
;
2129 si_pi
->lcac_config
= lcac_oland
;
2130 si_pi
->cac_override
= cac_override_oland
;
2131 si_pi
->powertune_data
= &powertune_data_oland
;
2132 si_pi
->dte_data
= dte_data_oland
;
2135 } else if (adev
->asic_type
== CHIP_HAINAN
) {
2136 si_pi
->cac_weights
= cac_weights_hainan
;
2137 si_pi
->lcac_config
= lcac_oland
;
2138 si_pi
->cac_override
= cac_override_oland
;
2139 si_pi
->powertune_data
= &powertune_data_hainan
;
2140 si_pi
->dte_data
= dte_data_sun_xt
;
2141 update_dte_from_pl2
= true;
2143 DRM_ERROR("Unknown SI asic revision, failed to initialize PowerTune!\n");
2147 ni_pi
->enable_power_containment
= false;
2148 ni_pi
->enable_cac
= false;
2149 ni_pi
->enable_sq_ramping
= false;
2150 si_pi
->enable_dte
= false;
2152 if (si_pi
->powertune_data
->enable_powertune_by_default
) {
2153 ni_pi
->enable_power_containment
= true;
2154 ni_pi
->enable_cac
= true;
2155 if (si_pi
->dte_data
.enable_dte_by_default
) {
2156 si_pi
->enable_dte
= true;
2157 if (update_dte_from_pl2
)
2158 si_update_dte_from_pl2(adev
, &si_pi
->dte_data
);
2161 ni_pi
->enable_sq_ramping
= true;
2164 ni_pi
->driver_calculate_cac_leakage
= true;
2165 ni_pi
->cac_configuration_required
= true;
2167 if (ni_pi
->cac_configuration_required
) {
2168 ni_pi
->support_cac_long_term_average
= true;
2169 si_pi
->dyn_powertune_data
.l2_lta_window_size
=
2170 si_pi
->powertune_data
->l2_lta_window_size_default
;
2171 si_pi
->dyn_powertune_data
.lts_truncate
=
2172 si_pi
->powertune_data
->lts_truncate_default
;
2174 ni_pi
->support_cac_long_term_average
= false;
2175 si_pi
->dyn_powertune_data
.l2_lta_window_size
= 0;
2176 si_pi
->dyn_powertune_data
.lts_truncate
= 0;
2179 si_pi
->dyn_powertune_data
.disable_uvd_powertune
= false;
2182 static u32
si_get_smc_power_scaling_factor(struct amdgpu_device
*adev
)
2187 static u32
si_calculate_cac_wintime(struct amdgpu_device
*adev
)
2192 u32 cac_window_size
;
2194 xclk
= amdgpu_asic_get_xclk(adev
);
2199 cac_window
= RREG32(CG_CAC_CTRL
) & CAC_WINDOW_MASK
;
2200 cac_window_size
= ((cac_window
& 0xFFFF0000) >> 16) * (cac_window
& 0x0000FFFF);
2202 wintime
= (cac_window_size
* 100) / xclk
;
2207 static u32
si_scale_power_for_smc(u32 power_in_watts
, u32 scaling_factor
)
2209 return power_in_watts
;
2212 static int si_calculate_adjusted_tdp_limits(struct amdgpu_device
*adev
,
2213 bool adjust_polarity
,
2216 u32
*near_tdp_limit
)
2218 u32 adjustment_delta
, max_tdp_limit
;
2220 if (tdp_adjustment
> (u32
)adev
->pm
.dpm
.tdp_od_limit
)
2223 max_tdp_limit
= ((100 + 100) * adev
->pm
.dpm
.tdp_limit
) / 100;
2225 if (adjust_polarity
) {
2226 *tdp_limit
= ((100 + tdp_adjustment
) * adev
->pm
.dpm
.tdp_limit
) / 100;
2227 *near_tdp_limit
= adev
->pm
.dpm
.near_tdp_limit_adjusted
+ (*tdp_limit
- adev
->pm
.dpm
.tdp_limit
);
2229 *tdp_limit
= ((100 - tdp_adjustment
) * adev
->pm
.dpm
.tdp_limit
) / 100;
2230 adjustment_delta
= adev
->pm
.dpm
.tdp_limit
- *tdp_limit
;
2231 if (adjustment_delta
< adev
->pm
.dpm
.near_tdp_limit_adjusted
)
2232 *near_tdp_limit
= adev
->pm
.dpm
.near_tdp_limit_adjusted
- adjustment_delta
;
2234 *near_tdp_limit
= 0;
2237 if ((*tdp_limit
<= 0) || (*tdp_limit
> max_tdp_limit
))
2239 if ((*near_tdp_limit
<= 0) || (*near_tdp_limit
> *tdp_limit
))
2245 static int si_populate_smc_tdp_limits(struct amdgpu_device
*adev
,
2246 struct amdgpu_ps
*amdgpu_state
)
2248 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2249 struct si_power_info
*si_pi
= si_get_pi(adev
);
2251 if (ni_pi
->enable_power_containment
) {
2252 SISLANDS_SMC_STATETABLE
*smc_table
= &si_pi
->smc_statetable
;
2253 PP_SIslands_PAPMParameters
*papm_parm
;
2254 struct amdgpu_ppm_table
*ppm
= adev
->pm
.dpm
.dyn_state
.ppm_table
;
2255 u32 scaling_factor
= si_get_smc_power_scaling_factor(adev
);
2260 if (scaling_factor
== 0)
2263 memset(smc_table
, 0, sizeof(SISLANDS_SMC_STATETABLE
));
2265 ret
= si_calculate_adjusted_tdp_limits(adev
,
2267 adev
->pm
.dpm
.tdp_adjustment
,
2273 smc_table
->dpm2Params
.TDPLimit
=
2274 cpu_to_be32(si_scale_power_for_smc(tdp_limit
, scaling_factor
) * 1000);
2275 smc_table
->dpm2Params
.NearTDPLimit
=
2276 cpu_to_be32(si_scale_power_for_smc(near_tdp_limit
, scaling_factor
) * 1000);
2277 smc_table
->dpm2Params
.SafePowerLimit
=
2278 cpu_to_be32(si_scale_power_for_smc((near_tdp_limit
* SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT
) / 100, scaling_factor
) * 1000);
2280 ret
= amdgpu_si_copy_bytes_to_smc(adev
,
2281 (si_pi
->state_table_start
+ offsetof(SISLANDS_SMC_STATETABLE
, dpm2Params
) +
2282 offsetof(PP_SIslands_DPM2Parameters
, TDPLimit
)),
2283 (u8
*)(&(smc_table
->dpm2Params
.TDPLimit
)),
2289 if (si_pi
->enable_ppm
) {
2290 papm_parm
= &si_pi
->papm_parm
;
2291 memset(papm_parm
, 0, sizeof(PP_SIslands_PAPMParameters
));
2292 papm_parm
->NearTDPLimitTherm
= cpu_to_be32(ppm
->dgpu_tdp
);
2293 papm_parm
->dGPU_T_Limit
= cpu_to_be32(ppm
->tj_max
);
2294 papm_parm
->dGPU_T_Warning
= cpu_to_be32(95);
2295 papm_parm
->dGPU_T_Hysteresis
= cpu_to_be32(5);
2296 papm_parm
->PlatformPowerLimit
= 0xffffffff;
2297 papm_parm
->NearTDPLimitPAPM
= 0xffffffff;
2299 ret
= amdgpu_si_copy_bytes_to_smc(adev
, si_pi
->papm_cfg_table_start
,
2301 sizeof(PP_SIslands_PAPMParameters
),
2310 static int si_populate_smc_tdp_limits_2(struct amdgpu_device
*adev
,
2311 struct amdgpu_ps
*amdgpu_state
)
2313 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2314 struct si_power_info
*si_pi
= si_get_pi(adev
);
2316 if (ni_pi
->enable_power_containment
) {
2317 SISLANDS_SMC_STATETABLE
*smc_table
= &si_pi
->smc_statetable
;
2318 u32 scaling_factor
= si_get_smc_power_scaling_factor(adev
);
2321 memset(smc_table
, 0, sizeof(SISLANDS_SMC_STATETABLE
));
2323 smc_table
->dpm2Params
.NearTDPLimit
=
2324 cpu_to_be32(si_scale_power_for_smc(adev
->pm
.dpm
.near_tdp_limit_adjusted
, scaling_factor
) * 1000);
2325 smc_table
->dpm2Params
.SafePowerLimit
=
2326 cpu_to_be32(si_scale_power_for_smc((adev
->pm
.dpm
.near_tdp_limit_adjusted
* SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT
) / 100, scaling_factor
) * 1000);
2328 ret
= amdgpu_si_copy_bytes_to_smc(adev
,
2329 (si_pi
->state_table_start
+
2330 offsetof(SISLANDS_SMC_STATETABLE
, dpm2Params
) +
2331 offsetof(PP_SIslands_DPM2Parameters
, NearTDPLimit
)),
2332 (u8
*)(&(smc_table
->dpm2Params
.NearTDPLimit
)),
2342 static u16
si_calculate_power_efficiency_ratio(struct amdgpu_device
*adev
,
2343 const u16 prev_std_vddc
,
2344 const u16 curr_std_vddc
)
2346 u64 margin
= (u64
)SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN
;
2347 u64 prev_vddc
= (u64
)prev_std_vddc
;
2348 u64 curr_vddc
= (u64
)curr_std_vddc
;
2349 u64 pwr_efficiency_ratio
, n
, d
;
2351 if ((prev_vddc
== 0) || (curr_vddc
== 0))
2354 n
= div64_u64((u64
)1024 * curr_vddc
* curr_vddc
* ((u64
)1000 + margin
), (u64
)1000);
2355 d
= prev_vddc
* prev_vddc
;
2356 pwr_efficiency_ratio
= div64_u64(n
, d
);
2358 if (pwr_efficiency_ratio
> (u64
)0xFFFF)
2361 return (u16
)pwr_efficiency_ratio
;
2364 static bool si_should_disable_uvd_powertune(struct amdgpu_device
*adev
,
2365 struct amdgpu_ps
*amdgpu_state
)
2367 struct si_power_info
*si_pi
= si_get_pi(adev
);
2369 if (si_pi
->dyn_powertune_data
.disable_uvd_powertune
&&
2370 amdgpu_state
->vclk
&& amdgpu_state
->dclk
)
2376 struct evergreen_power_info
*evergreen_get_pi(struct amdgpu_device
*adev
)
2378 struct evergreen_power_info
*pi
= adev
->pm
.dpm
.priv
;
2383 static int si_populate_power_containment_values(struct amdgpu_device
*adev
,
2384 struct amdgpu_ps
*amdgpu_state
,
2385 SISLANDS_SMC_SWSTATE
*smc_state
)
2387 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
2388 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2389 struct si_ps
*state
= si_get_ps(amdgpu_state
);
2390 SISLANDS_SMC_VOLTAGE_VALUE vddc
;
2397 u16 pwr_efficiency_ratio
;
2399 bool disable_uvd_power_tune
;
2402 if (ni_pi
->enable_power_containment
== false)
2405 if (state
->performance_level_count
== 0)
2408 if (smc_state
->levelCount
!= state
->performance_level_count
)
2411 disable_uvd_power_tune
= si_should_disable_uvd_powertune(adev
, amdgpu_state
);
2413 smc_state
->levels
[0].dpm2
.MaxPS
= 0;
2414 smc_state
->levels
[0].dpm2
.NearTDPDec
= 0;
2415 smc_state
->levels
[0].dpm2
.AboveSafeInc
= 0;
2416 smc_state
->levels
[0].dpm2
.BelowSafeInc
= 0;
2417 smc_state
->levels
[0].dpm2
.PwrEfficiencyRatio
= 0;
2419 for (i
= 1; i
< state
->performance_level_count
; i
++) {
2420 prev_sclk
= state
->performance_levels
[i
-1].sclk
;
2421 max_sclk
= state
->performance_levels
[i
].sclk
;
2423 max_ps_percent
= SISLANDS_DPM2_MAXPS_PERCENT_M
;
2425 max_ps_percent
= SISLANDS_DPM2_MAXPS_PERCENT_H
;
2427 if (prev_sclk
> max_sclk
)
2430 if ((max_ps_percent
== 0) ||
2431 (prev_sclk
== max_sclk
) ||
2432 disable_uvd_power_tune
)
2433 min_sclk
= max_sclk
;
2435 min_sclk
= prev_sclk
;
2437 min_sclk
= (prev_sclk
* (u32
)max_ps_percent
) / 100;
2439 if (min_sclk
< state
->performance_levels
[0].sclk
)
2440 min_sclk
= state
->performance_levels
[0].sclk
;
2445 ret
= si_populate_voltage_value(adev
, &eg_pi
->vddc_voltage_table
,
2446 state
->performance_levels
[i
-1].vddc
, &vddc
);
2450 ret
= si_get_std_voltage_value(adev
, &vddc
, &prev_std_vddc
);
2454 ret
= si_populate_voltage_value(adev
, &eg_pi
->vddc_voltage_table
,
2455 state
->performance_levels
[i
].vddc
, &vddc
);
2459 ret
= si_get_std_voltage_value(adev
, &vddc
, &curr_std_vddc
);
2463 pwr_efficiency_ratio
= si_calculate_power_efficiency_ratio(adev
,
2464 prev_std_vddc
, curr_std_vddc
);
2466 smc_state
->levels
[i
].dpm2
.MaxPS
= (u8
)((SISLANDS_DPM2_MAX_PULSE_SKIP
* (max_sclk
- min_sclk
)) / max_sclk
);
2467 smc_state
->levels
[i
].dpm2
.NearTDPDec
= SISLANDS_DPM2_NEAR_TDP_DEC
;
2468 smc_state
->levels
[i
].dpm2
.AboveSafeInc
= SISLANDS_DPM2_ABOVE_SAFE_INC
;
2469 smc_state
->levels
[i
].dpm2
.BelowSafeInc
= SISLANDS_DPM2_BELOW_SAFE_INC
;
2470 smc_state
->levels
[i
].dpm2
.PwrEfficiencyRatio
= cpu_to_be16(pwr_efficiency_ratio
);
2476 static int si_populate_sq_ramping_values(struct amdgpu_device
*adev
,
2477 struct amdgpu_ps
*amdgpu_state
,
2478 SISLANDS_SMC_SWSTATE
*smc_state
)
2480 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2481 struct si_ps
*state
= si_get_ps(amdgpu_state
);
2482 u32 sq_power_throttle
, sq_power_throttle2
;
2483 bool enable_sq_ramping
= ni_pi
->enable_sq_ramping
;
2486 if (state
->performance_level_count
== 0)
2489 if (smc_state
->levelCount
!= state
->performance_level_count
)
2492 if (adev
->pm
.dpm
.sq_ramping_threshold
== 0)
2495 if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER
> (MAX_POWER_MASK
>> MAX_POWER_SHIFT
))
2496 enable_sq_ramping
= false;
2498 if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER
> (MIN_POWER_MASK
>> MIN_POWER_SHIFT
))
2499 enable_sq_ramping
= false;
2501 if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA
> (MAX_POWER_DELTA_MASK
>> MAX_POWER_DELTA_SHIFT
))
2502 enable_sq_ramping
= false;
2504 if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE
> (STI_SIZE_MASK
>> STI_SIZE_SHIFT
))
2505 enable_sq_ramping
= false;
2507 if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO
> (LTI_RATIO_MASK
>> LTI_RATIO_SHIFT
))
2508 enable_sq_ramping
= false;
2510 for (i
= 0; i
< state
->performance_level_count
; i
++) {
2511 sq_power_throttle
= 0;
2512 sq_power_throttle2
= 0;
2514 if ((state
->performance_levels
[i
].sclk
>= adev
->pm
.dpm
.sq_ramping_threshold
) &&
2515 enable_sq_ramping
) {
2516 sq_power_throttle
|= MAX_POWER(SISLANDS_DPM2_SQ_RAMP_MAX_POWER
);
2517 sq_power_throttle
|= MIN_POWER(SISLANDS_DPM2_SQ_RAMP_MIN_POWER
);
2518 sq_power_throttle2
|= MAX_POWER_DELTA(SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA
);
2519 sq_power_throttle2
|= STI_SIZE(SISLANDS_DPM2_SQ_RAMP_STI_SIZE
);
2520 sq_power_throttle2
|= LTI_RATIO(SISLANDS_DPM2_SQ_RAMP_LTI_RATIO
);
2522 sq_power_throttle
|= MAX_POWER_MASK
| MIN_POWER_MASK
;
2523 sq_power_throttle2
|= MAX_POWER_DELTA_MASK
| STI_SIZE_MASK
| LTI_RATIO_MASK
;
2526 smc_state
->levels
[i
].SQPowerThrottle
= cpu_to_be32(sq_power_throttle
);
2527 smc_state
->levels
[i
].SQPowerThrottle_2
= cpu_to_be32(sq_power_throttle2
);
2533 static int si_enable_power_containment(struct amdgpu_device
*adev
,
2534 struct amdgpu_ps
*amdgpu_new_state
,
2537 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2538 PPSMC_Result smc_result
;
2541 if (ni_pi
->enable_power_containment
) {
2543 if (!si_should_disable_uvd_powertune(adev
, amdgpu_new_state
)) {
2544 smc_result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_TDPClampingActive
);
2545 if (smc_result
!= PPSMC_Result_OK
) {
2547 ni_pi
->pc_enabled
= false;
2549 ni_pi
->pc_enabled
= true;
2553 smc_result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_TDPClampingInactive
);
2554 if (smc_result
!= PPSMC_Result_OK
)
2556 ni_pi
->pc_enabled
= false;
2563 static int si_initialize_smc_dte_tables(struct amdgpu_device
*adev
)
2565 struct si_power_info
*si_pi
= si_get_pi(adev
);
2567 struct si_dte_data
*dte_data
= &si_pi
->dte_data
;
2568 Smc_SIslands_DTE_Configuration
*dte_tables
= NULL
;
2573 if (dte_data
== NULL
)
2574 si_pi
->enable_dte
= false;
2576 if (si_pi
->enable_dte
== false)
2579 if (dte_data
->k
<= 0)
2582 dte_tables
= kzalloc(sizeof(Smc_SIslands_DTE_Configuration
), GFP_KERNEL
);
2583 if (dte_tables
== NULL
) {
2584 si_pi
->enable_dte
= false;
2588 table_size
= dte_data
->k
;
2590 if (table_size
> SMC_SISLANDS_DTE_MAX_FILTER_STAGES
)
2591 table_size
= SMC_SISLANDS_DTE_MAX_FILTER_STAGES
;
2593 tdep_count
= dte_data
->tdep_count
;
2594 if (tdep_count
> SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE
)
2595 tdep_count
= SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE
;
2597 dte_tables
->K
= cpu_to_be32(table_size
);
2598 dte_tables
->T0
= cpu_to_be32(dte_data
->t0
);
2599 dte_tables
->MaxT
= cpu_to_be32(dte_data
->max_t
);
2600 dte_tables
->WindowSize
= dte_data
->window_size
;
2601 dte_tables
->temp_select
= dte_data
->temp_select
;
2602 dte_tables
->DTE_mode
= dte_data
->dte_mode
;
2603 dte_tables
->Tthreshold
= cpu_to_be32(dte_data
->t_threshold
);
2608 for (i
= 0; i
< table_size
; i
++) {
2609 dte_tables
->tau
[i
] = cpu_to_be32(dte_data
->tau
[i
]);
2610 dte_tables
->R
[i
] = cpu_to_be32(dte_data
->r
[i
]);
2613 dte_tables
->Tdep_count
= tdep_count
;
2615 for (i
= 0; i
< (u32
)tdep_count
; i
++) {
2616 dte_tables
->T_limits
[i
] = dte_data
->t_limits
[i
];
2617 dte_tables
->Tdep_tau
[i
] = cpu_to_be32(dte_data
->tdep_tau
[i
]);
2618 dte_tables
->Tdep_R
[i
] = cpu_to_be32(dte_data
->tdep_r
[i
]);
2621 ret
= amdgpu_si_copy_bytes_to_smc(adev
, si_pi
->dte_table_start
,
2623 sizeof(Smc_SIslands_DTE_Configuration
),
2630 static int si_get_cac_std_voltage_max_min(struct amdgpu_device
*adev
,
2633 struct si_power_info
*si_pi
= si_get_pi(adev
);
2634 struct amdgpu_cac_leakage_table
*table
=
2635 &adev
->pm
.dpm
.dyn_state
.cac_leakage_table
;
2645 for (i
= 0; i
< table
->count
; i
++) {
2646 if (table
->entries
[i
].vddc
> *max
)
2647 *max
= table
->entries
[i
].vddc
;
2648 if (table
->entries
[i
].vddc
< *min
)
2649 *min
= table
->entries
[i
].vddc
;
2652 if (si_pi
->powertune_data
->lkge_lut_v0_percent
> 100)
2655 v0_loadline
= (*min
) * (100 - si_pi
->powertune_data
->lkge_lut_v0_percent
) / 100;
2657 if (v0_loadline
> 0xFFFFUL
)
2660 *min
= (u16
)v0_loadline
;
2662 if ((*min
> *max
) || (*max
== 0) || (*min
== 0))
2668 static u16
si_get_cac_std_voltage_step(u16 max
, u16 min
)
2670 return ((max
- min
) + (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES
- 1)) /
2671 SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES
;
2674 static int si_init_dte_leakage_table(struct amdgpu_device
*adev
,
2675 PP_SIslands_CacConfig
*cac_tables
,
2676 u16 vddc_max
, u16 vddc_min
, u16 vddc_step
,
2679 struct si_power_info
*si_pi
= si_get_pi(adev
);
2687 scaling_factor
= si_get_smc_power_scaling_factor(adev
);
2689 for (i
= 0; i
< SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES
; i
++) {
2690 t
= (1000 * (i
* t_step
+ t0
));
2692 for (j
= 0; j
< SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES
; j
++) {
2693 voltage
= vddc_max
- (vddc_step
* j
);
2695 si_calculate_leakage_for_v_and_t(adev
,
2696 &si_pi
->powertune_data
->leakage_coefficients
,
2699 si_pi
->dyn_powertune_data
.cac_leakage
,
2702 smc_leakage
= si_scale_power_for_smc(leakage
, scaling_factor
) / 4;
2704 if (smc_leakage
> 0xFFFF)
2705 smc_leakage
= 0xFFFF;
2707 cac_tables
->cac_lkge_lut
[i
][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES
-1-j
] =
2708 cpu_to_be16((u16
)smc_leakage
);
2714 static int si_init_simplified_leakage_table(struct amdgpu_device
*adev
,
2715 PP_SIslands_CacConfig
*cac_tables
,
2716 u16 vddc_max
, u16 vddc_min
, u16 vddc_step
)
2718 struct si_power_info
*si_pi
= si_get_pi(adev
);
2725 scaling_factor
= si_get_smc_power_scaling_factor(adev
);
2727 for (j
= 0; j
< SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES
; j
++) {
2728 voltage
= vddc_max
- (vddc_step
* j
);
2730 si_calculate_leakage_for_v(adev
,
2731 &si_pi
->powertune_data
->leakage_coefficients
,
2732 si_pi
->powertune_data
->fixed_kt
,
2734 si_pi
->dyn_powertune_data
.cac_leakage
,
2737 smc_leakage
= si_scale_power_for_smc(leakage
, scaling_factor
) / 4;
2739 if (smc_leakage
> 0xFFFF)
2740 smc_leakage
= 0xFFFF;
2742 for (i
= 0; i
< SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES
; i
++)
2743 cac_tables
->cac_lkge_lut
[i
][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES
-1-j
] =
2744 cpu_to_be16((u16
)smc_leakage
);
2749 static int si_initialize_smc_cac_tables(struct amdgpu_device
*adev
)
2751 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2752 struct si_power_info
*si_pi
= si_get_pi(adev
);
2753 PP_SIslands_CacConfig
*cac_tables
= NULL
;
2754 u16 vddc_max
, vddc_min
, vddc_step
;
2756 u32 load_line_slope
, reg
;
2758 u32 ticks_per_us
= amdgpu_asic_get_xclk(adev
) / 100;
2760 if (ni_pi
->enable_cac
== false)
2763 cac_tables
= kzalloc(sizeof(PP_SIslands_CacConfig
), GFP_KERNEL
);
2767 reg
= RREG32(CG_CAC_CTRL
) & ~CAC_WINDOW_MASK
;
2768 reg
|= CAC_WINDOW(si_pi
->powertune_data
->cac_window
);
2769 WREG32(CG_CAC_CTRL
, reg
);
2771 si_pi
->dyn_powertune_data
.cac_leakage
= adev
->pm
.dpm
.cac_leakage
;
2772 si_pi
->dyn_powertune_data
.dc_pwr_value
=
2773 si_pi
->powertune_data
->dc_cac
[NISLANDS_DCCAC_LEVEL_0
];
2774 si_pi
->dyn_powertune_data
.wintime
= si_calculate_cac_wintime(adev
);
2775 si_pi
->dyn_powertune_data
.shift_n
= si_pi
->powertune_data
->shift_n_default
;
2777 si_pi
->dyn_powertune_data
.leakage_minimum_temperature
= 80 * 1000;
2779 ret
= si_get_cac_std_voltage_max_min(adev
, &vddc_max
, &vddc_min
);
2783 vddc_step
= si_get_cac_std_voltage_step(vddc_max
, vddc_min
);
2784 vddc_min
= vddc_max
- (vddc_step
* (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES
- 1));
2788 if (si_pi
->enable_dte
|| ni_pi
->driver_calculate_cac_leakage
)
2789 ret
= si_init_dte_leakage_table(adev
, cac_tables
,
2790 vddc_max
, vddc_min
, vddc_step
,
2793 ret
= si_init_simplified_leakage_table(adev
, cac_tables
,
2794 vddc_max
, vddc_min
, vddc_step
);
2798 load_line_slope
= ((u32
)adev
->pm
.dpm
.load_line_slope
<< SMC_SISLANDS_SCALE_R
) / 100;
2800 cac_tables
->l2numWin_TDP
= cpu_to_be32(si_pi
->dyn_powertune_data
.l2_lta_window_size
);
2801 cac_tables
->lts_truncate_n
= si_pi
->dyn_powertune_data
.lts_truncate
;
2802 cac_tables
->SHIFT_N
= si_pi
->dyn_powertune_data
.shift_n
;
2803 cac_tables
->lkge_lut_V0
= cpu_to_be32((u32
)vddc_min
);
2804 cac_tables
->lkge_lut_Vstep
= cpu_to_be32((u32
)vddc_step
);
2805 cac_tables
->R_LL
= cpu_to_be32(load_line_slope
);
2806 cac_tables
->WinTime
= cpu_to_be32(si_pi
->dyn_powertune_data
.wintime
);
2807 cac_tables
->calculation_repeats
= cpu_to_be32(2);
2808 cac_tables
->dc_cac
= cpu_to_be32(0);
2809 cac_tables
->log2_PG_LKG_SCALE
= 12;
2810 cac_tables
->cac_temp
= si_pi
->powertune_data
->operating_temp
;
2811 cac_tables
->lkge_lut_T0
= cpu_to_be32((u32
)t0
);
2812 cac_tables
->lkge_lut_Tstep
= cpu_to_be32((u32
)t_step
);
2814 ret
= amdgpu_si_copy_bytes_to_smc(adev
, si_pi
->cac_table_start
,
2816 sizeof(PP_SIslands_CacConfig
),
2822 ret
= si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_ticks_per_us
, ticks_per_us
);
2826 ni_pi
->enable_cac
= false;
2827 ni_pi
->enable_power_containment
= false;
2835 static int si_program_cac_config_registers(struct amdgpu_device
*adev
,
2836 const struct si_cac_config_reg
*cac_config_regs
)
2838 const struct si_cac_config_reg
*config_regs
= cac_config_regs
;
2839 u32 data
= 0, offset
;
2844 while (config_regs
->offset
!= 0xFFFFFFFF) {
2845 switch (config_regs
->type
) {
2846 case SISLANDS_CACCONFIG_CGIND
:
2847 offset
= SMC_CG_IND_START
+ config_regs
->offset
;
2848 if (offset
< SMC_CG_IND_END
)
2849 data
= RREG32_SMC(offset
);
2852 data
= RREG32(config_regs
->offset
);
2856 data
&= ~config_regs
->mask
;
2857 data
|= ((config_regs
->value
<< config_regs
->shift
) & config_regs
->mask
);
2859 switch (config_regs
->type
) {
2860 case SISLANDS_CACCONFIG_CGIND
:
2861 offset
= SMC_CG_IND_START
+ config_regs
->offset
;
2862 if (offset
< SMC_CG_IND_END
)
2863 WREG32_SMC(offset
, data
);
2866 WREG32(config_regs
->offset
, data
);
2874 static int si_initialize_hardware_cac_manager(struct amdgpu_device
*adev
)
2876 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2877 struct si_power_info
*si_pi
= si_get_pi(adev
);
2880 if ((ni_pi
->enable_cac
== false) ||
2881 (ni_pi
->cac_configuration_required
== false))
2884 ret
= si_program_cac_config_registers(adev
, si_pi
->lcac_config
);
2887 ret
= si_program_cac_config_registers(adev
, si_pi
->cac_override
);
2890 ret
= si_program_cac_config_registers(adev
, si_pi
->cac_weights
);
2897 static int si_enable_smc_cac(struct amdgpu_device
*adev
,
2898 struct amdgpu_ps
*amdgpu_new_state
,
2901 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2902 struct si_power_info
*si_pi
= si_get_pi(adev
);
2903 PPSMC_Result smc_result
;
2906 if (ni_pi
->enable_cac
) {
2908 if (!si_should_disable_uvd_powertune(adev
, amdgpu_new_state
)) {
2909 if (ni_pi
->support_cac_long_term_average
) {
2910 smc_result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_CACLongTermAvgEnable
);
2911 if (smc_result
!= PPSMC_Result_OK
)
2912 ni_pi
->support_cac_long_term_average
= false;
2915 smc_result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_EnableCac
);
2916 if (smc_result
!= PPSMC_Result_OK
) {
2918 ni_pi
->cac_enabled
= false;
2920 ni_pi
->cac_enabled
= true;
2923 if (si_pi
->enable_dte
) {
2924 smc_result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_EnableDTE
);
2925 if (smc_result
!= PPSMC_Result_OK
)
2929 } else if (ni_pi
->cac_enabled
) {
2930 if (si_pi
->enable_dte
)
2931 smc_result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_DisableDTE
);
2933 smc_result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_DisableCac
);
2935 ni_pi
->cac_enabled
= false;
2937 if (ni_pi
->support_cac_long_term_average
)
2938 smc_result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_CACLongTermAvgDisable
);
2944 static int si_init_smc_spll_table(struct amdgpu_device
*adev
)
2946 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2947 struct si_power_info
*si_pi
= si_get_pi(adev
);
2948 SMC_SISLANDS_SPLL_DIV_TABLE
*spll_table
;
2949 SISLANDS_SMC_SCLK_VALUE sclk_params
;
2957 if (si_pi
->spll_table_start
== 0)
2960 spll_table
= kzalloc(sizeof(SMC_SISLANDS_SPLL_DIV_TABLE
), GFP_KERNEL
);
2961 if (spll_table
== NULL
)
2964 for (i
= 0; i
< 256; i
++) {
2965 ret
= si_calculate_sclk_params(adev
, sclk
, &sclk_params
);
2968 p_div
= (sclk_params
.vCG_SPLL_FUNC_CNTL
& SPLL_PDIV_A_MASK
) >> SPLL_PDIV_A_SHIFT
;
2969 fb_div
= (sclk_params
.vCG_SPLL_FUNC_CNTL_3
& SPLL_FB_DIV_MASK
) >> SPLL_FB_DIV_SHIFT
;
2970 clk_s
= (sclk_params
.vCG_SPLL_SPREAD_SPECTRUM
& CLK_S_MASK
) >> CLK_S_SHIFT
;
2971 clk_v
= (sclk_params
.vCG_SPLL_SPREAD_SPECTRUM_2
& CLK_V_MASK
) >> CLK_V_SHIFT
;
2973 fb_div
&= ~0x00001FFF;
2977 if (p_div
& ~(SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK
>> SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT
))
2979 if (fb_div
& ~(SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK
>> SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT
))
2981 if (clk_s
& ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK
>> SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT
))
2983 if (clk_v
& ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK
>> SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT
))
2989 tmp
= ((fb_div
<< SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT
) & SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK
) |
2990 ((p_div
<< SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT
) & SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK
);
2991 spll_table
->freq
[i
] = cpu_to_be32(tmp
);
2993 tmp
= ((clk_v
<< SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT
) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK
) |
2994 ((clk_s
<< SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT
) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK
);
2995 spll_table
->ss
[i
] = cpu_to_be32(tmp
);
3002 ret
= amdgpu_si_copy_bytes_to_smc(adev
, si_pi
->spll_table_start
,
3004 sizeof(SMC_SISLANDS_SPLL_DIV_TABLE
),
3008 ni_pi
->enable_power_containment
= false;
3015 static u16
si_get_lower_of_leakage_and_vce_voltage(struct amdgpu_device
*adev
,
3018 u16 highest_leakage
= 0;
3019 struct si_power_info
*si_pi
= si_get_pi(adev
);
3022 for (i
= 0; i
< si_pi
->leakage_voltage
.count
; i
++){
3023 if (highest_leakage
< si_pi
->leakage_voltage
.entries
[i
].voltage
)
3024 highest_leakage
= si_pi
->leakage_voltage
.entries
[i
].voltage
;
3027 if (si_pi
->leakage_voltage
.count
&& (highest_leakage
< vce_voltage
))
3028 return highest_leakage
;
3033 static int si_get_vce_clock_voltage(struct amdgpu_device
*adev
,
3034 u32 evclk
, u32 ecclk
, u16
*voltage
)
3038 struct amdgpu_vce_clock_voltage_dependency_table
*table
=
3039 &adev
->pm
.dpm
.dyn_state
.vce_clock_voltage_dependency_table
;
3041 if (((evclk
== 0) && (ecclk
== 0)) ||
3042 (table
&& (table
->count
== 0))) {
3047 for (i
= 0; i
< table
->count
; i
++) {
3048 if ((evclk
<= table
->entries
[i
].evclk
) &&
3049 (ecclk
<= table
->entries
[i
].ecclk
)) {
3050 *voltage
= table
->entries
[i
].v
;
3056 /* if no match return the highest voltage */
3058 *voltage
= table
->entries
[table
->count
- 1].v
;
3060 *voltage
= si_get_lower_of_leakage_and_vce_voltage(adev
, *voltage
);
3065 static bool si_dpm_vblank_too_short(void *handle
)
3067 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
3068 u32 vblank_time
= amdgpu_dpm_get_vblank_time(adev
);
3069 /* we never hit the non-gddr5 limit so disable it */
3070 u32 switch_limit
= adev
->gmc
.vram_type
== AMDGPU_VRAM_TYPE_GDDR5
? 450 : 0;
3072 if (vblank_time
< switch_limit
)
3079 static int ni_copy_and_switch_arb_sets(struct amdgpu_device
*adev
,
3080 u32 arb_freq_src
, u32 arb_freq_dest
)
3082 u32 mc_arb_dram_timing
;
3083 u32 mc_arb_dram_timing2
;
3087 switch (arb_freq_src
) {
3088 case MC_CG_ARB_FREQ_F0
:
3089 mc_arb_dram_timing
= RREG32(MC_ARB_DRAM_TIMING
);
3090 mc_arb_dram_timing2
= RREG32(MC_ARB_DRAM_TIMING2
);
3091 burst_time
= (RREG32(MC_ARB_BURST_TIME
) & STATE0_MASK
) >> STATE0_SHIFT
;
3093 case MC_CG_ARB_FREQ_F1
:
3094 mc_arb_dram_timing
= RREG32(MC_ARB_DRAM_TIMING_1
);
3095 mc_arb_dram_timing2
= RREG32(MC_ARB_DRAM_TIMING2_1
);
3096 burst_time
= (RREG32(MC_ARB_BURST_TIME
) & STATE1_MASK
) >> STATE1_SHIFT
;
3098 case MC_CG_ARB_FREQ_F2
:
3099 mc_arb_dram_timing
= RREG32(MC_ARB_DRAM_TIMING_2
);
3100 mc_arb_dram_timing2
= RREG32(MC_ARB_DRAM_TIMING2_2
);
3101 burst_time
= (RREG32(MC_ARB_BURST_TIME
) & STATE2_MASK
) >> STATE2_SHIFT
;
3103 case MC_CG_ARB_FREQ_F3
:
3104 mc_arb_dram_timing
= RREG32(MC_ARB_DRAM_TIMING_3
);
3105 mc_arb_dram_timing2
= RREG32(MC_ARB_DRAM_TIMING2_3
);
3106 burst_time
= (RREG32(MC_ARB_BURST_TIME
) & STATE3_MASK
) >> STATE3_SHIFT
;
3112 switch (arb_freq_dest
) {
3113 case MC_CG_ARB_FREQ_F0
:
3114 WREG32(MC_ARB_DRAM_TIMING
, mc_arb_dram_timing
);
3115 WREG32(MC_ARB_DRAM_TIMING2
, mc_arb_dram_timing2
);
3116 WREG32_P(MC_ARB_BURST_TIME
, STATE0(burst_time
), ~STATE0_MASK
);
3118 case MC_CG_ARB_FREQ_F1
:
3119 WREG32(MC_ARB_DRAM_TIMING_1
, mc_arb_dram_timing
);
3120 WREG32(MC_ARB_DRAM_TIMING2_1
, mc_arb_dram_timing2
);
3121 WREG32_P(MC_ARB_BURST_TIME
, STATE1(burst_time
), ~STATE1_MASK
);
3123 case MC_CG_ARB_FREQ_F2
:
3124 WREG32(MC_ARB_DRAM_TIMING_2
, mc_arb_dram_timing
);
3125 WREG32(MC_ARB_DRAM_TIMING2_2
, mc_arb_dram_timing2
);
3126 WREG32_P(MC_ARB_BURST_TIME
, STATE2(burst_time
), ~STATE2_MASK
);
3128 case MC_CG_ARB_FREQ_F3
:
3129 WREG32(MC_ARB_DRAM_TIMING_3
, mc_arb_dram_timing
);
3130 WREG32(MC_ARB_DRAM_TIMING2_3
, mc_arb_dram_timing2
);
3131 WREG32_P(MC_ARB_BURST_TIME
, STATE3(burst_time
), ~STATE3_MASK
);
3137 mc_cg_config
= RREG32(MC_CG_CONFIG
) | 0x0000000F;
3138 WREG32(MC_CG_CONFIG
, mc_cg_config
);
3139 WREG32_P(MC_ARB_CG
, CG_ARB_REQ(arb_freq_dest
), ~CG_ARB_REQ_MASK
);
3144 static void ni_update_current_ps(struct amdgpu_device
*adev
,
3145 struct amdgpu_ps
*rps
)
3147 struct si_ps
*new_ps
= si_get_ps(rps
);
3148 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
3149 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
3151 eg_pi
->current_rps
= *rps
;
3152 ni_pi
->current_ps
= *new_ps
;
3153 eg_pi
->current_rps
.ps_priv
= &ni_pi
->current_ps
;
3154 adev
->pm
.dpm
.current_ps
= &eg_pi
->current_rps
;
3157 static void ni_update_requested_ps(struct amdgpu_device
*adev
,
3158 struct amdgpu_ps
*rps
)
3160 struct si_ps
*new_ps
= si_get_ps(rps
);
3161 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
3162 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
3164 eg_pi
->requested_rps
= *rps
;
3165 ni_pi
->requested_ps
= *new_ps
;
3166 eg_pi
->requested_rps
.ps_priv
= &ni_pi
->requested_ps
;
3167 adev
->pm
.dpm
.requested_ps
= &eg_pi
->requested_rps
;
3170 static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device
*adev
,
3171 struct amdgpu_ps
*new_ps
,
3172 struct amdgpu_ps
*old_ps
)
3174 struct si_ps
*new_state
= si_get_ps(new_ps
);
3175 struct si_ps
*current_state
= si_get_ps(old_ps
);
3177 if ((new_ps
->vclk
== old_ps
->vclk
) &&
3178 (new_ps
->dclk
== old_ps
->dclk
))
3181 if (new_state
->performance_levels
[new_state
->performance_level_count
- 1].sclk
>=
3182 current_state
->performance_levels
[current_state
->performance_level_count
- 1].sclk
)
3185 amdgpu_asic_set_uvd_clocks(adev
, new_ps
->vclk
, new_ps
->dclk
);
3188 static void ni_set_uvd_clock_after_set_eng_clock(struct amdgpu_device
*adev
,
3189 struct amdgpu_ps
*new_ps
,
3190 struct amdgpu_ps
*old_ps
)
3192 struct si_ps
*new_state
= si_get_ps(new_ps
);
3193 struct si_ps
*current_state
= si_get_ps(old_ps
);
3195 if ((new_ps
->vclk
== old_ps
->vclk
) &&
3196 (new_ps
->dclk
== old_ps
->dclk
))
3199 if (new_state
->performance_levels
[new_state
->performance_level_count
- 1].sclk
<
3200 current_state
->performance_levels
[current_state
->performance_level_count
- 1].sclk
)
3203 amdgpu_asic_set_uvd_clocks(adev
, new_ps
->vclk
, new_ps
->dclk
);
3206 static u16
btc_find_voltage(struct atom_voltage_table
*table
, u16 voltage
)
3210 for (i
= 0; i
< table
->count
; i
++)
3211 if (voltage
<= table
->entries
[i
].value
)
3212 return table
->entries
[i
].value
;
3214 return table
->entries
[table
->count
- 1].value
;
3217 static u32
btc_find_valid_clock(struct amdgpu_clock_array
*clocks
,
3218 u32 max_clock
, u32 requested_clock
)
3222 if ((clocks
== NULL
) || (clocks
->count
== 0))
3223 return (requested_clock
< max_clock
) ? requested_clock
: max_clock
;
3225 for (i
= 0; i
< clocks
->count
; i
++) {
3226 if (clocks
->values
[i
] >= requested_clock
)
3227 return (clocks
->values
[i
] < max_clock
) ? clocks
->values
[i
] : max_clock
;
3230 return (clocks
->values
[clocks
->count
- 1] < max_clock
) ?
3231 clocks
->values
[clocks
->count
- 1] : max_clock
;
3234 static u32
btc_get_valid_mclk(struct amdgpu_device
*adev
,
3235 u32 max_mclk
, u32 requested_mclk
)
3237 return btc_find_valid_clock(&adev
->pm
.dpm
.dyn_state
.valid_mclk_values
,
3238 max_mclk
, requested_mclk
);
3241 static u32
btc_get_valid_sclk(struct amdgpu_device
*adev
,
3242 u32 max_sclk
, u32 requested_sclk
)
3244 return btc_find_valid_clock(&adev
->pm
.dpm
.dyn_state
.valid_sclk_values
,
3245 max_sclk
, requested_sclk
);
3248 static void btc_get_max_clock_from_voltage_dependency_table(struct amdgpu_clock_voltage_dependency_table
*table
,
3253 if ((table
== NULL
) || (table
->count
== 0)) {
3258 for (i
= 0; i
< table
->count
; i
++) {
3259 if (clock
< table
->entries
[i
].clk
)
3260 clock
= table
->entries
[i
].clk
;
3265 static void btc_apply_voltage_dependency_rules(struct amdgpu_clock_voltage_dependency_table
*table
,
3266 u32 clock
, u16 max_voltage
, u16
*voltage
)
3270 if ((table
== NULL
) || (table
->count
== 0))
3273 for (i
= 0; i
< table
->count
; i
++) {
3274 if (clock
<= table
->entries
[i
].clk
) {
3275 if (*voltage
< table
->entries
[i
].v
)
3276 *voltage
= (u16
)((table
->entries
[i
].v
< max_voltage
) ?
3277 table
->entries
[i
].v
: max_voltage
);
3282 *voltage
= (*voltage
> max_voltage
) ? *voltage
: max_voltage
;
3285 static void btc_adjust_clock_combinations(struct amdgpu_device
*adev
,
3286 const struct amdgpu_clock_and_voltage_limits
*max_limits
,
3287 struct rv7xx_pl
*pl
)
3290 if ((pl
->mclk
== 0) || (pl
->sclk
== 0))
3293 if (pl
->mclk
== pl
->sclk
)
3296 if (pl
->mclk
> pl
->sclk
) {
3297 if (((pl
->mclk
+ (pl
->sclk
- 1)) / pl
->sclk
) > adev
->pm
.dpm
.dyn_state
.mclk_sclk_ratio
)
3298 pl
->sclk
= btc_get_valid_sclk(adev
,
3301 (adev
->pm
.dpm
.dyn_state
.mclk_sclk_ratio
- 1)) /
3302 adev
->pm
.dpm
.dyn_state
.mclk_sclk_ratio
);
3304 if ((pl
->sclk
- pl
->mclk
) > adev
->pm
.dpm
.dyn_state
.sclk_mclk_delta
)
3305 pl
->mclk
= btc_get_valid_mclk(adev
,
3308 adev
->pm
.dpm
.dyn_state
.sclk_mclk_delta
);
3312 static void btc_apply_voltage_delta_rules(struct amdgpu_device
*adev
,
3313 u16 max_vddc
, u16 max_vddci
,
3314 u16
*vddc
, u16
*vddci
)
3316 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
3319 if ((0 == *vddc
) || (0 == *vddci
))
3322 if (*vddc
> *vddci
) {
3323 if ((*vddc
- *vddci
) > adev
->pm
.dpm
.dyn_state
.vddc_vddci_delta
) {
3324 new_voltage
= btc_find_voltage(&eg_pi
->vddci_voltage_table
,
3325 (*vddc
- adev
->pm
.dpm
.dyn_state
.vddc_vddci_delta
));
3326 *vddci
= (new_voltage
< max_vddci
) ? new_voltage
: max_vddci
;
3329 if ((*vddci
- *vddc
) > adev
->pm
.dpm
.dyn_state
.vddc_vddci_delta
) {
3330 new_voltage
= btc_find_voltage(&eg_pi
->vddc_voltage_table
,
3331 (*vddci
- adev
->pm
.dpm
.dyn_state
.vddc_vddci_delta
));
3332 *vddc
= (new_voltage
< max_vddc
) ? new_voltage
: max_vddc
;
3337 static void r600_calculate_u_and_p(u32 i
, u32 r_c
, u32 p_b
,
3344 i_c
= (i
* r_c
) / 100;
3353 *p
= i_c
/ (1 << (2 * (*u
)));
3356 static int r600_calculate_at(u32 t
, u32 h
, u32 fh
, u32 fl
, u32
*tl
, u32
*th
)
3361 if ((fl
== 0) || (fh
== 0) || (fl
> fh
))
3364 k
= (100 * fh
) / fl
;
3365 t1
= (t
* (k
- 100));
3366 a
= (1000 * (100 * h
+ t1
)) / (10000 + (t1
/ 100));
3368 ah
= ((a
* t
) + 5000) / 10000;
3377 static bool r600_is_uvd_state(u32
class, u32 class2
)
3379 if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE
)
3381 if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE
)
3383 if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE
)
3385 if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE
)
3387 if (class2
& ATOM_PPLIB_CLASSIFICATION2_MVC
)
3392 static u8
rv770_get_memory_module_index(struct amdgpu_device
*adev
)
3394 return (u8
) ((RREG32(BIOS_SCRATCH_4
) >> 16) & 0xff);
3397 static void rv770_get_max_vddc(struct amdgpu_device
*adev
)
3399 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
3402 if (amdgpu_atombios_get_max_vddc(adev
, 0, 0, &vddc
))
3405 pi
->max_vddc
= vddc
;
3408 static void rv770_get_engine_memory_ss(struct amdgpu_device
*adev
)
3410 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
3411 struct amdgpu_atom_ss ss
;
3413 pi
->sclk_ss
= amdgpu_atombios_get_asic_ss_info(adev
, &ss
,
3414 ASIC_INTERNAL_ENGINE_SS
, 0);
3415 pi
->mclk_ss
= amdgpu_atombios_get_asic_ss_info(adev
, &ss
,
3416 ASIC_INTERNAL_MEMORY_SS
, 0);
3418 if (pi
->sclk_ss
|| pi
->mclk_ss
)
3419 pi
->dynamic_ss
= true;
3421 pi
->dynamic_ss
= false;
3425 static void si_apply_state_adjust_rules(struct amdgpu_device
*adev
,
3426 struct amdgpu_ps
*rps
)
3428 struct si_ps
*ps
= si_get_ps(rps
);
3429 struct amdgpu_clock_and_voltage_limits
*max_limits
;
3430 bool disable_mclk_switching
= false;
3431 bool disable_sclk_switching
= false;
3433 u16 vddc
, vddci
, min_vce_voltage
= 0;
3434 u32 max_sclk_vddc
, max_mclk_vddci
, max_mclk_vddc
;
3435 u32 max_sclk
= 0, max_mclk
= 0;
3438 if (adev
->asic_type
== CHIP_HAINAN
) {
3439 if ((adev
->pdev
->revision
== 0x81) ||
3440 (adev
->pdev
->revision
== 0x83) ||
3441 (adev
->pdev
->revision
== 0xC3) ||
3442 (adev
->pdev
->device
== 0x6664) ||
3443 (adev
->pdev
->device
== 0x6665) ||
3444 (adev
->pdev
->device
== 0x6667)) {
3447 if ((adev
->pdev
->revision
== 0xC3) ||
3448 (adev
->pdev
->device
== 0x6665)) {
3452 } else if (adev
->asic_type
== CHIP_OLAND
) {
3453 if ((adev
->pdev
->revision
== 0xC7) ||
3454 (adev
->pdev
->revision
== 0x80) ||
3455 (adev
->pdev
->revision
== 0x81) ||
3456 (adev
->pdev
->revision
== 0x83) ||
3457 (adev
->pdev
->revision
== 0x87) ||
3458 (adev
->pdev
->device
== 0x6604) ||
3459 (adev
->pdev
->device
== 0x6605)) {
3464 if (rps
->vce_active
) {
3465 rps
->evclk
= adev
->pm
.dpm
.vce_states
[adev
->pm
.dpm
.vce_level
].evclk
;
3466 rps
->ecclk
= adev
->pm
.dpm
.vce_states
[adev
->pm
.dpm
.vce_level
].ecclk
;
3467 si_get_vce_clock_voltage(adev
, rps
->evclk
, rps
->ecclk
,
3474 if ((adev
->pm
.dpm
.new_active_crtc_count
> 1) ||
3475 si_dpm_vblank_too_short(adev
))
3476 disable_mclk_switching
= true;
3478 if (rps
->vclk
|| rps
->dclk
) {
3479 disable_mclk_switching
= true;
3480 disable_sclk_switching
= true;
3483 if (adev
->pm
.ac_power
)
3484 max_limits
= &adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_ac
;
3486 max_limits
= &adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_dc
;
3488 for (i
= ps
->performance_level_count
- 2; i
>= 0; i
--) {
3489 if (ps
->performance_levels
[i
].vddc
> ps
->performance_levels
[i
+1].vddc
)
3490 ps
->performance_levels
[i
].vddc
= ps
->performance_levels
[i
+1].vddc
;
3492 if (adev
->pm
.ac_power
== false) {
3493 for (i
= 0; i
< ps
->performance_level_count
; i
++) {
3494 if (ps
->performance_levels
[i
].mclk
> max_limits
->mclk
)
3495 ps
->performance_levels
[i
].mclk
= max_limits
->mclk
;
3496 if (ps
->performance_levels
[i
].sclk
> max_limits
->sclk
)
3497 ps
->performance_levels
[i
].sclk
= max_limits
->sclk
;
3498 if (ps
->performance_levels
[i
].vddc
> max_limits
->vddc
)
3499 ps
->performance_levels
[i
].vddc
= max_limits
->vddc
;
3500 if (ps
->performance_levels
[i
].vddci
> max_limits
->vddci
)
3501 ps
->performance_levels
[i
].vddci
= max_limits
->vddci
;
3505 /* limit clocks to max supported clocks based on voltage dependency tables */
3506 btc_get_max_clock_from_voltage_dependency_table(&adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
,
3508 btc_get_max_clock_from_voltage_dependency_table(&adev
->pm
.dpm
.dyn_state
.vddci_dependency_on_mclk
,
3510 btc_get_max_clock_from_voltage_dependency_table(&adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_mclk
,
3513 for (i
= 0; i
< ps
->performance_level_count
; i
++) {
3514 if (max_sclk_vddc
) {
3515 if (ps
->performance_levels
[i
].sclk
> max_sclk_vddc
)
3516 ps
->performance_levels
[i
].sclk
= max_sclk_vddc
;
3518 if (max_mclk_vddci
) {
3519 if (ps
->performance_levels
[i
].mclk
> max_mclk_vddci
)
3520 ps
->performance_levels
[i
].mclk
= max_mclk_vddci
;
3522 if (max_mclk_vddc
) {
3523 if (ps
->performance_levels
[i
].mclk
> max_mclk_vddc
)
3524 ps
->performance_levels
[i
].mclk
= max_mclk_vddc
;
3527 if (ps
->performance_levels
[i
].mclk
> max_mclk
)
3528 ps
->performance_levels
[i
].mclk
= max_mclk
;
3531 if (ps
->performance_levels
[i
].sclk
> max_sclk
)
3532 ps
->performance_levels
[i
].sclk
= max_sclk
;
3536 /* XXX validate the min clocks required for display */
3538 if (disable_mclk_switching
) {
3539 mclk
= ps
->performance_levels
[ps
->performance_level_count
- 1].mclk
;
3540 vddci
= ps
->performance_levels
[ps
->performance_level_count
- 1].vddci
;
3542 mclk
= ps
->performance_levels
[0].mclk
;
3543 vddci
= ps
->performance_levels
[0].vddci
;
3546 if (disable_sclk_switching
) {
3547 sclk
= ps
->performance_levels
[ps
->performance_level_count
- 1].sclk
;
3548 vddc
= ps
->performance_levels
[ps
->performance_level_count
- 1].vddc
;
3550 sclk
= ps
->performance_levels
[0].sclk
;
3551 vddc
= ps
->performance_levels
[0].vddc
;
3554 if (rps
->vce_active
) {
3555 if (sclk
< adev
->pm
.dpm
.vce_states
[adev
->pm
.dpm
.vce_level
].sclk
)
3556 sclk
= adev
->pm
.dpm
.vce_states
[adev
->pm
.dpm
.vce_level
].sclk
;
3557 if (mclk
< adev
->pm
.dpm
.vce_states
[adev
->pm
.dpm
.vce_level
].mclk
)
3558 mclk
= adev
->pm
.dpm
.vce_states
[adev
->pm
.dpm
.vce_level
].mclk
;
3561 /* adjusted low state */
3562 ps
->performance_levels
[0].sclk
= sclk
;
3563 ps
->performance_levels
[0].mclk
= mclk
;
3564 ps
->performance_levels
[0].vddc
= vddc
;
3565 ps
->performance_levels
[0].vddci
= vddci
;
3567 if (disable_sclk_switching
) {
3568 sclk
= ps
->performance_levels
[0].sclk
;
3569 for (i
= 1; i
< ps
->performance_level_count
; i
++) {
3570 if (sclk
< ps
->performance_levels
[i
].sclk
)
3571 sclk
= ps
->performance_levels
[i
].sclk
;
3573 for (i
= 0; i
< ps
->performance_level_count
; i
++) {
3574 ps
->performance_levels
[i
].sclk
= sclk
;
3575 ps
->performance_levels
[i
].vddc
= vddc
;
3578 for (i
= 1; i
< ps
->performance_level_count
; i
++) {
3579 if (ps
->performance_levels
[i
].sclk
< ps
->performance_levels
[i
- 1].sclk
)
3580 ps
->performance_levels
[i
].sclk
= ps
->performance_levels
[i
- 1].sclk
;
3581 if (ps
->performance_levels
[i
].vddc
< ps
->performance_levels
[i
- 1].vddc
)
3582 ps
->performance_levels
[i
].vddc
= ps
->performance_levels
[i
- 1].vddc
;
3586 if (disable_mclk_switching
) {
3587 mclk
= ps
->performance_levels
[0].mclk
;
3588 for (i
= 1; i
< ps
->performance_level_count
; i
++) {
3589 if (mclk
< ps
->performance_levels
[i
].mclk
)
3590 mclk
= ps
->performance_levels
[i
].mclk
;
3592 for (i
= 0; i
< ps
->performance_level_count
; i
++) {
3593 ps
->performance_levels
[i
].mclk
= mclk
;
3594 ps
->performance_levels
[i
].vddci
= vddci
;
3597 for (i
= 1; i
< ps
->performance_level_count
; i
++) {
3598 if (ps
->performance_levels
[i
].mclk
< ps
->performance_levels
[i
- 1].mclk
)
3599 ps
->performance_levels
[i
].mclk
= ps
->performance_levels
[i
- 1].mclk
;
3600 if (ps
->performance_levels
[i
].vddci
< ps
->performance_levels
[i
- 1].vddci
)
3601 ps
->performance_levels
[i
].vddci
= ps
->performance_levels
[i
- 1].vddci
;
3605 for (i
= 0; i
< ps
->performance_level_count
; i
++)
3606 btc_adjust_clock_combinations(adev
, max_limits
,
3607 &ps
->performance_levels
[i
]);
3609 for (i
= 0; i
< ps
->performance_level_count
; i
++) {
3610 if (ps
->performance_levels
[i
].vddc
< min_vce_voltage
)
3611 ps
->performance_levels
[i
].vddc
= min_vce_voltage
;
3612 btc_apply_voltage_dependency_rules(&adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
,
3613 ps
->performance_levels
[i
].sclk
,
3614 max_limits
->vddc
, &ps
->performance_levels
[i
].vddc
);
3615 btc_apply_voltage_dependency_rules(&adev
->pm
.dpm
.dyn_state
.vddci_dependency_on_mclk
,
3616 ps
->performance_levels
[i
].mclk
,
3617 max_limits
->vddci
, &ps
->performance_levels
[i
].vddci
);
3618 btc_apply_voltage_dependency_rules(&adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_mclk
,
3619 ps
->performance_levels
[i
].mclk
,
3620 max_limits
->vddc
, &ps
->performance_levels
[i
].vddc
);
3621 btc_apply_voltage_dependency_rules(&adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
,
3622 adev
->clock
.current_dispclk
,
3623 max_limits
->vddc
, &ps
->performance_levels
[i
].vddc
);
3626 for (i
= 0; i
< ps
->performance_level_count
; i
++) {
3627 btc_apply_voltage_delta_rules(adev
,
3628 max_limits
->vddc
, max_limits
->vddci
,
3629 &ps
->performance_levels
[i
].vddc
,
3630 &ps
->performance_levels
[i
].vddci
);
3633 ps
->dc_compatible
= true;
3634 for (i
= 0; i
< ps
->performance_level_count
; i
++) {
3635 if (ps
->performance_levels
[i
].vddc
> adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_dc
.vddc
)
3636 ps
->dc_compatible
= false;
3641 static int si_read_smc_soft_register(struct amdgpu_device
*adev
,
3642 u16 reg_offset
, u32
*value
)
3644 struct si_power_info
*si_pi
= si_get_pi(adev
);
3646 return amdgpu_si_read_smc_sram_dword(adev
,
3647 si_pi
->soft_regs_start
+ reg_offset
, value
,
3652 static int si_write_smc_soft_register(struct amdgpu_device
*adev
,
3653 u16 reg_offset
, u32 value
)
3655 struct si_power_info
*si_pi
= si_get_pi(adev
);
3657 return amdgpu_si_write_smc_sram_dword(adev
,
3658 si_pi
->soft_regs_start
+ reg_offset
,
3659 value
, si_pi
->sram_end
);
3662 static bool si_is_special_1gb_platform(struct amdgpu_device
*adev
)
3665 u32 tmp
, width
, row
, column
, bank
, density
;
3666 bool is_memory_gddr5
, is_special
;
3668 tmp
= RREG32(MC_SEQ_MISC0
);
3669 is_memory_gddr5
= (MC_SEQ_MISC0_GDDR5_VALUE
== ((tmp
& MC_SEQ_MISC0_GDDR5_MASK
) >> MC_SEQ_MISC0_GDDR5_SHIFT
));
3670 is_special
= (MC_SEQ_MISC0_REV_ID_VALUE
== ((tmp
& MC_SEQ_MISC0_REV_ID_MASK
) >> MC_SEQ_MISC0_REV_ID_SHIFT
))
3671 & (MC_SEQ_MISC0_VEN_ID_VALUE
== ((tmp
& MC_SEQ_MISC0_VEN_ID_MASK
) >> MC_SEQ_MISC0_VEN_ID_SHIFT
));
3673 WREG32(MC_SEQ_IO_DEBUG_INDEX
, 0xb);
3674 width
= ((RREG32(MC_SEQ_IO_DEBUG_DATA
) >> 1) & 1) ? 16 : 32;
3676 tmp
= RREG32(MC_ARB_RAMCFG
);
3677 row
= ((tmp
& NOOFROWS_MASK
) >> NOOFROWS_SHIFT
) + 10;
3678 column
= ((tmp
& NOOFCOLS_MASK
) >> NOOFCOLS_SHIFT
) + 8;
3679 bank
= ((tmp
& NOOFBANK_MASK
) >> NOOFBANK_SHIFT
) + 2;
3681 density
= (1 << (row
+ column
- 20 + bank
)) * width
;
3683 if ((adev
->pdev
->device
== 0x6819) &&
3684 is_memory_gddr5
&& is_special
&& (density
== 0x400))
3690 static void si_get_leakage_vddc(struct amdgpu_device
*adev
)
3692 struct si_power_info
*si_pi
= si_get_pi(adev
);
3693 u16 vddc
, count
= 0;
3696 for (i
= 0; i
< SISLANDS_MAX_LEAKAGE_COUNT
; i
++) {
3697 ret
= amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(adev
, &vddc
, SISLANDS_LEAKAGE_INDEX0
+ i
);
3699 if (!ret
&& (vddc
> 0) && (vddc
!= (SISLANDS_LEAKAGE_INDEX0
+ i
))) {
3700 si_pi
->leakage_voltage
.entries
[count
].voltage
= vddc
;
3701 si_pi
->leakage_voltage
.entries
[count
].leakage_index
=
3702 SISLANDS_LEAKAGE_INDEX0
+ i
;
3706 si_pi
->leakage_voltage
.count
= count
;
3709 static int si_get_leakage_voltage_from_leakage_index(struct amdgpu_device
*adev
,
3710 u32 index
, u16
*leakage_voltage
)
3712 struct si_power_info
*si_pi
= si_get_pi(adev
);
3715 if (leakage_voltage
== NULL
)
3718 if ((index
& 0xff00) != 0xff00)
3721 if ((index
& 0xff) > SISLANDS_MAX_LEAKAGE_COUNT
+ 1)
3724 if (index
< SISLANDS_LEAKAGE_INDEX0
)
3727 for (i
= 0; i
< si_pi
->leakage_voltage
.count
; i
++) {
3728 if (si_pi
->leakage_voltage
.entries
[i
].leakage_index
== index
) {
3729 *leakage_voltage
= si_pi
->leakage_voltage
.entries
[i
].voltage
;
3736 static void si_set_dpm_event_sources(struct amdgpu_device
*adev
, u32 sources
)
3738 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
3739 bool want_thermal_protection
;
3740 enum amdgpu_dpm_event_src dpm_event_src
;
3745 want_thermal_protection
= false;
3747 case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL
):
3748 want_thermal_protection
= true;
3749 dpm_event_src
= AMDGPU_DPM_EVENT_SRC_DIGITAL
;
3751 case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
):
3752 want_thermal_protection
= true;
3753 dpm_event_src
= AMDGPU_DPM_EVENT_SRC_EXTERNAL
;
3755 case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
) |
3756 (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL
)):
3757 want_thermal_protection
= true;
3758 dpm_event_src
= AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL
;
3762 if (want_thermal_protection
) {
3763 WREG32_P(CG_THERMAL_CTRL
, DPM_EVENT_SRC(dpm_event_src
), ~DPM_EVENT_SRC_MASK
);
3764 if (pi
->thermal_protection
)
3765 WREG32_P(GENERAL_PWRMGT
, 0, ~THERMAL_PROTECTION_DIS
);
3767 WREG32_P(GENERAL_PWRMGT
, THERMAL_PROTECTION_DIS
, ~THERMAL_PROTECTION_DIS
);
3771 static void si_enable_auto_throttle_source(struct amdgpu_device
*adev
,
3772 enum amdgpu_dpm_auto_throttle_src source
,
3775 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
3778 if (!(pi
->active_auto_throttle_sources
& (1 << source
))) {
3779 pi
->active_auto_throttle_sources
|= 1 << source
;
3780 si_set_dpm_event_sources(adev
, pi
->active_auto_throttle_sources
);
3783 if (pi
->active_auto_throttle_sources
& (1 << source
)) {
3784 pi
->active_auto_throttle_sources
&= ~(1 << source
);
3785 si_set_dpm_event_sources(adev
, pi
->active_auto_throttle_sources
);
3790 static void si_start_dpm(struct amdgpu_device
*adev
)
3792 WREG32_P(GENERAL_PWRMGT
, GLOBAL_PWRMGT_EN
, ~GLOBAL_PWRMGT_EN
);
3795 static void si_stop_dpm(struct amdgpu_device
*adev
)
3797 WREG32_P(GENERAL_PWRMGT
, 0, ~GLOBAL_PWRMGT_EN
);
3800 static void si_enable_sclk_control(struct amdgpu_device
*adev
, bool enable
)
3803 WREG32_P(SCLK_PWRMGT_CNTL
, 0, ~SCLK_PWRMGT_OFF
);
3805 WREG32_P(SCLK_PWRMGT_CNTL
, SCLK_PWRMGT_OFF
, ~SCLK_PWRMGT_OFF
);
3810 static int si_notify_hardware_of_thermal_state(struct amdgpu_device
*adev
,
3815 if (thermal_level
== 0) {
3816 ret
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_EnableThermalInterrupt
);
3817 if (ret
== PPSMC_Result_OK
)
3825 static void si_notify_hardware_vpu_recovery_event(struct amdgpu_device
*adev
)
3827 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen
, true);
3832 static int si_notify_hw_of_powersource(struct amdgpu_device
*adev
, bool ac_power
)
3835 return (amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_RunningOnAC
) == PPSMC_Result_OK
) ?
3842 static PPSMC_Result
si_send_msg_to_smc_with_parameter(struct amdgpu_device
*adev
,
3843 PPSMC_Msg msg
, u32 parameter
)
3845 WREG32(SMC_SCRATCH0
, parameter
);
3846 return amdgpu_si_send_msg_to_smc(adev
, msg
);
3849 static int si_restrict_performance_levels_before_switch(struct amdgpu_device
*adev
)
3851 if (amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_NoForcedLevel
) != PPSMC_Result_OK
)
3854 return (si_send_msg_to_smc_with_parameter(adev
, PPSMC_MSG_SetEnabledLevels
, 1) == PPSMC_Result_OK
) ?
3858 static int si_dpm_force_performance_level(void *handle
,
3859 enum amd_dpm_forced_level level
)
3861 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
3862 struct amdgpu_ps
*rps
= adev
->pm
.dpm
.current_ps
;
3863 struct si_ps
*ps
= si_get_ps(rps
);
3864 u32 levels
= ps
->performance_level_count
;
3866 if (level
== AMD_DPM_FORCED_LEVEL_HIGH
) {
3867 if (si_send_msg_to_smc_with_parameter(adev
, PPSMC_MSG_SetEnabledLevels
, levels
) != PPSMC_Result_OK
)
3870 if (si_send_msg_to_smc_with_parameter(adev
, PPSMC_MSG_SetForcedLevels
, 1) != PPSMC_Result_OK
)
3872 } else if (level
== AMD_DPM_FORCED_LEVEL_LOW
) {
3873 if (si_send_msg_to_smc_with_parameter(adev
, PPSMC_MSG_SetForcedLevels
, 0) != PPSMC_Result_OK
)
3876 if (si_send_msg_to_smc_with_parameter(adev
, PPSMC_MSG_SetEnabledLevels
, 1) != PPSMC_Result_OK
)
3878 } else if (level
== AMD_DPM_FORCED_LEVEL_AUTO
) {
3879 if (si_send_msg_to_smc_with_parameter(adev
, PPSMC_MSG_SetForcedLevels
, 0) != PPSMC_Result_OK
)
3882 if (si_send_msg_to_smc_with_parameter(adev
, PPSMC_MSG_SetEnabledLevels
, levels
) != PPSMC_Result_OK
)
3886 adev
->pm
.dpm
.forced_level
= level
;
3892 static int si_set_boot_state(struct amdgpu_device
*adev
)
3894 return (amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_SwitchToInitialState
) == PPSMC_Result_OK
) ?
3899 static int si_set_sw_state(struct amdgpu_device
*adev
)
3901 return (amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_SwitchToSwState
) == PPSMC_Result_OK
) ?
3905 static int si_halt_smc(struct amdgpu_device
*adev
)
3907 if (amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_Halt
) != PPSMC_Result_OK
)
3910 return (amdgpu_si_wait_for_smc_inactive(adev
) == PPSMC_Result_OK
) ?
3914 static int si_resume_smc(struct amdgpu_device
*adev
)
3916 if (amdgpu_si_send_msg_to_smc(adev
, PPSMC_FlushDataCache
) != PPSMC_Result_OK
)
3919 return (amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_Resume
) == PPSMC_Result_OK
) ?
3923 static void si_dpm_start_smc(struct amdgpu_device
*adev
)
3925 amdgpu_si_program_jump_on_start(adev
);
3926 amdgpu_si_start_smc(adev
);
3927 amdgpu_si_smc_clock(adev
, true);
3930 static void si_dpm_stop_smc(struct amdgpu_device
*adev
)
3932 amdgpu_si_reset_smc(adev
);
3933 amdgpu_si_smc_clock(adev
, false);
3936 static int si_process_firmware_header(struct amdgpu_device
*adev
)
3938 struct si_power_info
*si_pi
= si_get_pi(adev
);
3942 ret
= amdgpu_si_read_smc_sram_dword(adev
,
3943 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION
+
3944 SISLANDS_SMC_FIRMWARE_HEADER_stateTable
,
3945 &tmp
, si_pi
->sram_end
);
3949 si_pi
->state_table_start
= tmp
;
3951 ret
= amdgpu_si_read_smc_sram_dword(adev
,
3952 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION
+
3953 SISLANDS_SMC_FIRMWARE_HEADER_softRegisters
,
3954 &tmp
, si_pi
->sram_end
);
3958 si_pi
->soft_regs_start
= tmp
;
3960 ret
= amdgpu_si_read_smc_sram_dword(adev
,
3961 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION
+
3962 SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable
,
3963 &tmp
, si_pi
->sram_end
);
3967 si_pi
->mc_reg_table_start
= tmp
;
3969 ret
= amdgpu_si_read_smc_sram_dword(adev
,
3970 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION
+
3971 SISLANDS_SMC_FIRMWARE_HEADER_fanTable
,
3972 &tmp
, si_pi
->sram_end
);
3976 si_pi
->fan_table_start
= tmp
;
3978 ret
= amdgpu_si_read_smc_sram_dword(adev
,
3979 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION
+
3980 SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable
,
3981 &tmp
, si_pi
->sram_end
);
3985 si_pi
->arb_table_start
= tmp
;
3987 ret
= amdgpu_si_read_smc_sram_dword(adev
,
3988 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION
+
3989 SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable
,
3990 &tmp
, si_pi
->sram_end
);
3994 si_pi
->cac_table_start
= tmp
;
3996 ret
= amdgpu_si_read_smc_sram_dword(adev
,
3997 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION
+
3998 SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration
,
3999 &tmp
, si_pi
->sram_end
);
4003 si_pi
->dte_table_start
= tmp
;
4005 ret
= amdgpu_si_read_smc_sram_dword(adev
,
4006 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION
+
4007 SISLANDS_SMC_FIRMWARE_HEADER_spllTable
,
4008 &tmp
, si_pi
->sram_end
);
4012 si_pi
->spll_table_start
= tmp
;
4014 ret
= amdgpu_si_read_smc_sram_dword(adev
,
4015 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION
+
4016 SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters
,
4017 &tmp
, si_pi
->sram_end
);
4021 si_pi
->papm_cfg_table_start
= tmp
;
4026 static void si_read_clock_registers(struct amdgpu_device
*adev
)
4028 struct si_power_info
*si_pi
= si_get_pi(adev
);
4030 si_pi
->clock_registers
.cg_spll_func_cntl
= RREG32(CG_SPLL_FUNC_CNTL
);
4031 si_pi
->clock_registers
.cg_spll_func_cntl_2
= RREG32(CG_SPLL_FUNC_CNTL_2
);
4032 si_pi
->clock_registers
.cg_spll_func_cntl_3
= RREG32(CG_SPLL_FUNC_CNTL_3
);
4033 si_pi
->clock_registers
.cg_spll_func_cntl_4
= RREG32(CG_SPLL_FUNC_CNTL_4
);
4034 si_pi
->clock_registers
.cg_spll_spread_spectrum
= RREG32(CG_SPLL_SPREAD_SPECTRUM
);
4035 si_pi
->clock_registers
.cg_spll_spread_spectrum_2
= RREG32(CG_SPLL_SPREAD_SPECTRUM_2
);
4036 si_pi
->clock_registers
.dll_cntl
= RREG32(DLL_CNTL
);
4037 si_pi
->clock_registers
.mclk_pwrmgt_cntl
= RREG32(MCLK_PWRMGT_CNTL
);
4038 si_pi
->clock_registers
.mpll_ad_func_cntl
= RREG32(MPLL_AD_FUNC_CNTL
);
4039 si_pi
->clock_registers
.mpll_dq_func_cntl
= RREG32(MPLL_DQ_FUNC_CNTL
);
4040 si_pi
->clock_registers
.mpll_func_cntl
= RREG32(MPLL_FUNC_CNTL
);
4041 si_pi
->clock_registers
.mpll_func_cntl_1
= RREG32(MPLL_FUNC_CNTL_1
);
4042 si_pi
->clock_registers
.mpll_func_cntl_2
= RREG32(MPLL_FUNC_CNTL_2
);
4043 si_pi
->clock_registers
.mpll_ss1
= RREG32(MPLL_SS1
);
4044 si_pi
->clock_registers
.mpll_ss2
= RREG32(MPLL_SS2
);
4047 static void si_enable_thermal_protection(struct amdgpu_device
*adev
,
4051 WREG32_P(GENERAL_PWRMGT
, 0, ~THERMAL_PROTECTION_DIS
);
4053 WREG32_P(GENERAL_PWRMGT
, THERMAL_PROTECTION_DIS
, ~THERMAL_PROTECTION_DIS
);
4056 static void si_enable_acpi_power_management(struct amdgpu_device
*adev
)
4058 WREG32_P(GENERAL_PWRMGT
, STATIC_PM_EN
, ~STATIC_PM_EN
);
4062 static int si_enter_ulp_state(struct amdgpu_device
*adev
)
4064 WREG32(SMC_MESSAGE_0
, PPSMC_MSG_SwitchToMinimumPower
);
4071 static int si_exit_ulp_state(struct amdgpu_device
*adev
)
4075 WREG32(SMC_MESSAGE_0
, PPSMC_MSG_ResumeFromMinimumPower
);
4079 for (i
= 0; i
< adev
->usec_timeout
; i
++) {
4080 if (RREG32(SMC_RESP_0
) == 1)
4089 static int si_notify_smc_display_change(struct amdgpu_device
*adev
,
4092 PPSMC_Msg msg
= has_display
?
4093 PPSMC_MSG_HasDisplay
: PPSMC_MSG_NoDisplay
;
4095 return (amdgpu_si_send_msg_to_smc(adev
, msg
) == PPSMC_Result_OK
) ?
4099 static void si_program_response_times(struct amdgpu_device
*adev
)
4101 u32 voltage_response_time
, backbias_response_time
, acpi_delay_time
, vbi_time_out
;
4102 u32 vddc_dly
, acpi_dly
, vbi_dly
;
4103 u32 reference_clock
;
4105 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_mvdd_chg_time
, 1);
4107 voltage_response_time
= (u32
)adev
->pm
.dpm
.voltage_response_time
;
4108 backbias_response_time
= (u32
)adev
->pm
.dpm
.backbias_response_time
;
4110 if (voltage_response_time
== 0)
4111 voltage_response_time
= 1000;
4113 acpi_delay_time
= 15000;
4114 vbi_time_out
= 100000;
4116 reference_clock
= amdgpu_asic_get_xclk(adev
);
4118 vddc_dly
= (voltage_response_time
* reference_clock
) / 100;
4119 acpi_dly
= (acpi_delay_time
* reference_clock
) / 100;
4120 vbi_dly
= (vbi_time_out
* reference_clock
) / 100;
4122 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_delay_vreg
, vddc_dly
);
4123 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_delay_acpi
, acpi_dly
);
4124 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_mclk_chg_timeout
, vbi_dly
);
4125 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_mc_block_delay
, 0xAA);
4128 static void si_program_ds_registers(struct amdgpu_device
*adev
)
4130 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
4133 /* DEEP_SLEEP_CLK_SEL field should be 0x10 on tahiti A0 */
4134 if (adev
->asic_type
== CHIP_TAHITI
&& adev
->rev_id
== 0x0)
4139 if (eg_pi
->sclk_deep_sleep
) {
4140 WREG32_P(MISC_CLK_CNTL
, DEEP_SLEEP_CLK_SEL(tmp
), ~DEEP_SLEEP_CLK_SEL_MASK
);
4141 WREG32_P(CG_SPLL_AUTOSCALE_CNTL
, AUTOSCALE_ON_SS_CLEAR
,
4142 ~AUTOSCALE_ON_SS_CLEAR
);
4146 static void si_program_display_gap(struct amdgpu_device
*adev
)
4151 tmp
= RREG32(CG_DISPLAY_GAP_CNTL
) & ~(DISP1_GAP_MASK
| DISP2_GAP_MASK
);
4152 if (adev
->pm
.dpm
.new_active_crtc_count
> 0)
4153 tmp
|= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM
);
4155 tmp
|= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE
);
4157 if (adev
->pm
.dpm
.new_active_crtc_count
> 1)
4158 tmp
|= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM
);
4160 tmp
|= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE
);
4162 WREG32(CG_DISPLAY_GAP_CNTL
, tmp
);
4164 tmp
= RREG32(DCCG_DISP_SLOW_SELECT_REG
);
4165 pipe
= (tmp
& DCCG_DISP1_SLOW_SELECT_MASK
) >> DCCG_DISP1_SLOW_SELECT_SHIFT
;
4167 if ((adev
->pm
.dpm
.new_active_crtc_count
> 0) &&
4168 (!(adev
->pm
.dpm
.new_active_crtcs
& (1 << pipe
)))) {
4169 /* find the first active crtc */
4170 for (i
= 0; i
< adev
->mode_info
.num_crtc
; i
++) {
4171 if (adev
->pm
.dpm
.new_active_crtcs
& (1 << i
))
4174 if (i
== adev
->mode_info
.num_crtc
)
4179 tmp
&= ~DCCG_DISP1_SLOW_SELECT_MASK
;
4180 tmp
|= DCCG_DISP1_SLOW_SELECT(pipe
);
4181 WREG32(DCCG_DISP_SLOW_SELECT_REG
, tmp
);
4184 /* Setting this to false forces the performance state to low if the crtcs are disabled.
4185 * This can be a problem on PowerXpress systems or if you want to use the card
4186 * for offscreen rendering or compute if there are no crtcs enabled.
4188 si_notify_smc_display_change(adev
, adev
->pm
.dpm
.new_active_crtc_count
> 0);
4191 static void si_enable_spread_spectrum(struct amdgpu_device
*adev
, bool enable
)
4193 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
4197 WREG32_P(GENERAL_PWRMGT
, DYN_SPREAD_SPECTRUM_EN
, ~DYN_SPREAD_SPECTRUM_EN
);
4199 WREG32_P(CG_SPLL_SPREAD_SPECTRUM
, 0, ~SSEN
);
4200 WREG32_P(GENERAL_PWRMGT
, 0, ~DYN_SPREAD_SPECTRUM_EN
);
4204 static void si_setup_bsp(struct amdgpu_device
*adev
)
4206 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
4207 u32 xclk
= amdgpu_asic_get_xclk(adev
);
4209 r600_calculate_u_and_p(pi
->asi
,
4215 r600_calculate_u_and_p(pi
->pasi
,
4222 pi
->dsp
= BSP(pi
->bsp
) | BSU(pi
->bsu
);
4223 pi
->psp
= BSP(pi
->pbsp
) | BSU(pi
->pbsu
);
4225 WREG32(CG_BSP
, pi
->dsp
);
4228 static void si_program_git(struct amdgpu_device
*adev
)
4230 WREG32_P(CG_GIT
, CG_GICST(R600_GICST_DFLT
), ~CG_GICST_MASK
);
4233 static void si_program_tp(struct amdgpu_device
*adev
)
4236 enum r600_td td
= R600_TD_DFLT
;
4238 for (i
= 0; i
< R600_PM_NUMBER_OF_TC
; i
++)
4239 WREG32(CG_FFCT_0
+ i
, (UTC_0(r600_utc
[i
]) | DTC_0(r600_dtc
[i
])));
4241 if (td
== R600_TD_AUTO
)
4242 WREG32_P(SCLK_PWRMGT_CNTL
, 0, ~FIR_FORCE_TREND_SEL
);
4244 WREG32_P(SCLK_PWRMGT_CNTL
, FIR_FORCE_TREND_SEL
, ~FIR_FORCE_TREND_SEL
);
4246 if (td
== R600_TD_UP
)
4247 WREG32_P(SCLK_PWRMGT_CNTL
, 0, ~FIR_TREND_MODE
);
4249 if (td
== R600_TD_DOWN
)
4250 WREG32_P(SCLK_PWRMGT_CNTL
, FIR_TREND_MODE
, ~FIR_TREND_MODE
);
4253 static void si_program_tpp(struct amdgpu_device
*adev
)
4255 WREG32(CG_TPC
, R600_TPC_DFLT
);
4258 static void si_program_sstp(struct amdgpu_device
*adev
)
4260 WREG32(CG_SSP
, (SSTU(R600_SSTU_DFLT
) | SST(R600_SST_DFLT
)));
4263 static void si_enable_display_gap(struct amdgpu_device
*adev
)
4265 u32 tmp
= RREG32(CG_DISPLAY_GAP_CNTL
);
4267 tmp
&= ~(DISP1_GAP_MASK
| DISP2_GAP_MASK
);
4268 tmp
|= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE
) |
4269 DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE
));
4271 tmp
&= ~(DISP1_GAP_MCHG_MASK
| DISP2_GAP_MCHG_MASK
);
4272 tmp
|= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK
) |
4273 DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE
));
4274 WREG32(CG_DISPLAY_GAP_CNTL
, tmp
);
4277 static void si_program_vc(struct amdgpu_device
*adev
)
4279 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
4281 WREG32(CG_FTV
, pi
->vrc
);
4284 static void si_clear_vc(struct amdgpu_device
*adev
)
/*
 * Map a DDR3 memory clock (10 kHz units) to the 4-bit MC parameter index:
 * 0 below 100 MHz, 0x0f at/above 800 MHz, linear in 50 MHz steps between.
 */
static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
{
	u8 mc_para_index;

	if (memory_clock < 10000)
		mc_para_index = 0;
	else if (memory_clock >= 80000)
		mc_para_index = 0x0f;
	else
		mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
	return mc_para_index;
}
/*
 * Map a GDDR5 memory clock (10 kHz units) to the 4-bit MC parameter index.
 * Strobe mode uses the 125-475 MHz window (25 MHz steps); otherwise the
 * 650-1350 MHz window (50 MHz steps).  Out-of-range clamps to 0 or 0x0f.
 */
static u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
{
	u8 mc_para_index;

	if (strobe_mode) {
		if (memory_clock < 12500)
			mc_para_index = 0x00;
		else if (memory_clock > 47500)
			mc_para_index = 0x0f;
		else
			mc_para_index = (u8)((memory_clock - 10000) / 2500);
	} else {
		if (memory_clock < 65000)
			mc_para_index = 0x00;
		else if (memory_clock > 135000)
			mc_para_index = 0x0f;
		else
			mc_para_index = (u8)((memory_clock - 60000) / 5000);
	}
	return mc_para_index;
}
4324 static u8
si_get_strobe_mode_settings(struct amdgpu_device
*adev
, u32 mclk
)
4326 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
4327 bool strobe_mode
= false;
4330 if (mclk
<= pi
->mclk_strobe_mode_threshold
)
4333 if (adev
->gmc
.vram_type
== AMDGPU_VRAM_TYPE_GDDR5
)
4334 result
= si_get_mclk_frequency_ratio(mclk
, strobe_mode
);
4336 result
= si_get_ddr3_mclk_frequency_ratio(mclk
);
4339 result
|= SISLANDS_SMC_STROBE_ENABLE
;
4344 static int si_upload_firmware(struct amdgpu_device
*adev
)
4346 struct si_power_info
*si_pi
= si_get_pi(adev
);
4348 amdgpu_si_reset_smc(adev
);
4349 amdgpu_si_smc_clock(adev
, false);
4351 return amdgpu_si_load_smc_ucode(adev
, si_pi
->sram_end
);
4354 static bool si_validate_phase_shedding_tables(struct amdgpu_device
*adev
,
4355 const struct atom_voltage_table
*table
,
4356 const struct amdgpu_phase_shedding_limits_table
*limits
)
4358 u32 data
, num_bits
, num_levels
;
4360 if ((table
== NULL
) || (limits
== NULL
))
4363 data
= table
->mask_low
;
4365 num_bits
= hweight32(data
);
4370 num_levels
= (1 << num_bits
);
4372 if (table
->count
!= num_levels
)
4375 if (limits
->count
!= (num_levels
- 1))
4381 static void si_trim_voltage_table_to_fit_state_table(struct amdgpu_device
*adev
,
4382 u32 max_voltage_steps
,
4383 struct atom_voltage_table
*voltage_table
)
4385 unsigned int i
, diff
;
4387 if (voltage_table
->count
<= max_voltage_steps
)
4390 diff
= voltage_table
->count
- max_voltage_steps
;
4392 for (i
= 0; i
< max_voltage_steps
; i
++)
4393 voltage_table
->entries
[i
] = voltage_table
->entries
[i
+ diff
];
4395 voltage_table
->count
= max_voltage_steps
;
4398 static int si_get_svi2_voltage_table(struct amdgpu_device
*adev
,
4399 struct amdgpu_clock_voltage_dependency_table
*voltage_dependency_table
,
4400 struct atom_voltage_table
*voltage_table
)
4404 if (voltage_dependency_table
== NULL
)
4407 voltage_table
->mask_low
= 0;
4408 voltage_table
->phase_delay
= 0;
4410 voltage_table
->count
= voltage_dependency_table
->count
;
4411 for (i
= 0; i
< voltage_table
->count
; i
++) {
4412 voltage_table
->entries
[i
].value
= voltage_dependency_table
->entries
[i
].v
;
4413 voltage_table
->entries
[i
].smio_low
= 0;
4419 static int si_construct_voltage_tables(struct amdgpu_device
*adev
)
4421 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
4422 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
4423 struct si_power_info
*si_pi
= si_get_pi(adev
);
4426 if (pi
->voltage_control
) {
4427 ret
= amdgpu_atombios_get_voltage_table(adev
, VOLTAGE_TYPE_VDDC
,
4428 VOLTAGE_OBJ_GPIO_LUT
, &eg_pi
->vddc_voltage_table
);
4432 if (eg_pi
->vddc_voltage_table
.count
> SISLANDS_MAX_NO_VREG_STEPS
)
4433 si_trim_voltage_table_to_fit_state_table(adev
,
4434 SISLANDS_MAX_NO_VREG_STEPS
,
4435 &eg_pi
->vddc_voltage_table
);
4436 } else if (si_pi
->voltage_control_svi2
) {
4437 ret
= si_get_svi2_voltage_table(adev
,
4438 &adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_mclk
,
4439 &eg_pi
->vddc_voltage_table
);
4446 if (eg_pi
->vddci_control
) {
4447 ret
= amdgpu_atombios_get_voltage_table(adev
, VOLTAGE_TYPE_VDDCI
,
4448 VOLTAGE_OBJ_GPIO_LUT
, &eg_pi
->vddci_voltage_table
);
4452 if (eg_pi
->vddci_voltage_table
.count
> SISLANDS_MAX_NO_VREG_STEPS
)
4453 si_trim_voltage_table_to_fit_state_table(adev
,
4454 SISLANDS_MAX_NO_VREG_STEPS
,
4455 &eg_pi
->vddci_voltage_table
);
4457 if (si_pi
->vddci_control_svi2
) {
4458 ret
= si_get_svi2_voltage_table(adev
,
4459 &adev
->pm
.dpm
.dyn_state
.vddci_dependency_on_mclk
,
4460 &eg_pi
->vddci_voltage_table
);
4465 if (pi
->mvdd_control
) {
4466 ret
= amdgpu_atombios_get_voltage_table(adev
, VOLTAGE_TYPE_MVDDC
,
4467 VOLTAGE_OBJ_GPIO_LUT
, &si_pi
->mvdd_voltage_table
);
4470 pi
->mvdd_control
= false;
4474 if (si_pi
->mvdd_voltage_table
.count
== 0) {
4475 pi
->mvdd_control
= false;
4479 if (si_pi
->mvdd_voltage_table
.count
> SISLANDS_MAX_NO_VREG_STEPS
)
4480 si_trim_voltage_table_to_fit_state_table(adev
,
4481 SISLANDS_MAX_NO_VREG_STEPS
,
4482 &si_pi
->mvdd_voltage_table
);
4485 if (si_pi
->vddc_phase_shed_control
) {
4486 ret
= amdgpu_atombios_get_voltage_table(adev
, VOLTAGE_TYPE_VDDC
,
4487 VOLTAGE_OBJ_PHASE_LUT
, &si_pi
->vddc_phase_shed_table
);
4489 si_pi
->vddc_phase_shed_control
= false;
4491 if ((si_pi
->vddc_phase_shed_table
.count
== 0) ||
4492 (si_pi
->vddc_phase_shed_table
.count
> SISLANDS_MAX_NO_VREG_STEPS
))
4493 si_pi
->vddc_phase_shed_control
= false;
4499 static void si_populate_smc_voltage_table(struct amdgpu_device
*adev
,
4500 const struct atom_voltage_table
*voltage_table
,
4501 SISLANDS_SMC_STATETABLE
*table
)
4505 for (i
= 0; i
< voltage_table
->count
; i
++)
4506 table
->lowSMIO
[i
] |= cpu_to_be32(voltage_table
->entries
[i
].smio_low
);
4509 static int si_populate_smc_voltage_tables(struct amdgpu_device
*adev
,
4510 SISLANDS_SMC_STATETABLE
*table
)
4512 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
4513 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
4514 struct si_power_info
*si_pi
= si_get_pi(adev
);
4517 if (si_pi
->voltage_control_svi2
) {
4518 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc
,
4519 si_pi
->svc_gpio_id
);
4520 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd
,
4521 si_pi
->svd_gpio_id
);
4522 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_svi_rework_plat_type
,
4525 if (eg_pi
->vddc_voltage_table
.count
) {
4526 si_populate_smc_voltage_table(adev
, &eg_pi
->vddc_voltage_table
, table
);
4527 table
->voltageMaskTable
.lowMask
[SISLANDS_SMC_VOLTAGEMASK_VDDC
] =
4528 cpu_to_be32(eg_pi
->vddc_voltage_table
.mask_low
);
4530 for (i
= 0; i
< eg_pi
->vddc_voltage_table
.count
; i
++) {
4531 if (pi
->max_vddc_in_table
<= eg_pi
->vddc_voltage_table
.entries
[i
].value
) {
4532 table
->maxVDDCIndexInPPTable
= i
;
4538 if (eg_pi
->vddci_voltage_table
.count
) {
4539 si_populate_smc_voltage_table(adev
, &eg_pi
->vddci_voltage_table
, table
);
4541 table
->voltageMaskTable
.lowMask
[SISLANDS_SMC_VOLTAGEMASK_VDDCI
] =
4542 cpu_to_be32(eg_pi
->vddci_voltage_table
.mask_low
);
4546 if (si_pi
->mvdd_voltage_table
.count
) {
4547 si_populate_smc_voltage_table(adev
, &si_pi
->mvdd_voltage_table
, table
);
4549 table
->voltageMaskTable
.lowMask
[SISLANDS_SMC_VOLTAGEMASK_MVDD
] =
4550 cpu_to_be32(si_pi
->mvdd_voltage_table
.mask_low
);
4553 if (si_pi
->vddc_phase_shed_control
) {
4554 if (si_validate_phase_shedding_tables(adev
, &si_pi
->vddc_phase_shed_table
,
4555 &adev
->pm
.dpm
.dyn_state
.phase_shedding_limits_table
)) {
4556 si_populate_smc_voltage_table(adev
, &si_pi
->vddc_phase_shed_table
, table
);
4558 table
->phaseMaskTable
.lowMask
[SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING
] =
4559 cpu_to_be32(si_pi
->vddc_phase_shed_table
.mask_low
);
4561 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_phase_shedding_delay
,
4562 (u32
)si_pi
->vddc_phase_shed_table
.phase_delay
);
4564 si_pi
->vddc_phase_shed_control
= false;
4572 static int si_populate_voltage_value(struct amdgpu_device
*adev
,
4573 const struct atom_voltage_table
*table
,
4574 u16 value
, SISLANDS_SMC_VOLTAGE_VALUE
*voltage
)
4578 for (i
= 0; i
< table
->count
; i
++) {
4579 if (value
<= table
->entries
[i
].value
) {
4580 voltage
->index
= (u8
)i
;
4581 voltage
->value
= cpu_to_be16(table
->entries
[i
].value
);
4586 if (i
>= table
->count
)
4592 static int si_populate_mvdd_value(struct amdgpu_device
*adev
, u32 mclk
,
4593 SISLANDS_SMC_VOLTAGE_VALUE
*voltage
)
4595 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
4596 struct si_power_info
*si_pi
= si_get_pi(adev
);
4598 if (pi
->mvdd_control
) {
4599 if (mclk
<= pi
->mvdd_split_frequency
)
4602 voltage
->index
= (u8
)(si_pi
->mvdd_voltage_table
.count
) - 1;
4604 voltage
->value
= cpu_to_be16(si_pi
->mvdd_voltage_table
.entries
[voltage
->index
].value
);
4609 static int si_get_std_voltage_value(struct amdgpu_device
*adev
,
4610 SISLANDS_SMC_VOLTAGE_VALUE
*voltage
,
4614 bool voltage_found
= false;
4615 *std_voltage
= be16_to_cpu(voltage
->value
);
4617 if (adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.entries
) {
4618 if (adev
->pm
.dpm
.platform_caps
& ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE
) {
4619 if (adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
.entries
== NULL
)
4622 for (v_index
= 0; (u32
)v_index
< adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
.count
; v_index
++) {
4623 if (be16_to_cpu(voltage
->value
) ==
4624 (u16
)adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
.entries
[v_index
].v
) {
4625 voltage_found
= true;
4626 if ((u32
)v_index
< adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.count
)
4628 adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.entries
[v_index
].vddc
;
4631 adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.entries
[adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.count
-1].vddc
;
4636 if (!voltage_found
) {
4637 for (v_index
= 0; (u32
)v_index
< adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
.count
; v_index
++) {
4638 if (be16_to_cpu(voltage
->value
) <=
4639 (u16
)adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
.entries
[v_index
].v
) {
4640 voltage_found
= true;
4641 if ((u32
)v_index
< adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.count
)
4643 adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.entries
[v_index
].vddc
;
4646 adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.entries
[adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.count
-1].vddc
;
4652 if ((u32
)voltage
->index
< adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.count
)
4653 *std_voltage
= adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.entries
[voltage
->index
].vddc
;
4660 static int si_populate_std_voltage_value(struct amdgpu_device
*adev
,
4661 u16 value
, u8 index
,
4662 SISLANDS_SMC_VOLTAGE_VALUE
*voltage
)
4664 voltage
->index
= index
;
4665 voltage
->value
= cpu_to_be16(value
);
4670 static int si_populate_phase_shedding_value(struct amdgpu_device
*adev
,
4671 const struct amdgpu_phase_shedding_limits_table
*limits
,
4672 u16 voltage
, u32 sclk
, u32 mclk
,
4673 SISLANDS_SMC_VOLTAGE_VALUE
*smc_voltage
)
4677 for (i
= 0; i
< limits
->count
; i
++) {
4678 if ((voltage
<= limits
->entries
[i
].voltage
) &&
4679 (sclk
<= limits
->entries
[i
].sclk
) &&
4680 (mclk
<= limits
->entries
[i
].mclk
))
4684 smc_voltage
->phase_settings
= (u8
)i
;
4689 static int si_init_arb_table_index(struct amdgpu_device
*adev
)
4691 struct si_power_info
*si_pi
= si_get_pi(adev
);
4695 ret
= amdgpu_si_read_smc_sram_dword(adev
, si_pi
->arb_table_start
,
4696 &tmp
, si_pi
->sram_end
);
4701 tmp
|= MC_CG_ARB_FREQ_F1
<< 24;
4703 return amdgpu_si_write_smc_sram_dword(adev
, si_pi
->arb_table_start
,
4704 tmp
, si_pi
->sram_end
);
4707 static int si_initial_switch_from_arb_f0_to_f1(struct amdgpu_device
*adev
)
4709 return ni_copy_and_switch_arb_sets(adev
, MC_CG_ARB_FREQ_F0
, MC_CG_ARB_FREQ_F1
);
4712 static int si_reset_to_default(struct amdgpu_device
*adev
)
4714 return (amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_ResetToDefaults
) == PPSMC_Result_OK
) ?
4718 static int si_force_switch_to_arb_f0(struct amdgpu_device
*adev
)
4720 struct si_power_info
*si_pi
= si_get_pi(adev
);
4724 ret
= amdgpu_si_read_smc_sram_dword(adev
, si_pi
->arb_table_start
,
4725 &tmp
, si_pi
->sram_end
);
4729 tmp
= (tmp
>> 24) & 0xff;
4731 if (tmp
== MC_CG_ARB_FREQ_F0
)
4734 return ni_copy_and_switch_arb_sets(adev
, tmp
, MC_CG_ARB_FREQ_F0
);
4737 static u32
si_calculate_memory_refresh_rate(struct amdgpu_device
*adev
,
4741 u32 dram_refresh_rate
;
4742 u32 mc_arb_rfsh_rate
;
4743 u32 tmp
= (RREG32(MC_ARB_RAMCFG
) & NOOFROWS_MASK
) >> NOOFROWS_SHIFT
;
4748 dram_rows
= 1 << (tmp
+ 10);
4750 dram_refresh_rate
= 1 << ((RREG32(MC_SEQ_MISC0
) & 0x3) + 3);
4751 mc_arb_rfsh_rate
= ((engine_clock
* 10) * dram_refresh_rate
/ dram_rows
- 32) / 64;
4753 return mc_arb_rfsh_rate
;
4756 static int si_populate_memory_timing_parameters(struct amdgpu_device
*adev
,
4757 struct rv7xx_pl
*pl
,
4758 SMC_SIslands_MCArbDramTimingRegisterSet
*arb_regs
)
4764 arb_regs
->mc_arb_rfsh_rate
=
4765 (u8
)si_calculate_memory_refresh_rate(adev
, pl
->sclk
);
4767 amdgpu_atombios_set_engine_dram_timings(adev
,
4771 dram_timing
= RREG32(MC_ARB_DRAM_TIMING
);
4772 dram_timing2
= RREG32(MC_ARB_DRAM_TIMING2
);
4773 burst_time
= RREG32(MC_ARB_BURST_TIME
) & STATE0_MASK
;
4775 arb_regs
->mc_arb_dram_timing
= cpu_to_be32(dram_timing
);
4776 arb_regs
->mc_arb_dram_timing2
= cpu_to_be32(dram_timing2
);
4777 arb_regs
->mc_arb_burst_time
= (u8
)burst_time
;
4782 static int si_do_program_memory_timing_parameters(struct amdgpu_device
*adev
,
4783 struct amdgpu_ps
*amdgpu_state
,
4784 unsigned int first_arb_set
)
4786 struct si_power_info
*si_pi
= si_get_pi(adev
);
4787 struct si_ps
*state
= si_get_ps(amdgpu_state
);
4788 SMC_SIslands_MCArbDramTimingRegisterSet arb_regs
= { 0 };
4791 for (i
= 0; i
< state
->performance_level_count
; i
++) {
4792 ret
= si_populate_memory_timing_parameters(adev
, &state
->performance_levels
[i
], &arb_regs
);
4795 ret
= amdgpu_si_copy_bytes_to_smc(adev
,
4796 si_pi
->arb_table_start
+
4797 offsetof(SMC_SIslands_MCArbDramTimingRegisters
, data
) +
4798 sizeof(SMC_SIslands_MCArbDramTimingRegisterSet
) * (first_arb_set
+ i
),
4800 sizeof(SMC_SIslands_MCArbDramTimingRegisterSet
),
4809 static int si_program_memory_timing_parameters(struct amdgpu_device
*adev
,
4810 struct amdgpu_ps
*amdgpu_new_state
)
4812 return si_do_program_memory_timing_parameters(adev
, amdgpu_new_state
,
4813 SISLANDS_DRIVER_STATE_ARB_INDEX
);
4816 static int si_populate_initial_mvdd_value(struct amdgpu_device
*adev
,
4817 struct SISLANDS_SMC_VOLTAGE_VALUE
*voltage
)
4819 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
4820 struct si_power_info
*si_pi
= si_get_pi(adev
);
4822 if (pi
->mvdd_control
)
4823 return si_populate_voltage_value(adev
, &si_pi
->mvdd_voltage_table
,
4824 si_pi
->mvdd_bootup_value
, voltage
);
4829 static int si_populate_smc_initial_state(struct amdgpu_device
*adev
,
4830 struct amdgpu_ps
*amdgpu_initial_state
,
4831 SISLANDS_SMC_STATETABLE
*table
)
4833 struct si_ps
*initial_state
= si_get_ps(amdgpu_initial_state
);
4834 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
4835 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
4836 struct si_power_info
*si_pi
= si_get_pi(adev
);
4840 table
->initialState
.levels
[0].mclk
.vDLL_CNTL
=
4841 cpu_to_be32(si_pi
->clock_registers
.dll_cntl
);
4842 table
->initialState
.levels
[0].mclk
.vMCLK_PWRMGT_CNTL
=
4843 cpu_to_be32(si_pi
->clock_registers
.mclk_pwrmgt_cntl
);
4844 table
->initialState
.levels
[0].mclk
.vMPLL_AD_FUNC_CNTL
=
4845 cpu_to_be32(si_pi
->clock_registers
.mpll_ad_func_cntl
);
4846 table
->initialState
.levels
[0].mclk
.vMPLL_DQ_FUNC_CNTL
=
4847 cpu_to_be32(si_pi
->clock_registers
.mpll_dq_func_cntl
);
4848 table
->initialState
.levels
[0].mclk
.vMPLL_FUNC_CNTL
=
4849 cpu_to_be32(si_pi
->clock_registers
.mpll_func_cntl
);
4850 table
->initialState
.levels
[0].mclk
.vMPLL_FUNC_CNTL_1
=
4851 cpu_to_be32(si_pi
->clock_registers
.mpll_func_cntl_1
);
4852 table
->initialState
.levels
[0].mclk
.vMPLL_FUNC_CNTL_2
=
4853 cpu_to_be32(si_pi
->clock_registers
.mpll_func_cntl_2
);
4854 table
->initialState
.levels
[0].mclk
.vMPLL_SS
=
4855 cpu_to_be32(si_pi
->clock_registers
.mpll_ss1
);
4856 table
->initialState
.levels
[0].mclk
.vMPLL_SS2
=
4857 cpu_to_be32(si_pi
->clock_registers
.mpll_ss2
);
4859 table
->initialState
.levels
[0].mclk
.mclk_value
=
4860 cpu_to_be32(initial_state
->performance_levels
[0].mclk
);
4862 table
->initialState
.levels
[0].sclk
.vCG_SPLL_FUNC_CNTL
=
4863 cpu_to_be32(si_pi
->clock_registers
.cg_spll_func_cntl
);
4864 table
->initialState
.levels
[0].sclk
.vCG_SPLL_FUNC_CNTL_2
=
4865 cpu_to_be32(si_pi
->clock_registers
.cg_spll_func_cntl_2
);
4866 table
->initialState
.levels
[0].sclk
.vCG_SPLL_FUNC_CNTL_3
=
4867 cpu_to_be32(si_pi
->clock_registers
.cg_spll_func_cntl_3
);
4868 table
->initialState
.levels
[0].sclk
.vCG_SPLL_FUNC_CNTL_4
=
4869 cpu_to_be32(si_pi
->clock_registers
.cg_spll_func_cntl_4
);
4870 table
->initialState
.levels
[0].sclk
.vCG_SPLL_SPREAD_SPECTRUM
=
4871 cpu_to_be32(si_pi
->clock_registers
.cg_spll_spread_spectrum
);
4872 table
->initialState
.levels
[0].sclk
.vCG_SPLL_SPREAD_SPECTRUM_2
=
4873 cpu_to_be32(si_pi
->clock_registers
.cg_spll_spread_spectrum_2
);
4875 table
->initialState
.levels
[0].sclk
.sclk_value
=
4876 cpu_to_be32(initial_state
->performance_levels
[0].sclk
);
4878 table
->initialState
.levels
[0].arbRefreshState
=
4879 SISLANDS_INITIAL_STATE_ARB_INDEX
;
4881 table
->initialState
.levels
[0].ACIndex
= 0;
4883 ret
= si_populate_voltage_value(adev
, &eg_pi
->vddc_voltage_table
,
4884 initial_state
->performance_levels
[0].vddc
,
4885 &table
->initialState
.levels
[0].vddc
);
4890 ret
= si_get_std_voltage_value(adev
,
4891 &table
->initialState
.levels
[0].vddc
,
4894 si_populate_std_voltage_value(adev
, std_vddc
,
4895 table
->initialState
.levels
[0].vddc
.index
,
4896 &table
->initialState
.levels
[0].std_vddc
);
4899 if (eg_pi
->vddci_control
)
4900 si_populate_voltage_value(adev
,
4901 &eg_pi
->vddci_voltage_table
,
4902 initial_state
->performance_levels
[0].vddci
,
4903 &table
->initialState
.levels
[0].vddci
);
4905 if (si_pi
->vddc_phase_shed_control
)
4906 si_populate_phase_shedding_value(adev
,
4907 &adev
->pm
.dpm
.dyn_state
.phase_shedding_limits_table
,
4908 initial_state
->performance_levels
[0].vddc
,
4909 initial_state
->performance_levels
[0].sclk
,
4910 initial_state
->performance_levels
[0].mclk
,
4911 &table
->initialState
.levels
[0].vddc
);
4913 si_populate_initial_mvdd_value(adev
, &table
->initialState
.levels
[0].mvdd
);
4915 reg
= CG_R(0xffff) | CG_L(0);
4916 table
->initialState
.levels
[0].aT
= cpu_to_be32(reg
);
4917 table
->initialState
.levels
[0].bSP
= cpu_to_be32(pi
->dsp
);
4918 table
->initialState
.levels
[0].gen2PCIE
= (u8
)si_pi
->boot_pcie_gen
;
4920 if (adev
->gmc
.vram_type
== AMDGPU_VRAM_TYPE_GDDR5
) {
4921 table
->initialState
.levels
[0].strobeMode
=
4922 si_get_strobe_mode_settings(adev
,
4923 initial_state
->performance_levels
[0].mclk
);
4925 if (initial_state
->performance_levels
[0].mclk
> pi
->mclk_edc_enable_threshold
)
4926 table
->initialState
.levels
[0].mcFlags
= SISLANDS_SMC_MC_EDC_RD_FLAG
| SISLANDS_SMC_MC_EDC_WR_FLAG
;
4928 table
->initialState
.levels
[0].mcFlags
= 0;
4931 table
->initialState
.levelCount
= 1;
4933 table
->initialState
.flags
|= PPSMC_SWSTATE_FLAG_DC
;
4935 table
->initialState
.levels
[0].dpm2
.MaxPS
= 0;
4936 table
->initialState
.levels
[0].dpm2
.NearTDPDec
= 0;
4937 table
->initialState
.levels
[0].dpm2
.AboveSafeInc
= 0;
4938 table
->initialState
.levels
[0].dpm2
.BelowSafeInc
= 0;
4939 table
->initialState
.levels
[0].dpm2
.PwrEfficiencyRatio
= 0;
4941 reg
= MIN_POWER_MASK
| MAX_POWER_MASK
;
4942 table
->initialState
.levels
[0].SQPowerThrottle
= cpu_to_be32(reg
);
4944 reg
= MAX_POWER_DELTA_MASK
| STI_SIZE_MASK
| LTI_RATIO_MASK
;
4945 table
->initialState
.levels
[0].SQPowerThrottle_2
= cpu_to_be32(reg
);
4950 static int si_populate_smc_acpi_state(struct amdgpu_device
*adev
,
4951 SISLANDS_SMC_STATETABLE
*table
)
4953 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
4954 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
4955 struct si_power_info
*si_pi
= si_get_pi(adev
);
4956 u32 spll_func_cntl
= si_pi
->clock_registers
.cg_spll_func_cntl
;
4957 u32 spll_func_cntl_2
= si_pi
->clock_registers
.cg_spll_func_cntl_2
;
4958 u32 spll_func_cntl_3
= si_pi
->clock_registers
.cg_spll_func_cntl_3
;
4959 u32 spll_func_cntl_4
= si_pi
->clock_registers
.cg_spll_func_cntl_4
;
4960 u32 dll_cntl
= si_pi
->clock_registers
.dll_cntl
;
4961 u32 mclk_pwrmgt_cntl
= si_pi
->clock_registers
.mclk_pwrmgt_cntl
;
4962 u32 mpll_ad_func_cntl
= si_pi
->clock_registers
.mpll_ad_func_cntl
;
4963 u32 mpll_dq_func_cntl
= si_pi
->clock_registers
.mpll_dq_func_cntl
;
4964 u32 mpll_func_cntl
= si_pi
->clock_registers
.mpll_func_cntl
;
4965 u32 mpll_func_cntl_1
= si_pi
->clock_registers
.mpll_func_cntl_1
;
4966 u32 mpll_func_cntl_2
= si_pi
->clock_registers
.mpll_func_cntl_2
;
4970 table
->ACPIState
= table
->initialState
;
4972 table
->ACPIState
.flags
&= ~PPSMC_SWSTATE_FLAG_DC
;
4974 if (pi
->acpi_vddc
) {
4975 ret
= si_populate_voltage_value(adev
, &eg_pi
->vddc_voltage_table
,
4976 pi
->acpi_vddc
, &table
->ACPIState
.levels
[0].vddc
);
4980 ret
= si_get_std_voltage_value(adev
,
4981 &table
->ACPIState
.levels
[0].vddc
, &std_vddc
);
4983 si_populate_std_voltage_value(adev
, std_vddc
,
4984 table
->ACPIState
.levels
[0].vddc
.index
,
4985 &table
->ACPIState
.levels
[0].std_vddc
);
4987 table
->ACPIState
.levels
[0].gen2PCIE
= si_pi
->acpi_pcie_gen
;
4989 if (si_pi
->vddc_phase_shed_control
) {
4990 si_populate_phase_shedding_value(adev
,
4991 &adev
->pm
.dpm
.dyn_state
.phase_shedding_limits_table
,
4995 &table
->ACPIState
.levels
[0].vddc
);
4998 ret
= si_populate_voltage_value(adev
, &eg_pi
->vddc_voltage_table
,
4999 pi
->min_vddc_in_table
, &table
->ACPIState
.levels
[0].vddc
);
5003 ret
= si_get_std_voltage_value(adev
,
5004 &table
->ACPIState
.levels
[0].vddc
, &std_vddc
);
5007 si_populate_std_voltage_value(adev
, std_vddc
,
5008 table
->ACPIState
.levels
[0].vddc
.index
,
5009 &table
->ACPIState
.levels
[0].std_vddc
);
5011 table
->ACPIState
.levels
[0].gen2PCIE
=
5012 (u8
)amdgpu_get_pcie_gen_support(adev
,
5013 si_pi
->sys_pcie_mask
,
5014 si_pi
->boot_pcie_gen
,
5017 if (si_pi
->vddc_phase_shed_control
)
5018 si_populate_phase_shedding_value(adev
,
5019 &adev
->pm
.dpm
.dyn_state
.phase_shedding_limits_table
,
5020 pi
->min_vddc_in_table
,
5023 &table
->ACPIState
.levels
[0].vddc
);
5026 if (pi
->acpi_vddc
) {
5027 if (eg_pi
->acpi_vddci
)
5028 si_populate_voltage_value(adev
, &eg_pi
->vddci_voltage_table
,
5030 &table
->ACPIState
.levels
[0].vddci
);
5033 mclk_pwrmgt_cntl
|= MRDCK0_RESET
| MRDCK1_RESET
;
5034 mclk_pwrmgt_cntl
&= ~(MRDCK0_PDNB
| MRDCK1_PDNB
);
5036 dll_cntl
&= ~(MRDCK0_BYPASS
| MRDCK1_BYPASS
);
5038 spll_func_cntl_2
&= ~SCLK_MUX_SEL_MASK
;
5039 spll_func_cntl_2
|= SCLK_MUX_SEL(4);
5041 table
->ACPIState
.levels
[0].mclk
.vDLL_CNTL
=
5042 cpu_to_be32(dll_cntl
);
5043 table
->ACPIState
.levels
[0].mclk
.vMCLK_PWRMGT_CNTL
=
5044 cpu_to_be32(mclk_pwrmgt_cntl
);
5045 table
->ACPIState
.levels
[0].mclk
.vMPLL_AD_FUNC_CNTL
=
5046 cpu_to_be32(mpll_ad_func_cntl
);
5047 table
->ACPIState
.levels
[0].mclk
.vMPLL_DQ_FUNC_CNTL
=
5048 cpu_to_be32(mpll_dq_func_cntl
);
5049 table
->ACPIState
.levels
[0].mclk
.vMPLL_FUNC_CNTL
=
5050 cpu_to_be32(mpll_func_cntl
);
5051 table
->ACPIState
.levels
[0].mclk
.vMPLL_FUNC_CNTL_1
=
5052 cpu_to_be32(mpll_func_cntl_1
);
5053 table
->ACPIState
.levels
[0].mclk
.vMPLL_FUNC_CNTL_2
=
5054 cpu_to_be32(mpll_func_cntl_2
);
5055 table
->ACPIState
.levels
[0].mclk
.vMPLL_SS
=
5056 cpu_to_be32(si_pi
->clock_registers
.mpll_ss1
);
5057 table
->ACPIState
.levels
[0].mclk
.vMPLL_SS2
=
5058 cpu_to_be32(si_pi
->clock_registers
.mpll_ss2
);
5060 table
->ACPIState
.levels
[0].sclk
.vCG_SPLL_FUNC_CNTL
=
5061 cpu_to_be32(spll_func_cntl
);
5062 table
->ACPIState
.levels
[0].sclk
.vCG_SPLL_FUNC_CNTL_2
=
5063 cpu_to_be32(spll_func_cntl_2
);
5064 table
->ACPIState
.levels
[0].sclk
.vCG_SPLL_FUNC_CNTL_3
=
5065 cpu_to_be32(spll_func_cntl_3
);
5066 table
->ACPIState
.levels
[0].sclk
.vCG_SPLL_FUNC_CNTL_4
=
5067 cpu_to_be32(spll_func_cntl_4
);
5069 table
->ACPIState
.levels
[0].mclk
.mclk_value
= 0;
5070 table
->ACPIState
.levels
[0].sclk
.sclk_value
= 0;
5072 si_populate_mvdd_value(adev
, 0, &table
->ACPIState
.levels
[0].mvdd
);
5074 if (eg_pi
->dynamic_ac_timing
)
5075 table
->ACPIState
.levels
[0].ACIndex
= 0;
5077 table
->ACPIState
.levels
[0].dpm2
.MaxPS
= 0;
5078 table
->ACPIState
.levels
[0].dpm2
.NearTDPDec
= 0;
5079 table
->ACPIState
.levels
[0].dpm2
.AboveSafeInc
= 0;
5080 table
->ACPIState
.levels
[0].dpm2
.BelowSafeInc
= 0;
5081 table
->ACPIState
.levels
[0].dpm2
.PwrEfficiencyRatio
= 0;
5083 reg
= MIN_POWER_MASK
| MAX_POWER_MASK
;
5084 table
->ACPIState
.levels
[0].SQPowerThrottle
= cpu_to_be32(reg
);
5086 reg
= MAX_POWER_DELTA_MASK
| STI_SIZE_MASK
| LTI_RATIO_MASK
;
5087 table
->ACPIState
.levels
[0].SQPowerThrottle_2
= cpu_to_be32(reg
);
5092 static int si_populate_ulv_state(struct amdgpu_device
*adev
,
5093 SISLANDS_SMC_SWSTATE
*state
)
5095 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
5096 struct si_power_info
*si_pi
= si_get_pi(adev
);
5097 struct si_ulv_param
*ulv
= &si_pi
->ulv
;
5098 u32 sclk_in_sr
= 1350; /* ??? */
5101 ret
= si_convert_power_level_to_smc(adev
, &ulv
->pl
,
5104 if (eg_pi
->sclk_deep_sleep
) {
5105 if (sclk_in_sr
<= SCLK_MIN_DEEPSLEEP_FREQ
)
5106 state
->levels
[0].stateFlags
|= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS
;
5108 state
->levels
[0].stateFlags
|= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE
;
5110 if (ulv
->one_pcie_lane_in_ulv
)
5111 state
->flags
|= PPSMC_SWSTATE_FLAG_PCIE_X1
;
5112 state
->levels
[0].arbRefreshState
= (u8
)(SISLANDS_ULV_STATE_ARB_INDEX
);
5113 state
->levels
[0].ACIndex
= 1;
5114 state
->levels
[0].std_vddc
= state
->levels
[0].vddc
;
5115 state
->levelCount
= 1;
5117 state
->flags
|= PPSMC_SWSTATE_FLAG_DC
;
5123 static int si_program_ulv_memory_timing_parameters(struct amdgpu_device
*adev
)
5125 struct si_power_info
*si_pi
= si_get_pi(adev
);
5126 struct si_ulv_param
*ulv
= &si_pi
->ulv
;
5127 SMC_SIslands_MCArbDramTimingRegisterSet arb_regs
= { 0 };
5130 ret
= si_populate_memory_timing_parameters(adev
, &ulv
->pl
,
5135 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_ulv_volt_change_delay
,
5136 ulv
->volt_change_delay
);
5138 ret
= amdgpu_si_copy_bytes_to_smc(adev
,
5139 si_pi
->arb_table_start
+
5140 offsetof(SMC_SIslands_MCArbDramTimingRegisters
, data
) +
5141 sizeof(SMC_SIslands_MCArbDramTimingRegisterSet
) * SISLANDS_ULV_STATE_ARB_INDEX
,
5143 sizeof(SMC_SIslands_MCArbDramTimingRegisterSet
),
5149 static void si_get_mvdd_configuration(struct amdgpu_device
*adev
)
5151 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
5153 pi
->mvdd_split_frequency
= 30000;
5156 static int si_init_smc_table(struct amdgpu_device
*adev
)
5158 struct si_power_info
*si_pi
= si_get_pi(adev
);
5159 struct amdgpu_ps
*amdgpu_boot_state
= adev
->pm
.dpm
.boot_ps
;
5160 const struct si_ulv_param
*ulv
= &si_pi
->ulv
;
5161 SISLANDS_SMC_STATETABLE
*table
= &si_pi
->smc_statetable
;
5166 si_populate_smc_voltage_tables(adev
, table
);
5168 switch (adev
->pm
.int_thermal_type
) {
5169 case THERMAL_TYPE_SI
:
5170 case THERMAL_TYPE_EMC2103_WITH_INTERNAL
:
5171 table
->thermalProtectType
= PPSMC_THERMAL_PROTECT_TYPE_INTERNAL
;
5173 case THERMAL_TYPE_NONE
:
5174 table
->thermalProtectType
= PPSMC_THERMAL_PROTECT_TYPE_NONE
;
5177 table
->thermalProtectType
= PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL
;
5181 if (adev
->pm
.dpm
.platform_caps
& ATOM_PP_PLATFORM_CAP_HARDWAREDC
)
5182 table
->systemFlags
|= PPSMC_SYSTEMFLAG_GPIO_DC
;
5184 if (adev
->pm
.dpm
.platform_caps
& ATOM_PP_PLATFORM_CAP_REGULATOR_HOT
) {
5185 if ((adev
->pdev
->device
!= 0x6818) && (adev
->pdev
->device
!= 0x6819))
5186 table
->systemFlags
|= PPSMC_SYSTEMFLAG_REGULATOR_HOT
;
5189 if (adev
->pm
.dpm
.platform_caps
& ATOM_PP_PLATFORM_CAP_STEPVDDC
)
5190 table
->systemFlags
|= PPSMC_SYSTEMFLAG_STEPVDDC
;
5192 if (adev
->gmc
.vram_type
== AMDGPU_VRAM_TYPE_GDDR5
)
5193 table
->systemFlags
|= PPSMC_SYSTEMFLAG_GDDR5
;
5195 if (adev
->pm
.dpm
.platform_caps
& ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY
)
5196 table
->extraFlags
|= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH
;
5198 if (adev
->pm
.dpm
.platform_caps
& ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE
) {
5199 table
->systemFlags
|= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO
;
5200 vr_hot_gpio
= adev
->pm
.dpm
.backbias_response_time
;
5201 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_vr_hot_gpio
,
5205 ret
= si_populate_smc_initial_state(adev
, amdgpu_boot_state
, table
);
5209 ret
= si_populate_smc_acpi_state(adev
, table
);
5213 table
->driverState
= table
->initialState
;
5215 ret
= si_do_program_memory_timing_parameters(adev
, amdgpu_boot_state
,
5216 SISLANDS_INITIAL_STATE_ARB_INDEX
);
5220 if (ulv
->supported
&& ulv
->pl
.vddc
) {
5221 ret
= si_populate_ulv_state(adev
, &table
->ULVState
);
5225 ret
= si_program_ulv_memory_timing_parameters(adev
);
5229 WREG32(CG_ULV_CONTROL
, ulv
->cg_ulv_control
);
5230 WREG32(CG_ULV_PARAMETER
, ulv
->cg_ulv_parameter
);
5232 lane_width
= amdgpu_get_pcie_lanes(adev
);
5233 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width
, lane_width
);
5235 table
->ULVState
= table
->initialState
;
5238 return amdgpu_si_copy_bytes_to_smc(adev
, si_pi
->state_table_start
,
5239 (u8
*)table
, sizeof(SISLANDS_SMC_STATETABLE
),
5243 static int si_calculate_sclk_params(struct amdgpu_device
*adev
,
5245 SISLANDS_SMC_SCLK_VALUE
*sclk
)
5247 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
5248 struct si_power_info
*si_pi
= si_get_pi(adev
);
5249 struct atom_clock_dividers dividers
;
5250 u32 spll_func_cntl
= si_pi
->clock_registers
.cg_spll_func_cntl
;
5251 u32 spll_func_cntl_2
= si_pi
->clock_registers
.cg_spll_func_cntl_2
;
5252 u32 spll_func_cntl_3
= si_pi
->clock_registers
.cg_spll_func_cntl_3
;
5253 u32 spll_func_cntl_4
= si_pi
->clock_registers
.cg_spll_func_cntl_4
;
5254 u32 cg_spll_spread_spectrum
= si_pi
->clock_registers
.cg_spll_spread_spectrum
;
5255 u32 cg_spll_spread_spectrum_2
= si_pi
->clock_registers
.cg_spll_spread_spectrum_2
;
5257 u32 reference_clock
= adev
->clock
.spll
.reference_freq
;
5258 u32 reference_divider
;
5262 ret
= amdgpu_atombios_get_clock_dividers(adev
, COMPUTE_ENGINE_PLL_PARAM
,
5263 engine_clock
, false, ÷rs
);
5267 reference_divider
= 1 + dividers
.ref_div
;
5269 tmp
= (u64
) engine_clock
* reference_divider
* dividers
.post_div
* 16384;
5270 do_div(tmp
, reference_clock
);
5273 spll_func_cntl
&= ~(SPLL_PDIV_A_MASK
| SPLL_REF_DIV_MASK
);
5274 spll_func_cntl
|= SPLL_REF_DIV(dividers
.ref_div
);
5275 spll_func_cntl
|= SPLL_PDIV_A(dividers
.post_div
);
5277 spll_func_cntl_2
&= ~SCLK_MUX_SEL_MASK
;
5278 spll_func_cntl_2
|= SCLK_MUX_SEL(2);
5280 spll_func_cntl_3
&= ~SPLL_FB_DIV_MASK
;
5281 spll_func_cntl_3
|= SPLL_FB_DIV(fbdiv
);
5282 spll_func_cntl_3
|= SPLL_DITHEN
;
5285 struct amdgpu_atom_ss ss
;
5286 u32 vco_freq
= engine_clock
* dividers
.post_div
;
5288 if (amdgpu_atombios_get_asic_ss_info(adev
, &ss
,
5289 ASIC_INTERNAL_ENGINE_SS
, vco_freq
)) {
5290 u32 clk_s
= reference_clock
* 5 / (reference_divider
* ss
.rate
);
5291 u32 clk_v
= 4 * ss
.percentage
* fbdiv
/ (clk_s
* 10000);
5293 cg_spll_spread_spectrum
&= ~CLK_S_MASK
;
5294 cg_spll_spread_spectrum
|= CLK_S(clk_s
);
5295 cg_spll_spread_spectrum
|= SSEN
;
5297 cg_spll_spread_spectrum_2
&= ~CLK_V_MASK
;
5298 cg_spll_spread_spectrum_2
|= CLK_V(clk_v
);
5302 sclk
->sclk_value
= engine_clock
;
5303 sclk
->vCG_SPLL_FUNC_CNTL
= spll_func_cntl
;
5304 sclk
->vCG_SPLL_FUNC_CNTL_2
= spll_func_cntl_2
;
5305 sclk
->vCG_SPLL_FUNC_CNTL_3
= spll_func_cntl_3
;
5306 sclk
->vCG_SPLL_FUNC_CNTL_4
= spll_func_cntl_4
;
5307 sclk
->vCG_SPLL_SPREAD_SPECTRUM
= cg_spll_spread_spectrum
;
5308 sclk
->vCG_SPLL_SPREAD_SPECTRUM_2
= cg_spll_spread_spectrum_2
;
5313 static int si_populate_sclk_value(struct amdgpu_device
*adev
,
5315 SISLANDS_SMC_SCLK_VALUE
*sclk
)
5317 SISLANDS_SMC_SCLK_VALUE sclk_tmp
;
5320 ret
= si_calculate_sclk_params(adev
, engine_clock
, &sclk_tmp
);
5322 sclk
->sclk_value
= cpu_to_be32(sclk_tmp
.sclk_value
);
5323 sclk
->vCG_SPLL_FUNC_CNTL
= cpu_to_be32(sclk_tmp
.vCG_SPLL_FUNC_CNTL
);
5324 sclk
->vCG_SPLL_FUNC_CNTL_2
= cpu_to_be32(sclk_tmp
.vCG_SPLL_FUNC_CNTL_2
);
5325 sclk
->vCG_SPLL_FUNC_CNTL_3
= cpu_to_be32(sclk_tmp
.vCG_SPLL_FUNC_CNTL_3
);
5326 sclk
->vCG_SPLL_FUNC_CNTL_4
= cpu_to_be32(sclk_tmp
.vCG_SPLL_FUNC_CNTL_4
);
5327 sclk
->vCG_SPLL_SPREAD_SPECTRUM
= cpu_to_be32(sclk_tmp
.vCG_SPLL_SPREAD_SPECTRUM
);
5328 sclk
->vCG_SPLL_SPREAD_SPECTRUM_2
= cpu_to_be32(sclk_tmp
.vCG_SPLL_SPREAD_SPECTRUM_2
);
5334 static int si_populate_mclk_value(struct amdgpu_device
*adev
,
5337 SISLANDS_SMC_MCLK_VALUE
*mclk
,
5341 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
5342 struct si_power_info
*si_pi
= si_get_pi(adev
);
5343 u32 dll_cntl
= si_pi
->clock_registers
.dll_cntl
;
5344 u32 mclk_pwrmgt_cntl
= si_pi
->clock_registers
.mclk_pwrmgt_cntl
;
5345 u32 mpll_ad_func_cntl
= si_pi
->clock_registers
.mpll_ad_func_cntl
;
5346 u32 mpll_dq_func_cntl
= si_pi
->clock_registers
.mpll_dq_func_cntl
;
5347 u32 mpll_func_cntl
= si_pi
->clock_registers
.mpll_func_cntl
;
5348 u32 mpll_func_cntl_1
= si_pi
->clock_registers
.mpll_func_cntl_1
;
5349 u32 mpll_func_cntl_2
= si_pi
->clock_registers
.mpll_func_cntl_2
;
5350 u32 mpll_ss1
= si_pi
->clock_registers
.mpll_ss1
;
5351 u32 mpll_ss2
= si_pi
->clock_registers
.mpll_ss2
;
5352 struct atom_mpll_param mpll_param
;
5355 ret
= amdgpu_atombios_get_memory_pll_dividers(adev
, memory_clock
, strobe_mode
, &mpll_param
);
5359 mpll_func_cntl
&= ~BWCTRL_MASK
;
5360 mpll_func_cntl
|= BWCTRL(mpll_param
.bwcntl
);
5362 mpll_func_cntl_1
&= ~(CLKF_MASK
| CLKFRAC_MASK
| VCO_MODE_MASK
);
5363 mpll_func_cntl_1
|= CLKF(mpll_param
.clkf
) |
5364 CLKFRAC(mpll_param
.clkfrac
) | VCO_MODE(mpll_param
.vco_mode
);
5366 mpll_ad_func_cntl
&= ~YCLK_POST_DIV_MASK
;
5367 mpll_ad_func_cntl
|= YCLK_POST_DIV(mpll_param
.post_div
);
5369 if (adev
->gmc
.vram_type
== AMDGPU_VRAM_TYPE_GDDR5
) {
5370 mpll_dq_func_cntl
&= ~(YCLK_SEL_MASK
| YCLK_POST_DIV_MASK
);
5371 mpll_dq_func_cntl
|= YCLK_SEL(mpll_param
.yclk_sel
) |
5372 YCLK_POST_DIV(mpll_param
.post_div
);
5376 struct amdgpu_atom_ss ss
;
5379 u32 reference_clock
= adev
->clock
.mpll
.reference_freq
;
5381 if (adev
->gmc
.vram_type
== AMDGPU_VRAM_TYPE_GDDR5
)
5382 freq_nom
= memory_clock
* 4;
5384 freq_nom
= memory_clock
* 2;
5386 tmp
= freq_nom
/ reference_clock
;
5388 if (amdgpu_atombios_get_asic_ss_info(adev
, &ss
,
5389 ASIC_INTERNAL_MEMORY_SS
, freq_nom
)) {
5390 u32 clks
= reference_clock
* 5 / ss
.rate
;
5391 u32 clkv
= (u32
)((((131 * ss
.percentage
* ss
.rate
) / 100) * tmp
) / freq_nom
);
5393 mpll_ss1
&= ~CLKV_MASK
;
5394 mpll_ss1
|= CLKV(clkv
);
5396 mpll_ss2
&= ~CLKS_MASK
;
5397 mpll_ss2
|= CLKS(clks
);
5401 mclk_pwrmgt_cntl
&= ~DLL_SPEED_MASK
;
5402 mclk_pwrmgt_cntl
|= DLL_SPEED(mpll_param
.dll_speed
);
5405 mclk_pwrmgt_cntl
|= MRDCK0_PDNB
| MRDCK1_PDNB
;
5407 mclk_pwrmgt_cntl
&= ~(MRDCK0_PDNB
| MRDCK1_PDNB
);
5409 mclk
->mclk_value
= cpu_to_be32(memory_clock
);
5410 mclk
->vMPLL_FUNC_CNTL
= cpu_to_be32(mpll_func_cntl
);
5411 mclk
->vMPLL_FUNC_CNTL_1
= cpu_to_be32(mpll_func_cntl_1
);
5412 mclk
->vMPLL_FUNC_CNTL_2
= cpu_to_be32(mpll_func_cntl_2
);
5413 mclk
->vMPLL_AD_FUNC_CNTL
= cpu_to_be32(mpll_ad_func_cntl
);
5414 mclk
->vMPLL_DQ_FUNC_CNTL
= cpu_to_be32(mpll_dq_func_cntl
);
5415 mclk
->vMCLK_PWRMGT_CNTL
= cpu_to_be32(mclk_pwrmgt_cntl
);
5416 mclk
->vDLL_CNTL
= cpu_to_be32(dll_cntl
);
5417 mclk
->vMPLL_SS
= cpu_to_be32(mpll_ss1
);
5418 mclk
->vMPLL_SS2
= cpu_to_be32(mpll_ss2
);
5423 static void si_populate_smc_sp(struct amdgpu_device
*adev
,
5424 struct amdgpu_ps
*amdgpu_state
,
5425 SISLANDS_SMC_SWSTATE
*smc_state
)
5427 struct si_ps
*ps
= si_get_ps(amdgpu_state
);
5428 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
5431 for (i
= 0; i
< ps
->performance_level_count
- 1; i
++)
5432 smc_state
->levels
[i
].bSP
= cpu_to_be32(pi
->dsp
);
5434 smc_state
->levels
[ps
->performance_level_count
- 1].bSP
=
5435 cpu_to_be32(pi
->psp
);
5438 static int si_convert_power_level_to_smc(struct amdgpu_device
*adev
,
5439 struct rv7xx_pl
*pl
,
5440 SISLANDS_SMC_HW_PERFORMANCE_LEVEL
*level
)
5442 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
5443 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
5444 struct si_power_info
*si_pi
= si_get_pi(adev
);
5448 bool gmc_pg
= false;
5450 if (eg_pi
->pcie_performance_request
&&
5451 (si_pi
->force_pcie_gen
!= AMDGPU_PCIE_GEN_INVALID
))
5452 level
->gen2PCIE
= (u8
)si_pi
->force_pcie_gen
;
5454 level
->gen2PCIE
= (u8
)pl
->pcie_gen
;
5456 ret
= si_populate_sclk_value(adev
, pl
->sclk
, &level
->sclk
);
5462 if (pi
->mclk_stutter_mode_threshold
&&
5463 (pl
->mclk
<= pi
->mclk_stutter_mode_threshold
) &&
5464 !eg_pi
->uvd_enabled
&&
5465 (RREG32(DPG_PIPE_STUTTER_CONTROL
) & STUTTER_ENABLE
) &&
5466 (adev
->pm
.dpm
.new_active_crtc_count
<= 2)) {
5467 level
->mcFlags
|= SISLANDS_SMC_MC_STUTTER_EN
;
5470 level
->mcFlags
|= SISLANDS_SMC_MC_PG_EN
;
5473 if (adev
->gmc
.vram_type
== AMDGPU_VRAM_TYPE_GDDR5
) {
5474 if (pl
->mclk
> pi
->mclk_edc_enable_threshold
)
5475 level
->mcFlags
|= SISLANDS_SMC_MC_EDC_RD_FLAG
;
5477 if (pl
->mclk
> eg_pi
->mclk_edc_wr_enable_threshold
)
5478 level
->mcFlags
|= SISLANDS_SMC_MC_EDC_WR_FLAG
;
5480 level
->strobeMode
= si_get_strobe_mode_settings(adev
, pl
->mclk
);
5482 if (level
->strobeMode
& SISLANDS_SMC_STROBE_ENABLE
) {
5483 if (si_get_mclk_frequency_ratio(pl
->mclk
, true) >=
5484 ((RREG32(MC_SEQ_MISC7
) >> 16) & 0xf))
5485 dll_state_on
= ((RREG32(MC_SEQ_MISC5
) >> 1) & 0x1) ? true : false;
5487 dll_state_on
= ((RREG32(MC_SEQ_MISC6
) >> 1) & 0x1) ? true : false;
5489 dll_state_on
= false;
5492 level
->strobeMode
= si_get_strobe_mode_settings(adev
,
5495 dll_state_on
= ((RREG32(MC_SEQ_MISC5
) >> 1) & 0x1) ? true : false;
5498 ret
= si_populate_mclk_value(adev
,
5502 (level
->strobeMode
& SISLANDS_SMC_STROBE_ENABLE
) != 0, dll_state_on
);
5506 ret
= si_populate_voltage_value(adev
,
5507 &eg_pi
->vddc_voltage_table
,
5508 pl
->vddc
, &level
->vddc
);
5513 ret
= si_get_std_voltage_value(adev
, &level
->vddc
, &std_vddc
);
5517 ret
= si_populate_std_voltage_value(adev
, std_vddc
,
5518 level
->vddc
.index
, &level
->std_vddc
);
5522 if (eg_pi
->vddci_control
) {
5523 ret
= si_populate_voltage_value(adev
, &eg_pi
->vddci_voltage_table
,
5524 pl
->vddci
, &level
->vddci
);
5529 if (si_pi
->vddc_phase_shed_control
) {
5530 ret
= si_populate_phase_shedding_value(adev
,
5531 &adev
->pm
.dpm
.dyn_state
.phase_shedding_limits_table
,
5540 level
->MaxPoweredUpCU
= si_pi
->max_cu
;
5542 ret
= si_populate_mvdd_value(adev
, pl
->mclk
, &level
->mvdd
);
5547 static int si_populate_smc_t(struct amdgpu_device
*adev
,
5548 struct amdgpu_ps
*amdgpu_state
,
5549 SISLANDS_SMC_SWSTATE
*smc_state
)
5551 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
5552 struct si_ps
*state
= si_get_ps(amdgpu_state
);
5558 if (state
->performance_level_count
>= 9)
5561 if (state
->performance_level_count
< 2) {
5562 a_t
= CG_R(0xffff) | CG_L(0);
5563 smc_state
->levels
[0].aT
= cpu_to_be32(a_t
);
5567 smc_state
->levels
[0].aT
= cpu_to_be32(0);
5569 for (i
= 0; i
<= state
->performance_level_count
- 2; i
++) {
5570 ret
= r600_calculate_at(
5571 (50 / SISLANDS_MAX_HARDWARE_POWERLEVELS
) * 100 * (i
+ 1),
5573 state
->performance_levels
[i
+ 1].sclk
,
5574 state
->performance_levels
[i
].sclk
,
5579 t_h
= (i
+ 1) * 1000 - 50 * R600_AH_DFLT
;
5580 t_l
= (i
+ 1) * 1000 + 50 * R600_AH_DFLT
;
5583 a_t
= be32_to_cpu(smc_state
->levels
[i
].aT
) & ~CG_R_MASK
;
5584 a_t
|= CG_R(t_l
* pi
->bsp
/ 20000);
5585 smc_state
->levels
[i
].aT
= cpu_to_be32(a_t
);
5587 high_bsp
= (i
== state
->performance_level_count
- 2) ?
5589 a_t
= CG_R(0xffff) | CG_L(t_h
* high_bsp
/ 20000);
5590 smc_state
->levels
[i
+ 1].aT
= cpu_to_be32(a_t
);
5596 static int si_disable_ulv(struct amdgpu_device
*adev
)
5598 struct si_power_info
*si_pi
= si_get_pi(adev
);
5599 struct si_ulv_param
*ulv
= &si_pi
->ulv
;
5602 return (amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_DisableULV
) == PPSMC_Result_OK
) ?
5608 static bool si_is_state_ulv_compatible(struct amdgpu_device
*adev
,
5609 struct amdgpu_ps
*amdgpu_state
)
5611 const struct si_power_info
*si_pi
= si_get_pi(adev
);
5612 const struct si_ulv_param
*ulv
= &si_pi
->ulv
;
5613 const struct si_ps
*state
= si_get_ps(amdgpu_state
);
5616 if (state
->performance_levels
[0].mclk
!= ulv
->pl
.mclk
)
5619 /* XXX validate against display requirements! */
5621 for (i
= 0; i
< adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.count
; i
++) {
5622 if (adev
->clock
.current_dispclk
<=
5623 adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
[i
].clk
) {
5625 adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
[i
].v
)
5630 if ((amdgpu_state
->vclk
!= 0) || (amdgpu_state
->dclk
!= 0))
5636 static int si_set_power_state_conditionally_enable_ulv(struct amdgpu_device
*adev
,
5637 struct amdgpu_ps
*amdgpu_new_state
)
5639 const struct si_power_info
*si_pi
= si_get_pi(adev
);
5640 const struct si_ulv_param
*ulv
= &si_pi
->ulv
;
5642 if (ulv
->supported
) {
5643 if (si_is_state_ulv_compatible(adev
, amdgpu_new_state
))
5644 return (amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_EnableULV
) == PPSMC_Result_OK
) ?
5650 static int si_convert_power_state_to_smc(struct amdgpu_device
*adev
,
5651 struct amdgpu_ps
*amdgpu_state
,
5652 SISLANDS_SMC_SWSTATE
*smc_state
)
5654 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
5655 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
5656 struct si_power_info
*si_pi
= si_get_pi(adev
);
5657 struct si_ps
*state
= si_get_ps(amdgpu_state
);
5660 u32 sclk_in_sr
= 1350; /* ??? */
5662 if (state
->performance_level_count
> SISLANDS_MAX_HARDWARE_POWERLEVELS
)
5665 threshold
= state
->performance_levels
[state
->performance_level_count
-1].sclk
* 100 / 100;
5667 if (amdgpu_state
->vclk
&& amdgpu_state
->dclk
) {
5668 eg_pi
->uvd_enabled
= true;
5669 if (eg_pi
->smu_uvd_hs
)
5670 smc_state
->flags
|= PPSMC_SWSTATE_FLAG_UVD
;
5672 eg_pi
->uvd_enabled
= false;
5675 if (state
->dc_compatible
)
5676 smc_state
->flags
|= PPSMC_SWSTATE_FLAG_DC
;
5678 smc_state
->levelCount
= 0;
5679 for (i
= 0; i
< state
->performance_level_count
; i
++) {
5680 if (eg_pi
->sclk_deep_sleep
) {
5681 if ((i
== 0) || si_pi
->sclk_deep_sleep_above_low
) {
5682 if (sclk_in_sr
<= SCLK_MIN_DEEPSLEEP_FREQ
)
5683 smc_state
->levels
[i
].stateFlags
|= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS
;
5685 smc_state
->levels
[i
].stateFlags
|= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE
;
5689 ret
= si_convert_power_level_to_smc(adev
, &state
->performance_levels
[i
],
5690 &smc_state
->levels
[i
]);
5691 smc_state
->levels
[i
].arbRefreshState
=
5692 (u8
)(SISLANDS_DRIVER_STATE_ARB_INDEX
+ i
);
5697 if (ni_pi
->enable_power_containment
)
5698 smc_state
->levels
[i
].displayWatermark
=
5699 (state
->performance_levels
[i
].sclk
< threshold
) ?
5700 PPSMC_DISPLAY_WATERMARK_LOW
: PPSMC_DISPLAY_WATERMARK_HIGH
;
5702 smc_state
->levels
[i
].displayWatermark
= (i
< 2) ?
5703 PPSMC_DISPLAY_WATERMARK_LOW
: PPSMC_DISPLAY_WATERMARK_HIGH
;
5705 if (eg_pi
->dynamic_ac_timing
)
5706 smc_state
->levels
[i
].ACIndex
= SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT
+ i
;
5708 smc_state
->levels
[i
].ACIndex
= 0;
5710 smc_state
->levelCount
++;
5713 si_write_smc_soft_register(adev
,
5714 SI_SMC_SOFT_REGISTER_watermark_threshold
,
5717 si_populate_smc_sp(adev
, amdgpu_state
, smc_state
);
5719 ret
= si_populate_power_containment_values(adev
, amdgpu_state
, smc_state
);
5721 ni_pi
->enable_power_containment
= false;
5723 ret
= si_populate_sq_ramping_values(adev
, amdgpu_state
, smc_state
);
5725 ni_pi
->enable_sq_ramping
= false;
5727 return si_populate_smc_t(adev
, amdgpu_state
, smc_state
);
5730 static int si_upload_sw_state(struct amdgpu_device
*adev
,
5731 struct amdgpu_ps
*amdgpu_new_state
)
5733 struct si_power_info
*si_pi
= si_get_pi(adev
);
5734 struct si_ps
*new_state
= si_get_ps(amdgpu_new_state
);
5736 u32 address
= si_pi
->state_table_start
+
5737 offsetof(SISLANDS_SMC_STATETABLE
, driverState
);
5738 u32 state_size
= sizeof(SISLANDS_SMC_SWSTATE
) +
5739 ((new_state
->performance_level_count
- 1) *
5740 sizeof(SISLANDS_SMC_HW_PERFORMANCE_LEVEL
));
5741 SISLANDS_SMC_SWSTATE
*smc_state
= &si_pi
->smc_statetable
.driverState
;
5743 memset(smc_state
, 0, state_size
);
5745 ret
= si_convert_power_state_to_smc(adev
, amdgpu_new_state
, smc_state
);
5749 return amdgpu_si_copy_bytes_to_smc(adev
, address
, (u8
*)smc_state
,
5750 state_size
, si_pi
->sram_end
);
5753 static int si_upload_ulv_state(struct amdgpu_device
*adev
)
5755 struct si_power_info
*si_pi
= si_get_pi(adev
);
5756 struct si_ulv_param
*ulv
= &si_pi
->ulv
;
5759 if (ulv
->supported
&& ulv
->pl
.vddc
) {
5760 u32 address
= si_pi
->state_table_start
+
5761 offsetof(SISLANDS_SMC_STATETABLE
, ULVState
);
5762 SISLANDS_SMC_SWSTATE
*smc_state
= &si_pi
->smc_statetable
.ULVState
;
5763 u32 state_size
= sizeof(SISLANDS_SMC_SWSTATE
);
5765 memset(smc_state
, 0, state_size
);
5767 ret
= si_populate_ulv_state(adev
, smc_state
);
5769 ret
= amdgpu_si_copy_bytes_to_smc(adev
, address
, (u8
*)smc_state
,
5770 state_size
, si_pi
->sram_end
);
5776 static int si_upload_smc_data(struct amdgpu_device
*adev
)
5778 struct amdgpu_crtc
*amdgpu_crtc
= NULL
;
5781 if (adev
->pm
.dpm
.new_active_crtc_count
== 0)
5784 for (i
= 0; i
< adev
->mode_info
.num_crtc
; i
++) {
5785 if (adev
->pm
.dpm
.new_active_crtcs
& (1 << i
)) {
5786 amdgpu_crtc
= adev
->mode_info
.crtcs
[i
];
5791 if (amdgpu_crtc
== NULL
)
5794 if (amdgpu_crtc
->line_time
<= 0)
5797 if (si_write_smc_soft_register(adev
,
5798 SI_SMC_SOFT_REGISTER_crtc_index
,
5799 amdgpu_crtc
->crtc_id
) != PPSMC_Result_OK
)
5802 if (si_write_smc_soft_register(adev
,
5803 SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min
,
5804 amdgpu_crtc
->wm_high
/ amdgpu_crtc
->line_time
) != PPSMC_Result_OK
)
5807 if (si_write_smc_soft_register(adev
,
5808 SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max
,
5809 amdgpu_crtc
->wm_low
/ amdgpu_crtc
->line_time
) != PPSMC_Result_OK
)
5815 static int si_set_mc_special_registers(struct amdgpu_device
*adev
,
5816 struct si_mc_reg_table
*table
)
5821 for (i
= 0, j
= table
->last
; i
< table
->last
; i
++) {
5822 if (j
>= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE
)
5824 switch (table
->mc_reg_address
[i
].s1
) {
5826 temp_reg
= RREG32(MC_PMG_CMD_EMRS
);
5827 table
->mc_reg_address
[j
].s1
= MC_PMG_CMD_EMRS
;
5828 table
->mc_reg_address
[j
].s0
= MC_SEQ_PMG_CMD_EMRS_LP
;
5829 for (k
= 0; k
< table
->num_entries
; k
++)
5830 table
->mc_reg_table_entry
[k
].mc_data
[j
] =
5831 ((temp_reg
& 0xffff0000)) |
5832 ((table
->mc_reg_table_entry
[k
].mc_data
[i
] & 0xffff0000) >> 16);
5835 if (j
>= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE
)
5837 temp_reg
= RREG32(MC_PMG_CMD_MRS
);
5838 table
->mc_reg_address
[j
].s1
= MC_PMG_CMD_MRS
;
5839 table
->mc_reg_address
[j
].s0
= MC_SEQ_PMG_CMD_MRS_LP
;
5840 for (k
= 0; k
< table
->num_entries
; k
++) {
5841 table
->mc_reg_table_entry
[k
].mc_data
[j
] =
5842 (temp_reg
& 0xffff0000) |
5843 (table
->mc_reg_table_entry
[k
].mc_data
[i
] & 0x0000ffff);
5844 if (adev
->gmc
.vram_type
!= AMDGPU_VRAM_TYPE_GDDR5
)
5845 table
->mc_reg_table_entry
[k
].mc_data
[j
] |= 0x100;
5849 if (adev
->gmc
.vram_type
!= AMDGPU_VRAM_TYPE_GDDR5
) {
5850 if (j
>= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE
)
5852 table
->mc_reg_address
[j
].s1
= MC_PMG_AUTO_CMD
;
5853 table
->mc_reg_address
[j
].s0
= MC_PMG_AUTO_CMD
;
5854 for (k
= 0; k
< table
->num_entries
; k
++)
5855 table
->mc_reg_table_entry
[k
].mc_data
[j
] =
5856 (table
->mc_reg_table_entry
[k
].mc_data
[i
] & 0xffff0000) >> 16;
5860 case MC_SEQ_RESERVE_M
:
5861 temp_reg
= RREG32(MC_PMG_CMD_MRS1
);
5862 table
->mc_reg_address
[j
].s1
= MC_PMG_CMD_MRS1
;
5863 table
->mc_reg_address
[j
].s0
= MC_SEQ_PMG_CMD_MRS1_LP
;
5864 for(k
= 0; k
< table
->num_entries
; k
++)
5865 table
->mc_reg_table_entry
[k
].mc_data
[j
] =
5866 (temp_reg
& 0xffff0000) |
5867 (table
->mc_reg_table_entry
[k
].mc_data
[i
] & 0x0000ffff);
5880 static bool si_check_s0_mc_reg_index(u16 in_reg
, u16
*out_reg
)
5884 case MC_SEQ_RAS_TIMING
:
5885 *out_reg
= MC_SEQ_RAS_TIMING_LP
;
5887 case MC_SEQ_CAS_TIMING
:
5888 *out_reg
= MC_SEQ_CAS_TIMING_LP
;
5890 case MC_SEQ_MISC_TIMING
:
5891 *out_reg
= MC_SEQ_MISC_TIMING_LP
;
5893 case MC_SEQ_MISC_TIMING2
:
5894 *out_reg
= MC_SEQ_MISC_TIMING2_LP
;
5896 case MC_SEQ_RD_CTL_D0
:
5897 *out_reg
= MC_SEQ_RD_CTL_D0_LP
;
5899 case MC_SEQ_RD_CTL_D1
:
5900 *out_reg
= MC_SEQ_RD_CTL_D1_LP
;
5902 case MC_SEQ_WR_CTL_D0
:
5903 *out_reg
= MC_SEQ_WR_CTL_D0_LP
;
5905 case MC_SEQ_WR_CTL_D1
:
5906 *out_reg
= MC_SEQ_WR_CTL_D1_LP
;
5908 case MC_PMG_CMD_EMRS
:
5909 *out_reg
= MC_SEQ_PMG_CMD_EMRS_LP
;
5911 case MC_PMG_CMD_MRS
:
5912 *out_reg
= MC_SEQ_PMG_CMD_MRS_LP
;
5914 case MC_PMG_CMD_MRS1
:
5915 *out_reg
= MC_SEQ_PMG_CMD_MRS1_LP
;
5917 case MC_SEQ_PMG_TIMING
:
5918 *out_reg
= MC_SEQ_PMG_TIMING_LP
;
5920 case MC_PMG_CMD_MRS2
:
5921 *out_reg
= MC_SEQ_PMG_CMD_MRS2_LP
;
5923 case MC_SEQ_WR_CTL_2
:
5924 *out_reg
= MC_SEQ_WR_CTL_2_LP
;
5934 static void si_set_valid_flag(struct si_mc_reg_table
*table
)
5938 for (i
= 0; i
< table
->last
; i
++) {
5939 for (j
= 1; j
< table
->num_entries
; j
++) {
5940 if (table
->mc_reg_table_entry
[j
-1].mc_data
[i
] != table
->mc_reg_table_entry
[j
].mc_data
[i
]) {
5941 table
->valid_flag
|= 1 << i
;
5948 static void si_set_s0_mc_reg_index(struct si_mc_reg_table
*table
)
5953 for (i
= 0; i
< table
->last
; i
++)
5954 table
->mc_reg_address
[i
].s0
= si_check_s0_mc_reg_index(table
->mc_reg_address
[i
].s1
, &address
) ?
5955 address
: table
->mc_reg_address
[i
].s1
;
5959 static int si_copy_vbios_mc_reg_table(struct atom_mc_reg_table
*table
,
5960 struct si_mc_reg_table
*si_table
)
5964 if (table
->last
> SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE
)
5966 if (table
->num_entries
> MAX_AC_TIMING_ENTRIES
)
5969 for (i
= 0; i
< table
->last
; i
++)
5970 si_table
->mc_reg_address
[i
].s1
= table
->mc_reg_address
[i
].s1
;
5971 si_table
->last
= table
->last
;
5973 for (i
= 0; i
< table
->num_entries
; i
++) {
5974 si_table
->mc_reg_table_entry
[i
].mclk_max
=
5975 table
->mc_reg_table_entry
[i
].mclk_max
;
5976 for (j
= 0; j
< table
->last
; j
++) {
5977 si_table
->mc_reg_table_entry
[i
].mc_data
[j
] =
5978 table
->mc_reg_table_entry
[i
].mc_data
[j
];
5981 si_table
->num_entries
= table
->num_entries
;
5986 static int si_initialize_mc_reg_table(struct amdgpu_device
*adev
)
5988 struct si_power_info
*si_pi
= si_get_pi(adev
);
5989 struct atom_mc_reg_table
*table
;
5990 struct si_mc_reg_table
*si_table
= &si_pi
->mc_reg_table
;
5991 u8 module_index
= rv770_get_memory_module_index(adev
);
5994 table
= kzalloc(sizeof(struct atom_mc_reg_table
), GFP_KERNEL
);
5998 WREG32(MC_SEQ_RAS_TIMING_LP
, RREG32(MC_SEQ_RAS_TIMING
));
5999 WREG32(MC_SEQ_CAS_TIMING_LP
, RREG32(MC_SEQ_CAS_TIMING
));
6000 WREG32(MC_SEQ_MISC_TIMING_LP
, RREG32(MC_SEQ_MISC_TIMING
));
6001 WREG32(MC_SEQ_MISC_TIMING2_LP
, RREG32(MC_SEQ_MISC_TIMING2
));
6002 WREG32(MC_SEQ_PMG_CMD_EMRS_LP
, RREG32(MC_PMG_CMD_EMRS
));
6003 WREG32(MC_SEQ_PMG_CMD_MRS_LP
, RREG32(MC_PMG_CMD_MRS
));
6004 WREG32(MC_SEQ_PMG_CMD_MRS1_LP
, RREG32(MC_PMG_CMD_MRS1
));
6005 WREG32(MC_SEQ_WR_CTL_D0_LP
, RREG32(MC_SEQ_WR_CTL_D0
));
6006 WREG32(MC_SEQ_WR_CTL_D1_LP
, RREG32(MC_SEQ_WR_CTL_D1
));
6007 WREG32(MC_SEQ_RD_CTL_D0_LP
, RREG32(MC_SEQ_RD_CTL_D0
));
6008 WREG32(MC_SEQ_RD_CTL_D1_LP
, RREG32(MC_SEQ_RD_CTL_D1
));
6009 WREG32(MC_SEQ_PMG_TIMING_LP
, RREG32(MC_SEQ_PMG_TIMING
));
6010 WREG32(MC_SEQ_PMG_CMD_MRS2_LP
, RREG32(MC_PMG_CMD_MRS2
));
6011 WREG32(MC_SEQ_WR_CTL_2_LP
, RREG32(MC_SEQ_WR_CTL_2
));
6013 ret
= amdgpu_atombios_init_mc_reg_table(adev
, module_index
, table
);
6017 ret
= si_copy_vbios_mc_reg_table(table
, si_table
);
6021 si_set_s0_mc_reg_index(si_table
);
6023 ret
= si_set_mc_special_registers(adev
, si_table
);
6027 si_set_valid_flag(si_table
);
6036 static void si_populate_mc_reg_addresses(struct amdgpu_device
*adev
,
6037 SMC_SIslands_MCRegisters
*mc_reg_table
)
6039 struct si_power_info
*si_pi
= si_get_pi(adev
);
6042 for (i
= 0, j
= 0; j
< si_pi
->mc_reg_table
.last
; j
++) {
6043 if (si_pi
->mc_reg_table
.valid_flag
& (1 << j
)) {
6044 if (i
>= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE
)
6046 mc_reg_table
->address
[i
].s0
=
6047 cpu_to_be16(si_pi
->mc_reg_table
.mc_reg_address
[j
].s0
);
6048 mc_reg_table
->address
[i
].s1
=
6049 cpu_to_be16(si_pi
->mc_reg_table
.mc_reg_address
[j
].s1
);
6053 mc_reg_table
->last
= (u8
)i
;
6056 static void si_convert_mc_registers(const struct si_mc_reg_entry
*entry
,
6057 SMC_SIslands_MCRegisterSet
*data
,
6058 u32 num_entries
, u32 valid_flag
)
6062 for(i
= 0, j
= 0; j
< num_entries
; j
++) {
6063 if (valid_flag
& (1 << j
)) {
6064 data
->value
[i
] = cpu_to_be32(entry
->mc_data
[j
]);
6070 static void si_convert_mc_reg_table_entry_to_smc(struct amdgpu_device
*adev
,
6071 struct rv7xx_pl
*pl
,
6072 SMC_SIslands_MCRegisterSet
*mc_reg_table_data
)
6074 struct si_power_info
*si_pi
= si_get_pi(adev
);
6077 for (i
= 0; i
< si_pi
->mc_reg_table
.num_entries
; i
++) {
6078 if (pl
->mclk
<= si_pi
->mc_reg_table
.mc_reg_table_entry
[i
].mclk_max
)
6082 if ((i
== si_pi
->mc_reg_table
.num_entries
) && (i
> 0))
6085 si_convert_mc_registers(&si_pi
->mc_reg_table
.mc_reg_table_entry
[i
],
6086 mc_reg_table_data
, si_pi
->mc_reg_table
.last
,
6087 si_pi
->mc_reg_table
.valid_flag
);
6090 static void si_convert_mc_reg_table_to_smc(struct amdgpu_device
*adev
,
6091 struct amdgpu_ps
*amdgpu_state
,
6092 SMC_SIslands_MCRegisters
*mc_reg_table
)
6094 struct si_ps
*state
= si_get_ps(amdgpu_state
);
6097 for (i
= 0; i
< state
->performance_level_count
; i
++) {
6098 si_convert_mc_reg_table_entry_to_smc(adev
,
6099 &state
->performance_levels
[i
],
6100 &mc_reg_table
->data
[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT
+ i
]);
6104 static int si_populate_mc_reg_table(struct amdgpu_device
*adev
,
6105 struct amdgpu_ps
*amdgpu_boot_state
)
6107 struct si_ps
*boot_state
= si_get_ps(amdgpu_boot_state
);
6108 struct si_power_info
*si_pi
= si_get_pi(adev
);
6109 struct si_ulv_param
*ulv
= &si_pi
->ulv
;
6110 SMC_SIslands_MCRegisters
*smc_mc_reg_table
= &si_pi
->smc_mc_reg_table
;
6112 memset(smc_mc_reg_table
, 0, sizeof(SMC_SIslands_MCRegisters
));
6114 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_seq_index
, 1);
6116 si_populate_mc_reg_addresses(adev
, smc_mc_reg_table
);
6118 si_convert_mc_reg_table_entry_to_smc(adev
, &boot_state
->performance_levels
[0],
6119 &smc_mc_reg_table
->data
[SISLANDS_MCREGISTERTABLE_INITIAL_SLOT
]);
6121 si_convert_mc_registers(&si_pi
->mc_reg_table
.mc_reg_table_entry
[0],
6122 &smc_mc_reg_table
->data
[SISLANDS_MCREGISTERTABLE_ACPI_SLOT
],
6123 si_pi
->mc_reg_table
.last
,
6124 si_pi
->mc_reg_table
.valid_flag
);
6126 if (ulv
->supported
&& ulv
->pl
.vddc
!= 0)
6127 si_convert_mc_reg_table_entry_to_smc(adev
, &ulv
->pl
,
6128 &smc_mc_reg_table
->data
[SISLANDS_MCREGISTERTABLE_ULV_SLOT
]);
6130 si_convert_mc_registers(&si_pi
->mc_reg_table
.mc_reg_table_entry
[0],
6131 &smc_mc_reg_table
->data
[SISLANDS_MCREGISTERTABLE_ULV_SLOT
],
6132 si_pi
->mc_reg_table
.last
,
6133 si_pi
->mc_reg_table
.valid_flag
);
6135 si_convert_mc_reg_table_to_smc(adev
, amdgpu_boot_state
, smc_mc_reg_table
);
6137 return amdgpu_si_copy_bytes_to_smc(adev
, si_pi
->mc_reg_table_start
,
6138 (u8
*)smc_mc_reg_table
,
6139 sizeof(SMC_SIslands_MCRegisters
), si_pi
->sram_end
);
6142 static int si_upload_mc_reg_table(struct amdgpu_device
*adev
,
6143 struct amdgpu_ps
*amdgpu_new_state
)
6145 struct si_ps
*new_state
= si_get_ps(amdgpu_new_state
);
6146 struct si_power_info
*si_pi
= si_get_pi(adev
);
6147 u32 address
= si_pi
->mc_reg_table_start
+
6148 offsetof(SMC_SIslands_MCRegisters
,
6149 data
[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT
]);
6150 SMC_SIslands_MCRegisters
*smc_mc_reg_table
= &si_pi
->smc_mc_reg_table
;
6152 memset(smc_mc_reg_table
, 0, sizeof(SMC_SIslands_MCRegisters
));
6154 si_convert_mc_reg_table_to_smc(adev
, amdgpu_new_state
, smc_mc_reg_table
);
6156 return amdgpu_si_copy_bytes_to_smc(adev
, address
,
6157 (u8
*)&smc_mc_reg_table
->data
[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT
],
6158 sizeof(SMC_SIslands_MCRegisterSet
) * new_state
->performance_level_count
,
6162 static void si_enable_voltage_control(struct amdgpu_device
*adev
, bool enable
)
6165 WREG32_P(GENERAL_PWRMGT
, VOLT_PWRMGT_EN
, ~VOLT_PWRMGT_EN
);
6167 WREG32_P(GENERAL_PWRMGT
, 0, ~VOLT_PWRMGT_EN
);
6170 static enum amdgpu_pcie_gen
si_get_maximum_link_speed(struct amdgpu_device
*adev
,
6171 struct amdgpu_ps
*amdgpu_state
)
6173 struct si_ps
*state
= si_get_ps(amdgpu_state
);
6175 u16 pcie_speed
, max_speed
= 0;
6177 for (i
= 0; i
< state
->performance_level_count
; i
++) {
6178 pcie_speed
= state
->performance_levels
[i
].pcie_gen
;
6179 if (max_speed
< pcie_speed
)
6180 max_speed
= pcie_speed
;
6185 static u16
si_get_current_pcie_speed(struct amdgpu_device
*adev
)
6189 speed_cntl
= RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL
) & LC_CURRENT_DATA_RATE_MASK
;
6190 speed_cntl
>>= LC_CURRENT_DATA_RATE_SHIFT
;
6192 return (u16
)speed_cntl
;
6195 static void si_request_link_speed_change_before_state_change(struct amdgpu_device
*adev
,
6196 struct amdgpu_ps
*amdgpu_new_state
,
6197 struct amdgpu_ps
*amdgpu_current_state
)
6199 struct si_power_info
*si_pi
= si_get_pi(adev
);
6200 enum amdgpu_pcie_gen target_link_speed
= si_get_maximum_link_speed(adev
, amdgpu_new_state
);
6201 enum amdgpu_pcie_gen current_link_speed
;
6203 if (si_pi
->force_pcie_gen
== AMDGPU_PCIE_GEN_INVALID
)
6204 current_link_speed
= si_get_maximum_link_speed(adev
, amdgpu_current_state
);
6206 current_link_speed
= si_pi
->force_pcie_gen
;
6208 si_pi
->force_pcie_gen
= AMDGPU_PCIE_GEN_INVALID
;
6209 si_pi
->pspp_notify_required
= false;
6210 if (target_link_speed
> current_link_speed
) {
6211 switch (target_link_speed
) {
6212 #if defined(CONFIG_ACPI)
6213 case AMDGPU_PCIE_GEN3
:
6214 if (amdgpu_acpi_pcie_performance_request(adev
, PCIE_PERF_REQ_PECI_GEN3
, false) == 0)
6216 si_pi
->force_pcie_gen
= AMDGPU_PCIE_GEN2
;
6217 if (current_link_speed
== AMDGPU_PCIE_GEN2
)
6220 case AMDGPU_PCIE_GEN2
:
6221 if (amdgpu_acpi_pcie_performance_request(adev
, PCIE_PERF_REQ_PECI_GEN2
, false) == 0)
6226 si_pi
->force_pcie_gen
= si_get_current_pcie_speed(adev
);
6230 if (target_link_speed
< current_link_speed
)
6231 si_pi
->pspp_notify_required
= true;
6235 static void si_notify_link_speed_change_after_state_change(struct amdgpu_device
*adev
,
6236 struct amdgpu_ps
*amdgpu_new_state
,
6237 struct amdgpu_ps
*amdgpu_current_state
)
6239 struct si_power_info
*si_pi
= si_get_pi(adev
);
6240 enum amdgpu_pcie_gen target_link_speed
= si_get_maximum_link_speed(adev
, amdgpu_new_state
);
6243 if (si_pi
->pspp_notify_required
) {
6244 if (target_link_speed
== AMDGPU_PCIE_GEN3
)
6245 request
= PCIE_PERF_REQ_PECI_GEN3
;
6246 else if (target_link_speed
== AMDGPU_PCIE_GEN2
)
6247 request
= PCIE_PERF_REQ_PECI_GEN2
;
6249 request
= PCIE_PERF_REQ_PECI_GEN1
;
6251 if ((request
== PCIE_PERF_REQ_PECI_GEN1
) &&
6252 (si_get_current_pcie_speed(adev
) > 0))
6255 #if defined(CONFIG_ACPI)
6256 amdgpu_acpi_pcie_performance_request(adev
, request
, false);
6262 static int si_ds_request(struct amdgpu_device
*adev
,
6263 bool ds_status_on
, u32 count_write
)
6265 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
6267 if (eg_pi
->sclk_deep_sleep
) {
6269 return (amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_CancelThrottleOVRDSCLKDS
) ==
6273 return (amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_ThrottleOVRDSCLKDS
) ==
6274 PPSMC_Result_OK
) ? 0 : -EINVAL
;
6280 static void si_set_max_cu_value(struct amdgpu_device
*adev
)
6282 struct si_power_info
*si_pi
= si_get_pi(adev
);
6284 if (adev
->asic_type
== CHIP_VERDE
) {
6285 switch (adev
->pdev
->device
) {
6321 static int si_patch_single_dependency_table_based_on_leakage(struct amdgpu_device
*adev
,
6322 struct amdgpu_clock_voltage_dependency_table
*table
)
6326 u16 leakage_voltage
;
6329 for (i
= 0; i
< table
->count
; i
++) {
6330 switch (si_get_leakage_voltage_from_leakage_index(adev
,
6331 table
->entries
[i
].v
,
6332 &leakage_voltage
)) {
6334 table
->entries
[i
].v
= leakage_voltage
;
6344 for (j
= (table
->count
- 2); j
>= 0; j
--) {
6345 table
->entries
[j
].v
= (table
->entries
[j
].v
<= table
->entries
[j
+ 1].v
) ?
6346 table
->entries
[j
].v
: table
->entries
[j
+ 1].v
;
6352 static int si_patch_dependency_tables_based_on_leakage(struct amdgpu_device
*adev
)
6356 ret
= si_patch_single_dependency_table_based_on_leakage(adev
,
6357 &adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
);
6359 DRM_ERROR("Could not patch vddc_on_sclk leakage table\n");
6360 ret
= si_patch_single_dependency_table_based_on_leakage(adev
,
6361 &adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_mclk
);
6363 DRM_ERROR("Could not patch vddc_on_mclk leakage table\n");
6364 ret
= si_patch_single_dependency_table_based_on_leakage(adev
,
6365 &adev
->pm
.dpm
.dyn_state
.vddci_dependency_on_mclk
);
6367 DRM_ERROR("Could not patch vddci_on_mclk leakage table\n");
6371 static void si_set_pcie_lane_width_in_smc(struct amdgpu_device
*adev
,
6372 struct amdgpu_ps
*amdgpu_new_state
,
6373 struct amdgpu_ps
*amdgpu_current_state
)
6376 u32 new_lane_width
=
6377 ((amdgpu_new_state
->caps
& ATOM_PPLIB_PCIE_LINK_WIDTH_MASK
) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT
) + 1;
6378 u32 current_lane_width
=
6379 ((amdgpu_current_state
->caps
& ATOM_PPLIB_PCIE_LINK_WIDTH_MASK
) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT
) + 1;
6381 if (new_lane_width
!= current_lane_width
) {
6382 amdgpu_set_pcie_lanes(adev
, new_lane_width
);
6383 lane_width
= amdgpu_get_pcie_lanes(adev
);
6384 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width
, lane_width
);
/* One-time ASIC dpm setup: snapshot clock registers, enable ACPI PM. */
static void si_dpm_setup_asic(struct amdgpu_device *adev)
{
	si_read_clock_registers(adev);
	si_enable_acpi_power_management(adev);
}
6394 static int si_thermal_enable_alert(struct amdgpu_device
*adev
,
6397 u32 thermal_int
= RREG32(CG_THERMAL_INT
);
6400 PPSMC_Result result
;
6402 thermal_int
&= ~(THERM_INT_MASK_HIGH
| THERM_INT_MASK_LOW
);
6403 WREG32(CG_THERMAL_INT
, thermal_int
);
6404 result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_EnableThermalInterrupt
);
6405 if (result
!= PPSMC_Result_OK
) {
6406 DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
6410 thermal_int
|= THERM_INT_MASK_HIGH
| THERM_INT_MASK_LOW
;
6411 WREG32(CG_THERMAL_INT
, thermal_int
);
6417 static int si_thermal_set_temperature_range(struct amdgpu_device
*adev
,
6418 int min_temp
, int max_temp
)
6420 int low_temp
= 0 * 1000;
6421 int high_temp
= 255 * 1000;
6423 if (low_temp
< min_temp
)
6424 low_temp
= min_temp
;
6425 if (high_temp
> max_temp
)
6426 high_temp
= max_temp
;
6427 if (high_temp
< low_temp
) {
6428 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp
, high_temp
);
6432 WREG32_P(CG_THERMAL_INT
, DIG_THERM_INTH(high_temp
/ 1000), ~DIG_THERM_INTH_MASK
);
6433 WREG32_P(CG_THERMAL_INT
, DIG_THERM_INTL(low_temp
/ 1000), ~DIG_THERM_INTL_MASK
);
6434 WREG32_P(CG_THERMAL_CTRL
, DIG_THERM_DPM(high_temp
/ 1000), ~DIG_THERM_DPM_MASK
);
6436 adev
->pm
.dpm
.thermal
.min_temp
= low_temp
;
6437 adev
->pm
.dpm
.thermal
.max_temp
= high_temp
;
6442 static void si_fan_ctrl_set_static_mode(struct amdgpu_device
*adev
, u32 mode
)
6444 struct si_power_info
*si_pi
= si_get_pi(adev
);
6447 if (si_pi
->fan_ctrl_is_in_default_mode
) {
6448 tmp
= (RREG32(CG_FDO_CTRL2
) & FDO_PWM_MODE_MASK
) >> FDO_PWM_MODE_SHIFT
;
6449 si_pi
->fan_ctrl_default_mode
= tmp
;
6450 tmp
= (RREG32(CG_FDO_CTRL2
) & TMIN_MASK
) >> TMIN_SHIFT
;
6452 si_pi
->fan_ctrl_is_in_default_mode
= false;
6455 tmp
= RREG32(CG_FDO_CTRL2
) & ~TMIN_MASK
;
6457 WREG32(CG_FDO_CTRL2
, tmp
);
6459 tmp
= RREG32(CG_FDO_CTRL2
) & ~FDO_PWM_MODE_MASK
;
6460 tmp
|= FDO_PWM_MODE(mode
);
6461 WREG32(CG_FDO_CTRL2
, tmp
);
6464 static int si_thermal_setup_fan_table(struct amdgpu_device
*adev
)
6466 struct si_power_info
*si_pi
= si_get_pi(adev
);
6467 PP_SIslands_FanTable fan_table
= { FDO_MODE_HARDWARE
};
6469 u32 t_diff1
, t_diff2
, pwm_diff1
, pwm_diff2
;
6470 u16 fdo_min
, slope1
, slope2
;
6471 u32 reference_clock
, tmp
;
6475 if (!si_pi
->fan_table_start
) {
6476 adev
->pm
.dpm
.fan
.ucode_fan_control
= false;
6480 duty100
= (RREG32(CG_FDO_CTRL1
) & FMAX_DUTY100_MASK
) >> FMAX_DUTY100_SHIFT
;
6483 adev
->pm
.dpm
.fan
.ucode_fan_control
= false;
6487 tmp64
= (u64
)adev
->pm
.dpm
.fan
.pwm_min
* duty100
;
6488 do_div(tmp64
, 10000);
6489 fdo_min
= (u16
)tmp64
;
6491 t_diff1
= adev
->pm
.dpm
.fan
.t_med
- adev
->pm
.dpm
.fan
.t_min
;
6492 t_diff2
= adev
->pm
.dpm
.fan
.t_high
- adev
->pm
.dpm
.fan
.t_med
;
6494 pwm_diff1
= adev
->pm
.dpm
.fan
.pwm_med
- adev
->pm
.dpm
.fan
.pwm_min
;
6495 pwm_diff2
= adev
->pm
.dpm
.fan
.pwm_high
- adev
->pm
.dpm
.fan
.pwm_med
;
6497 slope1
= (u16
)((50 + ((16 * duty100
* pwm_diff1
) / t_diff1
)) / 100);
6498 slope2
= (u16
)((50 + ((16 * duty100
* pwm_diff2
) / t_diff2
)) / 100);
6500 fan_table
.temp_min
= cpu_to_be16((50 + adev
->pm
.dpm
.fan
.t_min
) / 100);
6501 fan_table
.temp_med
= cpu_to_be16((50 + adev
->pm
.dpm
.fan
.t_med
) / 100);
6502 fan_table
.temp_max
= cpu_to_be16((50 + adev
->pm
.dpm
.fan
.t_max
) / 100);
6503 fan_table
.slope1
= cpu_to_be16(slope1
);
6504 fan_table
.slope2
= cpu_to_be16(slope2
);
6505 fan_table
.fdo_min
= cpu_to_be16(fdo_min
);
6506 fan_table
.hys_down
= cpu_to_be16(adev
->pm
.dpm
.fan
.t_hyst
);
6507 fan_table
.hys_up
= cpu_to_be16(1);
6508 fan_table
.hys_slope
= cpu_to_be16(1);
6509 fan_table
.temp_resp_lim
= cpu_to_be16(5);
6510 reference_clock
= amdgpu_asic_get_xclk(adev
);
6512 fan_table
.refresh_period
= cpu_to_be32((adev
->pm
.dpm
.fan
.cycle_delay
*
6513 reference_clock
) / 1600);
6514 fan_table
.fdo_max
= cpu_to_be16((u16
)duty100
);
6516 tmp
= (RREG32(CG_MULT_THERMAL_CTRL
) & TEMP_SEL_MASK
) >> TEMP_SEL_SHIFT
;
6517 fan_table
.temp_src
= (uint8_t)tmp
;
6519 ret
= amdgpu_si_copy_bytes_to_smc(adev
,
6520 si_pi
->fan_table_start
,
6526 DRM_ERROR("Failed to load fan table to the SMC.");
6527 adev
->pm
.dpm
.fan
.ucode_fan_control
= false;
6533 static int si_fan_ctrl_start_smc_fan_control(struct amdgpu_device
*adev
)
6535 struct si_power_info
*si_pi
= si_get_pi(adev
);
6538 ret
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_StartFanControl
);
6539 if (ret
== PPSMC_Result_OK
) {
6540 si_pi
->fan_is_controlled_by_smc
= true;
6547 static int si_fan_ctrl_stop_smc_fan_control(struct amdgpu_device
*adev
)
6549 struct si_power_info
*si_pi
= si_get_pi(adev
);
6552 ret
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_StopFanControl
);
6554 if (ret
== PPSMC_Result_OK
) {
6555 si_pi
->fan_is_controlled_by_smc
= false;
6562 static int si_dpm_get_fan_speed_percent(void *handle
,
6567 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
6569 if (adev
->pm
.no_fan
)
6572 duty100
= (RREG32(CG_FDO_CTRL1
) & FMAX_DUTY100_MASK
) >> FMAX_DUTY100_SHIFT
;
6573 duty
= (RREG32(CG_THERMAL_STATUS
) & FDO_PWM_DUTY_MASK
) >> FDO_PWM_DUTY_SHIFT
;
6578 tmp64
= (u64
)duty
* 100;
6579 do_div(tmp64
, duty100
);
6580 *speed
= (u32
)tmp64
;
6588 static int si_dpm_set_fan_speed_percent(void *handle
,
6591 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
6592 struct si_power_info
*si_pi
= si_get_pi(adev
);
6597 if (adev
->pm
.no_fan
)
6600 if (si_pi
->fan_is_controlled_by_smc
)
6606 duty100
= (RREG32(CG_FDO_CTRL1
) & FMAX_DUTY100_MASK
) >> FMAX_DUTY100_SHIFT
;
6611 tmp64
= (u64
)speed
* duty100
;
6615 tmp
= RREG32(CG_FDO_CTRL0
) & ~FDO_STATIC_DUTY_MASK
;
6616 tmp
|= FDO_STATIC_DUTY(duty
);
6617 WREG32(CG_FDO_CTRL0
, tmp
);
6622 static void si_dpm_set_fan_control_mode(void *handle
, u32 mode
)
6624 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
6627 /* stop auto-manage */
6628 if (adev
->pm
.dpm
.fan
.ucode_fan_control
)
6629 si_fan_ctrl_stop_smc_fan_control(adev
);
6630 si_fan_ctrl_set_static_mode(adev
, mode
);
6632 /* restart auto-manage */
6633 if (adev
->pm
.dpm
.fan
.ucode_fan_control
)
6634 si_thermal_start_smc_fan_control(adev
);
6636 si_fan_ctrl_set_default_mode(adev
);
6640 static u32
si_dpm_get_fan_control_mode(void *handle
)
6642 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
6643 struct si_power_info
*si_pi
= si_get_pi(adev
);
6646 if (si_pi
->fan_is_controlled_by_smc
)
6649 tmp
= RREG32(CG_FDO_CTRL2
) & FDO_PWM_MODE_MASK
;
6650 return (tmp
>> FDO_PWM_MODE_SHIFT
);
6654 static int si_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device
*adev
,
6658 u32 xclk
= amdgpu_asic_get_xclk(adev
);
6660 if (adev
->pm
.no_fan
)
6663 if (adev
->pm
.fan_pulses_per_revolution
== 0)
6666 tach_period
= (RREG32(CG_TACH_STATUS
) & TACH_PERIOD_MASK
) >> TACH_PERIOD_SHIFT
;
6667 if (tach_period
== 0)
6670 *speed
= 60 * xclk
* 10000 / tach_period
;
6675 static int si_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device
*adev
,
6678 u32 tach_period
, tmp
;
6679 u32 xclk
= amdgpu_asic_get_xclk(adev
);
6681 if (adev
->pm
.no_fan
)
6684 if (adev
->pm
.fan_pulses_per_revolution
== 0)
6687 if ((speed
< adev
->pm
.fan_min_rpm
) ||
6688 (speed
> adev
->pm
.fan_max_rpm
))
6691 if (adev
->pm
.dpm
.fan
.ucode_fan_control
)
6692 si_fan_ctrl_stop_smc_fan_control(adev
);
6694 tach_period
= 60 * xclk
* 10000 / (8 * speed
);
6695 tmp
= RREG32(CG_TACH_CTRL
) & ~TARGET_PERIOD_MASK
;
6696 tmp
|= TARGET_PERIOD(tach_period
);
6697 WREG32(CG_TACH_CTRL
, tmp
);
6699 si_fan_ctrl_set_static_mode(adev
, FDO_PWM_MODE_STATIC_RPM
);
6705 static void si_fan_ctrl_set_default_mode(struct amdgpu_device
*adev
)
6707 struct si_power_info
*si_pi
= si_get_pi(adev
);
6710 if (!si_pi
->fan_ctrl_is_in_default_mode
) {
6711 tmp
= RREG32(CG_FDO_CTRL2
) & ~FDO_PWM_MODE_MASK
;
6712 tmp
|= FDO_PWM_MODE(si_pi
->fan_ctrl_default_mode
);
6713 WREG32(CG_FDO_CTRL2
, tmp
);
6715 tmp
= RREG32(CG_FDO_CTRL2
) & ~TMIN_MASK
;
6716 tmp
|= TMIN(si_pi
->t_min
);
6717 WREG32(CG_FDO_CTRL2
, tmp
);
6718 si_pi
->fan_ctrl_is_in_default_mode
= true;
6722 static void si_thermal_start_smc_fan_control(struct amdgpu_device
*adev
)
6724 if (adev
->pm
.dpm
.fan
.ucode_fan_control
) {
6725 si_fan_ctrl_start_smc_fan_control(adev
);
6726 si_fan_ctrl_set_static_mode(adev
, FDO_PWM_MODE_STATIC
);
6730 static void si_thermal_initialize(struct amdgpu_device
*adev
)
6734 if (adev
->pm
.fan_pulses_per_revolution
) {
6735 tmp
= RREG32(CG_TACH_CTRL
) & ~EDGE_PER_REV_MASK
;
6736 tmp
|= EDGE_PER_REV(adev
->pm
.fan_pulses_per_revolution
-1);
6737 WREG32(CG_TACH_CTRL
, tmp
);
6740 tmp
= RREG32(CG_FDO_CTRL2
) & ~TACH_PWM_RESP_RATE_MASK
;
6741 tmp
|= TACH_PWM_RESP_RATE(0x28);
6742 WREG32(CG_FDO_CTRL2
, tmp
);
6745 static int si_thermal_start_thermal_controller(struct amdgpu_device
*adev
)
6749 si_thermal_initialize(adev
);
6750 ret
= si_thermal_set_temperature_range(adev
, R600_TEMP_RANGE_MIN
, R600_TEMP_RANGE_MAX
);
6753 ret
= si_thermal_enable_alert(adev
, true);
6756 if (adev
->pm
.dpm
.fan
.ucode_fan_control
) {
6757 ret
= si_halt_smc(adev
);
6760 ret
= si_thermal_setup_fan_table(adev
);
6763 ret
= si_resume_smc(adev
);
6766 si_thermal_start_smc_fan_control(adev
);
6772 static void si_thermal_stop_thermal_controller(struct amdgpu_device
*adev
)
6774 if (!adev
->pm
.no_fan
) {
6775 si_fan_ctrl_set_default_mode(adev
);
6776 si_fan_ctrl_stop_smc_fan_control(adev
);
6780 static int si_dpm_enable(struct amdgpu_device
*adev
)
6782 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
6783 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
6784 struct si_power_info
*si_pi
= si_get_pi(adev
);
6785 struct amdgpu_ps
*boot_ps
= adev
->pm
.dpm
.boot_ps
;
6788 if (amdgpu_si_is_smc_running(adev
))
6790 if (pi
->voltage_control
|| si_pi
->voltage_control_svi2
)
6791 si_enable_voltage_control(adev
, true);
6792 if (pi
->mvdd_control
)
6793 si_get_mvdd_configuration(adev
);
6794 if (pi
->voltage_control
|| si_pi
->voltage_control_svi2
) {
6795 ret
= si_construct_voltage_tables(adev
);
6797 DRM_ERROR("si_construct_voltage_tables failed\n");
6801 if (eg_pi
->dynamic_ac_timing
) {
6802 ret
= si_initialize_mc_reg_table(adev
);
6804 eg_pi
->dynamic_ac_timing
= false;
6807 si_enable_spread_spectrum(adev
, true);
6808 if (pi
->thermal_protection
)
6809 si_enable_thermal_protection(adev
, true);
6811 si_program_git(adev
);
6812 si_program_tp(adev
);
6813 si_program_tpp(adev
);
6814 si_program_sstp(adev
);
6815 si_enable_display_gap(adev
);
6816 si_program_vc(adev
);
6817 ret
= si_upload_firmware(adev
);
6819 DRM_ERROR("si_upload_firmware failed\n");
6822 ret
= si_process_firmware_header(adev
);
6824 DRM_ERROR("si_process_firmware_header failed\n");
6827 ret
= si_initial_switch_from_arb_f0_to_f1(adev
);
6829 DRM_ERROR("si_initial_switch_from_arb_f0_to_f1 failed\n");
6832 ret
= si_init_smc_table(adev
);
6834 DRM_ERROR("si_init_smc_table failed\n");
6837 ret
= si_init_smc_spll_table(adev
);
6839 DRM_ERROR("si_init_smc_spll_table failed\n");
6842 ret
= si_init_arb_table_index(adev
);
6844 DRM_ERROR("si_init_arb_table_index failed\n");
6847 if (eg_pi
->dynamic_ac_timing
) {
6848 ret
= si_populate_mc_reg_table(adev
, boot_ps
);
6850 DRM_ERROR("si_populate_mc_reg_table failed\n");
6854 ret
= si_initialize_smc_cac_tables(adev
);
6856 DRM_ERROR("si_initialize_smc_cac_tables failed\n");
6859 ret
= si_initialize_hardware_cac_manager(adev
);
6861 DRM_ERROR("si_initialize_hardware_cac_manager failed\n");
6864 ret
= si_initialize_smc_dte_tables(adev
);
6866 DRM_ERROR("si_initialize_smc_dte_tables failed\n");
6869 ret
= si_populate_smc_tdp_limits(adev
, boot_ps
);
6871 DRM_ERROR("si_populate_smc_tdp_limits failed\n");
6874 ret
= si_populate_smc_tdp_limits_2(adev
, boot_ps
);
6876 DRM_ERROR("si_populate_smc_tdp_limits_2 failed\n");
6879 si_program_response_times(adev
);
6880 si_program_ds_registers(adev
);
6881 si_dpm_start_smc(adev
);
6882 ret
= si_notify_smc_display_change(adev
, false);
6884 DRM_ERROR("si_notify_smc_display_change failed\n");
6887 si_enable_sclk_control(adev
, true);
6890 si_enable_auto_throttle_source(adev
, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL
, true);
6891 si_thermal_start_thermal_controller(adev
);
6896 static int si_set_temperature_range(struct amdgpu_device
*adev
)
6900 ret
= si_thermal_enable_alert(adev
, false);
6903 ret
= si_thermal_set_temperature_range(adev
, R600_TEMP_RANGE_MIN
, R600_TEMP_RANGE_MAX
);
6906 ret
= si_thermal_enable_alert(adev
, true);
6913 static void si_dpm_disable(struct amdgpu_device
*adev
)
6915 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
6916 struct amdgpu_ps
*boot_ps
= adev
->pm
.dpm
.boot_ps
;
6918 if (!amdgpu_si_is_smc_running(adev
))
6920 si_thermal_stop_thermal_controller(adev
);
6921 si_disable_ulv(adev
);
6923 if (pi
->thermal_protection
)
6924 si_enable_thermal_protection(adev
, false);
6925 si_enable_power_containment(adev
, boot_ps
, false);
6926 si_enable_smc_cac(adev
, boot_ps
, false);
6927 si_enable_spread_spectrum(adev
, false);
6928 si_enable_auto_throttle_source(adev
, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL
, false);
6930 si_reset_to_default(adev
);
6931 si_dpm_stop_smc(adev
);
6932 si_force_switch_to_arb_f0(adev
);
6934 ni_update_current_ps(adev
, boot_ps
);
6937 static int si_dpm_pre_set_power_state(void *handle
)
6939 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
6940 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
6941 struct amdgpu_ps requested_ps
= *adev
->pm
.dpm
.requested_ps
;
6942 struct amdgpu_ps
*new_ps
= &requested_ps
;
6944 ni_update_requested_ps(adev
, new_ps
);
6945 si_apply_state_adjust_rules(adev
, &eg_pi
->requested_rps
);
6950 static int si_power_control_set_level(struct amdgpu_device
*adev
)
6952 struct amdgpu_ps
*new_ps
= adev
->pm
.dpm
.requested_ps
;
6955 ret
= si_restrict_performance_levels_before_switch(adev
);
6958 ret
= si_halt_smc(adev
);
6961 ret
= si_populate_smc_tdp_limits(adev
, new_ps
);
6964 ret
= si_populate_smc_tdp_limits_2(adev
, new_ps
);
6967 ret
= si_resume_smc(adev
);
6970 ret
= si_set_sw_state(adev
);
6976 static int si_dpm_set_power_state(void *handle
)
6978 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
6979 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
6980 struct amdgpu_ps
*new_ps
= &eg_pi
->requested_rps
;
6981 struct amdgpu_ps
*old_ps
= &eg_pi
->current_rps
;
6984 ret
= si_disable_ulv(adev
);
6986 DRM_ERROR("si_disable_ulv failed\n");
6989 ret
= si_restrict_performance_levels_before_switch(adev
);
6991 DRM_ERROR("si_restrict_performance_levels_before_switch failed\n");
6994 if (eg_pi
->pcie_performance_request
)
6995 si_request_link_speed_change_before_state_change(adev
, new_ps
, old_ps
);
6996 ni_set_uvd_clock_before_set_eng_clock(adev
, new_ps
, old_ps
);
6997 ret
= si_enable_power_containment(adev
, new_ps
, false);
6999 DRM_ERROR("si_enable_power_containment failed\n");
7002 ret
= si_enable_smc_cac(adev
, new_ps
, false);
7004 DRM_ERROR("si_enable_smc_cac failed\n");
7007 ret
= si_halt_smc(adev
);
7009 DRM_ERROR("si_halt_smc failed\n");
7012 ret
= si_upload_sw_state(adev
, new_ps
);
7014 DRM_ERROR("si_upload_sw_state failed\n");
7017 ret
= si_upload_smc_data(adev
);
7019 DRM_ERROR("si_upload_smc_data failed\n");
7022 ret
= si_upload_ulv_state(adev
);
7024 DRM_ERROR("si_upload_ulv_state failed\n");
7027 if (eg_pi
->dynamic_ac_timing
) {
7028 ret
= si_upload_mc_reg_table(adev
, new_ps
);
7030 DRM_ERROR("si_upload_mc_reg_table failed\n");
7034 ret
= si_program_memory_timing_parameters(adev
, new_ps
);
7036 DRM_ERROR("si_program_memory_timing_parameters failed\n");
7039 si_set_pcie_lane_width_in_smc(adev
, new_ps
, old_ps
);
7041 ret
= si_resume_smc(adev
);
7043 DRM_ERROR("si_resume_smc failed\n");
7046 ret
= si_set_sw_state(adev
);
7048 DRM_ERROR("si_set_sw_state failed\n");
7051 ni_set_uvd_clock_after_set_eng_clock(adev
, new_ps
, old_ps
);
7052 if (eg_pi
->pcie_performance_request
)
7053 si_notify_link_speed_change_after_state_change(adev
, new_ps
, old_ps
);
7054 ret
= si_set_power_state_conditionally_enable_ulv(adev
, new_ps
);
7056 DRM_ERROR("si_set_power_state_conditionally_enable_ulv failed\n");
7059 ret
= si_enable_smc_cac(adev
, new_ps
, true);
7061 DRM_ERROR("si_enable_smc_cac failed\n");
7064 ret
= si_enable_power_containment(adev
, new_ps
, true);
7066 DRM_ERROR("si_enable_power_containment failed\n");
7070 ret
= si_power_control_set_level(adev
);
7072 DRM_ERROR("si_power_control_set_level failed\n");
7079 static void si_dpm_post_set_power_state(void *handle
)
7081 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
7082 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
7083 struct amdgpu_ps
*new_ps
= &eg_pi
->requested_rps
;
7085 ni_update_current_ps(adev
, new_ps
);
7089 void si_dpm_reset_asic(struct amdgpu_device
*adev
)
7091 si_restrict_performance_levels_before_switch(adev
);
7092 si_disable_ulv(adev
);
7093 si_set_boot_state(adev
);
7097 static void si_dpm_display_configuration_changed(void *handle
)
7099 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
7101 si_program_display_gap(adev
);
7105 static void si_parse_pplib_non_clock_info(struct amdgpu_device
*adev
,
7106 struct amdgpu_ps
*rps
,
7107 struct _ATOM_PPLIB_NONCLOCK_INFO
*non_clock_info
,
7110 rps
->caps
= le32_to_cpu(non_clock_info
->ulCapsAndSettings
);
7111 rps
->class = le16_to_cpu(non_clock_info
->usClassification
);
7112 rps
->class2
= le16_to_cpu(non_clock_info
->usClassification2
);
7114 if (ATOM_PPLIB_NONCLOCKINFO_VER1
< table_rev
) {
7115 rps
->vclk
= le32_to_cpu(non_clock_info
->ulVCLK
);
7116 rps
->dclk
= le32_to_cpu(non_clock_info
->ulDCLK
);
7117 } else if (r600_is_uvd_state(rps
->class, rps
->class2
)) {
7118 rps
->vclk
= RV770_DEFAULT_VCLK_FREQ
;
7119 rps
->dclk
= RV770_DEFAULT_DCLK_FREQ
;
7125 if (rps
->class & ATOM_PPLIB_CLASSIFICATION_BOOT
)
7126 adev
->pm
.dpm
.boot_ps
= rps
;
7127 if (rps
->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE
)
7128 adev
->pm
.dpm
.uvd_ps
= rps
;
7131 static void si_parse_pplib_clock_info(struct amdgpu_device
*adev
,
7132 struct amdgpu_ps
*rps
, int index
,
7133 union pplib_clock_info
*clock_info
)
7135 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
7136 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
7137 struct si_power_info
*si_pi
= si_get_pi(adev
);
7138 struct si_ps
*ps
= si_get_ps(rps
);
7139 u16 leakage_voltage
;
7140 struct rv7xx_pl
*pl
= &ps
->performance_levels
[index
];
7143 ps
->performance_level_count
= index
+ 1;
7145 pl
->sclk
= le16_to_cpu(clock_info
->si
.usEngineClockLow
);
7146 pl
->sclk
|= clock_info
->si
.ucEngineClockHigh
<< 16;
7147 pl
->mclk
= le16_to_cpu(clock_info
->si
.usMemoryClockLow
);
7148 pl
->mclk
|= clock_info
->si
.ucMemoryClockHigh
<< 16;
7150 pl
->vddc
= le16_to_cpu(clock_info
->si
.usVDDC
);
7151 pl
->vddci
= le16_to_cpu(clock_info
->si
.usVDDCI
);
7152 pl
->flags
= le32_to_cpu(clock_info
->si
.ulFlags
);
7153 pl
->pcie_gen
= amdgpu_get_pcie_gen_support(adev
,
7154 si_pi
->sys_pcie_mask
,
7155 si_pi
->boot_pcie_gen
,
7156 clock_info
->si
.ucPCIEGen
);
7158 /* patch up vddc if necessary */
7159 ret
= si_get_leakage_voltage_from_leakage_index(adev
, pl
->vddc
,
7162 pl
->vddc
= leakage_voltage
;
7164 if (rps
->class & ATOM_PPLIB_CLASSIFICATION_ACPI
) {
7165 pi
->acpi_vddc
= pl
->vddc
;
7166 eg_pi
->acpi_vddci
= pl
->vddci
;
7167 si_pi
->acpi_pcie_gen
= pl
->pcie_gen
;
7170 if ((rps
->class2
& ATOM_PPLIB_CLASSIFICATION2_ULV
) &&
7172 /* XXX disable for A0 tahiti */
7173 si_pi
->ulv
.supported
= false;
7174 si_pi
->ulv
.pl
= *pl
;
7175 si_pi
->ulv
.one_pcie_lane_in_ulv
= false;
7176 si_pi
->ulv
.volt_change_delay
= SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT
;
7177 si_pi
->ulv
.cg_ulv_parameter
= SISLANDS_CGULVPARAMETER_DFLT
;
7178 si_pi
->ulv
.cg_ulv_control
= SISLANDS_CGULVCONTROL_DFLT
;
7181 if (pi
->min_vddc_in_table
> pl
->vddc
)
7182 pi
->min_vddc_in_table
= pl
->vddc
;
7184 if (pi
->max_vddc_in_table
< pl
->vddc
)
7185 pi
->max_vddc_in_table
= pl
->vddc
;
7187 /* patch up boot state */
7188 if (rps
->class & ATOM_PPLIB_CLASSIFICATION_BOOT
) {
7189 u16 vddc
, vddci
, mvdd
;
7190 amdgpu_atombios_get_default_voltages(adev
, &vddc
, &vddci
, &mvdd
);
7191 pl
->mclk
= adev
->clock
.default_mclk
;
7192 pl
->sclk
= adev
->clock
.default_sclk
;
7195 si_pi
->mvdd_bootup_value
= mvdd
;
7198 if ((rps
->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK
) ==
7199 ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE
) {
7200 adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_ac
.sclk
= pl
->sclk
;
7201 adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_ac
.mclk
= pl
->mclk
;
7202 adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_ac
.vddc
= pl
->vddc
;
7203 adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_ac
.vddci
= pl
->vddci
;
7207 union pplib_power_state
{
7208 struct _ATOM_PPLIB_STATE v1
;
7209 struct _ATOM_PPLIB_STATE_V2 v2
;
7212 static int si_parse_power_table(struct amdgpu_device
*adev
)
7214 struct amdgpu_mode_info
*mode_info
= &adev
->mode_info
;
7215 struct _ATOM_PPLIB_NONCLOCK_INFO
*non_clock_info
;
7216 union pplib_power_state
*power_state
;
7217 int i
, j
, k
, non_clock_array_index
, clock_array_index
;
7218 union pplib_clock_info
*clock_info
;
7219 struct _StateArray
*state_array
;
7220 struct _ClockInfoArray
*clock_info_array
;
7221 struct _NonClockInfoArray
*non_clock_info_array
;
7222 union power_info
*power_info
;
7223 int index
= GetIndexIntoMasterTable(DATA
, PowerPlayInfo
);
7226 u8
*power_state_offset
;
7229 if (!amdgpu_atom_parse_data_header(mode_info
->atom_context
, index
, NULL
,
7230 &frev
, &crev
, &data_offset
))
7232 power_info
= (union power_info
*)(mode_info
->atom_context
->bios
+ data_offset
);
7234 amdgpu_add_thermal_controller(adev
);
7236 state_array
= (struct _StateArray
*)
7237 (mode_info
->atom_context
->bios
+ data_offset
+
7238 le16_to_cpu(power_info
->pplib
.usStateArrayOffset
));
7239 clock_info_array
= (struct _ClockInfoArray
*)
7240 (mode_info
->atom_context
->bios
+ data_offset
+
7241 le16_to_cpu(power_info
->pplib
.usClockInfoArrayOffset
));
7242 non_clock_info_array
= (struct _NonClockInfoArray
*)
7243 (mode_info
->atom_context
->bios
+ data_offset
+
7244 le16_to_cpu(power_info
->pplib
.usNonClockInfoArrayOffset
));
7246 adev
->pm
.dpm
.ps
= kcalloc(state_array
->ucNumEntries
,
7247 sizeof(struct amdgpu_ps
),
7249 if (!adev
->pm
.dpm
.ps
)
7251 power_state_offset
= (u8
*)state_array
->states
;
7252 for (i
= 0; i
< state_array
->ucNumEntries
; i
++) {
7254 power_state
= (union pplib_power_state
*)power_state_offset
;
7255 non_clock_array_index
= power_state
->v2
.nonClockInfoIndex
;
7256 non_clock_info
= (struct _ATOM_PPLIB_NONCLOCK_INFO
*)
7257 &non_clock_info_array
->nonClockInfo
[non_clock_array_index
];
7258 ps
= kzalloc(sizeof(struct si_ps
), GFP_KERNEL
);
7260 kfree(adev
->pm
.dpm
.ps
);
7263 adev
->pm
.dpm
.ps
[i
].ps_priv
= ps
;
7264 si_parse_pplib_non_clock_info(adev
, &adev
->pm
.dpm
.ps
[i
],
7266 non_clock_info_array
->ucEntrySize
);
7268 idx
= (u8
*)&power_state
->v2
.clockInfoIndex
[0];
7269 for (j
= 0; j
< power_state
->v2
.ucNumDPMLevels
; j
++) {
7270 clock_array_index
= idx
[j
];
7271 if (clock_array_index
>= clock_info_array
->ucNumEntries
)
7273 if (k
>= SISLANDS_MAX_HARDWARE_POWERLEVELS
)
7275 clock_info
= (union pplib_clock_info
*)
7276 ((u8
*)&clock_info_array
->clockInfo
[0] +
7277 (clock_array_index
* clock_info_array
->ucEntrySize
));
7278 si_parse_pplib_clock_info(adev
,
7279 &adev
->pm
.dpm
.ps
[i
], k
,
7283 power_state_offset
+= 2 + power_state
->v2
.ucNumDPMLevels
;
7285 adev
->pm
.dpm
.num_ps
= state_array
->ucNumEntries
;
7287 /* fill in the vce power states */
7288 for (i
= 0; i
< adev
->pm
.dpm
.num_of_vce_states
; i
++) {
7290 clock_array_index
= adev
->pm
.dpm
.vce_states
[i
].clk_idx
;
7291 clock_info
= (union pplib_clock_info
*)
7292 &clock_info_array
->clockInfo
[clock_array_index
* clock_info_array
->ucEntrySize
];
7293 sclk
= le16_to_cpu(clock_info
->si
.usEngineClockLow
);
7294 sclk
|= clock_info
->si
.ucEngineClockHigh
<< 16;
7295 mclk
= le16_to_cpu(clock_info
->si
.usMemoryClockLow
);
7296 mclk
|= clock_info
->si
.ucMemoryClockHigh
<< 16;
7297 adev
->pm
.dpm
.vce_states
[i
].sclk
= sclk
;
7298 adev
->pm
.dpm
.vce_states
[i
].mclk
= mclk
;
7304 static int si_dpm_init(struct amdgpu_device
*adev
)
7306 struct rv7xx_power_info
*pi
;
7307 struct evergreen_power_info
*eg_pi
;
7308 struct ni_power_info
*ni_pi
;
7309 struct si_power_info
*si_pi
;
7310 struct atom_clock_dividers dividers
;
7313 si_pi
= kzalloc(sizeof(struct si_power_info
), GFP_KERNEL
);
7316 adev
->pm
.dpm
.priv
= si_pi
;
7321 si_pi
->sys_pcie_mask
=
7322 adev
->pm
.pcie_gen_mask
& CAIL_PCIE_LINK_SPEED_SUPPORT_MASK
;
7323 si_pi
->force_pcie_gen
= AMDGPU_PCIE_GEN_INVALID
;
7324 si_pi
->boot_pcie_gen
= si_get_current_pcie_speed(adev
);
7326 si_set_max_cu_value(adev
);
7328 rv770_get_max_vddc(adev
);
7329 si_get_leakage_vddc(adev
);
7330 si_patch_dependency_tables_based_on_leakage(adev
);
7333 eg_pi
->acpi_vddci
= 0;
7334 pi
->min_vddc_in_table
= 0;
7335 pi
->max_vddc_in_table
= 0;
7337 ret
= amdgpu_get_platform_caps(adev
);
7341 ret
= amdgpu_parse_extended_power_table(adev
);
7345 ret
= si_parse_power_table(adev
);
7349 adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
=
7351 sizeof(struct amdgpu_clock_voltage_dependency_entry
),
7353 if (!adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
) {
7354 amdgpu_free_extended_power_table(adev
);
7357 adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.count
= 4;
7358 adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
[0].clk
= 0;
7359 adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
[0].v
= 0;
7360 adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
[1].clk
= 36000;
7361 adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
[1].v
= 720;
7362 adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
[2].clk
= 54000;
7363 adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
[2].v
= 810;
7364 adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
[3].clk
= 72000;
7365 adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
[3].v
= 900;
7367 if (adev
->pm
.dpm
.voltage_response_time
== 0)
7368 adev
->pm
.dpm
.voltage_response_time
= R600_VOLTAGERESPONSETIME_DFLT
;
7369 if (adev
->pm
.dpm
.backbias_response_time
== 0)
7370 adev
->pm
.dpm
.backbias_response_time
= R600_BACKBIASRESPONSETIME_DFLT
;
7372 ret
= amdgpu_atombios_get_clock_dividers(adev
, COMPUTE_ENGINE_PLL_PARAM
,
7373 0, false, ÷rs
);
7375 pi
->ref_div
= dividers
.ref_div
+ 1;
7377 pi
->ref_div
= R600_REFERENCEDIVIDER_DFLT
;
7379 eg_pi
->smu_uvd_hs
= false;
7381 pi
->mclk_strobe_mode_threshold
= 40000;
7382 if (si_is_special_1gb_platform(adev
))
7383 pi
->mclk_stutter_mode_threshold
= 0;
7385 pi
->mclk_stutter_mode_threshold
= pi
->mclk_strobe_mode_threshold
;
7386 pi
->mclk_edc_enable_threshold
= 40000;
7387 eg_pi
->mclk_edc_wr_enable_threshold
= 40000;
7389 ni_pi
->mclk_rtt_mode_threshold
= eg_pi
->mclk_edc_wr_enable_threshold
;
7391 pi
->voltage_control
=
7392 amdgpu_atombios_is_voltage_gpio(adev
, SET_VOLTAGE_TYPE_ASIC_VDDC
,
7393 VOLTAGE_OBJ_GPIO_LUT
);
7394 if (!pi
->voltage_control
) {
7395 si_pi
->voltage_control_svi2
=
7396 amdgpu_atombios_is_voltage_gpio(adev
, SET_VOLTAGE_TYPE_ASIC_VDDC
,
7398 if (si_pi
->voltage_control_svi2
)
7399 amdgpu_atombios_get_svi2_info(adev
, SET_VOLTAGE_TYPE_ASIC_VDDC
,
7400 &si_pi
->svd_gpio_id
, &si_pi
->svc_gpio_id
);
7404 amdgpu_atombios_is_voltage_gpio(adev
, SET_VOLTAGE_TYPE_ASIC_MVDDC
,
7405 VOLTAGE_OBJ_GPIO_LUT
);
7407 eg_pi
->vddci_control
=
7408 amdgpu_atombios_is_voltage_gpio(adev
, SET_VOLTAGE_TYPE_ASIC_VDDCI
,
7409 VOLTAGE_OBJ_GPIO_LUT
);
7410 if (!eg_pi
->vddci_control
)
7411 si_pi
->vddci_control_svi2
=
7412 amdgpu_atombios_is_voltage_gpio(adev
, SET_VOLTAGE_TYPE_ASIC_VDDCI
,
7415 si_pi
->vddc_phase_shed_control
=
7416 amdgpu_atombios_is_voltage_gpio(adev
, SET_VOLTAGE_TYPE_ASIC_VDDC
,
7417 VOLTAGE_OBJ_PHASE_LUT
);
7419 rv770_get_engine_memory_ss(adev
);
7421 pi
->asi
= RV770_ASI_DFLT
;
7422 pi
->pasi
= CYPRESS_HASI_DFLT
;
7423 pi
->vrc
= SISLANDS_VRC_DFLT
;
7425 pi
->gfx_clock_gating
= true;
7427 eg_pi
->sclk_deep_sleep
= true;
7428 si_pi
->sclk_deep_sleep_above_low
= false;
7430 if (adev
->pm
.int_thermal_type
!= THERMAL_TYPE_NONE
)
7431 pi
->thermal_protection
= true;
7433 pi
->thermal_protection
= false;
7435 eg_pi
->dynamic_ac_timing
= true;
7437 eg_pi
->light_sleep
= true;
7438 #if defined(CONFIG_ACPI)
7439 eg_pi
->pcie_performance_request
=
7440 amdgpu_acpi_is_pcie_performance_request_supported(adev
);
7442 eg_pi
->pcie_performance_request
= false;
7445 si_pi
->sram_end
= SMC_RAM_END
;
7447 adev
->pm
.dpm
.dyn_state
.mclk_sclk_ratio
= 4;
7448 adev
->pm
.dpm
.dyn_state
.sclk_mclk_delta
= 15000;
7449 adev
->pm
.dpm
.dyn_state
.vddc_vddci_delta
= 200;
7450 adev
->pm
.dpm
.dyn_state
.valid_sclk_values
.count
= 0;
7451 adev
->pm
.dpm
.dyn_state
.valid_sclk_values
.values
= NULL
;
7452 adev
->pm
.dpm
.dyn_state
.valid_mclk_values
.count
= 0;
7453 adev
->pm
.dpm
.dyn_state
.valid_mclk_values
.values
= NULL
;
7455 si_initialize_powertune_defaults(adev
);
7457 /* make sure dc limits are valid */
7458 if ((adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_dc
.sclk
== 0) ||
7459 (adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_dc
.mclk
== 0))
7460 adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_dc
=
7461 adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_ac
;
7463 si_pi
->fan_ctrl_is_in_default_mode
= true;
7468 static void si_dpm_fini(struct amdgpu_device
*adev
)
7472 if (adev
->pm
.dpm
.ps
)
7473 for (i
= 0; i
< adev
->pm
.dpm
.num_ps
; i
++)
7474 kfree(adev
->pm
.dpm
.ps
[i
].ps_priv
);
7475 kfree(adev
->pm
.dpm
.ps
);
7476 kfree(adev
->pm
.dpm
.priv
);
7477 kfree(adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
);
7478 amdgpu_free_extended_power_table(adev
);
7481 static void si_dpm_debugfs_print_current_performance_level(void *handle
,
7484 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
7485 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
7486 struct amdgpu_ps
*rps
= &eg_pi
->current_rps
;
7487 struct si_ps
*ps
= si_get_ps(rps
);
7488 struct rv7xx_pl
*pl
;
7490 (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX
) & CURRENT_STATE_INDEX_MASK
) >>
7491 CURRENT_STATE_INDEX_SHIFT
;
7493 if (current_index
>= ps
->performance_level_count
) {
7494 seq_printf(m
, "invalid dpm profile %d\n", current_index
);
7496 pl
= &ps
->performance_levels
[current_index
];
7497 seq_printf(m
, "uvd vclk: %d dclk: %d\n", rps
->vclk
, rps
->dclk
);
7498 seq_printf(m
, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
7499 current_index
, pl
->sclk
, pl
->mclk
, pl
->vddc
, pl
->vddci
, pl
->pcie_gen
+ 1);
7503 static int si_dpm_set_interrupt_state(struct amdgpu_device
*adev
,
7504 struct amdgpu_irq_src
*source
,
7506 enum amdgpu_interrupt_state state
)
7511 case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH
:
7513 case AMDGPU_IRQ_STATE_DISABLE
:
7514 cg_thermal_int
= RREG32_SMC(CG_THERMAL_INT
);
7515 cg_thermal_int
|= THERM_INT_MASK_HIGH
;
7516 WREG32_SMC(CG_THERMAL_INT
, cg_thermal_int
);
7518 case AMDGPU_IRQ_STATE_ENABLE
:
7519 cg_thermal_int
= RREG32_SMC(CG_THERMAL_INT
);
7520 cg_thermal_int
&= ~THERM_INT_MASK_HIGH
;
7521 WREG32_SMC(CG_THERMAL_INT
, cg_thermal_int
);
7528 case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW
:
7530 case AMDGPU_IRQ_STATE_DISABLE
:
7531 cg_thermal_int
= RREG32_SMC(CG_THERMAL_INT
);
7532 cg_thermal_int
|= THERM_INT_MASK_LOW
;
7533 WREG32_SMC(CG_THERMAL_INT
, cg_thermal_int
);
7535 case AMDGPU_IRQ_STATE_ENABLE
:
7536 cg_thermal_int
= RREG32_SMC(CG_THERMAL_INT
);
7537 cg_thermal_int
&= ~THERM_INT_MASK_LOW
;
7538 WREG32_SMC(CG_THERMAL_INT
, cg_thermal_int
);
7551 static int si_dpm_process_interrupt(struct amdgpu_device
*adev
,
7552 struct amdgpu_irq_src
*source
,
7553 struct amdgpu_iv_entry
*entry
)
7555 bool queue_thermal
= false;
7560 switch (entry
->src_id
) {
7561 case 230: /* thermal low to high */
7562 DRM_DEBUG("IH: thermal low to high\n");
7563 adev
->pm
.dpm
.thermal
.high_to_low
= false;
7564 queue_thermal
= true;
7566 case 231: /* thermal high to low */
7567 DRM_DEBUG("IH: thermal high to low\n");
7568 adev
->pm
.dpm
.thermal
.high_to_low
= true;
7569 queue_thermal
= true;
7576 schedule_work(&adev
->pm
.dpm
.thermal
.work
);
7581 static int si_dpm_late_init(void *handle
)
7584 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
7586 if (!adev
->pm
.dpm_enabled
)
7589 ret
= si_set_temperature_range(adev
);
7593 si_dpm_powergate_uvd(adev
, true);
7599 * si_dpm_init_microcode - load ucode images from disk
7601 * @adev: amdgpu_device pointer
7603 * Use the firmware interface to load the ucode images into
7604 * the driver (not loaded into hw).
7605 * Returns 0 on success, error on failure.
7607 static int si_dpm_init_microcode(struct amdgpu_device
*adev
)
7609 const char *chip_name
;
7614 switch (adev
->asic_type
) {
7616 chip_name
= "tahiti";
7619 if ((adev
->pdev
->revision
== 0x81) &&
7620 ((adev
->pdev
->device
== 0x6810) ||
7621 (adev
->pdev
->device
== 0x6811)))
7622 chip_name
= "pitcairn_k";
7624 chip_name
= "pitcairn";
7627 if (((adev
->pdev
->device
== 0x6820) &&
7628 ((adev
->pdev
->revision
== 0x81) ||
7629 (adev
->pdev
->revision
== 0x83))) ||
7630 ((adev
->pdev
->device
== 0x6821) &&
7631 ((adev
->pdev
->revision
== 0x83) ||
7632 (adev
->pdev
->revision
== 0x87))) ||
7633 ((adev
->pdev
->revision
== 0x87) &&
7634 ((adev
->pdev
->device
== 0x6823) ||
7635 (adev
->pdev
->device
== 0x682b))))
7636 chip_name
= "verde_k";
7638 chip_name
= "verde";
7641 if (((adev
->pdev
->revision
== 0x81) &&
7642 ((adev
->pdev
->device
== 0x6600) ||
7643 (adev
->pdev
->device
== 0x6604) ||
7644 (adev
->pdev
->device
== 0x6605) ||
7645 (adev
->pdev
->device
== 0x6610))) ||
7646 ((adev
->pdev
->revision
== 0x83) &&
7647 (adev
->pdev
->device
== 0x6610)))
7648 chip_name
= "oland_k";
7650 chip_name
= "oland";
7653 if (((adev
->pdev
->revision
== 0x81) &&
7654 (adev
->pdev
->device
== 0x6660)) ||
7655 ((adev
->pdev
->revision
== 0x83) &&
7656 ((adev
->pdev
->device
== 0x6660) ||
7657 (adev
->pdev
->device
== 0x6663) ||
7658 (adev
->pdev
->device
== 0x6665) ||
7659 (adev
->pdev
->device
== 0x6667))))
7660 chip_name
= "hainan_k";
7661 else if ((adev
->pdev
->revision
== 0xc3) &&
7662 (adev
->pdev
->device
== 0x6665))
7663 chip_name
= "banks_k_2";
7665 chip_name
= "hainan";
7670 snprintf(fw_name
, sizeof(fw_name
), "amdgpu/%s_smc.bin", chip_name
);
7671 err
= request_firmware(&adev
->pm
.fw
, fw_name
, adev
->dev
);
7674 err
= amdgpu_ucode_validate(adev
->pm
.fw
);
7678 DRM_ERROR("si_smc: Failed to load firmware. err = %d\"%s\"\n",
7680 release_firmware(adev
->pm
.fw
);
7687 static int si_dpm_sw_init(void *handle
)
7690 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
7692 ret
= amdgpu_irq_add_id(adev
, AMDGPU_IRQ_CLIENTID_LEGACY
, 230, &adev
->pm
.dpm
.thermal
.irq
);
7696 ret
= amdgpu_irq_add_id(adev
, AMDGPU_IRQ_CLIENTID_LEGACY
, 231, &adev
->pm
.dpm
.thermal
.irq
);
7700 /* default to balanced state */
7701 adev
->pm
.dpm
.state
= POWER_STATE_TYPE_BALANCED
;
7702 adev
->pm
.dpm
.user_state
= POWER_STATE_TYPE_BALANCED
;
7703 adev
->pm
.dpm
.forced_level
= AMD_DPM_FORCED_LEVEL_AUTO
;
7704 adev
->pm
.default_sclk
= adev
->clock
.default_sclk
;
7705 adev
->pm
.default_mclk
= adev
->clock
.default_mclk
;
7706 adev
->pm
.current_sclk
= adev
->clock
.default_sclk
;
7707 adev
->pm
.current_mclk
= adev
->clock
.default_mclk
;
7708 adev
->pm
.int_thermal_type
= THERMAL_TYPE_NONE
;
7710 if (amdgpu_dpm
== 0)
7713 ret
= si_dpm_init_microcode(adev
);
7717 INIT_WORK(&adev
->pm
.dpm
.thermal
.work
, amdgpu_dpm_thermal_work_handler
);
7718 mutex_lock(&adev
->pm
.mutex
);
7719 ret
= si_dpm_init(adev
);
7722 adev
->pm
.dpm
.current_ps
= adev
->pm
.dpm
.requested_ps
= adev
->pm
.dpm
.boot_ps
;
7723 if (amdgpu_dpm
== 1)
7724 amdgpu_pm_print_power_states(adev
);
7725 mutex_unlock(&adev
->pm
.mutex
);
7726 DRM_INFO("amdgpu: dpm initialized\n");
7732 mutex_unlock(&adev
->pm
.mutex
);
7733 DRM_ERROR("amdgpu: dpm initialization failed\n");
7737 static int si_dpm_sw_fini(void *handle
)
7739 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
7741 flush_work(&adev
->pm
.dpm
.thermal
.work
);
7743 mutex_lock(&adev
->pm
.mutex
);
7745 mutex_unlock(&adev
->pm
.mutex
);
7750 static int si_dpm_hw_init(void *handle
)
7754 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
7759 mutex_lock(&adev
->pm
.mutex
);
7760 si_dpm_setup_asic(adev
);
7761 ret
= si_dpm_enable(adev
);
7763 adev
->pm
.dpm_enabled
= false;
7765 adev
->pm
.dpm_enabled
= true;
7766 mutex_unlock(&adev
->pm
.mutex
);
7767 amdgpu_pm_compute_clocks(adev
);
7771 static int si_dpm_hw_fini(void *handle
)
7773 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
7775 if (adev
->pm
.dpm_enabled
) {
7776 mutex_lock(&adev
->pm
.mutex
);
7777 si_dpm_disable(adev
);
7778 mutex_unlock(&adev
->pm
.mutex
);
7784 static int si_dpm_suspend(void *handle
)
7786 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
7788 if (adev
->pm
.dpm_enabled
) {
7789 mutex_lock(&adev
->pm
.mutex
);
7791 si_dpm_disable(adev
);
7792 /* reset the power state */
7793 adev
->pm
.dpm
.current_ps
= adev
->pm
.dpm
.requested_ps
= adev
->pm
.dpm
.boot_ps
;
7794 mutex_unlock(&adev
->pm
.mutex
);
7799 static int si_dpm_resume(void *handle
)
7802 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
7804 if (adev
->pm
.dpm_enabled
) {
7805 /* asic init will reset to the boot state */
7806 mutex_lock(&adev
->pm
.mutex
);
7807 si_dpm_setup_asic(adev
);
7808 ret
= si_dpm_enable(adev
);
7810 adev
->pm
.dpm_enabled
= false;
7812 adev
->pm
.dpm_enabled
= true;
7813 mutex_unlock(&adev
->pm
.mutex
);
7814 if (adev
->pm
.dpm_enabled
)
7815 amdgpu_pm_compute_clocks(adev
);
7820 static bool si_dpm_is_idle(void *handle
)
7826 static int si_dpm_wait_for_idle(void *handle
)
7832 static int si_dpm_soft_reset(void *handle
)
7837 static int si_dpm_set_clockgating_state(void *handle
,
7838 enum amd_clockgating_state state
)
7843 static int si_dpm_set_powergating_state(void *handle
,
7844 enum amd_powergating_state state
)
7849 /* get temperature in millidegrees */
7850 static int si_dpm_get_temp(void *handle
)
7853 int actual_temp
= 0;
7854 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
7856 temp
= (RREG32(CG_MULT_THERMAL_STATUS
) & CTF_TEMP_MASK
) >>
7862 actual_temp
= temp
& 0x1ff;
7864 actual_temp
= (actual_temp
* 1000);
7869 static u32
si_dpm_get_sclk(void *handle
, bool low
)
7871 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
7872 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
7873 struct si_ps
*requested_state
= si_get_ps(&eg_pi
->requested_rps
);
7876 return requested_state
->performance_levels
[0].sclk
;
7878 return requested_state
->performance_levels
[requested_state
->performance_level_count
- 1].sclk
;
7881 static u32
si_dpm_get_mclk(void *handle
, bool low
)
7883 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
7884 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
7885 struct si_ps
*requested_state
= si_get_ps(&eg_pi
->requested_rps
);
7888 return requested_state
->performance_levels
[0].mclk
;
7890 return requested_state
->performance_levels
[requested_state
->performance_level_count
- 1].mclk
;
7893 static void si_dpm_print_power_state(void *handle
,
7896 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
7897 struct amdgpu_ps
*rps
= (struct amdgpu_ps
*)current_ps
;
7898 struct si_ps
*ps
= si_get_ps(rps
);
7899 struct rv7xx_pl
*pl
;
7902 amdgpu_dpm_print_class_info(rps
->class, rps
->class2
);
7903 amdgpu_dpm_print_cap_info(rps
->caps
);
7904 DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps
->vclk
, rps
->dclk
);
7905 for (i
= 0; i
< ps
->performance_level_count
; i
++) {
7906 pl
= &ps
->performance_levels
[i
];
7907 if (adev
->asic_type
>= CHIP_TAHITI
)
7908 DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
7909 i
, pl
->sclk
, pl
->mclk
, pl
->vddc
, pl
->vddci
, pl
->pcie_gen
+ 1);
7911 DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u\n",
7912 i
, pl
->sclk
, pl
->mclk
, pl
->vddc
, pl
->vddci
);
7914 amdgpu_dpm_print_ps_status(adev
, rps
);
7917 static int si_dpm_early_init(void *handle
)
7920 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
7922 adev
->powerplay
.pp_funcs
= &si_dpm_funcs
;
7923 adev
->powerplay
.pp_handle
= adev
;
7924 si_dpm_set_irq_funcs(adev
);
7928 static inline bool si_are_power_levels_equal(const struct rv7xx_pl
*si_cpl1
,
7929 const struct rv7xx_pl
*si_cpl2
)
7931 return ((si_cpl1
->mclk
== si_cpl2
->mclk
) &&
7932 (si_cpl1
->sclk
== si_cpl2
->sclk
) &&
7933 (si_cpl1
->pcie_gen
== si_cpl2
->pcie_gen
) &&
7934 (si_cpl1
->vddc
== si_cpl2
->vddc
) &&
7935 (si_cpl1
->vddci
== si_cpl2
->vddci
));
7938 static int si_check_state_equal(void *handle
,
7943 struct si_ps
*si_cps
;
7944 struct si_ps
*si_rps
;
7946 struct amdgpu_ps
*cps
= (struct amdgpu_ps
*)current_ps
;
7947 struct amdgpu_ps
*rps
= (struct amdgpu_ps
*)request_ps
;
7948 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
7950 if (adev
== NULL
|| cps
== NULL
|| rps
== NULL
|| equal
== NULL
)
7953 si_cps
= si_get_ps((struct amdgpu_ps
*)cps
);
7954 si_rps
= si_get_ps((struct amdgpu_ps
*)rps
);
7956 if (si_cps
== NULL
) {
7957 printk("si_cps is NULL\n");
7962 if (si_cps
->performance_level_count
!= si_rps
->performance_level_count
) {
7967 for (i
= 0; i
< si_cps
->performance_level_count
; i
++) {
7968 if (!si_are_power_levels_equal(&(si_cps
->performance_levels
[i
]),
7969 &(si_rps
->performance_levels
[i
]))) {
7975 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
7976 *equal
= ((cps
->vclk
== rps
->vclk
) && (cps
->dclk
== rps
->dclk
));
7977 *equal
&= ((cps
->evclk
== rps
->evclk
) && (cps
->ecclk
== rps
->ecclk
));
7982 static int si_dpm_read_sensor(void *handle
, int idx
,
7983 void *value
, int *size
)
7985 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
7986 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
7987 struct amdgpu_ps
*rps
= &eg_pi
->current_rps
;
7988 struct si_ps
*ps
= si_get_ps(rps
);
7989 uint32_t sclk
, mclk
;
7991 (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX
) & CURRENT_STATE_INDEX_MASK
) >>
7992 CURRENT_STATE_INDEX_SHIFT
;
7994 /* size must be at least 4 bytes for all sensors */
7999 case AMDGPU_PP_SENSOR_GFX_SCLK
:
8000 if (pl_index
< ps
->performance_level_count
) {
8001 sclk
= ps
->performance_levels
[pl_index
].sclk
;
8002 *((uint32_t *)value
) = sclk
;
8007 case AMDGPU_PP_SENSOR_GFX_MCLK
:
8008 if (pl_index
< ps
->performance_level_count
) {
8009 mclk
= ps
->performance_levels
[pl_index
].mclk
;
8010 *((uint32_t *)value
) = mclk
;
8015 case AMDGPU_PP_SENSOR_GPU_TEMP
:
8016 *((uint32_t *)value
) = si_dpm_get_temp(adev
);
8024 static const struct amd_ip_funcs si_dpm_ip_funcs
= {
8026 .early_init
= si_dpm_early_init
,
8027 .late_init
= si_dpm_late_init
,
8028 .sw_init
= si_dpm_sw_init
,
8029 .sw_fini
= si_dpm_sw_fini
,
8030 .hw_init
= si_dpm_hw_init
,
8031 .hw_fini
= si_dpm_hw_fini
,
8032 .suspend
= si_dpm_suspend
,
8033 .resume
= si_dpm_resume
,
8034 .is_idle
= si_dpm_is_idle
,
8035 .wait_for_idle
= si_dpm_wait_for_idle
,
8036 .soft_reset
= si_dpm_soft_reset
,
8037 .set_clockgating_state
= si_dpm_set_clockgating_state
,
8038 .set_powergating_state
= si_dpm_set_powergating_state
,
8041 const struct amdgpu_ip_block_version si_smu_ip_block
=
8043 .type
= AMD_IP_BLOCK_TYPE_SMC
,
8047 .funcs
= &si_dpm_ip_funcs
,
8050 static const struct amd_pm_funcs si_dpm_funcs
= {
8051 .pre_set_power_state
= &si_dpm_pre_set_power_state
,
8052 .set_power_state
= &si_dpm_set_power_state
,
8053 .post_set_power_state
= &si_dpm_post_set_power_state
,
8054 .display_configuration_changed
= &si_dpm_display_configuration_changed
,
8055 .get_sclk
= &si_dpm_get_sclk
,
8056 .get_mclk
= &si_dpm_get_mclk
,
8057 .print_power_state
= &si_dpm_print_power_state
,
8058 .debugfs_print_current_performance_level
= &si_dpm_debugfs_print_current_performance_level
,
8059 .force_performance_level
= &si_dpm_force_performance_level
,
8060 .vblank_too_short
= &si_dpm_vblank_too_short
,
8061 .set_fan_control_mode
= &si_dpm_set_fan_control_mode
,
8062 .get_fan_control_mode
= &si_dpm_get_fan_control_mode
,
8063 .set_fan_speed_percent
= &si_dpm_set_fan_speed_percent
,
8064 .get_fan_speed_percent
= &si_dpm_get_fan_speed_percent
,
8065 .check_state_equal
= &si_check_state_equal
,
8066 .get_vce_clock_state
= amdgpu_get_vce_clock_state
,
8067 .read_sensor
= &si_dpm_read_sensor
,
8070 static const struct amdgpu_irq_src_funcs si_dpm_irq_funcs
= {
8071 .set
= si_dpm_set_interrupt_state
,
8072 .process
= si_dpm_process_interrupt
,
8075 static void si_dpm_set_irq_funcs(struct amdgpu_device
*adev
)
8077 adev
->pm
.dpm
.thermal
.irq
.num_types
= AMDGPU_THERMAL_IRQ_LAST
;
8078 adev
->pm
.dpm
.thermal
.irq
.funcs
= &si_dpm_irq_funcs
;