/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/module.h>
#include <linux/pci.h>

#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_atombios.h"

#include "../include/pptable.h"
#include <linux/math64.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
#define MC_CG_ARB_FREQ_F2 0x0c
#define MC_CG_ARB_FREQ_F3 0x0d

#define SMC_RAM_END 0x20000

#define SCLK_MIN_DEEPSLEEP_FREQ 1350

/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22

#define BIOS_SCRATCH_4 0x5cd
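/* SMC firmware images for the Southern Islands ASIC variants handled here. */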
MODULE_FIRMWARE("amdgpu/tahiti_smc.bin");
MODULE_FIRMWARE("amdgpu/pitcairn_smc.bin");
MODULE_FIRMWARE("amdgpu/pitcairn_k_smc.bin");
MODULE_FIRMWARE("amdgpu/verde_smc.bin");
MODULE_FIRMWARE("amdgpu/verde_k_smc.bin");
MODULE_FIRMWARE("amdgpu/oland_smc.bin");
MODULE_FIRMWARE("amdgpu/oland_k_smc.bin");
MODULE_FIRMWARE("amdgpu/hainan_smc.bin");
MODULE_FIRMWARE("amdgpu/hainan_k_smc.bin");
MODULE_FIRMWARE("amdgpu/banks_k_2_smc.bin");
static const struct amd_pm_funcs si_dpm_funcs;
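/*
 * Unions overlaying the different revisions of the ATOM PowerPlay, fan and
 * clock-info tables parsed from the VBIOS; which member is valid depends on
 * the table revision reported by the BIOS.
 */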
union power_info {
	struct _ATOM_POWERPLAY_INFO info;
	struct _ATOM_POWERPLAY_INFO_V2 info_2;
	struct _ATOM_POWERPLAY_INFO_V3 info_3;
	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
};
union fan_info {
	struct _ATOM_PPLIB_FANTABLE fan;
	struct _ATOM_PPLIB_FANTABLE2 fan2;
	struct _ATOM_PPLIB_FANTABLE3 fan3;
};
union pplib_clock_info {
	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
};
static const u32 r600_utc[R600_PM_NUMBER_OF_TC] =

static const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =
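/*
 * Per-ASIC CAC weight and local CAC (lcac) register tables programmed into
 * the SMC for dynamic power estimation. Each si_cac_config_reg entry appears
 * to encode a register offset, field mask, shift, value and register type.
 */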
static const struct si_cac_config_reg cac_weights_tahiti[] =
{
	{ 0x0, 0x0000ffff, 0, 0xc, SISLANDS_CACCONFIG_CGIND },
	{ 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0x0000ffff, 0, 0x101, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0xffff0000, 16, 0xc, SISLANDS_CACCONFIG_CGIND },
	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0x0000ffff, 0, 0x8fc, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0x0000ffff, 0, 0x95, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0xffff0000, 16, 0x34e, SISLANDS_CACCONFIG_CGIND },
	{ 0x18f, 0x0000ffff, 0, 0x1a1, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0x0000ffff, 0, 0xda, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0xffff0000, 16, 0x46, SISLANDS_CACCONFIG_CGIND },
	{ 0x9, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0xa, 0x0000ffff, 0, 0x208, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0x0000ffff, 0, 0xe7, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0xffff0000, 16, 0x948, SISLANDS_CACCONFIG_CGIND },
	{ 0xc, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0xe, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0x0000ffff, 0, 0x167, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
	{ 0x14, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0x0000ffff, 0, 0x31, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6d, 0x0000ffff, 0, 0x18e, SISLANDS_CACCONFIG_CGIND },
};
static const struct si_cac_config_reg lcac_tahiti[] =
{
	{ 0x143, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x146, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
	{ 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x149, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
	{ 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x14c, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
	{ 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x8c, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x8f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
	{ 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x92, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
	{ 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x95, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
	{ 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x14f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x152, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
	{ 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x155, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
	{ 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x158, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
	{ 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x110, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x113, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
	{ 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x116, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
	{ 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x119, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
	{ 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16d, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
};
static const struct si_cac_config_reg cac_override_tahiti[] =
static const struct si_powertune_data powertune_data_tahiti =
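/*
 * DTE (digital temperature estimation) coefficient tables; one set per SKU
 * variant of the Tahiti, Pitcairn, Cape Verde and Oland families.
 */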
static const struct si_dte_data dte_data_tahiti =
	{ 1159409, 0, 0, 0, 0 },
	{ 27, 31, 35, 39, 43, 47, 54, 61, 67, 74, 81, 88, 95, 0, 0, 0 },
	{ 240888759, 221057860, 235370597, 162287531, 158510299, 131423027, 116673180, 103067515, 87941937, 76209048, 68209175, 64090048, 58301890, 0, 0, 0 },
	{ 12024, 11189, 11451, 8411, 7939, 6666, 5681, 4905, 4241, 3720, 3354, 3122, 2890, 0, 0, 0 },
static const struct si_dte_data dte_data_tahiti_le =
	{ 0x1E8480, 0x7A1200, 0x2160EC0, 0x3938700, 0 },
	{ 0x7D, 0x7D, 0x4E4, 0xB00, 0 },
	{ 0x78, 0x7C, 0x82, 0x88, 0x8E, 0x94, 0x9A, 0xA0, 0xA6, 0xAC, 0xB0, 0xB4, 0xB8, 0xBC, 0xC0, 0xC4 },
	{ 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700 },
	{ 0x2AF8, 0x2AF8, 0x29BB, 0x27F9, 0x2637, 0x2475, 0x22B3, 0x20F1, 0x1F2F, 0x1D6D, 0x1734, 0x1414, 0x10F4, 0xDD4, 0xAB4, 0x794 },
static const struct si_dte_data dte_data_tahiti_pro =
	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
	{ 0x7D0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
static const struct si_dte_data dte_data_new_zealand =
	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0 },
	{ 0x29B, 0x3E9, 0x537, 0x7D2, 0 },
	{ 0x82, 0xA0, 0xB4, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE },
	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
	{ 0xDAC, 0x1388, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685 },
static const struct si_dte_data dte_data_aruba_pro =
	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
	{ 0x1000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
static const struct si_dte_data dte_data_malta =
	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
	{ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
static const struct si_cac_config_reg cac_weights_pitcairn[] =
{
	{ 0x0, 0x0000ffff, 0, 0x8a, SISLANDS_CACCONFIG_CGIND },
	{ 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0xffff0000, 16, 0x24d, SISLANDS_CACCONFIG_CGIND },
	{ 0x2, 0x0000ffff, 0, 0x19, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0x0000ffff, 0, 0xc11, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0xffff0000, 16, 0x7f3, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0x0000ffff, 0, 0x403, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0xffff0000, 16, 0x367, SISLANDS_CACCONFIG_CGIND },
	{ 0x18f, 0x0000ffff, 0, 0x4c9, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0xffff0000, 16, 0x45d, SISLANDS_CACCONFIG_CGIND },
	{ 0x9, 0x0000ffff, 0, 0x36d, SISLANDS_CACCONFIG_CGIND },
	{ 0xa, 0x0000ffff, 0, 0x534, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0x0000ffff, 0, 0x5da, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0xffff0000, 16, 0x880, SISLANDS_CACCONFIG_CGIND },
	{ 0xc, 0x0000ffff, 0, 0x201, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0xe, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0x0000ffff, 0, 0x1f, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0x0000ffff, 0, 0x5de, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x12, 0x0000ffff, 0, 0x7b, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0xffff0000, 16, 0x13, SISLANDS_CACCONFIG_CGIND },
	{ 0x14, 0x0000ffff, 0, 0xf9, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0x0000ffff, 0, 0x66, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x4e, 0x0000ffff, 0, 0x13, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6d, 0x0000ffff, 0, 0x186, SISLANDS_CACCONFIG_CGIND },
};
static const struct si_cac_config_reg lcac_pitcairn[] =
{
	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x8f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x146, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x116, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x155, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x92, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x149, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x119, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x158, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x95, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x14c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16d, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
};
static const struct si_cac_config_reg cac_override_pitcairn[] =
static const struct si_powertune_data powertune_data_pitcairn =
static const struct si_dte_data dte_data_pitcairn =
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
static const struct si_dte_data dte_data_curacao_xt =
	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
	{ 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
static const struct si_dte_data dte_data_curacao_pro =
	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
	{ 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
static const struct si_dte_data dte_data_neptune_xt =
	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
	{ 0x3A2F, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
static const struct si_cac_config_reg cac_weights_chelsea_pro[] =
{
	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x14, 0x0000ffff, 0, 0x2BD, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
};
static const struct si_cac_config_reg cac_weights_chelsea_xt[] =
{
	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x14, 0x0000ffff, 0, 0x30A, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
};
static const struct si_cac_config_reg cac_weights_heathrow[] =
{
	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x14, 0x0000ffff, 0, 0x362, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
};
static const struct si_cac_config_reg cac_weights_cape_verde_pro[] =
{
	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x14, 0x0000ffff, 0, 0x315, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
};
static const struct si_cac_config_reg cac_weights_cape_verde[] =
{
	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
};
static const struct si_cac_config_reg lcac_cape_verde[] =
{
	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x143, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x8f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x146, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x164, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x167, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x161, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
};
static const struct si_cac_config_reg cac_override_cape_verde[] =
static const struct si_powertune_data powertune_data_cape_verde =
	((1 << 16) | 0x6993),
static const struct si_dte_data dte_data_cape_verde =
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
static const struct si_dte_data dte_data_venus_xtx =
	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
	{ 0x71C, 0xAAB, 0xE39, 0x11C7, 0x0 },
	{ 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
	{ 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
	{ 0xD6D8, 0x88B8, 0x1555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
static const struct si_dte_data dte_data_venus_xt =
	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
	{ 0xBDA, 0x11C7, 0x17B4, 0x1DA1, 0x0 },
	{ 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
	{ 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
	{ 0xAFC8, 0x88B8, 0x238E, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
static const struct si_dte_data dte_data_venus_pro =
	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
	{ 0x11C7, 0x1AAB, 0x238E, 0x2C72, 0x0 },
	{ 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
	{ 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
	{ 0x88B8, 0x88B8, 0x3555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
static const struct si_cac_config_reg cac_weights_oland[] =
{
	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
};
static const struct si_cac_config_reg cac_weights_mars_pro[] =
{
	{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
	{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
	{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
	{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
	{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
	{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
	{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
	{ 0x14, 0x0000ffff, 0, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
};
static const struct si_cac_config_reg cac_weights_mars_xt[] =
{
	{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
	{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
	{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
	{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
	{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
	{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
	{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
	{ 0x14, 0x0000ffff, 0, 0x60, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }
};
static const struct si_cac_config_reg cac_weights_oland_pro[] =
{
	{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
	{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
	{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
	{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
	{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
	{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
	{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
	{ 0x14, 0x0000ffff, 0, 0x90, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }
};
static const struct si_cac_config_reg cac_weights_oland_xt[] =
{
	{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
	{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
	{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
	{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
	{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
	{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
	{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
	{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
	{ 0x14, 0x0000ffff, 0, 0x120, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }
};
static const struct si_cac_config_reg lcac_oland[] =
{
	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x143, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }
};
static const struct si_cac_config_reg lcac_mars_pro[] =
{
	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }
};
static const struct si_cac_config_reg cac_override_oland[] =
{
	{ 0xFFFFFFFF }
};
static const struct si_powertune_data powertune_data_oland =
	((1 << 16) | 0x6993),
static const struct si_powertune_data powertune_data_mars_pro =
	((1 << 16) | 0x6993),
static const struct si_dte_data dte_data_oland =
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
static const struct si_dte_data dte_data_mars_pro =
	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
	{ 0xF627, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
static const struct si_dte_data dte_data_sun_xt =
	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
	{ 0xD555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
static const struct si_cac_config_reg cac_weights_hainan[] =
{
	{ 0x0, 0x0000ffff, 0, 0x2d9, SISLANDS_CACCONFIG_CGIND },
	{ 0x0, 0xffff0000, 16, 0x22b, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0x0000ffff, 0, 0x21c, SISLANDS_CACCONFIG_CGIND },
	{ 0x1, 0xffff0000, 16, 0x1dc, SISLANDS_CACCONFIG_CGIND },
	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0x0000ffff, 0, 0x24e, SISLANDS_CACCONFIG_CGIND },
	{ 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0x0000ffff, 0, 0x35e, SISLANDS_CACCONFIG_CGIND },
	{ 0x5, 0xffff0000, 16, 0x1143, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0x0000ffff, 0, 0xe17, SISLANDS_CACCONFIG_CGIND },
	{ 0x6, 0xffff0000, 16, 0x441, SISLANDS_CACCONFIG_CGIND },
	{ 0x18f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0x0000ffff, 0, 0x28b, SISLANDS_CACCONFIG_CGIND },
	{ 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x8, 0xffff0000, 16, 0xabe, SISLANDS_CACCONFIG_CGIND },
	{ 0x9, 0x0000ffff, 0, 0xf11, SISLANDS_CACCONFIG_CGIND },
	{ 0xa, 0x0000ffff, 0, 0x907, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0x0000ffff, 0, 0xb45, SISLANDS_CACCONFIG_CGIND },
	{ 0xb, 0xffff0000, 16, 0xd1e, SISLANDS_CACCONFIG_CGIND },
	{ 0xc, 0x0000ffff, 0, 0xa2c, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0x0000ffff, 0, 0x62, SISLANDS_CACCONFIG_CGIND },
	{ 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0xe, 0x0000ffff, 0, 0x1f3, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0x0000ffff, 0, 0x42, SISLANDS_CACCONFIG_CGIND },
	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0x0000ffff, 0, 0x709, SISLANDS_CACCONFIG_CGIND },
	{ 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x13, 0xffff0000, 16, 0x3a, SISLANDS_CACCONFIG_CGIND },
	{ 0x14, 0x0000ffff, 0, 0x357, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND },
	{ 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0x0000ffff, 0, 0x314, SISLANDS_CACCONFIG_CGIND },
	{ 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x17, 0x0000ffff, 0, 0x6d, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
	{ 0x6d, 0x0000ffff, 0, 0x1b9, SISLANDS_CACCONFIG_CGIND },
	{ 0xFFFFFFFF }
};
static const struct si_powertune_data powertune_data_hainan =
	((1 << 16) | 0x6993),
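/*
 * Each powertune_data table appears to start with a packed CAC window value
 * (two 16-bit halves); si_calculate_cac_wintime() multiplies the halves to
 * derive the sampling window and si_initialize_smc_cac_tables() programs the
 * value into CG_CAC_CTRL through CAC_WINDOW().
 */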
static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev);
static struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev);
static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev);
static struct si_ps *si_get_ps(struct amdgpu_ps *rps);

static int si_populate_voltage_value(struct amdgpu_device *adev,
				     const struct atom_voltage_table *table,
				     u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage);
static int si_get_std_voltage_value(struct amdgpu_device *adev,
				    SISLANDS_SMC_VOLTAGE_VALUE *voltage,
				    u16 *std_voltage);
static int si_write_smc_soft_register(struct amdgpu_device *adev,
				      u16 reg_offset, u32 value);
static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
					 struct rv7xx_pl *pl,
					 SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level);
static int si_calculate_sclk_params(struct amdgpu_device *adev,
				    u32 engine_clock,
				    SISLANDS_SMC_SCLK_VALUE *sclk);

static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev);
static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev);
static void si_dpm_set_irq_funcs(struct amdgpu_device *adev);
static struct si_power_info *si_get_pi(struct amdgpu_device *adev)
{
	struct si_power_info *pi = adev->pm.dpm.priv;

	return pi;
}
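/*
 * Leakage model evaluated below in drm 32.32 fixed point:
 *
 *   tmp = t_slope * V + t_intercept
 *   kt  = exp(tmp * T) / exp(tmp * T_ref)
 *   kv  = av * exp(bv * V)
 *   P   = I_leakage * kt * kv * V
 *
 * The inputs are scaled first (ileakage / 100, v / 1000, t / 1000, the
 * coefficients / 1e8) and the result is returned as an integer in
 * milliwatts (leakage_w * 1000).
 */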
static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff,
						      u16 v, s32 t, u32 ileakage, u32 *leakage)
{
	s64 kt, kv, leakage_w, i_leakage, vddc;
	s64 temperature, t_slope, t_intercept, av, bv, t_ref;
	s64 tmp;

	i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
	vddc = div64_s64(drm_int2fixp(v), 1000);
	temperature = div64_s64(drm_int2fixp(t), 1000);

	t_slope = div64_s64(drm_int2fixp(coeff->t_slope), 100000000);
	t_intercept = div64_s64(drm_int2fixp(coeff->t_intercept), 100000000);
	av = div64_s64(drm_int2fixp(coeff->av), 100000000);
	bv = div64_s64(drm_int2fixp(coeff->bv), 100000000);
	t_ref = drm_int2fixp(coeff->t_ref);

	tmp = drm_fixp_mul(t_slope, vddc) + t_intercept;
	kt = drm_fixp_exp(drm_fixp_mul(tmp, temperature));
	kt = drm_fixp_div(kt, drm_fixp_exp(drm_fixp_mul(tmp, t_ref)));
	kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc)));

	leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);

	*leakage = drm_fixp2int(leakage_w * 1000);
}
static void si_calculate_leakage_for_v_and_t(struct amdgpu_device *adev,
					     const struct ni_leakage_coeffients *coeff,
					     u16 v, s32 t, u32 i_leakage, u32 *leakage)
{
	si_calculate_leakage_for_v_and_t_formula(coeff, v, t, i_leakage, leakage);
}
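/*
 * Fixed-temperature variant: kt is taken directly from the caller-supplied
 * fixed_kt constant (scaled by 1e8) instead of being derived from the
 * temperature term above.
 */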
static void si_calculate_leakage_for_v_formula(const struct ni_leakage_coeffients *coeff,
					       const u32 fixed_kt, u16 v,
					       u32 ileakage, u32 *leakage)
{
	s64 kt, kv, leakage_w, i_leakage, vddc;

	i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
	vddc = div64_s64(drm_int2fixp(v), 1000);

	kt = div64_s64(drm_int2fixp(fixed_kt), 100000000);
	kv = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->av), 100000000),
			  drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bv), 100000000), vddc)));

	leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);

	*leakage = drm_fixp2int(leakage_w * 1000);
}
static void si_calculate_leakage_for_v(struct amdgpu_device *adev,
				       const struct ni_leakage_coeffients *coeff,
				       const u32 fixed_kt, u16 v,
				       u32 i_leakage, u32 *leakage)
{
	si_calculate_leakage_for_v_formula(coeff, fixed_kt, v, i_leakage, leakage);
}
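/*
 * Rescale the DTE thermal resistance entries from the PL2 power limits:
 * each r[i] becomes t_split[i] * (max_t - t0/1000) * 2^14 / (p_limit2 * 100),
 * and the temperature-dependent tdep_r[] entries are derived from r[4].
 * If PL2 is invalid (p_limit2 is zero or above PL1) the table is left as is.
 */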
static void si_update_dte_from_pl2(struct amdgpu_device *adev,
				   struct si_dte_data *dte_data)
{
	u32 p_limit1 = adev->pm.dpm.tdp_limit;
	u32 p_limit2 = adev->pm.dpm.near_tdp_limit;
	u32 k = dte_data->k;
	u32 t_max = dte_data->max_t;
	u32 t_split[5] = { 10, 15, 20, 25, 30 };
	u32 t_0 = dte_data->t0;
	u32 i;

	if (p_limit2 != 0 && p_limit2 <= p_limit1) {
		dte_data->tdep_count = 3;

		for (i = 0; i < k; i++) {
			dte_data->r[i] =
				(t_split[i] * (t_max - t_0 / (u32)1000) * (1 << 14)) /
				(p_limit2 * (u32)100);
		}

		dte_data->tdep_r[1] = dte_data->r[4] * 2;

		for (i = 2; i < SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE; i++) {
			dte_data->tdep_r[i] = dte_data->r[4];
		}
	} else {
		DRM_ERROR("Invalid PL2! DTE will not be updated.\n");
	}
}
static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev)
{
	struct rv7xx_power_info *pi = adev->pm.dpm.priv;

	return pi;
}

static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev)
{
	struct ni_power_info *pi = adev->pm.dpm.priv;

	return pi;
}

static struct si_ps *si_get_ps(struct amdgpu_ps *aps)
{
	struct si_ps *ps = aps->ps_priv;

	return ps;
}
static void si_initialize_powertune_defaults(struct amdgpu_device *adev)
{
	struct ni_power_info *ni_pi = ni_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	bool update_dte_from_pl2 = false;

1986 if (adev
->asic_type
== CHIP_TAHITI
) {
1987 si_pi
->cac_weights
= cac_weights_tahiti
;
1988 si_pi
->lcac_config
= lcac_tahiti
;
1989 si_pi
->cac_override
= cac_override_tahiti
;
1990 si_pi
->powertune_data
= &powertune_data_tahiti
;
1991 si_pi
->dte_data
= dte_data_tahiti
;
1993 switch (adev
->pdev
->device
) {
1995 si_pi
->dte_data
.enable_dte_by_default
= true;
1998 si_pi
->dte_data
= dte_data_new_zealand
;
2004 si_pi
->dte_data
= dte_data_aruba_pro
;
2005 update_dte_from_pl2
= true;
2008 si_pi
->dte_data
= dte_data_malta
;
2009 update_dte_from_pl2
= true;
2012 si_pi
->dte_data
= dte_data_tahiti_pro
;
2013 update_dte_from_pl2
= true;
2016 if (si_pi
->dte_data
.enable_dte_by_default
== true)
2017 DRM_ERROR("DTE is not enabled!\n");
2020 } else if (adev
->asic_type
== CHIP_PITCAIRN
) {
2021 si_pi
->cac_weights
= cac_weights_pitcairn
;
2022 si_pi
->lcac_config
= lcac_pitcairn
;
2023 si_pi
->cac_override
= cac_override_pitcairn
;
2024 si_pi
->powertune_data
= &powertune_data_pitcairn
;
2026 switch (adev
->pdev
->device
) {
2029 si_pi
->dte_data
= dte_data_curacao_xt
;
2030 update_dte_from_pl2
= true;
2034 si_pi
->dte_data
= dte_data_curacao_pro
;
2035 update_dte_from_pl2
= true;
2039 si_pi
->dte_data
= dte_data_neptune_xt
;
2040 update_dte_from_pl2
= true;
2043 si_pi
->dte_data
= dte_data_pitcairn
;
2046 } else if (adev
->asic_type
== CHIP_VERDE
) {
2047 si_pi
->lcac_config
= lcac_cape_verde
;
2048 si_pi
->cac_override
= cac_override_cape_verde
;
2049 si_pi
->powertune_data
= &powertune_data_cape_verde
;
2051 switch (adev
->pdev
->device
) {
2056 si_pi
->cac_weights
= cac_weights_cape_verde_pro
;
2057 si_pi
->dte_data
= dte_data_cape_verde
;
2060 si_pi
->cac_weights
= cac_weights_cape_verde_pro
;
2061 si_pi
->dte_data
= dte_data_sun_xt
;
2062 update_dte_from_pl2
= true;
2066 si_pi
->cac_weights
= cac_weights_heathrow
;
2067 si_pi
->dte_data
= dte_data_cape_verde
;
2071 si_pi
->cac_weights
= cac_weights_chelsea_xt
;
2072 si_pi
->dte_data
= dte_data_cape_verde
;
2075 si_pi
->cac_weights
= cac_weights_chelsea_pro
;
2076 si_pi
->dte_data
= dte_data_cape_verde
;
2079 si_pi
->cac_weights
= cac_weights_heathrow
;
2080 si_pi
->dte_data
= dte_data_venus_xtx
;
2083 si_pi
->cac_weights
= cac_weights_heathrow
;
2084 si_pi
->dte_data
= dte_data_venus_xt
;
2090 si_pi
->cac_weights
= cac_weights_chelsea_pro
;
2091 si_pi
->dte_data
= dte_data_venus_pro
;
2094 si_pi
->cac_weights
= cac_weights_cape_verde
;
2095 si_pi
->dte_data
= dte_data_cape_verde
;
2098 } else if (adev
->asic_type
== CHIP_OLAND
) {
2099 si_pi
->lcac_config
= lcac_mars_pro
;
2100 si_pi
->cac_override
= cac_override_oland
;
2101 si_pi
->powertune_data
= &powertune_data_mars_pro
;
2102 si_pi
->dte_data
= dte_data_mars_pro
;
2104 switch (adev
->pdev
->device
) {
2109 si_pi
->cac_weights
= cac_weights_mars_pro
;
2110 update_dte_from_pl2
= true;
2116 si_pi
->cac_weights
= cac_weights_mars_xt
;
2117 update_dte_from_pl2
= true;
2122 si_pi
->cac_weights
= cac_weights_oland_pro
;
2123 update_dte_from_pl2
= true;
2126 si_pi
->cac_weights
= cac_weights_oland_xt
;
2127 update_dte_from_pl2
= true;
2130 si_pi
->cac_weights
= cac_weights_oland
;
2131 si_pi
->lcac_config
= lcac_oland
;
2132 si_pi
->cac_override
= cac_override_oland
;
2133 si_pi
->powertune_data
= &powertune_data_oland
;
2134 si_pi
->dte_data
= dte_data_oland
;
	} else if (adev->asic_type == CHIP_HAINAN) {
		si_pi->cac_weights = cac_weights_hainan;
		si_pi->lcac_config = lcac_oland;
		si_pi->cac_override = cac_override_oland;
		si_pi->powertune_data = &powertune_data_hainan;
		si_pi->dte_data = dte_data_sun_xt;
		update_dte_from_pl2 = true;
	} else {
		DRM_ERROR("Unknown SI asic revision, failed to initialize PowerTune!\n");
	}

	ni_pi->enable_power_containment = false;
	ni_pi->enable_cac = false;
	ni_pi->enable_sq_ramping = false;
	si_pi->enable_dte = false;

	if (si_pi->powertune_data->enable_powertune_by_default) {
		ni_pi->enable_power_containment = true;
		ni_pi->enable_cac = true;
		if (si_pi->dte_data.enable_dte_by_default) {
			si_pi->enable_dte = true;
			if (update_dte_from_pl2)
				si_update_dte_from_pl2(adev, &si_pi->dte_data);
		}
		ni_pi->enable_sq_ramping = true;
	}

	ni_pi->driver_calculate_cac_leakage = true;
	ni_pi->cac_configuration_required = true;

	if (ni_pi->cac_configuration_required) {
		ni_pi->support_cac_long_term_average = true;
		si_pi->dyn_powertune_data.l2_lta_window_size =
			si_pi->powertune_data->l2_lta_window_size_default;
		si_pi->dyn_powertune_data.lts_truncate =
			si_pi->powertune_data->lts_truncate_default;
	} else {
		ni_pi->support_cac_long_term_average = false;
		si_pi->dyn_powertune_data.l2_lta_window_size = 0;
		si_pi->dyn_powertune_data.lts_truncate = 0;
	}

	si_pi->dyn_powertune_data.disable_uvd_powertune = false;
}
static u32 si_get_smc_power_scaling_factor(struct amdgpu_device *adev)

static u32 si_calculate_cac_wintime(struct amdgpu_device *adev)
{
	u32 xclk;
	u32 wintime;
	u32 cac_window;
	u32 cac_window_size;

	xclk = amdgpu_asic_get_xclk(adev);

	cac_window = RREG32(CG_CAC_CTRL) & CAC_WINDOW_MASK;
	cac_window_size = ((cac_window & 0xFFFF0000) >> 16) * (cac_window & 0x0000FFFF);

	wintime = (cac_window_size * 100) / xclk;

	return wintime;
}

static u32 si_scale_power_for_smc(u32 power_in_watts, u32 scaling_factor)
{
	return power_in_watts;
}
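/*
 * The user TDP adjustment is a percentage: the adjusted limit is
 * (100 +/- tdp_adjustment)% of the board tdp_limit, capped at twice the
 * board limit, and the near-TDP limit is moved by the same absolute delta
 * so the relationship between the two limits is preserved.
 */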
static int si_calculate_adjusted_tdp_limits(struct amdgpu_device *adev,
					    bool adjust_polarity,
					    u32 tdp_adjustment,
					    u32 *tdp_limit,
					    u32 *near_tdp_limit)
{
	u32 adjustment_delta, max_tdp_limit;

	if (tdp_adjustment > (u32)adev->pm.dpm.tdp_od_limit)
		return -EINVAL;

	max_tdp_limit = ((100 + 100) * adev->pm.dpm.tdp_limit) / 100;

	if (adjust_polarity) {
		*tdp_limit = ((100 + tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100;
		*near_tdp_limit = adev->pm.dpm.near_tdp_limit_adjusted + (*tdp_limit - adev->pm.dpm.tdp_limit);
	} else {
		*tdp_limit = ((100 - tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100;
		adjustment_delta = adev->pm.dpm.tdp_limit - *tdp_limit;
		if (adjustment_delta < adev->pm.dpm.near_tdp_limit_adjusted)
			*near_tdp_limit = adev->pm.dpm.near_tdp_limit_adjusted - adjustment_delta;
		else
			*near_tdp_limit = 0;
	}

	if ((*tdp_limit <= 0) || (*tdp_limit > max_tdp_limit))
		return -EINVAL;
	if ((*near_tdp_limit <= 0) || (*near_tdp_limit > *tdp_limit))
		return -EINVAL;

	return 0;
}
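/*
 * Example for si_calculate_adjusted_tdp_limits(): with a board tdp_limit of
 * 100, adjust_polarity = true and a tdp_adjustment of 10, *tdp_limit becomes
 * 110 and *near_tdp_limit is the adjusted near-TDP limit plus the same
 * delta of 10.
 */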
2247 static int si_populate_smc_tdp_limits(struct amdgpu_device
*adev
,
2248 struct amdgpu_ps
*amdgpu_state
)
2250 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2251 struct si_power_info
*si_pi
= si_get_pi(adev
);
2253 if (ni_pi
->enable_power_containment
) {
2254 SISLANDS_SMC_STATETABLE
*smc_table
= &si_pi
->smc_statetable
;
2255 PP_SIslands_PAPMParameters
*papm_parm
;
2256 struct amdgpu_ppm_table
*ppm
= adev
->pm
.dpm
.dyn_state
.ppm_table
;
2257 u32 scaling_factor
= si_get_smc_power_scaling_factor(adev
);
2262 if (scaling_factor
== 0)
2265 memset(smc_table
, 0, sizeof(SISLANDS_SMC_STATETABLE
));
2267 ret
= si_calculate_adjusted_tdp_limits(adev
,
2269 adev
->pm
.dpm
.tdp_adjustment
,
2275 smc_table
->dpm2Params
.TDPLimit
=
2276 cpu_to_be32(si_scale_power_for_smc(tdp_limit
, scaling_factor
) * 1000);
2277 smc_table
->dpm2Params
.NearTDPLimit
=
2278 cpu_to_be32(si_scale_power_for_smc(near_tdp_limit
, scaling_factor
) * 1000);
2279 smc_table
->dpm2Params
.SafePowerLimit
=
2280 cpu_to_be32(si_scale_power_for_smc((near_tdp_limit
* SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT
) / 100, scaling_factor
) * 1000);
2282 ret
= amdgpu_si_copy_bytes_to_smc(adev
,
2283 (si_pi
->state_table_start
+ offsetof(SISLANDS_SMC_STATETABLE
, dpm2Params
) +
2284 offsetof(PP_SIslands_DPM2Parameters
, TDPLimit
)),
2285 (u8
*)(&(smc_table
->dpm2Params
.TDPLimit
)),
2291 if (si_pi
->enable_ppm
) {
2292 papm_parm
= &si_pi
->papm_parm
;
2293 memset(papm_parm
, 0, sizeof(PP_SIslands_PAPMParameters
));
2294 papm_parm
->NearTDPLimitTherm
= cpu_to_be32(ppm
->dgpu_tdp
);
2295 papm_parm
->dGPU_T_Limit
= cpu_to_be32(ppm
->tj_max
);
2296 papm_parm
->dGPU_T_Warning
= cpu_to_be32(95);
2297 papm_parm
->dGPU_T_Hysteresis
= cpu_to_be32(5);
2298 papm_parm
->PlatformPowerLimit
= 0xffffffff;
2299 papm_parm
->NearTDPLimitPAPM
= 0xffffffff;
2301 ret
= amdgpu_si_copy_bytes_to_smc(adev
, si_pi
->papm_cfg_table_start
,
2303 sizeof(PP_SIslands_PAPMParameters
),
2312 static int si_populate_smc_tdp_limits_2(struct amdgpu_device
*adev
,
2313 struct amdgpu_ps
*amdgpu_state
)
2315 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2316 struct si_power_info
*si_pi
= si_get_pi(adev
);
2318 if (ni_pi
->enable_power_containment
) {
2319 SISLANDS_SMC_STATETABLE
*smc_table
= &si_pi
->smc_statetable
;
2320 u32 scaling_factor
= si_get_smc_power_scaling_factor(adev
);
2323 memset(smc_table
, 0, sizeof(SISLANDS_SMC_STATETABLE
));
2325 smc_table
->dpm2Params
.NearTDPLimit
=
2326 cpu_to_be32(si_scale_power_for_smc(adev
->pm
.dpm
.near_tdp_limit_adjusted
, scaling_factor
) * 1000);
2327 smc_table
->dpm2Params
.SafePowerLimit
=
2328 cpu_to_be32(si_scale_power_for_smc((adev
->pm
.dpm
.near_tdp_limit_adjusted
* SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT
) / 100, scaling_factor
) * 1000);
2330 ret
= amdgpu_si_copy_bytes_to_smc(adev
,
2331 (si_pi
->state_table_start
+
2332 offsetof(SISLANDS_SMC_STATETABLE
, dpm2Params
) +
2333 offsetof(PP_SIslands_DPM2Parameters
, NearTDPLimit
)),
2334 (u8
*)(&(smc_table
->dpm2Params
.NearTDPLimit
)),
static u16 si_calculate_power_efficiency_ratio(struct amdgpu_device *adev,
					       const u16 prev_std_vddc,
					       const u16 curr_std_vddc)
{
	u64 margin = (u64)SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN;
	u64 prev_vddc = (u64)prev_std_vddc;
	u64 curr_vddc = (u64)curr_std_vddc;
	u64 pwr_efficiency_ratio, n, d;

	if ((prev_vddc == 0) || (curr_vddc == 0))
		return 0;

	/* ratio = 1024 * (curr_vddc^2 / prev_vddc^2) * (1000 + margin) / 1000 */
	n = div64_u64((u64)1024 * curr_vddc * curr_vddc * ((u64)1000 + margin), (u64)1000);
	d = prev_vddc * prev_vddc;
	pwr_efficiency_ratio = div64_u64(n, d);

	if (pwr_efficiency_ratio > (u64)0xFFFF)
		return 0;

	return (u16)pwr_efficiency_ratio;
}

static bool si_should_disable_uvd_powertune(struct amdgpu_device *adev,
					    struct amdgpu_ps *amdgpu_state)
{
	struct si_power_info *si_pi = si_get_pi(adev);

	if (si_pi->dyn_powertune_data.disable_uvd_powertune &&
	    amdgpu_state->vclk && amdgpu_state->dclk)
		return true;

	return false;
}

struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev)
{
	struct evergreen_power_info *pi = adev->pm.dpm.priv;

	return pi;
}
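/*
 * si_populate_power_containment_values() below fills the per-level dpm2
 * parameters (MaxPS, NearTDPDec, AboveSafeInc, BelowSafeInc,
 * PwrEfficiencyRatio) of the SMC software state; level 0 is zeroed and the
 * remaining levels are derived from the previous level's sclk and the
 * standard VDDC ratio computed by si_calculate_power_efficiency_ratio().
 */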
2385 static int si_populate_power_containment_values(struct amdgpu_device
*adev
,
2386 struct amdgpu_ps
*amdgpu_state
,
2387 SISLANDS_SMC_SWSTATE
*smc_state
)
2389 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
2390 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2391 struct si_ps
*state
= si_get_ps(amdgpu_state
);
2392 SISLANDS_SMC_VOLTAGE_VALUE vddc
;
2399 u16 pwr_efficiency_ratio
;
2401 bool disable_uvd_power_tune
;
2404 if (ni_pi
->enable_power_containment
== false)
2407 if (state
->performance_level_count
== 0)
2410 if (smc_state
->levelCount
!= state
->performance_level_count
)
2413 disable_uvd_power_tune
= si_should_disable_uvd_powertune(adev
, amdgpu_state
);
2415 smc_state
->levels
[0].dpm2
.MaxPS
= 0;
2416 smc_state
->levels
[0].dpm2
.NearTDPDec
= 0;
2417 smc_state
->levels
[0].dpm2
.AboveSafeInc
= 0;
2418 smc_state
->levels
[0].dpm2
.BelowSafeInc
= 0;
2419 smc_state
->levels
[0].dpm2
.PwrEfficiencyRatio
= 0;
2421 for (i
= 1; i
< state
->performance_level_count
; i
++) {
2422 prev_sclk
= state
->performance_levels
[i
-1].sclk
;
2423 max_sclk
= state
->performance_levels
[i
].sclk
;
2425 max_ps_percent
= SISLANDS_DPM2_MAXPS_PERCENT_M
;
2427 max_ps_percent
= SISLANDS_DPM2_MAXPS_PERCENT_H
;
2429 if (prev_sclk
> max_sclk
)
2432 if ((max_ps_percent
== 0) ||
2433 (prev_sclk
== max_sclk
) ||
2434 disable_uvd_power_tune
)
2435 min_sclk
= max_sclk
;
2437 min_sclk
= prev_sclk
;
2439 min_sclk
= (prev_sclk
* (u32
)max_ps_percent
) / 100;
2441 if (min_sclk
< state
->performance_levels
[0].sclk
)
2442 min_sclk
= state
->performance_levels
[0].sclk
;
2447 ret
= si_populate_voltage_value(adev
, &eg_pi
->vddc_voltage_table
,
2448 state
->performance_levels
[i
-1].vddc
, &vddc
);
2452 ret
= si_get_std_voltage_value(adev
, &vddc
, &prev_std_vddc
);
2456 ret
= si_populate_voltage_value(adev
, &eg_pi
->vddc_voltage_table
,
2457 state
->performance_levels
[i
].vddc
, &vddc
);
2461 ret
= si_get_std_voltage_value(adev
, &vddc
, &curr_std_vddc
);
2465 pwr_efficiency_ratio
= si_calculate_power_efficiency_ratio(adev
,
2466 prev_std_vddc
, curr_std_vddc
);
2468 smc_state
->levels
[i
].dpm2
.MaxPS
= (u8
)((SISLANDS_DPM2_MAX_PULSE_SKIP
* (max_sclk
- min_sclk
)) / max_sclk
);
2469 smc_state
->levels
[i
].dpm2
.NearTDPDec
= SISLANDS_DPM2_NEAR_TDP_DEC
;
2470 smc_state
->levels
[i
].dpm2
.AboveSafeInc
= SISLANDS_DPM2_ABOVE_SAFE_INC
;
2471 smc_state
->levels
[i
].dpm2
.BelowSafeInc
= SISLANDS_DPM2_BELOW_SAFE_INC
;
2472 smc_state
->levels
[i
].dpm2
.PwrEfficiencyRatio
= cpu_to_be16(pwr_efficiency_ratio
);
2478 static int si_populate_sq_ramping_values(struct amdgpu_device
*adev
,
2479 struct amdgpu_ps
*amdgpu_state
,
2480 SISLANDS_SMC_SWSTATE
*smc_state
)
2482 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2483 struct si_ps
*state
= si_get_ps(amdgpu_state
);
2484 u32 sq_power_throttle
, sq_power_throttle2
;
2485 bool enable_sq_ramping
= ni_pi
->enable_sq_ramping
;
2488 if (state
->performance_level_count
== 0)
2491 if (smc_state
->levelCount
!= state
->performance_level_count
)
2494 if (adev
->pm
.dpm
.sq_ramping_threshold
== 0)
2497 if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER
> (MAX_POWER_MASK
>> MAX_POWER_SHIFT
))
2498 enable_sq_ramping
= false;
2500 if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER
> (MIN_POWER_MASK
>> MIN_POWER_SHIFT
))
2501 enable_sq_ramping
= false;
2503 if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA
> (MAX_POWER_DELTA_MASK
>> MAX_POWER_DELTA_SHIFT
))
2504 enable_sq_ramping
= false;
2506 if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE
> (STI_SIZE_MASK
>> STI_SIZE_SHIFT
))
2507 enable_sq_ramping
= false;
2509 if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO
> (LTI_RATIO_MASK
>> LTI_RATIO_SHIFT
))
2510 enable_sq_ramping
= false;
2512 for (i
= 0; i
< state
->performance_level_count
; i
++) {
2513 sq_power_throttle
= 0;
2514 sq_power_throttle2
= 0;
2516 if ((state
->performance_levels
[i
].sclk
>= adev
->pm
.dpm
.sq_ramping_threshold
) &&
2517 enable_sq_ramping
) {
2518 sq_power_throttle
|= MAX_POWER(SISLANDS_DPM2_SQ_RAMP_MAX_POWER
);
2519 sq_power_throttle
|= MIN_POWER(SISLANDS_DPM2_SQ_RAMP_MIN_POWER
);
2520 sq_power_throttle2
|= MAX_POWER_DELTA(SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA
);
2521 sq_power_throttle2
|= STI_SIZE(SISLANDS_DPM2_SQ_RAMP_STI_SIZE
);
2522 sq_power_throttle2
|= LTI_RATIO(SISLANDS_DPM2_SQ_RAMP_LTI_RATIO
);
2524 sq_power_throttle
|= MAX_POWER_MASK
| MIN_POWER_MASK
;
2525 sq_power_throttle2
|= MAX_POWER_DELTA_MASK
| STI_SIZE_MASK
| LTI_RATIO_MASK
;
2528 smc_state
->levels
[i
].SQPowerThrottle
= cpu_to_be32(sq_power_throttle
);
2529 smc_state
->levels
[i
].SQPowerThrottle_2
= cpu_to_be32(sq_power_throttle2
);
2535 static int si_enable_power_containment(struct amdgpu_device
*adev
,
2536 struct amdgpu_ps
*amdgpu_new_state
,
2539 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2540 PPSMC_Result smc_result
;
2543 if (ni_pi
->enable_power_containment
) {
2545 if (!si_should_disable_uvd_powertune(adev
, amdgpu_new_state
)) {
2546 smc_result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_TDPClampingActive
);
2547 if (smc_result
!= PPSMC_Result_OK
) {
2549 ni_pi
->pc_enabled
= false;
2551 ni_pi
->pc_enabled
= true;
2555 smc_result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_TDPClampingInactive
);
2556 if (smc_result
!= PPSMC_Result_OK
)
2558 ni_pi
->pc_enabled
= false;
2565 static int si_initialize_smc_dte_tables(struct amdgpu_device
*adev
)
2567 struct si_power_info
*si_pi
= si_get_pi(adev
);
2569 struct si_dte_data
*dte_data
= &si_pi
->dte_data
;
2570 Smc_SIslands_DTE_Configuration
*dte_tables
= NULL
;
2575 if (dte_data
== NULL
)
2576 si_pi
->enable_dte
= false;
2578 if (si_pi
->enable_dte
== false)
2581 if (dte_data
->k
<= 0)
2584 dte_tables
= kzalloc(sizeof(Smc_SIslands_DTE_Configuration
), GFP_KERNEL
);
2585 if (dte_tables
== NULL
) {
2586 si_pi
->enable_dte
= false;
2590 table_size
= dte_data
->k
;
2592 if (table_size
> SMC_SISLANDS_DTE_MAX_FILTER_STAGES
)
2593 table_size
= SMC_SISLANDS_DTE_MAX_FILTER_STAGES
;
2595 tdep_count
= dte_data
->tdep_count
;
2596 if (tdep_count
> SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE
)
2597 tdep_count
= SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE
;
2599 dte_tables
->K
= cpu_to_be32(table_size
);
2600 dte_tables
->T0
= cpu_to_be32(dte_data
->t0
);
2601 dte_tables
->MaxT
= cpu_to_be32(dte_data
->max_t
);
2602 dte_tables
->WindowSize
= dte_data
->window_size
;
2603 dte_tables
->temp_select
= dte_data
->temp_select
;
2604 dte_tables
->DTE_mode
= dte_data
->dte_mode
;
2605 dte_tables
->Tthreshold
= cpu_to_be32(dte_data
->t_threshold
);
2610 for (i
= 0; i
< table_size
; i
++) {
2611 dte_tables
->tau
[i
] = cpu_to_be32(dte_data
->tau
[i
]);
2612 dte_tables
->R
[i
] = cpu_to_be32(dte_data
->r
[i
]);
2615 dte_tables
->Tdep_count
= tdep_count
;
2617 for (i
= 0; i
< (u32
)tdep_count
; i
++) {
2618 dte_tables
->T_limits
[i
] = dte_data
->t_limits
[i
];
2619 dte_tables
->Tdep_tau
[i
] = cpu_to_be32(dte_data
->tdep_tau
[i
]);
2620 dte_tables
->Tdep_R
[i
] = cpu_to_be32(dte_data
->tdep_r
[i
]);
2623 ret
= amdgpu_si_copy_bytes_to_smc(adev
, si_pi
->dte_table_start
,
2625 sizeof(Smc_SIslands_DTE_Configuration
),
2632 static int si_get_cac_std_voltage_max_min(struct amdgpu_device
*adev
,
2635 struct si_power_info
*si_pi
= si_get_pi(adev
);
2636 struct amdgpu_cac_leakage_table
*table
=
2637 &adev
->pm
.dpm
.dyn_state
.cac_leakage_table
;
2647 for (i
= 0; i
< table
->count
; i
++) {
2648 if (table
->entries
[i
].vddc
> *max
)
2649 *max
= table
->entries
[i
].vddc
;
2650 if (table
->entries
[i
].vddc
< *min
)
2651 *min
= table
->entries
[i
].vddc
;
2654 if (si_pi
->powertune_data
->lkge_lut_v0_percent
> 100)
2657 v0_loadline
= (*min
) * (100 - si_pi
->powertune_data
->lkge_lut_v0_percent
) / 100;
2659 if (v0_loadline
> 0xFFFFUL
)
2662 *min
= (u16
)v0_loadline
;
2664 if ((*min
> *max
) || (*max
== 0) || (*min
== 0))
static u16 si_get_cac_std_voltage_step(u16 max, u16 min)
{
	return ((max - min) + (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1)) /
		SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES;
}
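/*
 * The leakage LUT spans SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES voltage
 * columns; the step above is the voltage range divided by the entry count,
 * rounded up, and the tables below are filled from vddc_max downwards
 * (note the NUM_OF_VOLT_ENTRIES - 1 - j column index).
 */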
2676 static int si_init_dte_leakage_table(struct amdgpu_device
*adev
,
2677 PP_SIslands_CacConfig
*cac_tables
,
2678 u16 vddc_max
, u16 vddc_min
, u16 vddc_step
,
2681 struct si_power_info
*si_pi
= si_get_pi(adev
);
2689 scaling_factor
= si_get_smc_power_scaling_factor(adev
);
2691 for (i
= 0; i
< SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES
; i
++) {
2692 t
= (1000 * (i
* t_step
+ t0
));
2694 for (j
= 0; j
< SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES
; j
++) {
2695 voltage
= vddc_max
- (vddc_step
* j
);
2697 si_calculate_leakage_for_v_and_t(adev
,
2698 &si_pi
->powertune_data
->leakage_coefficients
,
2701 si_pi
->dyn_powertune_data
.cac_leakage
,
2704 smc_leakage
= si_scale_power_for_smc(leakage
, scaling_factor
) / 4;
2706 if (smc_leakage
> 0xFFFF)
2707 smc_leakage
= 0xFFFF;
2709 cac_tables
->cac_lkge_lut
[i
][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES
-1-j
] =
2710 cpu_to_be16((u16
)smc_leakage
);
2716 static int si_init_simplified_leakage_table(struct amdgpu_device
*adev
,
2717 PP_SIslands_CacConfig
*cac_tables
,
2718 u16 vddc_max
, u16 vddc_min
, u16 vddc_step
)
2720 struct si_power_info
*si_pi
= si_get_pi(adev
);
2727 scaling_factor
= si_get_smc_power_scaling_factor(adev
);
2729 for (j
= 0; j
< SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES
; j
++) {
2730 voltage
= vddc_max
- (vddc_step
* j
);
2732 si_calculate_leakage_for_v(adev
,
2733 &si_pi
->powertune_data
->leakage_coefficients
,
2734 si_pi
->powertune_data
->fixed_kt
,
2736 si_pi
->dyn_powertune_data
.cac_leakage
,
2739 smc_leakage
= si_scale_power_for_smc(leakage
, scaling_factor
) / 4;
2741 if (smc_leakage
> 0xFFFF)
2742 smc_leakage
= 0xFFFF;
2744 for (i
= 0; i
< SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES
; i
++)
2745 cac_tables
->cac_lkge_lut
[i
][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES
-1-j
] =
2746 cpu_to_be16((u16
)smc_leakage
);
2751 static int si_initialize_smc_cac_tables(struct amdgpu_device
*adev
)
2753 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2754 struct si_power_info
*si_pi
= si_get_pi(adev
);
2755 PP_SIslands_CacConfig
*cac_tables
= NULL
;
2756 u16 vddc_max
, vddc_min
, vddc_step
;
2758 u32 load_line_slope
, reg
;
2760 u32 ticks_per_us
= amdgpu_asic_get_xclk(adev
) / 100;
2762 if (ni_pi
->enable_cac
== false)
2765 cac_tables
= kzalloc(sizeof(PP_SIslands_CacConfig
), GFP_KERNEL
);
2769 reg
= RREG32(CG_CAC_CTRL
) & ~CAC_WINDOW_MASK
;
2770 reg
|= CAC_WINDOW(si_pi
->powertune_data
->cac_window
);
2771 WREG32(CG_CAC_CTRL
, reg
);
2773 si_pi
->dyn_powertune_data
.cac_leakage
= adev
->pm
.dpm
.cac_leakage
;
2774 si_pi
->dyn_powertune_data
.dc_pwr_value
=
2775 si_pi
->powertune_data
->dc_cac
[NISLANDS_DCCAC_LEVEL_0
];
2776 si_pi
->dyn_powertune_data
.wintime
= si_calculate_cac_wintime(adev
);
2777 si_pi
->dyn_powertune_data
.shift_n
= si_pi
->powertune_data
->shift_n_default
;
2779 si_pi
->dyn_powertune_data
.leakage_minimum_temperature
= 80 * 1000;
2781 ret
= si_get_cac_std_voltage_max_min(adev
, &vddc_max
, &vddc_min
);
2785 vddc_step
= si_get_cac_std_voltage_step(vddc_max
, vddc_min
);
2786 vddc_min
= vddc_max
- (vddc_step
* (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES
- 1));
2790 if (si_pi
->enable_dte
|| ni_pi
->driver_calculate_cac_leakage
)
2791 ret
= si_init_dte_leakage_table(adev
, cac_tables
,
2792 vddc_max
, vddc_min
, vddc_step
,
2795 ret
= si_init_simplified_leakage_table(adev
, cac_tables
,
2796 vddc_max
, vddc_min
, vddc_step
);
2800 load_line_slope
= ((u32
)adev
->pm
.dpm
.load_line_slope
<< SMC_SISLANDS_SCALE_R
) / 100;
2802 cac_tables
->l2numWin_TDP
= cpu_to_be32(si_pi
->dyn_powertune_data
.l2_lta_window_size
);
2803 cac_tables
->lts_truncate_n
= si_pi
->dyn_powertune_data
.lts_truncate
;
2804 cac_tables
->SHIFT_N
= si_pi
->dyn_powertune_data
.shift_n
;
2805 cac_tables
->lkge_lut_V0
= cpu_to_be32((u32
)vddc_min
);
2806 cac_tables
->lkge_lut_Vstep
= cpu_to_be32((u32
)vddc_step
);
2807 cac_tables
->R_LL
= cpu_to_be32(load_line_slope
);
2808 cac_tables
->WinTime
= cpu_to_be32(si_pi
->dyn_powertune_data
.wintime
);
2809 cac_tables
->calculation_repeats
= cpu_to_be32(2);
2810 cac_tables
->dc_cac
= cpu_to_be32(0);
2811 cac_tables
->log2_PG_LKG_SCALE
= 12;
2812 cac_tables
->cac_temp
= si_pi
->powertune_data
->operating_temp
;
2813 cac_tables
->lkge_lut_T0
= cpu_to_be32((u32
)t0
);
2814 cac_tables
->lkge_lut_Tstep
= cpu_to_be32((u32
)t_step
);
2816 ret
= amdgpu_si_copy_bytes_to_smc(adev
, si_pi
->cac_table_start
,
2818 sizeof(PP_SIslands_CacConfig
),
2824 ret
= si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_ticks_per_us
, ticks_per_us
);
2828 ni_pi
->enable_cac
= false;
2829 ni_pi
->enable_power_containment
= false;
static int si_program_cac_config_registers(struct amdgpu_device *adev,
					   const struct si_cac_config_reg *cac_config_regs)
{
	const struct si_cac_config_reg *config_regs = cac_config_regs;
	u32 data = 0, offset;

	while (config_regs->offset != 0xFFFFFFFF) {
		switch (config_regs->type) {
		case SISLANDS_CACCONFIG_CGIND:
			offset = SMC_CG_IND_START + config_regs->offset;
			if (offset < SMC_CG_IND_END)
				data = RREG32_SMC(offset);
			break;
		default:
			data = RREG32(config_regs->offset);
			break;
		}

		data &= ~config_regs->mask;
		data |= ((config_regs->value << config_regs->shift) & config_regs->mask);

		switch (config_regs->type) {
		case SISLANDS_CACCONFIG_CGIND:
			offset = SMC_CG_IND_START + config_regs->offset;
			if (offset < SMC_CG_IND_END)
				WREG32_SMC(offset, data);
			break;
		default:
			WREG32(config_regs->offset, data);
			break;
		}

		config_regs++;
	}

	return 0;
}
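/*
 * si_initialize_hardware_cac_manager() below walks the three per-ASIC
 * register lists selected in si_initialize_powertune_defaults():
 * lcac_config, cac_override and cac_weights, in that order.
 */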
static int si_initialize_hardware_cac_manager(struct amdgpu_device *adev)
{
	struct ni_power_info *ni_pi = ni_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	int ret;

	if ((ni_pi->enable_cac == false) ||
	    (ni_pi->cac_configuration_required == false))
		return 0;

	ret = si_program_cac_config_registers(adev, si_pi->lcac_config);
	if (ret)
		return ret;
	ret = si_program_cac_config_registers(adev, si_pi->cac_override);
	if (ret)
		return ret;
	ret = si_program_cac_config_registers(adev, si_pi->cac_weights);
	if (ret)
		return ret;

	return 0;
}
2899 static int si_enable_smc_cac(struct amdgpu_device
*adev
,
2900 struct amdgpu_ps
*amdgpu_new_state
,
2903 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2904 struct si_power_info
*si_pi
= si_get_pi(adev
);
2905 PPSMC_Result smc_result
;
2908 if (ni_pi
->enable_cac
) {
2910 if (!si_should_disable_uvd_powertune(adev
, amdgpu_new_state
)) {
2911 if (ni_pi
->support_cac_long_term_average
) {
2912 smc_result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_CACLongTermAvgEnable
);
2913 if (smc_result
!= PPSMC_Result_OK
)
2914 ni_pi
->support_cac_long_term_average
= false;
2917 smc_result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_EnableCac
);
2918 if (smc_result
!= PPSMC_Result_OK
) {
2920 ni_pi
->cac_enabled
= false;
2922 ni_pi
->cac_enabled
= true;
2925 if (si_pi
->enable_dte
) {
2926 smc_result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_EnableDTE
);
2927 if (smc_result
!= PPSMC_Result_OK
)
2931 } else if (ni_pi
->cac_enabled
) {
2932 if (si_pi
->enable_dte
)
2933 smc_result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_DisableDTE
);
2935 smc_result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_DisableCac
);
2937 ni_pi
->cac_enabled
= false;
2939 if (ni_pi
->support_cac_long_term_average
)
2940 smc_result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_CACLongTermAvgDisable
);
2946 static int si_init_smc_spll_table(struct amdgpu_device
*adev
)
2948 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2949 struct si_power_info
*si_pi
= si_get_pi(adev
);
2950 SMC_SISLANDS_SPLL_DIV_TABLE
*spll_table
;
2951 SISLANDS_SMC_SCLK_VALUE sclk_params
;
2959 if (si_pi
->spll_table_start
== 0)
2962 spll_table
= kzalloc(sizeof(SMC_SISLANDS_SPLL_DIV_TABLE
), GFP_KERNEL
);
2963 if (spll_table
== NULL
)
2966 for (i
= 0; i
< 256; i
++) {
2967 ret
= si_calculate_sclk_params(adev
, sclk
, &sclk_params
);
2970 p_div
= (sclk_params
.vCG_SPLL_FUNC_CNTL
& SPLL_PDIV_A_MASK
) >> SPLL_PDIV_A_SHIFT
;
2971 fb_div
= (sclk_params
.vCG_SPLL_FUNC_CNTL_3
& SPLL_FB_DIV_MASK
) >> SPLL_FB_DIV_SHIFT
;
2972 clk_s
= (sclk_params
.vCG_SPLL_SPREAD_SPECTRUM
& CLK_S_MASK
) >> CLK_S_SHIFT
;
2973 clk_v
= (sclk_params
.vCG_SPLL_SPREAD_SPECTRUM_2
& CLK_V_MASK
) >> CLK_V_SHIFT
;
2975 fb_div
&= ~0x00001FFF;
2979 if (p_div
& ~(SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK
>> SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT
))
2981 if (fb_div
& ~(SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK
>> SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT
))
2983 if (clk_s
& ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK
>> SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT
))
2985 if (clk_v
& ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK
>> SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT
))
2991 tmp
= ((fb_div
<< SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT
) & SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK
) |
2992 ((p_div
<< SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT
) & SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK
);
2993 spll_table
->freq
[i
] = cpu_to_be32(tmp
);
2995 tmp
= ((clk_v
<< SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT
) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK
) |
2996 ((clk_s
<< SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT
) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK
);
2997 spll_table
->ss
[i
] = cpu_to_be32(tmp
);
3004 ret
= amdgpu_si_copy_bytes_to_smc(adev
, si_pi
->spll_table_start
,
3006 sizeof(SMC_SISLANDS_SPLL_DIV_TABLE
),
3010 ni_pi
->enable_power_containment
= false;
3017 static u16
si_get_lower_of_leakage_and_vce_voltage(struct amdgpu_device
*adev
,
3020 u16 highest_leakage
= 0;
3021 struct si_power_info
*si_pi
= si_get_pi(adev
);
3024 for (i
= 0; i
< si_pi
->leakage_voltage
.count
; i
++){
3025 if (highest_leakage
< si_pi
->leakage_voltage
.entries
[i
].voltage
)
3026 highest_leakage
= si_pi
->leakage_voltage
.entries
[i
].voltage
;
3029 if (si_pi
->leakage_voltage
.count
&& (highest_leakage
< vce_voltage
))
3030 return highest_leakage
;
3035 static int si_get_vce_clock_voltage(struct amdgpu_device
*adev
,
3036 u32 evclk
, u32 ecclk
, u16
*voltage
)
3040 struct amdgpu_vce_clock_voltage_dependency_table
*table
=
3041 &adev
->pm
.dpm
.dyn_state
.vce_clock_voltage_dependency_table
;
3043 if (((evclk
== 0) && (ecclk
== 0)) ||
3044 (table
&& (table
->count
== 0))) {
3049 for (i
= 0; i
< table
->count
; i
++) {
3050 if ((evclk
<= table
->entries
[i
].evclk
) &&
3051 (ecclk
<= table
->entries
[i
].ecclk
)) {
3052 *voltage
= table
->entries
[i
].v
;
3058 /* if no match return the highest voltage */
3060 *voltage
= table
->entries
[table
->count
- 1].v
;
3062 *voltage
= si_get_lower_of_leakage_and_vce_voltage(adev
, *voltage
);
static bool si_dpm_vblank_too_short(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
	/* we never hit the non-gddr5 limit so disable it */
	u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;

	if (vblank_time < switch_limit)
		return true;
	else
		return false;
}
static int ni_copy_and_switch_arb_sets(struct amdgpu_device *adev,
				       u32 arb_freq_src, u32 arb_freq_dest)

	u32 mc_arb_dram_timing;
	u32 mc_arb_dram_timing2;

	switch (arb_freq_src) {
	case MC_CG_ARB_FREQ_F0:
		mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE0_MASK) >> STATE0_SHIFT;

	case MC_CG_ARB_FREQ_F1:
		mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING_1);
		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_1);
		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE1_MASK) >> STATE1_SHIFT;

	case MC_CG_ARB_FREQ_F2:
		mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING_2);
		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_2);
		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE2_MASK) >> STATE2_SHIFT;

	case MC_CG_ARB_FREQ_F3:
		mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING_3);
		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_3);
		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE3_MASK) >> STATE3_SHIFT;

	switch (arb_freq_dest) {
	case MC_CG_ARB_FREQ_F0:
		WREG32(MC_ARB_DRAM_TIMING,  mc_arb_dram_timing);
		WREG32(MC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
		WREG32_P(MC_ARB_BURST_TIME, STATE0(burst_time), ~STATE0_MASK);

	case MC_CG_ARB_FREQ_F1:
		WREG32(MC_ARB_DRAM_TIMING_1,  mc_arb_dram_timing);
		WREG32(MC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
		WREG32_P(MC_ARB_BURST_TIME, STATE1(burst_time), ~STATE1_MASK);

	case MC_CG_ARB_FREQ_F2:
		WREG32(MC_ARB_DRAM_TIMING_2,  mc_arb_dram_timing);
		WREG32(MC_ARB_DRAM_TIMING2_2, mc_arb_dram_timing2);
		WREG32_P(MC_ARB_BURST_TIME, STATE2(burst_time), ~STATE2_MASK);

	case MC_CG_ARB_FREQ_F3:
		WREG32(MC_ARB_DRAM_TIMING_3,  mc_arb_dram_timing);
		WREG32(MC_ARB_DRAM_TIMING2_3, mc_arb_dram_timing2);
		WREG32_P(MC_ARB_BURST_TIME, STATE3(burst_time), ~STATE3_MASK);

	mc_cg_config = RREG32(MC_CG_CONFIG) | 0x0000000F;
	WREG32(MC_CG_CONFIG, mc_cg_config);
	WREG32_P(MC_ARB_CG, CG_ARB_REQ(arb_freq_dest), ~CG_ARB_REQ_MASK);

static void ni_update_current_ps(struct amdgpu_device *adev,
				 struct amdgpu_ps *rps)

	struct si_ps *new_ps = si_get_ps(rps);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct ni_power_info *ni_pi = ni_get_pi(adev);

	eg_pi->current_rps = *rps;
	ni_pi->current_ps = *new_ps;
	eg_pi->current_rps.ps_priv = &ni_pi->current_ps;
	adev->pm.dpm.current_ps = &eg_pi->current_rps;

static void ni_update_requested_ps(struct amdgpu_device *adev,
				   struct amdgpu_ps *rps)

	struct si_ps *new_ps = si_get_ps(rps);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct ni_power_info *ni_pi = ni_get_pi(adev);

	eg_pi->requested_rps = *rps;
	ni_pi->requested_ps = *new_ps;
	eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps;
	adev->pm.dpm.requested_ps = &eg_pi->requested_rps;

static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device *adev,
						  struct amdgpu_ps *new_ps,
						  struct amdgpu_ps *old_ps)

	struct si_ps *new_state = si_get_ps(new_ps);
	struct si_ps *current_state = si_get_ps(old_ps);

	if ((new_ps->vclk == old_ps->vclk) &&
	    (new_ps->dclk == old_ps->dclk))

	if (new_state->performance_levels[new_state->performance_level_count - 1].sclk >=
	    current_state->performance_levels[current_state->performance_level_count - 1].sclk)

	amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk);

static void ni_set_uvd_clock_after_set_eng_clock(struct amdgpu_device *adev,
						 struct amdgpu_ps *new_ps,
						 struct amdgpu_ps *old_ps)

	struct si_ps *new_state = si_get_ps(new_ps);
	struct si_ps *current_state = si_get_ps(old_ps);

	if ((new_ps->vclk == old_ps->vclk) &&
	    (new_ps->dclk == old_ps->dclk))

	if (new_state->performance_levels[new_state->performance_level_count - 1].sclk <
	    current_state->performance_levels[current_state->performance_level_count - 1].sclk)

	amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk);

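/*
 * Return the first voltage table entry that is at least the requested
 * voltage, falling back to the highest entry when nothing matches.
 */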
static u16 btc_find_voltage(struct atom_voltage_table *table, u16 voltage)

	for (i = 0; i < table->count; i++)
		if (voltage <= table->entries[i].value)
			return table->entries[i].value;

	return table->entries[table->count - 1].value;

static u32 btc_find_valid_clock(struct amdgpu_clock_array *clocks,
				u32 max_clock, u32 requested_clock)

	if ((clocks == NULL) || (clocks->count == 0))
		return (requested_clock < max_clock) ? requested_clock : max_clock;

	for (i = 0; i < clocks->count; i++) {
		if (clocks->values[i] >= requested_clock)
			return (clocks->values[i] < max_clock) ? clocks->values[i] : max_clock;

	return (clocks->values[clocks->count - 1] < max_clock) ?
		clocks->values[clocks->count - 1] : max_clock;

static u32 btc_get_valid_mclk(struct amdgpu_device *adev,
			      u32 max_mclk, u32 requested_mclk)

	return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_mclk_values,
				    max_mclk, requested_mclk);

static u32 btc_get_valid_sclk(struct amdgpu_device *adev,
			      u32 max_sclk, u32 requested_sclk)

	return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_sclk_values,
				    max_sclk, requested_sclk);

static void btc_get_max_clock_from_voltage_dependency_table(struct amdgpu_clock_voltage_dependency_table *table,

	if ((table == NULL) || (table->count == 0)) {

	for (i = 0; i < table->count; i++) {
		if (clock < table->entries[i].clk)
			clock = table->entries[i].clk;

static void btc_apply_voltage_dependency_rules(struct amdgpu_clock_voltage_dependency_table *table,
					       u32 clock, u16 max_voltage, u16 *voltage)

	if ((table == NULL) || (table->count == 0))

	for (i = 0; i < table->count; i++) {
		if (clock <= table->entries[i].clk) {
			if (*voltage < table->entries[i].v)
				*voltage = (u16)((table->entries[i].v < max_voltage) ?
						 table->entries[i].v : max_voltage);

	*voltage = (*voltage > max_voltage) ? *voltage : max_voltage;

static void btc_adjust_clock_combinations(struct amdgpu_device *adev,
					  const struct amdgpu_clock_and_voltage_limits *max_limits,
					  struct rv7xx_pl *pl)

	if ((pl->mclk == 0) || (pl->sclk == 0))

	if (pl->mclk == pl->sclk)

	if (pl->mclk > pl->sclk) {
		if (((pl->mclk + (pl->sclk - 1)) / pl->sclk) > adev->pm.dpm.dyn_state.mclk_sclk_ratio)
			pl->sclk = btc_get_valid_sclk(adev,
				(adev->pm.dpm.dyn_state.mclk_sclk_ratio - 1)) /
				adev->pm.dpm.dyn_state.mclk_sclk_ratio);

		if ((pl->sclk - pl->mclk) > adev->pm.dpm.dyn_state.sclk_mclk_delta)
			pl->mclk = btc_get_valid_mclk(adev,
				adev->pm.dpm.dyn_state.sclk_mclk_delta);

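/*
 * Keep VDDC and VDDCI within the platform's allowed delta of each other:
 * when one rail sits more than vddc_vddci_delta above the other, the
 * lower rail is bumped to the nearest table entry within the delta,
 * clamped to its maximum.
 */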
static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev,
					  u16 max_vddc, u16 max_vddci,
					  u16 *vddc, u16 *vddci)

	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);

	if ((0 == *vddc) || (0 == *vddci))

	if (*vddc > *vddci) {
		if ((*vddc - *vddci) > adev->pm.dpm.dyn_state.vddc_vddci_delta) {
			new_voltage = btc_find_voltage(&eg_pi->vddci_voltage_table,
						       (*vddc - adev->pm.dpm.dyn_state.vddc_vddci_delta));
			*vddci = (new_voltage < max_vddci) ? new_voltage : max_vddci;

		if ((*vddci - *vddc) > adev->pm.dpm.dyn_state.vddc_vddci_delta) {
			new_voltage = btc_find_voltage(&eg_pi->vddc_voltage_table,
						       (*vddci - adev->pm.dpm.dyn_state.vddc_vddci_delta));
			*vddc = (new_voltage < max_vddc) ? new_voltage : max_vddc;

static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,

	i_c = (i * r_c) / 100;

	*p = i_c / (1 << (2 * (*u)));

static int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)

	if ((fl == 0) || (fh == 0) || (fl > fh))

	k = (100 * fh) / fl;
	t1 = (t * (k - 100));
	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));

	ah = ((a * t) + 5000) / 10000;

static bool r600_is_uvd_state(u32 class, u32 class2)

	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)

	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)

	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)

	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)

	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)

static u8 rv770_get_memory_module_index(struct amdgpu_device *adev)

	return (u8) ((RREG32(BIOS_SCRATCH_4) >> 16) & 0xff);

static void rv770_get_max_vddc(struct amdgpu_device *adev)

	struct rv7xx_power_info *pi = rv770_get_pi(adev);

	if (amdgpu_atombios_get_max_vddc(adev, 0, 0, &vddc))

	pi->max_vddc = vddc;

static void rv770_get_engine_memory_ss(struct amdgpu_device *adev)

	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct amdgpu_atom_ss ss;

	pi->sclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss,
						       ASIC_INTERNAL_ENGINE_SS, 0);
	pi->mclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss,
						       ASIC_INTERNAL_MEMORY_SS, 0);

	if (pi->sclk_ss || pi->mclk_ss)
		pi->dynamic_ss = true;

		pi->dynamic_ss = false;

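/*
 * Clamp the requested power state so every performance level respects the
 * board limits: per-SKU sclk/mclk caps for the Hainan and Oland variants
 * checked below, the AC/DC clock and voltage limits, the voltage
 * dependency tables, VCE minimums, and the mclk/sclk switching
 * restrictions derived from the display configuration.
 */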
static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
					struct amdgpu_ps *rps)

	struct si_ps *ps = si_get_ps(rps);
	struct amdgpu_clock_and_voltage_limits *max_limits;
	bool disable_mclk_switching = false;
	bool disable_sclk_switching = false;
	u16 vddc, vddci, min_vce_voltage = 0;
	u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
	u32 max_sclk = 0, max_mclk = 0;

	if (adev->asic_type == CHIP_HAINAN) {
		if ((adev->pdev->revision == 0x81) ||
		    (adev->pdev->revision == 0x83) ||
		    (adev->pdev->revision == 0xC3) ||
		    (adev->pdev->device == 0x6664) ||
		    (adev->pdev->device == 0x6665) ||
		    (adev->pdev->device == 0x6667)) {

		if ((adev->pdev->revision == 0xC3) ||
		    (adev->pdev->device == 0x6665)) {

	} else if (adev->asic_type == CHIP_OLAND) {
		if ((adev->pdev->revision == 0xC7) ||
		    (adev->pdev->revision == 0x80) ||
		    (adev->pdev->revision == 0x81) ||
		    (adev->pdev->revision == 0x83) ||
		    (adev->pdev->revision == 0x87) ||
		    (adev->pdev->device == 0x6604) ||
		    (adev->pdev->device == 0x6605)) {

	if (rps->vce_active) {
		rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
		rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
		si_get_vce_clock_voltage(adev, rps->evclk, rps->ecclk,

	if ((adev->pm.dpm.new_active_crtc_count > 1) ||
	    si_dpm_vblank_too_short(adev))
		disable_mclk_switching = true;

	if (rps->vclk || rps->dclk) {
		disable_mclk_switching = true;
		disable_sclk_switching = true;

	if (adev->pm.ac_power)
		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;

	for (i = ps->performance_level_count - 2; i >= 0; i--) {
		if (ps->performance_levels[i].vddc > ps->performance_levels[i+1].vddc)
			ps->performance_levels[i].vddc = ps->performance_levels[i+1].vddc;

	if (adev->pm.ac_power == false) {
		for (i = 0; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk > max_limits->mclk)
				ps->performance_levels[i].mclk = max_limits->mclk;
			if (ps->performance_levels[i].sclk > max_limits->sclk)
				ps->performance_levels[i].sclk = max_limits->sclk;
			if (ps->performance_levels[i].vddc > max_limits->vddc)
				ps->performance_levels[i].vddc = max_limits->vddc;
			if (ps->performance_levels[i].vddci > max_limits->vddci)
				ps->performance_levels[i].vddci = max_limits->vddci;

	/* limit clocks to max supported clocks based on voltage dependency tables */
	btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
	btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
	btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,

	for (i = 0; i < ps->performance_level_count; i++) {
		if (max_sclk_vddc) {
			if (ps->performance_levels[i].sclk > max_sclk_vddc)
				ps->performance_levels[i].sclk = max_sclk_vddc;

		if (max_mclk_vddci) {
			if (ps->performance_levels[i].mclk > max_mclk_vddci)
				ps->performance_levels[i].mclk = max_mclk_vddci;

		if (max_mclk_vddc) {
			if (ps->performance_levels[i].mclk > max_mclk_vddc)
				ps->performance_levels[i].mclk = max_mclk_vddc;

		if (ps->performance_levels[i].mclk > max_mclk)
			ps->performance_levels[i].mclk = max_mclk;

		if (ps->performance_levels[i].sclk > max_sclk)
			ps->performance_levels[i].sclk = max_sclk;

	/* XXX validate the min clocks required for display */

	if (disable_mclk_switching) {
		mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
		vddci = ps->performance_levels[ps->performance_level_count - 1].vddci;

		mclk = ps->performance_levels[0].mclk;
		vddci = ps->performance_levels[0].vddci;

	if (disable_sclk_switching) {
		sclk = ps->performance_levels[ps->performance_level_count - 1].sclk;
		vddc = ps->performance_levels[ps->performance_level_count - 1].vddc;

		sclk = ps->performance_levels[0].sclk;
		vddc = ps->performance_levels[0].vddc;

	if (rps->vce_active) {
		if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
			sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
		if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
			mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;

	/* adjusted low state */
	ps->performance_levels[0].sclk = sclk;
	ps->performance_levels[0].mclk = mclk;
	ps->performance_levels[0].vddc = vddc;
	ps->performance_levels[0].vddci = vddci;

	if (disable_sclk_switching) {
		sclk = ps->performance_levels[0].sclk;
		for (i = 1; i < ps->performance_level_count; i++) {
			if (sclk < ps->performance_levels[i].sclk)
				sclk = ps->performance_levels[i].sclk;

		for (i = 0; i < ps->performance_level_count; i++) {
			ps->performance_levels[i].sclk = sclk;
			ps->performance_levels[i].vddc = vddc;

		for (i = 1; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
				ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
			if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
				ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;

	if (disable_mclk_switching) {
		mclk = ps->performance_levels[0].mclk;
		for (i = 1; i < ps->performance_level_count; i++) {
			if (mclk < ps->performance_levels[i].mclk)
				mclk = ps->performance_levels[i].mclk;

		for (i = 0; i < ps->performance_level_count; i++) {
			ps->performance_levels[i].mclk = mclk;
			ps->performance_levels[i].vddci = vddci;

		for (i = 1; i < ps->performance_level_count; i++) {
			if (ps->performance_levels[i].mclk < ps->performance_levels[i - 1].mclk)
				ps->performance_levels[i].mclk = ps->performance_levels[i - 1].mclk;
			if (ps->performance_levels[i].vddci < ps->performance_levels[i - 1].vddci)
				ps->performance_levels[i].vddci = ps->performance_levels[i - 1].vddci;

	for (i = 0; i < ps->performance_level_count; i++)
		btc_adjust_clock_combinations(adev, max_limits,
					      &ps->performance_levels[i]);

	for (i = 0; i < ps->performance_level_count; i++) {
		if (ps->performance_levels[i].vddc < min_vce_voltage)
			ps->performance_levels[i].vddc = min_vce_voltage;
		btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
						   ps->performance_levels[i].sclk,
						   max_limits->vddc, &ps->performance_levels[i].vddc);
		btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
						   ps->performance_levels[i].mclk,
						   max_limits->vddci, &ps->performance_levels[i].vddci);
		btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
						   ps->performance_levels[i].mclk,
						   max_limits->vddc, &ps->performance_levels[i].vddc);
		btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
						   adev->clock.current_dispclk,
						   max_limits->vddc, &ps->performance_levels[i].vddc);

	for (i = 0; i < ps->performance_level_count; i++) {
		btc_apply_voltage_delta_rules(adev,
					      max_limits->vddc, max_limits->vddci,
					      &ps->performance_levels[i].vddc,
					      &ps->performance_levels[i].vddci);

	ps->dc_compatible = true;
	for (i = 0; i < ps->performance_level_count; i++) {
		if (ps->performance_levels[i].vddc > adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc)
			ps->dc_compatible = false;

static int si_read_smc_soft_register(struct amdgpu_device *adev,
				     u16 reg_offset, u32 *value)

	struct si_power_info *si_pi = si_get_pi(adev);

	return amdgpu_si_read_smc_sram_dword(adev,
					     si_pi->soft_regs_start + reg_offset, value,

static int si_write_smc_soft_register(struct amdgpu_device *adev,
				      u16 reg_offset, u32 value)

	struct si_power_info *si_pi = si_get_pi(adev);

	return amdgpu_si_write_smc_sram_dword(adev,
					      si_pi->soft_regs_start + reg_offset,
					      value, si_pi->sram_end);

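/*
 * Identify the "special" 1 GB configuration of PCI device 0x6819 that
 * needs different MC handling.  The density below is
 * (1 << (row + column - 20 + bank)) * width; as an illustration,
 * row = 13, column = 10, bank = 3 and width = 16 give
 * (1 << 6) * 16 = 0x400, the value compared against.
 */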
static bool si_is_special_1gb_platform(struct amdgpu_device *adev)

	u32 tmp, width, row, column, bank, density;
	bool is_memory_gddr5, is_special;

	tmp = RREG32(MC_SEQ_MISC0);
	is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE == ((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT));
	is_special = (MC_SEQ_MISC0_REV_ID_VALUE == ((tmp & MC_SEQ_MISC0_REV_ID_MASK) >> MC_SEQ_MISC0_REV_ID_SHIFT))
		& (MC_SEQ_MISC0_VEN_ID_VALUE == ((tmp & MC_SEQ_MISC0_VEN_ID_MASK) >> MC_SEQ_MISC0_VEN_ID_SHIFT));

	WREG32(MC_SEQ_IO_DEBUG_INDEX, 0xb);
	width = ((RREG32(MC_SEQ_IO_DEBUG_DATA) >> 1) & 1) ? 16 : 32;

	tmp = RREG32(MC_ARB_RAMCFG);
	row = ((tmp & NOOFROWS_MASK) >> NOOFROWS_SHIFT) + 10;
	column = ((tmp & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) + 8;
	bank = ((tmp & NOOFBANK_MASK) >> NOOFBANK_SHIFT) + 2;

	density = (1 << (row + column - 20 + bank)) * width;

	if ((adev->pdev->device == 0x6819) &&
	    is_memory_gddr5 && is_special && (density == 0x400))

static void si_get_leakage_vddc(struct amdgpu_device *adev)

	struct si_power_info *si_pi = si_get_pi(adev);
	u16 vddc, count = 0;

	for (i = 0; i < SISLANDS_MAX_LEAKAGE_COUNT; i++) {
		ret = amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(adev, &vddc, SISLANDS_LEAKAGE_INDEX0 + i);

		if (!ret && (vddc > 0) && (vddc != (SISLANDS_LEAKAGE_INDEX0 + i))) {
			si_pi->leakage_voltage.entries[count].voltage = vddc;
			si_pi->leakage_voltage.entries[count].leakage_index =
				SISLANDS_LEAKAGE_INDEX0 + i;

	si_pi->leakage_voltage.count = count;

static int si_get_leakage_voltage_from_leakage_index(struct amdgpu_device *adev,
						     u32 index, u16 *leakage_voltage)

	struct si_power_info *si_pi = si_get_pi(adev);

	if (leakage_voltage == NULL)

	if ((index & 0xff00) != 0xff00)

	if ((index & 0xff) > SISLANDS_MAX_LEAKAGE_COUNT + 1)

	if (index < SISLANDS_LEAKAGE_INDEX0)

	for (i = 0; i < si_pi->leakage_voltage.count; i++) {
		if (si_pi->leakage_voltage.entries[i].leakage_index == index) {
			*leakage_voltage = si_pi->leakage_voltage.entries[i].voltage;

static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)

	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	bool want_thermal_protection;
	enum amdgpu_dpm_event_src dpm_event_src;

		want_thermal_protection = false;

	case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
		want_thermal_protection = true;
		dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;

	case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
		want_thermal_protection = true;
		dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;

	case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
	      (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
		want_thermal_protection = true;
		dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;

	if (want_thermal_protection) {
		WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK);
		if (pi->thermal_protection)
			WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);

		WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);

static void si_enable_auto_throttle_source(struct amdgpu_device *adev,
					   enum amdgpu_dpm_auto_throttle_src source,

	struct rv7xx_power_info *pi = rv770_get_pi(adev);

	if (!(pi->active_auto_throttle_sources & (1 << source))) {
		pi->active_auto_throttle_sources |= 1 << source;
		si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);

	if (pi->active_auto_throttle_sources & (1 << source)) {
		pi->active_auto_throttle_sources &= ~(1 << source);
		si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);

static void si_start_dpm(struct amdgpu_device *adev)

	WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);

static void si_stop_dpm(struct amdgpu_device *adev)

	WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);

static void si_enable_sclk_control(struct amdgpu_device *adev, bool enable)

		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);

		WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);

static int si_notify_hardware_of_thermal_state(struct amdgpu_device *adev,

	if (thermal_level == 0) {
		ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
		if (ret == PPSMC_Result_OK)

static void si_notify_hardware_vpu_recovery_event(struct amdgpu_device *adev)

	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen, true);

static int si_notify_hw_of_powersource(struct amdgpu_device *adev, bool ac_power)

	return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC) == PPSMC_Result_OK) ?

static PPSMC_Result si_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
						      PPSMC_Msg msg, u32 parameter)

	WREG32(SMC_SCRATCH0, parameter);
	return amdgpu_si_send_msg_to_smc(adev, msg);

static int si_restrict_performance_levels_before_switch(struct amdgpu_device *adev)

	if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK)

	return (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) == PPSMC_Result_OK) ?

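/*
 * Back the forced-performance-level interface: "high" enables every level
 * and asks the SMC to force one, "low" restricts the SMC to a single
 * enabled level, and "auto" clears the forced selection and re-enables
 * all levels.
 */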
static int si_dpm_force_performance_level(void *handle,
					  enum amd_dpm_forced_level level)

	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ps *rps = adev->pm.dpm.current_ps;
	struct si_ps *ps = si_get_ps(rps);
	u32 levels = ps->performance_level_count;

	if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)

		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK)

	} else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)

		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK)

	} else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)

		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)

	adev->pm.dpm.forced_level = level;

static int si_set_boot_state(struct amdgpu_device *adev)

	return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToInitialState) == PPSMC_Result_OK) ?

static int si_set_sw_state(struct amdgpu_device *adev)

	return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToSwState) == PPSMC_Result_OK) ?

static int si_halt_smc(struct amdgpu_device *adev)

	if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Halt) != PPSMC_Result_OK)

	return (amdgpu_si_wait_for_smc_inactive(adev) == PPSMC_Result_OK) ?

static int si_resume_smc(struct amdgpu_device *adev)

	if (amdgpu_si_send_msg_to_smc(adev, PPSMC_FlushDataCache) != PPSMC_Result_OK)

	return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Resume) == PPSMC_Result_OK) ?

static void si_dpm_start_smc(struct amdgpu_device *adev)

	amdgpu_si_program_jump_on_start(adev);
	amdgpu_si_start_smc(adev);
	amdgpu_si_smc_clock(adev, true);

static void si_dpm_stop_smc(struct amdgpu_device *adev)

	amdgpu_si_reset_smc(adev);
	amdgpu_si_smc_clock(adev, false);

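/*
 * Read the SMC firmware header out of SMC SRAM and cache the offsets of
 * the state, soft register, MC register, fan, arbiter, CAC, DTE, SPLL and
 * PAPM tables for the later table uploads.
 */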
static int si_process_firmware_header(struct amdgpu_device *adev)

	struct si_power_info *si_pi = si_get_pi(adev);

	ret = amdgpu_si_read_smc_sram_dword(adev,
					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					    SISLANDS_SMC_FIRMWARE_HEADER_stateTable,
					    &tmp, si_pi->sram_end);

	si_pi->state_table_start = tmp;

	ret = amdgpu_si_read_smc_sram_dword(adev,
					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					    SISLANDS_SMC_FIRMWARE_HEADER_softRegisters,
					    &tmp, si_pi->sram_end);

	si_pi->soft_regs_start = tmp;

	ret = amdgpu_si_read_smc_sram_dword(adev,
					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					    SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable,
					    &tmp, si_pi->sram_end);

	si_pi->mc_reg_table_start = tmp;

	ret = amdgpu_si_read_smc_sram_dword(adev,
					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					    SISLANDS_SMC_FIRMWARE_HEADER_fanTable,
					    &tmp, si_pi->sram_end);

	si_pi->fan_table_start = tmp;

	ret = amdgpu_si_read_smc_sram_dword(adev,
					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					    SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable,
					    &tmp, si_pi->sram_end);

	si_pi->arb_table_start = tmp;

	ret = amdgpu_si_read_smc_sram_dword(adev,
					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					    SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable,
					    &tmp, si_pi->sram_end);

	si_pi->cac_table_start = tmp;

	ret = amdgpu_si_read_smc_sram_dword(adev,
					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					    SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration,
					    &tmp, si_pi->sram_end);

	si_pi->dte_table_start = tmp;

	ret = amdgpu_si_read_smc_sram_dword(adev,
					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					    SISLANDS_SMC_FIRMWARE_HEADER_spllTable,
					    &tmp, si_pi->sram_end);

	si_pi->spll_table_start = tmp;

	ret = amdgpu_si_read_smc_sram_dword(adev,
					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
					    SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters,
					    &tmp, si_pi->sram_end);

	si_pi->papm_cfg_table_start = tmp;

static void si_read_clock_registers(struct amdgpu_device *adev)

	struct si_power_info *si_pi = si_get_pi(adev);

	si_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
	si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2);
	si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3);
	si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4);
	si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM);
	si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
	si_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
	si_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
	si_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
	si_pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
	si_pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
	si_pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
	si_pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
	si_pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
	si_pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);

static void si_enable_thermal_protection(struct amdgpu_device *adev,

		WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);

		WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);

static void si_enable_acpi_power_management(struct amdgpu_device *adev)

	WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);

static int si_enter_ulp_state(struct amdgpu_device *adev)

	WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);

static int si_exit_ulp_state(struct amdgpu_device *adev)

	WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);

	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(SMC_RESP_0) == 1)

static int si_notify_smc_display_change(struct amdgpu_device *adev,

	PPSMC_Msg msg = has_display ?
		PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;

	return (amdgpu_si_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ?

static void si_program_response_times(struct amdgpu_device *adev)

	u32 voltage_response_time, acpi_delay_time, vbi_time_out;
	u32 vddc_dly, acpi_dly, vbi_dly;
	u32 reference_clock;

	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);

	voltage_response_time = (u32)adev->pm.dpm.voltage_response_time;

	if (voltage_response_time == 0)
		voltage_response_time = 1000;

	acpi_delay_time = 15000;
	vbi_time_out = 100000;

	reference_clock = amdgpu_asic_get_xclk(adev);

	vddc_dly = (voltage_response_time * reference_clock) / 100;
	acpi_dly = (acpi_delay_time * reference_clock) / 100;
	vbi_dly = (vbi_time_out * reference_clock) / 100;

	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_vreg, vddc_dly);
	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_acpi, acpi_dly);
	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly);
	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mc_block_delay, 0xAA);

static void si_program_ds_registers(struct amdgpu_device *adev)

	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);

	/* DEEP_SLEEP_CLK_SEL field should be 0x10 on tahiti A0 */
	if (adev->asic_type == CHIP_TAHITI && adev->rev_id == 0x0)

	if (eg_pi->sclk_deep_sleep) {
		WREG32_P(MISC_CLK_CNTL, DEEP_SLEEP_CLK_SEL(tmp), ~DEEP_SLEEP_CLK_SEL_MASK);
		WREG32_P(CG_SPLL_AUTOSCALE_CNTL, AUTOSCALE_ON_SS_CLEAR,
			 ~AUTOSCALE_ON_SS_CLEAR);

static void si_program_display_gap(struct amdgpu_device *adev)

	tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
	if (adev->pm.dpm.new_active_crtc_count > 0)
		tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);

		tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE);

	if (adev->pm.dpm.new_active_crtc_count > 1)
		tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);

		tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE);

	WREG32(CG_DISPLAY_GAP_CNTL, tmp);

	tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG);
	pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT;

	if ((adev->pm.dpm.new_active_crtc_count > 0) &&
	    (!(adev->pm.dpm.new_active_crtcs & (1 << pipe)))) {
		/* find the first active crtc */
		for (i = 0; i < adev->mode_info.num_crtc; i++) {
			if (adev->pm.dpm.new_active_crtcs & (1 << i))

		if (i == adev->mode_info.num_crtc)

		tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK;
		tmp |= DCCG_DISP1_SLOW_SELECT(pipe);
		WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp);

	/* Setting this to false forces the performance state to low if the crtcs are disabled.
	 * This can be a problem on PowerXpress systems or if you want to use the card
	 * for offscreen rendering or compute if there are no crtcs enabled.
	 */
	si_notify_smc_display_change(adev, adev->pm.dpm.new_active_crtc_count > 0);

static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)

	struct rv7xx_power_info *pi = rv770_get_pi(adev);

		WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN);

		WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN);
		WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN);

static void si_setup_bsp(struct amdgpu_device *adev)

	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	u32 xclk = amdgpu_asic_get_xclk(adev);

	r600_calculate_u_and_p(pi->asi,

	r600_calculate_u_and_p(pi->pasi,

	pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
	pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);

	WREG32(CG_BSP, pi->dsp);

static void si_program_git(struct amdgpu_device *adev)

	WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK);

static void si_program_tp(struct amdgpu_device *adev)

	enum r600_td td = R600_TD_DFLT;

	for (i = 0; i < R600_PM_NUMBER_OF_TC; i++)
		WREG32(CG_FFCT_0 + i, (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i])));

	if (td == R600_TD_AUTO)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);

		WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);

	if (td == R600_TD_UP)
		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);

	if (td == R600_TD_DOWN)
		WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);

static void si_program_tpp(struct amdgpu_device *adev)

	WREG32(CG_TPC, R600_TPC_DFLT);

static void si_program_sstp(struct amdgpu_device *adev)

	WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));

static void si_enable_display_gap(struct amdgpu_device *adev)

	u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);

	tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
	tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
		DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE));

	tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
	tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) |
		DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
	WREG32(CG_DISPLAY_GAP_CNTL, tmp);

static void si_program_vc(struct amdgpu_device *adev)

	struct rv7xx_power_info *pi = rv770_get_pi(adev);

	WREG32(CG_FTV, pi->vrc);

static void si_clear_vc(struct amdgpu_device *adev)

static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)

	if (memory_clock < 10000)

	else if (memory_clock >= 80000)
		mc_para_index = 0x0f;

		mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
	return mc_para_index;

static u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)

	if (memory_clock < 12500)
		mc_para_index = 0x00;
	else if (memory_clock > 47500)
		mc_para_index = 0x0f;

		mc_para_index = (u8)((memory_clock - 10000) / 2500);

	if (memory_clock < 65000)
		mc_para_index = 0x00;
	else if (memory_clock > 135000)
		mc_para_index = 0x0f;

		mc_para_index = (u8)((memory_clock - 60000) / 5000);

	return mc_para_index;

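/*
 * Illustration of the ratio index above, assuming memory_clock is in
 * 10 kHz units: a 300 MHz DDR3 memory clock (memory_clock = 30000) maps
 * to (30000 - 10000) / 5000 + 1 = 5, while clocks below the low cutoff
 * use index 0x00 and clocks past the top of the range clamp to 0x0f.
 */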
static u8 si_get_strobe_mode_settings(struct amdgpu_device *adev, u32 mclk)

	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	bool strobe_mode = false;

	if (mclk <= pi->mclk_strobe_mode_threshold)

	if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
		result = si_get_mclk_frequency_ratio(mclk, strobe_mode);

		result = si_get_ddr3_mclk_frequency_ratio(mclk);

		result |= SISLANDS_SMC_STROBE_ENABLE;

static int si_upload_firmware(struct amdgpu_device *adev)

	struct si_power_info *si_pi = si_get_pi(adev);

	amdgpu_si_reset_smc(adev);
	amdgpu_si_smc_clock(adev, false);

	return amdgpu_si_load_smc_ucode(adev, si_pi->sram_end);

4355 static bool si_validate_phase_shedding_tables(struct amdgpu_device
*adev
,
4356 const struct atom_voltage_table
*table
,
4357 const struct amdgpu_phase_shedding_limits_table
*limits
)
4359 u32 data
, num_bits
, num_levels
;
4361 if ((table
== NULL
) || (limits
== NULL
))
4364 data
= table
->mask_low
;
4366 num_bits
= hweight32(data
);
4371 num_levels
= (1 << num_bits
);
4373 if (table
->count
!= num_levels
)
4376 if (limits
->count
!= (num_levels
- 1))
4382 static void si_trim_voltage_table_to_fit_state_table(struct amdgpu_device
*adev
,
4383 u32 max_voltage_steps
,
4384 struct atom_voltage_table
*voltage_table
)
4386 unsigned int i
, diff
;
4388 if (voltage_table
->count
<= max_voltage_steps
)
4391 diff
= voltage_table
->count
- max_voltage_steps
;
4393 for (i
= 0; i
< max_voltage_steps
; i
++)
4394 voltage_table
->entries
[i
] = voltage_table
->entries
[i
+ diff
];
4396 voltage_table
->count
= max_voltage_steps
;
4399 static int si_get_svi2_voltage_table(struct amdgpu_device
*adev
,
4400 struct amdgpu_clock_voltage_dependency_table
*voltage_dependency_table
,
4401 struct atom_voltage_table
*voltage_table
)
4405 if (voltage_dependency_table
== NULL
)
4408 voltage_table
->mask_low
= 0;
4409 voltage_table
->phase_delay
= 0;
4411 voltage_table
->count
= voltage_dependency_table
->count
;
4412 for (i
= 0; i
< voltage_table
->count
; i
++) {
4413 voltage_table
->entries
[i
].value
= voltage_dependency_table
->entries
[i
].v
;
4414 voltage_table
->entries
[i
].smio_low
= 0;
4420 static int si_construct_voltage_tables(struct amdgpu_device
*adev
)
4422 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
4423 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
4424 struct si_power_info
*si_pi
= si_get_pi(adev
);
4427 if (pi
->voltage_control
) {
4428 ret
= amdgpu_atombios_get_voltage_table(adev
, VOLTAGE_TYPE_VDDC
,
4429 VOLTAGE_OBJ_GPIO_LUT
, &eg_pi
->vddc_voltage_table
);
4433 if (eg_pi
->vddc_voltage_table
.count
> SISLANDS_MAX_NO_VREG_STEPS
)
4434 si_trim_voltage_table_to_fit_state_table(adev
,
4435 SISLANDS_MAX_NO_VREG_STEPS
,
4436 &eg_pi
->vddc_voltage_table
);
4437 } else if (si_pi
->voltage_control_svi2
) {
4438 ret
= si_get_svi2_voltage_table(adev
,
4439 &adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_mclk
,
4440 &eg_pi
->vddc_voltage_table
);
4447 if (eg_pi
->vddci_control
) {
4448 ret
= amdgpu_atombios_get_voltage_table(adev
, VOLTAGE_TYPE_VDDCI
,
4449 VOLTAGE_OBJ_GPIO_LUT
, &eg_pi
->vddci_voltage_table
);
4453 if (eg_pi
->vddci_voltage_table
.count
> SISLANDS_MAX_NO_VREG_STEPS
)
4454 si_trim_voltage_table_to_fit_state_table(adev
,
4455 SISLANDS_MAX_NO_VREG_STEPS
,
4456 &eg_pi
->vddci_voltage_table
);
4458 if (si_pi
->vddci_control_svi2
) {
4459 ret
= si_get_svi2_voltage_table(adev
,
4460 &adev
->pm
.dpm
.dyn_state
.vddci_dependency_on_mclk
,
4461 &eg_pi
->vddci_voltage_table
);
4466 if (pi
->mvdd_control
) {
4467 ret
= amdgpu_atombios_get_voltage_table(adev
, VOLTAGE_TYPE_MVDDC
,
4468 VOLTAGE_OBJ_GPIO_LUT
, &si_pi
->mvdd_voltage_table
);
4471 pi
->mvdd_control
= false;
4475 if (si_pi
->mvdd_voltage_table
.count
== 0) {
4476 pi
->mvdd_control
= false;
4480 if (si_pi
->mvdd_voltage_table
.count
> SISLANDS_MAX_NO_VREG_STEPS
)
4481 si_trim_voltage_table_to_fit_state_table(adev
,
4482 SISLANDS_MAX_NO_VREG_STEPS
,
4483 &si_pi
->mvdd_voltage_table
);
4486 if (si_pi
->vddc_phase_shed_control
) {
4487 ret
= amdgpu_atombios_get_voltage_table(adev
, VOLTAGE_TYPE_VDDC
,
4488 VOLTAGE_OBJ_PHASE_LUT
, &si_pi
->vddc_phase_shed_table
);
4490 si_pi
->vddc_phase_shed_control
= false;
4492 if ((si_pi
->vddc_phase_shed_table
.count
== 0) ||
4493 (si_pi
->vddc_phase_shed_table
.count
> SISLANDS_MAX_NO_VREG_STEPS
))
4494 si_pi
->vddc_phase_shed_control
= false;
4500 static void si_populate_smc_voltage_table(struct amdgpu_device
*adev
,
4501 const struct atom_voltage_table
*voltage_table
,
4502 SISLANDS_SMC_STATETABLE
*table
)
4506 for (i
= 0; i
< voltage_table
->count
; i
++)
4507 table
->lowSMIO
[i
] |= cpu_to_be32(voltage_table
->entries
[i
].smio_low
);
4510 static int si_populate_smc_voltage_tables(struct amdgpu_device
*adev
,
4511 SISLANDS_SMC_STATETABLE
*table
)
4513 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
4514 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
4515 struct si_power_info
*si_pi
= si_get_pi(adev
);
4518 if (si_pi
->voltage_control_svi2
) {
4519 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc
,
4520 si_pi
->svc_gpio_id
);
4521 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd
,
4522 si_pi
->svd_gpio_id
);
4523 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_svi_rework_plat_type
,
4526 if (eg_pi
->vddc_voltage_table
.count
) {
4527 si_populate_smc_voltage_table(adev
, &eg_pi
->vddc_voltage_table
, table
);
4528 table
->voltageMaskTable
.lowMask
[SISLANDS_SMC_VOLTAGEMASK_VDDC
] =
4529 cpu_to_be32(eg_pi
->vddc_voltage_table
.mask_low
);
4531 for (i
= 0; i
< eg_pi
->vddc_voltage_table
.count
; i
++) {
4532 if (pi
->max_vddc_in_table
<= eg_pi
->vddc_voltage_table
.entries
[i
].value
) {
4533 table
->maxVDDCIndexInPPTable
= i
;
4539 if (eg_pi
->vddci_voltage_table
.count
) {
4540 si_populate_smc_voltage_table(adev
, &eg_pi
->vddci_voltage_table
, table
);
4542 table
->voltageMaskTable
.lowMask
[SISLANDS_SMC_VOLTAGEMASK_VDDCI
] =
4543 cpu_to_be32(eg_pi
->vddci_voltage_table
.mask_low
);
4547 if (si_pi
->mvdd_voltage_table
.count
) {
4548 si_populate_smc_voltage_table(adev
, &si_pi
->mvdd_voltage_table
, table
);
4550 table
->voltageMaskTable
.lowMask
[SISLANDS_SMC_VOLTAGEMASK_MVDD
] =
4551 cpu_to_be32(si_pi
->mvdd_voltage_table
.mask_low
);
4554 if (si_pi
->vddc_phase_shed_control
) {
4555 if (si_validate_phase_shedding_tables(adev
, &si_pi
->vddc_phase_shed_table
,
4556 &adev
->pm
.dpm
.dyn_state
.phase_shedding_limits_table
)) {
4557 si_populate_smc_voltage_table(adev
, &si_pi
->vddc_phase_shed_table
, table
);
4559 table
->phaseMaskTable
.lowMask
[SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING
] =
4560 cpu_to_be32(si_pi
->vddc_phase_shed_table
.mask_low
);
4562 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_phase_shedding_delay
,
4563 (u32
)si_pi
->vddc_phase_shed_table
.phase_delay
);
4565 si_pi
->vddc_phase_shed_control
= false;
4573 static int si_populate_voltage_value(struct amdgpu_device
*adev
,
4574 const struct atom_voltage_table
*table
,
4575 u16 value
, SISLANDS_SMC_VOLTAGE_VALUE
*voltage
)
4579 for (i
= 0; i
< table
->count
; i
++) {
4580 if (value
<= table
->entries
[i
].value
) {
4581 voltage
->index
= (u8
)i
;
4582 voltage
->value
= cpu_to_be16(table
->entries
[i
].value
);
4587 if (i
>= table
->count
)
4593 static int si_populate_mvdd_value(struct amdgpu_device
*adev
, u32 mclk
,
4594 SISLANDS_SMC_VOLTAGE_VALUE
*voltage
)
4596 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
4597 struct si_power_info
*si_pi
= si_get_pi(adev
);
4599 if (pi
->mvdd_control
) {
4600 if (mclk
<= pi
->mvdd_split_frequency
)
4603 voltage
->index
= (u8
)(si_pi
->mvdd_voltage_table
.count
) - 1;
4605 voltage
->value
= cpu_to_be16(si_pi
->mvdd_voltage_table
.entries
[voltage
->index
].value
);
4610 static int si_get_std_voltage_value(struct amdgpu_device
*adev
,
4611 SISLANDS_SMC_VOLTAGE_VALUE
*voltage
,
4615 bool voltage_found
= false;
4616 *std_voltage
= be16_to_cpu(voltage
->value
);
4618 if (adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.entries
) {
4619 if (adev
->pm
.dpm
.platform_caps
& ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE
) {
4620 if (adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
.entries
== NULL
)
4623 for (v_index
= 0; (u32
)v_index
< adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
.count
; v_index
++) {
4624 if (be16_to_cpu(voltage
->value
) ==
4625 (u16
)adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
.entries
[v_index
].v
) {
4626 voltage_found
= true;
4627 if ((u32
)v_index
< adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.count
)
4629 adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.entries
[v_index
].vddc
;
4632 adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.entries
[adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.count
-1].vddc
;
4637 if (!voltage_found
) {
4638 for (v_index
= 0; (u32
)v_index
< adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
.count
; v_index
++) {
4639 if (be16_to_cpu(voltage
->value
) <=
4640 (u16
)adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
.entries
[v_index
].v
) {
4641 voltage_found
= true;
4642 if ((u32
)v_index
< adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.count
)
4644 adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.entries
[v_index
].vddc
;
4647 adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.entries
[adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.count
-1].vddc
;
4653 if ((u32
)voltage
->index
< adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.count
)
4654 *std_voltage
= adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.entries
[voltage
->index
].vddc
;
4661 static int si_populate_std_voltage_value(struct amdgpu_device
*adev
,
4662 u16 value
, u8 index
,
4663 SISLANDS_SMC_VOLTAGE_VALUE
*voltage
)
4665 voltage
->index
= index
;
4666 voltage
->value
= cpu_to_be16(value
);
4671 static int si_populate_phase_shedding_value(struct amdgpu_device
*adev
,
4672 const struct amdgpu_phase_shedding_limits_table
*limits
,
4673 u16 voltage
, u32 sclk
, u32 mclk
,
4674 SISLANDS_SMC_VOLTAGE_VALUE
*smc_voltage
)
4678 for (i
= 0; i
< limits
->count
; i
++) {
4679 if ((voltage
<= limits
->entries
[i
].voltage
) &&
4680 (sclk
<= limits
->entries
[i
].sclk
) &&
4681 (mclk
<= limits
->entries
[i
].mclk
))
4685 smc_voltage
->phase_settings
= (u8
)i
;
4690 static int si_init_arb_table_index(struct amdgpu_device
*adev
)
4692 struct si_power_info
*si_pi
= si_get_pi(adev
);
4696 ret
= amdgpu_si_read_smc_sram_dword(adev
, si_pi
->arb_table_start
,
4697 &tmp
, si_pi
->sram_end
);
4702 tmp
|= MC_CG_ARB_FREQ_F1
<< 24;
4704 return amdgpu_si_write_smc_sram_dword(adev
, si_pi
->arb_table_start
,
4705 tmp
, si_pi
->sram_end
);
4708 static int si_initial_switch_from_arb_f0_to_f1(struct amdgpu_device
*adev
)
4710 return ni_copy_and_switch_arb_sets(adev
, MC_CG_ARB_FREQ_F0
, MC_CG_ARB_FREQ_F1
);
4713 static int si_reset_to_default(struct amdgpu_device
*adev
)
4715 return (amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_ResetToDefaults
) == PPSMC_Result_OK
) ?
4719 static int si_force_switch_to_arb_f0(struct amdgpu_device
*adev
)
4721 struct si_power_info
*si_pi
= si_get_pi(adev
);
4725 ret
= amdgpu_si_read_smc_sram_dword(adev
, si_pi
->arb_table_start
,
4726 &tmp
, si_pi
->sram_end
);
4730 tmp
= (tmp
>> 24) & 0xff;
4732 if (tmp
== MC_CG_ARB_FREQ_F0
)
4735 return ni_copy_and_switch_arb_sets(adev
, tmp
, MC_CG_ARB_FREQ_F0
);
static u32 si_calculate_memory_refresh_rate(struct amdgpu_device *adev,

	u32 dram_refresh_rate;
	u32 mc_arb_rfsh_rate;
	u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;

	dram_rows = 1 << (tmp + 10);

	dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3);
	mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;

	return mc_arb_rfsh_rate;

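/*
 * Numeric illustration of the formula above (values chosen only as an
 * example): engine_clock = 80000 in 10 kHz units, dram_rows = 16384 and
 * dram_refresh_rate = 16 give
 * ((80000 * 10) * 16 / 16384 - 32) / 64 = (781 - 32) / 64 = 11.
 */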
4757 static int si_populate_memory_timing_parameters(struct amdgpu_device
*adev
,
4758 struct rv7xx_pl
*pl
,
4759 SMC_SIslands_MCArbDramTimingRegisterSet
*arb_regs
)
4765 arb_regs
->mc_arb_rfsh_rate
=
4766 (u8
)si_calculate_memory_refresh_rate(adev
, pl
->sclk
);
4768 amdgpu_atombios_set_engine_dram_timings(adev
,
4772 dram_timing
= RREG32(MC_ARB_DRAM_TIMING
);
4773 dram_timing2
= RREG32(MC_ARB_DRAM_TIMING2
);
4774 burst_time
= RREG32(MC_ARB_BURST_TIME
) & STATE0_MASK
;
4776 arb_regs
->mc_arb_dram_timing
= cpu_to_be32(dram_timing
);
4777 arb_regs
->mc_arb_dram_timing2
= cpu_to_be32(dram_timing2
);
4778 arb_regs
->mc_arb_burst_time
= (u8
)burst_time
;
4783 static int si_do_program_memory_timing_parameters(struct amdgpu_device
*adev
,
4784 struct amdgpu_ps
*amdgpu_state
,
4785 unsigned int first_arb_set
)
4787 struct si_power_info
*si_pi
= si_get_pi(adev
);
4788 struct si_ps
*state
= si_get_ps(amdgpu_state
);
4789 SMC_SIslands_MCArbDramTimingRegisterSet arb_regs
= { 0 };
4792 for (i
= 0; i
< state
->performance_level_count
; i
++) {
4793 ret
= si_populate_memory_timing_parameters(adev
, &state
->performance_levels
[i
], &arb_regs
);
4796 ret
= amdgpu_si_copy_bytes_to_smc(adev
,
4797 si_pi
->arb_table_start
+
4798 offsetof(SMC_SIslands_MCArbDramTimingRegisters
, data
) +
4799 sizeof(SMC_SIslands_MCArbDramTimingRegisterSet
) * (first_arb_set
+ i
),
4801 sizeof(SMC_SIslands_MCArbDramTimingRegisterSet
),
4810 static int si_program_memory_timing_parameters(struct amdgpu_device
*adev
,
4811 struct amdgpu_ps
*amdgpu_new_state
)
4813 return si_do_program_memory_timing_parameters(adev
, amdgpu_new_state
,
4814 SISLANDS_DRIVER_STATE_ARB_INDEX
);
4817 static int si_populate_initial_mvdd_value(struct amdgpu_device
*adev
,
4818 struct SISLANDS_SMC_VOLTAGE_VALUE
*voltage
)
4820 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
4821 struct si_power_info
*si_pi
= si_get_pi(adev
);
4823 if (pi
->mvdd_control
)
4824 return si_populate_voltage_value(adev
, &si_pi
->mvdd_voltage_table
,
4825 si_pi
->mvdd_bootup_value
, voltage
);
4830 static int si_populate_smc_initial_state(struct amdgpu_device
*adev
,
4831 struct amdgpu_ps
*amdgpu_initial_state
,
4832 SISLANDS_SMC_STATETABLE
*table
)
4834 struct si_ps
*initial_state
= si_get_ps(amdgpu_initial_state
);
4835 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
4836 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
4837 struct si_power_info
*si_pi
= si_get_pi(adev
);
4841 table
->initialState
.levels
[0].mclk
.vDLL_CNTL
=
4842 cpu_to_be32(si_pi
->clock_registers
.dll_cntl
);
4843 table
->initialState
.levels
[0].mclk
.vMCLK_PWRMGT_CNTL
=
4844 cpu_to_be32(si_pi
->clock_registers
.mclk_pwrmgt_cntl
);
4845 table
->initialState
.levels
[0].mclk
.vMPLL_AD_FUNC_CNTL
=
4846 cpu_to_be32(si_pi
->clock_registers
.mpll_ad_func_cntl
);
4847 table
->initialState
.levels
[0].mclk
.vMPLL_DQ_FUNC_CNTL
=
4848 cpu_to_be32(si_pi
->clock_registers
.mpll_dq_func_cntl
);
4849 table
->initialState
.levels
[0].mclk
.vMPLL_FUNC_CNTL
=
4850 cpu_to_be32(si_pi
->clock_registers
.mpll_func_cntl
);
4851 table
->initialState
.levels
[0].mclk
.vMPLL_FUNC_CNTL_1
=
4852 cpu_to_be32(si_pi
->clock_registers
.mpll_func_cntl_1
);
4853 table
->initialState
.levels
[0].mclk
.vMPLL_FUNC_CNTL_2
=
4854 cpu_to_be32(si_pi
->clock_registers
.mpll_func_cntl_2
);
4855 table
->initialState
.levels
[0].mclk
.vMPLL_SS
=
4856 cpu_to_be32(si_pi
->clock_registers
.mpll_ss1
);
4857 table
->initialState
.levels
[0].mclk
.vMPLL_SS2
=
4858 cpu_to_be32(si_pi
->clock_registers
.mpll_ss2
);
4860 table
->initialState
.levels
[0].mclk
.mclk_value
=
4861 cpu_to_be32(initial_state
->performance_levels
[0].mclk
);
4863 table
->initialState
.levels
[0].sclk
.vCG_SPLL_FUNC_CNTL
=
4864 cpu_to_be32(si_pi
->clock_registers
.cg_spll_func_cntl
);
4865 table
->initialState
.levels
[0].sclk
.vCG_SPLL_FUNC_CNTL_2
=
4866 cpu_to_be32(si_pi
->clock_registers
.cg_spll_func_cntl_2
);
4867 table
->initialState
.levels
[0].sclk
.vCG_SPLL_FUNC_CNTL_3
=
4868 cpu_to_be32(si_pi
->clock_registers
.cg_spll_func_cntl_3
);
4869 table
->initialState
.levels
[0].sclk
.vCG_SPLL_FUNC_CNTL_4
=
4870 cpu_to_be32(si_pi
->clock_registers
.cg_spll_func_cntl_4
);
4871 table
->initialState
.levels
[0].sclk
.vCG_SPLL_SPREAD_SPECTRUM
=
4872 cpu_to_be32(si_pi
->clock_registers
.cg_spll_spread_spectrum
);
4873 table
->initialState
.levels
[0].sclk
.vCG_SPLL_SPREAD_SPECTRUM_2
=
4874 cpu_to_be32(si_pi
->clock_registers
.cg_spll_spread_spectrum_2
);
4876 table
->initialState
.levels
[0].sclk
.sclk_value
=
4877 cpu_to_be32(initial_state
->performance_levels
[0].sclk
);
4879 table
->initialState
.levels
[0].arbRefreshState
=
4880 SISLANDS_INITIAL_STATE_ARB_INDEX
;
4882 table
->initialState
.levels
[0].ACIndex
= 0;
4884 ret
= si_populate_voltage_value(adev
, &eg_pi
->vddc_voltage_table
,
4885 initial_state
->performance_levels
[0].vddc
,
4886 &table
->initialState
.levels
[0].vddc
);
4891 ret
= si_get_std_voltage_value(adev
,
4892 &table
->initialState
.levels
[0].vddc
,
4895 si_populate_std_voltage_value(adev
, std_vddc
,
4896 table
->initialState
.levels
[0].vddc
.index
,
4897 &table
->initialState
.levels
[0].std_vddc
);
4900 if (eg_pi
->vddci_control
)
4901 si_populate_voltage_value(adev
,
4902 &eg_pi
->vddci_voltage_table
,
4903 initial_state
->performance_levels
[0].vddci
,
4904 &table
->initialState
.levels
[0].vddci
);
4906 if (si_pi
->vddc_phase_shed_control
)
4907 si_populate_phase_shedding_value(adev
,
4908 &adev
->pm
.dpm
.dyn_state
.phase_shedding_limits_table
,
4909 initial_state
->performance_levels
[0].vddc
,
4910 initial_state
->performance_levels
[0].sclk
,
4911 initial_state
->performance_levels
[0].mclk
,
4912 &table
->initialState
.levels
[0].vddc
);
4914 si_populate_initial_mvdd_value(adev
, &table
->initialState
.levels
[0].mvdd
);
4916 reg
= CG_R(0xffff) | CG_L(0);
4917 table
->initialState
.levels
[0].aT
= cpu_to_be32(reg
);
4918 table
->initialState
.levels
[0].bSP
= cpu_to_be32(pi
->dsp
);
4919 table
->initialState
.levels
[0].gen2PCIE
= (u8
)si_pi
->boot_pcie_gen
;
4921 if (adev
->gmc
.vram_type
== AMDGPU_VRAM_TYPE_GDDR5
) {
4922 table
->initialState
.levels
[0].strobeMode
=
4923 si_get_strobe_mode_settings(adev
,
4924 initial_state
->performance_levels
[0].mclk
);
4926 if (initial_state
->performance_levels
[0].mclk
> pi
->mclk_edc_enable_threshold
)
4927 table
->initialState
.levels
[0].mcFlags
= SISLANDS_SMC_MC_EDC_RD_FLAG
| SISLANDS_SMC_MC_EDC_WR_FLAG
;
4929 table
->initialState
.levels
[0].mcFlags
= 0;
4932 table
->initialState
.levelCount
= 1;
4934 table
->initialState
.flags
|= PPSMC_SWSTATE_FLAG_DC
;
4936 table
->initialState
.levels
[0].dpm2
.MaxPS
= 0;
4937 table
->initialState
.levels
[0].dpm2
.NearTDPDec
= 0;
4938 table
->initialState
.levels
[0].dpm2
.AboveSafeInc
= 0;
4939 table
->initialState
.levels
[0].dpm2
.BelowSafeInc
= 0;
4940 table
->initialState
.levels
[0].dpm2
.PwrEfficiencyRatio
= 0;
4942 reg
= MIN_POWER_MASK
| MAX_POWER_MASK
;
4943 table
->initialState
.levels
[0].SQPowerThrottle
= cpu_to_be32(reg
);
4945 reg
= MAX_POWER_DELTA_MASK
| STI_SIZE_MASK
| LTI_RATIO_MASK
;
4946 table
->initialState
.levels
[0].SQPowerThrottle_2
= cpu_to_be32(reg
);
static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
				      SISLANDS_SMC_STATETABLE *table)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
	u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
	u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
	u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
	u32 dll_cntl = si_pi->clock_registers.dll_cntl;
	u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
	u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
	u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
	u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
	u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
	u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
	u32 reg;
	int ret;

	table->ACPIState = table->initialState;

	table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;

	if (pi->acpi_vddc) {
		ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
						pi->acpi_vddc, &table->ACPIState.levels[0].vddc);
		if (!ret) {
			u16 std_vddc;

			ret = si_get_std_voltage_value(adev,
						       &table->ACPIState.levels[0].vddc, &std_vddc);
			if (!ret)
				si_populate_std_voltage_value(adev, std_vddc,
							      table->ACPIState.levels[0].vddc.index,
							      &table->ACPIState.levels[0].std_vddc);
		}
		table->ACPIState.levels[0].gen2PCIE = si_pi->acpi_pcie_gen;

		if (si_pi->vddc_phase_shed_control) {
			si_populate_phase_shedding_value(adev,
							 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
							 pi->acpi_vddc,
							 0,
							 0,
							 &table->ACPIState.levels[0].vddc);
		}
	} else {
		ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
						pi->min_vddc_in_table, &table->ACPIState.levels[0].vddc);
		if (!ret) {
			u16 std_vddc;

			ret = si_get_std_voltage_value(adev,
						       &table->ACPIState.levels[0].vddc, &std_vddc);
			if (!ret)
				si_populate_std_voltage_value(adev, std_vddc,
							      table->ACPIState.levels[0].vddc.index,
							      &table->ACPIState.levels[0].std_vddc);
		}
		table->ACPIState.levels[0].gen2PCIE =
			(u8)amdgpu_get_pcie_gen_support(adev,
							si_pi->sys_pcie_mask,
							si_pi->boot_pcie_gen,
							AMDGPU_PCIE_GEN1);

		if (si_pi->vddc_phase_shed_control)
			si_populate_phase_shedding_value(adev,
							 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
							 pi->min_vddc_in_table,
							 0,
							 0,
							 &table->ACPIState.levels[0].vddc);
	}

	if (pi->acpi_vddc) {
		if (eg_pi->acpi_vddci)
			si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
						  eg_pi->acpi_vddci,
						  &table->ACPIState.levels[0].vddci);
	}

	mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
	mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);

	dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);

	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= SCLK_MUX_SEL(4);

	table->ACPIState.levels[0].mclk.vDLL_CNTL = cpu_to_be32(dll_cntl);
	table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
	table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
	table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
	table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl);
	table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_1 = cpu_to_be32(mpll_func_cntl_1);
	table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_2 = cpu_to_be32(mpll_func_cntl_2);
	table->ACPIState.levels[0].mclk.vMPLL_SS = cpu_to_be32(si_pi->clock_registers.mpll_ss1);
	table->ACPIState.levels[0].mclk.vMPLL_SS2 = cpu_to_be32(si_pi->clock_registers.mpll_ss2);

	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL = cpu_to_be32(spll_func_cntl);
	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(spll_func_cntl_2);
	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(spll_func_cntl_3);
	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(spll_func_cntl_4);

	table->ACPIState.levels[0].mclk.mclk_value = 0;
	table->ACPIState.levels[0].sclk.sclk_value = 0;

	si_populate_mvdd_value(adev, 0, &table->ACPIState.levels[0].mvdd);

	if (eg_pi->dynamic_ac_timing)
		table->ACPIState.levels[0].ACIndex = 0;

	table->ACPIState.levels[0].dpm2.MaxPS = 0;
	table->ACPIState.levels[0].dpm2.NearTDPDec = 0;
	table->ACPIState.levels[0].dpm2.AboveSafeInc = 0;
	table->ACPIState.levels[0].dpm2.BelowSafeInc = 0;
	table->ACPIState.levels[0].dpm2.PwrEfficiencyRatio = 0;

	reg = MIN_POWER_MASK | MAX_POWER_MASK;
	table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg);

	reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
	table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);

	return 0;
}

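/*
 * si_populate_ulv_state() - build the single-level ULV (ultra low voltage)
 * software state from the ULV performance level, including the deep-sleep
 * flags and, optionally, the single PCIe lane request.
 */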
static int si_populate_ulv_state(struct amdgpu_device *adev,
				 SISLANDS_SMC_SWSTATE *state)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	struct si_ulv_param *ulv = &si_pi->ulv;
	u32 sclk_in_sr = 1350; /* ??? */
	int ret;

	ret = si_convert_power_level_to_smc(adev, &ulv->pl,
					    &state->levels[0]);
	if (!ret) {
		if (eg_pi->sclk_deep_sleep) {
			if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
				state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
			else
				state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
		}
		if (ulv->one_pcie_lane_in_ulv)
			state->flags |= PPSMC_SWSTATE_FLAG_PCIE_X1;
		state->levels[0].arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX);
		state->levels[0].ACIndex = 1;
		state->levels[0].std_vddc = state->levels[0].vddc;
		state->levelCount = 1;

		state->flags |= PPSMC_SWSTATE_FLAG_DC;
	}

	return ret;
}

static int si_program_ulv_memory_timing_parameters(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	struct si_ulv_param *ulv = &si_pi->ulv;
	SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
	int ret;

	ret = si_populate_memory_timing_parameters(adev, &ulv->pl,
						   &arb_regs);
	if (ret)
		return ret;

	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ulv_volt_change_delay,
				   ulv->volt_change_delay);

	ret = amdgpu_si_copy_bytes_to_smc(adev,
					  si_pi->arb_table_start +
					  offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
					  sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * SISLANDS_ULV_STATE_ARB_INDEX,
					  (u8 *)&arb_regs,
					  sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
					  si_pi->sram_end);

	return ret;
}

static void si_get_mvdd_configuration(struct amdgpu_device *adev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);

	pi->mvdd_split_frequency = 30000;
}

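/*
 * si_init_smc_table() - construct the SMC state table (initial, ACPI,
 * driver and ULV states plus the platform flags) and upload it to SMC RAM
 * at state_table_start.
 */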
static int si_init_smc_table(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
	const struct si_ulv_param *ulv = &si_pi->ulv;
	SISLANDS_SMC_STATETABLE *table = &si_pi->smc_statetable;
	u32 lane_width;
	u32 vr_hot_gpio;
	int ret;

	si_populate_smc_voltage_tables(adev, table);

	switch (adev->pm.int_thermal_type) {
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
		break;
	case THERMAL_TYPE_NONE:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
		break;
	default:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
		break;
	}

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
		table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT) {
		if ((adev->pdev->device != 0x6818) && (adev->pdev->device != 0x6819))
			table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;
	}

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
		table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;

	if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
		table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY)
		table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH;

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE) {
		table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO;
		vr_hot_gpio = adev->pm.dpm.backbias_response_time;
		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_vr_hot_gpio,
					   vr_hot_gpio);
	}

	ret = si_populate_smc_initial_state(adev, amdgpu_boot_state, table);
	if (ret)
		return ret;

	ret = si_populate_smc_acpi_state(adev, table);
	if (ret)
		return ret;

	table->driverState = table->initialState;

	ret = si_do_program_memory_timing_parameters(adev, amdgpu_boot_state,
						     SISLANDS_INITIAL_STATE_ARB_INDEX);
	if (ret)
		return ret;

	if (ulv->supported && ulv->pl.vddc) {
		ret = si_populate_ulv_state(adev, &table->ULVState);
		if (ret)
			return ret;

		ret = si_program_ulv_memory_timing_parameters(adev);
		if (ret)
			return ret;

		WREG32(CG_ULV_CONTROL, ulv->cg_ulv_control);
		WREG32(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);

		lane_width = amdgpu_get_pcie_lanes(adev);
		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
	} else {
		table->ULVState = table->initialState;
	}

	return amdgpu_si_copy_bytes_to_smc(adev, si_pi->state_table_start,
					   (u8 *)table, sizeof(SISLANDS_SMC_STATETABLE),
					   si_pi->sram_end);
}

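/*
 * si_calculate_sclk_params() - derive the SPLL register values for a target
 * engine clock from the dividers returned by the VBIOS.  The fractional
 * feedback divider is computed as
 *   fbdiv = engine_clock * (ref_div + 1) * post_div * 16384 / reference_clock
 * which is exactly what the do_div() below evaluates.
 */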
static int si_calculate_sclk_params(struct amdgpu_device *adev,
				    u32 engine_clock,
				    SISLANDS_SMC_SCLK_VALUE *sclk)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	struct atom_clock_dividers dividers;
	u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
	u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
	u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
	u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
	u32 cg_spll_spread_spectrum = si_pi->clock_registers.cg_spll_spread_spectrum;
	u32 cg_spll_spread_spectrum_2 = si_pi->clock_registers.cg_spll_spread_spectrum_2;
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 reference_divider;
	u32 fbdiv;
	u64 tmp;
	int ret;

	ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
						 engine_clock, false, &dividers);
	if (ret)
		return ret;

	reference_divider = 1 + dividers.ref_div;

	tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16384;
	do_div(tmp, reference_clock);
	fbdiv = (u32) tmp;

	spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);
	spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
	spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);

	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= SCLK_MUX_SEL(2);

	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
	spll_func_cntl_3 |= SPLL_DITHEN;

	if (pi->sclk_ss) {
		struct amdgpu_atom_ss ss;
		u32 vco_freq = engine_clock * dividers.post_div;

		if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);

			cg_spll_spread_spectrum &= ~CLK_S_MASK;
			cg_spll_spread_spectrum |= CLK_S(clk_s);
			cg_spll_spread_spectrum |= SSEN;

			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
		}
	}

	sclk->sclk_value = engine_clock;
	sclk->vCG_SPLL_FUNC_CNTL = spll_func_cntl;
	sclk->vCG_SPLL_FUNC_CNTL_2 = spll_func_cntl_2;
	sclk->vCG_SPLL_FUNC_CNTL_3 = spll_func_cntl_3;
	sclk->vCG_SPLL_FUNC_CNTL_4 = spll_func_cntl_4;
	sclk->vCG_SPLL_SPREAD_SPECTRUM = cg_spll_spread_spectrum;
	sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cg_spll_spread_spectrum_2;

	return 0;
}

static int si_populate_sclk_value(struct amdgpu_device *adev,
				  u32 engine_clock,
				  SISLANDS_SMC_SCLK_VALUE *sclk)
{
	SISLANDS_SMC_SCLK_VALUE sclk_tmp;
	int ret;

	ret = si_calculate_sclk_params(adev, engine_clock, &sclk_tmp);
	if (!ret) {
		sclk->sclk_value = cpu_to_be32(sclk_tmp.sclk_value);
		sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL);
		sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_2);
		sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_3);
		sclk->vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_4);
		sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM);
		sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM_2);
	}

	return ret;
}

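/*
 * si_populate_mclk_value() - program the MPLL register image for a memory
 * clock level: bandwidth control, feedback/post dividers, optional memory
 * spread spectrum, DLL speed and the MRDCKx power-down bits, all converted
 * to big-endian for the SMC.
 */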
static int si_populate_mclk_value(struct amdgpu_device *adev,
				  u32 engine_clock,
				  u32 memory_clock,
				  SISLANDS_SMC_MCLK_VALUE *mclk,
				  bool strobe_mode,
				  bool dll_state_on)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	u32 dll_cntl = si_pi->clock_registers.dll_cntl;
	u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
	u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
	u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
	u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
	u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
	u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
	u32 mpll_ss1 = si_pi->clock_registers.mpll_ss1;
	u32 mpll_ss2 = si_pi->clock_registers.mpll_ss2;
	struct atom_mpll_param mpll_param;
	int ret;

	ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
	if (ret)
		return ret;

	mpll_func_cntl &= ~BWCTRL_MASK;
	mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);

	mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
	mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
		CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);

	mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
	mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);

	if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
		mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
		mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
			YCLK_POST_DIV(mpll_param.post_div);
	}

	if (pi->mclk_ss) {
		struct amdgpu_atom_ss ss;
		u32 freq_nom;
		u32 tmp;
		u32 reference_clock = adev->clock.mpll.reference_freq;

		if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
			freq_nom = memory_clock * 4;
		else
			freq_nom = memory_clock * 2;

		tmp = freq_nom / reference_clock;
		tmp = tmp * tmp;
		if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
						     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
			u32 clks = reference_clock * 5 / ss.rate;
			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);

			mpll_ss1 &= ~CLKV_MASK;
			mpll_ss1 |= CLKV(clkv);

			mpll_ss2 &= ~CLKS_MASK;
			mpll_ss2 |= CLKS(clks);
		}
	}

	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
	mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);

	if (dll_state_on)
		mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
	else
		mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);

	mclk->mclk_value = cpu_to_be32(memory_clock);
	mclk->vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl);
	mclk->vMPLL_FUNC_CNTL_1 = cpu_to_be32(mpll_func_cntl_1);
	mclk->vMPLL_FUNC_CNTL_2 = cpu_to_be32(mpll_func_cntl_2);
	mclk->vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
	mclk->vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
	mclk->vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
	mclk->vDLL_CNTL = cpu_to_be32(dll_cntl);
	mclk->vMPLL_SS = cpu_to_be32(mpll_ss1);
	mclk->vMPLL_SS2 = cpu_to_be32(mpll_ss2);

	return 0;
}

static void si_populate_smc_sp(struct amdgpu_device *adev,
			       struct amdgpu_ps *amdgpu_state,
			       SISLANDS_SMC_SWSTATE *smc_state)
{
	struct si_ps *ps = si_get_ps(amdgpu_state);
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	int i;

	for (i = 0; i < ps->performance_level_count - 1; i++)
		smc_state->levels[i].bSP = cpu_to_be32(pi->dsp);

	smc_state->levels[ps->performance_level_count - 1].bSP =
		cpu_to_be32(pi->psp);
}

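/*
 * si_convert_power_level_to_smc() - translate one driver performance level
 * (sclk/mclk/voltages/PCIe gen) into an SMC hardware performance level,
 * picking strobe mode, EDC and stutter flags based on the memory clock.
 */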
static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
					 struct rv7xx_pl *pl,
					 SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	bool dll_state_on;
	u16 std_vddc;
	bool gmc_pg = false;
	int ret;

	if (eg_pi->pcie_performance_request &&
	    (si_pi->force_pcie_gen != AMDGPU_PCIE_GEN_INVALID))
		level->gen2PCIE = (u8)si_pi->force_pcie_gen;
	else
		level->gen2PCIE = (u8)pl->pcie_gen;

	ret = si_populate_sclk_value(adev, pl->sclk, &level->sclk);
	if (ret)
		return ret;

	level->mcFlags = 0;

	if (pi->mclk_stutter_mode_threshold &&
	    (pl->mclk <= pi->mclk_stutter_mode_threshold) &&
	    !eg_pi->uvd_enabled &&
	    (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
	    (adev->pm.dpm.new_active_crtc_count <= 2)) {
		level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN;

		if (gmc_pg)
			level->mcFlags |= SISLANDS_SMC_MC_PG_EN;
	}

	if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
		if (pl->mclk > pi->mclk_edc_enable_threshold)
			level->mcFlags |= SISLANDS_SMC_MC_EDC_RD_FLAG;

		if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
			level->mcFlags |= SISLANDS_SMC_MC_EDC_WR_FLAG;

		level->strobeMode = si_get_strobe_mode_settings(adev, pl->mclk);

		if (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) {
			if (si_get_mclk_frequency_ratio(pl->mclk, true) >=
			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
				dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
			else
				dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
		} else {
			dll_state_on = false;
		}
	} else {
		level->strobeMode = si_get_strobe_mode_settings(adev,
								pl->mclk);

		dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
	}

	ret = si_populate_mclk_value(adev,
				     pl->sclk,
				     pl->mclk,
				     &level->mclk,
				     (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) != 0, dll_state_on);
	if (ret)
		return ret;

	ret = si_populate_voltage_value(adev,
					&eg_pi->vddc_voltage_table,
					pl->vddc, &level->vddc);
	if (ret)
		return ret;

	ret = si_get_std_voltage_value(adev, &level->vddc, &std_vddc);
	if (ret)
		return ret;

	ret = si_populate_std_voltage_value(adev, std_vddc,
					    level->vddc.index, &level->std_vddc);
	if (ret)
		return ret;

	if (eg_pi->vddci_control) {
		ret = si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
						pl->vddci, &level->vddci);
		if (ret)
			return ret;
	}

	if (si_pi->vddc_phase_shed_control) {
		ret = si_populate_phase_shedding_value(adev,
						       &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
						       pl->vddc, pl->sclk, pl->mclk,
						       &level->vddc);
		if (ret)
			return ret;
	}

	level->MaxPoweredUpCU = si_pi->max_cu;

	ret = si_populate_mvdd_value(adev, pl->mclk, &level->mvdd);

	return ret;
}

static int si_populate_smc_t(struct amdgpu_device *adev,
			     struct amdgpu_ps *amdgpu_state,
			     SISLANDS_SMC_SWSTATE *smc_state)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct si_ps *state = si_get_ps(amdgpu_state);
	u32 a_t;
	u32 t_l, t_h;
	u32 high_bsp;
	int i, ret;

	if (state->performance_level_count >= 9)
		return 0;

	if (state->performance_level_count < 2) {
		a_t = CG_R(0xffff) | CG_L(0);
		smc_state->levels[0].aT = cpu_to_be32(a_t);
		return 0;
	}

	smc_state->levels[0].aT = cpu_to_be32(0);

	for (i = 0; i <= state->performance_level_count - 2; i++) {
		ret = r600_calculate_at(
			(50 / SISLANDS_MAX_HARDWARE_POWERLEVELS) * 100 * (i + 1),
			100 * R600_AH_DFLT,
			state->performance_levels[i + 1].sclk,
			state->performance_levels[i].sclk,
			&t_l,
			&t_h);

		if (ret) {
			t_h = (i + 1) * 1000 - 50 * R600_AH_DFLT;
			t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT;
		}

		a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK;
		a_t |= CG_R(t_l * pi->bsp / 20000);
		smc_state->levels[i].aT = cpu_to_be32(a_t);

		high_bsp = (i == state->performance_level_count - 2) ?
			pi->pbsp : pi->bsp;
		a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000);
		smc_state->levels[i + 1].aT = cpu_to_be32(a_t);
	}

	return 0;
}

static int si_disable_ulv(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	struct si_ulv_param *ulv = &si_pi->ulv;

	if (ulv->supported)
		return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
			0 : -EINVAL;

	return 0;
}

static bool si_is_state_ulv_compatible(struct amdgpu_device *adev,
				       struct amdgpu_ps *amdgpu_state)
{
	const struct si_power_info *si_pi = si_get_pi(adev);
	const struct si_ulv_param *ulv = &si_pi->ulv;
	const struct si_ps *state = si_get_ps(amdgpu_state);
	int i;

	if (state->performance_levels[0].mclk != ulv->pl.mclk)
		return false;

	/* XXX validate against display requirements! */

	for (i = 0; i < adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count; i++) {
		if (adev->clock.current_dispclk <=
		    adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].clk) {
			if (ulv->pl.vddc <
			    adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].v)
				return false;
		}
	}

	if ((amdgpu_state->vclk != 0) || (amdgpu_state->dclk != 0))
		return false;

	return true;
}

static int si_set_power_state_conditionally_enable_ulv(struct amdgpu_device *adev,
							struct amdgpu_ps *amdgpu_new_state)
{
	const struct si_power_info *si_pi = si_get_pi(adev);
	const struct si_ulv_param *ulv = &si_pi->ulv;

	if (ulv->supported) {
		if (si_is_state_ulv_compatible(adev, amdgpu_new_state))
			return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
				0 : -EINVAL;
	}
	return 0;
}

static int si_convert_power_state_to_smc(struct amdgpu_device *adev,
					 struct amdgpu_ps *amdgpu_state,
					 SISLANDS_SMC_SWSTATE *smc_state)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct ni_power_info *ni_pi = ni_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	struct si_ps *state = si_get_ps(amdgpu_state);
	int i, ret;
	u32 threshold;
	u32 sclk_in_sr = 1350; /* ??? */

	if (state->performance_level_count > SISLANDS_MAX_HARDWARE_POWERLEVELS)
		return -EINVAL;

	threshold = state->performance_levels[state->performance_level_count-1].sclk * 100 / 100;

	if (amdgpu_state->vclk && amdgpu_state->dclk) {
		eg_pi->uvd_enabled = true;
		if (eg_pi->smu_uvd_hs)
			smc_state->flags |= PPSMC_SWSTATE_FLAG_UVD;
	} else {
		eg_pi->uvd_enabled = false;
	}

	if (state->dc_compatible)
		smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;

	smc_state->levelCount = 0;
	for (i = 0; i < state->performance_level_count; i++) {
		if (eg_pi->sclk_deep_sleep) {
			if ((i == 0) || si_pi->sclk_deep_sleep_above_low) {
				if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
					smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
				else
					smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
			}
		}

		ret = si_convert_power_level_to_smc(adev, &state->performance_levels[i],
						    &smc_state->levels[i]);
		smc_state->levels[i].arbRefreshState =
			(u8)(SISLANDS_DRIVER_STATE_ARB_INDEX + i);

		if (ret)
			return ret;

		if (ni_pi->enable_power_containment)
			smc_state->levels[i].displayWatermark =
				(state->performance_levels[i].sclk < threshold) ?
				PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
		else
			smc_state->levels[i].displayWatermark = (i < 2) ?
				PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;

		if (eg_pi->dynamic_ac_timing)
			smc_state->levels[i].ACIndex = SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i;
		else
			smc_state->levels[i].ACIndex = 0;

		smc_state->levelCount++;
	}

	si_write_smc_soft_register(adev,
				   SI_SMC_SOFT_REGISTER_watermark_threshold,
				   threshold / 512);

	si_populate_smc_sp(adev, amdgpu_state, smc_state);

	ret = si_populate_power_containment_values(adev, amdgpu_state, smc_state);
	if (ret)
		ni_pi->enable_power_containment = false;

	ret = si_populate_sq_ramping_values(adev, amdgpu_state, smc_state);
	if (ret)
		ni_pi->enable_sq_ramping = false;

	return si_populate_smc_t(adev, amdgpu_state, smc_state);
}

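/*
 * si_upload_sw_state() - convert the requested power state into the SMC
 * driver state and copy it into SMC RAM at
 * state_table_start + offsetof(SISLANDS_SMC_STATETABLE, driverState).
 */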
static int si_upload_sw_state(struct amdgpu_device *adev,
			      struct amdgpu_ps *amdgpu_new_state)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	struct si_ps *new_state = si_get_ps(amdgpu_new_state);
	int ret;
	u32 address = si_pi->state_table_start +
		offsetof(SISLANDS_SMC_STATETABLE, driverState);
	u32 state_size = sizeof(SISLANDS_SMC_SWSTATE) +
		((new_state->performance_level_count - 1) *
		 sizeof(SISLANDS_SMC_HW_PERFORMANCE_LEVEL));
	SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.driverState;

	memset(smc_state, 0, state_size);

	ret = si_convert_power_state_to_smc(adev, amdgpu_new_state, smc_state);
	if (ret)
		return ret;

	return amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state,
					   state_size, si_pi->sram_end);
}

static int si_upload_ulv_state(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	struct si_ulv_param *ulv = &si_pi->ulv;
	int ret = 0;

	if (ulv->supported && ulv->pl.vddc) {
		u32 address = si_pi->state_table_start +
			offsetof(SISLANDS_SMC_STATETABLE, ULVState);
		SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.ULVState;
		u32 state_size = sizeof(SISLANDS_SMC_SWSTATE);

		memset(smc_state, 0, state_size);

		ret = si_populate_ulv_state(adev, smc_state);
		if (!ret)
			ret = amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state,
							  state_size, si_pi->sram_end);
	}

	return ret;
}

static int si_upload_smc_data(struct amdgpu_device *adev)
{
	struct amdgpu_crtc *amdgpu_crtc = NULL;
	int i;

	if (adev->pm.dpm.new_active_crtc_count == 0)
		return 0;

	for (i = 0; i < adev->mode_info.num_crtc; i++) {
		if (adev->pm.dpm.new_active_crtcs & (1 << i)) {
			amdgpu_crtc = adev->mode_info.crtcs[i];
			break;
		}
	}

	if (amdgpu_crtc == NULL)
		return 0;

	if (amdgpu_crtc->line_time <= 0)
		return 0;

	if (si_write_smc_soft_register(adev,
				       SI_SMC_SOFT_REGISTER_crtc_index,
				       amdgpu_crtc->crtc_id) != PPSMC_Result_OK)
		return 0;

	if (si_write_smc_soft_register(adev,
				       SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min,
				       amdgpu_crtc->wm_high / amdgpu_crtc->line_time) != PPSMC_Result_OK)
		return 0;

	if (si_write_smc_soft_register(adev,
				       SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max,
				       amdgpu_crtc->wm_low / amdgpu_crtc->line_time) != PPSMC_Result_OK)
		return 0;

	return 0;
}

static int si_set_mc_special_registers(struct amdgpu_device *adev,
				       struct si_mc_reg_table *table)
{
	u8 i, j, k;
	u32 temp_reg;

	for (i = 0, j = table->last; i < table->last; i++) {
		if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
			return -EINVAL;
		switch (table->mc_reg_address[i].s1) {
		case MC_SEQ_MISC1:
			temp_reg = RREG32(MC_PMG_CMD_EMRS);
			table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS;
			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP;
			for (k = 0; k < table->num_entries; k++)
				table->mc_reg_table_entry[k].mc_data[j] =
					((temp_reg & 0xffff0000)) |
					((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
			j++;

			if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
				return -EINVAL;
			temp_reg = RREG32(MC_PMG_CMD_MRS);
			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS;
			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP;
			for (k = 0; k < table->num_entries; k++) {
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) |
					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
				if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
			}
			j++;

			if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
				if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
					return -EINVAL;
				table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD;
				table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD;
				for (k = 0; k < table->num_entries; k++)
					table->mc_reg_table_entry[k].mc_data[j] =
						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
				j++;
			}
			break;
		case MC_SEQ_RESERVE_M:
			temp_reg = RREG32(MC_PMG_CMD_MRS1);
			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1;
			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP;
			for (k = 0; k < table->num_entries; k++)
				table->mc_reg_table_entry[k].mc_data[j] =
					(temp_reg & 0xffff0000) |
					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
			j++;
			break;
		default:
			break;
		}
	}

	table->last = j;

	return 0;
}

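/*
 * si_check_s0_mc_reg_index() - map an MC sequencer register onto its *_LP
 * shadow; si_initialize_mc_reg_table() below seeds those shadows from the
 * live registers before the per-state MC register sets are generated.
 */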
static bool si_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
{
	bool result = true;
	switch (in_reg) {
	case MC_SEQ_RAS_TIMING:
		*out_reg = MC_SEQ_RAS_TIMING_LP;
		break;
	case MC_SEQ_CAS_TIMING:
		*out_reg = MC_SEQ_CAS_TIMING_LP;
		break;
	case MC_SEQ_MISC_TIMING:
		*out_reg = MC_SEQ_MISC_TIMING_LP;
		break;
	case MC_SEQ_MISC_TIMING2:
		*out_reg = MC_SEQ_MISC_TIMING2_LP;
		break;
	case MC_SEQ_RD_CTL_D0:
		*out_reg = MC_SEQ_RD_CTL_D0_LP;
		break;
	case MC_SEQ_RD_CTL_D1:
		*out_reg = MC_SEQ_RD_CTL_D1_LP;
		break;
	case MC_SEQ_WR_CTL_D0:
		*out_reg = MC_SEQ_WR_CTL_D0_LP;
		break;
	case MC_SEQ_WR_CTL_D1:
		*out_reg = MC_SEQ_WR_CTL_D1_LP;
		break;
	case MC_PMG_CMD_EMRS:
		*out_reg = MC_SEQ_PMG_CMD_EMRS_LP;
		break;
	case MC_PMG_CMD_MRS:
		*out_reg = MC_SEQ_PMG_CMD_MRS_LP;
		break;
	case MC_PMG_CMD_MRS1:
		*out_reg = MC_SEQ_PMG_CMD_MRS1_LP;
		break;
	case MC_SEQ_PMG_TIMING:
		*out_reg = MC_SEQ_PMG_TIMING_LP;
		break;
	case MC_PMG_CMD_MRS2:
		*out_reg = MC_SEQ_PMG_CMD_MRS2_LP;
		break;
	case MC_SEQ_WR_CTL_2:
		*out_reg = MC_SEQ_WR_CTL_2_LP;
		break;
	default:
		result = false;
		break;
	}

	return result;
}

static void si_set_valid_flag(struct si_mc_reg_table *table)
{
	u8 i, j;

	for (i = 0; i < table->last; i++) {
		for (j = 1; j < table->num_entries; j++) {
			if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) {
				table->valid_flag |= 1 << i;
				break;
			}
		}
	}
}

static void si_set_s0_mc_reg_index(struct si_mc_reg_table *table)
{
	u32 i;
	u16 address;

	for (i = 0; i < table->last; i++)
		table->mc_reg_address[i].s0 = si_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
			address : table->mc_reg_address[i].s1;
}

static int si_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table,
				      struct si_mc_reg_table *si_table)
{
	u8 i, j;

	if (table->last > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
		return -EINVAL;

	if (table->num_entries > MAX_AC_TIMING_ENTRIES)
		return -EINVAL;

	for (i = 0; i < table->last; i++)
		si_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
	si_table->last = table->last;

	for (i = 0; i < table->num_entries; i++) {
		si_table->mc_reg_table_entry[i].mclk_max =
			table->mc_reg_table_entry[i].mclk_max;
		for (j = 0; j < table->last; j++) {
			si_table->mc_reg_table_entry[i].mc_data[j] =
				table->mc_reg_table_entry[i].mc_data[j];
		}
	}
	si_table->num_entries = table->num_entries;

	return 0;
}

static int si_initialize_mc_reg_table(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	struct atom_mc_reg_table *table;
	struct si_mc_reg_table *si_table = &si_pi->mc_reg_table;
	u8 module_index = rv770_get_memory_module_index(adev);
	int ret;

	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
	WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
	WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
	WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
	WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
	WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
	WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
	WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
	WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
	WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
	WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
	WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
	WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
	WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));

	ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
	if (ret)
		goto init_mc_done;

	ret = si_copy_vbios_mc_reg_table(table, si_table);
	if (ret)
		goto init_mc_done;

	si_set_s0_mc_reg_index(si_table);

	ret = si_set_mc_special_registers(adev, si_table);
	if (ret)
		goto init_mc_done;

	si_set_valid_flag(si_table);

init_mc_done:
	kfree(table);

	return ret;
}

static void si_populate_mc_reg_addresses(struct amdgpu_device *adev,
					 SMC_SIslands_MCRegisters *mc_reg_table)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	u32 i, j;

	for (i = 0, j = 0; j < si_pi->mc_reg_table.last; j++) {
		if (si_pi->mc_reg_table.valid_flag & (1 << j)) {
			if (i >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
				break;
			mc_reg_table->address[i].s0 =
				cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s0);
			mc_reg_table->address[i].s1 =
				cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s1);
			i++;
		}
	}
	mc_reg_table->last = (u8)i;
}

static void si_convert_mc_registers(const struct si_mc_reg_entry *entry,
				    SMC_SIslands_MCRegisterSet *data,
				    u32 num_entries, u32 valid_flag)
{
	u32 i, j;

	for (i = 0, j = 0; j < num_entries; j++) {
		if (valid_flag & (1 << j)) {
			data->value[i] = cpu_to_be32(entry->mc_data[j]);
			i++;
		}
	}
}

static void si_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
						 struct rv7xx_pl *pl,
						 SMC_SIslands_MCRegisterSet *mc_reg_table_data)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	u32 i = 0;

	for (i = 0; i < si_pi->mc_reg_table.num_entries; i++) {
		if (pl->mclk <= si_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
			break;
	}

	if ((i == si_pi->mc_reg_table.num_entries) && (i > 0))
		--i;

	si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[i],
				mc_reg_table_data, si_pi->mc_reg_table.last,
				si_pi->mc_reg_table.valid_flag);
}

static void si_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
					   struct amdgpu_ps *amdgpu_state,
					   SMC_SIslands_MCRegisters *mc_reg_table)
{
	struct si_ps *state = si_get_ps(amdgpu_state);
	int i;

	for (i = 0; i < state->performance_level_count; i++) {
		si_convert_mc_reg_table_entry_to_smc(adev,
						     &state->performance_levels[i],
						     &mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i]);
	}
}

static int si_populate_mc_reg_table(struct amdgpu_device *adev,
				    struct amdgpu_ps *amdgpu_boot_state)
{
	struct si_ps *boot_state = si_get_ps(amdgpu_boot_state);
	struct si_power_info *si_pi = si_get_pi(adev);
	struct si_ulv_param *ulv = &si_pi->ulv;
	SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table;

	memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters));

	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_seq_index, 1);

	si_populate_mc_reg_addresses(adev, smc_mc_reg_table);

	si_convert_mc_reg_table_entry_to_smc(adev, &boot_state->performance_levels[0],
					     &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_INITIAL_SLOT]);

	si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0],
				&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ACPI_SLOT],
				si_pi->mc_reg_table.last,
				si_pi->mc_reg_table.valid_flag);

	if (ulv->supported && ulv->pl.vddc != 0)
		si_convert_mc_reg_table_entry_to_smc(adev, &ulv->pl,
						     &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT]);
	else
		si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0],
					&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT],
					si_pi->mc_reg_table.last,
					si_pi->mc_reg_table.valid_flag);

	si_convert_mc_reg_table_to_smc(adev, amdgpu_boot_state, smc_mc_reg_table);

	return amdgpu_si_copy_bytes_to_smc(adev, si_pi->mc_reg_table_start,
					   (u8 *)smc_mc_reg_table,
					   sizeof(SMC_SIslands_MCRegisters), si_pi->sram_end);
}

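/*
 * si_upload_mc_reg_table() - regenerate only the driver-state slots of the
 * MC register table for the new power state and copy them into SMC RAM.
 */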
static int si_upload_mc_reg_table(struct amdgpu_device *adev,
				  struct amdgpu_ps *amdgpu_new_state)
{
	struct si_ps *new_state = si_get_ps(amdgpu_new_state);
	struct si_power_info *si_pi = si_get_pi(adev);
	u32 address = si_pi->mc_reg_table_start +
		offsetof(SMC_SIslands_MCRegisters,
			 data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT]);
	SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table;

	memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters));

	si_convert_mc_reg_table_to_smc(adev, amdgpu_new_state, smc_mc_reg_table);

	return amdgpu_si_copy_bytes_to_smc(adev, address,
					   (u8 *)&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT],
					   sizeof(SMC_SIslands_MCRegisterSet) * new_state->performance_level_count,
					   si_pi->sram_end);
}

static void si_enable_voltage_control(struct amdgpu_device *adev, bool enable)
{
	if (enable)
		WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
	else
		WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
}

static enum amdgpu_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev,
						      struct amdgpu_ps *amdgpu_state)
{
	struct si_ps *state = si_get_ps(amdgpu_state);
	u16 pcie_speed, max_speed = 0;
	int i;

	for (i = 0; i < state->performance_level_count; i++) {
		pcie_speed = state->performance_levels[i].pcie_gen;
		if (max_speed < pcie_speed)
			max_speed = pcie_speed;
	}
	return max_speed;
}

static u16 si_get_current_pcie_speed(struct amdgpu_device *adev)
{
	u32 speed_cntl;

	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
	speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;

	return (u16)speed_cntl;
}

static void si_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
							      struct amdgpu_ps *amdgpu_new_state,
							      struct amdgpu_ps *amdgpu_current_state)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
	enum amdgpu_pcie_gen current_link_speed;

	if (si_pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
		current_link_speed = si_get_maximum_link_speed(adev, amdgpu_current_state);
	else
		current_link_speed = si_pi->force_pcie_gen;

	si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
	si_pi->pspp_notify_required = false;
	if (target_link_speed > current_link_speed) {
		switch (target_link_speed) {
#if defined(CONFIG_ACPI)
		case AMDGPU_PCIE_GEN3:
			if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
				break;
			si_pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
			if (current_link_speed == AMDGPU_PCIE_GEN2)
				break;
			fallthrough;
		case AMDGPU_PCIE_GEN2:
			if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
				break;
			fallthrough;
#endif
		default:
			si_pi->force_pcie_gen = si_get_current_pcie_speed(adev);
			break;
		}
	} else {
		if (target_link_speed < current_link_speed)
			si_pi->pspp_notify_required = true;
	}
}

static void si_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
							    struct amdgpu_ps *amdgpu_new_state,
							    struct amdgpu_ps *amdgpu_current_state)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
	u8 request;

	if (si_pi->pspp_notify_required) {
		if (target_link_speed == AMDGPU_PCIE_GEN3)
			request = PCIE_PERF_REQ_PECI_GEN3;
		else if (target_link_speed == AMDGPU_PCIE_GEN2)
			request = PCIE_PERF_REQ_PECI_GEN2;
		else
			request = PCIE_PERF_REQ_PECI_GEN1;

		if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
		    (si_get_current_pcie_speed(adev) > 0))
			return;

#if defined(CONFIG_ACPI)
		amdgpu_acpi_pcie_performance_request(adev, request, false);
#endif
	}
}

static int si_ds_request(struct amdgpu_device *adev,
			 bool ds_status_on, u32 count_write)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);

	if (eg_pi->sclk_deep_sleep) {
		if (ds_status_on)
			return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_CancelThrottleOVRDSCLKDS) ==
				PPSMC_Result_OK) ? 0 : -EINVAL;
		else
			return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ThrottleOVRDSCLKDS) ==
				PPSMC_Result_OK) ? 0 : -EINVAL;
	}
	return 0;
}

static void si_set_max_cu_value(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);

	if (adev->asic_type == CHIP_VERDE) {
		switch (adev->pdev->device) {
		/* CU limits for specific Verde device IDs */
		default:
			si_pi->max_cu = 0xffff;
			break;
		}
	} else {
		si_pi->max_cu = 0xffff;
	}
}

static int si_patch_single_dependency_table_based_on_leakage(struct amdgpu_device *adev,
							     struct amdgpu_clock_voltage_dependency_table *table)
{
	u32 i;
	int j;
	u16 leakage_voltage;

	if (table) {
		for (i = 0; i < table->count; i++) {
			switch (si_get_leakage_voltage_from_leakage_index(adev,
									  table->entries[i].v,
									  &leakage_voltage)) {
			case 0:
				table->entries[i].v = leakage_voltage;
				break;
			case -EAGAIN:
				return -EINVAL;
			case -EINVAL:
			default:
				break;
			}
		}

		for (j = (table->count - 2); j >= 0; j--) {
			table->entries[j].v = (table->entries[j].v <= table->entries[j + 1].v) ?
				table->entries[j].v : table->entries[j + 1].v;
		}
	}
	return 0;
}

static int si_patch_dependency_tables_based_on_leakage(struct amdgpu_device *adev)
{
	int ret = 0;

	ret = si_patch_single_dependency_table_based_on_leakage(adev,
								&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
	if (ret)
		DRM_ERROR("Could not patch vddc_on_sclk leakage table\n");
	ret = si_patch_single_dependency_table_based_on_leakage(adev,
								&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
	if (ret)
		DRM_ERROR("Could not patch vddc_on_mclk leakage table\n");
	ret = si_patch_single_dependency_table_based_on_leakage(adev,
								&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
	if (ret)
		DRM_ERROR("Could not patch vddci_on_mclk leakage table\n");
	return ret;
}

static void si_set_pcie_lane_width_in_smc(struct amdgpu_device *adev,
					  struct amdgpu_ps *amdgpu_new_state,
					  struct amdgpu_ps *amdgpu_current_state)
{
	u32 lane_width;
	u32 new_lane_width =
		((amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
	u32 current_lane_width =
		((amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;

	if (new_lane_width != current_lane_width) {
		amdgpu_set_pcie_lanes(adev, new_lane_width);
		lane_width = amdgpu_get_pcie_lanes(adev);
		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
	}
}

static void si_dpm_setup_asic(struct amdgpu_device *adev)
{
	si_read_clock_registers(adev);
	si_enable_acpi_power_management(adev);
}

static int si_thermal_enable_alert(struct amdgpu_device *adev,
				   bool enable)
{
	u32 thermal_int = RREG32(CG_THERMAL_INT);

	if (enable) {
		PPSMC_Result result;

		thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
		WREG32(CG_THERMAL_INT, thermal_int);
		result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
		if (result != PPSMC_Result_OK) {
			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
			return -EINVAL;
		}
	} else {
		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
		WREG32(CG_THERMAL_INT, thermal_int);
	}

	return 0;
}

static int si_thermal_set_temperature_range(struct amdgpu_device *adev,
					    int min_temp, int max_temp)
{
	int low_temp = 0 * 1000;
	int high_temp = 255 * 1000;

	if (low_temp < min_temp)
		low_temp = min_temp;
	if (high_temp > max_temp)
		high_temp = max_temp;
	if (high_temp < low_temp) {
		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
		return -EINVAL;
	}

	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
	WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);

	adev->pm.dpm.thermal.min_temp = low_temp;
	adev->pm.dpm.thermal.max_temp = high_temp;

	return 0;
}

static void si_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	u32 tmp;

	if (si_pi->fan_ctrl_is_in_default_mode) {
		tmp = (RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
		si_pi->fan_ctrl_default_mode = tmp;
		tmp = (RREG32(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
		si_pi->t_min = tmp;
		si_pi->fan_ctrl_is_in_default_mode = false;
	}

	tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
	tmp |= TMIN(0);
	WREG32(CG_FDO_CTRL2, tmp);

	tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
	tmp |= FDO_PWM_MODE(mode);
	WREG32(CG_FDO_CTRL2, tmp);
}

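/*
 * si_thermal_setup_fan_table() - convert the fan profile from the powerplay
 * tables into the SMC fan table.  The two PWM-vs-temperature slopes come
 * from the (t_min,pwm_min)/(t_med,pwm_med)/(t_high,pwm_high) points, e.g.
 *   slope1 = ((16 * duty100 * pwm_diff1) / t_diff1 + 50) / 100
 * and the result is uploaded to fan_table_start in SMC RAM.
 */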
static int si_thermal_setup_fan_table(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	PP_SIslands_FanTable fan_table = { FDO_MODE_HARDWARE };
	u32 duty100;
	u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
	u16 fdo_min, slope1, slope2;
	u32 reference_clock, tmp;
	u64 tmp64;
	int ret;

	if (!si_pi->fan_table_start) {
		adev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;

	if (duty100 == 0) {
		adev->pm.dpm.fan.ucode_fan_control = false;
		return 0;
	}

	tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
	do_div(tmp64, 10000);
	fdo_min = (u16)tmp64;

	t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
	t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;

	pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
	pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;

	slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
	slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);

	fan_table.temp_min = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
	fan_table.temp_med = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
	fan_table.temp_max = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);
	fan_table.slope1 = cpu_to_be16(slope1);
	fan_table.slope2 = cpu_to_be16(slope2);
	fan_table.fdo_min = cpu_to_be16(fdo_min);
	fan_table.hys_down = cpu_to_be16(adev->pm.dpm.fan.t_hyst);
	fan_table.hys_up = cpu_to_be16(1);
	fan_table.hys_slope = cpu_to_be16(1);
	fan_table.temp_resp_lim = cpu_to_be16(5);
	reference_clock = amdgpu_asic_get_xclk(adev);

	fan_table.refresh_period = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
						reference_clock) / 1600);
	fan_table.fdo_max = cpu_to_be16((u16)duty100);

	tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
	fan_table.temp_src = (uint8_t)tmp;

	ret = amdgpu_si_copy_bytes_to_smc(adev,
					  si_pi->fan_table_start,
					  (u8 *)(&fan_table),
					  sizeof(fan_table),
					  si_pi->sram_end);

	if (ret) {
		DRM_ERROR("Failed to load fan table to the SMC.");
		adev->pm.dpm.fan.ucode_fan_control = false;
	}

	return ret;
}

static int si_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	PPSMC_Result ret;

	ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StartFanControl);
	if (ret == PPSMC_Result_OK) {
		si_pi->fan_is_controlled_by_smc = true;
		return 0;
	} else {
		return -EINVAL;
	}
}

static int si_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	PPSMC_Result ret;

	ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StopFanControl);

	if (ret == PPSMC_Result_OK) {
		si_pi->fan_is_controlled_by_smc = false;
		return 0;
	} else {
		return -EINVAL;
	}
}

static int si_dpm_get_fan_speed_percent(void *handle,
					u32 *speed)
{
	u32 duty, duty100;
	u64 tmp64;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.no_fan)
		return -ENOENT;

	duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
	duty = (RREG32(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	tmp64 = (u64)duty * 100;
	do_div(tmp64, duty100);
	*speed = (u32)tmp64;

	if (*speed > 100)
		*speed = 100;

	return 0;
}

static int si_dpm_set_fan_speed_percent(void *handle,
					u32 speed)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct si_power_info *si_pi = si_get_pi(adev);
	u32 tmp;
	u32 duty, duty100;
	u64 tmp64;

	if (adev->pm.no_fan)
		return -ENOENT;

	if (si_pi->fan_is_controlled_by_smc)
		return -EINVAL;

	if (speed > 100)
		return -EINVAL;

	duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;

	if (duty100 == 0)
		return -EINVAL;

	tmp64 = (u64)speed * duty100;
	do_div(tmp64, 100);
	duty = (u32)tmp64;

	tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
	tmp |= FDO_STATIC_DUTY(duty);
	WREG32(CG_FDO_CTRL0, tmp);

	return 0;
}

static void si_dpm_set_fan_control_mode(void *handle, u32 mode)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (mode) {
		/* stop auto-manage */
		if (adev->pm.dpm.fan.ucode_fan_control)
			si_fan_ctrl_stop_smc_fan_control(adev);
		si_fan_ctrl_set_static_mode(adev, mode);
	} else {
		/* restart auto-manage */
		if (adev->pm.dpm.fan.ucode_fan_control)
			si_thermal_start_smc_fan_control(adev);
		else
			si_fan_ctrl_set_default_mode(adev);
	}
}

static u32 si_dpm_get_fan_control_mode(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct si_power_info *si_pi = si_get_pi(adev);
	u32 tmp;

	if (si_pi->fan_is_controlled_by_smc)
		return 0;

	tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
	return (tmp >> FDO_PWM_MODE_SHIFT);
}

static int si_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
					 u32 *speed)
{
	u32 tach_period;
	u32 xclk = amdgpu_asic_get_xclk(adev);

	if (adev->pm.no_fan)
		return -ENOENT;

	if (adev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	tach_period = (RREG32(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
	if (tach_period == 0)
		return -ENOENT;

	*speed = 60 * xclk * 10000 / tach_period;

	return 0;
}

static int si_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
					 u32 speed)
{
	u32 tach_period, tmp;
	u32 xclk = amdgpu_asic_get_xclk(adev);

	if (adev->pm.no_fan)
		return -ENOENT;

	if (adev->pm.fan_pulses_per_revolution == 0)
		return -ENOENT;

	if ((speed < adev->pm.fan_min_rpm) ||
	    (speed > adev->pm.fan_max_rpm))
		return -EINVAL;

	if (adev->pm.dpm.fan.ucode_fan_control)
		si_fan_ctrl_stop_smc_fan_control(adev);

	tach_period = 60 * xclk * 10000 / (8 * speed);
	tmp = RREG32(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
	tmp |= TARGET_PERIOD(tach_period);
	WREG32(CG_TACH_CTRL, tmp);

	si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);

	return 0;
}

static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	u32 tmp;

	if (!si_pi->fan_ctrl_is_in_default_mode) {
		tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
		tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode);
		WREG32(CG_FDO_CTRL2, tmp);

		tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
		tmp |= TMIN(si_pi->t_min);
		WREG32(CG_FDO_CTRL2, tmp);
		si_pi->fan_ctrl_is_in_default_mode = true;
	}
}

static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev)
{
	if (adev->pm.dpm.fan.ucode_fan_control) {
		si_fan_ctrl_start_smc_fan_control(adev);
		si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
	}
}

static void si_thermal_initialize(struct amdgpu_device *adev)
{
	u32 tmp;

	if (adev->pm.fan_pulses_per_revolution) {
		tmp = RREG32(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
		tmp |= EDGE_PER_REV(adev->pm.fan_pulses_per_revolution - 1);
		WREG32(CG_TACH_CTRL, tmp);
	}

	tmp = RREG32(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
	tmp |= TACH_PWM_RESP_RATE(0x28);
	WREG32(CG_FDO_CTRL2, tmp);
}

static int si_thermal_start_thermal_controller(struct amdgpu_device *adev)
{
	int ret;

	si_thermal_initialize(adev);
	ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
	if (ret)
		return ret;
	ret = si_thermal_enable_alert(adev, true);
	if (ret)
		return ret;
	if (adev->pm.dpm.fan.ucode_fan_control) {
		ret = si_halt_smc(adev);
		if (ret)
			return ret;
		ret = si_thermal_setup_fan_table(adev);
		if (ret)
			return ret;
		ret = si_resume_smc(adev);
		if (ret)
			return ret;
		si_thermal_start_smc_fan_control(adev);
	}

	return 0;
}

static void si_thermal_stop_thermal_controller(struct amdgpu_device *adev)
{
	if (!adev->pm.no_fan) {
		si_fan_ctrl_set_default_mode(adev);
		si_fan_ctrl_stop_smc_fan_control(adev);
	}
}

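/*
 * si_dpm_enable() - bring up dynamic power management: enable voltage
 * control and spread spectrum, upload the SMC firmware, build the state,
 * SPLL, ARB, MC, CAC and DTE tables, then start the SMC, SCLK control and
 * the thermal/fan controller.
 */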
static int si_dpm_enable(struct amdgpu_device *adev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
	int ret;

	if (amdgpu_si_is_smc_running(adev))
		return -EINVAL;
	if (pi->voltage_control || si_pi->voltage_control_svi2)
		si_enable_voltage_control(adev, true);
	if (pi->mvdd_control)
		si_get_mvdd_configuration(adev);
	if (pi->voltage_control || si_pi->voltage_control_svi2) {
		ret = si_construct_voltage_tables(adev);
		if (ret) {
			DRM_ERROR("si_construct_voltage_tables failed\n");
			return ret;
		}
	}
	if (eg_pi->dynamic_ac_timing) {
		ret = si_initialize_mc_reg_table(adev);
		if (ret)
			eg_pi->dynamic_ac_timing = false;
	}
	if (pi->dynamic_ss)
		si_enable_spread_spectrum(adev, true);
	if (pi->thermal_protection)
		si_enable_thermal_protection(adev, true);
	si_setup_bsp(adev);
	si_program_git(adev);
	si_program_tp(adev);
	si_program_tpp(adev);
	si_program_sstp(adev);
	si_enable_display_gap(adev);
	si_program_vc(adev);
	ret = si_upload_firmware(adev);
	if (ret) {
		DRM_ERROR("si_upload_firmware failed\n");
		return ret;
	}
	ret = si_process_firmware_header(adev);
	if (ret) {
		DRM_ERROR("si_process_firmware_header failed\n");
		return ret;
	}
	ret = si_initial_switch_from_arb_f0_to_f1(adev);
	if (ret) {
		DRM_ERROR("si_initial_switch_from_arb_f0_to_f1 failed\n");
		return ret;
	}
	ret = si_init_smc_table(adev);
	if (ret) {
		DRM_ERROR("si_init_smc_table failed\n");
		return ret;
	}
	ret = si_init_smc_spll_table(adev);
	if (ret) {
		DRM_ERROR("si_init_smc_spll_table failed\n");
		return ret;
	}
	ret = si_init_arb_table_index(adev);
	if (ret) {
		DRM_ERROR("si_init_arb_table_index failed\n");
		return ret;
	}
	if (eg_pi->dynamic_ac_timing) {
		ret = si_populate_mc_reg_table(adev, boot_ps);
		if (ret) {
			DRM_ERROR("si_populate_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = si_initialize_smc_cac_tables(adev);
	if (ret) {
		DRM_ERROR("si_initialize_smc_cac_tables failed\n");
		return ret;
	}
	ret = si_initialize_hardware_cac_manager(adev);
	if (ret) {
		DRM_ERROR("si_initialize_hardware_cac_manager failed\n");
		return ret;
	}
	ret = si_initialize_smc_dte_tables(adev);
	if (ret) {
		DRM_ERROR("si_initialize_smc_dte_tables failed\n");
		return ret;
	}
	ret = si_populate_smc_tdp_limits(adev, boot_ps);
	if (ret) {
		DRM_ERROR("si_populate_smc_tdp_limits failed\n");
		return ret;
	}
	ret = si_populate_smc_tdp_limits_2(adev, boot_ps);
	if (ret) {
		DRM_ERROR("si_populate_smc_tdp_limits_2 failed\n");
		return ret;
	}
	si_program_response_times(adev);
	si_program_ds_registers(adev);
	si_dpm_start_smc(adev);
	ret = si_notify_smc_display_change(adev, false);
	if (ret) {
		DRM_ERROR("si_notify_smc_display_change failed\n");
		return ret;
	}
	si_enable_sclk_control(adev, true);
	si_start_dpm(adev);

	si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
	si_thermal_start_thermal_controller(adev);

	ni_update_current_ps(adev, boot_ps);

	return 0;
}

static int si_set_temperature_range(struct amdgpu_device *adev)
{
	int ret;

	ret = si_thermal_enable_alert(adev, false);
	if (ret)
		return ret;
	ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
	if (ret)
		return ret;
	ret = si_thermal_enable_alert(adev, true);

	return ret;
}

static void si_dpm_disable(struct amdgpu_device *adev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;

	if (!amdgpu_si_is_smc_running(adev))
		return;
	si_thermal_stop_thermal_controller(adev);
	si_disable_ulv(adev);
	si_clear_vc(adev);
	if (pi->thermal_protection)
		si_enable_thermal_protection(adev, false);
	si_enable_power_containment(adev, boot_ps, false);
	si_enable_smc_cac(adev, boot_ps, false);
	si_enable_spread_spectrum(adev, false);
	si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
	si_stop_dpm(adev);
	si_reset_to_default(adev);
	si_dpm_stop_smc(adev);
	si_force_switch_to_arb_f0(adev);

	ni_update_current_ps(adev, boot_ps);
}

static int si_dpm_pre_set_power_state(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
	struct amdgpu_ps *new_ps = &requested_ps;

	ni_update_requested_ps(adev, new_ps);
	si_apply_state_adjust_rules(adev, &eg_pi->requested_rps);

	return 0;
}

static int si_power_control_set_level(struct amdgpu_device *adev)
{
	struct amdgpu_ps *new_ps = adev->pm.dpm.requested_ps;
	int ret;

	ret = si_restrict_performance_levels_before_switch(adev);
	if (ret)
		return ret;
	ret = si_halt_smc(adev);
	if (ret)
		return ret;
	ret = si_populate_smc_tdp_limits(adev, new_ps);
	if (ret)
		return ret;
	ret = si_populate_smc_tdp_limits_2(adev, new_ps);
	if (ret)
		return ret;
	ret = si_resume_smc(adev);
	if (ret)
		return ret;
	ret = si_set_sw_state(adev);

	return ret;
}

static int si_dpm_set_power_state(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
	struct amdgpu_ps *old_ps = &eg_pi->current_rps;
	int ret;

	ret = si_disable_ulv(adev);
	if (ret) {
		DRM_ERROR("si_disable_ulv failed\n");
		return ret;
	}
	ret = si_restrict_performance_levels_before_switch(adev);
	if (ret) {
		DRM_ERROR("si_restrict_performance_levels_before_switch failed\n");
		return ret;
	}
	if (eg_pi->pcie_performance_request)
		si_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
	ni_set_uvd_clock_before_set_eng_clock(adev, new_ps, old_ps);
	ret = si_enable_power_containment(adev, new_ps, false);
	if (ret) {
		DRM_ERROR("si_enable_power_containment failed\n");
		return ret;
	}
	ret = si_enable_smc_cac(adev, new_ps, false);
	if (ret) {
		DRM_ERROR("si_enable_smc_cac failed\n");
		return ret;
	}
	ret = si_halt_smc(adev);
	if (ret) {
		DRM_ERROR("si_halt_smc failed\n");
		return ret;
	}
	ret = si_upload_sw_state(adev, new_ps);
	if (ret) {
		DRM_ERROR("si_upload_sw_state failed\n");
		return ret;
	}
	ret = si_upload_smc_data(adev);
	if (ret) {
		DRM_ERROR("si_upload_smc_data failed\n");
		return ret;
	}
	ret = si_upload_ulv_state(adev);
	if (ret) {
		DRM_ERROR("si_upload_ulv_state failed\n");
		return ret;
	}
	if (eg_pi->dynamic_ac_timing) {
		ret = si_upload_mc_reg_table(adev, new_ps);
		if (ret) {
			DRM_ERROR("si_upload_mc_reg_table failed\n");
			return ret;
		}
	}
	ret = si_program_memory_timing_parameters(adev, new_ps);
	if (ret) {
		DRM_ERROR("si_program_memory_timing_parameters failed\n");
		return ret;
	}
	si_set_pcie_lane_width_in_smc(adev, new_ps, old_ps);

	ret = si_resume_smc(adev);
	if (ret) {
		DRM_ERROR("si_resume_smc failed\n");
		return ret;
	}
	ret = si_set_sw_state(adev);
	if (ret) {
		DRM_ERROR("si_set_sw_state failed\n");
		return ret;
	}
	ni_set_uvd_clock_after_set_eng_clock(adev, new_ps, old_ps);
	if (eg_pi->pcie_performance_request)
		si_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);
	ret = si_set_power_state_conditionally_enable_ulv(adev, new_ps);
	if (ret) {
		DRM_ERROR("si_set_power_state_conditionally_enable_ulv failed\n");
		return ret;
	}
	ret = si_enable_smc_cac(adev, new_ps, true);
	if (ret) {
		DRM_ERROR("si_enable_smc_cac failed\n");
		return ret;
	}
	ret = si_enable_power_containment(adev, new_ps, true);
	if (ret) {
		DRM_ERROR("si_enable_power_containment failed\n");
		return ret;
	}

	ret = si_power_control_set_level(adev);
	if (ret) {
		DRM_ERROR("si_power_control_set_level failed\n");
		return ret;
	}

	return 0;
}

static void si_dpm_post_set_power_state(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct amdgpu_ps *new_ps = &eg_pi->requested_rps;

	ni_update_current_ps(adev, new_ps);
}

void si_dpm_reset_asic(struct amdgpu_device *adev)
{
	si_restrict_performance_levels_before_switch(adev);
	si_disable_ulv(adev);
	si_set_boot_state(adev);
}

static void si_dpm_display_configuration_changed(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	si_program_display_gap(adev);
}

static void si_parse_pplib_non_clock_info(struct amdgpu_device *adev,
					  struct amdgpu_ps *rps,
					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
					  u8 table_rev)
{
	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
	rps->class = le16_to_cpu(non_clock_info->usClassification);
	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);

	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
	} else if (r600_is_uvd_state(rps->class, rps->class2)) {
		rps->vclk = RV770_DEFAULT_VCLK_FREQ;
		rps->dclk = RV770_DEFAULT_DCLK_FREQ;
	} else {
		rps->vclk = 0;
		rps->dclk = 0;
	}

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
		adev->pm.dpm.boot_ps = rps;
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
		adev->pm.dpm.uvd_ps = rps;
}

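/*
 * Decode one SI clock-info entry into a performance level: engine/memory
 * clocks, voltages, flags and PCIe gen, patching leakage-index vddc values
 * to real voltages, capturing the ACPI and ULV levels, and forcing the boot
 * level back to the default clocks and voltages.
 */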
static void si_parse_pplib_clock_info(struct amdgpu_device *adev,
				      struct amdgpu_ps *rps, int index,
				      union pplib_clock_info *clock_info)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	struct si_ps *ps = si_get_ps(rps);
	u16 leakage_voltage;
	struct rv7xx_pl *pl = &ps->performance_levels[index];
	int ret;

	ps->performance_level_count = index + 1;

	pl->sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
	pl->sclk |= clock_info->si.ucEngineClockHigh << 16;
	pl->mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
	pl->mclk |= clock_info->si.ucMemoryClockHigh << 16;

	pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
	pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
	pl->flags = le32_to_cpu(clock_info->si.ulFlags);
	pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
						   si_pi->sys_pcie_mask,
						   si_pi->boot_pcie_gen,
						   clock_info->si.ucPCIEGen);

	/* patch up vddc if necessary */
	ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc,
							&leakage_voltage);
	if (ret == 0)
		pl->vddc = leakage_voltage;

	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
		pi->acpi_vddc = pl->vddc;
		eg_pi->acpi_vddci = pl->vddci;
		si_pi->acpi_pcie_gen = pl->pcie_gen;
	}

	if ((rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) &&
	    index == 0) {
		/* XXX disable for A0 tahiti */
		si_pi->ulv.supported = false;
		si_pi->ulv.pl = *pl;
		si_pi->ulv.one_pcie_lane_in_ulv = false;
		si_pi->ulv.volt_change_delay = SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT;
		si_pi->ulv.cg_ulv_parameter = SISLANDS_CGULVPARAMETER_DFLT;
		si_pi->ulv.cg_ulv_control = SISLANDS_CGULVCONTROL_DFLT;
	}

	if (pi->min_vddc_in_table > pl->vddc)
		pi->min_vddc_in_table = pl->vddc;

	if (pi->max_vddc_in_table < pl->vddc)
		pi->max_vddc_in_table = pl->vddc;

	/* patch up boot state */
	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
		u16 vddc, vddci, mvdd;
		amdgpu_atombios_get_default_voltages(adev, &vddc, &vddci, &mvdd);
		pl->mclk = adev->clock.default_mclk;
		pl->sclk = adev->clock.default_sclk;
		pl->vddc = vddc;
		pl->vddci = vddci;
		si_pi->mvdd_bootup_value = mvdd;
	}

	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
	    ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
		adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
		adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
		adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
		adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
	}
}

union pplib_power_state {
	struct _ATOM_PPLIB_STATE v1;
	struct _ATOM_PPLIB_STATE_V2 v2;
};

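/*
 * Walk the ATOM PowerPlay state array and build the driver's array of
 * amdgpu_ps/si_ps power states from the clock and non-clock info tables,
 * then fill in the VCE state clocks from the referenced clock-info entries.
 */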
static int si_parse_power_table(struct amdgpu_device *adev)
{
	struct amdgpu_mode_info *mode_info = &adev->mode_info;
	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
	union pplib_power_state *power_state;
	int i, j, k, non_clock_array_index, clock_array_index;
	union pplib_clock_info *clock_info;
	struct _StateArray *state_array;
	struct _ClockInfoArray *clock_info_array;
	struct _NonClockInfoArray *non_clock_info_array;
	union power_info *power_info;
	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
	u16 data_offset;
	u8 frev, crev;
	u8 *power_state_offset;
	struct si_ps *ps;

	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
					   &frev, &crev, &data_offset))
		return -EINVAL;
	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);

	amdgpu_add_thermal_controller(adev);

	state_array = (struct _StateArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
	clock_info_array = (struct _ClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
	non_clock_info_array = (struct _NonClockInfoArray *)
		(mode_info->atom_context->bios + data_offset +
		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));

	adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
				  sizeof(struct amdgpu_ps),
				  GFP_KERNEL);
	if (!adev->pm.dpm.ps)
		return -ENOMEM;
	power_state_offset = (u8 *)state_array->states;
	for (i = 0; i < state_array->ucNumEntries; i++) {
		u8 *idx;
		power_state = (union pplib_power_state *)power_state_offset;
		non_clock_array_index = power_state->v2.nonClockInfoIndex;
		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
			&non_clock_info_array->nonClockInfo[non_clock_array_index];
		ps = kzalloc(sizeof(struct si_ps), GFP_KERNEL);
		if (ps == NULL) {
			kfree(adev->pm.dpm.ps);
			return -ENOMEM;
		}
		adev->pm.dpm.ps[i].ps_priv = ps;
		si_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
					      non_clock_info,
					      non_clock_info_array->ucEntrySize);
		k = 0;
		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
			clock_array_index = idx[j];
			if (clock_array_index >= clock_info_array->ucNumEntries)
				continue;
			if (k >= SISLANDS_MAX_HARDWARE_POWERLEVELS)
				break;
			clock_info = (union pplib_clock_info *)
				((u8 *)&clock_info_array->clockInfo[0] +
				 (clock_array_index * clock_info_array->ucEntrySize));
			si_parse_pplib_clock_info(adev,
						  &adev->pm.dpm.ps[i], k,
						  clock_info);
			k++;
		}
		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
	}
	adev->pm.dpm.num_ps = state_array->ucNumEntries;

	/* fill in the vce power states */
	for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
		u32 sclk, mclk;
		clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
		clock_info = (union pplib_clock_info *)
			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
		sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
		sclk |= clock_info->si.ucEngineClockHigh << 16;
		mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
		mclk |= clock_info->si.ucMemoryClockHigh << 16;
		adev->pm.dpm.vce_states[i].sclk = sclk;
		adev->pm.dpm.vce_states[i].mclk = mclk;
	}

	return 0;
}

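/*
 * One-time DPM setup: allocate the SI power info, parse the PowerPlay
 * tables, seed the dispclk/vddc dependency table, probe the voltage
 * controllers and fill in the powertune and dynamic-state defaults.
 */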
static int si_dpm_init(struct amdgpu_device *adev)
{
	struct rv7xx_power_info *pi;
	struct evergreen_power_info *eg_pi;
	struct ni_power_info *ni_pi;
	struct si_power_info *si_pi;
	struct atom_clock_dividers dividers;
	int ret;

	si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
	if (si_pi == NULL)
		return -ENOMEM;
	adev->pm.dpm.priv = si_pi;
	ni_pi = &si_pi->ni;
	eg_pi = &ni_pi->eg;
	pi = &eg_pi->rv7xx;

	si_pi->sys_pcie_mask =
		adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK;
	si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
	si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);

	si_set_max_cu_value(adev);

	rv770_get_max_vddc(adev);
	si_get_leakage_vddc(adev);
	si_patch_dependency_tables_based_on_leakage(adev);

	pi->acpi_vddc = 0;
	eg_pi->acpi_vddci = 0;
	pi->min_vddc_in_table = 0;
	pi->max_vddc_in_table = 0;

	ret = amdgpu_get_platform_caps(adev);
	if (ret)
		return ret;

	ret = amdgpu_parse_extended_power_table(adev);
	if (ret)
		return ret;

	ret = si_parse_power_table(adev);
	if (ret)
		return ret;

	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
		kcalloc(4,
			sizeof(struct amdgpu_clock_voltage_dependency_entry),
			GFP_KERNEL);
	if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
		amdgpu_free_extended_power_table(adev);
		return -ENOMEM;
	}
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;

	if (adev->pm.dpm.voltage_response_time == 0)
		adev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
	if (adev->pm.dpm.backbias_response_time == 0)
		adev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;

	ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
						 0, false, &dividers);
	if (ret)
		pi->ref_div = dividers.ref_div + 1;
	else
		pi->ref_div = R600_REFERENCEDIVIDER_DFLT;

	eg_pi->smu_uvd_hs = false;

	pi->mclk_strobe_mode_threshold = 40000;
	if (si_is_special_1gb_platform(adev))
		pi->mclk_stutter_mode_threshold = 0;
	else
		pi->mclk_stutter_mode_threshold = pi->mclk_strobe_mode_threshold;
	pi->mclk_edc_enable_threshold = 40000;
	eg_pi->mclk_edc_wr_enable_threshold = 40000;

	ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold;

	pi->voltage_control =
		amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
						VOLTAGE_OBJ_GPIO_LUT);
	if (!pi->voltage_control) {
		si_pi->voltage_control_svi2 =
			amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
							VOLTAGE_OBJ_SVID2);
		if (si_pi->voltage_control_svi2)
			amdgpu_atombios_get_svi2_info(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
						      &si_pi->svd_gpio_id, &si_pi->svc_gpio_id);
	}

	pi->mvdd_control =
		amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_MVDDC,
						VOLTAGE_OBJ_GPIO_LUT);

	eg_pi->vddci_control =
		amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
						VOLTAGE_OBJ_GPIO_LUT);
	if (!eg_pi->vddci_control)
		si_pi->vddci_control_svi2 =
			amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
							VOLTAGE_OBJ_SVID2);

	si_pi->vddc_phase_shed_control =
		amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
						VOLTAGE_OBJ_PHASE_LUT);

	rv770_get_engine_memory_ss(adev);

	pi->asi = RV770_ASI_DFLT;
	pi->pasi = CYPRESS_HASI_DFLT;
	pi->vrc = SISLANDS_VRC_DFLT;

	pi->gfx_clock_gating = true;

	eg_pi->sclk_deep_sleep = true;
	si_pi->sclk_deep_sleep_above_low = false;

	if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
		pi->thermal_protection = true;
	else
		pi->thermal_protection = false;

	eg_pi->dynamic_ac_timing = true;

	eg_pi->light_sleep = true;
#if defined(CONFIG_ACPI)
	eg_pi->pcie_performance_request =
		amdgpu_acpi_is_pcie_performance_request_supported(adev);
#else
	eg_pi->pcie_performance_request = false;
#endif

	si_pi->sram_end = SMC_RAM_END;

	adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
	adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
	adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
	adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
	adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
	adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
	adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;

	si_initialize_powertune_defaults(adev);

	/* make sure dc limits are valid */
	if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
	    (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
		adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
			adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;

	si_pi->fan_ctrl_is_in_default_mode = true;

	return 0;
}

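/* Free everything allocated by si_dpm_init() and si_parse_power_table(). */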
static void si_dpm_fini(struct amdgpu_device *adev)
{
	int i;

	if (adev->pm.dpm.ps)
		for (i = 0; i < adev->pm.dpm.num_ps; i++)
			kfree(adev->pm.dpm.ps[i].ps_priv);
	kfree(adev->pm.dpm.ps);
	kfree(adev->pm.dpm.priv);
	kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
	amdgpu_free_extended_power_table(adev);
}

static void si_dpm_debugfs_print_current_performance_level(void *handle,
							    struct seq_file *m)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct amdgpu_ps *rps = &eg_pi->current_rps;
	struct si_ps *ps = si_get_ps(rps);
	struct rv7xx_pl *pl;
	u32 current_index =
		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
		CURRENT_STATE_INDEX_SHIFT;

	if (current_index >= ps->performance_level_count) {
		seq_printf(m, "invalid dpm profile %d\n", current_index);
	} else {
		pl = &ps->performance_levels[current_index];
		seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
		seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
			   current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
	}
}

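/*
 * Mask or unmask the high/low thermal interrupts in CG_THERMAL_INT for the
 * requested trip direction.
 */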
static int si_dpm_set_interrupt_state(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      unsigned type,
				      enum amdgpu_interrupt_state state)
{
	u32 cg_thermal_int;

	switch (type) {
	case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
			cg_thermal_int |= THERM_INT_MASK_HIGH;
			WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
			cg_thermal_int &= ~THERM_INT_MASK_HIGH;
			WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
			break;
		default:
			break;
		}
		break;

	case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
		switch (state) {
		case AMDGPU_IRQ_STATE_DISABLE:
			cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
			cg_thermal_int |= THERM_INT_MASK_LOW;
			WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
			break;
		case AMDGPU_IRQ_STATE_ENABLE:
			cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
			cg_thermal_int &= ~THERM_INT_MASK_LOW;
			WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
			break;
		default:
			break;
		}
		break;

	default:
		break;
	}

	return 0;
}

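/*
 * Thermal interrupt handler: record the trip direction and kick the DPM
 * thermal work item.
 */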
static int si_dpm_process_interrupt(struct amdgpu_device *adev,
				    struct amdgpu_irq_src *source,
				    struct amdgpu_iv_entry *entry)
{
	bool queue_thermal = false;

	if (entry == NULL)
		return -EINVAL;

	switch (entry->src_id) {
	case 230: /* thermal low to high */
		DRM_DEBUG("IH: thermal low to high\n");
		adev->pm.dpm.thermal.high_to_low = false;
		queue_thermal = true;
		break;
	case 231: /* thermal high to low */
		DRM_DEBUG("IH: thermal high to low\n");
		adev->pm.dpm.thermal.high_to_low = true;
		queue_thermal = true;
		break;
	default:
		break;
	}

	if (queue_thermal)
		schedule_work(&adev->pm.dpm.thermal.work);

	return 0;
}

static int si_dpm_late_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!adev->pm.dpm_enabled)
		return 0;

	ret = si_set_temperature_range(adev);
	if (ret)
		return ret;

	si_dpm_powergate_uvd(adev, true);

	return 0;
}

/**
 * si_dpm_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int si_dpm_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	switch (adev->asic_type) {
	case CHIP_TAHITI:
		chip_name = "tahiti";
		break;
	case CHIP_PITCAIRN:
		if ((adev->pdev->revision == 0x81) &&
		    ((adev->pdev->device == 0x6810) ||
		     (adev->pdev->device == 0x6811)))
			chip_name = "pitcairn_k";
		else
			chip_name = "pitcairn";
		break;
	case CHIP_VERDE:
		if (((adev->pdev->device == 0x6820) &&
		     ((adev->pdev->revision == 0x81) ||
		      (adev->pdev->revision == 0x83))) ||
		    ((adev->pdev->device == 0x6821) &&
		     ((adev->pdev->revision == 0x83) ||
		      (adev->pdev->revision == 0x87))) ||
		    ((adev->pdev->revision == 0x87) &&
		     ((adev->pdev->device == 0x6823) ||
		      (adev->pdev->device == 0x682b))))
			chip_name = "verde_k";
		else
			chip_name = "verde";
		break;
	case CHIP_OLAND:
		if (((adev->pdev->revision == 0x81) &&
		     ((adev->pdev->device == 0x6600) ||
		      (adev->pdev->device == 0x6604) ||
		      (adev->pdev->device == 0x6605) ||
		      (adev->pdev->device == 0x6610))) ||
		    ((adev->pdev->revision == 0x83) &&
		     (adev->pdev->device == 0x6610)))
			chip_name = "oland_k";
		else
			chip_name = "oland";
		break;
	case CHIP_HAINAN:
		if (((adev->pdev->revision == 0x81) &&
		     (adev->pdev->device == 0x6660)) ||
		    ((adev->pdev->revision == 0x83) &&
		     ((adev->pdev->device == 0x6660) ||
		      (adev->pdev->device == 0x6663) ||
		      (adev->pdev->device == 0x6665) ||
		      (adev->pdev->device == 0x6667))))
			chip_name = "hainan_k";
		else if ((adev->pdev->revision == 0xc3) &&
			 (adev->pdev->device == 0x6665))
			chip_name = "banks_k_2";
		else
			chip_name = "hainan";
		break;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
	err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->pm.fw);

out:
	if (err) {
		DRM_ERROR("si_smc: Failed to load firmware. err = %d\"%s\"\n",
			  err, fw_name);
		release_firmware(adev->pm.fw);
		adev->pm.fw = NULL;
	}
	return err;
}

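/*
 * Software init: register the two thermal interrupt sources, set the
 * default power state, load the SMC firmware and run si_dpm_init() under
 * the pm mutex.
 */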
static int si_dpm_sw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq);
	if (ret)
		return ret;

	ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231, &adev->pm.dpm.thermal.irq);
	if (ret)
		return ret;

	/* default to balanced state */
	adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
	adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
	adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
	adev->pm.default_sclk = adev->clock.default_sclk;
	adev->pm.default_mclk = adev->clock.default_mclk;
	adev->pm.current_sclk = adev->clock.default_sclk;
	adev->pm.current_mclk = adev->clock.default_mclk;
	adev->pm.int_thermal_type = THERMAL_TYPE_NONE;

	if (amdgpu_dpm == 0)
		return 0;

	ret = si_dpm_init_microcode(adev);
	if (ret)
		return ret;

	INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
	mutex_lock(&adev->pm.mutex);
	ret = si_dpm_init(adev);
	if (ret)
		goto dpm_failed;
	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
	if (amdgpu_dpm == 1)
		amdgpu_pm_print_power_states(adev);
	mutex_unlock(&adev->pm.mutex);
	DRM_INFO("amdgpu: dpm initialized\n");

	return 0;

dpm_failed:
	si_dpm_fini(adev);
	mutex_unlock(&adev->pm.mutex);
	DRM_ERROR("amdgpu: dpm initialization failed\n");
	return ret;
}

static int si_dpm_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	flush_work(&adev->pm.dpm.thermal.work);

	mutex_lock(&adev->pm.mutex);
	si_dpm_fini(adev);
	mutex_unlock(&adev->pm.mutex);

	return 0;
}

static int si_dpm_hw_init(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (!amdgpu_dpm)
		return 0;

	mutex_lock(&adev->pm.mutex);
	si_dpm_setup_asic(adev);
	ret = si_dpm_enable(adev);
	if (ret)
		adev->pm.dpm_enabled = false;
	else
		adev->pm.dpm_enabled = true;
	mutex_unlock(&adev->pm.mutex);
	amdgpu_pm_compute_clocks(adev);
	return ret;
}

static int si_dpm_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		si_dpm_disable(adev);
		mutex_unlock(&adev->pm.mutex);
	}

	return 0;
}

static int si_dpm_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		mutex_lock(&adev->pm.mutex);
		/* disable dpm */
		si_dpm_disable(adev);
		/* reset the power state */
		adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
		mutex_unlock(&adev->pm.mutex);
	}

	return 0;
}

static int si_dpm_resume(void *handle)
{
	int ret;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev->pm.dpm_enabled) {
		/* asic init will reset to the boot state */
		mutex_lock(&adev->pm.mutex);
		si_dpm_setup_asic(adev);
		ret = si_dpm_enable(adev);
		if (ret)
			adev->pm.dpm_enabled = false;
		else
			adev->pm.dpm_enabled = true;
		mutex_unlock(&adev->pm.mutex);
		if (adev->pm.dpm_enabled)
			amdgpu_pm_compute_clocks(adev);
	}
	return 0;
}

static bool si_dpm_is_idle(void *handle)
{
	/* XXX */
	return true;
}

static int si_dpm_wait_for_idle(void *handle)
{
	/* XXX */
	return 0;
}

static int si_dpm_soft_reset(void *handle)
{
	return 0;
}

static int si_dpm_set_clockgating_state(void *handle,
					enum amd_clockgating_state state)
{
	return 0;
}

static int si_dpm_set_powergating_state(void *handle,
					enum amd_powergating_state state)
{
	return 0;
}

/* get temperature in millidegrees */
static int si_dpm_get_temp(void *handle)
{
	u32 temp;
	int actual_temp = 0;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
		CTF_TEMP_SHIFT;

	if (temp & 0x200)
		actual_temp = 255;
	else
		actual_temp = temp & 0x1ff;

	actual_temp = (actual_temp * 1000);

	return actual_temp;
}

static u32 si_dpm_get_sclk(void *handle, bool low)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);

	if (low)
		return requested_state->performance_levels[0].sclk;
	else
		return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
}

static u32 si_dpm_get_mclk(void *handle, bool low)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);

	if (low)
		return requested_state->performance_levels[0].mclk;
	else
		return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
}

static void si_dpm_print_power_state(void *handle,
				     void *current_ps)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct amdgpu_ps *rps = (struct amdgpu_ps *)current_ps;
	struct si_ps *ps = si_get_ps(rps);
	struct rv7xx_pl *pl;
	int i;

	amdgpu_dpm_print_class_info(rps->class, rps->class2);
	amdgpu_dpm_print_cap_info(rps->caps);
	DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
	for (i = 0; i < ps->performance_level_count; i++) {
		pl = &ps->performance_levels[i];
		if (adev->asic_type >= CHIP_TAHITI)
			DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
				 i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
		else
			DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u\n",
				 i, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
	}
	amdgpu_dpm_print_ps_status(adev, rps);
}

static int si_dpm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->powerplay.pp_funcs = &si_dpm_funcs;
	adev->powerplay.pp_handle = adev;
	si_dpm_set_irq_funcs(adev);
	return 0;
}

static inline bool si_are_power_levels_equal(const struct rv7xx_pl *si_cpl1,
					     const struct rv7xx_pl *si_cpl2)
{
	return ((si_cpl1->mclk == si_cpl2->mclk) &&
		(si_cpl1->sclk == si_cpl2->sclk) &&
		(si_cpl1->pcie_gen == si_cpl2->pcie_gen) &&
		(si_cpl1->vddc == si_cpl2->vddc) &&
		(si_cpl1->vddci == si_cpl2->vddci));
}

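/*
 * Compare the current and requested power states level by level; if the
 * performance levels all match, fall back to the UVD/VCE clocks to decide
 * whether a state switch is actually needed.
 */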
static int si_check_state_equal(void *handle,
				void *current_ps,
				void *request_ps,
				bool *equal)
{
	struct si_ps *si_cps;
	struct si_ps *si_rps;
	int i;
	struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
	struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
		return -EINVAL;

	si_cps = si_get_ps((struct amdgpu_ps *)cps);
	si_rps = si_get_ps((struct amdgpu_ps *)rps);

	if (si_cps == NULL) {
		printk("si_cps is NULL\n");
		*equal = false;
		return 0;
	}

	if (si_cps->performance_level_count != si_rps->performance_level_count) {
		*equal = false;
		return 0;
	}

	for (i = 0; i < si_cps->performance_level_count; i++) {
		if (!si_are_power_levels_equal(&(si_cps->performance_levels[i]),
					       &(si_rps->performance_levels[i]))) {
			*equal = false;
			return 0;
		}
	}

	/* If all performance levels are the same try to use the UVD clocks to break the tie.*/
	*equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
	*equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));

	return 0;
}

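/*
 * Report the sclk/mclk of the currently selected performance level or the
 * GPU temperature through the generic sensor interface.
 */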
static int si_dpm_read_sensor(void *handle, int idx,
			      void *value, int *size)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct amdgpu_ps *rps = &eg_pi->current_rps;
	struct si_ps *ps = si_get_ps(rps);
	uint32_t sclk, mclk;
	u32 pl_index =
		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
		CURRENT_STATE_INDEX_SHIFT;

	/* size must be at least 4 bytes for all sensors */
	if (*size < 4)
		return -EINVAL;

	switch (idx) {
	case AMDGPU_PP_SENSOR_GFX_SCLK:
		if (pl_index < ps->performance_level_count) {
			sclk = ps->performance_levels[pl_index].sclk;
			*((uint32_t *)value) = sclk;
			*size = 4;
			return 0;
		}
		return -EINVAL;
	case AMDGPU_PP_SENSOR_GFX_MCLK:
		if (pl_index < ps->performance_level_count) {
			mclk = ps->performance_levels[pl_index].mclk;
			*((uint32_t *)value) = mclk;
			*size = 4;
			return 0;
		}
		return -EINVAL;
	case AMDGPU_PP_SENSOR_GPU_TEMP:
		*((uint32_t *)value) = si_dpm_get_temp(adev);
		*size = 4;
		return 0;
	default:
		return -EINVAL;
	}
}

static const struct amd_ip_funcs si_dpm_ip_funcs = {
	.early_init = si_dpm_early_init,
	.late_init = si_dpm_late_init,
	.sw_init = si_dpm_sw_init,
	.sw_fini = si_dpm_sw_fini,
	.hw_init = si_dpm_hw_init,
	.hw_fini = si_dpm_hw_fini,
	.suspend = si_dpm_suspend,
	.resume = si_dpm_resume,
	.is_idle = si_dpm_is_idle,
	.wait_for_idle = si_dpm_wait_for_idle,
	.soft_reset = si_dpm_soft_reset,
	.set_clockgating_state = si_dpm_set_clockgating_state,
	.set_powergating_state = si_dpm_set_powergating_state,
};

const struct amdgpu_ip_block_version si_smu_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_SMC,
	.funcs = &si_dpm_ip_funcs,
};

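/* dpm callbacks exported to the amdgpu powerplay layer */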
static const struct amd_pm_funcs si_dpm_funcs = {
	.pre_set_power_state = &si_dpm_pre_set_power_state,
	.set_power_state = &si_dpm_set_power_state,
	.post_set_power_state = &si_dpm_post_set_power_state,
	.display_configuration_changed = &si_dpm_display_configuration_changed,
	.get_sclk = &si_dpm_get_sclk,
	.get_mclk = &si_dpm_get_mclk,
	.print_power_state = &si_dpm_print_power_state,
	.debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level,
	.force_performance_level = &si_dpm_force_performance_level,
	.vblank_too_short = &si_dpm_vblank_too_short,
	.set_fan_control_mode = &si_dpm_set_fan_control_mode,
	.get_fan_control_mode = &si_dpm_get_fan_control_mode,
	.set_fan_speed_percent = &si_dpm_set_fan_speed_percent,
	.get_fan_speed_percent = &si_dpm_get_fan_speed_percent,
	.check_state_equal = &si_check_state_equal,
	.get_vce_clock_state = amdgpu_get_vce_clock_state,
	.read_sensor = &si_dpm_read_sensor,
};

static const struct amdgpu_irq_src_funcs si_dpm_irq_funcs = {
	.set = si_dpm_set_interrupt_state,
	.process = si_dpm_process_interrupt,
};

static void si_dpm_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
	adev->pm.dpm.thermal.irq.funcs = &si_dpm_irq_funcs;
}