/*
 * Copyright 2013 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
26 #include "amdgpu_pm.h"
27 #include "amdgpu_dpm.h"
28 #include "amdgpu_atombios.h"
33 #include "../include/pptable.h"
34 #include <linux/math64.h>
35 #include <linux/seq_file.h>
36 #include <linux/firmware.h>
/*
 * Memory-controller arbiter register-set selectors F0..F3 (consecutive
 * indices 0x0a..0x0d). NOTE(review): presumably used to pick which copy of
 * the MC arbitration parameters is active when switching memory clocks —
 * confirm against the users of these constants elsewhere in this file.
 */
38 #define MC_CG_ARB_FREQ_F0 0x0a
39 #define MC_CG_ARB_FREQ_F1 0x0b
40 #define MC_CG_ARB_FREQ_F2 0x0c
41 #define MC_CG_ARB_FREQ_F3 0x0d
/* One-past-the-end address of the SMC address space; assumed to bound
 * SMC RAM read/write helpers — TODO confirm at the call sites. */
43 #define SMC_RAM_END 0x20000
/* Minimum engine clock at which sclk deep sleep is allowed.
 * NOTE(review): units not visible here (likely 10 kHz units, i.e.
 * 13.5 MHz) — verify against the deep-sleep comparison code. */
45 #define SCLK_MIN_DEEPSLEEP_FREQ 1350
48 /* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
/* Byte sizes of successive revisions of the ATOM_PPLIB_EXTENDEDHEADER
 * structure; each revision appends one 16-bit field (sizes grow by 2).
 * Used to decide which optional table offsets a given BIOS header
 * actually contains. */
49 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
50 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
51 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
52 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
53 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
54 #define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
/* NOTE(review): presumably the offset of ATOM BIOS scratch register 4 —
 * confirm against the register read/write that consumes it. */
56 #define BIOS_SCRATCH_4 0x5cd
/*
 * Advertise the SMC (power-management microcontroller) firmware images
 * this driver may request, one per Southern Islands ASIC variant, so that
 * userspace tooling (e.g. initramfs generators) can bundle them.
 * Note the "radeon/" path prefix in the literals: the firmware files are
 * shared with (and live under the directory of) the older radeon driver.
 */
58 MODULE_FIRMWARE("radeon/tahiti_smc.bin");
59 MODULE_FIRMWARE("radeon/pitcairn_smc.bin");
60 MODULE_FIRMWARE("radeon/pitcairn_k_smc.bin");
61 MODULE_FIRMWARE("radeon/verde_smc.bin");
62 MODULE_FIRMWARE("radeon/verde_k_smc.bin");
63 MODULE_FIRMWARE("radeon/oland_smc.bin");
64 MODULE_FIRMWARE("radeon/oland_k_smc.bin");
65 MODULE_FIRMWARE("radeon/hainan_smc.bin");
66 MODULE_FIRMWARE("radeon/hainan_k_smc.bin");
67 MODULE_FIRMWARE("radeon/banks_k_2_smc.bin");
70 struct _ATOM_POWERPLAY_INFO info
;
71 struct _ATOM_POWERPLAY_INFO_V2 info_2
;
72 struct _ATOM_POWERPLAY_INFO_V3 info_3
;
73 struct _ATOM_PPLIB_POWERPLAYTABLE pplib
;
74 struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2
;
75 struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3
;
76 struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4
;
77 struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5
;
81 struct _ATOM_PPLIB_FANTABLE fan
;
82 struct _ATOM_PPLIB_FANTABLE2 fan2
;
83 struct _ATOM_PPLIB_FANTABLE3 fan3
;
86 union pplib_clock_info
{
87 struct _ATOM_PPLIB_R600_CLOCK_INFO r600
;
88 struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780
;
89 struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen
;
90 struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo
;
91 struct _ATOM_PPLIB_SI_CLOCK_INFO si
;
94 static const u32 r600_utc
[R600_PM_NUMBER_OF_TC
] =
113 static const u32 r600_dtc
[R600_PM_NUMBER_OF_TC
] =
132 static const struct si_cac_config_reg cac_weights_tahiti
[] =
134 { 0x0, 0x0000ffff, 0, 0xc, SISLANDS_CACCONFIG_CGIND
},
135 { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
136 { 0x1, 0x0000ffff, 0, 0x101, SISLANDS_CACCONFIG_CGIND
},
137 { 0x1, 0xffff0000, 16, 0xc, SISLANDS_CACCONFIG_CGIND
},
138 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
139 { 0x3, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
140 { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
141 { 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
142 { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
143 { 0x5, 0x0000ffff, 0, 0x8fc, SISLANDS_CACCONFIG_CGIND
},
144 { 0x5, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
145 { 0x6, 0x0000ffff, 0, 0x95, SISLANDS_CACCONFIG_CGIND
},
146 { 0x6, 0xffff0000, 16, 0x34e, SISLANDS_CACCONFIG_CGIND
},
147 { 0x18f, 0x0000ffff, 0, 0x1a1, SISLANDS_CACCONFIG_CGIND
},
148 { 0x7, 0x0000ffff, 0, 0xda, SISLANDS_CACCONFIG_CGIND
},
149 { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
150 { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
151 { 0x8, 0xffff0000, 16, 0x46, SISLANDS_CACCONFIG_CGIND
},
152 { 0x9, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
153 { 0xa, 0x0000ffff, 0, 0x208, SISLANDS_CACCONFIG_CGIND
},
154 { 0xb, 0x0000ffff, 0, 0xe7, SISLANDS_CACCONFIG_CGIND
},
155 { 0xb, 0xffff0000, 16, 0x948, SISLANDS_CACCONFIG_CGIND
},
156 { 0xc, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
157 { 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
158 { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
159 { 0xe, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
160 { 0xf, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
161 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
162 { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
163 { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
164 { 0x11, 0x0000ffff, 0, 0x167, SISLANDS_CACCONFIG_CGIND
},
165 { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
166 { 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
167 { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
168 { 0x13, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND
},
169 { 0x14, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
170 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
171 { 0x15, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND
},
172 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
173 { 0x16, 0x0000ffff, 0, 0x31, SISLANDS_CACCONFIG_CGIND
},
174 { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
175 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
176 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
177 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
178 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
179 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
180 { 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
181 { 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
182 { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
183 { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
184 { 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
185 { 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
186 { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
187 { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
188 { 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
189 { 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
190 { 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
191 { 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
192 { 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
193 { 0x6d, 0x0000ffff, 0, 0x18e, SISLANDS_CACCONFIG_CGIND
},
197 static const struct si_cac_config_reg lcac_tahiti
[] =
199 { 0x143, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND
},
200 { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
201 { 0x146, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND
},
202 { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
203 { 0x149, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND
},
204 { 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
205 { 0x14c, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND
},
206 { 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
207 { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
208 { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
209 { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
210 { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
211 { 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
212 { 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
213 { 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
214 { 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
215 { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
216 { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
217 { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
218 { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
219 { 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
220 { 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
221 { 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
222 { 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
223 { 0x8c, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
224 { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
225 { 0x8f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
226 { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
227 { 0x92, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
228 { 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
229 { 0x95, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
230 { 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
231 { 0x14f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
232 { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
233 { 0x152, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
234 { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
235 { 0x155, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
236 { 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
237 { 0x158, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
238 { 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
239 { 0x110, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
240 { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
241 { 0x113, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
242 { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
243 { 0x116, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
244 { 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
245 { 0x119, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND
},
246 { 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
247 { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
248 { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
249 { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
250 { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
251 { 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
252 { 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
253 { 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
254 { 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
255 { 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
256 { 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
257 { 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
258 { 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
259 { 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
260 { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
261 { 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
262 { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
263 { 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
264 { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
265 { 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
266 { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
267 { 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
268 { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
269 { 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
270 { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
271 { 0x16d, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND
},
272 { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
273 { 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
274 { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
275 { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
276 { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
277 { 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
278 { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
279 { 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
280 { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
281 { 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
282 { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
283 { 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
284 { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
289 static const struct si_cac_config_reg cac_override_tahiti
[] =
294 static const struct si_powertune_data powertune_data_tahiti
=
325 static const struct si_dte_data dte_data_tahiti
=
327 { 1159409, 0, 0, 0, 0 },
336 { 27, 31, 35, 39, 43, 47, 54, 61, 67, 74, 81, 88, 95, 0, 0, 0 },
337 { 240888759, 221057860, 235370597, 162287531, 158510299, 131423027, 116673180, 103067515, 87941937, 76209048, 68209175, 64090048, 58301890, 0, 0, 0 },
338 { 12024, 11189, 11451, 8411, 7939, 6666, 5681, 4905, 4241, 3720, 3354, 3122, 2890, 0, 0, 0 },
344 static const struct si_dte_data dte_data_tahiti_le
=
346 { 0x1E8480, 0x7A1200, 0x2160EC0, 0x3938700, 0 },
347 { 0x7D, 0x7D, 0x4E4, 0xB00, 0 },
355 { 0x78, 0x7C, 0x82, 0x88, 0x8E, 0x94, 0x9A, 0xA0, 0xA6, 0xAC, 0xB0, 0xB4, 0xB8, 0xBC, 0xC0, 0xC4 },
356 { 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700 },
357 { 0x2AF8, 0x2AF8, 0x29BB, 0x27F9, 0x2637, 0x2475, 0x22B3, 0x20F1, 0x1F2F, 0x1D6D, 0x1734, 0x1414, 0x10F4, 0xDD4, 0xAB4, 0x794 },
363 static const struct si_dte_data dte_data_tahiti_pro
=
365 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
366 { 0x0, 0x0, 0x0, 0x0, 0x0 },
374 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
375 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
376 { 0x7D0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
381 static const struct si_dte_data dte_data_new_zealand
=
383 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0 },
384 { 0x29B, 0x3E9, 0x537, 0x7D2, 0 },
392 { 0x82, 0xA0, 0xB4, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE },
393 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
394 { 0xDAC, 0x1388, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685 },
399 static const struct si_dte_data dte_data_aruba_pro
=
401 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
402 { 0x0, 0x0, 0x0, 0x0, 0x0 },
410 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
411 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
412 { 0x1000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
417 static const struct si_dte_data dte_data_malta
=
419 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
420 { 0x0, 0x0, 0x0, 0x0, 0x0 },
428 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
429 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
430 { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
435 static const struct si_cac_config_reg cac_weights_pitcairn
[] =
437 { 0x0, 0x0000ffff, 0, 0x8a, SISLANDS_CACCONFIG_CGIND
},
438 { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
439 { 0x1, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
440 { 0x1, 0xffff0000, 16, 0x24d, SISLANDS_CACCONFIG_CGIND
},
441 { 0x2, 0x0000ffff, 0, 0x19, SISLANDS_CACCONFIG_CGIND
},
442 { 0x3, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND
},
443 { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
444 { 0x4, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND
},
445 { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
446 { 0x5, 0x0000ffff, 0, 0xc11, SISLANDS_CACCONFIG_CGIND
},
447 { 0x5, 0xffff0000, 16, 0x7f3, SISLANDS_CACCONFIG_CGIND
},
448 { 0x6, 0x0000ffff, 0, 0x403, SISLANDS_CACCONFIG_CGIND
},
449 { 0x6, 0xffff0000, 16, 0x367, SISLANDS_CACCONFIG_CGIND
},
450 { 0x18f, 0x0000ffff, 0, 0x4c9, SISLANDS_CACCONFIG_CGIND
},
451 { 0x7, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
452 { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
453 { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
454 { 0x8, 0xffff0000, 16, 0x45d, SISLANDS_CACCONFIG_CGIND
},
455 { 0x9, 0x0000ffff, 0, 0x36d, SISLANDS_CACCONFIG_CGIND
},
456 { 0xa, 0x0000ffff, 0, 0x534, SISLANDS_CACCONFIG_CGIND
},
457 { 0xb, 0x0000ffff, 0, 0x5da, SISLANDS_CACCONFIG_CGIND
},
458 { 0xb, 0xffff0000, 16, 0x880, SISLANDS_CACCONFIG_CGIND
},
459 { 0xc, 0x0000ffff, 0, 0x201, SISLANDS_CACCONFIG_CGIND
},
460 { 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
461 { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
462 { 0xe, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND
},
463 { 0xf, 0x0000ffff, 0, 0x1f, SISLANDS_CACCONFIG_CGIND
},
464 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
465 { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
466 { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
467 { 0x11, 0x0000ffff, 0, 0x5de, SISLANDS_CACCONFIG_CGIND
},
468 { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
469 { 0x12, 0x0000ffff, 0, 0x7b, SISLANDS_CACCONFIG_CGIND
},
470 { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
471 { 0x13, 0xffff0000, 16, 0x13, SISLANDS_CACCONFIG_CGIND
},
472 { 0x14, 0x0000ffff, 0, 0xf9, SISLANDS_CACCONFIG_CGIND
},
473 { 0x15, 0x0000ffff, 0, 0x66, SISLANDS_CACCONFIG_CGIND
},
474 { 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
475 { 0x4e, 0x0000ffff, 0, 0x13, SISLANDS_CACCONFIG_CGIND
},
476 { 0x16, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
477 { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
478 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
479 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
480 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
481 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
482 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
483 { 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
484 { 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
485 { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
486 { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
487 { 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
488 { 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
489 { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
490 { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
491 { 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
492 { 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
493 { 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
494 { 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
495 { 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
496 { 0x6d, 0x0000ffff, 0, 0x186, SISLANDS_CACCONFIG_CGIND
},
500 static const struct si_cac_config_reg lcac_pitcairn
[] =
502 { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
503 { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
504 { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
505 { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
506 { 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
507 { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
508 { 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
509 { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
510 { 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
511 { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
512 { 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
513 { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
514 { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
515 { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
516 { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
517 { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
518 { 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
519 { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
520 { 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
521 { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
522 { 0x8f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
523 { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
524 { 0x146, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
525 { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
526 { 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
527 { 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
528 { 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
529 { 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
530 { 0x116, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
531 { 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
532 { 0x155, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
533 { 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
534 { 0x92, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
535 { 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
536 { 0x149, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
537 { 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
538 { 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
539 { 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
540 { 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
541 { 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
542 { 0x119, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
543 { 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
544 { 0x158, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
545 { 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
546 { 0x95, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
547 { 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
548 { 0x14c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
549 { 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
550 { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
551 { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
552 { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
553 { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
554 { 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
555 { 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
556 { 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
557 { 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
558 { 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
559 { 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
560 { 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
561 { 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
562 { 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
563 { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
564 { 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
565 { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
566 { 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
567 { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
568 { 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
569 { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
570 { 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
571 { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
572 { 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
573 { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
574 { 0x16d, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
575 { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
576 { 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
577 { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
578 { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
579 { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
580 { 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
581 { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
582 { 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
583 { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
584 { 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
585 { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
586 { 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
587 { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
591 static const struct si_cac_config_reg cac_override_pitcairn
[] =
596 static const struct si_powertune_data powertune_data_pitcairn
=
627 static const struct si_dte_data dte_data_pitcairn
=
638 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
639 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
640 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
645 static const struct si_dte_data dte_data_curacao_xt
=
647 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
648 { 0x0, 0x0, 0x0, 0x0, 0x0 },
656 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
657 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
658 { 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
663 static const struct si_dte_data dte_data_curacao_pro
=
665 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
666 { 0x0, 0x0, 0x0, 0x0, 0x0 },
674 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
675 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
676 { 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
681 static const struct si_dte_data dte_data_neptune_xt
=
683 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
684 { 0x0, 0x0, 0x0, 0x0, 0x0 },
692 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
693 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
694 { 0x3A2F, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
699 static const struct si_cac_config_reg cac_weights_chelsea_pro
[] =
701 { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND
},
702 { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
703 { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND
},
704 { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND
},
705 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
706 { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
707 { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
708 { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
709 { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND
},
710 { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND
},
711 { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND
},
712 { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND
},
713 { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND
},
714 { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
715 { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND
},
716 { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND
},
717 { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND
},
718 { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND
},
719 { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND
},
720 { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND
},
721 { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND
},
722 { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND
},
723 { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND
},
724 { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND
},
725 { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND
},
726 { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
727 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND
},
728 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
729 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
730 { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND
},
731 { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
732 { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND
},
733 { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND
},
734 { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND
},
735 { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND
},
736 { 0x14, 0x0000ffff, 0, 0x2BD, SISLANDS_CACCONFIG_CGIND
},
737 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
738 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND
},
739 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
740 { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
741 { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND
},
742 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
743 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
744 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
745 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
746 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
747 { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
748 { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
749 { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
750 { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
751 { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
752 { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
753 { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
754 { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
755 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
756 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
757 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
758 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
759 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
760 { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND
},
764 static const struct si_cac_config_reg cac_weights_chelsea_xt
[] =
766 { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND
},
767 { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
768 { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND
},
769 { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND
},
770 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
771 { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
772 { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
773 { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
774 { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND
},
775 { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND
},
776 { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND
},
777 { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND
},
778 { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND
},
779 { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
780 { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND
},
781 { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND
},
782 { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND
},
783 { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND
},
784 { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND
},
785 { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND
},
786 { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND
},
787 { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND
},
788 { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND
},
789 { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND
},
790 { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND
},
791 { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
792 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND
},
793 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
794 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
795 { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND
},
796 { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
797 { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND
},
798 { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND
},
799 { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND
},
800 { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND
},
801 { 0x14, 0x0000ffff, 0, 0x30A, SISLANDS_CACCONFIG_CGIND
},
802 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
803 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND
},
804 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
805 { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
806 { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND
},
807 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
808 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
809 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
810 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
811 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
812 { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
813 { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
814 { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
815 { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
816 { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
817 { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
818 { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
819 { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
820 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
821 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
822 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
823 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
824 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
825 { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND
},
829 static const struct si_cac_config_reg cac_weights_heathrow
[] =
831 { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND
},
832 { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
833 { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND
},
834 { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND
},
835 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
836 { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
837 { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
838 { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
839 { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND
},
840 { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND
},
841 { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND
},
842 { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND
},
843 { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND
},
844 { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
845 { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND
},
846 { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND
},
847 { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND
},
848 { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND
},
849 { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND
},
850 { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND
},
851 { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND
},
852 { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND
},
853 { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND
},
854 { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND
},
855 { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND
},
856 { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
857 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND
},
858 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
859 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
860 { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND
},
861 { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
862 { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND
},
863 { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND
},
864 { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND
},
865 { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND
},
866 { 0x14, 0x0000ffff, 0, 0x362, SISLANDS_CACCONFIG_CGIND
},
867 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
868 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND
},
869 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
870 { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
871 { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND
},
872 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
873 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
874 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
875 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
876 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
877 { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
878 { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
879 { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
880 { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
881 { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
882 { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
883 { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
884 { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
885 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
886 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
887 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
888 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
889 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
890 { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND
},
894 static const struct si_cac_config_reg cac_weights_cape_verde_pro
[] =
896 { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND
},
897 { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
898 { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND
},
899 { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND
},
900 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
901 { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
902 { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
903 { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
904 { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND
},
905 { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND
},
906 { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND
},
907 { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND
},
908 { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND
},
909 { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
910 { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND
},
911 { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND
},
912 { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND
},
913 { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND
},
914 { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND
},
915 { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND
},
916 { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND
},
917 { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND
},
918 { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND
},
919 { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND
},
920 { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND
},
921 { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
922 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND
},
923 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
924 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
925 { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND
},
926 { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
927 { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND
},
928 { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND
},
929 { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND
},
930 { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND
},
931 { 0x14, 0x0000ffff, 0, 0x315, SISLANDS_CACCONFIG_CGIND
},
932 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
933 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND
},
934 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
935 { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
936 { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND
},
937 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
938 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
939 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
940 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
941 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
942 { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
943 { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
944 { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
945 { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
946 { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
947 { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
948 { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
949 { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
950 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
951 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
952 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
953 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
954 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
955 { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND
},
959 static const struct si_cac_config_reg cac_weights_cape_verde
[] =
961 { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND
},
962 { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
963 { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND
},
964 { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND
},
965 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
966 { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
967 { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
968 { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
969 { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND
},
970 { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND
},
971 { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND
},
972 { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND
},
973 { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND
},
974 { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
975 { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND
},
976 { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND
},
977 { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND
},
978 { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND
},
979 { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND
},
980 { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND
},
981 { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND
},
982 { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND
},
983 { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND
},
984 { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND
},
985 { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND
},
986 { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
987 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND
},
988 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
989 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
990 { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND
},
991 { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
992 { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND
},
993 { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND
},
994 { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND
},
995 { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND
},
996 { 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND
},
997 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
998 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND
},
999 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1000 { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
1001 { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND
},
1002 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1003 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1004 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1005 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1006 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1007 { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1008 { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1009 { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1010 { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1011 { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1012 { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1013 { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1014 { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1015 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1016 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1017 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1018 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1019 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1020 { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND
},
1024 static const struct si_cac_config_reg lcac_cape_verde
[] =
1026 { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1027 { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1028 { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1029 { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1030 { 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
1031 { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1032 { 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
1033 { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1034 { 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
1035 { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1036 { 0x143, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1037 { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1038 { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1039 { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1040 { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1041 { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1042 { 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
1043 { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1044 { 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND
},
1045 { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1046 { 0x8f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1047 { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1048 { 0x146, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1049 { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1050 { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1051 { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1052 { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1053 { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1054 { 0x164, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1055 { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1056 { 0x167, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1057 { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1058 { 0x16a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1059 { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1060 { 0x15e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1061 { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1062 { 0x161, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1063 { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1064 { 0x15b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1065 { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1066 { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1067 { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1068 { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1069 { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1070 { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1071 { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1072 { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1073 { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1074 { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1075 { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1076 { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1077 { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1078 { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1079 { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1083 static const struct si_cac_config_reg cac_override_cape_verde
[] =
/*
 * Power-tune parameters for Cape Verde.
 * NOTE(review): this initializer is truncated in this extract — only the
 * first field survives ((1 << 16) | 0x6993, presumably a fixed-point
 * coefficient).  The remaining struct si_powertune_data fields and the
 * closing brace must be restored from the original driver source before
 * this file can build.
 */
1088 static const struct si_powertune_data powertune_data_cape_verde
=
1090 ((1 << 16) | 0x6993),
/*
 * Digital Temperature Estimation (DTE) parameters for Cape Verde.
 * NOTE(review): truncated in this extract — only three zero-filled 16-entry
 * array fields survive; the scalar fields between the declaration and these
 * arrays, plus the closing brace, must be restored from the original driver
 * source.
 */
1119 static const struct si_dte_data dte_data_cape_verde
=
1130 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1131 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1132 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
/*
 * DTE parameters for Venus XTX.
 * NOTE(review): truncated in this extract — the scalar fields between the
 * first two 5-entry arrays and the three 16-entry arrays (and the closing
 * brace) are missing and must be restored from the original driver source.
 * The surviving arrays differ from the XT/PRO variants only in the second
 * 5-entry array and the last 16-entry array.
 */
1137 static const struct si_dte_data dte_data_venus_xtx
=
1139 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1140 { 0x71C, 0xAAB, 0xE39, 0x11C7, 0x0 },
1148 { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1149 { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1150 { 0xD6D8, 0x88B8, 0x1555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
/*
 * DTE parameters for Venus XT.
 * NOTE(review): truncated in this extract — scalar fields and the closing
 * brace are missing and must be restored from the original driver source.
 */
1155 static const struct si_dte_data dte_data_venus_xt
=
1157 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1158 { 0xBDA, 0x11C7, 0x17B4, 0x1DA1, 0x0 },
1166 { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1167 { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1168 { 0xAFC8, 0x88B8, 0x238E, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
/*
 * DTE parameters for Venus PRO.
 * NOTE(review): truncated in this extract — scalar fields and the closing
 * brace are missing and must be restored from the original driver source.
 */
1173 static const struct si_dte_data dte_data_venus_pro
=
1175 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1176 { 0x11C7, 0x1AAB, 0x238E, 0x2C72, 0x0 },
1184 { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1185 { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1186 { 0x88B8, 0x88B8, 0x3555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1191 static const struct si_cac_config_reg cac_weights_oland
[] =
1193 { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND
},
1194 { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
1195 { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND
},
1196 { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND
},
1197 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1198 { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
1199 { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND
},
1200 { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND
},
1201 { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND
},
1202 { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND
},
1203 { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND
},
1204 { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND
},
1205 { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND
},
1206 { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
1207 { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND
},
1208 { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND
},
1209 { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND
},
1210 { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND
},
1211 { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND
},
1212 { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND
},
1213 { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND
},
1214 { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND
},
1215 { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND
},
1216 { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND
},
1217 { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND
},
1218 { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
1219 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND
},
1220 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1221 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1222 { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND
},
1223 { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
1224 { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND
},
1225 { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND
},
1226 { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND
},
1227 { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND
},
1228 { 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND
},
1229 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1230 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND
},
1231 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1232 { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND
},
1233 { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND
},
1234 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1235 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1236 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1237 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1238 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1239 { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1240 { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1241 { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1242 { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1243 { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1244 { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1245 { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1246 { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1247 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1248 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1249 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1250 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1251 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1252 { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND
},
1256 static const struct si_cac_config_reg cac_weights_mars_pro
[] =
1258 { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND
},
1259 { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND
},
1260 { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND
},
1261 { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND
},
1262 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1263 { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND
},
1264 { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND
},
1265 { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND
},
1266 { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND
},
1267 { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND
},
1268 { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND
},
1269 { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND
},
1270 { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND
},
1271 { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND
},
1272 { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND
},
1273 { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND
},
1274 { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND
},
1275 { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND
},
1276 { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND
},
1277 { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND
},
1278 { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND
},
1279 { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND
},
1280 { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND
},
1281 { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
1282 { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND
},
1283 { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND
},
1284 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND
},
1285 { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND
},
1286 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1287 { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND
},
1288 { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND
},
1289 { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND
},
1290 { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND
},
1291 { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND
},
1292 { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND
},
1293 { 0x14, 0x0000ffff, 0, 0x2, SISLANDS_CACCONFIG_CGIND
},
1294 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1295 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND
},
1296 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1297 { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND
},
1298 { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND
},
1299 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1300 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1301 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1302 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1303 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1304 { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND
},
1305 { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND
},
1306 { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1307 { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1308 { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND
},
1309 { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND
},
1310 { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1311 { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1312 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1313 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1314 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1315 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1316 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1317 { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND
},
1321 static const struct si_cac_config_reg cac_weights_mars_xt
[] =
1323 { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND
},
1324 { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND
},
1325 { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND
},
1326 { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND
},
1327 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1328 { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND
},
1329 { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND
},
1330 { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND
},
1331 { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND
},
1332 { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND
},
1333 { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND
},
1334 { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND
},
1335 { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND
},
1336 { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND
},
1337 { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND
},
1338 { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND
},
1339 { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND
},
1340 { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND
},
1341 { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND
},
1342 { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND
},
1343 { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND
},
1344 { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND
},
1345 { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND
},
1346 { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
1347 { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND
},
1348 { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND
},
1349 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND
},
1350 { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND
},
1351 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1352 { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND
},
1353 { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND
},
1354 { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND
},
1355 { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND
},
1356 { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND
},
1357 { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND
},
1358 { 0x14, 0x0000ffff, 0, 0x60, SISLANDS_CACCONFIG_CGIND
},
1359 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1360 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND
},
1361 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1362 { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND
},
1363 { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND
},
1364 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1365 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1366 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1367 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1368 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1369 { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND
},
1370 { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND
},
1371 { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1372 { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1373 { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND
},
1374 { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND
},
1375 { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1376 { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1377 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1378 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1379 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1380 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1381 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1382 { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND
},
1386 static const struct si_cac_config_reg cac_weights_oland_pro
[] =
1388 { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND
},
1389 { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND
},
1390 { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND
},
1391 { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND
},
1392 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1393 { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND
},
1394 { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND
},
1395 { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND
},
1396 { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND
},
1397 { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND
},
1398 { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND
},
1399 { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND
},
1400 { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND
},
1401 { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND
},
1402 { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND
},
1403 { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND
},
1404 { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND
},
1405 { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND
},
1406 { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND
},
1407 { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND
},
1408 { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND
},
1409 { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND
},
1410 { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND
},
1411 { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
1412 { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND
},
1413 { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND
},
1414 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND
},
1415 { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND
},
1416 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1417 { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND
},
1418 { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND
},
1419 { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND
},
1420 { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND
},
1421 { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND
},
1422 { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND
},
1423 { 0x14, 0x0000ffff, 0, 0x90, SISLANDS_CACCONFIG_CGIND
},
1424 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1425 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND
},
1426 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1427 { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND
},
1428 { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND
},
1429 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1430 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1431 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1432 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1433 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1434 { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND
},
1435 { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND
},
1436 { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1437 { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1438 { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND
},
1439 { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND
},
1440 { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1441 { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1442 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1443 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1444 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1445 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1446 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1447 { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND
},
1451 static const struct si_cac_config_reg cac_weights_oland_xt
[] =
1453 { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND
},
1454 { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND
},
1455 { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND
},
1456 { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND
},
1457 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1458 { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND
},
1459 { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND
},
1460 { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND
},
1461 { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND
},
1462 { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND
},
1463 { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND
},
1464 { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND
},
1465 { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND
},
1466 { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND
},
1467 { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND
},
1468 { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND
},
1469 { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND
},
1470 { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND
},
1471 { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND
},
1472 { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND
},
1473 { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND
},
1474 { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND
},
1475 { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND
},
1476 { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND
},
1477 { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND
},
1478 { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND
},
1479 { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND
},
1480 { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND
},
1481 { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1482 { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND
},
1483 { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND
},
1484 { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND
},
1485 { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND
},
1486 { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND
},
1487 { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND
},
1488 { 0x14, 0x0000ffff, 0, 0x120, SISLANDS_CACCONFIG_CGIND
},
1489 { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1490 { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND
},
1491 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1492 { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND
},
1493 { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND
},
1494 { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1495 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1496 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1497 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1498 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1499 { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND
},
1500 { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND
},
1501 { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1502 { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1503 { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND
},
1504 { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND
},
1505 { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1506 { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1507 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1508 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1509 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1510 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1511 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1512 { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND
},
1516 static const struct si_cac_config_reg lcac_oland
[] =
1518 { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1519 { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1520 { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1521 { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1522 { 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND
},
1523 { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1524 { 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND
},
1525 { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1526 { 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND
},
1527 { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1528 { 0x143, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND
},
1529 { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1530 { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1531 { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1532 { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1533 { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1534 { 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1535 { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1536 { 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1537 { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1538 { 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1539 { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1540 { 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1541 { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1542 { 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1543 { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1544 { 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1545 { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1546 { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1547 { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1548 { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1549 { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1550 { 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1551 { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1552 { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1553 { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1554 { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1555 { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1556 { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1557 { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1558 { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1559 { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1563 static const struct si_cac_config_reg lcac_mars_pro
[] =
1565 { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1566 { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1567 { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1568 { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1569 { 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND
},
1570 { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1571 { 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND
},
1572 { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1573 { 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND
},
1574 { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1575 { 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1576 { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1577 { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1578 { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1579 { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1580 { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1581 { 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1582 { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1583 { 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1584 { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1585 { 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1586 { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1587 { 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1588 { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1589 { 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1590 { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1591 { 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1592 { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1593 { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND
},
1594 { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1595 { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1596 { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1597 { 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1598 { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1599 { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1600 { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1601 { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1602 { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1603 { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1604 { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1605 { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND
},
1606 { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND
},
1610 static const struct si_cac_config_reg cac_override_oland
[] =
1615 static const struct si_powertune_data powertune_data_oland
=
1617 ((1 << 16) | 0x6993),
1646 static const struct si_powertune_data powertune_data_mars_pro
=
1648 ((1 << 16) | 0x6993),
1677 static const struct si_dte_data dte_data_oland
=
1688 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1689 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1690 { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1695 static const struct si_dte_data dte_data_mars_pro
=
1697 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1698 { 0x0, 0x0, 0x0, 0x0, 0x0 },
1706 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
1707 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
1708 { 0xF627, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1713 static const struct si_dte_data dte_data_sun_xt
=
1715 { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1716 { 0x0, 0x0, 0x0, 0x0, 0x0 },
1724 { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
1725 { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
1726 { 0xD555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1732 static const struct si_cac_config_reg cac_weights_hainan
[] =
1734 { 0x0, 0x0000ffff, 0, 0x2d9, SISLANDS_CACCONFIG_CGIND
},
1735 { 0x0, 0xffff0000, 16, 0x22b, SISLANDS_CACCONFIG_CGIND
},
1736 { 0x1, 0x0000ffff, 0, 0x21c, SISLANDS_CACCONFIG_CGIND
},
1737 { 0x1, 0xffff0000, 16, 0x1dc, SISLANDS_CACCONFIG_CGIND
},
1738 { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1739 { 0x3, 0x0000ffff, 0, 0x24e, SISLANDS_CACCONFIG_CGIND
},
1740 { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1741 { 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1742 { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1743 { 0x5, 0x0000ffff, 0, 0x35e, SISLANDS_CACCONFIG_CGIND
},
1744 { 0x5, 0xffff0000, 16, 0x1143, SISLANDS_CACCONFIG_CGIND
},
1745 { 0x6, 0x0000ffff, 0, 0xe17, SISLANDS_CACCONFIG_CGIND
},
1746 { 0x6, 0xffff0000, 16, 0x441, SISLANDS_CACCONFIG_CGIND
},
1747 { 0x18f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1748 { 0x7, 0x0000ffff, 0, 0x28b, SISLANDS_CACCONFIG_CGIND
},
1749 { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1750 { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1751 { 0x8, 0xffff0000, 16, 0xabe, SISLANDS_CACCONFIG_CGIND
},
1752 { 0x9, 0x0000ffff, 0, 0xf11, SISLANDS_CACCONFIG_CGIND
},
1753 { 0xa, 0x0000ffff, 0, 0x907, SISLANDS_CACCONFIG_CGIND
},
1754 { 0xb, 0x0000ffff, 0, 0xb45, SISLANDS_CACCONFIG_CGIND
},
1755 { 0xb, 0xffff0000, 16, 0xd1e, SISLANDS_CACCONFIG_CGIND
},
1756 { 0xc, 0x0000ffff, 0, 0xa2c, SISLANDS_CACCONFIG_CGIND
},
1757 { 0xd, 0x0000ffff, 0, 0x62, SISLANDS_CACCONFIG_CGIND
},
1758 { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1759 { 0xe, 0x0000ffff, 0, 0x1f3, SISLANDS_CACCONFIG_CGIND
},
1760 { 0xf, 0x0000ffff, 0, 0x42, SISLANDS_CACCONFIG_CGIND
},
1761 { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1762 { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1763 { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1764 { 0x11, 0x0000ffff, 0, 0x709, SISLANDS_CACCONFIG_CGIND
},
1765 { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1766 { 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1767 { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1768 { 0x13, 0xffff0000, 16, 0x3a, SISLANDS_CACCONFIG_CGIND
},
1769 { 0x14, 0x0000ffff, 0, 0x357, SISLANDS_CACCONFIG_CGIND
},
1770 { 0x15, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND
},
1771 { 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1772 { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1773 { 0x16, 0x0000ffff, 0, 0x314, SISLANDS_CACCONFIG_CGIND
},
1774 { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1775 { 0x17, 0x0000ffff, 0, 0x6d, SISLANDS_CACCONFIG_CGIND
},
1776 { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1777 { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1778 { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND
},
1779 { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND
},
1780 { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1781 { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1782 { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1783 { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1784 { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1785 { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1786 { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1787 { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1788 { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1789 { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1790 { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1791 { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND
},
1792 { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND
},
1793 { 0x6d, 0x0000ffff, 0, 0x1b9, SISLANDS_CACCONFIG_CGIND
},
1797 static const struct si_powertune_data powertune_data_hainan
=
1799 ((1 << 16) | 0x6993),
1828 static struct rv7xx_power_info
*rv770_get_pi(struct amdgpu_device
*adev
);
1829 static struct evergreen_power_info
*evergreen_get_pi(struct amdgpu_device
*adev
);
1830 static struct ni_power_info
*ni_get_pi(struct amdgpu_device
*adev
);
1831 static struct si_ps
*si_get_ps(struct amdgpu_ps
*rps
);
1833 static int si_populate_voltage_value(struct amdgpu_device
*adev
,
1834 const struct atom_voltage_table
*table
,
1835 u16 value
, SISLANDS_SMC_VOLTAGE_VALUE
*voltage
);
1836 static int si_get_std_voltage_value(struct amdgpu_device
*adev
,
1837 SISLANDS_SMC_VOLTAGE_VALUE
*voltage
,
1839 static int si_write_smc_soft_register(struct amdgpu_device
*adev
,
1840 u16 reg_offset
, u32 value
);
1841 static int si_convert_power_level_to_smc(struct amdgpu_device
*adev
,
1842 struct rv7xx_pl
*pl
,
1843 SISLANDS_SMC_HW_PERFORMANCE_LEVEL
*level
);
1844 static int si_calculate_sclk_params(struct amdgpu_device
*adev
,
1846 SISLANDS_SMC_SCLK_VALUE
*sclk
);
1848 static void si_thermal_start_smc_fan_control(struct amdgpu_device
*adev
);
1849 static void si_fan_ctrl_set_default_mode(struct amdgpu_device
*adev
);
1850 static void si_dpm_set_dpm_funcs(struct amdgpu_device
*adev
);
1851 static void si_dpm_set_irq_funcs(struct amdgpu_device
*adev
);
1853 static struct si_power_info
*si_get_pi(struct amdgpu_device
*adev
)
1855 struct si_power_info
*pi
= adev
->pm
.dpm
.priv
;
1859 static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients
*coeff
,
1860 u16 v
, s32 t
, u32 ileakage
, u32
*leakage
)
1862 s64 kt
, kv
, leakage_w
, i_leakage
, vddc
;
1863 s64 temperature
, t_slope
, t_intercept
, av
, bv
, t_ref
;
1866 i_leakage
= div64_s64(drm_int2fixp(ileakage
), 100);
1867 vddc
= div64_s64(drm_int2fixp(v
), 1000);
1868 temperature
= div64_s64(drm_int2fixp(t
), 1000);
1870 t_slope
= div64_s64(drm_int2fixp(coeff
->t_slope
), 100000000);
1871 t_intercept
= div64_s64(drm_int2fixp(coeff
->t_intercept
), 100000000);
1872 av
= div64_s64(drm_int2fixp(coeff
->av
), 100000000);
1873 bv
= div64_s64(drm_int2fixp(coeff
->bv
), 100000000);
1874 t_ref
= drm_int2fixp(coeff
->t_ref
);
1876 tmp
= drm_fixp_mul(t_slope
, vddc
) + t_intercept
;
1877 kt
= drm_fixp_exp(drm_fixp_mul(tmp
, temperature
));
1878 kt
= drm_fixp_div(kt
, drm_fixp_exp(drm_fixp_mul(tmp
, t_ref
)));
1879 kv
= drm_fixp_mul(av
, drm_fixp_exp(drm_fixp_mul(bv
, vddc
)));
1881 leakage_w
= drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage
, kt
), kv
), vddc
);
1883 *leakage
= drm_fixp2int(leakage_w
* 1000);
1886 static void si_calculate_leakage_for_v_and_t(struct amdgpu_device
*adev
,
1887 const struct ni_leakage_coeffients
*coeff
,
1893 si_calculate_leakage_for_v_and_t_formula(coeff
, v
, t
, i_leakage
, leakage
);
1896 static void si_calculate_leakage_for_v_formula(const struct ni_leakage_coeffients
*coeff
,
1897 const u32 fixed_kt
, u16 v
,
1898 u32 ileakage
, u32
*leakage
)
1900 s64 kt
, kv
, leakage_w
, i_leakage
, vddc
;
1902 i_leakage
= div64_s64(drm_int2fixp(ileakage
), 100);
1903 vddc
= div64_s64(drm_int2fixp(v
), 1000);
1905 kt
= div64_s64(drm_int2fixp(fixed_kt
), 100000000);
1906 kv
= drm_fixp_mul(div64_s64(drm_int2fixp(coeff
->av
), 100000000),
1907 drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff
->bv
), 100000000), vddc
)));
1909 leakage_w
= drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage
, kt
), kv
), vddc
);
1911 *leakage
= drm_fixp2int(leakage_w
* 1000);
1914 static void si_calculate_leakage_for_v(struct amdgpu_device
*adev
,
1915 const struct ni_leakage_coeffients
*coeff
,
1921 si_calculate_leakage_for_v_formula(coeff
, fixed_kt
, v
, i_leakage
, leakage
);
1925 static void si_update_dte_from_pl2(struct amdgpu_device
*adev
,
1926 struct si_dte_data
*dte_data
)
1928 u32 p_limit1
= adev
->pm
.dpm
.tdp_limit
;
1929 u32 p_limit2
= adev
->pm
.dpm
.near_tdp_limit
;
1930 u32 k
= dte_data
->k
;
1931 u32 t_max
= dte_data
->max_t
;
1932 u32 t_split
[5] = { 10, 15, 20, 25, 30 };
1933 u32 t_0
= dte_data
->t0
;
1936 if (p_limit2
!= 0 && p_limit2
<= p_limit1
) {
1937 dte_data
->tdep_count
= 3;
1939 for (i
= 0; i
< k
; i
++) {
1941 (t_split
[i
] * (t_max
- t_0
/(u32
)1000) * (1 << 14)) /
1942 (p_limit2
* (u32
)100);
1945 dte_data
->tdep_r
[1] = dte_data
->r
[4] * 2;
1947 for (i
= 2; i
< SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE
; i
++) {
1948 dte_data
->tdep_r
[i
] = dte_data
->r
[4];
1951 DRM_ERROR("Invalid PL2! DTE will not be updated.\n");
1955 static struct rv7xx_power_info
*rv770_get_pi(struct amdgpu_device
*adev
)
1957 struct rv7xx_power_info
*pi
= adev
->pm
.dpm
.priv
;
1962 static struct ni_power_info
*ni_get_pi(struct amdgpu_device
*adev
)
1964 struct ni_power_info
*pi
= adev
->pm
.dpm
.priv
;
1969 static struct si_ps
*si_get_ps(struct amdgpu_ps
*aps
)
1971 struct si_ps
*ps
= aps
->ps_priv
;
1976 static void si_initialize_powertune_defaults(struct amdgpu_device
*adev
)
1978 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
1979 struct si_power_info
*si_pi
= si_get_pi(adev
);
1980 bool update_dte_from_pl2
= false;
1982 if (adev
->asic_type
== CHIP_TAHITI
) {
1983 si_pi
->cac_weights
= cac_weights_tahiti
;
1984 si_pi
->lcac_config
= lcac_tahiti
;
1985 si_pi
->cac_override
= cac_override_tahiti
;
1986 si_pi
->powertune_data
= &powertune_data_tahiti
;
1987 si_pi
->dte_data
= dte_data_tahiti
;
1989 switch (adev
->pdev
->device
) {
1991 si_pi
->dte_data
.enable_dte_by_default
= true;
1994 si_pi
->dte_data
= dte_data_new_zealand
;
2000 si_pi
->dte_data
= dte_data_aruba_pro
;
2001 update_dte_from_pl2
= true;
2004 si_pi
->dte_data
= dte_data_malta
;
2005 update_dte_from_pl2
= true;
2008 si_pi
->dte_data
= dte_data_tahiti_pro
;
2009 update_dte_from_pl2
= true;
2012 if (si_pi
->dte_data
.enable_dte_by_default
== true)
2013 DRM_ERROR("DTE is not enabled!\n");
2016 } else if (adev
->asic_type
== CHIP_PITCAIRN
) {
2017 si_pi
->cac_weights
= cac_weights_pitcairn
;
2018 si_pi
->lcac_config
= lcac_pitcairn
;
2019 si_pi
->cac_override
= cac_override_pitcairn
;
2020 si_pi
->powertune_data
= &powertune_data_pitcairn
;
2022 switch (adev
->pdev
->device
) {
2025 si_pi
->dte_data
= dte_data_curacao_xt
;
2026 update_dte_from_pl2
= true;
2030 si_pi
->dte_data
= dte_data_curacao_pro
;
2031 update_dte_from_pl2
= true;
2035 si_pi
->dte_data
= dte_data_neptune_xt
;
2036 update_dte_from_pl2
= true;
2039 si_pi
->dte_data
= dte_data_pitcairn
;
2042 } else if (adev
->asic_type
== CHIP_VERDE
) {
2043 si_pi
->lcac_config
= lcac_cape_verde
;
2044 si_pi
->cac_override
= cac_override_cape_verde
;
2045 si_pi
->powertune_data
= &powertune_data_cape_verde
;
2047 switch (adev
->pdev
->device
) {
2052 si_pi
->cac_weights
= cac_weights_cape_verde_pro
;
2053 si_pi
->dte_data
= dte_data_cape_verde
;
2056 si_pi
->cac_weights
= cac_weights_cape_verde_pro
;
2057 si_pi
->dte_data
= dte_data_sun_xt
;
2061 si_pi
->cac_weights
= cac_weights_heathrow
;
2062 si_pi
->dte_data
= dte_data_cape_verde
;
2066 si_pi
->cac_weights
= cac_weights_chelsea_xt
;
2067 si_pi
->dte_data
= dte_data_cape_verde
;
2070 si_pi
->cac_weights
= cac_weights_chelsea_pro
;
2071 si_pi
->dte_data
= dte_data_cape_verde
;
2074 si_pi
->cac_weights
= cac_weights_heathrow
;
2075 si_pi
->dte_data
= dte_data_venus_xtx
;
2078 si_pi
->cac_weights
= cac_weights_heathrow
;
2079 si_pi
->dte_data
= dte_data_venus_xt
;
2085 si_pi
->cac_weights
= cac_weights_chelsea_pro
;
2086 si_pi
->dte_data
= dte_data_venus_pro
;
2089 si_pi
->cac_weights
= cac_weights_cape_verde
;
2090 si_pi
->dte_data
= dte_data_cape_verde
;
2093 } else if (adev
->asic_type
== CHIP_OLAND
) {
2094 si_pi
->lcac_config
= lcac_mars_pro
;
2095 si_pi
->cac_override
= cac_override_oland
;
2096 si_pi
->powertune_data
= &powertune_data_mars_pro
;
2097 si_pi
->dte_data
= dte_data_mars_pro
;
2099 switch (adev
->pdev
->device
) {
2104 si_pi
->cac_weights
= cac_weights_mars_pro
;
2105 update_dte_from_pl2
= true;
2111 si_pi
->cac_weights
= cac_weights_mars_xt
;
2112 update_dte_from_pl2
= true;
2117 si_pi
->cac_weights
= cac_weights_oland_pro
;
2118 update_dte_from_pl2
= true;
2121 si_pi
->cac_weights
= cac_weights_oland_xt
;
2122 update_dte_from_pl2
= true;
2125 si_pi
->cac_weights
= cac_weights_oland
;
2126 si_pi
->lcac_config
= lcac_oland
;
2127 si_pi
->cac_override
= cac_override_oland
;
2128 si_pi
->powertune_data
= &powertune_data_oland
;
2129 si_pi
->dte_data
= dte_data_oland
;
2132 } else if (adev
->asic_type
== CHIP_HAINAN
) {
2133 si_pi
->cac_weights
= cac_weights_hainan
;
2134 si_pi
->lcac_config
= lcac_oland
;
2135 si_pi
->cac_override
= cac_override_oland
;
2136 si_pi
->powertune_data
= &powertune_data_hainan
;
2137 si_pi
->dte_data
= dte_data_sun_xt
;
2138 update_dte_from_pl2
= true;
2140 DRM_ERROR("Unknown SI asic revision, failed to initialize PowerTune!\n");
2144 ni_pi
->enable_power_containment
= false;
2145 ni_pi
->enable_cac
= false;
2146 ni_pi
->enable_sq_ramping
= false;
2147 si_pi
->enable_dte
= false;
2149 if (si_pi
->powertune_data
->enable_powertune_by_default
) {
2150 ni_pi
->enable_power_containment
= true;
2151 ni_pi
->enable_cac
= true;
2152 if (si_pi
->dte_data
.enable_dte_by_default
) {
2153 si_pi
->enable_dte
= true;
2154 if (update_dte_from_pl2
)
2155 si_update_dte_from_pl2(adev
, &si_pi
->dte_data
);
2158 ni_pi
->enable_sq_ramping
= true;
2161 ni_pi
->driver_calculate_cac_leakage
= true;
2162 ni_pi
->cac_configuration_required
= true;
2164 if (ni_pi
->cac_configuration_required
) {
2165 ni_pi
->support_cac_long_term_average
= true;
2166 si_pi
->dyn_powertune_data
.l2_lta_window_size
=
2167 si_pi
->powertune_data
->l2_lta_window_size_default
;
2168 si_pi
->dyn_powertune_data
.lts_truncate
=
2169 si_pi
->powertune_data
->lts_truncate_default
;
2171 ni_pi
->support_cac_long_term_average
= false;
2172 si_pi
->dyn_powertune_data
.l2_lta_window_size
= 0;
2173 si_pi
->dyn_powertune_data
.lts_truncate
= 0;
2176 si_pi
->dyn_powertune_data
.disable_uvd_powertune
= false;
2179 static u32
si_get_smc_power_scaling_factor(struct amdgpu_device
*adev
)
2184 static u32
si_calculate_cac_wintime(struct amdgpu_device
*adev
)
2189 u32 cac_window_size
;
2191 xclk
= amdgpu_asic_get_xclk(adev
);
2196 cac_window
= RREG32(CG_CAC_CTRL
) & CAC_WINDOW_MASK
;
2197 cac_window_size
= ((cac_window
& 0xFFFF0000) >> 16) * (cac_window
& 0x0000FFFF);
2199 wintime
= (cac_window_size
* 100) / xclk
;
2204 static u32
si_scale_power_for_smc(u32 power_in_watts
, u32 scaling_factor
)
2206 return power_in_watts
;
2209 static int si_calculate_adjusted_tdp_limits(struct amdgpu_device
*adev
,
2210 bool adjust_polarity
,
2213 u32
*near_tdp_limit
)
2215 u32 adjustment_delta
, max_tdp_limit
;
2217 if (tdp_adjustment
> (u32
)adev
->pm
.dpm
.tdp_od_limit
)
2220 max_tdp_limit
= ((100 + 100) * adev
->pm
.dpm
.tdp_limit
) / 100;
2222 if (adjust_polarity
) {
2223 *tdp_limit
= ((100 + tdp_adjustment
) * adev
->pm
.dpm
.tdp_limit
) / 100;
2224 *near_tdp_limit
= adev
->pm
.dpm
.near_tdp_limit_adjusted
+ (*tdp_limit
- adev
->pm
.dpm
.tdp_limit
);
2226 *tdp_limit
= ((100 - tdp_adjustment
) * adev
->pm
.dpm
.tdp_limit
) / 100;
2227 adjustment_delta
= adev
->pm
.dpm
.tdp_limit
- *tdp_limit
;
2228 if (adjustment_delta
< adev
->pm
.dpm
.near_tdp_limit_adjusted
)
2229 *near_tdp_limit
= adev
->pm
.dpm
.near_tdp_limit_adjusted
- adjustment_delta
;
2231 *near_tdp_limit
= 0;
2234 if ((*tdp_limit
<= 0) || (*tdp_limit
> max_tdp_limit
))
2236 if ((*near_tdp_limit
<= 0) || (*near_tdp_limit
> *tdp_limit
))
2242 static int si_populate_smc_tdp_limits(struct amdgpu_device
*adev
,
2243 struct amdgpu_ps
*amdgpu_state
)
2245 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2246 struct si_power_info
*si_pi
= si_get_pi(adev
);
2248 if (ni_pi
->enable_power_containment
) {
2249 SISLANDS_SMC_STATETABLE
*smc_table
= &si_pi
->smc_statetable
;
2250 PP_SIslands_PAPMParameters
*papm_parm
;
2251 struct amdgpu_ppm_table
*ppm
= adev
->pm
.dpm
.dyn_state
.ppm_table
;
2252 u32 scaling_factor
= si_get_smc_power_scaling_factor(adev
);
2257 if (scaling_factor
== 0)
2260 memset(smc_table
, 0, sizeof(SISLANDS_SMC_STATETABLE
));
2262 ret
= si_calculate_adjusted_tdp_limits(adev
,
2264 adev
->pm
.dpm
.tdp_adjustment
,
2270 smc_table
->dpm2Params
.TDPLimit
=
2271 cpu_to_be32(si_scale_power_for_smc(tdp_limit
, scaling_factor
) * 1000);
2272 smc_table
->dpm2Params
.NearTDPLimit
=
2273 cpu_to_be32(si_scale_power_for_smc(near_tdp_limit
, scaling_factor
) * 1000);
2274 smc_table
->dpm2Params
.SafePowerLimit
=
2275 cpu_to_be32(si_scale_power_for_smc((near_tdp_limit
* SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT
) / 100, scaling_factor
) * 1000);
2277 ret
= amdgpu_si_copy_bytes_to_smc(adev
,
2278 (si_pi
->state_table_start
+ offsetof(SISLANDS_SMC_STATETABLE
, dpm2Params
) +
2279 offsetof(PP_SIslands_DPM2Parameters
, TDPLimit
)),
2280 (u8
*)(&(smc_table
->dpm2Params
.TDPLimit
)),
2286 if (si_pi
->enable_ppm
) {
2287 papm_parm
= &si_pi
->papm_parm
;
2288 memset(papm_parm
, 0, sizeof(PP_SIslands_PAPMParameters
));
2289 papm_parm
->NearTDPLimitTherm
= cpu_to_be32(ppm
->dgpu_tdp
);
2290 papm_parm
->dGPU_T_Limit
= cpu_to_be32(ppm
->tj_max
);
2291 papm_parm
->dGPU_T_Warning
= cpu_to_be32(95);
2292 papm_parm
->dGPU_T_Hysteresis
= cpu_to_be32(5);
2293 papm_parm
->PlatformPowerLimit
= 0xffffffff;
2294 papm_parm
->NearTDPLimitPAPM
= 0xffffffff;
2296 ret
= amdgpu_si_copy_bytes_to_smc(adev
, si_pi
->papm_cfg_table_start
,
2298 sizeof(PP_SIslands_PAPMParameters
),
2307 static int si_populate_smc_tdp_limits_2(struct amdgpu_device
*adev
,
2308 struct amdgpu_ps
*amdgpu_state
)
2310 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2311 struct si_power_info
*si_pi
= si_get_pi(adev
);
2313 if (ni_pi
->enable_power_containment
) {
2314 SISLANDS_SMC_STATETABLE
*smc_table
= &si_pi
->smc_statetable
;
2315 u32 scaling_factor
= si_get_smc_power_scaling_factor(adev
);
2318 memset(smc_table
, 0, sizeof(SISLANDS_SMC_STATETABLE
));
2320 smc_table
->dpm2Params
.NearTDPLimit
=
2321 cpu_to_be32(si_scale_power_for_smc(adev
->pm
.dpm
.near_tdp_limit_adjusted
, scaling_factor
) * 1000);
2322 smc_table
->dpm2Params
.SafePowerLimit
=
2323 cpu_to_be32(si_scale_power_for_smc((adev
->pm
.dpm
.near_tdp_limit_adjusted
* SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT
) / 100, scaling_factor
) * 1000);
2325 ret
= amdgpu_si_copy_bytes_to_smc(adev
,
2326 (si_pi
->state_table_start
+
2327 offsetof(SISLANDS_SMC_STATETABLE
, dpm2Params
) +
2328 offsetof(PP_SIslands_DPM2Parameters
, NearTDPLimit
)),
2329 (u8
*)(&(smc_table
->dpm2Params
.NearTDPLimit
)),
2339 static u16
si_calculate_power_efficiency_ratio(struct amdgpu_device
*adev
,
2340 const u16 prev_std_vddc
,
2341 const u16 curr_std_vddc
)
2343 u64 margin
= (u64
)SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN
;
2344 u64 prev_vddc
= (u64
)prev_std_vddc
;
2345 u64 curr_vddc
= (u64
)curr_std_vddc
;
2346 u64 pwr_efficiency_ratio
, n
, d
;
2348 if ((prev_vddc
== 0) || (curr_vddc
== 0))
2351 n
= div64_u64((u64
)1024 * curr_vddc
* curr_vddc
* ((u64
)1000 + margin
), (u64
)1000);
2352 d
= prev_vddc
* prev_vddc
;
2353 pwr_efficiency_ratio
= div64_u64(n
, d
);
2355 if (pwr_efficiency_ratio
> (u64
)0xFFFF)
2358 return (u16
)pwr_efficiency_ratio
;
2361 static bool si_should_disable_uvd_powertune(struct amdgpu_device
*adev
,
2362 struct amdgpu_ps
*amdgpu_state
)
2364 struct si_power_info
*si_pi
= si_get_pi(adev
);
2366 if (si_pi
->dyn_powertune_data
.disable_uvd_powertune
&&
2367 amdgpu_state
->vclk
&& amdgpu_state
->dclk
)
2373 struct evergreen_power_info
*evergreen_get_pi(struct amdgpu_device
*adev
)
2375 struct evergreen_power_info
*pi
= adev
->pm
.dpm
.priv
;
2380 static int si_populate_power_containment_values(struct amdgpu_device
*adev
,
2381 struct amdgpu_ps
*amdgpu_state
,
2382 SISLANDS_SMC_SWSTATE
*smc_state
)
2384 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
2385 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2386 struct si_ps
*state
= si_get_ps(amdgpu_state
);
2387 SISLANDS_SMC_VOLTAGE_VALUE vddc
;
2394 u16 pwr_efficiency_ratio
;
2396 bool disable_uvd_power_tune
;
2399 if (ni_pi
->enable_power_containment
== false)
2402 if (state
->performance_level_count
== 0)
2405 if (smc_state
->levelCount
!= state
->performance_level_count
)
2408 disable_uvd_power_tune
= si_should_disable_uvd_powertune(adev
, amdgpu_state
);
2410 smc_state
->levels
[0].dpm2
.MaxPS
= 0;
2411 smc_state
->levels
[0].dpm2
.NearTDPDec
= 0;
2412 smc_state
->levels
[0].dpm2
.AboveSafeInc
= 0;
2413 smc_state
->levels
[0].dpm2
.BelowSafeInc
= 0;
2414 smc_state
->levels
[0].dpm2
.PwrEfficiencyRatio
= 0;
2416 for (i
= 1; i
< state
->performance_level_count
; i
++) {
2417 prev_sclk
= state
->performance_levels
[i
-1].sclk
;
2418 max_sclk
= state
->performance_levels
[i
].sclk
;
2420 max_ps_percent
= SISLANDS_DPM2_MAXPS_PERCENT_M
;
2422 max_ps_percent
= SISLANDS_DPM2_MAXPS_PERCENT_H
;
2424 if (prev_sclk
> max_sclk
)
2427 if ((max_ps_percent
== 0) ||
2428 (prev_sclk
== max_sclk
) ||
2429 disable_uvd_power_tune
)
2430 min_sclk
= max_sclk
;
2432 min_sclk
= prev_sclk
;
2434 min_sclk
= (prev_sclk
* (u32
)max_ps_percent
) / 100;
2436 if (min_sclk
< state
->performance_levels
[0].sclk
)
2437 min_sclk
= state
->performance_levels
[0].sclk
;
2442 ret
= si_populate_voltage_value(adev
, &eg_pi
->vddc_voltage_table
,
2443 state
->performance_levels
[i
-1].vddc
, &vddc
);
2447 ret
= si_get_std_voltage_value(adev
, &vddc
, &prev_std_vddc
);
2451 ret
= si_populate_voltage_value(adev
, &eg_pi
->vddc_voltage_table
,
2452 state
->performance_levels
[i
].vddc
, &vddc
);
2456 ret
= si_get_std_voltage_value(adev
, &vddc
, &curr_std_vddc
);
2460 pwr_efficiency_ratio
= si_calculate_power_efficiency_ratio(adev
,
2461 prev_std_vddc
, curr_std_vddc
);
2463 smc_state
->levels
[i
].dpm2
.MaxPS
= (u8
)((SISLANDS_DPM2_MAX_PULSE_SKIP
* (max_sclk
- min_sclk
)) / max_sclk
);
2464 smc_state
->levels
[i
].dpm2
.NearTDPDec
= SISLANDS_DPM2_NEAR_TDP_DEC
;
2465 smc_state
->levels
[i
].dpm2
.AboveSafeInc
= SISLANDS_DPM2_ABOVE_SAFE_INC
;
2466 smc_state
->levels
[i
].dpm2
.BelowSafeInc
= SISLANDS_DPM2_BELOW_SAFE_INC
;
2467 smc_state
->levels
[i
].dpm2
.PwrEfficiencyRatio
= cpu_to_be16(pwr_efficiency_ratio
);
2473 static int si_populate_sq_ramping_values(struct amdgpu_device
*adev
,
2474 struct amdgpu_ps
*amdgpu_state
,
2475 SISLANDS_SMC_SWSTATE
*smc_state
)
2477 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2478 struct si_ps
*state
= si_get_ps(amdgpu_state
);
2479 u32 sq_power_throttle
, sq_power_throttle2
;
2480 bool enable_sq_ramping
= ni_pi
->enable_sq_ramping
;
2483 if (state
->performance_level_count
== 0)
2486 if (smc_state
->levelCount
!= state
->performance_level_count
)
2489 if (adev
->pm
.dpm
.sq_ramping_threshold
== 0)
2492 if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER
> (MAX_POWER_MASK
>> MAX_POWER_SHIFT
))
2493 enable_sq_ramping
= false;
2495 if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER
> (MIN_POWER_MASK
>> MIN_POWER_SHIFT
))
2496 enable_sq_ramping
= false;
2498 if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA
> (MAX_POWER_DELTA_MASK
>> MAX_POWER_DELTA_SHIFT
))
2499 enable_sq_ramping
= false;
2501 if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE
> (STI_SIZE_MASK
>> STI_SIZE_SHIFT
))
2502 enable_sq_ramping
= false;
2504 if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO
> (LTI_RATIO_MASK
>> LTI_RATIO_SHIFT
))
2505 enable_sq_ramping
= false;
2507 for (i
= 0; i
< state
->performance_level_count
; i
++) {
2508 sq_power_throttle
= 0;
2509 sq_power_throttle2
= 0;
2511 if ((state
->performance_levels
[i
].sclk
>= adev
->pm
.dpm
.sq_ramping_threshold
) &&
2512 enable_sq_ramping
) {
2513 sq_power_throttle
|= MAX_POWER(SISLANDS_DPM2_SQ_RAMP_MAX_POWER
);
2514 sq_power_throttle
|= MIN_POWER(SISLANDS_DPM2_SQ_RAMP_MIN_POWER
);
2515 sq_power_throttle2
|= MAX_POWER_DELTA(SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA
);
2516 sq_power_throttle2
|= STI_SIZE(SISLANDS_DPM2_SQ_RAMP_STI_SIZE
);
2517 sq_power_throttle2
|= LTI_RATIO(SISLANDS_DPM2_SQ_RAMP_LTI_RATIO
);
2519 sq_power_throttle
|= MAX_POWER_MASK
| MIN_POWER_MASK
;
2520 sq_power_throttle2
|= MAX_POWER_DELTA_MASK
| STI_SIZE_MASK
| LTI_RATIO_MASK
;
2523 smc_state
->levels
[i
].SQPowerThrottle
= cpu_to_be32(sq_power_throttle
);
2524 smc_state
->levels
[i
].SQPowerThrottle_2
= cpu_to_be32(sq_power_throttle2
);
2530 static int si_enable_power_containment(struct amdgpu_device
*adev
,
2531 struct amdgpu_ps
*amdgpu_new_state
,
2534 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2535 PPSMC_Result smc_result
;
2538 if (ni_pi
->enable_power_containment
) {
2540 if (!si_should_disable_uvd_powertune(adev
, amdgpu_new_state
)) {
2541 smc_result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_TDPClampingActive
);
2542 if (smc_result
!= PPSMC_Result_OK
) {
2544 ni_pi
->pc_enabled
= false;
2546 ni_pi
->pc_enabled
= true;
2550 smc_result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_TDPClampingInactive
);
2551 if (smc_result
!= PPSMC_Result_OK
)
2553 ni_pi
->pc_enabled
= false;
2560 static int si_initialize_smc_dte_tables(struct amdgpu_device
*adev
)
2562 struct si_power_info
*si_pi
= si_get_pi(adev
);
2564 struct si_dte_data
*dte_data
= &si_pi
->dte_data
;
2565 Smc_SIslands_DTE_Configuration
*dte_tables
= NULL
;
2570 if (dte_data
== NULL
)
2571 si_pi
->enable_dte
= false;
2573 if (si_pi
->enable_dte
== false)
2576 if (dte_data
->k
<= 0)
2579 dte_tables
= kzalloc(sizeof(Smc_SIslands_DTE_Configuration
), GFP_KERNEL
);
2580 if (dte_tables
== NULL
) {
2581 si_pi
->enable_dte
= false;
2585 table_size
= dte_data
->k
;
2587 if (table_size
> SMC_SISLANDS_DTE_MAX_FILTER_STAGES
)
2588 table_size
= SMC_SISLANDS_DTE_MAX_FILTER_STAGES
;
2590 tdep_count
= dte_data
->tdep_count
;
2591 if (tdep_count
> SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE
)
2592 tdep_count
= SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE
;
2594 dte_tables
->K
= cpu_to_be32(table_size
);
2595 dte_tables
->T0
= cpu_to_be32(dte_data
->t0
);
2596 dte_tables
->MaxT
= cpu_to_be32(dte_data
->max_t
);
2597 dte_tables
->WindowSize
= dte_data
->window_size
;
2598 dte_tables
->temp_select
= dte_data
->temp_select
;
2599 dte_tables
->DTE_mode
= dte_data
->dte_mode
;
2600 dte_tables
->Tthreshold
= cpu_to_be32(dte_data
->t_threshold
);
2605 for (i
= 0; i
< table_size
; i
++) {
2606 dte_tables
->tau
[i
] = cpu_to_be32(dte_data
->tau
[i
]);
2607 dte_tables
->R
[i
] = cpu_to_be32(dte_data
->r
[i
]);
2610 dte_tables
->Tdep_count
= tdep_count
;
2612 for (i
= 0; i
< (u32
)tdep_count
; i
++) {
2613 dte_tables
->T_limits
[i
] = dte_data
->t_limits
[i
];
2614 dte_tables
->Tdep_tau
[i
] = cpu_to_be32(dte_data
->tdep_tau
[i
]);
2615 dte_tables
->Tdep_R
[i
] = cpu_to_be32(dte_data
->tdep_r
[i
]);
2618 ret
= amdgpu_si_copy_bytes_to_smc(adev
, si_pi
->dte_table_start
,
2620 sizeof(Smc_SIslands_DTE_Configuration
),
2627 static int si_get_cac_std_voltage_max_min(struct amdgpu_device
*adev
,
2630 struct si_power_info
*si_pi
= si_get_pi(adev
);
2631 struct amdgpu_cac_leakage_table
*table
=
2632 &adev
->pm
.dpm
.dyn_state
.cac_leakage_table
;
2642 for (i
= 0; i
< table
->count
; i
++) {
2643 if (table
->entries
[i
].vddc
> *max
)
2644 *max
= table
->entries
[i
].vddc
;
2645 if (table
->entries
[i
].vddc
< *min
)
2646 *min
= table
->entries
[i
].vddc
;
2649 if (si_pi
->powertune_data
->lkge_lut_v0_percent
> 100)
2652 v0_loadline
= (*min
) * (100 - si_pi
->powertune_data
->lkge_lut_v0_percent
) / 100;
2654 if (v0_loadline
> 0xFFFFUL
)
2657 *min
= (u16
)v0_loadline
;
2659 if ((*min
> *max
) || (*max
== 0) || (*min
== 0))
2665 static u16
si_get_cac_std_voltage_step(u16 max
, u16 min
)
2667 return ((max
- min
) + (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES
- 1)) /
2668 SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES
;
2671 static int si_init_dte_leakage_table(struct amdgpu_device
*adev
,
2672 PP_SIslands_CacConfig
*cac_tables
,
2673 u16 vddc_max
, u16 vddc_min
, u16 vddc_step
,
2676 struct si_power_info
*si_pi
= si_get_pi(adev
);
2684 scaling_factor
= si_get_smc_power_scaling_factor(adev
);
2686 for (i
= 0; i
< SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES
; i
++) {
2687 t
= (1000 * (i
* t_step
+ t0
));
2689 for (j
= 0; j
< SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES
; j
++) {
2690 voltage
= vddc_max
- (vddc_step
* j
);
2692 si_calculate_leakage_for_v_and_t(adev
,
2693 &si_pi
->powertune_data
->leakage_coefficients
,
2696 si_pi
->dyn_powertune_data
.cac_leakage
,
2699 smc_leakage
= si_scale_power_for_smc(leakage
, scaling_factor
) / 4;
2701 if (smc_leakage
> 0xFFFF)
2702 smc_leakage
= 0xFFFF;
2704 cac_tables
->cac_lkge_lut
[i
][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES
-1-j
] =
2705 cpu_to_be16((u16
)smc_leakage
);
2711 static int si_init_simplified_leakage_table(struct amdgpu_device
*adev
,
2712 PP_SIslands_CacConfig
*cac_tables
,
2713 u16 vddc_max
, u16 vddc_min
, u16 vddc_step
)
2715 struct si_power_info
*si_pi
= si_get_pi(adev
);
2722 scaling_factor
= si_get_smc_power_scaling_factor(adev
);
2724 for (j
= 0; j
< SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES
; j
++) {
2725 voltage
= vddc_max
- (vddc_step
* j
);
2727 si_calculate_leakage_for_v(adev
,
2728 &si_pi
->powertune_data
->leakage_coefficients
,
2729 si_pi
->powertune_data
->fixed_kt
,
2731 si_pi
->dyn_powertune_data
.cac_leakage
,
2734 smc_leakage
= si_scale_power_for_smc(leakage
, scaling_factor
) / 4;
2736 if (smc_leakage
> 0xFFFF)
2737 smc_leakage
= 0xFFFF;
2739 for (i
= 0; i
< SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES
; i
++)
2740 cac_tables
->cac_lkge_lut
[i
][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES
-1-j
] =
2741 cpu_to_be16((u16
)smc_leakage
);
2746 static int si_initialize_smc_cac_tables(struct amdgpu_device
*adev
)
2748 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2749 struct si_power_info
*si_pi
= si_get_pi(adev
);
2750 PP_SIslands_CacConfig
*cac_tables
= NULL
;
2751 u16 vddc_max
, vddc_min
, vddc_step
;
2753 u32 load_line_slope
, reg
;
2755 u32 ticks_per_us
= amdgpu_asic_get_xclk(adev
) / 100;
2757 if (ni_pi
->enable_cac
== false)
2760 cac_tables
= kzalloc(sizeof(PP_SIslands_CacConfig
), GFP_KERNEL
);
2764 reg
= RREG32(CG_CAC_CTRL
) & ~CAC_WINDOW_MASK
;
2765 reg
|= CAC_WINDOW(si_pi
->powertune_data
->cac_window
);
2766 WREG32(CG_CAC_CTRL
, reg
);
2768 si_pi
->dyn_powertune_data
.cac_leakage
= adev
->pm
.dpm
.cac_leakage
;
2769 si_pi
->dyn_powertune_data
.dc_pwr_value
=
2770 si_pi
->powertune_data
->dc_cac
[NISLANDS_DCCAC_LEVEL_0
];
2771 si_pi
->dyn_powertune_data
.wintime
= si_calculate_cac_wintime(adev
);
2772 si_pi
->dyn_powertune_data
.shift_n
= si_pi
->powertune_data
->shift_n_default
;
2774 si_pi
->dyn_powertune_data
.leakage_minimum_temperature
= 80 * 1000;
2776 ret
= si_get_cac_std_voltage_max_min(adev
, &vddc_max
, &vddc_min
);
2780 vddc_step
= si_get_cac_std_voltage_step(vddc_max
, vddc_min
);
2781 vddc_min
= vddc_max
- (vddc_step
* (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES
- 1));
2785 if (si_pi
->enable_dte
|| ni_pi
->driver_calculate_cac_leakage
)
2786 ret
= si_init_dte_leakage_table(adev
, cac_tables
,
2787 vddc_max
, vddc_min
, vddc_step
,
2790 ret
= si_init_simplified_leakage_table(adev
, cac_tables
,
2791 vddc_max
, vddc_min
, vddc_step
);
2795 load_line_slope
= ((u32
)adev
->pm
.dpm
.load_line_slope
<< SMC_SISLANDS_SCALE_R
) / 100;
2797 cac_tables
->l2numWin_TDP
= cpu_to_be32(si_pi
->dyn_powertune_data
.l2_lta_window_size
);
2798 cac_tables
->lts_truncate_n
= si_pi
->dyn_powertune_data
.lts_truncate
;
2799 cac_tables
->SHIFT_N
= si_pi
->dyn_powertune_data
.shift_n
;
2800 cac_tables
->lkge_lut_V0
= cpu_to_be32((u32
)vddc_min
);
2801 cac_tables
->lkge_lut_Vstep
= cpu_to_be32((u32
)vddc_step
);
2802 cac_tables
->R_LL
= cpu_to_be32(load_line_slope
);
2803 cac_tables
->WinTime
= cpu_to_be32(si_pi
->dyn_powertune_data
.wintime
);
2804 cac_tables
->calculation_repeats
= cpu_to_be32(2);
2805 cac_tables
->dc_cac
= cpu_to_be32(0);
2806 cac_tables
->log2_PG_LKG_SCALE
= 12;
2807 cac_tables
->cac_temp
= si_pi
->powertune_data
->operating_temp
;
2808 cac_tables
->lkge_lut_T0
= cpu_to_be32((u32
)t0
);
2809 cac_tables
->lkge_lut_Tstep
= cpu_to_be32((u32
)t_step
);
2811 ret
= amdgpu_si_copy_bytes_to_smc(adev
, si_pi
->cac_table_start
,
2813 sizeof(PP_SIslands_CacConfig
),
2819 ret
= si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_ticks_per_us
, ticks_per_us
);
2823 ni_pi
->enable_cac
= false;
2824 ni_pi
->enable_power_containment
= false;
2832 static int si_program_cac_config_registers(struct amdgpu_device
*adev
,
2833 const struct si_cac_config_reg
*cac_config_regs
)
2835 const struct si_cac_config_reg
*config_regs
= cac_config_regs
;
2836 u32 data
= 0, offset
;
2841 while (config_regs
->offset
!= 0xFFFFFFFF) {
2842 switch (config_regs
->type
) {
2843 case SISLANDS_CACCONFIG_CGIND
:
2844 offset
= SMC_CG_IND_START
+ config_regs
->offset
;
2845 if (offset
< SMC_CG_IND_END
)
2846 data
= RREG32_SMC(offset
);
2849 data
= RREG32(config_regs
->offset
);
2853 data
&= ~config_regs
->mask
;
2854 data
|= ((config_regs
->value
<< config_regs
->shift
) & config_regs
->mask
);
2856 switch (config_regs
->type
) {
2857 case SISLANDS_CACCONFIG_CGIND
:
2858 offset
= SMC_CG_IND_START
+ config_regs
->offset
;
2859 if (offset
< SMC_CG_IND_END
)
2860 WREG32_SMC(offset
, data
);
2863 WREG32(config_regs
->offset
, data
);
2871 static int si_initialize_hardware_cac_manager(struct amdgpu_device
*adev
)
2873 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2874 struct si_power_info
*si_pi
= si_get_pi(adev
);
2877 if ((ni_pi
->enable_cac
== false) ||
2878 (ni_pi
->cac_configuration_required
== false))
2881 ret
= si_program_cac_config_registers(adev
, si_pi
->lcac_config
);
2884 ret
= si_program_cac_config_registers(adev
, si_pi
->cac_override
);
2887 ret
= si_program_cac_config_registers(adev
, si_pi
->cac_weights
);
2894 static int si_enable_smc_cac(struct amdgpu_device
*adev
,
2895 struct amdgpu_ps
*amdgpu_new_state
,
2898 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2899 struct si_power_info
*si_pi
= si_get_pi(adev
);
2900 PPSMC_Result smc_result
;
2903 if (ni_pi
->enable_cac
) {
2905 if (!si_should_disable_uvd_powertune(adev
, amdgpu_new_state
)) {
2906 if (ni_pi
->support_cac_long_term_average
) {
2907 smc_result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_CACLongTermAvgEnable
);
2908 if (smc_result
!= PPSMC_Result_OK
)
2909 ni_pi
->support_cac_long_term_average
= false;
2912 smc_result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_EnableCac
);
2913 if (smc_result
!= PPSMC_Result_OK
) {
2915 ni_pi
->cac_enabled
= false;
2917 ni_pi
->cac_enabled
= true;
2920 if (si_pi
->enable_dte
) {
2921 smc_result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_EnableDTE
);
2922 if (smc_result
!= PPSMC_Result_OK
)
2926 } else if (ni_pi
->cac_enabled
) {
2927 if (si_pi
->enable_dte
)
2928 smc_result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_DisableDTE
);
2930 smc_result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_DisableCac
);
2932 ni_pi
->cac_enabled
= false;
2934 if (ni_pi
->support_cac_long_term_average
)
2935 smc_result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_CACLongTermAvgDisable
);
2941 static int si_init_smc_spll_table(struct amdgpu_device
*adev
)
2943 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
2944 struct si_power_info
*si_pi
= si_get_pi(adev
);
2945 SMC_SISLANDS_SPLL_DIV_TABLE
*spll_table
;
2946 SISLANDS_SMC_SCLK_VALUE sclk_params
;
2954 if (si_pi
->spll_table_start
== 0)
2957 spll_table
= kzalloc(sizeof(SMC_SISLANDS_SPLL_DIV_TABLE
), GFP_KERNEL
);
2958 if (spll_table
== NULL
)
2961 for (i
= 0; i
< 256; i
++) {
2962 ret
= si_calculate_sclk_params(adev
, sclk
, &sclk_params
);
2965 p_div
= (sclk_params
.vCG_SPLL_FUNC_CNTL
& SPLL_PDIV_A_MASK
) >> SPLL_PDIV_A_SHIFT
;
2966 fb_div
= (sclk_params
.vCG_SPLL_FUNC_CNTL_3
& SPLL_FB_DIV_MASK
) >> SPLL_FB_DIV_SHIFT
;
2967 clk_s
= (sclk_params
.vCG_SPLL_SPREAD_SPECTRUM
& CLK_S_MASK
) >> CLK_S_SHIFT
;
2968 clk_v
= (sclk_params
.vCG_SPLL_SPREAD_SPECTRUM_2
& CLK_V_MASK
) >> CLK_V_SHIFT
;
2970 fb_div
&= ~0x00001FFF;
2974 if (p_div
& ~(SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK
>> SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT
))
2976 if (fb_div
& ~(SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK
>> SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT
))
2978 if (clk_s
& ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK
>> SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT
))
2980 if (clk_v
& ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK
>> SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT
))
2986 tmp
= ((fb_div
<< SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT
) & SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK
) |
2987 ((p_div
<< SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT
) & SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK
);
2988 spll_table
->freq
[i
] = cpu_to_be32(tmp
);
2990 tmp
= ((clk_v
<< SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT
) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK
) |
2991 ((clk_s
<< SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT
) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK
);
2992 spll_table
->ss
[i
] = cpu_to_be32(tmp
);
2999 ret
= amdgpu_si_copy_bytes_to_smc(adev
, si_pi
->spll_table_start
,
3001 sizeof(SMC_SISLANDS_SPLL_DIV_TABLE
),
3005 ni_pi
->enable_power_containment
= false;
3012 static u16
si_get_lower_of_leakage_and_vce_voltage(struct amdgpu_device
*adev
,
3015 u16 highest_leakage
= 0;
3016 struct si_power_info
*si_pi
= si_get_pi(adev
);
3019 for (i
= 0; i
< si_pi
->leakage_voltage
.count
; i
++){
3020 if (highest_leakage
< si_pi
->leakage_voltage
.entries
[i
].voltage
)
3021 highest_leakage
= si_pi
->leakage_voltage
.entries
[i
].voltage
;
3024 if (si_pi
->leakage_voltage
.count
&& (highest_leakage
< vce_voltage
))
3025 return highest_leakage
;
3030 static int si_get_vce_clock_voltage(struct amdgpu_device
*adev
,
3031 u32 evclk
, u32 ecclk
, u16
*voltage
)
3035 struct amdgpu_vce_clock_voltage_dependency_table
*table
=
3036 &adev
->pm
.dpm
.dyn_state
.vce_clock_voltage_dependency_table
;
3038 if (((evclk
== 0) && (ecclk
== 0)) ||
3039 (table
&& (table
->count
== 0))) {
3044 for (i
= 0; i
< table
->count
; i
++) {
3045 if ((evclk
<= table
->entries
[i
].evclk
) &&
3046 (ecclk
<= table
->entries
[i
].ecclk
)) {
3047 *voltage
= table
->entries
[i
].v
;
3053 /* if no match return the highest voltage */
3055 *voltage
= table
->entries
[table
->count
- 1].v
;
3057 *voltage
= si_get_lower_of_leakage_and_vce_voltage(adev
, *voltage
);
3062 static bool si_dpm_vblank_too_short(struct amdgpu_device
*adev
)
3065 u32 vblank_time
= amdgpu_dpm_get_vblank_time(adev
);
3066 /* we never hit the non-gddr5 limit so disable it */
3067 u32 switch_limit
= adev
->mc
.vram_type
== AMDGPU_VRAM_TYPE_GDDR5
? 450 : 0;
3069 if (vblank_time
< switch_limit
)
3076 static int ni_copy_and_switch_arb_sets(struct amdgpu_device
*adev
,
3077 u32 arb_freq_src
, u32 arb_freq_dest
)
3079 u32 mc_arb_dram_timing
;
3080 u32 mc_arb_dram_timing2
;
3084 switch (arb_freq_src
) {
3085 case MC_CG_ARB_FREQ_F0
:
3086 mc_arb_dram_timing
= RREG32(MC_ARB_DRAM_TIMING
);
3087 mc_arb_dram_timing2
= RREG32(MC_ARB_DRAM_TIMING2
);
3088 burst_time
= (RREG32(MC_ARB_BURST_TIME
) & STATE0_MASK
) >> STATE0_SHIFT
;
3090 case MC_CG_ARB_FREQ_F1
:
3091 mc_arb_dram_timing
= RREG32(MC_ARB_DRAM_TIMING_1
);
3092 mc_arb_dram_timing2
= RREG32(MC_ARB_DRAM_TIMING2_1
);
3093 burst_time
= (RREG32(MC_ARB_BURST_TIME
) & STATE1_MASK
) >> STATE1_SHIFT
;
3095 case MC_CG_ARB_FREQ_F2
:
3096 mc_arb_dram_timing
= RREG32(MC_ARB_DRAM_TIMING_2
);
3097 mc_arb_dram_timing2
= RREG32(MC_ARB_DRAM_TIMING2_2
);
3098 burst_time
= (RREG32(MC_ARB_BURST_TIME
) & STATE2_MASK
) >> STATE2_SHIFT
;
3100 case MC_CG_ARB_FREQ_F3
:
3101 mc_arb_dram_timing
= RREG32(MC_ARB_DRAM_TIMING_3
);
3102 mc_arb_dram_timing2
= RREG32(MC_ARB_DRAM_TIMING2_3
);
3103 burst_time
= (RREG32(MC_ARB_BURST_TIME
) & STATE3_MASK
) >> STATE3_SHIFT
;
3109 switch (arb_freq_dest
) {
3110 case MC_CG_ARB_FREQ_F0
:
3111 WREG32(MC_ARB_DRAM_TIMING
, mc_arb_dram_timing
);
3112 WREG32(MC_ARB_DRAM_TIMING2
, mc_arb_dram_timing2
);
3113 WREG32_P(MC_ARB_BURST_TIME
, STATE0(burst_time
), ~STATE0_MASK
);
3115 case MC_CG_ARB_FREQ_F1
:
3116 WREG32(MC_ARB_DRAM_TIMING_1
, mc_arb_dram_timing
);
3117 WREG32(MC_ARB_DRAM_TIMING2_1
, mc_arb_dram_timing2
);
3118 WREG32_P(MC_ARB_BURST_TIME
, STATE1(burst_time
), ~STATE1_MASK
);
3120 case MC_CG_ARB_FREQ_F2
:
3121 WREG32(MC_ARB_DRAM_TIMING_2
, mc_arb_dram_timing
);
3122 WREG32(MC_ARB_DRAM_TIMING2_2
, mc_arb_dram_timing2
);
3123 WREG32_P(MC_ARB_BURST_TIME
, STATE2(burst_time
), ~STATE2_MASK
);
3125 case MC_CG_ARB_FREQ_F3
:
3126 WREG32(MC_ARB_DRAM_TIMING_3
, mc_arb_dram_timing
);
3127 WREG32(MC_ARB_DRAM_TIMING2_3
, mc_arb_dram_timing2
);
3128 WREG32_P(MC_ARB_BURST_TIME
, STATE3(burst_time
), ~STATE3_MASK
);
3134 mc_cg_config
= RREG32(MC_CG_CONFIG
) | 0x0000000F;
3135 WREG32(MC_CG_CONFIG
, mc_cg_config
);
3136 WREG32_P(MC_ARB_CG
, CG_ARB_REQ(arb_freq_dest
), ~CG_ARB_REQ_MASK
);
3141 static void ni_update_current_ps(struct amdgpu_device
*adev
,
3142 struct amdgpu_ps
*rps
)
3144 struct si_ps
*new_ps
= si_get_ps(rps
);
3145 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
3146 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
3148 eg_pi
->current_rps
= *rps
;
3149 ni_pi
->current_ps
= *new_ps
;
3150 eg_pi
->current_rps
.ps_priv
= &ni_pi
->current_ps
;
3151 adev
->pm
.dpm
.current_ps
= &eg_pi
->current_rps
;
3154 static void ni_update_requested_ps(struct amdgpu_device
*adev
,
3155 struct amdgpu_ps
*rps
)
3157 struct si_ps
*new_ps
= si_get_ps(rps
);
3158 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
3159 struct ni_power_info
*ni_pi
= ni_get_pi(adev
);
3161 eg_pi
->requested_rps
= *rps
;
3162 ni_pi
->requested_ps
= *new_ps
;
3163 eg_pi
->requested_rps
.ps_priv
= &ni_pi
->requested_ps
;
3164 adev
->pm
.dpm
.requested_ps
= &eg_pi
->requested_rps
;
3167 static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device
*adev
,
3168 struct amdgpu_ps
*new_ps
,
3169 struct amdgpu_ps
*old_ps
)
3171 struct si_ps
*new_state
= si_get_ps(new_ps
);
3172 struct si_ps
*current_state
= si_get_ps(old_ps
);
3174 if ((new_ps
->vclk
== old_ps
->vclk
) &&
3175 (new_ps
->dclk
== old_ps
->dclk
))
3178 if (new_state
->performance_levels
[new_state
->performance_level_count
- 1].sclk
>=
3179 current_state
->performance_levels
[current_state
->performance_level_count
- 1].sclk
)
3182 amdgpu_asic_set_uvd_clocks(adev
, new_ps
->vclk
, new_ps
->dclk
);
3185 static void ni_set_uvd_clock_after_set_eng_clock(struct amdgpu_device
*adev
,
3186 struct amdgpu_ps
*new_ps
,
3187 struct amdgpu_ps
*old_ps
)
3189 struct si_ps
*new_state
= si_get_ps(new_ps
);
3190 struct si_ps
*current_state
= si_get_ps(old_ps
);
3192 if ((new_ps
->vclk
== old_ps
->vclk
) &&
3193 (new_ps
->dclk
== old_ps
->dclk
))
3196 if (new_state
->performance_levels
[new_state
->performance_level_count
- 1].sclk
<
3197 current_state
->performance_levels
[current_state
->performance_level_count
- 1].sclk
)
3200 amdgpu_asic_set_uvd_clocks(adev
, new_ps
->vclk
, new_ps
->dclk
);
3203 static u16
btc_find_voltage(struct atom_voltage_table
*table
, u16 voltage
)
3207 for (i
= 0; i
< table
->count
; i
++)
3208 if (voltage
<= table
->entries
[i
].value
)
3209 return table
->entries
[i
].value
;
3211 return table
->entries
[table
->count
- 1].value
;
3214 static u32
btc_find_valid_clock(struct amdgpu_clock_array
*clocks
,
3215 u32 max_clock
, u32 requested_clock
)
3219 if ((clocks
== NULL
) || (clocks
->count
== 0))
3220 return (requested_clock
< max_clock
) ? requested_clock
: max_clock
;
3222 for (i
= 0; i
< clocks
->count
; i
++) {
3223 if (clocks
->values
[i
] >= requested_clock
)
3224 return (clocks
->values
[i
] < max_clock
) ? clocks
->values
[i
] : max_clock
;
3227 return (clocks
->values
[clocks
->count
- 1] < max_clock
) ?
3228 clocks
->values
[clocks
->count
- 1] : max_clock
;
3231 static u32
btc_get_valid_mclk(struct amdgpu_device
*adev
,
3232 u32 max_mclk
, u32 requested_mclk
)
3234 return btc_find_valid_clock(&adev
->pm
.dpm
.dyn_state
.valid_mclk_values
,
3235 max_mclk
, requested_mclk
);
3238 static u32
btc_get_valid_sclk(struct amdgpu_device
*adev
,
3239 u32 max_sclk
, u32 requested_sclk
)
3241 return btc_find_valid_clock(&adev
->pm
.dpm
.dyn_state
.valid_sclk_values
,
3242 max_sclk
, requested_sclk
);
3245 static void btc_get_max_clock_from_voltage_dependency_table(struct amdgpu_clock_voltage_dependency_table
*table
,
3250 if ((table
== NULL
) || (table
->count
== 0)) {
3255 for (i
= 0; i
< table
->count
; i
++) {
3256 if (clock
< table
->entries
[i
].clk
)
3257 clock
= table
->entries
[i
].clk
;
3262 static void btc_apply_voltage_dependency_rules(struct amdgpu_clock_voltage_dependency_table
*table
,
3263 u32 clock
, u16 max_voltage
, u16
*voltage
)
3267 if ((table
== NULL
) || (table
->count
== 0))
3270 for (i
= 0; i
< table
->count
; i
++) {
3271 if (clock
<= table
->entries
[i
].clk
) {
3272 if (*voltage
< table
->entries
[i
].v
)
3273 *voltage
= (u16
)((table
->entries
[i
].v
< max_voltage
) ?
3274 table
->entries
[i
].v
: max_voltage
);
3279 *voltage
= (*voltage
> max_voltage
) ? *voltage
: max_voltage
;
3282 static void btc_adjust_clock_combinations(struct amdgpu_device
*adev
,
3283 const struct amdgpu_clock_and_voltage_limits
*max_limits
,
3284 struct rv7xx_pl
*pl
)
3287 if ((pl
->mclk
== 0) || (pl
->sclk
== 0))
3290 if (pl
->mclk
== pl
->sclk
)
3293 if (pl
->mclk
> pl
->sclk
) {
3294 if (((pl
->mclk
+ (pl
->sclk
- 1)) / pl
->sclk
) > adev
->pm
.dpm
.dyn_state
.mclk_sclk_ratio
)
3295 pl
->sclk
= btc_get_valid_sclk(adev
,
3298 (adev
->pm
.dpm
.dyn_state
.mclk_sclk_ratio
- 1)) /
3299 adev
->pm
.dpm
.dyn_state
.mclk_sclk_ratio
);
3301 if ((pl
->sclk
- pl
->mclk
) > adev
->pm
.dpm
.dyn_state
.sclk_mclk_delta
)
3302 pl
->mclk
= btc_get_valid_mclk(adev
,
3305 adev
->pm
.dpm
.dyn_state
.sclk_mclk_delta
);
3309 static void btc_apply_voltage_delta_rules(struct amdgpu_device
*adev
,
3310 u16 max_vddc
, u16 max_vddci
,
3311 u16
*vddc
, u16
*vddci
)
3313 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
3316 if ((0 == *vddc
) || (0 == *vddci
))
3319 if (*vddc
> *vddci
) {
3320 if ((*vddc
- *vddci
) > adev
->pm
.dpm
.dyn_state
.vddc_vddci_delta
) {
3321 new_voltage
= btc_find_voltage(&eg_pi
->vddci_voltage_table
,
3322 (*vddc
- adev
->pm
.dpm
.dyn_state
.vddc_vddci_delta
));
3323 *vddci
= (new_voltage
< max_vddci
) ? new_voltage
: max_vddci
;
3326 if ((*vddci
- *vddc
) > adev
->pm
.dpm
.dyn_state
.vddc_vddci_delta
) {
3327 new_voltage
= btc_find_voltage(&eg_pi
->vddc_voltage_table
,
3328 (*vddci
- adev
->pm
.dpm
.dyn_state
.vddc_vddci_delta
));
3329 *vddc
= (new_voltage
< max_vddc
) ? new_voltage
: max_vddc
;
3334 static enum amdgpu_pcie_gen
r600_get_pcie_gen_support(struct amdgpu_device
*adev
,
3336 enum amdgpu_pcie_gen asic_gen
,
3337 enum amdgpu_pcie_gen default_gen
)
3340 case AMDGPU_PCIE_GEN1
:
3341 return AMDGPU_PCIE_GEN1
;
3342 case AMDGPU_PCIE_GEN2
:
3343 return AMDGPU_PCIE_GEN2
;
3344 case AMDGPU_PCIE_GEN3
:
3345 return AMDGPU_PCIE_GEN3
;
3347 if ((sys_mask
& DRM_PCIE_SPEED_80
) && (default_gen
== AMDGPU_PCIE_GEN3
))
3348 return AMDGPU_PCIE_GEN3
;
3349 else if ((sys_mask
& DRM_PCIE_SPEED_50
) && (default_gen
== AMDGPU_PCIE_GEN2
))
3350 return AMDGPU_PCIE_GEN2
;
3352 return AMDGPU_PCIE_GEN1
;
3354 return AMDGPU_PCIE_GEN1
;
3357 static void r600_calculate_u_and_p(u32 i
, u32 r_c
, u32 p_b
,
3364 i_c
= (i
* r_c
) / 100;
3373 *p
= i_c
/ (1 << (2 * (*u
)));
3376 static int r600_calculate_at(u32 t
, u32 h
, u32 fh
, u32 fl
, u32
*tl
, u32
*th
)
3381 if ((fl
== 0) || (fh
== 0) || (fl
> fh
))
3384 k
= (100 * fh
) / fl
;
3385 t1
= (t
* (k
- 100));
3386 a
= (1000 * (100 * h
+ t1
)) / (10000 + (t1
/ 100));
3388 ah
= ((a
* t
) + 5000) / 10000;
3397 static bool r600_is_uvd_state(u32
class, u32 class2
)
3399 if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE
)
3401 if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE
)
3403 if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE
)
3405 if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE
)
3407 if (class2
& ATOM_PPLIB_CLASSIFICATION2_MVC
)
3412 static u8
rv770_get_memory_module_index(struct amdgpu_device
*adev
)
3414 return (u8
) ((RREG32(BIOS_SCRATCH_4
) >> 16) & 0xff);
3417 static void rv770_get_max_vddc(struct amdgpu_device
*adev
)
3419 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
3422 if (amdgpu_atombios_get_max_vddc(adev
, 0, 0, &vddc
))
3425 pi
->max_vddc
= vddc
;
3428 static void rv770_get_engine_memory_ss(struct amdgpu_device
*adev
)
3430 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
3431 struct amdgpu_atom_ss ss
;
3433 pi
->sclk_ss
= amdgpu_atombios_get_asic_ss_info(adev
, &ss
,
3434 ASIC_INTERNAL_ENGINE_SS
, 0);
3435 pi
->mclk_ss
= amdgpu_atombios_get_asic_ss_info(adev
, &ss
,
3436 ASIC_INTERNAL_MEMORY_SS
, 0);
3438 if (pi
->sclk_ss
|| pi
->mclk_ss
)
3439 pi
->dynamic_ss
= true;
3441 pi
->dynamic_ss
= false;
3445 static void si_apply_state_adjust_rules(struct amdgpu_device
*adev
,
3446 struct amdgpu_ps
*rps
)
3448 struct si_ps
*ps
= si_get_ps(rps
);
3449 struct amdgpu_clock_and_voltage_limits
*max_limits
;
3450 bool disable_mclk_switching
= false;
3451 bool disable_sclk_switching
= false;
3453 u16 vddc
, vddci
, min_vce_voltage
= 0;
3454 u32 max_sclk_vddc
, max_mclk_vddci
, max_mclk_vddc
;
3455 u32 max_sclk
= 0, max_mclk
= 0;
3458 if (adev
->asic_type
== CHIP_HAINAN
) {
3459 if ((adev
->pdev
->revision
== 0x81) ||
3460 (adev
->pdev
->revision
== 0x83) ||
3461 (adev
->pdev
->revision
== 0xC3) ||
3462 (adev
->pdev
->device
== 0x6664) ||
3463 (adev
->pdev
->device
== 0x6665) ||
3464 (adev
->pdev
->device
== 0x6667)) {
3467 } else if (adev
->asic_type
== CHIP_OLAND
) {
3468 if ((adev
->pdev
->revision
== 0xC7) ||
3469 (adev
->pdev
->revision
== 0x80) ||
3470 (adev
->pdev
->revision
== 0x81) ||
3471 (adev
->pdev
->revision
== 0x83) ||
3472 (adev
->pdev
->revision
== 0x87) ||
3473 (adev
->pdev
->device
== 0x6604) ||
3474 (adev
->pdev
->device
== 0x6605)) {
3479 if (rps
->vce_active
) {
3480 rps
->evclk
= adev
->pm
.dpm
.vce_states
[adev
->pm
.dpm
.vce_level
].evclk
;
3481 rps
->ecclk
= adev
->pm
.dpm
.vce_states
[adev
->pm
.dpm
.vce_level
].ecclk
;
3482 si_get_vce_clock_voltage(adev
, rps
->evclk
, rps
->ecclk
,
3489 if ((adev
->pm
.dpm
.new_active_crtc_count
> 1) ||
3490 si_dpm_vblank_too_short(adev
))
3491 disable_mclk_switching
= true;
3493 if (rps
->vclk
|| rps
->dclk
) {
3494 disable_mclk_switching
= true;
3495 disable_sclk_switching
= true;
3498 if (adev
->pm
.dpm
.ac_power
)
3499 max_limits
= &adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_ac
;
3501 max_limits
= &adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_dc
;
3503 for (i
= ps
->performance_level_count
- 2; i
>= 0; i
--) {
3504 if (ps
->performance_levels
[i
].vddc
> ps
->performance_levels
[i
+1].vddc
)
3505 ps
->performance_levels
[i
].vddc
= ps
->performance_levels
[i
+1].vddc
;
3507 if (adev
->pm
.dpm
.ac_power
== false) {
3508 for (i
= 0; i
< ps
->performance_level_count
; i
++) {
3509 if (ps
->performance_levels
[i
].mclk
> max_limits
->mclk
)
3510 ps
->performance_levels
[i
].mclk
= max_limits
->mclk
;
3511 if (ps
->performance_levels
[i
].sclk
> max_limits
->sclk
)
3512 ps
->performance_levels
[i
].sclk
= max_limits
->sclk
;
3513 if (ps
->performance_levels
[i
].vddc
> max_limits
->vddc
)
3514 ps
->performance_levels
[i
].vddc
= max_limits
->vddc
;
3515 if (ps
->performance_levels
[i
].vddci
> max_limits
->vddci
)
3516 ps
->performance_levels
[i
].vddci
= max_limits
->vddci
;
3520 /* limit clocks to max supported clocks based on voltage dependency tables */
3521 btc_get_max_clock_from_voltage_dependency_table(&adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
,
3523 btc_get_max_clock_from_voltage_dependency_table(&adev
->pm
.dpm
.dyn_state
.vddci_dependency_on_mclk
,
3525 btc_get_max_clock_from_voltage_dependency_table(&adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_mclk
,
3528 for (i
= 0; i
< ps
->performance_level_count
; i
++) {
3529 if (max_sclk_vddc
) {
3530 if (ps
->performance_levels
[i
].sclk
> max_sclk_vddc
)
3531 ps
->performance_levels
[i
].sclk
= max_sclk_vddc
;
3533 if (max_mclk_vddci
) {
3534 if (ps
->performance_levels
[i
].mclk
> max_mclk_vddci
)
3535 ps
->performance_levels
[i
].mclk
= max_mclk_vddci
;
3537 if (max_mclk_vddc
) {
3538 if (ps
->performance_levels
[i
].mclk
> max_mclk_vddc
)
3539 ps
->performance_levels
[i
].mclk
= max_mclk_vddc
;
3542 if (ps
->performance_levels
[i
].mclk
> max_mclk
)
3543 ps
->performance_levels
[i
].mclk
= max_mclk
;
3546 if (ps
->performance_levels
[i
].sclk
> max_sclk
)
3547 ps
->performance_levels
[i
].sclk
= max_sclk
;
3551 /* XXX validate the min clocks required for display */
3553 if (disable_mclk_switching
) {
3554 mclk
= ps
->performance_levels
[ps
->performance_level_count
- 1].mclk
;
3555 vddci
= ps
->performance_levels
[ps
->performance_level_count
- 1].vddci
;
3557 mclk
= ps
->performance_levels
[0].mclk
;
3558 vddci
= ps
->performance_levels
[0].vddci
;
3561 if (disable_sclk_switching
) {
3562 sclk
= ps
->performance_levels
[ps
->performance_level_count
- 1].sclk
;
3563 vddc
= ps
->performance_levels
[ps
->performance_level_count
- 1].vddc
;
3565 sclk
= ps
->performance_levels
[0].sclk
;
3566 vddc
= ps
->performance_levels
[0].vddc
;
3569 if (rps
->vce_active
) {
3570 if (sclk
< adev
->pm
.dpm
.vce_states
[adev
->pm
.dpm
.vce_level
].sclk
)
3571 sclk
= adev
->pm
.dpm
.vce_states
[adev
->pm
.dpm
.vce_level
].sclk
;
3572 if (mclk
< adev
->pm
.dpm
.vce_states
[adev
->pm
.dpm
.vce_level
].mclk
)
3573 mclk
= adev
->pm
.dpm
.vce_states
[adev
->pm
.dpm
.vce_level
].mclk
;
3576 /* adjusted low state */
3577 ps
->performance_levels
[0].sclk
= sclk
;
3578 ps
->performance_levels
[0].mclk
= mclk
;
3579 ps
->performance_levels
[0].vddc
= vddc
;
3580 ps
->performance_levels
[0].vddci
= vddci
;
3582 if (disable_sclk_switching
) {
3583 sclk
= ps
->performance_levels
[0].sclk
;
3584 for (i
= 1; i
< ps
->performance_level_count
; i
++) {
3585 if (sclk
< ps
->performance_levels
[i
].sclk
)
3586 sclk
= ps
->performance_levels
[i
].sclk
;
3588 for (i
= 0; i
< ps
->performance_level_count
; i
++) {
3589 ps
->performance_levels
[i
].sclk
= sclk
;
3590 ps
->performance_levels
[i
].vddc
= vddc
;
3593 for (i
= 1; i
< ps
->performance_level_count
; i
++) {
3594 if (ps
->performance_levels
[i
].sclk
< ps
->performance_levels
[i
- 1].sclk
)
3595 ps
->performance_levels
[i
].sclk
= ps
->performance_levels
[i
- 1].sclk
;
3596 if (ps
->performance_levels
[i
].vddc
< ps
->performance_levels
[i
- 1].vddc
)
3597 ps
->performance_levels
[i
].vddc
= ps
->performance_levels
[i
- 1].vddc
;
3601 if (disable_mclk_switching
) {
3602 mclk
= ps
->performance_levels
[0].mclk
;
3603 for (i
= 1; i
< ps
->performance_level_count
; i
++) {
3604 if (mclk
< ps
->performance_levels
[i
].mclk
)
3605 mclk
= ps
->performance_levels
[i
].mclk
;
3607 for (i
= 0; i
< ps
->performance_level_count
; i
++) {
3608 ps
->performance_levels
[i
].mclk
= mclk
;
3609 ps
->performance_levels
[i
].vddci
= vddci
;
3612 for (i
= 1; i
< ps
->performance_level_count
; i
++) {
3613 if (ps
->performance_levels
[i
].mclk
< ps
->performance_levels
[i
- 1].mclk
)
3614 ps
->performance_levels
[i
].mclk
= ps
->performance_levels
[i
- 1].mclk
;
3615 if (ps
->performance_levels
[i
].vddci
< ps
->performance_levels
[i
- 1].vddci
)
3616 ps
->performance_levels
[i
].vddci
= ps
->performance_levels
[i
- 1].vddci
;
3620 for (i
= 0; i
< ps
->performance_level_count
; i
++)
3621 btc_adjust_clock_combinations(adev
, max_limits
,
3622 &ps
->performance_levels
[i
]);
3624 for (i
= 0; i
< ps
->performance_level_count
; i
++) {
3625 if (ps
->performance_levels
[i
].vddc
< min_vce_voltage
)
3626 ps
->performance_levels
[i
].vddc
= min_vce_voltage
;
3627 btc_apply_voltage_dependency_rules(&adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
,
3628 ps
->performance_levels
[i
].sclk
,
3629 max_limits
->vddc
, &ps
->performance_levels
[i
].vddc
);
3630 btc_apply_voltage_dependency_rules(&adev
->pm
.dpm
.dyn_state
.vddci_dependency_on_mclk
,
3631 ps
->performance_levels
[i
].mclk
,
3632 max_limits
->vddci
, &ps
->performance_levels
[i
].vddci
);
3633 btc_apply_voltage_dependency_rules(&adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_mclk
,
3634 ps
->performance_levels
[i
].mclk
,
3635 max_limits
->vddc
, &ps
->performance_levels
[i
].vddc
);
3636 btc_apply_voltage_dependency_rules(&adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
,
3637 adev
->clock
.current_dispclk
,
3638 max_limits
->vddc
, &ps
->performance_levels
[i
].vddc
);
3641 for (i
= 0; i
< ps
->performance_level_count
; i
++) {
3642 btc_apply_voltage_delta_rules(adev
,
3643 max_limits
->vddc
, max_limits
->vddci
,
3644 &ps
->performance_levels
[i
].vddc
,
3645 &ps
->performance_levels
[i
].vddci
);
3648 ps
->dc_compatible
= true;
3649 for (i
= 0; i
< ps
->performance_level_count
; i
++) {
3650 if (ps
->performance_levels
[i
].vddc
> adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_dc
.vddc
)
3651 ps
->dc_compatible
= false;
3656 static int si_read_smc_soft_register(struct amdgpu_device
*adev
,
3657 u16 reg_offset
, u32
*value
)
3659 struct si_power_info
*si_pi
= si_get_pi(adev
);
3661 return amdgpu_si_read_smc_sram_dword(adev
,
3662 si_pi
->soft_regs_start
+ reg_offset
, value
,
3667 static int si_write_smc_soft_register(struct amdgpu_device
*adev
,
3668 u16 reg_offset
, u32 value
)
3670 struct si_power_info
*si_pi
= si_get_pi(adev
);
3672 return amdgpu_si_write_smc_sram_dword(adev
,
3673 si_pi
->soft_regs_start
+ reg_offset
,
3674 value
, si_pi
->sram_end
);
3677 static bool si_is_special_1gb_platform(struct amdgpu_device
*adev
)
3680 u32 tmp
, width
, row
, column
, bank
, density
;
3681 bool is_memory_gddr5
, is_special
;
3683 tmp
= RREG32(MC_SEQ_MISC0
);
3684 is_memory_gddr5
= (MC_SEQ_MISC0_GDDR5_VALUE
== ((tmp
& MC_SEQ_MISC0_GDDR5_MASK
) >> MC_SEQ_MISC0_GDDR5_SHIFT
));
3685 is_special
= (MC_SEQ_MISC0_REV_ID_VALUE
== ((tmp
& MC_SEQ_MISC0_REV_ID_MASK
) >> MC_SEQ_MISC0_REV_ID_SHIFT
))
3686 & (MC_SEQ_MISC0_VEN_ID_VALUE
== ((tmp
& MC_SEQ_MISC0_VEN_ID_MASK
) >> MC_SEQ_MISC0_VEN_ID_SHIFT
));
3688 WREG32(MC_SEQ_IO_DEBUG_INDEX
, 0xb);
3689 width
= ((RREG32(MC_SEQ_IO_DEBUG_DATA
) >> 1) & 1) ? 16 : 32;
3691 tmp
= RREG32(MC_ARB_RAMCFG
);
3692 row
= ((tmp
& NOOFROWS_MASK
) >> NOOFROWS_SHIFT
) + 10;
3693 column
= ((tmp
& NOOFCOLS_MASK
) >> NOOFCOLS_SHIFT
) + 8;
3694 bank
= ((tmp
& NOOFBANK_MASK
) >> NOOFBANK_SHIFT
) + 2;
3696 density
= (1 << (row
+ column
- 20 + bank
)) * width
;
3698 if ((adev
->pdev
->device
== 0x6819) &&
3699 is_memory_gddr5
&& is_special
&& (density
== 0x400))
3705 static void si_get_leakage_vddc(struct amdgpu_device
*adev
)
3707 struct si_power_info
*si_pi
= si_get_pi(adev
);
3708 u16 vddc
, count
= 0;
3711 for (i
= 0; i
< SISLANDS_MAX_LEAKAGE_COUNT
; i
++) {
3712 ret
= amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(adev
, &vddc
, SISLANDS_LEAKAGE_INDEX0
+ i
);
3714 if (!ret
&& (vddc
> 0) && (vddc
!= (SISLANDS_LEAKAGE_INDEX0
+ i
))) {
3715 si_pi
->leakage_voltage
.entries
[count
].voltage
= vddc
;
3716 si_pi
->leakage_voltage
.entries
[count
].leakage_index
=
3717 SISLANDS_LEAKAGE_INDEX0
+ i
;
3721 si_pi
->leakage_voltage
.count
= count
;
3724 static int si_get_leakage_voltage_from_leakage_index(struct amdgpu_device
*adev
,
3725 u32 index
, u16
*leakage_voltage
)
3727 struct si_power_info
*si_pi
= si_get_pi(adev
);
3730 if (leakage_voltage
== NULL
)
3733 if ((index
& 0xff00) != 0xff00)
3736 if ((index
& 0xff) > SISLANDS_MAX_LEAKAGE_COUNT
+ 1)
3739 if (index
< SISLANDS_LEAKAGE_INDEX0
)
3742 for (i
= 0; i
< si_pi
->leakage_voltage
.count
; i
++) {
3743 if (si_pi
->leakage_voltage
.entries
[i
].leakage_index
== index
) {
3744 *leakage_voltage
= si_pi
->leakage_voltage
.entries
[i
].voltage
;
3751 static void si_set_dpm_event_sources(struct amdgpu_device
*adev
, u32 sources
)
3753 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
3754 bool want_thermal_protection
;
3755 enum amdgpu_dpm_event_src dpm_event_src
;
3760 want_thermal_protection
= false;
3762 case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL
):
3763 want_thermal_protection
= true;
3764 dpm_event_src
= AMDGPU_DPM_EVENT_SRC_DIGITAL
;
3766 case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
):
3767 want_thermal_protection
= true;
3768 dpm_event_src
= AMDGPU_DPM_EVENT_SRC_EXTERNAL
;
3770 case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
) |
3771 (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL
)):
3772 want_thermal_protection
= true;
3773 dpm_event_src
= AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL
;
3777 if (want_thermal_protection
) {
3778 WREG32_P(CG_THERMAL_CTRL
, DPM_EVENT_SRC(dpm_event_src
), ~DPM_EVENT_SRC_MASK
);
3779 if (pi
->thermal_protection
)
3780 WREG32_P(GENERAL_PWRMGT
, 0, ~THERMAL_PROTECTION_DIS
);
3782 WREG32_P(GENERAL_PWRMGT
, THERMAL_PROTECTION_DIS
, ~THERMAL_PROTECTION_DIS
);
3786 static void si_enable_auto_throttle_source(struct amdgpu_device
*adev
,
3787 enum amdgpu_dpm_auto_throttle_src source
,
3790 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
3793 if (!(pi
->active_auto_throttle_sources
& (1 << source
))) {
3794 pi
->active_auto_throttle_sources
|= 1 << source
;
3795 si_set_dpm_event_sources(adev
, pi
->active_auto_throttle_sources
);
3798 if (pi
->active_auto_throttle_sources
& (1 << source
)) {
3799 pi
->active_auto_throttle_sources
&= ~(1 << source
);
3800 si_set_dpm_event_sources(adev
, pi
->active_auto_throttle_sources
);
3805 static void si_start_dpm(struct amdgpu_device
*adev
)
3807 WREG32_P(GENERAL_PWRMGT
, GLOBAL_PWRMGT_EN
, ~GLOBAL_PWRMGT_EN
);
3810 static void si_stop_dpm(struct amdgpu_device
*adev
)
3812 WREG32_P(GENERAL_PWRMGT
, 0, ~GLOBAL_PWRMGT_EN
);
3815 static void si_enable_sclk_control(struct amdgpu_device
*adev
, bool enable
)
3818 WREG32_P(SCLK_PWRMGT_CNTL
, 0, ~SCLK_PWRMGT_OFF
);
3820 WREG32_P(SCLK_PWRMGT_CNTL
, SCLK_PWRMGT_OFF
, ~SCLK_PWRMGT_OFF
);
3825 static int si_notify_hardware_of_thermal_state(struct amdgpu_device
*adev
,
3830 if (thermal_level
== 0) {
3831 ret
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_EnableThermalInterrupt
);
3832 if (ret
== PPSMC_Result_OK
)
3840 static void si_notify_hardware_vpu_recovery_event(struct amdgpu_device
*adev
)
3842 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen
, true);
3847 static int si_notify_hw_of_powersource(struct amdgpu_device
*adev
, bool ac_power
)
3850 return (amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_RunningOnAC
) == PPSMC_Result_OK
) ?
3857 static PPSMC_Result
si_send_msg_to_smc_with_parameter(struct amdgpu_device
*adev
,
3858 PPSMC_Msg msg
, u32 parameter
)
3860 WREG32(SMC_SCRATCH0
, parameter
);
3861 return amdgpu_si_send_msg_to_smc(adev
, msg
);
3864 static int si_restrict_performance_levels_before_switch(struct amdgpu_device
*adev
)
3866 if (amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_NoForcedLevel
) != PPSMC_Result_OK
)
3869 return (si_send_msg_to_smc_with_parameter(adev
, PPSMC_MSG_SetEnabledLevels
, 1) == PPSMC_Result_OK
) ?
3873 static int si_dpm_force_performance_level(struct amdgpu_device
*adev
,
3874 enum amd_dpm_forced_level level
)
3876 struct amdgpu_ps
*rps
= adev
->pm
.dpm
.current_ps
;
3877 struct si_ps
*ps
= si_get_ps(rps
);
3878 u32 levels
= ps
->performance_level_count
;
3880 if (level
== AMD_DPM_FORCED_LEVEL_HIGH
) {
3881 if (si_send_msg_to_smc_with_parameter(adev
, PPSMC_MSG_SetEnabledLevels
, levels
) != PPSMC_Result_OK
)
3884 if (si_send_msg_to_smc_with_parameter(adev
, PPSMC_MSG_SetForcedLevels
, 1) != PPSMC_Result_OK
)
3886 } else if (level
== AMD_DPM_FORCED_LEVEL_LOW
) {
3887 if (si_send_msg_to_smc_with_parameter(adev
, PPSMC_MSG_SetForcedLevels
, 0) != PPSMC_Result_OK
)
3890 if (si_send_msg_to_smc_with_parameter(adev
, PPSMC_MSG_SetEnabledLevels
, 1) != PPSMC_Result_OK
)
3892 } else if (level
== AMD_DPM_FORCED_LEVEL_AUTO
) {
3893 if (si_send_msg_to_smc_with_parameter(adev
, PPSMC_MSG_SetForcedLevels
, 0) != PPSMC_Result_OK
)
3896 if (si_send_msg_to_smc_with_parameter(adev
, PPSMC_MSG_SetEnabledLevels
, levels
) != PPSMC_Result_OK
)
3900 adev
->pm
.dpm
.forced_level
= level
;
3906 static int si_set_boot_state(struct amdgpu_device
*adev
)
3908 return (amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_SwitchToInitialState
) == PPSMC_Result_OK
) ?
3913 static int si_set_sw_state(struct amdgpu_device
*adev
)
3915 return (amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_SwitchToSwState
) == PPSMC_Result_OK
) ?
3919 static int si_halt_smc(struct amdgpu_device
*adev
)
3921 if (amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_Halt
) != PPSMC_Result_OK
)
3924 return (amdgpu_si_wait_for_smc_inactive(adev
) == PPSMC_Result_OK
) ?
3928 static int si_resume_smc(struct amdgpu_device
*adev
)
3930 if (amdgpu_si_send_msg_to_smc(adev
, PPSMC_FlushDataCache
) != PPSMC_Result_OK
)
3933 return (amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_Resume
) == PPSMC_Result_OK
) ?
3937 static void si_dpm_start_smc(struct amdgpu_device
*adev
)
3939 amdgpu_si_program_jump_on_start(adev
);
3940 amdgpu_si_start_smc(adev
);
3941 amdgpu_si_smc_clock(adev
, true);
3944 static void si_dpm_stop_smc(struct amdgpu_device
*adev
)
3946 amdgpu_si_reset_smc(adev
);
3947 amdgpu_si_smc_clock(adev
, false);
3950 static int si_process_firmware_header(struct amdgpu_device
*adev
)
3952 struct si_power_info
*si_pi
= si_get_pi(adev
);
3956 ret
= amdgpu_si_read_smc_sram_dword(adev
,
3957 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION
+
3958 SISLANDS_SMC_FIRMWARE_HEADER_stateTable
,
3959 &tmp
, si_pi
->sram_end
);
3963 si_pi
->state_table_start
= tmp
;
3965 ret
= amdgpu_si_read_smc_sram_dword(adev
,
3966 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION
+
3967 SISLANDS_SMC_FIRMWARE_HEADER_softRegisters
,
3968 &tmp
, si_pi
->sram_end
);
3972 si_pi
->soft_regs_start
= tmp
;
3974 ret
= amdgpu_si_read_smc_sram_dword(adev
,
3975 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION
+
3976 SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable
,
3977 &tmp
, si_pi
->sram_end
);
3981 si_pi
->mc_reg_table_start
= tmp
;
3983 ret
= amdgpu_si_read_smc_sram_dword(adev
,
3984 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION
+
3985 SISLANDS_SMC_FIRMWARE_HEADER_fanTable
,
3986 &tmp
, si_pi
->sram_end
);
3990 si_pi
->fan_table_start
= tmp
;
3992 ret
= amdgpu_si_read_smc_sram_dword(adev
,
3993 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION
+
3994 SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable
,
3995 &tmp
, si_pi
->sram_end
);
3999 si_pi
->arb_table_start
= tmp
;
4001 ret
= amdgpu_si_read_smc_sram_dword(adev
,
4002 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION
+
4003 SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable
,
4004 &tmp
, si_pi
->sram_end
);
4008 si_pi
->cac_table_start
= tmp
;
4010 ret
= amdgpu_si_read_smc_sram_dword(adev
,
4011 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION
+
4012 SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration
,
4013 &tmp
, si_pi
->sram_end
);
4017 si_pi
->dte_table_start
= tmp
;
4019 ret
= amdgpu_si_read_smc_sram_dword(adev
,
4020 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION
+
4021 SISLANDS_SMC_FIRMWARE_HEADER_spllTable
,
4022 &tmp
, si_pi
->sram_end
);
4026 si_pi
->spll_table_start
= tmp
;
4028 ret
= amdgpu_si_read_smc_sram_dword(adev
,
4029 SISLANDS_SMC_FIRMWARE_HEADER_LOCATION
+
4030 SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters
,
4031 &tmp
, si_pi
->sram_end
);
4035 si_pi
->papm_cfg_table_start
= tmp
;
4040 static void si_read_clock_registers(struct amdgpu_device
*adev
)
4042 struct si_power_info
*si_pi
= si_get_pi(adev
);
4044 si_pi
->clock_registers
.cg_spll_func_cntl
= RREG32(CG_SPLL_FUNC_CNTL
);
4045 si_pi
->clock_registers
.cg_spll_func_cntl_2
= RREG32(CG_SPLL_FUNC_CNTL_2
);
4046 si_pi
->clock_registers
.cg_spll_func_cntl_3
= RREG32(CG_SPLL_FUNC_CNTL_3
);
4047 si_pi
->clock_registers
.cg_spll_func_cntl_4
= RREG32(CG_SPLL_FUNC_CNTL_4
);
4048 si_pi
->clock_registers
.cg_spll_spread_spectrum
= RREG32(CG_SPLL_SPREAD_SPECTRUM
);
4049 si_pi
->clock_registers
.cg_spll_spread_spectrum_2
= RREG32(CG_SPLL_SPREAD_SPECTRUM_2
);
4050 si_pi
->clock_registers
.dll_cntl
= RREG32(DLL_CNTL
);
4051 si_pi
->clock_registers
.mclk_pwrmgt_cntl
= RREG32(MCLK_PWRMGT_CNTL
);
4052 si_pi
->clock_registers
.mpll_ad_func_cntl
= RREG32(MPLL_AD_FUNC_CNTL
);
4053 si_pi
->clock_registers
.mpll_dq_func_cntl
= RREG32(MPLL_DQ_FUNC_CNTL
);
4054 si_pi
->clock_registers
.mpll_func_cntl
= RREG32(MPLL_FUNC_CNTL
);
4055 si_pi
->clock_registers
.mpll_func_cntl_1
= RREG32(MPLL_FUNC_CNTL_1
);
4056 si_pi
->clock_registers
.mpll_func_cntl_2
= RREG32(MPLL_FUNC_CNTL_2
);
4057 si_pi
->clock_registers
.mpll_ss1
= RREG32(MPLL_SS1
);
4058 si_pi
->clock_registers
.mpll_ss2
= RREG32(MPLL_SS2
);
4061 static void si_enable_thermal_protection(struct amdgpu_device
*adev
,
4065 WREG32_P(GENERAL_PWRMGT
, 0, ~THERMAL_PROTECTION_DIS
);
4067 WREG32_P(GENERAL_PWRMGT
, THERMAL_PROTECTION_DIS
, ~THERMAL_PROTECTION_DIS
);
4070 static void si_enable_acpi_power_management(struct amdgpu_device
*adev
)
4072 WREG32_P(GENERAL_PWRMGT
, STATIC_PM_EN
, ~STATIC_PM_EN
);
4076 static int si_enter_ulp_state(struct amdgpu_device
*adev
)
4078 WREG32(SMC_MESSAGE_0
, PPSMC_MSG_SwitchToMinimumPower
);
4085 static int si_exit_ulp_state(struct amdgpu_device
*adev
)
4089 WREG32(SMC_MESSAGE_0
, PPSMC_MSG_ResumeFromMinimumPower
);
4093 for (i
= 0; i
< adev
->usec_timeout
; i
++) {
4094 if (RREG32(SMC_RESP_0
) == 1)
4103 static int si_notify_smc_display_change(struct amdgpu_device
*adev
,
4106 PPSMC_Msg msg
= has_display
?
4107 PPSMC_MSG_HasDisplay
: PPSMC_MSG_NoDisplay
;
4109 return (amdgpu_si_send_msg_to_smc(adev
, msg
) == PPSMC_Result_OK
) ?
4113 static void si_program_response_times(struct amdgpu_device
*adev
)
4115 u32 voltage_response_time
, backbias_response_time
, acpi_delay_time
, vbi_time_out
;
4116 u32 vddc_dly
, acpi_dly
, vbi_dly
;
4117 u32 reference_clock
;
4119 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_mvdd_chg_time
, 1);
4121 voltage_response_time
= (u32
)adev
->pm
.dpm
.voltage_response_time
;
4122 backbias_response_time
= (u32
)adev
->pm
.dpm
.backbias_response_time
;
4124 if (voltage_response_time
== 0)
4125 voltage_response_time
= 1000;
4127 acpi_delay_time
= 15000;
4128 vbi_time_out
= 100000;
4130 reference_clock
= amdgpu_asic_get_xclk(adev
);
4132 vddc_dly
= (voltage_response_time
* reference_clock
) / 100;
4133 acpi_dly
= (acpi_delay_time
* reference_clock
) / 100;
4134 vbi_dly
= (vbi_time_out
* reference_clock
) / 100;
4136 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_delay_vreg
, vddc_dly
);
4137 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_delay_acpi
, acpi_dly
);
4138 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_mclk_chg_timeout
, vbi_dly
);
4139 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_mc_block_delay
, 0xAA);
4142 static void si_program_ds_registers(struct amdgpu_device
*adev
)
4144 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
4147 /* DEEP_SLEEP_CLK_SEL field should be 0x10 on tahiti A0 */
4148 if (adev
->asic_type
== CHIP_TAHITI
&& adev
->rev_id
== 0x0)
4153 if (eg_pi
->sclk_deep_sleep
) {
4154 WREG32_P(MISC_CLK_CNTL
, DEEP_SLEEP_CLK_SEL(tmp
), ~DEEP_SLEEP_CLK_SEL_MASK
);
4155 WREG32_P(CG_SPLL_AUTOSCALE_CNTL
, AUTOSCALE_ON_SS_CLEAR
,
4156 ~AUTOSCALE_ON_SS_CLEAR
);
4160 static void si_program_display_gap(struct amdgpu_device
*adev
)
4165 tmp
= RREG32(CG_DISPLAY_GAP_CNTL
) & ~(DISP1_GAP_MASK
| DISP2_GAP_MASK
);
4166 if (adev
->pm
.dpm
.new_active_crtc_count
> 0)
4167 tmp
|= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM
);
4169 tmp
|= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE
);
4171 if (adev
->pm
.dpm
.new_active_crtc_count
> 1)
4172 tmp
|= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM
);
4174 tmp
|= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE
);
4176 WREG32(CG_DISPLAY_GAP_CNTL
, tmp
);
4178 tmp
= RREG32(DCCG_DISP_SLOW_SELECT_REG
);
4179 pipe
= (tmp
& DCCG_DISP1_SLOW_SELECT_MASK
) >> DCCG_DISP1_SLOW_SELECT_SHIFT
;
4181 if ((adev
->pm
.dpm
.new_active_crtc_count
> 0) &&
4182 (!(adev
->pm
.dpm
.new_active_crtcs
& (1 << pipe
)))) {
4183 /* find the first active crtc */
4184 for (i
= 0; i
< adev
->mode_info
.num_crtc
; i
++) {
4185 if (adev
->pm
.dpm
.new_active_crtcs
& (1 << i
))
4188 if (i
== adev
->mode_info
.num_crtc
)
4193 tmp
&= ~DCCG_DISP1_SLOW_SELECT_MASK
;
4194 tmp
|= DCCG_DISP1_SLOW_SELECT(pipe
);
4195 WREG32(DCCG_DISP_SLOW_SELECT_REG
, tmp
);
4198 /* Setting this to false forces the performance state to low if the crtcs are disabled.
4199 * This can be a problem on PowerXpress systems or if you want to use the card
4200 * for offscreen rendering or compute if there are no crtcs enabled.
4202 si_notify_smc_display_change(adev
, adev
->pm
.dpm
.new_active_crtc_count
> 0);
4205 static void si_enable_spread_spectrum(struct amdgpu_device
*adev
, bool enable
)
4207 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
4211 WREG32_P(GENERAL_PWRMGT
, DYN_SPREAD_SPECTRUM_EN
, ~DYN_SPREAD_SPECTRUM_EN
);
4213 WREG32_P(CG_SPLL_SPREAD_SPECTRUM
, 0, ~SSEN
);
4214 WREG32_P(GENERAL_PWRMGT
, 0, ~DYN_SPREAD_SPECTRUM_EN
);
4218 static void si_setup_bsp(struct amdgpu_device
*adev
)
4220 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
4221 u32 xclk
= amdgpu_asic_get_xclk(adev
);
4223 r600_calculate_u_and_p(pi
->asi
,
4229 r600_calculate_u_and_p(pi
->pasi
,
4236 pi
->dsp
= BSP(pi
->bsp
) | BSU(pi
->bsu
);
4237 pi
->psp
= BSP(pi
->pbsp
) | BSU(pi
->pbsu
);
4239 WREG32(CG_BSP
, pi
->dsp
);
4242 static void si_program_git(struct amdgpu_device
*adev
)
4244 WREG32_P(CG_GIT
, CG_GICST(R600_GICST_DFLT
), ~CG_GICST_MASK
);
4247 static void si_program_tp(struct amdgpu_device
*adev
)
4250 enum r600_td td
= R600_TD_DFLT
;
4252 for (i
= 0; i
< R600_PM_NUMBER_OF_TC
; i
++)
4253 WREG32(CG_FFCT_0
+ i
, (UTC_0(r600_utc
[i
]) | DTC_0(r600_dtc
[i
])));
4255 if (td
== R600_TD_AUTO
)
4256 WREG32_P(SCLK_PWRMGT_CNTL
, 0, ~FIR_FORCE_TREND_SEL
);
4258 WREG32_P(SCLK_PWRMGT_CNTL
, FIR_FORCE_TREND_SEL
, ~FIR_FORCE_TREND_SEL
);
4260 if (td
== R600_TD_UP
)
4261 WREG32_P(SCLK_PWRMGT_CNTL
, 0, ~FIR_TREND_MODE
);
4263 if (td
== R600_TD_DOWN
)
4264 WREG32_P(SCLK_PWRMGT_CNTL
, FIR_TREND_MODE
, ~FIR_TREND_MODE
);
4267 static void si_program_tpp(struct amdgpu_device
*adev
)
4269 WREG32(CG_TPC
, R600_TPC_DFLT
);
4272 static void si_program_sstp(struct amdgpu_device
*adev
)
4274 WREG32(CG_SSP
, (SSTU(R600_SSTU_DFLT
) | SST(R600_SST_DFLT
)));
4277 static void si_enable_display_gap(struct amdgpu_device
*adev
)
4279 u32 tmp
= RREG32(CG_DISPLAY_GAP_CNTL
);
4281 tmp
&= ~(DISP1_GAP_MASK
| DISP2_GAP_MASK
);
4282 tmp
|= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE
) |
4283 DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE
));
4285 tmp
&= ~(DISP1_GAP_MCHG_MASK
| DISP2_GAP_MCHG_MASK
);
4286 tmp
|= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK
) |
4287 DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE
));
4288 WREG32(CG_DISPLAY_GAP_CNTL
, tmp
);
4291 static void si_program_vc(struct amdgpu_device
*adev
)
4293 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
4295 WREG32(CG_FTV
, pi
->vrc
);
4298 static void si_clear_vc(struct amdgpu_device
*adev
)
/*
 * Map a DDR3 memory clock (10 kHz units) onto the 4-bit MC parameter
 * index: 0 below 100 MHz, 0x0f at/above 800 MHz, and one step per 50 MHz
 * in between (offset by one).
 */
static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
{
	u8 mc_para_index;

	if (memory_clock < 10000)
		mc_para_index = 0;
	else if (memory_clock >= 80000)
		mc_para_index = 0x0f;
	else
		mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
	return mc_para_index;
}
/*
 * Map a GDDR5 memory clock (10 kHz units) onto the 4-bit MC parameter
 * index.  Strobe mode uses 25 MHz steps in the 125-475 MHz window; normal
 * mode uses 50 MHz steps in the 650-1350 MHz window; both clamp to
 * [0x00, 0x0f].
 */
static u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
{
	u8 mc_para_index;

	if (strobe_mode) {
		if (memory_clock < 12500)
			mc_para_index = 0x00;
		else if (memory_clock > 47500)
			mc_para_index = 0x0f;
		else
			mc_para_index = (u8)((memory_clock - 10000) / 2500);
	} else {
		if (memory_clock < 65000)
			mc_para_index = 0x00;
		else if (memory_clock > 135000)
			mc_para_index = 0x0f;
		else
			mc_para_index = (u8)((memory_clock - 60000) / 5000);
	}
	return mc_para_index;
}
4338 static u8
si_get_strobe_mode_settings(struct amdgpu_device
*adev
, u32 mclk
)
4340 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
4341 bool strobe_mode
= false;
4344 if (mclk
<= pi
->mclk_strobe_mode_threshold
)
4347 if (adev
->mc
.vram_type
== AMDGPU_VRAM_TYPE_GDDR5
)
4348 result
= si_get_mclk_frequency_ratio(mclk
, strobe_mode
);
4350 result
= si_get_ddr3_mclk_frequency_ratio(mclk
);
4353 result
|= SISLANDS_SMC_STROBE_ENABLE
;
4358 static int si_upload_firmware(struct amdgpu_device
*adev
)
4360 struct si_power_info
*si_pi
= si_get_pi(adev
);
4362 amdgpu_si_reset_smc(adev
);
4363 amdgpu_si_smc_clock(adev
, false);
4365 return amdgpu_si_load_smc_ucode(adev
, si_pi
->sram_end
);
4368 static bool si_validate_phase_shedding_tables(struct amdgpu_device
*adev
,
4369 const struct atom_voltage_table
*table
,
4370 const struct amdgpu_phase_shedding_limits_table
*limits
)
4372 u32 data
, num_bits
, num_levels
;
4374 if ((table
== NULL
) || (limits
== NULL
))
4377 data
= table
->mask_low
;
4379 num_bits
= hweight32(data
);
4384 num_levels
= (1 << num_bits
);
4386 if (table
->count
!= num_levels
)
4389 if (limits
->count
!= (num_levels
- 1))
4395 static void si_trim_voltage_table_to_fit_state_table(struct amdgpu_device
*adev
,
4396 u32 max_voltage_steps
,
4397 struct atom_voltage_table
*voltage_table
)
4399 unsigned int i
, diff
;
4401 if (voltage_table
->count
<= max_voltage_steps
)
4404 diff
= voltage_table
->count
- max_voltage_steps
;
4406 for (i
= 0; i
< max_voltage_steps
; i
++)
4407 voltage_table
->entries
[i
] = voltage_table
->entries
[i
+ diff
];
4409 voltage_table
->count
= max_voltage_steps
;
4412 static int si_get_svi2_voltage_table(struct amdgpu_device
*adev
,
4413 struct amdgpu_clock_voltage_dependency_table
*voltage_dependency_table
,
4414 struct atom_voltage_table
*voltage_table
)
4418 if (voltage_dependency_table
== NULL
)
4421 voltage_table
->mask_low
= 0;
4422 voltage_table
->phase_delay
= 0;
4424 voltage_table
->count
= voltage_dependency_table
->count
;
4425 for (i
= 0; i
< voltage_table
->count
; i
++) {
4426 voltage_table
->entries
[i
].value
= voltage_dependency_table
->entries
[i
].v
;
4427 voltage_table
->entries
[i
].smio_low
= 0;
4433 static int si_construct_voltage_tables(struct amdgpu_device
*adev
)
4435 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
4436 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
4437 struct si_power_info
*si_pi
= si_get_pi(adev
);
4440 if (pi
->voltage_control
) {
4441 ret
= amdgpu_atombios_get_voltage_table(adev
, VOLTAGE_TYPE_VDDC
,
4442 VOLTAGE_OBJ_GPIO_LUT
, &eg_pi
->vddc_voltage_table
);
4446 if (eg_pi
->vddc_voltage_table
.count
> SISLANDS_MAX_NO_VREG_STEPS
)
4447 si_trim_voltage_table_to_fit_state_table(adev
,
4448 SISLANDS_MAX_NO_VREG_STEPS
,
4449 &eg_pi
->vddc_voltage_table
);
4450 } else if (si_pi
->voltage_control_svi2
) {
4451 ret
= si_get_svi2_voltage_table(adev
,
4452 &adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_mclk
,
4453 &eg_pi
->vddc_voltage_table
);
4460 if (eg_pi
->vddci_control
) {
4461 ret
= amdgpu_atombios_get_voltage_table(adev
, VOLTAGE_TYPE_VDDCI
,
4462 VOLTAGE_OBJ_GPIO_LUT
, &eg_pi
->vddci_voltage_table
);
4466 if (eg_pi
->vddci_voltage_table
.count
> SISLANDS_MAX_NO_VREG_STEPS
)
4467 si_trim_voltage_table_to_fit_state_table(adev
,
4468 SISLANDS_MAX_NO_VREG_STEPS
,
4469 &eg_pi
->vddci_voltage_table
);
4471 if (si_pi
->vddci_control_svi2
) {
4472 ret
= si_get_svi2_voltage_table(adev
,
4473 &adev
->pm
.dpm
.dyn_state
.vddci_dependency_on_mclk
,
4474 &eg_pi
->vddci_voltage_table
);
4479 if (pi
->mvdd_control
) {
4480 ret
= amdgpu_atombios_get_voltage_table(adev
, VOLTAGE_TYPE_MVDDC
,
4481 VOLTAGE_OBJ_GPIO_LUT
, &si_pi
->mvdd_voltage_table
);
4484 pi
->mvdd_control
= false;
4488 if (si_pi
->mvdd_voltage_table
.count
== 0) {
4489 pi
->mvdd_control
= false;
4493 if (si_pi
->mvdd_voltage_table
.count
> SISLANDS_MAX_NO_VREG_STEPS
)
4494 si_trim_voltage_table_to_fit_state_table(adev
,
4495 SISLANDS_MAX_NO_VREG_STEPS
,
4496 &si_pi
->mvdd_voltage_table
);
4499 if (si_pi
->vddc_phase_shed_control
) {
4500 ret
= amdgpu_atombios_get_voltage_table(adev
, VOLTAGE_TYPE_VDDC
,
4501 VOLTAGE_OBJ_PHASE_LUT
, &si_pi
->vddc_phase_shed_table
);
4503 si_pi
->vddc_phase_shed_control
= false;
4505 if ((si_pi
->vddc_phase_shed_table
.count
== 0) ||
4506 (si_pi
->vddc_phase_shed_table
.count
> SISLANDS_MAX_NO_VREG_STEPS
))
4507 si_pi
->vddc_phase_shed_control
= false;
4513 static void si_populate_smc_voltage_table(struct amdgpu_device
*adev
,
4514 const struct atom_voltage_table
*voltage_table
,
4515 SISLANDS_SMC_STATETABLE
*table
)
4519 for (i
= 0; i
< voltage_table
->count
; i
++)
4520 table
->lowSMIO
[i
] |= cpu_to_be32(voltage_table
->entries
[i
].smio_low
);
4523 static int si_populate_smc_voltage_tables(struct amdgpu_device
*adev
,
4524 SISLANDS_SMC_STATETABLE
*table
)
4526 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
4527 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
4528 struct si_power_info
*si_pi
= si_get_pi(adev
);
4531 if (si_pi
->voltage_control_svi2
) {
4532 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc
,
4533 si_pi
->svc_gpio_id
);
4534 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd
,
4535 si_pi
->svd_gpio_id
);
4536 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_svi_rework_plat_type
,
4539 if (eg_pi
->vddc_voltage_table
.count
) {
4540 si_populate_smc_voltage_table(adev
, &eg_pi
->vddc_voltage_table
, table
);
4541 table
->voltageMaskTable
.lowMask
[SISLANDS_SMC_VOLTAGEMASK_VDDC
] =
4542 cpu_to_be32(eg_pi
->vddc_voltage_table
.mask_low
);
4544 for (i
= 0; i
< eg_pi
->vddc_voltage_table
.count
; i
++) {
4545 if (pi
->max_vddc_in_table
<= eg_pi
->vddc_voltage_table
.entries
[i
].value
) {
4546 table
->maxVDDCIndexInPPTable
= i
;
4552 if (eg_pi
->vddci_voltage_table
.count
) {
4553 si_populate_smc_voltage_table(adev
, &eg_pi
->vddci_voltage_table
, table
);
4555 table
->voltageMaskTable
.lowMask
[SISLANDS_SMC_VOLTAGEMASK_VDDCI
] =
4556 cpu_to_be32(eg_pi
->vddci_voltage_table
.mask_low
);
4560 if (si_pi
->mvdd_voltage_table
.count
) {
4561 si_populate_smc_voltage_table(adev
, &si_pi
->mvdd_voltage_table
, table
);
4563 table
->voltageMaskTable
.lowMask
[SISLANDS_SMC_VOLTAGEMASK_MVDD
] =
4564 cpu_to_be32(si_pi
->mvdd_voltage_table
.mask_low
);
4567 if (si_pi
->vddc_phase_shed_control
) {
4568 if (si_validate_phase_shedding_tables(adev
, &si_pi
->vddc_phase_shed_table
,
4569 &adev
->pm
.dpm
.dyn_state
.phase_shedding_limits_table
)) {
4570 si_populate_smc_voltage_table(adev
, &si_pi
->vddc_phase_shed_table
, table
);
4572 table
->phaseMaskTable
.lowMask
[SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING
] =
4573 cpu_to_be32(si_pi
->vddc_phase_shed_table
.mask_low
);
4575 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_phase_shedding_delay
,
4576 (u32
)si_pi
->vddc_phase_shed_table
.phase_delay
);
4578 si_pi
->vddc_phase_shed_control
= false;
4586 static int si_populate_voltage_value(struct amdgpu_device
*adev
,
4587 const struct atom_voltage_table
*table
,
4588 u16 value
, SISLANDS_SMC_VOLTAGE_VALUE
*voltage
)
4592 for (i
= 0; i
< table
->count
; i
++) {
4593 if (value
<= table
->entries
[i
].value
) {
4594 voltage
->index
= (u8
)i
;
4595 voltage
->value
= cpu_to_be16(table
->entries
[i
].value
);
4600 if (i
>= table
->count
)
4606 static int si_populate_mvdd_value(struct amdgpu_device
*adev
, u32 mclk
,
4607 SISLANDS_SMC_VOLTAGE_VALUE
*voltage
)
4609 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
4610 struct si_power_info
*si_pi
= si_get_pi(adev
);
4612 if (pi
->mvdd_control
) {
4613 if (mclk
<= pi
->mvdd_split_frequency
)
4616 voltage
->index
= (u8
)(si_pi
->mvdd_voltage_table
.count
) - 1;
4618 voltage
->value
= cpu_to_be16(si_pi
->mvdd_voltage_table
.entries
[voltage
->index
].value
);
4623 static int si_get_std_voltage_value(struct amdgpu_device
*adev
,
4624 SISLANDS_SMC_VOLTAGE_VALUE
*voltage
,
4628 bool voltage_found
= false;
4629 *std_voltage
= be16_to_cpu(voltage
->value
);
4631 if (adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.entries
) {
4632 if (adev
->pm
.dpm
.platform_caps
& ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE
) {
4633 if (adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
.entries
== NULL
)
4636 for (v_index
= 0; (u32
)v_index
< adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
.count
; v_index
++) {
4637 if (be16_to_cpu(voltage
->value
) ==
4638 (u16
)adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
.entries
[v_index
].v
) {
4639 voltage_found
= true;
4640 if ((u32
)v_index
< adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.count
)
4642 adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.entries
[v_index
].vddc
;
4645 adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.entries
[adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.count
-1].vddc
;
4650 if (!voltage_found
) {
4651 for (v_index
= 0; (u32
)v_index
< adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
.count
; v_index
++) {
4652 if (be16_to_cpu(voltage
->value
) <=
4653 (u16
)adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
.entries
[v_index
].v
) {
4654 voltage_found
= true;
4655 if ((u32
)v_index
< adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.count
)
4657 adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.entries
[v_index
].vddc
;
4660 adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.entries
[adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.count
-1].vddc
;
4666 if ((u32
)voltage
->index
< adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.count
)
4667 *std_voltage
= adev
->pm
.dpm
.dyn_state
.cac_leakage_table
.entries
[voltage
->index
].vddc
;
4674 static int si_populate_std_voltage_value(struct amdgpu_device
*adev
,
4675 u16 value
, u8 index
,
4676 SISLANDS_SMC_VOLTAGE_VALUE
*voltage
)
4678 voltage
->index
= index
;
4679 voltage
->value
= cpu_to_be16(value
);
4684 static int si_populate_phase_shedding_value(struct amdgpu_device
*adev
,
4685 const struct amdgpu_phase_shedding_limits_table
*limits
,
4686 u16 voltage
, u32 sclk
, u32 mclk
,
4687 SISLANDS_SMC_VOLTAGE_VALUE
*smc_voltage
)
4691 for (i
= 0; i
< limits
->count
; i
++) {
4692 if ((voltage
<= limits
->entries
[i
].voltage
) &&
4693 (sclk
<= limits
->entries
[i
].sclk
) &&
4694 (mclk
<= limits
->entries
[i
].mclk
))
4698 smc_voltage
->phase_settings
= (u8
)i
;
/*
 * Initialize the MC arbitration table index in SMC SRAM: read the dword at
 * the start of the arb table, set its top byte to MC_CG_ARB_FREQ_F1 (the
 * register set the SMC should start from), and write it back.
 * Returns 0 on success or the SMC SRAM access error.
 */
static int si_init_arb_table_index(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	u32 tmp;
	int ret;

	ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start,
					    &tmp, si_pi->sram_end);
	if (ret)
		return ret;

	/* Replace only the index byte (bits 31:24); keep the rest intact. */
	tmp &= 0x00FFFFFF;
	tmp |= MC_CG_ARB_FREQ_F1 << 24;

	return amdgpu_si_write_smc_sram_dword(adev, si_pi->arb_table_start,
					      tmp, si_pi->sram_end);
}
/*
 * Copy MC arbitration register set F0 into F1 and switch the MC to F1,
 * so the driver-managed set is active before DPM takes over.
 */
static int si_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
{
	return ni_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
}
/*
 * Ask the SMC to restore its default power state.
 * Returns 0 if the firmware acknowledged the message, -EINVAL otherwise.
 */
static int si_reset_to_default(struct amdgpu_device *adev)
{
	return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
		0 : -EINVAL;
}
/*
 * Force the memory controller back onto arbitration register set F0.
 * Reads the currently-active set index from the top byte of the arb table
 * in SMC SRAM; if it is already F0 this is a no-op, otherwise the active
 * set is copied into F0 and the MC is switched to it.
 */
static int si_force_switch_to_arb_f0(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	u32 tmp;
	int ret;

	ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start,
					    &tmp, si_pi->sram_end);
	if (ret)
		return ret;

	/* Active arb set index lives in bits 31:24. */
	tmp = (tmp >> 24) & 0xff;

	if (tmp == MC_CG_ARB_FREQ_F0)
		return 0;

	return ni_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
}
/*
 * Compute the MC arbiter refresh rate value for a given engine clock.
 * DRAM geometry (row count) comes from MC_ARB_RAMCFG, the refresh interval
 * exponent from MC_SEQ_MISC0.  engine_clock is in 10 kHz units.
 */
static u32 si_calculate_memory_refresh_rate(struct amdgpu_device *adev,
					    u32 engine_clock)
{
	u32 dram_rows;
	u32 dram_refresh_rate;
	u32 mc_arb_rfsh_rate;
	u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;

	/* NOTE(review): encodings >= 4 are clamped to 16384 rows — confirm
	 * against the MC register spec for this ASIC family. */
	if (tmp >= 4)
		dram_rows = 16384;
	else
		dram_rows = 1 << (tmp + 10);

	/* Refresh interval: 2^((MISC0[1:0]) + 3) */
	dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3);
	mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;

	return mc_arb_rfsh_rate;
}
/*
 * Fill one SMC MC-arbiter DRAM timing register set for a performance level.
 * Asks the VBIOS to program engine/memory DRAM timings for the level's
 * clocks, then snapshots the resulting MC_ARB_* registers (big-endian for
 * the SMC).  Always returns 0.
 */
static int si_populate_memory_timing_parameters(struct amdgpu_device *adev,
						struct rv7xx_pl *pl,
						SMC_SIslands_MCArbDramTimingRegisterSet *arb_regs)
{
	u32 dram_timing;
	u32 dram_timing2;
	u32 burst_time;

	arb_regs->mc_arb_rfsh_rate =
		(u8)si_calculate_memory_refresh_rate(adev, pl->sclk);

	/* Let the VBIOS compute and program timings for this clock pair. */
	amdgpu_atombios_set_engine_dram_timings(adev,
						pl->sclk,
						pl->mclk);

	/* Capture what the VBIOS programmed. */
	dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
	burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;

	arb_regs->mc_arb_dram_timing  = cpu_to_be32(dram_timing);
	arb_regs->mc_arb_dram_timing2 = cpu_to_be32(dram_timing2);
	arb_regs->mc_arb_burst_time = (u8)burst_time;

	return 0;
}
/*
 * Upload MC arbiter DRAM timing sets for every performance level of a
 * power state into SMC SRAM, starting at slot 'first_arb_set' of the
 * SMC's arb timing table.  Stops and returns the first error.
 */
static int si_do_program_memory_timing_parameters(struct amdgpu_device *adev,
						  struct amdgpu_ps *amdgpu_state,
						  unsigned int first_arb_set)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	struct si_ps *state = si_get_ps(amdgpu_state);
	SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
	int i, ret = 0;

	for (i = 0; i < state->performance_level_count; i++) {
		ret = si_populate_memory_timing_parameters(adev, &state->performance_levels[i], &arb_regs);
		if (ret)
			break;
		/* One register-set slot per level, offset from the table base. */
		ret = amdgpu_si_copy_bytes_to_smc(adev,
						  si_pi->arb_table_start +
						  offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
						  sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * (first_arb_set + i),
						  (u8 *)&arb_regs,
						  sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
						  si_pi->sram_end);
		if (ret)
			break;
	}

	return ret;
}
/*
 * Program memory timings for a new driver state into the driver-state
 * arb slots (convenience wrapper around the _do_ variant).
 */
static int si_program_memory_timing_parameters(struct amdgpu_device *adev,
					       struct amdgpu_ps *amdgpu_new_state)
{
	return si_do_program_memory_timing_parameters(adev, amdgpu_new_state,
						      SISLANDS_DRIVER_STATE_ARB_INDEX);
}
/*
 * Populate the boot-time MVDD entry for the initial SMC state.
 * Only does anything when MVDD control is enabled; otherwise the entry is
 * left untouched and 0 is returned.
 */
static int si_populate_initial_mvdd_value(struct amdgpu_device *adev,
					  struct SISLANDS_SMC_VOLTAGE_VALUE *voltage)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);

	if (pi->mvdd_control)
		return si_populate_voltage_value(adev, &si_pi->mvdd_voltage_table,
						 si_pi->mvdd_bootup_value, voltage);

	return 0;
}
/*
 * Build the single-level "initial" (boot) state in the SMC state table.
 * Copies the saved boot MCLK/SCLK PLL register images, the boot clocks and
 * voltages, and conservative DPM2 throttling defaults into level 0 of
 * table->initialState.  Voltage lookup failures are tolerated for the
 * std_vddc path (it is simply skipped).  Always returns 0.
 */
static int si_populate_smc_initial_state(struct amdgpu_device *adev,
					 struct amdgpu_ps *amdgpu_initial_state,
					 SISLANDS_SMC_STATETABLE *table)
{
	struct si_ps *initial_state = si_get_ps(amdgpu_initial_state);
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	u32 reg;
	int ret;

	/* Boot-time MCLK PLL register images, SMC wants big-endian. */
	table->initialState.levels[0].mclk.vDLL_CNTL =
		cpu_to_be32(si_pi->clock_registers.dll_cntl);
	table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
		cpu_to_be32(si_pi->clock_registers.mclk_pwrmgt_cntl);
	table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
		cpu_to_be32(si_pi->clock_registers.mpll_ad_func_cntl);
	table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
		cpu_to_be32(si_pi->clock_registers.mpll_dq_func_cntl);
	table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL =
		cpu_to_be32(si_pi->clock_registers.mpll_func_cntl);
	table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
		cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_1);
	table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
		cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_2);
	table->initialState.levels[0].mclk.vMPLL_SS =
		cpu_to_be32(si_pi->clock_registers.mpll_ss1);
	table->initialState.levels[0].mclk.vMPLL_SS2 =
		cpu_to_be32(si_pi->clock_registers.mpll_ss2);

	table->initialState.levels[0].mclk.mclk_value =
		cpu_to_be32(initial_state->performance_levels[0].mclk);

	/* Boot-time SCLK (SPLL) register images. */
	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl);
	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_2);
	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_3);
	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_4);
	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
		cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum);
	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2 =
		cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum_2);

	table->initialState.levels[0].sclk.sclk_value =
		cpu_to_be32(initial_state->performance_levels[0].sclk);

	table->initialState.levels[0].arbRefreshState =
		SISLANDS_INITIAL_STATE_ARB_INDEX;

	table->initialState.levels[0].ACIndex = 0;

	ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
					initial_state->performance_levels[0].vddc,
					&table->initialState.levels[0].vddc);

	if (!ret) {
		u16 std_vddc;

		/* std_vddc is best-effort: skipped if the lookup fails. */
		ret = si_get_std_voltage_value(adev,
					       &table->initialState.levels[0].vddc,
					       &std_vddc);
		if (!ret)
			si_populate_std_voltage_value(adev, std_vddc,
						      table->initialState.levels[0].vddc.index,
						      &table->initialState.levels[0].std_vddc);
	}

	if (eg_pi->vddci_control)
		si_populate_voltage_value(adev,
					  &eg_pi->vddci_voltage_table,
					  initial_state->performance_levels[0].vddci,
					  &table->initialState.levels[0].vddci);

	if (si_pi->vddc_phase_shed_control)
		si_populate_phase_shedding_value(adev,
						 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
						 initial_state->performance_levels[0].vddc,
						 initial_state->performance_levels[0].sclk,
						 initial_state->performance_levels[0].mclk,
						 &table->initialState.levels[0].vddc);

	si_populate_initial_mvdd_value(adev, &table->initialState.levels[0].mvdd);

	/* aT: full CG_R window, no CG_L. */
	reg = CG_R(0xffff) | CG_L(0);
	table->initialState.levels[0].aT = cpu_to_be32(reg);
	table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
	table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen;

	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
		table->initialState.levels[0].strobeMode =
			si_get_strobe_mode_settings(adev,
						    initial_state->performance_levels[0].mclk);

		/* EDC only above the enable threshold. */
		if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold)
			table->initialState.levels[0].mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG;
		else
			table->initialState.levels[0].mcFlags = 0;
	}

	table->initialState.levelCount = 1;

	table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;

	/* DPM2 throttling disabled for the boot state. */
	table->initialState.levels[0].dpm2.MaxPS = 0;
	table->initialState.levels[0].dpm2.NearTDPDec = 0;
	table->initialState.levels[0].dpm2.AboveSafeInc = 0;
	table->initialState.levels[0].dpm2.BelowSafeInc = 0;
	table->initialState.levels[0].dpm2.PwrEfficiencyRatio = 0;

	reg = MIN_POWER_MASK | MAX_POWER_MASK;
	table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg);

	reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
	table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);

	return 0;
}
/*
 * Build the ACPI (lowest-power, e.g. S3-adjacent) state in the SMC table.
 * Starts from a copy of the initial state, then programs ACPI voltages
 * (acpi_vddc if the VBIOS provided one, else the minimum table voltage),
 * puts the MPLL/DLL into reset/bypass, forces the SPLL mux to a fixed
 * source, and zeroes the clock values.  Always returns 0.
 */
static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
				      SISLANDS_SMC_STATETABLE *table)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
	u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
	u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
	u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
	u32 dll_cntl = si_pi->clock_registers.dll_cntl;
	u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
	u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
	u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
	u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
	u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
	u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
	u32 reg;
	int ret;

	table->ACPIState = table->initialState;

	/* ACPI state is not a DC-only state. */
	table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;

	if (pi->acpi_vddc) {
		ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
						pi->acpi_vddc, &table->ACPIState.levels[0].vddc);
		if (!ret) {
			u16 std_vddc;

			ret = si_get_std_voltage_value(adev,
						       &table->ACPIState.levels[0].vddc, &std_vddc);
			if (!ret)
				si_populate_std_voltage_value(adev, std_vddc,
							      table->ACPIState.levels[0].vddc.index,
							      &table->ACPIState.levels[0].std_vddc);
		}
		table->ACPIState.levels[0].gen2PCIE = si_pi->acpi_pcie_gen;

		if (si_pi->vddc_phase_shed_control) {
			/* sclk/mclk are 0 in ACPI state. */
			si_populate_phase_shedding_value(adev,
							 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
							 pi->acpi_vddc,
							 0,
							 0,
							 &table->ACPIState.levels[0].vddc);
		}
	} else {
		/* No VBIOS ACPI voltage: fall back to the table minimum. */
		ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
						pi->min_vddc_in_table, &table->ACPIState.levels[0].vddc);
		if (!ret) {
			u16 std_vddc;

			ret = si_get_std_voltage_value(adev,
						       &table->ACPIState.levels[0].vddc, &std_vddc);

			if (!ret)
				si_populate_std_voltage_value(adev, std_vddc,
							      table->ACPIState.levels[0].vddc.index,
							      &table->ACPIState.levels[0].std_vddc);
		}
		table->ACPIState.levels[0].gen2PCIE =
			(u8)r600_get_pcie_gen_support(adev,
						      si_pi->sys_pcie_mask,
						      si_pi->boot_pcie_gen,
						      AMDGPU_PCIE_GEN1);

		if (si_pi->vddc_phase_shed_control)
			si_populate_phase_shedding_value(adev,
							 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
							 pi->min_vddc_in_table,
							 0,
							 0,
							 &table->ACPIState.levels[0].vddc);
	}

	if (pi->acpi_vddc) {
		if (eg_pi->acpi_vddci)
			si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
						  eg_pi->acpi_vddci,
						  &table->ACPIState.levels[0].vddci);
	}

	/* Hold the memory DLLs in reset and power them down. */
	mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
	mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);

	dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);

	/* SCLK source mux fixed to setting 4 for the ACPI state. */
	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= SCLK_MUX_SEL(4);

	table->ACPIState.levels[0].mclk.vDLL_CNTL =
		cpu_to_be32(dll_cntl);
	table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
		cpu_to_be32(mclk_pwrmgt_cntl);
	table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
		cpu_to_be32(mpll_ad_func_cntl);
	table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
		cpu_to_be32(mpll_dq_func_cntl);
	table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL =
		cpu_to_be32(mpll_func_cntl);
	table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
		cpu_to_be32(mpll_func_cntl_1);
	table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
		cpu_to_be32(mpll_func_cntl_2);
	table->ACPIState.levels[0].mclk.vMPLL_SS =
		cpu_to_be32(si_pi->clock_registers.mpll_ss1);
	table->ACPIState.levels[0].mclk.vMPLL_SS2 =
		cpu_to_be32(si_pi->clock_registers.mpll_ss2);

	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
		cpu_to_be32(spll_func_cntl);
	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
		cpu_to_be32(spll_func_cntl_2);
	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
		cpu_to_be32(spll_func_cntl_3);
	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
		cpu_to_be32(spll_func_cntl_4);

	/* Clocks are off in the ACPI state. */
	table->ACPIState.levels[0].mclk.mclk_value = 0;
	table->ACPIState.levels[0].sclk.sclk_value = 0;

	si_populate_mvdd_value(adev, 0, &table->ACPIState.levels[0].mvdd);

	if (eg_pi->dynamic_ac_timing)
		table->ACPIState.levels[0].ACIndex = 0;

	/* DPM2 throttling disabled. */
	table->ACPIState.levels[0].dpm2.MaxPS = 0;
	table->ACPIState.levels[0].dpm2.NearTDPDec = 0;
	table->ACPIState.levels[0].dpm2.AboveSafeInc = 0;
	table->ACPIState.levels[0].dpm2.BelowSafeInc = 0;
	table->ACPIState.levels[0].dpm2.PwrEfficiencyRatio = 0;

	reg = MIN_POWER_MASK | MAX_POWER_MASK;
	table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg);

	reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
	table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);

	return 0;
}
/*
 * Build the single-level ULV (ultra-low-voltage) SMC state from the
 * pre-computed ULV performance level.  Adds deep-sleep flags, optional
 * PCIe x1 narrowing, and marks the state DC-capable.
 * Returns the error from si_convert_power_level_to_smc(), else 0.
 */
static int si_populate_ulv_state(struct amdgpu_device *adev,
				 SISLANDS_SMC_SWSTATE *state)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	struct si_ulv_param *ulv = &si_pi->ulv;
	u32 sclk_in_sr = 1350; /* ??? */
	int ret;

	ret = si_convert_power_level_to_smc(adev, &ulv->pl,
					    &state->levels[0]);
	if (!ret) {
		if (eg_pi->sclk_deep_sleep) {
			/* Below the minimum deep-sleep frequency the SMC must
			 * bypass deep sleep rather than throttle. */
			if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
				state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
			else
				state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
		}
		if (ulv->one_pcie_lane_in_ulv)
			state->flags |= PPSMC_SWSTATE_FLAG_PCIE_X1;
		state->levels[0].arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX);
		state->levels[0].ACIndex = 1;
		state->levels[0].std_vddc = state->levels[0].vddc;
		state->levelCount = 1;

		state->flags |= PPSMC_SWSTATE_FLAG_DC;
	}

	return ret;
}
/*
 * Upload the MC arbiter DRAM timing set for the ULV performance level into
 * its dedicated arb slot in SMC SRAM, and publish the ULV voltage-change
 * delay to the SMC soft-register area.
 */
static int si_program_ulv_memory_timing_parameters(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	struct si_ulv_param *ulv = &si_pi->ulv;
	SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
	int ret;

	ret = si_populate_memory_timing_parameters(adev, &ulv->pl,
						   &arb_regs);
	if (ret)
		return ret;

	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ulv_volt_change_delay,
				   ulv->volt_change_delay);

	ret = amdgpu_si_copy_bytes_to_smc(adev,
					  si_pi->arb_table_start +
					  offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
					  sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * SISLANDS_ULV_STATE_ARB_INDEX,
					  (u8 *)&arb_regs,
					  sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
					  si_pi->sram_end);

	return ret;
}
/*
 * Set the MVDD split frequency (the MCLK boundary between low and high
 * MVDD) to a fixed 300 MHz (value in 10 kHz units).
 */
static void si_get_mvdd_configuration(struct amdgpu_device *adev)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);

	pi->mvdd_split_frequency = 30000;
}
/*
 * Build the complete SMC state table (voltage tables, system/thermal
 * flags, initial/ACPI/driver/ULV states, arb timings) and upload it to
 * SMC SRAM.  Called once during DPM enable.
 * Returns 0 on success or the first sub-step error.
 */
static int si_init_smc_table(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
	const struct si_ulv_param *ulv = &si_pi->ulv;
	SISLANDS_SMC_STATETABLE *table = &si_pi->smc_statetable;
	int ret;
	u32 lane_width;
	u32 vr_hot_gpio;

	si_populate_smc_voltage_tables(adev, table);

	switch (adev->pm.int_thermal_type) {
	case THERMAL_TYPE_SI:
	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
		break;
	case THERMAL_TYPE_NONE:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
		break;
	default:
		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
		break;
	}

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
		table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT) {
		/* Regulator-hot is not wired up on 0x6818/0x6819 boards. */
		if ((adev->pdev->device != 0x6818) && (adev->pdev->device != 0x6819))
			table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;
	}

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
		table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;

	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
		table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY)
		table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH;

	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE) {
		table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO;
		/* NOTE(review): the GPIO id is stored in backbias_response_time
		 * by the pptable parser — confirm against the pplib tables. */
		vr_hot_gpio = adev->pm.dpm.backbias_response_time;
		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_vr_hot_gpio,
					   vr_hot_gpio);
	}

	ret = si_populate_smc_initial_state(adev, amdgpu_boot_state, table);
	if (ret)
		return ret;

	ret = si_populate_smc_acpi_state(adev, table);
	if (ret)
		return ret;

	table->driverState = table->initialState;

	ret = si_do_program_memory_timing_parameters(adev, amdgpu_boot_state,
						     SISLANDS_INITIAL_STATE_ARB_INDEX);
	if (ret)
		return ret;

	if (ulv->supported && ulv->pl.vddc) {
		ret = si_populate_ulv_state(adev, &table->ULVState);
		if (ret)
			return ret;

		ret = si_program_ulv_memory_timing_parameters(adev);
		if (ret)
			return ret;

		WREG32(CG_ULV_CONTROL, ulv->cg_ulv_control);
		WREG32(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);

		lane_width = amdgpu_get_pcie_lanes(adev);
		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
	} else {
		/* ULV unusable: make the ULV slot a copy of the boot state. */
		table->ULVState = table->initialState;
	}

	return amdgpu_si_copy_bytes_to_smc(adev, si_pi->state_table_start,
					   (u8 *)table, sizeof(SISLANDS_SMC_STATETABLE),
					   si_pi->sram_end);
}
/*
 * Compute the SPLL register values for a target engine clock (CPU byte
 * order; callers convert to big-endian for the SMC).  Dividers come from
 * the VBIOS; the fractional feedback divider is engine_clock *
 * ref_div_total * post_div * 16384 / ref_clk.  Optionally programs engine
 * spread spectrum when enabled and a matching SS entry exists.
 */
static int si_calculate_sclk_params(struct amdgpu_device *adev,
				    u32 engine_clock,
				    SISLANDS_SMC_SCLK_VALUE *sclk)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	struct atom_clock_dividers dividers;
	u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
	u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
	u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
	u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
	u32 cg_spll_spread_spectrum = si_pi->clock_registers.cg_spll_spread_spectrum;
	u32 cg_spll_spread_spectrum_2 = si_pi->clock_registers.cg_spll_spread_spectrum_2;
	u64 tmp;
	u32 reference_clock = adev->clock.spll.reference_freq;
	u32 reference_divider;
	u32 fbdiv;
	int ret;

	ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
						 engine_clock, false, &dividers);
	if (ret)
		return ret;

	reference_divider = 1 + dividers.ref_div;

	/* Fractional fbdiv in 1/16384 units; 64-bit math avoids overflow. */
	tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16384;
	do_div(tmp, reference_clock);
	fbdiv = (u32) tmp;

	spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);
	spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
	spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);

	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
	spll_func_cntl_2 |= SCLK_MUX_SEL(2);

	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
	spll_func_cntl_3 |= SPLL_DITHEN;

	if (pi->sclk_ss) {
		struct amdgpu_atom_ss ss;
		u32 vco_freq = engine_clock * dividers.post_div;

		if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);

			cg_spll_spread_spectrum &= ~CLK_S_MASK;
			cg_spll_spread_spectrum |= CLK_S(clk_s);
			cg_spll_spread_spectrum |= SSEN;

			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
		}
	}

	sclk->sclk_value = engine_clock;
	sclk->vCG_SPLL_FUNC_CNTL = spll_func_cntl;
	sclk->vCG_SPLL_FUNC_CNTL_2 = spll_func_cntl_2;
	sclk->vCG_SPLL_FUNC_CNTL_3 = spll_func_cntl_3;
	sclk->vCG_SPLL_FUNC_CNTL_4 = spll_func_cntl_4;
	sclk->vCG_SPLL_SPREAD_SPECTRUM = cg_spll_spread_spectrum;
	sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cg_spll_spread_spectrum_2;

	return 0;
}
/*
 * Compute SPLL settings for engine_clock and store them into the SMC sclk
 * entry in big-endian byte order (the SMC's wire format).
 */
static int si_populate_sclk_value(struct amdgpu_device *adev,
				  u32 engine_clock,
				  SISLANDS_SMC_SCLK_VALUE *sclk)
{
	SISLANDS_SMC_SCLK_VALUE sclk_tmp;
	int ret;

	ret = si_calculate_sclk_params(adev, engine_clock, &sclk_tmp);
	if (!ret) {
		sclk->sclk_value = cpu_to_be32(sclk_tmp.sclk_value);
		sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL);
		sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_2);
		sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_3);
		sclk->vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_4);
		sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM);
		sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM_2);
	}

	return ret;
}
/*
 * Compute MPLL/DLL register values for a target memory clock and store
 * them into the SMC mclk entry (big-endian).  Dividers come from the
 * VBIOS (strobe_mode selects the strobe divider table); optional memory
 * spread spectrum and the DLL power state are folded in.
 */
static int si_populate_mclk_value(struct amdgpu_device *adev,
				  u32 engine_clock,
				  u32 memory_clock,
				  SISLANDS_SMC_MCLK_VALUE *mclk,
				  bool strobe_mode,
				  bool dll_state_on)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	u32 dll_cntl = si_pi->clock_registers.dll_cntl;
	u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
	u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
	u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
	u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
	u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
	u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
	u32 mpll_ss1 = si_pi->clock_registers.mpll_ss1;
	u32 mpll_ss2 = si_pi->clock_registers.mpll_ss2;
	struct atom_mpll_param mpll_param;
	int ret;

	ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
	if (ret)
		return ret;

	mpll_func_cntl &= ~BWCTRL_MASK;
	mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);

	mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
	mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
		CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);

	mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
	mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);

	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
		/* GDDR5 also needs the DQ-side post divider / yclk select. */
		mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
		mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
			YCLK_POST_DIV(mpll_param.post_div);
	}

	if (pi->mclk_ss) {
		struct amdgpu_atom_ss ss;
		u32 freq_nom;
		u32 tmp;
		u32 reference_clock = adev->clock.mpll.reference_freq;

		/* Nominal frequency: GDDR5 is QDR, other DRAM types DDR. */
		if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
			freq_nom = memory_clock * 4;
		else
			freq_nom = memory_clock * 2;

		tmp = freq_nom / reference_clock;
		/* NOTE(review): squared ratio per the SS formula — confirm
		 * against the upstream si_dpm spread-spectrum math. */
		tmp = tmp * tmp;
		if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
						     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
			u32 clks = reference_clock * 5 / ss.rate;
			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);

			mpll_ss1 &= ~CLKV_MASK;
			mpll_ss1 |= CLKV(clkv);

			mpll_ss2 &= ~CLKS_MASK;
			mpll_ss2 |= CLKS(clks);
		}
	}

	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
	mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);

	if (dll_state_on)
		mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
	else
		mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);

	/* SMC wire format: everything big-endian. */
	mclk->mclk_value = cpu_to_be32(memory_clock);
	mclk->vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl);
	mclk->vMPLL_FUNC_CNTL_1 = cpu_to_be32(mpll_func_cntl_1);
	mclk->vMPLL_FUNC_CNTL_2 = cpu_to_be32(mpll_func_cntl_2);
	mclk->vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
	mclk->vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
	mclk->vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
	mclk->vDLL_CNTL = cpu_to_be32(dll_cntl);
	mclk->vMPLL_SS = cpu_to_be32(mpll_ss1);
	mclk->vMPLL_SS2 = cpu_to_be32(mpll_ss2);

	return 0;
}
/*
 * Fill the bSP (behavior set point) field of every SMC level: all levels
 * but the highest use the normal dsp value; the top level uses psp.
 */
static void si_populate_smc_sp(struct amdgpu_device *adev,
			       struct amdgpu_ps *amdgpu_state,
			       SISLANDS_SMC_SWSTATE *smc_state)
{
	struct si_ps *ps = si_get_ps(amdgpu_state);
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	int i;

	for (i = 0; i < ps->performance_level_count - 1; i++)
		smc_state->levels[i].bSP = cpu_to_be32(pi->dsp);

	smc_state->levels[ps->performance_level_count - 1].bSP =
		cpu_to_be32(pi->psp);
}
/*
 * Convert one driver performance level (rv7xx_pl) into an SMC hardware
 * performance level: PCIe gen, SCLK/MCLK PLL settings, MC flags (stutter,
 * EDC, strobe/DLL state), and all voltage entries.
 * Returns 0 on success or the first sub-step error.
 */
static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
					 struct rv7xx_pl *pl,
					 SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	int ret;
	bool dll_state_on;
	u16 std_vddc;
	bool gmc_pg = false;

	if (eg_pi->pcie_performance_request &&
	    (si_pi->force_pcie_gen != AMDGPU_PCIE_GEN_INVALID))
		level->gen2PCIE = (u8)si_pi->force_pcie_gen;
	else
		level->gen2PCIE = (u8)pl->pcie_gen;

	ret = si_populate_sclk_value(adev, pl->sclk, &level->sclk);
	if (ret)
		return ret;

	level->mcFlags = 0;

	/* MC stutter only when below the threshold, UVD idle, display
	 * stutter enabled, and at most two active CRTCs. */
	if (pi->mclk_stutter_mode_threshold &&
	    (pl->mclk <= pi->mclk_stutter_mode_threshold) &&
	    !eg_pi->uvd_enabled &&
	    (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
	    (adev->pm.dpm.new_active_crtc_count <= 2)) {
		level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN;

		/* gmc_pg is currently hard-wired false. */
		if (gmc_pg)
			level->mcFlags |= SISLANDS_SMC_MC_PG_EN;
	}

	if (adev->mc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
		if (pl->mclk > pi->mclk_edc_enable_threshold)
			level->mcFlags |= SISLANDS_SMC_MC_EDC_RD_FLAG;

		if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
			level->mcFlags |= SISLANDS_SMC_MC_EDC_WR_FLAG;

		level->strobeMode = si_get_strobe_mode_settings(adev, pl->mclk);

		if (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) {
			/* DLL state depends on the MCLK frequency ratio vs.
			 * the fuse value in MC_SEQ_MISC7[19:16]. */
			if (si_get_mclk_frequency_ratio(pl->mclk, true) >=
			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
				dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
			else
				dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
		} else {
			dll_state_on = false;
		}
	} else {
		level->strobeMode = si_get_strobe_mode_settings(adev,
								pl->mclk);

		dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
	}

	ret = si_populate_mclk_value(adev,
				     pl->sclk,
				     pl->mclk,
				     &level->mclk,
				     (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) != 0, dll_state_on);
	if (ret)
		return ret;

	ret = si_populate_voltage_value(adev,
					&eg_pi->vddc_voltage_table,
					pl->vddc, &level->vddc);
	if (ret)
		return ret;

	ret = si_get_std_voltage_value(adev, &level->vddc, &std_vddc);
	if (ret)
		return ret;

	ret = si_populate_std_voltage_value(adev, std_vddc,
					    level->vddc.index, &level->std_vddc);
	if (ret)
		return ret;

	if (eg_pi->vddci_control) {
		ret = si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
						pl->vddci, &level->vddci);
		if (ret)
			return ret;
	}

	if (si_pi->vddc_phase_shed_control) {
		ret = si_populate_phase_shedding_value(adev,
						       &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
						       pl->vddc, pl->sclk, pl->mclk,
						       &level->vddc);
		if (ret)
			return ret;
	}

	level->MaxPoweredUpCU = si_pi->max_cu;

	ret = si_populate_mvdd_value(adev, pl->mclk, &level->mvdd);

	return ret;
}
/*
 * Populate the per-level aT (transition timing) values: CG_R is the
 * down-transition window, CG_L the up-transition window, both scaled by
 * the behavior set point.  States with one level get a trivial window;
 * states with 9+ levels are rejected (-EINVAL).
 */
static int si_populate_smc_t(struct amdgpu_device *adev,
			     struct amdgpu_ps *amdgpu_state,
			     SISLANDS_SMC_SWSTATE *smc_state)
{
	struct rv7xx_power_info *pi = rv770_get_pi(adev);
	struct si_ps *state = si_get_ps(amdgpu_state);
	u32 a_t;
	u32 t_l, t_h;
	u32 high_bsp;
	int i, ret;

	if (state->performance_level_count >= 9)
		return -EINVAL;

	if (state->performance_level_count < 2) {
		a_t = CG_R(0xffff) | CG_L(0);
		smc_state->levels[0].aT = cpu_to_be32(a_t);
		return 0;
	}

	smc_state->levels[0].aT = cpu_to_be32(0);

	for (i = 0; i <= state->performance_level_count - 2; i++) {
		ret = r600_calculate_at(
			(50 / SISLANDS_MAX_HARDWARE_POWERLEVELS) * 100 * (i + 1),
			100 * R600_AH_DFLT,
			state->performance_levels[i + 1].sclk,
			state->performance_levels[i].sclk,
			&t_l,
			&t_h);

		if (ret) {
			/* Fall back to a fixed hysteresis window. */
			t_h = (i + 1) * 1000 - 50 * R600_AH_DFLT;
			t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT;
		}

		/* Merge the down window into this level's aT. */
		a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK;
		a_t |= CG_R(t_l * pi->bsp / 20000);
		smc_state->levels[i].aT = cpu_to_be32(a_t);

		/* The top level uses the performance set point. */
		high_bsp = (i == state->performance_level_count - 2) ?
			pi->pbsp : pi->bsp;
		a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000);
		smc_state->levels[i + 1].aT = cpu_to_be32(a_t);
	}

	return 0;
}
/*
 * Tell the SMC to leave ULV, if ULV is supported on this board.
 * Returns 0 on success (or when ULV is unsupported), -EINVAL if the SMC
 * did not acknowledge.
 */
static int si_disable_ulv(struct amdgpu_device *adev)
{
	struct si_power_info *si_pi = si_get_pi(adev);
	struct si_ulv_param *ulv = &si_pi->ulv;

	if (ulv->supported)
		return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
			0 : -EINVAL;

	return 0;
}
/*
 * Decide whether a power state may run in ULV: the state's MCLK must match
 * the ULV level, the ULV voltage must satisfy the display-clock voltage
 * dependency, and UVD must be idle (vclk == dclk == 0).
 */
static bool si_is_state_ulv_compatible(struct amdgpu_device *adev,
				       struct amdgpu_ps *amdgpu_state)
{
	const struct si_power_info *si_pi = si_get_pi(adev);
	const struct si_ulv_param *ulv = &si_pi->ulv;
	const struct si_ps *state = si_get_ps(amdgpu_state);
	int i;

	if (state->performance_levels[0].mclk != ulv->pl.mclk)
		return false;

	/* XXX validate against display requirements! */

	/* The ULV vddc must cover the voltage required for the current
	 * display clock. */
	for (i = 0; i < adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count; i++) {
		if (adev->clock.current_dispclk <=
		    adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].clk) {
			if (ulv->pl.vddc <
			    adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].v)
				return false;
		}
	}

	/* No ULV while UVD clocks are running. */
	if ((amdgpu_state->vclk != 0) || (amdgpu_state->dclk != 0))
		return false;

	return true;
}
/*
 * Enable ULV for the new power state when supported and compatible.
 * Returns -EINVAL only if the SMC rejects the enable message; otherwise 0.
 */
static int si_set_power_state_conditionally_enable_ulv(struct amdgpu_device *adev,
						       struct amdgpu_ps *amdgpu_new_state)
{
	const struct si_power_info *si_pi = si_get_pi(adev);
	const struct si_ulv_param *ulv = &si_pi->ulv;

	if (ulv->supported) {
		if (si_is_state_ulv_compatible(adev, amdgpu_new_state))
			return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
				0 : -EINVAL;
	}
	return 0;
}
/*
 * Convert a full driver power state into an SMC software state: per-level
 * deep-sleep flags, hardware level conversion, arb slot indices, display
 * watermarks, AC timing indices, and finally SP and aT timing values.
 * Also tracks UVD activity and disables power containment / SQ ramping on
 * their respective population failures rather than failing the switch.
 */
static int si_convert_power_state_to_smc(struct amdgpu_device *adev,
					 struct amdgpu_ps *amdgpu_state,
					 SISLANDS_SMC_SWSTATE *smc_state)
{
	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
	struct ni_power_info *ni_pi = ni_get_pi(adev);
	struct si_power_info *si_pi = si_get_pi(adev);
	struct si_ps *state = si_get_ps(amdgpu_state);
	int i, ret;
	u32 threshold;
	u32 sclk_in_sr = 1350; /* ??? */

	if (state->performance_level_count > SISLANDS_MAX_HARDWARE_POWERLEVELS)
		return -EINVAL;

	/* Watermark threshold: 100% of the top level's sclk. */
	threshold = state->performance_levels[state->performance_level_count-1].sclk * 100 / 100;

	if (amdgpu_state->vclk && amdgpu_state->dclk) {
		eg_pi->uvd_enabled = true;
		if (eg_pi->smu_uvd_hs)
			smc_state->flags |= PPSMC_SWSTATE_FLAG_UVD;
	} else {
		eg_pi->uvd_enabled = false;
	}

	if (state->dc_compatible)
		smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;

	smc_state->levelCount = 0;
	for (i = 0; i < state->performance_level_count; i++) {
		if (eg_pi->sclk_deep_sleep) {
			/* Deep sleep on level 0, or all levels if allowed. */
			if ((i == 0) || si_pi->sclk_deep_sleep_above_low) {
				if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
					smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
				else
					smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
			}
		}

		ret = si_convert_power_level_to_smc(adev, &state->performance_levels[i],
						    &smc_state->levels[i]);
		smc_state->levels[i].arbRefreshState =
			(u8)(SISLANDS_DRIVER_STATE_ARB_INDEX + i);

		if (ret)
			return ret;

		if (ni_pi->enable_power_containment)
			smc_state->levels[i].displayWatermark =
				(state->performance_levels[i].sclk < threshold) ?
				PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
		else
			smc_state->levels[i].displayWatermark = (i < 2) ?
				PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;

		if (eg_pi->dynamic_ac_timing)
			smc_state->levels[i].ACIndex = SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i;
		else
			smc_state->levels[i].ACIndex = 0;

		smc_state->levelCount++;
	}

	si_write_smc_soft_register(adev,
				   SI_SMC_SOFT_REGISTER_watermark_threshold,
				   threshold / 512);

	si_populate_smc_sp(adev, amdgpu_state, smc_state);

	/* Failures here degrade features instead of failing the state. */
	ret = si_populate_power_containment_values(adev, amdgpu_state, smc_state);
	if (ret)
		ni_pi->enable_power_containment = false;

	ret = si_populate_sq_ramping_values(adev, amdgpu_state, smc_state);
	if (ret)
		ni_pi->enable_sq_ramping = false;

	return si_populate_smc_t(adev, amdgpu_state, smc_state);
}
5743 static int si_upload_sw_state(struct amdgpu_device
*adev
,
5744 struct amdgpu_ps
*amdgpu_new_state
)
5746 struct si_power_info
*si_pi
= si_get_pi(adev
);
5747 struct si_ps
*new_state
= si_get_ps(amdgpu_new_state
);
5749 u32 address
= si_pi
->state_table_start
+
5750 offsetof(SISLANDS_SMC_STATETABLE
, driverState
);
5751 u32 state_size
= sizeof(SISLANDS_SMC_SWSTATE
) +
5752 ((new_state
->performance_level_count
- 1) *
5753 sizeof(SISLANDS_SMC_HW_PERFORMANCE_LEVEL
));
5754 SISLANDS_SMC_SWSTATE
*smc_state
= &si_pi
->smc_statetable
.driverState
;
5756 memset(smc_state
, 0, state_size
);
5758 ret
= si_convert_power_state_to_smc(adev
, amdgpu_new_state
, smc_state
);
5762 return amdgpu_si_copy_bytes_to_smc(adev
, address
, (u8
*)smc_state
,
5763 state_size
, si_pi
->sram_end
);
5766 static int si_upload_ulv_state(struct amdgpu_device
*adev
)
5768 struct si_power_info
*si_pi
= si_get_pi(adev
);
5769 struct si_ulv_param
*ulv
= &si_pi
->ulv
;
5772 if (ulv
->supported
&& ulv
->pl
.vddc
) {
5773 u32 address
= si_pi
->state_table_start
+
5774 offsetof(SISLANDS_SMC_STATETABLE
, ULVState
);
5775 SISLANDS_SMC_SWSTATE
*smc_state
= &si_pi
->smc_statetable
.ULVState
;
5776 u32 state_size
= sizeof(SISLANDS_SMC_SWSTATE
);
5778 memset(smc_state
, 0, state_size
);
5780 ret
= si_populate_ulv_state(adev
, smc_state
);
5782 ret
= amdgpu_si_copy_bytes_to_smc(adev
, address
, (u8
*)smc_state
,
5783 state_size
, si_pi
->sram_end
);
5789 static int si_upload_smc_data(struct amdgpu_device
*adev
)
5791 struct amdgpu_crtc
*amdgpu_crtc
= NULL
;
5794 if (adev
->pm
.dpm
.new_active_crtc_count
== 0)
5797 for (i
= 0; i
< adev
->mode_info
.num_crtc
; i
++) {
5798 if (adev
->pm
.dpm
.new_active_crtcs
& (1 << i
)) {
5799 amdgpu_crtc
= adev
->mode_info
.crtcs
[i
];
5804 if (amdgpu_crtc
== NULL
)
5807 if (amdgpu_crtc
->line_time
<= 0)
5810 if (si_write_smc_soft_register(adev
,
5811 SI_SMC_SOFT_REGISTER_crtc_index
,
5812 amdgpu_crtc
->crtc_id
) != PPSMC_Result_OK
)
5815 if (si_write_smc_soft_register(adev
,
5816 SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min
,
5817 amdgpu_crtc
->wm_high
/ amdgpu_crtc
->line_time
) != PPSMC_Result_OK
)
5820 if (si_write_smc_soft_register(adev
,
5821 SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max
,
5822 amdgpu_crtc
->wm_low
/ amdgpu_crtc
->line_time
) != PPSMC_Result_OK
)
5828 static int si_set_mc_special_registers(struct amdgpu_device
*adev
,
5829 struct si_mc_reg_table
*table
)
5834 for (i
= 0, j
= table
->last
; i
< table
->last
; i
++) {
5835 if (j
>= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE
)
5837 switch (table
->mc_reg_address
[i
].s1
) {
5839 temp_reg
= RREG32(MC_PMG_CMD_EMRS
);
5840 table
->mc_reg_address
[j
].s1
= MC_PMG_CMD_EMRS
;
5841 table
->mc_reg_address
[j
].s0
= MC_SEQ_PMG_CMD_EMRS_LP
;
5842 for (k
= 0; k
< table
->num_entries
; k
++)
5843 table
->mc_reg_table_entry
[k
].mc_data
[j
] =
5844 ((temp_reg
& 0xffff0000)) |
5845 ((table
->mc_reg_table_entry
[k
].mc_data
[i
] & 0xffff0000) >> 16);
5847 if (j
>= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE
)
5850 temp_reg
= RREG32(MC_PMG_CMD_MRS
);
5851 table
->mc_reg_address
[j
].s1
= MC_PMG_CMD_MRS
;
5852 table
->mc_reg_address
[j
].s0
= MC_SEQ_PMG_CMD_MRS_LP
;
5853 for (k
= 0; k
< table
->num_entries
; k
++) {
5854 table
->mc_reg_table_entry
[k
].mc_data
[j
] =
5855 (temp_reg
& 0xffff0000) |
5856 (table
->mc_reg_table_entry
[k
].mc_data
[i
] & 0x0000ffff);
5857 if (adev
->mc
.vram_type
!= AMDGPU_VRAM_TYPE_GDDR5
)
5858 table
->mc_reg_table_entry
[k
].mc_data
[j
] |= 0x100;
5861 if (j
>= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE
)
5864 if (adev
->mc
.vram_type
!= AMDGPU_VRAM_TYPE_GDDR5
) {
5865 table
->mc_reg_address
[j
].s1
= MC_PMG_AUTO_CMD
;
5866 table
->mc_reg_address
[j
].s0
= MC_PMG_AUTO_CMD
;
5867 for (k
= 0; k
< table
->num_entries
; k
++)
5868 table
->mc_reg_table_entry
[k
].mc_data
[j
] =
5869 (table
->mc_reg_table_entry
[k
].mc_data
[i
] & 0xffff0000) >> 16;
5871 if (j
>= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE
)
5875 case MC_SEQ_RESERVE_M
:
5876 temp_reg
= RREG32(MC_PMG_CMD_MRS1
);
5877 table
->mc_reg_address
[j
].s1
= MC_PMG_CMD_MRS1
;
5878 table
->mc_reg_address
[j
].s0
= MC_SEQ_PMG_CMD_MRS1_LP
;
5879 for(k
= 0; k
< table
->num_entries
; k
++)
5880 table
->mc_reg_table_entry
[k
].mc_data
[j
] =
5881 (temp_reg
& 0xffff0000) |
5882 (table
->mc_reg_table_entry
[k
].mc_data
[i
] & 0x0000ffff);
5884 if (j
>= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE
)
5897 static bool si_check_s0_mc_reg_index(u16 in_reg
, u16
*out_reg
)
5901 case MC_SEQ_RAS_TIMING
:
5902 *out_reg
= MC_SEQ_RAS_TIMING_LP
;
5904 case MC_SEQ_CAS_TIMING
:
5905 *out_reg
= MC_SEQ_CAS_TIMING_LP
;
5907 case MC_SEQ_MISC_TIMING
:
5908 *out_reg
= MC_SEQ_MISC_TIMING_LP
;
5910 case MC_SEQ_MISC_TIMING2
:
5911 *out_reg
= MC_SEQ_MISC_TIMING2_LP
;
5913 case MC_SEQ_RD_CTL_D0
:
5914 *out_reg
= MC_SEQ_RD_CTL_D0_LP
;
5916 case MC_SEQ_RD_CTL_D1
:
5917 *out_reg
= MC_SEQ_RD_CTL_D1_LP
;
5919 case MC_SEQ_WR_CTL_D0
:
5920 *out_reg
= MC_SEQ_WR_CTL_D0_LP
;
5922 case MC_SEQ_WR_CTL_D1
:
5923 *out_reg
= MC_SEQ_WR_CTL_D1_LP
;
5925 case MC_PMG_CMD_EMRS
:
5926 *out_reg
= MC_SEQ_PMG_CMD_EMRS_LP
;
5928 case MC_PMG_CMD_MRS
:
5929 *out_reg
= MC_SEQ_PMG_CMD_MRS_LP
;
5931 case MC_PMG_CMD_MRS1
:
5932 *out_reg
= MC_SEQ_PMG_CMD_MRS1_LP
;
5934 case MC_SEQ_PMG_TIMING
:
5935 *out_reg
= MC_SEQ_PMG_TIMING_LP
;
5937 case MC_PMG_CMD_MRS2
:
5938 *out_reg
= MC_SEQ_PMG_CMD_MRS2_LP
;
5940 case MC_SEQ_WR_CTL_2
:
5941 *out_reg
= MC_SEQ_WR_CTL_2_LP
;
5951 static void si_set_valid_flag(struct si_mc_reg_table
*table
)
5955 for (i
= 0; i
< table
->last
; i
++) {
5956 for (j
= 1; j
< table
->num_entries
; j
++) {
5957 if (table
->mc_reg_table_entry
[j
-1].mc_data
[i
] != table
->mc_reg_table_entry
[j
].mc_data
[i
]) {
5958 table
->valid_flag
|= 1 << i
;
5965 static void si_set_s0_mc_reg_index(struct si_mc_reg_table
*table
)
5970 for (i
= 0; i
< table
->last
; i
++)
5971 table
->mc_reg_address
[i
].s0
= si_check_s0_mc_reg_index(table
->mc_reg_address
[i
].s1
, &address
) ?
5972 address
: table
->mc_reg_address
[i
].s1
;
5976 static int si_copy_vbios_mc_reg_table(struct atom_mc_reg_table
*table
,
5977 struct si_mc_reg_table
*si_table
)
5981 if (table
->last
> SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE
)
5983 if (table
->num_entries
> MAX_AC_TIMING_ENTRIES
)
5986 for (i
= 0; i
< table
->last
; i
++)
5987 si_table
->mc_reg_address
[i
].s1
= table
->mc_reg_address
[i
].s1
;
5988 si_table
->last
= table
->last
;
5990 for (i
= 0; i
< table
->num_entries
; i
++) {
5991 si_table
->mc_reg_table_entry
[i
].mclk_max
=
5992 table
->mc_reg_table_entry
[i
].mclk_max
;
5993 for (j
= 0; j
< table
->last
; j
++) {
5994 si_table
->mc_reg_table_entry
[i
].mc_data
[j
] =
5995 table
->mc_reg_table_entry
[i
].mc_data
[j
];
5998 si_table
->num_entries
= table
->num_entries
;
6003 static int si_initialize_mc_reg_table(struct amdgpu_device
*adev
)
6005 struct si_power_info
*si_pi
= si_get_pi(adev
);
6006 struct atom_mc_reg_table
*table
;
6007 struct si_mc_reg_table
*si_table
= &si_pi
->mc_reg_table
;
6008 u8 module_index
= rv770_get_memory_module_index(adev
);
6011 table
= kzalloc(sizeof(struct atom_mc_reg_table
), GFP_KERNEL
);
6015 WREG32(MC_SEQ_RAS_TIMING_LP
, RREG32(MC_SEQ_RAS_TIMING
));
6016 WREG32(MC_SEQ_CAS_TIMING_LP
, RREG32(MC_SEQ_CAS_TIMING
));
6017 WREG32(MC_SEQ_MISC_TIMING_LP
, RREG32(MC_SEQ_MISC_TIMING
));
6018 WREG32(MC_SEQ_MISC_TIMING2_LP
, RREG32(MC_SEQ_MISC_TIMING2
));
6019 WREG32(MC_SEQ_PMG_CMD_EMRS_LP
, RREG32(MC_PMG_CMD_EMRS
));
6020 WREG32(MC_SEQ_PMG_CMD_MRS_LP
, RREG32(MC_PMG_CMD_MRS
));
6021 WREG32(MC_SEQ_PMG_CMD_MRS1_LP
, RREG32(MC_PMG_CMD_MRS1
));
6022 WREG32(MC_SEQ_WR_CTL_D0_LP
, RREG32(MC_SEQ_WR_CTL_D0
));
6023 WREG32(MC_SEQ_WR_CTL_D1_LP
, RREG32(MC_SEQ_WR_CTL_D1
));
6024 WREG32(MC_SEQ_RD_CTL_D0_LP
, RREG32(MC_SEQ_RD_CTL_D0
));
6025 WREG32(MC_SEQ_RD_CTL_D1_LP
, RREG32(MC_SEQ_RD_CTL_D1
));
6026 WREG32(MC_SEQ_PMG_TIMING_LP
, RREG32(MC_SEQ_PMG_TIMING
));
6027 WREG32(MC_SEQ_PMG_CMD_MRS2_LP
, RREG32(MC_PMG_CMD_MRS2
));
6028 WREG32(MC_SEQ_WR_CTL_2_LP
, RREG32(MC_SEQ_WR_CTL_2
));
6030 ret
= amdgpu_atombios_init_mc_reg_table(adev
, module_index
, table
);
6034 ret
= si_copy_vbios_mc_reg_table(table
, si_table
);
6038 si_set_s0_mc_reg_index(si_table
);
6040 ret
= si_set_mc_special_registers(adev
, si_table
);
6044 si_set_valid_flag(si_table
);
6053 static void si_populate_mc_reg_addresses(struct amdgpu_device
*adev
,
6054 SMC_SIslands_MCRegisters
*mc_reg_table
)
6056 struct si_power_info
*si_pi
= si_get_pi(adev
);
6059 for (i
= 0, j
= 0; j
< si_pi
->mc_reg_table
.last
; j
++) {
6060 if (si_pi
->mc_reg_table
.valid_flag
& (1 << j
)) {
6061 if (i
>= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE
)
6063 mc_reg_table
->address
[i
].s0
=
6064 cpu_to_be16(si_pi
->mc_reg_table
.mc_reg_address
[j
].s0
);
6065 mc_reg_table
->address
[i
].s1
=
6066 cpu_to_be16(si_pi
->mc_reg_table
.mc_reg_address
[j
].s1
);
6070 mc_reg_table
->last
= (u8
)i
;
6073 static void si_convert_mc_registers(const struct si_mc_reg_entry
*entry
,
6074 SMC_SIslands_MCRegisterSet
*data
,
6075 u32 num_entries
, u32 valid_flag
)
6079 for(i
= 0, j
= 0; j
< num_entries
; j
++) {
6080 if (valid_flag
& (1 << j
)) {
6081 data
->value
[i
] = cpu_to_be32(entry
->mc_data
[j
]);
6087 static void si_convert_mc_reg_table_entry_to_smc(struct amdgpu_device
*adev
,
6088 struct rv7xx_pl
*pl
,
6089 SMC_SIslands_MCRegisterSet
*mc_reg_table_data
)
6091 struct si_power_info
*si_pi
= si_get_pi(adev
);
6094 for (i
= 0; i
< si_pi
->mc_reg_table
.num_entries
; i
++) {
6095 if (pl
->mclk
<= si_pi
->mc_reg_table
.mc_reg_table_entry
[i
].mclk_max
)
6099 if ((i
== si_pi
->mc_reg_table
.num_entries
) && (i
> 0))
6102 si_convert_mc_registers(&si_pi
->mc_reg_table
.mc_reg_table_entry
[i
],
6103 mc_reg_table_data
, si_pi
->mc_reg_table
.last
,
6104 si_pi
->mc_reg_table
.valid_flag
);
6107 static void si_convert_mc_reg_table_to_smc(struct amdgpu_device
*adev
,
6108 struct amdgpu_ps
*amdgpu_state
,
6109 SMC_SIslands_MCRegisters
*mc_reg_table
)
6111 struct si_ps
*state
= si_get_ps(amdgpu_state
);
6114 for (i
= 0; i
< state
->performance_level_count
; i
++) {
6115 si_convert_mc_reg_table_entry_to_smc(adev
,
6116 &state
->performance_levels
[i
],
6117 &mc_reg_table
->data
[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT
+ i
]);
6121 static int si_populate_mc_reg_table(struct amdgpu_device
*adev
,
6122 struct amdgpu_ps
*amdgpu_boot_state
)
6124 struct si_ps
*boot_state
= si_get_ps(amdgpu_boot_state
);
6125 struct si_power_info
*si_pi
= si_get_pi(adev
);
6126 struct si_ulv_param
*ulv
= &si_pi
->ulv
;
6127 SMC_SIslands_MCRegisters
*smc_mc_reg_table
= &si_pi
->smc_mc_reg_table
;
6129 memset(smc_mc_reg_table
, 0, sizeof(SMC_SIslands_MCRegisters
));
6131 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_seq_index
, 1);
6133 si_populate_mc_reg_addresses(adev
, smc_mc_reg_table
);
6135 si_convert_mc_reg_table_entry_to_smc(adev
, &boot_state
->performance_levels
[0],
6136 &smc_mc_reg_table
->data
[SISLANDS_MCREGISTERTABLE_INITIAL_SLOT
]);
6138 si_convert_mc_registers(&si_pi
->mc_reg_table
.mc_reg_table_entry
[0],
6139 &smc_mc_reg_table
->data
[SISLANDS_MCREGISTERTABLE_ACPI_SLOT
],
6140 si_pi
->mc_reg_table
.last
,
6141 si_pi
->mc_reg_table
.valid_flag
);
6143 if (ulv
->supported
&& ulv
->pl
.vddc
!= 0)
6144 si_convert_mc_reg_table_entry_to_smc(adev
, &ulv
->pl
,
6145 &smc_mc_reg_table
->data
[SISLANDS_MCREGISTERTABLE_ULV_SLOT
]);
6147 si_convert_mc_registers(&si_pi
->mc_reg_table
.mc_reg_table_entry
[0],
6148 &smc_mc_reg_table
->data
[SISLANDS_MCREGISTERTABLE_ULV_SLOT
],
6149 si_pi
->mc_reg_table
.last
,
6150 si_pi
->mc_reg_table
.valid_flag
);
6152 si_convert_mc_reg_table_to_smc(adev
, amdgpu_boot_state
, smc_mc_reg_table
);
6154 return amdgpu_si_copy_bytes_to_smc(adev
, si_pi
->mc_reg_table_start
,
6155 (u8
*)smc_mc_reg_table
,
6156 sizeof(SMC_SIslands_MCRegisters
), si_pi
->sram_end
);
6159 static int si_upload_mc_reg_table(struct amdgpu_device
*adev
,
6160 struct amdgpu_ps
*amdgpu_new_state
)
6162 struct si_ps
*new_state
= si_get_ps(amdgpu_new_state
);
6163 struct si_power_info
*si_pi
= si_get_pi(adev
);
6164 u32 address
= si_pi
->mc_reg_table_start
+
6165 offsetof(SMC_SIslands_MCRegisters
,
6166 data
[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT
]);
6167 SMC_SIslands_MCRegisters
*smc_mc_reg_table
= &si_pi
->smc_mc_reg_table
;
6169 memset(smc_mc_reg_table
, 0, sizeof(SMC_SIslands_MCRegisters
));
6171 si_convert_mc_reg_table_to_smc(adev
, amdgpu_new_state
, smc_mc_reg_table
);
6173 return amdgpu_si_copy_bytes_to_smc(adev
, address
,
6174 (u8
*)&smc_mc_reg_table
->data
[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT
],
6175 sizeof(SMC_SIslands_MCRegisterSet
) * new_state
->performance_level_count
,
6179 static void si_enable_voltage_control(struct amdgpu_device
*adev
, bool enable
)
6182 WREG32_P(GENERAL_PWRMGT
, VOLT_PWRMGT_EN
, ~VOLT_PWRMGT_EN
);
6184 WREG32_P(GENERAL_PWRMGT
, 0, ~VOLT_PWRMGT_EN
);
6187 static enum amdgpu_pcie_gen
si_get_maximum_link_speed(struct amdgpu_device
*adev
,
6188 struct amdgpu_ps
*amdgpu_state
)
6190 struct si_ps
*state
= si_get_ps(amdgpu_state
);
6192 u16 pcie_speed
, max_speed
= 0;
6194 for (i
= 0; i
< state
->performance_level_count
; i
++) {
6195 pcie_speed
= state
->performance_levels
[i
].pcie_gen
;
6196 if (max_speed
< pcie_speed
)
6197 max_speed
= pcie_speed
;
6202 static u16
si_get_current_pcie_speed(struct amdgpu_device
*adev
)
6206 speed_cntl
= RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL
) & LC_CURRENT_DATA_RATE_MASK
;
6207 speed_cntl
>>= LC_CURRENT_DATA_RATE_SHIFT
;
6209 return (u16
)speed_cntl
;
6212 static void si_request_link_speed_change_before_state_change(struct amdgpu_device
*adev
,
6213 struct amdgpu_ps
*amdgpu_new_state
,
6214 struct amdgpu_ps
*amdgpu_current_state
)
6216 struct si_power_info
*si_pi
= si_get_pi(adev
);
6217 enum amdgpu_pcie_gen target_link_speed
= si_get_maximum_link_speed(adev
, amdgpu_new_state
);
6218 enum amdgpu_pcie_gen current_link_speed
;
6220 if (si_pi
->force_pcie_gen
== AMDGPU_PCIE_GEN_INVALID
)
6221 current_link_speed
= si_get_maximum_link_speed(adev
, amdgpu_current_state
);
6223 current_link_speed
= si_pi
->force_pcie_gen
;
6225 si_pi
->force_pcie_gen
= AMDGPU_PCIE_GEN_INVALID
;
6226 si_pi
->pspp_notify_required
= false;
6227 if (target_link_speed
> current_link_speed
) {
6228 switch (target_link_speed
) {
6229 #if defined(CONFIG_ACPI)
6230 case AMDGPU_PCIE_GEN3
:
6231 if (amdgpu_acpi_pcie_performance_request(adev
, PCIE_PERF_REQ_PECI_GEN3
, false) == 0)
6233 si_pi
->force_pcie_gen
= AMDGPU_PCIE_GEN2
;
6234 if (current_link_speed
== AMDGPU_PCIE_GEN2
)
6236 case AMDGPU_PCIE_GEN2
:
6237 if (amdgpu_acpi_pcie_performance_request(adev
, PCIE_PERF_REQ_PECI_GEN2
, false) == 0)
6241 si_pi
->force_pcie_gen
= si_get_current_pcie_speed(adev
);
6245 if (target_link_speed
< current_link_speed
)
6246 si_pi
->pspp_notify_required
= true;
6250 static void si_notify_link_speed_change_after_state_change(struct amdgpu_device
*adev
,
6251 struct amdgpu_ps
*amdgpu_new_state
,
6252 struct amdgpu_ps
*amdgpu_current_state
)
6254 struct si_power_info
*si_pi
= si_get_pi(adev
);
6255 enum amdgpu_pcie_gen target_link_speed
= si_get_maximum_link_speed(adev
, amdgpu_new_state
);
6258 if (si_pi
->pspp_notify_required
) {
6259 if (target_link_speed
== AMDGPU_PCIE_GEN3
)
6260 request
= PCIE_PERF_REQ_PECI_GEN3
;
6261 else if (target_link_speed
== AMDGPU_PCIE_GEN2
)
6262 request
= PCIE_PERF_REQ_PECI_GEN2
;
6264 request
= PCIE_PERF_REQ_PECI_GEN1
;
6266 if ((request
== PCIE_PERF_REQ_PECI_GEN1
) &&
6267 (si_get_current_pcie_speed(adev
) > 0))
6270 #if defined(CONFIG_ACPI)
6271 amdgpu_acpi_pcie_performance_request(adev
, request
, false);
6277 static int si_ds_request(struct amdgpu_device
*adev
,
6278 bool ds_status_on
, u32 count_write
)
6280 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
6282 if (eg_pi
->sclk_deep_sleep
) {
6284 return (amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_CancelThrottleOVRDSCLKDS
) ==
6288 return (amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_ThrottleOVRDSCLKDS
) ==
6289 PPSMC_Result_OK
) ? 0 : -EINVAL
;
6295 static void si_set_max_cu_value(struct amdgpu_device
*adev
)
6297 struct si_power_info
*si_pi
= si_get_pi(adev
);
6299 if (adev
->asic_type
== CHIP_VERDE
) {
6300 switch (adev
->pdev
->device
) {
6336 static int si_patch_single_dependency_table_based_on_leakage(struct amdgpu_device
*adev
,
6337 struct amdgpu_clock_voltage_dependency_table
*table
)
6341 u16 leakage_voltage
;
6344 for (i
= 0; i
< table
->count
; i
++) {
6345 switch (si_get_leakage_voltage_from_leakage_index(adev
,
6346 table
->entries
[i
].v
,
6347 &leakage_voltage
)) {
6349 table
->entries
[i
].v
= leakage_voltage
;
6359 for (j
= (table
->count
- 2); j
>= 0; j
--) {
6360 table
->entries
[j
].v
= (table
->entries
[j
].v
<= table
->entries
[j
+ 1].v
) ?
6361 table
->entries
[j
].v
: table
->entries
[j
+ 1].v
;
6367 static int si_patch_dependency_tables_based_on_leakage(struct amdgpu_device
*adev
)
6371 ret
= si_patch_single_dependency_table_based_on_leakage(adev
,
6372 &adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_sclk
);
6374 DRM_ERROR("Could not patch vddc_on_sclk leakage table\n");
6375 ret
= si_patch_single_dependency_table_based_on_leakage(adev
,
6376 &adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_mclk
);
6378 DRM_ERROR("Could not patch vddc_on_mclk leakage table\n");
6379 ret
= si_patch_single_dependency_table_based_on_leakage(adev
,
6380 &adev
->pm
.dpm
.dyn_state
.vddci_dependency_on_mclk
);
6382 DRM_ERROR("Could not patch vddci_on_mclk leakage table\n");
6386 static void si_set_pcie_lane_width_in_smc(struct amdgpu_device
*adev
,
6387 struct amdgpu_ps
*amdgpu_new_state
,
6388 struct amdgpu_ps
*amdgpu_current_state
)
6391 u32 new_lane_width
=
6392 (amdgpu_new_state
->caps
& ATOM_PPLIB_PCIE_LINK_WIDTH_MASK
) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT
;
6393 u32 current_lane_width
=
6394 (amdgpu_current_state
->caps
& ATOM_PPLIB_PCIE_LINK_WIDTH_MASK
) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT
;
6396 if (new_lane_width
!= current_lane_width
) {
6397 amdgpu_set_pcie_lanes(adev
, new_lane_width
);
6398 lane_width
= amdgpu_get_pcie_lanes(adev
);
6399 si_write_smc_soft_register(adev
, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width
, lane_width
);
/* One-time ASIC DPM setup: snapshot clock registers and enable ACPI power management. */
static void si_dpm_setup_asic(struct amdgpu_device *adev)
{
	si_read_clock_registers(adev);
	si_enable_acpi_power_management(adev);
}
6409 static int si_thermal_enable_alert(struct amdgpu_device
*adev
,
6412 u32 thermal_int
= RREG32(CG_THERMAL_INT
);
6415 PPSMC_Result result
;
6417 thermal_int
&= ~(THERM_INT_MASK_HIGH
| THERM_INT_MASK_LOW
);
6418 WREG32(CG_THERMAL_INT
, thermal_int
);
6419 result
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_MSG_EnableThermalInterrupt
);
6420 if (result
!= PPSMC_Result_OK
) {
6421 DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
6425 thermal_int
|= THERM_INT_MASK_HIGH
| THERM_INT_MASK_LOW
;
6426 WREG32(CG_THERMAL_INT
, thermal_int
);
6432 static int si_thermal_set_temperature_range(struct amdgpu_device
*adev
,
6433 int min_temp
, int max_temp
)
6435 int low_temp
= 0 * 1000;
6436 int high_temp
= 255 * 1000;
6438 if (low_temp
< min_temp
)
6439 low_temp
= min_temp
;
6440 if (high_temp
> max_temp
)
6441 high_temp
= max_temp
;
6442 if (high_temp
< low_temp
) {
6443 DRM_ERROR("invalid thermal range: %d - %d\n", low_temp
, high_temp
);
6447 WREG32_P(CG_THERMAL_INT
, DIG_THERM_INTH(high_temp
/ 1000), ~DIG_THERM_INTH_MASK
);
6448 WREG32_P(CG_THERMAL_INT
, DIG_THERM_INTL(low_temp
/ 1000), ~DIG_THERM_INTL_MASK
);
6449 WREG32_P(CG_THERMAL_CTRL
, DIG_THERM_DPM(high_temp
/ 1000), ~DIG_THERM_DPM_MASK
);
6451 adev
->pm
.dpm
.thermal
.min_temp
= low_temp
;
6452 adev
->pm
.dpm
.thermal
.max_temp
= high_temp
;
6457 static void si_fan_ctrl_set_static_mode(struct amdgpu_device
*adev
, u32 mode
)
6459 struct si_power_info
*si_pi
= si_get_pi(adev
);
6462 if (si_pi
->fan_ctrl_is_in_default_mode
) {
6463 tmp
= (RREG32(CG_FDO_CTRL2
) & FDO_PWM_MODE_MASK
) >> FDO_PWM_MODE_SHIFT
;
6464 si_pi
->fan_ctrl_default_mode
= tmp
;
6465 tmp
= (RREG32(CG_FDO_CTRL2
) & TMIN_MASK
) >> TMIN_SHIFT
;
6467 si_pi
->fan_ctrl_is_in_default_mode
= false;
6470 tmp
= RREG32(CG_FDO_CTRL2
) & ~TMIN_MASK
;
6472 WREG32(CG_FDO_CTRL2
, tmp
);
6474 tmp
= RREG32(CG_FDO_CTRL2
) & ~FDO_PWM_MODE_MASK
;
6475 tmp
|= FDO_PWM_MODE(mode
);
6476 WREG32(CG_FDO_CTRL2
, tmp
);
6479 static int si_thermal_setup_fan_table(struct amdgpu_device
*adev
)
6481 struct si_power_info
*si_pi
= si_get_pi(adev
);
6482 PP_SIslands_FanTable fan_table
= { FDO_MODE_HARDWARE
};
6484 u32 t_diff1
, t_diff2
, pwm_diff1
, pwm_diff2
;
6485 u16 fdo_min
, slope1
, slope2
;
6486 u32 reference_clock
, tmp
;
6490 if (!si_pi
->fan_table_start
) {
6491 adev
->pm
.dpm
.fan
.ucode_fan_control
= false;
6495 duty100
= (RREG32(CG_FDO_CTRL1
) & FMAX_DUTY100_MASK
) >> FMAX_DUTY100_SHIFT
;
6498 adev
->pm
.dpm
.fan
.ucode_fan_control
= false;
6502 tmp64
= (u64
)adev
->pm
.dpm
.fan
.pwm_min
* duty100
;
6503 do_div(tmp64
, 10000);
6504 fdo_min
= (u16
)tmp64
;
6506 t_diff1
= adev
->pm
.dpm
.fan
.t_med
- adev
->pm
.dpm
.fan
.t_min
;
6507 t_diff2
= adev
->pm
.dpm
.fan
.t_high
- adev
->pm
.dpm
.fan
.t_med
;
6509 pwm_diff1
= adev
->pm
.dpm
.fan
.pwm_med
- adev
->pm
.dpm
.fan
.pwm_min
;
6510 pwm_diff2
= adev
->pm
.dpm
.fan
.pwm_high
- adev
->pm
.dpm
.fan
.pwm_med
;
6512 slope1
= (u16
)((50 + ((16 * duty100
* pwm_diff1
) / t_diff1
)) / 100);
6513 slope2
= (u16
)((50 + ((16 * duty100
* pwm_diff2
) / t_diff2
)) / 100);
6515 fan_table
.temp_min
= cpu_to_be16((50 + adev
->pm
.dpm
.fan
.t_min
) / 100);
6516 fan_table
.temp_med
= cpu_to_be16((50 + adev
->pm
.dpm
.fan
.t_med
) / 100);
6517 fan_table
.temp_max
= cpu_to_be16((50 + adev
->pm
.dpm
.fan
.t_max
) / 100);
6518 fan_table
.slope1
= cpu_to_be16(slope1
);
6519 fan_table
.slope2
= cpu_to_be16(slope2
);
6520 fan_table
.fdo_min
= cpu_to_be16(fdo_min
);
6521 fan_table
.hys_down
= cpu_to_be16(adev
->pm
.dpm
.fan
.t_hyst
);
6522 fan_table
.hys_up
= cpu_to_be16(1);
6523 fan_table
.hys_slope
= cpu_to_be16(1);
6524 fan_table
.temp_resp_lim
= cpu_to_be16(5);
6525 reference_clock
= amdgpu_asic_get_xclk(adev
);
6527 fan_table
.refresh_period
= cpu_to_be32((adev
->pm
.dpm
.fan
.cycle_delay
*
6528 reference_clock
) / 1600);
6529 fan_table
.fdo_max
= cpu_to_be16((u16
)duty100
);
6531 tmp
= (RREG32(CG_MULT_THERMAL_CTRL
) & TEMP_SEL_MASK
) >> TEMP_SEL_SHIFT
;
6532 fan_table
.temp_src
= (uint8_t)tmp
;
6534 ret
= amdgpu_si_copy_bytes_to_smc(adev
,
6535 si_pi
->fan_table_start
,
6541 DRM_ERROR("Failed to load fan table to the SMC.");
6542 adev
->pm
.dpm
.fan
.ucode_fan_control
= false;
6548 static int si_fan_ctrl_start_smc_fan_control(struct amdgpu_device
*adev
)
6550 struct si_power_info
*si_pi
= si_get_pi(adev
);
6553 ret
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_StartFanControl
);
6554 if (ret
== PPSMC_Result_OK
) {
6555 si_pi
->fan_is_controlled_by_smc
= true;
6562 static int si_fan_ctrl_stop_smc_fan_control(struct amdgpu_device
*adev
)
6564 struct si_power_info
*si_pi
= si_get_pi(adev
);
6567 ret
= amdgpu_si_send_msg_to_smc(adev
, PPSMC_StopFanControl
);
6569 if (ret
== PPSMC_Result_OK
) {
6570 si_pi
->fan_is_controlled_by_smc
= false;
6577 static int si_dpm_get_fan_speed_percent(struct amdgpu_device
*adev
,
6583 if (adev
->pm
.no_fan
)
6586 duty100
= (RREG32(CG_FDO_CTRL1
) & FMAX_DUTY100_MASK
) >> FMAX_DUTY100_SHIFT
;
6587 duty
= (RREG32(CG_THERMAL_STATUS
) & FDO_PWM_DUTY_MASK
) >> FDO_PWM_DUTY_SHIFT
;
6592 tmp64
= (u64
)duty
* 100;
6593 do_div(tmp64
, duty100
);
6594 *speed
= (u32
)tmp64
;
6602 static int si_dpm_set_fan_speed_percent(struct amdgpu_device
*adev
,
6605 struct si_power_info
*si_pi
= si_get_pi(adev
);
6610 if (adev
->pm
.no_fan
)
6613 if (si_pi
->fan_is_controlled_by_smc
)
6619 duty100
= (RREG32(CG_FDO_CTRL1
) & FMAX_DUTY100_MASK
) >> FMAX_DUTY100_SHIFT
;
6624 tmp64
= (u64
)speed
* duty100
;
6628 tmp
= RREG32(CG_FDO_CTRL0
) & ~FDO_STATIC_DUTY_MASK
;
6629 tmp
|= FDO_STATIC_DUTY(duty
);
6630 WREG32(CG_FDO_CTRL0
, tmp
);
6635 static void si_dpm_set_fan_control_mode(struct amdgpu_device
*adev
, u32 mode
)
6638 /* stop auto-manage */
6639 if (adev
->pm
.dpm
.fan
.ucode_fan_control
)
6640 si_fan_ctrl_stop_smc_fan_control(adev
);
6641 si_fan_ctrl_set_static_mode(adev
, mode
);
6643 /* restart auto-manage */
6644 if (adev
->pm
.dpm
.fan
.ucode_fan_control
)
6645 si_thermal_start_smc_fan_control(adev
);
6647 si_fan_ctrl_set_default_mode(adev
);
6651 static u32
si_dpm_get_fan_control_mode(struct amdgpu_device
*adev
)
6653 struct si_power_info
*si_pi
= si_get_pi(adev
);
6656 if (si_pi
->fan_is_controlled_by_smc
)
6659 tmp
= RREG32(CG_FDO_CTRL2
) & FDO_PWM_MODE_MASK
;
6660 return (tmp
>> FDO_PWM_MODE_SHIFT
);
6664 static int si_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device
*adev
,
6668 u32 xclk
= amdgpu_asic_get_xclk(adev
);
6670 if (adev
->pm
.no_fan
)
6673 if (adev
->pm
.fan_pulses_per_revolution
== 0)
6676 tach_period
= (RREG32(CG_TACH_STATUS
) & TACH_PERIOD_MASK
) >> TACH_PERIOD_SHIFT
;
6677 if (tach_period
== 0)
6680 *speed
= 60 * xclk
* 10000 / tach_period
;
6685 static int si_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device
*adev
,
6688 u32 tach_period
, tmp
;
6689 u32 xclk
= amdgpu_asic_get_xclk(adev
);
6691 if (adev
->pm
.no_fan
)
6694 if (adev
->pm
.fan_pulses_per_revolution
== 0)
6697 if ((speed
< adev
->pm
.fan_min_rpm
) ||
6698 (speed
> adev
->pm
.fan_max_rpm
))
6701 if (adev
->pm
.dpm
.fan
.ucode_fan_control
)
6702 si_fan_ctrl_stop_smc_fan_control(adev
);
6704 tach_period
= 60 * xclk
* 10000 / (8 * speed
);
6705 tmp
= RREG32(CG_TACH_CTRL
) & ~TARGET_PERIOD_MASK
;
6706 tmp
|= TARGET_PERIOD(tach_period
);
6707 WREG32(CG_TACH_CTRL
, tmp
);
6709 si_fan_ctrl_set_static_mode(adev
, FDO_PWM_MODE_STATIC_RPM
);
6715 static void si_fan_ctrl_set_default_mode(struct amdgpu_device
*adev
)
6717 struct si_power_info
*si_pi
= si_get_pi(adev
);
6720 if (!si_pi
->fan_ctrl_is_in_default_mode
) {
6721 tmp
= RREG32(CG_FDO_CTRL2
) & ~FDO_PWM_MODE_MASK
;
6722 tmp
|= FDO_PWM_MODE(si_pi
->fan_ctrl_default_mode
);
6723 WREG32(CG_FDO_CTRL2
, tmp
);
6725 tmp
= RREG32(CG_FDO_CTRL2
) & ~TMIN_MASK
;
6726 tmp
|= TMIN(si_pi
->t_min
);
6727 WREG32(CG_FDO_CTRL2
, tmp
);
6728 si_pi
->fan_ctrl_is_in_default_mode
= true;
6732 static void si_thermal_start_smc_fan_control(struct amdgpu_device
*adev
)
6734 if (adev
->pm
.dpm
.fan
.ucode_fan_control
) {
6735 si_fan_ctrl_start_smc_fan_control(adev
);
6736 si_fan_ctrl_set_static_mode(adev
, FDO_PWM_MODE_STATIC
);
6740 static void si_thermal_initialize(struct amdgpu_device
*adev
)
6744 if (adev
->pm
.fan_pulses_per_revolution
) {
6745 tmp
= RREG32(CG_TACH_CTRL
) & ~EDGE_PER_REV_MASK
;
6746 tmp
|= EDGE_PER_REV(adev
->pm
.fan_pulses_per_revolution
-1);
6747 WREG32(CG_TACH_CTRL
, tmp
);
6750 tmp
= RREG32(CG_FDO_CTRL2
) & ~TACH_PWM_RESP_RATE_MASK
;
6751 tmp
|= TACH_PWM_RESP_RATE(0x28);
6752 WREG32(CG_FDO_CTRL2
, tmp
);
6755 static int si_thermal_start_thermal_controller(struct amdgpu_device
*adev
)
6759 si_thermal_initialize(adev
);
6760 ret
= si_thermal_set_temperature_range(adev
, R600_TEMP_RANGE_MIN
, R600_TEMP_RANGE_MAX
);
6763 ret
= si_thermal_enable_alert(adev
, true);
6766 if (adev
->pm
.dpm
.fan
.ucode_fan_control
) {
6767 ret
= si_halt_smc(adev
);
6770 ret
= si_thermal_setup_fan_table(adev
);
6773 ret
= si_resume_smc(adev
);
6776 si_thermal_start_smc_fan_control(adev
);
6782 static void si_thermal_stop_thermal_controller(struct amdgpu_device
*adev
)
6784 if (!adev
->pm
.no_fan
) {
6785 si_fan_ctrl_set_default_mode(adev
);
6786 si_fan_ctrl_stop_smc_fan_control(adev
);
6790 static int si_dpm_enable(struct amdgpu_device
*adev
)
6792 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
6793 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
6794 struct si_power_info
*si_pi
= si_get_pi(adev
);
6795 struct amdgpu_ps
*boot_ps
= adev
->pm
.dpm
.boot_ps
;
6798 if (amdgpu_si_is_smc_running(adev
))
6800 if (pi
->voltage_control
|| si_pi
->voltage_control_svi2
)
6801 si_enable_voltage_control(adev
, true);
6802 if (pi
->mvdd_control
)
6803 si_get_mvdd_configuration(adev
);
6804 if (pi
->voltage_control
|| si_pi
->voltage_control_svi2
) {
6805 ret
= si_construct_voltage_tables(adev
);
6807 DRM_ERROR("si_construct_voltage_tables failed\n");
6811 if (eg_pi
->dynamic_ac_timing
) {
6812 ret
= si_initialize_mc_reg_table(adev
);
6814 eg_pi
->dynamic_ac_timing
= false;
6817 si_enable_spread_spectrum(adev
, true);
6818 if (pi
->thermal_protection
)
6819 si_enable_thermal_protection(adev
, true);
6821 si_program_git(adev
);
6822 si_program_tp(adev
);
6823 si_program_tpp(adev
);
6824 si_program_sstp(adev
);
6825 si_enable_display_gap(adev
);
6826 si_program_vc(adev
);
6827 ret
= si_upload_firmware(adev
);
6829 DRM_ERROR("si_upload_firmware failed\n");
6832 ret
= si_process_firmware_header(adev
);
6834 DRM_ERROR("si_process_firmware_header failed\n");
6837 ret
= si_initial_switch_from_arb_f0_to_f1(adev
);
6839 DRM_ERROR("si_initial_switch_from_arb_f0_to_f1 failed\n");
6842 ret
= si_init_smc_table(adev
);
6844 DRM_ERROR("si_init_smc_table failed\n");
6847 ret
= si_init_smc_spll_table(adev
);
6849 DRM_ERROR("si_init_smc_spll_table failed\n");
6852 ret
= si_init_arb_table_index(adev
);
6854 DRM_ERROR("si_init_arb_table_index failed\n");
6857 if (eg_pi
->dynamic_ac_timing
) {
6858 ret
= si_populate_mc_reg_table(adev
, boot_ps
);
6860 DRM_ERROR("si_populate_mc_reg_table failed\n");
6864 ret
= si_initialize_smc_cac_tables(adev
);
6866 DRM_ERROR("si_initialize_smc_cac_tables failed\n");
6869 ret
= si_initialize_hardware_cac_manager(adev
);
6871 DRM_ERROR("si_initialize_hardware_cac_manager failed\n");
6874 ret
= si_initialize_smc_dte_tables(adev
);
6876 DRM_ERROR("si_initialize_smc_dte_tables failed\n");
6879 ret
= si_populate_smc_tdp_limits(adev
, boot_ps
);
6881 DRM_ERROR("si_populate_smc_tdp_limits failed\n");
6884 ret
= si_populate_smc_tdp_limits_2(adev
, boot_ps
);
6886 DRM_ERROR("si_populate_smc_tdp_limits_2 failed\n");
6889 si_program_response_times(adev
);
6890 si_program_ds_registers(adev
);
6891 si_dpm_start_smc(adev
);
6892 ret
= si_notify_smc_display_change(adev
, false);
6894 DRM_ERROR("si_notify_smc_display_change failed\n");
6897 si_enable_sclk_control(adev
, true);
6900 si_enable_auto_throttle_source(adev
, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL
, true);
6901 si_thermal_start_thermal_controller(adev
);
6902 ni_update_current_ps(adev
, boot_ps
);
6907 static int si_set_temperature_range(struct amdgpu_device
*adev
)
6911 ret
= si_thermal_enable_alert(adev
, false);
6914 ret
= si_thermal_set_temperature_range(adev
, R600_TEMP_RANGE_MIN
, R600_TEMP_RANGE_MAX
);
6917 ret
= si_thermal_enable_alert(adev
, true);
6924 static void si_dpm_disable(struct amdgpu_device
*adev
)
6926 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
6927 struct amdgpu_ps
*boot_ps
= adev
->pm
.dpm
.boot_ps
;
6929 if (!amdgpu_si_is_smc_running(adev
))
6931 si_thermal_stop_thermal_controller(adev
);
6932 si_disable_ulv(adev
);
6934 if (pi
->thermal_protection
)
6935 si_enable_thermal_protection(adev
, false);
6936 si_enable_power_containment(adev
, boot_ps
, false);
6937 si_enable_smc_cac(adev
, boot_ps
, false);
6938 si_enable_spread_spectrum(adev
, false);
6939 si_enable_auto_throttle_source(adev
, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL
, false);
6941 si_reset_to_default(adev
);
6942 si_dpm_stop_smc(adev
);
6943 si_force_switch_to_arb_f0(adev
);
6945 ni_update_current_ps(adev
, boot_ps
);
6948 static int si_dpm_pre_set_power_state(struct amdgpu_device
*adev
)
6950 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
6951 struct amdgpu_ps requested_ps
= *adev
->pm
.dpm
.requested_ps
;
6952 struct amdgpu_ps
*new_ps
= &requested_ps
;
6954 ni_update_requested_ps(adev
, new_ps
);
6955 si_apply_state_adjust_rules(adev
, &eg_pi
->requested_rps
);
6960 static int si_power_control_set_level(struct amdgpu_device
*adev
)
6962 struct amdgpu_ps
*new_ps
= adev
->pm
.dpm
.requested_ps
;
6965 ret
= si_restrict_performance_levels_before_switch(adev
);
6968 ret
= si_halt_smc(adev
);
6971 ret
= si_populate_smc_tdp_limits(adev
, new_ps
);
6974 ret
= si_populate_smc_tdp_limits_2(adev
, new_ps
);
6977 ret
= si_resume_smc(adev
);
6980 ret
= si_set_sw_state(adev
);
6986 static int si_dpm_set_power_state(struct amdgpu_device
*adev
)
6988 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
6989 struct amdgpu_ps
*new_ps
= &eg_pi
->requested_rps
;
6990 struct amdgpu_ps
*old_ps
= &eg_pi
->current_rps
;
6993 ret
= si_disable_ulv(adev
);
6995 DRM_ERROR("si_disable_ulv failed\n");
6998 ret
= si_restrict_performance_levels_before_switch(adev
);
7000 DRM_ERROR("si_restrict_performance_levels_before_switch failed\n");
7003 if (eg_pi
->pcie_performance_request
)
7004 si_request_link_speed_change_before_state_change(adev
, new_ps
, old_ps
);
7005 ni_set_uvd_clock_before_set_eng_clock(adev
, new_ps
, old_ps
);
7006 ret
= si_enable_power_containment(adev
, new_ps
, false);
7008 DRM_ERROR("si_enable_power_containment failed\n");
7011 ret
= si_enable_smc_cac(adev
, new_ps
, false);
7013 DRM_ERROR("si_enable_smc_cac failed\n");
7016 ret
= si_halt_smc(adev
);
7018 DRM_ERROR("si_halt_smc failed\n");
7021 ret
= si_upload_sw_state(adev
, new_ps
);
7023 DRM_ERROR("si_upload_sw_state failed\n");
7026 ret
= si_upload_smc_data(adev
);
7028 DRM_ERROR("si_upload_smc_data failed\n");
7031 ret
= si_upload_ulv_state(adev
);
7033 DRM_ERROR("si_upload_ulv_state failed\n");
7036 if (eg_pi
->dynamic_ac_timing
) {
7037 ret
= si_upload_mc_reg_table(adev
, new_ps
);
7039 DRM_ERROR("si_upload_mc_reg_table failed\n");
7043 ret
= si_program_memory_timing_parameters(adev
, new_ps
);
7045 DRM_ERROR("si_program_memory_timing_parameters failed\n");
7048 si_set_pcie_lane_width_in_smc(adev
, new_ps
, old_ps
);
7050 ret
= si_resume_smc(adev
);
7052 DRM_ERROR("si_resume_smc failed\n");
7055 ret
= si_set_sw_state(adev
);
7057 DRM_ERROR("si_set_sw_state failed\n");
7060 ni_set_uvd_clock_after_set_eng_clock(adev
, new_ps
, old_ps
);
7061 if (eg_pi
->pcie_performance_request
)
7062 si_notify_link_speed_change_after_state_change(adev
, new_ps
, old_ps
);
7063 ret
= si_set_power_state_conditionally_enable_ulv(adev
, new_ps
);
7065 DRM_ERROR("si_set_power_state_conditionally_enable_ulv failed\n");
7068 ret
= si_enable_smc_cac(adev
, new_ps
, true);
7070 DRM_ERROR("si_enable_smc_cac failed\n");
7073 ret
= si_enable_power_containment(adev
, new_ps
, true);
7075 DRM_ERROR("si_enable_power_containment failed\n");
7079 ret
= si_power_control_set_level(adev
);
7081 DRM_ERROR("si_power_control_set_level failed\n");
7088 static void si_dpm_post_set_power_state(struct amdgpu_device
*adev
)
7090 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
7091 struct amdgpu_ps
*new_ps
= &eg_pi
->requested_rps
;
7093 ni_update_current_ps(adev
, new_ps
);
/*
 * si_dpm_reset_asic - drop DPM back to the boot state before an ASIC reset.
 */
void si_dpm_reset_asic(struct amdgpu_device *adev)
{
	si_restrict_performance_levels_before_switch(adev);
	si_disable_ulv(adev);
	si_set_boot_state(adev);
}
/*
 * si_dpm_display_configuration_changed - reprogram the display gap
 * after a mode-set / display topology change.
 */
static void si_dpm_display_configuration_changed(struct amdgpu_device *adev)
{
	si_program_display_gap(adev);
}
7111 static void si_parse_pplib_non_clock_info(struct amdgpu_device
*adev
,
7112 struct amdgpu_ps
*rps
,
7113 struct _ATOM_PPLIB_NONCLOCK_INFO
*non_clock_info
,
7116 rps
->caps
= le32_to_cpu(non_clock_info
->ulCapsAndSettings
);
7117 rps
->class = le16_to_cpu(non_clock_info
->usClassification
);
7118 rps
->class2
= le16_to_cpu(non_clock_info
->usClassification2
);
7120 if (ATOM_PPLIB_NONCLOCKINFO_VER1
< table_rev
) {
7121 rps
->vclk
= le32_to_cpu(non_clock_info
->ulVCLK
);
7122 rps
->dclk
= le32_to_cpu(non_clock_info
->ulDCLK
);
7123 } else if (r600_is_uvd_state(rps
->class, rps
->class2
)) {
7124 rps
->vclk
= RV770_DEFAULT_VCLK_FREQ
;
7125 rps
->dclk
= RV770_DEFAULT_DCLK_FREQ
;
7131 if (rps
->class & ATOM_PPLIB_CLASSIFICATION_BOOT
)
7132 adev
->pm
.dpm
.boot_ps
= rps
;
7133 if (rps
->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE
)
7134 adev
->pm
.dpm
.uvd_ps
= rps
;
7137 static void si_parse_pplib_clock_info(struct amdgpu_device
*adev
,
7138 struct amdgpu_ps
*rps
, int index
,
7139 union pplib_clock_info
*clock_info
)
7141 struct rv7xx_power_info
*pi
= rv770_get_pi(adev
);
7142 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
7143 struct si_power_info
*si_pi
= si_get_pi(adev
);
7144 struct si_ps
*ps
= si_get_ps(rps
);
7145 u16 leakage_voltage
;
7146 struct rv7xx_pl
*pl
= &ps
->performance_levels
[index
];
7149 ps
->performance_level_count
= index
+ 1;
7151 pl
->sclk
= le16_to_cpu(clock_info
->si
.usEngineClockLow
);
7152 pl
->sclk
|= clock_info
->si
.ucEngineClockHigh
<< 16;
7153 pl
->mclk
= le16_to_cpu(clock_info
->si
.usMemoryClockLow
);
7154 pl
->mclk
|= clock_info
->si
.ucMemoryClockHigh
<< 16;
7156 pl
->vddc
= le16_to_cpu(clock_info
->si
.usVDDC
);
7157 pl
->vddci
= le16_to_cpu(clock_info
->si
.usVDDCI
);
7158 pl
->flags
= le32_to_cpu(clock_info
->si
.ulFlags
);
7159 pl
->pcie_gen
= r600_get_pcie_gen_support(adev
,
7160 si_pi
->sys_pcie_mask
,
7161 si_pi
->boot_pcie_gen
,
7162 clock_info
->si
.ucPCIEGen
);
7164 /* patch up vddc if necessary */
7165 ret
= si_get_leakage_voltage_from_leakage_index(adev
, pl
->vddc
,
7168 pl
->vddc
= leakage_voltage
;
7170 if (rps
->class & ATOM_PPLIB_CLASSIFICATION_ACPI
) {
7171 pi
->acpi_vddc
= pl
->vddc
;
7172 eg_pi
->acpi_vddci
= pl
->vddci
;
7173 si_pi
->acpi_pcie_gen
= pl
->pcie_gen
;
7176 if ((rps
->class2
& ATOM_PPLIB_CLASSIFICATION2_ULV
) &&
7178 /* XXX disable for A0 tahiti */
7179 si_pi
->ulv
.supported
= false;
7180 si_pi
->ulv
.pl
= *pl
;
7181 si_pi
->ulv
.one_pcie_lane_in_ulv
= false;
7182 si_pi
->ulv
.volt_change_delay
= SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT
;
7183 si_pi
->ulv
.cg_ulv_parameter
= SISLANDS_CGULVPARAMETER_DFLT
;
7184 si_pi
->ulv
.cg_ulv_control
= SISLANDS_CGULVCONTROL_DFLT
;
7187 if (pi
->min_vddc_in_table
> pl
->vddc
)
7188 pi
->min_vddc_in_table
= pl
->vddc
;
7190 if (pi
->max_vddc_in_table
< pl
->vddc
)
7191 pi
->max_vddc_in_table
= pl
->vddc
;
7193 /* patch up boot state */
7194 if (rps
->class & ATOM_PPLIB_CLASSIFICATION_BOOT
) {
7195 u16 vddc
, vddci
, mvdd
;
7196 amdgpu_atombios_get_default_voltages(adev
, &vddc
, &vddci
, &mvdd
);
7197 pl
->mclk
= adev
->clock
.default_mclk
;
7198 pl
->sclk
= adev
->clock
.default_sclk
;
7201 si_pi
->mvdd_bootup_value
= mvdd
;
7204 if ((rps
->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK
) ==
7205 ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE
) {
7206 adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_ac
.sclk
= pl
->sclk
;
7207 adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_ac
.mclk
= pl
->mclk
;
7208 adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_ac
.vddc
= pl
->vddc
;
7209 adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_ac
.vddci
= pl
->vddci
;
7213 union pplib_power_state
{
7214 struct _ATOM_PPLIB_STATE v1
;
7215 struct _ATOM_PPLIB_STATE_V2 v2
;
7218 static int si_parse_power_table(struct amdgpu_device
*adev
)
7220 struct amdgpu_mode_info
*mode_info
= &adev
->mode_info
;
7221 struct _ATOM_PPLIB_NONCLOCK_INFO
*non_clock_info
;
7222 union pplib_power_state
*power_state
;
7223 int i
, j
, k
, non_clock_array_index
, clock_array_index
;
7224 union pplib_clock_info
*clock_info
;
7225 struct _StateArray
*state_array
;
7226 struct _ClockInfoArray
*clock_info_array
;
7227 struct _NonClockInfoArray
*non_clock_info_array
;
7228 union power_info
*power_info
;
7229 int index
= GetIndexIntoMasterTable(DATA
, PowerPlayInfo
);
7232 u8
*power_state_offset
;
7235 if (!amdgpu_atom_parse_data_header(mode_info
->atom_context
, index
, NULL
,
7236 &frev
, &crev
, &data_offset
))
7238 power_info
= (union power_info
*)(mode_info
->atom_context
->bios
+ data_offset
);
7240 amdgpu_add_thermal_controller(adev
);
7242 state_array
= (struct _StateArray
*)
7243 (mode_info
->atom_context
->bios
+ data_offset
+
7244 le16_to_cpu(power_info
->pplib
.usStateArrayOffset
));
7245 clock_info_array
= (struct _ClockInfoArray
*)
7246 (mode_info
->atom_context
->bios
+ data_offset
+
7247 le16_to_cpu(power_info
->pplib
.usClockInfoArrayOffset
));
7248 non_clock_info_array
= (struct _NonClockInfoArray
*)
7249 (mode_info
->atom_context
->bios
+ data_offset
+
7250 le16_to_cpu(power_info
->pplib
.usNonClockInfoArrayOffset
));
7252 adev
->pm
.dpm
.ps
= kzalloc(sizeof(struct amdgpu_ps
) *
7253 state_array
->ucNumEntries
, GFP_KERNEL
);
7254 if (!adev
->pm
.dpm
.ps
)
7256 power_state_offset
= (u8
*)state_array
->states
;
7257 for (i
= 0; i
< state_array
->ucNumEntries
; i
++) {
7259 power_state
= (union pplib_power_state
*)power_state_offset
;
7260 non_clock_array_index
= power_state
->v2
.nonClockInfoIndex
;
7261 non_clock_info
= (struct _ATOM_PPLIB_NONCLOCK_INFO
*)
7262 &non_clock_info_array
->nonClockInfo
[non_clock_array_index
];
7263 ps
= kzalloc(sizeof(struct si_ps
), GFP_KERNEL
);
7265 kfree(adev
->pm
.dpm
.ps
);
7268 adev
->pm
.dpm
.ps
[i
].ps_priv
= ps
;
7269 si_parse_pplib_non_clock_info(adev
, &adev
->pm
.dpm
.ps
[i
],
7271 non_clock_info_array
->ucEntrySize
);
7273 idx
= (u8
*)&power_state
->v2
.clockInfoIndex
[0];
7274 for (j
= 0; j
< power_state
->v2
.ucNumDPMLevels
; j
++) {
7275 clock_array_index
= idx
[j
];
7276 if (clock_array_index
>= clock_info_array
->ucNumEntries
)
7278 if (k
>= SISLANDS_MAX_HARDWARE_POWERLEVELS
)
7280 clock_info
= (union pplib_clock_info
*)
7281 ((u8
*)&clock_info_array
->clockInfo
[0] +
7282 (clock_array_index
* clock_info_array
->ucEntrySize
));
7283 si_parse_pplib_clock_info(adev
,
7284 &adev
->pm
.dpm
.ps
[i
], k
,
7288 power_state_offset
+= 2 + power_state
->v2
.ucNumDPMLevels
;
7290 adev
->pm
.dpm
.num_ps
= state_array
->ucNumEntries
;
7292 /* fill in the vce power states */
7293 for (i
= 0; i
< adev
->pm
.dpm
.num_of_vce_states
; i
++) {
7295 clock_array_index
= adev
->pm
.dpm
.vce_states
[i
].clk_idx
;
7296 clock_info
= (union pplib_clock_info
*)
7297 &clock_info_array
->clockInfo
[clock_array_index
* clock_info_array
->ucEntrySize
];
7298 sclk
= le16_to_cpu(clock_info
->si
.usEngineClockLow
);
7299 sclk
|= clock_info
->si
.ucEngineClockHigh
<< 16;
7300 mclk
= le16_to_cpu(clock_info
->si
.usMemoryClockLow
);
7301 mclk
|= clock_info
->si
.ucMemoryClockHigh
<< 16;
7302 adev
->pm
.dpm
.vce_states
[i
].sclk
= sclk
;
7303 adev
->pm
.dpm
.vce_states
[i
].mclk
= mclk
;
7309 static int si_dpm_init(struct amdgpu_device
*adev
)
7311 struct rv7xx_power_info
*pi
;
7312 struct evergreen_power_info
*eg_pi
;
7313 struct ni_power_info
*ni_pi
;
7314 struct si_power_info
*si_pi
;
7315 struct atom_clock_dividers dividers
;
7319 si_pi
= kzalloc(sizeof(struct si_power_info
), GFP_KERNEL
);
7322 adev
->pm
.dpm
.priv
= si_pi
;
7327 ret
= drm_pcie_get_speed_cap_mask(adev
->ddev
, &mask
);
7329 si_pi
->sys_pcie_mask
= 0;
7331 si_pi
->sys_pcie_mask
= mask
;
7332 si_pi
->force_pcie_gen
= AMDGPU_PCIE_GEN_INVALID
;
7333 si_pi
->boot_pcie_gen
= si_get_current_pcie_speed(adev
);
7335 si_set_max_cu_value(adev
);
7337 rv770_get_max_vddc(adev
);
7338 si_get_leakage_vddc(adev
);
7339 si_patch_dependency_tables_based_on_leakage(adev
);
7342 eg_pi
->acpi_vddci
= 0;
7343 pi
->min_vddc_in_table
= 0;
7344 pi
->max_vddc_in_table
= 0;
7346 ret
= amdgpu_get_platform_caps(adev
);
7350 ret
= amdgpu_parse_extended_power_table(adev
);
7354 ret
= si_parse_power_table(adev
);
7358 adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
=
7359 kzalloc(4 * sizeof(struct amdgpu_clock_voltage_dependency_entry
), GFP_KERNEL
);
7360 if (!adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
) {
7361 amdgpu_free_extended_power_table(adev
);
7364 adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.count
= 4;
7365 adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
[0].clk
= 0;
7366 adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
[0].v
= 0;
7367 adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
[1].clk
= 36000;
7368 adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
[1].v
= 720;
7369 adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
[2].clk
= 54000;
7370 adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
[2].v
= 810;
7371 adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
[3].clk
= 72000;
7372 adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
[3].v
= 900;
7374 if (adev
->pm
.dpm
.voltage_response_time
== 0)
7375 adev
->pm
.dpm
.voltage_response_time
= R600_VOLTAGERESPONSETIME_DFLT
;
7376 if (adev
->pm
.dpm
.backbias_response_time
== 0)
7377 adev
->pm
.dpm
.backbias_response_time
= R600_BACKBIASRESPONSETIME_DFLT
;
7379 ret
= amdgpu_atombios_get_clock_dividers(adev
, COMPUTE_ENGINE_PLL_PARAM
,
7380 0, false, ÷rs
);
7382 pi
->ref_div
= dividers
.ref_div
+ 1;
7384 pi
->ref_div
= R600_REFERENCEDIVIDER_DFLT
;
7386 eg_pi
->smu_uvd_hs
= false;
7388 pi
->mclk_strobe_mode_threshold
= 40000;
7389 if (si_is_special_1gb_platform(adev
))
7390 pi
->mclk_stutter_mode_threshold
= 0;
7392 pi
->mclk_stutter_mode_threshold
= pi
->mclk_strobe_mode_threshold
;
7393 pi
->mclk_edc_enable_threshold
= 40000;
7394 eg_pi
->mclk_edc_wr_enable_threshold
= 40000;
7396 ni_pi
->mclk_rtt_mode_threshold
= eg_pi
->mclk_edc_wr_enable_threshold
;
7398 pi
->voltage_control
=
7399 amdgpu_atombios_is_voltage_gpio(adev
, SET_VOLTAGE_TYPE_ASIC_VDDC
,
7400 VOLTAGE_OBJ_GPIO_LUT
);
7401 if (!pi
->voltage_control
) {
7402 si_pi
->voltage_control_svi2
=
7403 amdgpu_atombios_is_voltage_gpio(adev
, SET_VOLTAGE_TYPE_ASIC_VDDC
,
7405 if (si_pi
->voltage_control_svi2
)
7406 amdgpu_atombios_get_svi2_info(adev
, SET_VOLTAGE_TYPE_ASIC_VDDC
,
7407 &si_pi
->svd_gpio_id
, &si_pi
->svc_gpio_id
);
7411 amdgpu_atombios_is_voltage_gpio(adev
, SET_VOLTAGE_TYPE_ASIC_MVDDC
,
7412 VOLTAGE_OBJ_GPIO_LUT
);
7414 eg_pi
->vddci_control
=
7415 amdgpu_atombios_is_voltage_gpio(adev
, SET_VOLTAGE_TYPE_ASIC_VDDCI
,
7416 VOLTAGE_OBJ_GPIO_LUT
);
7417 if (!eg_pi
->vddci_control
)
7418 si_pi
->vddci_control_svi2
=
7419 amdgpu_atombios_is_voltage_gpio(adev
, SET_VOLTAGE_TYPE_ASIC_VDDCI
,
7422 si_pi
->vddc_phase_shed_control
=
7423 amdgpu_atombios_is_voltage_gpio(adev
, SET_VOLTAGE_TYPE_ASIC_VDDC
,
7424 VOLTAGE_OBJ_PHASE_LUT
);
7426 rv770_get_engine_memory_ss(adev
);
7428 pi
->asi
= RV770_ASI_DFLT
;
7429 pi
->pasi
= CYPRESS_HASI_DFLT
;
7430 pi
->vrc
= SISLANDS_VRC_DFLT
;
7432 pi
->gfx_clock_gating
= true;
7434 eg_pi
->sclk_deep_sleep
= true;
7435 si_pi
->sclk_deep_sleep_above_low
= false;
7437 if (adev
->pm
.int_thermal_type
!= THERMAL_TYPE_NONE
)
7438 pi
->thermal_protection
= true;
7440 pi
->thermal_protection
= false;
7442 eg_pi
->dynamic_ac_timing
= true;
7444 eg_pi
->light_sleep
= true;
7445 #if defined(CONFIG_ACPI)
7446 eg_pi
->pcie_performance_request
=
7447 amdgpu_acpi_is_pcie_performance_request_supported(adev
);
7449 eg_pi
->pcie_performance_request
= false;
7452 si_pi
->sram_end
= SMC_RAM_END
;
7454 adev
->pm
.dpm
.dyn_state
.mclk_sclk_ratio
= 4;
7455 adev
->pm
.dpm
.dyn_state
.sclk_mclk_delta
= 15000;
7456 adev
->pm
.dpm
.dyn_state
.vddc_vddci_delta
= 200;
7457 adev
->pm
.dpm
.dyn_state
.valid_sclk_values
.count
= 0;
7458 adev
->pm
.dpm
.dyn_state
.valid_sclk_values
.values
= NULL
;
7459 adev
->pm
.dpm
.dyn_state
.valid_mclk_values
.count
= 0;
7460 adev
->pm
.dpm
.dyn_state
.valid_mclk_values
.values
= NULL
;
7462 si_initialize_powertune_defaults(adev
);
7464 /* make sure dc limits are valid */
7465 if ((adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_dc
.sclk
== 0) ||
7466 (adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_dc
.mclk
== 0))
7467 adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_dc
=
7468 adev
->pm
.dpm
.dyn_state
.max_clock_voltage_on_ac
;
7470 si_pi
->fan_ctrl_is_in_default_mode
= true;
7475 static void si_dpm_fini(struct amdgpu_device
*adev
)
7479 if (adev
->pm
.dpm
.ps
)
7480 for (i
= 0; i
< adev
->pm
.dpm
.num_ps
; i
++)
7481 kfree(adev
->pm
.dpm
.ps
[i
].ps_priv
);
7482 kfree(adev
->pm
.dpm
.ps
);
7483 kfree(adev
->pm
.dpm
.priv
);
7484 kfree(adev
->pm
.dpm
.dyn_state
.vddc_dependency_on_dispclk
.entries
);
7485 amdgpu_free_extended_power_table(adev
);
7488 static void si_dpm_debugfs_print_current_performance_level(struct amdgpu_device
*adev
,
7491 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
7492 struct amdgpu_ps
*rps
= &eg_pi
->current_rps
;
7493 struct si_ps
*ps
= si_get_ps(rps
);
7494 struct rv7xx_pl
*pl
;
7496 (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX
) & CURRENT_STATE_INDEX_MASK
) >>
7497 CURRENT_STATE_INDEX_SHIFT
;
7499 if (current_index
>= ps
->performance_level_count
) {
7500 seq_printf(m
, "invalid dpm profile %d\n", current_index
);
7502 pl
= &ps
->performance_levels
[current_index
];
7503 seq_printf(m
, "uvd vclk: %d dclk: %d\n", rps
->vclk
, rps
->dclk
);
7504 seq_printf(m
, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
7505 current_index
, pl
->sclk
, pl
->mclk
, pl
->vddc
, pl
->vddci
, pl
->pcie_gen
+ 1);
7509 static int si_dpm_set_interrupt_state(struct amdgpu_device
*adev
,
7510 struct amdgpu_irq_src
*source
,
7512 enum amdgpu_interrupt_state state
)
7517 case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH
:
7519 case AMDGPU_IRQ_STATE_DISABLE
:
7520 cg_thermal_int
= RREG32_SMC(CG_THERMAL_INT
);
7521 cg_thermal_int
|= THERM_INT_MASK_HIGH
;
7522 WREG32_SMC(CG_THERMAL_INT
, cg_thermal_int
);
7524 case AMDGPU_IRQ_STATE_ENABLE
:
7525 cg_thermal_int
= RREG32_SMC(CG_THERMAL_INT
);
7526 cg_thermal_int
&= ~THERM_INT_MASK_HIGH
;
7527 WREG32_SMC(CG_THERMAL_INT
, cg_thermal_int
);
7534 case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW
:
7536 case AMDGPU_IRQ_STATE_DISABLE
:
7537 cg_thermal_int
= RREG32_SMC(CG_THERMAL_INT
);
7538 cg_thermal_int
|= THERM_INT_MASK_LOW
;
7539 WREG32_SMC(CG_THERMAL_INT
, cg_thermal_int
);
7541 case AMDGPU_IRQ_STATE_ENABLE
:
7542 cg_thermal_int
= RREG32_SMC(CG_THERMAL_INT
);
7543 cg_thermal_int
&= ~THERM_INT_MASK_LOW
;
7544 WREG32_SMC(CG_THERMAL_INT
, cg_thermal_int
);
7557 static int si_dpm_process_interrupt(struct amdgpu_device
*adev
,
7558 struct amdgpu_irq_src
*source
,
7559 struct amdgpu_iv_entry
*entry
)
7561 bool queue_thermal
= false;
7566 switch (entry
->src_id
) {
7567 case 230: /* thermal low to high */
7568 DRM_DEBUG("IH: thermal low to high\n");
7569 adev
->pm
.dpm
.thermal
.high_to_low
= false;
7570 queue_thermal
= true;
7572 case 231: /* thermal high to low */
7573 DRM_DEBUG("IH: thermal high to low\n");
7574 adev
->pm
.dpm
.thermal
.high_to_low
= true;
7575 queue_thermal
= true;
7582 schedule_work(&adev
->pm
.dpm
.thermal
.work
);
7587 static int si_dpm_late_init(void *handle
)
7590 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
7595 /* init the sysfs and debugfs files late */
7596 ret
= amdgpu_pm_sysfs_init(adev
);
7600 ret
= si_set_temperature_range(adev
);
7604 si_dpm_powergate_uvd(adev
, true);
7610 * si_dpm_init_microcode - load ucode images from disk
7612 * @adev: amdgpu_device pointer
7614 * Use the firmware interface to load the ucode images into
7615 * the driver (not loaded into hw).
7616 * Returns 0 on success, error on failure.
7618 static int si_dpm_init_microcode(struct amdgpu_device
*adev
)
7620 const char *chip_name
;
7625 switch (adev
->asic_type
) {
7627 chip_name
= "tahiti";
7630 if ((adev
->pdev
->revision
== 0x81) &&
7631 ((adev
->pdev
->device
== 0x6810) ||
7632 (adev
->pdev
->device
== 0x6811)))
7633 chip_name
= "pitcairn_k";
7635 chip_name
= "pitcairn";
7638 if (((adev
->pdev
->device
== 0x6820) &&
7639 ((adev
->pdev
->revision
== 0x81) ||
7640 (adev
->pdev
->revision
== 0x83))) ||
7641 ((adev
->pdev
->device
== 0x6821) &&
7642 ((adev
->pdev
->revision
== 0x83) ||
7643 (adev
->pdev
->revision
== 0x87))) ||
7644 ((adev
->pdev
->revision
== 0x87) &&
7645 ((adev
->pdev
->device
== 0x6823) ||
7646 (adev
->pdev
->device
== 0x682b))))
7647 chip_name
= "verde_k";
7649 chip_name
= "verde";
7652 if (((adev
->pdev
->revision
== 0x81) &&
7653 ((adev
->pdev
->device
== 0x6600) ||
7654 (adev
->pdev
->device
== 0x6604) ||
7655 (adev
->pdev
->device
== 0x6605) ||
7656 (adev
->pdev
->device
== 0x6610))) ||
7657 ((adev
->pdev
->revision
== 0x83) &&
7658 (adev
->pdev
->device
== 0x6610)))
7659 chip_name
= "oland_k";
7661 chip_name
= "oland";
7664 if (((adev
->pdev
->revision
== 0x81) &&
7665 (adev
->pdev
->device
== 0x6660)) ||
7666 ((adev
->pdev
->revision
== 0x83) &&
7667 ((adev
->pdev
->device
== 0x6660) ||
7668 (adev
->pdev
->device
== 0x6663) ||
7669 (adev
->pdev
->device
== 0x6665) ||
7670 (adev
->pdev
->device
== 0x6667))))
7671 chip_name
= "hainan_k";
7672 else if ((adev
->pdev
->revision
== 0xc3) &&
7673 (adev
->pdev
->device
== 0x6665))
7674 chip_name
= "banks_k_2";
7676 chip_name
= "hainan";
7681 snprintf(fw_name
, sizeof(fw_name
), "radeon/%s_smc.bin", chip_name
);
7682 err
= request_firmware(&adev
->pm
.fw
, fw_name
, adev
->dev
);
7685 err
= amdgpu_ucode_validate(adev
->pm
.fw
);
7689 DRM_ERROR("si_smc: Failed to load firmware. err = %d\"%s\"\n",
7691 release_firmware(adev
->pm
.fw
);
7698 static int si_dpm_sw_init(void *handle
)
7701 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
7703 ret
= amdgpu_irq_add_id(adev
, AMDGPU_IH_CLIENTID_LEGACY
, 230, &adev
->pm
.dpm
.thermal
.irq
);
7707 ret
= amdgpu_irq_add_id(adev
, AMDGPU_IH_CLIENTID_LEGACY
, 231, &adev
->pm
.dpm
.thermal
.irq
);
7711 /* default to balanced state */
7712 adev
->pm
.dpm
.state
= POWER_STATE_TYPE_BALANCED
;
7713 adev
->pm
.dpm
.user_state
= POWER_STATE_TYPE_BALANCED
;
7714 adev
->pm
.dpm
.forced_level
= AMD_DPM_FORCED_LEVEL_AUTO
;
7715 adev
->pm
.default_sclk
= adev
->clock
.default_sclk
;
7716 adev
->pm
.default_mclk
= adev
->clock
.default_mclk
;
7717 adev
->pm
.current_sclk
= adev
->clock
.default_sclk
;
7718 adev
->pm
.current_mclk
= adev
->clock
.default_mclk
;
7719 adev
->pm
.int_thermal_type
= THERMAL_TYPE_NONE
;
7721 if (amdgpu_dpm
== 0)
7724 ret
= si_dpm_init_microcode(adev
);
7728 INIT_WORK(&adev
->pm
.dpm
.thermal
.work
, amdgpu_dpm_thermal_work_handler
);
7729 mutex_lock(&adev
->pm
.mutex
);
7730 ret
= si_dpm_init(adev
);
7733 adev
->pm
.dpm
.current_ps
= adev
->pm
.dpm
.requested_ps
= adev
->pm
.dpm
.boot_ps
;
7734 if (amdgpu_dpm
== 1)
7735 amdgpu_pm_print_power_states(adev
);
7736 mutex_unlock(&adev
->pm
.mutex
);
7737 DRM_INFO("amdgpu: dpm initialized\n");
7743 mutex_unlock(&adev
->pm
.mutex
);
7744 DRM_ERROR("amdgpu: dpm initialization failed\n");
7748 static int si_dpm_sw_fini(void *handle
)
7750 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
7752 flush_work(&adev
->pm
.dpm
.thermal
.work
);
7754 mutex_lock(&adev
->pm
.mutex
);
7755 amdgpu_pm_sysfs_fini(adev
);
7757 mutex_unlock(&adev
->pm
.mutex
);
7762 static int si_dpm_hw_init(void *handle
)
7766 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
7771 mutex_lock(&adev
->pm
.mutex
);
7772 si_dpm_setup_asic(adev
);
7773 ret
= si_dpm_enable(adev
);
7775 adev
->pm
.dpm_enabled
= false;
7777 adev
->pm
.dpm_enabled
= true;
7778 mutex_unlock(&adev
->pm
.mutex
);
7783 static int si_dpm_hw_fini(void *handle
)
7785 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
7787 if (adev
->pm
.dpm_enabled
) {
7788 mutex_lock(&adev
->pm
.mutex
);
7789 si_dpm_disable(adev
);
7790 mutex_unlock(&adev
->pm
.mutex
);
7796 static int si_dpm_suspend(void *handle
)
7798 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
7800 if (adev
->pm
.dpm_enabled
) {
7801 mutex_lock(&adev
->pm
.mutex
);
7803 si_dpm_disable(adev
);
7804 /* reset the power state */
7805 adev
->pm
.dpm
.current_ps
= adev
->pm
.dpm
.requested_ps
= adev
->pm
.dpm
.boot_ps
;
7806 mutex_unlock(&adev
->pm
.mutex
);
7811 static int si_dpm_resume(void *handle
)
7814 struct amdgpu_device
*adev
= (struct amdgpu_device
*)handle
;
7816 if (adev
->pm
.dpm_enabled
) {
7817 /* asic init will reset to the boot state */
7818 mutex_lock(&adev
->pm
.mutex
);
7819 si_dpm_setup_asic(adev
);
7820 ret
= si_dpm_enable(adev
);
7822 adev
->pm
.dpm_enabled
= false;
7824 adev
->pm
.dpm_enabled
= true;
7825 mutex_unlock(&adev
->pm
.mutex
);
7826 if (adev
->pm
.dpm_enabled
)
7827 amdgpu_pm_compute_clocks(adev
);
7832 static bool si_dpm_is_idle(void *handle
)
7838 static int si_dpm_wait_for_idle(void *handle
)
7844 static int si_dpm_soft_reset(void *handle
)
7849 static int si_dpm_set_clockgating_state(void *handle
,
7850 enum amd_clockgating_state state
)
7855 static int si_dpm_set_powergating_state(void *handle
,
7856 enum amd_powergating_state state
)
7861 /* get temperature in millidegrees */
7862 static int si_dpm_get_temp(struct amdgpu_device
*adev
)
7865 int actual_temp
= 0;
7867 temp
= (RREG32(CG_MULT_THERMAL_STATUS
) & CTF_TEMP_MASK
) >>
7873 actual_temp
= temp
& 0x1ff;
7875 actual_temp
= (actual_temp
* 1000);
7880 static u32
si_dpm_get_sclk(struct amdgpu_device
*adev
, bool low
)
7882 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
7883 struct si_ps
*requested_state
= si_get_ps(&eg_pi
->requested_rps
);
7886 return requested_state
->performance_levels
[0].sclk
;
7888 return requested_state
->performance_levels
[requested_state
->performance_level_count
- 1].sclk
;
7891 static u32
si_dpm_get_mclk(struct amdgpu_device
*adev
, bool low
)
7893 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
7894 struct si_ps
*requested_state
= si_get_ps(&eg_pi
->requested_rps
);
7897 return requested_state
->performance_levels
[0].mclk
;
7899 return requested_state
->performance_levels
[requested_state
->performance_level_count
- 1].mclk
;
7902 static void si_dpm_print_power_state(struct amdgpu_device
*adev
,
7903 struct amdgpu_ps
*rps
)
7905 struct si_ps
*ps
= si_get_ps(rps
);
7906 struct rv7xx_pl
*pl
;
7909 amdgpu_dpm_print_class_info(rps
->class, rps
->class2
);
7910 amdgpu_dpm_print_cap_info(rps
->caps
);
7911 DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps
->vclk
, rps
->dclk
);
7912 for (i
= 0; i
< ps
->performance_level_count
; i
++) {
7913 pl
= &ps
->performance_levels
[i
];
7914 if (adev
->asic_type
>= CHIP_TAHITI
)
7915 DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
7916 i
, pl
->sclk
, pl
->mclk
, pl
->vddc
, pl
->vddci
, pl
->pcie_gen
+ 1);
7918 DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u\n",
7919 i
, pl
->sclk
, pl
->mclk
, pl
->vddc
, pl
->vddci
);
7921 amdgpu_dpm_print_ps_status(adev
, rps
);
/*
 * si_dpm_early_init - install the DPM and thermal-IRQ function tables.
 */
static int si_dpm_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	si_dpm_set_dpm_funcs(adev);
	si_dpm_set_irq_funcs(adev);
	return 0;
}
7934 static inline bool si_are_power_levels_equal(const struct rv7xx_pl
*si_cpl1
,
7935 const struct rv7xx_pl
*si_cpl2
)
7937 return ((si_cpl1
->mclk
== si_cpl2
->mclk
) &&
7938 (si_cpl1
->sclk
== si_cpl2
->sclk
) &&
7939 (si_cpl1
->pcie_gen
== si_cpl2
->pcie_gen
) &&
7940 (si_cpl1
->vddc
== si_cpl2
->vddc
) &&
7941 (si_cpl1
->vddci
== si_cpl2
->vddci
));
7944 static int si_check_state_equal(struct amdgpu_device
*adev
,
7945 struct amdgpu_ps
*cps
,
7946 struct amdgpu_ps
*rps
,
7949 struct si_ps
*si_cps
;
7950 struct si_ps
*si_rps
;
7953 if (adev
== NULL
|| cps
== NULL
|| rps
== NULL
|| equal
== NULL
)
7956 si_cps
= si_get_ps(cps
);
7957 si_rps
= si_get_ps(rps
);
7959 if (si_cps
== NULL
) {
7960 printk("si_cps is NULL\n");
7965 if (si_cps
->performance_level_count
!= si_rps
->performance_level_count
) {
7970 for (i
= 0; i
< si_cps
->performance_level_count
; i
++) {
7971 if (!si_are_power_levels_equal(&(si_cps
->performance_levels
[i
]),
7972 &(si_rps
->performance_levels
[i
]))) {
7978 /* If all performance levels are the same try to use the UVD clocks to break the tie.*/
7979 *equal
= ((cps
->vclk
== rps
->vclk
) && (cps
->dclk
== rps
->dclk
));
7980 *equal
&= ((cps
->evclk
== rps
->evclk
) && (cps
->ecclk
== rps
->ecclk
));
7985 static int si_dpm_read_sensor(struct amdgpu_device
*adev
, int idx
,
7986 void *value
, int *size
)
7988 struct evergreen_power_info
*eg_pi
= evergreen_get_pi(adev
);
7989 struct amdgpu_ps
*rps
= &eg_pi
->current_rps
;
7990 struct si_ps
*ps
= si_get_ps(rps
);
7991 uint32_t sclk
, mclk
;
7993 (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX
) & CURRENT_STATE_INDEX_MASK
) >>
7994 CURRENT_STATE_INDEX_SHIFT
;
7996 /* size must be at least 4 bytes for all sensors */
8001 case AMDGPU_PP_SENSOR_GFX_SCLK
:
8002 if (pl_index
< ps
->performance_level_count
) {
8003 sclk
= ps
->performance_levels
[pl_index
].sclk
;
8004 *((uint32_t *)value
) = sclk
;
8009 case AMDGPU_PP_SENSOR_GFX_MCLK
:
8010 if (pl_index
< ps
->performance_level_count
) {
8011 mclk
= ps
->performance_levels
[pl_index
].mclk
;
8012 *((uint32_t *)value
) = mclk
;
8017 case AMDGPU_PP_SENSOR_GPU_TEMP
:
8018 *((uint32_t *)value
) = si_dpm_get_temp(adev
);
8026 const struct amd_ip_funcs si_dpm_ip_funcs
= {
8028 .early_init
= si_dpm_early_init
,
8029 .late_init
= si_dpm_late_init
,
8030 .sw_init
= si_dpm_sw_init
,
8031 .sw_fini
= si_dpm_sw_fini
,
8032 .hw_init
= si_dpm_hw_init
,
8033 .hw_fini
= si_dpm_hw_fini
,
8034 .suspend
= si_dpm_suspend
,
8035 .resume
= si_dpm_resume
,
8036 .is_idle
= si_dpm_is_idle
,
8037 .wait_for_idle
= si_dpm_wait_for_idle
,
8038 .soft_reset
= si_dpm_soft_reset
,
8039 .set_clockgating_state
= si_dpm_set_clockgating_state
,
8040 .set_powergating_state
= si_dpm_set_powergating_state
,
8043 static const struct amdgpu_dpm_funcs si_dpm_funcs
= {
8044 .get_temperature
= &si_dpm_get_temp
,
8045 .pre_set_power_state
= &si_dpm_pre_set_power_state
,
8046 .set_power_state
= &si_dpm_set_power_state
,
8047 .post_set_power_state
= &si_dpm_post_set_power_state
,
8048 .display_configuration_changed
= &si_dpm_display_configuration_changed
,
8049 .get_sclk
= &si_dpm_get_sclk
,
8050 .get_mclk
= &si_dpm_get_mclk
,
8051 .print_power_state
= &si_dpm_print_power_state
,
8052 .debugfs_print_current_performance_level
= &si_dpm_debugfs_print_current_performance_level
,
8053 .force_performance_level
= &si_dpm_force_performance_level
,
8054 .vblank_too_short
= &si_dpm_vblank_too_short
,
8055 .set_fan_control_mode
= &si_dpm_set_fan_control_mode
,
8056 .get_fan_control_mode
= &si_dpm_get_fan_control_mode
,
8057 .set_fan_speed_percent
= &si_dpm_set_fan_speed_percent
,
8058 .get_fan_speed_percent
= &si_dpm_get_fan_speed_percent
,
8059 .check_state_equal
= &si_check_state_equal
,
8060 .get_vce_clock_state
= amdgpu_get_vce_clock_state
,
8061 .read_sensor
= &si_dpm_read_sensor
,
8064 static void si_dpm_set_dpm_funcs(struct amdgpu_device
*adev
)
8066 if (adev
->pm
.funcs
== NULL
)
8067 adev
->pm
.funcs
= &si_dpm_funcs
;
8070 static const struct amdgpu_irq_src_funcs si_dpm_irq_funcs
= {
8071 .set
= si_dpm_set_interrupt_state
,
8072 .process
= si_dpm_process_interrupt
,
8075 static void si_dpm_set_irq_funcs(struct amdgpu_device
*adev
)
8077 adev
->pm
.dpm
.thermal
.irq
.num_types
= AMDGPU_THERMAL_IRQ_LAST
;
8078 adev
->pm
.dpm
.thermal
.irq
.funcs
= &si_dpm_irq_funcs
;