/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>

#include "amdgpu_ih.h"
#include "amdgpu_gfx.h"

#include "cik_structs.h"

#include "amdgpu_ucode.h"
#include "clearstate_ci.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gca/gfx_7_0_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"

#include "gmc/gmc_7_0_d.h"
#include "gmc/gmc_7_0_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#define NUM_SIMD_PER_CU 0x4 /* missing from the gfx_7 IP headers */

#define GFX7_NUM_GFX_RINGS	1
#define GFX7_MEC_HPD_SIZE	2048
static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev);
60 MODULE_FIRMWARE("amdgpu/bonaire_pfp.bin");
61 MODULE_FIRMWARE("amdgpu/bonaire_me.bin");
62 MODULE_FIRMWARE("amdgpu/bonaire_ce.bin");
63 MODULE_FIRMWARE("amdgpu/bonaire_rlc.bin");
64 MODULE_FIRMWARE("amdgpu/bonaire_mec.bin");
66 MODULE_FIRMWARE("amdgpu/hawaii_pfp.bin");
67 MODULE_FIRMWARE("amdgpu/hawaii_me.bin");
68 MODULE_FIRMWARE("amdgpu/hawaii_ce.bin");
69 MODULE_FIRMWARE("amdgpu/hawaii_rlc.bin");
70 MODULE_FIRMWARE("amdgpu/hawaii_mec.bin");
72 MODULE_FIRMWARE("amdgpu/kaveri_pfp.bin");
73 MODULE_FIRMWARE("amdgpu/kaveri_me.bin");
74 MODULE_FIRMWARE("amdgpu/kaveri_ce.bin");
75 MODULE_FIRMWARE("amdgpu/kaveri_rlc.bin");
76 MODULE_FIRMWARE("amdgpu/kaveri_mec.bin");
77 MODULE_FIRMWARE("amdgpu/kaveri_mec2.bin");
79 MODULE_FIRMWARE("amdgpu/kabini_pfp.bin");
80 MODULE_FIRMWARE("amdgpu/kabini_me.bin");
81 MODULE_FIRMWARE("amdgpu/kabini_ce.bin");
82 MODULE_FIRMWARE("amdgpu/kabini_rlc.bin");
83 MODULE_FIRMWARE("amdgpu/kabini_mec.bin");
85 MODULE_FIRMWARE("amdgpu/mullins_pfp.bin");
86 MODULE_FIRMWARE("amdgpu/mullins_me.bin");
87 MODULE_FIRMWARE("amdgpu/mullins_ce.bin");
88 MODULE_FIRMWARE("amdgpu/mullins_rlc.bin");
89 MODULE_FIRMWARE("amdgpu/mullins_mec.bin");
static const struct amdgpu_gds_reg_offset amdgpu_gds_reg_offset[] =
{
	{mmGDS_VMID0_BASE, mmGDS_VMID0_SIZE, mmGDS_GWS_VMID0, mmGDS_OA_VMID0},
	{mmGDS_VMID1_BASE, mmGDS_VMID1_SIZE, mmGDS_GWS_VMID1, mmGDS_OA_VMID1},
	{mmGDS_VMID2_BASE, mmGDS_VMID2_SIZE, mmGDS_GWS_VMID2, mmGDS_OA_VMID2},
	{mmGDS_VMID3_BASE, mmGDS_VMID3_SIZE, mmGDS_GWS_VMID3, mmGDS_OA_VMID3},
	{mmGDS_VMID4_BASE, mmGDS_VMID4_SIZE, mmGDS_GWS_VMID4, mmGDS_OA_VMID4},
	{mmGDS_VMID5_BASE, mmGDS_VMID5_SIZE, mmGDS_GWS_VMID5, mmGDS_OA_VMID5},
	{mmGDS_VMID6_BASE, mmGDS_VMID6_SIZE, mmGDS_GWS_VMID6, mmGDS_OA_VMID6},
	{mmGDS_VMID7_BASE, mmGDS_VMID7_SIZE, mmGDS_GWS_VMID7, mmGDS_OA_VMID7},
	{mmGDS_VMID8_BASE, mmGDS_VMID8_SIZE, mmGDS_GWS_VMID8, mmGDS_OA_VMID8},
	{mmGDS_VMID9_BASE, mmGDS_VMID9_SIZE, mmGDS_GWS_VMID9, mmGDS_OA_VMID9},
	{mmGDS_VMID10_BASE, mmGDS_VMID10_SIZE, mmGDS_GWS_VMID10, mmGDS_OA_VMID10},
	{mmGDS_VMID11_BASE, mmGDS_VMID11_SIZE, mmGDS_GWS_VMID11, mmGDS_OA_VMID11},
	{mmGDS_VMID12_BASE, mmGDS_VMID12_SIZE, mmGDS_GWS_VMID12, mmGDS_OA_VMID12},
	{mmGDS_VMID13_BASE, mmGDS_VMID13_SIZE, mmGDS_GWS_VMID13, mmGDS_OA_VMID13},
	{mmGDS_VMID14_BASE, mmGDS_VMID14_SIZE, mmGDS_GWS_VMID14, mmGDS_OA_VMID14},
	{mmGDS_VMID15_BASE, mmGDS_VMID15_SIZE, mmGDS_GWS_VMID15, mmGDS_OA_VMID15}
};
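/*
 * Each entry in the RLC save/restore lists below packs two pieces of
 * information into one u32: the upper 16 bits look like a GRBM_GFX_INDEX
 * style instance selector (0x0e00 presumably broadcast, 0x4e00-0xbe00
 * individual SE/SH instances, 0x0000/0x0001 specific indices), and the
 * lower 16 bits hold the dword offset of the register to save/restore
 * (byte address >> 2).  This reading is inferred from the values; the RLC
 * microcode defines the authoritative format.
 */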
static const u32 spectre_rlc_save_restore_register_list[] =
{
	(0x0e00 << 16) | (0xc12c >> 2),
	(0x0e00 << 16) | (0xc140 >> 2),
	(0x0e00 << 16) | (0xc150 >> 2),
	(0x0e00 << 16) | (0xc15c >> 2),
	(0x0e00 << 16) | (0xc168 >> 2),
	(0x0e00 << 16) | (0xc170 >> 2),
	(0x0e00 << 16) | (0xc178 >> 2),
	(0x0e00 << 16) | (0xc204 >> 2),
	(0x0e00 << 16) | (0xc2b4 >> 2),
	(0x0e00 << 16) | (0xc2b8 >> 2),
	(0x0e00 << 16) | (0xc2bc >> 2),
	(0x0e00 << 16) | (0xc2c0 >> 2),
	(0x0e00 << 16) | (0x8228 >> 2),
	(0x0e00 << 16) | (0x829c >> 2),
	(0x0e00 << 16) | (0x869c >> 2),
	(0x0600 << 16) | (0x98f4 >> 2),
	(0x0e00 << 16) | (0x98f8 >> 2),
	(0x0e00 << 16) | (0x9900 >> 2),
	(0x0e00 << 16) | (0xc260 >> 2),
	(0x0e00 << 16) | (0x90e8 >> 2),
	(0x0e00 << 16) | (0x3c000 >> 2),
	(0x0e00 << 16) | (0x3c00c >> 2),
	(0x0e00 << 16) | (0x8c1c >> 2),
	(0x0e00 << 16) | (0x9700 >> 2),
	(0x0e00 << 16) | (0xcd20 >> 2),
	(0x4e00 << 16) | (0xcd20 >> 2),
	(0x5e00 << 16) | (0xcd20 >> 2),
	(0x6e00 << 16) | (0xcd20 >> 2),
	(0x7e00 << 16) | (0xcd20 >> 2),
	(0x8e00 << 16) | (0xcd20 >> 2),
	(0x9e00 << 16) | (0xcd20 >> 2),
	(0xae00 << 16) | (0xcd20 >> 2),
	(0xbe00 << 16) | (0xcd20 >> 2),
	(0x0e00 << 16) | (0x89bc >> 2),
	(0x0e00 << 16) | (0x8900 >> 2),
	(0x0e00 << 16) | (0xc130 >> 2),
	(0x0e00 << 16) | (0xc134 >> 2),
	(0x0e00 << 16) | (0xc1fc >> 2),
	(0x0e00 << 16) | (0xc208 >> 2),
	(0x0e00 << 16) | (0xc264 >> 2),
	(0x0e00 << 16) | (0xc268 >> 2),
	(0x0e00 << 16) | (0xc26c >> 2),
	(0x0e00 << 16) | (0xc270 >> 2),
	(0x0e00 << 16) | (0xc274 >> 2),
	(0x0e00 << 16) | (0xc278 >> 2),
	(0x0e00 << 16) | (0xc27c >> 2),
	(0x0e00 << 16) | (0xc280 >> 2),
	(0x0e00 << 16) | (0xc284 >> 2),
	(0x0e00 << 16) | (0xc288 >> 2),
	(0x0e00 << 16) | (0xc28c >> 2),
	(0x0e00 << 16) | (0xc290 >> 2),
	(0x0e00 << 16) | (0xc294 >> 2),
	(0x0e00 << 16) | (0xc298 >> 2),
	(0x0e00 << 16) | (0xc29c >> 2),
	(0x0e00 << 16) | (0xc2a0 >> 2),
	(0x0e00 << 16) | (0xc2a4 >> 2),
	(0x0e00 << 16) | (0xc2a8 >> 2),
	(0x0e00 << 16) | (0xc2ac >> 2),
	(0x0e00 << 16) | (0xc2b0 >> 2),
	(0x0e00 << 16) | (0x301d0 >> 2),
	(0x0e00 << 16) | (0x30238 >> 2),
	(0x0e00 << 16) | (0x30250 >> 2),
	(0x0e00 << 16) | (0x30254 >> 2),
	(0x0e00 << 16) | (0x30258 >> 2),
	(0x0e00 << 16) | (0x3025c >> 2),
	(0x4e00 << 16) | (0xc900 >> 2),
	(0x5e00 << 16) | (0xc900 >> 2),
	(0x6e00 << 16) | (0xc900 >> 2),
	(0x7e00 << 16) | (0xc900 >> 2),
	(0x8e00 << 16) | (0xc900 >> 2),
	(0x9e00 << 16) | (0xc900 >> 2),
	(0xae00 << 16) | (0xc900 >> 2),
	(0xbe00 << 16) | (0xc900 >> 2),
	(0x4e00 << 16) | (0xc904 >> 2),
	(0x5e00 << 16) | (0xc904 >> 2),
	(0x6e00 << 16) | (0xc904 >> 2),
	(0x7e00 << 16) | (0xc904 >> 2),
	(0x8e00 << 16) | (0xc904 >> 2),
	(0x9e00 << 16) | (0xc904 >> 2),
	(0xae00 << 16) | (0xc904 >> 2),
	(0xbe00 << 16) | (0xc904 >> 2),
	(0x4e00 << 16) | (0xc908 >> 2),
	(0x5e00 << 16) | (0xc908 >> 2),
	(0x6e00 << 16) | (0xc908 >> 2),
	(0x7e00 << 16) | (0xc908 >> 2),
	(0x8e00 << 16) | (0xc908 >> 2),
	(0x9e00 << 16) | (0xc908 >> 2),
	(0xae00 << 16) | (0xc908 >> 2),
	(0xbe00 << 16) | (0xc908 >> 2),
	(0x4e00 << 16) | (0xc90c >> 2),
	(0x5e00 << 16) | (0xc90c >> 2),
	(0x6e00 << 16) | (0xc90c >> 2),
	(0x7e00 << 16) | (0xc90c >> 2),
	(0x8e00 << 16) | (0xc90c >> 2),
	(0x9e00 << 16) | (0xc90c >> 2),
	(0xae00 << 16) | (0xc90c >> 2),
	(0xbe00 << 16) | (0xc90c >> 2),
	(0x4e00 << 16) | (0xc910 >> 2),
	(0x5e00 << 16) | (0xc910 >> 2),
	(0x6e00 << 16) | (0xc910 >> 2),
	(0x7e00 << 16) | (0xc910 >> 2),
	(0x8e00 << 16) | (0xc910 >> 2),
	(0x9e00 << 16) | (0xc910 >> 2),
	(0xae00 << 16) | (0xc910 >> 2),
	(0xbe00 << 16) | (0xc910 >> 2),
	(0x0e00 << 16) | (0xc99c >> 2),
	(0x0e00 << 16) | (0x9834 >> 2),
	(0x0000 << 16) | (0x30f00 >> 2),
	(0x0001 << 16) | (0x30f00 >> 2),
	(0x0000 << 16) | (0x30f04 >> 2),
	(0x0001 << 16) | (0x30f04 >> 2),
	(0x0000 << 16) | (0x30f08 >> 2),
	(0x0001 << 16) | (0x30f08 >> 2),
	(0x0000 << 16) | (0x30f0c >> 2),
	(0x0001 << 16) | (0x30f0c >> 2),
	(0x0600 << 16) | (0x9b7c >> 2),
	(0x0e00 << 16) | (0x8a14 >> 2),
	(0x0e00 << 16) | (0x8a18 >> 2),
	(0x0600 << 16) | (0x30a00 >> 2),
	(0x0e00 << 16) | (0x8bf0 >> 2),
	(0x0e00 << 16) | (0x8bcc >> 2),
	(0x0e00 << 16) | (0x8b24 >> 2),
	(0x0e00 << 16) | (0x30a04 >> 2),
	(0x0600 << 16) | (0x30a10 >> 2),
	(0x0600 << 16) | (0x30a14 >> 2),
	(0x0600 << 16) | (0x30a18 >> 2),
	(0x0600 << 16) | (0x30a2c >> 2),
	(0x0e00 << 16) | (0xc700 >> 2),
	(0x0e00 << 16) | (0xc704 >> 2),
	(0x0e00 << 16) | (0xc708 >> 2),
	(0x0e00 << 16) | (0xc768 >> 2),
	(0x0400 << 16) | (0xc770 >> 2),
	(0x0400 << 16) | (0xc774 >> 2),
	(0x0400 << 16) | (0xc778 >> 2),
	(0x0400 << 16) | (0xc77c >> 2),
	(0x0400 << 16) | (0xc780 >> 2),
	(0x0400 << 16) | (0xc784 >> 2),
	(0x0400 << 16) | (0xc788 >> 2),
	(0x0400 << 16) | (0xc78c >> 2),
	(0x0400 << 16) | (0xc798 >> 2),
	(0x0400 << 16) | (0xc79c >> 2),
	(0x0400 << 16) | (0xc7a0 >> 2),
	(0x0400 << 16) | (0xc7a4 >> 2),
	(0x0400 << 16) | (0xc7a8 >> 2),
	(0x0400 << 16) | (0xc7ac >> 2),
	(0x0400 << 16) | (0xc7b0 >> 2),
	(0x0400 << 16) | (0xc7b4 >> 2),
	(0x0e00 << 16) | (0x9100 >> 2),
	(0x0e00 << 16) | (0x3c010 >> 2),
	(0x0e00 << 16) | (0x92a8 >> 2),
	(0x0e00 << 16) | (0x92ac >> 2),
	(0x0e00 << 16) | (0x92b4 >> 2),
	(0x0e00 << 16) | (0x92b8 >> 2),
	(0x0e00 << 16) | (0x92bc >> 2),
	(0x0e00 << 16) | (0x92c0 >> 2),
	(0x0e00 << 16) | (0x92c4 >> 2),
	(0x0e00 << 16) | (0x92c8 >> 2),
	(0x0e00 << 16) | (0x92cc >> 2),
	(0x0e00 << 16) | (0x92d0 >> 2),
	(0x0e00 << 16) | (0x8c00 >> 2),
	(0x0e00 << 16) | (0x8c04 >> 2),
	(0x0e00 << 16) | (0x8c20 >> 2),
	(0x0e00 << 16) | (0x8c38 >> 2),
	(0x0e00 << 16) | (0x8c3c >> 2),
	(0x0e00 << 16) | (0xae00 >> 2),
	(0x0e00 << 16) | (0x9604 >> 2),
	(0x0e00 << 16) | (0xac08 >> 2),
	(0x0e00 << 16) | (0xac0c >> 2),
	(0x0e00 << 16) | (0xac10 >> 2),
	(0x0e00 << 16) | (0xac14 >> 2),
	(0x0e00 << 16) | (0xac58 >> 2),
	(0x0e00 << 16) | (0xac68 >> 2),
	(0x0e00 << 16) | (0xac6c >> 2),
	(0x0e00 << 16) | (0xac70 >> 2),
	(0x0e00 << 16) | (0xac74 >> 2),
	(0x0e00 << 16) | (0xac78 >> 2),
	(0x0e00 << 16) | (0xac7c >> 2),
	(0x0e00 << 16) | (0xac80 >> 2),
	(0x0e00 << 16) | (0xac84 >> 2),
	(0x0e00 << 16) | (0xac88 >> 2),
	(0x0e00 << 16) | (0xac8c >> 2),
	(0x0e00 << 16) | (0x970c >> 2),
	(0x0e00 << 16) | (0x9714 >> 2),
	(0x0e00 << 16) | (0x9718 >> 2),
	(0x0e00 << 16) | (0x971c >> 2),
	(0x0e00 << 16) | (0x31068 >> 2),
	(0x4e00 << 16) | (0x31068 >> 2),
	(0x5e00 << 16) | (0x31068 >> 2),
	(0x6e00 << 16) | (0x31068 >> 2),
	(0x7e00 << 16) | (0x31068 >> 2),
	(0x8e00 << 16) | (0x31068 >> 2),
	(0x9e00 << 16) | (0x31068 >> 2),
	(0xae00 << 16) | (0x31068 >> 2),
	(0xbe00 << 16) | (0x31068 >> 2),
	(0x0e00 << 16) | (0xcd10 >> 2),
	(0x0e00 << 16) | (0xcd14 >> 2),
	(0x0e00 << 16) | (0x88b0 >> 2),
	(0x0e00 << 16) | (0x88b4 >> 2),
	(0x0e00 << 16) | (0x88b8 >> 2),
	(0x0e00 << 16) | (0x88bc >> 2),
	(0x0400 << 16) | (0x89c0 >> 2),
	(0x0e00 << 16) | (0x88c4 >> 2),
	(0x0e00 << 16) | (0x88c8 >> 2),
	(0x0e00 << 16) | (0x88d0 >> 2),
	(0x0e00 << 16) | (0x88d4 >> 2),
	(0x0e00 << 16) | (0x88d8 >> 2),
	(0x0e00 << 16) | (0x8980 >> 2),
	(0x0e00 << 16) | (0x30938 >> 2),
	(0x0e00 << 16) | (0x3093c >> 2),
	(0x0e00 << 16) | (0x30940 >> 2),
	(0x0e00 << 16) | (0x89a0 >> 2),
	(0x0e00 << 16) | (0x30900 >> 2),
	(0x0e00 << 16) | (0x30904 >> 2),
	(0x0e00 << 16) | (0x89b4 >> 2),
	(0x0e00 << 16) | (0x3c210 >> 2),
	(0x0e00 << 16) | (0x3c214 >> 2),
	(0x0e00 << 16) | (0x3c218 >> 2),
	(0x0e00 << 16) | (0x8904 >> 2),
	(0x0e00 << 16) | (0x8c28 >> 2),
	(0x0e00 << 16) | (0x8c2c >> 2),
	(0x0e00 << 16) | (0x8c30 >> 2),
	(0x0e00 << 16) | (0x8c34 >> 2),
	(0x0e00 << 16) | (0x9600 >> 2),
};
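/*
 * Kalindi (small APU) variant of the list above; note that it only covers
 * four per-instance selectors (0x4e00-0x7e00) where the Spectre list covers
 * eight (0x4e00-0xbe00), matching the smaller parts' instance counts.
 */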
static const u32 kalindi_rlc_save_restore_register_list[] =
{
	(0x0e00 << 16) | (0xc12c >> 2),
	(0x0e00 << 16) | (0xc140 >> 2),
	(0x0e00 << 16) | (0xc150 >> 2),
	(0x0e00 << 16) | (0xc15c >> 2),
	(0x0e00 << 16) | (0xc168 >> 2),
	(0x0e00 << 16) | (0xc170 >> 2),
	(0x0e00 << 16) | (0xc204 >> 2),
	(0x0e00 << 16) | (0xc2b4 >> 2),
	(0x0e00 << 16) | (0xc2b8 >> 2),
	(0x0e00 << 16) | (0xc2bc >> 2),
	(0x0e00 << 16) | (0xc2c0 >> 2),
	(0x0e00 << 16) | (0x8228 >> 2),
	(0x0e00 << 16) | (0x829c >> 2),
	(0x0e00 << 16) | (0x869c >> 2),
	(0x0600 << 16) | (0x98f4 >> 2),
	(0x0e00 << 16) | (0x98f8 >> 2),
	(0x0e00 << 16) | (0x9900 >> 2),
	(0x0e00 << 16) | (0xc260 >> 2),
	(0x0e00 << 16) | (0x90e8 >> 2),
	(0x0e00 << 16) | (0x3c000 >> 2),
	(0x0e00 << 16) | (0x3c00c >> 2),
	(0x0e00 << 16) | (0x8c1c >> 2),
	(0x0e00 << 16) | (0x9700 >> 2),
	(0x0e00 << 16) | (0xcd20 >> 2),
	(0x4e00 << 16) | (0xcd20 >> 2),
	(0x5e00 << 16) | (0xcd20 >> 2),
	(0x6e00 << 16) | (0xcd20 >> 2),
	(0x7e00 << 16) | (0xcd20 >> 2),
	(0x0e00 << 16) | (0x89bc >> 2),
	(0x0e00 << 16) | (0x8900 >> 2),
	(0x0e00 << 16) | (0xc130 >> 2),
	(0x0e00 << 16) | (0xc134 >> 2),
	(0x0e00 << 16) | (0xc1fc >> 2),
	(0x0e00 << 16) | (0xc208 >> 2),
	(0x0e00 << 16) | (0xc264 >> 2),
	(0x0e00 << 16) | (0xc268 >> 2),
	(0x0e00 << 16) | (0xc26c >> 2),
	(0x0e00 << 16) | (0xc270 >> 2),
	(0x0e00 << 16) | (0xc274 >> 2),
	(0x0e00 << 16) | (0xc28c >> 2),
	(0x0e00 << 16) | (0xc290 >> 2),
	(0x0e00 << 16) | (0xc294 >> 2),
	(0x0e00 << 16) | (0xc298 >> 2),
	(0x0e00 << 16) | (0xc2a0 >> 2),
	(0x0e00 << 16) | (0xc2a4 >> 2),
	(0x0e00 << 16) | (0xc2a8 >> 2),
	(0x0e00 << 16) | (0xc2ac >> 2),
	(0x0e00 << 16) | (0x301d0 >> 2),
	(0x0e00 << 16) | (0x30238 >> 2),
	(0x0e00 << 16) | (0x30250 >> 2),
	(0x0e00 << 16) | (0x30254 >> 2),
	(0x0e00 << 16) | (0x30258 >> 2),
	(0x0e00 << 16) | (0x3025c >> 2),
	(0x4e00 << 16) | (0xc900 >> 2),
	(0x5e00 << 16) | (0xc900 >> 2),
	(0x6e00 << 16) | (0xc900 >> 2),
	(0x7e00 << 16) | (0xc900 >> 2),
	(0x4e00 << 16) | (0xc904 >> 2),
	(0x5e00 << 16) | (0xc904 >> 2),
	(0x6e00 << 16) | (0xc904 >> 2),
	(0x7e00 << 16) | (0xc904 >> 2),
	(0x4e00 << 16) | (0xc908 >> 2),
	(0x5e00 << 16) | (0xc908 >> 2),
	(0x6e00 << 16) | (0xc908 >> 2),
	(0x7e00 << 16) | (0xc908 >> 2),
	(0x4e00 << 16) | (0xc90c >> 2),
	(0x5e00 << 16) | (0xc90c >> 2),
	(0x6e00 << 16) | (0xc90c >> 2),
	(0x7e00 << 16) | (0xc90c >> 2),
	(0x4e00 << 16) | (0xc910 >> 2),
	(0x5e00 << 16) | (0xc910 >> 2),
	(0x6e00 << 16) | (0xc910 >> 2),
	(0x7e00 << 16) | (0xc910 >> 2),
	(0x0e00 << 16) | (0xc99c >> 2),
	(0x0e00 << 16) | (0x9834 >> 2),
	(0x0000 << 16) | (0x30f00 >> 2),
	(0x0000 << 16) | (0x30f04 >> 2),
	(0x0000 << 16) | (0x30f08 >> 2),
	(0x0000 << 16) | (0x30f0c >> 2),
	(0x0600 << 16) | (0x9b7c >> 2),
	(0x0e00 << 16) | (0x8a14 >> 2),
	(0x0e00 << 16) | (0x8a18 >> 2),
	(0x0600 << 16) | (0x30a00 >> 2),
	(0x0e00 << 16) | (0x8bf0 >> 2),
	(0x0e00 << 16) | (0x8bcc >> 2),
	(0x0e00 << 16) | (0x8b24 >> 2),
	(0x0e00 << 16) | (0x30a04 >> 2),
	(0x0600 << 16) | (0x30a10 >> 2),
	(0x0600 << 16) | (0x30a14 >> 2),
	(0x0600 << 16) | (0x30a18 >> 2),
	(0x0600 << 16) | (0x30a2c >> 2),
	(0x0e00 << 16) | (0xc700 >> 2),
	(0x0e00 << 16) | (0xc704 >> 2),
	(0x0e00 << 16) | (0xc708 >> 2),
	(0x0e00 << 16) | (0xc768 >> 2),
	(0x0400 << 16) | (0xc770 >> 2),
	(0x0400 << 16) | (0xc774 >> 2),
	(0x0400 << 16) | (0xc798 >> 2),
	(0x0400 << 16) | (0xc79c >> 2),
	(0x0e00 << 16) | (0x9100 >> 2),
	(0x0e00 << 16) | (0x3c010 >> 2),
	(0x0e00 << 16) | (0x8c00 >> 2),
	(0x0e00 << 16) | (0x8c04 >> 2),
	(0x0e00 << 16) | (0x8c20 >> 2),
	(0x0e00 << 16) | (0x8c38 >> 2),
	(0x0e00 << 16) | (0x8c3c >> 2),
	(0x0e00 << 16) | (0xae00 >> 2),
	(0x0e00 << 16) | (0x9604 >> 2),
	(0x0e00 << 16) | (0xac08 >> 2),
	(0x0e00 << 16) | (0xac0c >> 2),
	(0x0e00 << 16) | (0xac10 >> 2),
	(0x0e00 << 16) | (0xac14 >> 2),
	(0x0e00 << 16) | (0xac58 >> 2),
	(0x0e00 << 16) | (0xac68 >> 2),
	(0x0e00 << 16) | (0xac6c >> 2),
	(0x0e00 << 16) | (0xac70 >> 2),
	(0x0e00 << 16) | (0xac74 >> 2),
	(0x0e00 << 16) | (0xac78 >> 2),
	(0x0e00 << 16) | (0xac7c >> 2),
	(0x0e00 << 16) | (0xac80 >> 2),
	(0x0e00 << 16) | (0xac84 >> 2),
	(0x0e00 << 16) | (0xac88 >> 2),
	(0x0e00 << 16) | (0xac8c >> 2),
	(0x0e00 << 16) | (0x970c >> 2),
	(0x0e00 << 16) | (0x9714 >> 2),
	(0x0e00 << 16) | (0x9718 >> 2),
	(0x0e00 << 16) | (0x971c >> 2),
	(0x0e00 << 16) | (0x31068 >> 2),
	(0x4e00 << 16) | (0x31068 >> 2),
	(0x5e00 << 16) | (0x31068 >> 2),
	(0x6e00 << 16) | (0x31068 >> 2),
	(0x7e00 << 16) | (0x31068 >> 2),
	(0x0e00 << 16) | (0xcd10 >> 2),
	(0x0e00 << 16) | (0xcd14 >> 2),
	(0x0e00 << 16) | (0x88b0 >> 2),
	(0x0e00 << 16) | (0x88b4 >> 2),
	(0x0e00 << 16) | (0x88b8 >> 2),
	(0x0e00 << 16) | (0x88bc >> 2),
	(0x0400 << 16) | (0x89c0 >> 2),
	(0x0e00 << 16) | (0x88c4 >> 2),
	(0x0e00 << 16) | (0x88c8 >> 2),
	(0x0e00 << 16) | (0x88d0 >> 2),
	(0x0e00 << 16) | (0x88d4 >> 2),
	(0x0e00 << 16) | (0x88d8 >> 2),
	(0x0e00 << 16) | (0x8980 >> 2),
	(0x0e00 << 16) | (0x30938 >> 2),
	(0x0e00 << 16) | (0x3093c >> 2),
	(0x0e00 << 16) | (0x30940 >> 2),
	(0x0e00 << 16) | (0x89a0 >> 2),
	(0x0e00 << 16) | (0x30900 >> 2),
	(0x0e00 << 16) | (0x30904 >> 2),
	(0x0e00 << 16) | (0x89b4 >> 2),
	(0x0e00 << 16) | (0x3e1fc >> 2),
	(0x0e00 << 16) | (0x3c210 >> 2),
	(0x0e00 << 16) | (0x3c214 >> 2),
	(0x0e00 << 16) | (0x3c218 >> 2),
	(0x0e00 << 16) | (0x8904 >> 2),
	(0x0e00 << 16) | (0x8c28 >> 2),
	(0x0e00 << 16) | (0x8c2c >> 2),
	(0x0e00 << 16) | (0x8c30 >> 2),
	(0x0e00 << 16) | (0x8c34 >> 2),
	(0x0e00 << 16) | (0x9600 >> 2),
};
static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev);
static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev);
static void gfx_v7_0_init_pg(struct amdgpu_device *adev);
static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev);
/**
 * gfx_v7_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gfx_v7_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		chip_name = "bonaire";
		break;
	case CHIP_HAWAII:
		chip_name = "hawaii";
		break;
	case CHIP_KAVERI:
		chip_name = "kaveri";
		break;
	case CHIP_KABINI:
		chip_name = "kabini";
		break;
	case CHIP_MULLINS:
		chip_name = "mullins";
		break;
	default:
		BUG();
	}
	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
	if (err)
		goto out;

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.me_fw);
	if (err)
		goto out;

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
	if (err)
		goto out;

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
	err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.mec_fw);
	if (err)
		goto out;

	if (adev->asic_type == CHIP_KAVERI) {
		snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
		err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
		if (err)
			goto out;
		err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
		if (err)
			goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);

out:
	if (err) {
		pr_err("gfx7: Failed to load firmware \"%s\"\n", fw_name);
		release_firmware(adev->gfx.pfp_fw);
		adev->gfx.pfp_fw = NULL;
		release_firmware(adev->gfx.me_fw);
		adev->gfx.me_fw = NULL;
		release_firmware(adev->gfx.ce_fw);
		adev->gfx.ce_fw = NULL;
		release_firmware(adev->gfx.mec_fw);
		adev->gfx.mec_fw = NULL;
		release_firmware(adev->gfx.mec2_fw);
		adev->gfx.mec2_fw = NULL;
		release_firmware(adev->gfx.rlc_fw);
		adev->gfx.rlc_fw = NULL;
	}
	return err;
}
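/*
 * Note that the error path above and gfx_v7_0_free_microcode() below
 * release every image unconditionally: release_firmware() accepts a NULL
 * pointer, so images that were never requested (e.g. mec2 on non-Kaveri
 * parts) need no special casing.
 */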
static void gfx_v7_0_free_microcode(struct amdgpu_device *adev)
{
	release_firmware(adev->gfx.pfp_fw);
	adev->gfx.pfp_fw = NULL;
	release_firmware(adev->gfx.me_fw);
	adev->gfx.me_fw = NULL;
	release_firmware(adev->gfx.ce_fw);
	adev->gfx.ce_fw = NULL;
	release_firmware(adev->gfx.mec_fw);
	adev->gfx.mec_fw = NULL;
	release_firmware(adev->gfx.mec2_fw);
	adev->gfx.mec2_fw = NULL;
	release_firmware(adev->gfx.rlc_fw);
	adev->gfx.rlc_fw = NULL;
}
/**
 * gfx_v7_0_tiling_mode_table_init - init the hw tiling table
 *
 * @adev: amdgpu_device pointer
 *
 * Starting with SI, the tiling setup is done globally in a
 * set of 32 tiling modes.  Rather than selecting each set of
 * parameters per surface as on older asics, we just select
 * which index in the tiling table we want to use, and the
 * surface uses those parameters (CIK).
 */
static void gfx_v7_0_tiling_mode_table_init(struct amdgpu_device *adev)
{
	const u32 num_tile_mode_states =
			ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	const u32 num_secondary_tile_mode_states =
			ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
	u32 reg_offset, split_equal_to_row_size;
	uint32_t *tile, *macrotile;

	tile = adev->gfx.config.tile_mode_array;
	macrotile = adev->gfx.config.macrotile_mode_array;

	switch (adev->gfx.config.mem_row_size_in_kb) {
	case 1:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
		break;
	case 2:
	default:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
		break;
	case 4:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
		break;
	}

	for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
		tile[reg_offset] = 0;
	for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
		macrotile[reg_offset] = 0;
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[7] = (TILE_SPLIT(split_equal_to_row_size));
		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16));
		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[12] = (TILE_SPLIT(split_equal_to_row_size));
		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[17] = (TILE_SPLIT(split_equal_to_row_size));
		tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
		tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[23] = (TILE_SPLIT(split_equal_to_row_size));
		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[30] = (TILE_SPLIT(split_equal_to_row_size));

		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_4_BANK));
		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				 NUM_BANKS(ADDR_SURF_4_BANK));

		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
			if (reg_offset != 7)
				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
		break;
	case CHIP_HAWAII:
		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[7] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[12] = (ARRAY_MODE(ARRAY_PRT_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[17] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
		tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[23] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[30] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P4_16x16) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));

		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_4_BANK));
		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_4_BANK));
		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				 NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				 NUM_BANKS(ADDR_SURF_4_BANK));

		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
			if (reg_offset != 7)
				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
		break;
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	default:
		tile[0] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[1] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[2] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[3] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[4] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[5] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
		tile[6] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING) |
			   TILE_SPLIT(split_equal_to_row_size));
		tile[7] = (TILE_SPLIT(split_equal_to_row_size));
		tile[8] = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
			   PIPE_CONFIG(ADDR_SURF_P2));
		tile[9] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			   PIPE_CONFIG(ADDR_SURF_P2) |
			   MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING));
		tile[10] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[11] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[12] = (TILE_SPLIT(split_equal_to_row_size));
		tile[13] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING));
		tile[14] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[15] = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[16] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[17] = (TILE_SPLIT(split_equal_to_row_size));
		tile[18] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[19] = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING));
		tile[20] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[21] = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[22] = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[23] = (TILE_SPLIT(split_equal_to_row_size));
		tile[24] = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[25] = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[26] = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
		tile[27] = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING));
		tile[28] = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
		tile[29] = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
			    PIPE_CONFIG(ADDR_SURF_P2) |
			    MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
			    SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
		tile[30] = (TILE_SPLIT(split_equal_to_row_size));

		macrotile[0] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[1] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[2] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[3] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[4] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[5] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[6] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_8_BANK));
		macrotile[8] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[9] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_4) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[10] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[11] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[12] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[13] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				 NUM_BANKS(ADDR_SURF_16_BANK));
		macrotile[14] = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				 NUM_BANKS(ADDR_SURF_8_BANK));

		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			WREG32(mmGB_TILE_MODE0 + reg_offset, tile[reg_offset]);
		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++)
			if (reg_offset != 7)
				WREG32(mmGB_MACROTILE_MODE0 + reg_offset, macrotile[reg_offset]);
		break;
	}
}
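/*
 * Besides being written to GB_TILE_MODE0+/GB_MACROTILE_MODE0+ above, the
 * same tables are kept in adev->gfx.config.tile_mode_array and
 * macrotile_mode_array, which userspace (e.g. Mesa, via the amdgpu INFO
 * ioctl) can query to pick a tiling index per surface.
 */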
/**
 * gfx_v7_0_select_se_sh - select which SE, SH to address
 *
 * @adev: amdgpu_device pointer
 * @se_num: shader engine to address
 * @sh_num: sh block to address
 * @instance: instance to address; 0xffffffff selects all instances
 *
 * Select which SE, SH combinations to address.  Certain
 * registers are instanced per SE or SH.  0xffffffff means
 * broadcast to all SEs or SHs (CIK).
 */
static void gfx_v7_0_select_se_sh(struct amdgpu_device *adev,
				  u32 se_num, u32 sh_num, u32 instance)
{
	u32 data;
	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);

	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
		data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
			GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;
	else if (se_num == 0xffffffff)
		data |= GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK |
			(sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT);
	else if (sh_num == 0xffffffff)
		data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
			(se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
	else
		data |= (sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT) |
			(se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
	WREG32(mmGRBM_GFX_INDEX, data);
}
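/*
 * Illustrative usage, mirroring gfx_v7_0_setup_rb() below: callers hold
 * grbm_idx_mutex around a select/access/broadcast-restore sequence.
 *
 *	mutex_lock(&adev->grbm_idx_mutex);
 *	gfx_v7_0_select_se_sh(adev, se, sh, 0xffffffff);
 *	... read or write SE/SH-instanced registers ...
 *	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
 *	mutex_unlock(&adev->grbm_idx_mutex);
 */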
/**
 * gfx_v7_0_get_rb_active_bitmap - computes the mask of enabled RBs
 *
 * @adev: amdgpu_device pointer
 *
 * Calculates the bitmask of enabled RBs (CIK).
 * Returns the enabled RB bitmask.
 */
static u32 gfx_v7_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32(mmCC_RB_BACKEND_DISABLE);
	data |= RREG32(mmGC_USER_RB_BACKEND_DISABLE);

	data &= CC_RB_BACKEND_DISABLE__BACKEND_DISABLE_MASK;
	data >>= GC_USER_RB_BACKEND_DISABLE__BACKEND_DISABLE__SHIFT;

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
					 adev->gfx.config.max_sh_per_se);

	return (~data) & mask;
}
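/*
 * Worked example with hypothetical numbers: max_backends_per_se = 4 and
 * max_sh_per_se = 1 give mask = 0xf; if the combined disable bitmap read
 * above is 0x4, the result is (~0x4) & 0xf = 0xb, i.e. RBs 0, 1 and 3 are
 * active.
 */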
static void
gfx_v7_0_raster_config(struct amdgpu_device *adev, u32 *rconf, u32 *rconf1)
{
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		*rconf |= RB_MAP_PKR0(2) | RB_XSEL2(1) | SE_MAP(2) |
			  SE_XSEL(1) | SE_YSEL(1);
		*rconf1 |= 0x0;
		break;
	case CHIP_HAWAII:
		*rconf |= RB_MAP_PKR0(2) | RB_MAP_PKR1(2) |
			  RB_XSEL2(1) | PKR_MAP(2) | PKR_XSEL(1) |
			  PKR_YSEL(1) | SE_MAP(2) | SE_XSEL(2) |
			  SE_YSEL(3);
		*rconf1 |= SE_PAIR_MAP(2) | SE_PAIR_XSEL(3) |
			   SE_PAIR_YSEL(2);
		break;
	case CHIP_KAVERI:
		*rconf |= RB_MAP_PKR0(2);
		*rconf1 |= 0x0;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		*rconf |= 0x0;
		*rconf1 |= 0x0;
		break;
	default:
		DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
		break;
	}
}
static void
gfx_v7_0_write_harvested_raster_configs(struct amdgpu_device *adev,
					u32 raster_config, u32 raster_config_1,
					unsigned rb_mask, unsigned num_rb)
{
	unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
	unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
	unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
	unsigned rb_per_se = num_rb / num_se;
	unsigned se_mask[4];
	unsigned se;

	se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
	se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
	se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
	se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;

	WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
	WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
	WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));

	if ((num_se > 2) && ((!se_mask[0] && !se_mask[1]) ||
			     (!se_mask[2] && !se_mask[3]))) {
		raster_config_1 &= ~SE_PAIR_MAP_MASK;

		if (!se_mask[0] && !se_mask[1]) {
			raster_config_1 |=
				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_3);
		} else {
			raster_config_1 |=
				SE_PAIR_MAP(RASTER_CONFIG_SE_PAIR_MAP_0);
		}
	}

	for (se = 0; se < num_se; se++) {
		unsigned raster_config_se = raster_config;
		unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
		unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
		int idx = (se / 2) * 2;

		if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
			raster_config_se &= ~SE_MAP_MASK;

			if (!se_mask[idx]) {
				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_3);
			} else {
				raster_config_se |= SE_MAP(RASTER_CONFIG_SE_MAP_0);
			}
		}

		pkr0_mask &= rb_mask;
		pkr1_mask &= rb_mask;
		if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
			raster_config_se &= ~PKR_MAP_MASK;

			if (!pkr0_mask) {
				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_3);
			} else {
				raster_config_se |= PKR_MAP(RASTER_CONFIG_PKR_MAP_0);
			}
		}

		if (rb_per_se >= 2) {
			unsigned rb0_mask = 1 << (se * rb_per_se);
			unsigned rb1_mask = rb0_mask << 1;

			rb0_mask &= rb_mask;
			rb1_mask &= rb_mask;
			if (!rb0_mask || !rb1_mask) {
				raster_config_se &= ~RB_MAP_PKR0_MASK;

				if (!rb0_mask) {
					raster_config_se |=
						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_3);
				} else {
					raster_config_se |=
						RB_MAP_PKR0(RASTER_CONFIG_RB_MAP_0);
				}
			}

			if (rb_per_se > 2) {
				rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
				rb1_mask = rb0_mask << 1;
				rb0_mask &= rb_mask;
				rb1_mask &= rb_mask;
				if (!rb0_mask || !rb1_mask) {
					raster_config_se &= ~RB_MAP_PKR1_MASK;

					if (!rb0_mask) {
						raster_config_se |=
							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_3);
					} else {
						raster_config_se |=
							RB_MAP_PKR1(RASTER_CONFIG_RB_MAP_0);
					}
				}
			}
		}

		/* GRBM_GFX_INDEX has a different offset on CI+ */
		gfx_v7_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
		WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
	}

	/* GRBM_GFX_INDEX has a different offset on CI+ */
	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
}
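/*
 * Worked example with hypothetical numbers: num_rb = 8 and num_se = 4 give
 * rb_per_se = 2, so se_mask[0..3] carve rb_mask into the 2-bit groups 0x03,
 * 0x0c, 0x30 and 0xc0; any SE or SE pair whose group ends up empty is
 * steered away from via SE_MAP/SE_PAIR_MAP above.
 */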
/**
 * gfx_v7_0_setup_rb - setup the RBs on the asic
 *
 * @adev: amdgpu_device pointer
 *
 * Configures per-SE/SH RB registers (CIK).
 */
static void gfx_v7_0_setup_rb(struct amdgpu_device *adev)
{
	int i, j;
	u32 data;
	u32 raster_config = 0, raster_config_1 = 0;
	u32 active_rbs = 0;
	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
					adev->gfx.config.max_sh_per_se;
	unsigned num_rb_pipes;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
			data = gfx_v7_0_get_rb_active_bitmap(adev);
			active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
					       rb_bitmap_width_per_sh);
		}
	}
	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);

	adev->gfx.config.backend_enable_mask = active_rbs;
	adev->gfx.config.num_rbs = hweight32(active_rbs);

	num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
			     adev->gfx.config.max_shader_engines, 16);

	gfx_v7_0_raster_config(adev, &raster_config, &raster_config_1);

	if (!adev->gfx.config.backend_enable_mask ||
	    adev->gfx.config.num_rbs >= num_rb_pipes) {
		WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
		WREG32(mmPA_SC_RASTER_CONFIG_1, raster_config_1);
	} else {
		gfx_v7_0_write_harvested_raster_configs(adev, raster_config, raster_config_1,
							adev->gfx.config.backend_enable_mask,
							num_rb_pipes);
	}

	/* cache the values for userspace */
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
			adev->gfx.config.rb_config[i][j].rb_backend_disable =
				RREG32(mmCC_RB_BACKEND_DISABLE);
			adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
				RREG32(mmGC_USER_RB_BACKEND_DISABLE);
			adev->gfx.config.rb_config[i][j].raster_config =
				RREG32(mmPA_SC_RASTER_CONFIG);
			adev->gfx.config.rb_config[i][j].raster_config_1 =
				RREG32(mmPA_SC_RASTER_CONFIG_1);
		}
	}
	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
}
/**
 * gfx_v7_0_init_compute_vmid - init the compute vmid sh_mem registers
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the sh_mem registers for the compute vmids.
 */
#define DEFAULT_SH_MEM_BASES	(0x6000)
#define FIRST_COMPUTE_VMID	(8)
#define LAST_COMPUTE_VMID	(16)
static void gfx_v7_0_init_compute_vmid(struct amdgpu_device *adev)
{
	int i;
	uint32_t sh_mem_config;
	uint32_t sh_mem_bases;

	/*
	 * Configure apertures:
	 * LDS:		0x60000000'00000000 - 0x60000001'00000000 (4GB)
	 * Scratch:	0x60000001'00000000 - 0x60000002'00000000 (4GB)
	 * GPUVM:	0x60010000'00000000 - 0x60020000'00000000 (1TB)
	 */
	sh_mem_bases = DEFAULT_SH_MEM_BASES | (DEFAULT_SH_MEM_BASES << 16);
	sh_mem_config = SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
			SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
	sh_mem_config |= MTYPE_NONCACHED << SH_MEM_CONFIG__DEFAULT_MTYPE__SHIFT;
	mutex_lock(&adev->srbm_mutex);
	for (i = FIRST_COMPUTE_VMID; i < LAST_COMPUTE_VMID; i++) {
		cik_srbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32(mmSH_MEM_CONFIG, sh_mem_config);
		WREG32(mmSH_MEM_APE1_BASE, 1);
		WREG32(mmSH_MEM_APE1_LIMIT, 0);
		WREG32(mmSH_MEM_BASES, sh_mem_bases);
	}
	cik_srbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}
static void gfx_v7_0_config_init(struct amdgpu_device *adev)
{
	adev->gfx.config.double_offchip_lds_buf = 1;
}
/**
 * gfx_v7_0_gpu_init - setup the 3D engine
 *
 * @adev: amdgpu_device pointer
 *
 * Configures the 3D engine and tiling configuration
 * registers so that the 3D engine is usable.
 */
static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
{
	u32 sh_mem_cfg, sh_static_mem_cfg, sh_mem_base;
	u32 tmp;
	int i;

	WREG32(mmGRBM_CNTL, (0xff << GRBM_CNTL__READ_TIMEOUT__SHIFT));

	WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
	WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);

	gfx_v7_0_tiling_mode_table_init(adev);

	gfx_v7_0_setup_rb(adev);
	gfx_v7_0_get_cu_info(adev);
	gfx_v7_0_config_init(adev);

	/* set HW defaults for 3D engine */
	WREG32(mmCP_MEQ_THRESHOLDS,
	       (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) |
	       (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT));

	mutex_lock(&adev->grbm_idx_mutex);
	/*
	 * making sure that the following register writes will be broadcasted
	 * to all the shaders
	 */
	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);

	/* XXX SH_MEM regs */
	/* where to put LDS, scratch, GPUVM in FSA64 space */
	sh_mem_cfg = REG_SET_FIELD(0, SH_MEM_CONFIG, ALIGNMENT_MODE,
				   SH_MEM_ALIGNMENT_MODE_UNALIGNED);
	sh_mem_cfg = REG_SET_FIELD(sh_mem_cfg, SH_MEM_CONFIG, DEFAULT_MTYPE,
				   MTYPE_NC);
	sh_mem_cfg = REG_SET_FIELD(sh_mem_cfg, SH_MEM_CONFIG, APE1_MTYPE,
				   MTYPE_UC);
	sh_mem_cfg = REG_SET_FIELD(sh_mem_cfg, SH_MEM_CONFIG, PRIVATE_ATC, 0);

	sh_static_mem_cfg = REG_SET_FIELD(0, SH_STATIC_MEM_CONFIG,
					  SWIZZLE_ENABLE, 1);
	sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
					  ELEMENT_SIZE, 1);
	sh_static_mem_cfg = REG_SET_FIELD(sh_static_mem_cfg, SH_STATIC_MEM_CONFIG,
					  INDEX_STRIDE, 3);
	WREG32(mmSH_STATIC_MEM_CONFIG, sh_static_mem_cfg);
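	/* program the SH_MEM apertures for every VMID; VMID 0 belongs to the kernel and keeps base 0 */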
	mutex_lock(&adev->srbm_mutex);
	for (i = 0; i < adev->vm_manager.id_mgr[0].num_ids; i++) {
		if (i == 0)
			sh_mem_base = 0;
		else
			sh_mem_base = adev->gmc.shared_aperture_start >> 48;
		cik_srbm_select(adev, 0, 0, 0, i);
		/* CP and shaders */
		WREG32(mmSH_MEM_CONFIG, sh_mem_cfg);
		WREG32(mmSH_MEM_APE1_BASE, 1);
		WREG32(mmSH_MEM_APE1_LIMIT, 0);
		WREG32(mmSH_MEM_BASES, sh_mem_base);
	}
	cik_srbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
	gfx_v7_0_init_compute_vmid(adev);

	WREG32(mmSX_DEBUG_1, 0x20);

	WREG32(mmTA_CNTL_AUX, 0x00010000);

	tmp = RREG32(mmSPI_CONFIG_CNTL);
	tmp |= 0x03000000;
	WREG32(mmSPI_CONFIG_CNTL, tmp);

	WREG32(mmSQ_CONFIG, 1);

	WREG32(mmDB_DEBUG, 0);

	tmp = RREG32(mmDB_DEBUG2) & ~0xf00fffff;
	tmp |= 0x00000400;
	WREG32(mmDB_DEBUG2, tmp);

	tmp = RREG32(mmDB_DEBUG3) & ~0x0002021c;
	tmp |= 0x00020200;
	WREG32(mmDB_DEBUG3, tmp);

	tmp = RREG32(mmCB_HW_CONTROL) & ~0x00010000;
	tmp |= 0x00018208;
	WREG32(mmCB_HW_CONTROL, tmp);

	WREG32(mmSPI_CONFIG_CNTL_1, (4 << SPI_CONFIG_CNTL_1__VTX_DONE_DELAY__SHIFT));

	WREG32(mmPA_SC_FIFO_SIZE,
	       ((adev->gfx.config.sc_prim_fifo_size_frontend << PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
		(adev->gfx.config.sc_prim_fifo_size_backend << PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
		(adev->gfx.config.sc_hiz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
		(adev->gfx.config.sc_earlyz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT)));

	WREG32(mmVGT_NUM_INSTANCES, 1);

	WREG32(mmCP_PERFMON_CNTL, 0);

	WREG32(mmSQ_CONFIG, 0);

	WREG32(mmPA_SC_FORCE_EOV_MAX_CNTS,
	       ((4095 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT__SHIFT) |
		(255 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT__SHIFT)));

	WREG32(mmVGT_CACHE_INVALIDATION,
	       (VC_AND_TC << VGT_CACHE_INVALIDATION__CACHE_INVALIDATION__SHIFT) |
	       (ES_AND_GS_AUTO << VGT_CACHE_INVALIDATION__AUTO_INVLD_EN__SHIFT));

	WREG32(mmVGT_GS_VERTEX_REUSE, 16);
	WREG32(mmPA_SC_LINE_STIPPLE_STATE, 0);

	WREG32(mmPA_CL_ENHANCE, PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK |
	       (3 << PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT));
	WREG32(mmPA_SC_ENHANCE, PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK);

	tmp = RREG32(mmSPI_ARB_PRIORITY);
	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS0, 2);
	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS1, 2);
	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS2, 2);
	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS3, 2);
	WREG32(mmSPI_ARB_PRIORITY, tmp);

	mutex_unlock(&adev->grbm_idx_mutex);

	udelay(50);
}
/*
 * GPU scratch registers helpers function.
 */
/**
 * gfx_v7_0_scratch_init - setup driver info for CP scratch regs
 *
 * @adev: amdgpu_device pointer
 *
 * Set up the number and offset of the CP scratch registers.
 * NOTE: use of CP scratch registers is a legacy interface and
 * is not used by default on newer asics (r6xx+). On newer asics,
 * memory buffers are used for fences rather than scratch regs.
 */
static void gfx_v7_0_scratch_init(struct amdgpu_device *adev)
{
	adev->gfx.scratch.num_reg = 8;
	adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}
/**
 * gfx_v7_0_ring_test_ring - basic gfx ring test
 *
 * @adev: amdgpu_device pointer
 * @ring: amdgpu_ring structure holding ring information
 *
 * Allocate a scratch register and write to it using the gfx ring (CIK).
 * Provides a basic gfx ring test to verify that the ring is working.
 * Used by gfx_v7_0_cp_gfx_resume();
 * Returns 0 on success, error on failure.
 */
static int gfx_v7_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = amdgpu_ring_alloc(ring, 3);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring %d (%d).\n", ring->idx, r);
		amdgpu_gfx_scratch_free(adev, scratch);
		return r;
	}
	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_UCONFIG_REG, 1));
	amdgpu_ring_write(ring, (scratch - PACKET3_SET_UCONFIG_REG_START));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);
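	/* busy-wait for the CP to write the magic value back into the scratch register */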
	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		DRM_UDELAY(1);
	}
	if (i < adev->usec_timeout) {
		DRM_DEBUG("ring test on %d succeeded in %d usecs\n", ring->idx, i);
	} else {
		DRM_ERROR("amdgpu: ring %d test failed (scratch(0x%04X)=0x%08X)\n",
			  ring->idx, scratch, tmp);
		r = -EINVAL;
	}
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}
/**
 * gfx_v7_0_ring_emit_hdp_flush - emit an hdp flush on the cp
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Emits an hdp flush on the cp.
 */
static void gfx_v7_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
{
	u32 ref_and_mask;
	int usepfp = ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE ? 0 : 1;

	if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
		switch (ring->me) {
		case 1:
			ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe;
			break;
		case 2:
			ref_and_mask = GPU_HDP_FLUSH_DONE__CP6_MASK << ring->pipe;
			break;
		default:
			return;
		}
	} else {
		ref_and_mask = GPU_HDP_FLUSH_DONE__CP0_MASK;
	}
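	/* GPU_HDP_FLUSH_DONE carries one bit per CP: CP0 for gfx; CP2.. (MEC1) and CP6.. (MEC2) indexed by pipe */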
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(1) | /* write, wait, write */
				 WAIT_REG_MEM_FUNCTION(3) |  /* == */
				 WAIT_REG_MEM_ENGINE(usepfp)));   /* pfp or me */
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_REQ);
	amdgpu_ring_write(ring, mmGPU_HDP_FLUSH_DONE);
	amdgpu_ring_write(ring, ref_and_mask);
	amdgpu_ring_write(ring, ref_and_mask);
	amdgpu_ring_write(ring, 0x20); /* poll interval */
}
static void gfx_v7_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
	amdgpu_ring_write(ring, EVENT_TYPE(VS_PARTIAL_FLUSH) |
		EVENT_INDEX(4));

	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
	amdgpu_ring_write(ring, EVENT_TYPE(VGT_FLUSH) |
		EVENT_INDEX(0));
}
/**
 * gfx_v7_0_ring_emit_fence_gfx - emit a fence on the gfx ring
 *
 * @ring: amdgpu_ring structure holding ring information
 * @addr: address the fence is written to
 * @seq: fence sequence number
 * @flags: fence flags
 *
 * Emits a fence sequence number on the gfx ring and flushes
 * GPU caches.
 */
static void gfx_v7_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
					 u64 seq, unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
	/* Workaround for cache flush problems. First send a dummy EOP
	 * event down the pipe with seq one below.
	 */
	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
				 EOP_TC_ACTION_EN |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
				DATA_SEL(1) | INT_SEL(0));
	amdgpu_ring_write(ring, lower_32_bits(seq - 1));
	amdgpu_ring_write(ring, upper_32_bits(seq - 1));

	/* Then send the real EOP event down the pipe. */
	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
				 EOP_TC_ACTION_EN |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
				DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, upper_32_bits(seq));
}
/**
 * gfx_v7_0_ring_emit_fence_compute - emit a fence on the compute ring
 *
 * @ring: amdgpu_ring structure holding ring information
 * @addr: address the fence is written to
 * @seq: fence sequence number
 * @flags: fence flags
 *
 * Emits a fence sequence number on the compute ring and flushes
 * GPU caches.
 */
static void gfx_v7_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
					     u64 addr, u64 seq,
					     unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;

	/* RELEASE_MEM - flush caches, send int */
	amdgpu_ring_write(ring, PACKET3(PACKET3_RELEASE_MEM, 5));
	amdgpu_ring_write(ring, (EOP_TCL1_ACTION_EN |
				 EOP_TC_ACTION_EN |
				 EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) |
				 EVENT_INDEX(5)));
	amdgpu_ring_write(ring, DATA_SEL(write64bit ? 2 : 1) | INT_SEL(int_sel ? 2 : 0));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, upper_32_bits(seq));
}
/*
 * IB stuff
 */
/**
 * gfx_v7_0_ring_emit_ib - emit an IB (Indirect Buffer) on the ring
 *
 * @ring: amdgpu_ring structure holding ring information
 * @ib: amdgpu indirect buffer object
 *
 * Emits a DE (drawing engine) or CE (constant engine) IB
 * on the gfx ring. IBs are usually generated by userspace
 * acceleration drivers and submitted to the kernel for
 * scheduling on the ring. This function schedules the IB
 * on the gfx ring for execution by the GPU.
 */
static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
				      struct amdgpu_ib *ib,
				      unsigned vmid, bool ctx_switch)
{
	u32 header, control = 0;

	/* insert SWITCH_BUFFER packet before first IB in the ring frame */
	if (ctx_switch) {
		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
		amdgpu_ring_write(ring, 0);
	}

	if (ib->flags & AMDGPU_IB_FLAG_CE)
		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
	else
		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);

	control |= ib->length_dw | (vmid << 24);
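	/* control word: IB length in dwords in the low bits, executing VMID at bit 24 */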
	amdgpu_ring_write(ring, header);
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
	amdgpu_ring_write(ring, control);
}
static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
					  struct amdgpu_ib *ib,
					  unsigned vmid, bool ctx_switch)
{
	u32 control = INDIRECT_BUFFER_VALID | ib->length_dw | (vmid << 24);

	amdgpu_ring_write(ring, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
	amdgpu_ring_write(ring, control);
}
static void gfx_v7_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
	uint32_t dw2 = 0;

	dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */
	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
		gfx_v7_0_ring_emit_vgt_flush(ring);
		/* set load_global_config & load_global_uconfig */
		dw2 |= 0x8001;
		/* set load_cs_sh_regs */
		dw2 |= 0x01000000;
		/* set load_per_context_state & load_gfx_sh_regs */
		dw2 |= 0x10002;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, dw2);
	amdgpu_ring_write(ring, 0);
}
/**
 * gfx_v7_0_ring_test_ib - basic ring IB test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Allocate an IB and execute it on the gfx ring (CIK).
 * Provides a basic gfx ring test to verify that IBs are working.
 * Returns 0 on success, error on failure.
 */
static int gfx_v7_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	uint32_t scratch;
	uint32_t tmp = 0;
	long r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r) {
		DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r) {
		DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
		goto err1;
	}
	ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
	ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
	ib.ptr[2] = 0xDEADBEEF;
	ib.length_dw = 3;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		DRM_ERROR("amdgpu: IB test timed out\n");
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
		goto err2;
	}
	tmp = RREG32(scratch);
	if (tmp == 0xDEADBEEF) {
		DRM_DEBUG("ib test on ring %d succeeded\n", ring->idx);
		r = 0;
	} else {
		DRM_ERROR("amdgpu: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}

err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}
/*
 * CP.
 * On CIK, gfx and compute now have independent command processors.
 *
 * GFX
 * Gfx consists of a single ring and can process both gfx jobs and
 * compute jobs. The gfx CP consists of three microengines (ME):
 * PFP - Pre-Fetch Parser
 * ME - Micro Engine
 * CE - Constant Engine
 * The PFP and ME make up what is considered the Drawing Engine (DE).
 * The CE is an asynchronous engine used for updating buffer descriptors
 * used by the DE so that they can be loaded into cache in parallel
 * while the DE is processing state update packets.
 *
 * Compute
 * The compute CP consists of two microengines (ME):
 * MEC1 - Compute MicroEngine 1
 * MEC2 - Compute MicroEngine 2
 * Each MEC supports 4 compute pipes and each pipe supports 8 queues.
 * The queues are exposed to userspace and are programmed directly
 * by the compute runtime.
 */
/**
 * gfx_v7_0_cp_gfx_enable - enable/disable the gfx CP MEs
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable the MEs
 *
 * Halts or unhalts the gfx MEs.
 */
static void gfx_v7_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
	int i;

	if (enable) {
		WREG32(mmCP_ME_CNTL, 0);
	} else {
		WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK));
		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
			adev->gfx.gfx_ring[i].ready = false;
	}
	udelay(50);
}
/**
 * gfx_v7_0_cp_gfx_load_microcode - load the gfx CP ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the gfx PFP, ME, and CE ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int gfx_v7_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v1_0 *pfp_hdr;
	const struct gfx_firmware_header_v1_0 *ce_hdr;
	const struct gfx_firmware_header_v1_0 *me_hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
		return -EINVAL;

	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	ce_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
	me_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;

	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
	amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
	adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version);
	adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version);
	adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version);
	adev->gfx.me_feature_version = le32_to_cpu(me_hdr->ucode_feature_version);
	adev->gfx.ce_feature_version = le32_to_cpu(ce_hdr->ucode_feature_version);
	adev->gfx.pfp_feature_version = le32_to_cpu(pfp_hdr->ucode_feature_version);

	gfx_v7_0_cp_gfx_enable(adev, false);

	/* PFP */
	fw_data = (const __le32 *)
		(adev->gfx.pfp_fw->data +
		 le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
	WREG32(mmCP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
	WREG32(mmCP_PFP_UCODE_ADDR, adev->gfx.pfp_fw_version);

	/* CE */
	fw_data = (const __le32 *)
		(adev->gfx.ce_fw->data +
		 le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
	WREG32(mmCP_CE_UCODE_ADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
	WREG32(mmCP_CE_UCODE_ADDR, adev->gfx.ce_fw_version);

	/* ME */
	fw_data = (const __le32 *)
		(adev->gfx.me_fw->data +
		 le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
	WREG32(mmCP_ME_RAM_WADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
	WREG32(mmCP_ME_RAM_WADDR, adev->gfx.me_fw_version);

	return 0;
}
/**
 * gfx_v7_0_cp_gfx_start - start the gfx ring
 *
 * @adev: amdgpu_device pointer
 *
 * Enables the ring and loads the clear state context and other
 * packets required to init the ring.
 * Returns 0 for success, error for failure.
 */
static int gfx_v7_0_cp_gfx_start(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;
	int r, i;

	/* init the CP */
	WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
	WREG32(mmCP_ENDIAN_SWAP, 0);
	WREG32(mmCP_DEVICE_ID, 1);

	gfx_v7_0_cp_gfx_enable(adev, true);

	r = amdgpu_ring_alloc(ring, gfx_v7_0_get_csb_size(adev) + 8);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* init the CE partitions.  CE only used for gfx on CIK */
	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
	amdgpu_ring_write(ring, 0x8000);
	amdgpu_ring_write(ring, 0x8000);

	/* clear state buffer */
	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, 0x80000000);
	amdgpu_ring_write(ring, 0x80000000);
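	/* stream the SECT_CONTEXT extents of the golden clear-state buffer into the ring */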
	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				amdgpu_ring_write(ring,
						  PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				amdgpu_ring_write(ring, ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					amdgpu_ring_write(ring, ext->extent[i]);
			}
		}
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
	amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config);
	amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config_1);

	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	amdgpu_ring_write(ring, 0x00000316);
	amdgpu_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	amdgpu_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */

	amdgpu_ring_commit(ring);

	return 0;
}
/**
 * gfx_v7_0_cp_gfx_resume - setup the gfx ring buffer registers
 *
 * @adev: amdgpu_device pointer
 *
 * Program the location and size of the gfx ring buffer
 * and test it to make sure it's working.
 * Returns 0 for success, error for failure.
 */
static int gfx_v7_0_cp_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	u64 rb_addr, rptr_addr;
	int r;

	WREG32(mmCP_SEM_WAIT_TIMER, 0x0);
	if (adev->asic_type != CHIP_HAWAII)
		WREG32(mmCP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

	/* Set the write pointer delay */
	WREG32(mmCP_RB_WPTR_DELAY, 0);

	/* set the RB to use vmid 0 */
	WREG32(mmCP_RB_VMID, 0);

	WREG32(mmSCRATCH_ADDR, 0);

	/* ring 0 - compute and gfx */
	/* Set ring buffer size */
	ring = &adev->gfx.gfx_ring[0];
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
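	/* both fields are log2 values in 8-byte units: RB_BUFSZ for the ring, RB_BLKSZ (bits 8+) for rptr report blocks */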
#ifdef __BIG_ENDIAN
	tmp |= 2 << CP_RB0_CNTL__BUF_SWAP__SHIFT;
#endif
	WREG32(mmCP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
	ring->wptr = 0;
	WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));

	/* set the wb address whether it's enabled or not */
	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
	WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
	WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);

	/* scratch register shadowing is no longer supported */
	WREG32(mmSCRATCH_UMSK, 0);

	mdelay(1);
	WREG32(mmCP_RB0_CNTL, tmp);

	rb_addr = ring->gpu_addr >> 8;
	WREG32(mmCP_RB0_BASE, rb_addr);
	WREG32(mmCP_RB0_BASE_HI, upper_32_bits(rb_addr));

	/* start the ring */
	gfx_v7_0_cp_gfx_start(adev);
	ring->ready = true;
	r = amdgpu_ring_test_ring(ring);
	if (r) {
		ring->ready = false;
		return r;
	}

	return 0;
}
static u64 gfx_v7_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	return ring->adev->wb.wb[ring->rptr_offs];
}

static u64 gfx_v7_0_ring_get_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	return RREG32(mmCP_RB0_WPTR);
}

static void gfx_v7_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
	(void)RREG32(mmCP_RB0_WPTR);
}

static u64 gfx_v7_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
{
	/* XXX check if swapping is necessary on BE */
	return ring->adev->wb.wb[ring->wptr_offs];
}

static void gfx_v7_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	/* XXX check if swapping is necessary on BE */
	adev->wb.wb[ring->wptr_offs] = lower_32_bits(ring->wptr);
	WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
}
/**
 * gfx_v7_0_cp_compute_enable - enable/disable the compute CP MEs
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable the MEs
 *
 * Halts or unhalts the compute MEs.
 */
static void gfx_v7_0_cp_compute_enable(struct amdgpu_device *adev, bool enable)
{
	int i;

	if (enable) {
		WREG32(mmCP_MEC_CNTL, 0);
	} else {
		WREG32(mmCP_MEC_CNTL, (CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK));
		for (i = 0; i < adev->gfx.num_compute_rings; i++)
			adev->gfx.compute_ring[i].ready = false;
	}
	udelay(50);
}
/**
 * gfx_v7_0_cp_compute_load_microcode - load the compute CP ME ucode
 *
 * @adev: amdgpu_device pointer
 *
 * Loads the compute MEC1&2 ucode.
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v1_0 *mec_hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;

	if (!adev->gfx.mec_fw)
		return -EINVAL;

	mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
	amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
	adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
	adev->gfx.mec_feature_version = le32_to_cpu(
					mec_hdr->ucode_feature_version);

	gfx_v7_0_cp_compute_enable(adev, false);

	/* MEC1 */
	fw_data = (const __le32 *)
		(adev->gfx.mec_fw->data +
		 le32_to_cpu(mec_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(mec_hdr->header.ucode_size_bytes) / 4;
	WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32(mmCP_MEC_ME1_UCODE_DATA, le32_to_cpup(fw_data++));
	WREG32(mmCP_MEC_ME1_UCODE_ADDR, 0);

	if (adev->asic_type == CHIP_KAVERI) {
		const struct gfx_firmware_header_v1_0 *mec2_hdr;

		if (!adev->gfx.mec2_fw)
			return -EINVAL;

		mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
		amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
		adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
		adev->gfx.mec2_feature_version = le32_to_cpu(
				mec2_hdr->ucode_feature_version);

		/* MEC2 */
		fw_data = (const __le32 *)
			(adev->gfx.mec2_fw->data +
			 le32_to_cpu(mec2_hdr->header.ucode_array_offset_bytes));
		fw_size = le32_to_cpu(mec2_hdr->header.ucode_size_bytes) / 4;
		WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
		for (i = 0; i < fw_size; i++)
			WREG32(mmCP_MEC_ME2_UCODE_DATA, le32_to_cpup(fw_data++));
		WREG32(mmCP_MEC_ME2_UCODE_ADDR, 0);
	}

	return 0;
}
/**
 * gfx_v7_0_cp_compute_fini - stop the compute queues
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the compute queues and tear down the driver queue
 * info.
 */
static void gfx_v7_0_cp_compute_fini(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];

		amdgpu_bo_free_kernel(&ring->mqd_obj, NULL, NULL);
	}
}
static void gfx_v7_0_mec_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
}

static int gfx_v7_0_mec_init(struct amdgpu_device *adev)
{
	int r;
	u32 *hpd;
	size_t mec_hpd_size;

	bitmap_zero(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);

	/* take ownership of the relevant compute queues */
	amdgpu_gfx_compute_queue_acquire(adev);

	/* allocate space for ALL pipes (even the ones we don't own) */
	mec_hpd_size = adev->gfx.mec.num_mec * adev->gfx.mec.num_pipe_per_mec
		* GFX7_MEC_HPD_SIZE * 2;
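	/* each pipe owns a GFX7_MEC_HPD_SIZE * 2 byte slice; gfx_v7_0_compute_pipe_init() uses the same stride */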
	r = amdgpu_bo_create_reserved(adev, mec_hpd_size, PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT,
				      &adev->gfx.mec.hpd_eop_obj,
				      &adev->gfx.mec.hpd_eop_gpu_addr,
				      (void **)&hpd);
	if (r) {
		dev_warn(adev->dev, "(%d) create, pin or map of HDP EOP bo failed\n", r);
		gfx_v7_0_mec_fini(adev);
		return r;
	}

	/* clear memory.  Not sure if this is required or not */
	memset(hpd, 0, mec_hpd_size);

	amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
	amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);

	return 0;
}
struct hqd_registers
{
	u32 cp_mqd_base_addr;
	u32 cp_mqd_base_addr_hi;
	u32 cp_hqd_active;
	u32 cp_hqd_vmid;
	u32 cp_hqd_persistent_state;
	u32 cp_hqd_pipe_priority;
	u32 cp_hqd_queue_priority;
	u32 cp_hqd_quantum;
	u32 cp_hqd_pq_base;
	u32 cp_hqd_pq_base_hi;
	u32 cp_hqd_pq_rptr;
	u32 cp_hqd_pq_rptr_report_addr;
	u32 cp_hqd_pq_rptr_report_addr_hi;
	u32 cp_hqd_pq_wptr_poll_addr;
	u32 cp_hqd_pq_wptr_poll_addr_hi;
	u32 cp_hqd_pq_doorbell_control;
	u32 cp_hqd_pq_wptr;
	u32 cp_hqd_pq_control;
	u32 cp_hqd_ib_base_addr;
	u32 cp_hqd_ib_base_addr_hi;
	u32 cp_hqd_ib_rptr;
	u32 cp_hqd_ib_control;
	u32 cp_hqd_iq_timer;
	u32 cp_hqd_iq_rptr;
	u32 cp_hqd_dequeue_request;
	u32 cp_hqd_dma_offload;
	u32 cp_hqd_sema_cmd;
	u32 cp_hqd_msg_type;
	u32 cp_hqd_atomic0_preop_lo;
	u32 cp_hqd_atomic0_preop_hi;
	u32 cp_hqd_atomic1_preop_lo;
	u32 cp_hqd_atomic1_preop_hi;
	u32 cp_hqd_hq_scheduler0;
	u32 cp_hqd_hq_scheduler1;
	u32 cp_mqd_control;
};
static void gfx_v7_0_compute_pipe_init(struct amdgpu_device *adev,
				       int mec, int pipe)
{
	u64 eop_gpu_addr;
	u32 tmp;
	size_t eop_offset = (mec * adev->gfx.mec.num_pipe_per_mec + pipe)
			    * GFX7_MEC_HPD_SIZE * 2;

	mutex_lock(&adev->srbm_mutex);
	eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + eop_offset;

	cik_srbm_select(adev, mec + 1, pipe, 0, 0);

	/* write the EOP addr */
	WREG32(mmCP_HPD_EOP_BASE_ADDR, eop_gpu_addr >> 8);
	WREG32(mmCP_HPD_EOP_BASE_ADDR_HI, upper_32_bits(eop_gpu_addr) >> 8);

	/* set the VMID assigned */
	WREG32(mmCP_HPD_EOP_VMID, 0);

	/* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */
	tmp = RREG32(mmCP_HPD_EOP_CONTROL);
	tmp &= ~CP_HPD_EOP_CONTROL__EOP_SIZE_MASK;
	tmp |= order_base_2(GFX7_MEC_HPD_SIZE / 8);
	WREG32(mmCP_HPD_EOP_CONTROL, tmp);

	cik_srbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);
}
static int gfx_v7_0_mqd_deactivate(struct amdgpu_device *adev)
{
	int i;

	/* disable the queue if it's active */
	if (RREG32(mmCP_HQD_ACTIVE) & 1) {
		WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1);
		for (i = 0; i < adev->usec_timeout; i++) {
			if (!(RREG32(mmCP_HQD_ACTIVE) & 1))
				break;
			udelay(1);
		}

		if (i == adev->usec_timeout)
			return -ETIMEDOUT;

		WREG32(mmCP_HQD_DEQUEUE_REQUEST, 0);
		WREG32(mmCP_HQD_PQ_RPTR, 0);
		WREG32(mmCP_HQD_PQ_WPTR, 0);
	}

	return 0;
}
static void gfx_v7_0_mqd_init(struct amdgpu_device *adev,
			      struct cik_mqd *mqd,
			      uint64_t mqd_gpu_addr,
			      struct amdgpu_ring *ring)
{
	u64 hqd_gpu_addr;
	u64 wb_gpu_addr;

	/* init the mqd struct */
	memset(mqd, 0, sizeof(struct cik_mqd));

	mqd->header = 0xC0310800;
	mqd->compute_static_thread_mgmt_se0 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se1 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se2 = 0xffffffff;
	mqd->compute_static_thread_mgmt_se3 = 0xffffffff;

	/* enable doorbell? */
	mqd->cp_hqd_pq_doorbell_control =
		RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
	if (ring->use_doorbell)
		mqd->cp_hqd_pq_doorbell_control |= CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
	else
		mqd->cp_hqd_pq_doorbell_control &= ~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;

	/* set the pointer to the MQD */
	mqd->cp_mqd_base_addr_lo = mqd_gpu_addr & 0xfffffffc;
	mqd->cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr);

	/* set MQD vmid to 0 */
	mqd->cp_mqd_control = RREG32(mmCP_MQD_CONTROL);
	mqd->cp_mqd_control &= ~CP_MQD_CONTROL__VMID_MASK;

	/* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */
	hqd_gpu_addr = ring->gpu_addr >> 8;
	mqd->cp_hqd_pq_base_lo = hqd_gpu_addr;
	mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr);

	/* set up the HQD, this is similar to CP_RB0_CNTL */
	mqd->cp_hqd_pq_control = RREG32(mmCP_HQD_PQ_CONTROL);
	mqd->cp_hqd_pq_control &=
		~(CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK |
				CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE_MASK);

	mqd->cp_hqd_pq_control |=
		order_base_2(ring->ring_size / 8);
	mqd->cp_hqd_pq_control |=
		(order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8);
#ifdef __BIG_ENDIAN
	mqd->cp_hqd_pq_control |=
		2 << CP_HQD_PQ_CONTROL__ENDIAN_SWAP__SHIFT;
#endif
	mqd->cp_hqd_pq_control &=
		~(CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK |
				CP_HQD_PQ_CONTROL__ROQ_PQ_IB_FLIP_MASK |
				CP_HQD_PQ_CONTROL__PQ_VOLATILE_MASK);
	mqd->cp_hqd_pq_control |=
		CP_HQD_PQ_CONTROL__PRIV_STATE_MASK |
		CP_HQD_PQ_CONTROL__KMD_QUEUE_MASK; /* assuming kernel queue control */

	/* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */
	wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
	mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff;

	/* set the wb address whether it's enabled or not */
	wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
	mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc;
	mqd->cp_hqd_pq_rptr_report_addr_hi =
		upper_32_bits(wb_gpu_addr) & 0xffff;

	/* enable the doorbell if requested */
	if (ring->use_doorbell) {
		mqd->cp_hqd_pq_doorbell_control =
			RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
		mqd->cp_hqd_pq_doorbell_control &=
			~CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET_MASK;
		mqd->cp_hqd_pq_doorbell_control |=
			(ring->doorbell_index <<
			 CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT);
		mqd->cp_hqd_pq_doorbell_control |=
			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_EN_MASK;
		mqd->cp_hqd_pq_doorbell_control &=
			~(CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_SOURCE_MASK |
					CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_HIT_MASK);
	} else {
		mqd->cp_hqd_pq_doorbell_control = 0;
	}

	/* read and write pointers, similar to CP_RB0_WPTR/_RPTR */
	ring->wptr = 0;
	mqd->cp_hqd_pq_wptr = lower_32_bits(ring->wptr);
	mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);

	/* set the vmid for the queue */
	mqd->cp_hqd_vmid = 0;
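	/* capture the remaining HQD registers at their current hardware values */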
	mqd->cp_hqd_ib_control = RREG32(mmCP_HQD_IB_CONTROL);
	mqd->cp_hqd_ib_base_addr_lo = RREG32(mmCP_HQD_IB_BASE_ADDR);
	mqd->cp_hqd_ib_base_addr_hi = RREG32(mmCP_HQD_IB_BASE_ADDR_HI);
	mqd->cp_hqd_ib_rptr = RREG32(mmCP_HQD_IB_RPTR);
	mqd->cp_hqd_persistent_state = RREG32(mmCP_HQD_PERSISTENT_STATE);
	mqd->cp_hqd_sema_cmd = RREG32(mmCP_HQD_SEMA_CMD);
	mqd->cp_hqd_msg_type = RREG32(mmCP_HQD_MSG_TYPE);
	mqd->cp_hqd_atomic0_preop_lo = RREG32(mmCP_HQD_ATOMIC0_PREOP_LO);
	mqd->cp_hqd_atomic0_preop_hi = RREG32(mmCP_HQD_ATOMIC0_PREOP_HI);
	mqd->cp_hqd_atomic1_preop_lo = RREG32(mmCP_HQD_ATOMIC1_PREOP_LO);
	mqd->cp_hqd_atomic1_preop_hi = RREG32(mmCP_HQD_ATOMIC1_PREOP_HI);
	mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR);
	mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
	mqd->cp_hqd_pipe_priority = RREG32(mmCP_HQD_PIPE_PRIORITY);
	mqd->cp_hqd_queue_priority = RREG32(mmCP_HQD_QUEUE_PRIORITY);
	mqd->cp_hqd_iq_rptr = RREG32(mmCP_HQD_IQ_RPTR);

	/* activate the queue */
	mqd->cp_hqd_active = 1;
}
int gfx_v7_0_mqd_commit(struct amdgpu_device *adev, struct cik_mqd *mqd)
{
	uint32_t tmp;
	uint32_t mqd_reg;
	uint32_t *mqd_data;

	/* HQD registers extend from mmCP_MQD_BASE_ADDR to mmCP_MQD_CONTROL */
	mqd_data = &mqd->cp_mqd_base_addr_lo;
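	/* cik_mqd lays out these fields in register order, so the loops below can index the struct by register offset */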
	/* disable wptr polling */
	tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL);
	tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0);
	WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp);

	/* program all HQD registers */
	for (mqd_reg = mmCP_HQD_VMID; mqd_reg <= mmCP_MQD_CONTROL; mqd_reg++)
		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);

	/* activate the HQD */
	for (mqd_reg = mmCP_MQD_BASE_ADDR; mqd_reg <= mmCP_HQD_ACTIVE; mqd_reg++)
		WREG32(mqd_reg, mqd_data[mqd_reg - mmCP_MQD_BASE_ADDR]);

	return 0;
}
static int gfx_v7_0_compute_queue_init(struct amdgpu_device *adev, int ring_id)
{
	int r;
	u64 mqd_gpu_addr;
	struct cik_mqd *mqd;
	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];

	r = amdgpu_bo_create_reserved(adev, sizeof(struct cik_mqd), PAGE_SIZE,
				      AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
				      &mqd_gpu_addr, (void **)&mqd);
	if (r) {
		dev_warn(adev->dev, "(%d) create MQD bo failed\n", r);
		return r;
	}

	mutex_lock(&adev->srbm_mutex);
	cik_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);

	gfx_v7_0_mqd_init(adev, mqd, mqd_gpu_addr, ring);
	gfx_v7_0_mqd_deactivate(adev);
	gfx_v7_0_mqd_commit(adev, mqd);

	cik_srbm_select(adev, 0, 0, 0, 0);
	mutex_unlock(&adev->srbm_mutex);

	amdgpu_bo_kunmap(ring->mqd_obj);
	amdgpu_bo_unreserve(ring->mqd_obj);
	return 0;
}
/**
 * gfx_v7_0_cp_compute_resume - setup the compute queue registers
 *
 * @adev: amdgpu_device pointer
 *
 * Program the compute queues and test them to make sure they
 * are working.
 * Returns 0 for success, error for failure.
 */
static int gfx_v7_0_cp_compute_resume(struct amdgpu_device *adev)
{
	int r, i, j;
	u32 tmp;
	struct amdgpu_ring *ring;

	/* fix up chicken bits */
	tmp = RREG32(mmCP_CPF_DEBUG);
	tmp |= (1 << 23);
	WREG32(mmCP_CPF_DEBUG, tmp);

	/* init all pipes (even the ones we don't own) */
	for (i = 0; i < adev->gfx.mec.num_mec; i++)
		for (j = 0; j < adev->gfx.mec.num_pipe_per_mec; j++)
			gfx_v7_0_compute_pipe_init(adev, i, j);

	/* init the queues */
	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		r = gfx_v7_0_compute_queue_init(adev, i);
		if (r) {
			gfx_v7_0_cp_compute_fini(adev);
			return r;
		}
	}

	gfx_v7_0_cp_compute_enable(adev, true);

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		ring = &adev->gfx.compute_ring[i];
		ring->ready = true;
		r = amdgpu_ring_test_ring(ring);
		if (r)
			ring->ready = false;
	}

	return 0;
}
static void gfx_v7_0_cp_enable(struct amdgpu_device *adev, bool enable)
{
	gfx_v7_0_cp_gfx_enable(adev, enable);
	gfx_v7_0_cp_compute_enable(adev, enable);
}

static int gfx_v7_0_cp_load_microcode(struct amdgpu_device *adev)
{
	int r;

	r = gfx_v7_0_cp_gfx_load_microcode(adev);
	if (r)
		return r;
	r = gfx_v7_0_cp_compute_load_microcode(adev);
	if (r)
		return r;

	return 0;
}

static void gfx_v7_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
					       bool enable)
{
	u32 tmp = RREG32(mmCP_INT_CNTL_RING0);

	if (enable)
		tmp |= (CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK |
			CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK);
	else
		tmp &= ~(CP_INT_CNTL_RING0__CNTX_BUSY_INT_ENABLE_MASK |
			 CP_INT_CNTL_RING0__CNTX_EMPTY_INT_ENABLE_MASK);
	WREG32(mmCP_INT_CNTL_RING0, tmp);
}

static int gfx_v7_0_cp_resume(struct amdgpu_device *adev)
{
	int r;

	gfx_v7_0_enable_gui_idle_interrupt(adev, false);

	r = gfx_v7_0_cp_load_microcode(adev);
	if (r)
		return r;

	r = gfx_v7_0_cp_gfx_resume(adev);
	if (r)
		return r;
	r = gfx_v7_0_cp_compute_resume(adev);
	if (r)
		return r;

	gfx_v7_0_enable_gui_idle_interrupt(adev, true);

	return 0;
}
/**
 * gfx_v7_0_ring_emit_pipeline_sync - sync the command pipeline via the CP
 *
 * @ring: the ring to emit the commands to
 *
 * Sync the command pipeline with the PFP. E.g. wait for everything
 * to be completed.
 */
static void gfx_v7_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
				 WAIT_REG_MEM_FUNCTION(3) | /* equal */
				 WAIT_REG_MEM_ENGINE(usepfp)));   /* pfp or me */
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, 0xffffffff);
	amdgpu_ring_write(ring, 4); /* poll interval */

	if (usepfp) {
		/* sync CE with ME to prevent CE fetch CEIB before context switch done */
		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
		amdgpu_ring_write(ring, 0);
	}
}
/*
 * vm
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */
/**
 * gfx_v7_0_ring_emit_vm_flush - cik vm flush using the CP
 *
 * @ring: amdgpu_ring pointer
 * @vmid: vmid number to use
 * @pd_addr: page table base address
 *
 * Update the page table base and flush the VM TLB
 * using the CP (CIK).
 */
static void gfx_v7_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);

	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for the invalidate to complete */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring, (WAIT_REG_MEM_OPERATION(0) | /* wait */
				 WAIT_REG_MEM_FUNCTION(0) |  /* always */
				 WAIT_REG_MEM_ENGINE(0))); /* me */
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0); /* ref */
	amdgpu_ring_write(ring, 0); /* mask */
	amdgpu_ring_write(ring, 0x20); /* poll interval */

	/* compute doesn't have PFP */
	if (usepfp) {
		/* sync PFP to ME, otherwise we might get invalid PFP reads */
		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
		amdgpu_ring_write(ring, 0x0);

		/* sync CE with ME to prevent CE fetch CEIB before context switch done */
		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
		amdgpu_ring_write(ring, 0);
	}
}
static void gfx_v7_0_ring_emit_wreg(struct amdgpu_ring *ring,
				    uint32_t reg, uint32_t val)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);

	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
				 WRITE_DATA_DST_SEL(0)));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}
/*
 * RLC
 * The RLC is a multi-purpose microengine that handles a
 * variety of functions.
 */
static void gfx_v7_0_rlc_fini(struct amdgpu_device *adev)
{
	amdgpu_bo_free_kernel(&adev->gfx.rlc.save_restore_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj, NULL, NULL);
}

static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
{
	const u32 *src_ptr;
	volatile u32 *dst_ptr;
	u32 dws, i;
	const struct cs_section_def *cs_data;
	int r;

	/* allocate rlc buffers */
	if (adev->flags & AMD_IS_APU) {
		if (adev->asic_type == CHIP_KAVERI) {
			adev->gfx.rlc.reg_list = spectre_rlc_save_restore_register_list;
			adev->gfx.rlc.reg_list_size =
				(u32)ARRAY_SIZE(spectre_rlc_save_restore_register_list);
		} else {
			adev->gfx.rlc.reg_list = kalindi_rlc_save_restore_register_list;
			adev->gfx.rlc.reg_list_size =
				(u32)ARRAY_SIZE(kalindi_rlc_save_restore_register_list);
		}
	}
	adev->gfx.rlc.cs_data = ci_cs_data;
	adev->gfx.rlc.cp_table_size = ALIGN(CP_ME_TABLE_SIZE * 5 * 4, 2048); /* CP JT */
	adev->gfx.rlc.cp_table_size += 64 * 1024; /* GDS */

	src_ptr = adev->gfx.rlc.reg_list;
	dws = adev->gfx.rlc.reg_list_size;
	dws += (5 * 16) + 48 + 48 + 64;

	cs_data = adev->gfx.rlc.cs_data;

	if (src_ptr) {
		/* save restore block */
		r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM,
					      &adev->gfx.rlc.save_restore_obj,
					      &adev->gfx.rlc.save_restore_gpu_addr,
					      (void **)&adev->gfx.rlc.sr_ptr);
		if (r) {
			dev_warn(adev->dev, "(%d) create, pin or map of RLC sr bo failed\n", r);
			gfx_v7_0_rlc_fini(adev);
			return r;
		}

		/* write the sr buffer */
		dst_ptr = adev->gfx.rlc.sr_ptr;
		for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
			dst_ptr[i] = cpu_to_le32(src_ptr[i]);
		amdgpu_bo_kunmap(adev->gfx.rlc.save_restore_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.save_restore_obj);
	}

	if (cs_data) {
		/* clear state block */
		adev->gfx.rlc.clear_state_size = dws = gfx_v7_0_get_csb_size(adev);

		r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM,
					      &adev->gfx.rlc.clear_state_obj,
					      &adev->gfx.rlc.clear_state_gpu_addr,
					      (void **)&adev->gfx.rlc.cs_ptr);
		if (r) {
			dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
			gfx_v7_0_rlc_fini(adev);
			return r;
		}

		/* set up the cs buffer */
		dst_ptr = adev->gfx.rlc.cs_ptr;
		gfx_v7_0_get_csb_buffer(adev, dst_ptr);
		amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
	}

	if (adev->gfx.rlc.cp_table_size) {

		r = amdgpu_bo_create_reserved(adev, adev->gfx.rlc.cp_table_size,
					      PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
					      &adev->gfx.rlc.cp_table_obj,
					      &adev->gfx.rlc.cp_table_gpu_addr,
					      (void **)&adev->gfx.rlc.cp_table_ptr);
		if (r) {
			dev_warn(adev->dev, "(%d) create RLC cp table bo failed\n", r);
			gfx_v7_0_rlc_fini(adev);
			return r;
		}

		gfx_v7_0_init_cp_pg_table(adev);

		amdgpu_bo_kunmap(adev->gfx.rlc.cp_table_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.cp_table_obj);
	}

	return 0;
}
static void gfx_v7_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
{
	u32 tmp;

	tmp = RREG32(mmRLC_LB_CNTL);
	if (enable)
		tmp |= RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK;
	else
		tmp &= ~RLC_LB_CNTL__LOAD_BALANCE_ENABLE_MASK;
	WREG32(mmRLC_LB_CNTL, tmp);
}
static void gfx_v7_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
{
	u32 i, j, k;
	u32 mask;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
			for (k = 0; k < adev->usec_timeout; k++) {
				if (RREG32(mmRLC_SERDES_CU_MASTER_BUSY) == 0)
					break;
				udelay(1);
			}
		}
	}
	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
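	/* CU masters are idle; now wait for the non-CU masters (SE, GC, TC) as well */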
	mask = RLC_SERDES_NONCU_MASTER_BUSY__SE_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__GC_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC0_MASTER_BUSY_MASK |
		RLC_SERDES_NONCU_MASTER_BUSY__TC1_MASTER_BUSY_MASK;
	for (k = 0; k < adev->usec_timeout; k++) {
		if ((RREG32(mmRLC_SERDES_NONCU_MASTER_BUSY) & mask) == 0)
			break;
		udelay(1);
	}
}
static void gfx_v7_0_update_rlc(struct amdgpu_device *adev, u32 rlc)
{
	u32 tmp;

	tmp = RREG32(mmRLC_CNTL);
	if (tmp != rlc)
		WREG32(mmRLC_CNTL, rlc);
}

static u32 gfx_v7_0_halt_rlc(struct amdgpu_device *adev)
{
	u32 data, orig;

	orig = data = RREG32(mmRLC_CNTL);

	if (data & RLC_CNTL__RLC_ENABLE_F32_MASK) {
		u32 i;

		data &= ~RLC_CNTL__RLC_ENABLE_F32_MASK;
		WREG32(mmRLC_CNTL, data);

		for (i = 0; i < adev->usec_timeout; i++) {
			if ((RREG32(mmRLC_GPM_STAT) & RLC_GPM_STAT__RLC_BUSY_MASK) == 0)
				break;
			udelay(1);
		}

		gfx_v7_0_wait_for_rlc_serdes(adev);
	}

	return orig;
}
static void gfx_v7_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
{
	u32 tmp, i, mask;

	tmp = 0x1 | (1 << 1);
	WREG32(mmRLC_GPR_REG2, tmp);
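	/* bit 1 of the RLC_GPR_REG2 message selects enter (1) vs. exit (0) of safe mode */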
	mask = RLC_GPM_STAT__GFX_POWER_STATUS_MASK |
		RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK;
	for (i = 0; i < adev->usec_timeout; i++) {
		if ((RREG32(mmRLC_GPM_STAT) & mask) == mask)
			break;
		udelay(1);
	}

	for (i = 0; i < adev->usec_timeout; i++) {
		if ((RREG32(mmRLC_GPR_REG2) & 0x1) == 0)
			break;
		udelay(1);
	}
}
static void gfx_v7_0_exit_rlc_safe_mode(struct amdgpu_device *adev)
{
	u32 tmp;

	tmp = 0x1 | (0 << 1);
	WREG32(mmRLC_GPR_REG2, tmp);
}
/**
 * gfx_v7_0_rlc_stop - stop the RLC ME
 *
 * @adev: amdgpu_device pointer
 *
 * Halt the RLC ME (MicroEngine) (CIK).
 */
static void gfx_v7_0_rlc_stop(struct amdgpu_device *adev)
{
	WREG32(mmRLC_CNTL, 0);

	gfx_v7_0_enable_gui_idle_interrupt(adev, false);

	gfx_v7_0_wait_for_rlc_serdes(adev);
}

/**
 * gfx_v7_0_rlc_start - start the RLC ME
 *
 * @adev: amdgpu_device pointer
 *
 * Unhalt the RLC ME (MicroEngine) (CIK).
 */
static void gfx_v7_0_rlc_start(struct amdgpu_device *adev)
{
	WREG32(mmRLC_CNTL, RLC_CNTL__RLC_ENABLE_F32_MASK);

	gfx_v7_0_enable_gui_idle_interrupt(adev, true);

	udelay(50);
}

static void gfx_v7_0_rlc_reset(struct amdgpu_device *adev)
{
	u32 tmp = RREG32(mmGRBM_SOFT_RESET);

	tmp |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
	WREG32(mmGRBM_SOFT_RESET, tmp);
	udelay(50);
	tmp &= ~GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;
	WREG32(mmGRBM_SOFT_RESET, tmp);
	udelay(50);
}
/**
 * gfx_v7_0_rlc_resume - setup the RLC hw
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the RLC registers, load the ucode,
 * and start the RLC (CIK).
 * Returns 0 for success, -EINVAL if the ucode is not available.
 */
static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
{
	const struct rlc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data;
	unsigned i, fw_size;
	u32 tmp;

	if (!adev->gfx.rlc_fw)
		return -EINVAL;

	hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
	amdgpu_ucode_print_rlc_hdr(&hdr->header);
	adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
	adev->gfx.rlc_feature_version = le32_to_cpu(
					hdr->ucode_feature_version);

	gfx_v7_0_rlc_stop(adev);

	/* disable CG */
	tmp = RREG32(mmRLC_CGCG_CGLS_CTRL) & 0xfffffffc;
	WREG32(mmRLC_CGCG_CGLS_CTRL, tmp);

	gfx_v7_0_rlc_reset(adev);

	gfx_v7_0_init_pg(adev);

	WREG32(mmRLC_LB_CNTR_INIT, 0);
	WREG32(mmRLC_LB_CNTR_MAX, 0x00008000);

	mutex_lock(&adev->grbm_idx_mutex);
	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff);
	WREG32(mmRLC_LB_PARAMS, 0x00600408);
	WREG32(mmRLC_LB_CNTL, 0x80000004);
	mutex_unlock(&adev->grbm_idx_mutex);

	WREG32(mmRLC_MC_CNTL, 0);
	WREG32(mmRLC_UCODE_CNTL, 0);

	fw_data = (const __le32 *)
		(adev->gfx.rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	WREG32(mmRLC_GPM_UCODE_ADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32(mmRLC_GPM_UCODE_DATA, le32_to_cpup(fw_data++));
	WREG32(mmRLC_GPM_UCODE_ADDR, adev->gfx.rlc_fw_version);

	/* XXX - find out what chips support lbpw */
	gfx_v7_0_enable_lbpw(adev, false);

	if (adev->asic_type == CHIP_BONAIRE)
		WREG32(mmRLC_DRIVER_CPDMA_STATUS, 0);

	gfx_v7_0_rlc_start(adev);

	return 0;
}
static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
{
	u32 data, orig, tmp, tmp2;

	orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
		gfx_v7_0_enable_gui_idle_interrupt(adev, true);

		tmp = gfx_v7_0_halt_rlc(adev);

		mutex_lock(&adev->grbm_idx_mutex);
		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
		WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
		tmp2 = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
			RLC_SERDES_WR_CTRL__CGCG_OVERRIDE_0_MASK |
			RLC_SERDES_WR_CTRL__CGLS_ENABLE_MASK;
		WREG32(mmRLC_SERDES_WR_CTRL, tmp2);
		mutex_unlock(&adev->grbm_idx_mutex);

		gfx_v7_0_update_rlc(adev, tmp);

		data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
		if (orig != data)
			WREG32(mmRLC_CGCG_CGLS_CTRL, data);

	} else {
		gfx_v7_0_enable_gui_idle_interrupt(adev, false);

		RREG32(mmCB_CGTT_SCLK_CTRL);
		RREG32(mmCB_CGTT_SCLK_CTRL);
		RREG32(mmCB_CGTT_SCLK_CTRL);
		RREG32(mmCB_CGTT_SCLK_CTRL);

		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
		if (orig != data)
			WREG32(mmRLC_CGCG_CGLS_CTRL, data);

		gfx_v7_0_enable_gui_idle_interrupt(adev, true);
	}
}
static void gfx_v7_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
{
	u32 data, orig, tmp = 0;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
			if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
				orig = data = RREG32(mmCP_MEM_SLP_CNTL);
				data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
				if (orig != data)
					WREG32(mmCP_MEM_SLP_CNTL, data);
			}
		}

		orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
		data &= 0xfffffffd;
		if (orig != data)
			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);

		tmp = gfx_v7_0_halt_rlc(adev);

		mutex_lock(&adev->grbm_idx_mutex);
		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
		WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
		data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK |
			RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_0_MASK;
		WREG32(mmRLC_SERDES_WR_CTRL, data);
		mutex_unlock(&adev->grbm_idx_mutex);

		gfx_v7_0_update_rlc(adev, tmp);
		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) {
			orig = data = RREG32(mmCGTS_SM_CTRL_REG);
			data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK;
			data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
			data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
			data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
			if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) &&
			    (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS))
				data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
			data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK;
			data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
			data |= (0x96 << CGTS_SM_CTRL_REG__ON_MONITOR_ADD__SHIFT);
			if (orig != data)
				WREG32(mmCGTS_SM_CTRL_REG, data);
		}
	} else {
		orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
		data |= 0x00000003;
		if (orig != data)
			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);

		data = RREG32(mmRLC_MEM_SLP_CNTL);
		if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) {
			data &= ~RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK;
			WREG32(mmRLC_MEM_SLP_CNTL, data);
		}

		data = RREG32(mmCP_MEM_SLP_CNTL);
		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
			WREG32(mmCP_MEM_SLP_CNTL, data);
		}

		orig = data = RREG32(mmCGTS_SM_CTRL_REG);
		data |= CGTS_SM_CTRL_REG__OVERRIDE_MASK | CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
		if (orig != data)
			WREG32(mmCGTS_SM_CTRL_REG, data);

		tmp = gfx_v7_0_halt_rlc(adev);

		mutex_lock(&adev->grbm_idx_mutex);
		gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		WREG32(mmRLC_SERDES_WR_CU_MASTER_MASK, 0xffffffff);
		WREG32(mmRLC_SERDES_WR_NONCU_MASTER_MASK, 0xffffffff);
		data = RLC_SERDES_WR_CTRL__BPM_ADDR_MASK | RLC_SERDES_WR_CTRL__MGCG_OVERRIDE_1_MASK;
		WREG32(mmRLC_SERDES_WR_CTRL, data);
		mutex_unlock(&adev->grbm_idx_mutex);

		gfx_v7_0_update_rlc(adev, tmp);
	}
}
static void gfx_v7_0_update_cg(struct amdgpu_device *adev,
			       bool enable)
{
	gfx_v7_0_enable_gui_idle_interrupt(adev, false);
	/* order matters! */
	if (enable) {
		gfx_v7_0_enable_mgcg(adev, true);
		gfx_v7_0_enable_cgcg(adev, true);
	} else {
		gfx_v7_0_enable_cgcg(adev, false);
		gfx_v7_0_enable_mgcg(adev, false);
	}
	gfx_v7_0_enable_gui_idle_interrupt(adev, true);
}
static void gfx_v7_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev,
						bool enable)
{
	u32 data, orig;

	orig = data = RREG32(mmRLC_PG_CNTL);
	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
	else
		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
	if (orig != data)
		WREG32(mmRLC_PG_CNTL, data);
}
static void gfx_v7_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev,
						bool enable)
{
	u32 data, orig;

	orig = data = RREG32(mmRLC_PG_CNTL);
	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
		data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
	else
		data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
	if (orig != data)
		WREG32(mmRLC_PG_CNTL, data);
}
static void gfx_v7_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
{
	u32 data, orig;

	orig = data = RREG32(mmRLC_PG_CNTL);
	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP))
		data &= ~0x8000;
	else
		data |= 0x8000;
	if (orig != data)
		WREG32(mmRLC_PG_CNTL, data);
}
static void gfx_v7_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
{
	u32 data, orig;

	orig = data = RREG32(mmRLC_PG_CNTL);
	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GDS))
		data &= ~0x2000;
	else
		data |= 0x2000;
	if (orig != data)
		WREG32(mmRLC_PG_CNTL, data);
}
static void gfx_v7_0_init_cp_pg_table(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	volatile u32 *dst_ptr;
	int me, i, max_me = 4;
	u32 bo_offset = 0;
	u32 table_offset, table_size;
;
3795 if (adev
->asic_type
== CHIP_KAVERI
)
3798 if (adev
->gfx
.rlc
.cp_table_ptr
== NULL
)
3801 /* write the cp table buffer */
3802 dst_ptr
= adev
->gfx
.rlc
.cp_table_ptr
;
3803 for (me
= 0; me
< max_me
; me
++) {
3805 const struct gfx_firmware_header_v1_0
*hdr
=
3806 (const struct gfx_firmware_header_v1_0
*)adev
->gfx
.ce_fw
->data
;
3807 fw_data
= (const __le32
*)
3808 (adev
->gfx
.ce_fw
->data
+
3809 le32_to_cpu(hdr
->header
.ucode_array_offset_bytes
));
3810 table_offset
= le32_to_cpu(hdr
->jt_offset
);
3811 table_size
= le32_to_cpu(hdr
->jt_size
);
3812 } else if (me
== 1) {
3813 const struct gfx_firmware_header_v1_0
*hdr
=
3814 (const struct gfx_firmware_header_v1_0
*)adev
->gfx
.pfp_fw
->data
;
3815 fw_data
= (const __le32
*)
3816 (adev
->gfx
.pfp_fw
->data
+
3817 le32_to_cpu(hdr
->header
.ucode_array_offset_bytes
));
3818 table_offset
= le32_to_cpu(hdr
->jt_offset
);
3819 table_size
= le32_to_cpu(hdr
->jt_size
);
3820 } else if (me
== 2) {
3821 const struct gfx_firmware_header_v1_0
*hdr
=
3822 (const struct gfx_firmware_header_v1_0
*)adev
->gfx
.me_fw
->data
;
3823 fw_data
= (const __le32
*)
3824 (adev
->gfx
.me_fw
->data
+
3825 le32_to_cpu(hdr
->header
.ucode_array_offset_bytes
));
3826 table_offset
= le32_to_cpu(hdr
->jt_offset
);
3827 table_size
= le32_to_cpu(hdr
->jt_size
);
3828 } else if (me
== 3) {
3829 const struct gfx_firmware_header_v1_0
*hdr
=
3830 (const struct gfx_firmware_header_v1_0
*)adev
->gfx
.mec_fw
->data
;
3831 fw_data
= (const __le32
*)
3832 (adev
->gfx
.mec_fw
->data
+
3833 le32_to_cpu(hdr
->header
.ucode_array_offset_bytes
));
3834 table_offset
= le32_to_cpu(hdr
->jt_offset
);
3835 table_size
= le32_to_cpu(hdr
->jt_size
);
3837 const struct gfx_firmware_header_v1_0
*hdr
=
3838 (const struct gfx_firmware_header_v1_0
*)adev
->gfx
.mec2_fw
->data
;
3839 fw_data
= (const __le32
*)
3840 (adev
->gfx
.mec2_fw
->data
+
3841 le32_to_cpu(hdr
->header
.ucode_array_offset_bytes
));
3842 table_offset
= le32_to_cpu(hdr
->jt_offset
);
3843 table_size
= le32_to_cpu(hdr
->jt_size
);
3846 for (i
= 0; i
< table_size
; i
++) {
3847 dst_ptr
[bo_offset
+ i
] =
3848 cpu_to_le32(le32_to_cpu(fw_data
[table_offset
+ i
]));
3851 bo_offset
+= table_size
;
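/*
 * Coarse-grain gfx power gating: GFX_POWER_GATING_ENABLE arms the RLC
 * power-gating state machine and AUTO_PG_EN lets it trigger on idle
 * without driver involvement.  The disable path clears both and ends
 * with a dummy DB_RENDER_CONTROL read.
 */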
static void gfx_v7_0_enable_gfx_cgpg(struct amdgpu_device *adev,
				     bool enable)
{
	u32 data, orig;

	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
		orig = data = RREG32(mmRLC_PG_CNTL);
		data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
		if (orig != data)
			WREG32(mmRLC_PG_CNTL, data);

		orig = data = RREG32(mmRLC_AUTO_PG_CTRL);
		data |= RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK;
		if (orig != data)
			WREG32(mmRLC_AUTO_PG_CTRL, data);
	} else {
		orig = data = RREG32(mmRLC_PG_CNTL);
		data &= ~RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
		if (orig != data)
			WREG32(mmRLC_PG_CNTL, data);

		orig = data = RREG32(mmRLC_AUTO_PG_CTRL);
		data &= ~RLC_AUTO_PG_CTRL__AUTO_PG_EN_MASK;
		if (orig != data)
			WREG32(mmRLC_AUTO_PG_CTRL, data);

		data = RREG32(mmDB_RENDER_CONTROL);
	}
}
static void gfx_v7_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
						 u32 bitmap)
{
	u32 data;

	if (!bitmap)
		return;

	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;

	WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
}
static u32 gfx_v7_0_get_cu_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG);
	data |= RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);

	data &= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;
	data >>= CC_GC_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);

	return (~data) & mask;
}
static void gfx_v7_0_init_ao_cu_mask(struct amdgpu_device *adev)
{
	u32 tmp;

	WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);

	tmp = RREG32(mmRLC_MAX_PG_CU);
	tmp &= ~RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK;
	tmp |= (adev->gfx.cu_info.number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT);
	WREG32(mmRLC_MAX_PG_CU, tmp);
}
static void gfx_v7_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
					    bool enable)
{
	u32 data, orig;

	orig = data = RREG32(mmRLC_PG_CNTL);
	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG))
		data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
	else
		data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
	if (orig != data)
		WREG32(mmRLC_PG_CNTL, data);
}
static void gfx_v7_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
					     bool enable)
{
	u32 data, orig;

	orig = data = RREG32(mmRLC_PG_CNTL);
	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG))
		data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
	else
		data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
	if (orig != data)
		WREG32(mmRLC_PG_CNTL, data);
}
#define RLC_SAVE_AND_RESTORE_STARTING_OFFSET 0x90
#define RLC_CLEAR_STATE_DESCRIPTOR_OFFSET    0x3D
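/*
 * Loads everything the RLC needs for gfx power gating into its GPM
 * scratch space: the clear-state buffer descriptor, the save/restore
 * register list, and the delay/idle-threshold tuning used by auto PG.
 */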
static void gfx_v7_0_init_gfx_cgpg(struct amdgpu_device *adev)
{
	u32 data, orig;
	u32 i;

	if (adev->gfx.rlc.cs_data) {
		WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
		WREG32(mmRLC_GPM_SCRATCH_DATA, upper_32_bits(adev->gfx.rlc.clear_state_gpu_addr));
		WREG32(mmRLC_GPM_SCRATCH_DATA, lower_32_bits(adev->gfx.rlc.clear_state_gpu_addr));
		WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.clear_state_size);
	} else {
		WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_CLEAR_STATE_DESCRIPTOR_OFFSET);
		for (i = 0; i < 3; i++)
			WREG32(mmRLC_GPM_SCRATCH_DATA, 0);
	}
	if (adev->gfx.rlc.reg_list) {
		WREG32(mmRLC_GPM_SCRATCH_ADDR, RLC_SAVE_AND_RESTORE_STARTING_OFFSET);
		for (i = 0; i < adev->gfx.rlc.reg_list_size; i++)
			WREG32(mmRLC_GPM_SCRATCH_DATA, adev->gfx.rlc.reg_list[i]);
	}

	orig = data = RREG32(mmRLC_PG_CNTL);
	data |= RLC_PG_CNTL__GFX_POWER_GATING_SRC_MASK;
	if (orig != data)
		WREG32(mmRLC_PG_CNTL, data);

	WREG32(mmRLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
	WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);

	data = RREG32(mmCP_RB_WPTR_POLL_CNTL);
	data &= ~CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT_MASK;
	data |= (0x60 << CP_RB_WPTR_POLL_CNTL__IDLE_POLL_COUNT__SHIFT);
	WREG32(mmCP_RB_WPTR_POLL_CNTL, data);

	data = 0x10101010; /* assumed: per-block PG delay tuning value */
	WREG32(mmRLC_PG_DELAY, data);

	data = RREG32(mmRLC_PG_DELAY_2);
	data &= ~0xff;
	data |= 0x3;
	WREG32(mmRLC_PG_DELAY_2, data);

	data = RREG32(mmRLC_AUTO_PG_CTRL);
	data &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
	data |= (0x700 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
	WREG32(mmRLC_AUTO_PG_CTRL, data);
}
static void gfx_v7_0_update_gfx_pg(struct amdgpu_device *adev, bool enable)
{
	gfx_v7_0_enable_gfx_cgpg(adev, enable);
	gfx_v7_0_enable_gfx_static_mgpg(adev, enable);
	gfx_v7_0_enable_gfx_dynamic_mgpg(adev, enable);
}
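/*
 * Worst-case clear-state buffer (CSB) size in dwords: preamble begin and
 * end, context control, one SET_CONTEXT_REG header pair per extent plus
 * its registers, the raster-config pair, and the final CLEAR_STATE
 * packet.
 */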
static u32 gfx_v7_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (adev->gfx.rlc.cs_data == NULL)
		return 0;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}
	/* pa_sc_raster_config/pa_sc_raster_config1 */
	count += 4;
	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}
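/*
 * Writes the CSB contents sized above.  Only PA_SC_RASTER_CONFIG is
 * per-ASIC: the dGPUs get their golden values while the APU cases are
 * left at zero (the XXX placeholders below).
 */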
static void gfx_v7_0_get_csb_buffer(struct amdgpu_device *adev,
				    volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		buffer[count++] = cpu_to_le32(0x16000012);
		buffer[count++] = cpu_to_le32(0x00000000);
		break;
	case CHIP_KAVERI:
		buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
		buffer[count++] = cpu_to_le32(0x00000000);
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
		buffer[count++] = cpu_to_le32(0x00000000); /* XXX */
		buffer[count++] = cpu_to_le32(0x00000000);
		break;
	case CHIP_HAWAII:
		buffer[count++] = cpu_to_le32(0x3a00161a);
		buffer[count++] = cpu_to_le32(0x0000002e);
		break;
	default:
		buffer[count++] = cpu_to_le32(0x00000000);
		buffer[count++] = cpu_to_le32(0x00000000);
		break;
	}

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}
static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
{
	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
			      AMD_PG_SUPPORT_GFX_SMG |
			      AMD_PG_SUPPORT_GFX_DMG |
			      AMD_PG_SUPPORT_CP |
			      AMD_PG_SUPPORT_GDS |
			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
		gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true);
		gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true);
		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
			gfx_v7_0_init_gfx_cgpg(adev);
			gfx_v7_0_enable_cp_pg(adev, true);
			gfx_v7_0_enable_gds_pg(adev, true);
		}
		gfx_v7_0_init_ao_cu_mask(adev);
		gfx_v7_0_update_gfx_pg(adev, true);
	}
}
static void gfx_v7_0_fini_pg(struct amdgpu_device *adev)
{
	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
			      AMD_PG_SUPPORT_GFX_SMG |
			      AMD_PG_SUPPORT_GFX_DMG |
			      AMD_PG_SUPPORT_CP |
			      AMD_PG_SUPPORT_GDS |
			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
		gfx_v7_0_update_gfx_pg(adev, false);
		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
			gfx_v7_0_enable_cp_pg(adev, false);
			gfx_v7_0_enable_gds_pg(adev, false);
		}
	}
}
/**
 * gfx_v7_0_get_gpu_clock_counter - return GPU clock counter snapshot
 *
 * @adev: amdgpu_device pointer
 *
 * Fetches a GPU clock counter snapshot (CIK).
 * Returns the 64 bit clock counter snapshot.
 */
static uint64_t gfx_v7_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
	uint64_t clock;

	mutex_lock(&adev->gfx.gpu_clock_mutex);
	WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&adev->gfx.gpu_clock_mutex);
	return clock;
}
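/*
 * Points a VMID's GDS, GWS and OA apertures at the ranges allocated to
 * the job by emitting WRITE_DATA packets on the ring.  The base/size
 * arguments arrive in bytes and are first converted to register units
 * via the AMDGPU_*_SHIFT factors.
 */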
static void gfx_v7_0_ring_emit_gds_switch(struct amdgpu_ring *ring,
					  uint32_t vmid,
					  uint32_t gds_base, uint32_t gds_size,
					  uint32_t gws_base, uint32_t gws_size,
					  uint32_t oa_base, uint32_t oa_size)
{
	gds_base = gds_base >> AMDGPU_GDS_SHIFT;
	gds_size = gds_size >> AMDGPU_GDS_SHIFT;

	gws_base = gws_base >> AMDGPU_GWS_SHIFT;
	gws_size = gws_size >> AMDGPU_GWS_SHIFT;

	oa_base = oa_base >> AMDGPU_OA_SHIFT;
	oa_size = oa_size >> AMDGPU_OA_SHIFT;

	/* GDS Base */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_base);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, gds_base);

	/* GDS Size */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].mem_size);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, gds_size);

	/* GWS */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].gws);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, gws_size << GDS_GWS_VMID0__SIZE__SHIFT | gws_base);

	/* OA */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) |
				 WRITE_DATA_DST_SEL(0)));
	amdgpu_ring_write(ring, amdgpu_gds_reg_offset[vmid].oa);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, (1 << (oa_size + oa_base)) - (1 << oa_base));
}
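/*
 * Wave debug helpers: SQ exposes per-wave state through an indirect
 * index/data register pair.  wave_read_ind() fetches one dword;
 * wave_read_regs() sets AUTO_INCR to stream a consecutive register
 * range, which is how the SGPR dump below is implemented.
 */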
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
	WREG32(mmSQ_IND_INDEX,
	       (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
	       (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
	       (address << SQ_IND_INDEX__INDEX__SHIFT) |
	       (SQ_IND_INDEX__FORCE_READ_MASK));
	return RREG32(mmSQ_IND_DATA);
}
static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
			   uint32_t wave, uint32_t thread,
			   uint32_t regno, uint32_t num, uint32_t *out)
{
	WREG32(mmSQ_IND_INDEX,
	       (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
	       (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
	       (regno << SQ_IND_INDEX__INDEX__SHIFT) |
	       (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
	       (SQ_IND_INDEX__FORCE_READ_MASK) |
	       (SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32(mmSQ_IND_DATA);
}
static void gfx_v7_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
	/* type 0 wave data */
	dst[(*no_fields)++] = 0;
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
}
static void gfx_v7_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
				     uint32_t wave, uint32_t start,
				     uint32_t size, uint32_t *dst)
{
	wave_read_regs(
		adev, simd, wave, 0,
		start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}
static void gfx_v7_0_select_me_pipe_q(struct amdgpu_device *adev,
				      u32 me, u32 pipe, u32 q)
{
	cik_srbm_select(adev, me, pipe, q, 0);
}
static const struct amdgpu_gfx_funcs gfx_v7_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v7_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v7_0_select_se_sh,
	.read_wave_data = &gfx_v7_0_read_wave_data,
	.read_wave_sgprs = &gfx_v7_0_read_wave_sgprs,
	.select_me_pipe_q = &gfx_v7_0_select_me_pipe_q
};

static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
	.enter_safe_mode = gfx_v7_0_enter_rlc_safe_mode,
	.exit_safe_mode = gfx_v7_0_exit_rlc_safe_mode
};
static int gfx_v7_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfx.num_gfx_rings = GFX7_NUM_GFX_RINGS;
	adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
	adev->gfx.funcs = &gfx_v7_0_gfx_funcs;
	adev->gfx.rlc.funcs = &gfx_v7_0_rlc_funcs;
	gfx_v7_0_set_ring_funcs(adev);
	gfx_v7_0_set_irq_funcs(adev);
	gfx_v7_0_set_gds_init(adev);

	return 0;
}
static int gfx_v7_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
	if (r)
		return r;

	r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
	if (r)
		return r;

	return 0;
}
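/*
 * Fills adev->gfx.config with the per-ASIC shader core layout (shader
 * engines, pipes, CUs, backends, FIFO depths) and derives gb_addr_config
 * from the golden value plus the memory row size probed from the MC
 * registers.
 */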
static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config;
	u32 mc_shared_chmap, mc_arb_ramcfg;
	u32 dimm00_addr_map, dimm01_addr_map, dimm10_addr_map, dimm11_addr_map;
	u32 tmp;

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		adev->gfx.config.max_shader_engines = 2;
		adev->gfx.config.max_tile_pipes = 4;
		adev->gfx.config.max_cu_per_sh = 7;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_texture_channel_caches = 4;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_HAWAII:
		adev->gfx.config.max_shader_engines = 4;
		adev->gfx.config.max_tile_pipes = 16;
		adev->gfx.config.max_cu_per_sh = 11;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 4;
		adev->gfx.config.max_texture_channel_caches = 16;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = HAWAII_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_KAVERI:
		adev->gfx.config.max_shader_engines = 1;
		adev->gfx.config.max_tile_pipes = 4;
		adev->gfx.config.max_cu_per_sh = 8;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_texture_channel_caches = 4;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 16;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_KABINI:
	case CHIP_MULLINS:
	default:
		adev->gfx.config.max_shader_engines = 1;
		adev->gfx.config.max_tile_pipes = 2;
		adev->gfx.config.max_cu_per_sh = 2;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 1;
		adev->gfx.config.max_texture_channel_caches = 2;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 16;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = BONAIRE_GB_ADDR_CONFIG_GOLDEN;
		break;
	}

	mc_shared_chmap = RREG32(mmMC_SHARED_CHMAP);
	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;

	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
	adev->gfx.config.mem_max_burst_length_bytes = 256;
	if (adev->flags & AMD_IS_APU) {
		/* Get memory bank mapping mode. */
		tmp = RREG32(mmMC_FUS_DRAM0_BANK_ADDR_MAPPING);
		dimm00_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
		dimm01_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM0_BANK_ADDR_MAPPING, DIMM1ADDRMAP);

		tmp = RREG32(mmMC_FUS_DRAM1_BANK_ADDR_MAPPING);
		dimm10_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM0ADDRMAP);
		dimm11_addr_map = REG_GET_FIELD(tmp, MC_FUS_DRAM1_BANK_ADDR_MAPPING, DIMM1ADDRMAP);

		/* Validate settings in case only one DIMM installed. */
		if ((dimm00_addr_map == 0) || (dimm00_addr_map == 3) || (dimm00_addr_map == 4) || (dimm00_addr_map > 12))
			dimm00_addr_map = 0;
		if ((dimm01_addr_map == 0) || (dimm01_addr_map == 3) || (dimm01_addr_map == 4) || (dimm01_addr_map > 12))
			dimm01_addr_map = 0;
		if ((dimm10_addr_map == 0) || (dimm10_addr_map == 3) || (dimm10_addr_map == 4) || (dimm10_addr_map > 12))
			dimm10_addr_map = 0;
		if ((dimm11_addr_map == 0) || (dimm11_addr_map == 3) || (dimm11_addr_map == 4) || (dimm11_addr_map > 12))
			dimm11_addr_map = 0;

		/* If DIMM Addr map is 8GB, ROW size should be 2KB. Otherwise 1KB. */
		/* If ROW size(DIMM1) != ROW size(DIMM0), ROW size should be the larger one. */
		if ((dimm00_addr_map == 11) || (dimm01_addr_map == 11) || (dimm10_addr_map == 11) || (dimm11_addr_map == 11))
			adev->gfx.config.mem_row_size_in_kb = 2;
		else
			adev->gfx.config.mem_row_size_in_kb = 1;
	} else {
		tmp = (mc_arb_ramcfg & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT;
		adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
		if (adev->gfx.config.mem_row_size_in_kb > 4)
			adev->gfx.config.mem_row_size_in_kb = 4;
	}
	/* XXX use MC settings? */
	adev->gfx.config.shader_engine_tile_size = 32;
	adev->gfx.config.num_gpus = 1;
	adev->gfx.config.multi_gpu_tile_size = 64;

	/* fix up row size */
	gb_addr_config &= ~GB_ADDR_CONFIG__ROW_SIZE_MASK;
	switch (adev->gfx.config.mem_row_size_in_kb) {
	case 1:
	default:
		gb_addr_config |= (0 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
		break;
	case 2:
		gb_addr_config |= (1 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
		break;
	case 4:
		gb_addr_config |= (2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT);
		break;
	}
	adev->gfx.config.gb_addr_config = gb_addr_config;
}
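/*
 * Each compute ring is backed by one MEC hardware queue.  The EOP
 * interrupt source is derived from the (me, pipe) pair, e.g. MEC1 pipe 2
 * maps to AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + 2.
 */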
static int gfx_v7_0_compute_ring_init(struct amdgpu_device *adev, int ring_id,
				      int mec, int pipe, int queue)
{
	int r;
	unsigned irq_type;
	struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];

	/* mec0 is me1 */
	ring->me = mec + 1;
	ring->pipe = pipe;
	ring->queue = queue;

	ring->ring_obj = NULL;
	ring->use_doorbell = true;
	ring->doorbell_index = AMDGPU_DOORBELL_MEC_RING0 + ring_id;
	sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);

	irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP
		+ ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
		+ ring->pipe;

	/* type-2 packets are deprecated on MEC, use type-3 instead */
	r = amdgpu_ring_init(adev, ring, 1024,
			     &adev->gfx.eop_irq, irq_type);
	if (r)
		return r;

	return 0;
}
static int gfx_v7_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, j, k, r, ring_id;

	switch (adev->asic_type) {
	case CHIP_KAVERI:
		adev->gfx.mec.num_mec = 2;
		break;
	case CHIP_BONAIRE:
	case CHIP_HAWAII:
	case CHIP_KABINI:
	case CHIP_MULLINS:
	default:
		adev->gfx.mec.num_mec = 1;
		break;
	}
	adev->gfx.mec.num_pipe_per_mec = 4;
	adev->gfx.mec.num_queue_per_pipe = 8;

	/* EOP Event */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
	if (r)
		return r;

	/* Privileged reg */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 184,
			      &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	/* Privileged inst */
	r = amdgpu_irq_add_id(adev, AMDGPU_IH_CLIENTID_LEGACY, 185,
			      &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	gfx_v7_0_scratch_init(adev);

	r = gfx_v7_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load gfx firmware!\n");
		return r;
	}

	r = gfx_v7_0_rlc_init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	/* allocate mec buffers */
	r = gfx_v7_0_mec_init(adev);
	if (r) {
		DRM_ERROR("Failed to init MEC BOs!\n");
		return r;
	}

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		ring->ring_obj = NULL;
		sprintf(ring->name, "gfx");
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_EOP);
		if (r)
			return r;
	}

	/* set up the compute queues - allocate horizontally across pipes */
	ring_id = 0;
	for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
		for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
			for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
				if (!amdgpu_gfx_is_mec_queue_enabled(adev, i, k, j))
					continue;

				r = gfx_v7_0_compute_ring_init(adev,
							       ring_id,
							       i, k, j);
				if (r)
					return r;

				ring_id++;
			}
		}
	}

	/* reserve GDS, GWS and OA resource for gfx */
	r = amdgpu_bo_create_kernel(adev, adev->gds.mem.gfx_partition_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GDS,
				    &adev->gds.gds_gfx_bo, NULL, NULL);
	if (r)
		return r;

	r = amdgpu_bo_create_kernel(adev, adev->gds.gws.gfx_partition_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_GWS,
				    &adev->gds.gws_gfx_bo, NULL, NULL);
	if (r)
		return r;

	r = amdgpu_bo_create_kernel(adev, adev->gds.oa.gfx_partition_size,
				    PAGE_SIZE, AMDGPU_GEM_DOMAIN_OA,
				    &adev->gds.oa_gfx_bo, NULL, NULL);
	if (r)
		return r;

	adev->gfx.ce_ram_size = 0x8000;

	gfx_v7_0_gpu_early_init(adev);

	return 0;
}
static int gfx_v7_0_sw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_bo_free_kernel(&adev->gds.oa_gfx_bo, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gds.gws_gfx_bo, NULL, NULL);
	amdgpu_bo_free_kernel(&adev->gds.gds_gfx_bo, NULL, NULL);

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	gfx_v7_0_cp_compute_fini(adev);
	gfx_v7_0_rlc_fini(adev);
	gfx_v7_0_mec_fini(adev);
	amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
			      &adev->gfx.rlc.clear_state_gpu_addr,
			      (void **)&adev->gfx.rlc.cs_ptr);
	if (adev->gfx.rlc.cp_table_size) {
		amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
				      &adev->gfx.rlc.cp_table_gpu_addr,
				      (void **)&adev->gfx.rlc.cp_table_ptr);
	}
	gfx_v7_0_free_microcode(adev);

	return 0;
}
static int gfx_v7_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfx_v7_0_gpu_init(adev);

	/* init rlc */
	r = gfx_v7_0_rlc_resume(adev);
	if (r)
		return r;

	r = gfx_v7_0_cp_resume(adev);
	if (r)
		return r;

	return 0;
}
static int gfx_v7_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
	amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
	gfx_v7_0_cp_enable(adev, false);
	gfx_v7_0_rlc_stop(adev);
	gfx_v7_0_fini_pg(adev);

	return 0;
}
static int gfx_v7_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return gfx_v7_0_hw_fini(adev);
}

static int gfx_v7_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return gfx_v7_0_hw_init(adev);
}
static bool gfx_v7_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK)
		return false;
	else
		return true;
}
static int gfx_v7_0_wait_for_idle(void *handle)
{
	unsigned i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read GRBM_STATUS */
		tmp = RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK;

		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}
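/*
 * Soft reset: derive the reset bits from the busy flags in
 * GRBM_STATUS/GRBM_STATUS2/SRBM_STATUS, halt the CP and MEC, then pulse
 * the reset bits with readbacks and settle delays in between.
 */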
static int gfx_v7_0_soft_reset(void *handle)
{
	u32 grbm_soft_reset = 0, srbm_soft_reset = 0;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	/* GRBM_STATUS */
	tmp = RREG32(mmGRBM_STATUS);
	if (tmp & (GRBM_STATUS__PA_BUSY_MASK | GRBM_STATUS__SC_BUSY_MASK |
		   GRBM_STATUS__BCI_BUSY_MASK | GRBM_STATUS__SX_BUSY_MASK |
		   GRBM_STATUS__TA_BUSY_MASK | GRBM_STATUS__VGT_BUSY_MASK |
		   GRBM_STATUS__DB_BUSY_MASK | GRBM_STATUS__CB_BUSY_MASK |
		   GRBM_STATUS__GDS_BUSY_MASK | GRBM_STATUS__SPI_BUSY_MASK |
		   GRBM_STATUS__IA_BUSY_MASK | GRBM_STATUS__IA_BUSY_NO_DMA_MASK))
		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK |
			GRBM_SOFT_RESET__SOFT_RESET_GFX_MASK;

	if (tmp & (GRBM_STATUS__CP_BUSY_MASK | GRBM_STATUS__CP_COHERENCY_BUSY_MASK)) {
		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_CP_MASK;
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;
	}

	/* GRBM_STATUS2 */
	tmp = RREG32(mmGRBM_STATUS2);
	if (tmp & GRBM_STATUS2__RLC_BUSY_MASK)
		grbm_soft_reset |= GRBM_SOFT_RESET__SOFT_RESET_RLC_MASK;

	/* SRBM_STATUS */
	tmp = RREG32(mmSRBM_STATUS);
	if (tmp & SRBM_STATUS__GRBM_RQ_PENDING_MASK)
		srbm_soft_reset |= SRBM_SOFT_RESET__SOFT_RESET_GRBM_MASK;

	if (grbm_soft_reset || srbm_soft_reset) {
		/* disable CG/PG */
		gfx_v7_0_fini_pg(adev);
		gfx_v7_0_update_cg(adev, false);

		/* stop the rlc */
		gfx_v7_0_rlc_stop(adev);

		/* Disable GFX parsing/prefetching */
		WREG32(mmCP_ME_CNTL, CP_ME_CNTL__ME_HALT_MASK | CP_ME_CNTL__PFP_HALT_MASK | CP_ME_CNTL__CE_HALT_MASK);

		/* Disable MEC parsing/prefetching */
		WREG32(mmCP_MEC_CNTL, CP_MEC_CNTL__MEC_ME1_HALT_MASK | CP_MEC_CNTL__MEC_ME2_HALT_MASK);

		if (grbm_soft_reset) {
			tmp = RREG32(mmGRBM_SOFT_RESET);
			tmp |= grbm_soft_reset;
			dev_info(adev->dev, "GRBM_SOFT_RESET=0x%08X\n", tmp);
			WREG32(mmGRBM_SOFT_RESET, tmp);
			tmp = RREG32(mmGRBM_SOFT_RESET);

			udelay(50);

			tmp &= ~grbm_soft_reset;
			WREG32(mmGRBM_SOFT_RESET, tmp);
			tmp = RREG32(mmGRBM_SOFT_RESET);
		}

		if (srbm_soft_reset) {
			tmp = RREG32(mmSRBM_SOFT_RESET);
			tmp |= srbm_soft_reset;
			dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
			WREG32(mmSRBM_SOFT_RESET, tmp);
			tmp = RREG32(mmSRBM_SOFT_RESET);

			udelay(50);

			tmp &= ~srbm_soft_reset;
			WREG32(mmSRBM_SOFT_RESET, tmp);
			tmp = RREG32(mmSRBM_SOFT_RESET);
		}
		/* Wait a little for things to settle down */
		udelay(50);
	}
	return 0;
}
static void gfx_v7_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
						 enum amdgpu_interrupt_state state)
{
	u32 cp_int_cntl;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
		cp_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
		cp_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
		break;
	default:
		break;
	}
}
static void gfx_v7_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
						     int me, int pipe,
						     enum amdgpu_interrupt_state state)
{
	u32 mec_int_cntl, mec_int_cntl_reg;

	/*
	 * amdgpu controls only the first MEC. That's why this function only
	 * handles the setting of interrupts for this specific MEC. All other
	 * pipes' interrupts are set by amdkfd.
	 */

	if (me == 1) {
		switch (pipe) {
		case 0:
			mec_int_cntl_reg = mmCP_ME1_PIPE0_INT_CNTL;
			break;
		case 1:
			mec_int_cntl_reg = mmCP_ME1_PIPE1_INT_CNTL;
			break;
		case 2:
			mec_int_cntl_reg = mmCP_ME1_PIPE2_INT_CNTL;
			break;
		case 3:
			mec_int_cntl_reg = mmCP_ME1_PIPE3_INT_CNTL;
			break;
		default:
			DRM_DEBUG("invalid pipe %d\n", pipe);
			return;
		}
	} else {
		DRM_DEBUG("invalid me %d\n", me);
		return;
	}

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		mec_int_cntl = RREG32(mec_int_cntl_reg);
		mec_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
		WREG32(mec_int_cntl_reg, mec_int_cntl);
		break;
	default:
		break;
	}
}
static int gfx_v7_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 cp_int_cntl;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
		cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
		cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
		break;
	default:
		break;
	}

	return 0;
}
static int gfx_v7_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *src,
					      unsigned type,
					      enum amdgpu_interrupt_state state)
{
	u32 cp_int_cntl;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
		cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
		cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
		break;
	default:
		break;
	}

	return 0;
}
static int gfx_v7_0_set_eop_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CP_IRQ_GFX_EOP:
		gfx_v7_0_set_gfx_eop_interrupt_state(adev, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE2_EOP:
		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE3_EOP:
		gfx_v7_0_set_compute_eop_interrupt_state(adev, 1, 3, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE0_EOP:
		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE1_EOP:
		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 1, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE2_EOP:
		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 2, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC2_PIPE3_EOP:
		gfx_v7_0_set_compute_eop_interrupt_state(adev, 2, 3, state);
		break;
	default:
		break;
	}

	return 0;
}
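/*
 * ring_id in the IV entry encodes the interrupt source: bits [3:2] hold
 * the ME (0 = gfx, 1/2 = compute MECs) and bits [1:0] the pipe within
 * that ME.
 */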
static int gfx_v7_0_eop_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	u8 me_id, pipe_id;
	int i;
	struct amdgpu_ring *ring;

	DRM_DEBUG("IH: CP EOP\n");
	me_id = (entry->ring_id & 0x0c) >> 2;
	pipe_id = (entry->ring_id & 0x03) >> 0;
	switch (me_id) {
	case 0:
		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
		break;
	case 1:
	case 2:
		for (i = 0; i < adev->gfx.num_compute_rings; i++) {
			ring = &adev->gfx.compute_ring[i];
			if ((ring->me == me_id) && (ring->pipe == pipe_id))
				amdgpu_fence_process(ring);
		}
		break;
	}
	return 0;
}
static int gfx_v7_0_priv_reg_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal register access in command stream\n");
	schedule_work(&adev->reset_work);
	return 0;
}

static int gfx_v7_0_priv_inst_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in command stream\n");
	/* XXX soft reset the gfx block only */
	schedule_work(&adev->reset_work);
	return 0;
}
static int gfx_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_CG_STATE_GATE)
		gate = true;

	gfx_v7_0_enable_gui_idle_interrupt(adev, false);
	/* order matters! */
	if (gate) {
		gfx_v7_0_enable_mgcg(adev, true);
		gfx_v7_0_enable_cgcg(adev, true);
	} else {
		gfx_v7_0_enable_cgcg(adev, false);
		gfx_v7_0_enable_mgcg(adev, false);
	}
	gfx_v7_0_enable_gui_idle_interrupt(adev, true);

	return 0;
}
static int gfx_v7_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_PG_STATE_GATE)
		gate = true;

	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
			      AMD_PG_SUPPORT_GFX_SMG |
			      AMD_PG_SUPPORT_GFX_DMG |
			      AMD_PG_SUPPORT_CP |
			      AMD_PG_SUPPORT_GDS |
			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
		gfx_v7_0_update_gfx_pg(adev, gate);
		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
			gfx_v7_0_enable_cp_pg(adev, gate);
			gfx_v7_0_enable_gds_pg(adev, gate);
		}
	}

	return 0;
}
static const struct amd_ip_funcs gfx_v7_0_ip_funcs = {
	.name = "gfx_v7_0",
	.early_init = gfx_v7_0_early_init,
	.late_init = gfx_v7_0_late_init,
	.sw_init = gfx_v7_0_sw_init,
	.sw_fini = gfx_v7_0_sw_fini,
	.hw_init = gfx_v7_0_hw_init,
	.hw_fini = gfx_v7_0_hw_fini,
	.suspend = gfx_v7_0_suspend,
	.resume = gfx_v7_0_resume,
	.is_idle = gfx_v7_0_is_idle,
	.wait_for_idle = gfx_v7_0_wait_for_idle,
	.soft_reset = gfx_v7_0_soft_reset,
	.set_clockgating_state = gfx_v7_0_set_clockgating_state,
	.set_powergating_state = gfx_v7_0_set_powergating_state,
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
	.type = AMDGPU_RING_TYPE_GFX,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = false,
	.get_rptr = gfx_v7_0_ring_get_rptr,
	.get_wptr = gfx_v7_0_ring_get_wptr_gfx,
	.set_wptr = gfx_v7_0_ring_set_wptr_gfx,
	.emit_frame_size =
		20 + /* gfx_v7_0_ring_emit_gds_switch */
		7 + /* gfx_v7_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		12 + 12 + 12 + /* gfx_v7_0_ring_emit_fence_gfx x3 for user fence, vm fence */
		7 + 4 + /* gfx_v7_0_ring_emit_pipeline_sync */
		CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v7_0_ring_emit_vm_flush */
		3 + 4, /* gfx_v7_ring_emit_cntxcntl including vgt flush */
	.emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_gfx */
	.emit_ib = gfx_v7_0_ring_emit_ib_gfx,
	.emit_fence = gfx_v7_0_ring_emit_fence_gfx,
	.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
	.test_ring = gfx_v7_0_ring_test_ring,
	.test_ib = gfx_v7_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_cntxcntl = gfx_v7_ring_emit_cntxcntl,
	.emit_wreg = gfx_v7_0_ring_emit_wreg,
};
static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = PACKET3(PACKET3_NOP, 0x3FFF),
	.support_64bit_ptrs = false,
	.get_rptr = gfx_v7_0_ring_get_rptr,
	.get_wptr = gfx_v7_0_ring_get_wptr_compute,
	.set_wptr = gfx_v7_0_ring_set_wptr_compute,
	.emit_frame_size =
		20 + /* gfx_v7_0_ring_emit_gds_switch */
		7 + /* gfx_v7_0_ring_emit_hdp_flush */
		5 + /* hdp invalidate */
		7 + /* gfx_v7_0_ring_emit_pipeline_sync */
		CIK_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v7_0_ring_emit_vm_flush */
		7 + 7 + 7, /* gfx_v7_0_ring_emit_fence_compute x3 for user fence, vm fence */
	.emit_ib_size = 4, /* gfx_v7_0_ring_emit_ib_compute */
	.emit_ib = gfx_v7_0_ring_emit_ib_compute,
	.emit_fence = gfx_v7_0_ring_emit_fence_compute,
	.emit_pipeline_sync = gfx_v7_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
	.emit_gds_switch = gfx_v7_0_ring_emit_gds_switch,
	.emit_hdp_flush = gfx_v7_0_ring_emit_hdp_flush,
	.test_ring = gfx_v7_0_ring_test_ring,
	.test_ib = gfx_v7_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.pad_ib = amdgpu_ring_generic_pad_ib,
	.emit_wreg = gfx_v7_0_ring_emit_wreg,
};
static void gfx_v7_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v7_0_ring_funcs_gfx;
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v7_0_ring_funcs_compute;
}
static const struct amdgpu_irq_src_funcs gfx_v7_0_eop_irq_funcs = {
	.set = gfx_v7_0_set_eop_interrupt_state,
	.process = gfx_v7_0_eop_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_reg_irq_funcs = {
	.set = gfx_v7_0_set_priv_reg_fault_state,
	.process = gfx_v7_0_priv_reg_irq,
};

static const struct amdgpu_irq_src_funcs gfx_v7_0_priv_inst_irq_funcs = {
	.set = gfx_v7_0_set_priv_inst_fault_state,
	.process = gfx_v7_0_priv_inst_irq,
};
static void gfx_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v7_0_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v7_0_priv_reg_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v7_0_priv_inst_irq_funcs;
}
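/*
 * GDS memory, GWS slots and OA counters are split statically between the
 * gfx and compute (CS) partitions; the split is chosen from the total
 * GDS size reported by GDS_VMID0_SIZE.
 */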
static void gfx_v7_0_set_gds_init(struct amdgpu_device *adev)
{
	/* init asic gds info */
	adev->gds.mem.total_size = RREG32(mmGDS_VMID0_SIZE);
	adev->gds.gws.total_size = 64;
	adev->gds.oa.total_size = 16;

	if (adev->gds.mem.total_size == 64 * 1024) {
		adev->gds.mem.gfx_partition_size = 4096;
		adev->gds.mem.cs_partition_size = 4096;

		adev->gds.gws.gfx_partition_size = 4;
		adev->gds.gws.cs_partition_size = 4;

		adev->gds.oa.gfx_partition_size = 4;
		adev->gds.oa.cs_partition_size = 1;
	} else {
		adev->gds.mem.gfx_partition_size = 1024;
		adev->gds.mem.cs_partition_size = 1024;

		adev->gds.gws.gfx_partition_size = 16;
		adev->gds.gws.cs_partition_size = 16;

		adev->gds.oa.gfx_partition_size = 4;
		adev->gds.oa.cs_partition_size = 4;
	}
}
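/*
 * Builds cu_info from the harvest state: for each SE/SH the active CU
 * bitmap is read back (after applying any user-requested CU disables)
 * and the first ao_cu_num CUs are marked always-on for power gating.
 */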
static void gfx_v7_0_get_cu_info(struct amdgpu_device *adev)
{
	int i, j, k, counter, active_cu_number = 0;
	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
	unsigned disable_masks[4 * 2];
	u32 ao_cu_num;

	if (adev->flags & AMD_IS_APU)
		ao_cu_num = 2;
	else
		ao_cu_num = adev->gfx.config.max_cu_per_sh;

	memset(cu_info, 0, sizeof(*cu_info));

	amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			ao_bitmap = 0;
			counter = 0;
			gfx_v7_0_select_se_sh(adev, i, j, 0xffffffff);
			if (i < 4 && j < 2)
				gfx_v7_0_set_user_cu_inactive_bitmap(
					adev, disable_masks[i * 2 + j]);
			bitmap = gfx_v7_0_get_cu_active_bitmap(adev);
			cu_info->bitmap[i][j] = bitmap;

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
				if (bitmap & mask) {
					if (counter < ao_cu_num)
						ao_bitmap |= mask;
					counter++;
				}
				mask <<= 1;
			}
			active_cu_number += counter;
			if (i < 2 && j < 2)
				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
			cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
		}
	}
	gfx_v7_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->ao_cu_mask = ao_cu_mask;
	cu_info->simd_per_cu = NUM_SIMD_PER_CU;
	cu_info->max_waves_per_simd = 10;
	cu_info->max_scratch_slots_per_cu = 32;
	cu_info->wave_front_size = 64;
	cu_info->lds_size = 64;
}
const struct amdgpu_ip_block_version gfx_v7_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 7,
	.minor = 0,
	.rev = 0,
	.funcs = &gfx_v7_0_ip_funcs,
};

const struct amdgpu_ip_block_version gfx_v7_1_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 7,
	.minor = 1,
	.rev = 0,
	.funcs = &gfx_v7_0_ip_funcs,
};

const struct amdgpu_ip_block_version gfx_v7_2_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 7,
	.minor = 2,
	.rev = 0,
	.funcs = &gfx_v7_0_ip_funcs,
};

const struct amdgpu_ip_block_version gfx_v7_3_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 7,
	.minor = 3,
	.rev = 0,
	.funcs = &gfx_v7_0_ip_funcs,
};