/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/firmware.h>
#include <linux/module.h>

#include "amdgpu.h"
#include "amdgpu_ih.h"
#include "amdgpu_gfx.h"
#include "amdgpu_ucode.h"
#include "clearstate_si.h"
#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
#include "gca/gfx_6_0_d.h"
#include "gca/gfx_6_0_sh_mask.h"
#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
#include "gca/gfx_7_2_enum.h"
static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev);
static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev);
MODULE_FIRMWARE("amdgpu/tahiti_pfp.bin");
MODULE_FIRMWARE("amdgpu/tahiti_me.bin");
MODULE_FIRMWARE("amdgpu/tahiti_ce.bin");
MODULE_FIRMWARE("amdgpu/tahiti_rlc.bin");

MODULE_FIRMWARE("amdgpu/pitcairn_pfp.bin");
MODULE_FIRMWARE("amdgpu/pitcairn_me.bin");
MODULE_FIRMWARE("amdgpu/pitcairn_ce.bin");
MODULE_FIRMWARE("amdgpu/pitcairn_rlc.bin");

MODULE_FIRMWARE("amdgpu/verde_pfp.bin");
MODULE_FIRMWARE("amdgpu/verde_me.bin");
MODULE_FIRMWARE("amdgpu/verde_ce.bin");
MODULE_FIRMWARE("amdgpu/verde_rlc.bin");

MODULE_FIRMWARE("amdgpu/oland_pfp.bin");
MODULE_FIRMWARE("amdgpu/oland_me.bin");
MODULE_FIRMWARE("amdgpu/oland_ce.bin");
MODULE_FIRMWARE("amdgpu/oland_rlc.bin");

MODULE_FIRMWARE("amdgpu/hainan_pfp.bin");
MODULE_FIRMWARE("amdgpu/hainan_me.bin");
MODULE_FIRMWARE("amdgpu/hainan_ce.bin");
MODULE_FIRMWARE("amdgpu/hainan_rlc.bin");
static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev);
static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev, volatile u32 *buffer);
//static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev);
static void gfx_v6_0_init_pg(struct amdgpu_device *adev);
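
/*
 * Helpers that pack tiling parameters into the GB_TILE_MODEx register
 * fields.  Fields without a *__SHIFT define in the headers (micro tile
 * mode, bank width/height, macro tile aspect, num banks) use their raw
 * SI bit offsets directly.
 */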
#define ARRAY_MODE(x)				((x) << GB_TILE_MODE0__ARRAY_MODE__SHIFT)
#define PIPE_CONFIG(x)				((x) << GB_TILE_MODE0__PIPE_CONFIG__SHIFT)
#define TILE_SPLIT(x)				((x) << GB_TILE_MODE0__TILE_SPLIT__SHIFT)
#define MICRO_TILE_MODE(x)			((x) << 0)
#define SAMPLE_SPLIT(x)				((x) << GB_TILE_MODE0__SAMPLE_SPLIT__SHIFT)
#define BANK_WIDTH(x)				((x) << 14)
#define BANK_HEIGHT(x)				((x) << 16)
#define MACRO_TILE_ASPECT(x)			((x) << 18)
#define NUM_BANKS(x)				((x) << 20)
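
/*
 * RLC save/restore register list for Verde.  Each entry appears to pack
 * an engine/instance selector in the upper 16 bits and a dword register
 * offset (byte offset >> 2) in the lower 16 bits.
 */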
static const u32 verde_rlc_save_restore_register_list[] =
{
	(0x8000 << 16) | (0x98f4 >> 2),
	(0x8040 << 16) | (0x98f4 >> 2),
	(0x8000 << 16) | (0xe80 >> 2),
	(0x8040 << 16) | (0xe80 >> 2),
	(0x8000 << 16) | (0x89bc >> 2),
	(0x8040 << 16) | (0x89bc >> 2),
	(0x8000 << 16) | (0x8c1c >> 2),
	(0x8040 << 16) | (0x8c1c >> 2),
	(0x9c00 << 16) | (0x98f0 >> 2),
	(0x9c00 << 16) | (0xe7c >> 2),
	(0x8000 << 16) | (0x9148 >> 2),
	(0x8040 << 16) | (0x9148 >> 2),
	(0x9c00 << 16) | (0x9150 >> 2),
	(0x9c00 << 16) | (0x897c >> 2),
	(0x9c00 << 16) | (0x8d8c >> 2),
	(0x9c00 << 16) | (0xac54 >> 2),
	(0x9c00 << 16) | (0x98f8 >> 2),
	(0x9c00 << 16) | (0x9910 >> 2),
	(0x9c00 << 16) | (0x9914 >> 2),
	(0x9c00 << 16) | (0x9918 >> 2),
	(0x9c00 << 16) | (0x991c >> 2),
	(0x9c00 << 16) | (0x9920 >> 2),
	(0x9c00 << 16) | (0x9924 >> 2),
	(0x9c00 << 16) | (0x9928 >> 2),
	(0x9c00 << 16) | (0x992c >> 2),
	(0x9c00 << 16) | (0x9930 >> 2),
	(0x9c00 << 16) | (0x9934 >> 2),
	(0x9c00 << 16) | (0x9938 >> 2),
	(0x9c00 << 16) | (0x993c >> 2),
	(0x9c00 << 16) | (0x9940 >> 2),
	(0x9c00 << 16) | (0x9944 >> 2),
	(0x9c00 << 16) | (0x9948 >> 2),
	(0x9c00 << 16) | (0x994c >> 2),
	(0x9c00 << 16) | (0x9950 >> 2),
	(0x9c00 << 16) | (0x9954 >> 2),
	(0x9c00 << 16) | (0x9958 >> 2),
	(0x9c00 << 16) | (0x995c >> 2),
	(0x9c00 << 16) | (0x9960 >> 2),
	(0x9c00 << 16) | (0x9964 >> 2),
	(0x9c00 << 16) | (0x9968 >> 2),
	(0x9c00 << 16) | (0x996c >> 2),
	(0x9c00 << 16) | (0x9970 >> 2),
	(0x9c00 << 16) | (0x9974 >> 2),
	(0x9c00 << 16) | (0x9978 >> 2),
	(0x9c00 << 16) | (0x997c >> 2),
	(0x9c00 << 16) | (0x9980 >> 2),
	(0x9c00 << 16) | (0x9984 >> 2),
	(0x9c00 << 16) | (0x9988 >> 2),
	(0x9c00 << 16) | (0x998c >> 2),
	(0x9c00 << 16) | (0x8c00 >> 2),
	(0x9c00 << 16) | (0x8c14 >> 2),
	(0x9c00 << 16) | (0x8c04 >> 2),
	(0x9c00 << 16) | (0x8c08 >> 2),
	(0x8000 << 16) | (0x9b7c >> 2),
	(0x8040 << 16) | (0x9b7c >> 2),
	(0x8000 << 16) | (0xe84 >> 2),
	(0x8040 << 16) | (0xe84 >> 2),
	(0x8000 << 16) | (0x89c0 >> 2),
	(0x8040 << 16) | (0x89c0 >> 2),
	(0x8000 << 16) | (0x914c >> 2),
	(0x8040 << 16) | (0x914c >> 2),
	(0x8000 << 16) | (0x8c20 >> 2),
	(0x8040 << 16) | (0x8c20 >> 2),
	(0x8000 << 16) | (0x9354 >> 2),
	(0x8040 << 16) | (0x9354 >> 2),
	(0x9c00 << 16) | (0x9060 >> 2),
	(0x9c00 << 16) | (0x9364 >> 2),
	(0x9c00 << 16) | (0x9100 >> 2),
	(0x9c00 << 16) | (0x913c >> 2),
	(0x8000 << 16) | (0x90e0 >> 2),
	(0x8000 << 16) | (0x90e4 >> 2),
	(0x8000 << 16) | (0x90e8 >> 2),
	(0x8040 << 16) | (0x90e0 >> 2),
	(0x8040 << 16) | (0x90e4 >> 2),
	(0x8040 << 16) | (0x90e8 >> 2),
	(0x9c00 << 16) | (0x8bcc >> 2),
	(0x9c00 << 16) | (0x8b24 >> 2),
	(0x9c00 << 16) | (0x88c4 >> 2),
	(0x9c00 << 16) | (0x8e50 >> 2),
	(0x9c00 << 16) | (0x8c0c >> 2),
	(0x9c00 << 16) | (0x8e58 >> 2),
	(0x9c00 << 16) | (0x8e5c >> 2),
	(0x9c00 << 16) | (0x9508 >> 2),
	(0x9c00 << 16) | (0x950c >> 2),
	(0x9c00 << 16) | (0x9494 >> 2),
	(0x9c00 << 16) | (0xac0c >> 2),
	(0x9c00 << 16) | (0xac10 >> 2),
	(0x9c00 << 16) | (0xac14 >> 2),
	(0x9c00 << 16) | (0xae00 >> 2),
	(0x9c00 << 16) | (0xac08 >> 2),
	(0x9c00 << 16) | (0x88d4 >> 2),
	(0x9c00 << 16) | (0x88c8 >> 2),
	(0x9c00 << 16) | (0x88cc >> 2),
	(0x9c00 << 16) | (0x89b0 >> 2),
	(0x9c00 << 16) | (0x8b10 >> 2),
	(0x9c00 << 16) | (0x8a14 >> 2),
	(0x9c00 << 16) | (0x9830 >> 2),
	(0x9c00 << 16) | (0x9834 >> 2),
	(0x9c00 << 16) | (0x9838 >> 2),
	(0x9c00 << 16) | (0x9a10 >> 2),
	(0x8000 << 16) | (0x9870 >> 2),
	(0x8000 << 16) | (0x9874 >> 2),
	(0x8001 << 16) | (0x9870 >> 2),
	(0x8001 << 16) | (0x9874 >> 2),
	(0x8040 << 16) | (0x9870 >> 2),
	(0x8040 << 16) | (0x9874 >> 2),
	(0x8041 << 16) | (0x9870 >> 2),
	(0x8041 << 16) | (0x9874 >> 2),
};
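
/*
 * Fetch and validate the PFP, ME, CE and RLC microcode images for the
 * detected SI variant.  On any failure every firmware handle is released
 * so the caller sees a clean state.
 */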
static int gfx_v6_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;
	const struct gfx_firmware_header_v1_0 *cp_hdr;
	const struct rlc_firmware_header_v1_0 *rlc_hdr;

	switch (adev->asic_type) {
	case CHIP_TAHITI:
		chip_name = "tahiti";
		break;
	case CHIP_PITCAIRN:
		chip_name = "pitcairn";
		break;
	case CHIP_VERDE:
		chip_name = "verde";
		break;
	case CHIP_OLAND:
		chip_name = "oland";
		break;
	case CHIP_HAINAN:
		chip_name = "hainan";
		break;
	default:
		BUG();
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_pfp.bin", chip_name);
	err = request_firmware(&adev->gfx.pfp_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
	err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.me_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
	adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
	err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.ce_fw);
	if (err)
		goto out;
	cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
	adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
	adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
	err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
	if (err)
		goto out;
	err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
	rlc_hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
	adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
	adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);

out:
	if (err) {
		pr_err("gfx6: Failed to load firmware \"%s\"\n", fw_name);
		release_firmware(adev->gfx.pfp_fw);
		adev->gfx.pfp_fw = NULL;
		release_firmware(adev->gfx.me_fw);
		adev->gfx.me_fw = NULL;
		release_firmware(adev->gfx.ce_fw);
		adev->gfx.ce_fw = NULL;
		release_firmware(adev->gfx.rlc_fw);
		adev->gfx.rlc_fw = NULL;
	}
	return err;
}
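
/*
 * Program the 32 GB_TILE_MODEx entries with per-ASIC tiling layouts.
 * Depth-surface modes that must split at the DRAM row boundary use
 * split_equal_to_row_size derived from the memory row size.
 */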
static void gfx_v6_0_tiling_mode_table_init(struct amdgpu_device *adev)
{
	const u32 num_tile_mode_states =  ARRAY_SIZE(adev->gfx.config.tile_mode_array);
	u32 reg_offset, split_equal_to_row_size, *tilemode;

	memset(adev->gfx.config.tile_mode_array, 0, sizeof(adev->gfx.config.tile_mode_array));
	tilemode = adev->gfx.config.tile_mode_array;

	switch (adev->gfx.config.mem_row_size_in_kb) {
	case 1:
	default:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
		break;
	case 2:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
		break;
	case 4:
		split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
		break;
	}
	if (adev->asic_type == CHIP_VERDE) {
		tilemode[0] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[1] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[2] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[3] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_8_BANK) |
				TILE_SPLIT(split_equal_to_row_size);
		tilemode[4] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
				ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16);
		tilemode[5] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_4_BANK);
		tilemode[6] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_4_BANK);
		tilemode[7] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_2_BANK);
		tilemode[8] = ARRAY_MODE(ARRAY_LINEAR_ALIGNED);
		tilemode[9] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
				ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16);
		tilemode[10] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[11] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[12] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[13] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16);
		tilemode[14] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[15] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[16] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[17] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK) |
				TILE_SPLIT(split_equal_to_row_size);
		tilemode[18] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_1D_TILED_THICK) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16);
		tilemode[19] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK) |
				TILE_SPLIT(split_equal_to_row_size);
		tilemode[20] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THICK) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK) |
				TILE_SPLIT(split_equal_to_row_size);
		tilemode[21] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_8_BANK);
		tilemode[22] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_8_BANK);
		tilemode[23] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_4_BANK);
		tilemode[24] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_4_BANK);
		tilemode[25] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_2_BANK);
		tilemode[26] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_2_BANK);
		tilemode[27] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_2_BANK);
		tilemode[28] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_2_BANK);
		tilemode[29] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_2_BANK);
		tilemode[30] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_2_BANK);
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]);
	} else if (adev->asic_type == CHIP_OLAND) {
		tilemode[0] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
				NUM_BANKS(ADDR_SURF_16_BANK) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
		tilemode[1] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
				NUM_BANKS(ADDR_SURF_16_BANK) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
		tilemode[2] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				NUM_BANKS(ADDR_SURF_16_BANK) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
		tilemode[3] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
				NUM_BANKS(ADDR_SURF_16_BANK) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
		tilemode[4] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
				ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
				NUM_BANKS(ADDR_SURF_16_BANK) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
		tilemode[5] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(split_equal_to_row_size) |
				NUM_BANKS(ADDR_SURF_16_BANK) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
		tilemode[6] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(split_equal_to_row_size) |
				NUM_BANKS(ADDR_SURF_16_BANK) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
		tilemode[7] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(split_equal_to_row_size) |
				NUM_BANKS(ADDR_SURF_16_BANK) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
		tilemode[8] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
				ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
				NUM_BANKS(ADDR_SURF_16_BANK) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
		tilemode[9] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
				ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
				NUM_BANKS(ADDR_SURF_16_BANK) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
		tilemode[10] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				NUM_BANKS(ADDR_SURF_16_BANK) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
		tilemode[11] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				NUM_BANKS(ADDR_SURF_16_BANK) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
		tilemode[12] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
				NUM_BANKS(ADDR_SURF_16_BANK) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
		tilemode[13] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
				NUM_BANKS(ADDR_SURF_16_BANK) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
		tilemode[14] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				NUM_BANKS(ADDR_SURF_16_BANK) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
		tilemode[15] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				NUM_BANKS(ADDR_SURF_16_BANK) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
		tilemode[16] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
				NUM_BANKS(ADDR_SURF_16_BANK) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
		tilemode[17] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(split_equal_to_row_size) |
				NUM_BANKS(ADDR_SURF_16_BANK) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
		tilemode[18] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_1D_TILED_THICK) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16);
		tilemode[19] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK) |
				TILE_SPLIT(split_equal_to_row_size);
		tilemode[20] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THICK) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK) |
				TILE_SPLIT(split_equal_to_row_size);
		tilemode[21] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				NUM_BANKS(ADDR_SURF_16_BANK) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
		tilemode[22] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				NUM_BANKS(ADDR_SURF_16_BANK) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4);
		tilemode[23] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				NUM_BANKS(ADDR_SURF_16_BANK) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
		tilemode[24] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
				NUM_BANKS(ADDR_SURF_16_BANK) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2);
		tilemode[25] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
				NUM_BANKS(ADDR_SURF_8_BANK) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1);
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]);
	} else if (adev->asic_type == CHIP_HAINAN) {
		tilemode[0] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[1] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[2] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[3] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_8_BANK) |
				TILE_SPLIT(split_equal_to_row_size);
		tilemode[4] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
				ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2);
		tilemode[5] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_8_BANK);
		tilemode[6] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_8_BANK);
		tilemode[7] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_4_BANK);
		tilemode[8] = ARRAY_MODE(ARRAY_LINEAR_ALIGNED);
		tilemode[9] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
				ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2);
		tilemode[10] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4) |
				NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[11] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[12] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[13] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2);
		tilemode[14] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[15] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[16] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[17] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK) |
				TILE_SPLIT(split_equal_to_row_size);
		tilemode[18] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_1D_TILED_THICK) |
				PIPE_CONFIG(ADDR_SURF_P2);
		tilemode[19] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK) |
				TILE_SPLIT(split_equal_to_row_size);
		tilemode[20] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THICK) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK) |
				TILE_SPLIT(split_equal_to_row_size);
		tilemode[21] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_8_BANK);
		tilemode[22] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_8_BANK);
		tilemode[23] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_8_BANK);
		tilemode[24] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_8_BANK);
		tilemode[25] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_4_BANK);
		tilemode[26] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_4_BANK);
		tilemode[27] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_4_BANK);
		tilemode[28] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_4_BANK);
		tilemode[29] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_4_BANK);
		tilemode[30] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P2) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_4_BANK);
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]);
	} else if ((adev->asic_type == CHIP_TAHITI) || (adev->asic_type == CHIP_PITCAIRN)) {
		tilemode[0] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[1] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[2] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[3] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_4_BANK) |
				TILE_SPLIT(split_equal_to_row_size);
		tilemode[4] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
				ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16);
		tilemode[5] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_2_BANK);
		tilemode[6] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_2_BANK);
		tilemode[7] = MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_2_BANK);
		tilemode[8] = ARRAY_MODE(ARRAY_LINEAR_ALIGNED);
		tilemode[9] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
				ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16);
		tilemode[10] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[11] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
				NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[12] = MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[13] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16);
		tilemode[14] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[15] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[16] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_16_BANK);
		tilemode[17] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_16_BANK) |
				TILE_SPLIT(split_equal_to_row_size);
		tilemode[18] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_1D_TILED_THICK) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16);
		tilemode[19] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_16_BANK) |
				TILE_SPLIT(split_equal_to_row_size);
		tilemode[20] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THICK) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_16_BANK) |
				TILE_SPLIT(split_equal_to_row_size);
		tilemode[21] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_4_BANK);
		tilemode[22] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_4_BANK);
		tilemode[23] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_2_BANK);
		tilemode[24] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_2_BANK);
		tilemode[25] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_2_BANK);
		tilemode[26] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_2_BANK);
		tilemode[27] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_2_BANK);
		tilemode[28] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_2_BANK);
		tilemode[29] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_2_BANK);
		tilemode[30] = MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING) |
				ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
				PIPE_CONFIG(ADDR_SURF_P4_8x16) |
				TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
				BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
				BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
				MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
				NUM_BANKS(ADDR_SURF_2_BANK);
		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++)
			WREG32(mmGB_TILE_MODE0 + reg_offset, tilemode[reg_offset]);
	} else {
		DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
	}
}
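
/*
 * Steer subsequent register accesses to a single shader engine / shader
 * array / instance via GRBM_GFX_INDEX; 0xffffffff selects broadcast.
 */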
static void gfx_v6_0_select_se_sh(struct amdgpu_device *adev, u32 se_num,
				  u32 sh_num, u32 instance)
{
	u32 data;

	if (instance == 0xffffffff)
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1);
	else
		data = REG_SET_FIELD(0, GRBM_GFX_INDEX, INSTANCE_INDEX, instance);

	if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
		data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
			GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;
	else if (se_num == 0xffffffff)
		data |= GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK |
			(sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT);
	else if (sh_num == 0xffffffff)
		data |= GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
			(se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
	else
		data |= (sh_num << GRBM_GFX_INDEX__SH_INDEX__SHIFT) |
			(se_num << GRBM_GFX_INDEX__SE_INDEX__SHIFT);
	WREG32(mmGRBM_GFX_INDEX, data);
}
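
/*
 * Return the bitmap of render backends on the currently selected SH that
 * are still active after fuse (CC_*) and user (GC_USER_*) disables.
 */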
static u32 gfx_v6_0_get_rb_active_bitmap(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32(mmCC_RB_BACKEND_DISABLE) |
	       RREG32(mmGC_USER_RB_BACKEND_DISABLE);

	data = REG_GET_FIELD(data, GC_USER_RB_BACKEND_DISABLE, BACKEND_DISABLE);

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
					 adev->gfx.config.max_sh_per_se);

	return ~data & mask;
}
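
/* Per-ASIC default PA_SC_RASTER_CONFIG values for a fully populated chip. */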
static void gfx_v6_0_raster_config(struct amdgpu_device *adev, u32 *rconf)
{
	switch (adev->asic_type) {
	case CHIP_TAHITI:
	case CHIP_PITCAIRN:
		*rconf |= (2 << PA_SC_RASTER_CONFIG__RB_XSEL2__SHIFT) |
			  (1 << PA_SC_RASTER_CONFIG__RB_XSEL__SHIFT) |
			  (2 << PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT) |
			  (1 << PA_SC_RASTER_CONFIG__PKR_YSEL__SHIFT) |
			  (2 << PA_SC_RASTER_CONFIG__SE_MAP__SHIFT) |
			  (2 << PA_SC_RASTER_CONFIG__SE_XSEL__SHIFT) |
			  (2 << PA_SC_RASTER_CONFIG__SE_YSEL__SHIFT);
		break;
	case CHIP_VERDE:
		*rconf |= (1 << PA_SC_RASTER_CONFIG__RB_XSEL__SHIFT) |
			  (2 << PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT) |
			  (1 << PA_SC_RASTER_CONFIG__PKR_YSEL__SHIFT);
		break;
	case CHIP_OLAND:
		*rconf |= (1 << PA_SC_RASTER_CONFIG__RB_YSEL__SHIFT);
		break;
	case CHIP_HAINAN:
		break;
	default:
		DRM_ERROR("unknown asic: 0x%x\n", adev->asic_type);
		break;
	}
}
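
/*
 * Rewrite PA_SC_RASTER_CONFIG per shader engine when some render backends
 * are harvested, remapping the SE/PKR/RB map fields away from missing units.
 */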
static void gfx_v6_0_write_harvested_raster_configs(struct amdgpu_device *adev,
						    u32 raster_config, unsigned rb_mask,
						    unsigned num_rb)
{
	unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
	unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
	unsigned rb_per_pkr = min_t(unsigned, num_rb / num_se / sh_per_se, 2);
	unsigned rb_per_se = num_rb / num_se;
	unsigned se_mask[4];
	unsigned se;

	se_mask[0] = ((1 << rb_per_se) - 1) & rb_mask;
	se_mask[1] = (se_mask[0] << rb_per_se) & rb_mask;
	se_mask[2] = (se_mask[1] << rb_per_se) & rb_mask;
	se_mask[3] = (se_mask[2] << rb_per_se) & rb_mask;

	WARN_ON(!(num_se == 1 || num_se == 2 || num_se == 4));
	WARN_ON(!(sh_per_se == 1 || sh_per_se == 2));
	WARN_ON(!(rb_per_pkr == 1 || rb_per_pkr == 2));

	for (se = 0; se < num_se; se++) {
		unsigned raster_config_se = raster_config;
		unsigned pkr0_mask = ((1 << rb_per_pkr) - 1) << (se * rb_per_se);
		unsigned pkr1_mask = pkr0_mask << rb_per_pkr;
		int idx = (se / 2) * 2;

		if ((num_se > 1) && (!se_mask[idx] || !se_mask[idx + 1])) {
			raster_config_se &= ~PA_SC_RASTER_CONFIG__SE_MAP_MASK;

			if (!se_mask[idx])
				raster_config_se |= RASTER_CONFIG_SE_MAP_3 << PA_SC_RASTER_CONFIG__SE_MAP__SHIFT;
			else
				raster_config_se |= RASTER_CONFIG_SE_MAP_0 << PA_SC_RASTER_CONFIG__SE_MAP__SHIFT;
		}

		pkr0_mask &= rb_mask;
		pkr1_mask &= rb_mask;
		if (rb_per_se > 2 && (!pkr0_mask || !pkr1_mask)) {
			raster_config_se &= ~PA_SC_RASTER_CONFIG__PKR_MAP_MASK;

			if (!pkr0_mask)
				raster_config_se |= RASTER_CONFIG_PKR_MAP_3 << PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT;
			else
				raster_config_se |= RASTER_CONFIG_PKR_MAP_0 << PA_SC_RASTER_CONFIG__PKR_MAP__SHIFT;
		}

		if (rb_per_se >= 2) {
			unsigned rb0_mask = 1 << (se * rb_per_se);
			unsigned rb1_mask = rb0_mask << 1;

			rb0_mask &= rb_mask;
			rb1_mask &= rb_mask;
			if (!rb0_mask || !rb1_mask) {
				raster_config_se &= ~PA_SC_RASTER_CONFIG__RB_MAP_PKR0_MASK;

				if (!rb0_mask)
					raster_config_se |=
						RASTER_CONFIG_RB_MAP_3 << PA_SC_RASTER_CONFIG__RB_MAP_PKR0__SHIFT;
				else
					raster_config_se |=
						RASTER_CONFIG_RB_MAP_0 << PA_SC_RASTER_CONFIG__RB_MAP_PKR0__SHIFT;
			}

			if (rb_per_se > 2) {
				rb0_mask = 1 << (se * rb_per_se + rb_per_pkr);
				rb1_mask = rb0_mask << 1;
				rb0_mask &= rb_mask;
				rb1_mask &= rb_mask;
				if (!rb0_mask || !rb1_mask) {
					raster_config_se &= ~PA_SC_RASTER_CONFIG__RB_MAP_PKR1_MASK;

					if (!rb0_mask)
						raster_config_se |=
							RASTER_CONFIG_RB_MAP_3 << PA_SC_RASTER_CONFIG__RB_MAP_PKR1__SHIFT;
					else
						raster_config_se |=
							RASTER_CONFIG_RB_MAP_0 << PA_SC_RASTER_CONFIG__RB_MAP_PKR1__SHIFT;
				}
			}
		}

		/* GRBM_GFX_INDEX has a different offset on SI */
		gfx_v6_0_select_se_sh(adev, se, 0xffffffff, 0xffffffff);
		WREG32(mmPA_SC_RASTER_CONFIG, raster_config_se);
	}

	/* GRBM_GFX_INDEX has a different offset on SI */
	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
}
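
/*
 * Build the active-RB mask across all SEs/SHs, program a raster config
 * that matches the harvesting, and cache the per-SE values for userspace.
 */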
static void gfx_v6_0_setup_rb(struct amdgpu_device *adev)
{
	int i, j;
	u32 data;
	u32 raster_config = 0;
	u32 active_rbs = 0;
	u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
					adev->gfx.config.max_sh_per_se;
	unsigned num_rb_pipes;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
			data = gfx_v6_0_get_rb_active_bitmap(adev);
			active_rbs |= data <<
				((i * adev->gfx.config.max_sh_per_se + j) *
				 rb_bitmap_width_per_sh);
		}
	}
	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);

	adev->gfx.config.backend_enable_mask = active_rbs;
	adev->gfx.config.num_rbs = hweight32(active_rbs);

	num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
			     adev->gfx.config.max_shader_engines, 16);

	gfx_v6_0_raster_config(adev, &raster_config);

	if (!adev->gfx.config.backend_enable_mask ||
	    adev->gfx.config.num_rbs >= num_rb_pipes)
		WREG32(mmPA_SC_RASTER_CONFIG, raster_config);
	else
		gfx_v6_0_write_harvested_raster_configs(adev, raster_config,
							adev->gfx.config.backend_enable_mask,
							num_rb_pipes);

	/* cache the values for userspace */
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
			adev->gfx.config.rb_config[i][j].rb_backend_disable =
				RREG32(mmCC_RB_BACKEND_DISABLE);
			adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
				RREG32(mmGC_USER_RB_BACKEND_DISABLE);
			adev->gfx.config.rb_config[i][j].raster_config =
				RREG32(mmPA_SC_RASTER_CONFIG);
		}
	}
	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
}
static void gfx_v6_0_set_user_cu_inactive_bitmap(struct amdgpu_device *adev,
						 u32 bitmap)
{
	u32 data;

	if (!bitmap)
		return;

	data = bitmap << GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS__SHIFT;
	data &= GC_USER_SHADER_ARRAY_CONFIG__INACTIVE_CUS_MASK;

	WREG32(mmGC_USER_SHADER_ARRAY_CONFIG, data);
}
static u32 gfx_v6_0_get_cu_enabled(struct amdgpu_device *adev)
{
	u32 data, mask;

	data = RREG32(mmCC_GC_SHADER_ARRAY_CONFIG) |
		RREG32(mmGC_USER_SHADER_ARRAY_CONFIG);

	mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);

	return ~REG_GET_FIELD(data, CC_GC_SHADER_ARRAY_CONFIG, INACTIVE_CUS) & mask;
}
static void gfx_v6_0_setup_spi(struct amdgpu_device *adev)
{
	int i, j, k;
	u32 data, mask;
	u32 active_cu = 0;

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
			data = RREG32(mmSPI_STATIC_THREAD_MGMT_3);
			active_cu = gfx_v6_0_get_cu_enabled(adev);

			mask = 1;
			for (k = 0; k < 16; k++) {
				mask <<= k;
				if (active_cu & mask) {
					data &= ~mask;
					WREG32(mmSPI_STATIC_THREAD_MGMT_3, data);
					break;
				}
			}
		}
	}
	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);
}
static void gfx_v6_0_config_init(struct amdgpu_device *adev)
{
	adev->gfx.config.double_offchip_lds_buf = 0;
}
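/**
 * gfx_v6_0_constants_init - program the static GFX configuration
 *
 * @adev: amdgpu_device pointer
 *
 * Selects the per-ASIC shader engine, tile pipe and backend limits,
 * derives GB_ADDR_CONFIG from the memory controller configuration, and
 * initializes the tiling tables, render backends, SPI setup and CU info.
 */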
static void gfx_v6_0_constants_init(struct amdgpu_device *adev)
{
	u32 gb_addr_config = 0;
	u32 mc_arb_ramcfg;
	u32 sx_debug_1;
	u32 hdp_host_path_cntl;
	u32 tmp;

	switch (adev->asic_type) {
	case CHIP_TAHITI:
		adev->gfx.config.max_shader_engines = 2;
		adev->gfx.config.max_tile_pipes = 12;
		adev->gfx.config.max_cu_per_sh = 8;
		adev->gfx.config.max_sh_per_se = 2;
		adev->gfx.config.max_backends_per_se = 4;
		adev->gfx.config.max_texture_channel_caches = 12;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_PITCAIRN:
		adev->gfx.config.max_shader_engines = 2;
		adev->gfx.config.max_tile_pipes = 8;
		adev->gfx.config.max_cu_per_sh = 5;
		adev->gfx.config.max_sh_per_se = 2;
		adev->gfx.config.max_backends_per_se = 4;
		adev->gfx.config.max_texture_channel_caches = 8;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_VERDE:
		adev->gfx.config.max_shader_engines = 1;
		adev->gfx.config.max_tile_pipes = 4;
		adev->gfx.config.max_cu_per_sh = 5;
		adev->gfx.config.max_sh_per_se = 2;
		adev->gfx.config.max_backends_per_se = 4;
		adev->gfx.config.max_texture_channel_caches = 4;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 32;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x40;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_OLAND:
		adev->gfx.config.max_shader_engines = 1;
		adev->gfx.config.max_tile_pipes = 4;
		adev->gfx.config.max_cu_per_sh = 6;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 2;
		adev->gfx.config.max_texture_channel_caches = 4;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 16;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x40;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
		break;
	case CHIP_HAINAN:
		adev->gfx.config.max_shader_engines = 1;
		adev->gfx.config.max_tile_pipes = 4;
		adev->gfx.config.max_cu_per_sh = 5;
		adev->gfx.config.max_sh_per_se = 1;
		adev->gfx.config.max_backends_per_se = 1;
		adev->gfx.config.max_texture_channel_caches = 2;
		adev->gfx.config.max_gprs = 256;
		adev->gfx.config.max_gs_threads = 16;
		adev->gfx.config.max_hw_contexts = 8;

		adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
		adev->gfx.config.sc_prim_fifo_size_backend = 0x40;
		adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
		adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
		gb_addr_config = HAINAN_GB_ADDR_CONFIG_GOLDEN;
		break;
	default:
		BUG();
		break;
	}
	WREG32(mmGRBM_CNTL, (0xff << GRBM_CNTL__READ_TIMEOUT__SHIFT));
	WREG32(mmSRBM_INT_CNTL, 1);
	WREG32(mmSRBM_INT_ACK, 1);

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
	mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;

	adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
	adev->gfx.config.mem_max_burst_length_bytes = 256;
	tmp = (mc_arb_ramcfg & MC_ARB_RAMCFG__NOOFCOLS_MASK) >> MC_ARB_RAMCFG__NOOFCOLS__SHIFT;
	adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
	if (adev->gfx.config.mem_row_size_in_kb > 4)
		adev->gfx.config.mem_row_size_in_kb = 4;
	adev->gfx.config.shader_engine_tile_size = 32;
	adev->gfx.config.num_gpus = 1;
	adev->gfx.config.multi_gpu_tile_size = 64;

	gb_addr_config &= ~GB_ADDR_CONFIG__ROW_SIZE_MASK;
	switch (adev->gfx.config.mem_row_size_in_kb) {
	case 1:
	default:
		gb_addr_config |= 0 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT;
		break;
	case 2:
		gb_addr_config |= 1 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT;
		break;
	case 4:
		gb_addr_config |= 2 << GB_ADDR_CONFIG__ROW_SIZE__SHIFT;
		break;
	}
	gb_addr_config &= ~GB_ADDR_CONFIG__NUM_SHADER_ENGINES_MASK;
	if (adev->gfx.config.max_shader_engines == 2)
		gb_addr_config |= 1 << GB_ADDR_CONFIG__NUM_SHADER_ENGINES__SHIFT;
	adev->gfx.config.gb_addr_config = gb_addr_config;

	WREG32(mmGB_ADDR_CONFIG, gb_addr_config);
	WREG32(mmDMIF_ADDR_CONFIG, gb_addr_config);
	WREG32(mmDMIF_ADDR_CALC, gb_addr_config);
	WREG32(mmHDP_ADDR_CONFIG, gb_addr_config);
	WREG32(mmDMA_TILING_CONFIG + DMA0_REGISTER_OFFSET, gb_addr_config);
	WREG32(mmDMA_TILING_CONFIG + DMA1_REGISTER_OFFSET, gb_addr_config);

	if (adev->has_uvd) {
		WREG32(mmUVD_UDEC_ADDR_CONFIG, gb_addr_config);
		WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, gb_addr_config);
		WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, gb_addr_config);
	}

	gfx_v6_0_tiling_mode_table_init(adev);

	gfx_v6_0_setup_rb(adev);

	gfx_v6_0_setup_spi(adev);

	gfx_v6_0_get_cu_info(adev);
	gfx_v6_0_config_init(adev);

	WREG32(mmCP_QUEUE_THRESHOLDS, ((0x16 << CP_QUEUE_THRESHOLDS__ROQ_IB1_START__SHIFT) |
				       (0x2b << CP_QUEUE_THRESHOLDS__ROQ_IB2_START__SHIFT)));
	WREG32(mmCP_MEQ_THRESHOLDS, (0x30 << CP_MEQ_THRESHOLDS__MEQ1_START__SHIFT) |
				    (0x60 << CP_MEQ_THRESHOLDS__MEQ2_START__SHIFT));

	sx_debug_1 = RREG32(mmSX_DEBUG_1);
	WREG32(mmSX_DEBUG_1, sx_debug_1);

	WREG32(mmSPI_CONFIG_CNTL_1, (4 << SPI_CONFIG_CNTL_1__VTX_DONE_DELAY__SHIFT));

	WREG32(mmPA_SC_FIFO_SIZE, ((adev->gfx.config.sc_prim_fifo_size_frontend << PA_SC_FIFO_SIZE__SC_FRONTEND_PRIM_FIFO_SIZE__SHIFT) |
				   (adev->gfx.config.sc_prim_fifo_size_backend << PA_SC_FIFO_SIZE__SC_BACKEND_PRIM_FIFO_SIZE__SHIFT) |
				   (adev->gfx.config.sc_hiz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
				   (adev->gfx.config.sc_earlyz_tile_fifo_size << PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT)));

	WREG32(mmVGT_NUM_INSTANCES, 1);
	WREG32(mmCP_PERFMON_CNTL, 0);
	WREG32(mmSQ_CONFIG, 0);
	WREG32(mmPA_SC_FORCE_EOV_MAX_CNTS, ((4095 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_CLK_CNT__SHIFT) |
					    (255 << PA_SC_FORCE_EOV_MAX_CNTS__FORCE_EOV_MAX_REZ_CNT__SHIFT)));

	WREG32(mmVGT_CACHE_INVALIDATION,
	       (VC_AND_TC << VGT_CACHE_INVALIDATION__CACHE_INVALIDATION__SHIFT) |
	       (ES_AND_GS_AUTO << VGT_CACHE_INVALIDATION__AUTO_INVLD_EN__SHIFT));

	WREG32(mmVGT_GS_VERTEX_REUSE, 16);
	WREG32(mmPA_SC_LINE_STIPPLE_STATE, 0);

	WREG32(mmCB_PERFCOUNTER0_SELECT0, 0);
	WREG32(mmCB_PERFCOUNTER0_SELECT1, 0);
	WREG32(mmCB_PERFCOUNTER1_SELECT0, 0);
	WREG32(mmCB_PERFCOUNTER1_SELECT1, 0);
	WREG32(mmCB_PERFCOUNTER2_SELECT0, 0);
	WREG32(mmCB_PERFCOUNTER2_SELECT1, 0);
	WREG32(mmCB_PERFCOUNTER3_SELECT0, 0);
	WREG32(mmCB_PERFCOUNTER3_SELECT1, 0);

	hdp_host_path_cntl = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, hdp_host_path_cntl);

	WREG32(mmPA_CL_ENHANCE, PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK |
				(3 << PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT));

	udelay(50);
}
static void gfx_v6_0_scratch_init(struct amdgpu_device *adev)
{
	adev->gfx.scratch.num_reg = 8;
	adev->gfx.scratch.reg_base = mmSCRATCH_REG0;
	adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1;
}
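/**
 * gfx_v6_0_ring_test_ring - basic gfx ring test
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Writes 0xCAFEDEAD to a scratch register, emits a SET_CONFIG_REG
 * packet that overwrites it with 0xDEADBEEF, and polls the register to
 * verify that the CP actually executed the packet.
 * Returns 0 on success, error on failure.
 */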
static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r)
		return r;

	WREG32(scratch, 0xCAFEDEAD);

	r = amdgpu_ring_alloc(ring, 3);
	if (r)
		goto error_free_scratch;

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	amdgpu_ring_write(ring, (scratch - PACKET3_SET_CONFIG_REG_START));
	amdgpu_ring_write(ring, 0xDEADBEEF);
	amdgpu_ring_commit(ring);

	for (i = 0; i < adev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}

	if (i >= adev->usec_timeout)
		r = -ETIMEDOUT;

error_free_scratch:
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}
static void gfx_v6_0_ring_emit_vgt_flush(struct amdgpu_ring *ring)
{
	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE, 0));
	amdgpu_ring_write(ring, EVENT_TYPE(VGT_FLUSH) |
			  EVENT_INDEX(0));
}
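/**
 * gfx_v6_0_ring_emit_fence - emit a fence on the gfx ring
 *
 * @ring: amdgpu_ring structure holding ring information
 * @addr: GPU address to write the fence sequence number to
 * @seq: fence sequence number
 * @flags: AMDGPU_FENCE_FLAG_* control bits
 *
 * Flushes the read caches over GART, then emits an EVENT_WRITE_EOP
 * packet that writes the sequence number and optionally raises an
 * interrupt once all preceding work has completed.
 */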
static void gfx_v6_0_ring_emit_fence(struct amdgpu_ring *ring, u64 addr,
				     u64 seq, unsigned flags)
{
	bool write64bit = flags & AMDGPU_FENCE_FLAG_64BIT;
	bool int_sel = flags & AMDGPU_FENCE_FLAG_INT;
	/* flush read cache over gart */
	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	amdgpu_ring_write(ring, (mmCP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
	amdgpu_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
			  PACKET3_TC_ACTION_ENA |
			  PACKET3_SH_KCACHE_ACTION_ENA |
			  PACKET3_SH_ICACHE_ACTION_ENA);
	amdgpu_ring_write(ring, 0xFFFFFFFF);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 10); /* poll interval */
	/* EVENT_WRITE_EOP - flush caches, send int */
	amdgpu_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
	amdgpu_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, (upper_32_bits(addr) & 0xffff) |
			  ((write64bit ? 2 : 1) << CP_EOP_DONE_DATA_CNTL__DATA_SEL__SHIFT) |
			  ((int_sel ? 2 : 0) << CP_EOP_DONE_DATA_CNTL__INT_SEL__SHIFT));
	amdgpu_ring_write(ring, lower_32_bits(seq));
	amdgpu_ring_write(ring, upper_32_bits(seq));
}
static void gfx_v6_0_ring_emit_ib(struct amdgpu_ring *ring,
				  struct amdgpu_job *job,
				  struct amdgpu_ib *ib,
				  uint32_t flags)
{
	unsigned vmid = AMDGPU_JOB_GET_VMID(job);
	u32 header, control = 0;

	/* insert SWITCH_BUFFER packet before first IB in the ring frame */
	if (flags & AMDGPU_HAVE_CTX_SWITCH) {
		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
		amdgpu_ring_write(ring, 0);
	}

	if (ib->flags & AMDGPU_IB_FLAG_CE)
		header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
	else
		header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);

	control |= ib->length_dw | (vmid << 24);

	amdgpu_ring_write(ring, header);
	amdgpu_ring_write(ring,
#ifdef __BIG_ENDIAN
			  (2 << 0) |
#endif
			  (ib->gpu_addr & 0xFFFFFFFC));
	amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
	amdgpu_ring_write(ring, control);
}
/**
 * gfx_v6_0_ring_test_ib - basic ring IB test
 *
 * @ring: amdgpu_ring structure holding ring information
 * @timeout: timeout value in jiffies, or MAX_SCHEDULE_TIMEOUT
 *
 * Allocate an IB and execute it on the gfx ring (SI).
 * Provides a basic gfx ring test to verify that IBs are working.
 * Returns 0 on success, error on failure.
 */
static int gfx_v6_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
	struct amdgpu_device *adev = ring->adev;
	struct amdgpu_ib ib;
	struct dma_fence *f = NULL;
	uint32_t scratch;
	uint32_t tmp = 0;
	long r;

	r = amdgpu_gfx_scratch_get(adev, &scratch);
	if (r)
		return r;

	WREG32(scratch, 0xCAFEDEAD);
	memset(&ib, 0, sizeof(ib));
	r = amdgpu_ib_get(adev, NULL, 256, &ib);
	if (r)
		goto err1;

	ib.ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
	ib.ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_START));
	ib.ptr[2] = 0xDEADBEEF;
	ib.length_dw = 3;

	r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
	if (r)
		goto err2;

	r = dma_fence_wait_timeout(f, false, timeout);
	if (r == 0) {
		r = -ETIMEDOUT;
		goto err2;
	} else if (r < 0) {
		goto err2;
	}
	tmp = RREG32(scratch);
	if (tmp == 0xDEADBEEF)
		r = 0;
	else
		r = -EINVAL;

err2:
	amdgpu_ib_free(adev, &ib, NULL);
	dma_fence_put(f);
err1:
	amdgpu_gfx_scratch_free(adev, scratch);
	return r;
}
static void gfx_v6_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
{
	int i;

	if (enable) {
		WREG32(mmCP_ME_CNTL, 0);
	} else {
		WREG32(mmCP_ME_CNTL, (CP_ME_CNTL__ME_HALT_MASK |
				      CP_ME_CNTL__PFP_HALT_MASK |
				      CP_ME_CNTL__CE_HALT_MASK));
		WREG32(mmSCRATCH_UMSK, 0);
		for (i = 0; i < adev->gfx.num_gfx_rings; i++)
			adev->gfx.gfx_ring[i].sched.ready = false;
		for (i = 0; i < adev->gfx.num_compute_rings; i++)
			adev->gfx.compute_ring[i].sched.ready = false;
	}
	udelay(50);
}
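/**
 * gfx_v6_0_cp_gfx_load_microcode - load the CP microcode
 *
 * @adev: amdgpu_device pointer
 *
 * Halts the CP and writes the PFP, CE and ME firmware images into the
 * respective ucode RAMs, one dword at a time. Returns 0 on success,
 * -EINVAL if any of the firmware images is missing.
 */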
static int gfx_v6_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
{
	const struct gfx_firmware_header_v1_0 *pfp_hdr;
	const struct gfx_firmware_header_v1_0 *ce_hdr;
	const struct gfx_firmware_header_v1_0 *me_hdr;
	const __le32 *fw_data;
	u32 fw_size;
	int i;

	if (!adev->gfx.me_fw || !adev->gfx.pfp_fw || !adev->gfx.ce_fw)
		return -EINVAL;

	gfx_v6_0_cp_gfx_enable(adev, false);
	pfp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
	ce_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
	me_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;

	amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
	amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
	amdgpu_ucode_print_gfx_hdr(&me_hdr->header);

	/* PFP */
	fw_data = (const __le32 *)
		(adev->gfx.pfp_fw->data + le32_to_cpu(pfp_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(pfp_hdr->header.ucode_size_bytes) / 4;
	WREG32(mmCP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32(mmCP_PFP_UCODE_DATA, le32_to_cpup(fw_data++));
	WREG32(mmCP_PFP_UCODE_ADDR, 0);

	/* CE */
	fw_data = (const __le32 *)
		(adev->gfx.ce_fw->data + le32_to_cpu(ce_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(ce_hdr->header.ucode_size_bytes) / 4;
	WREG32(mmCP_CE_UCODE_ADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32(mmCP_CE_UCODE_DATA, le32_to_cpup(fw_data++));
	WREG32(mmCP_CE_UCODE_ADDR, 0);

	/* ME (the firmware words are little endian, so cast to __le32
	 * here as well; the original __be32 cast was a type mismatch) */
	fw_data = (const __le32 *)
		(adev->gfx.me_fw->data + le32_to_cpu(me_hdr->header.ucode_array_offset_bytes));
	fw_size = le32_to_cpu(me_hdr->header.ucode_size_bytes) / 4;
	WREG32(mmCP_ME_RAM_WADDR, 0);
	for (i = 0; i < fw_size; i++)
		WREG32(mmCP_ME_RAM_DATA, le32_to_cpup(fw_data++));
	WREG32(mmCP_ME_RAM_WADDR, 0);

	WREG32(mmCP_PFP_UCODE_ADDR, 0);
	WREG32(mmCP_CE_UCODE_ADDR, 0);
	WREG32(mmCP_ME_RAM_WADDR, 0);
	WREG32(mmCP_ME_RAM_RADDR, 0);
	return 0;
}
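/**
 * gfx_v6_0_cp_gfx_start - kick off the gfx ring
 *
 * @adev: amdgpu_device pointer
 *
 * Emits the ME_INITIALIZE packet, sets up the CE partitions, enables
 * the CP, and then replays the clear state buffer so the context
 * registers start from known values.
 */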
static int gfx_v6_0_cp_gfx_start(struct amdgpu_device *adev)
{
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;
	struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
	int r, i;

	r = amdgpu_ring_alloc(ring, 7 + 4);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
		return r;
	}
	amdgpu_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
	amdgpu_ring_write(ring, 0x1);
	amdgpu_ring_write(ring, 0x0);
	amdgpu_ring_write(ring, adev->gfx.config.max_hw_contexts - 1);
	amdgpu_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
	amdgpu_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
	amdgpu_ring_write(ring, 0xc000);
	amdgpu_ring_write(ring, 0xe000);
	amdgpu_ring_commit(ring);

	gfx_v6_0_cp_gfx_enable(adev, true);

	r = amdgpu_ring_alloc(ring, gfx_v6_0_get_csb_size(adev) + 10);
	if (r) {
		DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
		return r;
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	amdgpu_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				amdgpu_ring_write(ring,
						  PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				amdgpu_ring_write(ring, ext->reg_index - PACKET3_SET_CONTEXT_REG_START);
				for (i = 0; i < ext->reg_count; i++)
					amdgpu_ring_write(ring, ext->extent[i]);
			}
		}
	}

	amdgpu_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	amdgpu_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	amdgpu_ring_write(ring, 0);

	amdgpu_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	amdgpu_ring_write(ring, 0x00000316);
	amdgpu_ring_write(ring, 0x0000000e);
	amdgpu_ring_write(ring, 0x00000010);

	amdgpu_ring_commit(ring);

	return 0;
}
static int gfx_v6_0_cp_gfx_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	int r;
	u64 rptr_addr;

	WREG32(mmCP_SEM_WAIT_TIMER, 0x0);
	WREG32(mmCP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

	/* Set the write pointer delay */
	WREG32(mmCP_RB_WPTR_DELAY, 0);

	WREG32(mmCP_DEBUG, 0);
	WREG32(mmSCRATCH_ADDR, 0);

	/* ring 0 - compute and gfx */
	/* Set ring buffer size */
	ring = &adev->gfx.gfx_ring[0];
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;

#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(mmCP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(mmCP_RB0_CNTL, tmp | CP_RB0_CNTL__RB_RPTR_WR_ENA_MASK);
	ring->wptr = 0;
	WREG32(mmCP_RB0_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
	WREG32(mmCP_RB0_RPTR_ADDR, lower_32_bits(rptr_addr));
	WREG32(mmCP_RB0_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);

	WREG32(mmSCRATCH_UMSK, 0);

	mdelay(1);
	WREG32(mmCP_RB0_CNTL, tmp);

	WREG32(mmCP_RB0_BASE, ring->gpu_addr >> 8);

	/* start the rings */
	gfx_v6_0_cp_gfx_start(adev);
	r = amdgpu_ring_test_helper(ring);
	if (r)
		return r;

	return 0;
}
static u64 gfx_v6_0_ring_get_rptr(struct amdgpu_ring *ring)
{
	return ring->adev->wb.wb[ring->rptr_offs];
}

static u64 gfx_v6_0_ring_get_wptr(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->gfx.gfx_ring[0])
		return RREG32(mmCP_RB0_WPTR);
	else if (ring == &adev->gfx.compute_ring[0])
		return RREG32(mmCP_RB1_WPTR);
	else if (ring == &adev->gfx.compute_ring[1])
		return RREG32(mmCP_RB2_WPTR);
	else
		BUG();
}
static void gfx_v6_0_ring_set_wptr_gfx(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	WREG32(mmCP_RB0_WPTR, lower_32_bits(ring->wptr));
	(void)RREG32(mmCP_RB0_WPTR);
}

static void gfx_v6_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
	struct amdgpu_device *adev = ring->adev;

	if (ring == &adev->gfx.compute_ring[0]) {
		WREG32(mmCP_RB1_WPTR, lower_32_bits(ring->wptr));
		(void)RREG32(mmCP_RB1_WPTR);
	} else if (ring == &adev->gfx.compute_ring[1]) {
		WREG32(mmCP_RB2_WPTR, lower_32_bits(ring->wptr));
		(void)RREG32(mmCP_RB2_WPTR);
	} else {
		BUG();
	}
}
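/**
 * gfx_v6_0_cp_compute_resume - set up the compute ring buffers
 *
 * @adev: amdgpu_device pointer
 *
 * Programs ring buffer size, read/write pointers and base address for
 * the two compute rings (CP_RB1/CP_RB2) and runs a ring test on each.
 * Returns 0 on success, error on failure.
 */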
static int gfx_v6_0_cp_compute_resume(struct amdgpu_device *adev)
{
	struct amdgpu_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	int i, r;
	u64 rptr_addr;

	/* ring1 - compute only */
	/* Set ring buffer size */

	ring = &adev->gfx.compute_ring[0];
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(mmCP_RB1_CNTL, tmp);

	WREG32(mmCP_RB1_CNTL, tmp | CP_RB1_CNTL__RB_RPTR_WR_ENA_MASK);
	ring->wptr = 0;
	WREG32(mmCP_RB1_WPTR, ring->wptr);

	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
	WREG32(mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
	WREG32(mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);

	mdelay(1);
	WREG32(mmCP_RB1_CNTL, tmp);
	WREG32(mmCP_RB1_BASE, ring->gpu_addr >> 8);

	ring = &adev->gfx.compute_ring[1];
	rb_bufsz = order_base_2(ring->ring_size / 8);
	tmp = (order_base_2(AMDGPU_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
#ifdef __BIG_ENDIAN
	tmp |= BUF_SWAP_32BIT;
#endif
	WREG32(mmCP_RB2_CNTL, tmp);

	WREG32(mmCP_RB2_CNTL, tmp | CP_RB2_CNTL__RB_RPTR_WR_ENA_MASK);
	ring->wptr = 0;
	WREG32(mmCP_RB2_WPTR, ring->wptr);
	rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
	WREG32(mmCP_RB2_RPTR_ADDR, lower_32_bits(rptr_addr));
	WREG32(mmCP_RB2_RPTR_ADDR_HI, upper_32_bits(rptr_addr) & 0xFF);

	mdelay(1);
	WREG32(mmCP_RB2_CNTL, tmp);
	WREG32(mmCP_RB2_BASE, ring->gpu_addr >> 8);

	for (i = 0; i < 2; i++) {
		r = amdgpu_ring_test_helper(&adev->gfx.compute_ring[i]);
		if (r)
			return r;
	}

	return 0;
}
static void gfx_v6_0_cp_enable(struct amdgpu_device *adev, bool enable)
{
	gfx_v6_0_cp_gfx_enable(adev, enable);
}

static int gfx_v6_0_cp_load_microcode(struct amdgpu_device *adev)
{
	return gfx_v6_0_cp_gfx_load_microcode(adev);
}
static void gfx_v6_0_enable_gui_idle_interrupt(struct amdgpu_device *adev,
					       bool enable)
{
	u32 tmp = RREG32(mmCP_INT_CNTL_RING0);
	u32 mask;
	int i;

	if (enable)
		tmp |= (CP_INT_CNTL__CNTX_BUSY_INT_ENABLE_MASK |
			CP_INT_CNTL__CNTX_EMPTY_INT_ENABLE_MASK);
	else
		tmp &= ~(CP_INT_CNTL__CNTX_BUSY_INT_ENABLE_MASK |
			 CP_INT_CNTL__CNTX_EMPTY_INT_ENABLE_MASK);
	WREG32(mmCP_INT_CNTL_RING0, tmp);

	if (!enable) {
		/* read a gfx register */
		tmp = RREG32(mmDB_DEPTH_INFO);

		mask = RLC_BUSY_STATUS | GFX_POWER_STATUS | GFX_CLOCK_STATUS | GFX_LS_STATUS;
		for (i = 0; i < adev->usec_timeout; i++) {
			if ((RREG32(mmRLC_STAT) & mask) == (GFX_CLOCK_STATUS | GFX_POWER_STATUS))
				break;
			udelay(1);
		}
	}
}
static int gfx_v6_0_cp_resume(struct amdgpu_device *adev)
{
	int r;

	gfx_v6_0_enable_gui_idle_interrupt(adev, false);

	r = gfx_v6_0_cp_load_microcode(adev);
	if (r)
		return r;

	r = gfx_v6_0_cp_gfx_resume(adev);
	if (r)
		return r;
	r = gfx_v6_0_cp_compute_resume(adev);
	if (r)
		return r;

	gfx_v6_0_enable_gui_idle_interrupt(adev, true);

	return 0;
}
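/**
 * gfx_v6_0_ring_emit_pipeline_sync - emit a pipeline sync
 *
 * @ring: amdgpu_ring structure holding ring information
 *
 * Emits a WAIT_REG_MEM packet that stalls the engine until the most
 * recent fence value has been written back, serializing work that must
 * not overlap with previously submitted jobs.
 */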
static void gfx_v6_0_ring_emit_pipeline_sync(struct amdgpu_ring *ring)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);
	uint32_t seq = ring->fence_drv.sync_seq;
	uint64_t addr = ring->fence_drv.gpu_addr;

	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring, (WAIT_REG_MEM_MEM_SPACE(1) | /* memory */
				 WAIT_REG_MEM_FUNCTION(3) | /* equal */
				 WAIT_REG_MEM_ENGINE(usepfp)));   /* pfp or me */
	amdgpu_ring_write(ring, addr & 0xfffffffc);
	amdgpu_ring_write(ring, upper_32_bits(addr) & 0xffffffff);
	amdgpu_ring_write(ring, seq);
	amdgpu_ring_write(ring, 0xffffffff);
	amdgpu_ring_write(ring, 4); /* poll interval */

	if (usepfp) {
		/* sync CE with ME to prevent CE fetch CEIB before context switch done */
		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
		amdgpu_ring_write(ring, 0);
	}
}
static void gfx_v6_0_ring_emit_vm_flush(struct amdgpu_ring *ring,
					unsigned vmid, uint64_t pd_addr)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);

	amdgpu_gmc_emit_flush_gpu_tlb(ring, vmid, pd_addr);

	/* wait for the invalidate to complete */
	amdgpu_ring_write(ring, PACKET3(PACKET3_WAIT_REG_MEM, 5));
	amdgpu_ring_write(ring, (WAIT_REG_MEM_FUNCTION(0) |  /* always */
				 WAIT_REG_MEM_ENGINE(0))); /* me */
	amdgpu_ring_write(ring, mmVM_INVALIDATE_REQUEST);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, 0); /* ref */
	amdgpu_ring_write(ring, 0); /* mask */
	amdgpu_ring_write(ring, 0x20); /* poll interval */

	if (usepfp) {
		/* sync PFP to ME, otherwise we might get invalid PFP reads */
		amdgpu_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
		amdgpu_ring_write(ring, 0x0);

		/* sync CE with ME to prevent CE fetch CEIB before context switch done */
		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
		amdgpu_ring_write(ring, 0);
		amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
		amdgpu_ring_write(ring, 0);
	}
}
static void gfx_v6_0_ring_emit_wreg(struct amdgpu_ring *ring,
				    uint32_t reg, uint32_t val)
{
	int usepfp = (ring->funcs->type == AMDGPU_RING_TYPE_GFX);

	amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
	amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(usepfp) |
				 WRITE_DATA_DST_SEL(0)));
	amdgpu_ring_write(ring, reg);
	amdgpu_ring_write(ring, 0);
	amdgpu_ring_write(ring, val);
}
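/**
 * gfx_v6_0_rlc_init - allocate the RLC save/restore and clear state buffers
 *
 * @adev: amdgpu_device pointer
 *
 * Registers the SI save/restore register list, allocates the save
 * restore block, and creates the clear state BO whose first dwords
 * point at the register list placed 256 bytes into the buffer.
 */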
static int gfx_v6_0_rlc_init(struct amdgpu_device *adev)
{
	const u32 *src_ptr;
	volatile u32 *dst_ptr;
	u32 dws;
	u64 reg_list_mc_addr;
	const struct cs_section_def *cs_data;
	int r;

	adev->gfx.rlc.reg_list = verde_rlc_save_restore_register_list;
	adev->gfx.rlc.reg_list_size =
			(u32)ARRAY_SIZE(verde_rlc_save_restore_register_list);

	adev->gfx.rlc.cs_data = si_cs_data;
	src_ptr = adev->gfx.rlc.reg_list;
	dws = adev->gfx.rlc.reg_list_size;
	cs_data = adev->gfx.rlc.cs_data;

	if (src_ptr) {
		/* init save restore block */
		r = amdgpu_gfx_rlc_init_sr(adev, dws);
		if (r)
			return r;
	}

	if (cs_data) {
		/* clear state block */
		adev->gfx.rlc.clear_state_size = gfx_v6_0_get_csb_size(adev);
		dws = adev->gfx.rlc.clear_state_size + (256 / 4);

		r = amdgpu_bo_create_reserved(adev, dws * 4, PAGE_SIZE,
					      AMDGPU_GEM_DOMAIN_VRAM,
					      &adev->gfx.rlc.clear_state_obj,
					      &adev->gfx.rlc.clear_state_gpu_addr,
					      (void **)&adev->gfx.rlc.cs_ptr);
		if (r) {
			dev_warn(adev->dev, "(%d) create RLC c bo failed\n", r);
			amdgpu_gfx_rlc_fini(adev);
			return r;
		}

		/* set up the cs buffer */
		dst_ptr = adev->gfx.rlc.cs_ptr;
		reg_list_mc_addr = adev->gfx.rlc.clear_state_gpu_addr + 256;
		dst_ptr[0] = cpu_to_le32(upper_32_bits(reg_list_mc_addr));
		dst_ptr[1] = cpu_to_le32(lower_32_bits(reg_list_mc_addr));
		dst_ptr[2] = cpu_to_le32(adev->gfx.rlc.clear_state_size);
		gfx_v6_0_get_csb_buffer(adev, &dst_ptr[(256/4)]);
		amdgpu_bo_kunmap(adev->gfx.rlc.clear_state_obj);
		amdgpu_bo_unreserve(adev->gfx.rlc.clear_state_obj);
	}

	return 0;
}
static void gfx_v6_0_enable_lbpw(struct amdgpu_device *adev, bool enable)
{
	WREG32_FIELD(RLC_LB_CNTL, LOAD_BALANCE_ENABLE, enable ? 1 : 0);

	if (!enable) {
		gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
		WREG32(mmSPI_LB_CU_MASK, 0x00ff);
	}
}
static void gfx_v6_0_wait_for_rlc_serdes(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmRLC_SERDES_MASTER_BUSY_0) == 0)
			break;
		udelay(1);
	}

	for (i = 0; i < adev->usec_timeout; i++) {
		if (RREG32(mmRLC_SERDES_MASTER_BUSY_1) == 0)
			break;
		udelay(1);
	}
}
static void gfx_v6_0_update_rlc(struct amdgpu_device *adev, u32 rlc)
{
	u32 tmp;

	tmp = RREG32(mmRLC_CNTL);
	if (tmp != rlc)
		WREG32(mmRLC_CNTL, rlc);
}

static u32 gfx_v6_0_halt_rlc(struct amdgpu_device *adev)
{
	u32 data, orig;

	orig = data = RREG32(mmRLC_CNTL);

	if (data & RLC_CNTL__RLC_ENABLE_F32_MASK) {
		data &= ~RLC_CNTL__RLC_ENABLE_F32_MASK;
		WREG32(mmRLC_CNTL, data);

		gfx_v6_0_wait_for_rlc_serdes(adev);
	}

	return orig;
}
static void gfx_v6_0_rlc_stop(struct amdgpu_device *adev)
{
	WREG32(mmRLC_CNTL, 0);

	gfx_v6_0_enable_gui_idle_interrupt(adev, false);
	gfx_v6_0_wait_for_rlc_serdes(adev);
}

static void gfx_v6_0_rlc_start(struct amdgpu_device *adev)
{
	WREG32(mmRLC_CNTL, RLC_CNTL__RLC_ENABLE_F32_MASK);

	gfx_v6_0_enable_gui_idle_interrupt(adev, true);

	udelay(50);
}

static void gfx_v6_0_rlc_reset(struct amdgpu_device *adev)
{
	WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 1);
	udelay(50);
	WREG32_FIELD(GRBM_SOFT_RESET, SOFT_RESET_RLC, 0);
	udelay(50);
}
static bool gfx_v6_0_lbpw_supported(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Enable LBPW only for DDR3 */
	tmp = RREG32(mmMC_SEQ_MISC0);
	if ((tmp & 0xF0000000) == 0xB0000000)
		return true;
	return false;
}

static void gfx_v6_0_init_cg(struct amdgpu_device *adev)
{
}
static int gfx_v6_0_rlc_resume(struct amdgpu_device *adev)
{
	u32 i;
	const struct rlc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data;
	u32 fw_size;

	if (!adev->gfx.rlc_fw)
		return -EINVAL;

	adev->gfx.rlc.funcs->stop(adev);
	adev->gfx.rlc.funcs->reset(adev);
	gfx_v6_0_init_pg(adev);
	gfx_v6_0_init_cg(adev);

	WREG32(mmRLC_RL_BASE, 0);
	WREG32(mmRLC_RL_SIZE, 0);
	WREG32(mmRLC_LB_CNTL, 0);
	WREG32(mmRLC_LB_CNTR_MAX, 0xffffffff);
	WREG32(mmRLC_LB_CNTR_INIT, 0);
	WREG32(mmRLC_LB_INIT_CU_MASK, 0xffffffff);

	WREG32(mmRLC_MC_CNTL, 0);
	WREG32(mmRLC_UCODE_CNTL, 0);

	hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
	fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->gfx.rlc_fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	amdgpu_ucode_print_rlc_hdr(&hdr->header);

	for (i = 0; i < fw_size; i++) {
		WREG32(mmRLC_UCODE_ADDR, i);
		WREG32(mmRLC_UCODE_DATA, le32_to_cpup(fw_data++));
	}
	WREG32(mmRLC_UCODE_ADDR, 0);

	gfx_v6_0_enable_lbpw(adev, gfx_v6_0_lbpw_supported(adev));
	adev->gfx.rlc.funcs->start(adev);

	return 0;
}
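/**
 * gfx_v6_0_enable_cgcg - toggle coarse grain clockgating
 *
 * @adev: amdgpu_device pointer
 * @enable: enable or disable CGCG
 *
 * Halts the RLC, reprograms the SERDES write masks and control value,
 * and sets or clears the CGCG/CGLS enable bits, honoring the
 * AMD_CG_SUPPORT_GFX_CGCG flag.
 */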
static void gfx_v6_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
{
	u32 data, orig, tmp;

	orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
		gfx_v6_0_enable_gui_idle_interrupt(adev, true);

		WREG32(mmRLC_GCPM_GENERAL_3, 0x00000080);

		tmp = gfx_v6_0_halt_rlc(adev);

		WREG32(mmRLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
		WREG32(mmRLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
		WREG32(mmRLC_SERDES_WR_CTRL, 0x00b000ff);

		gfx_v6_0_wait_for_rlc_serdes(adev);
		gfx_v6_0_update_rlc(adev, tmp);

		WREG32(mmRLC_SERDES_WR_CTRL, 0x007000ff);

		data |= RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK;
	} else {
		gfx_v6_0_enable_gui_idle_interrupt(adev, false);

		RREG32(mmCB_CGTT_SCLK_CTRL);
		RREG32(mmCB_CGTT_SCLK_CTRL);
		RREG32(mmCB_CGTT_SCLK_CTRL);
		RREG32(mmCB_CGTT_SCLK_CTRL);

		data &= ~(RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK | RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK);
	}

	if (orig != data)
		WREG32(mmRLC_CGCG_CGLS_CTRL, data);
}
static void gfx_v6_0_enable_mgcg(struct amdgpu_device *adev, bool enable)
{
	u32 data, orig, tmp = 0;

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
		orig = data = RREG32(mmCGTS_SM_CTRL_REG);
		data = 0x96940200;
		if (orig != data)
			WREG32(mmCGTS_SM_CTRL_REG, data);

		if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
			orig = data = RREG32(mmCP_MEM_SLP_CNTL);
			data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
			if (orig != data)
				WREG32(mmCP_MEM_SLP_CNTL, data);
		}

		orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
		data &= 0xffffffc0;
		if (orig != data)
			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);

		tmp = gfx_v6_0_halt_rlc(adev);

		WREG32(mmRLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
		WREG32(mmRLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
		WREG32(mmRLC_SERDES_WR_CTRL, 0x00d000ff);

		gfx_v6_0_update_rlc(adev, tmp);
	} else {
		orig = data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE);
		data |= 0x00000003;
		if (orig != data)
			WREG32(mmRLC_CGTT_MGCG_OVERRIDE, data);

		data = RREG32(mmCP_MEM_SLP_CNTL);
		if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) {
			data &= ~CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
			WREG32(mmCP_MEM_SLP_CNTL, data);
		}

		orig = data = RREG32(mmCGTS_SM_CTRL_REG);
		data |= CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK | CGTS_SM_CTRL_REG__OVERRIDE_MASK;
		if (orig != data)
			WREG32(mmCGTS_SM_CTRL_REG, data);

		tmp = gfx_v6_0_halt_rlc(adev);

		WREG32(mmRLC_SERDES_WR_MASTER_MASK_0, 0xffffffff);
		WREG32(mmRLC_SERDES_WR_MASTER_MASK_1, 0xffffffff);
		WREG32(mmRLC_SERDES_WR_CTRL, 0x00e000ff);

		gfx_v6_0_update_rlc(adev, tmp);
	}
}
/*
static void gfx_v6_0_update_cg(struct amdgpu_device *adev,
			       bool enable)
{
	gfx_v6_0_enable_gui_idle_interrupt(adev, false);
	if (enable) {
		gfx_v6_0_enable_mgcg(adev, true);
		gfx_v6_0_enable_cgcg(adev, true);
	} else {
		gfx_v6_0_enable_cgcg(adev, false);
		gfx_v6_0_enable_mgcg(adev, false);
	}
	gfx_v6_0_enable_gui_idle_interrupt(adev, true);
}
*/
static void gfx_v6_0_enable_sclk_slowdown_on_pu(struct amdgpu_device *adev,
						bool enable)
{
}

static void gfx_v6_0_enable_sclk_slowdown_on_pd(struct amdgpu_device *adev,
						bool enable)
{
}

static void gfx_v6_0_enable_cp_pg(struct amdgpu_device *adev, bool enable)
{
	u32 data, orig;

	orig = data = RREG32(mmRLC_PG_CNTL);
	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP))
		data &= ~0x8000;
	else
		data |= 0x8000;
	if (orig != data)
		WREG32(mmRLC_PG_CNTL, data);
}

static void gfx_v6_0_enable_gds_pg(struct amdgpu_device *adev, bool enable)
{
}
/*
static void gfx_v6_0_init_cp_pg_table(struct amdgpu_device *adev)
{
	const __le32 *fw_data;
	volatile u32 *dst_ptr;
	int me, i, max_me = 4;
	u32 bo_offset = 0;
	u32 table_offset, table_size;

	if (adev->asic_type == CHIP_KAVERI)
		max_me = 5;

	if (adev->gfx.rlc.cp_table_ptr == NULL)
		return;

	dst_ptr = adev->gfx.rlc.cp_table_ptr;
	for (me = 0; me < max_me; me++) {
		if (me == 0) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.ce_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 1) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.pfp_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 2) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.me_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else if (me == 3) {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		} else {
			const struct gfx_firmware_header_v1_0 *hdr =
				(const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
			fw_data = (const __le32 *)
				(adev->gfx.mec2_fw->data +
				 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
			table_offset = le32_to_cpu(hdr->jt_offset);
			table_size = le32_to_cpu(hdr->jt_size);
		}

		for (i = 0; i < table_size; i ++) {
			dst_ptr[bo_offset + i] =
				cpu_to_le32(le32_to_cpu(fw_data[table_offset + i]));
		}

		bo_offset += table_size;
	}
}
*/
static void gfx_v6_0_enable_gfx_cgpg(struct amdgpu_device *adev,
				     bool enable)
{
	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
		WREG32(mmRLC_TTOP_D, RLC_PUD(0x10) | RLC_PDD(0x10) | RLC_TTPD(0x10) | RLC_MSD(0x10));
		WREG32_FIELD(RLC_PG_CNTL, GFX_POWER_GATING_ENABLE, 1);
		WREG32_FIELD(RLC_AUTO_PG_CTRL, AUTO_PG_EN, 1);
	} else {
		WREG32_FIELD(RLC_AUTO_PG_CTRL, AUTO_PG_EN, 0);
		(void)RREG32(mmDB_RENDER_CONTROL);
	}
}
static void gfx_v6_0_init_ao_cu_mask(struct amdgpu_device *adev)
{
	u32 tmp;

	WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);

	tmp = RREG32(mmRLC_MAX_PG_CU);
	tmp &= ~RLC_MAX_PG_CU__MAX_POWERED_UP_CU_MASK;
	tmp |= (adev->gfx.cu_info.number << RLC_MAX_PG_CU__MAX_POWERED_UP_CU__SHIFT);
	WREG32(mmRLC_MAX_PG_CU, tmp);
}
static void gfx_v6_0_enable_gfx_static_mgpg(struct amdgpu_device *adev,
					    bool enable)
{
	u32 data, orig;

	orig = data = RREG32(mmRLC_PG_CNTL);
	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG))
		data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
	else
		data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
	if (orig != data)
		WREG32(mmRLC_PG_CNTL, data);
}

static void gfx_v6_0_enable_gfx_dynamic_mgpg(struct amdgpu_device *adev,
					     bool enable)
{
	u32 data, orig;

	orig = data = RREG32(mmRLC_PG_CNTL);
	if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG))
		data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
	else
		data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
	if (orig != data)
		WREG32(mmRLC_PG_CNTL, data);
}
static void gfx_v6_0_init_gfx_cgpg(struct amdgpu_device *adev)
{
	u32 tmp;

	WREG32(mmRLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
	WREG32_FIELD(RLC_PG_CNTL, GFX_POWER_GATING_SRC, 1);
	WREG32(mmRLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);

	tmp = RREG32(mmRLC_AUTO_PG_CTRL);
	tmp &= ~RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD_MASK;
	tmp |= (0x700 << RLC_AUTO_PG_CTRL__GRBM_REG_SAVE_GFX_IDLE_THRESHOLD__SHIFT);
	tmp &= ~RLC_AUTO_PG_CTRL__PG_AFTER_GRBM_REG_SAVE_THRESHOLD_MASK;
	WREG32(mmRLC_AUTO_PG_CTRL, tmp);
}
static void gfx_v6_0_update_gfx_pg(struct amdgpu_device *adev, bool enable)
{
	gfx_v6_0_enable_gfx_cgpg(adev, enable);
	gfx_v6_0_enable_gfx_static_mgpg(adev, enable);
	gfx_v6_0_enable_gfx_dynamic_mgpg(adev, enable);
}
static u32 gfx_v6_0_get_csb_size(struct amdgpu_device *adev)
{
	u32 count = 0;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (adev->gfx.rlc.cs_data == NULL)
		return 0;

	/* begin clear state */
	count += 2;
	/* context control state */
	count += 3;

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT)
				count += 2 + ext->reg_count;
			else
				return 0;
		}
	}
	/* pa_sc_raster_config */
	count += 3;
	/* end clear state */
	count += 2;
	/* clear state */
	count += 2;

	return count;
}
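/**
 * gfx_v6_0_get_csb_buffer - build the clear state buffer
 *
 * @adev: amdgpu_device pointer
 * @buffer: destination for the clear state indirect buffer
 *
 * Fills the buffer with the preamble and context control packets, the
 * SECT_CONTEXT register extents from the cs_data table, the cached
 * raster config, and a final CLEAR_STATE packet; the layout mirrors
 * the size computed by gfx_v6_0_get_csb_size().
 */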
static void gfx_v6_0_get_csb_buffer(struct amdgpu_device *adev,
				    volatile u32 *buffer)
{
	u32 count = 0, i;
	const struct cs_section_def *sect = NULL;
	const struct cs_extent_def *ext = NULL;

	if (adev->gfx.rlc.cs_data == NULL)
		return;
	if (buffer == NULL)
		return;

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);
	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	buffer[count++] = cpu_to_le32(0x80000000);
	buffer[count++] = cpu_to_le32(0x80000000);

	for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
		for (ext = sect->section; ext->extent != NULL; ++ext) {
			if (sect->id == SECT_CONTEXT) {
				buffer[count++] =
					cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, ext->reg_count));
				buffer[count++] = cpu_to_le32(ext->reg_index - 0xa000);
				for (i = 0; i < ext->reg_count; i++)
					buffer[count++] = cpu_to_le32(ext->extent[i]);
			} else {
				return;
			}
		}
	}

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_SET_CONTEXT_REG, 1));
	buffer[count++] = cpu_to_le32(mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
	buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	buffer[count++] = cpu_to_le32(PACKET3_PREAMBLE_END_CLEAR_STATE);

	buffer[count++] = cpu_to_le32(PACKET3(PACKET3_CLEAR_STATE, 0));
	buffer[count++] = cpu_to_le32(0);
}
static void gfx_v6_0_init_pg(struct amdgpu_device *adev)
{
	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
			      AMD_PG_SUPPORT_GFX_SMG |
			      AMD_PG_SUPPORT_GFX_DMG |
			      AMD_PG_SUPPORT_CP |
			      AMD_PG_SUPPORT_GDS |
			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
		gfx_v6_0_enable_sclk_slowdown_on_pu(adev, true);
		gfx_v6_0_enable_sclk_slowdown_on_pd(adev, true);
		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
			gfx_v6_0_init_gfx_cgpg(adev);
			gfx_v6_0_enable_cp_pg(adev, true);
			gfx_v6_0_enable_gds_pg(adev, true);
		} else {
			WREG32(mmRLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
			WREG32(mmRLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
		}
		gfx_v6_0_init_ao_cu_mask(adev);
		gfx_v6_0_update_gfx_pg(adev, true);
	} else {
		WREG32(mmRLC_SAVE_AND_RESTORE_BASE, adev->gfx.rlc.save_restore_gpu_addr >> 8);
		WREG32(mmRLC_CLEAR_STATE_RESTORE_BASE, adev->gfx.rlc.clear_state_gpu_addr >> 8);
	}
}
static void gfx_v6_0_fini_pg(struct amdgpu_device *adev)
{
	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
			      AMD_PG_SUPPORT_GFX_SMG |
			      AMD_PG_SUPPORT_GFX_DMG |
			      AMD_PG_SUPPORT_CP |
			      AMD_PG_SUPPORT_GDS |
			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
		gfx_v6_0_update_gfx_pg(adev, false);
		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
			gfx_v6_0_enable_cp_pg(adev, false);
			gfx_v6_0_enable_gds_pg(adev, false);
		}
	}
}
static uint64_t gfx_v6_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
	uint64_t clock;

	mutex_lock(&adev->gfx.gpu_clock_mutex);
	WREG32(mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32(mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&adev->gfx.gpu_clock_mutex);
	return clock;
}
static void gfx_v6_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags)
{
	if (flags & AMDGPU_HAVE_CTX_SWITCH)
		gfx_v6_0_ring_emit_vgt_flush(ring);
	amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1));
	amdgpu_ring_write(ring, 0x80000000);
	amdgpu_ring_write(ring, 0);
}
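/*
 * Wave debug helpers: SQ_IND_INDEX/SQ_IND_DATA form an indirect register
 * pair exposing per-wave state. wave_read_ind() fetches a single value
 * for a given SIMD/wave, while wave_read_regs() uses the auto-increment
 * mode to stream a range of registers (e.g. SGPRs).
 */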
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
	WREG32(mmSQ_IND_INDEX,
	       (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
	       (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
	       (address << SQ_IND_INDEX__INDEX__SHIFT) |
	       (SQ_IND_INDEX__FORCE_READ_MASK));
	return RREG32(mmSQ_IND_DATA);
}

static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
			   uint32_t wave, uint32_t thread,
			   uint32_t regno, uint32_t num, uint32_t *out)
{
	WREG32(mmSQ_IND_INDEX,
	       (wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
	       (simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
	       (regno << SQ_IND_INDEX__INDEX__SHIFT) |
	       (thread << SQ_IND_INDEX__THREAD_ID__SHIFT) |
	       (SQ_IND_INDEX__FORCE_READ_MASK) |
	       (SQ_IND_INDEX__AUTO_INCR_MASK));
	while (num--)
		*(out++) = RREG32(mmSQ_IND_DATA);
}
static void gfx_v6_0_read_wave_data(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t *dst, int *no_fields)
{
	/* type 0 wave data */
	dst[(*no_fields)++] = 0;
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_STATUS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_PC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_EXEC_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_HW_ID);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_INST_DW1);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_GPR_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_LDS_ALLOC);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TRAPSTS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_STS);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TBA_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_LO);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_TMA_HI);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_IB_DBG0);
	dst[(*no_fields)++] = wave_read_ind(adev, simd, wave, ixSQ_WAVE_M0);
}
static void gfx_v6_0_read_wave_sgprs(struct amdgpu_device *adev, uint32_t simd,
				     uint32_t wave, uint32_t start,
				     uint32_t size, uint32_t *dst)
{
	wave_read_regs(
		adev, simd, wave, 0,
		start + SQIND_WAVE_SGPRS_OFFSET, size, dst);
}
static void gfx_v6_0_select_me_pipe_q(struct amdgpu_device *adev,
				      u32 me, u32 pipe, u32 q, u32 vm)
{
	DRM_INFO("Not implemented\n");
}
static const struct amdgpu_gfx_funcs gfx_v6_0_gfx_funcs = {
	.get_gpu_clock_counter = &gfx_v6_0_get_gpu_clock_counter,
	.select_se_sh = &gfx_v6_0_select_se_sh,
	.read_wave_data = &gfx_v6_0_read_wave_data,
	.read_wave_sgprs = &gfx_v6_0_read_wave_sgprs,
	.select_me_pipe_q = &gfx_v6_0_select_me_pipe_q
};

static const struct amdgpu_rlc_funcs gfx_v6_0_rlc_funcs = {
	.init = gfx_v6_0_rlc_init,
	.resume = gfx_v6_0_rlc_resume,
	.stop = gfx_v6_0_rlc_stop,
	.reset = gfx_v6_0_rlc_reset,
	.start = gfx_v6_0_rlc_start
};
static int gfx_v6_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	adev->gfx.num_gfx_rings = GFX6_NUM_GFX_RINGS;
	adev->gfx.num_compute_rings = GFX6_NUM_COMPUTE_RINGS;
	adev->gfx.funcs = &gfx_v6_0_gfx_funcs;
	adev->gfx.rlc.funcs = &gfx_v6_0_rlc_funcs;
	gfx_v6_0_set_ring_funcs(adev);
	gfx_v6_0_set_irq_funcs(adev);

	return 0;
}
static int gfx_v6_0_sw_init(void *handle)
{
	struct amdgpu_ring *ring;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	int i, r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 181, &adev->gfx.eop_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 184, &adev->gfx.priv_reg_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 185, &adev->gfx.priv_inst_irq);
	if (r)
		return r;

	gfx_v6_0_scratch_init(adev);

	r = gfx_v6_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load gfx firmware!\n");
		return r;
	}

	r = adev->gfx.rlc.funcs->init(adev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
		ring = &adev->gfx.gfx_ring[i];
		ring->ring_obj = NULL;
		sprintf(ring->name, "gfx");
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->gfx.eop_irq, AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP);
		if (r)
			return r;
	}

	for (i = 0; i < adev->gfx.num_compute_rings; i++) {
		unsigned irq_type;

		if ((i >= 32) || (i >= AMDGPU_MAX_COMPUTE_RINGS)) {
			DRM_ERROR("Too many (%d) compute rings!\n", i);
			break;
		}
		ring = &adev->gfx.compute_ring[i];
		ring->ring_obj = NULL;
		ring->use_doorbell = false;
		ring->doorbell_index = 0;
		ring->me = 1;
		ring->pipe = i;
		ring->queue = i;
		sprintf(ring->name, "comp_%d.%d.%d", ring->me, ring->pipe, ring->queue);
		irq_type = AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP + ring->pipe;
		r = amdgpu_ring_init(adev, ring, 1024,
				     &adev->gfx.eop_irq, irq_type);
		if (r)
			return r;
	}

	return r;
}
static int gfx_v6_0_sw_fini(void *handle)
{
	int i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		amdgpu_ring_fini(&adev->gfx.compute_ring[i]);

	amdgpu_gfx_rlc_fini(adev);

	return 0;
}
static int gfx_v6_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfx_v6_0_constants_init(adev);

	r = adev->gfx.rlc.funcs->resume(adev);
	if (r)
		return r;

	r = gfx_v6_0_cp_resume(adev);
	if (r)
		return r;

	adev->gfx.ce_ram_size = 0x8000;

	return r;
}
static int gfx_v6_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gfx_v6_0_cp_enable(adev, false);
	adev->gfx.rlc.funcs->stop(adev);
	gfx_v6_0_fini_pg(adev);

	return 0;
}
static int gfx_v6_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return gfx_v6_0_hw_fini(adev);
}

static int gfx_v6_0_resume(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	return gfx_v6_0_hw_init(adev);
}
static bool gfx_v6_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (RREG32(mmGRBM_STATUS) & GRBM_STATUS__GUI_ACTIVE_MASK)
		return false;
	else
		return true;
}

static int gfx_v6_0_wait_for_idle(void *handle)
{
	unsigned i;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		if (gfx_v6_0_is_idle(handle))
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}
static int gfx_v6_0_soft_reset(void *handle)
{
	return 0;
}
static void gfx_v6_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev,
						 enum amdgpu_interrupt_state state)
{
	u32 cp_int_cntl;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
		cp_int_cntl &= ~CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
		cp_int_cntl |= CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK;
		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
		break;
	default:
		break;
	}
}

static void gfx_v6_0_set_compute_eop_interrupt_state(struct amdgpu_device *adev,
						     int ring,
						     enum amdgpu_interrupt_state state)
{
	u32 cp_int_cntl;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		if (ring == 0) {
			cp_int_cntl = RREG32(mmCP_INT_CNTL_RING1);
			cp_int_cntl &= ~CP_INT_CNTL_RING1__TIME_STAMP_INT_ENABLE_MASK;
			WREG32(mmCP_INT_CNTL_RING1, cp_int_cntl);
		} else {
			cp_int_cntl = RREG32(mmCP_INT_CNTL_RING2);
			cp_int_cntl &= ~CP_INT_CNTL_RING2__TIME_STAMP_INT_ENABLE_MASK;
			WREG32(mmCP_INT_CNTL_RING2, cp_int_cntl);
		}
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		if (ring == 0) {
			cp_int_cntl = RREG32(mmCP_INT_CNTL_RING1);
			cp_int_cntl |= CP_INT_CNTL_RING1__TIME_STAMP_INT_ENABLE_MASK;
			WREG32(mmCP_INT_CNTL_RING1, cp_int_cntl);
		} else {
			cp_int_cntl = RREG32(mmCP_INT_CNTL_RING2);
			cp_int_cntl |= CP_INT_CNTL_RING2__TIME_STAMP_INT_ENABLE_MASK;
			WREG32(mmCP_INT_CNTL_RING2, cp_int_cntl);
		}
		break;
	default:
		break;
	}
}
static int gfx_v6_0_set_priv_reg_fault_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned type,
					     enum amdgpu_interrupt_state state)
{
	u32 cp_int_cntl;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
		cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
		cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_REG_INT_ENABLE_MASK;
		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
		break;
	default:
		break;
	}

	return 0;
}

static int gfx_v6_0_set_priv_inst_fault_state(struct amdgpu_device *adev,
					      struct amdgpu_irq_src *src,
					      unsigned type,
					      enum amdgpu_interrupt_state state)
{
	u32 cp_int_cntl;

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
		cp_int_cntl &= ~CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
		cp_int_cntl |= CP_INT_CNTL_RING0__PRIV_INSTR_INT_ENABLE_MASK;
		WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
		break;
	default:
		break;
	}

	return 0;
}
static int gfx_v6_0_set_eop_interrupt_state(struct amdgpu_device *adev,
					    struct amdgpu_irq_src *src,
					    unsigned type,
					    enum amdgpu_interrupt_state state)
{
	switch (type) {
	case AMDGPU_CP_IRQ_GFX_ME0_PIPE0_EOP:
		gfx_v6_0_set_gfx_eop_interrupt_state(adev, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE0_EOP:
		gfx_v6_0_set_compute_eop_interrupt_state(adev, 0, state);
		break;
	case AMDGPU_CP_IRQ_COMPUTE_MEC1_PIPE1_EOP:
		gfx_v6_0_set_compute_eop_interrupt_state(adev, 1, state);
		break;
	default:
		break;
	}
	return 0;
}

static int gfx_v6_0_eop_irq(struct amdgpu_device *adev,
			    struct amdgpu_irq_src *source,
			    struct amdgpu_iv_entry *entry)
{
	switch (entry->ring_id) {
	case 0:
		amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
		break;
	case 1:
	case 2:
		amdgpu_fence_process(&adev->gfx.compute_ring[entry->ring_id - 1]);
		break;
	default:
		break;
	}
	return 0;
}
static void gfx_v6_0_fault(struct amdgpu_device *adev,
			   struct amdgpu_iv_entry *entry)
{
	struct amdgpu_ring *ring;

	switch (entry->ring_id) {
	case 0:
		ring = &adev->gfx.gfx_ring[0];
		break;
	case 1:
	case 2:
		ring = &adev->gfx.compute_ring[entry->ring_id - 1];
		break;
	default:
		return;
	}
	drm_sched_fault(&ring->sched);
}

static int gfx_v6_0_priv_reg_irq(struct amdgpu_device *adev,
				 struct amdgpu_irq_src *source,
				 struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal register access in command stream\n");
	gfx_v6_0_fault(adev, entry);
	return 0;
}

static int gfx_v6_0_priv_inst_irq(struct amdgpu_device *adev,
				  struct amdgpu_irq_src *source,
				  struct amdgpu_iv_entry *entry)
{
	DRM_ERROR("Illegal instruction in command stream\n");
	gfx_v6_0_fault(adev, entry);
	return 0;
}
static int gfx_v6_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_CG_STATE_GATE)
		gate = true;

	gfx_v6_0_enable_gui_idle_interrupt(adev, false);
	if (gate) {
		gfx_v6_0_enable_mgcg(adev, true);
		gfx_v6_0_enable_cgcg(adev, true);
	} else {
		gfx_v6_0_enable_cgcg(adev, false);
		gfx_v6_0_enable_mgcg(adev, false);
	}
	gfx_v6_0_enable_gui_idle_interrupt(adev, true);

	return 0;
}

static int gfx_v6_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_PG_STATE_GATE)
		gate = true;

	if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
			      AMD_PG_SUPPORT_GFX_SMG |
			      AMD_PG_SUPPORT_GFX_DMG |
			      AMD_PG_SUPPORT_CP |
			      AMD_PG_SUPPORT_GDS |
			      AMD_PG_SUPPORT_RLC_SMU_HS)) {
		gfx_v6_0_update_gfx_pg(adev, gate);
		if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
			gfx_v6_0_enable_cp_pg(adev, gate);
			gfx_v6_0_enable_gds_pg(adev, gate);
		}
	}

	return 0;
}
static const struct amd_ip_funcs gfx_v6_0_ip_funcs = {
	.name = "gfx_v6_0",
	.early_init = gfx_v6_0_early_init,
	.late_init = NULL,
	.sw_init = gfx_v6_0_sw_init,
	.sw_fini = gfx_v6_0_sw_fini,
	.hw_init = gfx_v6_0_hw_init,
	.hw_fini = gfx_v6_0_hw_fini,
	.suspend = gfx_v6_0_suspend,
	.resume = gfx_v6_0_resume,
	.is_idle = gfx_v6_0_is_idle,
	.wait_for_idle = gfx_v6_0_wait_for_idle,
	.soft_reset = gfx_v6_0_soft_reset,
	.set_clockgating_state = gfx_v6_0_set_clockgating_state,
	.set_powergating_state = gfx_v6_0_set_powergating_state,
};
static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_gfx = {
	.type = AMDGPU_RING_TYPE_GFX,
	.align_mask = 0xff,
	.nop = 0x80000000,
	.support_64bit_ptrs = false,
	.get_rptr = gfx_v6_0_ring_get_rptr,
	.get_wptr = gfx_v6_0_ring_get_wptr,
	.set_wptr = gfx_v6_0_ring_set_wptr_gfx,
	.emit_frame_size =
		5 + 5 + /* hdp flush / invalidate */
		14 + 14 + 14 + /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
		7 + 4 + /* gfx_v6_0_ring_emit_pipeline_sync */
		SI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + 6 + /* gfx_v6_0_ring_emit_vm_flush */
		3 + 2, /* gfx_v6_ring_emit_cntxcntl including vgt flush */
	.emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
	.emit_ib = gfx_v6_0_ring_emit_ib,
	.emit_fence = gfx_v6_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v6_0_ring_emit_vm_flush,
	.test_ring = gfx_v6_0_ring_test_ring,
	.test_ib = gfx_v6_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.emit_cntxcntl = gfx_v6_ring_emit_cntxcntl,
	.emit_wreg = gfx_v6_0_ring_emit_wreg,
};
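
/*
 * The compute variant shares most entry points with the gfx ring but
 * uses its own wptr helper and omits .emit_cntxcntl, since context
 * control packets only apply to the gfx ring.
 */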
static const struct amdgpu_ring_funcs gfx_v6_0_ring_funcs_compute = {
	.type = AMDGPU_RING_TYPE_COMPUTE,
	.align_mask = 0xff,
	.nop = 0x80000000,
	.get_rptr = gfx_v6_0_ring_get_rptr,
	.get_wptr = gfx_v6_0_ring_get_wptr,
	.set_wptr = gfx_v6_0_ring_set_wptr_compute,
	.emit_frame_size =
		5 + 5 + /* hdp flush / invalidate */
		7 + /* gfx_v6_0_ring_emit_pipeline_sync */
		SI_FLUSH_GPU_TLB_NUM_WREG * 5 + 7 + /* gfx_v6_0_ring_emit_vm_flush */
		14 + 14 + 14, /* gfx_v6_0_ring_emit_fence x3 for user fence, vm fence */
	.emit_ib_size = 6, /* gfx_v6_0_ring_emit_ib */
	.emit_ib = gfx_v6_0_ring_emit_ib,
	.emit_fence = gfx_v6_0_ring_emit_fence,
	.emit_pipeline_sync = gfx_v6_0_ring_emit_pipeline_sync,
	.emit_vm_flush = gfx_v6_0_ring_emit_vm_flush,
	.test_ring = gfx_v6_0_ring_test_ring,
	.test_ib = gfx_v6_0_ring_test_ib,
	.insert_nop = amdgpu_ring_insert_nop,
	.emit_wreg = gfx_v6_0_ring_emit_wreg,
};
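
/* Attach the function tables above to every gfx and compute ring. */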
static void gfx_v6_0_set_ring_funcs(struct amdgpu_device *adev)
{
	int i;

	for (i = 0; i < adev->gfx.num_gfx_rings; i++)
		adev->gfx.gfx_ring[i].funcs = &gfx_v6_0_ring_funcs_gfx;
	for (i = 0; i < adev->gfx.num_compute_rings; i++)
		adev->gfx.compute_ring[i].funcs = &gfx_v6_0_ring_funcs_compute;
}
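
/*
 * Interrupt source tables: .set programs the hardware enable state for
 * a source and .process runs when the IH ring delivers a matching entry.
 */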
static const struct amdgpu_irq_src_funcs gfx_v6_0_eop_irq_funcs = {
	.set = gfx_v6_0_set_eop_interrupt_state,
	.process = gfx_v6_0_eop_irq,
};
static const struct amdgpu_irq_src_funcs gfx_v6_0_priv_reg_irq_funcs = {
	.set = gfx_v6_0_set_priv_reg_fault_state,
	.process = gfx_v6_0_priv_reg_irq,
};
static const struct amdgpu_irq_src_funcs gfx_v6_0_priv_inst_irq_funcs = {
	.set = gfx_v6_0_set_priv_inst_fault_state,
	.process = gfx_v6_0_priv_inst_irq,
};
static void gfx_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
	adev->gfx.eop_irq.funcs = &gfx_v6_0_eop_irq_funcs;

	adev->gfx.priv_reg_irq.num_types = 1;
	adev->gfx.priv_reg_irq.funcs = &gfx_v6_0_priv_reg_irq_funcs;

	adev->gfx.priv_inst_irq.num_types = 1;
	adev->gfx.priv_inst_irq.funcs = &gfx_v6_0_priv_inst_irq_funcs;
}
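
/*
 * Walk every shader engine / shader array, apply any user-requested CU
 * disable masks, and record the enabled-CU bitmap plus the "always on"
 * CU mask used for power management.
 */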
static void gfx_v6_0_get_cu_info(struct amdgpu_device *adev)
{
	int i, j, k, counter, active_cu_number = 0;
	u32 mask, bitmap, ao_bitmap, ao_cu_mask = 0;
	struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
	unsigned disable_masks[4 * 2];
	u32 ao_cu_num;

	if (adev->flags & AMD_IS_APU)
		ao_cu_num = 2;
	else
		ao_cu_num = adev->gfx.config.max_cu_per_sh;

	memset(cu_info, 0, sizeof(*cu_info));

	amdgpu_gfx_parse_disable_cu(disable_masks, 4, 2);

	mutex_lock(&adev->grbm_idx_mutex);
	for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
		for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
			mask = 1;
			ao_bitmap = 0;
			counter = 0;
			gfx_v6_0_select_se_sh(adev, i, j, 0xffffffff);
			if (i < 4 && j < 2)
				gfx_v6_0_set_user_cu_inactive_bitmap(
					adev, disable_masks[i * 2 + j]);
			bitmap = gfx_v6_0_get_cu_enabled(adev);
			cu_info->bitmap[i][j] = bitmap;

			for (k = 0; k < adev->gfx.config.max_cu_per_sh; k++) {
				if (bitmap & mask) {
					if (counter < ao_cu_num)
						ao_bitmap |= mask;
					counter++;
				}
				mask <<= 1;
			}

			active_cu_number += counter;
			if (i < 2 && j < 2)
				ao_cu_mask |= (ao_bitmap << (i * 16 + j * 8));
			cu_info->ao_cu_bitmap[i][j] = ao_bitmap;
		}
	}

	gfx_v6_0_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
	mutex_unlock(&adev->grbm_idx_mutex);

	cu_info->number = active_cu_number;
	cu_info->ao_cu_mask = ao_cu_mask;
}
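
/* Version descriptor registered with the amdgpu IP block framework. */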
const struct amdgpu_ip_block_version gfx_v6_0_ip_block =
{
	.type = AMD_IP_BLOCK_TYPE_GFX,
	.major = 6,
	.minor = 0,
	.rev = 0,
	.funcs = &gfx_v6_0_ip_funcs,
};