/*
 * Copyright 2011 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Alex Deucher
 */
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/module.h>
/* core radeon state and SI register definitions (assumed to match the
 * original include list, which is partially missing here) */
#include "radeon.h"
#include "radeon_asic.h"
#include "radeon_drm.h"
#include "sid.h"
#include "si_blit_shaders.h"
#define SI_PFP_UCODE_SIZE 2144
#define SI_PM4_UCODE_SIZE 2144
#define SI_CE_UCODE_SIZE 2144
#define SI_RLC_UCODE_SIZE 2048
#define SI_MC_UCODE_SIZE 7769
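
/* The *_UCODE_SIZE values above are dword counts: si_init_microcode() below
 * validates that each firmware blob is exactly SIZE * 4 bytes long, and the
 * load routines (si_cp_load_microcode()/si_mc_load_microcode()) stream SIZE
 * big-endian dwords into the respective ucode RAM.
 */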
MODULE_FIRMWARE("radeon/TAHITI_pfp.bin");
MODULE_FIRMWARE("radeon/TAHITI_me.bin");
MODULE_FIRMWARE("radeon/TAHITI_ce.bin");
MODULE_FIRMWARE("radeon/TAHITI_mc.bin");
MODULE_FIRMWARE("radeon/TAHITI_rlc.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_pfp.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_me.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_ce.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_mc.bin");
MODULE_FIRMWARE("radeon/PITCAIRN_rlc.bin");
MODULE_FIRMWARE("radeon/VERDE_pfp.bin");
MODULE_FIRMWARE("radeon/VERDE_me.bin");
MODULE_FIRMWARE("radeon/VERDE_ce.bin");
MODULE_FIRMWARE("radeon/VERDE_mc.bin");
MODULE_FIRMWARE("radeon/VERDE_rlc.bin");
extern int r600_ih_ring_alloc(struct radeon_device *rdev);
extern void r600_ih_ring_fini(struct radeon_device *rdev);
extern void evergreen_fix_pci_max_read_req_size(struct radeon_device *rdev);
extern void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *save);
extern u32 evergreen_get_number_of_dram_channels(struct radeon_device *rdev);
/* get temperature in millidegrees */
int si_get_temp(struct radeon_device *rdev)
{
    u32 temp;
    int actual_temp = 0;

    temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
        CTF_TEMP_SHIFT;

    actual_temp = temp & 0x1ff;

    actual_temp = (actual_temp * 1000);

    return actual_temp;
}
#define TAHITI_IO_MC_REGS_SIZE 36

static const u32 tahiti_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
    {0x0000006f, 0x03044000},
    {0x00000070, 0x0480c018},
    {0x00000071, 0x00000040},
    {0x00000072, 0x01000000},
    {0x00000074, 0x000000ff},
    {0x00000075, 0x00143400},
    {0x00000076, 0x08ec0800},
    {0x00000077, 0x040000cc},
    {0x00000079, 0x00000000},
    {0x0000007a, 0x21000409},
    {0x0000007c, 0x00000000},
    {0x0000007d, 0xe8000000},
    {0x0000007e, 0x044408a8},
    {0x0000007f, 0x00000003},
    {0x00000080, 0x00000000},
    {0x00000081, 0x01000000},
    {0x00000082, 0x02000000},
    {0x00000083, 0x00000000},
    {0x00000084, 0xe3f3e4f4},
    {0x00000085, 0x00052024},
    {0x00000087, 0x00000000},
    {0x00000088, 0x66036603},
    {0x00000089, 0x01000000},
    {0x0000008b, 0x1c0a0000},
    {0x0000008c, 0xff010000},
    {0x0000008e, 0xffffefff},
    {0x0000008f, 0xfff3efff},
    {0x00000090, 0xfff3efbf},
    {0x00000094, 0x00101101},
    {0x00000095, 0x00000fff},
    {0x00000096, 0x00116fff},
    {0x00000097, 0x60010000},
    {0x00000098, 0x10010000},
    {0x00000099, 0x00006000},
    {0x0000009a, 0x00001000},
    {0x0000009f, 0x00a77400}
};
static const u32 pitcairn_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
    {0x0000006f, 0x03044000},
    {0x00000070, 0x0480c018},
    {0x00000071, 0x00000040},
    {0x00000072, 0x01000000},
    {0x00000074, 0x000000ff},
    {0x00000075, 0x00143400},
    {0x00000076, 0x08ec0800},
    {0x00000077, 0x040000cc},
    {0x00000079, 0x00000000},
    {0x0000007a, 0x21000409},
    {0x0000007c, 0x00000000},
    {0x0000007d, 0xe8000000},
    {0x0000007e, 0x044408a8},
    {0x0000007f, 0x00000003},
    {0x00000080, 0x00000000},
    {0x00000081, 0x01000000},
    {0x00000082, 0x02000000},
    {0x00000083, 0x00000000},
    {0x00000084, 0xe3f3e4f4},
    {0x00000085, 0x00052024},
    {0x00000087, 0x00000000},
    {0x00000088, 0x66036603},
    {0x00000089, 0x01000000},
    {0x0000008b, 0x1c0a0000},
    {0x0000008c, 0xff010000},
    {0x0000008e, 0xffffefff},
    {0x0000008f, 0xfff3efff},
    {0x00000090, 0xfff3efbf},
    {0x00000094, 0x00101101},
    {0x00000095, 0x00000fff},
    {0x00000096, 0x00116fff},
    {0x00000097, 0x60010000},
    {0x00000098, 0x10010000},
    {0x00000099, 0x00006000},
    {0x0000009a, 0x00001000},
    {0x0000009f, 0x00a47400}
};
static const u32 verde_io_mc_regs[TAHITI_IO_MC_REGS_SIZE][2] = {
    {0x0000006f, 0x03044000},
    {0x00000070, 0x0480c018},
    {0x00000071, 0x00000040},
    {0x00000072, 0x01000000},
    {0x00000074, 0x000000ff},
    {0x00000075, 0x00143400},
    {0x00000076, 0x08ec0800},
    {0x00000077, 0x040000cc},
    {0x00000079, 0x00000000},
    {0x0000007a, 0x21000409},
    {0x0000007c, 0x00000000},
    {0x0000007d, 0xe8000000},
    {0x0000007e, 0x044408a8},
    {0x0000007f, 0x00000003},
    {0x00000080, 0x00000000},
    {0x00000081, 0x01000000},
    {0x00000082, 0x02000000},
    {0x00000083, 0x00000000},
    {0x00000084, 0xe3f3e4f4},
    {0x00000085, 0x00052024},
    {0x00000087, 0x00000000},
    {0x00000088, 0x66036603},
    {0x00000089, 0x01000000},
    {0x0000008b, 0x1c0a0000},
    {0x0000008c, 0xff010000},
    {0x0000008e, 0xffffefff},
    {0x0000008f, 0xfff3efff},
    {0x00000090, 0xfff3efbf},
    {0x00000094, 0x00101101},
    {0x00000095, 0x00000fff},
    {0x00000096, 0x00116fff},
    {0x00000097, 0x60010000},
    {0x00000098, 0x10010000},
    {0x00000099, 0x00006000},
    {0x0000009a, 0x00001000},
    {0x0000009f, 0x00a37400}
};
static int si_mc_load_microcode(struct radeon_device *rdev)
{
    const __be32 *fw_data;
    u32 running, blackout = 0;
    u32 *io_mc_regs;
    int i, ucode_size, regs_size;

    switch (rdev->family) {
    case CHIP_TAHITI:
        io_mc_regs = (u32 *)&tahiti_io_mc_regs;
        ucode_size = SI_MC_UCODE_SIZE;
        regs_size = TAHITI_IO_MC_REGS_SIZE;
        break;
    case CHIP_PITCAIRN:
        io_mc_regs = (u32 *)&pitcairn_io_mc_regs;
        ucode_size = SI_MC_UCODE_SIZE;
        regs_size = TAHITI_IO_MC_REGS_SIZE;
        break;
    case CHIP_VERDE:
    default:
        io_mc_regs = (u32 *)&verde_io_mc_regs;
        ucode_size = SI_MC_UCODE_SIZE;
        regs_size = TAHITI_IO_MC_REGS_SIZE;
        break;
    }

    running = RREG32(MC_SEQ_SUP_CNTL) & RUN_MASK;

    blackout = RREG32(MC_SHARED_BLACKOUT_CNTL);
    WREG32(MC_SHARED_BLACKOUT_CNTL, blackout | 1);

    /* reset the engine and set to writable */
    WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
    WREG32(MC_SEQ_SUP_CNTL, 0x00000010);

    /* load mc io regs */
    for (i = 0; i < regs_size; i++) {
        WREG32(MC_SEQ_IO_DEBUG_INDEX, io_mc_regs[(i << 1)]);
        WREG32(MC_SEQ_IO_DEBUG_DATA, io_mc_regs[(i << 1) + 1]);
    }
    /* load the MC ucode */
    fw_data = (const __be32 *)rdev->mc_fw->data;
    for (i = 0; i < ucode_size; i++)
        WREG32(MC_SEQ_SUP_PGM, be32_to_cpup(fw_data++));

    /* put the engine back into the active state */
    WREG32(MC_SEQ_SUP_CNTL, 0x00000008);
    WREG32(MC_SEQ_SUP_CNTL, 0x00000004);
    WREG32(MC_SEQ_SUP_CNTL, 0x00000001);

    /* wait for training to complete */
    for (i = 0; i < rdev->usec_timeout; i++) {
        if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D0)
            break;
        udelay(1);
    }
    for (i = 0; i < rdev->usec_timeout; i++) {
        if (RREG32(MC_SEQ_TRAIN_WAKEUP_CNTL) & TRAIN_DONE_D1)
            break;
        udelay(1);
    }

    WREG32(MC_SHARED_BLACKOUT_CNTL, blackout);

    return 0;
}
static int si_init_microcode(struct radeon_device *rdev)
{
    struct platform_device *pdev;
    const char *chip_name;
    const char *rlc_chip_name;
    size_t pfp_req_size, me_req_size, ce_req_size, rlc_req_size, mc_req_size;
    char fw_name[30];
    int err;

    pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
    err = IS_ERR(pdev);
    if (err) {
        printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
        return -EINVAL;
    }

    switch (rdev->family) {
    case CHIP_TAHITI:
        chip_name = "TAHITI";
        rlc_chip_name = "TAHITI";
        pfp_req_size = SI_PFP_UCODE_SIZE * 4;
        me_req_size = SI_PM4_UCODE_SIZE * 4;
        ce_req_size = SI_CE_UCODE_SIZE * 4;
        rlc_req_size = SI_RLC_UCODE_SIZE * 4;
        mc_req_size = SI_MC_UCODE_SIZE * 4;
        break;
    case CHIP_PITCAIRN:
        chip_name = "PITCAIRN";
        rlc_chip_name = "PITCAIRN";
        pfp_req_size = SI_PFP_UCODE_SIZE * 4;
        me_req_size = SI_PM4_UCODE_SIZE * 4;
        ce_req_size = SI_CE_UCODE_SIZE * 4;
        rlc_req_size = SI_RLC_UCODE_SIZE * 4;
        mc_req_size = SI_MC_UCODE_SIZE * 4;
        break;
    case CHIP_VERDE:
        chip_name = "VERDE";
        rlc_chip_name = "VERDE";
        pfp_req_size = SI_PFP_UCODE_SIZE * 4;
        me_req_size = SI_PM4_UCODE_SIZE * 4;
        ce_req_size = SI_CE_UCODE_SIZE * 4;
        rlc_req_size = SI_RLC_UCODE_SIZE * 4;
        mc_req_size = SI_MC_UCODE_SIZE * 4;
        break;
    default:
        BUG();
    }

    DRM_INFO("Loading %s Microcode\n", chip_name);

    snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
    err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
    if (err)
        goto out;
    if (rdev->pfp_fw->size != pfp_req_size) {
        printk(KERN_ERR
               "si_cp: Bogus length %zu in firmware \"%s\"\n",
               rdev->pfp_fw->size, fw_name);
        err = -EINVAL;
        goto out;
    }

    snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
    err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
    if (err)
        goto out;
    if (rdev->me_fw->size != me_req_size) {
        printk(KERN_ERR
               "si_cp: Bogus length %zu in firmware \"%s\"\n",
               rdev->me_fw->size, fw_name);
        err = -EINVAL;
        goto out;
    }

    snprintf(fw_name, sizeof(fw_name), "radeon/%s_ce.bin", chip_name);
    err = request_firmware(&rdev->ce_fw, fw_name, &pdev->dev);
    if (err)
        goto out;
    if (rdev->ce_fw->size != ce_req_size) {
        printk(KERN_ERR
               "si_cp: Bogus length %zu in firmware \"%s\"\n",
               rdev->ce_fw->size, fw_name);
        err = -EINVAL;
        goto out;
    }

    snprintf(fw_name, sizeof(fw_name), "radeon/%s_rlc.bin", rlc_chip_name);
    err = request_firmware(&rdev->rlc_fw, fw_name, &pdev->dev);
    if (err)
        goto out;
    if (rdev->rlc_fw->size != rlc_req_size) {
        printk(KERN_ERR
               "si_rlc: Bogus length %zu in firmware \"%s\"\n",
               rdev->rlc_fw->size, fw_name);
        err = -EINVAL;
        goto out;
    }

    snprintf(fw_name, sizeof(fw_name), "radeon/%s_mc.bin", chip_name);
    err = request_firmware(&rdev->mc_fw, fw_name, &pdev->dev);
    if (err)
        goto out;
    if (rdev->mc_fw->size != mc_req_size) {
        printk(KERN_ERR
               "si_mc: Bogus length %zu in firmware \"%s\"\n",
               rdev->mc_fw->size, fw_name);
        err = -EINVAL;
        goto out;
    }

out:
    platform_device_unregister(pdev);

    if (err) {
        printk(KERN_ERR
               "si_cp: Failed to load firmware \"%s\"\n",
               fw_name);
        release_firmware(rdev->pfp_fw);
        release_firmware(rdev->me_fw);
        release_firmware(rdev->ce_fw);
        release_firmware(rdev->rlc_fw);
        release_firmware(rdev->mc_fw);
    }
    return err;
}
/* watermark setup */
static u32 dce6_line_buffer_adjust(struct radeon_device *rdev,
                                   struct radeon_crtc *radeon_crtc,
                                   struct drm_display_mode *mode,
                                   struct drm_display_mode *other_mode)
{
    u32 tmp;
    /*
     * There are 3 line buffers, each one shared by 2 display controllers.
     * DC_LB_MEMORY_SPLIT controls how that line buffer is shared between
     * the display controllers.  The partitioning is done via one of four
     * preset allocations specified in bits 21:20:
     *  2 - whole lb, other crtc must be disabled
     */
    /* this can get tricky if we have two large displays on a paired group
     * of crtcs.  Ideally for multiple large displays we'd assign them to
     * non-linked crtcs for maximum line buffer allocation.
     */
    if (radeon_crtc->base.enabled && mode) {
        if (other_mode)
            tmp = 0; /* share the line buffer */
        else
            tmp = 2; /* whole lb */
    } else
        tmp = 0;

    WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset,
           DC_LB_MEMORY_CONFIG(tmp));

    if (radeon_crtc->base.enabled && mode) {
        switch (tmp) {
        case 0:
        default:
            return 4096 * 2;
        case 2:
            return 8192 * 2;
        }
    }

    /* controller not enabled, so no lb used */
    return 0;
}
static u32 si_get_number_of_dram_channels(struct radeon_device *rdev)
{
    u32 tmp = RREG32(MC_SHARED_CHMAP);

    /* the NOOFCHAN decode below is assumed to follow the same mapping as
     * the evergreen/NI channel-count decode */
    switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
    case 0:
    default:
        return 1;
    case 1:
        return 2;
    case 2:
        return 4;
    case 3:
        return 8;
    case 4:
        return 3;
    case 5:
        return 6;
    case 6:
        return 10;
    case 7:
        return 12;
    case 8:
        return 16;
    }
}
struct dce6_wm_params {
    u32 dram_channels; /* number of dram channels */
    u32 yclk; /* bandwidth per dram data pin in kHz */
    u32 sclk; /* engine clock in kHz */
    u32 disp_clk; /* display clock in kHz */
    u32 src_width; /* viewport width */
    u32 active_time; /* active display time in ns */
    u32 blank_time; /* blank time in ns */
    bool interlaced; /* mode is interlaced */
    fixed20_12 vsc; /* vertical scale ratio */
    u32 num_heads; /* number of active crtcs */
    u32 bytes_per_pixel; /* bytes per pixel display + overlay */
    u32 lb_size; /* line buffer allocated to pipe */
    u32 vtaps; /* vertical scaler taps */
};
static u32 dce6_dram_bandwidth(struct dce6_wm_params *wm)
{
    /* Calculate raw DRAM Bandwidth */
    fixed20_12 dram_efficiency; /* 0.7 */
    fixed20_12 yclk, dram_channels, bandwidth;
    fixed20_12 a;

    a.full = dfixed_const(1000);
    yclk.full = dfixed_const(wm->yclk);
    yclk.full = dfixed_div(yclk, a);
    dram_channels.full = dfixed_const(wm->dram_channels * 4);
    a.full = dfixed_const(10);
    dram_efficiency.full = dfixed_const(7);
    dram_efficiency.full = dfixed_div(dram_efficiency, a);
    bandwidth.full = dfixed_mul(dram_channels, yclk);
    bandwidth.full = dfixed_mul(bandwidth, dram_efficiency);

    return dfixed_trunc(bandwidth);
}
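
/* Rough worked example (values are illustrative only, not from a real
 * board): with wm->yclk = 800000 kHz and 2 dram channels, yclk/1000 = 800
 * and dram_channels * 4 = 8, so the raw figure is 800 * 8 = 6400 and the
 * 0.7 efficiency factor gives dce6_dram_bandwidth() ~= 4480 (MB/s).
 */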
static u32 dce6_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
    /* Calculate DRAM Bandwidth and the part allocated to display. */
    fixed20_12 disp_dram_allocation; /* 0.3 to 0.7 */
    fixed20_12 yclk, dram_channels, bandwidth;
    fixed20_12 a;

    a.full = dfixed_const(1000);
    yclk.full = dfixed_const(wm->yclk);
    yclk.full = dfixed_div(yclk, a);
    dram_channels.full = dfixed_const(wm->dram_channels * 4);
    a.full = dfixed_const(10);
    disp_dram_allocation.full = dfixed_const(3); /* XXX worse case value 0.3 */
    disp_dram_allocation.full = dfixed_div(disp_dram_allocation, a);
    bandwidth.full = dfixed_mul(dram_channels, yclk);
    bandwidth.full = dfixed_mul(bandwidth, disp_dram_allocation);

    return dfixed_trunc(bandwidth);
}
static u32 dce6_data_return_bandwidth(struct dce6_wm_params *wm)
{
    /* Calculate the display Data return Bandwidth */
    fixed20_12 return_efficiency; /* 0.8 */
    fixed20_12 sclk, bandwidth;
    fixed20_12 a;

    a.full = dfixed_const(1000);
    sclk.full = dfixed_const(wm->sclk);
    sclk.full = dfixed_div(sclk, a);
    a.full = dfixed_const(10);
    return_efficiency.full = dfixed_const(8);
    return_efficiency.full = dfixed_div(return_efficiency, a);
    a.full = dfixed_const(32);
    bandwidth.full = dfixed_mul(a, sclk);
    bandwidth.full = dfixed_mul(bandwidth, return_efficiency);

    return dfixed_trunc(bandwidth);
}
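
/* i.e. the return path is modeled as 32 bytes per engine clock derated by
 * the 0.8 efficiency factor above: bandwidth ~= 32 * (sclk / 1000) * 0.8.
 */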
static u32 dce6_get_dmif_bytes_per_request(struct dce6_wm_params *wm)
{
    /* fixed request size; value assumed to match the evergreen DMIF model */
    return 64;
}
static u32 dce6_dmif_request_bandwidth(struct dce6_wm_params *wm)
{
    /* Calculate the DMIF Request Bandwidth */
    fixed20_12 disp_clk_request_efficiency; /* 0.8 */
    fixed20_12 disp_clk, sclk, bandwidth;
    fixed20_12 a, b1, b2;
    u32 min_bandwidth;

    a.full = dfixed_const(1000);
    disp_clk.full = dfixed_const(wm->disp_clk);
    disp_clk.full = dfixed_div(disp_clk, a);
    a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm) / 2);
    b1.full = dfixed_mul(a, disp_clk);

    a.full = dfixed_const(1000);
    sclk.full = dfixed_const(wm->sclk);
    sclk.full = dfixed_div(sclk, a);
    a.full = dfixed_const(dce6_get_dmif_bytes_per_request(wm));
    b2.full = dfixed_mul(a, sclk);

    a.full = dfixed_const(10);
    disp_clk_request_efficiency.full = dfixed_const(8);
    disp_clk_request_efficiency.full = dfixed_div(disp_clk_request_efficiency, a);

    min_bandwidth = min(dfixed_trunc(b1), dfixed_trunc(b2));

    a.full = dfixed_const(min_bandwidth);
    bandwidth.full = dfixed_mul(a, disp_clk_request_efficiency);

    return dfixed_trunc(bandwidth);
}
static u32 dce6_available_bandwidth(struct dce6_wm_params *wm)
{
    /* Calculate the Available bandwidth. Display can use this temporarily but not in average. */
    u32 dram_bandwidth = dce6_dram_bandwidth(wm);
    u32 data_return_bandwidth = dce6_data_return_bandwidth(wm);
    u32 dmif_req_bandwidth = dce6_dmif_request_bandwidth(wm);

    return min(dram_bandwidth, min(data_return_bandwidth, dmif_req_bandwidth));
}
static u32 dce6_average_bandwidth(struct dce6_wm_params *wm)
{
    /* Calculate the display mode Average Bandwidth
     * DisplayMode should contain the source and destination dimensions,
     * timing, etc.
     */
    fixed20_12 bpp;
    fixed20_12 line_time;
    fixed20_12 src_width;
    fixed20_12 bandwidth;
    fixed20_12 a;

    a.full = dfixed_const(1000);
    line_time.full = dfixed_const(wm->active_time + wm->blank_time);
    line_time.full = dfixed_div(line_time, a);
    bpp.full = dfixed_const(wm->bytes_per_pixel);
    src_width.full = dfixed_const(wm->src_width);
    bandwidth.full = dfixed_mul(src_width, bpp);
    bandwidth.full = dfixed_mul(bandwidth, wm->vsc);
    bandwidth.full = dfixed_div(bandwidth, line_time);

    return dfixed_trunc(bandwidth);
}
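
/* i.e. average bandwidth ~= src_width * bytes_per_pixel * vsc / line_time,
 * with line_time = (active_time + blank_time) / 1000.
 */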
static u32 dce6_latency_watermark(struct dce6_wm_params *wm)
{
    /* First calculate the latency in ns */
    u32 mc_latency = 2000; /* 2000 ns. */
    u32 available_bandwidth = dce6_available_bandwidth(wm);
    u32 worst_chunk_return_time = (512 * 8 * 1000) / available_bandwidth;
    u32 cursor_line_pair_return_time = (128 * 4 * 1000) / available_bandwidth;
    u32 dc_latency = 40000000 / wm->disp_clk; /* dc pipe latency */
    u32 other_heads_data_return_time = ((wm->num_heads + 1) * worst_chunk_return_time) +
        (wm->num_heads * cursor_line_pair_return_time);
    u32 latency = mc_latency + other_heads_data_return_time + dc_latency;
    u32 max_src_lines_per_dst_line, lb_fill_bw, line_fill_time;
    u32 tmp, dmif_size = 12288;
    fixed20_12 a, b, c;

    if (wm->num_heads == 0)
        return 0;

    a.full = dfixed_const(2);
    b.full = dfixed_const(1);
    if ((wm->vsc.full > a.full) ||
        ((wm->vsc.full > b.full) && (wm->vtaps >= 3)) ||
        ((wm->vsc.full >= a.full) && wm->interlaced))
        max_src_lines_per_dst_line = 4;
    else
        max_src_lines_per_dst_line = 2;

    a.full = dfixed_const(available_bandwidth);
    b.full = dfixed_const(wm->num_heads);
    a.full = dfixed_div(a, b);

    b.full = dfixed_const(mc_latency + 512);
    c.full = dfixed_const(wm->disp_clk);
    b.full = dfixed_div(b, c);

    c.full = dfixed_const(dmif_size);
    b.full = dfixed_div(c, b);

    tmp = min(dfixed_trunc(a), dfixed_trunc(b));

    b.full = dfixed_const(1000);
    c.full = dfixed_const(wm->disp_clk);
    b.full = dfixed_div(c, b);
    c.full = dfixed_const(wm->bytes_per_pixel);
    b.full = dfixed_mul(b, c);

    lb_fill_bw = min(tmp, dfixed_trunc(b));

    a.full = dfixed_const(max_src_lines_per_dst_line * wm->src_width * wm->bytes_per_pixel);
    b.full = dfixed_const(1000);
    c.full = dfixed_const(lb_fill_bw);
    b.full = dfixed_div(c, b);
    a.full = dfixed_div(a, b);
    line_fill_time = dfixed_trunc(a);

    if (line_fill_time < wm->active_time)
        return latency;
    else
        return latency + (line_fill_time - wm->active_time);
}
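
/* Summary of the above: the watermark is the estimated worst-case request
 * latency (memory latency + dc pipe latency + time for the other heads'
 * chunk and cursor-line returns), plus any extra time needed when the line
 * buffer fill rate cannot keep up with the active display time.
 */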
static bool dce6_average_bandwidth_vs_dram_bandwidth_for_display(struct dce6_wm_params *wm)
{
    if (dce6_average_bandwidth(wm) <=
        (dce6_dram_bandwidth_for_display(wm) / wm->num_heads))
        return true;
    else
        return false;
}

static bool dce6_average_bandwidth_vs_available_bandwidth(struct dce6_wm_params *wm)
{
    if (dce6_average_bandwidth(wm) <=
        (dce6_available_bandwidth(wm) / wm->num_heads))
        return true;
    else
        return false;
}

static bool dce6_check_latency_hiding(struct dce6_wm_params *wm)
{
    u32 lb_partitions = wm->lb_size / wm->src_width;
    u32 line_time = wm->active_time + wm->blank_time;
    u32 latency_tolerant_lines;
    u32 latency_hiding;
    fixed20_12 a;

    a.full = dfixed_const(1);
    if (wm->vsc.full > a.full)
        latency_tolerant_lines = 1;
    else {
        if (lb_partitions <= (wm->vtaps + 1))
            latency_tolerant_lines = 1;
        else
            latency_tolerant_lines = 2;
    }

    latency_hiding = (latency_tolerant_lines * line_time + wm->blank_time);

    if (dce6_latency_watermark(wm) <= latency_hiding)
        return true;
    else
        return false;
}
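
/* These three checks feed dce6_program_watermarks() below: if any of them
 * fails (or disp_priority is forced to 2), display requests are promoted to
 * always-on priority.
 */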
static void dce6_program_watermarks(struct radeon_device *rdev,
                                    struct radeon_crtc *radeon_crtc,
                                    u32 lb_size, u32 num_heads)
{
    struct drm_display_mode *mode = &radeon_crtc->base.mode;
    struct dce6_wm_params wm;
    u32 pixel_period;
    u32 line_time = 0;
    u32 latency_watermark_a = 0, latency_watermark_b = 0;
    u32 priority_a_mark = 0, priority_b_mark = 0;
    u32 priority_a_cnt = PRIORITY_OFF;
    u32 priority_b_cnt = PRIORITY_OFF;
    u32 tmp, arb_control3;
    fixed20_12 a, b, c;

    if (radeon_crtc->base.enabled && num_heads && mode) {
        pixel_period = 1000000 / (u32)mode->clock;
        line_time = min((u32)mode->crtc_htotal * pixel_period, (u32)65535);

        wm.yclk = rdev->pm.current_mclk * 10;
        wm.sclk = rdev->pm.current_sclk * 10;
        wm.disp_clk = mode->clock;
        wm.src_width = mode->crtc_hdisplay;
        wm.active_time = mode->crtc_hdisplay * pixel_period;
        wm.blank_time = line_time - wm.active_time;
        wm.interlaced = false;
        if (mode->flags & DRM_MODE_FLAG_INTERLACE)
            wm.interlaced = true;
        wm.vsc = radeon_crtc->vsc;
        wm.vtaps = 1;
        if (radeon_crtc->rmx_type != RMX_OFF)
            wm.vtaps = 2;
        wm.bytes_per_pixel = 4; /* XXX: get this from fb config */
        wm.lb_size = lb_size;
        if (rdev->family == CHIP_ARUBA)
            wm.dram_channels = evergreen_get_number_of_dram_channels(rdev);
        else
            wm.dram_channels = si_get_number_of_dram_channels(rdev);
        wm.num_heads = num_heads;

        /* set for high clocks */
        latency_watermark_a = min(dce6_latency_watermark(&wm), (u32)65535);
        /* set for low clocks */
        /* wm.yclk = low clk; wm.sclk = low clk */
        latency_watermark_b = min(dce6_latency_watermark(&wm), (u32)65535);

        /* possibly force display priority to high */
        /* should really do this at mode validation time... */
        if (!dce6_average_bandwidth_vs_dram_bandwidth_for_display(&wm) ||
            !dce6_average_bandwidth_vs_available_bandwidth(&wm) ||
            !dce6_check_latency_hiding(&wm) ||
            (rdev->disp_priority == 2)) {
            DRM_DEBUG_KMS("force priority to high\n");
            priority_a_cnt |= PRIORITY_ALWAYS_ON;
            priority_b_cnt |= PRIORITY_ALWAYS_ON;
        }

        a.full = dfixed_const(1000);
        b.full = dfixed_const(mode->clock);
        b.full = dfixed_div(b, a);
        c.full = dfixed_const(latency_watermark_a);
        c.full = dfixed_mul(c, b);
        c.full = dfixed_mul(c, radeon_crtc->hsc);
        c.full = dfixed_div(c, a);
        a.full = dfixed_const(16);
        c.full = dfixed_div(c, a);
        priority_a_mark = dfixed_trunc(c);
        priority_a_cnt |= priority_a_mark & PRIORITY_MARK_MASK;

        a.full = dfixed_const(1000);
        b.full = dfixed_const(mode->clock);
        b.full = dfixed_div(b, a);
        c.full = dfixed_const(latency_watermark_b);
        c.full = dfixed_mul(c, b);
        c.full = dfixed_mul(c, radeon_crtc->hsc);
        c.full = dfixed_div(c, a);
        a.full = dfixed_const(16);
        c.full = dfixed_div(c, a);
        priority_b_mark = dfixed_trunc(c);
        priority_b_cnt |= priority_b_mark & PRIORITY_MARK_MASK;
    }

    /* select watermark A */
    arb_control3 = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
    tmp = arb_control3;
    tmp &= ~LATENCY_WATERMARK_MASK(3);
    tmp |= LATENCY_WATERMARK_MASK(1);
    WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
    WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
           (LATENCY_LOW_WATERMARK(latency_watermark_a) |
            LATENCY_HIGH_WATERMARK(line_time)));
    /* select watermark B */
    tmp = RREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset);
    tmp &= ~LATENCY_WATERMARK_MASK(3);
    tmp |= LATENCY_WATERMARK_MASK(2);
    WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, tmp);
    WREG32(DPG_PIPE_LATENCY_CONTROL + radeon_crtc->crtc_offset,
           (LATENCY_LOW_WATERMARK(latency_watermark_b) |
            LATENCY_HIGH_WATERMARK(line_time)));
    /* restore original selection */
    WREG32(DPG_PIPE_ARBITRATION_CONTROL3 + radeon_crtc->crtc_offset, arb_control3);

    /* write the priority marks */
    WREG32(PRIORITY_A_CNT + radeon_crtc->crtc_offset, priority_a_cnt);
    WREG32(PRIORITY_B_CNT + radeon_crtc->crtc_offset, priority_b_cnt);
}
void dce6_bandwidth_update(struct radeon_device *rdev)
{
    struct drm_display_mode *mode0 = NULL;
    struct drm_display_mode *mode1 = NULL;
    u32 num_heads = 0, lb_size;
    int i;

    radeon_update_display_priority(rdev);

    for (i = 0; i < rdev->num_crtc; i++) {
        if (rdev->mode_info.crtcs[i]->base.enabled)
            num_heads++;
    }
    for (i = 0; i < rdev->num_crtc; i += 2) {
        mode0 = &rdev->mode_info.crtcs[i]->base.mode;
        mode1 = &rdev->mode_info.crtcs[i+1]->base.mode;
        lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i], mode0, mode1);
        dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i], lb_size, num_heads);
        lb_size = dce6_line_buffer_adjust(rdev, rdev->mode_info.crtcs[i+1], mode1, mode0);
        dce6_program_watermarks(rdev, rdev->mode_info.crtcs[i+1], lb_size, num_heads);
    }
}
static void si_tiling_mode_table_init(struct radeon_device *rdev)
{
    const u32 num_tile_mode_states = 32;
    u32 reg_offset, gb_tile_moden, split_equal_to_row_size;

    switch (rdev->config.si.mem_row_size_in_kb) {
    case 1:
        split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_1KB;
        break;
    case 2:
    default:
        split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_2KB;
        break;
    case 4:
        split_equal_to_row_size = ADDR_SURF_TILE_SPLIT_4KB;
        break;
    }

    if ((rdev->family == CHIP_TAHITI) ||
        (rdev->family == CHIP_PITCAIRN)) {
        for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
            switch (reg_offset) {
892 case 0: /* non-AA compressed depth or any compressed stencil */
893 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
894 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING
) |
895 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16
) |
896 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B
) |
897 NUM_BANKS(ADDR_SURF_16_BANK
) |
898 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
899 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
900 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
));
902 case 1: /* 2xAA/4xAA compressed depth only */
903 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
904 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING
) |
905 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16
) |
906 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B
) |
907 NUM_BANKS(ADDR_SURF_16_BANK
) |
908 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
909 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
910 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
));
912 case 2: /* 8xAA compressed depth only */
913 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
914 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING
) |
915 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16
) |
916 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B
) |
917 NUM_BANKS(ADDR_SURF_16_BANK
) |
918 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
919 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
920 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
));
922 case 3: /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
923 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
924 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING
) |
925 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16
) |
926 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B
) |
927 NUM_BANKS(ADDR_SURF_16_BANK
) |
928 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
929 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
930 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
));
932 case 4: /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
933 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THIN1
) |
934 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING
) |
935 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16
) |
936 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B
) |
937 NUM_BANKS(ADDR_SURF_16_BANK
) |
938 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
939 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2
) |
940 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
));
942 case 5: /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
943 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
944 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING
) |
945 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16
) |
946 TILE_SPLIT(split_equal_to_row_size
) |
947 NUM_BANKS(ADDR_SURF_16_BANK
) |
948 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
949 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2
) |
950 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
));
952 case 6: /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
953 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
954 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING
) |
955 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16
) |
956 TILE_SPLIT(split_equal_to_row_size
) |
957 NUM_BANKS(ADDR_SURF_16_BANK
) |
958 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
959 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
960 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1
));
962 case 7: /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
963 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
964 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING
) |
965 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16
) |
966 TILE_SPLIT(split_equal_to_row_size
) |
967 NUM_BANKS(ADDR_SURF_16_BANK
) |
968 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
969 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
970 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
));
972 case 8: /* 1D and 1D Array Surfaces */
973 gb_tile_moden
= (ARRAY_MODE(ARRAY_LINEAR_ALIGNED
) |
974 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING
) |
975 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16
) |
976 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B
) |
977 NUM_BANKS(ADDR_SURF_16_BANK
) |
978 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
979 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2
) |
980 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
));
982 case 9: /* Displayable maps. */
983 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THIN1
) |
984 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING
) |
985 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16
) |
986 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B
) |
987 NUM_BANKS(ADDR_SURF_16_BANK
) |
988 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
989 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2
) |
990 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
));
992 case 10: /* Display 8bpp. */
993 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
994 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING
) |
995 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16
) |
996 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B
) |
997 NUM_BANKS(ADDR_SURF_16_BANK
) |
998 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
999 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
1000 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
));
1002 case 11: /* Display 16bpp. */
1003 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1004 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING
) |
1005 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16
) |
1006 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B
) |
1007 NUM_BANKS(ADDR_SURF_16_BANK
) |
1008 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1009 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2
) |
1010 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
));
1012 case 12: /* Display 32bpp. */
1013 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1014 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING
) |
1015 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16
) |
1016 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B
) |
1017 NUM_BANKS(ADDR_SURF_16_BANK
) |
1018 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1019 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
1020 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1
));
1022 case 13: /* Thin. */
1023 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THIN1
) |
1024 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING
) |
1025 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16
) |
1026 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B
) |
1027 NUM_BANKS(ADDR_SURF_16_BANK
) |
1028 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1029 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2
) |
1030 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
));
1032 case 14: /* Thin 8 bpp. */
1033 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1034 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING
) |
1035 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16
) |
1036 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B
) |
1037 NUM_BANKS(ADDR_SURF_16_BANK
) |
1038 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1039 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
1040 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1
));
1042 case 15: /* Thin 16 bpp. */
1043 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1044 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING
) |
1045 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16
) |
1046 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B
) |
1047 NUM_BANKS(ADDR_SURF_16_BANK
) |
1048 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1049 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2
) |
1050 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1
));
1052 case 16: /* Thin 32 bpp. */
1053 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1054 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING
) |
1055 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16
) |
1056 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B
) |
1057 NUM_BANKS(ADDR_SURF_16_BANK
) |
1058 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1059 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
1060 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1
));
1062 case 17: /* Thin 64 bpp. */
1063 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1064 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING
) |
1065 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16
) |
1066 TILE_SPLIT(split_equal_to_row_size
) |
1067 NUM_BANKS(ADDR_SURF_16_BANK
) |
1068 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1069 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
1070 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1
));
1072 case 21: /* 8 bpp PRT. */
1073 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1074 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING
) |
1075 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16
) |
1076 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B
) |
1077 NUM_BANKS(ADDR_SURF_16_BANK
) |
1078 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2
) |
1079 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
1080 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
));
1082 case 22: /* 16 bpp PRT */
1083 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1084 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING
) |
1085 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16
) |
1086 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B
) |
1087 NUM_BANKS(ADDR_SURF_16_BANK
) |
1088 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1089 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
1090 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
));
1092 case 23: /* 32 bpp PRT */
1093 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1094 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING
) |
1095 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16
) |
1096 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B
) |
1097 NUM_BANKS(ADDR_SURF_16_BANK
) |
1098 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1099 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2
) |
1100 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
));
1102 case 24: /* 64 bpp PRT */
1103 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1104 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING
) |
1105 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16
) |
1106 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B
) |
1107 NUM_BANKS(ADDR_SURF_16_BANK
) |
1108 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1109 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
1110 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
));
1112 case 25: /* 128 bpp PRT */
1113 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1114 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING
) |
1115 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16
) |
1116 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB
) |
1117 NUM_BANKS(ADDR_SURF_8_BANK
) |
1118 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1119 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
1120 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1
));
1126 WREG32(GB_TILE_MODE0
+ (reg_offset
* 4), gb_tile_moden
);
1128 } else if (rdev
->family
== CHIP_VERDE
) {
1129 for (reg_offset
= 0; reg_offset
< num_tile_mode_states
; reg_offset
++) {
1130 switch (reg_offset
) {
1131 case 0: /* non-AA compressed depth or any compressed stencil */
1132 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1133 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING
) |
1134 PIPE_CONFIG(ADDR_SURF_P4_8x16
) |
1135 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B
) |
1136 NUM_BANKS(ADDR_SURF_16_BANK
) |
1137 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1138 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
1139 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
));
1141 case 1: /* 2xAA/4xAA compressed depth only */
1142 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1143 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING
) |
1144 PIPE_CONFIG(ADDR_SURF_P4_8x16
) |
1145 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B
) |
1146 NUM_BANKS(ADDR_SURF_16_BANK
) |
1147 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1148 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
1149 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
));
1151 case 2: /* 8xAA compressed depth only */
1152 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1153 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING
) |
1154 PIPE_CONFIG(ADDR_SURF_P4_8x16
) |
1155 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B
) |
1156 NUM_BANKS(ADDR_SURF_16_BANK
) |
1157 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1158 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
1159 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
));
1161 case 3: /* 2xAA/4xAA compressed depth with stencil (for depth buffer) */
1162 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1163 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING
) |
1164 PIPE_CONFIG(ADDR_SURF_P4_8x16
) |
1165 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B
) |
1166 NUM_BANKS(ADDR_SURF_16_BANK
) |
1167 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1168 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
1169 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
));
1171 case 4: /* Maps w/ a dimension less than the 2D macro-tile dimensions (for mipmapped depth textures) */
1172 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THIN1
) |
1173 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING
) |
1174 PIPE_CONFIG(ADDR_SURF_P4_8x16
) |
1175 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B
) |
1176 NUM_BANKS(ADDR_SURF_16_BANK
) |
1177 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1178 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2
) |
1179 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
));
1181 case 5: /* Uncompressed 16bpp depth - and stencil buffer allocated with it */
1182 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1183 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING
) |
1184 PIPE_CONFIG(ADDR_SURF_P4_8x16
) |
1185 TILE_SPLIT(split_equal_to_row_size
) |
1186 NUM_BANKS(ADDR_SURF_16_BANK
) |
1187 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1188 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2
) |
1189 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
));
1191 case 6: /* Uncompressed 32bpp depth - and stencil buffer allocated with it */
1192 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1193 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING
) |
1194 PIPE_CONFIG(ADDR_SURF_P4_8x16
) |
1195 TILE_SPLIT(split_equal_to_row_size
) |
1196 NUM_BANKS(ADDR_SURF_16_BANK
) |
1197 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1198 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
1199 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
));
1201 case 7: /* Uncompressed 8bpp stencil without depth (drivers typically do not use) */
1202 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1203 MICRO_TILE_MODE(ADDR_SURF_DEPTH_MICRO_TILING
) |
1204 PIPE_CONFIG(ADDR_SURF_P4_8x16
) |
1205 TILE_SPLIT(split_equal_to_row_size
) |
1206 NUM_BANKS(ADDR_SURF_16_BANK
) |
1207 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1208 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
1209 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
));
1211 case 8: /* 1D and 1D Array Surfaces */
1212 gb_tile_moden
= (ARRAY_MODE(ARRAY_LINEAR_ALIGNED
) |
1213 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING
) |
1214 PIPE_CONFIG(ADDR_SURF_P4_8x16
) |
1215 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B
) |
1216 NUM_BANKS(ADDR_SURF_16_BANK
) |
1217 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1218 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2
) |
1219 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
));
1221 case 9: /* Displayable maps. */
1222 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THIN1
) |
1223 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING
) |
1224 PIPE_CONFIG(ADDR_SURF_P4_8x16
) |
1225 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B
) |
1226 NUM_BANKS(ADDR_SURF_16_BANK
) |
1227 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1228 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2
) |
1229 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
));
1231 case 10: /* Display 8bpp. */
1232 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1233 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING
) |
1234 PIPE_CONFIG(ADDR_SURF_P4_8x16
) |
1235 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B
) |
1236 NUM_BANKS(ADDR_SURF_16_BANK
) |
1237 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1238 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
1239 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
));
1241 case 11: /* Display 16bpp. */
1242 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1243 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING
) |
1244 PIPE_CONFIG(ADDR_SURF_P4_8x16
) |
1245 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B
) |
1246 NUM_BANKS(ADDR_SURF_16_BANK
) |
1247 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1248 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2
) |
1249 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
));
1251 case 12: /* Display 32bpp. */
1252 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1253 MICRO_TILE_MODE(ADDR_SURF_DISPLAY_MICRO_TILING
) |
1254 PIPE_CONFIG(ADDR_SURF_P4_8x16
) |
1255 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B
) |
1256 NUM_BANKS(ADDR_SURF_16_BANK
) |
1257 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1258 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
1259 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
));
1261 case 13: /* Thin. */
1262 gb_tile_moden
= (ARRAY_MODE(ARRAY_1D_TILED_THIN1
) |
1263 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING
) |
1264 PIPE_CONFIG(ADDR_SURF_P4_8x16
) |
1265 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B
) |
1266 NUM_BANKS(ADDR_SURF_16_BANK
) |
1267 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1268 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2
) |
1269 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
));
1271 case 14: /* Thin 8 bpp. */
1272 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1273 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING
) |
1274 PIPE_CONFIG(ADDR_SURF_P4_8x16
) |
1275 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B
) |
1276 NUM_BANKS(ADDR_SURF_16_BANK
) |
1277 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1278 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
1279 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
));
1281 case 15: /* Thin 16 bpp. */
1282 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1283 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING
) |
1284 PIPE_CONFIG(ADDR_SURF_P4_8x16
) |
1285 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B
) |
1286 NUM_BANKS(ADDR_SURF_16_BANK
) |
1287 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1288 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2
) |
1289 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
));
1291 case 16: /* Thin 32 bpp. */
1292 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1293 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING
) |
1294 PIPE_CONFIG(ADDR_SURF_P4_8x16
) |
1295 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B
) |
1296 NUM_BANKS(ADDR_SURF_16_BANK
) |
1297 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1298 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
1299 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
));
1301 case 17: /* Thin 64 bpp. */
1302 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1303 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING
) |
1304 PIPE_CONFIG(ADDR_SURF_P4_8x16
) |
1305 TILE_SPLIT(split_equal_to_row_size
) |
1306 NUM_BANKS(ADDR_SURF_16_BANK
) |
1307 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1308 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
1309 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
));
1311 case 21: /* 8 bpp PRT. */
1312 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1313 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING
) |
1314 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16
) |
1315 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B
) |
1316 NUM_BANKS(ADDR_SURF_16_BANK
) |
1317 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_2
) |
1318 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
1319 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
));
1321 case 22: /* 16 bpp PRT */
1322 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1323 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING
) |
1324 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16
) |
1325 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B
) |
1326 NUM_BANKS(ADDR_SURF_16_BANK
) |
1327 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1328 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4
) |
1329 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_4
));
1331 case 23: /* 32 bpp PRT */
1332 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1333 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING
) |
1334 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16
) |
1335 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B
) |
1336 NUM_BANKS(ADDR_SURF_16_BANK
) |
1337 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1338 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2
) |
1339 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
));
1341 case 24: /* 64 bpp PRT */
1342 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1343 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING
) |
1344 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16
) |
1345 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B
) |
1346 NUM_BANKS(ADDR_SURF_16_BANK
) |
1347 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1348 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
1349 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2
));
1351 case 25: /* 128 bpp PRT */
1352 gb_tile_moden
= (ARRAY_MODE(ARRAY_2D_TILED_THIN1
) |
1353 MICRO_TILE_MODE(ADDR_SURF_THIN_MICRO_TILING
) |
1354 PIPE_CONFIG(ADDR_SURF_P8_32x32_8x16
) |
1355 TILE_SPLIT(ADDR_SURF_TILE_SPLIT_1KB
) |
1356 NUM_BANKS(ADDR_SURF_8_BANK
) |
1357 BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1
) |
1358 BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1
) |
1359 MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1
));
1365 WREG32(GB_TILE_MODE0
+ (reg_offset
* 4), gb_tile_moden
);
1368 DRM_ERROR("unknown asic: 0x%x\n", rdev
->family
);
static void si_select_se_sh(struct radeon_device *rdev,
                            u32 se_num, u32 sh_num)
{
    u32 data = INSTANCE_BROADCAST_WRITES;

    if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
        data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
    else if (se_num == 0xffffffff)
        data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
    else if (sh_num == 0xffffffff)
        data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
    else
        data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
    WREG32(GRBM_GFX_INDEX, data);
}
static u32 si_create_bitmask(u32 bit_width)
{
    u32 i, mask = 0;

    for (i = 0; i < bit_width; i++) {
        mask <<= 1;
        mask |= 1;
    }
    return mask;
}

static u32 si_get_cu_enabled(struct radeon_device *rdev, u32 cu_per_sh)
{
    u32 data, mask;

    data = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
    data &= INACTIVE_CUS_MASK;
    data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG);

    data >>= INACTIVE_CUS_SHIFT;

    mask = si_create_bitmask(cu_per_sh);

    return ~data & mask;
}
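
/* e.g. si_create_bitmask(3) yields 0x7: bit_width low-order ones, used here
 * and in si_get_rb_disabled() to keep only the CUs/RBs that can exist per
 * shader array.
 */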
static void si_setup_spi(struct radeon_device *rdev,
                         u32 se_num, u32 sh_per_se,
                         u32 cu_per_sh)
{
    int i, j, k;
    u32 data, mask, active_cu;

    for (i = 0; i < se_num; i++) {
        for (j = 0; j < sh_per_se; j++) {
            si_select_se_sh(rdev, i, j);
            data = RREG32(SPI_STATIC_THREAD_MGMT_3);
            active_cu = si_get_cu_enabled(rdev, cu_per_sh);

            mask = 1;
            for (k = 0; k < 16; k++) {
                mask <<= k;
                if (active_cu & mask) {
                    data &= ~mask;
                    WREG32(SPI_STATIC_THREAD_MGMT_3, data);
                    break;
                }
            }
        }
    }
    si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
}
static u32 si_get_rb_disabled(struct radeon_device *rdev,
                              u32 max_rb_num, u32 se_num,
                              u32 sh_per_se)
{
    u32 data, mask;

    data = RREG32(CC_RB_BACKEND_DISABLE);
    data &= BACKEND_DISABLE_MASK;
    data |= RREG32(GC_USER_RB_BACKEND_DISABLE);

    data >>= BACKEND_DISABLE_SHIFT;

    mask = si_create_bitmask(max_rb_num / se_num / sh_per_se);

    return data & mask;
}

static void si_setup_rb(struct radeon_device *rdev,
                        u32 se_num, u32 sh_per_se,
                        u32 max_rb_num)
{
    int i, j;
    u32 data, mask;
    u32 disabled_rbs = 0;
    u32 enabled_rbs = 0;

    for (i = 0; i < se_num; i++) {
        for (j = 0; j < sh_per_se; j++) {
            si_select_se_sh(rdev, i, j);
            data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
            disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
        }
    }
    si_select_se_sh(rdev, 0xffffffff, 0xffffffff);

    mask = 1;
    for (i = 0; i < max_rb_num; i++) {
        if (!(disabled_rbs & mask))
            enabled_rbs |= mask;
        mask <<= 1;
    }

    for (i = 0; i < se_num; i++) {
        si_select_se_sh(rdev, i, 0xffffffff);
        data = 0;
        for (j = 0; j < sh_per_se; j++) {
            switch (enabled_rbs & 3) {
            case 1:
                data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
                break;
            case 2:
                data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
                break;
            case 3:
            default:
                data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
                break;
            }
            enabled_rbs >>= 2;
        }
        WREG32(PA_SC_RASTER_CONFIG, data);
    }
    si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
}
1511 static void si_gpu_init(struct radeon_device
*rdev
)
1513 u32 gb_addr_config
= 0;
1514 u32 mc_shared_chmap
, mc_arb_ramcfg
;
1516 u32 hdp_host_path_cntl
;
1520 switch (rdev
->family
) {
1522 rdev
->config
.si
.max_shader_engines
= 2;
1523 rdev
->config
.si
.max_tile_pipes
= 12;
1524 rdev
->config
.si
.max_cu_per_sh
= 8;
1525 rdev
->config
.si
.max_sh_per_se
= 2;
1526 rdev
->config
.si
.max_backends_per_se
= 4;
1527 rdev
->config
.si
.max_texture_channel_caches
= 12;
1528 rdev
->config
.si
.max_gprs
= 256;
1529 rdev
->config
.si
.max_gs_threads
= 32;
1530 rdev
->config
.si
.max_hw_contexts
= 8;
1532 rdev
->config
.si
.sc_prim_fifo_size_frontend
= 0x20;
1533 rdev
->config
.si
.sc_prim_fifo_size_backend
= 0x100;
1534 rdev
->config
.si
.sc_hiz_tile_fifo_size
= 0x30;
1535 rdev
->config
.si
.sc_earlyz_tile_fifo_size
= 0x130;
1536 gb_addr_config
= TAHITI_GB_ADDR_CONFIG_GOLDEN
;
1539 rdev
->config
.si
.max_shader_engines
= 2;
1540 rdev
->config
.si
.max_tile_pipes
= 8;
1541 rdev
->config
.si
.max_cu_per_sh
= 5;
1542 rdev
->config
.si
.max_sh_per_se
= 2;
1543 rdev
->config
.si
.max_backends_per_se
= 4;
1544 rdev
->config
.si
.max_texture_channel_caches
= 8;
1545 rdev
->config
.si
.max_gprs
= 256;
1546 rdev
->config
.si
.max_gs_threads
= 32;
1547 rdev
->config
.si
.max_hw_contexts
= 8;
1549 rdev
->config
.si
.sc_prim_fifo_size_frontend
= 0x20;
1550 rdev
->config
.si
.sc_prim_fifo_size_backend
= 0x100;
1551 rdev
->config
.si
.sc_hiz_tile_fifo_size
= 0x30;
1552 rdev
->config
.si
.sc_earlyz_tile_fifo_size
= 0x130;
1553 gb_addr_config
= TAHITI_GB_ADDR_CONFIG_GOLDEN
;
1557 rdev
->config
.si
.max_shader_engines
= 1;
1558 rdev
->config
.si
.max_tile_pipes
= 4;
1559 rdev
->config
.si
.max_cu_per_sh
= 2;
1560 rdev
->config
.si
.max_sh_per_se
= 2;
1561 rdev
->config
.si
.max_backends_per_se
= 4;
1562 rdev
->config
.si
.max_texture_channel_caches
= 4;
1563 rdev
->config
.si
.max_gprs
= 256;
1564 rdev
->config
.si
.max_gs_threads
= 32;
1565 rdev
->config
.si
.max_hw_contexts
= 8;
1567 rdev
->config
.si
.sc_prim_fifo_size_frontend
= 0x20;
1568 rdev
->config
.si
.sc_prim_fifo_size_backend
= 0x40;
1569 rdev
->config
.si
.sc_hiz_tile_fifo_size
= 0x30;
1570 rdev
->config
.si
.sc_earlyz_tile_fifo_size
= 0x130;
1571 gb_addr_config
= VERDE_GB_ADDR_CONFIG_GOLDEN
;
1575 /* Initialize HDP */
1576 for (i
= 0, j
= 0; i
< 32; i
++, j
+= 0x18) {
1577 WREG32((0x2c14 + j
), 0x00000000);
1578 WREG32((0x2c18 + j
), 0x00000000);
1579 WREG32((0x2c1c + j
), 0x00000000);
1580 WREG32((0x2c20 + j
), 0x00000000);
1581 WREG32((0x2c24 + j
), 0x00000000);
1584 WREG32(GRBM_CNTL
, GRBM_READ_TIMEOUT(0xff));
1586 evergreen_fix_pci_max_read_req_size(rdev
);
1588 WREG32(BIF_FB_EN
, FB_READ_EN
| FB_WRITE_EN
);
1590 mc_shared_chmap
= RREG32(MC_SHARED_CHMAP
);
1591 mc_arb_ramcfg
= RREG32(MC_ARB_RAMCFG
);
1593 rdev
->config
.si
.num_tile_pipes
= rdev
->config
.si
.max_tile_pipes
;
1594 rdev
->config
.si
.mem_max_burst_length_bytes
= 256;
1595 tmp
= (mc_arb_ramcfg
& NOOFCOLS_MASK
) >> NOOFCOLS_SHIFT
;
1596 rdev
->config
.si
.mem_row_size_in_kb
= (4 * (1 << (8 + tmp
))) / 1024;
1597 if (rdev
->config
.si
.mem_row_size_in_kb
> 4)
1598 rdev
->config
.si
.mem_row_size_in_kb
= 4;
1599 /* XXX use MC settings? */
1600 rdev
->config
.si
.shader_engine_tile_size
= 32;
1601 rdev
->config
.si
.num_gpus
= 1;
1602 rdev
->config
.si
.multi_gpu_tile_size
= 64;
1604 /* fix up row size */
1605 gb_addr_config
&= ~ROW_SIZE_MASK
;
1606 switch (rdev
->config
.si
.mem_row_size_in_kb
) {
1609 gb_addr_config
|= ROW_SIZE(0);
1612 gb_addr_config
|= ROW_SIZE(1);
1615 gb_addr_config
|= ROW_SIZE(2);
1619 /* setup tiling info dword. gb_addr_config is not adequate since it does
1620 * not have bank info, so create a custom tiling dword.
1621 * bits 3:0 num_pipes
1622 * bits 7:4 num_banks
1623 * bits 11:8 group_size
1624 * bits 15:12 row_size
1626 rdev
->config
.si
.tile_config
= 0;
1627 switch (rdev
->config
.si
.num_tile_pipes
) {
1629 rdev
->config
.si
.tile_config
|= (0 << 0);
1632 rdev
->config
.si
.tile_config
|= (1 << 0);
1635 rdev
->config
.si
.tile_config
|= (2 << 0);
1639 /* XXX what about 12? */
1640 rdev
->config
.si
.tile_config
|= (3 << 0);
1643 switch ((mc_arb_ramcfg
& NOOFBANK_MASK
) >> NOOFBANK_SHIFT
) {
1644 case 0: /* four banks */
1645 rdev
->config
.si
.tile_config
|= 0 << 4;
1647 case 1: /* eight banks */
1648 rdev
->config
.si
.tile_config
|= 1 << 4;
1650 case 2: /* sixteen banks */
1652 rdev
->config
.si
.tile_config
|= 2 << 4;
1655 rdev
->config
.si
.tile_config
|=
1656 ((gb_addr_config
& PIPE_INTERLEAVE_SIZE_MASK
) >> PIPE_INTERLEAVE_SIZE_SHIFT
) << 8;
1657 rdev
->config
.si
.tile_config
|=
1658 ((gb_addr_config
& ROW_SIZE_MASK
) >> ROW_SIZE_SHIFT
) << 12;
1660 WREG32(GB_ADDR_CONFIG
, gb_addr_config
);
1661 WREG32(DMIF_ADDR_CONFIG
, gb_addr_config
);
1662 WREG32(HDP_ADDR_CONFIG
, gb_addr_config
);
1664 si_tiling_mode_table_init(rdev
);
1666 si_setup_rb(rdev
, rdev
->config
.si
.max_shader_engines
,
1667 rdev
->config
.si
.max_sh_per_se
,
1668 rdev
->config
.si
.max_backends_per_se
);
1670 si_setup_spi(rdev
, rdev
->config
.si
.max_shader_engines
,
1671 rdev
->config
.si
.max_sh_per_se
,
1672 rdev
->config
.si
.max_cu_per_sh
);
1675 /* set HW defaults for 3D engine */
1676 WREG32(CP_QUEUE_THRESHOLDS
, (ROQ_IB1_START(0x16) |
1677 ROQ_IB2_START(0x2b)));
1678 WREG32(CP_MEQ_THRESHOLDS
, MEQ1_START(0x30) | MEQ2_START(0x60));
1680 sx_debug_1
= RREG32(SX_DEBUG_1
);
1681 WREG32(SX_DEBUG_1
, sx_debug_1
);
1683 WREG32(SPI_CONFIG_CNTL_1
, VTX_DONE_DELAY(4));
1685 WREG32(PA_SC_FIFO_SIZE
, (SC_FRONTEND_PRIM_FIFO_SIZE(rdev
->config
.si
.sc_prim_fifo_size_frontend
) |
1686 SC_BACKEND_PRIM_FIFO_SIZE(rdev
->config
.si
.sc_prim_fifo_size_backend
) |
1687 SC_HIZ_TILE_FIFO_SIZE(rdev
->config
.si
.sc_hiz_tile_fifo_size
) |
1688 SC_EARLYZ_TILE_FIFO_SIZE(rdev
->config
.si
.sc_earlyz_tile_fifo_size
)));
1690 WREG32(VGT_NUM_INSTANCES
, 1);
1692 WREG32(CP_PERFMON_CNTL
, 0);
1694 WREG32(SQ_CONFIG
, 0);
1696 WREG32(PA_SC_FORCE_EOV_MAX_CNTS
, (FORCE_EOV_MAX_CLK_CNT(4095) |
1697 FORCE_EOV_MAX_REZ_CNT(255)));
1699 WREG32(VGT_CACHE_INVALIDATION
, CACHE_INVALIDATION(VC_AND_TC
) |
1700 AUTO_INVLD_EN(ES_AND_GS_AUTO
));
1702 WREG32(VGT_GS_VERTEX_REUSE
, 16);
1703 WREG32(PA_SC_LINE_STIPPLE_STATE
, 0);
1705 WREG32(CB_PERFCOUNTER0_SELECT0
, 0);
1706 WREG32(CB_PERFCOUNTER0_SELECT1
, 0);
1707 WREG32(CB_PERFCOUNTER1_SELECT0
, 0);
1708 WREG32(CB_PERFCOUNTER1_SELECT1
, 0);
1709 WREG32(CB_PERFCOUNTER2_SELECT0
, 0);
1710 WREG32(CB_PERFCOUNTER2_SELECT1
, 0);
1711 WREG32(CB_PERFCOUNTER3_SELECT0
, 0);
1712 WREG32(CB_PERFCOUNTER3_SELECT1
, 0);
1714 tmp
= RREG32(HDP_MISC_CNTL
);
1715 tmp
|= HDP_FLUSH_INVALIDATE_CACHE
;
1716 WREG32(HDP_MISC_CNTL
, tmp
);
1718 hdp_host_path_cntl
= RREG32(HDP_HOST_PATH_CNTL
);
1719 WREG32(HDP_HOST_PATH_CNTL
, hdp_host_path_cntl
);
1721 WREG32(PA_CL_ENHANCE
, CLIP_VTX_REORDER_ENA
| NUM_CLIP_SEQ(3));
/*
 * GPU scratch registers helpers function.
 */
static void si_scratch_init(struct radeon_device *rdev)
{
    int i;

    rdev->scratch.num_reg = 7;
    rdev->scratch.reg_base = SCRATCH_REG0;
    for (i = 0; i < rdev->scratch.num_reg; i++) {
        rdev->scratch.free[i] = true;
        rdev->scratch.reg[i] = rdev->scratch.reg_base + (i * 4);
    }
}
void si_fence_ring_emit(struct radeon_device *rdev,
                        struct radeon_fence *fence)
{
    struct radeon_ring *ring = &rdev->ring[fence->ring];
    u64 addr = rdev->fence_drv[fence->ring].gpu_addr;

    /* flush read cache over gart */
    radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
    radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
    radeon_ring_write(ring, 0);
    radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
    radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
                      PACKET3_TC_ACTION_ENA |
                      PACKET3_SH_KCACHE_ACTION_ENA |
                      PACKET3_SH_ICACHE_ACTION_ENA);
    radeon_ring_write(ring, 0xFFFFFFFF);
    radeon_ring_write(ring, 0);
    radeon_ring_write(ring, 10); /* poll interval */
    /* EVENT_WRITE_EOP - flush caches, send int */
    radeon_ring_write(ring, PACKET3(PACKET3_EVENT_WRITE_EOP, 4));
    radeon_ring_write(ring, EVENT_TYPE(CACHE_FLUSH_AND_INV_TS_EVENT) | EVENT_INDEX(5));
    radeon_ring_write(ring, addr & 0xffffffff);
    radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | DATA_SEL(1) | INT_SEL(2));
    radeon_ring_write(ring, fence->seq);
    radeon_ring_write(ring, 0);
}
void si_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
    struct radeon_ring *ring = &rdev->ring[ib->ring];
    u32 header;

    if (ib->is_const_ib) {
        /* set switch buffer packet before const IB */
        radeon_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
        radeon_ring_write(ring, 0);

        header = PACKET3(PACKET3_INDIRECT_BUFFER_CONST, 2);
    } else {
        u32 next_rptr;

        if (ring->rptr_save_reg) {
            next_rptr = ring->wptr + 3 + 4 + 8;
            radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
            radeon_ring_write(ring, ((ring->rptr_save_reg -
                                      PACKET3_SET_CONFIG_REG_START) >> 2));
            radeon_ring_write(ring, next_rptr);
        } else if (rdev->wb.enabled) {
            next_rptr = ring->wptr + 5 + 4 + 8;
            radeon_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
            radeon_ring_write(ring, (1 << 8));
            radeon_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
            radeon_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
            radeon_ring_write(ring, next_rptr);
        }

        header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
    }

    radeon_ring_write(ring, header);
    radeon_ring_write(ring,
                      (ib->gpu_addr & 0xFFFFFFFC));
    radeon_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
    radeon_ring_write(ring, ib->length_dw | (ib->vm_id << 24));

    if (!ib->is_const_ib) {
        /* flush read cache over gart for this vmid */
        radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
        radeon_ring_write(ring, (CP_COHER_CNTL2 - PACKET3_SET_CONFIG_REG_START) >> 2);
        radeon_ring_write(ring, ib->vm_id);
        radeon_ring_write(ring, PACKET3(PACKET3_SURFACE_SYNC, 3));
        radeon_ring_write(ring, PACKET3_TCL1_ACTION_ENA |
                          PACKET3_TC_ACTION_ENA |
                          PACKET3_SH_KCACHE_ACTION_ENA |
                          PACKET3_SH_ICACHE_ACTION_ENA);
        radeon_ring_write(ring, 0xFFFFFFFF);
        radeon_ring_write(ring, 0);
        radeon_ring_write(ring, 10); /* poll interval */
    }
}
static void si_cp_enable(struct radeon_device *rdev, bool enable)
{
	if (enable)
		WREG32(CP_ME_CNTL, 0);
	else {
		radeon_ttm_set_active_vram_size(rdev, rdev->mc.visible_vram_size);
		WREG32(CP_ME_CNTL, (CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT));
		WREG32(SCRATCH_UMSK, 0);
	}
}
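/*
 * si_cp_load_microcode - load the PFP, CE and ME microcode into the CP.
 *
 * The CP is halted first, then each ucode image is streamed into its
 * respective ucode RAM one dword at a time, and the write/read addresses
 * are reset to 0 afterwards.
 */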
static int si_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	si_cp_enable(rdev, false);

	/* PFP */
	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < SI_PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_PFP_UCODE_ADDR, 0);

	/* CE */
	fw_data = (const __be32 *)rdev->ce_fw->data;
	WREG32(CP_CE_UCODE_ADDR, 0);
	for (i = 0; i < SI_CE_UCODE_SIZE; i++)
		WREG32(CP_CE_UCODE_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_CE_UCODE_ADDR, 0);

	/* ME */
	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < SI_PM4_UCODE_SIZE; i++)
		WREG32(CP_ME_RAM_DATA, be32_to_cpup(fw_data++));
	WREG32(CP_ME_RAM_WADDR, 0);

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_CE_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);

	return 0;
}
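/*
 * si_cp_start - initialize the CP after the microcode has been loaded.
 *
 * Emits ME_INITIALIZE and the CE partition setup on the gfx ring, enables
 * the CP, replays the default golden context state and then clears the
 * compute context state on the two compute rings.
 */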
static int si_cp_start(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r, i;

	r = radeon_ring_lock(rdev, ring, 7 + 4);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	/* init the CP */
	radeon_ring_write(ring, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(ring, 0x1);
	radeon_ring_write(ring, 0x0);
	radeon_ring_write(ring, rdev->config.si.max_hw_contexts - 1);
	radeon_ring_write(ring, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, 0);

	/* init the CE partitions */
	radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
	radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
	radeon_ring_write(ring, 0xc000);
	radeon_ring_write(ring, 0xe000);
	radeon_ring_unlock_commit(rdev, ring);

	si_cp_enable(rdev, true);

	r = radeon_ring_lock(rdev, ring, si_default_size + 10);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}

	/* setup clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_BEGIN_CLEAR_STATE);

	for (i = 0; i < si_default_size; i++)
		radeon_ring_write(ring, si_default_state[i]);

	radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
	radeon_ring_write(ring, PACKET3_PREAMBLE_END_CLEAR_STATE);

	/* set clear context state */
	radeon_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
	radeon_ring_write(ring, 0);

	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONTEXT_REG, 2));
	radeon_ring_write(ring, 0x00000316);
	radeon_ring_write(ring, 0x0000000e); /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	radeon_ring_write(ring, 0x00000010); /* VGT_OUT_DEALLOC_CNTL */

	radeon_ring_unlock_commit(rdev, ring);

	for (i = RADEON_RING_TYPE_GFX_INDEX; i <= CAYMAN_RING_TYPE_CP2_INDEX; ++i) {
		ring = &rdev->ring[i];
		r = radeon_ring_lock(rdev, ring, 2);

		/* clear the compute context state */
		radeon_ring_write(ring, PACKET3_COMPUTE(PACKET3_CLEAR_STATE, 0));
		radeon_ring_write(ring, 0);

		radeon_ring_unlock_commit(rdev, ring);
	}

	return 0;
}
static void si_cp_fini(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	si_cp_enable(rdev, false);

	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	radeon_ring_fini(rdev, ring);
	radeon_scratch_free(rdev, ring->rptr_save_reg);

	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
	radeon_ring_fini(rdev, ring);
	radeon_scratch_free(rdev, ring->rptr_save_reg);

	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
	radeon_ring_fini(rdev, ring);
	radeon_scratch_free(rdev, ring->rptr_save_reg);
}
static int si_cp_resume(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp; if cp is reset, then PA, SH, VGT also need to be reset */
	WREG32(GRBM_SOFT_RESET, (SOFT_RESET_CP |
				 SOFT_RESET_PA |
				 SOFT_RESET_VGT |
				 SOFT_RESET_SPI |
				 SOFT_RESET_SX));
	RREG32(GRBM_SOFT_RESET);
	WREG32(GRBM_SOFT_RESET, 0);
	RREG32(GRBM_SOFT_RESET);

	WREG32(CP_SEM_WAIT_TIMER, 0x0);
	WREG32(CP_SEM_INCOMPLETE_TIMER_CNTL, 0x0);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	WREG32(CP_DEBUG, 0);
	WREG32(SCRATCH_ADDR, ((rdev->wb.gpu_addr + RADEON_WB_SCRATCH_OFFSET) >> 8) & 0xFFFFFFFF);

	/* ring 0 - compute and gfx */
	/* Set ring buffer size */
	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	rb_bufsz = drm_order(ring->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
	tmp |= BUF_SWAP_32BIT;
	WREG32(CP_RB0_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB0_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB0_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB0_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB0_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP_RPTR_OFFSET) & 0xFF);

	if (rdev->wb.enabled)
		WREG32(SCRATCH_UMSK, 0xff);
	else {
		tmp |= RB_NO_UPDATE;
		WREG32(SCRATCH_UMSK, 0);
	}

	WREG32(CP_RB0_CNTL, tmp);
	WREG32(CP_RB0_BASE, ring->gpu_addr >> 8);

	ring->rptr = RREG32(CP_RB0_RPTR);

	/* ring1 - compute only */
	/* Set ring buffer size */
	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
	rb_bufsz = drm_order(ring->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
	tmp |= BUF_SWAP_32BIT;
	WREG32(CP_RB1_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB1_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB1_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB1_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB1_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP1_RPTR_OFFSET) & 0xFF);

	WREG32(CP_RB1_CNTL, tmp);
	WREG32(CP_RB1_BASE, ring->gpu_addr >> 8);

	ring->rptr = RREG32(CP_RB1_RPTR);

	/* ring2 - compute only */
	/* Set ring buffer size */
	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
	rb_bufsz = drm_order(ring->ring_size / 8);
	tmp = (drm_order(RADEON_GPU_PAGE_SIZE/8) << 8) | rb_bufsz;
	tmp |= BUF_SWAP_32BIT;
	WREG32(CP_RB2_CNTL, tmp);

	/* Initialize the ring buffer's read and write pointers */
	WREG32(CP_RB2_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB2_WPTR, ring->wptr);

	/* set the wb address whether it's enabled or not */
	WREG32(CP_RB2_RPTR_ADDR, (rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(CP_RB2_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + RADEON_WB_CP2_RPTR_OFFSET) & 0xFF);

	WREG32(CP_RB2_CNTL, tmp);
	WREG32(CP_RB2_BASE, ring->gpu_addr >> 8);

	ring->rptr = RREG32(CP_RB2_RPTR);

	/* start the rings */
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = true;
	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = true;
	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = true;
	r = radeon_ring_test(rdev, RADEON_RING_TYPE_GFX_INDEX, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX]);
	if (r) {
		rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
		return r;
	}
	r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP1_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX]);
	if (r)
		rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
	r = radeon_ring_test(rdev, CAYMAN_RING_TYPE_CP2_INDEX, &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX]);
	if (r)
		rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;

	return 0;
}
bool si_gpu_is_lockup(struct radeon_device *rdev, struct radeon_ring *ring)
{
	u32 srbm_status;
	u32 grbm_status, grbm_status2;
	u32 grbm_status_se0, grbm_status_se1;

	srbm_status = RREG32(SRBM_STATUS);
	grbm_status = RREG32(GRBM_STATUS);
	grbm_status2 = RREG32(GRBM_STATUS2);
	grbm_status_se0 = RREG32(GRBM_STATUS_SE0);
	grbm_status_se1 = RREG32(GRBM_STATUS_SE1);
	if (!(grbm_status & GUI_ACTIVE)) {
		radeon_ring_lockup_update(ring);
		return false;
	}
	/* force CP activities */
	radeon_ring_force_activity(rdev, ring);
	return radeon_ring_test_lockup(rdev, ring);
}
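/*
 * si_gpu_soft_reset - soft-reset the gfx blocks after a lockup.
 *
 * Dumps the GRBM/SRBM status registers, stops the memory controller,
 * halts CP fetching, pulses GRBM_SOFT_RESET and then restores the MC.
 * Only acts when the GUI is still marked active.
 */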
static int si_gpu_soft_reset(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 grbm_reset = 0;

	if (!(RREG32(GRBM_STATUS) & GUI_ACTIVE))
		return 0;

	dev_info(rdev->dev, "GPU softreset\n");
	dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
		 RREG32(GRBM_STATUS));
	dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
		 RREG32(GRBM_STATUS2));
	dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
		 RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
		 RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
		 RREG32(SRBM_STATUS));
	evergreen_mc_stop(rdev, &save);
	if (radeon_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(CP_ME_CNTL, CP_ME_HALT | CP_PFP_HALT | CP_CE_HALT);

	/* reset all the gfx blocks */
	grbm_reset = (SOFT_RESET_CP |
		      SOFT_RESET_CB |
		      SOFT_RESET_DB |
		      SOFT_RESET_GDS |
		      SOFT_RESET_PA |
		      SOFT_RESET_SC |
		      SOFT_RESET_BCI |
		      SOFT_RESET_SPI |
		      SOFT_RESET_SX |
		      SOFT_RESET_TC |
		      SOFT_RESET_TA |
		      SOFT_RESET_VGT |
		      SOFT_RESET_IA);
	dev_info(rdev->dev, " GRBM_SOFT_RESET=0x%08X\n", grbm_reset);
	WREG32(GRBM_SOFT_RESET, grbm_reset);
	(void)RREG32(GRBM_SOFT_RESET);
	WREG32(GRBM_SOFT_RESET, 0);
	(void)RREG32(GRBM_SOFT_RESET);
	/* Wait a little for things to settle down */
	dev_info(rdev->dev, " GRBM_STATUS=0x%08X\n",
		 RREG32(GRBM_STATUS));
	dev_info(rdev->dev, " GRBM_STATUS2=0x%08X\n",
		 RREG32(GRBM_STATUS2));
	dev_info(rdev->dev, " GRBM_STATUS_SE0=0x%08X\n",
		 RREG32(GRBM_STATUS_SE0));
	dev_info(rdev->dev, " GRBM_STATUS_SE1=0x%08X\n",
		 RREG32(GRBM_STATUS_SE1));
	dev_info(rdev->dev, " SRBM_STATUS=0x%08X\n",
		 RREG32(SRBM_STATUS));
	evergreen_mc_resume(rdev, &save);
	return 0;
}
int si_asic_reset(struct radeon_device *rdev)
{
	return si_gpu_soft_reset(rdev);
}
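/*
 * si_mc_program - program the memory controller address ranges.
 *
 * Clears the HDP base registers, stops the MC, disables the VGA aperture,
 * points the system aperture at the VRAM range and the scratch page, then
 * resumes the MC and turns off the VGA renderer.
 */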
static void si_mc_program(struct radeon_device *rdev)
{
	struct evergreen_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	evergreen_mc_stop(rdev, &save);
	if (radeon_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       rdev->mc.vram_start >> 12);
	WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       rdev->mc.vram_end >> 12);
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       rdev->vram_scratch.gpu_addr >> 12);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	/* XXX double check these! */
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7) | (1 << 30));
	WREG32(HDP_NONSURFACE_SIZE, 0x3FFFFFFF);
	WREG32(MC_VM_AGP_BASE, 0);
	WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
	WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	if (radeon_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	evergreen_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}
/* SI MC address space is 40 bits */
static void si_vram_location(struct radeon_device *rdev,
			     struct radeon_mc *mc, u64 base)
{
	mc->vram_start = base;
	if (mc->mc_vram_size > (0xFFFFFFFFFFULL - base + 1)) {
		dev_warn(rdev->dev, "limiting VRAM to PCI aperture size\n");
		mc->real_vram_size = mc->aper_size;
		mc->mc_vram_size = mc->aper_size;
	}
	mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
	dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
		 mc->mc_vram_size >> 20, mc->vram_start,
		 mc->vram_end, mc->real_vram_size >> 20);
}
static void si_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
{
	u64 size_af, size_bf;

	size_af = ((0xFFFFFFFFFFULL - mc->vram_end) + mc->gtt_base_align) & ~mc->gtt_base_align;
	size_bf = mc->vram_start & ~mc->gtt_base_align;
	if (size_bf > size_af) {
		if (mc->gtt_size > size_bf) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_bf;
		}
		mc->gtt_start = (mc->vram_start & ~mc->gtt_base_align) - mc->gtt_size;
	} else {
		if (mc->gtt_size > size_af) {
			dev_warn(rdev->dev, "limiting GTT\n");
			mc->gtt_size = size_af;
		}
		mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
	}
	mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
	dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
		 mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
static void si_vram_gtt_location(struct radeon_device *rdev,
				 struct radeon_mc *mc)
{
	if (mc->mc_vram_size > 0xFFC0000000ULL) {
		/* leave room for at least 1024M GTT */
		dev_warn(rdev->dev, "limiting VRAM\n");
		mc->real_vram_size = 0xFFC0000000ULL;
		mc->mc_vram_size = 0xFFC0000000ULL;
	}
	si_vram_location(rdev, &rdev->mc, 0);
	rdev->mc.gtt_base_align = 0;
	si_gtt_location(rdev, mc);
}
static int si_mc_init(struct radeon_device *rdev)
{
	u32 tmp;
	int chansize, numchan;

	/* Get VRAM information */
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(MC_ARB_RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
	} else if (tmp & CHANSIZE_MASK) {
	}
	tmp = RREG32(MC_SHARED_CHMAP);
	switch ((tmp & NOOFCHAN_MASK) >> NOOFCHAN_SHIFT) {
	}
	rdev->mc.vram_width = numchan * chansize;
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = pci_resource_start(rdev->pdev, 0);
	rdev->mc.aper_size = pci_resource_len(rdev->pdev, 0);
	/* size in MB on si */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE) * 1024 * 1024;
	rdev->mc.visible_vram_size = rdev->mc.aper_size;
	si_vram_gtt_location(rdev, &rdev->mc);
	radeon_update_bandwidth_info(rdev);

	return 0;
}
void si_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);

	/* bits 0-15 are the VM contexts0-15 */
	WREG32(VM_INVALIDATE_REQUEST, 1);
}
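/*
 * si_pcie_gart_enable - enable the GART and the 16 VM contexts.
 *
 * Pins the GART page table, programs the L1 TLB and L2 cache controls,
 * points context 0 at the GTT range and contexts 1-15 at the same page
 * table, then flushes the TLBs and marks the GART ready.
 */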
int si_pcie_gart_enable(struct radeon_device *rdev)
{
	int r, i;

	if (rdev->gart.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	radeon_gart_restore(rdev);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL,
	       SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       ENABLE_ADVANCED_DRIVER_MODEL |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE |
	       ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS | INVALIDATE_L2_CACHE);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(0));
	/* setup context0 */
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT0_CNTL2, 0);
	WREG32(VM_CONTEXT0_CNTL, (ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				  RANGE_PROTECTION_FAULT_ENABLE_DEFAULT));

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
	for (i = 1; i < 16; i++) {
		if (i < 8)
			WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
			       rdev->gart.table_addr >> 12);
		else
			WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((i - 8) << 2),
			       rdev->gart.table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(VM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(rdev->dummy_page.addr >> 12));
	WREG32(VM_CONTEXT1_CNTL2, 0);
	WREG32(VM_CONTEXT1_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
	       RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);

	si_pcie_gart_tlb_flush(rdev);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned)(rdev->mc.gtt_size >> 20),
		 (unsigned long long)rdev->gart.table_addr);
	rdev->gart.ready = true;
	return 0;
}
void si_pcie_gart_disable(struct radeon_device *rdev)
{
	/* Disable all tables */
	WREG32(VM_CONTEXT0_CNTL, 0);
	WREG32(VM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	WREG32(MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE_NOT_IN_SYS |
	       SYSTEM_APERTURE_UNMAPPED_ACCESS_PASS_THRU);
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
	       ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE |
	       EFFECTIVE_L2_QUEUE_SIZE(7) |
	       CONTEXT1_IDENTITY_ACCESS_MODE(1));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY |
	       L2_CACHE_BIGK_FRAGMENT_SIZE(0));
	radeon_gart_table_vram_unpin(rdev);
}
void si_pcie_gart_fini(struct radeon_device *rdev)
{
	si_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}
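/*
 * vm IB parsing helpers
 *
 * si_vm_reg_valid() whitelists the config registers that user-space
 * command streams are allowed to touch; the packet3 checkers below use
 * it to validate CE, gfx and compute IBs before they are executed.
 */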
static bool si_vm_reg_valid(u32 reg)
{
	/* context regs are fine */

	/* check config regs */
	switch (reg) {
	case GRBM_GFX_INDEX:
	case CP_STRMOUT_CNTL:
	case VGT_VTX_VECT_EJECT_REG:
	case VGT_CACHE_INVALIDATION:
	case VGT_ESGS_RING_SIZE:
	case VGT_GSVS_RING_SIZE:
	case VGT_GS_VERTEX_REUSE:
	case VGT_PRIMITIVE_TYPE:
	case VGT_INDEX_TYPE:
	case VGT_NUM_INDICES:
	case VGT_NUM_INSTANCES:
	case VGT_TF_RING_SIZE:
	case VGT_HS_OFFCHIP_PARAM:
	case VGT_TF_MEMORY_BASE:
	case PA_SU_LINE_STIPPLE_VALUE:
	case PA_SC_LINE_STIPPLE_STATE:
	case SPI_STATIC_THREAD_MGMT_1:
	case SPI_STATIC_THREAD_MGMT_2:
	case SPI_STATIC_THREAD_MGMT_3:
	case SPI_PS_MAX_WAVE_ID:
	case SPI_CONFIG_CNTL:
	case SPI_CONFIG_CNTL_1:
		return true;
	default:
		DRM_ERROR("Invalid register 0x%x in CS\n", reg);
		return false;
	}
}
static int si_vm_packet3_ce_check(struct radeon_device *rdev,
				  u32 *ib, struct radeon_cs_packet *pkt)
{
	switch (pkt->opcode) {
	case PACKET3_SET_BASE:
	case PACKET3_SET_CE_DE_COUNTERS:
	case PACKET3_LOAD_CONST_RAM:
	case PACKET3_WRITE_CONST_RAM:
	case PACKET3_WRITE_CONST_RAM_OFFSET:
	case PACKET3_DUMP_CONST_RAM:
	case PACKET3_INCREMENT_CE_COUNTER:
	case PACKET3_WAIT_ON_DE_COUNTER:
	case PACKET3_CE_WRITE:
		break;
	default:
		DRM_ERROR("Invalid CE packet3: 0x%x\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
static int si_vm_packet3_gfx_check(struct radeon_device *rdev,
				   u32 *ib, struct radeon_cs_packet *pkt)
{
	u32 idx = pkt->idx + 1;
	u32 idx_value = ib[idx];
	u32 start_reg, end_reg, reg, i;

	switch (pkt->opcode) {
	case PACKET3_SET_BASE:
	case PACKET3_CLEAR_STATE:
	case PACKET3_INDEX_BUFFER_SIZE:
	case PACKET3_DISPATCH_DIRECT:
	case PACKET3_DISPATCH_INDIRECT:
	case PACKET3_ALLOC_GDS:
	case PACKET3_WRITE_GDS_RAM:
	case PACKET3_ATOMIC_GDS:
	case PACKET3_ATOMIC:
	case PACKET3_OCCLUSION_QUERY:
	case PACKET3_SET_PREDICATION:
	case PACKET3_COND_EXEC:
	case PACKET3_PRED_EXEC:
	case PACKET3_DRAW_INDIRECT:
	case PACKET3_DRAW_INDEX_INDIRECT:
	case PACKET3_INDEX_BASE:
	case PACKET3_DRAW_INDEX_2:
	case PACKET3_CONTEXT_CONTROL:
	case PACKET3_INDEX_TYPE:
	case PACKET3_DRAW_INDIRECT_MULTI:
	case PACKET3_DRAW_INDEX_AUTO:
	case PACKET3_DRAW_INDEX_IMMD:
	case PACKET3_NUM_INSTANCES:
	case PACKET3_DRAW_INDEX_MULTI_AUTO:
	case PACKET3_STRMOUT_BUFFER_UPDATE:
	case PACKET3_DRAW_INDEX_OFFSET_2:
	case PACKET3_DRAW_INDEX_MULTI_ELEMENT:
	case PACKET3_DRAW_INDEX_INDIRECT_MULTI:
	case PACKET3_MPEG_INDEX:
	case PACKET3_WAIT_REG_MEM:
	case PACKET3_MEM_WRITE:
	case PACKET3_PFP_SYNC_ME:
	case PACKET3_SURFACE_SYNC:
	case PACKET3_EVENT_WRITE:
	case PACKET3_EVENT_WRITE_EOP:
	case PACKET3_EVENT_WRITE_EOS:
	case PACKET3_SET_CONTEXT_REG:
	case PACKET3_SET_CONTEXT_REG_INDIRECT:
	case PACKET3_SET_SH_REG:
	case PACKET3_SET_SH_REG_OFFSET:
	case PACKET3_INCREMENT_DE_COUNTER:
	case PACKET3_WAIT_ON_CE_COUNTER:
	case PACKET3_WAIT_ON_AVAIL_BUFFER:
	case PACKET3_ME_WRITE:
		break;
	case PACKET3_COPY_DATA:
		if ((idx_value & 0xf00) == 0) {
			reg = ib[idx + 3] * 4;
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_WRITE_DATA:
		if ((idx_value & 0xf00) == 0) {
			start_reg = ib[idx + 1] * 4;
			if (idx_value & 0x10000) {
				if (!si_vm_reg_valid(start_reg))
					return -EINVAL;
			} else {
				for (i = 0; i < (pkt->count - 2); i++) {
					reg = start_reg + (4 * i);
					if (!si_vm_reg_valid(reg))
						return -EINVAL;
				}
			}
		}
		break;
	case PACKET3_COND_WRITE:
		if (idx_value & 0x100) {
			reg = ib[idx + 5] * 4;
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_COPY_DW:
		if (idx_value & 0x2) {
			reg = ib[idx + 3] * 4;
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_SET_CONFIG_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	default:
		DRM_ERROR("Invalid GFX packet3: 0x%x\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
static int si_vm_packet3_compute_check(struct radeon_device *rdev,
				       u32 *ib, struct radeon_cs_packet *pkt)
{
	u32 idx = pkt->idx + 1;
	u32 idx_value = ib[idx];
	u32 start_reg, reg, i;

	switch (pkt->opcode) {
	case PACKET3_SET_BASE:
	case PACKET3_CLEAR_STATE:
	case PACKET3_DISPATCH_DIRECT:
	case PACKET3_DISPATCH_INDIRECT:
	case PACKET3_ALLOC_GDS:
	case PACKET3_WRITE_GDS_RAM:
	case PACKET3_ATOMIC_GDS:
	case PACKET3_ATOMIC:
	case PACKET3_OCCLUSION_QUERY:
	case PACKET3_SET_PREDICATION:
	case PACKET3_COND_EXEC:
	case PACKET3_PRED_EXEC:
	case PACKET3_CONTEXT_CONTROL:
	case PACKET3_STRMOUT_BUFFER_UPDATE:
	case PACKET3_WAIT_REG_MEM:
	case PACKET3_MEM_WRITE:
	case PACKET3_PFP_SYNC_ME:
	case PACKET3_SURFACE_SYNC:
	case PACKET3_EVENT_WRITE:
	case PACKET3_EVENT_WRITE_EOP:
	case PACKET3_EVENT_WRITE_EOS:
	case PACKET3_SET_CONTEXT_REG:
	case PACKET3_SET_CONTEXT_REG_INDIRECT:
	case PACKET3_SET_SH_REG:
	case PACKET3_SET_SH_REG_OFFSET:
	case PACKET3_INCREMENT_DE_COUNTER:
	case PACKET3_WAIT_ON_CE_COUNTER:
	case PACKET3_WAIT_ON_AVAIL_BUFFER:
	case PACKET3_ME_WRITE:
		break;
	case PACKET3_COPY_DATA:
		if ((idx_value & 0xf00) == 0) {
			reg = ib[idx + 3] * 4;
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_WRITE_DATA:
		if ((idx_value & 0xf00) == 0) {
			start_reg = ib[idx + 1] * 4;
			if (idx_value & 0x10000) {
				if (!si_vm_reg_valid(start_reg))
					return -EINVAL;
			} else {
				for (i = 0; i < (pkt->count - 2); i++) {
					reg = start_reg + (4 * i);
					if (!si_vm_reg_valid(reg))
						return -EINVAL;
				}
			}
		}
		break;
	case PACKET3_COND_WRITE:
		if (idx_value & 0x100) {
			reg = ib[idx + 5] * 4;
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	case PACKET3_COPY_DW:
		if (idx_value & 0x2) {
			reg = ib[idx + 3] * 4;
			if (!si_vm_reg_valid(reg))
				return -EINVAL;
		}
		break;
	default:
		DRM_ERROR("Invalid Compute packet3: 0x%x\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
int si_ib_parse(struct radeon_device *rdev, struct radeon_ib *ib)
{
	int ret = 0;
	u32 idx = 0;
	struct radeon_cs_packet pkt;

	do {
		pkt.type = CP_PACKET_GET_TYPE(ib->ptr[idx]);
		pkt.count = CP_PACKET_GET_COUNT(ib->ptr[idx]);
		switch (pkt.type) {
		case PACKET_TYPE0:
			dev_err(rdev->dev, "Packet0 not allowed!\n");
			ret = -EINVAL;
			break;
		case PACKET_TYPE3:
			pkt.opcode = CP_PACKET3_GET_OPCODE(ib->ptr[idx]);
			if (ib->is_const_ib)
				ret = si_vm_packet3_ce_check(rdev, ib->ptr, &pkt);
			else {
				switch (ib->ring) {
				case RADEON_RING_TYPE_GFX_INDEX:
					ret = si_vm_packet3_gfx_check(rdev, ib->ptr, &pkt);
					break;
				case CAYMAN_RING_TYPE_CP1_INDEX:
				case CAYMAN_RING_TYPE_CP2_INDEX:
					ret = si_vm_packet3_compute_check(rdev, ib->ptr, &pkt);
					break;
				default:
					dev_err(rdev->dev, "Non-PM4 ring %d !\n", ib->ring);
					ret = -EINVAL;
					break;
				}
			}
			idx += pkt.count + 2;
			break;
		default:
			dev_err(rdev->dev, "Unknown packet type %d !\n", pkt.type);
			ret = -EINVAL;
			break;
		}
		if (ret)
			break;
	} while (idx < ib->length_dw);

	return ret;
}
int si_vm_init(struct radeon_device *rdev)
{
	rdev->vm_manager.nvm = 16;
	/* base offset of vram pages */
	rdev->vm_manager.vram_base_offset = 0;
	return 0;
}

void si_vm_fini(struct radeon_device *rdev)
{
}

int si_vm_bind(struct radeon_device *rdev, struct radeon_vm *vm, int id)
{
	if (id < 8)
		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (id << 2), vm->pt_gpu_addr >> 12);
	else
		WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((id - 8) << 2),
		       vm->pt_gpu_addr >> 12);
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
	/* bits 0-15 are the VM contexts0-15 */
	WREG32(VM_INVALIDATE_REQUEST, 1 << id);
	return 0;
}

void si_vm_unbind(struct radeon_device *rdev, struct radeon_vm *vm)
{
	if (vm->id < 8)
		WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (vm->id << 2), 0);
	else
		WREG32(VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + ((vm->id - 8) << 2), 0);
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
	/* bits 0-15 are the VM contexts0-15 */
	WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
}

void si_vm_tlb_flush(struct radeon_device *rdev, struct radeon_vm *vm)
{
	/* flush hdp cache */
	WREG32(HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
	/* bits 0-15 are the VM contexts0-15 */
	WREG32(VM_INVALIDATE_REQUEST, 1 << vm->id);
}
void si_rlc_fini(struct radeon_device *rdev)
{
	int r;

	/* save restore block */
	if (rdev->rlc.save_restore_obj) {
		r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
		if (unlikely(r != 0))
			dev_warn(rdev->dev, "(%d) reserve RLC sr bo failed\n", r);
		radeon_bo_unpin(rdev->rlc.save_restore_obj);
		radeon_bo_unreserve(rdev->rlc.save_restore_obj);

		radeon_bo_unref(&rdev->rlc.save_restore_obj);
		rdev->rlc.save_restore_obj = NULL;
	}

	/* clear state block */
	if (rdev->rlc.clear_state_obj) {
		r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
		if (unlikely(r != 0))
			dev_warn(rdev->dev, "(%d) reserve RLC c bo failed\n", r);
		radeon_bo_unpin(rdev->rlc.clear_state_obj);
		radeon_bo_unreserve(rdev->rlc.clear_state_obj);

		radeon_bo_unref(&rdev->rlc.clear_state_obj);
		rdev->rlc.clear_state_obj = NULL;
	}
}
int si_rlc_init(struct radeon_device *rdev)
{
	int r;

	/* save restore block */
	if (rdev->rlc.save_restore_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_VRAM, NULL,
				     &rdev->rlc.save_restore_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create RLC sr bo failed\n", r);
			return r;
		}
	}

	r = radeon_bo_reserve(rdev->rlc.save_restore_obj, false);
	if (unlikely(r != 0)) {
		si_rlc_fini(rdev);
		return r;
	}
	r = radeon_bo_pin(rdev->rlc.save_restore_obj, RADEON_GEM_DOMAIN_VRAM,
			  &rdev->rlc.save_restore_gpu_addr);
	radeon_bo_unreserve(rdev->rlc.save_restore_obj);
	if (r) {
		dev_warn(rdev->dev, "(%d) pin RLC sr bo failed\n", r);
		si_rlc_fini(rdev);
		return r;
	}

	/* clear state block */
	if (rdev->rlc.clear_state_obj == NULL) {
		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE, PAGE_SIZE, true,
				     RADEON_GEM_DOMAIN_VRAM, NULL,
				     &rdev->rlc.clear_state_obj);
		if (r) {
			dev_warn(rdev->dev, "(%d) create RLC c bo failed\n", r);
			return r;
		}
	}
	r = radeon_bo_reserve(rdev->rlc.clear_state_obj, false);
	if (unlikely(r != 0)) {
		si_rlc_fini(rdev);
		return r;
	}
	r = radeon_bo_pin(rdev->rlc.clear_state_obj, RADEON_GEM_DOMAIN_VRAM,
			  &rdev->rlc.clear_state_gpu_addr);
	radeon_bo_unreserve(rdev->rlc.clear_state_obj);
	if (r) {
		dev_warn(rdev->dev, "(%d) pin RLC c bo failed\n", r);
		si_rlc_fini(rdev);
		return r;
	}

	return 0;
}
static void si_rlc_stop(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, 0);
}

static void si_rlc_start(struct radeon_device *rdev)
{
	WREG32(RLC_CNTL, RLC_ENABLE);
}

static int si_rlc_resume(struct radeon_device *rdev)
{
	u32 i;
	const __be32 *fw_data;

	WREG32(RLC_RL_BASE, 0);
	WREG32(RLC_RL_SIZE, 0);
	WREG32(RLC_LB_CNTL, 0);
	WREG32(RLC_LB_CNTR_MAX, 0xffffffff);
	WREG32(RLC_LB_CNTR_INIT, 0);

	WREG32(RLC_SAVE_AND_RESTORE_BASE, rdev->rlc.save_restore_gpu_addr >> 8);
	WREG32(RLC_CLEAR_STATE_RESTORE_BASE, rdev->rlc.clear_state_gpu_addr >> 8);

	WREG32(RLC_MC_CNTL, 0);
	WREG32(RLC_UCODE_CNTL, 0);

	fw_data = (const __be32 *)rdev->rlc_fw->data;
	for (i = 0; i < SI_RLC_UCODE_SIZE; i++) {
		WREG32(RLC_UCODE_ADDR, i);
		WREG32(RLC_UCODE_DATA, be32_to_cpup(fw_data++));
	}
	WREG32(RLC_UCODE_ADDR, 0);

	si_rlc_start(rdev);

	return 0;
}
static void si_enable_interrupts(struct radeon_device *rdev)
{
	u32 ih_cntl = RREG32(IH_CNTL);
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);

	ih_cntl |= ENABLE_INTR;
	ih_rb_cntl |= IH_RB_ENABLE;
	WREG32(IH_CNTL, ih_cntl);
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	rdev->ih.enabled = true;
}

static void si_disable_interrupts(struct radeon_device *rdev)
{
	u32 ih_rb_cntl = RREG32(IH_RB_CNTL);
	u32 ih_cntl = RREG32(IH_CNTL);

	ih_rb_cntl &= ~IH_RB_ENABLE;
	ih_cntl &= ~ENABLE_INTR;
	WREG32(IH_RB_CNTL, ih_rb_cntl);
	WREG32(IH_CNTL, ih_cntl);
	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);
	rdev->ih.enabled = false;
}
static void si_disable_interrupt_state(struct radeon_device *rdev)
{
	u32 tmp;

	WREG32(CP_INT_CNTL_RING0, CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE);
	WREG32(CP_INT_CNTL_RING1, 0);
	WREG32(CP_INT_CNTL_RING2, 0);
	WREG32(GRBM_INT_CNTL, 0);
	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (rdev->num_crtc >= 4) {
		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}

	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
	if (rdev->num_crtc >= 4) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
	}

	WREG32(DACA_AUTODETECT_INT_CONTROL, 0);

	tmp = RREG32(DC_HPD1_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD1_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD2_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD2_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD3_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD3_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD4_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD4_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD5_INT_CONTROL, tmp);
	tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
	WREG32(DC_HPD6_INT_CONTROL, tmp);
}
static int si_irq_init(struct radeon_device *rdev)
{
	int ret = 0;
	int rb_bufsz;
	u32 interrupt_cntl, ih_cntl, ih_rb_cntl;

	/* allocate ring */
	ret = r600_ih_ring_alloc(rdev);
	if (ret)
		return ret;

	/* disable irqs */
	si_disable_interrupts(rdev);

	/* init rlc */
	ret = si_rlc_resume(rdev);
	if (ret) {
		r600_ih_ring_fini(rdev);
		return ret;
	}

	/* setup interrupt control */
	/* set dummy read address to ring address */
	WREG32(INTERRUPT_CNTL2, rdev->ih.gpu_addr >> 8);
	interrupt_cntl = RREG32(INTERRUPT_CNTL);
	/* IH_DUMMY_RD_OVERRIDE=0 - dummy read disabled with msi, enabled without msi
	 * IH_DUMMY_RD_OVERRIDE=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl &= ~IH_DUMMY_RD_OVERRIDE;
	/* IH_REQ_NONSNOOP_EN=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl &= ~IH_REQ_NONSNOOP_EN;
	WREG32(INTERRUPT_CNTL, interrupt_cntl);

	WREG32(IH_RB_BASE, rdev->ih.gpu_addr >> 8);
	rb_bufsz = drm_order(rdev->ih.ring_size / 4);

	ih_rb_cntl = (IH_WPTR_OVERFLOW_ENABLE |
		      IH_WPTR_OVERFLOW_CLEAR |
		      (rb_bufsz << 1));

	if (rdev->wb.enabled)
		ih_rb_cntl |= IH_WPTR_WRITEBACK_ENABLE;

	/* set the writeback address whether it's enabled or not */
	WREG32(IH_RB_WPTR_ADDR_LO, (rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFFFFFFFC);
	WREG32(IH_RB_WPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + R600_WB_IH_WPTR_OFFSET) & 0xFF);

	WREG32(IH_RB_CNTL, ih_rb_cntl);

	/* set rptr, wptr to 0 */
	WREG32(IH_RB_RPTR, 0);
	WREG32(IH_RB_WPTR, 0);

	/* Default settings for IH_CNTL (disabled at first) */
	ih_cntl = MC_WRREQ_CREDIT(0x10) | MC_WR_CLEAN_CNT(0x10) | MC_VMID(0);
	/* RPTR_REARM only works if msi's are enabled */
	if (rdev->msi_enabled)
		ih_cntl |= RPTR_REARM;
	WREG32(IH_CNTL, ih_cntl);

	/* force the active interrupt state to all disabled */
	si_disable_interrupt_state(rdev);

	pci_set_master(rdev->pdev);

	/* enable irqs */
	si_enable_interrupts(rdev);

	return ret;
}
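/*
 * si_irq_set - program the interrupt mask registers.
 *
 * Builds the CP ring, vblank/pageflip, hotplug and GUI-idle enable bits
 * from the rdev->irq state and writes them to the CP, GRBM, crtc and
 * DC_HPD interrupt control registers.
 */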
int si_irq_set(struct radeon_device *rdev)
{
	u32 cp_int_cntl = CNTX_BUSY_INT_ENABLE | CNTX_EMPTY_INT_ENABLE;
	u32 cp_int_cntl1 = 0, cp_int_cntl2 = 0;
	u32 crtc1 = 0, crtc2 = 0, crtc3 = 0, crtc4 = 0, crtc5 = 0, crtc6 = 0;
	u32 hpd1, hpd2, hpd3, hpd4, hpd5, hpd6;
	u32 grbm_int_cntl = 0;
	u32 grph1 = 0, grph2 = 0, grph3 = 0, grph4 = 0, grph5 = 0, grph6 = 0;

	if (!rdev->irq.installed) {
		WARN(1, "Can't enable IRQ/MSI because no handler is installed\n");
		return -EINVAL;
	}
	/* don't enable anything if the ih is disabled */
	if (!rdev->ih.enabled) {
		si_disable_interrupts(rdev);
		/* force the active interrupt state to all disabled */
		si_disable_interrupt_state(rdev);
		return 0;
	}

	hpd1 = RREG32(DC_HPD1_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd2 = RREG32(DC_HPD2_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd3 = RREG32(DC_HPD3_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd4 = RREG32(DC_HPD4_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd5 = RREG32(DC_HPD5_INT_CONTROL) & ~DC_HPDx_INT_EN;
	hpd6 = RREG32(DC_HPD6_INT_CONTROL) & ~DC_HPDx_INT_EN;

	/* enable CP interrupts on all rings */
	if (atomic_read(&rdev->irq.ring_int[RADEON_RING_TYPE_GFX_INDEX])) {
		DRM_DEBUG("si_irq_set: sw int gfx\n");
		cp_int_cntl |= TIME_STAMP_INT_ENABLE;
	}
	if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP1_INDEX])) {
		DRM_DEBUG("si_irq_set: sw int cp1\n");
		cp_int_cntl1 |= TIME_STAMP_INT_ENABLE;
	}
	if (atomic_read(&rdev->irq.ring_int[CAYMAN_RING_TYPE_CP2_INDEX])) {
		DRM_DEBUG("si_irq_set: sw int cp2\n");
		cp_int_cntl2 |= TIME_STAMP_INT_ENABLE;
	}
	if (rdev->irq.crtc_vblank_int[0] ||
	    atomic_read(&rdev->irq.pflip[0])) {
		DRM_DEBUG("si_irq_set: vblank 0\n");
		crtc1 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[1] ||
	    atomic_read(&rdev->irq.pflip[1])) {
		DRM_DEBUG("si_irq_set: vblank 1\n");
		crtc2 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[2] ||
	    atomic_read(&rdev->irq.pflip[2])) {
		DRM_DEBUG("si_irq_set: vblank 2\n");
		crtc3 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[3] ||
	    atomic_read(&rdev->irq.pflip[3])) {
		DRM_DEBUG("si_irq_set: vblank 3\n");
		crtc4 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[4] ||
	    atomic_read(&rdev->irq.pflip[4])) {
		DRM_DEBUG("si_irq_set: vblank 4\n");
		crtc5 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.crtc_vblank_int[5] ||
	    atomic_read(&rdev->irq.pflip[5])) {
		DRM_DEBUG("si_irq_set: vblank 5\n");
		crtc6 |= VBLANK_INT_MASK;
	}
	if (rdev->irq.hpd[0]) {
		DRM_DEBUG("si_irq_set: hpd 1\n");
		hpd1 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[1]) {
		DRM_DEBUG("si_irq_set: hpd 2\n");
		hpd2 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[2]) {
		DRM_DEBUG("si_irq_set: hpd 3\n");
		hpd3 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[3]) {
		DRM_DEBUG("si_irq_set: hpd 4\n");
		hpd4 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[4]) {
		DRM_DEBUG("si_irq_set: hpd 5\n");
		hpd5 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.hpd[5]) {
		DRM_DEBUG("si_irq_set: hpd 6\n");
		hpd6 |= DC_HPDx_INT_EN;
	}
	if (rdev->irq.gui_idle) {
		DRM_DEBUG("gui idle\n");
		grbm_int_cntl |= GUI_IDLE_INT_ENABLE;
	}

	WREG32(CP_INT_CNTL_RING0, cp_int_cntl);
	WREG32(CP_INT_CNTL_RING1, cp_int_cntl1);
	WREG32(CP_INT_CNTL_RING2, cp_int_cntl2);

	WREG32(GRBM_INT_CNTL, grbm_int_cntl);

	WREG32(INT_MASK + EVERGREEN_CRTC0_REGISTER_OFFSET, crtc1);
	WREG32(INT_MASK + EVERGREEN_CRTC1_REGISTER_OFFSET, crtc2);
	if (rdev->num_crtc >= 4) {
		WREG32(INT_MASK + EVERGREEN_CRTC2_REGISTER_OFFSET, crtc3);
		WREG32(INT_MASK + EVERGREEN_CRTC3_REGISTER_OFFSET, crtc4);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(INT_MASK + EVERGREEN_CRTC4_REGISTER_OFFSET, crtc5);
		WREG32(INT_MASK + EVERGREEN_CRTC5_REGISTER_OFFSET, crtc6);
	}

	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, grph1);
	WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, grph2);
	if (rdev->num_crtc >= 4) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, grph3);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, grph4);
	}
	if (rdev->num_crtc >= 6) {
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, grph5);
		WREG32(GRPH_INT_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, grph6);
	}

	WREG32(DC_HPD1_INT_CONTROL, hpd1);
	WREG32(DC_HPD2_INT_CONTROL, hpd2);
	WREG32(DC_HPD3_INT_CONTROL, hpd3);
	WREG32(DC_HPD4_INT_CONTROL, hpd4);
	WREG32(DC_HPD5_INT_CONTROL, hpd5);
	WREG32(DC_HPD6_INT_CONTROL, hpd6);

	return 0;
}
static inline void si_irq_ack(struct radeon_device *rdev)
{
	u32 tmp;

	rdev->irq.stat_regs.evergreen.disp_int = RREG32(DISP_INTERRUPT_STATUS);
	rdev->irq.stat_regs.evergreen.disp_int_cont = RREG32(DISP_INTERRUPT_STATUS_CONTINUE);
	rdev->irq.stat_regs.evergreen.disp_int_cont2 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE2);
	rdev->irq.stat_regs.evergreen.disp_int_cont3 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE3);
	rdev->irq.stat_regs.evergreen.disp_int_cont4 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE4);
	rdev->irq.stat_regs.evergreen.disp_int_cont5 = RREG32(DISP_INTERRUPT_STATUS_CONTINUE5);
	rdev->irq.stat_regs.evergreen.d1grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET);
	rdev->irq.stat_regs.evergreen.d2grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET);
	if (rdev->num_crtc >= 4) {
		rdev->irq.stat_regs.evergreen.d3grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET);
		rdev->irq.stat_regs.evergreen.d4grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET);
	}
	if (rdev->num_crtc >= 6) {
		rdev->irq.stat_regs.evergreen.d5grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET);
		rdev->irq.stat_regs.evergreen.d6grph_int = RREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET);
	}

	if (rdev->irq.stat_regs.evergreen.d1grph_int & GRPH_PFLIP_INT_OCCURRED)
		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.evergreen.d2grph_int & GRPH_PFLIP_INT_OCCURRED)
		WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT)
		WREG32(VBLANK_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VBLANK_ACK);
	if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT)
		WREG32(VLINE_STATUS + EVERGREEN_CRTC0_REGISTER_OFFSET, VLINE_ACK);
	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT)
		WREG32(VBLANK_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VBLANK_ACK);
	if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT)
		WREG32(VLINE_STATUS + EVERGREEN_CRTC1_REGISTER_OFFSET, VLINE_ACK);

	if (rdev->num_crtc >= 4) {
		if (rdev->irq.stat_regs.evergreen.d3grph_int & GRPH_PFLIP_INT_OCCURRED)
			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
		if (rdev->irq.stat_regs.evergreen.d4grph_int & GRPH_PFLIP_INT_OCCURRED)
			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT)
			WREG32(VBLANK_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT)
			WREG32(VLINE_STATUS + EVERGREEN_CRTC2_REGISTER_OFFSET, VLINE_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT)
			WREG32(VBLANK_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT)
			WREG32(VLINE_STATUS + EVERGREEN_CRTC3_REGISTER_OFFSET, VLINE_ACK);
	}

	if (rdev->num_crtc >= 6) {
		if (rdev->irq.stat_regs.evergreen.d5grph_int & GRPH_PFLIP_INT_OCCURRED)
			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
		if (rdev->irq.stat_regs.evergreen.d6grph_int & GRPH_PFLIP_INT_OCCURRED)
			WREG32(GRPH_INT_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, GRPH_PFLIP_INT_CLEAR);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT)
			WREG32(VBLANK_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT)
			WREG32(VLINE_STATUS + EVERGREEN_CRTC4_REGISTER_OFFSET, VLINE_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT)
			WREG32(VBLANK_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VBLANK_ACK);
		if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT)
			WREG32(VLINE_STATUS + EVERGREEN_CRTC5_REGISTER_OFFSET, VLINE_ACK);
	}

	if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
		tmp = RREG32(DC_HPD1_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD1_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
		tmp = RREG32(DC_HPD2_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD2_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
		tmp = RREG32(DC_HPD3_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD3_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
		tmp = RREG32(DC_HPD4_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD4_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
		tmp = RREG32(DC_HPD5_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD5_INT_CONTROL, tmp);
	}
	if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
		tmp = RREG32(DC_HPD6_INT_CONTROL);
		tmp |= DC_HPDx_INT_ACK;
		WREG32(DC_HPD6_INT_CONTROL, tmp);
	}
}
static void si_irq_disable(struct radeon_device *rdev)
{
	si_disable_interrupts(rdev);
	/* Wait and acknowledge irq */
	si_disable_interrupt_state(rdev);
}

static void si_irq_suspend(struct radeon_device *rdev)
{
	si_irq_disable(rdev);
}

static void si_irq_fini(struct radeon_device *rdev)
{
	si_irq_suspend(rdev);
	r600_ih_ring_fini(rdev);
}
static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
{
	u32 wptr, tmp;

	if (rdev->wb.enabled)
		wptr = le32_to_cpu(rdev->wb.wb[R600_WB_IH_WPTR_OFFSET/4]);
	else
		wptr = RREG32(IH_RB_WPTR);

	if (wptr & RB_OVERFLOW) {
		/* When a ring buffer overflow happens, start parsing interrupts
		 * from the last not-overwritten vector (wptr + 16). Hopefully
		 * this should allow us to catch up.
		 */
		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
			 wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
		tmp = RREG32(IH_RB_CNTL);
		tmp |= IH_WPTR_OVERFLOW_CLEAR;
		WREG32(IH_RB_CNTL, tmp);
	}
	return (wptr & rdev->ih.ptr_mask);
}
/* Each IV ring entry is 128 bits:
 * [7:0]    - interrupt source id
 * [59:32]  - interrupt source data
 * [63:60]  - reserved
 * [127:80] - reserved
 */
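/*
 * si_irq_process - main interrupt handler.
 *
 * Walks the IH ring between rptr and wptr and dispatches each vector:
 * crtc vblank/vline events, HPD hotplug, CP ring interrupts and CP EOP
 * events (per ring id), and GUI idle. Hotplug work is scheduled after
 * the loop and the new rptr is written back to the hardware.
 */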
int si_irq_process(struct radeon_device *rdev)
{
	u32 wptr;
	u32 rptr;
	u32 src_id, src_data, ring_id;
	u32 ring_index;
	bool queue_hotplug = false;

	if (!rdev->ih.enabled || rdev->shutdown)
		return IRQ_NONE;

	wptr = si_get_ih_wptr(rdev);

	/* is somebody else already processing irqs? */
	if (atomic_xchg(&rdev->ih.lock, 1))
		return IRQ_NONE;

	rptr = rdev->ih.rptr;
	DRM_DEBUG("si_irq_process start: rptr %d, wptr %d\n", rptr, wptr);

	/* Order reading of wptr vs. reading of IH ring data */

	/* display interrupts */

	while (rptr != wptr) {
		/* wptr/rptr are in bytes! */
		ring_index = rptr / 4;
		src_id = le32_to_cpu(rdev->ih.ring[ring_index]) & 0xff;
		src_data = le32_to_cpu(rdev->ih.ring[ring_index + 1]) & 0xfffffff;
		ring_id = le32_to_cpu(rdev->ih.ring[ring_index + 2]) & 0xff;

		switch (src_id) {
		case 1: /* D1 vblank/vline */
			switch (src_data) {
			case 0: /* D1 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[0]) {
						drm_handle_vblank(rdev->ddev, 0);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[0]))
						radeon_crtc_handle_flip(rdev, 0);
					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D1 vblank\n");
				}
				break;
			case 1: /* D1 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int & LB_D1_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int &= ~LB_D1_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D1 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 2: /* D2 vblank/vline */
			switch (src_data) {
			case 0: /* D2 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[1]) {
						drm_handle_vblank(rdev->ddev, 1);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[1]))
						radeon_crtc_handle_flip(rdev, 1);
					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D2 vblank\n");
				}
				break;
			case 1: /* D2 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont & LB_D2_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~LB_D2_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D2 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 3: /* D3 vblank/vline */
			switch (src_data) {
			case 0: /* D3 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[2]) {
						drm_handle_vblank(rdev->ddev, 2);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[2]))
						radeon_crtc_handle_flip(rdev, 2);
					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D3 vblank\n");
				}
				break;
			case 1: /* D3 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & LB_D3_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~LB_D3_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D3 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 4: /* D4 vblank/vline */
			switch (src_data) {
			case 0: /* D4 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[3]) {
						drm_handle_vblank(rdev->ddev, 3);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[3]))
						radeon_crtc_handle_flip(rdev, 3);
					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D4 vblank\n");
				}
				break;
			case 1: /* D4 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & LB_D4_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~LB_D4_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D4 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 5: /* D5 vblank/vline */
			switch (src_data) {
			case 0: /* D5 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[4]) {
						drm_handle_vblank(rdev->ddev, 4);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[4]))
						radeon_crtc_handle_flip(rdev, 4);
					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D5 vblank\n");
				}
				break;
			case 1: /* D5 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & LB_D5_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~LB_D5_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D5 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 6: /* D6 vblank/vline */
			switch (src_data) {
			case 0: /* D6 vblank */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VBLANK_INTERRUPT) {
					if (rdev->irq.crtc_vblank_int[5]) {
						drm_handle_vblank(rdev->ddev, 5);
						rdev->pm.vblank_sync = true;
						wake_up(&rdev->irq.vblank_queue);
					}
					if (atomic_read(&rdev->irq.pflip[5]))
						radeon_crtc_handle_flip(rdev, 5);
					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VBLANK_INTERRUPT;
					DRM_DEBUG("IH: D6 vblank\n");
				}
				break;
			case 1: /* D6 vline */
				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & LB_D6_VLINE_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~LB_D6_VLINE_INTERRUPT;
					DRM_DEBUG("IH: D6 vline\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 42: /* HPD hotplug */
			switch (src_data) {
			case 0:
				if (rdev->irq.stat_regs.evergreen.disp_int & DC_HPD1_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int &= ~DC_HPD1_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD1\n");
				}
				break;
			case 1:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont & DC_HPD2_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont &= ~DC_HPD2_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD2\n");
				}
				break;
			case 2:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont2 & DC_HPD3_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont2 &= ~DC_HPD3_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD3\n");
				}
				break;
			case 3:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont3 & DC_HPD4_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont3 &= ~DC_HPD4_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD4\n");
				}
				break;
			case 4:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont4 & DC_HPD5_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont4 &= ~DC_HPD5_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD5\n");
				}
				break;
			case 5:
				if (rdev->irq.stat_regs.evergreen.disp_int_cont5 & DC_HPD6_INTERRUPT) {
					rdev->irq.stat_regs.evergreen.disp_int_cont5 &= ~DC_HPD6_INTERRUPT;
					queue_hotplug = true;
					DRM_DEBUG("IH: HPD6\n");
				}
				break;
			default:
				DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
				break;
			}
			break;
		case 176: /* RINGID0 CP_INT */
			radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
			break;
		case 177: /* RINGID1 CP_INT */
			radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
			break;
		case 178: /* RINGID2 CP_INT */
			radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
			break;
		case 181: /* CP EOP event */
			DRM_DEBUG("IH: CP EOP\n");
			switch (ring_id) {
			case 0:
				radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
				break;
			case 1:
				radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
				break;
			case 2:
				radeon_fence_process(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
				break;
			}
			break;
		case 233: /* GUI IDLE */
			DRM_DEBUG("IH: GUI idle\n");
			wake_up(&rdev->irq.idle_queue);
			break;
		default:
			DRM_DEBUG("Unhandled interrupt: %d %d\n", src_id, src_data);
			break;
		}

		/* wptr/rptr are in bytes! */
		rptr += 16;
		rptr &= rdev->ih.ptr_mask;
	}
	if (queue_hotplug)
		schedule_work(&rdev->hotplug_work);
	rdev->ih.rptr = rptr;
	WREG32(IH_RB_RPTR, rdev->ih.rptr);
	atomic_set(&rdev->ih.lock, 0);

	/* make sure wptr hasn't changed while processing */
	wptr = si_get_ih_wptr(rdev);

	return IRQ_HANDLED;
}
/*
 * startup/shutdown callbacks
 */
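/*
 * si_startup - bring the ASIC up.
 *
 * Loads the CP/RLC/MC microcode if needed, programs the MC and GART,
 * sets up the blitter, RLC buffers, writeback page, fence drivers and
 * interrupts, initializes the three CP rings and finally the IB pool
 * and the VM manager.
 */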
static int si_startup(struct radeon_device *rdev)
{
	struct radeon_ring *ring;
	int r;

	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
	    !rdev->rlc_fw || !rdev->mc_fw) {
		r = si_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	r = si_mc_load_microcode(rdev);
	if (r) {
		DRM_ERROR("Failed to load MC firmware!\n");
		return r;
	}

	r = r600_vram_scratch_init(rdev);
	if (r)
		return r;

	si_mc_program(rdev);
	r = si_pcie_gart_enable(rdev);
	if (r)
		return r;

	r = evergreen_blit_init(rdev);
	if (r) {
		r600_blit_fini(rdev);
		rdev->asic->copy = NULL;
		dev_warn(rdev->dev, "failed blitter (%d) falling back to memcpy\n", r);
	}

	/* allocate rlc buffers */
	r = si_rlc_init(rdev);
	if (r) {
		DRM_ERROR("Failed to init rlc BOs!\n");
		return r;
	}

	/* allocate wb buffer */
	r = radeon_wb_init(rdev);
	if (r)
		return r;

	r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP1_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = radeon_fence_driver_start_ring(rdev, CAYMAN_RING_TYPE_CP2_INDEX);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
		return r;
	}

	r = si_irq_init(rdev);
	if (r) {
		DRM_ERROR("radeon: IH init failed (%d).\n", r);
		radeon_irq_kms_fini(rdev);
		return r;
	}
	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP_RPTR_OFFSET,
			     CP_RB0_RPTR, CP_RB0_WPTR,
			     0, 0xfffff, RADEON_CP_PACKET2);
	if (r)
		return r;

	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP1_RPTR_OFFSET,
			     CP_RB1_RPTR, CP_RB1_WPTR,
			     0, 0xfffff, RADEON_CP_PACKET2);
	if (r)
		return r;

	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
	r = radeon_ring_init(rdev, ring, ring->ring_size, RADEON_WB_CP2_RPTR_OFFSET,
			     CP_RB2_RPTR, CP_RB2_WPTR,
			     0, 0xfffff, RADEON_CP_PACKET2);
	if (r)
		return r;

	r = si_cp_load_microcode(rdev);
	if (r)
		return r;
	r = si_cp_resume(rdev);
	if (r)
		return r;

	r = radeon_ib_pool_init(rdev);
	if (r) {
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
		return r;
	}

	r = radeon_vm_manager_init(rdev);
	if (r) {
		dev_err(rdev->dev, "vm manager initialization failed (%d).\n", r);
		return r;
	}

	return 0;
}
int si_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset GPU before posting, on rv770 hw unlike on r500 hw,
	 * posting will perform necessary task to bring back GPU into good
	 * shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);

	rdev->accel_working = true;
	r = si_startup(rdev);
	if (r) {
		DRM_ERROR("si startup failed on resume\n");
		rdev->accel_working = false;
		return r;
	}

	return r;
}
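/**
 * si_suspend - quiesce the asic before suspend (SI).
 *
 * @rdev: radeon_device pointer
 *
 * Disables the CP, marks the rings as not ready, suspends interrupts,
 * disables writeback and tears down the GART mapping.
 */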
int si_suspend(struct radeon_device *rdev)
{
	si_cp_enable(rdev, false);
	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ready = false;
	rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX].ready = false;
	rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX].ready = false;
	si_irq_suspend(rdev);
	radeon_wb_disable(rdev);
	si_pcie_gart_disable(rdev);

	return 0;
}
/* Plan is to move initialization into this function and use
 * helper functions so that radeon_device_init does pretty much
 * nothing more than call the asic-specific functions. This
 * should also allow us to remove a bunch of callback functions
 * like vram_info.
 */
int si_init(struct radeon_device *rdev)
{
	struct radeon_ring *ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	int r;

	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for cayman GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;

	/* Post card if necessary */
	if (!radeon_card_posted(rdev)) {
		if (!rdev->bios) {
			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
			return -EINVAL;
		}
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	si_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	/* Initialize clocks */
	radeon_get_clock_info(rdev->ddev);
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;

	/* initialize memory controller */
	r = si_mc_init(rdev);
	if (r)
		return r;

	/* Memory manager */
	r = radeon_bo_init(rdev);
	if (r)
		return r;

	r = radeon_irq_kms_init(rdev);
	if (r)
		return r;

	ring = &rdev->ring[RADEON_RING_TYPE_GFX_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 1024 * 1024);

	ring = &rdev->ring[CAYMAN_RING_TYPE_CP1_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 1024 * 1024);

	ring = &rdev->ring[CAYMAN_RING_TYPE_CP2_INDEX];
	ring->ring_obj = NULL;
	r600_ring_init(rdev, ring, 1024 * 1024);

	rdev->ih.ring_obj = NULL;
	r600_ih_ring_init(rdev, 64 * 1024);

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;
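	/* Assume acceleration works until si_startup() says otherwise */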
	rdev->accel_working = true;
	r = si_startup(rdev);
	if (r) {
		dev_err(rdev->dev, "disabling GPU acceleration\n");
		radeon_wb_fini(rdev);
		radeon_ib_pool_fini(rdev);
		radeon_vm_manager_fini(rdev);
		radeon_irq_kms_fini(rdev);
		si_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}

	/* Don't start up if the MC ucode is missing.
	 * The default clocks and voltages before the MC ucode
	 * is loaded are not sufficient for advanced operations.
	 */
	if (!rdev->mc_fw) {
		DRM_ERROR("radeon: MC ucode required for NI+.\n");
		return -EINVAL;
	}

	return 0;
}
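/**
 * si_fini - tear down asic state on driver unload (SI).
 *
 * @rdev: radeon_device pointer
 *
 * Releases what si_init()/si_startup() set up: blitter, writeback, VM
 * manager, IB pool, interrupts, GART, VRAM scratch, GEM, fences, buffer
 * objects and the ATOM BIOS context.
 */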
void si_fini(struct radeon_device *rdev)
{
	r600_blit_fini(rdev);
	radeon_wb_fini(rdev);
	radeon_vm_manager_fini(rdev);
	radeon_ib_pool_fini(rdev);
	radeon_irq_kms_fini(rdev);
	si_pcie_gart_fini(rdev);
	r600_vram_scratch_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_bo_fini(rdev);
	radeon_atombios_fini(rdev);
}
/**
 * si_get_gpu_clock - return GPU clock counter snapshot
 *
 * @rdev: radeon_device pointer
 *
 * Fetches a GPU clock counter snapshot (SI).
 * Returns the 64 bit clock counter snapshot.
 */
uint64_t si_get_gpu_clock(struct radeon_device *rdev)
{
	uint64_t clock;

	mutex_lock(&rdev->gpu_clock_mutex);
	WREG32(RLC_CAPTURE_GPU_CLOCK_COUNT, 1);
	clock = (uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_LSB) |
		((uint64_t)RREG32(RLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
	mutex_unlock(&rdev->gpu_clock_mutex);
	return clock;
}