/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include <linux/seq_file.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include "radeon_drm.h"
#include "radeon_mode.h"

#define PFP_UCODE_SIZE 576
#define PM4_UCODE_SIZE 1792
#define R700_PFP_UCODE_SIZE 848
#define R700_PM4_UCODE_SIZE 1360
45 MODULE_FIRMWARE("radeon/R600_pfp.bin");
46 MODULE_FIRMWARE("radeon/R600_me.bin");
47 MODULE_FIRMWARE("radeon/RV610_pfp.bin");
48 MODULE_FIRMWARE("radeon/RV610_me.bin");
49 MODULE_FIRMWARE("radeon/RV630_pfp.bin");
50 MODULE_FIRMWARE("radeon/RV630_me.bin");
51 MODULE_FIRMWARE("radeon/RV620_pfp.bin");
52 MODULE_FIRMWARE("radeon/RV620_me.bin");
53 MODULE_FIRMWARE("radeon/RV635_pfp.bin");
54 MODULE_FIRMWARE("radeon/RV635_me.bin");
55 MODULE_FIRMWARE("radeon/RV670_pfp.bin");
56 MODULE_FIRMWARE("radeon/RV670_me.bin");
57 MODULE_FIRMWARE("radeon/RS780_pfp.bin");
58 MODULE_FIRMWARE("radeon/RS780_me.bin");
59 MODULE_FIRMWARE("radeon/RV770_pfp.bin");
60 MODULE_FIRMWARE("radeon/RV770_me.bin");
61 MODULE_FIRMWARE("radeon/RV730_pfp.bin");
62 MODULE_FIRMWARE("radeon/RV730_me.bin");
63 MODULE_FIRMWARE("radeon/RV710_pfp.bin");
64 MODULE_FIRMWARE("radeon/RV710_me.bin");
int r600_debugfs_mc_info_init(struct radeon_device *rdev);

/* r600,rv610,rv630,rv620,rv635,rv670 */
int r600_mc_wait_for_idle(struct radeon_device *rdev);
void r600_gpu_init(struct radeon_device *rdev);
void r600_fini(struct radeon_device *rdev);
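
/*
 * R600 PCIE GART
 *
 * The page table lives in a VRAM object and holds one 64-bit entry per GPU
 * page, which is why r600_gart_clear_page() below writes a u64 with writeq()
 * at byte offset i * 8.
 */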
int r600_gart_clear_page(struct radeon_device *rdev, int i)
{
	void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;
	u64 pte = 0;

	if (i < 0 || i > rdev->gart.num_gpu_pages)
		return -EINVAL;
	writeq(pte, ((void __iomem *)ptr) + (i * 8));
	return 0;
}
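
/*
 * Once page table entries have changed, the VM block's TLBs must be
 * invalidated over the whole GTT range; the flush below requests the
 * invalidation and then polls VM_CONTEXT0_REQUEST_RESPONSE until the
 * hardware acknowledges it or the usec timeout expires.
 */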
void r600_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	WREG32(VM_CONTEXT0_INVALIDATION_LOW_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_INVALIDATION_HIGH_ADDR, (rdev->mc.gtt_end - 1) >> 12);
	WREG32(VM_CONTEXT0_REQUEST_RESPONSE, REQUEST_TYPE(1));
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(VM_CONTEXT0_REQUEST_RESPONSE);
		tmp = (tmp & RESPONSE_TYPE_MASK) >> RESPONSE_TYPE_SHIFT;
		if (tmp == 2) {
			printk(KERN_WARNING "[drm] r600 flush TLB failed\n");
			return;
		}
		if (tmp) {
			return;
		}
		udelay(1);
	}
}
int r600_pcie_gart_init(struct radeon_device *rdev)
{
	int r;

	if (rdev->gart.table.vram.robj) {
		WARN(1, "R600 PCIE GART already initialized.\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = radeon_gart_init(rdev);
	if (r)
		return r;
	rdev->gart.table_size = rdev->gart.num_gpu_pages * 8;
	return radeon_gart_table_vram_alloc(rdev);
}
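
/*
 * Enabling the GART programs the VM block in three steps: the L2 cache
 * behaviour, the per-client L1 TLB controls, and finally the context 0 page
 * table range and base; the TLB is then flushed and the GART marked ready.
 */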
int r600_pcie_gart_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int r, i;

	if (rdev->gart.table.vram.robj == NULL) {
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	r = radeon_gart_table_vram_pin(rdev);
	if (r)
		return r;
	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR, rdev->mc.gtt_end >> 12);
	WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR, rdev->gart.table_addr >> 12);
	WREG32(VM_CONTEXT0_CNTL, ENABLE_CONTEXT | PAGE_TABLE_DEPTH(0) |
				RANGE_PROTECTION_FAULT_ENABLE_DEFAULT);
	WREG32(VM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
			(u32)(rdev->dummy_page.addr >> 12));
	for (i = 1; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	r600_pcie_gart_tlb_flush(rdev);
	rdev->gart.ready = true;
	return 0;
}
void r600_pcie_gart_disable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Disable all tables */
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);

	/* Disable L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup L1 TLB control */
	tmp = EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	if (rdev->gart.table.vram.robj) {
		radeon_object_kunmap(rdev->gart.table.vram.robj);
		radeon_object_unpin(rdev->gart.table.vram.robj);
	}
}
void r600_pcie_gart_fini(struct radeon_device *rdev)
{
	r600_pcie_gart_disable(rdev);
	radeon_gart_table_vram_free(rdev);
	radeon_gart_fini(rdev);
}
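
/*
 * The AGP path programs the same L2/L1 TLB controls as the PCIE GART path
 * above, but leaves every VM context disabled since no translation is used.
 */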
void r600_agp_enable(struct radeon_device *rdev)
{
	u32 tmp;
	int i;

	/* Setup L2 cache */
	WREG32(VM_L2_CNTL, ENABLE_L2_CACHE | ENABLE_L2_FRAGMENT_PROCESSING |
				ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE |
				EFFECTIVE_L2_QUEUE_SIZE(7));
	WREG32(VM_L2_CNTL2, 0);
	WREG32(VM_L2_CNTL3, BANK_SELECT_0(0) | BANK_SELECT_1(1));
	/* Setup TLB control */
	tmp = ENABLE_L1_TLB | ENABLE_L1_FRAGMENT_PROCESSING |
		SYSTEM_ACCESS_MODE_NOT_IN_SYS |
		EFFECTIVE_L1_TLB_SIZE(5) | EFFECTIVE_L1_QUEUE_SIZE(5) |
		ENABLE_WAIT_L2_QUERY;
	WREG32(MC_VM_L1_TLB_MCB_RD_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp | ENABLE_L1_STRICT_ORDERING);
	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_A_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_RD_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCD_WR_B_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
	for (i = 0; i < 7; i++)
		WREG32(VM_CONTEXT0_CNTL + (i * 4), 0);
}
int r600_mc_wait_for_idle(struct radeon_device *rdev)
{
	unsigned i;
	u32 tmp;

	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(R_000E50_SRBM_STATUS) & 0x3F00;
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -1;
}
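
/*
 * Reprogramming the memory controller requires the MC to be idle and display
 * requests stopped, otherwise in-flight accesses could land at the old
 * framebuffer location while MC_VM_FB_LOCATION and the system aperture are
 * being moved.
 */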
static void r600_mc_program(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}
	WREG32(HDP_REG_COHERENCY_FLUSH_CNTL, 0);

	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Lockout access through VGA aperture (doesn't exist before R600) */
	WREG32(VGA_HDP_CONTROL, VGA_MEMORY_DISABLE);
	/* Update configuration */
	if (rdev->flags & RADEON_IS_AGP) {
		if (rdev->mc.vram_start < rdev->mc.gtt_start) {
			/* VRAM before AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.vram_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.gtt_end >> 12);
		} else {
			/* VRAM after AGP */
			WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR,
				rdev->mc.gtt_start >> 12);
			WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR,
				rdev->mc.vram_end >> 12);
		}
	} else {
		WREG32(MC_VM_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start >> 12);
		WREG32(MC_VM_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end >> 12);
	}
	WREG32(MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR, 0);
	tmp = ((rdev->mc.vram_end >> 24) & 0xFFFF) << 16;
	tmp |= ((rdev->mc.vram_start >> 24) & 0xFFFF);
	WREG32(MC_VM_FB_LOCATION, tmp);
	WREG32(HDP_NONSURFACE_BASE, (rdev->mc.vram_start >> 8));
	WREG32(HDP_NONSURFACE_INFO, (2 << 7));
	WREG32(HDP_NONSURFACE_SIZE, rdev->mc.mc_vram_size | 0x3FF);
	if (rdev->flags & RADEON_IS_AGP) {
		WREG32(MC_VM_AGP_TOP, rdev->mc.gtt_end >> 22);
		WREG32(MC_VM_AGP_BOT, rdev->mc.gtt_start >> 22);
		WREG32(MC_VM_AGP_BASE, rdev->mc.agp_base >> 22);
	} else {
		WREG32(MC_VM_AGP_BASE, 0);
		WREG32(MC_VM_AGP_TOP, 0x0FFFFFFF);
		WREG32(MC_VM_AGP_BOT, 0x0FFFFFFF);
	}
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	rv515_mc_resume(rdev, &save);
	/* we need to own VRAM, so turn off the VGA renderer here
	 * to stop it overwriting our objects */
	rv515_vga_render_disable(rdev);
}
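
/*
 * mc_init derives the memory bus width from the RAMCFG channel size, reads
 * the VRAM size from CONFIG_MEMSIZE, and then lays out VRAM and GTT in the
 * GPU's 32-bit address space (around the AGP aperture when one is present).
 */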
int r600_mc_init(struct radeon_device *rdev)
{
	fixed20_12 a;
	u32 tmp;
	int chansize;
	int r;

	/* Get VRAM informations */
	rdev->mc.vram_width = 128;
	rdev->mc.vram_is_ddr = true;
	tmp = RREG32(RAMCFG);
	if (tmp & CHANSIZE_OVERRIDE) {
		chansize = 16;
	} else if (tmp & CHANSIZE_MASK) {
		chansize = 64;
	} else {
		chansize = 32;
	}
	if (rdev->family == CHIP_R600) {
		rdev->mc.vram_width = 8 * chansize;
	} else if (rdev->family == CHIP_RV670) {
		rdev->mc.vram_width = 4 * chansize;
	} else if ((rdev->family == CHIP_RV610) ||
		   (rdev->family == CHIP_RV620)) {
		rdev->mc.vram_width = chansize;
	} else if ((rdev->family == CHIP_RV630) ||
		   (rdev->family == CHIP_RV635)) {
		rdev->mc.vram_width = 2 * chansize;
	}
	/* Could aper size report 0 ? */
	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
	/* Setup GPU memory space */
	rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
	rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);

	if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
		rdev->mc.mc_vram_size = rdev->mc.aper_size;

	if (rdev->mc.real_vram_size > rdev->mc.aper_size)
		rdev->mc.real_vram_size = rdev->mc.aper_size;

	if (rdev->flags & RADEON_IS_AGP) {
		r = radeon_agp_init(rdev);
		if (r)
			return r;
		/* gtt_size is setup by radeon_agp_init */
		rdev->mc.gtt_location = rdev->mc.agp_base;
		tmp = 0xFFFFFFFFUL - rdev->mc.agp_base - rdev->mc.gtt_size;
		/* Try to put vram before or after AGP because we
		 * want SYSTEM_APERTURE to cover both VRAM and
		 * AGP so that the GPU can catch out of VRAM/AGP accesses
		 */
		if (rdev->mc.gtt_location > rdev->mc.mc_vram_size) {
			/* Enough place before */
			rdev->mc.vram_location = rdev->mc.gtt_location -
							rdev->mc.mc_vram_size;
		} else if (tmp > rdev->mc.mc_vram_size) {
			/* Enough place after */
			rdev->mc.vram_location = rdev->mc.gtt_location +
							rdev->mc.gtt_size;
		} else {
			/* Trying to set up VRAM then AGP might not
			 * work on some cards
			 */
			rdev->mc.vram_location = 0x00000000UL;
			rdev->mc.gtt_location = rdev->mc.mc_vram_size;
		}
	} else {
		if (rdev->family == CHIP_RS780 || rdev->family == CHIP_RS880) {
			rdev->mc.vram_location = (RREG32(MC_VM_FB_LOCATION) &
							0xFFFF) << 24;
			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
			tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size;
			if ((0xFFFFFFFFUL - tmp) >= rdev->mc.gtt_size) {
				/* Enough place after vram */
				rdev->mc.gtt_location = tmp;
			} else if (rdev->mc.vram_location >= rdev->mc.gtt_size) {
				/* Enough place before vram */
				rdev->mc.gtt_location = 0;
			} else {
				/* Not enough place after or before, shrink
				 * gart size
				 */
				if (rdev->mc.vram_location > (0xFFFFFFFFUL - tmp)) {
					rdev->mc.gtt_location = 0;
					rdev->mc.gtt_size = rdev->mc.vram_location;
				} else {
					rdev->mc.gtt_location = tmp;
					rdev->mc.gtt_size = 0xFFFFFFFFUL - tmp;
				}
			}
			rdev->mc.gtt_location = rdev->mc.mc_vram_size;
		} else {
			rdev->mc.vram_location = 0x00000000UL;
			rdev->mc.gtt_location = rdev->mc.mc_vram_size;
			rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
		}
	}
	rdev->mc.vram_start = rdev->mc.vram_location;
	rdev->mc.vram_end = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
	rdev->mc.gtt_start = rdev->mc.gtt_location;
	rdev->mc.gtt_end = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
	/* FIXME: we should enforce default clock in case GPU is not in
	 * default setup
	 */
	a.full = rfixed_const(100);
	rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
	rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
	return 0;
}
/* We don't check whether the GPU really needs a reset; we simply do the
 * reset, it's up to the caller to determine if the GPU needs one. We
 * might add a helper function to check that.
 */
int r600_gpu_soft_reset(struct radeon_device *rdev)
{
	struct rv515_mc_save save;
	u32 grbm_busy_mask = S_008010_VC_BUSY(1) | S_008010_VGT_BUSY_NO_DMA(1) |
				S_008010_VGT_BUSY(1) | S_008010_TA03_BUSY(1) |
				S_008010_TC_BUSY(1) | S_008010_SX_BUSY(1) |
				S_008010_SH_BUSY(1) | S_008010_SPI03_BUSY(1) |
				S_008010_SMX_BUSY(1) | S_008010_SC_BUSY(1) |
				S_008010_PA_BUSY(1) | S_008010_DB03_BUSY(1) |
				S_008010_CR_BUSY(1) | S_008010_CB03_BUSY(1) |
				S_008010_GUI_ACTIVE(1);
	u32 grbm2_busy_mask = S_008014_SPI0_BUSY(1) | S_008014_SPI1_BUSY(1) |
			S_008014_SPI2_BUSY(1) | S_008014_SPI3_BUSY(1) |
			S_008014_TA0_BUSY(1) | S_008014_TA1_BUSY(1) |
			S_008014_TA2_BUSY(1) | S_008014_TA3_BUSY(1) |
			S_008014_DB0_BUSY(1) | S_008014_DB1_BUSY(1) |
			S_008014_DB2_BUSY(1) | S_008014_DB3_BUSY(1) |
			S_008014_CB0_BUSY(1) | S_008014_CB1_BUSY(1) |
			S_008014_CB2_BUSY(1) | S_008014_CB3_BUSY(1);
	u32 srbm_reset = 0;
	u32 tmp;

	dev_info(rdev->dev, "GPU softreset\n");
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	rv515_mc_stop(rdev, &save);
	if (r600_mc_wait_for_idle(rdev)) {
		dev_warn(rdev->dev, "Wait for MC idle timed out!\n");
	}
	/* Disable CP parsing/prefetching */
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(0xff));
	/* Check if any of the rendering blocks is busy and reset it */
	if ((RREG32(R_008010_GRBM_STATUS) & grbm_busy_mask) ||
	    (RREG32(R_008014_GRBM_STATUS2) & grbm2_busy_mask)) {
		tmp = S_008020_SOFT_RESET_CR(1) |
			S_008020_SOFT_RESET_DB(1) |
			S_008020_SOFT_RESET_CB(1) |
			S_008020_SOFT_RESET_PA(1) |
			S_008020_SOFT_RESET_SC(1) |
			S_008020_SOFT_RESET_SMX(1) |
			S_008020_SOFT_RESET_SPI(1) |
			S_008020_SOFT_RESET_SX(1) |
			S_008020_SOFT_RESET_SH(1) |
			S_008020_SOFT_RESET_TC(1) |
			S_008020_SOFT_RESET_TA(1) |
			S_008020_SOFT_RESET_VC(1) |
			S_008020_SOFT_RESET_VGT(1);
		dev_info(rdev->dev, "  R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(R_008020_GRBM_SOFT_RESET, tmp);
		(void)RREG32(R_008020_GRBM_SOFT_RESET);
		udelay(50);
		WREG32(R_008020_GRBM_SOFT_RESET, 0);
		(void)RREG32(R_008020_GRBM_SOFT_RESET);
	}
	/* Reset CP (we always reset CP) */
	tmp = S_008020_SOFT_RESET_CP(1);
	dev_info(rdev->dev, "R_008020_GRBM_SOFT_RESET=0x%08X\n", tmp);
	WREG32(R_008020_GRBM_SOFT_RESET, tmp);
	(void)RREG32(R_008020_GRBM_SOFT_RESET);
	udelay(50);
	WREG32(R_008020_GRBM_SOFT_RESET, 0);
	(void)RREG32(R_008020_GRBM_SOFT_RESET);
	/* Reset other GPU blocks if necessary */
	if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
	if (G_000E50_GRBM_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_GRBM(1);
	if (G_000E50_HI_RQ_PENDING(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_IH(1);
	if (G_000E50_VMC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_VMC(1);
	if (G_000E50_MCB_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDZ_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDY_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDX_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_MCDW_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_MC(1);
	if (G_000E50_RLC_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_RLC(1);
	if (G_000E50_SEM_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_SEM(1);
	if (G_000E50_BIF_BUSY(RREG32(R_000E50_SRBM_STATUS)))
		srbm_reset |= S_000E60_SOFT_RESET_BIF(1);
	dev_info(rdev->dev, "  R_000E60_SRBM_SOFT_RESET=0x%08X\n", srbm_reset);
	WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	udelay(50);
	WREG32(R_000E60_SRBM_SOFT_RESET, 0);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	WREG32(R_000E60_SRBM_SOFT_RESET, srbm_reset);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	udelay(50);
	WREG32(R_000E60_SRBM_SOFT_RESET, 0);
	(void)RREG32(R_000E60_SRBM_SOFT_RESET);
	/* Wait a little for things to settle down */
	udelay(50);
	dev_info(rdev->dev, "  R_008010_GRBM_STATUS=0x%08X\n",
		RREG32(R_008010_GRBM_STATUS));
	dev_info(rdev->dev, "  R_008014_GRBM_STATUS2=0x%08X\n",
		RREG32(R_008014_GRBM_STATUS2));
	dev_info(rdev->dev, "  R_000E50_SRBM_STATUS=0x%08X\n",
		RREG32(R_000E50_SRBM_STATUS));
	/* After reset we need to reinit the asic as the GPU often ends up in
	 * an unusable state.
	 */
	atom_asic_init(rdev->mode_info.atom_context);
	rv515_mc_resume(rdev, &save);
	return 0;
}

int r600_gpu_reset(struct radeon_device *rdev)
{
	return r600_gpu_soft_reset(rdev);
}
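
/*
 * The helper below builds the pipe-to-backend map used in GB_TILING_CONFIG:
 * each tile pipe gets a 2-bit backend index, assigned round-robin over the
 * backends that are not masked off by backend_disable_mask.
 */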
static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes,
					     u32 num_backends,
					     u32 backend_disable_mask)
{
	u32 backend_map = 0;
	u32 enabled_backends_mask;
	u32 enabled_backends_count;
	u32 cur_pipe;
	u32 swizzle_pipe[R6XX_MAX_PIPES];
	u32 cur_backend;
	u32 i;

	if (num_tile_pipes > R6XX_MAX_PIPES)
		num_tile_pipes = R6XX_MAX_PIPES;
	if (num_tile_pipes < 1)
		num_tile_pipes = 1;
	if (num_backends > R6XX_MAX_BACKENDS)
		num_backends = R6XX_MAX_BACKENDS;
	if (num_backends < 1)
		num_backends = 1;

	enabled_backends_mask = 0;
	enabled_backends_count = 0;
	for (i = 0; i < R6XX_MAX_BACKENDS; ++i) {
		if (((backend_disable_mask >> i) & 1) == 0) {
			enabled_backends_mask |= (1 << i);
			++enabled_backends_count;
		}
		if (enabled_backends_count == num_backends)
			break;
	}

	if (enabled_backends_count == 0) {
		enabled_backends_mask = 1;
		enabled_backends_count = 1;
	}

	if (enabled_backends_count != num_backends)
		num_backends = enabled_backends_count;

	memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
	switch (num_tile_pipes) {
	default:
		/* cases 1-8 set swizzle_pipe[] for each tile pipe count */
		break;
	}

	cur_backend = 0;
	for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
		while (((1 << cur_backend) & enabled_backends_mask) == 0)
			cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;

		backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));

		cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS;
	}

	return backend_map;
}

int r600_count_pipe_bits(uint32_t val)
{
	int i, ret = 0;

	for (i = 0; i < 32; i++) {
		ret += val & 1;
		val >>= 1;
	}
	return ret;
}
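
/*
 * gpu_init fills in the per-family limits (pipes, SIMDs, backends, GPRs,
 * threads, stack entries), derives the tiling configuration from RAMCFG and
 * then programs the SQ/SPI/CB/DB defaults that the 2D/3D drivers build on.
 */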
void r600_gpu_init(struct radeon_device *rdev)
{
	u32 tiling_config;
	u32 ramcfg;
	u32 tmp;
	int i, j;
	u32 sq_config;
	u32 sq_gpr_resource_mgmt_1 = 0;
	u32 sq_gpr_resource_mgmt_2 = 0;
	u32 sq_thread_resource_mgmt = 0;
	u32 sq_stack_resource_mgmt_1 = 0;
	u32 sq_stack_resource_mgmt_2 = 0;

	/* FIXME: implement */
	switch (rdev->family) {
	case CHIP_R600:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 8;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 256;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		rdev->config.r600.max_pipes = 2;
		rdev->config.r600.max_tile_pipes = 2;
		rdev->config.r600.max_simds = 3;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
	case CHIP_RS880:
		rdev->config.r600.max_pipes = 1;
		rdev->config.r600.max_tile_pipes = 1;
		rdev->config.r600.max_simds = 2;
		rdev->config.r600.max_backends = 1;
		rdev->config.r600.max_gprs = 128;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 128;
		rdev->config.r600.max_hw_contexts = 4;
		rdev->config.r600.max_gs_threads = 4;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 1;
		break;
	case CHIP_RV670:
		rdev->config.r600.max_pipes = 4;
		rdev->config.r600.max_tile_pipes = 4;
		rdev->config.r600.max_simds = 4;
		rdev->config.r600.max_backends = 4;
		rdev->config.r600.max_gprs = 192;
		rdev->config.r600.max_threads = 192;
		rdev->config.r600.max_stack_entries = 256;
		rdev->config.r600.max_hw_contexts = 8;
		rdev->config.r600.max_gs_threads = 16;
		rdev->config.r600.sx_max_export_size = 128;
		rdev->config.r600.sx_max_export_pos_size = 16;
		rdev->config.r600.sx_max_export_smx_size = 128;
		rdev->config.r600.sq_num_cf_insts = 2;
		break;
	default:
		break;
	}

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x18) {
		WREG32((0x2c14 + j), 0x00000000);
		WREG32((0x2c18 + j), 0x00000000);
		WREG32((0x2c1c + j), 0x00000000);
		WREG32((0x2c20 + j), 0x00000000);
		WREG32((0x2c24 + j), 0x00000000);
	}

	WREG32(GRBM_CNTL, GRBM_READ_TIMEOUT(0xff));

	/* Setup tiling */
	tiling_config = 0;
	ramcfg = RREG32(RAMCFG);
	switch (rdev->config.r600.max_tile_pipes) {
	case 1:
		tiling_config |= PIPE_TILING(0);
		break;
	case 2:
		tiling_config |= PIPE_TILING(1);
		break;
	case 4:
		tiling_config |= PIPE_TILING(2);
		break;
	case 8:
		tiling_config |= PIPE_TILING(3);
		break;
	default:
		break;
	}
	tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
	tiling_config |= GROUP_SIZE(0);
	tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
	if (tmp > 3) {
		tiling_config |= ROW_TILING(3);
		tiling_config |= SAMPLE_SPLIT(3);
	} else {
		tiling_config |= ROW_TILING(tmp);
		tiling_config |= SAMPLE_SPLIT(tmp);
	}
	tiling_config |= BANK_SWAPS(1);
	tmp = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes,
						rdev->config.r600.max_backends,
						(0xff << rdev->config.r600.max_backends) & 0xff);
	tiling_config |= BACKEND_MAP(tmp);
	WREG32(GB_TILING_CONFIG, tiling_config);
	WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
	WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);

	tmp = BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK);
	WREG32(CC_RB_BACKEND_DISABLE, tmp);

	/* Setup pipes */
	tmp = INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK);
	tmp |= INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK);
	WREG32(CC_GC_SHADER_PIPE_CONFIG, tmp);
	WREG32(GC_USER_SHADER_PIPE_CONFIG, tmp);

	tmp = R6XX_MAX_BACKENDS - r600_count_pipe_bits(tmp & INACTIVE_QD_PIPES_MASK);
	WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
	WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);

	/* Setup some CP states */
	WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | ROQ_IB2_START(0x2b)));
	WREG32(CP_MEQ_THRESHOLDS, (MEQ_END(0x40) | ROQ_END(0x40)));

	WREG32(TA_CNTL_AUX, (DISABLE_CUBE_ANISO | SYNC_GRADIENT |
			     SYNC_WALKER | SYNC_ALIGNER));
	/* Setup various GPU states */
	if (rdev->family == CHIP_RV670)
		WREG32(ARB_GDEC_RD_CNTL, 0x00000021);

	tmp = RREG32(SX_DEBUG_1);
	tmp |= SMX_EVENT_RELEASE;
	if ((rdev->family > CHIP_R600))
		tmp |= ENABLE_NEW_SMX_ADDRESS;
	WREG32(SX_DEBUG_1, tmp);

	if (((rdev->family) == CHIP_R600) ||
	    ((rdev->family) == CHIP_RV630) ||
	    ((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780)) {
		WREG32(DB_DEBUG, PREZ_MUST_WAIT_FOR_POSTZ_DONE);
	} else {
		WREG32(DB_DEBUG, 0);
	}

	WREG32(DB_WATERMARKS, (DEPTH_FREE(4) | DEPTH_CACHELINE_FREE(16) |
			       DEPTH_FLUSH(16) | DEPTH_PENDING_FREE(4)));

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(VGT_NUM_INSTANCES, 0);

	WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
	WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(0));

	tmp = RREG32(SQ_MS_FIFO_SIZES);
	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780)) {
		tmp = (CACHE_FIFO_SIZE(0xa) |
		       FETCH_FIFO_HIWATER(0xa) |
		       DONE_FIFO_HIWATER(0xe0) |
		       ALU_UPDATE_FIFO_HIWATER(0x8));
	} else if (((rdev->family) == CHIP_R600) ||
		   ((rdev->family) == CHIP_RV630)) {
		tmp &= ~DONE_FIFO_HIWATER(0xff);
		tmp |= DONE_FIFO_HIWATER(0x4);
	}
	WREG32(SQ_MS_FIFO_SIZES, tmp);

	/* SQ_CONFIG, SQ_GPR_RESOURCE_MGMT, SQ_THREAD_RESOURCE_MGMT, SQ_STACK_RESOURCE_MGMT
	 * should be adjusted as needed by the 2D/3D drivers.  This just sets default values
	 */
	sq_config = RREG32(SQ_CONFIG);
	sq_config &= ~(PS_PRIO(3) |
		       VS_PRIO(3) |
		       GS_PRIO(3) |
		       ES_PRIO(3));
	sq_config |= (DX9_CONSTS |
		      VC_ENABLE |
		      PS_PRIO(0) |
		      VS_PRIO(1) |
		      GS_PRIO(2) |
		      ES_PRIO(3));

	if ((rdev->family) == CHIP_R600) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(124) |
					  NUM_VS_GPRS(124) |
					  NUM_CLAUSE_TEMP_GPRS(4));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(0) |
					  NUM_ES_GPRS(0));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(136) |
					   NUM_VS_THREADS(48) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(4));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(128) |
					    NUM_VS_STACK_ENTRIES(128));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(0) |
					    NUM_ES_STACK_ENTRIES(0));
	} else if (((rdev->family) == CHIP_RV610) ||
		   ((rdev->family) == CHIP_RV620) ||
		   ((rdev->family) == CHIP_RS780)) {
		/* no vertex cache */
		sq_config &= ~VC_ENABLE;

		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if (((rdev->family) == CHIP_RV630) ||
		   ((rdev->family) == CHIP_RV635)) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(18) |
					  NUM_ES_GPRS(18));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(40) |
					    NUM_VS_STACK_ENTRIES(40));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(32) |
					    NUM_ES_STACK_ENTRIES(16));
	} else if ((rdev->family) == CHIP_RV670) {
		sq_gpr_resource_mgmt_1 = (NUM_PS_GPRS(44) |
					  NUM_VS_GPRS(44) |
					  NUM_CLAUSE_TEMP_GPRS(2));
		sq_gpr_resource_mgmt_2 = (NUM_GS_GPRS(17) |
					  NUM_ES_GPRS(17));
		sq_thread_resource_mgmt = (NUM_PS_THREADS(79) |
					   NUM_VS_THREADS(78) |
					   NUM_GS_THREADS(4) |
					   NUM_ES_THREADS(31));
		sq_stack_resource_mgmt_1 = (NUM_PS_STACK_ENTRIES(64) |
					    NUM_VS_STACK_ENTRIES(64));
		sq_stack_resource_mgmt_2 = (NUM_GS_STACK_ENTRIES(64) |
					    NUM_ES_STACK_ENTRIES(64));
	}

	WREG32(SQ_CONFIG, sq_config);
	WREG32(SQ_GPR_RESOURCE_MGMT_1, sq_gpr_resource_mgmt_1);
	WREG32(SQ_GPR_RESOURCE_MGMT_2, sq_gpr_resource_mgmt_2);
	WREG32(SQ_THREAD_RESOURCE_MGMT, sq_thread_resource_mgmt);
	WREG32(SQ_STACK_RESOURCE_MGMT_1, sq_stack_resource_mgmt_1);
	WREG32(SQ_STACK_RESOURCE_MGMT_2, sq_stack_resource_mgmt_2);

	if (((rdev->family) == CHIP_RV610) ||
	    ((rdev->family) == CHIP_RV620) ||
	    ((rdev->family) == CHIP_RS780)) {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(TC_ONLY));
	} else {
		WREG32(VGT_CACHE_INVALIDATION, CACHE_INVALIDATION(VC_AND_TC));
	}

	/* More default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_AA_SAMPLE_LOCS_2S, (S0_X(0xc) | S0_Y(0x4) |
					 S1_X(0x4) | S1_Y(0xc)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_4S, (S0_X(0xe) | S0_Y(0xe) |
					 S1_X(0x2) | S1_Y(0x2) |
					 S2_X(0xa) | S2_Y(0x6) |
					 S3_X(0x6) | S3_Y(0xa)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD0, (S0_X(0xe) | S0_Y(0xb) |
					     S1_X(0x4) | S1_Y(0xc) |
					     S2_X(0x1) | S2_Y(0x6) |
					     S3_X(0xa) | S3_Y(0xe)));
	WREG32(PA_SC_AA_SAMPLE_LOCS_8S_WD1, (S4_X(0x6) | S4_Y(0x1) |
					     S5_X(0x0) | S5_Y(0x0) |
					     S6_X(0xb) | S6_Y(0x4) |
					     S7_X(0x7) | S7_Y(0x8)));

	WREG32(VGT_STRMOUT_EN, 0);
	tmp = rdev->config.r600.max_pipes * 16;
	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
		tmp += 32;
		break;
	case CHIP_RV670:
		tmp += 128;
		break;
	default:
		break;
	}
	WREG32(VGT_ES_PER_GS, 128);
	WREG32(VGT_GS_PER_ES, tmp);
	WREG32(VGT_GS_PER_VS, 2);
	WREG32(VGT_GS_VERTEX_REUSE, 16);

	/* more default values. 2D/3D driver should adjust as needed */
	WREG32(PA_SC_LINE_STIPPLE_STATE, 0);
	WREG32(VGT_STRMOUT_EN, 0);
	WREG32(PA_SC_MODE_CNTL, 0);
	WREG32(PA_SC_AA_CONFIG, 0);
	WREG32(PA_SC_LINE_STIPPLE, 0);
	WREG32(SPI_INPUT_Z, 0);
	WREG32(SPI_PS_IN_CONTROL_0, NUM_INTERP(2));
	WREG32(CB_COLOR7_FRAG, 0);

	/* Clear render buffer base addresses */
	WREG32(CB_COLOR0_BASE, 0);
	WREG32(CB_COLOR1_BASE, 0);
	WREG32(CB_COLOR2_BASE, 0);
	WREG32(CB_COLOR3_BASE, 0);
	WREG32(CB_COLOR4_BASE, 0);
	WREG32(CB_COLOR5_BASE, 0);
	WREG32(CB_COLOR6_BASE, 0);
	WREG32(CB_COLOR7_BASE, 0);
	WREG32(CB_COLOR7_FRAG, 0);

	switch (rdev->family) {
	case CHIP_RV610:
	case CHIP_RV620:
	case CHIP_RS780:
		tmp = TC_L2_SIZE(8);
		break;
	case CHIP_RV630:
	case CHIP_RV635:
		tmp = TC_L2_SIZE(4);
		break;
	case CHIP_R600:
		tmp = TC_L2_SIZE(0) | L2_DISABLE_LATE_HIT;
		break;
	default:
		tmp = TC_L2_SIZE(0);
		break;
	}
	WREG32(TC_CNTL, tmp);

	tmp = RREG32(HDP_HOST_PATH_CNTL);
	WREG32(HDP_HOST_PATH_CNTL, tmp);

	tmp = RREG32(ARB_POP);
	tmp |= ENABLE_TC128;
	WREG32(ARB_POP, tmp);

	WREG32(PA_SC_MULTI_CHIP_CNTL, 0);
	WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
			       NUM_CLIP_SEQ(3)));
	WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
}
/*
 * Indirect register accessors
 */
u32 r600_pciep_rreg(struct radeon_device *rdev, u32 reg)
{
	u32 r;

	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	r = RREG32(PCIE_PORT_DATA);
	return r;
}

void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
{
	WREG32(PCIE_PORT_INDEX, ((reg) & 0xff));
	(void)RREG32(PCIE_PORT_INDEX);
	WREG32(PCIE_PORT_DATA, (v));
	(void)RREG32(PCIE_PORT_DATA);
}
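
/*
 * CP (command processor) microcode handling: the PFP and ME engines are
 * loaded from the radeon/<chip>_pfp.bin and radeon/<chip>_me.bin firmware
 * files; the *_UCODE_SIZE defines near the top of the file give the expected
 * lengths in dwords.
 */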
void r600_cp_stop(struct radeon_device *rdev)
{
	WREG32(R_0086D8_CP_ME_CNTL, S_0086D8_CP_ME_HALT(1));
}

int r600_cp_init_microcode(struct radeon_device *rdev)
{
	struct platform_device *pdev;
	const char *chip_name;
	size_t pfp_req_size, me_req_size;
	char fw_name[30];
	int err;

	pdev = platform_device_register_simple("radeon_cp", 0, NULL, 0);
	err = IS_ERR(pdev);
	if (err) {
		printk(KERN_ERR "radeon_cp: Failed to register firmware\n");
		return -EINVAL;
	}

	switch (rdev->family) {
	case CHIP_R600: chip_name = "R600"; break;
	case CHIP_RV610: chip_name = "RV610"; break;
	case CHIP_RV630: chip_name = "RV630"; break;
	case CHIP_RV620: chip_name = "RV620"; break;
	case CHIP_RV635: chip_name = "RV635"; break;
	case CHIP_RV670: chip_name = "RV670"; break;
	case CHIP_RS780:
	case CHIP_RS880: chip_name = "RS780"; break;
	case CHIP_RV770: chip_name = "RV770"; break;
	case CHIP_RV730:
	case CHIP_RV740: chip_name = "RV730"; break;
	case CHIP_RV710: chip_name = "RV710"; break;
	default: BUG();
	}

	if (rdev->family >= CHIP_RV770) {
		pfp_req_size = R700_PFP_UCODE_SIZE * 4;
		me_req_size = R700_PM4_UCODE_SIZE * 4;
	} else {
		pfp_req_size = PFP_UCODE_SIZE * 4;
		me_req_size = PM4_UCODE_SIZE * 12;
	}

	DRM_INFO("Loading %s CP Microcode\n", chip_name);

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_pfp.bin", chip_name);
	err = request_firmware(&rdev->pfp_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->pfp_fw->size != pfp_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->pfp_fw->size, fw_name);
		err = -EINVAL;
		goto out;
	}

	snprintf(fw_name, sizeof(fw_name), "radeon/%s_me.bin", chip_name);
	err = request_firmware(&rdev->me_fw, fw_name, &pdev->dev);
	if (err)
		goto out;
	if (rdev->me_fw->size != me_req_size) {
		printk(KERN_ERR
		       "r600_cp: Bogus length %zu in firmware \"%s\"\n",
		       rdev->me_fw->size, fw_name);
		err = -EINVAL;
	}
out:
	platform_device_unregister(pdev);

	if (err) {
		if (err != -EINVAL)
			printk(KERN_ERR
			       "r600_cp: Failed to load firmware \"%s\"\n",
			       fw_name);
		release_firmware(rdev->pfp_fw);
		rdev->pfp_fw = NULL;
		release_firmware(rdev->me_fw);
		rdev->me_fw = NULL;
	}
	return err;
}
static int r600_cp_load_microcode(struct radeon_device *rdev)
{
	const __be32 *fw_data;
	int i;

	if (!rdev->me_fw || !rdev->pfp_fw)
		return -EINVAL;

	r600_cp_stop(rdev);

	WREG32(CP_RB_CNTL, RB_NO_UPDATE | RB_BLKSZ(15) | RB_BUFSZ(3));

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	WREG32(CP_ME_RAM_WADDR, 0);

	fw_data = (const __be32 *)rdev->me_fw->data;
	WREG32(CP_ME_RAM_WADDR, 0);
	for (i = 0; i < PM4_UCODE_SIZE * 3; i++)
		WREG32(CP_ME_RAM_DATA,
		       be32_to_cpup(fw_data++));

	fw_data = (const __be32 *)rdev->pfp_fw->data;
	WREG32(CP_PFP_UCODE_ADDR, 0);
	for (i = 0; i < PFP_UCODE_SIZE; i++)
		WREG32(CP_PFP_UCODE_DATA,
		       be32_to_cpup(fw_data++));

	WREG32(CP_PFP_UCODE_ADDR, 0);
	WREG32(CP_ME_RAM_WADDR, 0);
	WREG32(CP_ME_RAM_RADDR, 0);
	return 0;
}
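
/*
 * cp_start issues the ME_INITIALIZE packet; note that R6xx parts report
 * max_hw_contexts - 1 from the r600 config while R7xx parts use the rv770
 * config instead.
 */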
int r600_cp_start(struct radeon_device *rdev)
{
	int r;
	uint32_t cp_me;

	r = radeon_ring_lock(rdev, 7);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_ME_INITIALIZE, 5));
	radeon_ring_write(rdev, 0x1);
	if (rdev->family < CHIP_RV770) {
		radeon_ring_write(rdev, 0x3);
		radeon_ring_write(rdev, rdev->config.r600.max_hw_contexts - 1);
	} else {
		radeon_ring_write(rdev, 0x0);
		radeon_ring_write(rdev, rdev->config.rv770.max_hw_contexts - 1);
	}
	radeon_ring_write(rdev, PACKET3_ME_INITIALIZE_DEVICE_ID(1));
	radeon_ring_write(rdev, 0);
	radeon_ring_write(rdev, 0);
	radeon_ring_unlock_commit(rdev);

	cp_me = 0xff;
	WREG32(R_0086D8_CP_ME_CNTL, cp_me);
	return 0;
}
int r600_cp_resume(struct radeon_device *rdev)
{
	u32 tmp;
	u32 rb_bufsz;
	int r;

	/* Reset cp */
	WREG32(GRBM_SOFT_RESET, SOFT_RESET_CP);
	RREG32(GRBM_SOFT_RESET);
	mdelay(15);
	WREG32(GRBM_SOFT_RESET, 0);

	/* Set ring buffer size */
	rb_bufsz = drm_order(rdev->cp.ring_size / 8);
#ifdef __BIG_ENDIAN
	WREG32(CP_RB_CNTL, BUF_SWAP_32BIT | RB_NO_UPDATE |
		(drm_order(4096/8) << 8) | rb_bufsz);
#else
	WREG32(CP_RB_CNTL, RB_NO_UPDATE | (drm_order(4096/8) << 8) | rb_bufsz);
#endif
	WREG32(CP_SEM_WAIT_TIMER, 0x4);

	/* Set the write pointer delay */
	WREG32(CP_RB_WPTR_DELAY, 0);

	/* Initialize the ring buffer's read and write pointers */
	tmp = RREG32(CP_RB_CNTL);
	WREG32(CP_RB_CNTL, tmp | RB_RPTR_WR_ENA);
	WREG32(CP_RB_RPTR_WR, 0);
	WREG32(CP_RB_WPTR, 0);
	WREG32(CP_RB_RPTR_ADDR, rdev->cp.gpu_addr & 0xFFFFFFFF);
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->cp.gpu_addr));
	mdelay(1);
	WREG32(CP_RB_CNTL, tmp);

	WREG32(CP_RB_BASE, rdev->cp.gpu_addr >> 8);
	WREG32(CP_DEBUG, (1 << 27) | (1 << 28));

	rdev->cp.rptr = RREG32(CP_RB_RPTR);
	rdev->cp.wptr = RREG32(CP_RB_WPTR);

	r600_cp_start(rdev);
	rdev->cp.ready = true;
	r = radeon_ring_test(rdev);
	if (r) {
		rdev->cp.ready = false;
		return r;
	}
	return 0;
}

void r600_cp_commit(struct radeon_device *rdev)
{
	WREG32(CP_RB_WPTR, rdev->cp.wptr);
	(void)RREG32(CP_RB_WPTR);
}
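
/*
 * The ring size passed to r600_ring_init() is rounded up to a power of two
 * (drm_order() returns the log2), and ring submissions are aligned to
 * 16 dwords via cp.align_mask.
 */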
void r600_ring_init(struct radeon_device *rdev, unsigned ring_size)
{
	u32 rb_bufsz;

	/* Align ring size */
	rb_bufsz = drm_order(ring_size / 8);
	ring_size = (1 << (rb_bufsz + 1)) * 4;
	rdev->cp.ring_size = ring_size;
	rdev->cp.align_mask = 16 - 1;
}
/*
 * GPU scratch register helper functions.
 */
void r600_scratch_init(struct radeon_device *rdev)
{
	int i;

	rdev->scratch.num_reg = 7;
	for (i = 0; i < rdev->scratch.num_reg; i++) {
		rdev->scratch.free[i] = true;
		rdev->scratch.reg[i] = SCRATCH_REG0 + (i * 4);
	}
}
int r600_ring_test(struct radeon_device *rdev)
{
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: cp failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ring_lock(rdev, 3);
	if (r) {
		DRM_ERROR("radeon: cp failed to lock ring (%d).\n", r);
		radeon_scratch_free(rdev, scratch);
		return r;
	}
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, 0xDEADBEEF);
	radeon_ring_unlock_commit(rdev);
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ring test succeeded in %d usecs\n", i);
	} else {
		DRM_ERROR("radeon: ring test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	return r;
}
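
/*
 * Write-back buffer: a small GTT object that the GPU writes the ring read
 * pointer and scratch register values into, letting the driver read them
 * from memory instead of doing MMIO register reads.
 */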
void r600_wb_disable(struct radeon_device *rdev)
{
	WREG32(SCRATCH_UMSK, 0);
	if (rdev->wb.wb_obj) {
		radeon_object_kunmap(rdev->wb.wb_obj);
		radeon_object_unpin(rdev->wb.wb_obj);
	}
}

void r600_wb_fini(struct radeon_device *rdev)
{
	r600_wb_disable(rdev);
	if (rdev->wb.wb_obj) {
		radeon_object_unref(&rdev->wb.wb_obj);
		rdev->wb.wb = NULL;
		rdev->wb.wb_obj = NULL;
	}
}

int r600_wb_enable(struct radeon_device *rdev)
{
	int r;

	if (rdev->wb.wb_obj == NULL) {
		r = radeon_object_create(rdev, NULL, 4096, true,
				RADEON_GEM_DOMAIN_GTT, false, &rdev->wb.wb_obj);
		if (r) {
			dev_warn(rdev->dev, "failed to create WB buffer (%d).\n", r);
			return r;
		}
		r = radeon_object_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
				&rdev->wb.gpu_addr);
		if (r) {
			dev_warn(rdev->dev, "failed to pin WB buffer (%d).\n", r);
			return r;
		}
		r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
		if (r) {
			dev_warn(rdev->dev, "failed to map WB buffer (%d).\n", r);
			return r;
		}
	}
	WREG32(SCRATCH_ADDR, (rdev->wb.gpu_addr >> 8) & 0xFFFFFFFF);
	WREG32(CP_RB_RPTR_ADDR, (rdev->wb.gpu_addr + 1024) & 0xFFFFFFFC);
	WREG32(CP_RB_RPTR_ADDR_HI, upper_32_bits(rdev->wb.gpu_addr + 1024) & 0xFF);
	WREG32(SCRATCH_UMSK, 0xff);
	return 0;
}
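
/*
 * A fence is emitted as a SET_CONFIG_REG write of the fence sequence number
 * into the fence driver's scratch register.
 */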
void r600_fence_ring_emit(struct radeon_device *rdev,
			  struct radeon_fence *fence)
{
	/* Emit fence sequence & fire IRQ */
	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
	radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
	radeon_ring_write(rdev, fence->seq);
}

int r600_copy_dma(struct radeon_device *rdev,
		  uint64_t src_offset,
		  uint64_t dst_offset,
		  unsigned num_pages,
		  struct radeon_fence *fence)
{
	/* FIXME: implement */
	return 0;
}

int r600_copy_blit(struct radeon_device *rdev,
		   uint64_t src_offset, uint64_t dst_offset,
		   unsigned num_pages, struct radeon_fence *fence)
{
	r600_blit_prepare_copy(rdev, num_pages * 4096);
	r600_kms_blit_copy(rdev, src_offset, dst_offset, num_pages * 4096);
	r600_blit_done_copy(rdev, fence);
	return 0;
}
int r600_irq_process(struct radeon_device *rdev)
{
	/* FIXME: implement */
	return 0;
}

int r600_irq_set(struct radeon_device *rdev)
{
	/* FIXME: implement */
	return 0;
}

int r600_set_surface_reg(struct radeon_device *rdev, int reg,
			 uint32_t tiling_flags, uint32_t pitch,
			 uint32_t offset, uint32_t obj_size)
{
	/* FIXME: implement */
	return 0;
}

void r600_clear_surface_reg(struct radeon_device *rdev, int reg)
{
	/* FIXME: implement */
}

bool r600_card_posted(struct radeon_device *rdev)
{
	uint32_t reg;

	/* first check CRTCs */
	reg = RREG32(D1CRTC_CONTROL) |
		RREG32(D2CRTC_CONTROL);
	if (reg & CRTC_EN)
		return true;

	/* then check MEM_SIZE, in case the crtcs are off */
	if (RREG32(CONFIG_MEMSIZE))
		return true;

	return false;
}
int r600_startup(struct radeon_device *rdev)
{
	int r;

	r600_mc_program(rdev);
	if (rdev->flags & RADEON_IS_AGP) {
		r600_agp_enable(rdev);
	} else {
		r = r600_pcie_gart_enable(rdev);
		if (r)
			return r;
	}
	r600_gpu_init(rdev);

	r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
			      &rdev->r600_blit.shader_gpu_addr);
	if (r) {
		DRM_ERROR("failed to pin blit object %d\n", r);
		return r;
	}

	r = radeon_ring_init(rdev, rdev->cp.ring_size);
	if (r)
		return r;
	r = r600_cp_load_microcode(rdev);
	if (r)
		return r;
	r = r600_cp_resume(rdev);
	if (r)
		return r;
	/* the write back buffer is not vital so don't worry about failure */
	r600_wb_enable(rdev);
	return 0;
}
void r600_vga_set_state(struct radeon_device *rdev, bool state)
{
	uint32_t temp;

	temp = RREG32(CONFIG_CNTL);
	if (state == false) {
		temp &= ~(1<<0);
		temp |= (1<<1);
	}
	WREG32(CONFIG_CNTL, temp);
}
int r600_resume(struct radeon_device *rdev)
{
	int r;

	/* Do not reset GPU before posting; on r600 hw, unlike on r500 hw,
	 * posting will perform the necessary tasks to bring the GPU back
	 * into good shape.
	 */
	/* post card */
	atom_asic_init(rdev->mode_info.atom_context);
	/* Initialize clocks */
	r = radeon_clocks_init(rdev);
	if (r)
		return r;

	r = r600_startup(rdev);
	if (r) {
		DRM_ERROR("r600 startup failed on resume\n");
		return r;
	}

	r = r600_ib_test(rdev);
	if (r) {
		DRM_ERROR("radeon: failed testing IB (%d).\n", r);
		return r;
	}
	return r;
}
int r600_suspend(struct radeon_device *rdev)
{
	/* FIXME: we should wait for ring to be empty */
	r600_cp_stop(rdev);
	rdev->cp.ready = false;
	r600_wb_disable(rdev);
	r600_pcie_gart_disable(rdev);
	/* unpin shaders bo */
	radeon_object_unpin(rdev->r600_blit.shader_obj);
	return 0;
}
/* The plan is to move initialization into this function and use
 * helper functions so that radeon_device_init does pretty much
 * nothing more than calling the asic-specific functions. This
 * should also allow us to remove a bunch of callback functions
 * used by the asic init path.
 */
int r600_init(struct radeon_device *rdev)
{
	int r;

	r = radeon_dummy_page_init(rdev);
	if (r)
		return r;
	if (r600_debugfs_mc_info_init(rdev)) {
		DRM_ERROR("Failed to register debugfs file for mc !\n");
	}
	/* This doesn't do much */
	r = radeon_gem_init(rdev);
	if (r)
		return r;
	/* Read BIOS */
	if (!radeon_get_bios(rdev)) {
		if (ASIC_IS_AVIVO(rdev))
			return -EINVAL;
	}
	/* Must be an ATOMBIOS */
	if (!rdev->is_atom_bios) {
		dev_err(rdev->dev, "Expecting atombios for R600 GPU\n");
		return -EINVAL;
	}
	r = radeon_atombios_init(rdev);
	if (r)
		return r;
	/* Post card if necessary */
	if (!r600_card_posted(rdev) && rdev->bios) {
		DRM_INFO("GPU not posted. posting now...\n");
		atom_asic_init(rdev->mode_info.atom_context);
	}
	/* Initialize scratch registers */
	r600_scratch_init(rdev);
	/* Initialize surface registers */
	radeon_surface_init(rdev);
	radeon_get_clock_info(rdev->ddev);
	r = radeon_clocks_init(rdev);
	if (r)
		return r;
	/* Fence driver */
	r = radeon_fence_driver_init(rdev);
	if (r)
		return r;
	r = r600_mc_init(rdev);
	if (r)
		return r;
	/* Memory manager */
	r = radeon_object_init(rdev);
	if (r)
		return r;
	rdev->cp.ring_obj = NULL;
	r600_ring_init(rdev, 1024 * 1024);

	if (!rdev->me_fw || !rdev->pfp_fw) {
		r = r600_cp_init_microcode(rdev);
		if (r) {
			DRM_ERROR("Failed to load firmware!\n");
			return r;
		}
	}

	r = r600_pcie_gart_init(rdev);
	if (r)
		return r;

	rdev->accel_working = true;
	r = r600_blit_init(rdev);
	if (r) {
		DRM_ERROR("radeon: failed blitter (%d).\n", r);
		return r;
	}

	r = r600_startup(rdev);
	if (r) {
		radeon_ring_fini(rdev);
		r600_pcie_gart_fini(rdev);
		rdev->accel_working = false;
	}
	if (rdev->accel_working) {
		r = radeon_ib_pool_init(rdev);
		if (r) {
			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
			rdev->accel_working = false;
		}
		r = r600_ib_test(rdev);
		if (r) {
			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
			rdev->accel_working = false;
		}
	}
	return 0;
}
void r600_fini(struct radeon_device *rdev)
{
	/* Suspend operations */
	r600_suspend(rdev);

	r600_blit_fini(rdev);
	radeon_ring_fini(rdev);
	r600_wb_fini(rdev);
	r600_pcie_gart_fini(rdev);
	radeon_gem_fini(rdev);
	radeon_fence_driver_fini(rdev);
	radeon_clocks_fini(rdev);
	if (rdev->flags & RADEON_IS_AGP)
		radeon_agp_fini(rdev);
	radeon_object_fini(rdev);
	radeon_atombios_fini(rdev);
	kfree(rdev->bios);
	rdev->bios = NULL;
	radeon_dummy_page_fini(rdev);
}
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib)
{
	/* FIXME: implement */
	radeon_ring_write(rdev, PACKET3(PACKET3_INDIRECT_BUFFER, 2));
	radeon_ring_write(rdev, ib->gpu_addr & 0xFFFFFFFC);
	radeon_ring_write(rdev, upper_32_bits(ib->gpu_addr) & 0xFF);
	radeon_ring_write(rdev, ib->length_dw);
}

int r600_ib_test(struct radeon_device *rdev)
{
	struct radeon_ib *ib;
	uint32_t scratch;
	uint32_t tmp = 0;
	unsigned i;
	int r;

	r = radeon_scratch_get(rdev, &scratch);
	if (r) {
		DRM_ERROR("radeon: failed to get scratch reg (%d).\n", r);
		return r;
	}
	WREG32(scratch, 0xCAFEDEAD);
	r = radeon_ib_get(rdev, &ib);
	if (r) {
		DRM_ERROR("radeon: failed to get ib (%d).\n", r);
		return r;
	}
	ib->ptr[0] = PACKET3(PACKET3_SET_CONFIG_REG, 1);
	ib->ptr[1] = ((scratch - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
	ib->ptr[2] = 0xDEADBEEF;
	ib->ptr[3] = PACKET2(0);
	ib->ptr[4] = PACKET2(0);
	ib->ptr[5] = PACKET2(0);
	ib->ptr[6] = PACKET2(0);
	ib->ptr[7] = PACKET2(0);
	ib->ptr[8] = PACKET2(0);
	ib->ptr[9] = PACKET2(0);
	ib->ptr[10] = PACKET2(0);
	ib->ptr[11] = PACKET2(0);
	ib->ptr[12] = PACKET2(0);
	ib->ptr[13] = PACKET2(0);
	ib->ptr[14] = PACKET2(0);
	ib->ptr[15] = PACKET2(0);
	ib->length_dw = 16;
	r = radeon_ib_schedule(rdev, ib);
	if (r) {
		radeon_scratch_free(rdev, scratch);
		radeon_ib_free(rdev, &ib);
		DRM_ERROR("radeon: failed to schedule ib (%d).\n", r);
		return r;
	}
	r = radeon_fence_wait(ib->fence, false);
	if (r) {
		DRM_ERROR("radeon: fence wait failed (%d).\n", r);
		return r;
	}
	for (i = 0; i < rdev->usec_timeout; i++) {
		tmp = RREG32(scratch);
		if (tmp == 0xDEADBEEF)
			break;
		udelay(1);
	}
	if (i < rdev->usec_timeout) {
		DRM_INFO("ib test succeeded in %u usecs\n", i);
	} else {
		DRM_ERROR("radeon: ib test failed (scratch(0x%04X)=0x%08X)\n",
			  scratch, tmp);
		r = -EINVAL;
	}
	radeon_scratch_free(rdev, scratch);
	radeon_ib_free(rdev, &ib);
	return r;
}
#if defined(CONFIG_DEBUG_FS)

static int r600_debugfs_cp_ring_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;
	uint32_t rdp, wdp;
	unsigned count, i, j;

	radeon_ring_free_size(rdev);
	rdp = RREG32(CP_RB_RPTR);
	wdp = RREG32(CP_RB_WPTR);
	count = (rdp + rdev->cp.ring_size - wdp) & rdev->cp.ptr_mask;
	seq_printf(m, "CP_STAT 0x%08x\n", RREG32(CP_STAT));
	seq_printf(m, "CP_RB_WPTR 0x%08x\n", wdp);
	seq_printf(m, "CP_RB_RPTR 0x%08x\n", rdp);
	seq_printf(m, "%u free dwords in ring\n", rdev->cp.ring_free_dw);
	seq_printf(m, "%u dwords in ring\n", count);
	for (j = 0; j <= count; j++) {
		i = (rdp + j) & rdev->cp.ptr_mask;
		seq_printf(m, "r[%04d]=0x%08x\n", i, rdev->cp.ring[i]);
	}
	return 0;
}

static int r600_debugfs_mc_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	struct radeon_device *rdev = dev->dev_private;

	DREG32_SYS(m, rdev, R_000E50_SRBM_STATUS);
	DREG32_SYS(m, rdev, VM_L2_STATUS);
	return 0;
}

static struct drm_info_list r600_mc_info_list[] = {
	{"r600_mc_info", r600_debugfs_mc_info, 0, NULL},
	{"r600_ring_info", r600_debugfs_cp_ring_info, 0, NULL},
};
#endif

int r600_debugfs_mc_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
	return radeon_debugfs_add_files(rdev, r600_mc_info_list, ARRAY_SIZE(r600_mc_info_list));
#else
	return 0;
#endif
}