/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */

#include <linux/seq_file.h>
#include "drmP.h"
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
#include "radeon_drm.h"
#include "radeon_share.h"

/* r300,r350,rv350,rv370,rv380 depend on: */
void r100_hdp_reset(struct radeon_device *rdev);
int r100_cp_reset(struct radeon_device *rdev);
int r100_rb2d_reset(struct radeon_device *rdev);
int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
int r100_pci_gart_enable(struct radeon_device *rdev);
void r100_pci_gart_disable(struct radeon_device *rdev);
void r100_mc_setup(struct radeon_device *rdev);
void r100_mc_disable_clients(struct radeon_device *rdev);
int r100_gui_wait_for_idle(struct radeon_device *rdev);
int r100_cs_packet_parse(struct radeon_cs_parser *p,
                         struct radeon_cs_packet *pkt,
                         unsigned idx);
int r100_cs_packet_parse_vline(struct radeon_cs_parser *p);
int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
                              struct radeon_cs_reloc **cs_reloc);
int r100_cs_parse_packet0(struct radeon_cs_parser *p,
                          struct radeon_cs_packet *pkt,
                          const unsigned *auth, unsigned n,
                          radeon_packet0_check_t check);
void r100_cs_dump_packet(struct radeon_cs_parser *p,
                         struct radeon_cs_packet *pkt);
int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
                                         struct radeon_cs_packet *pkt,
                                         struct radeon_object *robj);

/*
 * This file gathers functions specific to:
 * r300,r350,rv350,rv370,rv380
 *
 * Some of these functions might be used by newer ASICs.
 */
void r300_gpu_init(struct radeon_device *rdev);
int r300_mc_wait_for_idle(struct radeon_device *rdev);
int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev);

/*
 * rv370,rv380 PCIE GART
 */
void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev)
{
        uint32_t tmp;
        int i;

        /* Workaround for a HW bug: flush the TLB twice */
        for (i = 0; i < 2; i++) {
                tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
                WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB);
                (void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
                WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
        }
}
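
/*
 * Set up the PCIE GART: the page table lives in VRAM, one 32-bit
 * entry per GPU page, and the aperture spans
 * [mc.gtt_location, mc.gtt_location + mc.gtt_size).
 */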
int rv370_pcie_gart_enable(struct radeon_device *rdev)
{
        uint32_t table_addr;
        uint32_t tmp;
        int r;

        /* Initialize common gart structure */
        r = radeon_gart_init(rdev);
        if (r) {
                return r;
        }
        r = rv370_debugfs_pcie_gart_info_init(rdev);
        if (r) {
                DRM_ERROR("Failed to register debugfs file for PCIE gart !\n");
        }
        rdev->gart.table_size = rdev->gart.num_gpu_pages * 4;
        r = radeon_gart_table_vram_alloc(rdev);
        if (r) {
                return r;
        }
        /* discard memory request outside of configured range */
        tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
        WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
        WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_location);
        tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 4096;
        WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp);
        WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0);
        WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0);
        table_addr = rdev->gart.table_addr;
        WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr);
        /* FIXME: setup default page */
        WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_location);
        WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0);
        /* Clear error */
        WREG32_PCIE(0x18, 0);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
        tmp |= RADEON_PCIE_TX_GART_EN;
        tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
        WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp);
        rv370_pcie_gart_tlb_flush(rdev);
        DRM_INFO("PCIE GART of %uM enabled (table at 0x%08X).\n",
                 rdev->mc.gtt_size >> 20, table_addr);
        rdev->gart.ready = true;
        return 0;
}

void rv370_pcie_gart_disable(struct radeon_device *rdev)
{
        uint32_t tmp;

        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
        tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
        WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
        if (rdev->gart.table.vram.robj) {
                radeon_object_kunmap(rdev->gart.table.vram.robj);
                radeon_object_unpin(rdev->gart.table.vram.robj);
        }
}
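
/*
 * Write one GART page table entry.  The 40-bit page address is packed
 * into 32 bits: address bits 8..31 land in the low 24 bits, bits
 * 32..39 in the top byte, and the low nibble (0xc) appears to carry
 * the entry's control bits.  E.g. page address 0x12345000 becomes the
 * entry 0x0012345c.
 */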
int rv370_pcie_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
{
        void __iomem *ptr = (void *)rdev->gart.table.vram.ptr;

        if (i < 0 || i > rdev->gart.num_gpu_pages) {
                return -EINVAL;
        }
        addr = (lower_32_bits(addr) >> 8) |
               ((upper_32_bits(addr) & 0xff) << 24) |
               0xc;
        /* On x86 we want this to be CPU endian; on powerpc without HW
         * swappers it gets swapped on the way into VRAM, so there is
         * no need for cpu_to_le32 on VRAM tables. */
        writel(addr, ((void __iomem *)ptr) + (i * 4));
        return 0;
}

int r300_gart_enable(struct radeon_device *rdev)
{
#if __OS_HAS_AGP
        if (rdev->flags & RADEON_IS_AGP) {
                if (rdev->family > CHIP_RV350) {
                        rv370_pcie_gart_disable(rdev);
                } else {
                        r100_pci_gart_disable(rdev);
                }
                return 0;
        }
#endif
        if (rdev->flags & RADEON_IS_PCIE) {
                rdev->asic->gart_disable = &rv370_pcie_gart_disable;
                rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
                rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
                return rv370_pcie_gart_enable(rdev);
        }
        return r100_pci_gart_enable(rdev);
}
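
/*
 * MC init: choose where VRAM and GTT live in the GPU address space
 * (the AGP base for the GTT when AGP initializes), then program the
 * ranges with the MC clients disabled.
 */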
int r300_mc_init(struct radeon_device *rdev)
{
        int r;

        if (r100_debugfs_rbbm_init(rdev)) {
                DRM_ERROR("Failed to register debugfs file for RBBM !\n");
        }

        r300_gpu_init(rdev);
        r100_pci_gart_disable(rdev);
        if (rdev->flags & RADEON_IS_PCIE) {
                rv370_pcie_gart_disable(rdev);
        }

        /* Setup GPU memory space */
        rdev->mc.vram_location = 0xFFFFFFFFUL;
        rdev->mc.gtt_location = 0xFFFFFFFFUL;
        if (rdev->flags & RADEON_IS_AGP) {
                r = radeon_agp_init(rdev);
                if (r) {
                        printk(KERN_WARNING "[drm] Disabling AGP\n");
                        rdev->flags &= ~RADEON_IS_AGP;
                        rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
                } else {
                        rdev->mc.gtt_location = rdev->mc.agp_base;
                }
        }
        r = radeon_mc_setup(rdev);
        if (r) {
                return r;
        }

        /* Program GPU memory space */
        r100_mc_disable_clients(rdev);
        if (r300_mc_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait MC idle while "
                       "programming pipes. Bad things might happen.\n");
        }
        r100_mc_setup(rdev);
        return 0;
}

void r300_mc_fini(struct radeon_device *rdev)
{
        if (rdev->flags & RADEON_IS_PCIE) {
                rv370_pcie_gart_disable(rdev);
                radeon_gart_table_vram_free(rdev);
        } else {
                r100_pci_gart_disable(rdev);
                radeon_gart_table_ram_free(rdev);
        }
        radeon_gart_fini(rdev);
}

/*
 * Fence emission
 */
void r300_fence_ring_emit(struct radeon_device *rdev,
                          struct radeon_fence *fence)
{
        /* Whoever calls radeon_fence_emit should call ring_lock and ask
         * for enough space (today callers are ib schedule and buffer move) */
        /* Write SC register so SC & US assert idle */
        radeon_ring_write(rdev, PACKET0(0x43E0, 0));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, PACKET0(0x43E4, 0));
        radeon_ring_write(rdev, 0);
        /* Flush 3D cache */
        radeon_ring_write(rdev, PACKET0(0x4E4C, 0));
        radeon_ring_write(rdev, (2 << 0));
        radeon_ring_write(rdev, PACKET0(0x4F18, 0));
        radeon_ring_write(rdev, (1 << 0));
        /* Wait until IDLE & CLEAN */
        radeon_ring_write(rdev, PACKET0(0x1720, 0));
        radeon_ring_write(rdev, (1 << 17) | (1 << 16) | (1 << 9));
        /* Emit fence sequence & fire IRQ */
        radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
        radeon_ring_write(rdev, fence->seq);
        radeon_ring_write(rdev, PACKET0(RADEON_GEN_INT_STATUS, 0));
        radeon_ring_write(rdev, RADEON_SW_INT_FIRE);
}

/*
 * Global GPU functions
 */
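
/*
 * Copy num_pages pages with the CP DMA packet.  A single request moves
 * at most 0x1FFFFF bytes, so the copy is split into
 * DIV_ROUND_UP(size, 0x1FFFFF) chunks of one 4-dword packet each.
 */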
int r300_copy_dma(struct radeon_device *rdev,
                  uint64_t src_offset,
                  uint64_t dst_offset,
                  unsigned num_pages,
                  struct radeon_fence *fence)
{
        uint32_t size;
        uint32_t cur_size;
        int i, num_loops;
        int r = 0;

        /* radeon pitch is /64 */
        size = num_pages << PAGE_SHIFT;
        num_loops = DIV_ROUND_UP(size, 0x1FFFFF);
        r = radeon_ring_lock(rdev, num_loops * 4 + 64);
        if (r) {
                DRM_ERROR("radeon: moving bo (%d).\n", r);
                return r;
        }
        /* Must wait for 2D idle & clean before DMA or hangs might happen */
        radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
        radeon_ring_write(rdev, (1 << 16));
        for (i = 0; i < num_loops; i++) {
                cur_size = size;
                if (cur_size > 0x1FFFFF) {
                        cur_size = 0x1FFFFF;
                }
                size -= cur_size;
                radeon_ring_write(rdev, PACKET0(0x720, 2));
                radeon_ring_write(rdev, src_offset);
                radeon_ring_write(rdev, dst_offset);
                radeon_ring_write(rdev, cur_size | (1 << 31) | (1 << 30));
                src_offset += cur_size;
                dst_offset += cur_size;
        }
        radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
        radeon_ring_write(rdev, RADEON_WAIT_DMA_GUI_IDLE);
        if (fence) {
                r = radeon_fence_emit(rdev, fence);
        }
        radeon_ring_unlock_commit(rdev);
        return r;
}
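
/*
 * Prime the ring with the initial GPU state: tile/pipe config, isync
 * ordering, 2D/3D idle waits, cache flushes and multisample positions.
 */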
void r300_ring_start(struct radeon_device *rdev)
{
        unsigned gb_tile_config;
        int r;

        /* Sub pixel 1/12 so we can have 4K rendering according to doc */
        gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
        switch (rdev->num_gb_pipes) {
        case 2:
                gb_tile_config |= R300_PIPE_COUNT_R300;
                break;
        case 3:
                gb_tile_config |= R300_PIPE_COUNT_R420_3P;
                break;
        case 4:
                gb_tile_config |= R300_PIPE_COUNT_R420;
                break;
        case 1:
        default:
                gb_tile_config |= R300_PIPE_COUNT_RV350;
                break;
        }

        r = radeon_ring_lock(rdev, 64);
        if (r) {
                return;
        }
        radeon_ring_write(rdev, PACKET0(RADEON_ISYNC_CNTL, 0));
        radeon_ring_write(rdev,
                          RADEON_ISYNC_ANY2D_IDLE3D |
                          RADEON_ISYNC_ANY3D_IDLE2D |
                          RADEON_ISYNC_WAIT_IDLEGUI |
                          RADEON_ISYNC_CPSCRATCH_IDLEGUI);
        radeon_ring_write(rdev, PACKET0(R300_GB_TILE_CONFIG, 0));
        radeon_ring_write(rdev, gb_tile_config);
        radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
        radeon_ring_write(rdev,
                          RADEON_WAIT_2D_IDLECLEAN |
                          RADEON_WAIT_3D_IDLECLEAN);
        radeon_ring_write(rdev, PACKET0(0x170C, 0));
        radeon_ring_write(rdev, 1 << 31);
        radeon_ring_write(rdev, PACKET0(R300_GB_SELECT, 0));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, PACKET0(R300_GB_ENABLE, 0));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
        radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
        radeon_ring_write(rdev, PACKET0(RADEON_WAIT_UNTIL, 0));
        radeon_ring_write(rdev,
                          RADEON_WAIT_2D_IDLECLEAN |
                          RADEON_WAIT_3D_IDLECLEAN);
        radeon_ring_write(rdev, PACKET0(R300_GB_AA_CONFIG, 0));
        radeon_ring_write(rdev, 0);
        radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE);
        radeon_ring_write(rdev, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0));
        radeon_ring_write(rdev, R300_ZC_FLUSH | R300_ZC_FREE);
        radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS0, 0));
        radeon_ring_write(rdev,
                          ((6 << R300_MS_X0_SHIFT) |
                           (6 << R300_MS_Y0_SHIFT) |
                           (6 << R300_MS_X1_SHIFT) |
                           (6 << R300_MS_Y1_SHIFT) |
                           (6 << R300_MS_X2_SHIFT) |
                           (6 << R300_MS_Y2_SHIFT) |
                           (6 << R300_MSBD0_Y_SHIFT) |
                           (6 << R300_MSBD0_X_SHIFT)));
        radeon_ring_write(rdev, PACKET0(R300_GB_MSPOS1, 0));
        radeon_ring_write(rdev,
                          ((6 << R300_MS_X3_SHIFT) |
                           (6 << R300_MS_Y3_SHIFT) |
                           (6 << R300_MS_X4_SHIFT) |
                           (6 << R300_MS_Y4_SHIFT) |
                           (6 << R300_MS_X5_SHIFT) |
                           (6 << R300_MS_Y5_SHIFT) |
                           (6 << R300_MSBD1_SHIFT)));
        radeon_ring_write(rdev, PACKET0(R300_GA_ENHANCE, 0));
        radeon_ring_write(rdev, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL);
        radeon_ring_write(rdev, PACKET0(R300_GA_POLY_MODE, 0));
        radeon_ring_write(rdev,
                          R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE);
        radeon_ring_write(rdev, PACKET0(R300_GA_ROUND_MODE, 0));
        radeon_ring_write(rdev,
                          R300_GEOMETRY_ROUND_NEAREST |
                          R300_COLOR_ROUND_NEAREST);
        radeon_ring_unlock_commit(rdev);
}

void r300_errata(struct radeon_device *rdev)
{
        rdev->pll_errata = 0;

        if (rdev->family == CHIP_R300 &&
            (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) {
                rdev->pll_errata |= CHIP_ERRATA_R300_CG;
        }
}

int r300_mc_wait_for_idle(struct radeon_device *rdev)
{
        unsigned i;
        uint32_t tmp;

        for (i = 0; i < rdev->usec_timeout; i++) {
                /* read MC_STATUS */
                tmp = RREG32(0x0150);
                if (tmp & (1 << 4)) {
                        return 0;
                }
                DRM_UDELAY(1);
        }
        return -1;
}

void r300_gpu_init(struct radeon_device *rdev)
{
        uint32_t gb_tile_config, tmp;

        r100_hdp_reset(rdev);
        /* FIXME: does rv380 have only one pipe? */
        if ((rdev->family == CHIP_R300) || (rdev->family == CHIP_R350)) {
                /* r300,r350 */
                rdev->num_gb_pipes = 2;
        } else {
                /* rv350,rv370,rv380 */
                rdev->num_gb_pipes = 1;
        }
        rdev->num_z_pipes = 1;
        gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16);
        switch (rdev->num_gb_pipes) {
        case 2:
                gb_tile_config |= R300_PIPE_COUNT_R300;
                break;
        case 3:
                gb_tile_config |= R300_PIPE_COUNT_R420_3P;
                break;
        case 4:
                gb_tile_config |= R300_PIPE_COUNT_R420;
                break;
        case 1:
        default:
                gb_tile_config |= R300_PIPE_COUNT_RV350;
                break;
        }
        WREG32(R300_GB_TILE_CONFIG, gb_tile_config);

        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait GUI idle while "
                       "programming pipes. Bad things might happen.\n");
        }

        tmp = RREG32(0x170C);
        WREG32(0x170C, tmp | (1 << 31));

        WREG32(R300_RB2D_DSTCACHE_MODE,
               R300_DC_AUTOFLUSH_ENABLE |
               R300_DC_DC_DISABLE_IGNORE_PE);

        if (r100_gui_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait GUI idle while "
                       "programming pipes. Bad things might happen.\n");
        }
        if (r300_mc_wait_for_idle(rdev)) {
                printk(KERN_WARNING "Failed to wait MC idle while "
                       "programming pipes. Bad things might happen.\n");
        }
        DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized.\n",
                 rdev->num_gb_pipes, rdev->num_z_pipes);
}

int r300_ga_reset(struct radeon_device *rdev)
{
        uint32_t tmp;
        bool reinit_cp;
        int i;

        reinit_cp = rdev->cp.ready;
        rdev->cp.ready = false;
        for (i = 0; i < rdev->usec_timeout; i++) {
                WREG32(RADEON_CP_CSQ_MODE, 0);
                WREG32(RADEON_CP_CSQ_CNTL, 0);
                WREG32(RADEON_RBBM_SOFT_RESET, 0x32005);
                (void)RREG32(RADEON_RBBM_SOFT_RESET);
                udelay(200);
                WREG32(RADEON_RBBM_SOFT_RESET, 0);
                /* Wait to prevent race in RBBM_STATUS */
                mdelay(1);
                tmp = RREG32(RADEON_RBBM_STATUS);
                if (tmp & ((1 << 20) | (1 << 26))) {
                        DRM_ERROR("VAP & CP still busy (RBBM_STATUS=0x%08X)", tmp);
                        /* GA is still busy, soft reset it */
                        WREG32(0x429C, 0x200);
                        WREG32(R300_VAP_PVS_STATE_FLUSH_REG, 0);
                        WREG32(0x43E0, 0);
                        WREG32(0x43E4, 0);
                        WREG32(0x24AC, 0);
                }
                /* Wait to prevent race in RBBM_STATUS */
                mdelay(1);
                tmp = RREG32(RADEON_RBBM_STATUS);
                if (!(tmp & ((1 << 20) | (1 << 26)))) {
                        break;
                }
        }
        for (i = 0; i < rdev->usec_timeout; i++) {
                tmp = RREG32(RADEON_RBBM_STATUS);
                if (!(tmp & ((1 << 20) | (1 << 26)))) {
                        DRM_INFO("GA reset succeeded (RBBM_STATUS=0x%08X)\n",
                                 tmp);
                        if (reinit_cp) {
                                return r100_cp_init(rdev, rdev->cp.ring_size);
                        }
                        return 0;
                }
                DRM_UDELAY(1);
        }
        tmp = RREG32(RADEON_RBBM_STATUS);
        DRM_ERROR("Failed to reset GA ! (RBBM_STATUS=0x%08X)\n", tmp);
        return -1;
}
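
/*
 * Soft reset the GPU block by block, driven by the busy bits in
 * RBBM_STATUS: HDP first, then RB2D, GA and CP only when their status
 * bits report busy.
 */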
int r300_gpu_reset(struct radeon_device *rdev)
{
        uint32_t status;

        /* reset order likely matters */
        status = RREG32(RADEON_RBBM_STATUS);
        /* reset HDP */
        r100_hdp_reset(rdev);
        /* reset rb2d */
        if (status & ((1 << 17) | (1 << 18) | (1 << 27))) {
                r100_rb2d_reset(rdev);
        }
        /* reset GA */
        if (status & ((1 << 20) | (1 << 26))) {
                r300_ga_reset(rdev);
        }
        /* reset CP */
        status = RREG32(RADEON_RBBM_STATUS);
        if (status & (1 << 16)) {
                r100_cp_reset(rdev);
        }
        /* Check if GPU is idle */
        status = RREG32(RADEON_RBBM_STATUS);
        if (status & (1 << 31)) {
                DRM_ERROR("Failed to reset GPU (RBBM_STATUS=0x%08X)\n", status);
                return -1;
        }
        DRM_INFO("GPU reset succeeded (RBBM_STATUS=0x%08X)\n", status);
        return 0;
}

/*
 * r300,r350,rv350,rv380 VRAM info
 */
void r300_vram_info(struct radeon_device *rdev)
{
        uint32_t tmp;

        /* DDR for all cards after R300 & IGP */
        rdev->mc.vram_is_ddr = true;
        tmp = RREG32(RADEON_MEM_CNTL);
        if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
                rdev->mc.vram_width = 128;
        } else {
                rdev->mc.vram_width = 64;
        }

        r100_vram_init_sizes(rdev);
}

/*
 * PCIE Lanes
 */
void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
{
        uint32_t link_width_cntl, mask;

        if (rdev->flags & RADEON_IS_IGP)
                return;

        if (!(rdev->flags & RADEON_IS_PCIE))
                return;

        /* FIXME wait for idle */

        switch (lanes) {
        case 0:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X0;
                break;
        case 1:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X1;
                break;
        case 2:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X2;
                break;
        case 4:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X4;
                break;
        case 8:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X8;
                break;
        case 12:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X12;
                break;
        case 16:
        default:
                mask = RADEON_PCIE_LC_LINK_WIDTH_X16;
                break;
        }

        link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);

        if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) ==
            (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT))
                return;

        link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK |
                             RADEON_PCIE_LC_RECONFIG_NOW |
                             RADEON_PCIE_LC_RECONFIG_LATER |
                             RADEON_PCIE_LC_SHORT_RECONFIG_EN);
        link_width_cntl |= mask;
        WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl);
        WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl |
                                                     RADEON_PCIE_LC_RECONFIG_NOW));

        /* wait for lane set to complete */
        link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
        while (link_width_cntl == 0xffffffff)
                link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL);
}

/*
 * Debugfs info
 */
#if defined(CONFIG_DEBUG_FS)
static int rv370_debugfs_pcie_gart_info(struct seq_file *m, void *data)
{
        struct drm_info_node *node = (struct drm_info_node *) m->private;
        struct drm_device *dev = node->minor->dev;
        struct radeon_device *rdev = dev->dev_private;
        uint32_t tmp;

        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
        seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
        seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
        seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
        seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
        seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
        seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
        tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
        seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);
        return 0;
}

static struct drm_info_list rv370_pcie_gart_info_list[] = {
        {"rv370_pcie_gart_info", rv370_debugfs_pcie_gart_info, 0, NULL},
};
#endif

int rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev)
{
#if defined(CONFIG_DEBUG_FS)
        return radeon_debugfs_add_files(rdev, rv370_pcie_gart_info_list, 1);
#else
        return 0;
#endif
}

/*
 * CS functions
 */
struct r300_cs_track_cb {
        struct radeon_object    *robj;
        unsigned                pitch;
        unsigned                cpp;
        unsigned                offset;
};

struct r300_cs_track_array {
        struct radeon_object    *robj;
        unsigned                esize;
};

struct r300_cs_track_texture {
        struct radeon_object    *robj;
        unsigned                pitch;
        unsigned                width;
        unsigned                height;
        unsigned                num_levels;
        unsigned                cpp;
        unsigned                tex_coord_type;
        unsigned                txdepth;
        unsigned                width_11;
        unsigned                height_11;
        bool                    use_pitch;
        bool                    enabled;
        bool                    roundup_w;
        bool                    roundup_h;
};

struct r300_cs_track {
        unsigned                        num_cb;
        unsigned                        maxy;
        unsigned                        vtx_size;
        unsigned                        vap_vf_cntl;
        unsigned                        immd_dwords;
        unsigned                        num_arrays;
        unsigned                        max_indx;
        struct r300_cs_track_array      arrays[11];
        struct r300_cs_track_cb         cb[4];
        struct r300_cs_track_cb         zb;
        struct r300_cs_track_texture    textures[16];
        bool                            z_enabled;
};
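
/*
 * Dump the tracked state of one texture unit; called when a texture
 * bounds check fails so the offending setup lands in the log.
 */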
static inline void r300_cs_track_texture_print(struct r300_cs_track_texture *t)
{
        DRM_ERROR("pitch %d\n", t->pitch);
        DRM_ERROR("width %d\n", t->width);
        DRM_ERROR("height %d\n", t->height);
        DRM_ERROR("num levels %d\n", t->num_levels);
        DRM_ERROR("depth %d\n", t->txdepth);
        DRM_ERROR("bpp %d\n", t->cpp);
        DRM_ERROR("coordinate type %d\n", t->tex_coord_type);
        DRM_ERROR("width round to power of 2 %d\n", t->roundup_w);
        DRM_ERROR("height round to power of 2 %d\n", t->roundup_h);
}

static inline int r300_cs_track_texture_check(struct radeon_device *rdev,
                                              struct r300_cs_track *track)
{
        struct radeon_object *robj;
        unsigned long size;
        unsigned u, i, w, h;

        for (u = 0; u < 16; u++) {
                if (!track->textures[u].enabled)
                        continue;
                robj = track->textures[u].robj;
                if (robj == NULL) {
                        DRM_ERROR("No texture bound to unit %u\n", u);
                        return -EINVAL;
                }
                size = 0;
                for (i = 0; i <= track->textures[u].num_levels; i++) {
                        if (track->textures[u].use_pitch) {
                                w = track->textures[u].pitch / (1 << i);
                        } else {
                                w = track->textures[u].width / (1 << i);
                                if (rdev->family >= CHIP_RV515)
                                        w |= track->textures[u].width_11;
                                if (track->textures[u].roundup_w)
                                        w = roundup_pow_of_two(w);
                        }
                        h = track->textures[u].height / (1 << i);
                        if (rdev->family >= CHIP_RV515)
                                h |= track->textures[u].height_11;
                        if (track->textures[u].roundup_h)
                                h = roundup_pow_of_two(h);
                        size += w * h;
                }
                size *= track->textures[u].cpp;
                switch (track->textures[u].tex_coord_type) {
                case 0:
                        break;
                case 1:
                        size *= (1 << track->textures[u].txdepth);
                        break;
                case 2:
                        size *= 6;
                        break;
                default:
                        DRM_ERROR("Invalid texture coordinate type %u for unit "
                                  "%u\n", track->textures[u].tex_coord_type, u);
                        return -EINVAL;
                }
                if (size > radeon_object_size(robj)) {
                        DRM_ERROR("Texture of unit %u needs %lu bytes but is "
                                  "%lu\n", u, size, radeon_object_size(robj));
                        r300_cs_track_texture_print(&track->textures[u]);
                        return -EINVAL;
                }
        }
        return 0;
}
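
/*
 * Validate the tracked draw state: each enabled color buffer, the z
 * buffer and every vertex array must fit in its backing radeon_object
 * for the vertex count implied by VAP_VF_CNTL.
 */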
int r300_cs_track_check(struct radeon_device *rdev, struct r300_cs_track *track)
{
        unsigned i;
        unsigned long size;
        unsigned prim_walk;
        unsigned nverts;

        for (i = 0; i < track->num_cb; i++) {
                if (track->cb[i].robj == NULL) {
                        DRM_ERROR("[drm] No buffer for color buffer %d !\n", i);
                        return -EINVAL;
                }
                size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
                size += track->cb[i].offset;
                if (size > radeon_object_size(track->cb[i].robj)) {
                        DRM_ERROR("[drm] Buffer too small for color buffer %d "
                                  "(need %lu have %lu) !\n", i, size,
                                  radeon_object_size(track->cb[i].robj));
                        DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
                                  i, track->cb[i].pitch, track->cb[i].cpp,
                                  track->cb[i].offset, track->maxy);
                        return -EINVAL;
                }
        }
        if (track->z_enabled) {
                if (track->zb.robj == NULL) {
                        DRM_ERROR("[drm] No buffer for z buffer !\n");
                        return -EINVAL;
                }
                size = track->zb.pitch * track->zb.cpp * track->maxy;
                size += track->zb.offset;
                if (size > radeon_object_size(track->zb.robj)) {
                        DRM_ERROR("[drm] Buffer too small for z buffer "
                                  "(need %lu have %lu) !\n", size,
                                  radeon_object_size(track->zb.robj));
                        return -EINVAL;
                }
        }
        prim_walk = (track->vap_vf_cntl >> 4) & 0x3;
        nverts = (track->vap_vf_cntl >> 16) & 0xFFFF;
        switch (prim_walk) {
        case 1:
                for (i = 0; i < track->num_arrays; i++) {
                        size = track->arrays[i].esize * track->max_indx * 4;
                        if (track->arrays[i].robj == NULL) {
                                DRM_ERROR("(PW %u) Vertex array %u no buffer "
                                          "bound\n", prim_walk, i);
                                return -EINVAL;
                        }
                        if (size > radeon_object_size(track->arrays[i].robj)) {
                                DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
                                          "have %lu dwords\n", prim_walk, i,
                                          size >> 2,
                                          radeon_object_size(track->arrays[i].robj) >> 2);
                                DRM_ERROR("Max indices %u\n", track->max_indx);
                                return -EINVAL;
                        }
                }
                break;
        case 2:
                for (i = 0; i < track->num_arrays; i++) {
                        size = track->arrays[i].esize * (nverts - 1) * 4;
                        if (track->arrays[i].robj == NULL) {
                                DRM_ERROR("(PW %u) Vertex array %u no buffer "
                                          "bound\n", prim_walk, i);
                                return -EINVAL;
                        }
                        if (size > radeon_object_size(track->arrays[i].robj)) {
                                DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
                                          "have %lu dwords\n", prim_walk, i, size >> 2,
                                          radeon_object_size(track->arrays[i].robj) >> 2);
                                return -EINVAL;
                        }
                }
                break;
        case 3:
                size = track->vtx_size * nverts;
                if (size != track->immd_dwords) {
                        DRM_ERROR("IMMD draw %u dwords but needs %lu dwords\n",
                                  track->immd_dwords, size);
                        DRM_ERROR("VAP_VF_CNTL.NUM_VERTICES %u, VTX_SIZE %u\n",
                                  nverts, track->vtx_size);
                        return -EINVAL;
                }
                break;
        default:
                DRM_ERROR("[drm] Invalid primitive walk %d for VAP_VF_CNTL\n",
                          prim_walk);
                return -EINVAL;
        }
        return r300_cs_track_texture_check(rdev, track);
}

static inline void r300_cs_track_clear(struct r300_cs_track *track)
{
        unsigned i;

        track->num_cb = 4;
        track->maxy = 4096;
        for (i = 0; i < track->num_cb; i++) {
                track->cb[i].robj = NULL;
                track->cb[i].pitch = 8192;
                track->cb[i].cpp = 16;
                track->cb[i].offset = 0;
        }
        track->z_enabled = true;
        track->zb.robj = NULL;
        track->zb.pitch = 8192;
        track->zb.cpp = 4;
        track->zb.offset = 0;
        track->vtx_size = 0x7F;
        track->immd_dwords = 0xFFFFFFFFUL;
        track->num_arrays = 11;
        track->max_indx = 0x00FFFFFFUL;
        for (i = 0; i < track->num_arrays; i++) {
                track->arrays[i].robj = NULL;
                track->arrays[i].esize = 0x7F;
        }
        for (i = 0; i < 16; i++) {
                track->textures[i].pitch = 16536;
                track->textures[i].width = 16536;
                track->textures[i].height = 16536;
                track->textures[i].width_11 = 1 << 11;
                track->textures[i].height_11 = 1 << 11;
                track->textures[i].num_levels = 12;
                track->textures[i].txdepth = 16;
                track->textures[i].cpp = 64;
                track->textures[i].tex_coord_type = 1;
                track->textures[i].robj = NULL;
                /* CS IB emission code makes sure texture units are disabled */
                track->textures[i].enabled = false;
                track->textures[i].roundup_w = true;
                track->textures[i].roundup_h = true;
        }
}
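
/*
 * Register bitmap handed to r100_cs_parse_packet0(); it encodes which
 * packet0 registers a CS may write directly and which must be vetted
 * by r300_packet0_check() below.
 */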
static const unsigned r300_reg_safe_bm[159] = {
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0x17FF1FFF, 0xFFFFFFFC, 0xFFFFFFFF, 0xFF30FFBF,
        0xFFFFFFF8, 0xC3E6FFFF, 0xFFFFF6DF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFF03F,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFEFCE, 0xF00EBFFF, 0x007C0000,
        0xF0000078, 0xFF000009, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFF7FF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFC78, 0xFFFFFFFF, 0xFFFFFFFE, 0xFFFFFFFF,
        0x38FF8F50, 0xFFF88082, 0xF000000C, 0xFAE009FF,
        0x0000FFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000,
        0x00000000, 0x0000C100, 0x00000000, 0x00000000,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x00000000, 0xFFFF0000, 0xFFFFFFFF, 0xFF80FFFF,
        0x00000000, 0x00000000, 0x00000000, 0x00000000,
        0x0003FC01, 0xFFFFFCF8, 0xFF800B19,
};
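
/*
 * Check and fix up one packet0 register write: buffer offsets are
 * relocated through the reloc table, and pitch/format state is
 * recorded for the bounds check in r300_cs_track_check().
 */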
static int r300_packet0_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt,
                              unsigned idx, unsigned reg)
{
        struct radeon_cs_chunk *ib_chunk;
        struct radeon_cs_reloc *reloc;
        struct r300_cs_track *track;
        volatile uint32_t *ib;
        uint32_t tmp, tile_flags = 0;
        unsigned i;
        int r;

        ib = p->ib->ptr;
        ib_chunk = &p->chunks[p->chunk_ib_idx];
        track = (struct r300_cs_track *)p->track;
        switch (reg) {
        case AVIVO_D1MODE_VLINE_START_END:
        case RADEON_CRTC_GUI_TRIG_VLINE:
                r = r100_cs_packet_parse_vline(p);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                break;
        case RADEON_DST_PITCH_OFFSET:
        case RADEON_SRC_PITCH_OFFSET:
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                tmp = ib_chunk->kdata[idx] & 0x003fffff;
                tmp += (((u32)reloc->lobj.gpu_offset) >> 10);

                if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
                        tile_flags |= RADEON_DST_TILE_MACRO;
                if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
                        if (reg == RADEON_SRC_PITCH_OFFSET) {
                                DRM_ERROR("Cannot src blit from microtiled surface\n");
                                r100_cs_dump_packet(p, pkt);
                                return -EINVAL;
                        }
                        tile_flags |= RADEON_DST_TILE_MICRO;
                }
                tmp |= tile_flags;
                ib[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp;
                break;
        case R300_RB3D_COLOROFFSET0:
        case R300_RB3D_COLOROFFSET1:
        case R300_RB3D_COLOROFFSET2:
        case R300_RB3D_COLOROFFSET3:
                i = (reg - R300_RB3D_COLOROFFSET0) >> 2;
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                track->cb[i].robj = reloc->robj;
                track->cb[i].offset = ib_chunk->kdata[idx];
                ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
                break;
        case R300_ZB_DEPTHOFFSET:
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                track->zb.robj = reloc->robj;
                track->zb.offset = ib_chunk->kdata[idx];
                ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
                break;
        case R300_TX_OFFSET_0:
        case R300_TX_OFFSET_0+4:
        case R300_TX_OFFSET_0+8:
        case R300_TX_OFFSET_0+12:
        case R300_TX_OFFSET_0+16:
        case R300_TX_OFFSET_0+20:
        case R300_TX_OFFSET_0+24:
        case R300_TX_OFFSET_0+28:
        case R300_TX_OFFSET_0+32:
        case R300_TX_OFFSET_0+36:
        case R300_TX_OFFSET_0+40:
        case R300_TX_OFFSET_0+44:
        case R300_TX_OFFSET_0+48:
        case R300_TX_OFFSET_0+52:
        case R300_TX_OFFSET_0+56:
        case R300_TX_OFFSET_0+60:
                i = (reg - R300_TX_OFFSET_0) >> 2;
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
                track->textures[i].robj = reloc->robj;
                break;
        /* Tracked registers */
        case 0x2084:
                /* VAP_VF_CNTL */
                track->vap_vf_cntl = ib_chunk->kdata[idx];
                break;
        case 0x20b4:
                /* VAP_VTX_SIZE */
                track->vtx_size = ib_chunk->kdata[idx] & 0x7F;
                break;
        case 0x2134:
                /* VAP_VF_MAX_VTX_INDX */
                track->max_indx = ib_chunk->kdata[idx] & 0x00FFFFFFUL;
                break;
        case 0x43E4:
                /* SC_SCISSOR1 */
                track->maxy = ((ib_chunk->kdata[idx] >> 13) & 0x1FFF) + 1;
                if (p->rdev->family < CHIP_RV515) {
                        track->maxy -= 1440;
                }
                break;
        case 0x4E00:
                /* RB3D_CCTL */
                track->num_cb = ((ib_chunk->kdata[idx] >> 5) & 0x3) + 1;
                break;
        case 0x4E38:
        case 0x4E3C:
        case 0x4E40:
        case 0x4E44:
                /* RB3D_COLORPITCH0 */
                /* RB3D_COLORPITCH1 */
                /* RB3D_COLORPITCH2 */
                /* RB3D_COLORPITCH3 */
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }

                if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
                        tile_flags |= R300_COLOR_TILE_ENABLE;
                if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
                        tile_flags |= R300_COLOR_MICROTILE_ENABLE;

                tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
                tmp |= tile_flags;
                ib[idx] = tmp;

                i = (reg - 0x4E38) >> 2;
                track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE;
                switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) {
                case 9:
                case 11:
                case 12:
                        track->cb[i].cpp = 1;
                        break;
                case 3:
                case 4:
                case 13:
                case 15:
                        track->cb[i].cpp = 2;
                        break;
                case 6:
                        track->cb[i].cpp = 4;
                        break;
                case 10:
                        track->cb[i].cpp = 8;
                        break;
                case 7:
                        track->cb[i].cpp = 16;
                        break;
                default:
                        DRM_ERROR("Invalid color buffer format (%d) !\n",
                                  ((ib_chunk->kdata[idx] >> 21) & 0xF));
                        return -EINVAL;
                }
                break;
        case 0x4F00:
                /* ZB_CNTL */
                if (ib_chunk->kdata[idx] & 2) {
                        track->z_enabled = true;
                } else {
                        track->z_enabled = false;
                }
                break;
        case 0x4F10:
                /* ZB_FORMAT */
                switch ((ib_chunk->kdata[idx] & 0xF)) {
                case 0:
                case 1:
                        track->zb.cpp = 2;
                        break;
                case 2:
                        track->zb.cpp = 4;
                        break;
                default:
                        DRM_ERROR("Invalid z buffer format (%d) !\n",
                                  (ib_chunk->kdata[idx] & 0xF));
                        return -EINVAL;
                }
                break;
        case 0x4F24:
                /* ZB_DEPTHPITCH */
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }

                if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
                        tile_flags |= R300_DEPTHMACROTILE_ENABLE;
                if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
                        tile_flags |= R300_DEPTHMICROTILE_TILED;

                tmp = ib_chunk->kdata[idx] & ~(0x7 << 16);
                tmp |= tile_flags;
                ib[idx] = tmp;

                track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC;
                break;
        case 0x4104:
                for (i = 0; i < 16; i++) {
                        bool enabled;

                        enabled = !!(ib_chunk->kdata[idx] & (1 << i));
                        track->textures[i].enabled = enabled;
                }
                break;
        case 0x44C0:
        case 0x44C4:
        case 0x44C8:
        case 0x44CC:
        case 0x44D0:
        case 0x44D4:
        case 0x44D8:
        case 0x44DC:
        case 0x44E0:
        case 0x44E4:
        case 0x44E8:
        case 0x44EC:
        case 0x44F0:
        case 0x44F4:
        case 0x44F8:
        case 0x44FC:
                /* TX_FORMAT1_[0-15] */
                i = (reg - 0x44C0) >> 2;
                tmp = (ib_chunk->kdata[idx] >> 25) & 0x3;
                track->textures[i].tex_coord_type = tmp;
                switch ((ib_chunk->kdata[idx] & 0x1F)) {
                case 0:
                case 2:
                case 5:
                case 18:
                case 20:
                case 21:
                        track->textures[i].cpp = 1;
                        break;
                case 1:
                case 3:
                case 6:
                case 7:
                case 10:
                case 11:
                case 19:
                case 22:
                case 24:
                        track->textures[i].cpp = 2;
                        break;
                case 4:
                case 8:
                case 9:
                case 12:
                case 13:
                case 23:
                case 25:
                case 27:
                case 30:
                        track->textures[i].cpp = 4;
                        break;
                case 14:
                case 26:
                case 28:
                        track->textures[i].cpp = 8;
                        break;
                case 29:
                        track->textures[i].cpp = 16;
                        break;
                default:
                        DRM_ERROR("Invalid texture format %u\n",
                                  (ib_chunk->kdata[idx] & 0x1F));
                        return -EINVAL;
                }
                break;
        case 0x4400:
        case 0x4404:
        case 0x4408:
        case 0x440C:
        case 0x4410:
        case 0x4414:
        case 0x4418:
        case 0x441C:
        case 0x4420:
        case 0x4424:
        case 0x4428:
        case 0x442C:
        case 0x4430:
        case 0x4434:
        case 0x4438:
        case 0x443C:
                /* TX_FILTER0_[0-15] */
                i = (reg - 0x4400) >> 2;
                tmp = ib_chunk->kdata[idx] & 0x7;
                if (tmp == 2 || tmp == 4 || tmp == 6) {
                        track->textures[i].roundup_w = false;
                }
                tmp = (ib_chunk->kdata[idx] >> 3) & 0x7;
                if (tmp == 2 || tmp == 4 || tmp == 6) {
                        track->textures[i].roundup_h = false;
                }
                break;
        case 0x4500:
        case 0x4504:
        case 0x4508:
        case 0x450C:
        case 0x4510:
        case 0x4514:
        case 0x4518:
        case 0x451C:
        case 0x4520:
        case 0x4524:
        case 0x4528:
        case 0x452C:
        case 0x4530:
        case 0x4534:
        case 0x4538:
        case 0x453C:
                /* TX_FORMAT2_[0-15] */
                i = (reg - 0x4500) >> 2;
                tmp = ib_chunk->kdata[idx] & 0x3FFF;
                track->textures[i].pitch = tmp + 1;
                if (p->rdev->family >= CHIP_RV515) {
                        tmp = ((ib_chunk->kdata[idx] >> 15) & 1) << 11;
                        track->textures[i].width_11 = tmp;
                        tmp = ((ib_chunk->kdata[idx] >> 16) & 1) << 11;
                        track->textures[i].height_11 = tmp;
                }
                break;
        case 0x4480:
        case 0x4484:
        case 0x4488:
        case 0x448C:
        case 0x4490:
        case 0x4494:
        case 0x4498:
        case 0x449C:
        case 0x44A0:
        case 0x44A4:
        case 0x44A8:
        case 0x44AC:
        case 0x44B0:
        case 0x44B4:
        case 0x44B8:
        case 0x44BC:
                /* TX_FORMAT0_[0-15] */
                i = (reg - 0x4480) >> 2;
                tmp = ib_chunk->kdata[idx] & 0x7FF;
                track->textures[i].width = tmp + 1;
                tmp = (ib_chunk->kdata[idx] >> 11) & 0x7FF;
                track->textures[i].height = tmp + 1;
                tmp = (ib_chunk->kdata[idx] >> 26) & 0xF;
                track->textures[i].num_levels = tmp;
                tmp = ib_chunk->kdata[idx] & (1 << 31);
                track->textures[i].use_pitch = !!tmp;
                tmp = (ib_chunk->kdata[idx] >> 22) & 0xF;
                track->textures[i].txdepth = tmp;
                break;
        case R300_ZB_ZPASS_ADDR:
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
                                  idx, reg);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset);
                break;
        case 0x4be8:
                /* valid register only on RV530 */
                if (p->rdev->family == CHIP_RV530)
                        break;
                /* fall through, do not move */
        default:
                printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
                       reg, idx);
                return -EINVAL;
        }
        return 0;
}
static int r300_packet3_check(struct radeon_cs_parser *p,
                              struct radeon_cs_packet *pkt)
{
        struct radeon_cs_chunk *ib_chunk;
        struct radeon_cs_reloc *reloc;
        struct r300_cs_track *track;
        volatile uint32_t *ib;
        unsigned idx;
        unsigned i, c;
        int r;

        ib = p->ib->ptr;
        ib_chunk = &p->chunks[p->chunk_ib_idx];
        idx = pkt->idx + 1;
        track = (struct r300_cs_track *)p->track;
        switch (pkt->opcode) {
        case PACKET3_3D_LOAD_VBPNTR:
                c = ib_chunk->kdata[idx++] & 0x1F;
                track->num_arrays = c;
                for (i = 0; i < (c - 1); i += 2, idx += 3) {
                        r = r100_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                DRM_ERROR("No reloc for packet3 %d\n",
                                          pkt->opcode);
                                r100_cs_dump_packet(p, pkt);
                                return r;
                        }
                        ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
                        track->arrays[i + 0].robj = reloc->robj;
                        track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
                        track->arrays[i + 0].esize &= 0x7F;
                        r = r100_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                DRM_ERROR("No reloc for packet3 %d\n",
                                          pkt->opcode);
                                r100_cs_dump_packet(p, pkt);
                                return r;
                        }
                        ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
                        track->arrays[i + 1].robj = reloc->robj;
                        track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24;
                        track->arrays[i + 1].esize &= 0x7F;
                }
                if (c & 1) {
                        r = r100_cs_packet_next_reloc(p, &reloc);
                        if (r) {
                                DRM_ERROR("No reloc for packet3 %d\n",
                                          pkt->opcode);
                                r100_cs_dump_packet(p, pkt);
                                return r;
                        }
                        ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
                        track->arrays[i + 0].robj = reloc->robj;
                        track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
                        track->arrays[i + 0].esize &= 0x7F;
                }
                break;
        case PACKET3_INDX_BUFFER:
                r = r100_cs_packet_next_reloc(p, &reloc);
                if (r) {
                        DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
                        r100_cs_dump_packet(p, pkt);
                        return r;
                }
                ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
                r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
                if (r) {
                        return r;
                }
                break;

        case PACKET3_3D_DRAW_IMMD:
                /* Number of dwords is vtx_size * (num_vertices - 1);
                 * PRIM_WALK must be equal to 3: vertex data is embedded
                 * in the cmd stream */
                if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) {
                        DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
                        return -EINVAL;
                }
                track->vap_vf_cntl = ib_chunk->kdata[idx+1];
                track->immd_dwords = pkt->count - 1;
                r = r300_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_3D_DRAW_IMMD_2:
                /* Number of dwords is vtx_size * (num_vertices - 1);
                 * PRIM_WALK must be equal to 3: vertex data is embedded
                 * in the cmd stream */
                if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) {
                        DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
                        return -EINVAL;
                }
                track->vap_vf_cntl = ib_chunk->kdata[idx];
                track->immd_dwords = pkt->count;
                r = r300_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_3D_DRAW_VBUF:
                track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
                r = r300_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_3D_DRAW_VBUF_2:
                track->vap_vf_cntl = ib_chunk->kdata[idx];
                r = r300_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_3D_DRAW_INDX:
                track->vap_vf_cntl = ib_chunk->kdata[idx + 1];
                r = r300_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_3D_DRAW_INDX_2:
                track->vap_vf_cntl = ib_chunk->kdata[idx];
                r = r300_cs_track_check(p->rdev, track);
                if (r) {
                        return r;
                }
                break;
        case PACKET3_NOP:
                break;
        default:
                DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
                return -EINVAL;
        }
        return 0;
}
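
/*
 * Main CS parse loop: walk the IB chunk packet by packet until all
 * of its dwords have been consumed.
 */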
int r300_cs_parse(struct radeon_cs_parser *p)
{
        struct radeon_cs_packet pkt;
        struct r300_cs_track track;
        int r;

        r300_cs_track_clear(&track);
        p->track = &track;
        do {
                r = r100_cs_packet_parse(p, &pkt, p->idx);
                if (r) {
                        return r;
                }
                p->idx += pkt.count + 2;
                switch (pkt.type) {
                case PACKET_TYPE0:
                        r = r100_cs_parse_packet0(p, &pkt,
                                                  p->rdev->config.r300.reg_safe_bm,
                                                  p->rdev->config.r300.reg_safe_bm_size,
                                                  &r300_packet0_check);
                        break;
                case PACKET_TYPE2:
                        break;
                case PACKET_TYPE3:
                        r = r300_packet3_check(p, &pkt);
                        break;
                default:
                        DRM_ERROR("Unknown packet type %d !\n", pkt.type);
                        return -EINVAL;
                }
                if (r) {
                        return r;
                }
        } while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
        return 0;
}

int r300_init(struct radeon_device *rdev)
{
        rdev->config.r300.reg_safe_bm = r300_reg_safe_bm;
        rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm);
        return 0;
}