/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include "drmP.h"
#include "radeon.h"
#include "evergreend.h"
#include "evergreen_reg_safe.h"
#include "cayman_reg_safe.h"
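/*
 * Command stream (CS) checker for Evergreen/Cayman class GPUs: every
 * indirect buffer submitted by userspace is parsed packet by packet and
 * each register write is validated against the *_reg_safe_bm bitmaps
 * before the buffer is handed to the CP.
 */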
static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
					  struct radeon_cs_reloc **cs_reloc);
struct evergreen_cs_track {
	u32			group_size;
	u32			nbanks;
	u32			npipes;
	/* value we track */
	u32			nsamples;
	u32			cb_color_base_last[12];
	struct radeon_bo	*cb_color_bo[12];
	u32			cb_color_bo_offset[12];
	struct radeon_bo	*cb_color_fmask_bo[8];
	struct radeon_bo	*cb_color_cmask_bo[8];
	u32			cb_color_info[12];
	u32			cb_color_view[12];
	u32			cb_color_pitch_idx[12];
	u32			cb_color_slice_idx[12];
	u32			cb_color_dim_idx[12];
	u32			cb_color_dim[12];
	u32			cb_color_pitch[12];
	u32			cb_color_slice[12];
	u32			cb_color_cmask_slice[8];
	u32			cb_color_fmask_slice[8];
	u32			cb_target_mask;
	u32			cb_shader_mask;
	u32			vgt_strmout_config;
	u32			vgt_strmout_buffer_config;
	u32			db_depth_control;
	u32			db_depth_view;
	u32			db_depth_size;
	u32			db_depth_size_idx;
	u32			db_z_info;
	u32			db_z_idx;
	u32			db_z_read_offset;
	u32			db_z_write_offset;
	struct radeon_bo	*db_z_read_bo;
	struct radeon_bo	*db_z_write_bo;
	u32			db_s_info;
	u32			db_s_idx;
	u32			db_s_read_offset;
	u32			db_s_write_offset;
	struct radeon_bo	*db_s_read_bo;
	struct radeon_bo	*db_s_write_bo;
};
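/*
 * The track structure shadows the colorbuffer, depth/stencil and streamout
 * state last written by the command stream, together with the buffer
 * objects bound to each base address, so that draws can later be validated
 * against the sizes of the bound BOs.
 */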
static void evergreen_cs_track_init(struct evergreen_cs_track *track)
{
	int i;

	for (i = 0; i < 8; i++) {
		track->cb_color_fmask_bo[i] = NULL;
		track->cb_color_cmask_bo[i] = NULL;
		track->cb_color_cmask_slice[i] = 0;
		track->cb_color_fmask_slice[i] = 0;
	}

	for (i = 0; i < 12; i++) {
		track->cb_color_base_last[i] = 0;
		track->cb_color_bo[i] = NULL;
		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
		track->cb_color_info[i] = 0;
		track->cb_color_view[i] = 0;
		track->cb_color_pitch_idx[i] = 0;
		track->cb_color_slice_idx[i] = 0;
		track->cb_color_dim[i] = 0;
		track->cb_color_pitch[i] = 0;
		track->cb_color_slice[i] = 0;
	}
	track->cb_target_mask = 0xFFFFFFFF;
	track->cb_shader_mask = 0xFFFFFFFF;

	track->db_depth_view = 0xFFFFC000;
	track->db_depth_size = 0xFFFFFFFF;
	track->db_depth_size_idx = 0;
	track->db_depth_control = 0xFFFFFFFF;
	track->db_z_info = 0xFFFFFFFF;
	track->db_z_idx = 0xFFFFFFFF;
	track->db_z_read_offset = 0xFFFFFFFF;
	track->db_z_write_offset = 0xFFFFFFFF;
	track->db_z_read_bo = NULL;
	track->db_z_write_bo = NULL;
	track->db_s_info = 0xFFFFFFFF;
	track->db_s_idx = 0xFFFFFFFF;
	track->db_s_read_offset = 0xFFFFFFFF;
	track->db_s_write_offset = 0xFFFFFFFF;
	track->db_s_read_bo = NULL;
	track->db_s_write_bo = NULL;
}
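/*
 * Note the asymmetric defaults above: array entries start at 0/NULL while
 * the scalar register shadows start at 0xFFFFFFFF, which serves as a
 * "not programmed yet" sentinel until the CS actually writes the register.
 */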
static inline int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
{
	/* XXX fill in */
	return 0;
}
static int evergreen_cs_track_check(struct radeon_cs_parser *p)
{
	struct evergreen_cs_track *track = p->track;

	/* we don't support stream out buffer yet */
	if (track->vgt_strmout_config || track->vgt_strmout_buffer_config) {
		dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n");
		return -EINVAL;
	}

	/* XXX fill in */
	return 0;
}
/**
 * evergreen_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p:		parser structure holding parsing context.
 * @pkt:	where to store packet information
 * @idx:	index into the ib where the packet starts
 *
 * Assumes that chunk_ib_index is properly set.  Returns -EINVAL if the
 * packet is bigger than the remaining ib size, or if the packet is unknown.
 */
int evergreen_cs_packet_parse(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	uint32_t header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = CP_PACKET_GET_TYPE(header);
	pkt->count = CP_PACKET_GET_COUNT(header);
	switch (pkt->type) {
	case PACKET_TYPE0:
		pkt->reg = CP_PACKET0_GET_REG(header);
		break;
	case PACKET_TYPE3:
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
		break;
	case PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}
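/*
 * CP packet headers pack the packet type in bits [31:30], the dword count
 * minus one in bits [29:16] and, for type-0/type-3 packets, the register
 * offset or opcode in the low bits (see the CP_PACKET*_GET_* macros used
 * above).  For example, the header 0xC0016900 decodes to a type-3
 * SET_CONTEXT_REG (opcode 0x69) packet with count = 1, i.e. two payload
 * dwords: the register offset and a single value.
 */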
/**
 * evergreen_cs_packet_next_reloc() - parse next packet which should be reloc packet3
 * @p:		parser structure holding parsing context.
 * @cs_reloc:	reloc information
 *
 * Checks that the next packet is a relocation packet3 and returns the
 * relocation entry it refers to.
 */
static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
					  struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}
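/*
 * A relocation is encoded as PACKET3(PACKET3_NOP, 0) (header 0xC0001000)
 * whose single payload dword is an index into the relocation chunk.  That
 * chunk stores one 4-dword entry per buffer object, hence the idx / 4
 * above when looking up the parser's relocs_ptr array.
 */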
/**
 * evergreen_cs_packet_next_is_pkt3_nop() - test if next packet is packet3 nop for reloc
 * @p:		parser structure holding parsing context.
 *
 * Returns 1 if the next packet in the command stream is a relocation
 * packet3 (NOP), 0 otherwise.  The parser position is left unchanged.
 */
static inline int evergreen_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet p3reloc;
	int r;

	r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return 0;
	}
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		return 0;
	}
	return 1;
}
/**
 * evergreen_cs_packet_parse_vline() - parse userspace VLINE packet
 * @p:	parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits:
 * PACKET0 - VLINE_START_END + value
 * PACKET3 - WAIT_REG_MEM poll vline status reg
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT_REG_MEM packets to the correct crtc.
 * It also detects a switched off crtc and nops out the wait
 * in that case.
 */
static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, wait_reg_mem;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg, wait_reg_mem_info;
	volatile uint32_t *ib;

	ib = p->ib->ptr;

	/* parse the WAIT_REG_MEM */
	r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx);
	if (r)
		return r;

	/* check it is a WAIT_REG_MEM */
	if (wait_reg_mem.type != PACKET_TYPE3 ||
	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
		return -EINVAL;
	}

	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
	/* bit 4 is reg (0) or mem (1) */
	if (wait_reg_mem_info & 0x10) {
		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
		return -EINVAL;
	}
	/* waiting for value to be equal */
	if ((wait_reg_mem_info & 0x7) != 0x3) {
		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
		return -EINVAL;
	}
	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) {
		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
		return -EINVAL;
	}

	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) {
		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
		return -EINVAL;
	}

	/* jump over the NOP */
	r = evergreen_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
	if (r)
		return r;

	h_idx = p->idx - 2;
	p->idx += wait_reg_mem.count + 2;
	p->idx += p3reloc.count + 2;

	header = radeon_get_ib_value(p, h_idx);
	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
	reg = CP_PACKET0_GET_REG(header);
	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
		return -EINVAL;
	}
	crtc = obj_to_crtc(obj);
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
		ib[h_idx + 4] = PACKET2(0);
		ib[h_idx + 5] = PACKET2(0);
		ib[h_idx + 6] = PACKET2(0);
		ib[h_idx + 7] = PACKET2(0);
		ib[h_idx + 8] = PACKET2(0);
	} else {
		switch (reg) {
		case EVERGREEN_VLINE_START_END:
			header &= ~R600_CP_PACKET0_REG_MASK;
			header |= (EVERGREEN_VLINE_START_END + radeon_crtc->crtc_offset) >> 2;
			ib[h_idx] = header;
			ib[h_idx + 4] = (EVERGREEN_VLINE_STATUS + radeon_crtc->crtc_offset) >> 2;
			break;
		default:
			DRM_ERROR("unknown crtc reloc\n");
			return -EINVAL;
		}
	}
	return 0;
}
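/*
 * Index arithmetic above: h_idx points at the 2-dword PACKET0 header of
 * VLINE_START_END, the 7-dword WAIT_REG_MEM packet follows it (h_idx + 2
 * through h_idx + 8, which is why exactly those dwords are overwritten
 * with PACKET2(0) fillers when the crtc is off), and the reloc NOP's
 * payload sits one dword further, at h_idx + 2 + 7 + 1.
 */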
static int evergreen_packet0_check(struct radeon_cs_parser *p,
				   struct radeon_cs_packet *pkt,
				   unsigned idx, unsigned reg)
{
	int r;

	switch (reg) {
	case EVERGREEN_VLINE_START_END:
		r = evergreen_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			return r;
		}
		break;
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}
static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
				      struct radeon_cs_packet *pkt)
{
	unsigned reg, i;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		r = evergreen_packet0_check(p, pkt, idx, reg);
		if (r) {
			return r;
		}
	}
	return 0;
}
/**
 * evergreen_cs_check_reg() - check if register is authorized or not
 * @p:	parser structure holding parsing context
 * @reg: register we are testing
 * @idx: index into the cs buffer
 *
 * This function will test against evergreen_reg_safe_bm and return 0
 * if the register is safe.  If the register is not flagged as safe this
 * function will test it against a list of registers needing special
 * handling.
 */
static inline int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
	struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
	struct radeon_cs_reloc *reloc;
	u32 last_reg;
	u32 m, i, tmp, *ib;
	int r;

	if (p->rdev->family >= CHIP_CAYMAN)
		last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
	else
		last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);
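	/*
	 * The reg_safe bitmaps are indexed as follows: each u32 covers 32
	 * dword registers (128 bytes of register space), so reg >> 7 selects
	 * the bitmap word and bit (reg >> 2) & 31 the register within it.
	 * A clear bit means the register is always safe and needs no further
	 * checking.
	 */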
	i = (reg >> 7);
	if (i >= last_reg) {
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	m = 1 << ((reg >> 2) & 31);
	if (p->rdev->family >= CHIP_CAYMAN) {
		if (!(cayman_reg_safe_bm[i] & m))
			return 0;
	} else {
		if (!(evergreen_reg_safe_bm[i] & m))
			return 0;
	}
	ib = p->ib->ptr;
	switch (reg) {
	/* force following reg to 0 in an attempt to disable out buffer
	 * which will need us to better understand how it works to perform
	 * security check on it (Jerome)
	 */
	case SQ_ESGS_RING_SIZE:
	case SQ_GSVS_RING_SIZE:
	case SQ_ESTMP_RING_SIZE:
	case SQ_GSTMP_RING_SIZE:
	case SQ_HSTMP_RING_SIZE:
	case SQ_LSTMP_RING_SIZE:
	case SQ_PSTMP_RING_SIZE:
	case SQ_VSTMP_RING_SIZE:
	case SQ_ESGS_RING_ITEMSIZE:
	case SQ_ESTMP_RING_ITEMSIZE:
	case SQ_GSTMP_RING_ITEMSIZE:
	case SQ_GSVS_RING_ITEMSIZE:
	case SQ_GS_VERT_ITEMSIZE:
	case SQ_GS_VERT_ITEMSIZE_1:
	case SQ_GS_VERT_ITEMSIZE_2:
	case SQ_GS_VERT_ITEMSIZE_3:
	case SQ_GSVS_RING_OFFSET_1:
	case SQ_GSVS_RING_OFFSET_2:
	case SQ_GSVS_RING_OFFSET_3:
	case SQ_HSTMP_RING_ITEMSIZE:
	case SQ_LSTMP_RING_ITEMSIZE:
	case SQ_PSTMP_RING_ITEMSIZE:
	case SQ_VSTMP_RING_ITEMSIZE:
	case VGT_TF_RING_SIZE:
		/* get value to populate the IB don't remove */
		/*tmp = radeon_get_ib_value(p, idx);
		  ib[idx] = 0;*/
		break;
	case SQ_ESGS_RING_BASE:
	case SQ_GSVS_RING_BASE:
	case SQ_ESTMP_RING_BASE:
	case SQ_GSTMP_RING_BASE:
	case SQ_HSTMP_RING_BASE:
	case SQ_LSTMP_RING_BASE:
	case SQ_PSTMP_RING_BASE:
	case SQ_VSTMP_RING_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	case DB_DEPTH_CONTROL:
		track->db_depth_control = radeon_get_ib_value(p, idx);
		break;
	case CAYMAN_DB_EQAA:
		if (p->rdev->family < CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		break;
	case CAYMAN_DB_DEPTH_INFO:
		if (p->rdev->family < CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		break;
	case DB_Z_INFO:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_z_info = radeon_get_ib_value(p, idx);
		ib[idx] &= ~Z_ARRAY_MODE(0xf);
		track->db_z_info &= ~Z_ARRAY_MODE(0xf);
		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
			ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
			track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
		} else {
			ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
			track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
		}
		break;
	case DB_STENCIL_INFO:
		track->db_s_info = radeon_get_ib_value(p, idx);
		break;
	case DB_DEPTH_VIEW:
		track->db_depth_view = radeon_get_ib_value(p, idx);
		break;
	case DB_DEPTH_SIZE:
		track->db_depth_size = radeon_get_ib_value(p, idx);
		track->db_depth_size_idx = idx;
		break;
	case DB_Z_READ_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_z_read_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_z_read_bo = reloc->robj;
		break;
	case DB_Z_WRITE_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_z_write_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_z_write_bo = reloc->robj;
		break;
	case DB_STENCIL_READ_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_s_read_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_s_read_bo = reloc->robj;
		break;
	case DB_STENCIL_WRITE_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_s_write_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_s_write_bo = reloc->robj;
		break;
	case VGT_STRMOUT_CONFIG:
		track->vgt_strmout_config = radeon_get_ib_value(p, idx);
		break;
	case VGT_STRMOUT_BUFFER_CONFIG:
		track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx);
		break;
	case CB_TARGET_MASK:
		track->cb_target_mask = radeon_get_ib_value(p, idx);
		break;
	case CB_SHADER_MASK:
		track->cb_shader_mask = radeon_get_ib_value(p, idx);
		break;
	case PA_SC_AA_CONFIG:
		if (p->rdev->family >= CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
		track->nsamples = 1 << tmp;
		break;
	case CAYMAN_PA_SC_AA_CONFIG:
		if (p->rdev->family < CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = radeon_get_ib_value(p, idx) & CAYMAN_MSAA_NUM_SAMPLES_MASK;
		track->nsamples = 1 << tmp;
		break;
	case CB_COLOR0_VIEW:
	case CB_COLOR1_VIEW:
	case CB_COLOR2_VIEW:
	case CB_COLOR3_VIEW:
	case CB_COLOR4_VIEW:
	case CB_COLOR5_VIEW:
	case CB_COLOR6_VIEW:
	case CB_COLOR7_VIEW:
		tmp = (reg - CB_COLOR0_VIEW) / 0x3c;
		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR8_VIEW:
	case CB_COLOR9_VIEW:
	case CB_COLOR10_VIEW:
	case CB_COLOR11_VIEW:
		tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8;
		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR0_INFO:
	case CB_COLOR1_INFO:
	case CB_COLOR2_INFO:
	case CB_COLOR3_INFO:
	case CB_COLOR4_INFO:
	case CB_COLOR5_INFO:
	case CB_COLOR6_INFO:
	case CB_COLOR7_INFO:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - CB_COLOR0_INFO) / 0x3c;
		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
			ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
			track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
		} else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
			ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
			track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
		}
		break;
	case CB_COLOR8_INFO:
	case CB_COLOR9_INFO:
	case CB_COLOR10_INFO:
	case CB_COLOR11_INFO:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
			ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
			track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
		} else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
			ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
			track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
		}
		break;
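	/*
	 * CB_COLOR0-7 register groups are spaced 0x3c bytes apart, while the
	 * four additional Evergreen render targets CB_COLOR8-11 are packed
	 * at a 0x1c stride; hence the two decode formulas used above and in
	 * the PITCH/SLICE/DIM cases below.
	 */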
	case CB_COLOR0_PITCH:
	case CB_COLOR1_PITCH:
	case CB_COLOR2_PITCH:
	case CB_COLOR3_PITCH:
	case CB_COLOR4_PITCH:
	case CB_COLOR5_PITCH:
	case CB_COLOR6_PITCH:
	case CB_COLOR7_PITCH:
		tmp = (reg - CB_COLOR0_PITCH) / 0x3c;
		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_pitch_idx[tmp] = idx;
		break;
	case CB_COLOR8_PITCH:
	case CB_COLOR9_PITCH:
	case CB_COLOR10_PITCH:
	case CB_COLOR11_PITCH:
		tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8;
		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_pitch_idx[tmp] = idx;
		break;
	case CB_COLOR0_SLICE:
	case CB_COLOR1_SLICE:
	case CB_COLOR2_SLICE:
	case CB_COLOR3_SLICE:
	case CB_COLOR4_SLICE:
	case CB_COLOR5_SLICE:
	case CB_COLOR6_SLICE:
	case CB_COLOR7_SLICE:
		tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_slice_idx[tmp] = idx;
		break;
	case CB_COLOR8_SLICE:
	case CB_COLOR9_SLICE:
	case CB_COLOR10_SLICE:
	case CB_COLOR11_SLICE:
		tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_slice_idx[tmp] = idx;
		break;
	case CB_COLOR0_ATTRIB:
	case CB_COLOR1_ATTRIB:
	case CB_COLOR2_ATTRIB:
	case CB_COLOR3_ATTRIB:
	case CB_COLOR4_ATTRIB:
	case CB_COLOR5_ATTRIB:
	case CB_COLOR6_ATTRIB:
	case CB_COLOR7_ATTRIB:
	case CB_COLOR8_ATTRIB:
	case CB_COLOR9_ATTRIB:
	case CB_COLOR10_ATTRIB:
	case CB_COLOR11_ATTRIB:
		break;
	case CB_COLOR0_DIM:
	case CB_COLOR1_DIM:
	case CB_COLOR2_DIM:
	case CB_COLOR3_DIM:
	case CB_COLOR4_DIM:
	case CB_COLOR5_DIM:
	case CB_COLOR6_DIM:
	case CB_COLOR7_DIM:
		tmp = (reg - CB_COLOR0_DIM) / 0x3c;
		track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_dim_idx[tmp] = idx;
		break;
	case CB_COLOR8_DIM:
	case CB_COLOR9_DIM:
	case CB_COLOR10_DIM:
	case CB_COLOR11_DIM:
		tmp = ((reg - CB_COLOR8_DIM) / 0x1c) + 8;
		track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_dim_idx[tmp] = idx;
		break;
	case CB_COLOR0_FMASK:
	case CB_COLOR1_FMASK:
	case CB_COLOR2_FMASK:
	case CB_COLOR3_FMASK:
	case CB_COLOR4_FMASK:
	case CB_COLOR5_FMASK:
	case CB_COLOR6_FMASK:
	case CB_COLOR7_FMASK:
		tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_fmask_bo[tmp] = reloc->robj;
		break;
	case CB_COLOR0_CMASK:
	case CB_COLOR1_CMASK:
	case CB_COLOR2_CMASK:
	case CB_COLOR3_CMASK:
	case CB_COLOR4_CMASK:
	case CB_COLOR5_CMASK:
	case CB_COLOR6_CMASK:
	case CB_COLOR7_CMASK:
		tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_cmask_bo[tmp] = reloc->robj;
		break;
	case CB_COLOR0_FMASK_SLICE:
	case CB_COLOR1_FMASK_SLICE:
	case CB_COLOR2_FMASK_SLICE:
	case CB_COLOR3_FMASK_SLICE:
	case CB_COLOR4_FMASK_SLICE:
	case CB_COLOR5_FMASK_SLICE:
	case CB_COLOR6_FMASK_SLICE:
	case CB_COLOR7_FMASK_SLICE:
		tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c;
		track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR0_CMASK_SLICE:
	case CB_COLOR1_CMASK_SLICE:
	case CB_COLOR2_CMASK_SLICE:
	case CB_COLOR3_CMASK_SLICE:
	case CB_COLOR4_CMASK_SLICE:
	case CB_COLOR5_CMASK_SLICE:
	case CB_COLOR6_CMASK_SLICE:
	case CB_COLOR7_CMASK_SLICE:
		tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c;
		track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR0_BASE:
	case CB_COLOR1_BASE:
	case CB_COLOR2_BASE:
	case CB_COLOR3_BASE:
	case CB_COLOR4_BASE:
	case CB_COLOR5_BASE:
	case CB_COLOR6_BASE:
	case CB_COLOR7_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - CB_COLOR0_BASE) / 0x3c;
		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_base_last[tmp] = ib[idx];
		track->cb_color_bo[tmp] = reloc->robj;
		break;
	case CB_COLOR8_BASE:
	case CB_COLOR9_BASE:
	case CB_COLOR10_BASE:
	case CB_COLOR11_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_base_last[tmp] = ib[idx];
		track->cb_color_bo[tmp] = reloc->robj;
		break;
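	/*
	 * Base address registers hold 256-byte aligned GPU addresses, which
	 * is why the relocated BO offset is patched in shifted right by 8
	 * here and in the other *_BASE cases above and below.
	 */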
	case CB_IMMED0_BASE:
	case CB_IMMED1_BASE:
	case CB_IMMED2_BASE:
	case CB_IMMED3_BASE:
	case CB_IMMED4_BASE:
	case CB_IMMED5_BASE:
	case CB_IMMED6_BASE:
	case CB_IMMED7_BASE:
	case CB_IMMED8_BASE:
	case CB_IMMED9_BASE:
	case CB_IMMED10_BASE:
	case CB_IMMED11_BASE:
	case DB_HTILE_DATA_BASE:
	case SQ_PGM_START_FS:
	case SQ_PGM_START_ES:
	case SQ_PGM_START_VS:
	case SQ_PGM_START_GS:
	case SQ_PGM_START_PS:
	case SQ_PGM_START_HS:
	case SQ_PGM_START_LS:
	case SQ_CONST_MEM_BASE:
	case SQ_ALU_CONST_CACHE_GS_0:
	case SQ_ALU_CONST_CACHE_GS_1:
	case SQ_ALU_CONST_CACHE_GS_2:
	case SQ_ALU_CONST_CACHE_GS_3:
	case SQ_ALU_CONST_CACHE_GS_4:
	case SQ_ALU_CONST_CACHE_GS_5:
	case SQ_ALU_CONST_CACHE_GS_6:
	case SQ_ALU_CONST_CACHE_GS_7:
	case SQ_ALU_CONST_CACHE_GS_8:
	case SQ_ALU_CONST_CACHE_GS_9:
	case SQ_ALU_CONST_CACHE_GS_10:
	case SQ_ALU_CONST_CACHE_GS_11:
	case SQ_ALU_CONST_CACHE_GS_12:
	case SQ_ALU_CONST_CACHE_GS_13:
	case SQ_ALU_CONST_CACHE_GS_14:
	case SQ_ALU_CONST_CACHE_GS_15:
	case SQ_ALU_CONST_CACHE_PS_0:
	case SQ_ALU_CONST_CACHE_PS_1:
	case SQ_ALU_CONST_CACHE_PS_2:
	case SQ_ALU_CONST_CACHE_PS_3:
	case SQ_ALU_CONST_CACHE_PS_4:
	case SQ_ALU_CONST_CACHE_PS_5:
	case SQ_ALU_CONST_CACHE_PS_6:
	case SQ_ALU_CONST_CACHE_PS_7:
	case SQ_ALU_CONST_CACHE_PS_8:
	case SQ_ALU_CONST_CACHE_PS_9:
	case SQ_ALU_CONST_CACHE_PS_10:
	case SQ_ALU_CONST_CACHE_PS_11:
	case SQ_ALU_CONST_CACHE_PS_12:
	case SQ_ALU_CONST_CACHE_PS_13:
	case SQ_ALU_CONST_CACHE_PS_14:
	case SQ_ALU_CONST_CACHE_PS_15:
	case SQ_ALU_CONST_CACHE_VS_0:
	case SQ_ALU_CONST_CACHE_VS_1:
	case SQ_ALU_CONST_CACHE_VS_2:
	case SQ_ALU_CONST_CACHE_VS_3:
	case SQ_ALU_CONST_CACHE_VS_4:
	case SQ_ALU_CONST_CACHE_VS_5:
	case SQ_ALU_CONST_CACHE_VS_6:
	case SQ_ALU_CONST_CACHE_VS_7:
	case SQ_ALU_CONST_CACHE_VS_8:
	case SQ_ALU_CONST_CACHE_VS_9:
	case SQ_ALU_CONST_CACHE_VS_10:
	case SQ_ALU_CONST_CACHE_VS_11:
	case SQ_ALU_CONST_CACHE_VS_12:
	case SQ_ALU_CONST_CACHE_VS_13:
	case SQ_ALU_CONST_CACHE_VS_14:
	case SQ_ALU_CONST_CACHE_VS_15:
	case SQ_ALU_CONST_CACHE_HS_0:
	case SQ_ALU_CONST_CACHE_HS_1:
	case SQ_ALU_CONST_CACHE_HS_2:
	case SQ_ALU_CONST_CACHE_HS_3:
	case SQ_ALU_CONST_CACHE_HS_4:
	case SQ_ALU_CONST_CACHE_HS_5:
	case SQ_ALU_CONST_CACHE_HS_6:
	case SQ_ALU_CONST_CACHE_HS_7:
	case SQ_ALU_CONST_CACHE_HS_8:
	case SQ_ALU_CONST_CACHE_HS_9:
	case SQ_ALU_CONST_CACHE_HS_10:
	case SQ_ALU_CONST_CACHE_HS_11:
	case SQ_ALU_CONST_CACHE_HS_12:
	case SQ_ALU_CONST_CACHE_HS_13:
	case SQ_ALU_CONST_CACHE_HS_14:
	case SQ_ALU_CONST_CACHE_HS_15:
	case SQ_ALU_CONST_CACHE_LS_0:
	case SQ_ALU_CONST_CACHE_LS_1:
	case SQ_ALU_CONST_CACHE_LS_2:
	case SQ_ALU_CONST_CACHE_LS_3:
	case SQ_ALU_CONST_CACHE_LS_4:
	case SQ_ALU_CONST_CACHE_LS_5:
	case SQ_ALU_CONST_CACHE_LS_6:
	case SQ_ALU_CONST_CACHE_LS_7:
	case SQ_ALU_CONST_CACHE_LS_8:
	case SQ_ALU_CONST_CACHE_LS_9:
	case SQ_ALU_CONST_CACHE_LS_10:
	case SQ_ALU_CONST_CACHE_LS_11:
	case SQ_ALU_CONST_CACHE_LS_12:
	case SQ_ALU_CONST_CACHE_LS_13:
	case SQ_ALU_CONST_CACHE_LS_14:
	case SQ_ALU_CONST_CACHE_LS_15:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
					"0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	default:
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	return 0;
}
/**
 * evergreen_check_texture_resource() - check if a texture resource is valid
 * @p:		parser structure holding parsing context
 * @idx:	index into the cs buffer
 * @texture:	texture's bo structure
 * @mipmap:	mipmap's bo structure
 *
 * This function will check that the resource has valid fields and that
 * the texture and mipmap bo objects are big enough to cover this resource.
 */
static inline int evergreen_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
						   struct radeon_bo *texture,
						   struct radeon_bo *mipmap)
{
	/* XXX fill in */
	return 0;
}
static int evergreen_packet3_check(struct radeon_cs_parser *p,
				   struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	struct evergreen_cs_track *track;
	volatile u32 *ib;
	unsigned idx;
	unsigned i;
	unsigned start_reg, end_reg, reg;
	int r;
	u32 idx_value;

	track = (struct evergreen_cs_track *)p->track;
	ib = p->ib->ptr;
	idx = pkt->idx + 1;
	idx_value = radeon_get_ib_value(p, idx);

	switch (pkt->opcode) {
	case PACKET3_SET_PREDICATION:
	{
		int pred_op;
		int tmp;
		if (pkt->count != 1) {
			DRM_ERROR("bad SET PREDICATION\n");
			return -EINVAL;
		}

		tmp = radeon_get_ib_value(p, idx + 1);
		pred_op = (tmp >> 16) & 0x7;

		/* for the clear predicate operation */
		if (pred_op == 0)
			return 0;

		if (pred_op > 2) {
			DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
			return -EINVAL;
		}

		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad SET PREDICATION\n");
			return -EINVAL;
		}

		ib[idx + 0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx + 1] = tmp + (upper_32_bits(reloc->lobj.gpu_offset) & 0xff);
	}
	break;
	case PACKET3_CONTEXT_CONTROL:
		if (pkt->count != 1) {
			DRM_ERROR("bad CONTEXT_CONTROL\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_TYPE:
	case PACKET3_NUM_INSTANCES:
	case PACKET3_CLEAR_STATE:
		if (pkt->count) {
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
			return -EINVAL;
		}
		break;
	case CAYMAN_PACKET3_DEALLOC_STATE:
		if (p->rdev->family < CHIP_CAYMAN) {
			DRM_ERROR("bad PACKET3_DEALLOC_STATE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_BASE:
		if (pkt->count != 1) {
			DRM_ERROR("bad INDEX_BASE\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad INDEX_BASE\n");
			return -EINVAL;
		}
		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX:
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_2:
		if (pkt->count != 4) {
			DRM_ERROR("bad DRAW_INDEX_2\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX_2\n");
			return -EINVAL;
		}
		ib[idx+1] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_AUTO:
		if (pkt->count != 1) {
			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_MULTI_AUTO:
		if (pkt->count != 2) {
			DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_IMMD:
		if (pkt->count < 2) {
			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_OFFSET:
		if (pkt->count != 2) {
			DRM_ERROR("bad DRAW_INDEX_OFFSET\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_DRAW_INDEX_OFFSET_2:
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_WAIT_REG_MEM:
		if (pkt->count != 5) {
			DRM_ERROR("bad WAIT_REG_MEM\n");
			return -EINVAL;
		}
		/* bit 4 is reg (0) or mem (1) */
		if (idx_value & 0x10) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad WAIT_REG_MEM\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_SURFACE_SYNC:
		if (pkt->count != 3) {
			DRM_ERROR("bad SURFACE_SYNC\n");
			return -EINVAL;
		}
		/* 0xffffffff/0x0 is flush all cache flag */
		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
		    radeon_get_ib_value(p, idx + 2) != 0) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad SURFACE_SYNC\n");
				return -EINVAL;
			}
			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_EVENT_WRITE:
		if (pkt->count != 2 && pkt->count != 0) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad EVENT_WRITE\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_EVENT_WRITE_EOP:
		if (pkt->count != 4) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		break;
	case PACKET3_EVENT_WRITE_EOS:
		if (pkt->count != 3) {
			DRM_ERROR("bad EVENT_WRITE_EOS\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE_EOS\n");
			return -EINVAL;
		}
		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		break;
	case PACKET3_SET_CONFIG_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = evergreen_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_CONTEXT_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONTEXT_REG_START) ||
		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = evergreen_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_RESOURCE:
		if (pkt->count % 8) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_RESOURCE_START) ||
		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		for (i = 0; i < (pkt->count / 8); i++) {
			struct radeon_bo *texture, *mipmap;
			u32 size, offset;

			switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
			case SQ_TEX_VTX_VALID_TEXTURE:
				/* tex base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (tex)\n");
					return -EINVAL;
				}
				ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
					ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
				else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
					ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
				texture = reloc->robj;
				/* tex mip base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (tex)\n");
					return -EINVAL;
				}
				ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				mipmap = reloc->robj;
				r = evergreen_check_texture_resource(p, idx+1+(i*8),
						texture, mipmap);
				if (r)
					return -EINVAL;
				break;
			case SQ_TEX_VTX_VALID_BUFFER:
				/* vtx base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (vtx)\n");
					return -EINVAL;
				}
				offset = radeon_get_ib_value(p, idx+1+(i*8)+0);
				size = radeon_get_ib_value(p, idx+1+(i*8)+1);
				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
					/* force size to size of the buffer */
					dev_warn(p->dev, "vbo resource seems too big for the bo\n");
					ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj);
				}
				ib[idx+1+(i*8)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
				ib[idx+1+(i*8)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
				break;
			case SQ_TEX_VTX_INVALID_TEXTURE:
			case SQ_TEX_VTX_INVALID_BUFFER:
			default:
				DRM_ERROR("bad SET_RESOURCE\n");
				return -EINVAL;
			}
		}
		break;
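	/*
	 * Each SET_RESOURCE entry is 8 dwords; dword 7 of an entry carries
	 * the constant type, which is how texture resources (two relocs:
	 * base and mip base) are told apart from vertex buffers (one reloc)
	 * above.
	 */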
	case PACKET3_SET_ALU_CONST:
		/* XXX fix me ALU const buffers only */
		break;
	case PACKET3_SET_BOOL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_BOOL_CONST_START) ||
		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
			DRM_ERROR("bad SET_BOOL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_LOOP_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_LOOP_CONST_START) ||
		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
			DRM_ERROR("bad SET_LOOP_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_CTL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CTL_CONST_START) ||
		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
			DRM_ERROR("bad SET_CTL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_SAMPLER:
		if (pkt->count % 3) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_SAMPLER_START) ||
		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
int evergreen_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct evergreen_cs_track *track;
	int r;

	if (p->track == NULL) {
		/* initialize tracker, we are in kms */
		track = kzalloc(sizeof(*track), GFP_KERNEL);
		if (track == NULL)
			return -ENOMEM;
		evergreen_cs_track_init(track);
		track->npipes = p->rdev->config.evergreen.tiling_npipes;
		track->nbanks = p->rdev->config.evergreen.tiling_nbanks;
		track->group_size = p->rdev->config.evergreen.tiling_group_size;
		p->track = track;
	}
	do {
		r = evergreen_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = evergreen_cs_parse_packet0(p, &pkt);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = evergreen_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			kfree(p->track);
			p->track = NULL;
			return -EINVAL;
		}
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	for (r = 0; r < p->ib->length_dw; r++) {
		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
		mdelay(1);
	}
#endif
	return 0;
}
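/*
 * evergreen_cs_parse() is the entry point of this checker: it is wired up
 * as the ASIC's .cs_parse callback (see radeon_asic.c), so it runs on every
 * command stream ioctl before the IB is scheduled on the ring.  The debug
 * dump above is compiled out with #if 0; enabling it prints every dword of
 * the validated IB, which is occasionally useful when bringing up new
 * userspace.
 */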