/*
 * Copyright 2010 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include "drmP.h"
#include "radeon.h"
#include "evergreend.h"
#include "evergreen_reg_safe.h"
#include "cayman_reg_safe.h"
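
/*
 * Command-stream (CS) checker for Evergreen/Cayman GPUs: userspace submits
 * indirect buffers (IBs) of CP packets, and this code validates every packet
 * and register write against safe-register bitmaps before the kernel hands
 * the buffer to the GPU, patching buffer-object addresses in as it goes.
 */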

static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
					  struct radeon_cs_reloc **cs_reloc);
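
/*
 * The tracker below caches the last values written to the colour-buffer,
 * depth, stencil and stream-out registers so that a draw packet can be
 * checked against a consistent snapshot of GPU state rather than raw IB
 * dwords.
 */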
struct evergreen_cs_track {
	u32			group_size;
	u32			nbanks;
	u32			npipes;
	/* value we track */
	u32			nsamples;
	u32			cb_color_base_last[12];
	struct radeon_bo	*cb_color_bo[12];
	u32			cb_color_bo_offset[12];
	struct radeon_bo	*cb_color_fmask_bo[8];
	struct radeon_bo	*cb_color_cmask_bo[8];
	u32			cb_color_info[12];
	u32			cb_color_view[12];
	u32			cb_color_pitch_idx[12];
	u32			cb_color_slice_idx[12];
	u32			cb_color_dim_idx[12];
	u32			cb_color_dim[12];
	u32			cb_color_pitch[12];
	u32			cb_color_slice[12];
	u32			cb_color_cmask_slice[8];
	u32			cb_color_fmask_slice[8];
	u32			cb_target_mask;
	u32			cb_shader_mask;
	u32			vgt_strmout_config;
	u32			vgt_strmout_buffer_config;
	u32			db_depth_control;
	u32			db_depth_view;
	u32			db_depth_size;
	u32			db_depth_size_idx;
	u32			db_z_info;
	u32			db_z_idx;
	u32			db_z_read_offset;
	u32			db_z_write_offset;
	struct radeon_bo	*db_z_read_bo;
	struct radeon_bo	*db_z_write_bo;
	u32			db_s_info;
	u32			db_s_idx;
	u32			db_s_read_offset;
	u32			db_s_write_offset;
	struct radeon_bo	*db_s_read_bo;
	struct radeon_bo	*db_s_write_bo;
};

static void evergreen_cs_track_init(struct evergreen_cs_track *track)
{
	int i;

	for (i = 0; i < 8; i++) {
		track->cb_color_fmask_bo[i] = NULL;
		track->cb_color_cmask_bo[i] = NULL;
		track->cb_color_cmask_slice[i] = 0;
		track->cb_color_fmask_slice[i] = 0;
	}

	for (i = 0; i < 12; i++) {
		track->cb_color_base_last[i] = 0;
		track->cb_color_bo[i] = NULL;
		track->cb_color_bo_offset[i] = 0xFFFFFFFF;
		track->cb_color_info[i] = 0;
		track->cb_color_view[i] = 0;
		track->cb_color_pitch_idx[i] = 0;
		track->cb_color_slice_idx[i] = 0;
		track->cb_color_dim[i] = 0;
		track->cb_color_pitch[i] = 0;
		track->cb_color_slice[i] = 0;
	}
	track->cb_target_mask = 0xFFFFFFFF;
	track->cb_shader_mask = 0xFFFFFFFF;

	track->db_depth_view = 0xFFFFC000;
	track->db_depth_size = 0xFFFFFFFF;
	track->db_depth_size_idx = 0;
	track->db_depth_control = 0xFFFFFFFF;
	track->db_z_info = 0xFFFFFFFF;
	track->db_z_idx = 0xFFFFFFFF;
	track->db_z_read_offset = 0xFFFFFFFF;
	track->db_z_write_offset = 0xFFFFFFFF;
	track->db_z_read_bo = NULL;
	track->db_z_write_bo = NULL;
	track->db_s_info = 0xFFFFFFFF;
	track->db_s_idx = 0xFFFFFFFF;
	track->db_s_read_offset = 0xFFFFFFFF;
	track->db_s_write_offset = 0xFFFFFFFF;
	track->db_s_read_bo = NULL;
	track->db_s_write_bo = NULL;
}

static int evergreen_cs_track_check(struct radeon_cs_parser *p)
{
	struct evergreen_cs_track *track = p->track;

	/* we don't support stream out buffer yet */
	if (track->vgt_strmout_config || track->vgt_strmout_buffer_config) {
		dev_warn(p->dev, "this kernel doesn't support SMX output buffer\n");
		return -EINVAL;
	}

	/* XXX fill in */
	return 0;
}

/**
 * evergreen_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @parser:	parser structure holding parsing context.
 * @pkt:	where to store packet information
 * @idx:	index of the packet header in the ib chunk
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size, or if the packet
 * is unknown.
 **/
int evergreen_cs_packet_parse(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	uint32_t header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = CP_PACKET_GET_TYPE(header);
	pkt->count = CP_PACKET_GET_COUNT(header);
	switch (pkt->type) {
	case PACKET_TYPE0:
		pkt->reg = CP_PACKET0_GET_REG(header);
		break;
	case PACKET_TYPE3:
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
		break;
	case PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}
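
/*
 * CP packet headers on r600-class hardware pack the packet type in bits
 * [31:30] and the payload dword count minus one in bits [29:16]; the low
 * half holds a register dword offset for type 0 or an opcode in bits [15:8]
 * for type 3. For example, the header 0xC0026900 decodes as type 3, count 2,
 * opcode 0x69 (SET_CONTEXT_REG).
 */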

/**
 * evergreen_cs_packet_next_reloc() - parse next packet which should be reloc packet3
 * @parser:	parser structure holding parsing context.
 * @cs_reloc:	where to store the resulting reloc information
 *
 * Check that the next packet is a relocation packet3, do bo validation and
 * compute the GPU offset using the provided start.
 **/
static int evergreen_cs_packet_next_reloc(struct radeon_cs_parser *p,
					  struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = evergreen_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return r;
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}
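
/*
 * Relocations ride in-band as PACKET3_NOP packets: the dword after the NOP
 * header holds an index into the separate relocation chunk, and since each
 * reloc entry there is assumed to span 4 dwords (see the FIXME above), the
 * index is divided by 4 to address the relocs_ptr table.
 */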

/**
 * evergreen_cs_packet_parse_vline() - parse userspace VLINE packet
 * @parser:	parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits:
 * PACKET0 - VLINE_START_END + value
 * PACKET3 - WAIT_REG_MEM poll vline status reg
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this and relocates the VLINE START END
 * and WAIT_REG_MEM packets to the correct crtc.
 * It also detects a switched-off crtc and nops out the wait in that case.
 **/
static int evergreen_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, wait_reg_mem;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg, wait_reg_mem_info;
	volatile uint32_t *ib;

	ib = p->ib->ptr;

	/* parse the WAIT_REG_MEM */
	r = evergreen_cs_packet_parse(p, &wait_reg_mem, p->idx);
	if (r)
		return r;

	/* check its a WAIT_REG_MEM */
	if (wait_reg_mem.type != PACKET_TYPE3 ||
	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
		return -EINVAL;
	}

	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
	/* bit 4 is reg (0) or mem (1) */
	if (wait_reg_mem_info & 0x10) {
		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
		return -EINVAL;
	}
	/* waiting for value to be equal */
	if ((wait_reg_mem_info & 0x7) != 0x3) {
		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
		return -EINVAL;
	}
	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != EVERGREEN_VLINE_STATUS) {
		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
		return -EINVAL;
	}

	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != EVERGREEN_VLINE_STAT) {
		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
		return -EINVAL;
	}

	/* jump over the NOP */
	r = evergreen_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
	if (r)
		return r;

	h_idx = p->idx - 2;
	p->idx += wait_reg_mem.count + 2;
	p->idx += p3reloc.count + 2;

	header = radeon_get_ib_value(p, h_idx);
	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
	reg = CP_PACKET0_GET_REG(header);
	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
		return -EINVAL;
	}
	crtc = obj_to_crtc(obj);
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
		ib[h_idx + 4] = PACKET2(0);
		ib[h_idx + 5] = PACKET2(0);
		ib[h_idx + 6] = PACKET2(0);
		ib[h_idx + 7] = PACKET2(0);
		ib[h_idx + 8] = PACKET2(0);
	} else {
		switch (reg) {
		case EVERGREEN_VLINE_START_END:
			header &= ~R600_CP_PACKET0_REG_MASK;
			header |= (EVERGREEN_VLINE_START_END + radeon_crtc->crtc_offset) >> 2;
			ib[h_idx] = header;
			ib[h_idx + 4] = (EVERGREEN_VLINE_STATUS + radeon_crtc->crtc_offset) >> 2;
			break;
		default:
			DRM_ERROR("unknown crtc reloc\n");
			return -EINVAL;
		}
	}
	return 0;
}

static int evergreen_packet0_check(struct radeon_cs_parser *p,
				   struct radeon_cs_packet *pkt,
				   unsigned idx, unsigned reg)
{
	int r;

	switch (reg) {
	case EVERGREEN_VLINE_START_END:
		r = evergreen_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			return r;
		}
		break;
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}

static int evergreen_cs_parse_packet0(struct radeon_cs_parser *p,
				      struct radeon_cs_packet *pkt)
{
	unsigned reg, i;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		r = evergreen_packet0_check(p, pkt, idx, reg);
		if (r) {
			return r;
		}
	}
	return 0;
}

/**
 * evergreen_cs_check_reg() - check if register is authorized or not
 * @parser:	parser structure holding parsing context
 * @reg:	register we are testing
 * @idx:	index into the cs buffer
 *
 * This function will test against evergreen_reg_safe_bm and return 0
 * if the register is safe. If the register is not flagged as safe this
 * function will test it against a list of registers needing special
 * handling.
 **/
static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
{
	struct evergreen_cs_track *track = (struct evergreen_cs_track *)p->track;
	struct radeon_cs_reloc *reloc;
	u32 last_reg;
	u32 m, i, tmp, *ib;
	int r;

	if (p->rdev->family >= CHIP_CAYMAN)
		last_reg = ARRAY_SIZE(cayman_reg_safe_bm);
	else
		last_reg = ARRAY_SIZE(evergreen_reg_safe_bm);

	i = (reg >> 7);
	if (i >= last_reg) {
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	m = 1 << ((reg >> 2) & 31);
	if (p->rdev->family >= CHIP_CAYMAN) {
		if (!(cayman_reg_safe_bm[i] & m))
			return 0;
	} else {
		if (!(evergreen_reg_safe_bm[i] & m))
			return 0;
	}
	ib = p->ib->ptr;
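
	/*
	 * Each bit of the safe-register bitmap covers one register dword:
	 * reg >> 7 picks the 32-bit bitmap word (32 registers x 4 bytes each)
	 * and (reg >> 2) & 31 picks the bit within it. A clear bit means the
	 * register is harmless and needs no further checking; a set bit drops
	 * us into the switch below for special handling or rejection.
	 */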
	switch (reg) {
	/* force following reg to 0 in an attempt to disable out buffer
	 * which will need us to better understand how it works to perform
	 * security check on it (Jerome)
	 */
	case SQ_ESGS_RING_SIZE:
	case SQ_GSVS_RING_SIZE:
	case SQ_ESTMP_RING_SIZE:
	case SQ_GSTMP_RING_SIZE:
	case SQ_HSTMP_RING_SIZE:
	case SQ_LSTMP_RING_SIZE:
	case SQ_PSTMP_RING_SIZE:
	case SQ_VSTMP_RING_SIZE:
	case SQ_ESGS_RING_ITEMSIZE:
	case SQ_ESTMP_RING_ITEMSIZE:
	case SQ_GSTMP_RING_ITEMSIZE:
	case SQ_GSVS_RING_ITEMSIZE:
	case SQ_GS_VERT_ITEMSIZE:
	case SQ_GS_VERT_ITEMSIZE_1:
	case SQ_GS_VERT_ITEMSIZE_2:
	case SQ_GS_VERT_ITEMSIZE_3:
	case SQ_GSVS_RING_OFFSET_1:
	case SQ_GSVS_RING_OFFSET_2:
	case SQ_GSVS_RING_OFFSET_3:
	case SQ_HSTMP_RING_ITEMSIZE:
	case SQ_LSTMP_RING_ITEMSIZE:
	case SQ_PSTMP_RING_ITEMSIZE:
	case SQ_VSTMP_RING_ITEMSIZE:
	case VGT_TF_RING_SIZE:
		/* get value to populate the IB don't remove */
		/*tmp = radeon_get_ib_value(p, idx);
		  ib[idx] = 0;*/
		break;
	case SQ_ESGS_RING_BASE:
	case SQ_GSVS_RING_BASE:
	case SQ_ESTMP_RING_BASE:
	case SQ_GSTMP_RING_BASE:
	case SQ_HSTMP_RING_BASE:
	case SQ_LSTMP_RING_BASE:
	case SQ_PSTMP_RING_BASE:
	case SQ_VSTMP_RING_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
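
	/*
	 * Base-address registers take a 256-byte-aligned GPU address, so the
	 * validated buffer object's offset is shifted right by 8 before being
	 * patched into the IB dword; the same pattern repeats for the depth,
	 * stencil and colour-buffer bases below.
	 */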
	case DB_DEPTH_CONTROL:
		track->db_depth_control = radeon_get_ib_value(p, idx);
		break;
	case CAYMAN_DB_EQAA:
		if (p->rdev->family < CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		break;
	case CAYMAN_DB_DEPTH_INFO:
		if (p->rdev->family < CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		break;
	case DB_Z_INFO:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_z_info = radeon_get_ib_value(p, idx);
		ib[idx] &= ~Z_ARRAY_MODE(0xf);
		track->db_z_info &= ~Z_ARRAY_MODE(0xf);
		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
			ib[idx] |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
			track->db_z_info |= Z_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
		} else {
			ib[idx] |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
			track->db_z_info |= Z_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
		}
		break;
	case DB_STENCIL_INFO:
		track->db_s_info = radeon_get_ib_value(p, idx);
		break;
	case DB_DEPTH_VIEW:
		track->db_depth_view = radeon_get_ib_value(p, idx);
		break;
	case DB_DEPTH_SIZE:
		track->db_depth_size = radeon_get_ib_value(p, idx);
		track->db_depth_size_idx = idx;
		break;
	case DB_Z_READ_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_z_read_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_z_read_bo = reloc->robj;
		break;
	case DB_Z_WRITE_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_z_write_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_z_write_bo = reloc->robj;
		break;
	case DB_STENCIL_READ_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_s_read_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_s_read_bo = reloc->robj;
		break;
	case DB_STENCIL_WRITE_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		track->db_s_write_offset = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->db_s_write_bo = reloc->robj;
		break;
	case VGT_STRMOUT_CONFIG:
		track->vgt_strmout_config = radeon_get_ib_value(p, idx);
		break;
	case VGT_STRMOUT_BUFFER_CONFIG:
		track->vgt_strmout_buffer_config = radeon_get_ib_value(p, idx);
		break;
	case CB_TARGET_MASK:
		track->cb_target_mask = radeon_get_ib_value(p, idx);
		break;
	case CB_SHADER_MASK:
		track->cb_shader_mask = radeon_get_ib_value(p, idx);
		break;
	case PA_SC_AA_CONFIG:
		if (p->rdev->family >= CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = radeon_get_ib_value(p, idx) & MSAA_NUM_SAMPLES_MASK;
		track->nsamples = 1 << tmp;
		break;
	case CAYMAN_PA_SC_AA_CONFIG:
		if (p->rdev->family < CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = radeon_get_ib_value(p, idx) & CAYMAN_MSAA_NUM_SAMPLES_MASK;
		track->nsamples = 1 << tmp;
		break;
	case CB_COLOR0_VIEW:
	case CB_COLOR1_VIEW:
	case CB_COLOR2_VIEW:
	case CB_COLOR3_VIEW:
	case CB_COLOR4_VIEW:
	case CB_COLOR5_VIEW:
	case CB_COLOR6_VIEW:
	case CB_COLOR7_VIEW:
		tmp = (reg - CB_COLOR0_VIEW) / 0x3c;
		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR8_VIEW:
	case CB_COLOR9_VIEW:
	case CB_COLOR10_VIEW:
	case CB_COLOR11_VIEW:
		tmp = ((reg - CB_COLOR8_VIEW) / 0x1c) + 8;
		track->cb_color_view[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR0_INFO:
	case CB_COLOR1_INFO:
	case CB_COLOR2_INFO:
	case CB_COLOR3_INFO:
	case CB_COLOR4_INFO:
	case CB_COLOR5_INFO:
	case CB_COLOR6_INFO:
	case CB_COLOR7_INFO:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - CB_COLOR0_INFO) / 0x3c;
		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
			ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
			track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
		} else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
			ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
			track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
		}
		break;
	case CB_COLOR8_INFO:
	case CB_COLOR9_INFO:
	case CB_COLOR10_INFO:
	case CB_COLOR11_INFO:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = ((reg - CB_COLOR8_INFO) / 0x1c) + 8;
		track->cb_color_info[tmp] = radeon_get_ib_value(p, idx);
		if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) {
			ib[idx] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
			track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
		} else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) {
			ib[idx] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
			track->cb_color_info[tmp] |= CB_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
		}
		break;
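
	/*
	 * CB_COLOR0..7 state blocks are laid out 0x3c bytes apart, while the
	 * four extra Evergreen targets CB_COLOR8..11 use a compact 0x1c-byte
	 * stride; dividing the register offset by the stride (plus 8 for the
	 * second group) recovers the render-target index used below.
	 */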
	case CB_COLOR0_PITCH:
	case CB_COLOR1_PITCH:
	case CB_COLOR2_PITCH:
	case CB_COLOR3_PITCH:
	case CB_COLOR4_PITCH:
	case CB_COLOR5_PITCH:
	case CB_COLOR6_PITCH:
	case CB_COLOR7_PITCH:
		tmp = (reg - CB_COLOR0_PITCH) / 0x3c;
		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_pitch_idx[tmp] = idx;
		break;
	case CB_COLOR8_PITCH:
	case CB_COLOR9_PITCH:
	case CB_COLOR10_PITCH:
	case CB_COLOR11_PITCH:
		tmp = ((reg - CB_COLOR8_PITCH) / 0x1c) + 8;
		track->cb_color_pitch[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_pitch_idx[tmp] = idx;
		break;
	case CB_COLOR0_SLICE:
	case CB_COLOR1_SLICE:
	case CB_COLOR2_SLICE:
	case CB_COLOR3_SLICE:
	case CB_COLOR4_SLICE:
	case CB_COLOR5_SLICE:
	case CB_COLOR6_SLICE:
	case CB_COLOR7_SLICE:
		tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_slice_idx[tmp] = idx;
		break;
	case CB_COLOR8_SLICE:
	case CB_COLOR9_SLICE:
	case CB_COLOR10_SLICE:
	case CB_COLOR11_SLICE:
		tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
		track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_slice_idx[tmp] = idx;
		break;
	case CB_COLOR0_ATTRIB:
	case CB_COLOR1_ATTRIB:
	case CB_COLOR2_ATTRIB:
	case CB_COLOR3_ATTRIB:
	case CB_COLOR4_ATTRIB:
	case CB_COLOR5_ATTRIB:
	case CB_COLOR6_ATTRIB:
	case CB_COLOR7_ATTRIB:
	case CB_COLOR8_ATTRIB:
	case CB_COLOR9_ATTRIB:
	case CB_COLOR10_ATTRIB:
	case CB_COLOR11_ATTRIB:
		break;
	case CB_COLOR0_DIM:
	case CB_COLOR1_DIM:
	case CB_COLOR2_DIM:
	case CB_COLOR3_DIM:
	case CB_COLOR4_DIM:
	case CB_COLOR5_DIM:
	case CB_COLOR6_DIM:
	case CB_COLOR7_DIM:
		tmp = (reg - CB_COLOR0_DIM) / 0x3c;
		track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_dim_idx[tmp] = idx;
		break;
	case CB_COLOR8_DIM:
	case CB_COLOR9_DIM:
	case CB_COLOR10_DIM:
	case CB_COLOR11_DIM:
		tmp = ((reg - CB_COLOR8_DIM) / 0x1c) + 8;
		track->cb_color_dim[tmp] = radeon_get_ib_value(p, idx);
		track->cb_color_dim_idx[tmp] = idx;
		break;
	case CB_COLOR0_FMASK:
	case CB_COLOR1_FMASK:
	case CB_COLOR2_FMASK:
	case CB_COLOR3_FMASK:
	case CB_COLOR4_FMASK:
	case CB_COLOR5_FMASK:
	case CB_COLOR6_FMASK:
	case CB_COLOR7_FMASK:
		tmp = (reg - CB_COLOR0_FMASK) / 0x3c;
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_fmask_bo[tmp] = reloc->robj;
		break;
	case CB_COLOR0_CMASK:
	case CB_COLOR1_CMASK:
	case CB_COLOR2_CMASK:
	case CB_COLOR3_CMASK:
	case CB_COLOR4_CMASK:
	case CB_COLOR5_CMASK:
	case CB_COLOR6_CMASK:
	case CB_COLOR7_CMASK:
		tmp = (reg - CB_COLOR0_CMASK) / 0x3c;
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_err(p->dev, "bad SET_CONTEXT_REG 0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_cmask_bo[tmp] = reloc->robj;
		break;
	case CB_COLOR0_FMASK_SLICE:
	case CB_COLOR1_FMASK_SLICE:
	case CB_COLOR2_FMASK_SLICE:
	case CB_COLOR3_FMASK_SLICE:
	case CB_COLOR4_FMASK_SLICE:
	case CB_COLOR5_FMASK_SLICE:
	case CB_COLOR6_FMASK_SLICE:
	case CB_COLOR7_FMASK_SLICE:
		tmp = (reg - CB_COLOR0_FMASK_SLICE) / 0x3c;
		track->cb_color_fmask_slice[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR0_CMASK_SLICE:
	case CB_COLOR1_CMASK_SLICE:
	case CB_COLOR2_CMASK_SLICE:
	case CB_COLOR3_CMASK_SLICE:
	case CB_COLOR4_CMASK_SLICE:
	case CB_COLOR5_CMASK_SLICE:
	case CB_COLOR6_CMASK_SLICE:
	case CB_COLOR7_CMASK_SLICE:
		tmp = (reg - CB_COLOR0_CMASK_SLICE) / 0x3c;
		track->cb_color_cmask_slice[tmp] = radeon_get_ib_value(p, idx);
		break;
	case CB_COLOR0_BASE:
	case CB_COLOR1_BASE:
	case CB_COLOR2_BASE:
	case CB_COLOR3_BASE:
	case CB_COLOR4_BASE:
	case CB_COLOR5_BASE:
	case CB_COLOR6_BASE:
	case CB_COLOR7_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = (reg - CB_COLOR0_BASE) / 0x3c;
		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_base_last[tmp] = ib[idx];
		track->cb_color_bo[tmp] = reloc->robj;
		break;
	case CB_COLOR8_BASE:
	case CB_COLOR9_BASE:
	case CB_COLOR10_BASE:
	case CB_COLOR11_BASE:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		tmp = ((reg - CB_COLOR8_BASE) / 0x1c) + 8;
		track->cb_color_bo_offset[tmp] = radeon_get_ib_value(p, idx);
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		track->cb_color_base_last[tmp] = ib[idx];
		track->cb_color_bo[tmp] = reloc->robj;
		break;
	case CB_IMMED0_BASE:
	case CB_IMMED1_BASE:
	case CB_IMMED2_BASE:
	case CB_IMMED3_BASE:
	case CB_IMMED4_BASE:
	case CB_IMMED5_BASE:
	case CB_IMMED6_BASE:
	case CB_IMMED7_BASE:
	case CB_IMMED8_BASE:
	case CB_IMMED9_BASE:
	case CB_IMMED10_BASE:
	case CB_IMMED11_BASE:
	case DB_HTILE_DATA_BASE:
	case SQ_PGM_START_FS:
	case SQ_PGM_START_ES:
	case SQ_PGM_START_VS:
	case SQ_PGM_START_GS:
	case SQ_PGM_START_PS:
	case SQ_PGM_START_HS:
	case SQ_PGM_START_LS:
	case SQ_CONST_MEM_BASE:
	case SQ_ALU_CONST_CACHE_GS_0:
	case SQ_ALU_CONST_CACHE_GS_1:
	case SQ_ALU_CONST_CACHE_GS_2:
	case SQ_ALU_CONST_CACHE_GS_3:
	case SQ_ALU_CONST_CACHE_GS_4:
	case SQ_ALU_CONST_CACHE_GS_5:
	case SQ_ALU_CONST_CACHE_GS_6:
	case SQ_ALU_CONST_CACHE_GS_7:
	case SQ_ALU_CONST_CACHE_GS_8:
	case SQ_ALU_CONST_CACHE_GS_9:
	case SQ_ALU_CONST_CACHE_GS_10:
	case SQ_ALU_CONST_CACHE_GS_11:
	case SQ_ALU_CONST_CACHE_GS_12:
	case SQ_ALU_CONST_CACHE_GS_13:
	case SQ_ALU_CONST_CACHE_GS_14:
	case SQ_ALU_CONST_CACHE_GS_15:
	case SQ_ALU_CONST_CACHE_PS_0:
	case SQ_ALU_CONST_CACHE_PS_1:
	case SQ_ALU_CONST_CACHE_PS_2:
	case SQ_ALU_CONST_CACHE_PS_3:
	case SQ_ALU_CONST_CACHE_PS_4:
	case SQ_ALU_CONST_CACHE_PS_5:
	case SQ_ALU_CONST_CACHE_PS_6:
	case SQ_ALU_CONST_CACHE_PS_7:
	case SQ_ALU_CONST_CACHE_PS_8:
	case SQ_ALU_CONST_CACHE_PS_9:
	case SQ_ALU_CONST_CACHE_PS_10:
	case SQ_ALU_CONST_CACHE_PS_11:
	case SQ_ALU_CONST_CACHE_PS_12:
	case SQ_ALU_CONST_CACHE_PS_13:
	case SQ_ALU_CONST_CACHE_PS_14:
	case SQ_ALU_CONST_CACHE_PS_15:
	case SQ_ALU_CONST_CACHE_VS_0:
	case SQ_ALU_CONST_CACHE_VS_1:
	case SQ_ALU_CONST_CACHE_VS_2:
	case SQ_ALU_CONST_CACHE_VS_3:
	case SQ_ALU_CONST_CACHE_VS_4:
	case SQ_ALU_CONST_CACHE_VS_5:
	case SQ_ALU_CONST_CACHE_VS_6:
	case SQ_ALU_CONST_CACHE_VS_7:
	case SQ_ALU_CONST_CACHE_VS_8:
	case SQ_ALU_CONST_CACHE_VS_9:
	case SQ_ALU_CONST_CACHE_VS_10:
	case SQ_ALU_CONST_CACHE_VS_11:
	case SQ_ALU_CONST_CACHE_VS_12:
	case SQ_ALU_CONST_CACHE_VS_13:
	case SQ_ALU_CONST_CACHE_VS_14:
	case SQ_ALU_CONST_CACHE_VS_15:
	case SQ_ALU_CONST_CACHE_HS_0:
	case SQ_ALU_CONST_CACHE_HS_1:
	case SQ_ALU_CONST_CACHE_HS_2:
	case SQ_ALU_CONST_CACHE_HS_3:
	case SQ_ALU_CONST_CACHE_HS_4:
	case SQ_ALU_CONST_CACHE_HS_5:
	case SQ_ALU_CONST_CACHE_HS_6:
	case SQ_ALU_CONST_CACHE_HS_7:
	case SQ_ALU_CONST_CACHE_HS_8:
	case SQ_ALU_CONST_CACHE_HS_9:
	case SQ_ALU_CONST_CACHE_HS_10:
	case SQ_ALU_CONST_CACHE_HS_11:
	case SQ_ALU_CONST_CACHE_HS_12:
	case SQ_ALU_CONST_CACHE_HS_13:
	case SQ_ALU_CONST_CACHE_HS_14:
	case SQ_ALU_CONST_CACHE_HS_15:
	case SQ_ALU_CONST_CACHE_LS_0:
	case SQ_ALU_CONST_CACHE_LS_1:
	case SQ_ALU_CONST_CACHE_LS_2:
	case SQ_ALU_CONST_CACHE_LS_3:
	case SQ_ALU_CONST_CACHE_LS_4:
	case SQ_ALU_CONST_CACHE_LS_5:
	case SQ_ALU_CONST_CACHE_LS_6:
	case SQ_ALU_CONST_CACHE_LS_7:
	case SQ_ALU_CONST_CACHE_LS_8:
	case SQ_ALU_CONST_CACHE_LS_9:
	case SQ_ALU_CONST_CACHE_LS_10:
	case SQ_ALU_CONST_CACHE_LS_11:
	case SQ_ALU_CONST_CACHE_LS_12:
	case SQ_ALU_CONST_CACHE_LS_13:
	case SQ_ALU_CONST_CACHE_LS_14:
	case SQ_ALU_CONST_CACHE_LS_15:
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	case SX_MEMORY_EXPORT_BASE:
		if (p->rdev->family >= CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONFIG_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONFIG_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	case CAYMAN_SX_SCATTER_EXPORT_BASE:
		if (p->rdev->family < CHIP_CAYMAN) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			dev_warn(p->dev, "bad SET_CONTEXT_REG "
				 "0x%04X\n", reg);
			return -EINVAL;
		}
		ib[idx] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		break;
	default:
		dev_warn(p->dev, "forbidden register 0x%08x at %d\n", reg, idx);
		return -EINVAL;
	}
	return 0;
}

/**
 * evergreen_check_texture_resource() - check if a texture resource is valid
 * @p:		parser structure holding parsing context
 * @idx:	index into the cs buffer
 * @texture:	texture's bo structure
 * @mipmap:	mipmap's bo structure
 *
 * This function will check that the resource has valid fields and that
 * the texture and mipmap bo objects are big enough to cover this resource.
 **/
static int evergreen_check_texture_resource(struct radeon_cs_parser *p, u32 idx,
					    struct radeon_bo *texture,
					    struct radeon_bo *mipmap)
{
	/* XXX fill in */
	return 0;
}

static int evergreen_packet3_check(struct radeon_cs_parser *p,
				   struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	struct evergreen_cs_track *track;
	volatile u32 *ib;
	unsigned idx;
	unsigned i;
	unsigned start_reg, end_reg, reg;
	int r;
	u32 idx_value;

	track = (struct evergreen_cs_track *)p->track;
	ib = p->ib->ptr;
	idx = pkt->idx + 1;
	idx_value = radeon_get_ib_value(p, idx);

	switch (pkt->opcode) {
	case PACKET3_SET_PREDICATION:
	{
		int pred_op;
		int tmp;
		if (pkt->count != 1) {
			DRM_ERROR("bad SET PREDICATION\n");
			return -EINVAL;
		}

		tmp = radeon_get_ib_value(p, idx + 1);
		pred_op = (tmp >> 16) & 0x7;

		/* for the clear predicate operation */
		if (pred_op == 0)
			return 0;

		if (pred_op > 2) {
			DRM_ERROR("bad SET PREDICATION operation %d\n", pred_op);
			return -EINVAL;
		}

		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad SET PREDICATION\n");
			return -EINVAL;
		}

		ib[idx + 0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx + 1] = tmp + (upper_32_bits(reloc->lobj.gpu_offset) & 0xff);
	}
	break;
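
	/*
	 * pred_op 0 simply clears predication and carries no address, which
	 * is why it returns early; ops 1 and 2 (believed to be the zpass and
	 * primcount predicates on this hardware) reference a query buffer, so
	 * only those require and patch a relocation.
	 */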
	case PACKET3_CONTEXT_CONTROL:
		if (pkt->count != 1) {
			DRM_ERROR("bad CONTEXT_CONTROL\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_TYPE:
	case PACKET3_NUM_INSTANCES:
	case PACKET3_CLEAR_STATE:
		if (pkt->count) {
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
			return -EINVAL;
		}
		break;
	case CAYMAN_PACKET3_DEALLOC_STATE:
		if (p->rdev->family < CHIP_CAYMAN) {
			DRM_ERROR("bad PACKET3_DEALLOC_STATE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES/CLEAR_STATE\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_BASE:
		if (pkt->count != 1) {
			DRM_ERROR("bad INDEX_BASE\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad INDEX_BASE\n");
			return -EINVAL;
		}
		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
:
1065 if (pkt
->count
!= 3) {
1066 DRM_ERROR("bad DRAW_INDEX\n");
1069 r
= evergreen_cs_packet_next_reloc(p
, &reloc
);
1071 DRM_ERROR("bad DRAW_INDEX\n");
1074 ib
[idx
+0] = idx_value
+ (u32
)(reloc
->lobj
.gpu_offset
& 0xffffffff);
1075 ib
[idx
+1] += upper_32_bits(reloc
->lobj
.gpu_offset
) & 0xff;
1076 r
= evergreen_cs_track_check(p
);
1078 dev_warn(p
->dev
, "%s:%d invalid cmd stream\n", __func__
, __LINE__
);
1082 case PACKET3_DRAW_INDEX_2
:
1083 if (pkt
->count
!= 4) {
1084 DRM_ERROR("bad DRAW_INDEX_2\n");
1087 r
= evergreen_cs_packet_next_reloc(p
, &reloc
);
1089 DRM_ERROR("bad DRAW_INDEX_2\n");
1092 ib
[idx
+1] = idx_value
+ (u32
)(reloc
->lobj
.gpu_offset
& 0xffffffff);
1093 ib
[idx
+2] += upper_32_bits(reloc
->lobj
.gpu_offset
) & 0xff;
1094 r
= evergreen_cs_track_check(p
);
1096 dev_warn(p
->dev
, "%s:%d invalid cmd stream\n", __func__
, __LINE__
);
1100 case PACKET3_DRAW_INDEX_AUTO
:
1101 if (pkt
->count
!= 1) {
1102 DRM_ERROR("bad DRAW_INDEX_AUTO\n");
1105 r
= evergreen_cs_track_check(p
);
1107 dev_warn(p
->dev
, "%s:%d invalid cmd stream %d\n", __func__
, __LINE__
, idx
);
1111 case PACKET3_DRAW_INDEX_MULTI_AUTO
:
1112 if (pkt
->count
!= 2) {
1113 DRM_ERROR("bad DRAW_INDEX_MULTI_AUTO\n");
1116 r
= evergreen_cs_track_check(p
);
1118 dev_warn(p
->dev
, "%s:%d invalid cmd stream %d\n", __func__
, __LINE__
, idx
);
1122 case PACKET3_DRAW_INDEX_IMMD
:
1123 if (pkt
->count
< 2) {
1124 DRM_ERROR("bad DRAW_INDEX_IMMD\n");
1127 r
= evergreen_cs_track_check(p
);
1129 dev_warn(p
->dev
, "%s:%d invalid cmd stream\n", __func__
, __LINE__
);
1133 case PACKET3_DRAW_INDEX_OFFSET
:
1134 if (pkt
->count
!= 2) {
1135 DRM_ERROR("bad DRAW_INDEX_OFFSET\n");
1138 r
= evergreen_cs_track_check(p
);
1140 dev_warn(p
->dev
, "%s:%d invalid cmd stream\n", __func__
, __LINE__
);
1144 case PACKET3_DRAW_INDEX_OFFSET_2
:
1145 if (pkt
->count
!= 3) {
1146 DRM_ERROR("bad DRAW_INDEX_OFFSET_2\n");
1149 r
= evergreen_cs_track_check(p
);
1151 dev_warn(p
->dev
, "%s:%d invalid cmd stream\n", __func__
, __LINE__
);
	case PACKET3_DISPATCH_DIRECT:
		if (pkt->count != 3) {
			DRM_ERROR("bad DISPATCH_DIRECT\n");
			return -EINVAL;
		}
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream %d\n", __func__, __LINE__, idx);
			return r;
		}
		break;
	case PACKET3_DISPATCH_INDIRECT:
		if (pkt->count != 1) {
			DRM_ERROR("bad DISPATCH_INDIRECT\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DISPATCH_INDIRECT\n");
			return -EINVAL;
		}
		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		r = evergreen_cs_track_check(p);
		if (r) {
			dev_warn(p->dev, "%s:%d invalid cmd stream\n", __func__, __LINE__);
			return r;
		}
		break;
	case PACKET3_WAIT_REG_MEM:
		if (pkt->count != 5) {
			DRM_ERROR("bad WAIT_REG_MEM\n");
			return -EINVAL;
		}
		/* bit 4 is reg (0) or mem (1) */
		if (idx_value & 0x10) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad WAIT_REG_MEM\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_SURFACE_SYNC:
		if (pkt->count != 3) {
			DRM_ERROR("bad SURFACE_SYNC\n");
			return -EINVAL;
		}
		/* 0xffffffff/0x0 is flush all cache flag */
		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
		    radeon_get_ib_value(p, idx + 2) != 0) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad SURFACE_SYNC\n");
				return -EINVAL;
			}
			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_EVENT_WRITE:
		if (pkt->count != 2 && pkt->count != 0) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			r = evergreen_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad EVENT_WRITE\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_EVENT_WRITE_EOP:
		if (pkt->count != 4) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		break;
	case PACKET3_EVENT_WRITE_EOS:
		if (pkt->count != 3) {
			DRM_ERROR("bad EVENT_WRITE_EOS\n");
			return -EINVAL;
		}
		r = evergreen_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE_EOS\n");
			return -EINVAL;
		}
		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		break;
	case PACKET3_SET_CONFIG_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_START) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = evergreen_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
	case PACKET3_SET_CONTEXT_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONTEXT_REG_START) ||
		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			r = evergreen_cs_check_reg(p, reg, idx+1+i);
			if (r)
				return r;
		}
		break;
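
	/*
	 * SET_*_REG bursts carry a base offset in the first payload dword
	 * (in register dwords, hence the << 2) followed by pkt->count data
	 * dwords written to consecutive registers, so every register in
	 * [start_reg, end_reg] is range-checked and then vetted individually
	 * by evergreen_cs_check_reg().
	 */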
	case PACKET3_SET_RESOURCE:
		if (pkt->count % 8) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_RESOURCE_START) ||
		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		for (i = 0; i < (pkt->count / 8); i++) {
			struct radeon_bo *texture, *mipmap;
			u32 size, offset;

			switch (G__SQ_CONSTANT_TYPE(radeon_get_ib_value(p, idx+1+(i*8)+7))) {
			case SQ_TEX_VTX_VALID_TEXTURE:
				/* tex base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (tex)\n");
					return -EINVAL;
				}
				ib[idx+1+(i*8)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
					ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_2D_TILED_THIN1);
				else if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
					ib[idx+1+(i*8)+1] |= TEX_ARRAY_MODE(ARRAY_1D_TILED_THIN1);
				texture = reloc->robj;
				/* tex mip base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (tex)\n");
					return -EINVAL;
				}
				ib[idx+1+(i*8)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				mipmap = reloc->robj;
				r = evergreen_check_texture_resource(p, idx+1+(i*8),
								     texture, mipmap);
				if (r)
					return r;
				break;
			case SQ_TEX_VTX_VALID_BUFFER:
				/* vtx base */
				r = evergreen_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE (vtx)\n");
					return -EINVAL;
				}
				offset = radeon_get_ib_value(p, idx+1+(i*8)+0);
				size = radeon_get_ib_value(p, idx+1+(i*8)+1);
				if (p->rdev && (size + offset) > radeon_bo_size(reloc->robj)) {
					/* force size to size of the buffer */
					dev_warn(p->dev, "vbo resource seems too big for the bo\n");
					ib[idx+1+(i*8)+1] = radeon_bo_size(reloc->robj);
				}
				ib[idx+1+(i*8)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
				ib[idx+1+(i*8)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
				break;
			case SQ_TEX_VTX_INVALID_TEXTURE:
			case SQ_TEX_VTX_INVALID_BUFFER:
			default:
				DRM_ERROR("bad SET_RESOURCE\n");
				return -EINVAL;
			}
		}
		break;
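
	/*
	 * Each SET_RESOURCE descriptor is 8 dwords (hence the % 8 check and
	 * the i*8 stride); dword 7 encodes the resource type, which selects
	 * whether the entry is validated as a texture (two relocs: base and
	 * mip chain) or as a vertex buffer (one reloc plus a size clamp).
	 */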
	case PACKET3_SET_ALU_CONST:
		/* XXX fix me ALU const buffers only */
		break;
	case PACKET3_SET_BOOL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_BOOL_CONST_START) ||
		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
			DRM_ERROR("bad SET_BOOL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_LOOP_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_LOOP_CONST_START) ||
		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
			DRM_ERROR("bad SET_LOOP_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_CTL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CTL_CONST_START) ||
		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
			DRM_ERROR("bad SET_CTL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_SAMPLER:
		if (pkt->count % 3) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_START;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_SAMPLER_START) ||
		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}

int evergreen_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	struct evergreen_cs_track *track;
	int r;

	if (p->track == NULL) {
		/* initialize tracker, we are in kms */
		track = kzalloc(sizeof(*track), GFP_KERNEL);
		if (track == NULL)
			return -ENOMEM;
		evergreen_cs_track_init(track);
		track->npipes = p->rdev->config.evergreen.tiling_npipes;
		track->nbanks = p->rdev->config.evergreen.tiling_nbanks;
		track->group_size = p->rdev->config.evergreen.tiling_group_size;
		p->track = track;
	}
	do {
		r = evergreen_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = evergreen_cs_parse_packet0(p, &pkt);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = evergreen_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			kfree(p->track);
			p->track = NULL;
			return -EINVAL;
		}
		if (r) {
			kfree(p->track);
			p->track = NULL;
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	for (r = 0; r < p->ib->length_dw; r++) {
		printk(KERN_INFO "%05d  0x%08X\n", r, p->ib->ptr[r]);
	}
#endif
	return 0;
}