/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 */
#include "drmP.h"
#include "radeon.h"
#include "r600d.h"

static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc);
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
					  struct radeon_cs_reloc **cs_reloc);
typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
/**
 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p: parser structure holding parsing context.
 * @pkt: where to store packet information
 * @idx: index of the packet header in the ib
 *
 * Assume that chunk_ib_index is properly set. Will return -EINVAL
 * if the packet is bigger than the remaining ib size, or if the packet
 * type is unknown.
 */
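/*
 * Layout assumed here for a PM4 packet header: type in bits [31:30] and
 * payload dword count minus one in bits [29:16]; a PACKET0 header carries
 * the base register dword index in bits [15:0], a PACKET3 header the
 * opcode in bits [15:8].  The CP_PACKET_GET_* macros extract those fields.
 */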
int r600_cs_packet_parse(struct radeon_cs_parser *p,
			 struct radeon_cs_packet *pkt,
			 unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	uint32_t header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = CP_PACKET_GET_TYPE(header);
	pkt->count = CP_PACKET_GET_COUNT(header);
	switch (pkt->type) {
	case PACKET_TYPE0:
		pkt->reg = CP_PACKET0_GET_REG(header);
		break;
	case PACKET_TYPE3:
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
		break;
	case PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}
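/*
 * Relocation convention used by both helpers below: userspace follows
 * every packet that needs a buffer object with a PACKET3_NOP whose first
 * payload dword is an index into the relocation chunk.  Each relocation
 * entry is assumed to be 4 dwords wide (see the FIXME in
 * r600_cs_packet_next_reloc_mm), hence the idx / 4 lookup.
 */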
/**
 * r600_cs_packet_next_reloc_mm() - parse next packet which should be a reloc packet3
 * @p: parser structure holding parsing context.
 * @cs_reloc: where to store the resulting relocation
 *
 * Check that the next packet is a relocation packet3, look up the buffer
 * object it references, and return the relocation entry whose GPU offset
 * has been validated by the memory manager.
 */
static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return r;
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}
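/*
 * The _nomm variant below serves the legacy (no kernel memory manager)
 * path selected by r600_cs_legacy_init(): instead of looking up a
 * validated buffer object, it takes the GPU offset that userspace
 * supplied directly in the relocation chunk.
 */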
/**
 * r600_cs_packet_next_reloc_nomm() - parse next packet which should be a reloc packet3
 * @p: parser structure holding parsing context.
 * @cs_reloc: where to store the resulting relocation
 *
 * Check that the next packet is a relocation packet3 and compute the
 * GPU offset from the values userspace placed in the relocation chunk.
 */
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
					  struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	if (r)
		return r;
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
	*cs_reloc = &p->relocs[0];
	/* dword idx+0 carries the low 32 bits of the GPU offset,
	 * dword idx+3 the high bits */
	(*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
	(*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
	return 0;
}
/**
 * r600_cs_packet_parse_vline() - parse userspace VLINE packet
 * @p: parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits:
 * PACKET0    - VLINE_START_END + value
 * PACKET3    - WAIT_REG_MEM poll vline status reg
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this sequence and relocates the VLINE START END
 * and WAIT_REG_MEM packets to the correct crtc.
 * It also detects a switched off crtc and nulls out the wait in that case.
 */
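/*
 * IB layout assumed by the parser below, with h_idx the index of the
 * PACKET0 header:
 *   h_idx + 0      PACKET0 VLINE_START_END header
 *   h_idx + 1      start/end value
 *   h_idx + 2..8   PACKET3 WAIT_REG_MEM (header + 6 payload dwords)
 *   h_idx + 9      PACKET3 NOP header
 *   h_idx + 10     relocation dword carrying the crtc_id
 * This is why crtc_id is read at h_idx + 2 + 7 + 1 and why dwords
 * h_idx + 2 through h_idx + 8 are replaced with NOPs for a disabled crtc.
 */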
static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, wait_reg_mem;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg, wait_reg_mem_info;
	volatile uint32_t *ib;

	ib = p->ib->ptr;

	/* parse the WAIT_REG_MEM */
	r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
	if (r)
		return r;

	/* check it is a WAIT_REG_MEM */
	if (wait_reg_mem.type != PACKET_TYPE3 ||
	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
		return -EINVAL;
	}

	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
	/* bit 4 is reg (0) or mem (1) */
	if (wait_reg_mem_info & 0x10) {
		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
		return -EINVAL;
	}
	/* waiting for value to be equal */
	if ((wait_reg_mem_info & 0x7) != 0x3) {
		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
		return -EINVAL;
	}
	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
		return -EINVAL;
	}
	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
		return -EINVAL;
	}

	/* jump over the NOP */
	r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
	if (r)
		return r;

	h_idx = p->idx - 2;
	p->idx += wait_reg_mem.count + 2;
	p->idx += p3reloc.count + 2;

	header = radeon_get_ib_value(p, h_idx);
	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
	reg = CP_PACKET0_GET_REG(header);
	mutex_lock(&p->rdev->ddev->mode_config.mutex);
	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
		r = -EINVAL;
		goto out;
	}
	crtc = obj_to_crtc(obj);
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
		ib[h_idx + 4] = PACKET2(0);
		ib[h_idx + 5] = PACKET2(0);
		ib[h_idx + 6] = PACKET2(0);
		ib[h_idx + 7] = PACKET2(0);
		ib[h_idx + 8] = PACKET2(0);
	} else if (crtc_id == 1) {
		/* retarget the packets at the D2 (second crtc) registers */
		switch (reg) {
		case AVIVO_D1MODE_VLINE_START_END:
			header &= ~R600_CP_PACKET0_REG_MASK;
			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
			break;
		default:
			DRM_ERROR("unknown crtc reloc\n");
			r = -EINVAL;
			goto out;
		}
		ib[h_idx] = header;
		ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
	}
out:
	mutex_unlock(&p->rdev->ddev->mode_config.mutex);
	return r;
}
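/*
 * Only the VLINE_START_END register may be written via PACKET0 in an
 * r600 command stream; it triggers the vline fixup above, and every
 * other register is rejected as forbidden.
 */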
static int r600_packet0_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt,
			      unsigned idx, unsigned reg)
{
	int r;

	switch (reg) {
	case AVIVO_D1MODE_VLINE_START_END:
		r = r600_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			return r;
		}
		break;
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}
static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
				 struct radeon_cs_packet *pkt)
{
	unsigned reg, i;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	/* a PACKET0 writes count + 1 consecutive registers starting at reg */
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		r = r600_packet0_check(p, pkt, idx, reg);
		if (r)
			return r;
	}
	return 0;
}
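/*
 * r600_packet3_check() validates one PACKET3 against its expected
 * payload size and patches buffer object addresses into the IB.  The
 * recurring pattern of adding the low 32 bits at one dword and
 * "upper_32_bits(...) & 0xff" at the next reflects the assumption that
 * r600 GPU addresses are at most 40 bits wide.
 */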
static int r600_packet3_check(struct radeon_cs_parser *p,
			      struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	volatile u32 *ib;
	unsigned idx;
	unsigned i;
	unsigned start_reg, end_reg, reg;
	int r;
	u32 idx_value;

	ib = p->ib->ptr;
	idx = pkt->idx + 1;
	idx_value = radeon_get_ib_value(p, idx);

	switch (pkt->opcode) {
	case PACKET3_START_3D_CMDBUF:
		if (p->family >= CHIP_RV770 || pkt->count) {
			DRM_ERROR("bad START_3D\n");
			return -EINVAL;
		}
		break;
	case PACKET3_CONTEXT_CONTROL:
		if (pkt->count != 1) {
			DRM_ERROR("bad CONTEXT_CONTROL\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_TYPE:
	case PACKET3_NUM_INSTANCES:
		if (pkt->count) {
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
			return -EINVAL;
		}
		break;
	case PACKET3_DRAW_INDEX:
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		break;
	case PACKET3_DRAW_INDEX_AUTO:
		if (pkt->count != 1) {
			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
			return -EINVAL;
		}
		break;
	case PACKET3_DRAW_INDEX_IMMD_BE:
	case PACKET3_DRAW_INDEX_IMMD:
		if (pkt->count < 2) {
			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
			return -EINVAL;
		}
		break;
	case PACKET3_WAIT_REG_MEM:
		if (pkt->count != 5) {
			DRM_ERROR("bad WAIT_REG_MEM\n");
			return -EINVAL;
		}
		/* bit 4 is reg (0) or mem (1) */
		if (idx_value & 0x10) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad WAIT_REG_MEM\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
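	/*
	 * SURFACE_SYNC with a base of 0xffffffff and a size of 0 is the
	 * "flush all caches" form and needs no relocation; any other
	 * range must reference a buffer object.
	 */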
	case PACKET3_SURFACE_SYNC:
		if (pkt->count != 3) {
			DRM_ERROR("bad SURFACE_SYNC\n");
			return -EINVAL;
		}
		/* 0xffffffff/0x0 is flush all cache flag */
		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
		    radeon_get_ib_value(p, idx + 2) != 0) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad SURFACE_SYNC\n");
				return -EINVAL;
			}
			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_EVENT_WRITE:
		if (pkt->count != 2 && pkt->count != 0) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad EVENT_WRITE\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_EVENT_WRITE_EOP:
		if (pkt->count != 4) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		break;
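	/*
	 * All SET_* packets share one layout: dword 1 is a register
	 * offset (in dwords) from the block base, followed by one data
	 * dword per register.  With pkt->count registers written,
	 *   start_reg = (idx_value << 2) + <block>_OFFSET
	 *   end_reg   = start_reg + 4 * pkt->count - 4
	 * is the last register touched, and both bounds must stay inside
	 * [<block>_OFFSET, <block>_END).
	 */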
	case PACKET3_SET_CONFIG_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			switch (reg) {
			case CP_COHER_BASE:
				/* use PACKET3_SURFACE_SYNC */
				return -EINVAL;
			default:
				break;
			}
		}
		break;
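	/*
	 * Context registers that hold buffer addresses get their
	 * relocation applied in the loop below (the SQ_PGM_START_*
	 * shader bases); VGT DMA and streamout bases must instead be
	 * supplied through their dedicated packets and are rejected
	 * here.
	 */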
	case PACKET3_SET_CONTEXT_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			switch (reg) {
			case SQ_PGM_START_FS:
			case SQ_PGM_START_ES:
			case SQ_PGM_START_VS:
			case SQ_PGM_START_GS:
			case SQ_PGM_START_PS:
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_CONTEXT_REG "
						  "0x%04X\n", reg);
					return -EINVAL;
				}
				ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				break;
			case VGT_DMA_BASE:
			case VGT_DMA_BASE_HI:
				/* These should be handled by DRAW_INDEX packet 3 */
			case VGT_STRMOUT_BASE_OFFSET_0:
			case VGT_STRMOUT_BASE_OFFSET_1:
			case VGT_STRMOUT_BASE_OFFSET_2:
			case VGT_STRMOUT_BASE_OFFSET_3:
			case VGT_STRMOUT_BASE_OFFSET_HI_0:
			case VGT_STRMOUT_BASE_OFFSET_HI_1:
			case VGT_STRMOUT_BASE_OFFSET_HI_2:
			case VGT_STRMOUT_BASE_OFFSET_HI_3:
			case VGT_STRMOUT_BUFFER_BASE_0:
			case VGT_STRMOUT_BUFFER_BASE_1:
			case VGT_STRMOUT_BUFFER_BASE_2:
			case VGT_STRMOUT_BUFFER_BASE_3:
			case VGT_STRMOUT_BUFFER_OFFSET_0:
			case VGT_STRMOUT_BUFFER_OFFSET_1:
			case VGT_STRMOUT_BUFFER_OFFSET_2:
			case VGT_STRMOUT_BUFFER_OFFSET_3:
				/* These should be handled by STRMOUT_BUFFER packet 3 */
				DRM_ERROR("bad context reg: 0x%08x\n", reg);
				return -EINVAL;
			default:
				break;
			}
		}
		break;
	case PACKET3_SET_RESOURCE:
		if (pkt->count % 7) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		for (i = 0; i < (pkt->count / 7); i++) {
			switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
			case SQ_TEX_VTX_VALID_TEXTURE:
				/* tex base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				ib[idx+1+(i*7)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				/* tex mip base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				ib[idx+1+(i*7)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				break;
			case SQ_TEX_VTX_VALID_BUFFER:
				/* vtx base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
				ib[idx+1+(i*7)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
				break;
			case SQ_TEX_VTX_INVALID_TEXTURE:
			case SQ_TEX_VTX_INVALID_BUFFER:
			default:
				DRM_ERROR("bad SET_RESOURCE\n");
				return -EINVAL;
			}
		}
		break;
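	/*
	 * Each SET_RESOURCE entry above is 7 dwords; dword 6 encodes the
	 * resource type, which decides whether the entry is patched as a
	 * texture (base + mip base) or as a vertex buffer (64-bit base
	 * split across dwords 0 and 2).
	 */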
	case PACKET3_SET_ALU_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
		    (end_reg >= PACKET3_SET_ALU_CONST_END)) {
			DRM_ERROR("bad SET_ALU_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_BOOL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
			DRM_ERROR("bad SET_BOOL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_LOOP_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
			DRM_ERROR("bad SET_LOOP_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_CTL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
			DRM_ERROR("bad SET_CTL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_SAMPLER:
		if (pkt->count % 3) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SURFACE_BASE_UPDATE:
		if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
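/*
 * Top-level parser: walk the IB packet by packet until the end of the
 * ib chunk, dispatching each packet to the type-specific checker.
 */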
int r600_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	int r;

	do {
		r = r600_cs_packet_parse(p, &pkt, p->idx);
		if (r)
			return r;
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = r600_cs_parse_packet0(p, &pkt);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = r600_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			return -EINVAL;
		}
		if (r)
			return r;
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	/* debugging aid: dump the parsed IB */
	for (r = 0; r < p->ib->length_dw; r++) {
		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
	}
#endif
	return 0;
}
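/*
 * The legacy path carries at most one relocation entry, so a single
 * zeroed radeon_cs_reloc is enough; r600_cs_packet_next_reloc_nomm()
 * fills in the GPU offset for each use.
 */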
static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
{
	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	p->relocs = kcalloc(1, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}
	return 0;
}
/**
 * r600_cs_parser_fini() - clean parser states
 * @parser: parser structure holding parsing context.
 * @error: error number
 *
 * If error is set, then unvalidate the buffer; otherwise just free the
 * memory used by the parsing context.
 */
static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
	unsigned i;

	kfree(parser->relocs);
	for (i = 0; i < parser->nchunks; i++) {
		kfree(parser->chunks[i].kdata);
		kfree(parser->chunks[i].kpage[0]);
		kfree(parser->chunks[i].kpage[1]);
	}
	kfree(parser->chunks);
	kfree(parser->chunks_array);
}
int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
		   unsigned family, u32 *ib, int *l)
{
	struct radeon_cs_parser parser;
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_ib fake_ib;
	int r;

	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = NULL;
	parser.family = family;
	parser.ib = &fake_ib;
	fake_ib.ptr = ib;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r = r600_cs_parser_relocs_legacy(&parser);
	if (r) {
		DRM_ERROR("Failed to parse relocation !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	/* Copy the packet into the IB, the parser will read from the
	 * input memory (cached) and write to the IB (which can be
	 * uncached). */
	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
	parser.ib->length_dw = ib_chunk->length_dw;
	*l = parser.ib->length_dw;
	r = r600_cs_parse(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r = radeon_cs_finish_pages(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r600_cs_parser_fini(&parser, r);
	return 0;
}
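/*
 * Called by the legacy (non kernel memory manager) setup path: switch
 * the relocation parser to the variant that takes userspace-provided
 * GPU offsets instead of consulting the memory manager.
 */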
void r600_cs_legacy_init(void)
{
	r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
}