/*
 * Copyright 2008 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Dave Airlie
 *          Alex Deucher
 *          Jerome Glisse
 */
#include "drmP.h"
#include "radeon.h"
#include "r600d.h"

static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc);
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc);
typedef int (*next_reloc_t)(struct radeon_cs_parser*, struct radeon_cs_reloc**);
static next_reloc_t r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_mm;
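
/*
 * Editorial note: relocation lookup is dispatched through a function
 * pointer so the same checker serves both submission paths. The
 * KMS/memory-managed path (_mm, the default above) resolves relocations
 * through the parser's reloc table, while the legacy/UMS path (_nomm,
 * installed by r600_cs_legacy_init() at the bottom of this file) reads
 * the GPU offset straight out of the relocation chunk.
 */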
/**
 * r600_cs_packet_parse() - parse cp packet and point ib index to next packet
 * @p:		parser structure holding parsing context.
 * @pkt:	where to store packet information
 * @idx:	index of the packet header in the ib chunk
 *
 * Assumes that chunk_ib_idx is properly set. Returns -EINVAL if the
 * packet is bigger than the remaining ib size, or if the packet is unknown.
 **/
int r600_cs_packet_parse(struct radeon_cs_parser *p,
			struct radeon_cs_packet *pkt,
			unsigned idx)
{
	struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
	uint32_t header;

	if (idx >= ib_chunk->length_dw) {
		DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
			  idx, ib_chunk->length_dw);
		return -EINVAL;
	}
	header = radeon_get_ib_value(p, idx);
	pkt->idx = idx;
	pkt->type = CP_PACKET_GET_TYPE(header);
	pkt->count = CP_PACKET_GET_COUNT(header);
	pkt->one_reg_wr = 0;
	switch (pkt->type) {
	case PACKET_TYPE0:
		pkt->reg = CP_PACKET0_GET_REG(header);
		break;
	case PACKET_TYPE3:
		pkt->opcode = CP_PACKET3_GET_OPCODE(header);
		break;
	case PACKET_TYPE2:
		pkt->count = -1;
		break;
	default:
		DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
		return -EINVAL;
	}
	if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
		DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
			  pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
		return -EINVAL;
	}
	return 0;
}
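
/*
 * For reference (editorial note, based on the CP_PACKET* macros in
 * r600d.h): an R600 CP packet header encodes the packet type in bits
 * [31:30] and the body length minus one in bits [29:16]. For type-0
 * packets the low 16 bits hold the starting register dword offset; for
 * type-3 packets bits [15:8] hold the opcode. Type-2 packets are
 * single-dword fillers, which is why pkt->count is forced to -1 above.
 */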
/**
 * r600_cs_packet_next_reloc_mm() - parse next packet which should be reloc packet3
 * @p:		parser structure holding parsing context.
 * @cs_reloc:	where to store the found relocation
 *
 * Check that the next packet is a relocation packet3 and return the
 * relocation entry it refers to. Used on the memory-managed (KMS) path.
 **/
static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
	/* FIXME: we assume reloc size is 4 dwords */
	*cs_reloc = p->relocs_ptr[(idx / 4)];
	return 0;
}
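
/*
 * Illustrative only: a relocation reference in the command stream is a
 * type-3 NOP packet whose payload dword is an index into the relocation
 * chunk, e.g. (hypothetical index value):
 *
 *	0xC0001000,	- PACKET3(PACKET3_NOP, 0) header
 *	0x00000004,	- dword index of the reloc entry in the chunk
 *
 * With the 4-dword reloc entries assumed above, idx / 4 selects the
 * matching entry in p->relocs_ptr.
 */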
/**
 * r600_cs_packet_next_reloc_nomm() - parse next packet which should be reloc packet3
 * @p:		parser structure holding parsing context.
 * @cs_reloc:	where to store the found relocation
 *
 * Check that the next packet is a relocation packet3 and compute the GPU
 * offset directly from the relocation chunk data. Used on the legacy
 * (non-memory-managed) path.
 **/
static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
					struct radeon_cs_reloc **cs_reloc)
{
	struct radeon_cs_chunk *relocs_chunk;
	struct radeon_cs_packet p3reloc;
	unsigned idx;
	int r;

	if (p->chunk_relocs_idx == -1) {
		DRM_ERROR("No relocation chunk !\n");
		return -EINVAL;
	}
	*cs_reloc = NULL;
	relocs_chunk = &p->chunks[p->chunk_relocs_idx];
	r = r600_cs_packet_parse(p, &p3reloc, p->idx);
	if (r) {
		return r;
	}
	p->idx += p3reloc.count + 2;
	if (p3reloc.type != PACKET_TYPE3 || p3reloc.opcode != PACKET3_NOP) {
		DRM_ERROR("No packet3 for relocation for packet at %d.\n",
			  p3reloc.idx);
		return -EINVAL;
	}
	idx = radeon_get_ib_value(p, p3reloc.idx + 1);
	if (idx >= relocs_chunk->length_dw) {
		DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
			  idx, relocs_chunk->length_dw);
		return -EINVAL;
	}
	*cs_reloc = &p->relocs[0];
	(*cs_reloc)->lobj.gpu_offset = (u64)relocs_chunk->kdata[idx + 3] << 32;
	(*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
	return 0;
}
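
/*
 * Editorial note: on this legacy path the 64-bit GPU address is assembled
 * from two dwords of the reloc entry, kdata[idx + 0] (low 32 bits) and
 * kdata[idx + 3] (high bits), matching the 4-dword entry layout assumed
 * by the _mm variant above. Only the single scratch reloc p->relocs[0]
 * is ever filled in.
 */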
/**
 * r600_cs_packet_parse_vline() - parse userspace VLINE packet
 * @p:		parser structure holding parsing context.
 *
 * Userspace sends a special sequence for VLINE waits:
 * PACKET0 - VLINE_START_END + value
 * PACKET3 - WAIT_REG_MEM poll vline status reg
 * RELOC (P3) - crtc_id in reloc.
 *
 * This function parses this sequence and relocates the VLINE START END
 * and WAIT_REG_MEM packets to the correct crtc.
 * It also detects a switched-off crtc and nops out the wait in that case.
 **/
static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
{
	struct drm_mode_object *obj;
	struct drm_crtc *crtc;
	struct radeon_crtc *radeon_crtc;
	struct radeon_cs_packet p3reloc, wait_reg_mem;
	int crtc_id;
	int r;
	uint32_t header, h_idx, reg, wait_reg_mem_info;
	volatile uint32_t *ib;

	ib = p->ib->ptr;

	/* parse the WAIT_REG_MEM */
	r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
	if (r)
		return r;

	/* check it's a WAIT_REG_MEM */
	if (wait_reg_mem.type != PACKET_TYPE3 ||
	    wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
		DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
		r = -EINVAL;
		return r;
	}

	wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
	/* bit 4 is reg (0) or mem (1) */
	if (wait_reg_mem_info & 0x10) {
		DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
		r = -EINVAL;
		return r;
	}
	/* waiting for value to be equal */
	if ((wait_reg_mem_info & 0x7) != 0x3) {
		DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
		r = -EINVAL;
		return r;
	}
	if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
		DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
		r = -EINVAL;
		return r;
	}

	if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
		DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
		r = -EINVAL;
		return r;
	}

	/* jump over the NOP */
	r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
	if (r)
		return r;

	h_idx = p->idx - 2;
	p->idx += wait_reg_mem.count + 2;
	p->idx += p3reloc.count + 2;

	header = radeon_get_ib_value(p, h_idx);
	crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
	reg = CP_PACKET0_GET_REG(header);
	mutex_lock(&p->rdev->ddev->mode_config.mutex);
	obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
	if (!obj) {
		DRM_ERROR("cannot find crtc %d\n", crtc_id);
		r = -EINVAL;
		goto out;
	}
	crtc = obj_to_crtc(obj);
	radeon_crtc = to_radeon_crtc(crtc);
	crtc_id = radeon_crtc->crtc_id;

	if (!crtc->enabled) {
		/* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
		ib[h_idx + 2] = PACKET2(0);
		ib[h_idx + 3] = PACKET2(0);
		ib[h_idx + 4] = PACKET2(0);
		ib[h_idx + 5] = PACKET2(0);
		ib[h_idx + 6] = PACKET2(0);
		ib[h_idx + 7] = PACKET2(0);
		ib[h_idx + 8] = PACKET2(0);
	} else if (crtc_id == 1) {
		switch (reg) {
		case AVIVO_D1MODE_VLINE_START_END:
			header &= ~R600_CP_PACKET0_REG_MASK;
			header |= AVIVO_D2MODE_VLINE_START_END >> 2;
			break;
		default:
			DRM_ERROR("unknown crtc reloc\n");
			r = -EINVAL;
			goto out;
		}
		ib[h_idx] = header;
		ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
	}
out:
	mutex_unlock(&p->rdev->ddev->mode_config.mutex);
	return r;
}
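
/*
 * Illustrative only (dword offsets relative to h_idx, the PACKET0
 * header): the userspace sequence checked above is laid out as
 *
 *	h_idx + 0:	PACKET0 header for VLINE_START_END
 *	h_idx + 1:	start/end value
 *	h_idx + 2..8:	WAIT_REG_MEM packet (7 dwords) polling vline status
 *	h_idx + 9..10:	PACKET3 NOP reloc, payload carries the crtc_id
 *
 * which is why crtc_id is read at h_idx + 2 + 7 + 1 and why a disabled
 * crtc is handled by overwriting ib[h_idx + 2] through ib[h_idx + 8]
 * with PACKET2(0) filler dwords.
 */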
static int r600_packet0_check(struct radeon_cs_parser *p,
				struct radeon_cs_packet *pkt,
				unsigned idx, unsigned reg)
{
	int r;

	switch (reg) {
	case AVIVO_D1MODE_VLINE_START_END:
		r = r600_cs_packet_parse_vline(p);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
				  idx, reg);
			return r;
		}
		break;
	default:
		printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
		       reg, idx);
		return -EINVAL;
	}
	return 0;
}
static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
				struct radeon_cs_packet *pkt)
{
	unsigned reg, i;
	unsigned idx;
	int r;

	idx = pkt->idx + 1;
	reg = pkt->reg;
	for (i = 0; i <= pkt->count; i++, idx++, reg += 4) {
		r = r600_packet0_check(p, pkt, idx, reg);
		if (r) {
			return r;
		}
	}
	return 0;
}
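
/*
 * Editorial note: a type-0 packet writes pkt->count + 1 consecutive
 * registers starting at pkt->reg, one payload dword each, so the loop
 * above advances the register offset by 4 bytes per dword and validates
 * every register the packet touches.
 */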
static int r600_packet3_check(struct radeon_cs_parser *p,
				struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	volatile u32 *ib;
	unsigned idx;
	unsigned i;
	unsigned start_reg, end_reg, reg;
	int r;
	u32 idx_value;

	ib = p->ib->ptr;
	idx = pkt->idx + 1;
	idx_value = radeon_get_ib_value(p, idx);

	switch (pkt->opcode) {
	case PACKET3_START_3D_CMDBUF:
		if (p->family >= CHIP_RV770 || pkt->count) {
			DRM_ERROR("bad START_3D\n");
			return -EINVAL;
		}
		break;
	case PACKET3_CONTEXT_CONTROL:
		if (pkt->count != 1) {
			DRM_ERROR("bad CONTEXT_CONTROL\n");
			return -EINVAL;
		}
		break;
	case PACKET3_INDEX_TYPE:
	case PACKET3_NUM_INSTANCES:
		if (pkt->count) {
			DRM_ERROR("bad INDEX_TYPE/NUM_INSTANCES\n");
			return -EINVAL;
		}
		break;
	case PACKET3_DRAW_INDEX:
		if (pkt->count != 3) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad DRAW_INDEX\n");
			return -EINVAL;
		}
		ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		break;
	case PACKET3_DRAW_INDEX_AUTO:
		if (pkt->count != 1) {
			DRM_ERROR("bad DRAW_INDEX_AUTO\n");
			return -EINVAL;
		}
		break;
	case PACKET3_DRAW_INDEX_IMMD_BE:
	case PACKET3_DRAW_INDEX_IMMD:
		if (pkt->count < 2) {
			DRM_ERROR("bad DRAW_INDEX_IMMD\n");
			return -EINVAL;
		}
		break;
	case PACKET3_WAIT_REG_MEM:
		if (pkt->count != 5) {
			DRM_ERROR("bad WAIT_REG_MEM\n");
			return -EINVAL;
		}
		/* bit 4 is reg (0) or mem (1) */
		if (idx_value & 0x10) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad WAIT_REG_MEM\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_SURFACE_SYNC:
		if (pkt->count != 3) {
			DRM_ERROR("bad SURFACE_SYNC\n");
			return -EINVAL;
		}
		/* 0xffffffff/0x0 is flush all cache flag */
		if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
		    radeon_get_ib_value(p, idx + 2) != 0) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad SURFACE_SYNC\n");
				return -EINVAL;
			}
			ib[idx+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
		}
		break;
	case PACKET3_EVENT_WRITE:
		if (pkt->count != 2 && pkt->count != 0) {
			DRM_ERROR("bad EVENT_WRITE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			r = r600_cs_packet_next_reloc(p, &reloc);
			if (r) {
				DRM_ERROR("bad EVENT_WRITE\n");
				return -EINVAL;
			}
			ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
			ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		}
		break;
	case PACKET3_EVENT_WRITE_EOP:
		if (pkt->count != 4) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		r = r600_cs_packet_next_reloc(p, &reloc);
		if (r) {
			DRM_ERROR("bad EVENT_WRITE_EOP\n");
			return -EINVAL;
		}
		ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
		ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
		break;
	case PACKET3_SET_CONFIG_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
		    (end_reg >= PACKET3_SET_CONFIG_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONFIG_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			switch (reg) {
			case CP_COHER_BASE:
				/* use PACKET3_SURFACE_SYNC */
				return -EINVAL;
			default:
				break;
			}
		}
		break;
	case PACKET3_SET_CONTEXT_REG:
		start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
		    (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
		    (end_reg >= PACKET3_SET_CONTEXT_REG_END)) {
			DRM_ERROR("bad PACKET3_SET_CONTEXT_REG\n");
			return -EINVAL;
		}
		for (i = 0; i < pkt->count; i++) {
			reg = start_reg + (4 * i);
			switch (reg) {
			case DB_DEPTH_BASE:
			case CB_COLOR0_BASE:
			case CB_COLOR1_BASE:
			case CB_COLOR2_BASE:
			case CB_COLOR3_BASE:
			case CB_COLOR4_BASE:
			case CB_COLOR5_BASE:
			case CB_COLOR6_BASE:
			case CB_COLOR7_BASE:
			case SQ_PGM_START_FS:
			case SQ_PGM_START_ES:
			case SQ_PGM_START_VS:
			case SQ_PGM_START_GS:
			case SQ_PGM_START_PS:
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_CONTEXT_REG "
						  "0x%04X\n", reg);
					return -EINVAL;
				}
				ib[idx+1+i] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				break;
			case VGT_DMA_BASE:
			case VGT_DMA_BASE_HI:
				/* These should be handled by DRAW_INDEX packet 3 */
			case VGT_STRMOUT_BASE_OFFSET_0:
			case VGT_STRMOUT_BASE_OFFSET_1:
			case VGT_STRMOUT_BASE_OFFSET_2:
			case VGT_STRMOUT_BASE_OFFSET_3:
			case VGT_STRMOUT_BASE_OFFSET_HI_0:
			case VGT_STRMOUT_BASE_OFFSET_HI_1:
			case VGT_STRMOUT_BASE_OFFSET_HI_2:
			case VGT_STRMOUT_BASE_OFFSET_HI_3:
			case VGT_STRMOUT_BUFFER_BASE_0:
			case VGT_STRMOUT_BUFFER_BASE_1:
			case VGT_STRMOUT_BUFFER_BASE_2:
			case VGT_STRMOUT_BUFFER_BASE_3:
			case VGT_STRMOUT_BUFFER_OFFSET_0:
			case VGT_STRMOUT_BUFFER_OFFSET_1:
			case VGT_STRMOUT_BUFFER_OFFSET_2:
			case VGT_STRMOUT_BUFFER_OFFSET_3:
				/* These should be handled by STRMOUT_BUFFER packet 3 */
				DRM_ERROR("bad context reg: 0x%08x\n", reg);
				return -EINVAL;
			default:
				break;
			}
		}
		break;
	case PACKET3_SET_RESOURCE:
		if (pkt->count % 7) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
		    (start_reg >= PACKET3_SET_RESOURCE_END) ||
		    (end_reg >= PACKET3_SET_RESOURCE_END)) {
			DRM_ERROR("bad SET_RESOURCE\n");
			return -EINVAL;
		}
		for (i = 0; i < (pkt->count / 7); i++) {
			switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
			case SQ_TEX_VTX_VALID_TEXTURE:
				/* tex base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				ib[idx+1+(i*7)+2] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				/* tex mip base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				ib[idx+1+(i*7)+3] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
				break;
			case SQ_TEX_VTX_VALID_BUFFER:
				/* vtx base */
				r = r600_cs_packet_next_reloc(p, &reloc);
				if (r) {
					DRM_ERROR("bad SET_RESOURCE\n");
					return -EINVAL;
				}
				ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
				ib[idx+1+(i*7)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
				break;
			case SQ_TEX_VTX_INVALID_TEXTURE:
			case SQ_TEX_VTX_INVALID_BUFFER:
			default:
				DRM_ERROR("bad SET_RESOURCE\n");
				return -EINVAL;
			}
		}
		break;
	case PACKET3_SET_ALU_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_ALU_CONST_END) ||
		    (end_reg >= PACKET3_SET_ALU_CONST_END)) {
			DRM_ERROR("bad SET_ALU_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_BOOL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
		    (end_reg >= PACKET3_SET_BOOL_CONST_END)) {
			DRM_ERROR("bad SET_BOOL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_LOOP_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
		    (end_reg >= PACKET3_SET_LOOP_CONST_END)) {
			DRM_ERROR("bad SET_LOOP_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_CTL_CONST:
		start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
		    (start_reg >= PACKET3_SET_CTL_CONST_END) ||
		    (end_reg >= PACKET3_SET_CTL_CONST_END)) {
			DRM_ERROR("bad SET_CTL_CONST\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SET_SAMPLER:
		if (pkt->count % 3) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
		end_reg = 4 * pkt->count + start_reg - 4;
		if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
		    (start_reg >= PACKET3_SET_SAMPLER_END) ||
		    (end_reg >= PACKET3_SET_SAMPLER_END)) {
			DRM_ERROR("bad SET_SAMPLER\n");
			return -EINVAL;
		}
		break;
	case PACKET3_SURFACE_BASE_UPDATE:
		if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		if (pkt->count) {
			DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
			return -EINVAL;
		}
		break;
	case PACKET3_NOP:
		break;
	default:
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
		return -EINVAL;
	}
	return 0;
}
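
/*
 * Editorial note on the reloc patching convention used throughout the
 * checker above: for full GPU addresses the low 32 bits are added into
 * one payload dword and the next address bits (upper_32_bits(...) & 0xff)
 * into the following dword, while base registers that take a
 * 256-byte-aligned address get the offset shifted right by 8 before
 * patching.
 */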
int r600_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	int r;

	do {
		r = r600_cs_packet_parse(p, &pkt, p->idx);
		if (r) {
			return r;
		}
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case PACKET_TYPE0:
			r = r600_cs_parse_packet0(p, &pkt);
			break;
		case PACKET_TYPE2:
			break;
		case PACKET_TYPE3:
			r = r600_packet3_check(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			return -EINVAL;
		}
		if (r) {
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
#if 0
	for (r = 0; r < p->ib->length_dw; r++) {
		printk(KERN_INFO "%05d 0x%08X\n", r, p->ib->ptr[r]);
		mdelay(1);
	}
#endif
	return 0;
}
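
/*
 * Editorial note: the parser walks the indirect buffer packet by packet,
 * advancing by pkt.count + 2 dwords each iteration (header plus count + 1
 * payload dwords) until the whole ib chunk has been validated. Any
 * unknown packet type or malformed packet aborts the submission.
 */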
static int r600_cs_parser_relocs_legacy(struct radeon_cs_parser *p)
{
	if (p->chunk_relocs_idx == -1) {
		return 0;
	}
	p->relocs = kcalloc(1, sizeof(struct radeon_cs_reloc), GFP_KERNEL);
	if (p->relocs == NULL) {
		return -ENOMEM;
	}
	return 0;
}
/**
 * r600_cs_parser_fini() - clean parser states
 * @parser:	parser structure holding parsing context.
 * @error:	error number
 *
 * If error is set, then unvalidate the buffer, otherwise just free memory
 * used by the parsing context.
 **/
static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
{
	unsigned i;

	kfree(parser->relocs);
	for (i = 0; i < parser->nchunks; i++) {
		kfree(parser->chunks[i].kdata);
		kfree(parser->chunks[i].kpage[0]);
		kfree(parser->chunks[i].kpage[1]);
	}
	kfree(parser->chunks);
	kfree(parser->chunks_array);
}
int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
			unsigned family, u32 *ib, int *l)
{
	struct radeon_cs_parser parser;
	struct radeon_cs_chunk *ib_chunk;
	struct radeon_ib fake_ib;
	int r;

	/* initialize parser */
	memset(&parser, 0, sizeof(struct radeon_cs_parser));
	parser.filp = filp;
	parser.rdev = NULL;
	parser.family = family;
	parser.ib = &fake_ib;
	fake_ib.ptr = ib;
	r = radeon_cs_parser_init(&parser, data);
	if (r) {
		DRM_ERROR("Failed to initialize parser !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r = r600_cs_parser_relocs_legacy(&parser);
	if (r) {
		DRM_ERROR("Failed to parse relocation !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	/* Copy the packet into the IB, the parser will read from the
	 * input memory (cached) and write to the IB (which can be
	 * uncached). */
	ib_chunk = &parser.chunks[parser.chunk_ib_idx];
	parser.ib->length_dw = ib_chunk->length_dw;
	*l = parser.ib->length_dw;
	r = r600_cs_parse(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r = radeon_cs_finish_pages(&parser);
	if (r) {
		DRM_ERROR("Invalid command stream !\n");
		r600_cs_parser_fini(&parser, r);
		return r;
	}
	r600_cs_parser_fini(&parser, r);
	return r;
}
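
/*
 * Editorial note: r600_cs_legacy() is the entry point for the old
 * non-KMS (UMS) path. It validates the user command stream in place
 * through a fake ib wrapped around the caller's buffer; since there is
 * no memory manager on this path, r600_cs_legacy_init() below must be
 * called first so relocation lookups use the _nomm variant.
 */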
void r600_cs_legacy_init(void)
{
	r600_cs_packet_next_reloc = &r600_cs_packet_next_reloc_nomm;
}