sys/external/bsd/drm/dist/shared-core/via_verifier.c
/*
 * Copyright 2004 The Unichrome Project. All Rights Reserved.
 * Copyright 2005 Thomas Hellstrom. All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S), AND/OR THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Author: Thomas Hellstrom 2004, 2005.
 * This code was written using docs obtained under NDA from VIA Inc.
 *
 * Don't run this code directly on an AGP buffer. Due to cache problems it will
 * be very slow.
 */

#include "via_3d_reg.h"
#include "drmP.h"
#include "drm.h"
#include "via_drm.h"
#include "via_verifier.h"
#include "via_drv.h"
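
/*
 * The verifier runs a small state machine over the user-supplied DMA
 * command stream: verifier_state_t tracks which header type is currently
 * being parsed, and hazard_t names the per-register check applied to a
 * command dword before it is allowed through.
 */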
typedef enum {
	state_command,
	state_header2,
	state_header1,
	state_vheader5,
	state_vheader6,
	state_error
} verifier_state_t;

typedef enum {
	no_check = 0,
	check_for_header2,
	check_for_header1,
	check_for_header2_err,
	check_for_header1_err,
	check_for_fire,
	check_z_buffer_addr0,
	check_z_buffer_addr1,
	check_z_buffer_addr_mode,
	check_destination_addr0,
	check_destination_addr1,
	check_destination_addr_mode,
	check_for_dummy,
	check_for_dd,
	check_texture_addr0,
	check_texture_addr1,
	check_texture_addr2,
	check_texture_addr3,
	check_texture_addr4,
	check_texture_addr5,
	check_texture_addr6,
	check_texture_addr7,
	check_texture_addr8,
	check_texture_addr_mode,
	check_for_vertex_count,
	check_number_texunits,
	forbidden_command
} hazard_t;

/*
 * Associates each hazard above with a possible multi-command
 * sequence. For example an address that is split over multiple
 * commands and that needs to be checked at the first command
 * that does not include any part of the address.
 */

static drm_via_sequence_t seqs[] = {
	no_sequence,
	no_sequence,
	no_sequence,
	no_sequence,
	no_sequence,
	no_sequence,
	z_address,
	z_address,
	z_address,
	dest_address,
	dest_address,
	dest_address,
	no_sequence,
	no_sequence,
	tex_address,
	tex_address,
	tex_address,
	tex_address,
	tex_address,
	tex_address,
	tex_address,
	tex_address,
	tex_address,
	tex_address,
	no_sequence
};

typedef struct {
	unsigned int code;
	hazard_t hz;
} hz_init_t;
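
/*
 * The init_table* arrays below map command codes (the high byte of a
 * HEADER2 data dword) to hazard checks.  They are expanded into the
 * 256-entry lookup tables table1/table2/table3 by setup_hazard_table()
 * in via_init_command_verifier(); any code not listed here ends up as
 * forbidden_command.
 */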
static hz_init_t init_table1[] = {
	{0xf2, check_for_header2_err},
	{0xf0, check_for_header1_err},
	{0xee, check_for_fire},
	{0xcc, check_for_dummy},
	{0xdd, check_for_dd},
	{0x00, no_check},
	{0x10, check_z_buffer_addr0},
	{0x11, check_z_buffer_addr1},
	{0x12, check_z_buffer_addr_mode},
	{0x13, no_check},
	{0x14, no_check},
	{0x15, no_check},
	{0x23, no_check},
	{0x24, no_check},
	{0x33, no_check},
	{0x34, no_check},
	{0x35, no_check},
	{0x36, no_check},
	{0x37, no_check},
	{0x38, no_check},
	{0x39, no_check},
	{0x3A, no_check},
	{0x3B, no_check},
	{0x3C, no_check},
	{0x3D, no_check},
	{0x3E, no_check},
	{0x40, check_destination_addr0},
	{0x41, check_destination_addr1},
	{0x42, check_destination_addr_mode},
	{0x43, no_check},
	{0x44, no_check},
	{0x50, no_check},
	{0x51, no_check},
	{0x52, no_check},
	{0x53, no_check},
	{0x54, no_check},
	{0x55, no_check},
	{0x56, no_check},
	{0x57, no_check},
	{0x58, no_check},
	{0x70, no_check},
	{0x71, no_check},
	{0x78, no_check},
	{0x79, no_check},
	{0x7A, no_check},
	{0x7B, no_check},
	{0x7C, no_check},
	{0x7D, check_for_vertex_count}
};

static hz_init_t init_table2[] = {
	{0xf2, check_for_header2_err},
	{0xf0, check_for_header1_err},
	{0xee, check_for_fire},
	{0xcc, check_for_dummy},
	{0x00, check_texture_addr0},
	{0x01, check_texture_addr0},
	{0x02, check_texture_addr0},
	{0x03, check_texture_addr0},
	{0x04, check_texture_addr0},
	{0x05, check_texture_addr0},
	{0x06, check_texture_addr0},
	{0x07, check_texture_addr0},
	{0x08, check_texture_addr0},
	{0x09, check_texture_addr0},
	{0x20, check_texture_addr1},
	{0x21, check_texture_addr1},
	{0x22, check_texture_addr1},
	{0x23, check_texture_addr4},
	{0x2B, check_texture_addr3},
	{0x2C, check_texture_addr3},
	{0x2D, check_texture_addr3},
	{0x2E, check_texture_addr3},
	{0x2F, check_texture_addr3},
	{0x30, check_texture_addr3},
	{0x31, check_texture_addr3},
	{0x32, check_texture_addr3},
	{0x33, check_texture_addr3},
	{0x34, check_texture_addr3},
	{0x4B, check_texture_addr5},
	{0x4C, check_texture_addr6},
	{0x51, check_texture_addr7},
	{0x52, check_texture_addr8},
	{0x77, check_texture_addr2},
	{0x78, no_check},
	{0x79, no_check},
	{0x7A, no_check},
	{0x7B, check_texture_addr_mode},
	{0x7C, no_check},
	{0x7D, no_check},
	{0x7E, no_check},
	{0x7F, no_check},
	{0x80, no_check},
	{0x81, no_check},
	{0x82, no_check},
	{0x83, no_check},
	{0x85, no_check},
	{0x86, no_check},
	{0x87, no_check},
	{0x88, no_check},
	{0x89, no_check},
	{0x8A, no_check},
	{0x90, no_check},
	{0x91, no_check},
	{0x92, no_check},
	{0x93, no_check}
};

static hz_init_t init_table3[] = {
	{0xf2, check_for_header2_err},
	{0xf0, check_for_header1_err},
	{0xcc, check_for_dummy},
	{0x00, check_number_texunits}
};

static hazard_t table1[256];
static hazard_t table2[256];
static hazard_t table3[256];
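
/*
 * Skip num_words dwords of the command buffer, failing if the buffer
 * ends prematurely.
 */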
static __inline__ int
eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words)
{
	if ((buf_end - *buf) >= num_words) {
		*buf += num_words;
		return 0;
	}
	DRM_ERROR("Illegal termination of DMA command buffer\n");
	return 1;
}
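
/*
 * Look up the AGP map that fully contains [offset, offset + size).
 * The most recently matched map is cached in seq->map_cache; restricted
 * maps and non-AGP maps are never returned.
 */
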
/*
 * Partially stolen from drm_memory.h
 */

static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t *seq,
							   unsigned long offset,
							   unsigned long size,
							   struct drm_device *dev)
{
#ifdef __linux__
	struct drm_map_list *r_list;
#endif
	drm_local_map_t *map = seq->map_cache;

	if (map && map->offset <= offset
	    && (offset + size) <= (map->offset + map->size)) {
		return map;
	}
#ifdef __linux__
	list_for_each_entry(r_list, &dev->maplist, head) {
		map = r_list->map;
		if (!map)
			continue;
#else
	TAILQ_FOREACH(map, &dev->maplist, link) {
#endif
		if (map->offset <= offset
		    && (offset + size) <= (map->offset + map->size)
		    && !(map->flags & _DRM_RESTRICTED)
		    && (map->type == _DRM_AGP)) {
			seq->map_cache = map;
			return map;
		}
	}
	return NULL;
}
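
/*
 * Called when a multi-command sequence (Z, destination or texture
 * address) is complete.  For AGP textures, the address range spanned by
 * all mipmap levels is computed and checked against the allowed AGP map.
 */
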
/*
 * Require that all AGP texture levels reside in the same AGP map which should
 * be mappable by the client. This is not a big restriction.
 * FIXME: To actually enforce this security policy strictly, drm_rmmap
 * would have to wait for dma quiescent before removing an AGP map.
 * The via_drm_lookup_agp_map call in reality seems to take
 * very little CPU time.
 */

static __inline__ int finish_current_sequence(drm_via_state_t * cur_seq)
{
	switch (cur_seq->unfinished) {
	case z_address:
		DRM_DEBUG("Z Buffer start address is 0x%x\n", cur_seq->z_addr);
		break;
	case dest_address:
		DRM_DEBUG("Destination start address is 0x%x\n",
			  cur_seq->d_addr);
		break;
	case tex_address:
		if (cur_seq->agp_texture) {
			unsigned start =
			    cur_seq->tex_level_lo[cur_seq->texture];
			unsigned end = cur_seq->tex_level_hi[cur_seq->texture];
			unsigned long lo = ~0, hi = 0, tmp;
			uint32_t *addr, *pitch, *height, tex;
			unsigned i;
			int npot;

			if (end > 9)
				end = 9;
			if (start > 9)
				start = 9;

			addr =
			    &(cur_seq->t_addr[tex = cur_seq->texture][start]);
			pitch = &(cur_seq->pitch[tex][start]);
			height = &(cur_seq->height[tex][start]);
			npot = cur_seq->tex_npot[tex];
			for (i = start; i <= end; ++i) {
				tmp = *addr++;
				if (tmp < lo)
					lo = tmp;
				if (i == 0 && npot)
					tmp += (*height++ * *pitch++);
				else
					tmp += (*height++ << *pitch++);
				if (tmp > hi)
					hi = tmp;
			}

			if (!via_drm_lookup_agp_map
			    (cur_seq, lo, hi - lo, cur_seq->dev)) {
				DRM_ERROR
				    ("AGP texture is not in allowed map\n");
				return 2;
			}
		}
		break;
	default:
		break;
	}
	cur_seq->unfinished = no_sequence;
	return 0;
}
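
/*
 * Apply a single hazard check to one command dword.  Returns 0 if the
 * dword is accepted, 1 if it is a header or fire command that ends the
 * current parameter block, and 2 on a security violation or otherwise
 * illegal command.
 */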
static __inline__ int
investigate_hazard(uint32_t cmd, hazard_t haz, drm_via_state_t * cur_seq)
{
	register uint32_t tmp, *tmp_addr;

	if (cur_seq->unfinished && (cur_seq->unfinished != seqs[haz])) {
		int ret;
		if ((ret = finish_current_sequence(cur_seq)))
			return ret;
	}

	switch (haz) {
	case check_for_header2:
		if (cmd == HALCYON_HEADER2)
			return 1;
		return 0;
	case check_for_header1:
		if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
			return 1;
		return 0;
	case check_for_header2_err:
		if (cmd == HALCYON_HEADER2)
			return 1;
		DRM_ERROR("Illegal DMA HALCYON_HEADER2 command\n");
		break;
	case check_for_header1_err:
		if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
			return 1;
		DRM_ERROR("Illegal DMA HALCYON_HEADER1 command\n");
		break;
	case check_for_fire:
		if ((cmd & HALCYON_FIREMASK) == HALCYON_FIRECMD)
			return 1;
		DRM_ERROR("Illegal DMA HALCYON_FIRECMD command\n");
		break;
	case check_for_dummy:
		if (HC_DUMMY == cmd)
			return 0;
		DRM_ERROR("Illegal DMA HC_DUMMY command\n");
		break;
	case check_for_dd:
		if (0xdddddddd == cmd)
			return 0;
		DRM_ERROR("Illegal DMA 0xdddddddd command\n");
		break;
	case check_z_buffer_addr0:
		cur_seq->unfinished = z_address;
		cur_seq->z_addr = (cur_seq->z_addr & 0xFF000000) |
		    (cmd & 0x00FFFFFF);
		return 0;
	case check_z_buffer_addr1:
		cur_seq->unfinished = z_address;
		cur_seq->z_addr = (cur_seq->z_addr & 0x00FFFFFF) |
		    ((cmd & 0xFF) << 24);
		return 0;
	case check_z_buffer_addr_mode:
		cur_seq->unfinished = z_address;
		if ((cmd & 0x0000C000) == 0)
			return 0;
		DRM_ERROR("Attempt to place Z buffer in system memory\n");
		return 2;
	case check_destination_addr0:
		cur_seq->unfinished = dest_address;
		cur_seq->d_addr = (cur_seq->d_addr & 0xFF000000) |
		    (cmd & 0x00FFFFFF);
		return 0;
	case check_destination_addr1:
		cur_seq->unfinished = dest_address;
		cur_seq->d_addr = (cur_seq->d_addr & 0x00FFFFFF) |
		    ((cmd & 0xFF) << 24);
		return 0;
	case check_destination_addr_mode:
		cur_seq->unfinished = dest_address;
		if ((cmd & 0x0000C000) == 0)
			return 0;
		DRM_ERROR
		    ("Attempt to place 3D drawing buffer in system memory\n");
		return 2;
	case check_texture_addr0:
		cur_seq->unfinished = tex_address;
		tmp = (cmd >> 24);
		tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
		*tmp_addr = (*tmp_addr & 0xFF000000) | (cmd & 0x00FFFFFF);
		return 0;
	case check_texture_addr1:
		cur_seq->unfinished = tex_address;
		tmp = ((cmd >> 24) - 0x20);
		tmp += tmp << 1;
		tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
		*tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
		tmp_addr++;
		*tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF00) << 16);
		tmp_addr++;
		*tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF0000) << 8);
		return 0;
	case check_texture_addr2:
		cur_seq->unfinished = tex_address;
		cur_seq->tex_level_lo[tmp = cur_seq->texture] = cmd & 0x3F;
		cur_seq->tex_level_hi[tmp] = (cmd & 0xFC0) >> 6;
		return 0;
	case check_texture_addr3:
		cur_seq->unfinished = tex_address;
		tmp = ((cmd >> 24) - HC_SubA_HTXnL0Pit);
		if (tmp == 0 &&
		    (cmd & HC_HTXnEnPit_MASK)) {
			cur_seq->pitch[cur_seq->texture][tmp] =
				(cmd & HC_HTXnLnPit_MASK);
			cur_seq->tex_npot[cur_seq->texture] = 1;
		} else {
			cur_seq->pitch[cur_seq->texture][tmp] =
			    (cmd & HC_HTXnLnPitE_MASK) >> HC_HTXnLnPitE_SHIFT;
			cur_seq->tex_npot[cur_seq->texture] = 0;
			if (cmd & 0x000FFFFF) {
				DRM_ERROR
				    ("Unimplemented texture level 0 pitch mode.\n");
				return 2;
			}
		}
		return 0;
	case check_texture_addr4:
		cur_seq->unfinished = tex_address;
		tmp_addr = &cur_seq->t_addr[cur_seq->texture][9];
		*tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
		return 0;
	case check_texture_addr5:
	case check_texture_addr6:
		cur_seq->unfinished = tex_address;
		/*
		 * Texture width. We don't care since we have the pitch.
		 */
		return 0;
	case check_texture_addr7:
		cur_seq->unfinished = tex_address;
		tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
		tmp_addr[5] = 1 << ((cmd & 0x00F00000) >> 20);
		tmp_addr[4] = 1 << ((cmd & 0x000F0000) >> 16);
		tmp_addr[3] = 1 << ((cmd & 0x0000F000) >> 12);
		tmp_addr[2] = 1 << ((cmd & 0x00000F00) >> 8);
		tmp_addr[1] = 1 << ((cmd & 0x000000F0) >> 4);
		tmp_addr[0] = 1 << (cmd & 0x0000000F);
		return 0;
	case check_texture_addr8:
		cur_seq->unfinished = tex_address;
		tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
		tmp_addr[9] = 1 << ((cmd & 0x0000F000) >> 12);
		tmp_addr[8] = 1 << ((cmd & 0x00000F00) >> 8);
		tmp_addr[7] = 1 << ((cmd & 0x000000F0) >> 4);
		tmp_addr[6] = 1 << (cmd & 0x0000000F);
		return 0;
	case check_texture_addr_mode:
		cur_seq->unfinished = tex_address;
		if (2 == (tmp = cmd & 0x00000003)) {
			DRM_ERROR
			    ("Attempt to fetch texture from system memory.\n");
			return 2;
		}
		cur_seq->agp_texture = (tmp == 3);
		cur_seq->tex_palette_size[cur_seq->texture] =
		    (cmd >> 16) & 0x000000007;
		return 0;
	case check_for_vertex_count:
		cur_seq->vertex_count = cmd & 0x0000FFFF;
		return 0;
	case check_number_texunits:
		cur_seq->multitex = (cmd >> 3) & 1;
		return 0;
	default:
		DRM_ERROR("Illegal DMA data: 0x%x\n", cmd);
		return 2;
	}
	return 2;
}
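
/*
 * Verify a HC_ParaType_CmdVdata primitive list: check the B/A command
 * pair, compute the dword count per vertex from the B command, and
 * record the position of each vertex fire command so the parser can
 * later replay the list in bursts.
 */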
static __inline__ int
via_check_prim_list(uint32_t const **buffer, const uint32_t * buf_end,
		    drm_via_state_t * cur_seq)
{
	drm_via_private_t *dev_priv =
	    (drm_via_private_t *) cur_seq->dev->dev_private;
	uint32_t a_fire, bcmd, dw_count;
	int ret = 0;
	int have_fire;
	const uint32_t *buf = *buffer;

	while (buf < buf_end) {
		have_fire = 0;
		if ((buf_end - buf) < 2) {
			DRM_ERROR
			    ("Unexpected termination of primitive list.\n");
			ret = 1;
			break;
		}
		if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdB)
			break;
		bcmd = *buf++;
		if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdA) {
			DRM_ERROR("Expected Vertex List A command, got 0x%x\n",
				  *buf);
			ret = 1;
			break;
		}
		a_fire =
		    *buf++ | HC_HPLEND_MASK | HC_HPMValidN_MASK |
		    HC_HE3Fire_MASK;

		/*
		 * How many dwords per vertex ?
		 */

		if (cur_seq->agp && ((bcmd & (0xF << 11)) == 0)) {
			DRM_ERROR("Illegal B command vertex data for AGP.\n");
			ret = 1;
			break;
		}

		dw_count = 0;
		if (bcmd & (1 << 7))
			dw_count += (cur_seq->multitex) ? 2 : 1;
		if (bcmd & (1 << 8))
			dw_count += (cur_seq->multitex) ? 2 : 1;
		if (bcmd & (1 << 9))
			dw_count++;
		if (bcmd & (1 << 10))
			dw_count++;
		if (bcmd & (1 << 11))
			dw_count++;
		if (bcmd & (1 << 12))
			dw_count++;
		if (bcmd & (1 << 13))
			dw_count++;
		if (bcmd & (1 << 14))
			dw_count++;

		while (buf < buf_end) {
			if (*buf == a_fire) {
				if (dev_priv->num_fire_offsets >=
				    VIA_FIRE_BUF_SIZE) {
					DRM_ERROR("Fire offset buffer full.\n");
					ret = 1;
					break;
				}
				dev_priv->fire_offsets[dev_priv->
						       num_fire_offsets++] =
				    buf;
				have_fire = 1;
				buf++;
				if (buf < buf_end && *buf == a_fire)
					buf++;
				break;
			}
			if ((*buf == HALCYON_HEADER2) ||
			    ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD)) {
				DRM_ERROR("Missing Vertex Fire command, "
					  "Stray Vertex Fire command or verifier "
					  "lost sync.\n");
				ret = 1;
				break;
			}
			if ((ret = eat_words(&buf, buf_end, dw_count)))
				break;
		}
		if (buf >= buf_end && !have_fire) {
			DRM_ERROR("Missing Vertex Fire command or verifier "
				  "lost sync.\n");
			ret = 1;
			break;
		}
		if (cur_seq->agp && ((buf - cur_seq->buf_start) & 0x01)) {
			DRM_ERROR("AGP Primitive list end misaligned.\n");
			ret = 1;
			break;
		}
	}
	*buffer = buf;
	return ret;
}
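
/*
 * Verify a HALCYON_HEADER2 command sequence.  The 16-bit sub-command
 * selects which hazard table applies (or hands vertex data off to
 * via_check_prim_list); every following data dword is then checked with
 * investigate_hazard() until a new header is found.
 */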
static __inline__ verifier_state_t
via_check_header2(uint32_t const **buffer, const uint32_t * buf_end,
		  drm_via_state_t * hc_state)
{
	uint32_t cmd;
	int hz_mode;
	hazard_t haz;
	const uint32_t *buf = *buffer;
	const hazard_t *hz_table;

	if ((buf_end - buf) < 2) {
		DRM_ERROR
		    ("Illegal termination of DMA HALCYON_HEADER2 sequence.\n");
		return state_error;
	}
	buf++;
	cmd = (*buf++ & 0xFFFF0000) >> 16;

	switch (cmd) {
	case HC_ParaType_CmdVdata:
		if (via_check_prim_list(&buf, buf_end, hc_state))
			return state_error;
		*buffer = buf;
		return state_command;
	case HC_ParaType_NotTex:
		hz_table = table1;
		break;
	case HC_ParaType_Tex:
		hc_state->texture = 0;
		hz_table = table2;
		break;
	case (HC_ParaType_Tex | (HC_SubType_Tex1 << 8)):
		hc_state->texture = 1;
		hz_table = table2;
		break;
	case (HC_ParaType_Tex | (HC_SubType_TexGeneral << 8)):
		hz_table = table3;
		break;
	case HC_ParaType_Auto:
		if (eat_words(&buf, buf_end, 2))
			return state_error;
		*buffer = buf;
		return state_command;
	case (HC_ParaType_Palette | (HC_SubType_Stipple << 8)):
		if (eat_words(&buf, buf_end, 32))
			return state_error;
		*buffer = buf;
		return state_command;
	case (HC_ParaType_Palette | (HC_SubType_TexPalette0 << 8)):
	case (HC_ParaType_Palette | (HC_SubType_TexPalette1 << 8)):
		DRM_ERROR("Texture palettes are rejected because of "
			  "lack of info how to determine their size.\n");
		return state_error;
	case (HC_ParaType_Palette | (HC_SubType_FogTable << 8)):
		DRM_ERROR("Fog factor palettes are rejected because of "
			  "lack of info how to determine their size.\n");
		return state_error;
	default:

		/*
		 * There are some unimplemented HC_ParaTypes here, that
		 * need to be implemented if the Mesa driver is extended.
		 */

		DRM_ERROR("Invalid or unimplemented HALCYON_HEADER2 "
			  "DMA subcommand: 0x%x. Previous dword: 0x%x\n",
			  cmd, *(buf - 2));
		*buffer = buf;
		return state_error;
	}

	while (buf < buf_end) {
		cmd = *buf++;
		if ((haz = hz_table[cmd >> 24])) {
			if ((hz_mode = investigate_hazard(cmd, haz, hc_state))) {
				if (hz_mode == 1) {
					buf--;
					break;
				}
				return state_error;
			}
		} else if (hc_state->unfinished &&
			   finish_current_sequence(hc_state)) {
			return state_error;
		}
	}
	if (hc_state->unfinished && finish_current_sequence(hc_state)) {
		return state_error;
	}
	*buffer = buf;
	return state_command;
}
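
/*
 * Replay an already verified HEADER2 sequence to the hardware, writing
 * the data dwords into the HC_REG_TRANS_SPACE burst area and using the
 * fire offsets recorded during verification to split vertex data.
 */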
static __inline__ verifier_state_t
via_parse_header2(drm_via_private_t * dev_priv, uint32_t const **buffer,
		  const uint32_t * buf_end, int *fire_count)
{
	uint32_t cmd;
	const uint32_t *buf = *buffer;
	const uint32_t *next_fire;
	int burst = 0;

	next_fire = dev_priv->fire_offsets[*fire_count];
	buf++;
	cmd = (*buf & 0xFFFF0000) >> 16;
	VIA_WRITE(HC_REG_TRANS_SET + HC_REG_BASE, *buf++);
	switch (cmd) {
	case HC_ParaType_CmdVdata:
		while ((buf < buf_end) &&
		       (*fire_count < dev_priv->num_fire_offsets) &&
		       (*buf & HC_ACMD_MASK) == HC_ACMD_HCmdB) {
			while (buf <= next_fire) {
				VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE +
					  (burst & 63), *buf++);
				burst += 4;
			}
			if ((buf < buf_end)
			    && ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD))
				buf++;

			if (++(*fire_count) < dev_priv->num_fire_offsets)
				next_fire = dev_priv->fire_offsets[*fire_count];
		}
		break;
	default:
		while (buf < buf_end) {

			if (*buf == HC_HEADER2 ||
			    (*buf & HALCYON_HEADER1MASK) == HALCYON_HEADER1 ||
			    (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5 ||
			    (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
				break;

			VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE +
				  (burst & 63), *buf++);
			burst += 4;
		}
	}
	*buffer = buf;
	return state_command;
}
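
/*
 * Reject video DMA writes to MMIO ranges outside the video register
 * area: the 3D/command burst area, the PCI DMA area and the VGA
 * registers are all off limits.
 */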
static __inline__ int verify_mmio_address(uint32_t address)
{
	if ((address > 0x3FF) && (address < 0xC00)) {
		DRM_ERROR("Invalid VIDEO DMA command. "
			  "Attempt to access 3D- or command burst area.\n");
		return 1;
	} else if ((address > 0xCFF) && (address < 0x1300)) {
		DRM_ERROR("Invalid VIDEO DMA command. "
			  "Attempt to access PCI DMA area.\n");
		return 1;
	} else if (address > 0x13FF) {
		DRM_ERROR("Invalid VIDEO DMA command. "
			  "Attempt to access VGA registers.\n");
		return 1;
	}
	return 0;
}

static __inline__ int
verify_video_tail(uint32_t const **buffer, const uint32_t * buf_end,
		  uint32_t dwords)
{
	const uint32_t *buf = *buffer;

	if (buf_end - buf < dwords) {
		DRM_ERROR("Illegal termination of video command.\n");
		return 1;
	}
	while (dwords--) {
		if (*buf++) {
			DRM_ERROR("Illegal video command tail.\n");
			return 1;
		}
	}
	*buffer = buf;
	return 0;
}
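
/*
 * Verify a run of HALCYON_HEADER1 register writes, rejecting writes
 * that target the 3D/command burst area or the VGA registers.
 */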
static __inline__ verifier_state_t
via_check_header1(uint32_t const **buffer, const uint32_t * buf_end)
{
	uint32_t cmd;
	const uint32_t *buf = *buffer;
	verifier_state_t ret = state_command;

	while (buf < buf_end) {
		cmd = *buf;
		if ((cmd > ((0x3FF >> 2) | HALCYON_HEADER1)) &&
		    (cmd < ((0xC00 >> 2) | HALCYON_HEADER1))) {
			if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
				break;
			DRM_ERROR("Invalid HALCYON_HEADER1 command. "
				  "Attempt to access 3D- or command burst area.\n");
			ret = state_error;
			break;
		} else if (cmd > ((0xCFF >> 2) | HALCYON_HEADER1)) {
			if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
				break;
			DRM_ERROR("Invalid HALCYON_HEADER1 command. "
				  "Attempt to access VGA registers.\n");
			ret = state_error;
			break;
		} else {
			buf += 2;
		}
	}
	*buffer = buf;
	return ret;
}

static __inline__ verifier_state_t
via_parse_header1(drm_via_private_t * dev_priv, uint32_t const **buffer,
		  const uint32_t * buf_end)
{
	register uint32_t cmd;
	const uint32_t *buf = *buffer;

	while (buf < buf_end) {
		cmd = *buf;
		if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
			break;
		VIA_WRITE((cmd & ~HALCYON_HEADER1MASK) << 2, *++buf);
		buf++;
	}
	*buffer = buf;
	return state_command;
}
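
/*
 * Video header5 commands describe a burst write to a single MMIO
 * address.  via_check_vheader5() validates the address, length and
 * zero padding; via_parse_vheader5() performs the writes.
 */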
static __inline__ verifier_state_t
via_check_vheader5(uint32_t const **buffer, const uint32_t * buf_end)
{
	uint32_t data;
	const uint32_t *buf = *buffer;

	if (buf_end - buf < 4) {
		DRM_ERROR("Illegal termination of video header5 command\n");
		return state_error;
	}

	data = *buf++ & ~VIA_VIDEOMASK;
	if (verify_mmio_address(data))
		return state_error;

	data = *buf++;
	if (*buf++ != 0x00F50000) {
		DRM_ERROR("Illegal header5 header data\n");
		return state_error;
	}
	if (*buf++ != 0x00000000) {
		DRM_ERROR("Illegal header5 header data\n");
		return state_error;
	}
	if (eat_words(&buf, buf_end, data))
		return state_error;
	if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
		return state_error;
	*buffer = buf;
	return state_command;
}

static __inline__ verifier_state_t
via_parse_vheader5(drm_via_private_t * dev_priv, uint32_t const **buffer,
		   const uint32_t * buf_end)
{
	uint32_t addr, count, i;
	const uint32_t *buf = *buffer;

	addr = *buf++ & ~VIA_VIDEOMASK;
	i = count = *buf;
	buf += 3;
	while (i--) {
		VIA_WRITE(addr, *buf++);
	}
	if (count & 3)
		buf += 4 - (count & 3);
	*buffer = buf;
	return state_command;
}
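
/*
 * Video header6 commands carry (address, data) pairs.  The check pass
 * validates every target address and the padding; the parse pass writes
 * each pair to the hardware.
 */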
static __inline__ verifier_state_t
via_check_vheader6(uint32_t const **buffer, const uint32_t * buf_end)
{
	uint32_t data;
	const uint32_t *buf = *buffer;
	uint32_t i;

	if (buf_end - buf < 4) {
		DRM_ERROR("Illegal termination of video header6 command\n");
		return state_error;
	}
	buf++;
	data = *buf++;
	if (*buf++ != 0x00F60000) {
		DRM_ERROR("Illegal header6 header data\n");
		return state_error;
	}
	if (*buf++ != 0x00000000) {
		DRM_ERROR("Illegal header6 header data\n");
		return state_error;
	}
	if ((buf_end - buf) < (data << 1)) {
		DRM_ERROR("Illegal termination of video header6 command\n");
		return state_error;
	}
	for (i = 0; i < data; ++i) {
		if (verify_mmio_address(*buf++))
			return state_error;
		buf++;
	}
	data <<= 1;
	if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
		return state_error;
	*buffer = buf;
	return state_command;
}

static __inline__ verifier_state_t
via_parse_vheader6(drm_via_private_t * dev_priv, uint32_t const **buffer,
		   const uint32_t * buf_end)
{
	uint32_t addr, count, i;
	const uint32_t *buf = *buffer;

	i = count = *++buf;
	buf += 3;
	while (i--) {
		addr = *buf++;
		VIA_WRITE(addr, *buf++);
	}
	count <<= 1;
	if (count & 3)
		buf += 4 - (count & 3);
	*buffer = buf;
	return state_command;
}
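
/*
 * Top-level verifier entry point.  Walks the command stream with the
 * state machine above and returns 0 if every command is allowed, or
 * -EINVAL (with the saved verifier state restored) on any violation.
 */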
int
via_verify_command_stream(const uint32_t * buf, unsigned int size,
			  struct drm_device * dev, int agp)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	drm_via_state_t *hc_state = &dev_priv->hc_state;
	drm_via_state_t saved_state = *hc_state;
	uint32_t cmd;
	const uint32_t *buf_end = buf + (size >> 2);
	verifier_state_t state = state_command;
	int cme_video;
	int supported_3d;

	cme_video = (dev_priv->chipset == VIA_PRO_GROUP_A ||
		     dev_priv->chipset == VIA_DX9_0);

	supported_3d = dev_priv->chipset != VIA_DX9_0;

	hc_state->dev = dev;
	hc_state->unfinished = no_sequence;
	hc_state->map_cache = NULL;
	hc_state->agp = agp;
	hc_state->buf_start = buf;
	dev_priv->num_fire_offsets = 0;

	while (buf < buf_end) {

		switch (state) {
		case state_header2:
			state = via_check_header2(&buf, buf_end, hc_state);
			break;
		case state_header1:
			state = via_check_header1(&buf, buf_end);
			break;
		case state_vheader5:
			state = via_check_vheader5(&buf, buf_end);
			break;
		case state_vheader6:
			state = via_check_vheader6(&buf, buf_end);
			break;
		case state_command:
			if ((HALCYON_HEADER2 == (cmd = *buf)) &&
			    supported_3d)
				state = state_header2;
			else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
				state = state_header1;
			else if (cme_video
				 && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
				state = state_vheader5;
			else if (cme_video
				 && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
				state = state_vheader6;
			else if ((cmd == HALCYON_HEADER2) && !supported_3d) {
				DRM_ERROR("Accelerated 3D is not supported on this chipset yet.\n");
				state = state_error;
			} else {
				DRM_ERROR
				    ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
				     cmd);
				state = state_error;
			}
			break;
		case state_error:
		default:
			*hc_state = saved_state;
			return -EINVAL;
		}
	}
	if (state == state_error) {
		*hc_state = saved_state;
		return -EINVAL;
	}
	return 0;
}
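
/*
 * Emit a command stream to the hardware with MMIO writes.  This pass
 * relies on state (the fire offsets) recorded by a prior
 * via_verify_command_stream() call on the same buffer.
 */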
int
via_parse_command_stream(struct drm_device * dev, const uint32_t * buf,
			 unsigned int size)
{
	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
	uint32_t cmd;
	const uint32_t *buf_end = buf + (size >> 2);
	verifier_state_t state = state_command;
	int fire_count = 0;

	while (buf < buf_end) {

		switch (state) {
		case state_header2:
			state =
			    via_parse_header2(dev_priv, &buf, buf_end,
					      &fire_count);
			break;
		case state_header1:
			state = via_parse_header1(dev_priv, &buf, buf_end);
			break;
		case state_vheader5:
			state = via_parse_vheader5(dev_priv, &buf, buf_end);
			break;
		case state_vheader6:
			state = via_parse_vheader6(dev_priv, &buf, buf_end);
			break;
		case state_command:
			if (HALCYON_HEADER2 == (cmd = *buf))
				state = state_header2;
			else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
				state = state_header1;
			else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
				state = state_vheader5;
			else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
				state = state_vheader6;
			else {
				DRM_ERROR
				    ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
				     cmd);
				state = state_error;
			}
			break;
		case state_error:
		default:
			return -EINVAL;
		}
	}
	if (state == state_error) {
		return -EINVAL;
	}
	return 0;
}
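
/*
 * Expand a compact init table into a full 256-entry hazard lookup
 * table; unlisted command codes default to forbidden_command.
 */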
static void
setup_hazard_table(hz_init_t init_table[], hazard_t table[], int size)
{
	int i;

	for (i = 0; i < 256; ++i) {
		table[i] = forbidden_command;
	}

	for (i = 0; i < size; ++i) {
		table[init_table[i].code] = init_table[i].hz;
	}
}

void via_init_command_verifier(void)
{
	setup_hazard_table(init_table1, table1,
			   sizeof(init_table1) / sizeof(hz_init_t));
	setup_hazard_table(init_table2, table2,
			   sizeof(init_table2) / sizeof(hz_init_t));
	setup_hazard_table(init_table3, table3,
			   sizeof(init_table3) / sizeof(hz_init_t));
}