/*
 * Copyright 2004 The Unichrome Project. All Rights Reserved.
 * Copyright 2005 Thomas Hellstrom. All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S), AND/OR THE COPYRIGHT HOLDER(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Author: Thomas Hellstrom 2004, 2005.
 * This code was written using docs obtained under NDA from VIA Inc.
 *
 * Don't run this code directly on an AGP buffer. Due to cache problems it will
 * be very slow.
 */

#include "via_3d_reg.h"
#include "drmP.h"
#include "drm.h"
#include "via_drm.h"
#include "via_verifier.h"
#include "via_drv.h"

typedef enum {
        no_check = 0,
        check_for_header2,
        check_for_header1,
        check_for_header2_err,
        check_for_header1_err,
        check_for_fire,
        check_z_buffer_addr0,
        check_z_buffer_addr1,
        check_z_buffer_addr_mode,
        check_destination_addr0,
        check_destination_addr1,
        check_destination_addr_mode,
        check_for_dummy,
        check_for_dd,
        check_texture_addr0,
        check_texture_addr1,
        check_texture_addr2,
        check_texture_addr3,
        check_texture_addr4,
        check_texture_addr5,
        check_texture_addr6,
        check_texture_addr7,
        check_texture_addr8,
        check_texture_addr_mode,
        check_for_vertex_count,
        check_number_texunits,
        forbidden_command
} hazard_t;

/*
 * Associates each hazard above with a possible multi-command
 * sequence. For example an address that is split over multiple
 * commands and that needs to be checked at the first command
 * that does not include any part of the address.
 */
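/*
 * For example, check_z_buffer_addr0 and check_z_buffer_addr1 each contribute
 * part of the Z buffer address to cur_seq->z_addr; the z_address sequence
 * stays marked as unfinished until a dword outside the sequence arrives, at
 * which point finish_current_sequence() acts on the completed value (for AGP
 * textures this is where the assembled address range is checked against the
 * AGP maps).
 */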
static drm_via_sequence_t seqs[] = {
static hz_init_t init_table1[] = {
        {0xf2, check_for_header2_err},
        {0xf0, check_for_header1_err},
        {0xee, check_for_fire},
        {0xcc, check_for_dummy},
        {0xdd, check_for_dd},
        {0x10, check_z_buffer_addr0},
        {0x11, check_z_buffer_addr1},
        {0x12, check_z_buffer_addr_mode},
        {0x40, check_destination_addr0},
        {0x41, check_destination_addr1},
        {0x42, check_destination_addr_mode},
        {0x7D, check_for_vertex_count}
};

static hz_init_t init_table2[] = {
        {0xf2, check_for_header2_err},
        {0xf0, check_for_header1_err},
        {0xee, check_for_fire},
        {0xcc, check_for_dummy},
        {0x00, check_texture_addr0},
        {0x01, check_texture_addr0},
        {0x02, check_texture_addr0},
        {0x03, check_texture_addr0},
        {0x04, check_texture_addr0},
        {0x05, check_texture_addr0},
        {0x06, check_texture_addr0},
        {0x07, check_texture_addr0},
        {0x08, check_texture_addr0},
        {0x09, check_texture_addr0},
        {0x20, check_texture_addr1},
        {0x21, check_texture_addr1},
        {0x22, check_texture_addr1},
        {0x23, check_texture_addr4},
        {0x2B, check_texture_addr3},
        {0x2C, check_texture_addr3},
        {0x2D, check_texture_addr3},
        {0x2E, check_texture_addr3},
        {0x2F, check_texture_addr3},
        {0x30, check_texture_addr3},
        {0x31, check_texture_addr3},
        {0x32, check_texture_addr3},
        {0x33, check_texture_addr3},
        {0x34, check_texture_addr3},
        {0x4B, check_texture_addr5},
        {0x4C, check_texture_addr6},
        {0x51, check_texture_addr7},
        {0x52, check_texture_addr8},
        {0x77, check_texture_addr2},
        {0x7B, check_texture_addr_mode},
};

static hz_init_t init_table3[] = {
        {0xf2, check_for_header2_err},
        {0xf0, check_for_header1_err},
        {0xcc, check_for_dummy},
        {0x00, check_number_texunits}
};
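/*
 * The init tables above map the high byte (the register sub-address) of each
 * HALCYON_HEADER2 data dword to the check that must run on it: table1 is used
 * for HC_ParaType_NotTex streams, table2 for texture streams and table3 for
 * general texture settings. Sub-addresses missing from an init table are
 * marked forbidden_command by setup_hazard_table() and rejected.
 */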
static hazard_t table1[256];
static hazard_t table2[256];
static hazard_t table3[256];

/* Consume num_words dwords from *buf, failing if the buffer would overrun. */
static __inline__ int
eat_words(const uint32_t ** buf, const uint32_t * buf_end, unsigned num_words)
{
        if ((buf_end - *buf) >= num_words) {
                *buf += num_words;
                return 0;
        }
        DRM_ERROR("Illegal termination of DMA command buffer\n");
        return 1;
}

/*
 * Partially stolen from drm_memory.h
 */
static __inline__ drm_local_map_t *via_drm_lookup_agp_map(drm_via_state_t * seq,
                                                          unsigned long offset,
                                                          unsigned long size,
                                                          drm_device_t * dev)
{
        struct list_head *list;
        drm_map_list_t *r_list;
        drm_local_map_t *map = seq->map_cache;

        if (map && map->offset <= offset
            && (offset + size) <= (map->offset + map->size)) {
                return map;
        }

        list_for_each(list, &dev->maplist->head) {
                r_list = (drm_map_list_t *) list;
                map = r_list->map;
                if (!map)
                        continue;
                if (map->offset <= offset
                    && (offset + size) <= (map->offset + map->size)
                    && !(map->flags & _DRM_RESTRICTED)
                    && (map->type == _DRM_AGP)) {
                        seq->map_cache = map;
                        return map;
                }
        }
        return NULL;
}

/*
 * Require that all AGP texture levels reside in the same AGP map which should
 * be mappable by the client. This is not a big restriction.
 * FIXME: To actually enforce this security policy strictly, drm_rmmap
 * would have to wait for dma quiescent before removing an AGP map.
 * The via_drm_lookup_agp_map call in reality seems to take
 * very little CPU time.
 */
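/*
 * finish_current_sequence() below implements that policy: once an AGP texture
 * sequence is complete it computes the [lo, hi) byte range spanned by the
 * bound mipmap levels and rejects the command stream if no single,
 * non-restricted AGP map covers the whole range.
 */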
static __inline__ int finish_current_sequence(drm_via_state_t * cur_seq)
{
        switch (cur_seq->unfinished) {
        case z_address:
                DRM_DEBUG("Z Buffer start address is 0x%x\n", cur_seq->z_addr);
                break;
        case dest_address:
                DRM_DEBUG("Destination start address is 0x%x\n",
                          cur_seq->d_addr);
                break;
        case tex_address:
                if (cur_seq->agp_texture) {
                        unsigned start =
                            cur_seq->tex_level_lo[cur_seq->texture];
                        unsigned end = cur_seq->tex_level_hi[cur_seq->texture];
                        unsigned long lo = ~0, hi = 0, tmp;
                        uint32_t *addr, *pitch, *height, tex;
                        unsigned i;
                        int npot;

                        if (end > 9)
                                end = 9;
                        if (start > 9)
                                start = 9;

                        addr =
                            &(cur_seq->t_addr[tex = cur_seq->texture][start]);
                        pitch = &(cur_seq->pitch[tex][start]);
                        height = &(cur_seq->height[tex][start]);
                        npot = cur_seq->tex_npot[tex];

                        for (i = start; i <= end; ++i) {
                                tmp = *addr++;
                                if (tmp < lo)
                                        lo = tmp;
                                if (i == 0 && npot)
                                        tmp += (*height++ * *pitch++);
                                else
                                        tmp += (*height++ << *pitch++);
                                if (tmp > hi)
                                        hi = tmp;
                        }

                        if (!via_drm_lookup_agp_map
                            (cur_seq, lo, hi - lo, cur_seq->dev)) {
                                DRM_ERROR
                                    ("AGP texture is not in allowed map\n");
                                return 2;
                        }
                }
                break;
        default:
                break;
        }
        cur_seq->unfinished = no_sequence;
        return 0;
}
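/*
 * investigate_hazard() is the per-dword checker: it returns 0 when the dword
 * is acceptable, 1 when the dword ends the register stream (a header or fire
 * command the caller must re-parse), and 2 on a security violation.
 */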
static __inline__ int
investigate_hazard(uint32_t cmd, hazard_t hz, drm_via_state_t * cur_seq)
{
        register uint32_t tmp, *tmp_addr;

        if (cur_seq->unfinished && (cur_seq->unfinished != seqs[hz])) {
                int ret;
                if ((ret = finish_current_sequence(cur_seq)))
                        return ret;
        }

        switch (hz) {
        case check_for_header2:
                if (cmd == HALCYON_HEADER2)
                        return 1;
                return 0;
        case check_for_header1:
                if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
                        return 1;
                return 0;
        case check_for_header2_err:
                if (cmd == HALCYON_HEADER2)
                        return 1;
                DRM_ERROR("Illegal DMA HALCYON_HEADER2 command\n");
                break;
        case check_for_header1_err:
                if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
                        return 1;
                DRM_ERROR("Illegal DMA HALCYON_HEADER1 command\n");
                break;
        case check_for_fire:
                if ((cmd & HALCYON_FIREMASK) == HALCYON_FIRECMD)
                        return 1;
                DRM_ERROR("Illegal DMA HALCYON_FIRECMD command\n");
                break;
        case check_for_dummy:
                if (HC_DUMMY == cmd)
                        return 0;
                DRM_ERROR("Illegal DMA HC_DUMMY command\n");
                break;
        case check_for_dd:
                if (0xdddddddd == cmd)
                        return 0;
                DRM_ERROR("Illegal DMA 0xdddddddd command\n");
                break;
        case check_z_buffer_addr0:
                cur_seq->unfinished = z_address;
                cur_seq->z_addr = (cur_seq->z_addr & 0xFF000000) |
                    (cmd & 0x00FFFFFF);
                return 0;
        case check_z_buffer_addr1:
                cur_seq->unfinished = z_address;
                cur_seq->z_addr = (cur_seq->z_addr & 0x00FFFFFF) |
                    ((cmd & 0xFF) << 24);
                return 0;
        case check_z_buffer_addr_mode:
                cur_seq->unfinished = z_address;
                if ((cmd & 0x0000C000) == 0)
                        return 0;
                DRM_ERROR("Attempt to place Z buffer in system memory\n");
                return 2;
        case check_destination_addr0:
                cur_seq->unfinished = dest_address;
                cur_seq->d_addr = (cur_seq->d_addr & 0xFF000000) |
                    (cmd & 0x00FFFFFF);
                return 0;
        case check_destination_addr1:
                cur_seq->unfinished = dest_address;
                cur_seq->d_addr = (cur_seq->d_addr & 0x00FFFFFF) |
                    ((cmd & 0xFF) << 24);
                return 0;
        case check_destination_addr_mode:
                cur_seq->unfinished = dest_address;
                if ((cmd & 0x0000C000) == 0)
                        return 0;
                DRM_ERROR
                    ("Attempt to place 3D drawing buffer in system memory\n");
                return 2;
        case check_texture_addr0:
                cur_seq->unfinished = tex_address;
                tmp = (cmd >> 24);
                tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
                *tmp_addr = (*tmp_addr & 0xFF000000) | (cmd & 0x00FFFFFF);
                return 0;
        case check_texture_addr1:
                cur_seq->unfinished = tex_address;
                tmp = ((cmd >> 24) - 0x20);
                tmp += (tmp << 1);
                tmp_addr = &cur_seq->t_addr[cur_seq->texture][tmp];
                *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
                tmp_addr++;
                *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF00) << 16);
                tmp_addr++;
                *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF0000) << 8);
                return 0;
        case check_texture_addr2:
                cur_seq->unfinished = tex_address;
                cur_seq->tex_level_lo[tmp = cur_seq->texture] = cmd & 0x3F;
                cur_seq->tex_level_hi[tmp] = (cmd & 0xFC0) >> 6;
                return 0;
        case check_texture_addr3:
                cur_seq->unfinished = tex_address;
                tmp = ((cmd >> 24) - HC_SubA_HTXnL0Pit);
                if (tmp == 0 &&
                    (cmd & HC_HTXnEnPit_MASK)) {
                        cur_seq->pitch[cur_seq->texture][tmp] =
                            (cmd & HC_HTXnLnPit_MASK);
                        cur_seq->tex_npot[cur_seq->texture] = 1;
                } else {
                        cur_seq->pitch[cur_seq->texture][tmp] =
                            (cmd & HC_HTXnLnPitE_MASK) >> HC_HTXnLnPitE_SHIFT;
                        cur_seq->tex_npot[cur_seq->texture] = 0;
                        if (cmd & 0x000FFFFF) {
                                DRM_ERROR
                                    ("Unimplemented texture level 0 pitch mode.\n");
                                return 2;
                        }
                }
                return 0;
        case check_texture_addr4:
                cur_seq->unfinished = tex_address;
                tmp_addr = &cur_seq->t_addr[cur_seq->texture][9];
                *tmp_addr = (*tmp_addr & 0x00FFFFFF) | ((cmd & 0xFF) << 24);
                return 0;
        case check_texture_addr5:
        case check_texture_addr6:
                cur_seq->unfinished = tex_address;
                /*
                 * Texture width. We don't care since we have the pitch.
                 */
                return 0;
        case check_texture_addr7:
                cur_seq->unfinished = tex_address;
                tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
                tmp_addr[5] = 1 << ((cmd & 0x00F00000) >> 20);
                tmp_addr[4] = 1 << ((cmd & 0x000F0000) >> 16);
                tmp_addr[3] = 1 << ((cmd & 0x0000F000) >> 12);
                tmp_addr[2] = 1 << ((cmd & 0x00000F00) >> 8);
                tmp_addr[1] = 1 << ((cmd & 0x000000F0) >> 4);
                tmp_addr[0] = 1 << (cmd & 0x0000000F);
                return 0;
        case check_texture_addr8:
                cur_seq->unfinished = tex_address;
                tmp_addr = &(cur_seq->height[cur_seq->texture][0]);
                tmp_addr[9] = 1 << ((cmd & 0x0000F000) >> 12);
                tmp_addr[8] = 1 << ((cmd & 0x00000F00) >> 8);
                tmp_addr[7] = 1 << ((cmd & 0x000000F0) >> 4);
                tmp_addr[6] = 1 << (cmd & 0x0000000F);
                return 0;
        case check_texture_addr_mode:
                cur_seq->unfinished = tex_address;
                if (2 == (tmp = cmd & 0x00000003)) {
                        DRM_ERROR
                            ("Attempt to fetch texture from system memory.\n");
                        return 2;
                }
                cur_seq->agp_texture = (tmp == 3);
                cur_seq->tex_palette_size[cur_seq->texture] =
                    (cmd >> 16) & 0x000000007;
                return 0;
        case check_for_vertex_count:
                cur_seq->vertex_count = cmd & 0x0000FFFF;
                return 0;
        case check_number_texunits:
                cur_seq->multitex = (cmd >> 3) & 1;
                return 0;
        default:
                DRM_ERROR("Illegal DMA data: 0x%x\n", cmd);
                break;
        }
        return 2;
}
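/*
 * A vertex primitive list is a B command dword followed by an A command
 * dword, then dw_count dwords per vertex, terminated by the expected fire
 * command. The offset of each fire command is recorded in
 * dev_priv->fire_offsets so that via_parse_header2() can later replay the
 * buffer in correctly sized bursts.
 */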
static __inline__ int
via_check_prim_list(uint32_t const **buffer, const uint32_t * buf_end,
                    drm_via_state_t * cur_seq)
{
        drm_via_private_t *dev_priv =
            (drm_via_private_t *) cur_seq->dev->dev_private;
        uint32_t a_fire, bcmd, dw_count;
        int ret = 0;
        int have_fire;
        const uint32_t *buf = *buffer;

        while (buf < buf_end) {
                have_fire = 0;
                if ((buf_end - buf) < 2) {
                        DRM_ERROR
                            ("Unexpected termination of primitive list.\n");
                        ret = 1;
                        break;
                }
                if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdB)
                        break;
                bcmd = *buf++;
                if ((*buf & HC_ACMD_MASK) != HC_ACMD_HCmdA) {
                        DRM_ERROR("Expected Vertex List A command, got 0x%x\n",
                                  *buf);
                        ret = 1;
                        break;
                }
                a_fire =
                    *buf++ | HC_HPLEND_MASK | HC_HPMValidN_MASK |
                    HC_HE3Fire_MASK;

                /*
                 * How many dwords per vertex ?
                 */

                if (cur_seq->agp && ((bcmd & (0xF << 11)) == 0)) {
                        DRM_ERROR("Illegal B command vertex data for AGP.\n");
                        ret = 1;
                        break;
                }

                dw_count = 0;
                if (bcmd & (1 << 7))
                        dw_count += (cur_seq->multitex) ? 2 : 1;
                if (bcmd & (1 << 8))
                        dw_count += (cur_seq->multitex) ? 2 : 1;
                if (bcmd & (1 << 9))
                        dw_count++;
                if (bcmd & (1 << 10))
                        dw_count++;
                if (bcmd & (1 << 11))
                        dw_count++;
                if (bcmd & (1 << 12))
                        dw_count++;
                if (bcmd & (1 << 13))
                        dw_count++;
                if (bcmd & (1 << 14))
                        dw_count++;

                while (buf < buf_end) {
                        if (*buf == a_fire) {
                                if (dev_priv->num_fire_offsets >=
                                    VIA_FIRE_BUF_SIZE) {
                                        DRM_ERROR("Fire offset buffer full.\n");
                                        ret = 1;
                                        break;
                                }
                                dev_priv->fire_offsets[dev_priv->
                                                       num_fire_offsets++] =
                                    buf;
                                have_fire = 1;
                                buf++;
                                if (buf < buf_end && *buf == a_fire)
                                        buf++;
                                break;
                        }
                        if ((*buf == HALCYON_HEADER2) ||
                            ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD)) {
                                DRM_ERROR("Missing Vertex Fire command, "
                                          "Stray Vertex Fire command or verifier "
                                          "lost sync.\n");
                                ret = 1;
                                break;
                        }
                        if ((ret = eat_words(&buf, buf_end, dw_count)))
                                break;
                }
                if (buf >= buf_end && !have_fire) {
                        DRM_ERROR("Missing Vertex Fire command or verifier "
                                  "lost sync.\n");
                        ret = 1;
                        break;
                }
                if (cur_seq->agp && ((buf - cur_seq->buf_start) & 0x01)) {
                        DRM_ERROR("AGP Primitive list end misaligned.\n");
                        ret = 1;
                        break;
                }
        }
        *buffer = buf;
        return ret;
}
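/*
 * The via_check_* functions only validate the buffer; the via_parse_*
 * functions below replay an already verified buffer to the hardware with
 * VIA_WRITE, reusing the fire offsets recorded during verification.
 */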
static __inline__ verifier_state_t
via_check_header2(uint32_t const **buffer, const uint32_t * buf_end,
                  drm_via_state_t * hc_state)
{
        uint32_t cmd;
        int hz_mode;
        hazard_t hz;
        const uint32_t *buf = *buffer;
        const hazard_t *hz_table;

        if ((buf_end - buf) < 2) {
                DRM_ERROR
                    ("Illegal termination of DMA HALCYON_HEADER2 sequence.\n");
                return state_error;
        }
        buf++;
        cmd = (*buf++ & 0xFFFF0000) >> 16;

        switch (cmd) {
        case HC_ParaType_CmdVdata:
                if (via_check_prim_list(&buf, buf_end, hc_state))
                        return state_error;
                *buffer = buf;
                return state_command;
        case HC_ParaType_NotTex:
                hz_table = table1;
                break;
        case HC_ParaType_Tex:
                hc_state->texture = 0;
                hz_table = table2;
                break;
        case (HC_ParaType_Tex | (HC_SubType_Tex1 << 8)):
                hc_state->texture = 1;
                hz_table = table2;
                break;
        case (HC_ParaType_Tex | (HC_SubType_TexGeneral << 8)):
                hz_table = table3;
                break;
        case HC_ParaType_Auto:
                if (eat_words(&buf, buf_end, 2))
                        return state_error;
                *buffer = buf;
                return state_command;
        case (HC_ParaType_Palette | (HC_SubType_Stipple << 8)):
                if (eat_words(&buf, buf_end, 32))
                        return state_error;
                *buffer = buf;
                return state_command;
        case (HC_ParaType_Palette | (HC_SubType_TexPalette0 << 8)):
        case (HC_ParaType_Palette | (HC_SubType_TexPalette1 << 8)):
                DRM_ERROR("Texture palettes are rejected because of "
                          "lack of info how to determine their size.\n");
                return state_error;
        case (HC_ParaType_Palette | (HC_SubType_FogTable << 8)):
                DRM_ERROR("Fog factor palettes are rejected because of "
                          "lack of info how to determine their size.\n");
                return state_error;
        default:

                /*
                 * There are some unimplemented HC_ParaTypes here, that
                 * need to be implemented if the Mesa driver is extended.
                 */

                DRM_ERROR("Invalid or unimplemented HALCYON_HEADER2 "
                          "DMA subcommand: 0x%x. Previous dword: 0x%x\n",
                          cmd, *(buf - 2));
                *buffer = buf;
                return state_error;
        }

        while (buf < buf_end) {
                cmd = *buf++;
                if ((hz = hz_table[cmd >> 24])) {
                        if ((hz_mode = investigate_hazard(cmd, hz, hc_state))) {
                                if (hz_mode == 1) {
                                        buf--;
                                        break;
                                }
                                return state_error;
                        }
                } else if (hc_state->unfinished &&
                           finish_current_sequence(hc_state)) {
                        return state_error;
                }
        }
        if (hc_state->unfinished && finish_current_sequence(hc_state)) {
                return state_error;
        }
        *buffer = buf;
        return state_command;
}
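/*
 * Replay of a HALCYON_HEADER2 stream that has already passed
 * via_check_header2(): vertex data is written to HC_REG_TRANS_SPACE in
 * chunks delimited by the fire offsets recorded by via_check_prim_list().
 */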
static __inline__ verifier_state_t
via_parse_header2(drm_via_private_t * dev_priv, uint32_t const **buffer,
                  const uint32_t * buf_end, int *fire_count)
{
        uint32_t cmd;
        const uint32_t *buf = *buffer;
        const uint32_t *next_fire;
        int burst = 0;

        next_fire = dev_priv->fire_offsets[*fire_count];
        buf++;
        cmd = (*buf & 0xFFFF0000) >> 16;
        VIA_WRITE(HC_REG_TRANS_SET + HC_REG_BASE, *buf++);
        switch (cmd) {
        case HC_ParaType_CmdVdata:
                while ((buf < buf_end) &&
                       (*fire_count < dev_priv->num_fire_offsets) &&
                       (*buf & HC_ACMD_MASK) == HC_ACMD_HCmdB) {
                        while (buf <= next_fire) {
                                VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE +
                                          (burst & 63), *buf++);
                                burst += 4;
                        }
                        if ((buf < buf_end)
                            && ((*buf & HALCYON_FIREMASK) == HALCYON_FIRECMD))
                                buf++;

                        if (++(*fire_count) < dev_priv->num_fire_offsets)
                                next_fire = dev_priv->fire_offsets[*fire_count];
                }
                break;
        default:
                while (buf < buf_end) {

                        if (*buf == HC_HEADER2 ||
                            (*buf & HALCYON_HEADER1MASK) == HALCYON_HEADER1 ||
                            (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5 ||
                            (*buf & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
                                break;

                        VIA_WRITE(HC_REG_TRANS_SPACE + HC_REG_BASE +
                                  (burst & 63), *buf++);
                        burst += 4;
                }
        }
        *buffer = buf;
        return state_command;
}
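/*
 * Video DMA may only target MMIO offsets 0x000-0x3FF, 0xC00-0xCFF and
 * 0x1300-0x13FF; the 3D/command burst, PCI DMA and VGA register ranges are
 * refused.
 */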
static __inline__ int verify_mmio_address(uint32_t address)
{
        if ((address > 0x3FF) && (address < 0xC00)) {
                DRM_ERROR("Invalid VIDEO DMA command. "
                          "Attempt to access 3D- or command burst area.\n");
                return 1;
        } else if ((address > 0xCFF) && (address < 0x1300)) {
                DRM_ERROR("Invalid VIDEO DMA command. "
                          "Attempt to access PCI DMA area.\n");
                return 1;
        } else if (address > 0x13FF) {
                DRM_ERROR("Invalid VIDEO DMA command. "
                          "Attempt to access VGA registers.\n");
                return 1;
        }
        return 0;
}

static __inline__ int
verify_video_tail(uint32_t const **buffer, const uint32_t * buf_end,
                  uint32_t dwords)
{
        const uint32_t *buf = *buffer;

        if (buf_end - buf < dwords) {
                DRM_ERROR("Illegal termination of video command.\n");
                return 1;
        }
        while (dwords--) {
                if (*buf++) {
                        DRM_ERROR("Illegal video command tail.\n");
                        return 1;
                }
        }
        *buffer = buf;
        return 0;
}
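/*
 * HALCYON_HEADER1 commands are simple (register, value) pairs; the checker
 * steps through them two dwords at a time and refuses register addresses
 * that fall in the 3D/command burst or VGA ranges.
 */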
static __inline__ verifier_state_t
via_check_header1(uint32_t const **buffer, const uint32_t * buf_end)
{
        uint32_t cmd;
        const uint32_t *buf = *buffer;
        verifier_state_t ret = state_command;

        while (buf < buf_end) {
                cmd = *buf;
                if ((cmd > ((0x3FF >> 2) | HALCYON_HEADER1)) &&
                    (cmd < ((0xC00 >> 2) | HALCYON_HEADER1))) {
                        if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
                                break;
                        DRM_ERROR("Invalid HALCYON_HEADER1 command. "
                                  "Attempt to access 3D- or command burst area.\n");
                        ret = state_error;
                        break;
                } else if (cmd > ((0xCFF >> 2) | HALCYON_HEADER1)) {
                        if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
                                break;
                        DRM_ERROR("Invalid HALCYON_HEADER1 command. "
                                  "Attempt to access VGA registers.\n");
                        ret = state_error;
                        break;
                } else {
                        buf += 2;
                }
        }
        *buffer = buf;
        return ret;
}

static __inline__ verifier_state_t
via_parse_header1(drm_via_private_t * dev_priv, uint32_t const **buffer,
                  const uint32_t * buf_end)
{
        register uint32_t cmd;
        const uint32_t *buf = *buffer;

        while (buf < buf_end) {
                cmd = *buf;
                if ((cmd & HALCYON_HEADER1MASK) != HALCYON_HEADER1)
                        break;
                VIA_WRITE((cmd & ~HALCYON_HEADER1MASK) << 2, *++buf);
                buf++;
        }
        *buffer = buf;
        return state_command;
}
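/*
 * A video header5 command is a burst write to a single MMIO register:
 * target address, dword count, the 0x00F50000 / 0x00000000 header pair,
 * then the data dwords, zero-padded to a multiple of four dwords.
 */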
static __inline__ verifier_state_t
via_check_vheader5(uint32_t const **buffer, const uint32_t * buf_end)
{
        uint32_t data;
        const uint32_t *buf = *buffer;

        if (buf_end - buf < 4) {
                DRM_ERROR("Illegal termination of video header5 command\n");
                return state_error;
        }

        data = *buf++ & ~VIA_VIDEOMASK;
        if (verify_mmio_address(data))
                return state_error;

        data = *buf++;
        if (*buf++ != 0x00F50000) {
                DRM_ERROR("Illegal header5 header data\n");
                return state_error;
        }
        if (*buf++ != 0x00000000) {
                DRM_ERROR("Illegal header5 header data\n");
                return state_error;
        }
        if (eat_words(&buf, buf_end, data))
                return state_error;
        if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
                return state_error;
        *buffer = buf;
        return state_command;
}

static __inline__ verifier_state_t
via_parse_vheader5(drm_via_private_t * dev_priv, uint32_t const **buffer,
                   const uint32_t * buf_end)
{
        uint32_t addr, count, i;
        const uint32_t *buf = *buffer;

        addr = *buf++ & ~VIA_VIDEOMASK;
        i = count = *buf;
        buf += 3;
        while (i--) {
                VIA_WRITE(addr, *buf++);
        }
        if (count & 3)
                buf += 4 - (count & 3);
        *buffer = buf;
        return state_command;
}
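/*
 * A video header6 command carries a dword count followed by (address, value)
 * pairs, each address being checked with verify_mmio_address(); the tail is
 * again zero-padded to a multiple of four dwords.
 */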
static __inline__ verifier_state_t
via_check_vheader6(uint32_t const **buffer, const uint32_t * buf_end)
{
        uint32_t data;
        const uint32_t *buf = *buffer;
        uint32_t i;

        if (buf_end - buf < 4) {
                DRM_ERROR("Illegal termination of video header6 command\n");
                return state_error;
        }
        buf++;
        data = *buf++;
        if (*buf++ != 0x00F60000) {
                DRM_ERROR("Illegal header6 header data\n");
                return state_error;
        }
        if (*buf++ != 0x00000000) {
                DRM_ERROR("Illegal header6 header data\n");
                return state_error;
        }
        if ((buf_end - buf) < (data << 1)) {
                DRM_ERROR("Illegal termination of video header6 command\n");
                return state_error;
        }
        for (i = 0; i < data; ++i) {
                if (verify_mmio_address(*buf++))
                        return state_error;
                buf++;
        }
        data <<= 1;
        if ((data & 3) && verify_video_tail(&buf, buf_end, 4 - (data & 3)))
                return state_error;
        *buffer = buf;
        return state_command;
}

static __inline__ verifier_state_t
via_parse_vheader6(drm_via_private_t * dev_priv, uint32_t const **buffer,
                   const uint32_t * buf_end)
{
        uint32_t addr, count, i;
        const uint32_t *buf = *buffer;

        i = count = *++buf;
        buf += 3;
        while (i--) {
                addr = *buf++;
                VIA_WRITE(addr, *buf++);
        }
        count <<= 1;
        if (count & 3)
                buf += 4 - (count & 3);
        *buffer = buf;
        return state_command;
}
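/*
 * Top-level verifier: classifies each dword of the submitted buffer as a
 * HALCYON_HEADER2, HALCYON_HEADER1, video header5 or video header6 stream and
 * dispatches to the matching checker. Video headers are only accepted on
 * chipsets with CME video (VIA_PRO_GROUP_A, VIA_DX9_0), and accelerated 3D is
 * refused on VIA_DX9_0.
 */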
int
via_verify_command_stream(const uint32_t * buf, unsigned int size,
                          drm_device_t * dev, int agp)
{
        drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
        drm_via_state_t *hc_state = &dev_priv->hc_state;
        drm_via_state_t saved_state = *hc_state;
        uint32_t cmd;
        const uint32_t *buf_end = buf + (size >> 2);
        verifier_state_t state = state_command;
        int cme_video;
        int supported_3d;

        cme_video = (dev_priv->chipset == VIA_PRO_GROUP_A ||
                     dev_priv->chipset == VIA_DX9_0);

        supported_3d = dev_priv->chipset != VIA_DX9_0;

        hc_state->dev = dev;
        hc_state->unfinished = no_sequence;
        hc_state->map_cache = NULL;
        hc_state->agp = agp;
        hc_state->buf_start = buf;
        dev_priv->num_fire_offsets = 0;

        while (buf < buf_end) {

                switch (state) {
                case state_header2:
                        state = via_check_header2(&buf, buf_end, hc_state);
                        break;
                case state_header1:
                        state = via_check_header1(&buf, buf_end);
                        break;
                case state_vheader5:
                        state = via_check_vheader5(&buf, buf_end);
                        break;
                case state_vheader6:
                        state = via_check_vheader6(&buf, buf_end);
                        break;
                case state_command:
                        if ((HALCYON_HEADER2 == (cmd = *buf)) &&
                            supported_3d)
                                state = state_header2;
                        else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
                                state = state_header1;
                        else if (cme_video
                                 && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
                                state = state_vheader5;
                        else if (cme_video
                                 && (cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
                                state = state_vheader6;
                        else if ((cmd == HALCYON_HEADER2) && !supported_3d) {
                                DRM_ERROR("Accelerated 3D is not supported on this chipset yet.\n");
                                state = state_error;
                        } else {
                                DRM_ERROR
                                    ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
                                     cmd);
                                state = state_error;
                        }
                        break;
                case state_error:
                default:
                        *hc_state = saved_state;
                        return DRM_ERR(EINVAL);
                }
        }
        if (state == state_error) {
                *hc_state = saved_state;
                return DRM_ERR(EINVAL);
        }
        return 0;
}

int
via_parse_command_stream(drm_device_t * dev, const uint32_t * buf,
                         unsigned int size)
{
        drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
        uint32_t cmd;
        const uint32_t *buf_end = buf + (size >> 2);
        verifier_state_t state = state_command;
        int fire_count = 0;

        while (buf < buf_end) {

                switch (state) {
                case state_header2:
                        state =
                            via_parse_header2(dev_priv, &buf, buf_end,
                                              &fire_count);
                        break;
                case state_header1:
                        state = via_parse_header1(dev_priv, &buf, buf_end);
                        break;
                case state_vheader5:
                        state = via_parse_vheader5(dev_priv, &buf, buf_end);
                        break;
                case state_vheader6:
                        state = via_parse_vheader6(dev_priv, &buf, buf_end);
                        break;
                case state_command:
                        if (HALCYON_HEADER2 == (cmd = *buf))
                                state = state_header2;
                        else if ((cmd & HALCYON_HEADER1MASK) == HALCYON_HEADER1)
                                state = state_header1;
                        else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER5)
                                state = state_vheader5;
                        else if ((cmd & VIA_VIDEOMASK) == VIA_VIDEO_HEADER6)
                                state = state_vheader6;
                        else {
                                DRM_ERROR
                                    ("Invalid / Unimplemented DMA HEADER command. 0x%x\n",
                                     cmd);
                                state = state_error;
                        }
                        break;
                case state_error:
                default:
                        return DRM_ERR(EINVAL);
                }
        }
        if (state == state_error) {
                return DRM_ERR(EINVAL);
        }
        return 0;
}

static void
setup_hazard_table(hz_init_t init_table[], hazard_t table[], int size)
{
        int i;

        for (i = 0; i < 256; ++i) {
                table[i] = forbidden_command;
        }

        for (i = 0; i < size; ++i) {
                table[init_table[i].code] = init_table[i].hz;
        }
}

void via_init_command_verifier(void)
{
        setup_hazard_table(init_table1, table1,
                           sizeof(init_table1) / sizeof(hz_init_t));
        setup_hazard_table(init_table2, table2,
                           sizeof(init_table2) / sizeof(hz_init_t));
        setup_hazard_table(init_table3, table3,
                           sizeof(init_table3) / sizeof(hz_init_t));
}
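
/*
 * A minimal usage sketch (assumed caller, not part of this file): the hazard
 * tables must be built once before any buffer is checked, and a buffer that
 * fails verification must not be handed to the hardware. The example name
 * and the PCI replay decision are illustrative assumptions.
 */
#if 0
static int example_submit(drm_device_t * dev, const uint32_t * cmdbuf,
                          unsigned int size, int agp)
{
        int ret;

        via_init_command_verifier();    /* normally done once at driver init */

        ret = via_verify_command_stream(cmdbuf, size, dev, agp);
        if (ret)
                return ret;     /* rejected: do not dispatch */

        if (!agp)
                return via_parse_command_stream(dev, cmdbuf, size);

        /* ... otherwise queue cmdbuf for AGP/ring dispatch ... */
        return 0;
}
#endif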