/* savage_state.c -- State and drawing support for Savage
 *
 * Copyright 2004 Felix Kuehling
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sub license,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL FELIX KUEHLING BE LIABLE FOR
 * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF
 * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <drm/savage_drm.h>
#include "savage_drv.h"
void savage_emit_clip_rect_s3d(drm_savage_private_t * dev_priv,
			       const struct drm_clip_rect * pbox)
{
	uint32_t scstart = dev_priv->state.s3d.new_scstart;
	uint32_t scend = dev_priv->state.s3d.new_scend;
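	/* The clip rectangle is packed into the 11-bit x and y fields of the
	 * scissor start/end registers; x2/y2 are written minus one, which
	 * suggests the hardware treats the end coordinates as inclusive. */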
	scstart = (scstart & ~SAVAGE_SCISSOR_MASK_S3D) |
	    ((uint32_t) pbox->x1 & 0x000007ff) |
	    (((uint32_t) pbox->y1 << 16) & 0x07ff0000);
	scend = (scend & ~SAVAGE_SCISSOR_MASK_S3D) |
	    (((uint32_t) pbox->x2 - 1) & 0x000007ff) |
	    ((((uint32_t) pbox->y2 - 1) << 16) & 0x07ff0000);
	if (scstart != dev_priv->state.s3d.scstart ||
	    scend != dev_priv->state.s3d.scend) {
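		/* Drawing should be idle before the scissor changes; the
		 * WAIT_3D below appears to serve that purpose, and
		 * dev_priv->waiting records that a wait command was
		 * emitted. */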
		DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
		DMA_SET_REGISTERS(SAVAGE_SCSTART_S3D, 2);
		dev_priv->state.s3d.scstart = scstart;
		dev_priv->state.s3d.scend = scend;
		dev_priv->waiting = 1;

void savage_emit_clip_rect_s4(drm_savage_private_t * dev_priv,
			      const struct drm_clip_rect * pbox)
{
	uint32_t drawctrl0 = dev_priv->state.s4.new_drawctrl0;
	uint32_t drawctrl1 = dev_priv->state.s4.new_drawctrl1;

	drawctrl0 = (drawctrl0 & ~SAVAGE_SCISSOR_MASK_S4) |
	    ((uint32_t) pbox->x1 & 0x000007ff) |
	    (((uint32_t) pbox->y1 << 12) & 0x00fff000);
	drawctrl1 = (drawctrl1 & ~SAVAGE_SCISSOR_MASK_S4) |
	    (((uint32_t) pbox->x2 - 1) & 0x000007ff) |
	    ((((uint32_t) pbox->y2 - 1) << 12) & 0x00fff000);

	if (drawctrl0 != dev_priv->state.s4.drawctrl0 ||
	    drawctrl1 != dev_priv->state.s4.drawctrl1) {
		DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
		DMA_SET_REGISTERS(SAVAGE_DRAWCTRL0_S4, 2);
		dev_priv->state.s4.drawctrl0 = drawctrl0;
		dev_priv->state.s4.drawctrl1 = drawctrl1;
		dev_priv->waiting = 1;
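/* Texture addresses carry the memory type in their low bits: bit 0 appears
 * to select AGP (set) vs. local video memory (clear), and the check below
 * rejects addresses unless (addr & 6) == 2. */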
static int savage_verify_texaddr(drm_savage_private_t * dev_priv, int unit,
				 uint32_t addr)
{
	if ((addr & 6) != 2) {	/* reserved bits */
		DRM_ERROR("bad texAddr%d %08x (reserved bits)\n", unit, addr);
	if (!(addr & 1)) {	/* local */
		if (addr < dev_priv->texture_offset ||
		    addr >= dev_priv->texture_offset + dev_priv->texture_size) {
			DRM_ERROR
			    ("bad texAddr%d %08x (local addr out of range)\n",
			     unit, addr);
	if (!dev_priv->agp_textures) {
		DRM_ERROR("bad texAddr%d %08x (AGP not available)\n",
			  unit, addr);
	if (addr < dev_priv->agp_textures->offset ||
	    addr >= (dev_priv->agp_textures->offset +
		     dev_priv->agp_textures->size)) {
		DRM_ERROR
		    ("bad texAddr%d %08x (AGP addr out of range)\n",
		     unit, addr);
#define SAVE_STATE(reg,where)			\
	if(start <= reg && start+count > reg)	\
		dev_priv->state.where = regs[reg - start]
#define SAVE_STATE_MASK(reg,where,mask) do {			\
	if(start <= reg && start+count > reg) {			\
		uint32_t tmp;					\
		tmp = regs[reg - start];			\
		dev_priv->state.where = (tmp & (mask)) |	\
			(dev_priv->state.where & ~(mask));	\
	}							\
} while (0)

static int savage_verify_state_s3d(drm_savage_private_t * dev_priv,
				   unsigned int start, unsigned int count,
				   const uint32_t *regs)
{
	if (start < SAVAGE_TEXPALADDR_S3D ||
	    start + count - 1 > SAVAGE_DESTTEXRWWATERMARK_S3D) {
		DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
			  start, start + count - 1);

	SAVE_STATE_MASK(SAVAGE_SCSTART_S3D, s3d.new_scstart,
			~SAVAGE_SCISSOR_MASK_S3D);
	SAVE_STATE_MASK(SAVAGE_SCEND_S3D, s3d.new_scend,
			~SAVAGE_SCISSOR_MASK_S3D);

	/* if any texture regs were changed ... */
	if (start <= SAVAGE_TEXCTRL_S3D &&
	    start + count > SAVAGE_TEXPALADDR_S3D) {
		/* ... check texture state */
		SAVE_STATE(SAVAGE_TEXCTRL_S3D, s3d.texctrl);
		SAVE_STATE(SAVAGE_TEXADDR_S3D, s3d.texaddr);
		if (dev_priv->state.s3d.texctrl & SAVAGE_TEXCTRL_TEXEN_MASK)
			return savage_verify_texaddr(dev_priv, 0,
					dev_priv->state.s3d.texaddr);

static int savage_verify_state_s4(drm_savage_private_t * dev_priv,
				  unsigned int start, unsigned int count,
				  const uint32_t *regs)
{
	if (start < SAVAGE_DRAWLOCALCTRL_S4 ||
	    start + count - 1 > SAVAGE_TEXBLENDCOLOR_S4) {
		DRM_ERROR("invalid register range (0x%04x-0x%04x)\n",
			  start, start + count - 1);

	SAVE_STATE_MASK(SAVAGE_DRAWCTRL0_S4, s4.new_drawctrl0,
			~SAVAGE_SCISSOR_MASK_S4);
	SAVE_STATE_MASK(SAVAGE_DRAWCTRL1_S4, s4.new_drawctrl1,
			~SAVAGE_SCISSOR_MASK_S4);

	/* if any texture regs were changed ... */
	if (start <= SAVAGE_TEXDESCR_S4 &&
	    start + count > SAVAGE_TEXPALADDR_S4) {
		/* ... check texture state */
		SAVE_STATE(SAVAGE_TEXDESCR_S4, s4.texdescr);
		SAVE_STATE(SAVAGE_TEXADDR0_S4, s4.texaddr0);
		SAVE_STATE(SAVAGE_TEXADDR1_S4, s4.texaddr1);
		if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX0EN_MASK)
			ret |= savage_verify_texaddr(dev_priv, 0,
					dev_priv->state.s4.texaddr0);
		if (dev_priv->state.s4.texdescr & SAVAGE_TEXDESCR_TEX1EN_MASK)
			ret |= savage_verify_texaddr(dev_priv, 1,
					dev_priv->state.s4.texaddr1);

#undef SAVE_STATE
#undef SAVE_STATE_MASK
static int savage_dispatch_state(drm_savage_private_t * dev_priv,
				 const drm_savage_cmd_header_t * cmd_header,
				 const uint32_t *regs)
{
	unsigned int count = cmd_header->state.count;
	unsigned int start = cmd_header->state.start;
	unsigned int count2 = 0;
	unsigned int bci_size;

	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
		ret = savage_verify_state_s3d(dev_priv, start, count, regs);
		/* scissor regs are emitted in savage_dispatch_draw */
		if (start < SAVAGE_SCSTART_S3D) {
			if (start + count > SAVAGE_SCEND_S3D + 1)
				count2 = count - (SAVAGE_SCEND_S3D + 1 - start);
			if (start + count > SAVAGE_SCSTART_S3D)
				count = SAVAGE_SCSTART_S3D - start;
		} else if (start <= SAVAGE_SCEND_S3D) {
			if (start + count > SAVAGE_SCEND_S3D + 1) {
				count -= SAVAGE_SCEND_S3D + 1 - start;
				start = SAVAGE_SCEND_S3D + 1;
		ret = savage_verify_state_s4(dev_priv, start, count, regs);
		/* scissor regs are emitted in savage_dispatch_draw */
		if (start < SAVAGE_DRAWCTRL0_S4) {
			if (start + count > SAVAGE_DRAWCTRL1_S4 + 1)
				count2 = count -
					 (SAVAGE_DRAWCTRL1_S4 + 1 - start);
			if (start + count > SAVAGE_DRAWCTRL0_S4)
				count = SAVAGE_DRAWCTRL0_S4 - start;
		} else if (start <= SAVAGE_DRAWCTRL1_S4) {
			if (start + count > SAVAGE_DRAWCTRL1_S4 + 1) {
				count -= SAVAGE_DRAWCTRL1_S4 + 1 - start;
				start = SAVAGE_DRAWCTRL1_S4 + 1;

	bci_size = count + (count + 254) / 255 + count2 + (count2 + 254) / 255;
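	/* One extra command dword is budgeted per group of up to 255
	 * register values, since DMA_SET_REGISTERS below is issued in chunks
	 * of at most 255 registers; count2 covers the registers following
	 * the scissor range that was carved out above. */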
	if (cmd_header->state.global) {
		BEGIN_DMA(bci_size + 1);
		DMA_WRITE(BCI_CMD_WAIT | BCI_CMD_WAIT_3D);
		dev_priv->waiting = 1;

		unsigned int n = count < 255 ? count : 255;
		DMA_SET_REGISTERS(start, n);
static int savage_dispatch_dma_prim(drm_savage_private_t * dev_priv,
				    const drm_savage_cmd_header_t * cmd_header,
				    const struct drm_buf * dmabuf)
{
	unsigned char reorder = 0;
	unsigned int prim = cmd_header->prim.prim;
	unsigned int skip = cmd_header->prim.skip;
	unsigned int n = cmd_header->prim.count;
	unsigned int start = cmd_header->prim.start;

		DRM_ERROR("called without dma buffers!\n");
	case SAVAGE_PRIM_TRILIST_201:
		prim = SAVAGE_PRIM_TRILIST;
	case SAVAGE_PRIM_TRILIST:
			DRM_ERROR("wrong number of vertices %u in TRILIST\n",
				  n);
	case SAVAGE_PRIM_TRISTRIP:
	case SAVAGE_PRIM_TRIFAN:
			DRM_ERROR
			    ("wrong number of vertices %u in TRIFAN/STRIP\n",
			     n);
		DRM_ERROR("invalid primitive type %u\n", prim);

	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
			DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
		unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
			(skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
			(skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
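		/* Each of the low eight bits of skip masks out one dword of
		 * the full 10-dword Savage4 vertex; the subtraction counts
		 * the dwords that remain.  The DMA path appears to require
		 * exactly 8 dwords (32 bytes) per vertex, hence the size
		 * check below and the dmabuf->total / 32 scaling. */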
		if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
			DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
			DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");

	if (start + n > dmabuf->total / 32) {
		DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
			  start, start + n - 1, dmabuf->total / 32);

	/* Vertex DMA doesn't work with command DMA at the same time,
	 * so we use BCI_... to submit commands here. Flush buffered
	 * faked DMA first. */

	if (dmabuf->bus_address != dev_priv->state.common.vbaddr) {
		BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
		BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
		dev_priv->state.common.vbaddr = dmabuf->bus_address;

	if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) {
		/* Workaround for what looks like a hardware bug. If a
		 * WAIT_3D_IDLE was emitted some time before the
		 * indexed drawing command then the engine will lock
		 * up. There are two known workarounds:
		 * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */
		for (i = 0; i < 63; ++i)
			BCI_WRITE(BCI_CMD_WAIT);
		dev_priv->waiting = 0;
		/* Can emit up to 255 indices (85 triangles) at once. */
		unsigned int count = n > 255 ? 255 : n;

		if (reorder) {
			/* Need to reorder indices for correct flat
			 * shading while preserving the clock sense
			 * for correct culling. Only on Savage3D. */
			int reorder[3] = { -1, -1, -1 };
			reorder[start % 3] = 2;
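			/* With reorder[start % 3] = 2 and -1 elsewhere, the
			 * first triangle (start, start+1, start+2) is emitted
			 * as (start+2, start, start+1): a cyclic rotation
			 * that keeps the winding but changes which vertex
			 * the hardware uses for flat shading. */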
			BEGIN_BCI((count + 1 + 1) / 2);
			BCI_DRAW_INDICES_S3D(count, prim, start + 2);

			for (i = start + 1; i + 1 < start + count; i += 2)
				BCI_WRITE((i + reorder[i % 3]) |
					  ((i + 1 +
					    reorder[(i + 1) % 3]) << 16));
			if (i < start + count)
				BCI_WRITE(i + reorder[i % 3]);
		} else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
			BEGIN_BCI((count + 1 + 1) / 2);
			BCI_DRAW_INDICES_S3D(count, prim, start);

			for (i = start + 1; i + 1 < start + count; i += 2)
				BCI_WRITE(i | ((i + 1) << 16));
			if (i < start + count)
				BCI_WRITE(i);
		} else {
			BEGIN_BCI((count + 2 + 1) / 2);
			BCI_DRAW_INDICES_S4(count, prim, skip);

			for (i = start; i + 1 < start + count; i += 2)
				BCI_WRITE(i | ((i + 1) << 16));
			if (i < start + count)
				BCI_WRITE(i);
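		/* Primitives longer than 255 indices are emitted in chunks;
		 * BCI_CMD_DRAW_CONT below presumably marks each follow-up
		 * chunk as a continuation of the previous draw command. */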
		prim |= BCI_CMD_DRAW_CONT;

static int savage_dispatch_vb_prim(drm_savage_private_t * dev_priv,
				   const drm_savage_cmd_header_t * cmd_header,
				   const uint32_t *vtxbuf, unsigned int vb_size,
				   unsigned int vb_stride)
{
	unsigned char reorder = 0;
	unsigned int prim = cmd_header->prim.prim;
	unsigned int skip = cmd_header->prim.skip;
	unsigned int n = cmd_header->prim.count;
	unsigned int start = cmd_header->prim.start;
	unsigned int vtx_size;

	case SAVAGE_PRIM_TRILIST_201:
		prim = SAVAGE_PRIM_TRILIST;
	case SAVAGE_PRIM_TRILIST:
			DRM_ERROR("wrong number of vertices %u in TRILIST\n",
				  n);
	case SAVAGE_PRIM_TRISTRIP:
	case SAVAGE_PRIM_TRIFAN:
			DRM_ERROR
			    ("wrong number of vertices %u in TRIFAN/STRIP\n",
			     n);
		DRM_ERROR("invalid primitive type %u\n", prim);

	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
		if (skip > SAVAGE_SKIP_ALL_S3D) {
			DRM_ERROR("invalid skip flags 0x%04x\n", skip);
		vtx_size = 8;	/* full vertex */
		if (skip > SAVAGE_SKIP_ALL_S4) {
			DRM_ERROR("invalid skip flags 0x%04x\n", skip);
		vtx_size = 10;	/* full vertex */

	vtx_size -= (skip & 1) + (skip >> 1 & 1) +
		(skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
		(skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);
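	/* vtx_size and vb_stride are in dwords; each set bit in skip drops
	 * one dword from the full vertex format, and vb_size below is
	 * treated as a byte count (hence the vb_stride * 4 scaling). */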
	if (vtx_size > vb_stride) {
		DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
			  vtx_size, vb_stride);

	if (start + n > vb_size / (vb_stride * 4)) {
		DRM_ERROR("vertex indices (%u-%u) out of range (0-%u)\n",
			  start, start + n - 1, vb_size / (vb_stride * 4));
		/* Can emit up to 255 vertices (85 triangles) at once. */
		unsigned int count = n > 255 ? 255 : n;

		if (reorder) {
			/* Need to reorder vertices for correct flat
			 * shading while preserving the clock sense
			 * for correct culling. Only on Savage3D. */
			int reorder[3] = { -1, -1, -1 };
			reorder[start % 3] = 2;

			BEGIN_DMA(count * vtx_size + 1);
			DMA_DRAW_PRIMITIVE(count, prim, skip);

			for (i = start; i < start + count; ++i) {
				unsigned int j = i + reorder[i % 3];
				DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
			}
		} else {
			BEGIN_DMA(count * vtx_size + 1);
			DMA_DRAW_PRIMITIVE(count, prim, skip);

			if (vb_stride == vtx_size) {
				DMA_COPY(&vtxbuf[vb_stride * start],
					 vtx_size * count);
			} else {
				for (i = start; i < start + count; ++i) {
					DMA_COPY(&vtxbuf[vb_stride * i],
						 vtx_size);
		prim |= BCI_CMD_DRAW_CONT;

static int savage_dispatch_dma_idx(drm_savage_private_t * dev_priv,
				   const drm_savage_cmd_header_t * cmd_header,
				   const uint16_t *idx,
				   const struct drm_buf * dmabuf)
{
	unsigned char reorder = 0;
	unsigned int prim = cmd_header->idx.prim;
	unsigned int skip = cmd_header->idx.skip;
	unsigned int n = cmd_header->idx.count;

		DRM_ERROR("called without dma buffers!\n");

	case SAVAGE_PRIM_TRILIST_201:
		prim = SAVAGE_PRIM_TRILIST;
	case SAVAGE_PRIM_TRILIST:
			DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
	case SAVAGE_PRIM_TRISTRIP:
	case SAVAGE_PRIM_TRIFAN:
			DRM_ERROR
			    ("wrong number of indices %u in TRIFAN/STRIP\n", n);
		DRM_ERROR("invalid primitive type %u\n", prim);
	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
			DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
		unsigned int size = 10 - (skip & 1) - (skip >> 1 & 1) -
			(skip >> 2 & 1) - (skip >> 3 & 1) - (skip >> 4 & 1) -
			(skip >> 5 & 1) - (skip >> 6 & 1) - (skip >> 7 & 1);
		if (skip > SAVAGE_SKIP_ALL_S4 || size != 8) {
			DRM_ERROR("invalid skip flags 0x%04x for DMA\n", skip);
			DRM_ERROR("TRILIST_201 used on Savage4 hardware\n");

	/* Vertex DMA doesn't work with command DMA at the same time,
	 * so we use BCI_... to submit commands here. Flush buffered
	 * faked DMA first. */

	if (dmabuf->bus_address != dev_priv->state.common.vbaddr) {
		BCI_SET_REGISTERS(SAVAGE_VERTBUFADDR, 1);
		BCI_WRITE(dmabuf->bus_address | dev_priv->dma_type);
		dev_priv->state.common.vbaddr = dmabuf->bus_address;

	if (S3_SAVAGE3D_SERIES(dev_priv->chipset) && dev_priv->waiting) {
		/* Workaround for what looks like a hardware bug. If a
		 * WAIT_3D_IDLE was emitted some time before the
		 * indexed drawing command then the engine will lock
		 * up. There are two known workarounds:
		 * WAIT_IDLE_EMPTY or emit at least 63 NOPs. */
		for (i = 0; i < 63; ++i)
			BCI_WRITE(BCI_CMD_WAIT);
		dev_priv->waiting = 0;

		/* Can emit up to 255 indices (85 triangles) at once. */
		unsigned int count = n > 255 ? 255 : n;

		for (i = 0; i < count; ++i) {
			if (idx[i] > dmabuf->total / 32) {
				DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
					  i, idx[i], dmabuf->total / 32);
		if (reorder) {
			/* Need to reorder indices for correct flat
			 * shading while preserving the clock sense
			 * for correct culling. Only on Savage3D. */
			int reorder[3] = { 2, -1, -1 };

			BEGIN_BCI((count + 1 + 1) / 2);
			BCI_DRAW_INDICES_S3D(count, prim, idx[2]);

			for (i = 1; i + 1 < count; i += 2)
				BCI_WRITE(idx[i + reorder[i % 3]] |
					  (idx[i + 1 +
					       reorder[(i + 1) % 3]] << 16));
			if (i < count)
				BCI_WRITE(idx[i + reorder[i % 3]]);
		} else if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
			BEGIN_BCI((count + 1 + 1) / 2);
			BCI_DRAW_INDICES_S3D(count, prim, idx[0]);

			for (i = 1; i + 1 < count; i += 2)
				BCI_WRITE(idx[i] | (idx[i + 1] << 16));
		} else {
			BEGIN_BCI((count + 2 + 1) / 2);
			BCI_DRAW_INDICES_S4(count, prim, skip);

			for (i = 0; i + 1 < count; i += 2)
				BCI_WRITE(idx[i] | (idx[i + 1] << 16));
		prim |= BCI_CMD_DRAW_CONT;

static int savage_dispatch_vb_idx(drm_savage_private_t * dev_priv,
				  const drm_savage_cmd_header_t * cmd_header,
				  const uint16_t *idx,
				  const uint32_t *vtxbuf,
				  unsigned int vb_size, unsigned int vb_stride)
{
	unsigned char reorder = 0;
	unsigned int prim = cmd_header->idx.prim;
	unsigned int skip = cmd_header->idx.skip;
	unsigned int n = cmd_header->idx.count;
	unsigned int vtx_size;

	case SAVAGE_PRIM_TRILIST_201:
		prim = SAVAGE_PRIM_TRILIST;
	case SAVAGE_PRIM_TRILIST:
			DRM_ERROR("wrong number of indices %u in TRILIST\n", n);
	case SAVAGE_PRIM_TRISTRIP:
	case SAVAGE_PRIM_TRIFAN:
			DRM_ERROR
			    ("wrong number of indices %u in TRIFAN/STRIP\n", n);
		DRM_ERROR("invalid primitive type %u\n", prim);

	if (S3_SAVAGE3D_SERIES(dev_priv->chipset)) {
		if (skip > SAVAGE_SKIP_ALL_S3D) {
			DRM_ERROR("invalid skip flags 0x%04x\n", skip);
		vtx_size = 8;	/* full vertex */
		if (skip > SAVAGE_SKIP_ALL_S4) {
			DRM_ERROR("invalid skip flags 0x%04x\n", skip);
		vtx_size = 10;	/* full vertex */

	vtx_size -= (skip & 1) + (skip >> 1 & 1) +
		(skip >> 2 & 1) + (skip >> 3 & 1) + (skip >> 4 & 1) +
		(skip >> 5 & 1) + (skip >> 6 & 1) + (skip >> 7 & 1);

	if (vtx_size > vb_stride) {
		DRM_ERROR("vertex size greater than vb stride (%u > %u)\n",
			  vtx_size, vb_stride);
		/* Can emit up to 255 vertices (85 triangles) at once. */
		unsigned int count = n > 255 ? 255 : n;

		for (i = 0; i < count; ++i) {
			if (idx[i] > vb_size / (vb_stride * 4)) {
				DRM_ERROR("idx[%u]=%u out of range (0-%u)\n",
					  i, idx[i], vb_size / (vb_stride * 4));
		if (reorder) {
			/* Need to reorder vertices for correct flat
			 * shading while preserving the clock sense
			 * for correct culling. Only on Savage3D. */
			int reorder[3] = { 2, -1, -1 };

			BEGIN_DMA(count * vtx_size + 1);
			DMA_DRAW_PRIMITIVE(count, prim, skip);

			for (i = 0; i < count; ++i) {
				unsigned int j = idx[i + reorder[i % 3]];
				DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
			}
		} else {
			BEGIN_DMA(count * vtx_size + 1);
			DMA_DRAW_PRIMITIVE(count, prim, skip);

			for (i = 0; i < count; ++i) {
				unsigned int j = idx[i];
				DMA_COPY(&vtxbuf[vb_stride * j], vtx_size);
			}
		prim |= BCI_CMD_DRAW_CONT;

static int savage_dispatch_clear(drm_savage_private_t * dev_priv,
				 const drm_savage_cmd_header_t * cmd_header,
				 const drm_savage_cmd_header_t *data,
				 unsigned int nbox,
				 const struct drm_clip_rect *boxes)
{
	unsigned int flags = cmd_header->clear0.flags;
	unsigned int clear_cmd;
	unsigned int i, nbufs;

	clear_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
	    BCI_CMD_SEND_COLOR | BCI_CMD_DEST_PBD_NEW;
	BCI_CMD_SET_ROP(clear_cmd, 0xCC);
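	/* ROP 0xCC is the standard SRCCOPY raster operation: the destination
	 * is simply replaced by the source (here the clear color). */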
	nbufs = ((flags & SAVAGE_FRONT) ? 1 : 0) +
	    ((flags & SAVAGE_BACK) ? 1 : 0) + ((flags & SAVAGE_DEPTH) ? 1 : 0);

	if (data->clear1.mask != 0xffffffff) {
		DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
		DMA_WRITE(data->clear1.mask);
	for (i = 0; i < nbox; ++i) {
		unsigned int x, y, w, h;
		unsigned int buf;

		x = boxes[i].x1, y = boxes[i].y1;
		w = boxes[i].x2 - boxes[i].x1;
		h = boxes[i].y2 - boxes[i].y1;
		BEGIN_DMA(nbufs * 6);
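		/* Six DMA words are emitted per cleared buffer: the command,
		 * the destination offset, what appears to be the buffer
		 * descriptor, the clear value, and the rectangle position
		 * and size. */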
		for (buf = SAVAGE_FRONT; buf <= SAVAGE_DEPTH; buf <<= 1) {
			DMA_WRITE(clear_cmd);
			DMA_WRITE(dev_priv->front_offset);
			DMA_WRITE(dev_priv->front_bd);
			DMA_WRITE(dev_priv->back_offset);
			DMA_WRITE(dev_priv->back_bd);
			DMA_WRITE(dev_priv->depth_offset);
			DMA_WRITE(dev_priv->depth_bd);
			DMA_WRITE(data->clear1.value);
			DMA_WRITE(BCI_X_Y(x, y));
			DMA_WRITE(BCI_W_H(w, h));
	if (data->clear1.mask != 0xffffffff) {
		DMA_SET_REGISTERS(SAVAGE_BITPLANEWTMASK, 1);
		DMA_WRITE(0xffffffff);

static int savage_dispatch_swap(drm_savage_private_t * dev_priv,
				unsigned int nbox,
				const struct drm_clip_rect *boxes)
{
	unsigned int swap_cmd;

	swap_cmd = BCI_CMD_RECT | BCI_CMD_RECT_XP | BCI_CMD_RECT_YP |
	    BCI_CMD_SRC_PBD_COLOR_NEW | BCI_CMD_DEST_GBD;
	BCI_CMD_SET_ROP(swap_cmd, 0xCC);

	for (i = 0; i < nbox; ++i) {
		DMA_WRITE(dev_priv->back_offset);
		DMA_WRITE(dev_priv->back_bd);
		DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1));
		DMA_WRITE(BCI_X_Y(boxes[i].x1, boxes[i].y1));
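		/* The rectangle position is written twice; the two BCI_X_Y
		 * words appear to set the destination and source positions
		 * of the blit, which are identical for a swap. */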
		DMA_WRITE(BCI_W_H(boxes[i].x2 - boxes[i].x1,
				  boxes[i].y2 - boxes[i].y1));

static int savage_dispatch_draw(drm_savage_private_t * dev_priv,
				const drm_savage_cmd_header_t *start,
				const drm_savage_cmd_header_t *end,
				const struct drm_buf * dmabuf,
				const unsigned int *vtxbuf,
				unsigned int vb_size, unsigned int vb_stride,
				unsigned int nbox,
				const struct drm_clip_rect *boxes)
{
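	/*
	 * The same run of drawing commands is replayed once per clip
	 * rectangle, with the scissor registers updated in between through
	 * emit_clip_rect (presumably the S3D or S4 helper above, depending
	 * on the chip).
	 */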
	for (i = 0; i < nbox; ++i) {
		const drm_savage_cmd_header_t *cmdbuf;
		dev_priv->emit_clip_rect(dev_priv, &boxes[i]);

		while (cmdbuf < end) {
			drm_savage_cmd_header_t cmd_header;
			cmd_header = *cmdbuf;
			switch (cmd_header.cmd.cmd) {
			case SAVAGE_CMD_DMA_PRIM:
				ret = savage_dispatch_dma_prim(
					dev_priv, &cmd_header, dmabuf);
			case SAVAGE_CMD_VB_PRIM:
				ret = savage_dispatch_vb_prim(
					dev_priv, &cmd_header,
					vtxbuf, vb_size, vb_stride);
			case SAVAGE_CMD_DMA_IDX:
				j = (cmd_header.idx.count + 3) / 4;
				/* j was checked in savage_bci_cmdbuf */
				ret = savage_dispatch_dma_idx(dev_priv,
					&cmd_header, (const uint16_t *)cmdbuf,
					dmabuf);
			case SAVAGE_CMD_VB_IDX:
				j = (cmd_header.idx.count + 3) / 4;
				/* j was checked in savage_bci_cmdbuf */
				ret = savage_dispatch_vb_idx(dev_priv,
					&cmd_header, (const uint16_t *)cmdbuf,
					(const uint32_t *)vtxbuf, vb_size,
					vb_stride);
			default:
				/* What's the best return code? EFAULT? */
				DRM_ERROR("IMPLEMENTATION ERROR: "
					  "non-drawing-command %d\n",
					  cmd_header.cmd.cmd);
int savage_bci_cmdbuf(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_savage_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *dmabuf;
	drm_savage_cmdbuf_t *cmdbuf = data;
	drm_savage_cmd_header_t *kcmd_addr = NULL;
	drm_savage_cmd_header_t *first_draw_cmd;
	unsigned int *kvb_addr = NULL;
	struct drm_clip_rect *kbox_addr = NULL;

	LOCK_TEST_WITH_RETURN(dev, file_priv);
	if (dma && dma->buflist) {
		if (cmdbuf->dma_idx >= dma->buf_count) {
			DRM_ERROR
			    ("vertex buffer index %u out of range (0-%u)\n",
			     cmdbuf->dma_idx, dma->buf_count - 1);
		dmabuf = dma->buflist[cmdbuf->dma_idx];

	/* Copy the user buffers into kernel temporary areas. This hasn't been
	 * a performance loss compared to VERIFYAREA_READ/
	 * COPY_FROM_USER_UNCHECKED when done in other drivers, and is correct
	 * for locking on FreeBSD.
	 */
	kcmd_addr = kmalloc_array(cmdbuf->size, 8, GFP_KERNEL);
	if (kcmd_addr == NULL)
		return -ENOMEM;

	if (copy_from_user(kcmd_addr, cmdbuf->cmd_addr,
			   cmdbuf->size * 8)) {

	cmdbuf->cmd_addr = kcmd_addr;
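	/* Command headers are 64 bits wide, so cmdbuf->size counts 8-byte
	 * command units; the overflow checks in the parsing loop below use
	 * the same unit. */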
	if (cmdbuf->vb_size) {
		kvb_addr = memdup_user(cmdbuf->vb_addr, cmdbuf->vb_size);
		if (IS_ERR(kvb_addr)) {
			ret = PTR_ERR(kvb_addr);
		cmdbuf->vb_addr = kvb_addr;

	kbox_addr = kmalloc_array(cmdbuf->nbox, sizeof(struct drm_clip_rect),
				  GFP_KERNEL);
	if (kbox_addr == NULL) {
	if (copy_from_user(kbox_addr, cmdbuf->box_addr,
			   cmdbuf->nbox * sizeof(struct drm_clip_rect))) {
	cmdbuf->box_addr = kbox_addr;
	/* Make sure writes to DMA buffers are finished before sending
	 * DMA commands to the graphics hardware. */

	/* Coming from user space. Don't know if the Xserver has
	 * emitted wait commands. Assuming the worst. */
	dev_priv->waiting = 1;

	first_draw_cmd = NULL;
	while (i < cmdbuf->size) {
		drm_savage_cmd_header_t cmd_header;
		cmd_header = *(drm_savage_cmd_header_t *)cmdbuf->cmd_addr;

		/* Group drawing commands with same state to minimize
		 * iterations over clip rects. */
		switch (cmd_header.cmd.cmd) {
		case SAVAGE_CMD_DMA_IDX:
		case SAVAGE_CMD_VB_IDX:
			j = (cmd_header.idx.count + 3) / 4;
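			/* Index data follows the header inline: four 16-bit
			 * indices fit in each 8-byte command unit, hence the
			 * (count + 3) / 4 rounding. */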
			if (i + j > cmdbuf->size) {
				DRM_ERROR("indexed drawing command extends "
					  "beyond end of command buffer\n");
		case SAVAGE_CMD_DMA_PRIM:
		case SAVAGE_CMD_VB_PRIM:
			if (!first_draw_cmd)
				first_draw_cmd = cmdbuf->cmd_addr - 1;
			cmdbuf->cmd_addr += j;
			if (first_draw_cmd) {
				ret = savage_dispatch_draw(
					dev_priv, first_draw_cmd,
					cmdbuf->cmd_addr - 1,
					dmabuf, cmdbuf->vb_addr,
					cmdbuf->vb_size, cmdbuf->vb_stride,
					cmdbuf->nbox, cmdbuf->box_addr);
				first_draw_cmd = NULL;

		switch (cmd_header.cmd.cmd) {
		case SAVAGE_CMD_STATE:
			j = (cmd_header.state.count + 1) / 2;
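			/* State data also follows inline, as 32-bit register
			 * values packed two per 8-byte command unit. */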
			if (i + j > cmdbuf->size) {
				DRM_ERROR("command SAVAGE_CMD_STATE extends "
					  "beyond end of command buffer\n");
			ret = savage_dispatch_state(dev_priv, &cmd_header,
					(const uint32_t *)cmdbuf->cmd_addr);
			cmdbuf->cmd_addr += j;

		case SAVAGE_CMD_CLEAR:
			if (i + 1 > cmdbuf->size) {
				DRM_ERROR("command SAVAGE_CMD_CLEAR extends "
					  "beyond end of command buffer\n");
			ret = savage_dispatch_clear(dev_priv, &cmd_header,
						    cmdbuf->cmd_addr,
						    cmdbuf->nbox,
						    cmdbuf->box_addr);

		case SAVAGE_CMD_SWAP:
			ret = savage_dispatch_swap(dev_priv, cmdbuf->nbox,
						   cmdbuf->box_addr);
1121 DRM_ERROR("invalid command 0x%x\n",
1122 cmd_header
.cmd
.cmd
);
1134 if (first_draw_cmd
) {
1135 ret
= savage_dispatch_draw (
1136 dev_priv
, first_draw_cmd
, cmdbuf
->cmd_addr
, dmabuf
,
1137 cmdbuf
->vb_addr
, cmdbuf
->vb_size
, cmdbuf
->vb_stride
,
1138 cmdbuf
->nbox
, cmdbuf
->box_addr
);
	if (dmabuf && cmdbuf->discard) {
		drm_savage_buf_priv_t *buf_priv = dmabuf->dev_private;
		uint16_t event;

		event = savage_bci_emit_event(dev_priv, SAVAGE_WAIT_3D);
		SET_AGE(&buf_priv->age, event, dev_priv->event_wrap);
		savage_freelist_put(dev, dmabuf);
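		/* The buffer is aged with a SAVAGE_WAIT_3D event before it
		 * goes back on the freelist, so it will not be reused until
		 * the hardware is done reading from it. */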
	/* If we didn't need to allocate them, these'll be NULL */