/* mga_state.c -- State support for MGA G200/G400 -*- linux-c -*-
 * Created: Thu Jan 27 02:53:43 2000 by jhartmann@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jeff Hartmann <jhartmann@valinux.com>
 *    Keith Whitwell <keith@tungstengraphics.com>
 *
 * Rewritten by:
 *    Gareth Hughes <gareth@valinux.com>
 */

#include <drm/drmP.h>
#include <drm/mga_drm.h>
#include "mga_drv.h"

/* ================================================================
 * DMA hardware state programming functions
 */

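/* Emit the scissor state for a single clip rectangle: CXBNDRY packs the
 * right and left edges, while YTOP/YBOT are the top and bottom rows
 * multiplied by the front-buffer pitch.  On the G400 an extra DWGCTL
 * reload is emitted first (see the comment in the function body).
 */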
static void mga_emit_clip_rect(drm_mga_private_t *dev_priv,
			       struct drm_clip_rect *box)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
	unsigned int pitch = dev_priv->front_pitch;
	DMA_LOCALS;

	BEGIN_DMA(2);

	/* Force reset of DWGCTL on G400 (eliminates clip disable bit).
	 */
	if (dev_priv->chipset >= MGA_CARD_TYPE_G400) {
		DMA_BLOCK(MGA_DWGCTL, ctx->dwgctl,
			  MGA_LEN + MGA_EXEC, 0x80000000,
			  MGA_DWGCTL, ctx->dwgctl,
			  MGA_LEN + MGA_EXEC, 0x80000000);
	}
	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
		  MGA_CXBNDRY, ((box->x2 - 1) << 16) | box->x1,
		  MGA_YTOP, box->y1 * pitch, MGA_YBOT, (box->y2 - 1) * pitch);

	ADVANCE_DMA();
}

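/* The two *_emit_context() helpers below upload the drawing-context
 * registers (destination origin, plane write mask, drawing control, alpha,
 * fog, Z origin, foreground colour) from the per-client SAREA copy.  The
 * G400 variant additionally programs the dual-texture stage and stencil
 * registers.
 */
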
static __inline__ void mga_g200_emit_context(drm_mga_private_t *dev_priv)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
	DMA_LOCALS;

	BEGIN_DMA(3);

	DMA_BLOCK(MGA_DSTORG, ctx->dstorg,
		  MGA_MACCESS, ctx->maccess,
		  MGA_PLNWT, ctx->plnwt, MGA_DWGCTL, ctx->dwgctl);

	DMA_BLOCK(MGA_ALPHACTRL, ctx->alphactrl,
		  MGA_FOGCOL, ctx->fogcolor,
		  MGA_WFLAG, ctx->wflag, MGA_ZORG, dev_priv->depth_offset);

	DMA_BLOCK(MGA_FCOL, ctx->fcol,
		  MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);

	ADVANCE_DMA();
}

static __inline__ void mga_g400_emit_context(drm_mga_private_t *dev_priv)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
	DMA_LOCALS;

	BEGIN_DMA(4);

	DMA_BLOCK(MGA_DSTORG, ctx->dstorg,
		  MGA_MACCESS, ctx->maccess,
		  MGA_PLNWT, ctx->plnwt, MGA_DWGCTL, ctx->dwgctl);

	DMA_BLOCK(MGA_ALPHACTRL, ctx->alphactrl,
		  MGA_FOGCOL, ctx->fogcolor,
		  MGA_WFLAG, ctx->wflag, MGA_ZORG, dev_priv->depth_offset);

	DMA_BLOCK(MGA_WFLAG1, ctx->wflag,
		  MGA_TDUALSTAGE0, ctx->tdualstage0,
		  MGA_TDUALSTAGE1, ctx->tdualstage1, MGA_FCOL, ctx->fcol);

	DMA_BLOCK(MGA_STENCIL, ctx->stencil,
		  MGA_STENCILCTL, ctx->stencilctl,
		  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);

	ADVANCE_DMA();
}

static __inline__ void mga_g200_emit_tex0(drm_mga_private_t *dev_priv)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];
	DMA_LOCALS;

	BEGIN_DMA(4);

	DMA_BLOCK(MGA_TEXCTL2, tex->texctl2,
		  MGA_TEXCTL, tex->texctl,
		  MGA_TEXFILTER, tex->texfilter,
		  MGA_TEXBORDERCOL, tex->texbordercol);

	DMA_BLOCK(MGA_TEXORG, tex->texorg,
		  MGA_TEXORG1, tex->texorg1,
		  MGA_TEXORG2, tex->texorg2, MGA_TEXORG3, tex->texorg3);

	DMA_BLOCK(MGA_TEXORG4, tex->texorg4,
		  MGA_TEXWIDTH, tex->texwidth,
		  MGA_TEXHEIGHT, tex->texheight, MGA_WR24, tex->texwidth);

	DMA_BLOCK(MGA_WR34, tex->texheight,
		  MGA_TEXTRANS, 0x0000ffff,
		  MGA_TEXTRANSHIGH, 0x0000ffff, MGA_DMAPAD, 0x00000000);

	ADVANCE_DMA();
}

static __inline__ void mga_g400_emit_tex0(drm_mga_private_t *dev_priv)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[0];
	DMA_LOCALS;

/*	printk("mga_g400_emit_tex0 %x %x %x\n", tex->texorg, */
/*	       tex->texctl, tex->texctl2); */

	BEGIN_DMA(6);

	DMA_BLOCK(MGA_TEXCTL2, tex->texctl2 | MGA_G400_TC2_MAGIC,
		  MGA_TEXCTL, tex->texctl,
		  MGA_TEXFILTER, tex->texfilter,
		  MGA_TEXBORDERCOL, tex->texbordercol);

	DMA_BLOCK(MGA_TEXORG, tex->texorg,
		  MGA_TEXORG1, tex->texorg1,
		  MGA_TEXORG2, tex->texorg2, MGA_TEXORG3, tex->texorg3);

	DMA_BLOCK(MGA_TEXORG4, tex->texorg4,
		  MGA_TEXWIDTH, tex->texwidth,
		  MGA_TEXHEIGHT, tex->texheight, MGA_WR49, 0x00000000);

	DMA_BLOCK(MGA_WR57, 0x00000000,
		  MGA_WR53, 0x00000000,
		  MGA_WR61, 0x00000000, MGA_WR52, MGA_G400_WR_MAGIC);

	DMA_BLOCK(MGA_WR60, MGA_G400_WR_MAGIC,
		  MGA_WR54, tex->texwidth | MGA_G400_WR_MAGIC,
		  MGA_WR62, tex->texheight | MGA_G400_WR_MAGIC,
		  MGA_DMAPAD, 0x00000000);

	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000,
		  MGA_TEXTRANS, 0x0000ffff, MGA_TEXTRANSHIGH, 0x0000ffff);

	ADVANCE_DMA();
}

static __inline__ void mga_g400_emit_tex1(drm_mga_private_t *dev_priv)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[1];
	DMA_LOCALS;

/*	printk("mga_g400_emit_tex1 %x %x %x\n", tex->texorg, */
/*	       tex->texctl, tex->texctl2); */

	BEGIN_DMA(5);

	DMA_BLOCK(MGA_TEXCTL2, (tex->texctl2 |
				MGA_MAP1_ENABLE |
				MGA_G400_TC2_MAGIC),
		  MGA_TEXCTL, tex->texctl,
		  MGA_TEXFILTER, tex->texfilter,
		  MGA_TEXBORDERCOL, tex->texbordercol);

	DMA_BLOCK(MGA_TEXORG, tex->texorg,
		  MGA_TEXORG1, tex->texorg1,
		  MGA_TEXORG2, tex->texorg2, MGA_TEXORG3, tex->texorg3);

	DMA_BLOCK(MGA_TEXORG4, tex->texorg4,
		  MGA_TEXWIDTH, tex->texwidth,
		  MGA_TEXHEIGHT, tex->texheight, MGA_WR49, 0x00000000);

	DMA_BLOCK(MGA_WR57, 0x00000000,
		  MGA_WR53, 0x00000000,
		  MGA_WR61, 0x00000000,
		  MGA_WR52, tex->texwidth | MGA_G400_WR_MAGIC);

	DMA_BLOCK(MGA_WR60, tex->texheight | MGA_G400_WR_MAGIC,
		  MGA_TEXTRANS, 0x0000ffff,
		  MGA_TEXTRANSHIGH, 0x0000ffff,
		  MGA_TEXCTL2, tex->texctl2 | MGA_G400_TC2_MAGIC);

	ADVANCE_DMA();
}

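/* The *_emit_pipe() helpers select a WARP pipe: the pipe is first suspended
 * via WIADDR/WIADDR2, the vertex size and setup registers are reprogrammed,
 * and the new pipe's physical address is then written back together with
 * MGA_WMODE_START and the wagp_enable bits.  The 0xffffffff DMAPAD writes
 * are padding required by a hardware bug, per the existing comments below.
 */
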
static __inline__ void mga_g200_emit_pipe(drm_mga_private_t *dev_priv)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned int pipe = sarea_priv->warp_pipe;
	DMA_LOCALS;

	BEGIN_DMA(3);

	DMA_BLOCK(MGA_WIADDR, MGA_WMODE_SUSPEND,
		  MGA_WVRTXSZ, 0x00000007,
		  MGA_WFLAG, 0x00000000, MGA_WR24, 0x00000000);

	DMA_BLOCK(MGA_WR25, 0x00000100,
		  MGA_WR34, 0x00000000,
		  MGA_WR42, 0x0000ffff, MGA_WR60, 0x0000ffff);

	/* Padding required due to hardware bug.
	 */
	DMA_BLOCK(MGA_DMAPAD, 0xffffffff,
		  MGA_DMAPAD, 0xffffffff,
		  MGA_DMAPAD, 0xffffffff,
		  MGA_WIADDR, (dev_priv->warp_pipe_phys[pipe] |
			       MGA_WMODE_START | dev_priv->wagp_enable));

	ADVANCE_DMA();
}

static __inline__ void mga_g400_emit_pipe(drm_mga_private_t *dev_priv)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned int pipe = sarea_priv->warp_pipe;
	DMA_LOCALS;

/*	printk("mga_g400_emit_pipe %x\n", pipe); */

	BEGIN_DMA(10);

	DMA_BLOCK(MGA_WIADDR2, MGA_WMODE_SUSPEND,
		  MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);

	if (pipe & MGA_T2) {
		DMA_BLOCK(MGA_WVRTXSZ, 0x00001e09,
			  MGA_DMAPAD, 0x00000000,
			  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);

		DMA_BLOCK(MGA_WACCEPTSEQ, 0x00000000,
			  MGA_WACCEPTSEQ, 0x00000000,
			  MGA_WACCEPTSEQ, 0x00000000,
			  MGA_WACCEPTSEQ, 0x1e000000);
	} else {
		if (dev_priv->warp_pipe & MGA_T2) {
			/* Flush the WARP pipe */
			DMA_BLOCK(MGA_YDST, 0x00000000,
				  MGA_FXLEFT, 0x00000000,
				  MGA_FXRIGHT, 0x00000001,
				  MGA_DWGCTL, MGA_DWGCTL_FLUSH);

			DMA_BLOCK(MGA_LEN + MGA_EXEC, 0x00000001,
				  MGA_DWGSYNC, 0x00007000,
				  MGA_TEXCTL2, MGA_G400_TC2_MAGIC,
				  MGA_LEN + MGA_EXEC, 0x00000000);

			DMA_BLOCK(MGA_TEXCTL2, (MGA_DUALTEX |
						MGA_G400_TC2_MAGIC),
				  MGA_LEN + MGA_EXEC, 0x00000000,
				  MGA_TEXCTL2, MGA_G400_TC2_MAGIC,
				  MGA_DMAPAD, 0x00000000);
		}

		DMA_BLOCK(MGA_WVRTXSZ, 0x00001807,
			  MGA_DMAPAD, 0x00000000,
			  MGA_DMAPAD, 0x00000000, MGA_DMAPAD, 0x00000000);

		DMA_BLOCK(MGA_WACCEPTSEQ, 0x00000000,
			  MGA_WACCEPTSEQ, 0x00000000,
			  MGA_WACCEPTSEQ, 0x00000000,
			  MGA_WACCEPTSEQ, 0x18000000);
	}

	DMA_BLOCK(MGA_WFLAG, 0x00000000,
		  MGA_WFLAG1, 0x00000000,
		  MGA_WR56, MGA_G400_WR56_MAGIC, MGA_DMAPAD, 0x00000000);

	DMA_BLOCK(MGA_WR49, 0x00000000,			/* tex0 */
		  MGA_WR57, 0x00000000,			/* tex0 */
		  MGA_WR53, 0x00000000,			/* tex1 */
		  MGA_WR61, 0x00000000);		/* tex1 */

	DMA_BLOCK(MGA_WR54, MGA_G400_WR_MAGIC,		/* tex0 width */
		  MGA_WR62, MGA_G400_WR_MAGIC,		/* tex0 height */
		  MGA_WR52, MGA_G400_WR_MAGIC,		/* tex1 width */
		  MGA_WR60, MGA_G400_WR_MAGIC);		/* tex1 height */

	/* Padding required due to hardware bug */
	DMA_BLOCK(MGA_DMAPAD, 0xffffffff,
		  MGA_DMAPAD, 0xffffffff,
		  MGA_DMAPAD, 0xffffffff,
		  MGA_WIADDR2, (dev_priv->warp_pipe_phys[pipe] |
				MGA_WMODE_START | dev_priv->wagp_enable));

	ADVANCE_DMA();
}

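/* The *_emit_state() helpers emit only the SAREA state that has actually
 * changed.  A pipe switch is detected by comparing sarea_priv->warp_pipe
 * against the pipe currently recorded in dev_priv, and each MGA_UPLOAD_*
 * dirty bit is cleared once the corresponding register group has been
 * emitted.
 */
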
static void mga_g200_emit_state(drm_mga_private_t *dev_priv)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned int dirty = sarea_priv->dirty;

	if (sarea_priv->warp_pipe != dev_priv->warp_pipe) {
		mga_g200_emit_pipe(dev_priv);
		dev_priv->warp_pipe = sarea_priv->warp_pipe;
	}

	if (dirty & MGA_UPLOAD_CONTEXT) {
		mga_g200_emit_context(dev_priv);
		sarea_priv->dirty &= ~MGA_UPLOAD_CONTEXT;
	}

	if (dirty & MGA_UPLOAD_TEX0) {
		mga_g200_emit_tex0(dev_priv);
		sarea_priv->dirty &= ~MGA_UPLOAD_TEX0;
	}
}

static void mga_g400_emit_state(drm_mga_private_t *dev_priv)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned int dirty = sarea_priv->dirty;
	int multitex = sarea_priv->warp_pipe & MGA_T2;

	if (sarea_priv->warp_pipe != dev_priv->warp_pipe) {
		mga_g400_emit_pipe(dev_priv);
		dev_priv->warp_pipe = sarea_priv->warp_pipe;
	}

	if (dirty & MGA_UPLOAD_CONTEXT) {
		mga_g400_emit_context(dev_priv);
		sarea_priv->dirty &= ~MGA_UPLOAD_CONTEXT;
	}

	if (dirty & MGA_UPLOAD_TEX0) {
		mga_g400_emit_tex0(dev_priv);
		sarea_priv->dirty &= ~MGA_UPLOAD_TEX0;
	}

	if ((dirty & MGA_UPLOAD_TEX1) && multitex) {
		mga_g400_emit_tex1(dev_priv);
		sarea_priv->dirty &= ~MGA_UPLOAD_TEX1;
	}
}

/* ================================================================
 * SAREA state verification
 */

/* Disallow all write destinations except the front and back buffer.
 */
static int mga_verify_context(drm_mga_private_t *dev_priv)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;

	if (ctx->dstorg != dev_priv->front_offset &&
	    ctx->dstorg != dev_priv->back_offset) {
		DRM_ERROR("*** bad DSTORG: %x (front %x, back %x)\n\n",
			  ctx->dstorg, dev_priv->front_offset,
			  dev_priv->back_offset);
		ctx->dstorg = 0;
		return -EINVAL;
	}

	return 0;
}

/* Disallow texture reads from PCI space.
 */
static int mga_verify_tex(drm_mga_private_t *dev_priv, int unit)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mga_texture_regs_t *tex = &sarea_priv->tex_state[unit];
	unsigned int org;

	org = tex->texorg & (MGA_TEXORGMAP_MASK | MGA_TEXORGACC_MASK);

	if (org == (MGA_TEXORGMAP_SYSMEM | MGA_TEXORGACC_PCI)) {
		DRM_ERROR("*** bad TEXORG: 0x%x, unit %d\n", tex->texorg, unit);
		tex->texorg = 0;
		return -EINVAL;
	}

	return 0;
}

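/* Verify all dirty state before it is emitted.  Note the inverted return
 * convention: the helpers above return 0 on success, but mga_verify_state()
 * returns nonzero when the state is acceptable, so callers test
 * !mga_verify_state().
 */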
static int mga_verify_state(drm_mga_private_t *dev_priv)
{
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	unsigned int dirty = sarea_priv->dirty;
	int ret = 0;

	if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
		sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;

	if (dirty & MGA_UPLOAD_CONTEXT)
		ret |= mga_verify_context(dev_priv);

	if (dirty & MGA_UPLOAD_TEX0)
		ret |= mga_verify_tex(dev_priv, 0);

	if (dev_priv->chipset >= MGA_CARD_TYPE_G400) {
		if (dirty & MGA_UPLOAD_TEX1)
			ret |= mga_verify_tex(dev_priv, 1);

		if (dirty & MGA_UPLOAD_PIPE)
			ret |= (sarea_priv->warp_pipe > MGA_MAX_G400_PIPES);
	} else {
		if (dirty & MGA_UPLOAD_PIPE)
			ret |= (sarea_priv->warp_pipe > MGA_MAX_G200_PIPES);
	}

	return (ret == 0);
}

static int mga_verify_iload(drm_mga_private_t *dev_priv,
			    unsigned int dstorg, unsigned int length)
{
	if (dstorg < dev_priv->texture_offset ||
	    dstorg + length > (dev_priv->texture_offset +
			       dev_priv->texture_size)) {
		DRM_ERROR("*** bad iload DSTORG: 0x%x\n", dstorg);
		return -EINVAL;
	}

	if (length & MGA_ILOAD_MASK) {
		DRM_ERROR("*** bad iload length: 0x%x\n",
			  length & MGA_ILOAD_MASK);
		return -EINVAL;
	}

	return 0;
}

static int mga_verify_blit(drm_mga_private_t *dev_priv,
			   unsigned int srcorg, unsigned int dstorg)
{
	if ((srcorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM) ||
	    (dstorg & 0x3) == (MGA_SRCACC_PCI | MGA_SRCMAP_SYSMEM)) {
		DRM_ERROR("*** bad blit: src=0x%x dst=0x%x\n", srcorg, dstorg);
		return -EINVAL;
	}
	return 0;
}

/* ================================================================
 * DMA command dispatch functions
 */

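/* Clear the front, back and/or depth buffers, as selected by clear->flags,
 * one clip rectangle at a time.  Each buffer pass sets the appropriate
 * write mask and destination origin, and PLNWT/DWGCTL are restored from
 * the saved context state once all boxes have been cleared.
 */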
static void mga_dma_dispatch_clear(struct drm_device *dev, drm_mga_clear_t *clear)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	int nbox = sarea_priv->nbox;
	int i;
	DMA_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_DMA(1);

	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000,
		  MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000);

	ADVANCE_DMA();

	for (i = 0; i < nbox; i++) {
		struct drm_clip_rect *box = &pbox[i];
		u32 height = box->y2 - box->y1;

		DRM_DEBUG("   from=%d,%d to=%d,%d\n",
			  box->x1, box->y1, box->x2, box->y2);

		if (clear->flags & MGA_FRONT) {
			BEGIN_DMA(2);

			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
				  MGA_PLNWT, clear->color_mask,
				  MGA_YDSTLEN, (box->y1 << 16) | height,
				  MGA_FXBNDRY, (box->x2 << 16) | box->x1);

			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
				  MGA_FCOL, clear->clear_color,
				  MGA_DSTORG, dev_priv->front_offset,
				  MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd);

			ADVANCE_DMA();
		}

		if (clear->flags & MGA_BACK) {
			BEGIN_DMA(2);

			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
				  MGA_PLNWT, clear->color_mask,
				  MGA_YDSTLEN, (box->y1 << 16) | height,
				  MGA_FXBNDRY, (box->x2 << 16) | box->x1);

			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
				  MGA_FCOL, clear->clear_color,
				  MGA_DSTORG, dev_priv->back_offset,
				  MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd);

			ADVANCE_DMA();
		}

		if (clear->flags & MGA_DEPTH) {
			BEGIN_DMA(2);

			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
				  MGA_PLNWT, clear->depth_mask,
				  MGA_YDSTLEN, (box->y1 << 16) | height,
				  MGA_FXBNDRY, (box->x2 << 16) | box->x1);

			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
				  MGA_FCOL, clear->clear_depth,
				  MGA_DSTORG, dev_priv->depth_offset,
				  MGA_DWGCTL + MGA_EXEC, dev_priv->clear_cmd);

			ADVANCE_DMA();
		}
	}

	BEGIN_DMA(1);

	/* Force reset of DWGCTL */
	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000,
		  MGA_PLNWT, ctx->plnwt, MGA_DWGCTL, ctx->dwgctl);

	ADVANCE_DMA();

	FLUSH_DMA();
}

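/* Swap buffers by blitting the back buffer onto the front buffer, one clip
 * rectangle at a time.  The SAREA last_frame markers are recorded from the
 * current primary DMA tail and wrap count before the blit is queued.
 */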
static void mga_dma_dispatch_swap(struct drm_device *dev)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	int nbox = sarea_priv->nbox;
	int i;
	DMA_LOCALS;
	DRM_DEBUG("\n");

	sarea_priv->last_frame.head = dev_priv->prim.tail;
	sarea_priv->last_frame.wrap = dev_priv->prim.last_wrap;

	BEGIN_DMA(4 + nbox);

	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000,
		  MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000);

	DMA_BLOCK(MGA_DSTORG, dev_priv->front_offset,
		  MGA_MACCESS, dev_priv->maccess,
		  MGA_SRCORG, dev_priv->back_offset,
		  MGA_AR5, dev_priv->front_pitch);

	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000,
		  MGA_PLNWT, 0xffffffff, MGA_DWGCTL, MGA_DWGCTL_COPY);

	for (i = 0; i < nbox; i++) {
		struct drm_clip_rect *box = &pbox[i];
		u32 height = box->y2 - box->y1;
		u32 start = box->y1 * dev_priv->front_pitch;

		DRM_DEBUG("   from=%d,%d to=%d,%d\n",
			  box->x1, box->y1, box->x2, box->y2);

		DMA_BLOCK(MGA_AR0, start + box->x2 - 1,
			  MGA_AR3, start + box->x1,
			  MGA_FXBNDRY, ((box->x2 - 1) << 16) | box->x1,
			  MGA_YDSTLEN + MGA_EXEC, (box->y1 << 16) | height);
	}

	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
		  MGA_PLNWT, ctx->plnwt,
		  MGA_SRCORG, dev_priv->front_offset, MGA_DWGCTL, ctx->dwgctl);

	ADVANCE_DMA();

	FLUSH_DMA();

	DRM_DEBUG("... done.\n");
}

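/* Dispatch a vertex buffer (and, below, an indexed vertex buffer).  Dirty
 * state is emitted first, then the buffer is executed once per clip
 * rectangle via SECADDRESS/SECEND (SETUPADDRESS/SETUPEND for the indexed
 * path).  Buffers flagged for discard are aged and returned to the
 * freelist.
 */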
static void mga_dma_dispatch_vertex(struct drm_device *dev, struct drm_buf *buf)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_mga_buf_priv_t *buf_priv = buf->dev_private;
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	u32 address = (u32) buf->bus_address;
	u32 length = (u32) buf->used;
	int i = 0;
	DMA_LOCALS;
	DRM_DEBUG("buf=%d used=%d\n", buf->idx, buf->used);

	if (buf->used) {
		buf_priv->dispatched = 1;

		MGA_EMIT_STATE(dev_priv, sarea_priv->dirty);

		do {
			if (i < sarea_priv->nbox) {
				mga_emit_clip_rect(dev_priv,
						   &sarea_priv->boxes[i]);
			}

			BEGIN_DMA(1);

			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
				  MGA_DMAPAD, 0x00000000,
				  MGA_SECADDRESS, (address |
						   MGA_DMA_VERTEX),
				  MGA_SECEND, ((address + length) |
					       dev_priv->dma_access));

			ADVANCE_DMA();
		} while (++i < sarea_priv->nbox);
	}

	if (buf_priv->discard) {
		AGE_BUFFER(buf_priv);
		buf->pending = 0;
		buf->used = 0;
		buf_priv->dispatched = 0;

		mga_freelist_put(dev, buf);
	}

	FLUSH_DMA();
}

static void mga_dma_dispatch_indices(struct drm_device *dev, struct drm_buf *buf,
				     unsigned int start, unsigned int end)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_mga_buf_priv_t *buf_priv = buf->dev_private;
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	u32 address = (u32) buf->bus_address;
	int i = 0;
	DMA_LOCALS;
	DRM_DEBUG("buf=%d start=%d end=%d\n", buf->idx, start, end);

	if (start != end) {
		buf_priv->dispatched = 1;

		MGA_EMIT_STATE(dev_priv, sarea_priv->dirty);

		do {
			if (i < sarea_priv->nbox) {
				mga_emit_clip_rect(dev_priv,
						   &sarea_priv->boxes[i]);
			}

			BEGIN_DMA(1);

			DMA_BLOCK(MGA_DMAPAD, 0x00000000,
				  MGA_DMAPAD, 0x00000000,
				  MGA_SETUPADDRESS, address + start,
				  MGA_SETUPEND, ((address + end) |
						 dev_priv->dma_access));

			ADVANCE_DMA();
		} while (++i < sarea_priv->nbox);
	}

	if (buf_priv->discard) {
		AGE_BUFFER(buf_priv);
		buf->pending = 0;
		buf->used = 0;
		buf_priv->dispatched = 0;

		mga_freelist_put(dev, buf);
	}

	FLUSH_DMA();
}

/* This copies a 64 byte aligned AGP region to the framebuffer with a
 * standard blit; the ioctl needs to do the checking.
 */
static void mga_dma_dispatch_iload(struct drm_device *dev, struct drm_buf *buf,
				   unsigned int dstorg, unsigned int length)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_mga_buf_priv_t *buf_priv = buf->dev_private;
	drm_mga_context_regs_t *ctx = &dev_priv->sarea_priv->context_state;
	u32 srcorg =
	    buf->bus_address | dev_priv->dma_access | MGA_SRCMAP_SYSMEM;
	u32 y2;
	DMA_LOCALS;
	DRM_DEBUG("buf=%d used=%d\n", buf->idx, buf->used);

	y2 = length / 64;

	BEGIN_DMA(5);

	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000,
		  MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000);

	DMA_BLOCK(MGA_DSTORG, dstorg,
		  MGA_MACCESS, 0x00000000, MGA_SRCORG, srcorg, MGA_AR5, 64);

	DMA_BLOCK(MGA_PITCH, 64,
		  MGA_PLNWT, 0xffffffff,
		  MGA_DMAPAD, 0x00000000, MGA_DWGCTL, MGA_DWGCTL_COPY);

	DMA_BLOCK(MGA_AR0, 63,
		  MGA_AR3, 0,
		  MGA_FXBNDRY, (63 << 16) | 0, MGA_YDSTLEN + MGA_EXEC, y2);

	DMA_BLOCK(MGA_PLNWT, ctx->plnwt,
		  MGA_SRCORG, dev_priv->front_offset,
		  MGA_PITCH, dev_priv->front_pitch, MGA_DWGSYNC, 0x00007000);

	ADVANCE_DMA();

	AGE_BUFFER(buf_priv);

	buf->pending = 0;
	buf->used = 0;
	buf_priv->dispatched = 0;

	mga_freelist_put(dev, buf);

	FLUSH_DMA();
}

static void mga_dma_dispatch_blit(struct drm_device *dev, drm_mga_blit_t *blit)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mga_context_regs_t *ctx = &sarea_priv->context_state;
	struct drm_clip_rect *pbox = sarea_priv->boxes;
	int nbox = sarea_priv->nbox;
	u32 scandir = 0, i;
	DMA_LOCALS;
	DRM_DEBUG("\n");

	BEGIN_DMA(4 + nbox);

	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000,
		  MGA_DWGSYNC, 0x00007100, MGA_DWGSYNC, 0x00007000);

	DMA_BLOCK(MGA_DWGCTL, MGA_DWGCTL_COPY,
		  MGA_PLNWT, blit->planemask,
		  MGA_SRCORG, blit->srcorg, MGA_DSTORG, blit->dstorg);

	DMA_BLOCK(MGA_SGN, scandir,
		  MGA_MACCESS, dev_priv->maccess,
		  MGA_AR5, blit->ydir * blit->src_pitch,
		  MGA_PITCH, blit->dst_pitch);

	for (i = 0; i < nbox; i++) {
		int srcx = pbox[i].x1 + blit->delta_sx;
		int srcy = pbox[i].y1 + blit->delta_sy;
		int dstx = pbox[i].x1 + blit->delta_dx;
		int dsty = pbox[i].y1 + blit->delta_dy;
		int h = pbox[i].y2 - pbox[i].y1;
		int w = pbox[i].x2 - pbox[i].x1 - 1;
		int start;

		if (blit->ydir == -1)
			srcy = blit->height - srcy - 1;

		start = srcy * blit->src_pitch + srcx;

		DMA_BLOCK(MGA_AR0, start + w,
			  MGA_AR3, start,
			  MGA_FXBNDRY, ((dstx + w) << 16) | (dstx & 0xffff),
			  MGA_YDSTLEN + MGA_EXEC, (dsty << 16) | h);
	}

	/* Do something to flush AGP?
	 */

	/* Force reset of DWGCTL */
	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
		  MGA_PLNWT, ctx->plnwt,
		  MGA_PITCH, dev_priv->front_pitch, MGA_DWGCTL, ctx->dwgctl);

	ADVANCE_DMA();
}

/* ================================================================
 * IOCTL functions
 */

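/* Each handler below runs with the DRM lock held (LOCK_TEST_WITH_RETURN),
 * clamps the SAREA clip-rectangle count where relevant, verifies the
 * submitted state or parameters where applicable, checks for a primary DMA
 * wrap with WRAP_TEST_WITH_RETURN, and then calls the matching dispatch
 * function above.  Handlers that overwrite the framebuffer mark the context
 * dirty so the 3D state is re-emitted on the next draw.
 */
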
static int mga_dma_clear(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mga_clear_t *clear = data;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
		sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;

	WRAP_TEST_WITH_RETURN(dev_priv);

	mga_dma_dispatch_clear(dev, clear);

	/* Make sure we restore the 3D state next time.
	 */
	dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT;

	return 0;
}

static int mga_dma_swap(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
		sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;

	WRAP_TEST_WITH_RETURN(dev_priv);

	mga_dma_dispatch_swap(dev);

	/* Make sure we restore the 3D state next time.
	 */
	dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT;

	return 0;
}

static int mga_dma_vertex(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_mga_buf_priv_t *buf_priv;
	drm_mga_vertex_t *vertex = data;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (vertex->idx < 0 || vertex->idx >= dma->buf_count)
		return -EINVAL;
	buf = dma->buflist[vertex->idx];
	buf_priv = buf->dev_private;

	buf->used = vertex->used;
	buf_priv->discard = vertex->discard;

	if (!mga_verify_state(dev_priv)) {
		if (vertex->discard) {
			if (buf_priv->dispatched == 1)
				AGE_BUFFER(buf_priv);
			buf_priv->dispatched = 0;
			mga_freelist_put(dev, buf);
		}
		return -EINVAL;
	}

	WRAP_TEST_WITH_RETURN(dev_priv);

	mga_dma_dispatch_vertex(dev, buf);

	return 0;
}

static int mga_dma_indices(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf *buf;
	drm_mga_buf_priv_t *buf_priv;
	drm_mga_indices_t *indices = data;

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (indices->idx < 0 || indices->idx >= dma->buf_count)
		return -EINVAL;

	buf = dma->buflist[indices->idx];
	buf_priv = buf->dev_private;

	buf_priv->discard = indices->discard;

	if (!mga_verify_state(dev_priv)) {
		if (indices->discard) {
			if (buf_priv->dispatched == 1)
				AGE_BUFFER(buf_priv);
			buf_priv->dispatched = 0;
			mga_freelist_put(dev, buf);
		}
		return -EINVAL;
	}

	WRAP_TEST_WITH_RETURN(dev_priv);

	mga_dma_dispatch_indices(dev, buf, indices->start, indices->end);

	return 0;
}

static int mga_dma_iload(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	drm_mga_private_t *dev_priv = dev->dev_private;
	struct drm_buf *buf;
	drm_mga_buf_priv_t *buf_priv;
	drm_mga_iload_t *iload = data;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

#if 0
	if (mga_do_wait_for_idle(dev_priv) < 0) {
		if (MGA_DMA_DEBUG)
			DRM_INFO("-EBUSY\n");
		return -EBUSY;
	}
#endif
	if (iload->idx < 0 || iload->idx >= dma->buf_count)
		return -EINVAL;

	buf = dma->buflist[iload->idx];
	buf_priv = buf->dev_private;

	if (mga_verify_iload(dev_priv, iload->dstorg, iload->length)) {
		mga_freelist_put(dev, buf);
		return -EINVAL;
	}

	WRAP_TEST_WITH_RETURN(dev_priv);

	mga_dma_dispatch_iload(dev, buf, iload->dstorg, iload->length);

	/* Make sure we restore the 3D state next time.
	 */
	dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT;

	return 0;
}

static int mga_dma_blit(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_mga_sarea_t *sarea_priv = dev_priv->sarea_priv;
	drm_mga_blit_t *blit = data;
	DRM_DEBUG("\n");

	LOCK_TEST_WITH_RETURN(dev, file_priv);

	if (sarea_priv->nbox > MGA_NR_SAREA_CLIPRECTS)
		sarea_priv->nbox = MGA_NR_SAREA_CLIPRECTS;

	if (mga_verify_blit(dev_priv, blit->srcorg, blit->dstorg))
		return -EINVAL;

	WRAP_TEST_WITH_RETURN(dev_priv);

	mga_dma_dispatch_blit(dev, blit);

	/* Make sure we restore the 3D state next time.
	 */
	dev_priv->sarea_priv->dirty |= MGA_UPLOAD_CONTEXT;

	return 0;
}

static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	drm_mga_getparam_t *param = data;
	int value;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);

	switch (param->param) {
	case MGA_PARAM_IRQ_NR:
		value = drm_dev_to_irq(dev);
		break;
	case MGA_PARAM_CARD_TYPE:
		value = dev_priv->chipset;
		break;
	default:
		return -EINVAL;
	}

	if (copy_to_user(param->value, &value, sizeof(int))) {
		DRM_ERROR("copy_to_user\n");
		return -EFAULT;
	}

	return 0;
}

static int mga_set_fence(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	u32 *fence = data;
	DMA_LOCALS;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);

	/* I would normally do this assignment in the declaration of fence,
	 * but dev_priv may be NULL.
	 */
	*fence = dev_priv->next_fence_to_post;
	dev_priv->next_fence_to_post++;

	BEGIN_DMA(1);
	DMA_BLOCK(MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000,
		  MGA_DMAPAD, 0x00000000, MGA_SOFTRAP, 0x00000000);
	ADVANCE_DMA();

	return 0;
}

static int mga_wait_fence(struct drm_device *dev, void *data,
			  struct drm_file *file_priv)
{
	drm_mga_private_t *dev_priv = dev->dev_private;
	u32 *fence = data;

	if (!dev_priv) {
		DRM_ERROR("called with no initialization\n");
		return -EINVAL;
	}

	DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);

	mga_driver_fence_wait(dev, fence);
	return 0;
}

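/* Ioctl dispatch table.  Every entry requires DRM_AUTH; the init and
 * bootstrap ioctls are additionally restricted to the DRM master and root.
 */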
const struct drm_ioctl_desc mga_ioctls[] = {
	DRM_IOCTL_DEF_DRV(MGA_INIT, mga_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(MGA_FLUSH, mga_dma_flush, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(MGA_RESET, mga_dma_reset, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(MGA_SWAP, mga_dma_swap, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(MGA_CLEAR, mga_dma_clear, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(MGA_VERTEX, mga_dma_vertex, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(MGA_INDICES, mga_dma_indices, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(MGA_ILOAD, mga_dma_iload, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(MGA_BLIT, mga_dma_blit, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(MGA_GETPARAM, mga_getparam, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(MGA_SET_FENCE, mga_set_fence, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(MGA_WAIT_FENCE, mga_wait_fence, DRM_AUTH),
	DRM_IOCTL_DEF_DRV(MGA_DMA_BOOTSTRAP, mga_dma_bootstrap, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
};

int mga_max_ioctl = DRM_ARRAY_SIZE(mga_ioctls);