/* gamma_dma.c -- DMA support for GMX 2000 -*- linux-c -*-
 * Created: Fri Mar 19 14:30:16 1999 by faith@precisioninsight.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *
 */

#include "gamma.h"
#include "drmP.h"
#include "drm.h"
#include "gamma_drm.h"
#include "gamma_drv.h"

#include <linux/interrupt.h>	/* For task queue support */
#include <linux/delay.h>

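/* Hand one buffer to the DMA engine: wait for two free input-FIFO slots,
 * load the transfer address, wait until GCommandStatus reads back 4 (the
 * value this driver treats as "engine ready"), then write the word count.
 * Writing DMACount is what actually starts the transfer. */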
static inline void gamma_dma_dispatch(drm_device_t *dev, unsigned long address,
				      unsigned long length)
{
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;
	mb();
	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
		cpu_relax();

	GAMMA_WRITE(GAMMA_DMAADDRESS, address);

	while (GAMMA_READ(GAMMA_GCOMMANDSTATUS) != 4)
		cpu_relax();

	GAMMA_WRITE(GAMMA_DMACOUNT, length / 4);
}

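/* Drain the pipeline on a single-rasterizer board: wait for any DMA in
 * flight to finish, set FilterMode to pass sync tags through, issue a
 * Sync command, then pull words from the output FIFO until the sync tag
 * comes back out. */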
void gamma_dma_quiescent_single(drm_device_t *dev)
{
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;
	while (GAMMA_READ(GAMMA_DMACOUNT))
		cpu_relax();

	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
		cpu_relax();

	GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
	GAMMA_WRITE(GAMMA_SYNC, 0);

	do {
		while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
			cpu_relax();
	} while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);
}

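/* Same idea for a dual-MX board: broadcast the sync to both rasterizers
 * (BroadcastMask = 3) and wait for the tag to emerge from each one.  The
 * 0x10000 register offset in the second loop is, as the reads below
 * assume, how this driver addresses the second MX's output FIFO. */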
void gamma_dma_quiescent_dual(drm_device_t *dev)
{
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;
	while (GAMMA_READ(GAMMA_DMACOUNT))
		cpu_relax();

	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
		cpu_relax();

	GAMMA_WRITE(GAMMA_BROADCASTMASK, 3);
	GAMMA_WRITE(GAMMA_FILTERMODE, 1 << 10);
	GAMMA_WRITE(GAMMA_SYNC, 0);

	/* Read from first MX */
	do {
		while (!GAMMA_READ(GAMMA_OUTFIFOWORDS))
			cpu_relax();
	} while (GAMMA_READ(GAMMA_OUTPUTFIFO) != GAMMA_SYNC_TAG);

	/* Read from second MX */
	do {
		while (!GAMMA_READ(GAMMA_OUTFIFOWORDS + 0x10000))
			cpu_relax();
	} while (GAMMA_READ(GAMMA_OUTPUTFIFO + 0x10000) != GAMMA_SYNC_TAG);
}

void gamma_dma_ready(drm_device_t *dev)
{
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;
	while (GAMMA_READ(GAMMA_DMACOUNT))
		cpu_relax();
}

static inline int gamma_dma_is_ready(drm_device_t *dev)
{
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;
	return (!GAMMA_READ(GAMMA_DMACOUNT));
}

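/* Interrupt handler: re-arm the delay timer and acknowledge the command
 * and general interrupt flags (the same sources postinstall enables),
 * then, if the engine has gone idle, free the buffer that just completed
 * and kick the work queue to dispatch the next one.  dev->dma_flag
 * serializes the buffer freeing here with gamma_do_dma(). */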
irqreturn_t gamma_driver_irq_handler( DRM_IRQ_ARGS )
{
	drm_device_t *dev = (drm_device_t *)arg;
	drm_device_dma_t *dma = dev->dma;
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;

	/* FIXME: should check whether we're actually interested in the interrupt? */
	atomic_inc(&dev->counts[6]); /* _DRM_STAT_IRQ */

	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
		cpu_relax();

	GAMMA_WRITE(GAMMA_GDELAYTIMER, 0xc350/2); /* ~0.05 s */
	GAMMA_WRITE(GAMMA_GCOMMANDINTFLAGS, 8);
	GAMMA_WRITE(GAMMA_GINTFLAGS, 0x2001);
	if (gamma_dma_is_ready(dev)) {
		/* Free previous buffer */
		if (test_and_set_bit(0, &dev->dma_flag))
			return IRQ_HANDLED;
		if (dma->this_buffer) {
			gamma_free_buffer(dev, dma->this_buffer);
			dma->this_buffer = NULL;
		}
		clear_bit(0, &dev->dma_flag);

		/* Dispatch new buffer */
		schedule_work(&dev->work);
	}
	return IRQ_HANDLED;
}

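/* Core dispatch path: takes dma->next_buffer and hands it to the engine.
 * A single bit in dev->dma_flag acts as a trylock so the IRQ handler and
 * the scheduler never dispatch concurrently; every early exit below must
 * clear it again. */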
/* Only called by gamma_dma_schedule. */
static int gamma_do_dma(drm_device_t *dev, int locked)
{
	unsigned long address;
	unsigned long length;
	drm_buf_t *buf;
	int retcode = 0;
	drm_device_dma_t *dma = dev->dma;

	if (test_and_set_bit(0, &dev->dma_flag)) return -EBUSY;

	if (!dma->next_buffer) {
		DRM_ERROR("No next_buffer\n");
		clear_bit(0, &dev->dma_flag);
		return -EINVAL;
	}

	buf = dma->next_buffer;
	/* WE NOW ARE ON LOGICAL PAGES!! - using page table setup in dma_init */
	/* So we pass the buffer index value into the physical page offset */
	address = buf->idx << 12;
	length = buf->used;

	DRM_DEBUG("context %d, buffer %d (%ld bytes)\n",
		  buf->context, buf->idx, length);

	if (buf->list == DRM_LIST_RECLAIM) {
		gamma_clear_next_buffer(dev);
		gamma_free_buffer(dev, buf);
		clear_bit(0, &dev->dma_flag);
		return -EINVAL;
	}

	if (!length) {
		DRM_ERROR("0 length buffer\n");
		gamma_clear_next_buffer(dev);
		gamma_free_buffer(dev, buf);
		clear_bit(0, &dev->dma_flag);
		return 0;
	}

	if (!gamma_dma_is_ready(dev)) {
		clear_bit(0, &dev->dma_flag);
		return -EBUSY;
	}

	if (buf->while_locked) {
		if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock)) {
			DRM_ERROR("Dispatching buffer %d from pid %d"
				  " \"while locked\", but no lock held\n",
				  buf->idx, current->pid);
		}
	} else {
		if (!locked && !gamma_lock_take(&dev->lock.hw_lock->lock,
						DRM_KERNEL_CONTEXT)) {
			clear_bit(0, &dev->dma_flag);
			return -EBUSY;
		}
	}

	if (dev->last_context != buf->context
	    && !(dev->queuelist[buf->context]->flags
		 & _DRM_CONTEXT_PRESERVED)) {
		/* PRE: dev->last_context != buf->context */
		if (DRM(context_switch)(dev, dev->last_context,
					buf->context)) {
			DRM(clear_next_buffer)(dev);
			DRM(free_buffer)(dev, buf);
		}
		retcode = -EBUSY;
		goto cleanup;

		/* POST: we will wait for the context
		   switch and will dispatch on a later call
		   when dev->last_context == buf->context.
		   NOTE WE HOLD THE LOCK THROUGHOUT THIS
		   TIME! */
	}

	gamma_clear_next_buffer(dev);
	buf->pending = 1;
	buf->waiting = 0;
	buf->list = DRM_LIST_PEND;

	/* WE NOW ARE ON LOGICAL PAGES!!! - overriding address */
	address = buf->idx << 12;

	gamma_dma_dispatch(dev, address, length);
	gamma_free_buffer(dev, dma->this_buffer);
	dma->this_buffer = buf;

	atomic_inc(&dev->counts[7]); /* _DRM_STAT_DMA */
	atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */

	if (!buf->while_locked && !dev->context_flag && !locked) {
		if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
				    DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("\n");
		}
	}
cleanup:
	clear_bit(0, &dev->dma_flag);

	return retcode;
}

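/* Two thin bottom-half entry points into the scheduler: one for the DRM
 * queue timer, one for the immediate work queue kicked from the IRQ
 * handler. */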
static void gamma_dma_timer_bh(unsigned long dev)
{
	gamma_dma_schedule((drm_device_t *)dev, 0);
}

void gamma_irq_immediate_bh(void *dev)
{
	gamma_dma_schedule(dev, 0);
}

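/* Pick the next queue/buffer and try to dispatch it.  interrupt_flag
 * makes this non-reentrant: if another path got here first we just bump
 * the MISSED counter, and the current holder re-runs its loop (the
 * "again" label) when it notices the missed count changed. */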
int gamma_dma_schedule(drm_device_t *dev, int locked)
{
	int next;
	drm_queue_t *q;
	drm_buf_t *buf;
	int retcode = 0;
	int processed = 0;
	int missed;
	int expire = 20;
	drm_device_dma_t *dma = dev->dma;

	if (test_and_set_bit(0, &dev->interrupt_flag)) {
		/* Not reentrant */
		atomic_inc(&dev->counts[10]); /* _DRM_STAT_MISSED */
		return -EBUSY;
	}
	missed = atomic_read(&dev->counts[10]);

again:
	if (dev->context_flag) {
		clear_bit(0, &dev->interrupt_flag);
		return -EBUSY;
	}
	if (dma->next_buffer) {
		/* Unsent buffer that was previously
		   selected, but that couldn't be sent
		   because the lock could not be obtained
		   or the DMA engine wasn't ready.  Try
		   again. */
		if (!(retcode = gamma_do_dma(dev, locked))) ++processed;
	} else {
		do {
			next = gamma_select_queue(dev, gamma_dma_timer_bh);
			if (next >= 0) {
				q = dev->queuelist[next];
				buf = gamma_waitlist_get(&q->waitlist);
				dma->next_buffer = buf;
				dma->next_queue = q;
				if (buf && buf->list == DRM_LIST_RECLAIM) {
					gamma_clear_next_buffer(dev);
					gamma_free_buffer(dev, buf);
				}
			}
		} while (next >= 0 && !dma->next_buffer);
		if (dma->next_buffer) {
			if (!(retcode = gamma_do_dma(dev, locked))) {
				++processed;
			}
		}
	}

	if (--expire) {
		if (missed != atomic_read(&dev->counts[10])) {
			if (gamma_dma_is_ready(dev)) goto again;
		}
		if (processed && gamma_dma_is_ready(dev)) {
			processed = 0;
			goto again;
		}
	}

	clear_bit(0, &dev->interrupt_flag);

	return retcode;
}

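/* Priority DMA: bypass the scheduler and dispatch the caller's buffers
 * synchronously, in order.  Interrupt-driven dispatch is parked (by
 * holding interrupt_flag for the duration) and the hardware lock is
 * taken unless the caller passed _DRM_DMA_WHILE_LOCKED. */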
static int gamma_dma_priority(struct file *filp,
			      drm_device_t *dev, drm_dma_t *d)
{
	unsigned long address;
	unsigned long length;
	int must_free = 0;
	int retcode = 0;
	int i;
	int idx;
	drm_buf_t *buf;
	drm_buf_t *last_buf = NULL;
	drm_device_dma_t *dma = dev->dma;
	int *send_indices = NULL;
	int *send_sizes = NULL;

	DECLARE_WAITQUEUE(entry, current);

				/* Turn off interrupt handling */
	while (test_and_set_bit(0, &dev->interrupt_flag)) {
		schedule();
		if (signal_pending(current)) return -EINTR;
	}
	if (!(d->flags & _DRM_DMA_WHILE_LOCKED)) {
		while (!gamma_lock_take(&dev->lock.hw_lock->lock,
					DRM_KERNEL_CONTEXT)) {
			schedule();
			if (signal_pending(current)) {
				clear_bit(0, &dev->interrupt_flag);
				return -EINTR;
			}
		}
		++must_free;
	}

	send_indices = DRM(alloc)(d->send_count * sizeof(*send_indices),
				  DRM_MEM_DRIVER);
	if (send_indices == NULL) {
		retcode = -ENOMEM;
		goto cleanup;
	}
	if (copy_from_user(send_indices, d->send_indices,
			   d->send_count * sizeof(*send_indices))) {
		retcode = -EFAULT;
		goto cleanup;
	}

	send_sizes = DRM(alloc)(d->send_count * sizeof(*send_sizes),
				DRM_MEM_DRIVER);
	if (send_sizes == NULL) {
		retcode = -ENOMEM;
		goto cleanup;
	}
	if (copy_from_user(send_sizes, d->send_sizes,
			   d->send_count * sizeof(*send_sizes))) {
		retcode = -EFAULT;
		goto cleanup;
	}

	for (i = 0; i < d->send_count; i++) {
		idx = send_indices[i];
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  send_indices[i], dma->buf_count - 1);
			continue;
		}
		buf = dma->buflist[ idx ];
		if (buf->filp != filp) {
			DRM_ERROR("Process %d using buffer not owned\n",
				  current->pid);
			retcode = -EINVAL;
			goto cleanup;
		}
		if (buf->list != DRM_LIST_NONE) {
			DRM_ERROR("Process %d using buffer on list %d\n",
				  current->pid, buf->list);
			retcode = -EINVAL;
			goto cleanup;
		}
				/* This isn't a race condition on
				   buf->list, since our concern is the
				   buffer reclaim during the time the
				   process closes the /dev/drm? handle, so
				   it can't also be doing DMA. */
		buf->list	  = DRM_LIST_PRIO;
		buf->used	  = send_sizes[i];
		buf->context	  = d->context;
		buf->while_locked = d->flags & _DRM_DMA_WHILE_LOCKED;
		address		  = (unsigned long)buf->address;
		length		  = buf->used;
		if (!length) {
			DRM_ERROR("0 length buffer\n");
		}
		if (buf->pending) {
			DRM_ERROR("Sending pending buffer:"
				  " buffer %d, offset %d\n",
				  send_indices[i], i);
			retcode = -EINVAL;
			goto cleanup;
		}
		if (buf->waiting) {
			DRM_ERROR("Sending waiting buffer:"
				  " buffer %d, offset %d\n",
				  send_indices[i], i);
			retcode = -EINVAL;
			goto cleanup;
		}
		buf->pending = 1;

		if (dev->last_context != buf->context
		    && !(dev->queuelist[buf->context]->flags
			 & _DRM_CONTEXT_PRESERVED)) {
			add_wait_queue(&dev->context_wait, &entry);
			current->state = TASK_INTERRUPTIBLE;
			/* PRE: dev->last_context != buf->context */
			DRM(context_switch)(dev, dev->last_context,
					    buf->context);
			/* POST: we will wait for the context
			   switch and will dispatch on a later call
			   when dev->last_context == buf->context.
			   NOTE WE HOLD THE LOCK THROUGHOUT THIS
			   TIME! */
			schedule();
			current->state = TASK_RUNNING;
			remove_wait_queue(&dev->context_wait, &entry);
			if (signal_pending(current)) {
				retcode = -EINTR;
				goto cleanup;
			}
			if (dev->last_context != buf->context) {
				DRM_ERROR("Context mismatch: %d %d\n",
					  dev->last_context,
					  buf->context);
			}
		}

		gamma_dma_dispatch(dev, address, length);
		atomic_inc(&dev->counts[9]); /* _DRM_STAT_SPECIAL */
		atomic_add(length, &dev->counts[8]); /* _DRM_STAT_PRIMARY */

		if (last_buf) {
			gamma_free_buffer(dev, last_buf);
		}
		last_buf = buf;
	}

cleanup:
	if (last_buf) {
		gamma_dma_ready(dev);
		gamma_free_buffer(dev, last_buf);
	}
	if (send_indices)
		DRM(free)(send_indices, d->send_count * sizeof(*send_indices),
			  DRM_MEM_DRIVER);
	if (send_sizes)
		DRM(free)(send_sizes, d->send_count * sizeof(*send_sizes),
			  DRM_MEM_DRIVER);

	if (must_free && !dev->context_flag) {
		if (gamma_lock_free(dev, &dev->lock.hw_lock->lock,
				    DRM_KERNEL_CONTEXT)) {
			DRM_ERROR("\n");
		}
	}
	clear_bit(0, &dev->interrupt_flag);
	return retcode;
}

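/* Normal (queued) send path: enqueue the buffers, kick the scheduler,
 * and -- for _DRM_DMA_BLOCK -- sleep on the last buffer's wait queue
 * until it is neither waiting nor pending. */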
static int gamma_dma_send_buffers(struct file *filp,
				  drm_device_t *dev, drm_dma_t *d)
{
	DECLARE_WAITQUEUE(entry, current);
	drm_buf_t *last_buf = NULL;
	int retcode = 0;
	drm_device_dma_t *dma = dev->dma;
	int send_index;

	if (get_user(send_index, &d->send_indices[d->send_count-1]))
		return -EFAULT;

	if (d->flags & _DRM_DMA_BLOCK) {
		last_buf = dma->buflist[send_index];
		add_wait_queue(&last_buf->dma_wait, &entry);
	}

	if ((retcode = gamma_dma_enqueue(filp, d))) {
		if (d->flags & _DRM_DMA_BLOCK)
			remove_wait_queue(&last_buf->dma_wait, &entry);
		return retcode;
	}

	gamma_dma_schedule(dev, 0);

	if (d->flags & _DRM_DMA_BLOCK) {
		DRM_DEBUG("%d waiting\n", current->pid);
		for (;;) {
			current->state = TASK_INTERRUPTIBLE;
			if (!last_buf->waiting && !last_buf->pending)
				break; /* finished */
			schedule();
			if (signal_pending(current)) {
				retcode = -EINTR; /* Can't restart */
				break;
			}
		}
		current->state = TASK_RUNNING;
		DRM_DEBUG("%d running\n", current->pid);
		remove_wait_queue(&last_buf->dma_wait, &entry);
		if (!retcode
		    || (last_buf->list == DRM_LIST_PEND && !last_buf->pending)) {
			if (!waitqueue_active(&last_buf->dma_wait)) {
				gamma_free_buffer(dev, last_buf);
			}
		}
		if (retcode) {
			DRM_ERROR("ctx%d w%d p%d c%ld i%d l%d pid:%d\n",
				  d->context,
				  last_buf->waiting,
				  last_buf->pending,
				  (long)DRM_WAITCOUNT(dev, d->context),
				  last_buf->idx,
				  last_buf->list,
				  current->pid);
		}
	}
	return retcode;
}

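/* The DMA ioctl entry point: validate the counts from userspace, send
 * buffers by the priority or queued path, then optionally hand fresh
 * buffers back to the caller. */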
int gamma_dma(struct inode *inode, struct file *filp, unsigned int cmd,
	      unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_device_dma_t *dma = dev->dma;
	int retcode = 0;
	drm_dma_t __user *argp = (void __user *)arg;
	drm_dma_t d;

	if (copy_from_user(&d, argp, sizeof(d)))
		return -EFAULT;

	if (d.send_count < 0 || d.send_count > dma->buf_count) {
		DRM_ERROR("Process %d trying to send %d buffers (of %d max)\n",
			  current->pid, d.send_count, dma->buf_count);
		return -EINVAL;
	}

	if (d.request_count < 0 || d.request_count > dma->buf_count) {
		DRM_ERROR("Process %d trying to get %d buffers (of %d max)\n",
			  current->pid, d.request_count, dma->buf_count);
		return -EINVAL;
	}

	if (d.send_count) {
		if (d.flags & _DRM_DMA_PRIORITY)
			retcode = gamma_dma_priority(filp, dev, &d);
		else
			retcode = gamma_dma_send_buffers(filp, dev, &d);
	}

	d.granted_count = 0;

	if (!retcode && d.request_count) {
		retcode = gamma_dma_get_buffers(filp, &d);
	}

	DRM_DEBUG("%d returning, granted = %d\n",
		  current->pid, d.granted_count);
	if (copy_to_user(argp, &d, sizeof(d)))
		return -EFAULT;

	return retcode;
}

/* =============================================================
 * DMA initialization, cleanup
 */

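/* Build the Gamma page table: one entry per DMA buffer, stored in the
 * buffer at index GLINT_DRI_BUF_COUNT.  After this, dispatch works on
 * "logical pages", which is why the dispatch code above can use
 * buf->idx << 12 as the DMA address. */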
static int gamma_do_init_dma( drm_device_t *dev, drm_gamma_init_t *init )
{
	drm_gamma_private_t *dev_priv;
	drm_device_dma_t *dma = dev->dma;
	drm_buf_t *buf;
	int i;
	struct list_head *list;
	unsigned long *pgt;

	DRM_DEBUG( "%s\n", __FUNCTION__ );

	dev_priv = DRM(alloc)( sizeof(drm_gamma_private_t),
			       DRM_MEM_DRIVER );
	if ( !dev_priv )
		return -ENOMEM;

	dev->dev_private = (void *)dev_priv;

	memset( dev_priv, 0, sizeof(drm_gamma_private_t) );

	dev_priv->num_rast = init->num_rast;

	list_for_each(list, &dev->maplist->head) {
		drm_map_list_t *r_list = list_entry(list, drm_map_list_t, head);
		if( r_list->map &&
		    r_list->map->type == _DRM_SHM &&
		    r_list->map->flags & _DRM_CONTAINS_LOCK ) {
			dev_priv->sarea = r_list->map;
			break;
		}
	}

	dev_priv->mmio0 = drm_core_findmap(dev, init->mmio0);
	dev_priv->mmio1 = drm_core_findmap(dev, init->mmio1);
	dev_priv->mmio2 = drm_core_findmap(dev, init->mmio2);
	dev_priv->mmio3 = drm_core_findmap(dev, init->mmio3);

	dev_priv->sarea_priv = (drm_gamma_sarea_t *)
		((u8 *)dev_priv->sarea->handle +
		 init->sarea_priv_offset);

	if (init->pcimode) {
		buf = dma->buflist[GLINT_DRI_BUF_COUNT];
		pgt = buf->address;

		for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) {
			buf = dma->buflist[i];
			*pgt = virt_to_phys((void*)buf->address) | 0x07;
			pgt++;
		}

		buf = dma->buflist[GLINT_DRI_BUF_COUNT];
	} else {
		dev->agp_buffer_map = drm_core_findmap(dev, init->buffers_offset);
		drm_core_ioremap( dev->agp_buffer_map, dev );

		buf = dma->buflist[GLINT_DRI_BUF_COUNT];
		pgt = buf->address;

		for (i = 0; i < GLINT_DRI_BUF_COUNT; i++) {
			buf = dma->buflist[i];
			*pgt = (unsigned long)buf->address + 0x07;
			pgt++;
		}

		buf = dma->buflist[GLINT_DRI_BUF_COUNT];

		while (GAMMA_READ(GAMMA_INFIFOSPACE) < 1);
		GAMMA_WRITE( GAMMA_GDMACONTROL, 0xe );
	}
	while (GAMMA_READ(GAMMA_INFIFOSPACE) < 2);
	GAMMA_WRITE( GAMMA_PAGETABLEADDR, virt_to_phys((void*)buf->address) );
	GAMMA_WRITE( GAMMA_PAGETABLELENGTH, 2 );

	return 0;
}

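/* Tear down in the reverse order of init.  Safe to call even if init
 * never ran, since everything is guarded by dev->dev_private. */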
int gamma_do_cleanup_dma( drm_device_t *dev )
{
	DRM_DEBUG( "%s\n", __FUNCTION__ );

	/* Make sure interrupts are disabled here because the uninstall ioctl
	 * may not have been called from userspace and after dev_private
	 * is freed, it's too late.
	 */
	if (drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
		if ( dev->irq_enabled )
			DRM(irq_uninstall)(dev);

	if ( dev->dev_private ) {

		if ( dev->agp_buffer_map != NULL )
			drm_core_ioremapfree( dev->agp_buffer_map, dev );

		DRM(free)( dev->dev_private, sizeof(drm_gamma_private_t),
			   DRM_MEM_DRIVER );
		dev->dev_private = NULL;
	}

	return 0;
}

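/* ioctl wrapper that multiplexes init/cleanup on init.func. */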
int gamma_dma_init( struct inode *inode, struct file *filp,
		    unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_gamma_init_t init;

	LOCK_TEST_WITH_RETURN( dev, filp );

	if ( copy_from_user( &init, (drm_gamma_init_t __user *)arg, sizeof(init) ) )
		return -EFAULT;

	switch ( init.func ) {
	case GAMMA_INIT_DMA:
		return gamma_do_init_dma( dev, &init );
	case GAMMA_CLEANUP_DMA:
		return gamma_do_cleanup_dma( dev );
	}

	return -EINVAL;
}

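/* Screen-to-screen copy by way of a DRM_RESTRICTED staging buffer:
 * read the source rectangle into the buffer, then write it back out to
 * the destination.  The command stream below is compiled out (#if 0)
 * and was never wired up to the dispatcher -- note the "need to
 * dispatch it now" comment at the end. */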
static int gamma_do_copy_dma( drm_device_t *dev, drm_gamma_copy_t *copy )
{
	drm_device_dma_t *dma = dev->dma;
	unsigned int *screenbuf;

	DRM_DEBUG( "%s\n", __FUNCTION__ );

	/* We've DRM_RESTRICTED this DMA buffer */

	screenbuf = dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ]->address;

#if 0
	*buffer++ = 0x180;	/* Tag (FilterMode) */
	*buffer++ = 0x200;	/* Allow FBColor through */
	*buffer++ = 0x53B;	/* Tag */
	*buffer++ = copy->Pitch;
	*buffer++ = 0x53A;	/* Tag */
	*buffer++ = copy->SrcAddress;
	*buffer++ = 0x539;	/* Tag */
	*buffer++ = copy->WidthHeight; /* Initiates transfer */
	*buffer++ = 0x53C;	/* Tag - DMAOutputAddress */
	*buffer++ = virt_to_phys((void*)screenbuf);
	*buffer++ = 0x53D;	/* Tag - DMAOutputCount */
	*buffer++ = copy->Count; /* Reads HostOutFifo BLOCKS until .. */

	/* Data now sitting in dma->buflist[ GLINT_DRI_BUF_COUNT + 1 ] */
	/* Now put it back to the screen */

	*buffer++ = 0x180;	/* Tag (FilterMode) */
	*buffer++ = 0x400;	/* Allow Sync through */
	*buffer++ = 0x538;	/* Tag - DMARectangleReadTarget */
	*buffer++ = 0x155;	/* FBSourceData | count */
	*buffer++ = 0x537;	/* Tag */
	*buffer++ = copy->Pitch;
	*buffer++ = 0x536;	/* Tag */
	*buffer++ = copy->DstAddress;
	*buffer++ = 0x535;	/* Tag */
	*buffer++ = copy->WidthHeight; /* Initiates transfer */
	*buffer++ = 0x530;	/* Tag - DMAAddr */
	*buffer++ = virt_to_phys((void*)screenbuf);
	*buffer++ = 0x531;	/* Tag - DMACount */
	*buffer++ = copy->Count; /* initiates DMA transfer of color data */
#endif

	/* need to dispatch it now */

	return 0;
}

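/* ioctl wrapper for the copy path. */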
int gamma_dma_copy( struct inode *inode, struct file *filp,
		    unsigned int cmd, unsigned long arg )
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_gamma_copy_t copy;

	if ( copy_from_user( &copy, (drm_gamma_copy_t __user *)arg, sizeof(copy) ) )
		return -EFAULT;

	return gamma_do_copy_dma( dev, &copy );
}

/* =============================================================
 * Per Context SAREA Support
 */

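/* Look up the per-context SAREA map for a context ID and hand its
 * handle back to userspace.  struct_sem guards context_sareas. */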
int gamma_getsareactx(struct inode *inode, struct file *filp,
		      unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_ctx_priv_map_t __user *argp = (void __user *)arg;
	drm_ctx_priv_map_t request;
	drm_map_t *map;

	if (copy_from_user(&request, argp, sizeof(request)))
		return -EFAULT;

	down(&dev->struct_sem);
	if ((int)request.ctx_id >= dev->max_context) {
		up(&dev->struct_sem);
		return -EINVAL;
	}

	map = dev->context_sareas[request.ctx_id];
	up(&dev->struct_sem);

	request.handle = map->handle;
	if (copy_to_user(argp, &request, sizeof(request)))
		return -EFAULT;
	return 0;
}

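/* Reverse direction: resolve the userspace handle to a map by walking
 * the map list, then install that map as the context's SAREA. */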
int gamma_setsareactx(struct inode *inode, struct file *filp,
		      unsigned int cmd, unsigned long arg)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->dev;
	drm_ctx_priv_map_t request;
	drm_map_t *map = NULL;
	drm_map_list_t *r_list;
	struct list_head *list;

	if (copy_from_user(&request,
			   (drm_ctx_priv_map_t __user *)arg,
			   sizeof(request)))
		return -EFAULT;

	down(&dev->struct_sem);

	list_for_each(list, &dev->maplist->head) {
		r_list = list_entry(list, drm_map_list_t, head);
		if (r_list->map &&
		    r_list->map->handle == request.handle) break;
	}
	if (list == &(dev->maplist->head)) {
		up(&dev->struct_sem);
		return -EINVAL;
	}
	map = r_list->map;
	up(&dev->struct_sem);

	if (!map) return -EINVAL;

	down(&dev->struct_sem);
	if ((int)request.ctx_id >= dev->max_context) {
		up(&dev->struct_sem);
		return -EINVAL;
	}
	dev->context_sareas[request.ctx_id] = map;
	up(&dev->struct_sem);
	return 0;
}

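/* IRQ bring-up/teardown.  Pre-install quiesces DMA control, post-install
 * unmasks the interrupt sources the handler acknowledges (GIntFlags
 * 0x2001, GCommandIntFlags 8) and programs the delay timer, and
 * uninstall masks everything again. */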
void gamma_driver_irq_preinstall( drm_device_t *dev ) {
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;

	while(GAMMA_READ(GAMMA_INFIFOSPACE) < 2)
		cpu_relax();

	GAMMA_WRITE( GAMMA_GCOMMANDMODE,	0x00000004 );
	GAMMA_WRITE( GAMMA_GDMACONTROL,		0x00000000 );
}

void gamma_driver_irq_postinstall( drm_device_t *dev ) {
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;

	while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
		cpu_relax();

	GAMMA_WRITE( GAMMA_GINTENABLE,		0x00002001 );
	GAMMA_WRITE( GAMMA_COMMANDINTENABLE,	0x00000008 );
	GAMMA_WRITE( GAMMA_GDELAYTIMER,		0x00039090 );
}

void gamma_driver_irq_uninstall( drm_device_t *dev ) {
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;
	if (!dev_priv)
		return;

	while(GAMMA_READ(GAMMA_INFIFOSPACE) < 3)
		cpu_relax();

	GAMMA_WRITE( GAMMA_GDELAYTIMER,		0x00000000 );
	GAMMA_WRITE( GAMMA_COMMANDINTENABLE,	0x00000000 );
	GAMMA_WRITE( GAMMA_GINTENABLE,		0x00000000 );
}

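/* Glue into the DRM core: feature flags, the gamma-specific read/poll
 * fops, and the driver callbacks used by the generic code. */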
extern drm_ioctl_desc_t DRM(ioctls)[];

static int gamma_driver_preinit(drm_device_t *dev)
{
	/* reset the finish ioctl */
	DRM(ioctls)[DRM_IOCTL_NR(DRM_IOCTL_FINISH)].func = DRM(finish);
	return 0;
}

static void gamma_driver_pretakedown(drm_device_t *dev)
{
	gamma_do_cleanup_dma(dev);
}

static void gamma_driver_dma_ready(drm_device_t *dev)
{
	gamma_dma_ready(dev);
}

static int gamma_driver_dma_quiescent(drm_device_t *dev)
{
	drm_gamma_private_t *dev_priv =
		(drm_gamma_private_t *)dev->dev_private;
	if (dev_priv->num_rast == 2)
		gamma_dma_quiescent_dual(dev);
	else
		gamma_dma_quiescent_single(dev);
	return 0;
}

void gamma_driver_register_fns(drm_device_t *dev)
{
	dev->driver_features = DRIVER_USE_AGP | DRIVER_USE_MTRR |
			       DRIVER_PCI_DMA | DRIVER_HAVE_DMA |
			       DRIVER_HAVE_IRQ;
	DRM(fops).read = gamma_fops_read;
	DRM(fops).poll = gamma_fops_poll;
	dev->driver.preinit = gamma_driver_preinit;
	dev->driver.pretakedown = gamma_driver_pretakedown;
	dev->driver.dma_ready = gamma_driver_dma_ready;
	dev->driver.dma_quiescent = gamma_driver_dma_quiescent;
	dev->driver.dma_flush_block_and_flush = gamma_flush_block_and_flush;
	dev->driver.dma_flush_unblock = gamma_flush_unblock;
}