/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/vmalloc.h>
#include "drmP.h"
/**
 * Compute size order.  Returns the exponent of the smallest power of two
 * which is greater or equal to the given number.
 *
 * \param size size.
 * \return order.
 *
 * \todo Can be made faster.
 */
int drm_order( unsigned long size )
{
        int order;
        unsigned long tmp;

        for (order = 0, tmp = size >> 1; tmp; tmp >>= 1, order++)
                ;

        if (size & (size - 1))
                order++;

        return order;
}
EXPORT_SYMBOL(drm_order);
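/*
 * A minimal illustrative sketch of drm_order()'s behaviour (hypothetical
 * helper, compiled out): 1UL << drm_order(size) is the smallest power of two
 * that is greater than or equal to size.
 */
#if 0
static void drm_order_example(void)
{
        BUG_ON(drm_order(1) != 0);      /* 2^0 = 1 already covers 1 */
        BUG_ON(drm_order(4096) != 12);  /* 2^12 = 4096, an exact power of two */
        BUG_ON(drm_order(4097) != 13);  /* rounded up to the next power of two */
}
#endif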
/**
 * Used to allocate 32-bit handles for _DRM_SHM regions.
 * The 0x10000000 value is chosen to be out of the way of
 * FB/register and GART physical addresses.
 */
static unsigned int map32_handle = 0x10000000;
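/*
 * drm_addmap() below advances map32_handle by PAGE_SIZE for every _DRM_SHM
 * map while holding dev->struct_sem, so with 4 KiB pages the first
 * shared-memory map receives handle 0x10001000, the second 0x10002000, and
 * so on.
 */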
/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist.  Adds MTRR's where
 * applicable and if supported by the kernel.
 */
int drm_addmap(drm_device_t * dev, unsigned int offset,
               unsigned int size, drm_map_type_t type,
               drm_map_flags_t flags, drm_local_map_t ** map_ptr)
{
        drm_map_t *map;
        drm_map_list_t *list;
        drm_dma_handle_t *dmah;

        map = drm_alloc( sizeof(*map), DRM_MEM_MAPS );
        if ( !map )
                return -ENOMEM;

        map->offset = offset;
        map->size = size;
        map->flags = flags;
        map->type = type;

        /* Only allow shared memory to be removable since we only keep enough
         * book keeping information about shared memory to allow for removal
         * when processes fork.
         */
        if ( (map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM ) {
                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }
        DRM_DEBUG( "offset = 0x%08lx, size = 0x%08lx, type = %d\n",
                   map->offset, map->size, map->type );
        if ( (map->offset & (~PAGE_MASK)) || (map->size & (~PAGE_MASK)) ) {
                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }
        map->mtrr   = -1;
        map->handle = NULL;

        switch ( map->type ) {
        case _DRM_REGISTERS:
        case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__)
                if ( map->offset + map->size < map->offset ||
                     map->offset < virt_to_phys(high_memory) ) {
                        drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                        return -EINVAL;
                }
#endif
#ifdef __alpha__
                map->offset += dev->hose->mem_space->start;
#endif
                if (drm_core_has_MTRR(dev)) {
                        if ( map->type == _DRM_FRAME_BUFFER ||
                             (map->flags & _DRM_WRITE_COMBINING) ) {
                                map->mtrr = mtrr_add( map->offset, map->size,
                                                      MTRR_TYPE_WRCOMB, 1 );
                        }
                }
                if (map->type == _DRM_REGISTERS)
                        map->handle = drm_ioremap( map->offset, map->size,
                                                   dev );
                break;

        case _DRM_SHM:
                map->handle = vmalloc_32(map->size);
                DRM_DEBUG( "%lu %d %p\n",
                           map->size, drm_order( map->size ), map->handle );
                if ( !map->handle ) {
                        drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                        return -ENOMEM;
                }
                map->offset = (unsigned long)map->handle;
                if ( map->flags & _DRM_CONTAINS_LOCK ) {
                        /* Prevent a 2nd X Server from creating a 2nd lock */
                        if (dev->lock.hw_lock != NULL) {
                                vfree( map->handle );
                                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                                return -EBUSY;
                        }
                        dev->lock.hw_lock = map->handle; /* Pointer to lock */
                }
                break;
        case _DRM_AGP:
                if (drm_core_has_AGP(dev)) {
#ifdef __alpha__
                        map->offset += dev->hose->mem_space->start;
#endif
                        map->offset += dev->agp->base;
                        map->mtrr = dev->agp->agp_mtrr; /* for getmap */
                }
                break;
        case _DRM_SCATTER_GATHER:
                if (!dev->sg) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -EINVAL;
                }
                map->offset += dev->sg->handle;
                break;
        case _DRM_CONSISTENT:
                /* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G,
                 * As we're limiting the address to 2^32-1 (or less),
                 * casting it down to 32 bits is no problem, but we
                 * need to point to a 64bit variable first. */
                dmah = drm_pci_alloc(dev, map->size, map->size, 0xffffffffUL);
                if (!dmah) {
                        drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                        return -ENOMEM;
                }
                map->handle = dmah->vaddr;
                map->offset = (unsigned long)dmah->busaddr;
                kfree(dmah);
                break;
        default:
                drm_free( map, sizeof(*map), DRM_MEM_MAPS );
                return -EINVAL;
        }

        list = drm_alloc(sizeof(*list), DRM_MEM_MAPS);
        if (!list) {
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
                return -EINVAL;
        }
        memset(list, 0, sizeof(*list));
        list->map = map;

        down(&dev->struct_sem);
        list_add(&list->head, &dev->maplist->head);
        /* Assign a 32-bit handle for _DRM_SHM mappings */
        /* We do it here so that dev->struct_sem protects the increment */
        if (map->type == _DRM_SHM)
                map->offset = map32_handle += PAGE_SIZE;
        up(&dev->struct_sem);

        *map_ptr = map;
        return 0;
}
EXPORT_SYMBOL(drm_addmap);
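/*
 * Illustrative sketch of a caller (hypothetical driver code, compiled out):
 * how a driver might register its MMIO BAR and a shared-memory area holding
 * the hardware lock.  pci_resource_start()/pci_resource_len() are the usual
 * way to obtain the physical range; the 32 KiB SAREA size is an arbitrary
 * example value.
 */
#if 0
static int example_map_setup(drm_device_t *dev, struct pci_dev *pdev)
{
        drm_local_map_t *regs, *sarea;
        int ret;

        /* MMIO registers: physical BAR 0, read-only for clients */
        ret = drm_addmap(dev, pci_resource_start(pdev, 0),
                         pci_resource_len(pdev, 0),
                         _DRM_REGISTERS, _DRM_READ_ONLY, &regs);
        if (ret)
                return ret;

        /* Shared memory area containing the hardware lock */
        return drm_addmap(dev, 0, 32 * 1024, _DRM_SHM,
                          _DRM_CONTAINS_LOCK, &sarea);
}
#endif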
int drm_addmap_ioctl(struct inode *inode, struct file *filp,
                     unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_map_t map;
        drm_map_t *map_ptr;
        drm_map_t __user *argp = (void __user *)arg;
        int err;

        if (!(filp->f_mode & 3))
                return -EACCES; /* Require read/write */

        if (copy_from_user(&map, argp, sizeof(map))) {
                return -EFAULT;
        }

        err = drm_addmap( dev, map.offset, map.size, map.type, map.flags,
                          &map_ptr );
        if (err) {
                return err;
        }

        if (copy_to_user(argp, map_ptr, sizeof(*map_ptr)))
                return -EFAULT;
        if (map_ptr->type != _DRM_SHM) {
                if (copy_to_user(&argp->handle, &map_ptr->offset,
                                 sizeof(map_ptr->offset)))
                        return -EFAULT;
        }
        return 0;
}
/**
 * Removes a map from the list and deallocates its resources if the mapping
 * isn't in use.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_map_t structure.
 * \return zero on success or a negative value on error.
 *
 * Searches for the map on drm_device::maplist, removes it from the list, sees
 * if it's being used, and frees any associated resources (such as MTRR's) if
 * it's not being used.
 */
int drm_rmmap(drm_device_t *dev, void *handle)
{
        struct list_head *list;
        drm_map_list_t *r_list = NULL;
        drm_vma_entry_t *pt, *prev;
        drm_map_t *map;
        int found_maps = 0;

        down(&dev->struct_sem);
        list = &dev->maplist->head;
        list_for_each(list, &dev->maplist->head) {
                r_list = list_entry(list, drm_map_list_t, head);

                if (r_list->map &&
                    r_list->map->handle == handle &&
                    r_list->map->flags & _DRM_REMOVABLE) break;
        }

        /* List has wrapped around to the head pointer, or it's empty and we
         * didn't find anything.
         */
        if (list == (&dev->maplist->head)) {
                up(&dev->struct_sem);
                return -EINVAL;
        }
        map = r_list->map;
        list_del(list);
        drm_free(list, sizeof(*list), DRM_MEM_MAPS);

        for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
                if (pt->vma->vm_private_data == map) found_maps++;
        }

        if (!found_maps) {
                drm_dma_handle_t dmah;

                switch (map->type) {
                case _DRM_REGISTERS:
                case _DRM_FRAME_BUFFER:
                        if (drm_core_has_MTRR(dev)) {
                                if (map->mtrr >= 0) {
                                        int retcode;
                                        retcode = mtrr_del(map->mtrr,
                                                           map->offset,
                                                           map->size);
                                        DRM_DEBUG("mtrr_del = %d\n", retcode);
                                }
                        }
                        drm_ioremapfree(map->handle, map->size, dev);
                        break;
                case _DRM_SHM:
                        vfree(map->handle);
                        break;
                case _DRM_AGP:
                case _DRM_SCATTER_GATHER:
                        break;
                case _DRM_CONSISTENT:
                        dmah.vaddr = map->handle;
                        dmah.busaddr = map->offset;
                        dmah.size = map->size;
                        __drm_pci_free(dev, &dmah);
                        break;
                }
                drm_free(map, sizeof(*map), DRM_MEM_MAPS);
        }
        up(&dev->struct_sem);
        return 0;
}
EXPORT_SYMBOL(drm_rmmap);
int drm_rmmap_ioctl(struct inode *inode, struct file *filp,
                    unsigned int cmd, unsigned long arg)
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_map_t request;

        if (copy_from_user(&request, (drm_map_t __user *)arg, sizeof(request))) {
                return -EFAULT;
        }

        return drm_rmmap(dev, request.handle);
}
/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(drm_device_t *dev, drm_buf_entry_t *entry)
{
        int i;

        if (entry->seg_count) {
                for (i = 0; i < entry->seg_count; i++) {
                        if (entry->seglist[i]) {
                                drm_free_pages(entry->seglist[i],
                                               entry->page_order,
                                               DRM_MEM_DMA);
                        }
                }
                drm_free(entry->seglist,
                         entry->seg_count *
                         sizeof(*entry->seglist),
                         DRM_MEM_SEGS);

                entry->seg_count = 0;
        }

        if (entry->buf_count) {
                for (i = 0; i < entry->buf_count; i++) {
                        if (entry->buflist[i].dev_private) {
                                drm_free(entry->buflist[i].dev_private,
                                         entry->buflist[i].dev_priv_size,
                                         DRM_MEM_BUFS);
                        }
                }
                drm_free(entry->buflist,
                         entry->buf_count *
                         sizeof(*entry->buflist),
                         DRM_MEM_BUFS);

                entry->buf_count = 0;
        }
}
#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev drm_device_t to which the buffers are to be added.
 * \param request pointer to a drm_buf_desc_t describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
static int drm_addbufs_agp(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if ( !dma ) return -EINVAL;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment = (request->flags & _DRM_PAGE_ALIGN)
                ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = dev->agp->base + request->agp_start;

        DRM_DEBUG( "count:      %d\n",  count );
        DRM_DEBUG( "order:      %d\n",  order );
        DRM_DEBUG( "size:       %d\n",  size );
        DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
        DRM_DEBUG( "alignment:  %d\n",  alignment );
        DRM_DEBUG( "page_order: %d\n",  page_order );
        DRM_DEBUG( "total:      %d\n",  total );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
                                    DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while ( entry->buf_count < count ) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next    = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head( &buf->dma_wait );
                buf->filp    = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = drm_alloc( buf->dev_priv_size,
                                              DRM_MEM_BUFS );
                if (!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }
                memset( buf->dev_private, 0, buf->dev_priv_size );

                DRM_DEBUG( "buffer %d @ %p\n",
                           entry->buf_count, buf->address );

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = drm_realloc( dma->buflist,
                                    dma->buf_count * sizeof(*dma->buflist),
                                    (dma->buf_count + entry->buf_count)
                                    * sizeof(*dma->buflist),
                                    DRM_MEM_BUFS );
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

        up( &dev->struct_sem );

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_AGP;

        atomic_dec( &dev->buf_alloc );
        return 0;
}
#endif /* __OS_HAS_AGP */
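/*
 * Worked example of the arithmetic in drm_addbufs_agp() above (illustrative,
 * assuming 4 KiB pages): for request->size = 64 KiB, drm_order() gives
 * order = 16, so size = 64 KiB, page_order = 16 - PAGE_SHIFT = 4 and
 * alignment = 64 KiB (already page aligned).  Buffer i then gets
 * bus_address = agp_offset + i * 64 KiB, and byte_count grows by
 * PAGE_SIZE << page_order = 64 KiB per buffer, matching the aperture space
 * each buffer occupies.
 */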
static int drm_addbufs_pci(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        int count;
        int order;
        int size;
        int total;
        int page_order;
        drm_buf_entry_t *entry;
        unsigned long page;
        drm_buf_t *buf;
        int alignment;
        unsigned long offset;
        int i;
        int byte_count;
        int page_count;
        unsigned long *temp_pagelist;
        drm_buf_t **temp_buflist;

        if (!drm_core_check_feature(dev, DRIVER_PCI_DMA)) return -EINVAL;
        if ( !dma ) return -EINVAL;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        DRM_DEBUG( "count=%d, size=%d (%d), order=%d, queue_count=%d\n",
                   request->count, request->size, size,
                   order, dev->queue_count );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        alignment = (request->flags & _DRM_PAGE_ALIGN)
                ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
                                    DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->seglist = drm_alloc( count * sizeof(*entry->seglist),
                                    DRM_MEM_SEGS );
        if ( !entry->seglist ) {
                drm_free( entry->buflist,
                          count * sizeof(*entry->buflist),
                          DRM_MEM_BUFS );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->seglist, 0, count * sizeof(*entry->seglist) );

        /* Keep the original pagelist until we know all the allocations
         * have succeeded
         */
        temp_pagelist = drm_alloc( (dma->page_count + (count << page_order))
                                   * sizeof(*dma->pagelist),
                                   DRM_MEM_PAGES );
        if (!temp_pagelist) {
                drm_free( entry->buflist,
                          count * sizeof(*entry->buflist),
                          DRM_MEM_BUFS );
                drm_free( entry->seglist,
                          count * sizeof(*entry->seglist),
                          DRM_MEM_SEGS );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memcpy(temp_pagelist,
               dma->pagelist,
               dma->page_count * sizeof(*dma->pagelist));
        DRM_DEBUG( "pagelist: %d entries\n",
                   dma->page_count + (count << page_order) );

        entry->buf_size = size;
        entry->page_order = page_order;
        byte_count = 0;
        page_count = 0;

        while ( entry->buf_count < count ) {
                page = drm_alloc_pages( page_order, DRM_MEM_DMA );
                if ( !page ) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        entry->seg_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        drm_free( temp_pagelist,
                                  (dma->page_count + (count << page_order))
                                  * sizeof(*dma->pagelist),
                                  DRM_MEM_PAGES );
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }
                entry->seglist[entry->seg_count++] = page;
                for ( i = 0 ; i < (1 << page_order) ; i++ ) {
                        DRM_DEBUG( "page %d @ 0x%08lx\n",
                                   dma->page_count + page_count,
                                   page + PAGE_SIZE * i );
                        temp_pagelist[dma->page_count + page_count++]
                                = page + PAGE_SIZE * i;
                }
                for ( offset = 0 ;
                      offset + size <= total && entry->buf_count < count ;
                      offset += alignment, ++entry->buf_count ) {
                        buf          = &entry->buflist[entry->buf_count];
                        buf->idx     = dma->buf_count + entry->buf_count;
                        buf->total   = alignment;
                        buf->order   = order;
                        buf->used    = 0;
                        buf->offset  = (dma->byte_count + byte_count + offset);
                        buf->address = (void *)(page + offset);
                        buf->next    = NULL;
                        buf->waiting = 0;
                        buf->pending = 0;
                        init_waitqueue_head( &buf->dma_wait );
                        buf->filp    = NULL;

                        buf->dev_priv_size = dev->driver->dev_priv_size;
                        buf->dev_private = drm_alloc( buf->dev_priv_size,
                                                      DRM_MEM_BUFS );
                        if (!buf->dev_private) {
                                /* Set count correctly so we free the proper amount. */
                                entry->buf_count = count;
                                entry->seg_count = count;
                                drm_cleanup_buf_error(dev, entry);
                                drm_free( temp_pagelist,
                                          (dma->page_count + (count << page_order))
                                          * sizeof(*dma->pagelist),
                                          DRM_MEM_PAGES );
                                up( &dev->struct_sem );
                                atomic_dec( &dev->buf_alloc );
                                return -ENOMEM;
                        }
                        memset( buf->dev_private, 0, buf->dev_priv_size );

                        DRM_DEBUG( "buffer %d @ %p\n",
                                   entry->buf_count, buf->address );
                }
                byte_count += PAGE_SIZE << page_order;
        }

        temp_buflist = drm_realloc( dma->buflist,
                                    dma->buf_count * sizeof(*dma->buflist),
                                    (dma->buf_count + entry->buf_count)
                                    * sizeof(*dma->buflist),
                                    DRM_MEM_BUFS );
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                drm_free( temp_pagelist,
                          (dma->page_count + (count << page_order))
                          * sizeof(*dma->pagelist),
                          DRM_MEM_PAGES );
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        /* No allocations failed, so now we can replace the original pagelist
         * with the new one.
         */
        if (dma->page_count) {
                drm_free(dma->pagelist,
                         dma->page_count * sizeof(*dma->pagelist),
                         DRM_MEM_PAGES);
        }
        dma->pagelist = temp_pagelist;

        dma->buf_count += entry->buf_count;
        dma->seg_count += entry->seg_count;
        dma->page_count += entry->seg_count << page_order;
        dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

        up( &dev->struct_sem );

        request->count = entry->buf_count;
        request->size = size;

        atomic_dec( &dev->buf_alloc );
        return 0;
}
static int drm_addbufs_sg(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if (!drm_core_check_feature(dev, DRIVER_SG)) return -EINVAL;

        if ( !dma ) return -EINVAL;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment = (request->flags & _DRM_PAGE_ALIGN)
                ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request->agp_start;

        DRM_DEBUG( "count:      %d\n",  count );
        DRM_DEBUG( "order:      %d\n",  order );
        DRM_DEBUG( "size:       %d\n",  size );
        DRM_DEBUG( "agp_offset: %lu\n", agp_offset );
        DRM_DEBUG( "alignment:  %d\n",  alignment );
        DRM_DEBUG( "page_order: %d\n",  page_order );
        DRM_DEBUG( "total:      %d\n",  total );

        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        if ( dev->queue_count ) return -EBUSY; /* Not while in use */

        spin_lock( &dev->count_lock );
        if ( dev->buf_use ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        atomic_inc( &dev->buf_alloc );
        spin_unlock( &dev->count_lock );

        down( &dev->struct_sem );
        entry = &dma->bufs[order];
        if ( entry->buf_count ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -EINVAL;
        }

        entry->buflist = drm_alloc( count * sizeof(*entry->buflist),
                                    DRM_MEM_BUFS );
        if ( !entry->buflist ) {
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        memset( entry->buflist, 0, count * sizeof(*entry->buflist) );

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while ( entry->buf_count < count ) {
                buf          = &entry->buflist[entry->buf_count];
                buf->idx     = dma->buf_count + entry->buf_count;
                buf->total   = alignment;
                buf->order   = order;
                buf->used    = 0;

                buf->offset  = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset + dev->sg->handle);
                buf->next    = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head( &buf->dma_wait );
                buf->filp    = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = drm_alloc( buf->dev_priv_size,
                                              DRM_MEM_BUFS );
                if (!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        up( &dev->struct_sem );
                        atomic_dec( &dev->buf_alloc );
                        return -ENOMEM;
                }

                memset( buf->dev_private, 0, buf->dev_priv_size );

                DRM_DEBUG( "buffer %d @ %p\n",
                           entry->buf_count, buf->address );

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG( "byte_count: %d\n", byte_count );

        temp_buflist = drm_realloc( dma->buflist,
                                    dma->buf_count * sizeof(*dma->buflist),
                                    (dma->buf_count + entry->buf_count)
                                    * sizeof(*dma->buflist),
                                    DRM_MEM_BUFS );
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                up( &dev->struct_sem );
                atomic_dec( &dev->buf_alloc );
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for ( i = 0 ; i < entry->buf_count ; i++ ) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG( "dma->buf_count : %d\n", dma->buf_count );
        DRM_DEBUG( "entry->buf_count : %d\n", entry->buf_count );

        up( &dev->struct_sem );

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_SG;

        atomic_dec( &dev->buf_alloc );
        return 0;
}
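/*
 * Note on the scatter-gather addresses set up above: unlike drm_addbufs_agp(),
 * where buf->address is the aperture address agp_offset + offset, the SG path
 * also adds dev->sg->handle, the kernel-virtual base of the scatter-gather
 * area, so buf->address points into the kernel mapping while buf->bus_address
 * remains an offset relative to request->agp_start.
 */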
int drm_addbufs_fb(drm_device_t *dev, drm_buf_desc_t *request)
{
        drm_device_dma_t *dma = dev->dma;
        drm_buf_entry_t *entry;
        drm_buf_t *buf;
        unsigned long offset;
        unsigned long agp_offset;
        int count;
        int order;
        int size;
        int alignment;
        int page_order;
        int total;
        int byte_count;
        int i;
        drm_buf_t **temp_buflist;

        if (!drm_core_check_feature(dev, DRIVER_FB_DMA))
                return -EINVAL;

        if (!dma)
                return -EINVAL;

        count = request->count;
        order = drm_order(request->size);
        size = 1 << order;

        alignment = (request->flags & _DRM_PAGE_ALIGN)
                ? PAGE_ALIGN(size) : size;
        page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
        total = PAGE_SIZE << page_order;

        byte_count = 0;
        agp_offset = request->agp_start;

        DRM_DEBUG("count:      %d\n", count);
        DRM_DEBUG("order:      %d\n", order);
        DRM_DEBUG("size:       %d\n", size);
        DRM_DEBUG("agp_offset: %lu\n", agp_offset);
        DRM_DEBUG("alignment:  %d\n", alignment);
        DRM_DEBUG("page_order: %d\n", page_order);
        DRM_DEBUG("total:      %d\n", total);

        if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
                return -EINVAL;
        if (dev->queue_count)
                return -EBUSY;  /* Not while in use */

        spin_lock(&dev->count_lock);
        if (dev->buf_use) {
                spin_unlock(&dev->count_lock);
                return -EBUSY;
        }
        atomic_inc(&dev->buf_alloc);
        spin_unlock(&dev->count_lock);

        down(&dev->struct_sem);
        entry = &dma->bufs[order];
        if (entry->buf_count) {
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM; /* May only call once for each order */
        }

        if (count < 0 || count > 4096) {
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -EINVAL;
        }

        entry->buflist = drm_alloc(count * sizeof(*entry->buflist),
                                   DRM_MEM_BUFS);
        if (!entry->buflist) {
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        memset(entry->buflist, 0, count * sizeof(*entry->buflist));

        entry->buf_size = size;
        entry->page_order = page_order;

        offset = 0;

        while (entry->buf_count < count) {
                buf = &entry->buflist[entry->buf_count];
                buf->idx = dma->buf_count + entry->buf_count;
                buf->total = alignment;
                buf->order = order;
                buf->used = 0;

                buf->offset = (dma->byte_count + offset);
                buf->bus_address = agp_offset + offset;
                buf->address = (void *)(agp_offset + offset);
                buf->next = NULL;
                buf->waiting = 0;
                buf->pending = 0;
                init_waitqueue_head(&buf->dma_wait);
                buf->filp = NULL;

                buf->dev_priv_size = dev->driver->dev_priv_size;
                buf->dev_private = drm_alloc(buf->dev_priv_size, DRM_MEM_BUFS);
                if (!buf->dev_private) {
                        /* Set count correctly so we free the proper amount. */
                        entry->buf_count = count;
                        drm_cleanup_buf_error(dev, entry);
                        up(&dev->struct_sem);
                        atomic_dec(&dev->buf_alloc);
                        return -ENOMEM;
                }
                memset(buf->dev_private, 0, buf->dev_priv_size);

                DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

                offset += alignment;
                entry->buf_count++;
                byte_count += PAGE_SIZE << page_order;
        }

        DRM_DEBUG("byte_count: %d\n", byte_count);

        temp_buflist = drm_realloc(dma->buflist,
                                   dma->buf_count * sizeof(*dma->buflist),
                                   (dma->buf_count + entry->buf_count)
                                   * sizeof(*dma->buflist), DRM_MEM_BUFS);
        if (!temp_buflist) {
                /* Free the entry because it isn't valid */
                drm_cleanup_buf_error(dev, entry);
                up(&dev->struct_sem);
                atomic_dec(&dev->buf_alloc);
                return -ENOMEM;
        }
        dma->buflist = temp_buflist;

        for (i = 0; i < entry->buf_count; i++) {
                dma->buflist[i + dma->buf_count] = &entry->buflist[i];
        }

        dma->buf_count += entry->buf_count;
        dma->byte_count += byte_count;

        DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
        DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

        up(&dev->struct_sem);

        request->count = entry->buf_count;
        request->size = size;

        dma->flags = _DRM_DMA_USE_FB;

        atomic_dec(&dev->buf_alloc);
        return 0;
}
/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_desc_t request.
 * \return zero on success or a negative number on failure.
 *
 * According to the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_addbufs( struct inode *inode, struct file *filp,
                 unsigned int cmd, unsigned long arg )
{
        drm_buf_desc_t request;
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        int ret;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( copy_from_user( &request, (drm_buf_desc_t __user *)arg,
                             sizeof(request) ) )
                return -EFAULT;

#if __OS_HAS_AGP
        if ( request.flags & _DRM_AGP_BUFFER )
                ret = drm_addbufs_agp(dev, &request);
        else
#endif
        if ( request.flags & _DRM_SG_BUFFER )
                ret = drm_addbufs_sg(dev, &request);
        else if ( request.flags & _DRM_FB_BUFFER )
                ret = drm_addbufs_fb(dev, &request);
        else
                ret = drm_addbufs_pci(dev, &request);

        if (ret == 0) {
                if (copy_to_user((void __user *)arg, &request,
                                 sizeof(request))) {
                        return -EFAULT;
                }
        }
        return ret;
}
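/*
 * Illustrative sketch of the userspace side of this ioctl (hypothetical
 * snippet, not kernel code): a client asking for 32 page-aligned AGP buffers
 * of 64 KiB each, starting at an offset it chose inside the AGP aperture.
 *
 *      drm_buf_desc_t desc = { 0 };
 *      desc.count     = 32;
 *      desc.size      = 64 * 1024;
 *      desc.flags     = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN;
 *      desc.agp_start = buffer_area_offset;    (chosen by the client)
 *      ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc);
 *
 * On return, desc.count and desc.size reflect what was actually allocated.
 */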
/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or by a sophisticated
 * client library to determine how best to use the available buffers (e.g.,
 * large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs( struct inode *inode, struct file *filp,
                  unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_info_t request;
        drm_buf_info_t __user *argp = (void __user *)arg;
        int i;
        int count;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( !dma ) return -EINVAL;

        spin_lock( &dev->count_lock );
        if ( atomic_read( &dev->buf_alloc ) ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        ++dev->buf_use;         /* Can't allocate more after this call */
        spin_unlock( &dev->count_lock );

        if ( copy_from_user( &request, argp, sizeof(request) ) )
                return -EFAULT;

        for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                if ( dma->bufs[i].buf_count ) ++count;
        }

        DRM_DEBUG( "count = %d\n", count );

        if ( request.count >= count ) {
                for ( i = 0, count = 0 ; i < DRM_MAX_ORDER + 1 ; i++ ) {
                        if ( dma->bufs[i].buf_count ) {
                                drm_buf_desc_t __user *to = &request.list[count];
                                drm_buf_entry_t *from = &dma->bufs[i];
                                drm_freelist_t *list = &dma->bufs[i].freelist;
                                if ( copy_to_user( &to->count,
                                                   &from->buf_count,
                                                   sizeof(from->buf_count) ) ||
                                     copy_to_user( &to->size,
                                                   &from->buf_size,
                                                   sizeof(from->buf_size) ) ||
                                     copy_to_user( &to->low_mark,
                                                   &list->low_mark,
                                                   sizeof(list->low_mark) ) ||
                                     copy_to_user( &to->high_mark,
                                                   &list->high_mark,
                                                   sizeof(list->high_mark) ) )
                                        return -EFAULT;

                                DRM_DEBUG( "%d %d %d %d %d\n",
                                           i,
                                           dma->bufs[i].buf_count,
                                           dma->bufs[i].buf_size,
                                           dma->bufs[i].freelist.low_mark,
                                           dma->bufs[i].freelist.high_mark );
                                ++count;
                        }
                }
        }
        request.count = count;

        if ( copy_to_user( argp, &request, sizeof(request) ) )
                return -EFAULT;

        return 0;
}
/**
 * Specifies a low and high water mark for buffer allocation
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_markbufs( struct inode *inode, struct file *filp,
                  unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_desc_t request;
        int order;
        drm_buf_entry_t *entry;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request,
                             (drm_buf_desc_t __user *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        DRM_DEBUG( "%d, %d, %d\n",
                   request.size, request.low_mark, request.high_mark );
        order = drm_order( request.size );
        if ( order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ) return -EINVAL;
        entry = &dma->bufs[order];

        if ( request.low_mark < 0 || request.low_mark > entry->buf_count )
                return -EINVAL;
        if ( request.high_mark < 0 || request.high_mark > entry->buf_count )
                return -EINVAL;

        entry->freelist.low_mark  = request.low_mark;
        entry->freelist.high_mark = request.high_mark;

        return 0;
}
/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs( struct inode *inode, struct file *filp,
                  unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_free_t request;
        int i;
        int idx;
        drm_buf_t *buf;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( !dma ) return -EINVAL;

        if ( copy_from_user( &request,
                             (drm_buf_free_t __user *)arg,
                             sizeof(request) ) )
                return -EFAULT;

        DRM_DEBUG( "%d\n", request.count );
        for ( i = 0 ; i < request.count ; i++ ) {
                if ( copy_from_user( &idx,
                                     &request.list[i],
                                     sizeof(idx) ) )
                        return -EFAULT;
                if ( idx < 0 || idx >= dma->buf_count ) {
                        DRM_ERROR( "Index %d (of %d max)\n",
                                   idx, dma->buf_count - 1 );
                        return -EINVAL;
                }
                buf = dma->buflist[idx];
                if ( buf->filp != filp ) {
                        DRM_ERROR( "Process %d freeing buffer not owned\n",
                                   current->pid );
                        return -EINVAL;
                }
                drm_free_buffer( dev, buf );
        }

        return 0;
}
/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param filp file pointer.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP or SG buffer region with do_mmap(), and copies information
 * about each buffer into user space. The PCI buffers are already mapped on the
 * addbufs_pci() call.
 */
int drm_mapbufs( struct inode *inode, struct file *filp,
                 unsigned int cmd, unsigned long arg )
{
        drm_file_t *priv = filp->private_data;
        drm_device_t *dev = priv->head->dev;
        drm_device_dma_t *dma = dev->dma;
        drm_buf_map_t __user *argp = (void __user *)arg;
        int retcode = 0;
        const int zero = 0;
        unsigned long virtual;
        unsigned long address;
        drm_buf_map_t request;
        int i;

        if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
                return -EINVAL;

        if ( !dma ) return -EINVAL;

        spin_lock( &dev->count_lock );
        if ( atomic_read( &dev->buf_alloc ) ) {
                spin_unlock( &dev->count_lock );
                return -EBUSY;
        }
        dev->buf_use++;         /* Can't allocate more after this call */
        spin_unlock( &dev->count_lock );

        if ( copy_from_user( &request, argp, sizeof(request) ) )
                return -EFAULT;

        if ( request.count >= dma->buf_count ) {
                if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
                    || (drm_core_check_feature(dev, DRIVER_SG)
                        && (dma->flags & _DRM_DMA_USE_SG))
                    || (drm_core_check_feature(dev, DRIVER_FB_DMA)
                        && (dma->flags & _DRM_DMA_USE_FB))) {
                        drm_map_t *map = dev->agp_buffer_map;

                        if ( !map ) {
                                retcode = -EINVAL;
                                goto done;
                        }

#if LINUX_VERSION_CODE <= 0x020402
                        down( &current->mm->mmap_sem );
#else
                        down_write( &current->mm->mmap_sem );
#endif
                        virtual = do_mmap( filp, 0, map->size,
                                           PROT_READ | PROT_WRITE,
                                           MAP_SHARED,
                                           (unsigned long)map->offset );
#if LINUX_VERSION_CODE <= 0x020402
                        up( &current->mm->mmap_sem );
#else
                        up_write( &current->mm->mmap_sem );
#endif
                } else {
#if LINUX_VERSION_CODE <= 0x020402
                        down( &current->mm->mmap_sem );
#else
                        down_write( &current->mm->mmap_sem );
#endif
                        virtual = do_mmap( filp, 0, dma->byte_count,
                                           PROT_READ | PROT_WRITE,
                                           MAP_SHARED, 0 );
#if LINUX_VERSION_CODE <= 0x020402
                        up( &current->mm->mmap_sem );
#else
                        up_write( &current->mm->mmap_sem );
#endif
                }
                if ( virtual > -1024UL ) {
                        /* Real error */
                        retcode = (signed long)virtual;
                        goto done;
                }
                request.virtual = (void __user *)virtual;

                for ( i = 0 ; i < dma->buf_count ; i++ ) {
                        if ( copy_to_user( &request.list[i].idx,
                                           &dma->buflist[i]->idx,
                                           sizeof(request.list[0].idx) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if ( copy_to_user( &request.list[i].total,
                                           &dma->buflist[i]->total,
                                           sizeof(request.list[0].total) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        if ( copy_to_user( &request.list[i].used,
                                           &zero,
                                           sizeof(zero) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                        address = virtual + dma->buflist[i]->offset; /* *** */
                        if ( copy_to_user( &request.list[i].address,
                                           &address,
                                           sizeof(address) ) ) {
                                retcode = -EFAULT;
                                goto done;
                        }
                }
        }
 done:
        request.count = dma->buf_count;
        DRM_DEBUG( "%d buffers, retcode = %d\n", request.count, retcode );

        if ( copy_to_user( argp, &request, sizeof(request) ) )
                return -EFAULT;

        return retcode;
}
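/*
 * Note on the address math above: each buffer's user-space address is the
 * base returned by do_mmap() plus the buf->offset recorded by the
 * addbufs_*() helpers.  As a hypothetical illustration, a client whose only
 * request allocated page-aligned 64 KiB buffers would see
 * request.list[i].address == request.virtual + i * 64 KiB, since buf->offset
 * advances by the buffer alignment within that request.
 */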