/**
 * \file drm_bufs.c
 * Generic buffer template
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Thu Nov 23 03:10:50 2000 by gareth@valinux.com
 *
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/log2.h>
#include <linux/export.h>
#include <asm/shmparam.h>
#include <drm/drmP.h>
static struct drm_map_list *drm_find_matching_map(struct drm_device *dev,
						  struct drm_local_map *map)
{
	struct drm_map_list *entry;
	list_for_each_entry(entry, &dev->maplist, head) {
		/*
		 * Because the kernel-userspace ABI is fixed at a 32-bit offset
		 * while PCI resources may live above that, we only compare the
		 * lower 32 bits of the map offset for maps of type
		 * _DRM_FRAMEBUFFER or _DRM_REGISTERS.
		 * It is assumed that if a driver has more than one resource
		 * of each type, the lower 32 bits are different.
		 */
		if (!entry->map ||
		    map->type != entry->map->type ||
		    entry->master != dev->primary->master)
			continue;
		switch (map->type) {
		case _DRM_SHM:
			if (map->flags != _DRM_CONTAINS_LOCK)
				break;
			return entry;
		case _DRM_REGISTERS:
		case _DRM_FRAME_BUFFER:
			if ((entry->map->offset & 0xffffffff) ==
			    (map->offset & 0xffffffff))
				return entry;
		default: /* Make gcc happy */
			;
		}
		if (entry->map->offset == map->offset)
			return entry;
	}

	return NULL;
}
static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
			  unsigned long user_token, int hashed_handle, int shm)
{
	int use_hashed_handle, shift;
	unsigned long add;

#if (BITS_PER_LONG == 64)
	use_hashed_handle = ((user_token & 0xFFFFFFFF00000000UL) || hashed_handle);
#elif (BITS_PER_LONG == 32)
	use_hashed_handle = hashed_handle;
#else
#error Unsupported long size. Neither 64 nor 32 bits.
#endif

	if (!use_hashed_handle) {
		int ret;
		hash->key = user_token >> PAGE_SHIFT;
		ret = drm_ht_insert_item(&dev->map_hash, hash);
		if (ret != -EINVAL)
			return ret;
	}

	shift = 0;
	add = DRM_MAP_HASH_OFFSET >> PAGE_SHIFT;
	if (shm && (SHMLBA > PAGE_SIZE)) {
		int bits = ilog2(SHMLBA >> PAGE_SHIFT) + 1;

		/* For shared memory, we have to preserve the SHMLBA
		 * bits of the eventual vma->vm_pgoff value during
		 * mmap().  Otherwise we run into cache aliasing problems
		 * on some platforms.  On these platforms, the pgoff of
		 * a mmap() request is used to pick a suitable virtual
		 * address for the mmap() region such that it will not
		 * cause cache aliasing problems.
		 *
		 * Therefore, make sure the SHMLBA relevant bits of the
		 * hash value we use are equal to those in the original
		 * kernel virtual address.
		 */
		shift = bits;
		add |= ((user_token >> PAGE_SHIFT) & ((1UL << bits) - 1UL));
	}

	return drm_ht_just_insert_please(&dev->map_hash, hash,
					 user_token, 32 - PAGE_SHIFT - 3,
					 shift, add);
}
/**
 * Core function to create a range of memory available for mapping by a
 * non-root process.
 *
 * Adjusts the memory offset to its absolute value according to the mapping
 * type.  Adds the map to the map list drm_device::maplist. Adds MTRR's where
 * applicable and if supported by the kernel.
 */
static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
			   unsigned int size, enum drm_map_type type,
			   enum drm_map_flags flags,
			   struct drm_map_list ** maplist)
{
	struct drm_local_map *map;
	struct drm_map_list *list;
	drm_dma_handle_t *dmah;
	unsigned long user_token;
	int ret;

	map = kmalloc(sizeof(*map), GFP_KERNEL);
	if (!map)
		return -ENOMEM;

	map->offset = offset;
	map->size = size;
	map->flags = flags;
	map->type = type;

	/* Only allow shared memory to be removable since we only keep enough
	 * book keeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((map->flags & _DRM_REMOVABLE) && map->type != _DRM_SHM) {
		kfree(map);
		return -EINVAL;
	}
164 DRM_DEBUG("offset = 0x%08llx, size = 0x%08lx, type = %d\n",
165 (unsigned long long)map
->offset
, map
->size
, map
->type
);
167 /* page-align _DRM_SHM maps. They are allocated here so there is no security
168 * hole created by that and it works around various broken drivers that use
169 * a non-aligned quantity to map the SAREA. --BenH
171 if (map
->type
== _DRM_SHM
)
172 map
->size
= PAGE_ALIGN(map
->size
);
174 if ((map
->offset
& (~(resource_size_t
)PAGE_MASK
)) || (map
->size
& (~PAGE_MASK
))) {
	case _DRM_FRAME_BUFFER:
#if !defined(__sparc__) && !defined(__alpha__) && !defined(__ia64__) && !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__arm__)
		if (map->offset + (map->size - 1) < map->offset ||
		    map->offset < virt_to_phys(high_memory)) {
			kfree(map);
			return -EINVAL;
		}
#endif
		/* Some drivers preinitialize some maps, without the X Server
		 * needing to be aware of it.  Therefore, we just return success
		 * when the server tries to create a duplicate map.
		 */
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size,
					  list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}

		if (map->type == _DRM_FRAME_BUFFER ||
		    (map->flags & _DRM_WRITE_COMBINING)) {
			map->mtrr =
				arch_phys_wc_add(map->offset, map->size);
		}
		if (map->type == _DRM_REGISTERS) {
			if (map->flags & _DRM_WRITE_COMBINING)
				map->handle = ioremap_wc(map->offset,
							 map->size);
			else
				map->handle = ioremap(map->offset, map->size);
			if (!map->handle) {
				kfree(map);
				return -ENOMEM;
			}
		}
		break;
	case _DRM_SHM:
		list = drm_find_matching_map(dev, map);
		if (list != NULL) {
			if (list->map->size != map->size) {
				DRM_DEBUG("Matching maps of type %d with "
					  "mismatched sizes, (%ld vs %ld)\n",
					  map->type, map->size, list->map->size);
				list->map->size = map->size;
			}

			kfree(map);
			*maplist = list;
			return 0;
		}
		map->handle = vmalloc_user(map->size);
		DRM_DEBUG("%lu %d %p\n",
			  map->size, order_base_2(map->size), map->handle);
		if (!map->handle) {
			kfree(map);
			return -ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			if (dev->primary->master->lock.hw_lock != NULL) {
				vfree(map->handle);
				kfree(map);
				return -EBUSY;
			}
			dev->sigdata.lock = dev->primary->master->lock.hw_lock = map->handle;	/* Pointer to lock */
		}
		break;
	case _DRM_AGP: {
		struct drm_agp_mem *entry;
		int valid = 0;

		if (!drm_core_has_AGP(dev)) {
			kfree(map);
			return -EINVAL;
		}
#ifdef __alpha__
		map->offset += dev->hose->mem_space->start;
#endif
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->agp_info.aper_size * 1024 * 1024 - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->agp_mtrr;	/* for getmap */

		/* This assumes the DRM is in total control of AGP space.
		 * It's not always the case as AGP can be in the control
		 * of user space (i.e. i810 driver). So this loop will get
		 * skipped and we double check that dev->agp->memory is
		 * actually set as well as being invalid before EPERM'ing.
		 */
		list_for_each_entry(entry, &dev->agp->memory, head) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <= entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!list_empty(&dev->agp->memory) && !valid) {
			kfree(map);
			return -EPERM;
		}
		DRM_DEBUG("AGP offset = 0x%08llx, size = 0x%08lx\n",
			  (unsigned long long)map->offset, map->size);

		break;
	}
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			kfree(map);
			return -EINVAL;
		}
		map->offset += (unsigned long)dev->sg->virtual;
		break;
	case _DRM_CONSISTENT:
		/* dma_addr_t is 64bit on i386 with CONFIG_HIGHMEM64G.
		 * As we're limiting the address to 2^32-1 (or less),
		 * casting it down to 32 bits is no problem, but we
		 * need to point to a 64bit variable first. */
		dmah = drm_pci_alloc(dev, map->size, map->size);
		if (!dmah) {
			kfree(map);
			return -ENOMEM;
		}
		map->handle = dmah->vaddr;
		map->offset = (unsigned long)dmah->busaddr;
		kfree(dmah);
		break;
	default:
		kfree(map);
		return -EINVAL;
	}

	list = kzalloc(sizeof(*list), GFP_KERNEL);
	if (!list) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		return -EINVAL;
	}
	list->map = map;

	mutex_lock(&dev->struct_mutex);
	list_add(&list->head, &dev->maplist);

	/* Assign a 32-bit handle */
	/* We do it here so that dev->struct_mutex protects the increment */
	user_token = (map->type == _DRM_SHM) ? (unsigned long)map->handle :
		map->offset;
	ret = drm_map_handle(dev, &list->hash, user_token, 0,
			     (map->type == _DRM_SHM));
	if (ret) {
		if (map->type == _DRM_REGISTERS)
			iounmap(map->handle);
		kfree(map);
		kfree(list);
		mutex_unlock(&dev->struct_mutex);
		return ret;
	}

	list->user_token = list->hash.key << PAGE_SHIFT;
	mutex_unlock(&dev->struct_mutex);

	if (!(map->flags & _DRM_DRIVER))
		list->master = dev->primary->master;
	*maplist = list;
	return 0;
}
int drm_addmap(struct drm_device * dev, resource_size_t offset,
	       unsigned int size, enum drm_map_type type,
	       enum drm_map_flags flags, struct drm_local_map ** map_ptr)
{
	struct drm_map_list *list;
	int rc;

	rc = drm_addmap_core(dev, offset, size, type, flags, &list);
	if (!rc)
		*map_ptr = list->map;
	return rc;
}
EXPORT_SYMBOL(drm_addmap);
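/*
 * Example (illustrative only, not part of this file): a driver's load hook
 * might register its register BAR with drm_addmap().  The BAR index and
 * flag combination below are hypothetical.
 *
 *	struct drm_local_map *regs;
 *	int ret;
 *
 *	ret = drm_addmap(dev, pci_resource_start(dev->pdev, 0),
 *			 pci_resource_len(dev->pdev, 0),
 *			 _DRM_REGISTERS, _DRM_KERNEL | _DRM_DRIVER, &regs);
 *	if (ret)
 *		return ret;
 */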
/**
 * Ioctl to specify a range of memory that is available for mapping by a
 * non-root process.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_addmap_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_map *map = data;
	struct drm_map_list *maplist;
	int err;

	if (!(capable(CAP_SYS_ADMIN) || map->type == _DRM_AGP || map->type == _DRM_SHM))
		return -EPERM;

	err = drm_addmap_core(dev, map->offset, map->size, map->type,
			      map->flags, &maplist);

	if (err)
		return err;

	/* avoid a warning on 64-bit: the cast isn't very nice, but the API was set too late */
	map->handle = (void *)(unsigned long)maplist->user_token;

	/*
	 * It appears that there are no users of this value whatsoever --
	 * drmAddMap just discards it.  Let's not encourage its use.
	 * (Keeping drm_addmap_core's returned mtrr value would be wrong --
	 * it's not a real mtrr index anymore.)
	 */
	map->mtrr = -1;

	return 0;
}
/**
 * Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 *
 * Searches the map on drm_device::maplist, removes it from the list, sees if
 * it's being used, and frees any associated resources (such as MTRRs) if it's
 * not.
 */
int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
	struct drm_map_list *r_list = NULL, *list_t;
	drm_dma_handle_t dmah;
	int found = 0;
	struct drm_master *master;

	/* Find the list entry for the map and remove it */
	list_for_each_entry_safe(r_list, list_t, &dev->maplist, head) {
		if (r_list->map == map) {
			master = r_list->master;
			list_del(&r_list->head);
			drm_ht_remove_key(&dev->map_hash,
					  r_list->user_token >> PAGE_SHIFT);
			kfree(r_list);
			found = 1;
			break;
		}
	}

	if (!found)
		return -EINVAL;

	switch (map->type) {
	case _DRM_REGISTERS:
		iounmap(map->handle);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		arch_phys_wc_del(map->mtrr);
		break;
	case _DRM_SHM:
		vfree(map->handle);
		if (master) {
			if (dev->sigdata.lock == master->lock.hw_lock)
				dev->sigdata.lock = NULL;
			master->lock.hw_lock = NULL;	/* SHM removed */
			master->lock.file_priv = NULL;
			wake_up_interruptible_all(&master->lock.lock_queue);
		}
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		dmah.vaddr = map->handle;
		dmah.busaddr = map->offset;
		dmah.size = map->size;
		__drm_pci_free(dev, &dmah);
		break;
	}
	kfree(map);

	return 0;
}
EXPORT_SYMBOL(drm_rmmap_locked);
int drm_rmmap(struct drm_device *dev, struct drm_local_map *map)
{
	int ret;

	mutex_lock(&dev->struct_mutex);
	ret = drm_rmmap_locked(dev, map);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_rmmap);
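/*
 * Example (illustrative): a map registered as in the drm_addmap() sketch
 * above would be torn down from the driver's unload hook; the unlocked
 * variant takes dev->struct_mutex itself:
 *
 *	drm_rmmap(dev, regs);
 *
 * while a caller that already holds dev->struct_mutex would use
 * drm_rmmap_locked(dev, regs) instead.
 */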
/* The rmmap ioctl appears to be unnecessary.  All mappings are torn down on
 * the last close of the device, and this is necessary for cleanup when things
 * exit uncleanly.  Therefore, having userland manually remove mappings seems
 * like a pointless exercise since they're going away anyway.
 *
 * One use case might be after addmap is allowed for normal users for SHM and
 * gets used by drivers that the server doesn't need to care about.  This seems
 * unlikely.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_map structure.
 * \return zero on success or a negative value on error.
 */
int drm_rmmap_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_map *request = data;
	struct drm_local_map *map = NULL;
	struct drm_map_list *r_list;
	int ret;

	mutex_lock(&dev->struct_mutex);
	list_for_each_entry(r_list, &dev->maplist, head) {
		if (r_list->map &&
		    r_list->user_token == (unsigned long)request->handle &&
		    r_list->map->flags & _DRM_REMOVABLE) {
			map = r_list->map;
			break;
		}
	}

	/* List has wrapped around to the head pointer, or it's empty, and we
	 * didn't find anything.
	 */
	if (list_empty(&dev->maplist) || !map) {
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	/* Register and framebuffer maps are permanent */
	if ((map->type == _DRM_REGISTERS) || (map->type == _DRM_FRAME_BUFFER)) {
		mutex_unlock(&dev->struct_mutex);
		return 0;
	}

	ret = drm_rmmap_locked(dev, map);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
/**
 * Cleanup after an error on one of the addbufs() functions.
 *
 * \param dev DRM device.
 * \param entry buffer entry where the error occurred.
 *
 * Frees any pages and buffers associated with the given entry.
 */
static void drm_cleanup_buf_error(struct drm_device * dev,
				  struct drm_buf_entry * entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			if (entry->seglist[i]) {
				drm_pci_free(dev, entry->seglist[i]);
			}
		}
		kfree(entry->seglist);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			kfree(entry->buflist[i].dev_private);
		}
		kfree(entry->buflist);

		entry->buf_count = 0;
	}
}
#if __OS_HAS_AGP
/**
 * Add AGP buffers for DMA transfers.
 *
 * \param dev struct drm_device to which the buffers are to be added.
 * \param request pointer to a struct drm_buf_desc describing the request.
 * \return zero on success or a negative number on failure.
 *
 * After some sanity checks creates a drm_buf structure for each buffer and
 * reallocates the buffer list of the same size order to accommodate the new
 * buffers.
 */
int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_agp_mem *agp_entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i, valid;
	struct drm_buf **temp_buflist;

	if (!dma)
		return -EINVAL;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;
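	/*
	 * Worked example (illustrative): with PAGE_SHIFT == 12 and
	 * request->size == 65536, order_base_2() gives order = 16, so
	 * size = 64 KiB, page_order = 16 - 12 = 4 and total =
	 * PAGE_SIZE << 4 = 64 KiB per aligned allocation unit.
	 */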
	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lx\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	/* Make sure buffers are located in AGP memory that we own */
	valid = 0;
	list_for_each_entry(agp_entry, &dev->agp->memory, head) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <= agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!list_empty(&dev->agp->memory) && !valid) {
		DRM_DEBUG("zone invalid\n");
		return -EINVAL;
	}
	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);
	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);
	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;
	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
EXPORT_SYMBOL(drm_addbufs_agp);
#endif				/* __OS_HAS_AGP */
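/*
 * Example (illustrative): a caller would describe an AGP buffer pool with a
 * struct drm_buf_desc before calling drm_addbufs_agp(); the counts and
 * sizes here are hypothetical.
 *
 *	struct drm_buf_desc req = {
 *		.count     = 32,
 *		.size      = 65536,
 *		.flags     = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN,
 *		.agp_start = 0,
 *	};
 *
 *	ret = drm_addbufs_agp(dev, &req);
 */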
int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	struct drm_buf_entry *entry;
	drm_dma_handle_t *dmah;
	struct drm_buf *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_PCI_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
		  request->count, request->size, size, order);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);

	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}
	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist), GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->seglist = kzalloc(count * sizeof(*entry->seglist), GFP_KERNEL);
	if (!entry->seglist) {
		kfree(entry->buflist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	/* Keep the original pagelist until we know all the allocations
	 * have succeeded.
	 */
	temp_pagelist = kmalloc((dma->page_count + (count << page_order)) *
				sizeof(*dma->pagelist), GFP_KERNEL);
	if (!temp_pagelist) {
		kfree(entry->buflist);
		kfree(entry->seglist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	memcpy(temp_pagelist,
	       dma->pagelist, dma->page_count * sizeof(*dma->pagelist));
	DRM_DEBUG("pagelist: %d entries\n",
		  dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {

		dmah = drm_pci_alloc(dev, PAGE_SIZE << page_order, 0x1000);

		if (!dmah) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			kfree(temp_pagelist);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}
		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ 0x%08lx\n",
				  dma->page_count + page_count,
				  (unsigned long)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++]
				= (unsigned long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		     offset + size <= total && entry->buf_count < count;
		     offset += alignment, ++entry->buf_count) {
			buf = &entry->buflist[entry->buf_count];
			buf->idx = dma->buf_count + entry->buf_count;
			buf->total = alignment;
			buf->order = order;
			buf->used = 0;

			buf->offset = (dma->byte_count + byte_count + offset);
			buf->address = (void *)(dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next = NULL;
			buf->waiting = 0;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->dev_priv_size;
			buf->dev_private = kzalloc(buf->dev_priv_size,
						   GFP_KERNEL);
			if (!buf->dev_private) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				kfree(temp_pagelist);
				mutex_unlock(&dev->struct_mutex);
				atomic_dec(&dev->buf_alloc);
				return -ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
				  entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}
	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		kfree(temp_pagelist);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;
	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	if (dma->page_count) {
		kfree(dma->pagelist);
	}
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	if (request->flags & _DRM_PCI_BUFFER_RO)
		dma->flags = _DRM_DMA_USE_PCI_RO;

	atomic_dec(&dev->buf_alloc);
	return 0;

}
EXPORT_SYMBOL(drm_addbufs_pci);
static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_entry *entry;
	struct drm_buf *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	struct drm_buf **temp_buflist;

	if (!drm_core_check_feature(dev, DRIVER_SG))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	count = request->count;
	order = order_base_2(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? PAGE_ALIGN(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %lu\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (dev->buf_use) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);
	spin_unlock(&dev->count_lock);
	mutex_lock(&dev->struct_mutex);
	entry = &dma->bufs[order];
	if (entry->buf_count) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;	/* May only call once for each order */
	}

	if (count < 0 || count > 4096) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -EINVAL;
	}

	entry->buflist = kzalloc(count * sizeof(*entry->buflist),
				 GFP_KERNEL);
	if (!entry->buflist) {
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;
	offset = 0;

	while (entry->buf_count < count) {
		buf = &entry->buflist[entry->buf_count];
		buf->idx = dma->buf_count + entry->buf_count;
		buf->total = alignment;
		buf->order = order;
		buf->used = 0;

		buf->offset = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset
					+ (unsigned long)dev->sg->virtual);
		buf->next = NULL;
		buf->waiting = 0;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->dev_priv_size;
		buf->dev_private = kzalloc(buf->dev_priv_size, GFP_KERNEL);
		if (!buf->dev_private) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			mutex_unlock(&dev->struct_mutex);
			atomic_dec(&dev->buf_alloc);
			return -ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n", entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);
	temp_buflist = krealloc(dma->buflist,
				(dma->buf_count + entry->buf_count) *
				sizeof(*dma->buflist), GFP_KERNEL);
	if (!temp_buflist) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		mutex_unlock(&dev->struct_mutex);
		atomic_dec(&dev->buf_alloc);
		return -ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += byte_count >> PAGE_SHIFT;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	mutex_unlock(&dev->struct_mutex);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	atomic_dec(&dev->buf_alloc);
	return 0;
}
/**
 * Add buffers for DMA transfers (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a struct drm_buf_desc request.
 * \return zero on success or a negative number on failure.
 *
 * Depending on the memory type specified in drm_buf_desc::flags and the
 * build options, it dispatches the call either to addbufs_agp(),
 * addbufs_sg() or addbufs_pci() for AGP, scatter-gather or consistent
 * PCI memory respectively.
 */
int drm_addbufs(struct drm_device *dev, void *data,
		struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int ret;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

#if __OS_HAS_AGP
	if (request->flags & _DRM_AGP_BUFFER)
		ret = drm_addbufs_agp(dev, request);
	else
#endif
	if (request->flags & _DRM_SG_BUFFER)
		ret = drm_addbufs_sg(dev, request);
	else if (request->flags & _DRM_FB_BUFFER)
		ret = -EINVAL;
	else
		ret = drm_addbufs_pci(dev, request);

	return ret;
}
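/*
 * Example (illustrative): the same kind of request routed through the
 * dispatcher above; _DRM_SG_BUFFER sends it to drm_addbufs_sg(), and no
 * memory-type flag at all falls through to drm_addbufs_pci().
 *
 *	struct drm_buf_desc req = {
 *		.count = 32,
 *		.size  = 65536,
 *		.flags = _DRM_SG_BUFFER,
 *	};
 *
 *	ret = drm_addbufs(dev, &req, file_priv);
 */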
/**
 * Get information about the buffer mappings.
 *
 * This was originally meant for debugging purposes, or for use by a
 * sophisticated client library to determine how best to use the available
 * buffers (e.g., large buffers can be used for image transfer).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_info structure.
 * \return zero on success or a negative number on failure.
 *
 * Increments drm_device::buf_use while holding the drm_device::count_lock
 * lock, preventing allocation of more buffers after this call.  Information
 * about each requested buffer is then copied into user space.
 */
int drm_infobufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	++dev->buf_use;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc __user *to =
				    &request->list[count];
				struct drm_buf_entry *from = &dma->bufs[i];
				struct drm_freelist *list = &dma->bufs[i].freelist;
				if (copy_to_user(&to->count,
						 &from->buf_count,
						 sizeof(from->buf_count)) ||
				    copy_to_user(&to->size,
						 &from->buf_size,
						 sizeof(from->buf_size)) ||
				    copy_to_user(&to->low_mark,
						 &list->low_mark,
						 sizeof(list->low_mark)) ||
				    copy_to_user(&to->high_mark,
						 &list->high_mark,
						 sizeof(list->high_mark)))
					return -EFAULT;

				DRM_DEBUG("%d %d %d %d %d\n",
					  i,
					  dma->bufs[i].buf_count,
					  dma->bufs[i].buf_size,
					  dma->bufs[i].freelist.low_mark,
					  dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return 0;
}
/**
 * Specifies a low and high water mark for buffer allocation
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg a pointer to a drm_buf_desc structure.
 * \return zero on success or a negative number on failure.
 *
 * Verifies that the size order is bounded between the admissible orders and
 * updates the respective drm_device_dma::bufs entry low and high water mark.
 *
 * \note This ioctl is deprecated and mostly never used.
 */
int drm_markbufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;
	struct drm_buf_entry *entry;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d, %d, %d\n",
		  request->size, request->low_mark, request->high_mark);
	order = order_base_2(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return -EINVAL;
	entry = &dma->bufs[order];

	if (request->low_mark < 0 || request->low_mark > entry->buf_count)
		return -EINVAL;
	if (request->high_mark < 0 || request->high_mark > entry->buf_count)
		return -EINVAL;

	entry->freelist.low_mark = request->low_mark;
	entry->freelist.high_mark = request->high_mark;

	return 0;
}
/**
 * Unreserve the buffers in list, previously reserved using drmDMA.
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_free structure.
 * \return zero on success or a negative number on failure.
 *
 * Calls free_buffer() for each used buffer.
 * This function is primarily used for debugging.
 */
int drm_freebufs(struct drm_device *dev, void *data,
		 struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	struct drm_buf *buf;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	DRM_DEBUG("%d\n", request->count);
	for (i = 0; i < request->count; i++) {
		if (copy_from_user(&idx, &request->list[i], sizeof(idx)))
			return -EFAULT;
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
				  idx, dma->buf_count - 1);
			return -EINVAL;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
				  task_pid_nr(current));
			return -EINVAL;
		}
		drm_free_buffer(dev, buf);
	}

	return 0;
}
/**
 * Maps all of the DMA buffers into client-virtual space (ioctl).
 *
 * \param inode device inode.
 * \param file_priv DRM file private.
 * \param cmd command.
 * \param arg pointer to a drm_buf_map structure.
 * \return zero on success or a negative number on failure.
 *
 * Maps the AGP, SG or PCI buffer region with vm_mmap(), and copies information
 * about each buffer into user space.  For PCI buffers, it calls vm_mmap() with
 * offset equal to 0, which drm_mmap() interprets as PCI buffers and calls
 * drm_mmap_dma().
 */
int drm_mapbufs(struct drm_device *dev, void *data,
	        struct drm_file *file_priv)
{
	struct drm_device_dma *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	unsigned long virtual;
	unsigned long address;
	struct drm_buf_map *request = data;
	int i;

	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (!drm_core_check_feature(dev, DRIVER_HAVE_DMA))
		return -EINVAL;

	if (!dma)
		return -EINVAL;

	spin_lock(&dev->count_lock);
	if (atomic_read(&dev->buf_alloc)) {
		spin_unlock(&dev->count_lock);
		return -EBUSY;
	}
	dev->buf_use++;		/* Can't allocate more after this call */
	spin_unlock(&dev->count_lock);

	if (request->count >= dma->buf_count) {
		if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
		    || (drm_core_check_feature(dev, DRIVER_SG)
			&& (dma->flags & _DRM_DMA_USE_SG))) {
			struct drm_local_map *map = dev->agp_buffer_map;
			unsigned long token = dev->agp_buffer_token;

			if (!map) {
				retcode = -EINVAL;
				goto done;
			}
			virtual = vm_mmap(file_priv->filp, 0, map->size,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED,
					  token);
		} else {
			virtual = vm_mmap(file_priv->filp, 0, dma->byte_count,
					  PROT_READ | PROT_WRITE,
					  MAP_SHARED, 0);
		}
		if (virtual > -1024UL) {
			/* Real error */
			retcode = (signed long)virtual;
			goto done;
		}
		request->virtual = (void __user *)virtual;

		for (i = 0; i < dma->buf_count; i++) {
			if (copy_to_user(&request->list[i].idx,
					 &dma->buflist[i]->idx,
					 sizeof(request->list[0].idx))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].total,
					 &dma->buflist[i]->total,
					 sizeof(request->list[0].total))) {
				retcode = -EFAULT;
				goto done;
			}
			if (copy_to_user(&request->list[i].used,
					 &zero, sizeof(zero))) {
				retcode = -EFAULT;
				goto done;
			}
			address = virtual + dma->buflist[i]->offset;	/* *** */
			if (copy_to_user(&request->list[i].address,
					 &address, sizeof(address))) {
				retcode = -EFAULT;
				goto done;
			}
		}
	}
      done:
	request->count = dma->buf_count;
	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}
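/*
 * Illustrative sketch of the userspace side of drm_mapbufs() (roughly what
 * libdrm's drmMapBufs() does; details here are an assumption, not part of
 * this file): the caller fills drm_buf_map::count and ::list with room for
 * dma->buf_count entries, issues the ioctl, and afterwards each
 * list[i].address points into the single vm_mmap()ed region at
 * virtual + buflist[i]->offset.
 */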
int drm_dma_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv)
{
	if (drm_core_check_feature(dev, DRIVER_MODESET))
		return -EINVAL;

	if (dev->driver->dma_ioctl)
		return dev->driver->dma_ioctl(dev, data, file_priv);
	else
		return -EINVAL;
}
struct drm_local_map *drm_getsarea(struct drm_device *dev)
{
	struct drm_map_list *entry;

	list_for_each_entry(entry, &dev->maplist, head) {
		if (entry->map && entry->map->type == _DRM_SHM &&
		    (entry->map->flags & _DRM_CONTAINS_LOCK)) {
			return entry->map;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(drm_getsarea);
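/*
 * Example (illustrative): legacy drivers typically locate the SAREA during
 * initialization with drm_getsarea() instead of walking dev->maplist
 * themselves; 'dev_priv->sarea' is a hypothetical driver field.
 *
 *	dev_priv->sarea = drm_getsarea(dev);
 *	if (!dev_priv->sarea) {
 *		DRM_ERROR("could not find SAREA!\n");
 *		return -EINVAL;
 *	}
 */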