/*-
 * Copyright 1999, 2000 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Rickard E. (Rik) Faith <faith@valinux.com>
 *    Gareth Hughes <gareth@valinux.com>
 *
 */
/** @file drm_bufs.c
 * Implementation of the ioctls for setup of DRM mappings and DMA buffers.
 */
#if defined(__FreeBSD__)
#include "dev/pci/pcireg.h"
#endif

#include "drmP.h"
/* Allocation of PCI memory resources (framebuffer, registers, etc.) for
 * drm_get_resource_*.  Note that they are not RF_ACTIVE, so there's no virtual
 * address for accessing them.  Cleaned up at unload.
 */
static int drm_alloc_resource(struct drm_device *dev, int resource)
{
#if defined(__FreeBSD__)
	if (resource >= DRM_MAX_PCI_RESOURCE) {
		DRM_ERROR("Resource %d too large\n", resource);
		return 1;
	}

	DRM_UNLOCK();
	if (dev->pcir[resource] != NULL) {
		DRM_LOCK();
		return 0;
	}

	dev->pcirid[resource] = PCIR_BAR(resource);
	dev->pcir[resource] = bus_alloc_resource_any(dev->device,
	    SYS_RES_MEMORY, &dev->pcirid[resource], RF_SHAREABLE);
	DRM_LOCK();

	if (dev->pcir[resource] == NULL) {
		DRM_ERROR("Couldn't find resource 0x%x\n", resource);
		return 1;
	}
#elif defined(__NetBSD__)
	/* XXX This space _not_ intentionally left blank! */
#endif

	return 0;
}
unsigned long drm_get_resource_start(struct drm_device *dev,
    unsigned int resource)
{
	if (drm_alloc_resource(dev, resource) != 0)
		return 0;

#if defined(__FreeBSD__)
	return rman_get_start(dev->pcir[resource]);
#elif defined(__NetBSD__)
	return dev->pci_map_data[resource].base;
#endif
}
unsigned long drm_get_resource_len(struct drm_device *dev,
    unsigned int resource)
{
	if (drm_alloc_resource(dev, resource) != 0)
		return 0;

#if defined(__FreeBSD__)
	return rman_get_size(dev->pcir[resource]);
#elif defined(__NetBSD__)
	return dev->pci_map_data[resource].size;
#endif
}
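
/*
 * Usage sketch (editorial, not part of the original file): a driver's attach
 * path would typically pair these helpers with drm_addmap() below to publish
 * its register BAR.  The BAR index 0 and the local variables are assumptions
 * for illustration only.
 *
 *	drm_local_map_t *regs_map;
 *	unsigned long base = drm_get_resource_start(dev, 0);
 *	unsigned long len  = drm_get_resource_len(dev, 0);
 *
 *	if (base != 0 && len != 0)
 *		(void)drm_addmap(dev, base, len, _DRM_REGISTERS, 0, &regs_map);
 */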
int drm_addmap(struct drm_device * dev, unsigned long offset,
    unsigned long size,
    enum drm_map_type type, enum drm_map_flags flags, drm_local_map_t **map_ptr)
{
	drm_local_map_t *map;
	int align;
	/*drm_agp_mem_t *entry;
	int valid;*/

	/* Only allow shared memory to be removable since we only keep enough
	 * bookkeeping information about shared memory to allow for removal
	 * when processes fork.
	 */
	if ((flags & _DRM_REMOVABLE) && type != _DRM_SHM) {
		DRM_ERROR("Requested removable map for non-DRM_SHM\n");
		return EINVAL;
	}
	if ((offset & PAGE_MASK) || (size & PAGE_MASK)) {
		DRM_ERROR("offset/size not page aligned: 0x%lx/0x%lx\n",
		    offset, size);
		return EINVAL;
	}
	if (offset + size < offset) {
		DRM_ERROR("offset and size wrap around: 0x%lx/0x%lx\n",
		    offset, size);
		return EINVAL;
	}

	DRM_DEBUG("offset = 0x%08lx, size = 0x%08lx, type = %d\n", offset,
	    size, type);

	/* Check if this is just another version of a kernel-allocated map, and
	 * just hand that back if so.
	 */
	if (type == _DRM_REGISTERS || type == _DRM_FRAME_BUFFER ||
	    type == _DRM_SHM) {
		TAILQ_FOREACH(map, &dev->maplist, link) {
			if (map->type == type && (map->offset == offset ||
			    (map->type == _DRM_SHM &&
			    map->flags == _DRM_CONTAINS_LOCK))) {
				map->size = size;
				DRM_DEBUG("Found kernel map %d\n", type);
				goto done;
			}
		}
	}
	DRM_UNLOCK();

	/* Allocate a new map structure, fill it in, and do any type-specific
	 * initialization necessary.
	 */
	map = malloc(sizeof(*map), DRM_MEM_MAPS, M_ZERO | M_NOWAIT);
	if (map == NULL) {
		DRM_LOCK();
		return ENOMEM;
	}

	map->offset = offset;
	map->size = size;
	map->type = type;
	map->flags = flags;

	switch (map->type) {
	case _DRM_REGISTERS:
		map->handle = drm_ioremap(dev, map);
		if (!(map->flags & _DRM_WRITE_COMBINING))
			break;
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (drm_mtrr_add(map->offset, map->size, DRM_MTRR_WC) == 0)
			map->mtrr = 1;
		break;
	case _DRM_SHM:
		map->handle = malloc(map->size, DRM_MEM_MAPS, M_NOWAIT);
		DRM_DEBUG("%lu %d %p\n",
		    map->size, drm_order(map->size), map->handle);
		if (!map->handle) {
			free(map, DRM_MEM_MAPS);
			DRM_LOCK();
			return ENOMEM;
		}
		map->offset = (unsigned long)map->handle;
		if (map->flags & _DRM_CONTAINS_LOCK) {
			/* Prevent a 2nd X Server from creating a 2nd lock */
			DRM_LOCK();
			if (dev->lock.hw_lock != NULL) {
				DRM_UNLOCK();
				free(map->handle, DRM_MEM_MAPS);
				free(map, DRM_MEM_MAPS);
				return EBUSY;
			}
			dev->lock.hw_lock = map->handle; /* Pointer to lock */
			DRM_UNLOCK();
		}
		break;
	case _DRM_AGP:
		/*valid = 0;*/
		/* In some cases (i810 driver), user space may have already
		 * added the AGP base itself, because dev->agp->base previously
		 * only got set during AGP enable.  So, only add the base
		 * address if the map's offset isn't already within the
		 * aperture.
		 */
		if (map->offset < dev->agp->base ||
		    map->offset > dev->agp->base +
		    dev->agp->info.ai_aperture_size - 1) {
			map->offset += dev->agp->base;
		}
		map->mtrr = dev->agp->mtrr; /* for getmap */
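		/* Worked example (editorial, illustrative numbers only): with
		 * an aperture at base 0xd0000000 of size 0x02000000, a
		 * requested offset of 0x00100000 lies outside
		 * [base, base + size - 1] and is rebased to 0xd0100000, while
		 * a request that already names 0xd0100000 is left untouched.
		 */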
		/*for (entry = dev->agp->memory; entry; entry = entry->next) {
			if ((map->offset >= entry->bound) &&
			    (map->offset + map->size <=
			    entry->bound + entry->pages * PAGE_SIZE)) {
				valid = 1;
				break;
			}
		}
		if (!valid) {
			free(map, DRM_MEM_MAPS);
			DRM_LOCK();
			return EACCES;
		}*/
		break;
	case _DRM_SCATTER_GATHER:
		if (!dev->sg) {
			free(map, DRM_MEM_MAPS);
			DRM_LOCK();
			return EINVAL;
		}
		map->offset += dev->sg->handle;
		break;
	case _DRM_CONSISTENT:
		/* Unfortunately, we don't get any alignment specification from
		 * the caller, so we have to guess.  drm_pci_alloc requires
		 * a power-of-two alignment, so try to align the bus address of
		 * the map to its size if possible, otherwise just assume
		 * PAGE_SIZE alignment.
		 */
		align = map->size;
		if ((align & (align - 1)) != 0)
			align = PAGE_SIZE;
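		/* Worked example (editorial): a 0x4000-byte map is a power of
		 * two, so its bus address gets 0x4000 alignment; a 0x3000-byte
		 * map fails the (align & (align - 1)) test and falls back to
		 * PAGE_SIZE alignment.
		 */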
		map->dmah = drm_pci_alloc(dev, map->size, align, 0xfffffffful);
		if (map->dmah == NULL) {
			free(map, DRM_MEM_MAPS);
			DRM_LOCK();
			return ENOMEM;
		}
		map->handle = map->dmah->vaddr;
		map->offset = map->dmah->busaddr;
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		free(map, DRM_MEM_MAPS);
		DRM_LOCK();
		return EINVAL;
	}

	DRM_LOCK();
	TAILQ_INSERT_TAIL(&dev->maplist, map, link);

done:
	/* Jumped to, with lock held, when a kernel map is found. */

	DRM_DEBUG("Added map %d 0x%lx/0x%lx\n", map->type, map->offset,
	    map->size);

	*map_ptr = map;

	return 0;
}
int drm_addmap_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
	struct drm_map *request = data;
	drm_local_map_t *map;
	int err;

	if (!(dev->flags & (FREAD|FWRITE)))
		return EACCES; /* Require read/write */

	if (!DRM_SUSER(DRM_CURPROC) && request->type != _DRM_AGP)
		return EACCES;

	DRM_LOCK();
	err = drm_addmap(dev, request->offset, request->size, request->type,
	    request->flags, &map);
	DRM_UNLOCK();
	if (err != 0)
		return err;

	request->offset = map->offset;
	request->size = map->size;
	request->type = map->type;
	request->flags = map->flags;
	request->mtrr = map->mtrr;
	request->handle = map->handle;

	if (request->type != _DRM_SHM) {
		request->handle = (void *)request->offset;
	}

	return 0;
}
void drm_rmmap(struct drm_device *dev, drm_local_map_t *map)
{
	DRM_SPINLOCK_ASSERT(&dev->dev_lock);

	TAILQ_REMOVE(&dev->maplist, map, link);

	switch (map->type) {
	case _DRM_REGISTERS:
#if defined(__FreeBSD__)
		if (map->bsr == NULL)
#endif
			drm_ioremapfree(map);
		/* FALLTHROUGH */
	case _DRM_FRAME_BUFFER:
		if (map->mtrr) {
			int __unused retcode;

			retcode = drm_mtrr_del(0, map->offset, map->size,
			    DRM_MTRR_WC);
			DRM_DEBUG("mtrr_del = %d\n", retcode);
		}
		break;
	case _DRM_SHM:
		free(map->handle, DRM_MEM_MAPS);
		break;
	case _DRM_AGP:
	case _DRM_SCATTER_GATHER:
		break;
	case _DRM_CONSISTENT:
		drm_pci_free(dev, map->dmah);
		break;
	default:
		DRM_ERROR("Bad map type %d\n", map->type);
		break;
	}

#if defined(__FreeBSD__)
	if (map->bsr != NULL) {
		bus_release_resource(dev->device, SYS_RES_MEMORY, map->rid,
		    map->bsr);
	}
#endif

	free(map, DRM_MEM_MAPS);
}
/* Remove a map private from list and deallocate resources if the mapping
 * isn't in use.
 */
int drm_rmmap_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
	drm_local_map_t *map;
	struct drm_map *request = data;

	DRM_LOCK();
	TAILQ_FOREACH(map, &dev->maplist, link) {
		if (map->handle == request->handle &&
		    map->flags & _DRM_REMOVABLE)
			break;
	}

	/* No match found. */
	if (map == NULL) {
		DRM_UNLOCK();
		return EINVAL;
	}

	drm_rmmap(dev, map);

	DRM_UNLOCK();

	return 0;
}
static void drm_cleanup_buf_error(struct drm_device *dev,
    drm_buf_entry_t *entry)
{
	int i;

	if (entry->seg_count) {
		for (i = 0; i < entry->seg_count; i++) {
			drm_pci_free(dev, entry->seglist[i]);
		}
		free(entry->seglist, DRM_MEM_SEGS);

		entry->seg_count = 0;
	}

	if (entry->buf_count) {
		for (i = 0; i < entry->buf_count; i++) {
			free(entry->buflist[i].dev_private, DRM_MEM_BUFS);
		}
		free(entry->buflist, DRM_MEM_BUFS);

		entry->buf_count = 0;
	}
}
static int drm_do_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	/*drm_agp_mem_t *agp_entry;
	int valid;*/
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = dev->agp->base + request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: 0x%lx\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	/* Make sure buffers are located in AGP memory that we own */
	/* Breaks MGA due to drm_alloc_agp not setting up entries for the
	 * memory.  Safe to ignore for now because these ioctls are still
	 * root-only.
	 */
	/*valid = 0;
	for (agp_entry = dev->agp->memory; agp_entry;
	    agp_entry = agp_entry->next) {
		if ((agp_offset >= agp_entry->bound) &&
		    (agp_offset + total * count <=
		    agp_entry->bound + agp_entry->pages * PAGE_SIZE)) {
			valid = 1;
			break;
		}
	}
	if (!valid) {
		DRM_DEBUG("zone invalid\n");
		return EINVAL;
	}*/

	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	if (!entry->buflist) {
		return ENOMEM;
	}

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf          = &entry->buflist[entry->buf_count];
		buf->idx     = dma->buf_count + entry->buf_count;
		buf->total   = alignment;
		buf->order   = order;
		buf->used    = 0;

		buf->offset  = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset);
		buf->next    = NULL;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
		    M_NOWAIT | M_ZERO);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return ENOMEM;
		}

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_AGP;

	return 0;
}
static int drm_do_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	int count;
	int order;
	int size;
	int total;
	int page_order;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	int alignment;
	unsigned long offset;
	int i;
	int byte_count;
	int page_count;
	unsigned long *temp_pagelist;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	DRM_DEBUG("count=%d, size=%d (%d), order=%d\n",
	    request->count, request->size, size, order);

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	entry->seglist = malloc(count * sizeof(*entry->seglist), DRM_MEM_SEGS,
	    M_NOWAIT | M_ZERO);

	/* Keep the original pagelist until we know all the allocations
	 * have succeeded
	 */
	temp_pagelist = malloc((dma->page_count + (count << page_order)) *
	    sizeof(*dma->pagelist), DRM_MEM_PAGES, M_NOWAIT);

	if (entry->buflist == NULL || entry->seglist == NULL ||
	    temp_pagelist == NULL) {
		if (temp_pagelist != NULL)
			free(temp_pagelist, DRM_MEM_PAGES);
		if (entry->seglist != NULL)
			free(entry->seglist, DRM_MEM_SEGS);
		if (entry->buflist != NULL)
			free(entry->buflist, DRM_MEM_BUFS);
		return ENOMEM;
	}

	memcpy(temp_pagelist, dma->pagelist, dma->page_count *
	    sizeof(*dma->pagelist));

	DRM_DEBUG("pagelist: %d entries\n",
	    dma->page_count + (count << page_order));

	entry->buf_size = size;
	entry->page_order = page_order;
	byte_count = 0;
	page_count = 0;

	while (entry->buf_count < count) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		drm_dma_handle_t *dmah = drm_pci_alloc(dev, size, alignment,
		    0xfffffffful);
		DRM_SPINLOCK(&dev->dma_lock);
		if (dmah == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			entry->seg_count = count;
			drm_cleanup_buf_error(dev, entry);
			free(temp_pagelist, DRM_MEM_PAGES);
			return ENOMEM;
		}

		entry->seglist[entry->seg_count++] = dmah;
		for (i = 0; i < (1 << page_order); i++) {
			DRM_DEBUG("page %d @ %p\n",
			    dma->page_count + page_count,
			    (char *)dmah->vaddr + PAGE_SIZE * i);
			temp_pagelist[dma->page_count + page_count++] =
			    (long)dmah->vaddr + PAGE_SIZE * i;
		}
		for (offset = 0;
		    offset + size <= total && entry->buf_count < count;
		    offset += alignment, ++entry->buf_count) {
			buf          = &entry->buflist[entry->buf_count];
			buf->idx     = dma->buf_count + entry->buf_count;
			buf->total   = alignment;
			buf->order   = order;
			buf->used    = 0;
			buf->offset  = (dma->byte_count + byte_count + offset);
			buf->address = ((char *)dmah->vaddr + offset);
			buf->bus_address = dmah->busaddr + offset;
			buf->next    = NULL;
			buf->pending = 0;
			buf->file_priv = NULL;

			buf->dev_priv_size = dev->driver->buf_priv_size;
			buf->dev_private = malloc(buf->dev_priv_size,
			    DRM_MEM_BUFS, M_NOWAIT | M_ZERO);
			if (buf->dev_private == NULL) {
				/* Set count correctly so we free the proper amount. */
				entry->buf_count = count;
				entry->seg_count = count;
				drm_cleanup_buf_error(dev, entry);
				free(temp_pagelist, DRM_MEM_PAGES);
				return ENOMEM;
			}

			DRM_DEBUG("buffer %d @ %p\n",
			    entry->buf_count, buf->address);
		}
		byte_count += PAGE_SIZE << page_order;
	}

	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		free(temp_pagelist, DRM_MEM_PAGES);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	/* No allocations failed, so now we can replace the original pagelist
	 * with the new one.
	 */
	free(dma->pagelist, DRM_MEM_PAGES);
	dma->pagelist = temp_pagelist;

	dma->buf_count += entry->buf_count;
	dma->seg_count += entry->seg_count;
	dma->page_count += entry->seg_count << page_order;
	dma->byte_count += PAGE_SIZE * (entry->seg_count << page_order);

	request->count = entry->buf_count;
	request->size = size;

	return 0;
}
static int drm_do_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
	drm_device_dma_t *dma = dev->dma;
	drm_buf_entry_t *entry;
	drm_buf_t *buf;
	unsigned long offset;
	unsigned long agp_offset;
	int count;
	int order;
	int size;
	int alignment;
	int page_order;
	int total;
	int byte_count;
	int i;
	drm_buf_t **temp_buflist;

	count = request->count;
	order = drm_order(request->size);
	size = 1 << order;

	alignment = (request->flags & _DRM_PAGE_ALIGN)
	    ? round_page(size) : size;
	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
	total = PAGE_SIZE << page_order;

	byte_count = 0;
	agp_offset = request->agp_start;

	DRM_DEBUG("count:      %d\n", count);
	DRM_DEBUG("order:      %d\n", order);
	DRM_DEBUG("size:       %d\n", size);
	DRM_DEBUG("agp_offset: %ld\n", agp_offset);
	DRM_DEBUG("alignment:  %d\n", alignment);
	DRM_DEBUG("page_order: %d\n", page_order);
	DRM_DEBUG("total:      %d\n", total);

	entry = &dma->bufs[order];

	entry->buflist = malloc(count * sizeof(*entry->buflist), DRM_MEM_BUFS,
	    M_NOWAIT | M_ZERO);
	if (entry->buflist == NULL)
		return ENOMEM;

	entry->buf_size = size;
	entry->page_order = page_order;

	offset = 0;

	while (entry->buf_count < count) {
		buf          = &entry->buflist[entry->buf_count];
		buf->idx     = dma->buf_count + entry->buf_count;
		buf->total   = alignment;
		buf->order   = order;
		buf->used    = 0;

		buf->offset  = (dma->byte_count + offset);
		buf->bus_address = agp_offset + offset;
		buf->address = (void *)(agp_offset + offset + dev->sg->handle);
		buf->next    = NULL;
		buf->pending = 0;
		buf->file_priv = NULL;

		buf->dev_priv_size = dev->driver->buf_priv_size;
		buf->dev_private = malloc(buf->dev_priv_size, DRM_MEM_BUFS,
		    M_NOWAIT | M_ZERO);
		if (buf->dev_private == NULL) {
			/* Set count correctly so we free the proper amount. */
			entry->buf_count = count;
			drm_cleanup_buf_error(dev, entry);
			return ENOMEM;
		}

		DRM_DEBUG("buffer %d @ %p\n",
		    entry->buf_count, buf->address);

		offset += alignment;
		entry->buf_count++;
		byte_count += PAGE_SIZE << page_order;
	}

	DRM_DEBUG("byte_count: %d\n", byte_count);

	temp_buflist = realloc(dma->buflist,
	    (dma->buf_count + entry->buf_count) * sizeof(*dma->buflist),
	    DRM_MEM_BUFS, M_NOWAIT);
	if (temp_buflist == NULL) {
		/* Free the entry because it isn't valid */
		drm_cleanup_buf_error(dev, entry);
		return ENOMEM;
	}
	dma->buflist = temp_buflist;

	for (i = 0; i < entry->buf_count; i++) {
		dma->buflist[i + dma->buf_count] = &entry->buflist[i];
	}

	dma->buf_count += entry->buf_count;
	dma->byte_count += byte_count;

	DRM_DEBUG("dma->buf_count : %d\n", dma->buf_count);
	DRM_DEBUG("entry->buf_count : %d\n", entry->buf_count);

	request->count = entry->buf_count;
	request->size = size;

	dma->flags = _DRM_DMA_USE_SG;

	return 0;
}
int drm_addbufs_agp(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	DRM_SPINLOCK(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_agp(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return ret;
}
int drm_addbufs_sg(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (!DRM_SUSER(DRM_CURPROC))
		return EACCES;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	DRM_SPINLOCK(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_sg(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return ret;
}
int drm_addbufs_pci(struct drm_device *dev, struct drm_buf_desc *request)
{
	int order, ret;

	if (!DRM_SUSER(DRM_CURPROC))
		return EACCES;

	if (request->count < 0 || request->count > 4096)
		return EINVAL;

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
		return EINVAL;

	DRM_SPINLOCK(&dev->dma_lock);

	/* No more allocations after first buffer-using ioctl. */
	if (dev->buf_use != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EBUSY;
	}
	/* No more than one allocation per order */
	if (dev->dma->bufs[order].buf_count != 0) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return ENOMEM;
	}

	ret = drm_do_addbufs_pci(dev, request);

	DRM_SPINUNLOCK(&dev->dma_lock);

	return ret;
}
int drm_addbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct drm_buf_desc *request = data;
	int err;

	if (request->flags & _DRM_AGP_BUFFER)
		err = drm_addbufs_agp(dev, request);
	else if (request->flags & _DRM_SG_BUFFER)
		err = drm_addbufs_sg(dev, request);
	else
		err = drm_addbufs_pci(dev, request);

	return err;
}
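
/*
 * Usage sketch (editorial, not part of the original file): userspace reaches
 * this dispatcher through the DRM_IOCTL_ADD_BUFS ioctl with a drm_buf_desc.
 * The request values below are assumptions for illustration only.
 *
 *	struct drm_buf_desc desc;
 *
 *	memset(&desc, 0, sizeof(desc));
 *	desc.count = 32;
 *	desc.size  = 65536;
 *	desc.flags = _DRM_AGP_BUFFER | _DRM_PAGE_ALIGN;
 *	desc.agp_start = 0;	(hypothetical aperture offset)
 *	if (ioctl(fd, DRM_IOCTL_ADD_BUFS, &desc) == 0)
 *		desc.count now holds the number actually allocated.
 */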
int drm_infobufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_info *request = data;
	int i;
	int count;
	int retcode = 0;

	DRM_SPINLOCK(&dev->dma_lock);
	++dev->buf_use;		/* Can't allocate more after this call */
	DRM_SPINUNLOCK(&dev->dma_lock);

	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
		if (dma->bufs[i].buf_count)
			++count;
	}

	DRM_DEBUG("count = %d\n", count);

	if (request->count >= count) {
		for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
			if (dma->bufs[i].buf_count) {
				struct drm_buf_desc from;

				from.count = dma->bufs[i].buf_count;
				from.size = dma->bufs[i].buf_size;
				from.low_mark = dma->bufs[i].freelist.low_mark;
				from.high_mark = dma->bufs[i].freelist.high_mark;

				if (DRM_COPY_TO_USER(&request->list[count], &from,
				    sizeof(struct drm_buf_desc)) != 0) {
					retcode = EFAULT;
					break;
				}

				DRM_DEBUG("%d %d %d %d %d\n",
				    i, dma->bufs[i].buf_count,
				    dma->bufs[i].buf_size,
				    dma->bufs[i].freelist.low_mark,
				    dma->bufs[i].freelist.high_mark);
				++count;
			}
		}
	}
	request->count = count;

	return retcode;
}
int drm_markbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_desc *request = data;
	int order;

	DRM_DEBUG("%d, %d, %d\n",
	    request->size, request->low_mark, request->high_mark);

	order = drm_order(request->size);
	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER ||
	    request->low_mark < 0 || request->high_mark < 0) {
		return EINVAL;
	}

	DRM_SPINLOCK(&dev->dma_lock);
	if (request->low_mark > dma->bufs[order].buf_count ||
	    request->high_mark > dma->bufs[order].buf_count) {
		DRM_SPINUNLOCK(&dev->dma_lock);
		return EINVAL;
	}

	dma->bufs[order].freelist.low_mark = request->low_mark;
	dma->bufs[order].freelist.high_mark = request->high_mark;
	DRM_SPINUNLOCK(&dev->dma_lock);

	return 0;
}
int drm_freebufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	struct drm_buf_free *request = data;
	int i;
	int idx;
	drm_buf_t *buf;
	int retcode = 0;

	DRM_DEBUG("%d\n", request->count);

	DRM_SPINLOCK(&dev->dma_lock);
	for (i = 0; i < request->count; i++) {
		if (DRM_COPY_FROM_USER(&idx, &request->list[i], sizeof(idx))) {
			retcode = EFAULT;
			break;
		}
		if (idx < 0 || idx >= dma->buf_count) {
			DRM_ERROR("Index %d (of %d max)\n",
			    idx, dma->buf_count - 1);
			retcode = EINVAL;
			break;
		}
		buf = dma->buflist[idx];
		if (buf->file_priv != file_priv) {
			DRM_ERROR("Process %d freeing buffer not owned\n",
			    DRM_CURRENTPID);
			retcode = EINVAL;
			break;
		}
		drm_free_buffer(dev, buf);
	}
	DRM_SPINUNLOCK(&dev->dma_lock);

	return retcode;
}
int drm_mapbufs(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	drm_device_dma_t *dma = dev->dma;
	int retcode = 0;
	const int zero = 0;
	vm_offset_t address;
	struct vmspace *vms;
#if defined(__FreeBSD__)
	vm_ooffset_t foff;
	vm_size_t size;
	vm_offset_t vaddr;
#elif defined(__NetBSD__)
	struct vnode *vn;
	voff_t foff;
	vsize_t size, rsize;
	vaddr_t vaddr;
#endif

	struct drm_buf_map *request = data;
	int i;

#if defined(__NetBSD__)
	if (!vfinddev(dev->kdev, VCHR, &vn))
		return 0;	/* FIXME: Shouldn't this be EINVAL or something? */
#endif /* __NetBSD__ */

#if defined(__FreeBSD__)
	vms = DRM_CURPROC->td_proc->p_vmspace;
#elif defined(__NetBSD__)
	vms = DRM_CURPROC->p_vmspace;
#endif

	DRM_SPINLOCK(&dev->dma_lock);
	dev->buf_use++;		/* Can't allocate more after this call */
	DRM_SPINUNLOCK(&dev->dma_lock);

	if (request->count < dma->buf_count)
		goto done;

	if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP)) ||
	    (drm_core_check_feature(dev, DRIVER_SG) &&
	    (dma->flags & _DRM_DMA_USE_SG))) {
		drm_local_map_t *map = dev->agp_buffer_map;

		if (map == NULL) {
			retcode = EINVAL;
			goto done;
		}
		size = round_page(map->size);
		foff = map->offset;
	} else {
		size = round_page(dma->byte_count);
		foff = 0;
	}

#if defined(__FreeBSD__)
	vaddr = round_page((vm_offset_t)vms->vm_daddr + MAXDSIZ);
#if __FreeBSD_version >= 600023
	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
	    VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC, OBJT_DEVICE, dev->devnode, foff);
#else
	retcode = vm_mmap(&vms->vm_map, &vaddr, size, PROT_READ | PROT_WRITE,
	    VM_PROT_ALL, MAP_SHARED | MAP_NOSYNC, SLIST_FIRST(&dev->devnode->si_hlist),
	    foff);
#endif
#elif defined(__NetBSD__)
	vaddr = curlwp->l_proc->p_emul->e_vm_default_addr(curlwp->l_proc,
	    (vaddr_t)vms->vm_daddr, size);
	rsize = round_page(size);
	DRM_DEBUG("mmap %#lx/%#lx foff %#llx\n", vaddr, rsize, (long long)foff);
	retcode = uvm_mmap(&vms->vm_map, &vaddr, rsize,
	    UVM_PROT_READ | UVM_PROT_WRITE, UVM_PROT_ALL, MAP_SHARED,
	    &vn->v_uobj, foff, curproc->p_rlimit[RLIMIT_MEMLOCK].rlim_cur);
#endif
	if (retcode)
		goto done;

	request->virtual = (void *)vaddr;

	for (i = 0; i < dma->buf_count; i++) {
		if (DRM_COPY_TO_USER(&request->list[i].idx,
		    &dma->buflist[i]->idx, sizeof(request->list[0].idx))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request->list[i].total,
		    &dma->buflist[i]->total, sizeof(request->list[0].total))) {
			retcode = EFAULT;
			goto done;
		}
		if (DRM_COPY_TO_USER(&request->list[i].used, &zero,
		    sizeof(zero))) {
			retcode = EFAULT;
			goto done;
		}
		address = vaddr + dma->buflist[i]->offset; /* *** */
		if (DRM_COPY_TO_USER(&request->list[i].address, &address,
		    sizeof(address))) {
			retcode = EFAULT;
			goto done;
		}
	}

 done:
	request->count = dma->buf_count;

	DRM_DEBUG("%d buffers, retcode = %d\n", request->count, retcode);

	return retcode;
}
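
/*
 * Editorial note (not from the original file): on success of the
 * DRM_IOCTL_MAP_BUFS path above, userspace receives one contiguous mapping at
 * request->virtual, and each request->list[i] entry carries a buffer's index,
 * size, and address inside that mapping, so no per-buffer mmap is needed.
 */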
/*
 * Compute order.  Can be made faster.
 */
int drm_order(unsigned long size)
{
#if defined(__FreeBSD__)
	int order;

	if (size == 0)
		return 0;

	order = flsl(size) - 1;
	if (size & ~(1ul << order))
		++order;

	return order;
#elif defined(__NetBSD__)
	int order;
	unsigned long tmp;

	for (order = 0, tmp = size; tmp >>= 1; ++order)
		;

	/* Use 1ul so the test stays correct for sizes above 2^31. */
	if (size & ~(1ul << order))
		++order;

	return order;
#endif
}