/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#if defined(__ia64__)
#include <linux/efi.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);
/**
 * \c nopage method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if __OS_HAS_AGP
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
						unsigned long address)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t *map = NULL;
	drm_map_list_t *r_list;
	drm_hash_item_t *hash;

	/*
	 * Find the right map
	 */
	if (!drm_core_has_AGP(dev))
		goto vm_nopage_error;

	if (!dev->agp || !dev->agp->cant_use_aperture)
		goto vm_nopage_error;

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff << PAGE_SHIFT, &hash))
		goto vm_nopage_error;

	r_list = drm_hash_entry(hash, drm_map_list_t, hash);
	map = r_list->map;

	if (map && map->type == _DRM_AGP) {
		unsigned long offset = address - vma->vm_start;
		unsigned long baddr = map->offset + offset;
		struct drm_agp_mem *agpmem;
		struct page *page;

#ifdef __alpha__
		/*
		 * Adjust to a bus-relative address
		 */
		baddr -= dev->hose->mem_space->start;
#endif

		/*
		 * It's AGP memory - find the real physical page to map
		 */
		for (agpmem = dev->agp->memory; agpmem; agpmem = agpmem->next) {
			if (agpmem->bound <= baddr &&
			    agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
				break;
		}

		if (!agpmem)
			goto vm_nopage_error;

		/*
		 * Get the page, inc the use count, and return it
		 */
		offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
		page = virt_to_page(__va(agpmem->memory->memory[offset]));
		get_page(page);

		DRM_DEBUG
		    ("baddr = 0x%lx page = 0x%p, offset = 0x%lx, count=%d\n",
		     baddr, __va(agpmem->memory->memory[offset]), offset,
		     page_count(page));

		return page;
	}
      vm_nopage_error:
	return NOPAGE_SIGBUS;	/* Disallow mremap */
}
#else				/* __OS_HAS_AGP */
static __inline__ struct page *drm_do_vm_nopage(struct vm_area_struct *vma,
						unsigned long address)
{
	return NOPAGE_SIGBUS;
}
#endif				/* __OS_HAS_AGP */
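
/*
 * Worked example for drm_do_vm_nopage() above (illustrative numbers, not
 * taken from real hardware): if map->offset is 0xe0000000 and the fault
 * hits vma->vm_start + 0x3000, then baddr = 0xe0003000.  With an agpmem
 * block bound at 0xe0000000 that covers enough pages, the index is
 * (baddr - agpmem->bound) >> PAGE_SHIFT = 3, so agpmem->memory->memory[3]
 * supplies the physical address of the page backing the faulting address.
 */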
/**
 * \c nopage method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static __inline__ struct page *drm_do_vm_shm_nopage(struct vm_area_struct *vma,
						    unsigned long address)
{
	drm_map_t *map = (drm_map_t *) vma->vm_private_data;
	unsigned long offset;
	unsigned long i;
	struct page *page;

	if (address > vma->vm_end)
		return NOPAGE_SIGBUS;	/* Disallow mremap */
	if (!map)
		return NOPAGE_SIGBUS;	/* Nothing allocated */

	offset = address - vma->vm_start;
	i = (unsigned long)map->handle + offset;
	page = (map->type == _DRM_CONSISTENT) ?
	    virt_to_page((void *)i) : vmalloc_to_page((void *)i);
	if (!page)
		return NOPAGE_SIGBUS;
	get_page(page);

	DRM_DEBUG("shm_nopage 0x%lx\n", address);
	return page;
}
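
/*
 * Note on drm_do_vm_shm_nopage() above: the branch on map->type exists
 * because the two map kinds use different allocators (see the map setup
 * code in drm_bufs.c).  _DRM_SHM handles come from vmalloc(), whose pages
 * must be resolved with vmalloc_to_page(); _DRM_CONSISTENT handles come
 * from the PCI consistent allocator and live in the kernel direct
 * mapping, so plain virt_to_page() is enough.
 */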
/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_vma_entry_t *pt, *prev, *next;
	drm_map_t *map;
	drm_map_list_t *r_list;
	struct list_head *list;
	int found_maps = 0;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	map = vma->vm_private_data;

	mutex_lock(&dev->struct_mutex);
	for (pt = dev->vmalist, prev = NULL; pt; pt = next) {
		next = pt->next;
		if (pt->vma->vm_private_data == map)
			found_maps++;
		if (pt->vma == vma) {
			if (prev) {
				prev->next = pt->next;
			} else {
				dev->vmalist = pt->next;
			}
			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
		} else {
			prev = pt;
		}
	}
	/* We were the only map that was found */
	if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
		/* Check to see if we are in the maplist; if we are not,
		 * then we delete this mapping's information.
		 */
		found_maps = 0;
		list = &dev->maplist->head;
		list_for_each(list, &dev->maplist->head) {
			r_list = list_entry(list, drm_map_list_t, head);
			if (r_list->map == map)
				found_maps++;
		}

		if (!found_maps) {
			drm_dma_handle_t dmah;

			switch (map->type) {
			case _DRM_REGISTERS:
			case _DRM_FRAME_BUFFER:
				if (drm_core_has_MTRR(dev) && map->mtrr >= 0) {
					int retcode;
					retcode = mtrr_del(map->mtrr,
							   map->offset,
							   map->size);
					DRM_DEBUG("mtrr_del = %d\n", retcode);
				}
				drm_ioremapfree(map->handle, map->size, dev);
				break;
			case _DRM_SHM:
				vfree(map->handle);
				break;
			case _DRM_AGP:
			case _DRM_SCATTER_GATHER:
				break;
			case _DRM_CONSISTENT:
				dmah.vaddr = map->handle;
				dmah.busaddr = map->offset;
				dmah.size = map->size;
				__drm_pci_free(dev, &dmah);
				break;
			}
			drm_free(map, sizeof(*map), DRM_MEM_MAPS);
		}
	}
	mutex_unlock(&dev->struct_mutex);
}
/**
 * \c nopage method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the page number from the page offset and get it from
 * drm_device_dma::pagelist.
 */
static __inline__ struct page *drm_do_vm_dma_nopage(struct vm_area_struct *vma,
						    unsigned long address)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_device_dma_t *dma = dev->dma;
	unsigned long offset;
	unsigned long page_nr;
	struct page *page;

	if (!dma)
		return NOPAGE_SIGBUS;	/* Error */
	if (address > vma->vm_end)
		return NOPAGE_SIGBUS;	/* Disallow mremap */
	if (!dma->pagelist)
		return NOPAGE_SIGBUS;	/* Nothing allocated */

	offset = address - vma->vm_start;	/* vm_[pg]off[set] should be 0 */
	page_nr = offset >> PAGE_SHIFT;
	page = virt_to_page((dma->pagelist[page_nr] + (offset & (~PAGE_MASK))));

	get_page(page);

	DRM_DEBUG("dma_nopage 0x%lx (page %lu)\n", address, page_nr);
	return page;
}
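
/*
 * Worked example for drm_do_vm_dma_nopage() above (illustrative): a fault
 * 0x5234 bytes into the mapping gives page_nr = 0x5234 >> PAGE_SHIFT = 5
 * with 4K pages, so dma->pagelist[5] is the kernel virtual address of the
 * backing page.  Adding the sub-page offset (offset & ~PAGE_MASK) before
 * virt_to_page() does not change which page is returned.
 */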
/**
 * \c nopage method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param address access address.
 * \return pointer to the page structure.
 *
 * Determine the map offset from the page offset and get it from
 * drm_sg_mem::pagelist.
 */
static __inline__ struct page *drm_do_vm_sg_nopage(struct vm_area_struct *vma,
						   unsigned long address)
{
	drm_map_t *map = (drm_map_t *) vma->vm_private_data;
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_sg_mem_t *entry = dev->sg;
	unsigned long offset;
	unsigned long map_offset;
	unsigned long page_offset;
	struct page *page;

	if (!entry)
		return NOPAGE_SIGBUS;	/* Error */
	if (address > vma->vm_end)
		return NOPAGE_SIGBUS;	/* Disallow mremap */
	if (!entry->pagelist)
		return NOPAGE_SIGBUS;	/* Nothing allocated */

	offset = address - vma->vm_start;
	map_offset = map->offset - (unsigned long)dev->sg->virtual;
	page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
	page = entry->pagelist[page_offset];
	get_page(page);

	return page;
}
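
/*
 * Note on drm_do_vm_sg_nopage() above: two offsets are combined.  The
 * fault offset locates the page within this VMA, and map_offset locates
 * this map within the whole scatter-gather area (map->offset is stored
 * relative to dev->sg->virtual when the map is created).  Their sum, in
 * pages, indexes drm_sg_mem::pagelist directly.
 */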
static struct page *drm_vm_nopage(struct vm_area_struct *vma,
				  unsigned long address, int *type)
{
	if (type)
		*type = VM_FAULT_MINOR;
	return drm_do_vm_nopage(vma, address);
}

static struct page *drm_vm_shm_nopage(struct vm_area_struct *vma,
				      unsigned long address, int *type)
{
	if (type)
		*type = VM_FAULT_MINOR;
	return drm_do_vm_shm_nopage(vma, address);
}

static struct page *drm_vm_dma_nopage(struct vm_area_struct *vma,
				      unsigned long address, int *type)
{
	if (type)
		*type = VM_FAULT_MINOR;
	return drm_do_vm_dma_nopage(vma, address);
}

static struct page *drm_vm_sg_nopage(struct vm_area_struct *vma,
				     unsigned long address, int *type)
{
	if (type)
		*type = VM_FAULT_MINOR;
	return drm_do_vm_sg_nopage(vma, address);
}
/** AGP virtual memory operations */
static struct vm_operations_struct drm_vm_ops = {
	.nopage = drm_vm_nopage,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Shared virtual memory operations */
static struct vm_operations_struct drm_vm_shm_ops = {
	.nopage = drm_vm_shm_nopage,
	.open = drm_vm_open,
	.close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static struct vm_operations_struct drm_vm_dma_ops = {
	.nopage = drm_vm_dma_nopage,
	.open = drm_vm_open,
	.close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static struct vm_operations_struct drm_vm_sg_ops = {
	.nopage = drm_vm_sg_nopage,
	.open = drm_vm_open,
	.close = drm_vm_close,
};
/**
 * \c open method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
static void drm_vm_open(struct vm_area_struct *vma)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_vma_entry_t *vma_entry;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_inc(&dev->vma_count);

	vma_entry = drm_alloc(sizeof(*vma_entry), DRM_MEM_VMAS);
	if (vma_entry) {
		mutex_lock(&dev->struct_mutex);
		vma_entry->vma = vma;
		vma_entry->next = dev->vmalist;
		vma_entry->pid = current->pid;
		dev->vmalist = vma_entry;
		mutex_unlock(&dev->struct_mutex);
	}
}
/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
	drm_file_t *priv = vma->vm_file->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_vma_entry_t *pt, *prev;

	DRM_DEBUG("0x%08lx,0x%08lx\n",
		  vma->vm_start, vma->vm_end - vma->vm_start);
	atomic_dec(&dev->vma_count);

	mutex_lock(&dev->struct_mutex);
	for (pt = dev->vmalist, prev = NULL; pt; prev = pt, pt = pt->next) {
		if (pt->vma == vma) {
			if (prev) {
				prev->next = pt->next;
			} else {
				dev->vmalist = pt->next;
			}
			drm_free(pt, sizeof(*pt), DRM_MEM_VMAS);
			break;
		}
	}
	mutex_unlock(&dev->struct_mutex);
}
/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to drm_vm_dma_ops and
 * the file pointer, then calls drm_vm_open().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev;
	drm_device_dma_t *dma;
	unsigned long length = vma->vm_end - vma->vm_start;

	lock_kernel();
	dev = priv->head->dev;
	dma = dev->dma;
	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff << PAGE_SHIFT);

	/* Length must match exact page count */
	if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
		unlock_kernel();
		return -EINVAL;
	}
	unlock_kernel();

	if (!capable(CAP_SYS_ADMIN) &&
	    (dma->flags & _DRM_DMA_USE_PCI_RO)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	vma->vm_ops = &drm_vm_dma_ops;

	vma->vm_flags |= VM_RESERVED;	/* Don't swap */

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open(vma);
	return 0;
}
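
/*
 * Illustrative userspace sketch (not part of this file): a client reaches
 * drm_mmap_dma() by passing offset 0 to mmap(2), and the length must match
 * the DMA buffer area exactly.  The names below are hypothetical.
 *
 *	int fd = open("/dev/dri/card0", O_RDWR);
 *	void *buf = mmap(NULL, dma_area_size,	// == page_count << PAGE_SHIFT
 *			 PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 */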
unsigned long drm_core_get_map_ofs(drm_map_t * map)
{
	return map->offset;
}

EXPORT_SYMBOL(drm_core_get_map_ofs);

unsigned long drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base - dev->hose->mem_space->start;
#else
	return 0;
#endif
}

EXPORT_SYMBOL(drm_core_get_reg_ofs);
/**
 * mmap DRM memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls drm_mmap_dma(). Otherwise searches for the map in
 * drm_device::maplist, checks that the restricted flag is not set, sets the
 * virtual memory operations according to the mapping type and remaps the
 * pages. Finally sets the file pointer and calls drm_vm_open().
 */
int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
	drm_file_t *priv = filp->private_data;
	drm_device_t *dev = priv->head->dev;
	drm_map_t *map = NULL;
	unsigned long offset = 0;
	drm_hash_item_t *hash;

	DRM_DEBUG("start = 0x%lx, end = 0x%lx, offset = 0x%lx\n",
		  vma->vm_start, vma->vm_end, vma->vm_pgoff << PAGE_SHIFT);

	if (!priv->authenticated)
		return -EACCES;

	/* We check for "dma". On Apple's UniNorth, it's valid to have
	 * the AGP mapped at physical address 0
	 * --BenH.
	 */
	if (!(vma->vm_pgoff << PAGE_SHIFT)
#if __OS_HAS_AGP
	    && (!dev->agp
		|| dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
	    )
		return drm_mmap_dma(filp, vma);

	if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff << PAGE_SHIFT, &hash)) {
		DRM_ERROR("Could not find map\n");
		return -EINVAL;
	}

	map = drm_hash_entry(hash, drm_map_list_t, hash)->map;
	if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
		return -EPERM;

	/* Check for valid size. */
	if (map->size != vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
		vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
		pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
		/* Ye gads this is ugly.  With more thought
		   we could move this up higher and use
		   `protection_map' instead.  */
		vma->vm_page_prot =
		    __pgprot(pte_val
			     (pte_wrprotect
			      (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
	}

	switch (map->type) {
	case _DRM_AGP:
		if (drm_core_has_AGP(dev) && dev->agp->cant_use_aperture) {
			/*
			 * On some platforms we can't talk to bus dma address from the CPU, so for
			 * memory of type DRM_AGP, we'll deal with sorting out the real physical
			 * pages and mappings in nopage()
			 */
#if defined(__powerpc__)
			pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
			vma->vm_ops = &drm_vm_ops;
			break;
		}
		/* fall through to _DRM_FRAME_BUFFER... */
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
#if defined(__i386__) || defined(__x86_64__)
		if (boot_cpu_data.x86 > 3 && map->type != _DRM_AGP) {
			pgprot_val(vma->vm_page_prot) |= _PAGE_PCD;
			pgprot_val(vma->vm_page_prot) &= ~_PAGE_PWT;
		}
#elif defined(__powerpc__)
		pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
		if (map->type == _DRM_REGISTERS)
			pgprot_val(vma->vm_page_prot) |= _PAGE_GUARDED;
#endif
		vma->vm_flags |= VM_IO;	/* not in core dump */
#if defined(__ia64__)
		if (efi_range_is_wc(vma->vm_start, vma->vm_end - vma->vm_start))
			vma->vm_page_prot =
			    pgprot_writecombine(vma->vm_page_prot);
		else
			vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
#endif
		offset = dev->driver->get_reg_ofs(dev);
#ifdef __sparc__
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
#else
		if (io_remap_pfn_range(vma, vma->vm_start,
				       (map->offset + offset) >> PAGE_SHIFT,
				       vma->vm_end - vma->vm_start,
				       vma->vm_page_prot))
#endif
			return -EAGAIN;
		DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
			  " offset = 0x%lx\n",
			  map->type,
			  vma->vm_start, vma->vm_end, map->offset + offset);
		vma->vm_ops = &drm_vm_ops;
		break;
	case _DRM_SHM:
	case _DRM_CONSISTENT:
		/* Consistent memory is really like shared memory. It's only
		 * allocated in a different way. */
		vma->vm_ops = &drm_vm_shm_ops;
		vma->vm_private_data = (void *)map;
		/* Don't let this area swap.  Change when
		   DRM_KERNEL advisory is supported. */
		vma->vm_flags |= VM_RESERVED;
		break;
	case _DRM_SCATTER_GATHER:
		vma->vm_ops = &drm_vm_sg_ops;
		vma->vm_private_data = (void *)map;
		vma->vm_flags |= VM_RESERVED;
		break;
	default:
		return -EINVAL;	/* This should never happen. */
	}
	vma->vm_flags |= VM_RESERVED;	/* Don't swap */

	vma->vm_file = filp;	/* Needed for drm_vm_open() */
	drm_vm_open(vma);
	return 0;
}

EXPORT_SYMBOL(drm_mmap);
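
/*
 * Illustrative userspace sketch (not part of this file): mappings other
 * than DMA are addressed by the token handed out when the map was created
 * (e.g. via the ADD_MAP ioctl or libdrm's drmAddMap()); passing that token
 * as the mmap offset lets drm_mmap() find the map in dev->map_hash.  The
 * variable names are hypothetical.
 *
 *	int fd = open("/dev/dri/card0", O_RDWR);
 *	void *regs = mmap(NULL, map_size, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, map_token);
 */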