/**
 * \file drm_vm.c
 * Memory mapping for DRM
 *
 * \author Rickard E. (Rik) Faith <faith@valinux.com>
 * \author Gareth Hughes <gareth@valinux.com>
 */

/*
 * Created: Mon Jan  4 08:58:31 1999 by faith@valinux.com
 *
 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include <linux/export.h>
#if defined(__ia64__)
#include <linux/efi.h>
#include <linux/slab.h>
#endif

static void drm_vm_open(struct vm_area_struct *vma);
static void drm_vm_close(struct vm_area_struct *vma);

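/*
 * Compute the page protection for an I/O map (registers or frame buffer),
 * applying the architecture's caching attributes: write-combining where the
 * platform and the map's flags allow it, uncached otherwise.
 */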
static pgprot_t drm_io_prot(struct drm_local_map *map,
                            struct vm_area_struct *vma)
{
        pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__i386__) || defined(__x86_64__)
        if (map->type == _DRM_REGISTERS && !(map->flags & _DRM_WRITE_COMBINING))
                tmp = pgprot_noncached(tmp);
        else
                tmp = pgprot_writecombine(tmp);
#elif defined(__powerpc__)
        pgprot_val(tmp) |= _PAGE_NO_CACHE;
        if (map->type == _DRM_REGISTERS)
                pgprot_val(tmp) |= _PAGE_GUARDED;
#elif defined(__ia64__)
        if (efi_range_is_wc(vma->vm_start, vma->vm_end -
                            vma->vm_start))
                tmp = pgprot_writecombine(tmp);
        else
                tmp = pgprot_noncached(tmp);
#elif defined(__sparc__) || defined(__arm__) || defined(__mips__)
        tmp = pgprot_noncached(tmp);
#endif
        return tmp;
}

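/*
 * Compute the page protection for a DMA or scatter-gather map. Only
 * non-cache-coherent PowerPC needs anything beyond the default protection
 * here.
 */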
static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
{
        pgprot_t tmp = vm_get_page_prot(vma->vm_flags);

#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
        pgprot_val(tmp) |= _PAGE_NO_CACHE;
#endif
        return tmp;
}

/**
 * \c fault method for AGP virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault details, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure; the faulted page
 * is returned through \p vmf.
 *
 * Find the right map and if it's AGP memory find the real physical page to
 * map, get the page, increment the use count and return it.
 */
#if __OS_HAS_AGP
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_local_map *map = NULL;
        struct drm_map_list *r_list;
        struct drm_hash_item *hash;

        /*
         * Find the right map; without AGP, or with an aperture the CPU can
         * address directly, there is nothing for the fault handler to do.
         */
        if (!dev->agp || !dev->agp->cant_use_aperture)
                goto vm_fault_error;

        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash))
                goto vm_fault_error;

        r_list = drm_hash_entry(hash, struct drm_map_list, hash);
        map = r_list->map;

        if (map && map->type == _DRM_AGP) {
                /*
                 * Using vm_pgoff as a selector forces us to use this unusual
                 * addressing scheme.
                 */
                resource_size_t offset = (unsigned long)vmf->virtual_address -
                        vma->vm_start;
                resource_size_t baddr = map->offset + offset;
                struct drm_agp_mem *agpmem;
                struct page *page;

#ifdef __alpha__
                /*
                 * Adjust to a bus-relative address
                 */
                baddr -= dev->hose->mem_space->start;
#endif

                /*
                 * It's AGP memory - find the real physical page to map
                 */
                list_for_each_entry(agpmem, &dev->agp->memory, head) {
                        if (agpmem->bound <= baddr &&
                            agpmem->bound + agpmem->pages * PAGE_SIZE > baddr)
                                break;
                }

                if (&agpmem->head == &dev->agp->memory)
                        goto vm_fault_error;

                /*
                 * Get the page, inc the use count, and return it
                 */
                offset = (baddr - agpmem->bound) >> PAGE_SHIFT;
                page = agpmem->memory->pages[offset];
                get_page(page);
                vmf->page = page;

                DRM_DEBUG
                    ("baddr = 0x%llx page = 0x%p, offset = 0x%llx, count=%d\n",
                     (unsigned long long)baddr,
                     agpmem->memory->pages[offset],
                     (unsigned long long)offset,
                     page_count(page));
                return 0;
        }
vm_fault_error:
        return VM_FAULT_SIGBUS; /* Disallow mremap */
}
#else                          /* __OS_HAS_AGP */
static int drm_do_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}
#endif                         /* __OS_HAS_AGP */

/**
 * \c fault method for shared virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault details, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure; the faulted page
 * is returned through \p vmf.
 *
 * Get the mapping, find the real physical page to map, get the page, and
 * return it.
 */
static int drm_do_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_local_map *map = vma->vm_private_data;
        unsigned long offset;
        unsigned long i;
        struct page *page;

        if (!map)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;
        i = (unsigned long)map->handle + offset;
        page = vmalloc_to_page((void *)i);
        if (!page)
                return VM_FAULT_SIGBUS;
        get_page(page);
        vmf->page = page;

        DRM_DEBUG("shm_fault 0x%lx\n", offset);
        return 0;
}

/**
 * \c close method for shared virtual memory.
 *
 * \param vma virtual memory area.
 *
 * Deletes the map information if we are the last
 * person to close a mapping and it's not in the global maplist.
 */
static void drm_vm_shm_close(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_vma_entry *pt, *temp;
        struct drm_local_map *map;
        struct drm_map_list *r_list;
        int found_maps = 0;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);

        map = vma->vm_private_data;

        mutex_lock(&dev->struct_mutex);
        list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
                if (pt->vma->vm_private_data == map)
                        found_maps++;
                if (pt->vma == vma) {
                        list_del(&pt->head);
                        kfree(pt);
                }
        }

        /* We were the only map that was found */
        if (found_maps == 1 && map->flags & _DRM_REMOVABLE) {
                /* Check to see if we are in the maplist; if we are not, then
                 * we delete this mapping's information.
                 */
                found_maps = 0;
                list_for_each_entry(r_list, &dev->maplist, head) {
                        if (r_list->map == map)
                                found_maps++;
                }

                if (!found_maps) {
                        drm_dma_handle_t dmah;

                        switch (map->type) {
                        case _DRM_REGISTERS:
                        case _DRM_FRAME_BUFFER:
                                arch_phys_wc_del(map->mtrr);
                                iounmap(map->handle);
                                break;
                        case _DRM_SHM:
                                vfree(map->handle);
                                break;
                        case _DRM_AGP:
                        case _DRM_SCATTER_GATHER:
                                break;
                        case _DRM_CONSISTENT:
                                dmah.vaddr = map->handle;
                                dmah.busaddr = map->offset;
                                dmah.size = map->size;
                                __drm_pci_free(dev, &dmah);
                                break;
                        }
                        kfree(map);
                }
        }
        mutex_unlock(&dev->struct_mutex);
}

/**
 * \c fault method for DMA virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault details, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure; the faulted page
 * is returned through \p vmf.
 *
 * Determine the page number from the page offset and get it from
 * drm_device_dma::pagelist.
 */
static int drm_do_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_device_dma *dma = dev->dma;
        unsigned long offset;
        unsigned long page_nr;
        struct page *page;

        if (!dma)
                return VM_FAULT_SIGBUS; /* Error */
        if (!dma->pagelist)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;
                                        /* vm_[pg]off[set] should be 0 */
        page_nr = offset >> PAGE_SHIFT; /* page_nr could just be vmf->pgoff */
        page = virt_to_page((void *)dma->pagelist[page_nr]);

        get_page(page);
        vmf->page = page;

        DRM_DEBUG("dma_fault 0x%lx (page %lu)\n", offset, page_nr);
        return 0;
}

/**
 * \c fault method for scatter-gather virtual memory.
 *
 * \param vma virtual memory area.
 * \param vmf fault details, including the faulting address.
 * \return zero on success, or VM_FAULT_SIGBUS on failure; the faulted page
 * is returned through \p vmf.
 *
 * Determine the map offset from the page offset and get it from
 * drm_sg_mem::pagelist.
 */
static int drm_do_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_local_map *map = vma->vm_private_data;
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_sg_mem *entry = dev->sg;
        unsigned long offset;
        unsigned long map_offset;
        unsigned long page_offset;
        struct page *page;

        if (!entry)
                return VM_FAULT_SIGBUS; /* Error */
        if (!entry->pagelist)
                return VM_FAULT_SIGBUS; /* Nothing allocated */

        offset = (unsigned long)vmf->virtual_address - vma->vm_start;
        map_offset = map->offset - (unsigned long)dev->sg->virtual;
        page_offset = (offset >> PAGE_SHIFT) + (map_offset >> PAGE_SHIFT);
        page = entry->pagelist[page_offset];
        get_page(page);
        vmf->page = page;

        return 0;
}

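/*
 * Worked example of the page_offset arithmetic in drm_do_vm_sg_fault()
 * above, with hypothetical numbers: assuming 4 KiB pages, a map whose
 * offset lies 0x3000 bytes past dev->sg->virtual, and a fault 0x1000 bytes
 * into the VMA, page_offset = (0x1000 >> 12) + (0x3000 >> 12) = 4, i.e. the
 * fifth page in entry->pagelist.
 */
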
static int drm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_fault(vma, vmf);
}

static int drm_vm_shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_shm_fault(vma, vmf);
}

static int drm_vm_dma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_dma_fault(vma, vmf);
}

static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        return drm_do_vm_sg_fault(vma, vmf);
}

/** AGP virtual memory operations */
static const struct vm_operations_struct drm_vm_ops = {
        .fault = drm_vm_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Shared virtual memory operations */
static const struct vm_operations_struct drm_vm_shm_ops = {
        .fault = drm_vm_shm_fault,
        .open = drm_vm_open,
        .close = drm_vm_shm_close,
};

/** DMA virtual memory operations */
static const struct vm_operations_struct drm_vm_dma_ops = {
        .fault = drm_vm_dma_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

/** Scatter-gather virtual memory operations */
static const struct vm_operations_struct drm_vm_sg_ops = {
        .fault = drm_vm_sg_fault,
        .open = drm_vm_open,
        .close = drm_vm_close,
};

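/*
 * drm_mmap_locked() below selects one of these tables per map type:
 * drm_vm_ops for _DRM_AGP, _DRM_FRAME_BUFFER and _DRM_REGISTERS,
 * drm_vm_shm_ops for _DRM_SHM and _DRM_CONSISTENT, drm_vm_sg_ops for
 * _DRM_SCATTER_GATHER, and drm_vm_dma_ops for the page-offset-0 DMA area.
 */
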
/**
 * \c open method for all virtual memory types.
 *
 * \param dev DRM device.
 * \param vma virtual memory area.
 *
 * Create a new drm_vma_entry structure as the \p vma private data entry and
 * add it to drm_device::vmalist.
 */
void drm_vm_open_locked(struct drm_device *dev,
                        struct vm_area_struct *vma)
{
        struct drm_vma_entry *vma_entry;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);

        vma_entry = kmalloc(sizeof(*vma_entry), GFP_KERNEL);
        if (vma_entry) {
                vma_entry->vma = vma;
                vma_entry->pid = current->pid;
                list_add(&vma_entry->head, &dev->vmalist);
        }
}
EXPORT_SYMBOL_GPL(drm_vm_open_locked);

static void drm_vm_open(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_open_locked(dev, vma);
        mutex_unlock(&dev->struct_mutex);
}

void drm_vm_close_locked(struct drm_device *dev,
                         struct vm_area_struct *vma)
{
        struct drm_vma_entry *pt, *temp;

        DRM_DEBUG("0x%08lx,0x%08lx\n",
                  vma->vm_start, vma->vm_end - vma->vm_start);

        list_for_each_entry_safe(pt, temp, &dev->vmalist, head) {
                if (pt->vma == vma) {
                        list_del(&pt->head);
                        kfree(pt);
                        break;
                }
        }
}

/**
 * \c close method for all virtual memory types.
 *
 * \param vma virtual memory area.
 *
 * Search the \p vma private data entry in drm_device::vmalist, unlink it, and
 * free it.
 */
static void drm_vm_close(struct vm_area_struct *vma)
{
        struct drm_file *priv = vma->vm_file->private_data;
        struct drm_device *dev = priv->minor->dev;

        mutex_lock(&dev->struct_mutex);
        drm_vm_close_locked(dev, vma);
        mutex_unlock(&dev->struct_mutex);
}

/**
 * mmap DMA memory.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * Sets the virtual memory area operations structure to drm_vm_dma_ops and
 * calls drm_vm_open_locked().
 */
static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev;
        struct drm_device_dma *dma;
        unsigned long length = vma->vm_end - vma->vm_start;

        dev = priv->minor->dev;
        dma = dev->dma;
        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);

        /* Length must match exact page count */
        if (!dma || (length >> PAGE_SHIFT) != dma->page_count) {
                return -EINVAL;
        }

        if (!capable(CAP_SYS_ADMIN) &&
            (dma->flags & _DRM_DMA_USE_PCI_RO)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot =
                    __pgprot(pte_val
                             (pte_wrprotect
                              (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        vma->vm_ops = &drm_vm_dma_ops;

        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

        drm_vm_open_locked(dev, vma);
        return 0;
}

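/*
 * Illustrative userspace sketch (an assumption, not part of this file): the
 * DMA path above is reached by mmap()ing the device node at page offset 0
 * with a length covering every DMA page; "fd" and "dma_size" are
 * hypothetical names for the open device and dma->page_count << PAGE_SHIFT:
 *
 *      void *bufs = mmap(NULL, dma_size, PROT_READ | PROT_WRITE,
 *                        MAP_SHARED, fd, 0);
 */
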
static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
        return dev->hose->dense_mem_base;
#else
        return 0;
#endif
}

/**
 * mmap a DRM memory map.
 *
 * \param filp file pointer.
 * \param vma virtual memory area.
 * \return zero on success or a negative number on failure.
 *
 * If the virtual memory area has no offset associated with it then it's a DMA
 * area, so calls mmap_dma(). Otherwise looks the map up in
 * drm_device::map_hash, checks that the restricted flag is not set, sets the
 * virtual memory operations according to the mapping type and remaps the
 * pages. Finally calls drm_vm_open_locked().
 */
int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        struct drm_local_map *map = NULL;
        resource_size_t offset = 0;
        struct drm_hash_item *hash;

        DRM_DEBUG("start = 0x%lx, end = 0x%lx, page offset = 0x%lx\n",
                  vma->vm_start, vma->vm_end, vma->vm_pgoff);

        if (!priv->authenticated)
                return -EACCES;

        /* We check for "dma". On Apple's UniNorth, it's valid to have
         * the AGP mapped at physical address 0
         * --BenH.
         */
        if (!vma->vm_pgoff
#if __OS_HAS_AGP
            && (!dev->agp
                || dev->agp->agp_info.device->vendor != PCI_VENDOR_ID_APPLE)
#endif
            )
                return drm_mmap_dma(filp, vma);

        if (drm_ht_find_item(&dev->map_hash, vma->vm_pgoff, &hash)) {
                DRM_ERROR("Could not find map\n");
                return -EINVAL;
        }

        map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
        if (!map || ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN)))
                return -EPERM;

        /* Check for valid size. */
        if (map->size < vma->vm_end - vma->vm_start)
                return -EINVAL;

        if (!capable(CAP_SYS_ADMIN) && (map->flags & _DRM_READ_ONLY)) {
                vma->vm_flags &= ~(VM_WRITE | VM_MAYWRITE);
#if defined(__i386__) || defined(__x86_64__)
                pgprot_val(vma->vm_page_prot) &= ~_PAGE_RW;
#else
                /* Ye gads this is ugly.  With more thought
                   we could move this up higher and use
                   `protection_map' instead.  */
                vma->vm_page_prot =
                    __pgprot(pte_val
                             (pte_wrprotect
                              (__pte(pgprot_val(vma->vm_page_prot)))));
#endif
        }

        switch (map->type) {
#if !defined(__arm__)
        case _DRM_AGP:
                if (dev->agp && dev->agp->cant_use_aperture) {
                        /*
                         * On some platforms we can't talk to bus dma address
                         * from the CPU, so for memory of type DRM_AGP, we'll
                         * deal with sorting out the real physical pages and
                         * mappings in fault()
                         */
#if defined(__powerpc__)
                        pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
#endif
                        vma->vm_ops = &drm_vm_ops;
                        break;
                }
                /* fall through to _DRM_FRAME_BUFFER... */
#endif
        case _DRM_FRAME_BUFFER:
        case _DRM_REGISTERS:
                offset = drm_core_get_reg_ofs(dev);
                vma->vm_page_prot = drm_io_prot(map, vma);
                if (io_remap_pfn_range(vma, vma->vm_start,
                                       (map->offset + offset) >> PAGE_SHIFT,
                                       vma->vm_end - vma->vm_start,
                                       vma->vm_page_prot))
                        return -EAGAIN;
                DRM_DEBUG("   Type = %d; start = 0x%lx, end = 0x%lx,"
                          " offset = 0x%llx\n",
                          map->type,
                          vma->vm_start, vma->vm_end,
                          (unsigned long long)(map->offset + offset));

                vma->vm_ops = &drm_vm_ops;
                break;
        case _DRM_CONSISTENT:
                /* Consistent memory is really like shared memory. But
                 * it's allocated in a different way, so avoid fault */
                if (remap_pfn_range(vma, vma->vm_start,
                                    page_to_pfn(virt_to_page(map->handle)),
                                    vma->vm_end - vma->vm_start,
                                    vma->vm_page_prot))
                        return -EAGAIN;
                vma->vm_page_prot = drm_dma_prot(map->type, vma);
                /* fall through to _DRM_SHM */
        case _DRM_SHM:
                vma->vm_ops = &drm_vm_shm_ops;
                vma->vm_private_data = (void *)map;
                break;
        case _DRM_SCATTER_GATHER:
                vma->vm_ops = &drm_vm_sg_ops;
                vma->vm_private_data = (void *)map;
                vma->vm_page_prot = drm_dma_prot(map->type, vma);
                break;
        default:
                return -EINVAL; /* This should never happen. */
        }
        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;

        drm_vm_open_locked(dev, vma);
        return 0;
}

int drm_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *priv = filp->private_data;
        struct drm_device *dev = priv->minor->dev;
        int ret;

        if (drm_device_is_unplugged(dev))
                return -ENODEV;

        mutex_lock(&dev->struct_mutex);
        ret = drm_mmap_locked(filp, vma);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
EXPORT_SYMBOL(drm_mmap);

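/*
 * Illustrative userspace sketch (an assumption, not part of this file): for
 * non-DMA maps, userspace passes the map's offset token as the mmap()
 * offset, which drm_mmap_locked() resolves via dev->map_hash. With libdrm's
 * legacy helpers this is roughly:
 *
 *      drmAddress addr;
 *      if (drmMap(fd, handle, size, &addr) == 0) {
 *              ... access the mapping ...
 *              drmUnmap(addr, size);
 *      }
 *
 * where "fd", "handle" and "size" come from the caller.
 */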