/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <drm/drmP.h>
#include <drm/drm_vma_manager.h>

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file.  However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls.  The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */
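
/*
 * For illustration, the userspace half of the handle-as-fd analogy (a
 * hedged sketch using libdrm's drmIoctl(); not part of this file): once
 * userspace is done with a handle, it closes it much like close(2) on an
 * fd, via the GEM_CLOSE ioctl handled by drm_gem_close_ioctl() below:
 *
 *	struct drm_gem_close close_args = { .handle = handle };
 *	drmIoctl(fd, DRM_IOCTL_GEM_CLOSE, &close_args);
 */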

/*
 * We make up offsets for buffer objects so we can recognize them at
 * mmap time.
 */

/* pgoff in mmap is an unsigned long, so we need to make sure that
 * the faked up offset will fit */

#if BITS_PER_LONG == 64
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#else
#define DRM_FILE_PAGE_OFFSET_START ((0xFFFFFFFUL >> PAGE_SHIFT) + 1)
#define DRM_FILE_PAGE_OFFSET_SIZE ((0xFFFFFFFUL >> PAGE_SHIFT) * 16)
#endif
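
/*
 * Worked example: with PAGE_SHIFT == 12 on a 64-bit kernel, the start
 * offset is (0xFFFFFFFF >> 12) + 1 == 0x100000 pages, i.e. the faked
 * offsets begin at the 4GB boundary, above anything a 32-bit pgoff could
 * produce, and span roughly 16 times that range (~64GB of offset space).
 */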

/**
 * Initialize the GEM device fields
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_gem_mm *mm;

	mutex_init(&dev->object_name_lock);
	idr_init(&dev->object_name_idr);

	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
	if (!mm) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->mm_private = mm;
	drm_vma_offset_manager_init(&mm->vma_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return 0;
}

void
drm_gem_destroy(struct drm_device *dev)
{
	struct drm_gem_mm *mm = dev->mm_private;

	drm_vma_offset_manager_destroy(&mm->vma_manager);
	kfree(mm);
	dev->mm_private = NULL;
}

/**
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
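
/*
 * A minimal usage sketch (hypothetical driver type, assuming the common
 * pattern of embedding the GEM object in a driver-private structure):
 *
 *	struct foo_bo {
 *		struct drm_gem_object base;
 *		// driver-private state
 *	};
 *
 *	ret = drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(args->size));
 *	if (ret)
 *		goto err_free;
 */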

/**
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
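
/*
 * Sketch of the no-backing-store case (hedged; names are illustrative):
 * a prime-importing driver keeps the pages in the attached sg_table and
 * therefore skips shmem entirely:
 *
 *	drm_gem_private_object_init(dev, &bo->base, attach->dmabuf->size);
 *	bo->sgt = sgt;
 */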

/**
 * Allocate a GEM object of the specified size with shmfs backing store
 */
struct drm_gem_object *
drm_gem_object_alloc(struct drm_device *dev, size_t size)
{
	struct drm_gem_object *obj;

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		goto free;

	if (drm_gem_object_init(dev, obj, size) != 0)
		goto free;

	if (dev->driver->gem_init_object != NULL &&
	    dev->driver->gem_init_object(obj) != 0) {
		goto fput;
	}
	return obj;
fput:
	/* Object_init mangles the global counters - readjust them. */
	fput(obj->filp);
free:
	kfree(obj);
	return NULL;
}
EXPORT_SYMBOL(drm_gem_object_alloc);

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
						   obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
}

static void drm_gem_object_ref_bug(struct kref *list_kref)
{
	BUG();
}

/**
 * Called after the last handle to the object has been closed
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free or we'll be touching
 * freed memory
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
		/*
		 * The object name held a reference to this object, drop
		 * that now.
		 *
		 * This cannot be the last reference, since the handle holds one too.
		 */
		kref_put(&obj->refcount, drm_gem_object_ref_bug);
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
{
	if (WARN_ON(obj->handle_count == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name
	 */

	mutex_lock(&obj->dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
	}
	mutex_unlock(&obj->dev->object_name_lock);

	drm_gem_object_unreference_unlocked(obj);
}

/**
 * Removes the mapping from handle to filp for this object.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_device *dev;
	struct drm_gem_object *obj;

	/* This is gross. The idr system doesn't let us try a delete and
	 * return an error code.  It just spews if you fail at deleting.
	 * So, we have to grab a lock around finding the object and then
	 * doing the delete on it and dropping the refcount, or the user
	 * could race us to double-decrement the refcount and cause a
	 * use-after-free later.  Given the frequency of our handle lookups,
	 * we may want to use ida for number allocation and a hash table
	 * for the pointers, anyway.
	 */
	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return -EINVAL;
	}
	dev = obj->dev;

	/* Release reference and decrement refcount. */
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, filp);
	drm_vma_node_revoke(&obj->vma_node, filp->filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, filp);
	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_destroy - dumb fb callback helper for gem based drivers
 *
 * This implements the ->dumb_destroy kms driver callback for drivers which use
 * gem to manage their backing storage.
 */
int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}
EXPORT_SYMBOL(drm_gem_dumb_destroy);

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 *
 * This expects the dev->object_name_lock to be held already and will drop it
 * before returning. Used to avoid races in establishing new handles when
 * importing an object from either an flink name or a dma-buf.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));

	/*
	 * Get the user-visible handle using idr.  Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
	drm_gem_object_reference(obj);
	obj->handle_count++;
	spin_unlock(&file_priv->table_lock);
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	if (ret < 0) {
		drm_gem_object_handle_unreference_unlocked(obj);
		return ret;
	}
	*handlep = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
	if (ret) {
		drm_gem_handle_delete(file_priv, *handlep);
		return ret;
	}

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}

/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		      struct drm_gem_object *obj,
		      u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
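
/*
 * Typical calling pattern (a sketch, not mandated by this file): the
 * driver drops its initial reference once the handle exists, leaving the
 * handle as the sole owner:
 *
 *	ret = drm_gem_handle_create(file_priv, &bo->base, &handle);
 *	drm_gem_object_unreference_unlocked(&bo->base);
 *	if (ret)
 *		return ret;
 *	args->handle = handle;
 */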

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;

	drm_vma_offset_remove(&mm->vma_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. obj->size).  Otherwise
 * just use drm_gem_create_mmap_offset().
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;

	return drm_vma_offset_add(&mm->vma_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
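
/*
 * Usage sketch for a ->dumb_map_offset implementation (hypothetical
 * driver code): allocate the fake offset, then report it to userspace
 * via the vma manager:
 *
 *	ret = drm_gem_create_mmap_offset(obj);
 *	if (ret == 0)
 *		*offset = drm_vma_node_offset_addr(&obj->vma_node);
 */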

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 * @gfpmask: gfp mask of requested pages
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
{
	struct inode *inode;
	struct address_space *mapping;
	struct page *p, **pages;
	int i, npages;

	/* This is the shared memory object that backs the GEM resource */
	inode = file_inode(obj->filp);
	mapping = inode->i_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	gfpmask |= mapping_gfp_mask(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* There is a hypothetical issue w/ drivers that require
		 * buffer memory in the low 4GB.. if the pages are un-
		 * pinned, and swapped out, they can end up swapped back
		 * in above 4GB.  If pages are already in memory, then
		 * shmem_read_mapping_page_gfp will ignore the gfpmask,
		 * even if the already in-memory page disobeys the mask.
		 *
		 * It is only a theoretical issue today, because none of
		 * the devices with this limitation can be populated with
		 * enough memory to trigger the issue.  But this BUG_ON()
		 * is here as a reminder in case the problem with
		 * shmem_read_mapping_page_gfp() isn't solved by the time
		 * it does become a real issue.
		 *
		 * See this thread: http://lkml.org/lkml/2011/7/11/238
		 */
		BUG_ON((gfpmask & __GFP_DMA32) &&
				(page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	while (i--)
		page_cache_release(pages[i]);

	drm_free_large(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		bool dirty, bool accessed)
{
	int i, npages;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	for (i = 0; i < npages; i++) {
		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		page_cache_release(pages[i]);
	}

	drm_free_large(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
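
/*
 * The two helpers above pair up; a hedged sketch of a driver pinning and
 * releasing its backing store (an extra gfp mask of 0 just keeps the
 * mapping's own gfp mask, which drm_gem_get_pages() ORs in):
 *
 *	pages = drm_gem_get_pages(obj, 0);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *	// ... map pages for DMA and use them ...
 *	drm_gem_put_pages(obj, pages, true, true);
 */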

/** Returns a reference to the object named by the handle. */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
		      u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_find(&filp->object_idr, handle);
	if (obj == NULL) {
		spin_unlock(&filp->table_lock);
		return NULL;
	}

	drm_gem_object_reference(obj);

	spin_unlock(&filp->table_lock);

	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);
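
/*
 * Lookup returns with a reference held, so every successful call must be
 * balanced by an unreference, e.g. in an ioctl handler (sketch):
 *
 *	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 *	if (obj == NULL)
 *		return -ENOENT;
 *	// ... use obj ...
 *	drm_gem_object_unreference_unlocked(obj);
 */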

/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	idr_preload(GFP_KERNEL);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
		if (ret < 0)
			goto err;

		obj->name = ret;

		/* Allocate a reference for the name table. */
		drm_gem_object_reference(obj);
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}

/**
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!(dev->driver->driver_features & DRIVER_GEM))
		return -ENODEV;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_reference(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	args->size = obj->size;

	return 0;
}

/**
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init(&file_private->object_idr);
	spin_lock_init(&file_private->table_lock);
}

/*
 * Called at device close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;
	struct drm_device *dev = obj->dev;

	if (drm_core_check_feature(dev, DRIVER_PRIME))
		drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);

	if (dev->driver->gem_close_object)
		dev->driver->gem_close_object(obj, file_priv);

	drm_gem_object_handle_unreference_unlocked(obj);

	return 0;
}

/**
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	if (obj->filp)
		fput(obj->filp);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex
 *
 * Frees the object
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
	struct drm_device *dev = obj->dev;

	BUG_ON(!mutex_is_locked(&dev->struct_mutex));

	if (dev->driver->gem_free_object != NULL)
		dev->driver->gem_free_object(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_reference(obj);

	mutex_lock(&obj->dev->struct_mutex);
	drm_vm_open_locked(obj->dev, vma);
	mutex_unlock(&obj->dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_open);

void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;

	mutex_lock(&dev->struct_mutex);
	drm_vm_close_locked(obj->dev, vma);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
 * provided by the driver. Depending on their requirements, drivers can either
 * provide a fault handler in their gem_vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * NOTE: This function has to be protected with dev->struct_mutex
 *
 * Return 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no gem_vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	struct drm_device *dev = obj->dev;

	lockdep_assert_held(&dev->struct_mutex);

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (!dev->driver->gem_vm_ops)
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = dev->driver->gem_vm_ops;
	vma->vm_private_data = obj;
	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_reference(obj);

	drm_vm_open_locked(dev, vma);
	return 0;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);
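
/*
 * Illustrative dma-buf ->mmap implementation built on this helper
 * (hypothetical function name; assumes the exporter stored the GEM
 * object in dma_buf->priv):
 *
 *	static int foo_dmabuf_mmap(struct dma_buf *dma_buf,
 *				   struct vm_area_struct *vma)
 *	{
 *		struct drm_gem_object *obj = dma_buf->priv;
 *		int ret;
 *
 *		mutex_lock(&obj->dev->struct_mutex);
 *		ret = drm_gem_mmap_obj(obj, obj->size, vma);
 *		mutex_unlock(&obj->dev->struct_mutex);
 *		return ret;
 *	}
 */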

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_gem_object *obj;
	struct drm_vma_offset_node *node;
	int ret = 0;

	if (drm_device_is_unplugged(dev))
		return -ENODEV;

	mutex_lock(&dev->struct_mutex);

	node = drm_vma_offset_exact_lookup(&mm->vma_manager, vma->vm_pgoff,
					   vma_pages(vma));
	if (!node) {
		mutex_unlock(&dev->struct_mutex);
		return drm_mmap(filp, vma);
	} else if (!drm_vma_node_is_allowed(node, filp)) {
		mutex_unlock(&dev->struct_mutex);
		return -EACCES;
	}

	obj = container_of(node, struct drm_gem_object, vma_node);
	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);

	mutex_unlock(&dev->struct_mutex);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
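
/*
 * Drivers opt in to this path simply by routing their file_operations
 * mmap hook here (sketch; other hooks elided):
 *
 *	static const struct file_operations foo_driver_fops = {
 *		.owner = THIS_MODULE,
 *		.open = drm_open,
 *		.release = drm_release,
 *		.unlocked_ioctl = drm_ioctl,
 *		.mmap = drm_gem_mmap,
 *	};
 */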